Creating branches/google/stable and tags/google/stable/2019-01-18 from r351319

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/google/stable@351578 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/.gitignore b/.gitignore
index fd30887..0aa0a8a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,6 +43,8 @@
 /compile_commands.json
 # Visual Studio built-in CMake configuration
 /CMakeSettings.json
+# CLion project configuration
+/.idea
 
 #==============================================================================#
 # Directories to ignore (do not add trailing '/'s, they skip symlinks).
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e2e35dc..b144be5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -104,7 +104,7 @@
 # LLVM_EXTERNAL_${project}_SOURCE_DIR using LLVM_ALL_PROJECTS
 # This makes it easy to set up one build directory for llvm and another
 # for llvm+clang+... using the same sources.
-set(LLVM_ALL_PROJECTS "clang;libcxx;libcxxabi;lldb;compiler-rt;lld;polly;debuginfo-tests")
+set(LLVM_ALL_PROJECTS "clang;libcxx;libcxxabi;libunwind;lldb;compiler-rt;lld;polly;debuginfo-tests")
 set(LLVM_ENABLE_PROJECTS "" CACHE STRING
 	"Semicolon-separated list of projects to build (${LLVM_ALL_PROJECTS}), or \"all\".")
 if( LLVM_ENABLE_PROJECTS STREQUAL "all" )
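
With libunwind added to LLVM_ALL_PROJECTS, it can be enabled alongside the
other sub-projects at configure time. A minimal sketch of such an invocation
(the generator and project list are illustrative, not mandated by this change):

    cmake -G Ninja -DLLVM_ENABLE_PROJECTS="clang;lld;libunwind" ../llvm

Passing "all" instead selects the full LLVM_ALL_PROJECTS list, per the
STREQUAL check above.
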
diff --git a/CODE_OWNERS.TXT b/CODE_OWNERS.TXT
index 67b91f2..a6e10fc 100644
--- a/CODE_OWNERS.TXT
+++ b/CODE_OWNERS.TXT
@@ -71,7 +71,7 @@
 N: Andrea Di Biagio
 E: andrea.dibiagio@sony.com
 E: andrea.dibiagio@gmail.com
-D: llvm-mca
+D: MCA, llvm-mca
 
 N: Duncan P. N. Exon Smith
 E: dexonsmith@apple.com
diff --git a/LICENSE.TXT b/LICENSE.TXT
index 461398b..e4d67d1 100644
--- a/LICENSE.TXT
+++ b/LICENSE.TXT
@@ -4,7 +4,7 @@
 University of Illinois/NCSA
 Open Source License
 
-Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign.
+Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
 All rights reserved.
 
 Developed by:
diff --git a/RELEASE_TESTERS.TXT b/RELEASE_TESTERS.TXT
index 117075c..2e5ac1b 100644
--- a/RELEASE_TESTERS.TXT
+++ b/RELEASE_TESTERS.TXT
@@ -13,8 +13,8 @@
 
 N: Sylvestre Ledru
 E: sylvestre@debian.org
-T: x86
-O: Debian
+T: All supported archs Debian/Ubuntu
+O: Debian/Ubuntu packages
 
 N: Nikola Smiljanic
 E: popizdeh@gmail.com
diff --git a/bindings/go/llvm/InstrumentationBindings.cpp b/bindings/go/llvm/InstrumentationBindings.cpp
index 8b7bafa..c3b4f2e 100644
--- a/bindings/go/llvm/InstrumentationBindings.cpp
+++ b/bindings/go/llvm/InstrumentationBindings.cpp
@@ -16,6 +16,8 @@
 #include "llvm/IR/LegacyPassManager.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
 
 using namespace llvm;
 
@@ -28,11 +30,11 @@
 }
 
 void LLVMAddThreadSanitizerPass(LLVMPassManagerRef PM) {
-  unwrap(PM)->add(createThreadSanitizerPass());
+  unwrap(PM)->add(createThreadSanitizerLegacyPassPass());
 }
 
-void LLVMAddMemorySanitizerPass(LLVMPassManagerRef PM) {
-  unwrap(PM)->add(createMemorySanitizerPass());
+void LLVMAddMemorySanitizerLegacyPassPass(LLVMPassManagerRef PM) {
+  unwrap(PM)->add(createMemorySanitizerLegacyPassPass());
 }
 
 void LLVMAddDataFlowSanitizerPass(LLVMPassManagerRef PM,
diff --git a/bindings/go/llvm/InstrumentationBindings.h b/bindings/go/llvm/InstrumentationBindings.h
index 97af2d5..5d448fc 100644
--- a/bindings/go/llvm/InstrumentationBindings.h
+++ b/bindings/go/llvm/InstrumentationBindings.h
@@ -27,7 +27,7 @@
 void LLVMAddAddressSanitizerFunctionPass(LLVMPassManagerRef PM);
 void LLVMAddAddressSanitizerModulePass(LLVMPassManagerRef PM);
 void LLVMAddThreadSanitizerPass(LLVMPassManagerRef PM);
-void LLVMAddMemorySanitizerPass(LLVMPassManagerRef PM);
+void LLVMAddMemorySanitizerLegacyPassPass(LLVMPassManagerRef PM);
 void LLVMAddDataFlowSanitizerPass(LLVMPassManagerRef PM, int ABIListFilesNum,
                                   const char **ABIListFiles);
 
diff --git a/bindings/go/llvm/transforms_instrumentation.go b/bindings/go/llvm/transforms_instrumentation.go
index 73e2732..73d093a 100644
--- a/bindings/go/llvm/transforms_instrumentation.go
+++ b/bindings/go/llvm/transforms_instrumentation.go
@@ -32,8 +32,8 @@
 	C.LLVMAddThreadSanitizerPass(pm.C)
 }
 
-func (pm PassManager) AddMemorySanitizerPass() {
-	C.LLVMAddMemorySanitizerPass(pm.C)
+func (pm PassManager) AddMemorySanitizerLegacyPassPass() {
+	C.LLVMAddMemorySanitizerLegacyPassPass(pm.C)
 }
 
 func (pm PassManager) AddDataFlowSanitizerPass(abilist []string) {
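
For context, a minimal sketch of driving the renamed binding from Go, assuming
the bindings are imported from their in-tree path (the wrapper function below
is illustrative only):

    package main

    import "llvm.org/llvm/bindings/go/llvm"

    func addSanitizers() {
        // PassManager wraps an LLVM legacy pass manager.
        pm := llvm.NewPassManager()
        defer pm.Dispose()

        pm.AddThreadSanitizerPass()
        // Renamed from AddMemorySanitizerPass to match the C symbol.
        pm.AddMemorySanitizerLegacyPassPass()
    }
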
diff --git a/bindings/python/llvm/core.py b/bindings/python/llvm/core.py
index 6b3da6d..81e354a 100644
--- a/bindings/python/llvm/core.py
+++ b/bindings/python/llvm/core.py
@@ -6,6 +6,7 @@
 # License. See LICENSE.TXT for details.
 #
 #===------------------------------------------------------------------------===#
+from __future__ import print_function
 
 from .common import LLVMObject
 from .common import c_object_p
@@ -18,6 +19,8 @@
 from ctypes import c_char_p
 from ctypes import c_uint
 
+import sys
+
 __all__ = [
     "lib",
     "Enums",
@@ -235,7 +238,7 @@
         def __iter__(self):
             return self
         
-        def next(self):
+        def __next__(self):
             if not isinstance(self.function, Function):
                 raise StopIteration("")
             result = self.function
@@ -244,7 +247,10 @@
             else:
                 self.function = self.function.next
             return result
-    
+
+        if sys.version_info.major == 2:
+            next = __next__
+
     def __iter__(self):
         return Module.__function_iterator(self)
 
@@ -303,7 +309,7 @@
         def __iter__(self):
             return self
         
-        def next(self):
+        def __next__(self):
             if not isinstance(self.bb, BasicBlock):
                 raise StopIteration("")
             result = self.bb
@@ -312,6 +318,9 @@
             else:
                 self.bb = self.bb.next
             return result
+
+        if sys.version_info.major == 2:
+            next = __next__
     
     def __iter__(self):
         return Function.__bb_iterator(self)
@@ -380,7 +389,7 @@
         def __iter__(self):
             return self
         
-        def next(self):
+        def __next__(self):
             if not isinstance(self.inst, Instruction):
                 raise StopIteration("")
             result = self.inst
@@ -389,7 +398,10 @@
             else:
                 self.inst = self.inst.next
             return result
-    
+
+        if sys.version_info.major == 2:
+            next = __next__
+
     def __iter__(self):
         return BasicBlock.__inst_iterator(self)
 
@@ -605,7 +617,7 @@
     ]
     for enum_class, enum_spec in enums:
         for name, value in enum_spec:
-            print name, value
+            print(name, value)
             enum_class.register(name, value)
     return enums
 
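
The iterator changes above follow the standard 2/3 compatibility pattern:
implement __next__ for Python 3 and alias it to next only when running under
Python 2. A self-contained sketch of the same idiom (the Countdown class is
illustrative):

    import sys

    class Countdown(object):
        """Yields n, n-1, ..., 1 on both Python 2 and Python 3."""
        def __init__(self, n):
            self.n = n

        def __iter__(self):
            return self

        def __next__(self):  # Python 3 iterator protocol
            if self.n <= 0:
                raise StopIteration()
            self.n -= 1
            return self.n + 1

        if sys.version_info.major == 2:
            next = __next__  # Python 2 calls .next() instead

    assert list(Countdown(3)) == [3, 2, 1]
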
diff --git a/bindings/python/llvm/tests/base.py b/bindings/python/llvm/tests/base.py
index 194f1a4..aa435bc 100644
--- a/bindings/python/llvm/tests/base.py
+++ b/bindings/python/llvm/tests/base.py
@@ -1,6 +1,8 @@
 import os.path
+import sys
 import unittest
 
+
 POSSIBLE_TEST_BINARIES = [
     'libreadline.so.5',
     'libreadline.so.6',
@@ -15,6 +17,9 @@
 ]
 
 class TestBase(unittest.TestCase):
+    if sys.version_info.major == 2:
+        assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
+
     def get_test_binary(self):
         """Helper to obtain a test binary for object file testing.
 
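
The class attribute above works because unittest's assertRaisesRegexp and
assertRaisesRegex are the same assertion under two spellings; defining the
alias only on Python 2 lets every test use the Python 3 name. A small sketch
(ExampleTest is illustrative):

    import sys
    import unittest

    class ExampleTest(unittest.TestCase):
        if sys.version_info.major == 2:
            # Python 2 only provides the assertRaisesRegexp spelling.
            assertRaisesRegex = unittest.TestCase.assertRaisesRegexp

        def test_raises(self):
            with self.assertRaisesRegex(ValueError, "bad"):
                raise ValueError("bad input")
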
diff --git a/bindings/python/llvm/tests/test_bitreader.py b/bindings/python/llvm/tests/test_bitreader.py
index d585009..460005a 100644
--- a/bindings/python/llvm/tests/test_bitreader.py
+++ b/bindings/python/llvm/tests/test_bitreader.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 from .base import TestBase
 from ..core import OpCode
 from ..core import MemoryBuffer
@@ -11,5 +13,5 @@
     def test_parse_bitcode(self):
         source = self.get_test_bc()
         m = parse_bitcode(MemoryBuffer(filename=source))
-        print m.target
-        print m.datalayout
+        print(m.target)
+        print(m.datalayout)
diff --git a/bindings/python/llvm/tests/test_core.py b/bindings/python/llvm/tests/test_core.py
index da7b635..68572b5 100644
--- a/bindings/python/llvm/tests/test_core.py
+++ b/bindings/python/llvm/tests/test_core.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 from .base import TestBase
 from ..core import MemoryBuffer
 from ..core import PassRegistry
@@ -127,7 +129,7 @@
             self.assertEqual(inst.opcode, inst_list[i][1])
             for op in range(len(inst)):
                 o = inst.get_operand(op)
-                print o.name
+                print(o.name)
                 o.dump()
             inst.dump()
             i += 1
diff --git a/bindings/python/llvm/tests/test_disassembler.py b/bindings/python/llvm/tests/test_disassembler.py
index 37a04e4..29f2f70 100644
--- a/bindings/python/llvm/tests/test_disassembler.py
+++ b/bindings/python/llvm/tests/test_disassembler.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 from .base import TestBase
 
 from ..disassembler import Disassembler, Option_UseMarkup
@@ -17,7 +19,7 @@
         self.assertEqual(s, '\tjcxz\t-127')
 
     def test_nonexistent_triple(self):
-        with self.assertRaisesRegexp(Exception, "Could not obtain disassembler for triple"):
+        with self.assertRaisesRegex(Exception, "Could not obtain disassembler for triple"):
             Disassembler("nonexistent-triple-raises")
 
     def test_get_instructions(self):
@@ -38,6 +40,6 @@
         disassembler = Disassembler(triple)
         disassembler.set_options(Option_UseMarkup)
         count, s = disassembler.get_instruction(sequence)
-        print s
+        print(s)
         self.assertEqual(count, 4)
         self.assertEqual(s, '\tpush\t{<reg:r4>, <reg:lr>}')
diff --git a/bindings/python/llvm/tests/test_object.py b/bindings/python/llvm/tests/test_object.py
index 3f92d81..a45b7be 100644
--- a/bindings/python/llvm/tests/test_object.py
+++ b/bindings/python/llvm/tests/test_object.py
@@ -1,3 +1,5 @@
+from numbers import Integral
+
 from .base import TestBase
 from ..object import ObjectFile
 from ..object import Relocation
@@ -20,9 +22,9 @@
             count += 1
             assert isinstance(section, Section)
             assert isinstance(section.name, str)
-            assert isinstance(section.size, long)
+            assert isinstance(section.size, Integral)
             assert isinstance(section.contents, str)
-            assert isinstance(section.address, long)
+            assert isinstance(section.address, Integral)
             assert len(section.contents) == section.size
 
         self.assertGreater(count, 0)
@@ -38,8 +40,8 @@
             count += 1
             assert isinstance(symbol, Symbol)
             assert isinstance(symbol.name, str)
-            assert isinstance(symbol.address, long)
-            assert isinstance(symbol.size, long)
+            assert isinstance(symbol.address, Integral)
+            assert isinstance(symbol.size, Integral)
 
         self.assertGreater(count, 0)
 
@@ -60,8 +62,8 @@
         for section in o.get_sections():
             for relocation in section.get_relocations():
                 assert isinstance(relocation, Relocation)
-                assert isinstance(relocation.address, long)
-                assert isinstance(relocation.offset, long)
-                assert isinstance(relocation.type_number, long)
+                assert isinstance(relocation.address, Integral)
+                assert isinstance(relocation.offset, Integral)
+                assert isinstance(relocation.type_number, Integral)
                 assert isinstance(relocation.type_name, str)
                 assert isinstance(relocation.value_string, str)
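
numbers.Integral is the abstract base class that Python 2's int and long and
Python 3's int all satisfy, which is why the assertions above pass unchanged
on either version. A quick sketch:

    from numbers import Integral

    assert isinstance(2 ** 10, Integral)
    assert isinstance(2 ** 100, Integral)   # long on Python 2, int on Python 3
    assert not isinstance(3.14, Integral)   # floats are Real, not Integral
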
diff --git a/cmake/modules/AddLLVM.cmake b/cmake/modules/AddLLVM.cmake
index c5aa961..4dbc0dd 100644
--- a/cmake/modules/AddLLVM.cmake
+++ b/cmake/modules/AddLLVM.cmake
@@ -616,11 +616,13 @@
 
 macro(add_llvm_library name)
   cmake_parse_arguments(ARG
-    "SHARED;BUILDTREE_ONLY"
+    "SHARED;BUILDTREE_ONLY;MODULE"
     ""
     ""
     ${ARGN})
-  if( BUILD_SHARED_LIBS OR ARG_SHARED )
+  if(ARG_MODULE)
+    llvm_add_library(${name} MODULE ${ARG_UNPARSED_ARGUMENTS})
+  elseif( BUILD_SHARED_LIBS OR ARG_SHARED )
     llvm_add_library(${name} SHARED ${ARG_UNPARSED_ARGUMENTS})
   else()
     llvm_add_library(${name} ${ARG_UNPARSED_ARGUMENTS})
@@ -629,11 +631,14 @@
  # Libraries that are meant to be exposed only via the build tree are
   # never installed and are only exported as a target in the special build tree
   # config file.
-  if (NOT ARG_BUILDTREE_ONLY)
+  if (NOT ARG_BUILDTREE_ONLY AND NOT ARG_MODULE)
     set_property( GLOBAL APPEND PROPERTY LLVM_LIBS ${name} )
   endif()
 
-  if( EXCLUDE_FROM_ALL )
+  if (ARG_MODULE AND NOT TARGET ${name})
+    # Add empty "phony" target
+    add_custom_target(${name})
+  elseif( EXCLUDE_FROM_ALL )
     set_target_properties( ${name} PROPERTIES EXCLUDE_FROM_ALL ON)
   elseif(ARG_BUILDTREE_ONLY)
     set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS_BUILDTREE_ONLY ${name})
@@ -642,7 +647,7 @@
         ${name} STREQUAL "OptRemarks" OR
         (LLVM_LINK_LLVM_DYLIB AND ${name} STREQUAL "LLVM"))
       set(install_dir lib${LLVM_LIBDIR_SUFFIX})
-      if(ARG_SHARED OR BUILD_SHARED_LIBS)
+      if(ARG_MODULE OR ARG_SHARED OR BUILD_SHARED_LIBS)
         if(WIN32 OR CYGWIN OR MINGW)
           set(install_type RUNTIME)
           set(install_dir bin)
@@ -653,6 +658,10 @@
         set(install_type ARCHIVE)
       endif()
 
+      if (ARG_MODULE)
+        set(install_type LIBRARY)
+      endif()
+
       if(${name} IN_LIST LLVM_DISTRIBUTION_COMPONENTS OR
           NOT LLVM_DISTRIBUTION_COMPONENTS)
         set(export_to_llvmexports EXPORT LLVMExports)
@@ -672,44 +681,12 @@
     endif()
     set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS ${name})
   endif()
-  set_target_properties(${name} PROPERTIES FOLDER "Libraries")
-endmacro(add_llvm_library name)
-
-macro(add_llvm_loadable_module name)
-  llvm_add_library(${name} MODULE ${ARGN})
-  if(NOT TARGET ${name})
-    # Add empty "phony" target
-    add_custom_target(${name})
+  if (ARG_MODULE)
+    set_target_properties(${name} PROPERTIES FOLDER "Loadable modules")
   else()
-    if( EXCLUDE_FROM_ALL )
-      set_target_properties( ${name} PROPERTIES EXCLUDE_FROM_ALL ON)
-    else()
-      if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
-        if(WIN32 OR CYGWIN)
-          # DLL platform
-          set(dlldir "bin")
-        else()
-          set(dlldir "lib${LLVM_LIBDIR_SUFFIX}")
-        endif()
-
-        if(${name} IN_LIST LLVM_DISTRIBUTION_COMPONENTS OR
-            NOT LLVM_DISTRIBUTION_COMPONENTS)
-          set(export_to_llvmexports EXPORT LLVMExports)
-          set_property(GLOBAL PROPERTY LLVM_HAS_EXPORTS True)
-        endif()
-
-        install(TARGETS ${name}
-                ${export_to_llvmexports}
-                LIBRARY DESTINATION ${dlldir}
-                ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX})
-      endif()
-      set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS ${name})
-    endif()
+    set_target_properties(${name} PROPERTIES FOLDER "Libraries")
   endif()
-
-  set_target_properties(${name} PROPERTIES FOLDER "Loadable modules")
-endmacro(add_llvm_loadable_module name)
-
+endmacro(add_llvm_library name)
 
 macro(add_llvm_executable name)
   cmake_parse_arguments(ARG
@@ -943,6 +920,9 @@
                                DEPENDS ${name}
                                COMPONENT ${name})
     endif()
+    set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS ${name})
+  elseif( LLVM_BUILD_UTILS )
+    set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS_BUILDTREE_ONLY ${name})
   endif()
 endmacro(add_llvm_utility name)
 
@@ -1643,7 +1623,16 @@
     return()
   endif()
 
-  if(APPLE)
+  if(CMAKE_GENERATOR STREQUAL "Xcode")
+    set_target_properties(${name} PROPERTIES
+      XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY ${LLVM_CODESIGNING_IDENTITY}
+    )
+    if(DEFINED ARG_ENTITLEMENTS)
+      set_target_properties(${name} PROPERTIES
+        XCODE_ATTRIBUTE_CODE_SIGN_ENTITLEMENTS ${ARG_ENTITLEMENTS}
+      )
+    endif()
+  elseif(APPLE)
     if(NOT CMAKE_CODESIGN)
       set(CMAKE_CODESIGN xcrun codesign)
     endif()
@@ -1657,18 +1646,13 @@
     if(DEFINED ARG_ENTITLEMENTS)
       set(pass_entitlements --entitlements ${ARG_ENTITLEMENTS})
     endif()
-    if(CMAKE_GENERATOR STREQUAL "Xcode")
-      # Avoid double-signing error: Since output overwrites input, Xcode runs
-      # the post-build rule even if the actual build-step was skipped.
-      set(pass_force --force)
-    endif()
 
     add_custom_command(
       TARGET ${name} POST_BUILD
       COMMAND ${CMAKE_COMMAND} -E
               env CODESIGN_ALLOCATE=${CMAKE_CODESIGN_ALLOCATE}
               ${CMAKE_CODESIGN} -s ${LLVM_CODESIGNING_IDENTITY}
-              ${pass_entitlements} ${pass_force} $<TARGET_FILE:${name}>
+              ${pass_entitlements} $<TARGET_FILE:${name}>
     )
   endif()
 endfunction()
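
With add_llvm_loadable_module folded into add_llvm_library, a loadable plugin
would now be declared with the MODULE keyword. A hypothetical plugin target
(name and source file are illustrative):

    # Builds a loadable module, e.g. for use with `opt -load`.
    add_llvm_library(MyHelloPass MODULE
      Hello.cpp
      )
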
diff --git a/cmake/modules/TableGen.cmake b/cmake/modules/TableGen.cmake
index d1afcb4..3c84ae7 100644
--- a/cmake/modules/TableGen.cmake
+++ b/cmake/modules/TableGen.cmake
@@ -25,7 +25,7 @@
     file(RELATIVE_PATH ofn_rel
       ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/${ofn})
     set(additional_cmdline
-      -o ${ofn_rel}.tmp
+      -o ${ofn_rel}
       -d ${ofn_rel}.d
       WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
       DEPFILE ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.d
@@ -36,7 +36,7 @@
     file(GLOB local_tds "*.td")
     file(GLOB_RECURSE global_tds "${LLVM_MAIN_INCLUDE_DIR}/llvm/*.td")
     set(additional_cmdline
-      -o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
+      -o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
       )
   endif()
 
@@ -69,8 +69,7 @@
   # dependency twice in the result file when
   # ("${${project}_TABLEGEN_TARGET}" STREQUAL "${${project}_TABLEGEN_EXE}")
  # but lets us have smaller and cleaner code here.
-  add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
-    # Generate tablegen output in a temporary file.
+  add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
     COMMAND ${${project}_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
     ${LLVM_TABLEGEN_FLAGS}
     ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
@@ -83,20 +82,9 @@
     ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
     COMMENT "Building ${ofn}..."
     )
-  add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
-    # Only update the real output file if there are any differences.
-    # This prevents recompilation of all the files depending on it if there
-    # aren't any.
-    COMMAND ${CMAKE_COMMAND} -E copy_if_different
-        ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
-        ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
-    DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
-    COMMENT "Updating ${ofn}..."
-    )
 
   # `make clean' must remove all those generated files:
-  set_property(DIRECTORY APPEND
-    PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${ofn}.tmp ${ofn})
+  set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${ofn})
 
   set(TABLEGEN_OUTPUT ${TABLEGEN_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}/${ofn} PARENT_SCOPE)
   set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${ofn} PROPERTIES
@@ -158,6 +146,12 @@
       llvm_ExternalProject_BuildCmd(tblgen_build_cmd ${target}
                                     ${LLVM_NATIVE_BUILD}
                                     CONFIGURATION Release)
+      # Create an artificial dependency between tablegen projects, because they
+      # compile the same dependencies and thus share the same build folders.
+      # FIXME: A proper fix requires sequentially chaining tablegens.
+      if (NOT ${project} STREQUAL LLVM AND TARGET ${project}-tablegen-host)
+        add_dependencies(${project}-tablegen-host LLVM-tablegen-host)
+      endif()
       add_custom_command(OUTPUT ${${project}_TABLEGEN_EXE}
         COMMAND ${tblgen_build_cmd}
         DEPENDS CONFIGURE_LLVM_NATIVE ${target}
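
For reference, a typical call site of this machinery in a backend's
CMakeLists.txt; with the temporary-file step removed, the .inc file named here
is now written directly by the tablegen invocation (target and file names are
illustrative):

    set(LLVM_TARGET_DEFINITIONS MyTarget.td)
    tablegen(LLVM MyTargetGenRegisterInfo.inc -gen-register-info)
    add_public_tablegen_target(MyTargetCommonTableGen)
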
diff --git a/docs/AMDGPU/AMDGPUAsmGFX7.rst b/docs/AMDGPU/AMDGPUAsmGFX7.rst
new file mode 100644
index 0000000..b267149
--- /dev/null
+++ b/docs/AMDGPU/AMDGPUAsmGFX7.rst
@@ -0,0 +1,1411 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+============================
+Syntax of GFX7 Instructions
+============================
+
+.. contents::
+  :local:
+
+Notation
+========
+
+Notation used in this document is explained :ref:`here<amdgpu_syn_instruction_notation>`.
+
+Introduction
+============
+
+An overview of generic syntax and other features of AMDGPU instructions may be found :ref:`in this document<amdgpu_syn_instructions>`.
+
+Instructions
+============
+
+
+DS
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**         **SRC0**      **SRC1**      **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    ds_add_rtn_u32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_rtn_u64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_u32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_u64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_u32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_u64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_b32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_b64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_rtn_b32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_rtn_b64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_src2_b32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_src2_b64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_append                      :ref:`vdst<amdgpu_synid7_vdst32_0>`                                           :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_b32                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_b64                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_f32                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_f64                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_b32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_b64               :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_f64               :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_condxchg32_rtn_b64          :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_consume                     :ref:`vdst<amdgpu_synid7_vdst32_0>`                                           :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_rtn_u32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_rtn_u64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_src2_u32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_src2_u64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_u32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_u64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_barrier                             :ref:`vdata<amdgpu_synid7_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_init                                :ref:`vdata<amdgpu_synid7_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_br                             :ref:`vdata<amdgpu_synid7_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_p                                                                 :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_release_all                                                       :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_v                                                                 :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_rtn_u32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_rtn_u64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_src2_u32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_src2_u64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_u32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_u64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_f32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_f64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_i32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_i64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_f32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_f64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_i32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_i64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_u32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_u64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_f32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_f64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_i32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_i64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_u32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_u64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_u32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_u64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_f32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_f64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_i32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_i64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_f32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_f64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_i32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_i64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_u32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_u64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_f32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_f64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_i32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_i64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_u32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_u64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_u32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_u64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_b32                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_b64                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_rtn_b32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_rtn_b64               :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_nop
+    ds_or_b32                                  :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_b64                                  :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_rtn_b32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_rtn_b64                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_src2_b32                             :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_src2_b64                             :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_ordered_count               :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2_b32                   :ref:`vdst<amdgpu_synid7_vdst64_0>`::ref:`b32x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2_b64                   :ref:`vdst<amdgpu_synid7_vdst128_0>`::ref:`b64x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2st64_b32               :ref:`vdst<amdgpu_synid7_vdst64_0>`::ref:`b32x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2st64_b64               :ref:`vdst<amdgpu_synid7_vdst128_0>`::ref:`b64x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b128                   :ref:`vdst<amdgpu_synid7_vdst128_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b64                    :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b96                    :ref:`vdst<amdgpu_synid7_vdst96_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i16                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i8                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u16                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u8                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_rtn_u32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_rtn_u64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_src2_u32                           :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_src2_u64                           :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_u32                                :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_u64                                :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_rtn_u32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_rtn_u64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_src2_u32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_src2_u64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_u32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_u64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_swizzle_b32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`pattern<amdgpu_synid_sw_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrap_rtn_b32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2_b32                              :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2_b64                              :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2st64_b32                          :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2st64_b64                          :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b128                              :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata128_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b16                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b32                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b64                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b8                                :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b96                               :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata96_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_src2_b32                          :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_src2_b64                          :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2_rtn_b32             :ref:`vdst<amdgpu_synid7_vdst64_0>`::ref:`b32x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2_rtn_b64             :ref:`vdst<amdgpu_synid7_vdst128_0>`::ref:`b64x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2st64_rtn_b32         :ref:`vdst<amdgpu_synid7_vdst64_0>`::ref:`b32x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata32_0>`,   :ref:`vdata1<amdgpu_synid7_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2st64_rtn_b64         :ref:`vdst<amdgpu_synid7_vdst128_0>`::ref:`b64x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata0<amdgpu_synid7_vdata64_0>`,   :ref:`vdata1<amdgpu_synid7_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg_rtn_b32              :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg_rtn_b64              :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_b32                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_b64                                 :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_rtn_b32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_rtn_b64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,       :ref:`vaddr<amdgpu_synid7_addr_ds>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_src2_b32                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_src2_b64                            :ref:`vaddr<amdgpu_synid7_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+
+EXP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**      **SRC2**      **SRC3**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    exp                            :ref:`tgt<amdgpu_synid7_tgt>`,      :ref:`vsrc0<amdgpu_synid7_src_exp>`,    :ref:`vsrc1<amdgpu_synid7_src_exp>`,    :ref:`vsrc2<amdgpu_synid7_src_exp>`,    :ref:`vsrc3<amdgpu_synid7_src_exp>`          :ref:`done<amdgpu_synid_done>` :ref:`compr<amdgpu_synid_compr>` :ref:`vm<amdgpu_synid_vm>`
+
+FLAT
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**           **SRC0**      **SRC1**             **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    flat_atomic_add                :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_add_x2             :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_and                :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_and_x2             :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_cmpswap            :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`b32x2<amdgpu_synid7_type_dev>`      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_cmpswap_x2         :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata128_0>`::ref:`b64x2<amdgpu_synid7_type_dev>`      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_dec                :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`u32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_dec_x2             :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`u64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_fcmpswap           :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`f32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`f32x2<amdgpu_synid7_type_dev>`      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_fcmpswap_x2        :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`f64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata128_0>`::ref:`f64x2<amdgpu_synid7_type_dev>`      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_fmax               :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`f32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`f32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_fmax_x2            :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`f64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`f64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_fmin               :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`f32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`f32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_fmin_x2            :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`f64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`f64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_inc                :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`u32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_inc_x2             :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`u64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_or                 :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_or_x2              :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smax               :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`s32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`s32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smax_x2            :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`s64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`s64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smin               :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`s32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`s32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smin_x2            :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`s64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`s64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_sub                :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_sub_x2             :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_swap               :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_swap_x2            :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umax               :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`u32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umax_x2            :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`u64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umin               :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u32<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`::ref:`u32<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umin_x2            :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`::ref:`u64<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`::ref:`u64<amdgpu_synid7_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_xor                :ref:`vdst<amdgpu_synid7_dst_flat_atomic32>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_xor_x2             :ref:`vdst<amdgpu_synid7_dst_flat_atomic64>`::ref:`opt<amdgpu_synid7_opt>`,     :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dword                :ref:`vdst<amdgpu_synid7_vdst32_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx2              :ref:`vdst<amdgpu_synid7_vdst64_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx3              :ref:`vdst<amdgpu_synid7_vdst96_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx4              :ref:`vdst<amdgpu_synid7_vdst128_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sbyte                :ref:`vdst<amdgpu_synid7_vdst32_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sshort               :ref:`vdst<amdgpu_synid7_vdst32_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ubyte                :ref:`vdst<amdgpu_synid7_vdst32_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ushort               :ref:`vdst<amdgpu_synid7_vdst32_0>`,         :ref:`vaddr<amdgpu_synid7_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_byte                              :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dword                             :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx2                           :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx3                           :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata96_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx4                           :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata128_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_short                             :ref:`vaddr<amdgpu_synid7_addr_flat>`,    :ref:`vdata<amdgpu_synid7_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+
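+As a hedged usage sketch (the register choices are arbitrary, not mandated
+by the table above), a FLAT load, store, and returning atomic might look
+like::
+
+    flat_load_dword  v0, v[2:3] glc      // load through the flat address in v[2:3]
+    flat_store_dword v[2:3], v1          // store v1 to the same flat address
+    flat_atomic_add  v4, v[2:3], v5 glc  // glc requests the pre-op value back in v4
+
+Note that for the atomic forms the destination operand is optional and is
+only meaningful together with ``glc``, as indicated by the ``opt`` marker
+in the table above.
+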
+MIMG
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**       **SRC1**      **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    image_atomic_add                         :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_and                         :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_cmpswap                     :ref:`vdata<amdgpu_synid7_data_mimg_atomic_cmp>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_dec                         :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_inc                         :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_or                          :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_smax                        :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_smin                        :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_sub                         :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_swap                        :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_umax                        :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_umin                        :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_xor                         :ref:`vdata<amdgpu_synid7_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid7_ret>`, :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4                  :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_b                :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_b_cl             :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_b_cl_o           :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_b_o              :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c                :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_b              :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_b_cl           :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_b_cl_o         :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_b_o            :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_cl             :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_cl_o           :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_l              :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_l_o            :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_lz             :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_lz_o           :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_c_o              :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_cl               :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_cl_o             :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_l                :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_l_o              :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_lz               :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_lz_o             :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4_o                :ref:`vdst<amdgpu_synid7_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_get_lod                  :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_get_resinfo              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load                     :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_mip                 :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_mip_pck             :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_mip_pck_sgn         :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_pck                 :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_pck_sgn             :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample                   :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_b                 :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_b_cl              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_b_cl_o            :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_b_o               :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c                 :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_b               :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_b_cl            :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_b_cl_o          :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_b_o             :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_cd              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_cd_cl           :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_cd_cl_o         :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_cd_o            :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_cl              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_cl_o            :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_d               :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_d_cl            :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_d_cl_o          :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_d_o             :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_l               :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_l_o             :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_lz              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_lz_o            :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_c_o               :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_cd                :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_cd_cl             :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_cd_cl_o           :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_cd_o              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_cl                :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_cl_o              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_d                 :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_d_cl              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_d_cl_o            :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_d_o               :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_l                 :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_l_o               :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_lz                :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_lz_o              :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample_o                 :ref:`vdst<amdgpu_synid7_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,     :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid7_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_store                              :ref:`vdata<amdgpu_synid7_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_store_mip                          :ref:`vdata<amdgpu_synid7_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_store_mip_pck                      :ref:`vdata<amdgpu_synid7_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_store_pck                          :ref:`vdata<amdgpu_synid7_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid7_addr_mimg>`,    :ref:`srsrc<amdgpu_synid7_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+
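+A minimal MIMG sketch, assuming a 256-bit image resource in ``s[8:15]``
+and a 128-bit sampler in ``s[16:19]`` (all register numbers here are
+illustrative, not prescribed by the table above)::
+
+    image_load   v[0:3], v[4:7], s[8:15] dmask:0xf unorm     // raw load, all four components
+    image_sample v[0:3], v[4:7], s[8:15], s[16:19] dmask:0xf // filtered sample via the sampler
+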
+MUBUF
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**              **DST**   **SRC0**             **SRC1**   **SRC2**    **SRC3**    **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    buffer_atomic_add              :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_add_x2           :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_and              :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_and_x2           :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_cmpswap          :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`::ref:`b32x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_cmpswap_x2       :ref:`vdata<amdgpu_synid7_data_buf_atomic128>`::ref:`dst<amdgpu_synid7_ret>`::ref:`b64x2<amdgpu_synid7_type_dev>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_dec              :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_dec_x2           :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u64<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_inc              :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_inc_x2           :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u64<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_or               :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_or_x2            :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smax             :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`::ref:`s32<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smax_x2          :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`::ref:`s64<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smin             :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`::ref:`s32<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smin_x2          :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`::ref:`s64<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_sub              :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_sub_x2           :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_swap             :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_swap_x2          :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umax             :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umax_x2          :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u64<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umin             :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umin_x2          :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`::ref:`u64<amdgpu_synid7_type_dev>`,   :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_xor              :ref:`vdata<amdgpu_synid7_data_buf_atomic32>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_xor_x2           :ref:`vdata<amdgpu_synid7_data_buf_atomic64>`::ref:`dst<amdgpu_synid7_ret>`,       :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dword        :ref:`vdst<amdgpu_synid7_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_dwordx2      :ref:`vdst<amdgpu_synid7_dst_buf_64>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dwordx3      :ref:`vdst<amdgpu_synid7_dst_buf_96>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dwordx4      :ref:`vdst<amdgpu_synid7_dst_buf_128>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_x     :ref:`vdst<amdgpu_synid7_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_format_xy    :ref:`vdst<amdgpu_synid7_dst_buf_64>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_xyz   :ref:`vdst<amdgpu_synid7_dst_buf_96>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_xyzw  :ref:`vdst<amdgpu_synid7_dst_buf_128>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_sbyte        :ref:`vdst<amdgpu_synid7_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_sshort       :ref:`vdst<amdgpu_synid7_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_ubyte        :ref:`vdst<amdgpu_synid7_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_ushort       :ref:`vdst<amdgpu_synid7_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid7_addr_buf>`,           :ref:`srsrc<amdgpu_synid7_rsrc_buf>`, :ref:`soffset<amdgpu_synid7_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_store_byte              :ref:`vdata<amdgpu_synid7_vdata32_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dword             :ref:`vdata<amdgpu_synid7_vdata32_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx2           :ref:`vdata<amdgpu_synid7_vdata64_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx3           :ref:`vdata<amdgpu_synid7_vdata96_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx4           :ref:`vdata<amdgpu_synid7_vdata128_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_x          :ref:`vdata<amdgpu_synid7_vdata32_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xy         :ref:`vdata<amdgpu_synid7_vdata64_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xyz        :ref:`vdata<amdgpu_synid7_vdata96_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xyzw       :ref:`vdata<amdgpu_synid7_vdata128_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_short             :ref:`vdata<amdgpu_synid7_vdata32_0>`,           :ref:`vaddr<amdgpu_synid7_addr_buf>`, :ref:`srsrc<amdgpu_synid7_rsrc_buf>`,  :ref:`soffset<amdgpu_synid7_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_wbinvl1
+    buffer_wbinvl1_vol
+
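+For illustration, plausible MUBUF forms (register choices and the offset
+value are arbitrary examples, not part of the encoding)::
+
+    buffer_load_dword v1, v0, s[4:7], s0 offen offset:16 glc
+    buffer_store_dword v1, v0, s[4:7], s0 offen offset:16
+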
+SMRD
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_buffer_load_dword            :ref:`sdst<amdgpu_synid7_sdst32_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_buf>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_buffer_load_dwordx16         :ref:`sdst<amdgpu_synid7_sdst512_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_buf>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_buffer_load_dwordx2          :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_buf>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_buffer_load_dwordx4          :ref:`sdst<amdgpu_synid7_sdst128_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_buf>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_buffer_load_dwordx8          :ref:`sdst<amdgpu_synid7_sdst256_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_buf>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_dcache_inv
+    s_dcache_inv_vol
+    s_load_dword                   :ref:`sdst<amdgpu_synid7_sdst32_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_addr>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_load_dwordx16                :ref:`sdst<amdgpu_synid7_sdst512_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_addr>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_load_dwordx2                 :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_addr>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_load_dwordx4                 :ref:`sdst<amdgpu_synid7_sdst128_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_addr>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_load_dwordx8                 :ref:`sdst<amdgpu_synid7_sdst256_0>`,     :ref:`sbase<amdgpu_synid7_base_smem_addr>`,    :ref:`soffset<amdgpu_synid7_offset_smem>`
+    s_memtime                      :ref:`sdst<amdgpu_synid7_sdst64_0>`
+
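+For illustration, plausible SMRD uses (register choices are arbitrary)::
+
+    s_load_dword s5, s[2:3], 0x10
+    s_buffer_load_dwordx4 s[8:11], s[4:7], 0x0
+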
+SOP1
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_abs_i32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_and_saveexec_b64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_andn2_saveexec_b64           :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_bcnt0_i32_b32                :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_bcnt0_i32_b64                :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_bcnt1_i32_b32                :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_bcnt1_i32_b64                :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_bitset0_b32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_bitset0_b64                  :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`
+    s_bitset1_b32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_bitset1_b64                  :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`
+    s_brev_b32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_brev_b64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_cbranch_join                           :ref:`ssrc<amdgpu_synid7_ssrc32_1>`
+    s_cmov_b32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_cmov_b64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_ff0_i32_b32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_ff0_i32_b64                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_ff1_i32_b32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_ff1_i32_b64                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_flbit_i32                    :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_flbit_i32_b32                :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_flbit_i32_b64                :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_flbit_i32_i64                :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_getpc_b64                    :ref:`sdst<amdgpu_synid7_sdst64_1>`
+    s_mov_b32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_mov_b64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_mov_fed_b32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_movreld_b32                  :ref:`sdst<amdgpu_synid7_sdst32_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_movreld_b64                  :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_movrels_b32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_2>`
+    s_movrels_b64                  :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_1>`
+    s_nand_saveexec_b64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_nor_saveexec_b64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_not_b32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_not_b64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_or_saveexec_b64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_orn2_saveexec_b64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_quadmask_b32                 :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_quadmask_b64                 :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_rfe_b64                                :ref:`ssrc<amdgpu_synid7_ssrc64_1>`
+    s_setpc_b64                              :ref:`ssrc<amdgpu_synid7_ssrc64_1>`
+    s_sext_i32_i16                 :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_3>`
+    s_sext_i32_i8                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_3>`
+    s_swappc_b64                   :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_1>`
+    s_wqm_b32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc32_0>`
+    s_wqm_b64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_xnor_saveexec_b64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+    s_xor_saveexec_b64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`ssrc<amdgpu_synid7_ssrc64_0>`
+
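+For illustration, plausible SOP1 uses (register choices are arbitrary)::
+
+    s_mov_b32 s0, s1
+    s_not_b64 s[2:3], s[4:5]
+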
+SOP2
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**       **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_absdiff_i32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_add_i32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_add_u32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_addc_u32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_and_b32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_and_b64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_andn2_b32                    :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_andn2_b64                    :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_ashr_i32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_ashr_i64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_bfe_i32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_bfe_i64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_bfe_u32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_bfe_u64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_bfm_b32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_bfm_b64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`, :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`
+    s_cbranch_g_fork                         :ref:`ssrc0<amdgpu_synid7_ssrc64_2>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_2>`
+    s_cselect_b32                  :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cselect_b64                  :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_lshl_b32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_lshl_b64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_lshr_b32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_lshr_b64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_max_i32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_max_u32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_min_i32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_min_u32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_mul_i32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_nand_b32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_nand_b64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_nor_b32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_nor_b64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_or_b32                       :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_or_b64                       :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_orn2_b32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_orn2_b64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_sub_i32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_sub_u32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_subb_u32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_xnor_b32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_xnor_b64                     :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+    s_xor_b32                      :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_xor_b64                      :ref:`sdst<amdgpu_synid7_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid7_ssrc64_0>`
+
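+For illustration, plausible SOP2 uses (register choices are arbitrary;
+ssrc operands also accept inline constants, as in the second line)::
+
+    s_add_u32 s0, s1, s2
+    s_lshl_b32 s0, s1, 4
+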
+SOPC
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_bitcmp0_b32                  :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_bitcmp0_b64                  :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_bitcmp1_b32                  :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_bitcmp1_b64                  :ref:`ssrc0<amdgpu_synid7_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    s_cmp_eq_i32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_eq_u32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_ge_i32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_ge_u32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_gt_i32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_gt_u32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_le_i32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_le_u32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_lg_i32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_lg_u32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_lt_i32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_cmp_lt_u32                   :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+    s_setvskip                     :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_0>`
+
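+For illustration, a plausible SOPC use (register choices are arbitrary;
+the comparison result is written to SCC)::
+
+    s_cmp_eq_i32 s0, s1
+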
+SOPK
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_addk_i32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cbranch_i_fork                         :ref:`ssrc<amdgpu_synid7_ssrc64_3>`,     :ref:`label<amdgpu_synid7_label>`
+    s_cmovk_i32                    :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cmpk_eq_i32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cmpk_eq_u32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_uimm16>`
+    s_cmpk_ge_i32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cmpk_ge_u32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_uimm16>`
+    s_cmpk_gt_i32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cmpk_gt_u32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_uimm16>`
+    s_cmpk_le_i32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cmpk_le_u32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_uimm16>`
+    s_cmpk_lg_i32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cmpk_lg_u32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_uimm16>`
+    s_cmpk_lt_i32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_cmpk_lt_u32                            :ref:`ssrc<amdgpu_synid7_ssrc32_1>`,     :ref:`imm16<amdgpu_synid7_uimm16>`
+    s_getreg_b32                   :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`hwreg<amdgpu_synid7_hwreg>`
+    s_movk_i32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_mulk_i32                     :ref:`sdst<amdgpu_synid7_sdst32_1>`,     :ref:`imm16<amdgpu_synid7_simm16>`
+    s_setreg_b32                   :ref:`hwreg<amdgpu_synid7_hwreg>`,    :ref:`ssrc<amdgpu_synid7_ssrc32_1>`
+    s_setreg_imm32_b32             :ref:`hwreg<amdgpu_synid7_hwreg>`,    :ref:`imm32<amdgpu_synid7_bimm32>`
+
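+For illustration, plausible SOPK uses (register and immediate values are
+arbitrary)::
+
+    s_movk_i32 s0, 0x1234
+    s_cmpk_gt_i32 s1, 0x20
+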
+SOPP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **SRC**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_barrier
+    s_branch                       :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_cdbgsys              :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_cdbgsys_and_user     :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_cdbgsys_or_user      :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_cdbguser             :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_execnz               :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_execz                :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_scc0                 :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_scc1                 :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_vccnz                :ref:`label<amdgpu_synid7_label>`
+    s_cbranch_vccz                 :ref:`label<amdgpu_synid7_label>`
+    s_decperflevel                 :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_endpgm
+    s_icache_inv
+    s_incperflevel                 :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_nop                          :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_sendmsg                      :ref:`msg<amdgpu_synid7_msg>`
+    s_sendmsghalt                  :ref:`msg<amdgpu_synid7_msg>`
+    s_sethalt                      :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_setkill                      :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_setprio                      :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_sleep                        :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_trap                         :ref:`imm16<amdgpu_synid7_bimm16>`
+    s_ttracedata
+    s_waitcnt                      :ref:`waitcnt<amdgpu_synid7_waitcnt>`
+
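+For illustration, plausible SOPP uses ("skip" is a hypothetical label)::
+
+    s_waitcnt vmcnt(0) lgkmcnt(0)
+    s_cbranch_scc0 skip
+    s_endpgm
+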
+VINTRP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**       **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_interp_mov_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`param<amdgpu_synid7_param>`::ref:`b32<amdgpu_synid7_type_dev>`, :ref:`attr<amdgpu_synid7_attr>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_interp_p1_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vsrc<amdgpu_synid7_vsrc32_0>`,      :ref:`attr<amdgpu_synid7_attr>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_interp_p2_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vsrc<amdgpu_synid7_vsrc32_0>`,      :ref:`attr<amdgpu_synid7_attr>`::ref:`b32<amdgpu_synid7_type_dev>`
+
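+For illustration, a plausible two-stage interpolation sequence (register
+and attribute choices are arbitrary; v0/v1 are assumed to hold the i/j
+barycentric coordinates)::
+
+    v_interp_p1_f32 v2, v0, attr0.x
+    v_interp_p2_f32 v2, v1, attr0.x
+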
+VOP1
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_bfrev_b32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_ceil_f32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_ceil_f64                     :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_clrexcp
+    v_cos_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f16_f32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f32_f16                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_1>`
+    v_cvt_f32_f64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_cvt_f32_i32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f32_u32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f32_ubyte0               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f32_ubyte1               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f32_ubyte2               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f32_ubyte3               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f64_f32                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f64_i32                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_f64_u32                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_flr_i32_f32              :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_i32_f32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_i32_f64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_cvt_off_f32_i4               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_rpi_i32_f32              :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_u32_f32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_cvt_u32_f64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_exp_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_exp_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_ffbh_i32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_ffbh_u32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_ffbl_b32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_floor_f32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_floor_f64                    :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_fract_f32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_fract_f64                    :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_frexp_exp_i32_f32            :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_frexp_exp_i32_f64            :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_frexp_mant_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_frexp_mant_f64               :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_log_clamp_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_log_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_log_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_mov_b32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_mov_fed_b32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_movreld_b32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_movrels_b32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vsrc<amdgpu_synid7_vsrc32_0>`
+    v_movrelsd_b32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vsrc<amdgpu_synid7_vsrc32_0>`
+    v_nop
+    v_not_b32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_rcp_clamp_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_rcp_clamp_f64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_rcp_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_rcp_f64                      :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_rcp_iflag_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_rcp_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_readfirstlane_b32            :ref:`sdst<amdgpu_synid7_sdst32_2>`,     :ref:`vsrc<amdgpu_synid7_vsrc32_0>`
+    v_rndne_f32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_rndne_f64                    :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_rsq_clamp_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_rsq_clamp_f64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_rsq_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_rsq_f64                      :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_rsq_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_sin_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_sqrt_f32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_sqrt_f64                     :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+    v_trunc_f32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`src<amdgpu_synid7_src32_0>`
+    v_trunc_f64                    :ref:`vdst<amdgpu_synid7_vdst64_0>`,     :ref:`src<amdgpu_synid7_src64_0>`
+
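+For illustration, plausible VOP1 uses (register choices are arbitrary)::
+
+    v_mov_b32 v0, s2
+    v_rcp_f64 v[2:3], v[4:5]
+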
+VOP2
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST0**      **DST1**      **SRC0**      **SRC1**      **SRC2**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_add_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_add_i32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_addc_u32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`,    :ref:`vcc<amdgpu_synid7_vcc_64>`
+    v_and_b32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_ashr_i32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_ashrrev_i32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`::ref:`u32<amdgpu_synid7_type_dev>`, :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_bcnt_u32_b32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_bfm_b32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cndmask_b32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`,    :ref:`vcc<amdgpu_synid7_vcc_64>`
+    v_cvt_pk_i16_i32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cvt_pk_u16_u32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cvt_pkaccum_u8_f32           :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_cvt_pknorm_i16_f32           :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cvt_pknorm_u16_f32           :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cvt_pkrtz_f16_f32            :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_ldexp_f32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`i32<amdgpu_synid7_type_dev>`
+    v_lshl_b32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_lshlrev_b32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`::ref:`u32<amdgpu_synid7_type_dev>`, :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_lshr_b32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_lshrrev_b32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`::ref:`u32<amdgpu_synid7_type_dev>`, :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mac_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mac_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_madak_f32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`,    :ref:`imm32<amdgpu_synid7_fimm32>`
+    v_madmk_f32                    :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`imm32<amdgpu_synid7_fimm32>`,    :ref:`vsrc2<amdgpu_synid7_vsrc32_0>`
+    v_max_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_max_i32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_max_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_max_u32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mbcnt_hi_u32_b32             :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mbcnt_lo_u32_b32             :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_min_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_min_i32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_min_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_min_u32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mul_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mul_hi_i32_i24               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mul_hi_u32_u24               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mul_i32_i24                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mul_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_mul_u32_u24                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_or_b32                       :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_readlane_b32                 :ref:`sdst<amdgpu_synid7_sdst32_2>`,               :ref:`vsrc0<amdgpu_synid7_vsrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_4>`
+    v_sub_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_sub_i32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_subb_u32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`,    :ref:`vcc<amdgpu_synid7_vcc_64>`
+    v_subbrev_u32                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`,    :ref:`vcc<amdgpu_synid7_vcc_64>`
+    v_subrev_f32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_subrev_i32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,     :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_writelane_b32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`ssrc0<amdgpu_synid7_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid7_ssrc32_4>`
+    v_xor_b32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,               :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+
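+For illustration, plausible VOP2 uses (register choices are arbitrary;
+v_add_i32 writes its carry-out to VCC, per the DST1 column above)::
+
+    v_add_f32 v0, v1, v2
+    v_add_i32 v3, vcc, v4, v5
+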
+VOP3
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST0**       **DST1**      **SRC0**        **SRC1**        **SRC2**            **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_add_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_add_f64                      :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_add_i32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_addc_u32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`ssrc2<amdgpu_synid7_ssrc64_1>`
+    v_alignbit_b32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_alignbyte_b32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_and_b32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_ashr_i32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_ashr_i64                     :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_ashrrev_i32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_2>`
+    v_bcnt_u32_b32_e64             :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_bfe_i32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`src2<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_bfe_u32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_bfi_b32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_bfm_b32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_bfrev_b32_e64                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_ceil_f32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ceil_f64_e64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_clrexcp_e64
+    v_cmp_class_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmp_class_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmp_eq_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_eq_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_eq_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_eq_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_eq_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_eq_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_f_f32_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_f_f64_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_f_i32_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_f_i64_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_f_u32_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_f_u64_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_ge_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_ge_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_ge_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_ge_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_ge_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_ge_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_gt_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_gt_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_gt_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_gt_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_gt_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_gt_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_le_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_le_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_le_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_le_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_le_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_le_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_lg_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_lg_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_lt_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_lt_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_lt_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_lt_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_lt_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_lt_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_ne_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_ne_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_ne_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_ne_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_neq_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_neq_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nge_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nge_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_ngt_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_ngt_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nle_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nle_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nlg_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nlg_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nlt_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_nlt_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_o_f32_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_o_f64_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_t_i32_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_t_i64_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_t_u32_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmp_t_u64_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmp_tru_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_tru_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_u_f32_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmp_u_f64_e64                :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_eq_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_eq_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_f_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_f_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_ge_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_ge_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_gt_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_gt_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_le_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_le_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_lg_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_lg_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_lt_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_lt_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_neq_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_neq_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nge_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nge_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_ngt_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_ngt_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nle_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nle_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nlg_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nlg_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nlt_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_nlt_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_o_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_o_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_tru_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_tru_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_u_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmps_u_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_eq_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_eq_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_f_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_f_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_ge_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_ge_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_gt_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_gt_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_le_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_le_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_lg_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_lg_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_lt_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_lt_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_neq_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_neq_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nge_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nge_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_ngt_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_ngt_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nle_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nle_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nlg_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nlg_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nlt_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_nlt_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_o_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_o_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_tru_f32_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_tru_f64_e64            :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_u_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpsx_u_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_class_f32_e64           :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmpx_class_f64_e64           :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmpx_eq_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_eq_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_eq_i32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_eq_i64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_eq_u32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_eq_u64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_f_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_f_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_f_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_f_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_f_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_f_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_ge_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_ge_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_ge_i32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_ge_i64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_ge_u32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_ge_u64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_gt_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_gt_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_gt_i32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_gt_i64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_gt_u32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_gt_u64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_le_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_le_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_le_i32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_le_i64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_le_u32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_le_u64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_lg_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_lg_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_lt_f32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_lt_f64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_lt_i32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_lt_i64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_lt_u32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_lt_u64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_ne_i32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_ne_i64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_ne_u32_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_ne_u64_e64              :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_neq_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_neq_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nge_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nge_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_ngt_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_ngt_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nle_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nle_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nlg_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nlg_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nlt_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_nlt_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_o_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_o_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_t_i32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_t_i64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_t_u32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cmpx_t_u64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`
+    v_cmpx_tru_f32_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_tru_f64_e64             :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_u_f32_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cmpx_u_f64_e64               :ref:`sdst<amdgpu_synid7_sdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cndmask_b32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`ssrc2<amdgpu_synid7_ssrc64_1>`
+    v_cos_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubeid_f32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubema_f32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubesc_f32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubetc_f32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f16_f32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_f32_f16_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_3>`                                     :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_f64_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_i32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`                                     :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_u32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`                                     :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte0_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_cvt_f32_ubyte1_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_cvt_f32_ubyte2_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_cvt_f32_ubyte3_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_cvt_f64_f32_e64              :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_i32_e64              :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src32_2>`                                     :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_u32_e64              :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src32_2>`                                     :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_flr_i32_f32_e64          :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_i32_f32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_i32_f64_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_off_f32_i4_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`                                     :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_pk_i16_i32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cvt_pk_u16_u32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_cvt_pk_u8_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`src2<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_cvt_pkaccum_u8_f32_e64       :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_cvt_pknorm_i16_f32_e64       :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_pknorm_u16_f32_e64       :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_pkrtz_f16_f32_e64        :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_rpi_i32_f32_e64          :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_u32_f32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`
+    v_cvt_u32_f64_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_div_fixup_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fixup_f64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fmas_f32                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fmas_f64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_scale_f32                :ref:`vdst<amdgpu_synid7_vdst32_0>`,      :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_div_scale_f64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,      :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src64_1>`,       :ref:`src2<amdgpu_synid7_src64_1>`
+    v_exp_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_exp_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ffbh_i32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_ffbh_u32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_ffbl_b32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_floor_f32_e64                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_floor_f64_e64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fma_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fma_f64                      :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fract_f32_e64                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fract_f64_e64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_frexp_exp_i32_f32_e64        :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_frexp_exp_i32_f64_e64        :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`
+    v_frexp_mant_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_frexp_mant_f64_e64           :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ldexp_f32_e64                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`i32<amdgpu_synid7_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ldexp_f64                    :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`i32<amdgpu_synid7_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_lerp_u8                      :ref:`vdst<amdgpu_synid7_vdst32_0>`::ref:`u32<amdgpu_synid7_type_dev>`,            :ref:`src0<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`,   :ref:`src2<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_log_clamp_f32_e64            :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_log_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_log_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_lshl_b32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_lshl_b64                     :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_lshlrev_b32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_2>`
+    v_lshr_b32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_lshr_b64                     :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`,       :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_lshrrev_b32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mac_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mac_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_f32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_i32_i24                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`::ref:`i32<amdgpu_synid7_type_dev>`
+    v_mad_i64_i32                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src64_1>`::ref:`i64<amdgpu_synid7_type_dev>`
+    v_mad_legacy_f32               :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_u32_u24                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_mad_u64_u32                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src64_1>`::ref:`u64<amdgpu_synid7_type_dev>`
+    v_max3_f32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max3_i32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_max3_u32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_max_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max_f64                      :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max_i32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_max_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max_u32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mbcnt_hi_u32_b32_e64         :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mbcnt_lo_u32_b32_e64         :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_med3_f32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_med3_i32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_med3_u32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_min3_f32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min3_i32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_min3_u32                     :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_min_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min_f64                      :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min_i32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_min_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min_u32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mov_b32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_mov_fed_b32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_movreld_b32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_movrels_b32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`vsrc<amdgpu_synid7_vsrc32_0>`
+    v_movrelsd_b32_e64             :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`vsrc<amdgpu_synid7_vsrc32_0>`
+    v_mqsad_pk_u16_u8              :ref:`vdst<amdgpu_synid7_vdst64_0>`::ref:`b64<amdgpu_synid7_type_dev>`,            :ref:`src0<amdgpu_synid7_src64_2>`::ref:`b64<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`,   :ref:`src2<amdgpu_synid7_src64_2>`::ref:`b64<amdgpu_synid7_type_dev>`
+    v_mqsad_u32_u8                 :ref:`vdst<amdgpu_synid7_vdst128_0>`::ref:`b128<amdgpu_synid7_type_dev>`,           :ref:`src0<amdgpu_synid7_src64_2>`::ref:`b64<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`,   :ref:`vsrc2<amdgpu_synid7_vsrc128_0>`::ref:`b128<amdgpu_synid7_type_dev>`
+    v_msad_u8                      :ref:`vdst<amdgpu_synid7_vdst32_0>`::ref:`u32<amdgpu_synid7_type_dev>`,            :ref:`src0<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`,   :ref:`src2<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_mul_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_f64                      :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_hi_i32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mul_hi_i32_i24_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mul_hi_u32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mul_hi_u32_u24_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mul_i32_i24_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mul_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_lo_i32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mul_lo_u32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mul_u32_u24_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_mullit_f32                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src2<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_nop_e64
+    v_not_b32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`
+    v_or_b32_e64                   :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_qsad_pk_u16_u8               :ref:`vdst<amdgpu_synid7_vdst64_0>`::ref:`b64<amdgpu_synid7_type_dev>`,            :ref:`src0<amdgpu_synid7_src64_2>`::ref:`b64<amdgpu_synid7_type_dev>`,   :ref:`src1<amdgpu_synid7_src32_1>`::ref:`b32<amdgpu_synid7_type_dev>`,   :ref:`src2<amdgpu_synid7_src64_2>`::ref:`b64<amdgpu_synid7_type_dev>`
+    v_rcp_clamp_f32_e64            :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_clamp_f64_e64            :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_f64_e64                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_iflag_f32_e64            :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rndne_f32_e64                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rndne_f64_e64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_clamp_f32_e64            :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_clamp_f64_e64            :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_f64_e64                  :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_legacy_f32_e64           :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sad_hi_u8                    :ref:`vdst<amdgpu_synid7_vdst32_0>`::ref:`u32<amdgpu_synid7_type_dev>`,            :ref:`src0<amdgpu_synid7_src32_1>`::ref:`u8x4<amdgpu_synid7_type_dev>`,  :ref:`src1<amdgpu_synid7_src32_1>`::ref:`u8x4<amdgpu_synid7_type_dev>`,  :ref:`src2<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_sad_u16                      :ref:`vdst<amdgpu_synid7_vdst32_0>`::ref:`u32<amdgpu_synid7_type_dev>`,            :ref:`src0<amdgpu_synid7_src32_1>`::ref:`u16x2<amdgpu_synid7_type_dev>`, :ref:`src1<amdgpu_synid7_src32_1>`::ref:`u16x2<amdgpu_synid7_type_dev>`, :ref:`src2<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_sad_u32                      :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`src2<amdgpu_synid7_src32_2>`
+    v_sad_u8                       :ref:`vdst<amdgpu_synid7_vdst32_0>`::ref:`u32<amdgpu_synid7_type_dev>`,            :ref:`src0<amdgpu_synid7_src32_1>`::ref:`u8x4<amdgpu_synid7_type_dev>`,  :ref:`src1<amdgpu_synid7_src32_1>`::ref:`u8x4<amdgpu_synid7_type_dev>`,  :ref:`src2<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`
+    v_sin_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sqrt_f32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sqrt_f64_e64                 :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sub_f32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sub_i32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_subb_u32_e64                 :ref:`vdst<amdgpu_synid7_vdst32_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`ssrc2<amdgpu_synid7_ssrc64_1>`
+    v_subbrev_u32_e64              :ref:`vdst<amdgpu_synid7_vdst32_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`,       :ref:`ssrc2<amdgpu_synid7_ssrc64_1>`
+    v_subrev_f32_e64               :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_subrev_i32_e64               :ref:`vdst<amdgpu_synid7_vdst32_0>`,      :ref:`sdst<amdgpu_synid7_sdst64_0>`,     :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+    v_trig_preop_f64               :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src0<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`,     :ref:`src1<amdgpu_synid7_src32_2>`::ref:`u32<amdgpu_synid7_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_trunc_f32_e64                :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src<amdgpu_synid7_src32_2>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_trunc_f64_e64                :ref:`vdst<amdgpu_synid7_vdst64_0>`,                :ref:`src<amdgpu_synid7_src64_1>`::ref:`m<amdgpu_synid7_mod>`                                   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_xor_b32_e64                  :ref:`vdst<amdgpu_synid7_vdst32_0>`,                :ref:`src0<amdgpu_synid7_src32_2>`,       :ref:`src1<amdgpu_synid7_src32_2>`
+
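+For example, the VOP3 ("_e64") forms above accept source modifiers (negation
+and absolute value) as well as the ``clamp`` and ``omod`` output modifiers.
+A hand-written sketch with illustrative register choices (not taken from the
+generated tables)::
+
+    v_mad_f32 v5, -v1, |v2|, v3 clamp    // v5 = clamp(-v1 * |v2| + v3)
+    v_mul_f32_e64 v6, v1, v2 mul:2       // omod: result multiplied by 2
+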
+VOPC
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_cmp_class_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmp_class_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmp_eq_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_eq_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_eq_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_eq_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_eq_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_eq_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_f_f32                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_f_f64                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_f_i32                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_f_i64                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_f_u32                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_f_u64                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_ge_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_ge_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_ge_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_ge_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_ge_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_ge_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_gt_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_gt_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_gt_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_gt_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_gt_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_gt_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_le_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_le_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_le_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_le_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_le_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_le_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_lg_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_lg_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_lt_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_lt_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_lt_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_lt_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_lt_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_lt_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_ne_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_ne_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_ne_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_ne_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_neq_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_neq_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_nge_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_nge_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_ngt_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_ngt_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_nle_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_nle_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_nlg_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_nlg_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_nlt_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_nlt_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_o_f32                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_o_f64                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_t_i32                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_t_i64                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_t_u32                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_t_u64                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_tru_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_tru_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmp_u_f32                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmp_u_f64                    :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_eq_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_eq_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_f_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_f_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_ge_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_ge_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_gt_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_gt_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_le_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_le_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_lg_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_lg_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_lt_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_lt_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_neq_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_neq_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_nge_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_nge_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_ngt_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_ngt_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_nle_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_nle_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_nlg_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_nlg_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_nlt_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_nlt_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_o_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_o_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_tru_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_tru_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmps_u_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmps_u_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_eq_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_eq_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_f_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_f_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_ge_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_ge_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_gt_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_gt_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_le_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_le_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_lg_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_lg_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_lt_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_lt_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_neq_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_neq_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_nge_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_nge_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_ngt_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_ngt_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_nle_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_nle_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_nlg_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_nlg_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_nlt_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_nlt_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_o_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_o_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_tru_f32                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_tru_f64                :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpsx_u_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpsx_u_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_class_f32               :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmpx_class_f64               :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`::ref:`b32<amdgpu_synid7_type_dev>`
+    v_cmpx_eq_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_eq_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_eq_i32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_eq_i64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_eq_u32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_eq_u64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_f_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_f_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_f_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_f_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_f_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_f_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_ge_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_ge_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_ge_i32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_ge_i64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_ge_u32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_ge_u64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_gt_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_gt_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_gt_i32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_gt_i64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_gt_u32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_gt_u64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_le_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_le_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_le_i32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_le_i64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_le_u32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_le_u64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_lg_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_lg_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_lt_f32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_lt_f64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_lt_i32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_lt_i64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_lt_u32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_lt_u64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_ne_i32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_ne_i64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_ne_u32                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_ne_u64                  :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_neq_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_neq_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_nge_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_nge_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_ngt_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_ngt_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_nle_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_nle_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_nlg_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_nlg_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_nlt_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_nlt_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_o_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_o_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_t_i32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_t_i64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_t_u32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_t_u64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_tru_f32                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_tru_f64                 :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+    v_cmpx_u_f32                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src32_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc32_0>`
+    v_cmpx_u_f64                   :ref:`vcc<amdgpu_synid7_vcc_64>`,      :ref:`src0<amdgpu_synid7_src64_0>`,     :ref:`vsrc1<amdgpu_synid7_vsrc64_0>`
+
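+A VOPC comparison writes a per-lane result into the VCC mask, which scalar
+control flow can then test. A hand-written sketch with illustrative operands
+and a hypothetical label (not taken from the generated tables)::
+
+    v_cmp_lt_f32 vcc, v1, v2    // per active lane: VCC bit = (v1 < v2)
+    s_cbranch_vccnz .Ltaken     // branch if any lane set its VCC bit
+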
+.. |---| unicode:: U+02014 .. em dash
+
+
+.. toctree::
+    :hidden:
+
+    gfx7_attr
+    gfx7_bimm16
+    gfx7_bimm32
+    gfx7_fimm32
+    gfx7_hwreg
+    gfx7_label
+    gfx7_msg
+    gfx7_param
+    gfx7_simm16
+    gfx7_tgt
+    gfx7_uimm16
+    gfx7_waitcnt
+    gfx7_addr_buf
+    gfx7_addr_ds
+    gfx7_addr_flat
+    gfx7_addr_mimg
+    gfx7_base_smem_addr
+    gfx7_base_smem_buf
+    gfx7_data_buf_atomic128
+    gfx7_data_buf_atomic32
+    gfx7_data_buf_atomic64
+    gfx7_data_mimg_atomic_cmp
+    gfx7_data_mimg_atomic_reg
+    gfx7_data_mimg_store
+    gfx7_dst_buf_128
+    gfx7_dst_buf_64
+    gfx7_dst_buf_96
+    gfx7_dst_buf_lds
+    gfx7_dst_flat_atomic32
+    gfx7_dst_flat_atomic64
+    gfx7_dst_mimg_gather4
+    gfx7_dst_mimg_regular
+    gfx7_offset_buf
+    gfx7_offset_smem
+    gfx7_rsrc_buf
+    gfx7_rsrc_mimg
+    gfx7_samp_mimg
+    gfx7_sdst128_0
+    gfx7_sdst256_0
+    gfx7_sdst32_0
+    gfx7_sdst32_1
+    gfx7_sdst32_2
+    gfx7_sdst512_0
+    gfx7_sdst64_0
+    gfx7_sdst64_1
+    gfx7_src32_0
+    gfx7_src32_1
+    gfx7_src32_2
+    gfx7_src32_3
+    gfx7_src64_0
+    gfx7_src64_1
+    gfx7_src64_2
+    gfx7_src_exp
+    gfx7_ssrc32_0
+    gfx7_ssrc32_1
+    gfx7_ssrc32_2
+    gfx7_ssrc32_3
+    gfx7_ssrc32_4
+    gfx7_ssrc64_0
+    gfx7_ssrc64_1
+    gfx7_ssrc64_2
+    gfx7_ssrc64_3
+    gfx7_vcc_64
+    gfx7_vdata128_0
+    gfx7_vdata32_0
+    gfx7_vdata64_0
+    gfx7_vdata96_0
+    gfx7_vdst128_0
+    gfx7_vdst32_0
+    gfx7_vdst64_0
+    gfx7_vdst96_0
+    gfx7_vsrc128_0
+    gfx7_vsrc32_0
+    gfx7_vsrc64_0
+    gfx7_mod
+    gfx7_opt
+    gfx7_ret
+    gfx7_type_dev
diff --git a/docs/AMDGPU/AMDGPUAsmGFX8.rst b/docs/AMDGPU/AMDGPUAsmGFX8.rst
new file mode 100644
index 0000000..a6dbc9b
--- /dev/null
+++ b/docs/AMDGPU/AMDGPUAsmGFX8.rst
@@ -0,0 +1,1846 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+============================
+Syntax of GFX8 Instructions
+============================
+
+.. contents::
+  :local:
+
+Notation
+========
+
+Notation used in this document is explained :ref:`here<amdgpu_syn_instruction_notation>`.
+
+Introduction
+============
+
+An overview of generic syntax and other features of AMDGPU instructions may be found :ref:`in this document<amdgpu_syn_instructions>`.
+
+Instructions
+============
+
+
+DS
+-----------------------
+
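+DS instructions access LDS (or GDS, when the ``gds`` modifier is given) at a
+byte address formed from the VADDR operand plus the optional ``offset16``
+immediate. A hand-written sketch with illustrative register choices (not
+taken from the generated tables), shown before the full listing below::
+
+    ds_add_u32 v2, v3 offset:16    // atomic add of v3 into LDS at v2+16
+    ds_add_rtn_u32 v0, v2, v3      // same, but returns the old value in v0
+    s_waitcnt lgkmcnt(0)           // wait for the returned value before use
+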
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**         **SRC0**      **SRC1**      **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    ds_add_f32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_rtn_f32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_rtn_u32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_rtn_u64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_f32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_u32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_u64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_u32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_u64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_b32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_b64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_rtn_b32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_rtn_b64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_src2_b32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_src2_b64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_append                      :ref:`vdst<amdgpu_synid8_vdst32_0>`                                           :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_bpermute_b32                :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>`
+    ds_cmpst_b32                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_b64                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_f32                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_f64                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_b32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_b64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_f32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_f64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_condxchg32_rtn_b64          :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_consume                     :ref:`vdst<amdgpu_synid8_vdst32_0>`                                           :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_rtn_u32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_rtn_u64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_src2_u32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_src2_u64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_u32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_u64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_barrier                             :ref:`vdata<amdgpu_synid8_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_init                                :ref:`vdata<amdgpu_synid8_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_br                             :ref:`vdata<amdgpu_synid8_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_p                                                                 :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_release_all                                                       :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_v                                                                 :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_rtn_u32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_rtn_u64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_src2_u32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_src2_u64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_u32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_u64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_f32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_f64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_i32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_i64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_f32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_f64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_i32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_i64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_u32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_u64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_f32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_f64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_i32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_i64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_u32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_u64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_u32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_u64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_f32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_f64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_i32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_i64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_f32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_f64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_i32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_i64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_u32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_u64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_f32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_f64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_i32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_i64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_u32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_u64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_u32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_u64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_b32                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_b64                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_rtn_b32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_rtn_b64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_nop
+    ds_or_b32                                  :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_b64                                  :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_rtn_b32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_rtn_b64                  :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_src2_b32                             :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_src2_b64                             :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_ordered_count               :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_permute_b32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>`
+    ds_read2_b32                   :ref:`vdst<amdgpu_synid8_vdst64_0>`::ref:`b32x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2_b64                   :ref:`vdst<amdgpu_synid8_vdst128_0>`::ref:`b64x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2st64_b32               :ref:`vdst<amdgpu_synid8_vdst64_0>`::ref:`b32x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2st64_b64               :ref:`vdst<amdgpu_synid8_vdst128_0>`::ref:`b64x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b128                   :ref:`vdst<amdgpu_synid8_vdst128_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b32                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b64                    :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b96                    :ref:`vdst<amdgpu_synid8_vdst96_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i16                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i8                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u16                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u8                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_rtn_u32                :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_rtn_u64                :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_src2_u32                           :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_src2_u64                           :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_u32                                :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_u64                                :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_rtn_u32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_rtn_u64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_src2_u32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_src2_u64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_u32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_u64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_swizzle_b32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`pattern<amdgpu_synid_sw_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrap_rtn_b32                :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2_b32                              :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2_b64                              :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2st64_b32                          :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2st64_b64                          :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b128                              :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata128_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b16                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b32                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b64                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b8                                :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b96                               :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata96_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_src2_b32                          :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_src2_b64                          :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2_rtn_b32             :ref:`vdst<amdgpu_synid8_vdst64_0>`::ref:`b32x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2_rtn_b64             :ref:`vdst<amdgpu_synid8_vdst128_0>`::ref:`b64x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2st64_rtn_b32         :ref:`vdst<amdgpu_synid8_vdst64_0>`::ref:`b32x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata32_0>`,   :ref:`vdata1<amdgpu_synid8_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2st64_rtn_b64         :ref:`vdst<amdgpu_synid8_vdst128_0>`::ref:`b64x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata0<amdgpu_synid8_vdata64_0>`,   :ref:`vdata1<amdgpu_synid8_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg_rtn_b32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg_rtn_b64              :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_b32                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_b64                                 :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_rtn_b32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_rtn_b64                 :ref:`vdst<amdgpu_synid8_vdst64_0>`,       :ref:`vaddr<amdgpu_synid8_addr_ds>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_src2_b32                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_src2_b64                            :ref:`vaddr<amdgpu_synid8_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
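+
+The table above is machine-generated. As a hand-written illustration only (the
+register choices are arbitrary), DS operands and modifiers are spelled like
+this in GFX8 assembly:
+
+.. parsed-literal::
+
+    ds_write_b32   v1, v2 offset:16                // store v2 at LDS address v1+16
+    ds_read_b32    v0, v1 offset:16                // load it back into v0
+    ds_read2_b32   v[2:3], v1 offset0:4 offset1:8  // paired load using the two 8-bit offsets
+    ds_add_rtn_u32 v4, v1, v5 gds                  // atomic add returning the old value, in GDS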
+
+EXP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**      **SRC2**      **SRC3**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    exp                            :ref:`tgt<amdgpu_synid8_tgt>`,      :ref:`vsrc0<amdgpu_synid8_src_exp>`,    :ref:`vsrc1<amdgpu_synid8_src_exp>`,    :ref:`vsrc2<amdgpu_synid8_src_exp>`,    :ref:`vsrc3<amdgpu_synid8_src_exp>`          :ref:`done<amdgpu_synid_done>` :ref:`compr<amdgpu_synid_compr>` :ref:`vm<amdgpu_synid_vm>`
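+
+As a hand-written illustration only (register choices are arbitrary), a
+position export followed by a final color export might read:
+
+.. parsed-literal::
+
+    exp pos0 v4, v5, v6, v7            // position export
+    exp mrt0 v0, v1, v2, v3 done vm    // last export: done ends the sequence, vm sends the valid mask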
+
+FLAT
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**           **SRC0**      **SRC1**             **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    flat_atomic_add                :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_add_x2             :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_and                :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_and_x2             :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_cmpswap            :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`::ref:`b32x2<amdgpu_synid8_type_dev>`      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_cmpswap_x2         :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata128_0>`::ref:`b64x2<amdgpu_synid8_type_dev>`      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_dec                :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u32<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_dec_x2             :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u64<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`::ref:`u64<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_inc                :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u32<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_inc_x2             :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u64<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`::ref:`u64<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_or                 :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_or_x2              :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smax               :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`::ref:`s32<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`::ref:`s32<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smax_x2            :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`::ref:`s64<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`::ref:`s64<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smin               :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`::ref:`s32<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`::ref:`s32<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smin_x2            :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`::ref:`s64<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`::ref:`s64<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_sub                :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_sub_x2             :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_swap               :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_swap_x2            :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umax               :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u32<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umax_x2            :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u64<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`::ref:`u64<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umin               :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u32<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umin_x2            :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`::ref:`u64<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`::ref:`u64<amdgpu_synid8_type_dev>`        :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_xor                :ref:`vdst<amdgpu_synid8_dst_flat_atomic32>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_xor_x2             :ref:`vdst<amdgpu_synid8_dst_flat_atomic64>`::ref:`opt<amdgpu_synid8_opt>`,     :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dword                :ref:`vdst<amdgpu_synid8_vdst32_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx2              :ref:`vdst<amdgpu_synid8_vdst64_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx3              :ref:`vdst<amdgpu_synid8_vdst96_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx4              :ref:`vdst<amdgpu_synid8_vdst128_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sbyte                :ref:`vdst<amdgpu_synid8_vdst32_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sshort               :ref:`vdst<amdgpu_synid8_vdst32_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ubyte                :ref:`vdst<amdgpu_synid8_vdst32_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ushort               :ref:`vdst<amdgpu_synid8_vdst32_0>`,         :ref:`vaddr<amdgpu_synid8_addr_flat>`                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_byte                              :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dword                             :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx2                           :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata64_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx3                           :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata96_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx4                           :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata128_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_short                             :ref:`vaddr<amdgpu_synid8_addr_flat>`,    :ref:`vdata<amdgpu_synid8_vdata32_0>`            :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
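+
+As a hand-written illustration only (register choices are arbitrary), FLAT
+loads, stores and atomics are spelled like this:
+
+.. parsed-literal::
+
+    flat_load_dword  v2, v[0:1] glc      // load through the 64-bit flat address in v[0:1]
+    flat_store_dword v[0:1], v2 slc      // store back, marked system-level coherent
+    flat_atomic_add  v3, v[0:1], v2 glc  // atomic add; with glc the pre-op value is returned in v3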
+
+MIMG
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**       **SRC1**      **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    image_atomic_add                         :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_and                         :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_cmpswap                     :ref:`vdata<amdgpu_synid8_data_mimg_atomic_cmp>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_dec                         :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_inc                         :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_or                          :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_smax                        :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_smin                        :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_sub                         :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_swap                        :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_umax                        :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_umin                        :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_xor                         :ref:`vdata<amdgpu_synid8_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid8_ret>`, :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4                  :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b                :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b_cl             :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b_cl_o           :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b_o              :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c                :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b              :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b_cl           :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b_cl_o         :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b_o            :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_cl             :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_cl_o           :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_l              :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_l_o            :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_lz             :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_lz_o           :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_o              :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_cl               :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_cl_o             :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_l                :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_l_o              :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_lz               :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_lz_o             :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_o                :ref:`vdst<amdgpu_synid8_dst_mimg_gather4>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_get_lod                  :ref:`vdst<amdgpu_synid8_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_get_resinfo              :ref:`vdst<amdgpu_synid8_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load                     :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_load_mip                 :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_load_mip_pck             :ref:`vdst<amdgpu_synid8_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_mip_pck_sgn         :ref:`vdst<amdgpu_synid8_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_pck                 :ref:`vdst<amdgpu_synid8_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_pck_sgn             :ref:`vdst<amdgpu_synid8_dst_mimg_regular>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`                    :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample                   :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b                 :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b_cl              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b_cl_o            :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b_o               :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c                 :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b               :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b_cl            :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b_cl_o          :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b_o             :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd_cl           :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd_cl_o         :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd_o            :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cl              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cl_o            :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d               :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d_cl            :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d_cl_o          :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d_o             :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_l               :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_l_o             :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_lz              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_lz_o            :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_o               :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd                :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd_cl             :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd_cl_o           :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd_o              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cl                :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cl_o              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d                 :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d_cl              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d_cl_o            :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d_o               :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_l                 :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_l_o               :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_lz                :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_lz_o              :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_o                 :ref:`vdst<amdgpu_synid8_dst_mimg_regular_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,     :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`,    :ref:`ssamp<amdgpu_synid8_samp_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_store                              :ref:`vdata<amdgpu_synid8_data_mimg_store_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_store_mip                          :ref:`vdata<amdgpu_synid8_data_mimg_store_d16>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_store_mip_pck                      :ref:`vdata<amdgpu_synid8_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_store_pck                          :ref:`vdata<amdgpu_synid8_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid8_addr_mimg>`,    :ref:`srsrc<amdgpu_synid8_rsrc_mimg>`          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+
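+As a hand-written illustration (not part of the generated tables above), a
+GFX8 image sample and a matching store might be spelled as follows; the
+register ranges and modifier choices are arbitrary and depend on the image
+dimensionality and channel mask:
+
+.. parsed-literal::
+
+    image_sample v[0:3], v[4:7], s[8:15], s[16:19] dmask:0xf    // sample all four channels
+    image_store  v[0:3], v[4:6], s[8:15] dmask:0xf unorm        // write the four channels back
+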
+MUBUF
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                  **DST**   **SRC0**             **SRC1**    **SRC2**    **SRC3**    **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    buffer_atomic_add                  :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_add_x2               :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_and                  :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_and_x2               :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_cmpswap              :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`::ref:`b32x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_cmpswap_x2           :ref:`vdata<amdgpu_synid8_data_buf_atomic128>`::ref:`dst<amdgpu_synid8_ret>`::ref:`b64x2<amdgpu_synid8_type_dev>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_dec                  :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_dec_x2               :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u64<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_inc                  :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_inc_x2               :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u64<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_or                   :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_or_x2                :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smax                 :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`::ref:`s32<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smax_x2              :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`::ref:`s64<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smin                 :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`::ref:`s32<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smin_x2              :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`::ref:`s64<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_sub                  :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_sub_x2               :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_swap                 :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_swap_x2              :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umax                 :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umax_x2              :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u64<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umin                 :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umin_x2              :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`::ref:`u64<amdgpu_synid8_type_dev>`,   :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_xor                  :ref:`vdata<amdgpu_synid8_data_buf_atomic32>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_xor_x2               :ref:`vdata<amdgpu_synid8_data_buf_atomic64>`::ref:`dst<amdgpu_synid8_ret>`,       :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dword            :ref:`vdst<amdgpu_synid8_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_dwordx2          :ref:`vdst<amdgpu_synid8_dst_buf_64>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dwordx3          :ref:`vdst<amdgpu_synid8_dst_buf_96>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dwordx4          :ref:`vdst<amdgpu_synid8_dst_buf_128>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_x     :ref:`vdst<amdgpu_synid8_dst_buf_d16_32>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_xy    :ref:`vdst<amdgpu_synid8_dst_buf_d16_64>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_xyz   :ref:`vdst<amdgpu_synid8_dst_buf_d16_96>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_xyzw  :ref:`vdst<amdgpu_synid8_dst_buf_d16_128>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_x         :ref:`vdst<amdgpu_synid8_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_format_xy        :ref:`vdst<amdgpu_synid8_dst_buf_64>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_xyz       :ref:`vdst<amdgpu_synid8_dst_buf_96>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_xyzw      :ref:`vdst<amdgpu_synid8_dst_buf_128>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_sbyte            :ref:`vdst<amdgpu_synid8_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_sshort           :ref:`vdst<amdgpu_synid8_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_ubyte            :ref:`vdst<amdgpu_synid8_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_ushort           :ref:`vdst<amdgpu_synid8_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid8_addr_buf>`,           :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_store_byte                  :ref:`vdata<amdgpu_synid8_vdata32_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dword                 :ref:`vdata<amdgpu_synid8_vdata32_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx2               :ref:`vdata<amdgpu_synid8_vdata64_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx3               :ref:`vdata<amdgpu_synid8_vdata96_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx4               :ref:`vdata<amdgpu_synid8_vdata128_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_x          :ref:`vdata<amdgpu_synid8_data_buf_d16_32>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_xy         :ref:`vdata<amdgpu_synid8_data_buf_d16_64>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_xyz        :ref:`vdata<amdgpu_synid8_data_buf_d16_96>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_xyzw       :ref:`vdata<amdgpu_synid8_data_buf_d16_128>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_x              :ref:`vdata<amdgpu_synid8_vdata32_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xy             :ref:`vdata<amdgpu_synid8_vdata64_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xyz            :ref:`vdata<amdgpu_synid8_vdata96_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xyzw           :ref:`vdata<amdgpu_synid8_vdata128_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_lds_dword             :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,           :ref:`soffset<amdgpu_synid8_offset_buf>`                 :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`lds<amdgpu_synid_lds>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_short                 :ref:`vdata<amdgpu_synid8_vdata32_0>`,           :ref:`vaddr<amdgpu_synid8_addr_buf>`,  :ref:`srsrc<amdgpu_synid8_rsrc_buf>`,  :ref:`soffset<amdgpu_synid8_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_wbinvl1
+    buffer_wbinvl1_vol
+
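+As a hand-written illustration (not generated output), typical MUBUF accesses
+might look as follows; the registers and the 12-bit immediate offset are
+arbitrary, and the ``offset12`` modifier from the table is written
+``offset:<imm>`` in assembler syntax:
+
+.. parsed-literal::
+
+    buffer_load_dword  v0, v1, s[4:7], s0 offen offset:16 glc
+    buffer_store_dword v0, v1, s[4:7], s0 offen offset:16
+    buffer_atomic_add  v2, v1, s[4:7], s0 offen glc    // with glc, the pre-op value is returned in v2
+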
+SMEM
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**      **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_atc_probe                              :ref:`imm3<amdgpu_synid8_perm_smem>`,     :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`
+    s_atc_probe_buffer                       :ref:`imm3<amdgpu_synid8_perm_smem>`,     :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`
+    s_buffer_load_dword            :ref:`sdst<amdgpu_synid8_sdst32_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx16         :ref:`sdst<amdgpu_synid8_sdst512_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx2          :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx4          :ref:`sdst<amdgpu_synid8_sdst128_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx8          :ref:`sdst<amdgpu_synid8_sdst256_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_store_dword                     :ref:`sdata<amdgpu_synid8_sdata32_0>`,    :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_store>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_store_dwordx2                   :ref:`sdata<amdgpu_synid8_sdata64_0>`,    :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_store>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_store_dwordx4                   :ref:`sdata<amdgpu_synid8_sdata128_0>`,    :ref:`sbase<amdgpu_synid8_base_smem_buf>`,    :ref:`soffset<amdgpu_synid8_offset_smem_store>`        :ref:`glc<amdgpu_synid_glc>`
+    s_dcache_inv
+    s_dcache_inv_vol
+    s_dcache_wb
+    s_dcache_wb_vol
+    s_load_dword                   :ref:`sdst<amdgpu_synid8_sdst32_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx16                :ref:`sdst<amdgpu_synid8_sdst512_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx2                 :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx4                 :ref:`sdst<amdgpu_synid8_sdst128_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx8                 :ref:`sdst<amdgpu_synid8_sdst256_0>`,     :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_load>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_memrealtime                  :ref:`sdst<amdgpu_synid8_sdst64_0>`
+    s_memtime                      :ref:`sdst<amdgpu_synid8_sdst64_0>`
+    s_store_dword                            :ref:`sdata<amdgpu_synid8_sdata32_0>`,    :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_store>`        :ref:`glc<amdgpu_synid_glc>`
+    s_store_dwordx2                          :ref:`sdata<amdgpu_synid8_sdata64_0>`,    :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_store>`        :ref:`glc<amdgpu_synid_glc>`
+    s_store_dwordx4                          :ref:`sdata<amdgpu_synid8_sdata128_0>`,    :ref:`sbase<amdgpu_synid8_base_smem_addr>`,    :ref:`soffset<amdgpu_synid8_offset_smem_store>`        :ref:`glc<amdgpu_synid_glc>`
+
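+As a hand-written illustration (not generated output), scalar memory accesses
+might be written as below; the SGPR numbers and immediate offsets are
+arbitrary. Scalar stores such as ``s_store_dword`` are a GFX8 addition:
+
+.. parsed-literal::
+
+    s_load_dword          s5, s[2:3], 0x10        // 64-bit base address in s[2:3]
+    s_buffer_load_dwordx4 s[8:11], s[4:7], 0x0    // base is a 128-bit buffer resource descriptor
+    s_store_dword         s5, s[2:3], 0x20 glc
+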
+SOP1
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_abs_i32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_and_saveexec_b64             :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_andn2_saveexec_b64           :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_bcnt0_i32_b32                :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_bcnt0_i32_b64                :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_bcnt1_i32_b32                :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_bcnt1_i32_b64                :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_bitset0_b32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_bitset0_b64                  :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    s_bitset1_b32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_bitset1_b64                  :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    s_brev_b32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_brev_b64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_cbranch_join                           :ref:`ssrc<amdgpu_synid8_ssrc32_1>`
+    s_cmov_b32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_cmov_b64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_ff0_i32_b32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_ff0_i32_b64                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_ff1_i32_b32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_ff1_i32_b64                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_flbit_i32                    :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_flbit_i32_b32                :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_flbit_i32_b64                :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_flbit_i32_i64                :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_getpc_b64                    :ref:`sdst<amdgpu_synid8_sdst64_1>`
+    s_mov_b32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_mov_b64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_mov_fed_b32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_movreld_b32                  :ref:`sdst<amdgpu_synid8_sdst32_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_movreld_b64                  :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_movrels_b32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_1>`
+    s_movrels_b64                  :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_1>`
+    s_nand_saveexec_b64            :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_nor_saveexec_b64             :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_not_b32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_not_b64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_or_saveexec_b64              :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_orn2_saveexec_b64            :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_quadmask_b32                 :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_quadmask_b64                 :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_rfe_b64                                :ref:`ssrc<amdgpu_synid8_ssrc64_1>`
+    s_set_gpr_idx_idx                        :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_setpc_b64                              :ref:`ssrc<amdgpu_synid8_ssrc64_1>`
+    s_sext_i32_i16                 :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_sext_i32_i8                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_swappc_b64                   :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_1>`
+    s_wqm_b32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc32_0>`
+    s_wqm_b64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_xnor_saveexec_b64            :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+    s_xor_saveexec_b64             :ref:`sdst<amdgpu_synid8_sdst64_0>`,     :ref:`ssrc<amdgpu_synid8_ssrc64_0>`
+
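+As a hand-written illustration (not generated output), a few SOP1 instructions
+from the table above, with arbitrary register choices:
+
+.. parsed-literal::
+
+    s_mov_b32   s0, s1          // copy one SGPR
+    s_brev_b32  s2, s3          // s2 = bit-reversed s3
+    s_getpc_b64 s[4:5]          // no source operand: writes the program counter
+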
+SOP2
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**       **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_absdiff_i32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_add_i32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_add_u32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_addc_u32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_and_b32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_and_b64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_andn2_b32                    :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_andn2_b64                    :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_ashr_i32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_ashr_i64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_bfe_i32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_bfe_i64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_bfe_u32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_bfe_u64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_bfm_b32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_bfm_b64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`, :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    s_cbranch_g_fork                         :ref:`ssrc0<amdgpu_synid8_ssrc64_2>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_2>`
+    s_cselect_b32                  :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cselect_b64                  :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_lshl_b32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_lshl_b64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_lshr_b32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_lshr_b64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_max_i32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_max_u32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_min_i32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_min_u32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_mul_i32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_nand_b32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_nand_b64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_nor_b32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_nor_b64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_or_b32                       :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_or_b64                       :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_orn2_b32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_orn2_b64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_rfe_restore_b64                        :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    s_sub_i32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_sub_u32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_subb_u32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_xnor_b32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_xnor_b64                     :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_xor_b32                      :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_xor_b64                      :ref:`sdst<amdgpu_synid8_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,     :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+
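+A minimal illustrative sketch of SOP2 syntax; the registers and the literal
+shift amount below are hypothetical:
+
+.. parsed-literal::
+
+    s_add_u32     s0, s1, s2           ; 32-bit add, carry-out written to SCC
+    s_lshl_b64    s[2:3], s[4:5], 4    ; 64-bit shift; the amount is a u32
+    s_cselect_b32 s0, s1, s2           ; ssrc0 if SCC is set, else ssrc1
+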
+SOPC
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_bitcmp0_b32                  :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_bitcmp0_b64                  :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_bitcmp1_b32                  :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_bitcmp1_b64                  :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`
+    s_cmp_eq_i32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_eq_u32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_eq_u64                   :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_cmp_ge_i32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_ge_u32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_gt_i32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_gt_u32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_le_i32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_le_u32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_lg_i32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_lg_u32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_lg_u64                   :ref:`ssrc0<amdgpu_synid8_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc64_0>`
+    s_cmp_lt_i32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_cmp_lt_u32                   :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+    s_set_gpr_idx_on               :ref:`ssrc<amdgpu_synid8_ssrc32_0>`,     :ref:`imm4<amdgpu_synid8_imm4>`
+    s_setvskip                     :ref:`ssrc0<amdgpu_synid8_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid8_ssrc32_0>`
+
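+For illustration, SOPC comparisons write only the scalar condition code
+(SCC); the registers and bit index below are hypothetical:
+
+.. parsed-literal::
+
+    s_cmp_eq_u32  s0, s1               ; SCC = (s0 == s1)
+    s_bitcmp1_b32 s2, 5                ; SCC = bit 5 of s2
+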
+SOPK
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_addk_i32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cbranch_i_fork                         :ref:`ssrc<amdgpu_synid8_ssrc64_3>`,     :ref:`label<amdgpu_synid8_label>`
+    s_cmovk_i32                    :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cmpk_eq_i32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cmpk_eq_u32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_uimm16>`
+    s_cmpk_ge_i32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cmpk_ge_u32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_uimm16>`
+    s_cmpk_gt_i32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cmpk_gt_u32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_uimm16>`
+    s_cmpk_le_i32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cmpk_le_u32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_uimm16>`
+    s_cmpk_lg_i32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cmpk_lg_u32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_uimm16>`
+    s_cmpk_lt_i32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_cmpk_lt_u32                            :ref:`ssrc<amdgpu_synid8_ssrc32_2>`,     :ref:`imm16<amdgpu_synid8_uimm16>`
+    s_getreg_b32                   :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`hwreg<amdgpu_synid8_hwreg>`
+    s_movk_i32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_mulk_i32                     :ref:`sdst<amdgpu_synid8_sdst32_1>`,     :ref:`imm16<amdgpu_synid8_simm16>`
+    s_setreg_b32                   :ref:`hwreg<amdgpu_synid8_hwreg>`,    :ref:`ssrc<amdgpu_synid8_ssrc32_2>`
+    s_setreg_imm32_b32             :ref:`hwreg<amdgpu_synid8_hwreg>`,    :ref:`imm32<amdgpu_synid8_bimm32>`
+
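+As a hedged example of SOPK forms, which embed a 16-bit immediate in the
+instruction word (the values below are hypothetical):
+
+.. parsed-literal::
+
+    s_movk_i32    s0, 0x1234           ; sign-extended 16-bit move
+    s_addk_i32    s0, -4               ; sdst += simm16
+    s_cmpk_gt_i32 s1, 100              ; SCC = (s1 > 100)
+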
+SOPP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **SRC**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_barrier
+    s_branch                       :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_cdbgsys              :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_cdbgsys_and_user     :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_cdbgsys_or_user      :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_cdbguser             :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_execnz               :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_execz                :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_scc0                 :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_scc1                 :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_vccnz                :ref:`label<amdgpu_synid8_label>`
+    s_cbranch_vccz                 :ref:`label<amdgpu_synid8_label>`
+    s_decperflevel                 :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_endpgm
+    s_endpgm_saved
+    s_icache_inv
+    s_incperflevel                 :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_nop                          :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_sendmsg                      :ref:`msg<amdgpu_synid8_msg>`
+    s_sendmsghalt                  :ref:`msg<amdgpu_synid8_msg>`
+    s_set_gpr_idx_mode             :ref:`imm4<amdgpu_synid8_imm4>`
+    s_set_gpr_idx_off
+    s_sethalt                      :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_setkill                      :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_setprio                      :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_sleep                        :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_trap                         :ref:`imm16<amdgpu_synid8_bimm16>`
+    s_ttracedata
+    s_waitcnt                      :ref:`waitcnt<amdgpu_synid8_waitcnt>`
+    s_wakeup
+
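+An illustrative SOPP sequence; the branch target name is hypothetical:
+
+.. parsed-literal::
+
+    s_waitcnt       vmcnt(0) lgkmcnt(0)  ; wait for outstanding memory ops
+    s_cbranch_scc1  label_done           ; branch if SCC == 1
+    s_endpgm                             ; end the wavefront
+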
+VINTRP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**       **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_interp_mov_f32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`param<amdgpu_synid8_param>`::ref:`b32<amdgpu_synid8_type_dev>`, :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_interp_p1_f32                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`,      :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_interp_p2_f32                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`,      :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`
+
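+An illustrative two-step attribute interpolation; the VGPRs and attribute
+channels are hypothetical:
+
+.. parsed-literal::
+
+    v_interp_p1_f32  v0, v1, attr0.x   ; first step:  v0 = p10 * i + p0
+    v_interp_p2_f32  v0, v2, attr0.x   ; second step: v0 += p20 * j
+    v_interp_mov_f32 v3, p0, attr0.y   ; move a parameter without interpolating
+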
+VOP1
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC**            **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_bfrev_b32                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_bfrev_b32_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_bfrev_b32_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ceil_f16                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_ceil_f16_dpp                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ceil_f16_sdwa                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ceil_f32                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_ceil_f32_dpp                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ceil_f32_sdwa                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ceil_f64                     :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_clrexcp
+    v_cos_f16                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cos_f16_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cos_f16_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cos_f32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cos_f32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cos_f32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f16_f32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f16_f32_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f16_f32_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f16_i16                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f16_i16_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f16_i16_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f16_u16                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f16_u16_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f16_u16_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_f16                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f32_f16_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_f16_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_f64                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_cvt_f32_i32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f32_i32_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_i32_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_u32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f32_u32_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_u32_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte0               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f32_ubyte0_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte0_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte1               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f32_ubyte1_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte1_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte2               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f32_ubyte2_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte2_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte3               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f32_ubyte3_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte3_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f64_f32                  :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f64_i32                  :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_f64_u32                  :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_flr_i32_f32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_flr_i32_f32_dpp          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_flr_i32_f32_sdwa         :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_i16_f16                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_i16_f16_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_i16_f16_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_i32_f32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_i32_f32_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_i32_f32_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_i32_f64                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_cvt_off_f32_i4               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_off_f32_i4_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_off_f32_i4_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_rpi_i32_f32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_rpi_i32_f32_dpp          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_rpi_i32_f32_sdwa         :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_u16_f16                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_u16_f16_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_u16_f16_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_u32_f32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_cvt_u32_f32_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_u32_f32_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_u32_f64                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_exp_f16                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_exp_f16_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_exp_f16_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_exp_f32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_exp_f32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_exp_f32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_exp_legacy_f32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_exp_legacy_f32_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_exp_legacy_f32_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ffbh_i32                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_ffbh_i32_dpp                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ffbh_i32_sdwa                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ffbh_u32                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_ffbh_u32_dpp                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ffbh_u32_sdwa                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ffbl_b32                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_ffbl_b32_dpp                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ffbl_b32_sdwa                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_floor_f16                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_floor_f16_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_floor_f16_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_floor_f32                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_floor_f32_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_floor_f32_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_floor_f64                    :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_fract_f16                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_fract_f16_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_fract_f16_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_fract_f32                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_fract_f32_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_fract_f32_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_fract_f64                    :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_frexp_exp_i16_f16            :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_frexp_exp_i16_f16_dpp        :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_exp_i16_f16_sdwa       :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_exp_i32_f32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_frexp_exp_i32_f32_dpp        :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_exp_i32_f32_sdwa       :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_exp_i32_f64            :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_frexp_mant_f16               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_frexp_mant_f16_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_mant_f16_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_mant_f32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_frexp_mant_f32_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_mant_f32_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_mant_f64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_log_f16                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_log_f16_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_log_f16_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_log_f32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_log_f32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_log_f32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_log_legacy_f32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_log_legacy_f32_dpp           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_log_legacy_f32_sdwa          :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_mov_b32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_mov_b32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mov_b32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_mov_fed_b32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_mov_fed_b32_dpp              :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mov_fed_b32_sdwa             :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_movreld_b32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_movrels_b32                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`
+    v_movrelsd_b32                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`
+    v_nop
+    v_not_b32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_not_b32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_not_b32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`         :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rcp_f16                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_rcp_f16_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rcp_f16_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rcp_f32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_rcp_f32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rcp_f32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rcp_f64                      :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_rcp_iflag_f32                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_rcp_iflag_f32_dpp            :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rcp_iflag_f32_sdwa           :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_readfirstlane_b32            :ref:`sdst<amdgpu_synid8_sdst32_2>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`
+    v_rndne_f16                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_rndne_f16_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rndne_f16_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rndne_f32                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_rndne_f32_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rndne_f32_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rndne_f64                    :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_rsq_f16                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_rsq_f16_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rsq_f16_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rsq_f32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_rsq_f32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rsq_f32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rsq_f64                      :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_sin_f16                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_sin_f16_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sin_f16_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sin_f32                      :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_sin_f32_dpp                  :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sin_f32_sdwa                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sqrt_f16                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_sqrt_f16_dpp                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sqrt_f16_sdwa                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sqrt_f32                     :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_sqrt_f32_dpp                 :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sqrt_f32_sdwa                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sqrt_f64                     :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+    v_trunc_f16                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_trunc_f16_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_trunc_f16_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_trunc_f32                    :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`src<amdgpu_synid8_src32_0>`
+    v_trunc_f32_dpp                :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_trunc_f32_sdwa               :ref:`vdst<amdgpu_synid8_vdst32_0>`,     :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_trunc_f64                    :ref:`vdst<amdgpu_synid8_vdst64_0>`,     :ref:`src<amdgpu_synid8_src64_0>`
+
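+Each row above gives an instruction name followed by its operands in order,
+with the optional modifiers it accepts listed at the end of the row. As a
+hedged illustration only (the register numbers and modifier values below are
+arbitrary choices for the example, not taken from this table), the
+``v_sqrt_f32`` entry and its DPP/SDWA variants might be written as::
+
+    v_sqrt_f32      v1, v0
+    v_sqrt_f32_dpp  v1, -v0      row_shl:1 row_mask:0xf bank_mask:0xf
+    v_sqrt_f32_sdwa v1, abs(v0)  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+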
+VOP2
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**           **DST0**  **DST1** **SRC0**         **SRC1**        **SRC2**  **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_add_f16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_add_f16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_f16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_add_f32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_add_f32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_f32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_add_u16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_add_u16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_u16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_add_u32             :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_add_u32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_u32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_addc_u32            :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`
+    v_addc_u32_dpp        :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_addc_u32_sdwa       :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_and_b32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_and_b32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_and_b32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_ashrrev_i16         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`::ref:`u16<amdgpu_synid8_type_dev>`,    :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_ashrrev_i16_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`u16<amdgpu_synid8_type_dev>`,   :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ashrrev_i16_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`u16<amdgpu_synid8_type_dev>`, :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_ashrrev_i32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_ashrrev_i32_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ashrrev_i32_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`u32<amdgpu_synid8_type_dev>`, :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cndmask_b32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`
+    v_cndmask_b32_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cndmask_b32_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_ldexp_f16           :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`i16<amdgpu_synid8_type_dev>`
+    v_ldexp_f16_dpp       :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`i16<amdgpu_synid8_type_dev>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ldexp_f16_sdwa      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`i16<amdgpu_synid8_type_dev>`       :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshlrev_b16         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`::ref:`u16<amdgpu_synid8_type_dev>`,    :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_lshlrev_b16_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`u16<amdgpu_synid8_type_dev>`,   :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshlrev_b16_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`u16<amdgpu_synid8_type_dev>`, :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshlrev_b32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_lshlrev_b32_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshlrev_b32_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`u32<amdgpu_synid8_type_dev>`, :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshrrev_b16         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`::ref:`u16<amdgpu_synid8_type_dev>`,    :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_lshrrev_b16_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`u16<amdgpu_synid8_type_dev>`,   :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshrrev_b16_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`u16<amdgpu_synid8_type_dev>`, :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshrrev_b32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_lshrrev_b32_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshrrev_b32_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`u32<amdgpu_synid8_type_dev>`, :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mac_f16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mac_f16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mac_f16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mac_f32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mac_f32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mac_f32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_madak_f16           :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`imm32<amdgpu_synid8_fimm16>`
+    v_madak_f32           :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`imm32<amdgpu_synid8_fimm32>`
+    v_madmk_f16           :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`imm32<amdgpu_synid8_fimm16>`,      :ref:`vsrc2<amdgpu_synid8_vsrc32_0>`
+    v_madmk_f32           :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`imm32<amdgpu_synid8_fimm32>`,      :ref:`vsrc2<amdgpu_synid8_vsrc32_0>`
+    v_max_f16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_max_f16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_f16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_f32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_max_f32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_f32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_i16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_max_i16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_i16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_i32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_max_i32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_i32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_u16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_max_u16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_u16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_u32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_max_u32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_u32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_f16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_min_f16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_f16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_f32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_min_f32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_f32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_i16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_min_i16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_i16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_i32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_min_i32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_i32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_u16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_min_u16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_u16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_u32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_min_u32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_u32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_f16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_f16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_f16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_f32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_f32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_f32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_hi_i32_i24      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_hi_i32_i24_dpp  :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_hi_i32_i24_sdwa :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_hi_u32_u24      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_hi_u32_u24_dpp  :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_hi_u32_u24_sdwa :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_i32_i24         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_i32_i24_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_i32_i24_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_legacy_f32      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_legacy_f32_dpp  :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_legacy_f32_sdwa :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_lo_u16          :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_lo_u16_dpp      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_lo_u16_sdwa     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_u32_u24         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_mul_u32_u24_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_u32_u24_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_or_b32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_or_b32_dpp          :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_or_b32_sdwa         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_f16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_sub_f16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_f16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_f32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_sub_f32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_f32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_u16             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_sub_u16_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_u16_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_u32             :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_sub_u32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_u32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subb_u32            :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`
+    v_subb_u32_dpp        :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subb_u32_sdwa       :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subbrev_u32         :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`
+    v_subbrev_u32_dpp     :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subbrev_u32_sdwa    :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid8_vcc_64>`   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_f16          :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_subrev_f16_dpp      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_f16_sdwa     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_f32          :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_subrev_f32_dpp      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_f32_sdwa     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_u16          :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_subrev_u16_dpp      :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_u16_sdwa     :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_u32          :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_subrev_u32_dpp      :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_u32_sdwa     :ref:`vdst<amdgpu_synid8_vdst32_0>`, :ref:`vcc<amdgpu_synid8_vcc_64>`, :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_xor_b32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`src0<amdgpu_synid8_src32_0>`,        :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_xor_b32_dpp         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_xor_b32_sdwa        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+
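+As a hedged sketch of how these rows translate to assembly (registers chosen
+arbitrarily for illustration), the ``v_add_f32`` row and the carry-writing
+``v_add_u32`` row above might be assembled as::
+
+    v_add_f32      v2, v0, v1
+    v_add_u32      v2, vcc, v0, v1
+    v_add_f32_sdwa v2, v0, v1  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+
+Note that ``v_add_u32`` names ``vcc`` explicitly as its second destination
+(DST1), the carry-out, as the table indicates.
+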
+VOP3
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**             **DST0**       **DST1**     **SRC0**         **SRC1**        **SRC2**            **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_add_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_add_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_add_f64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_add_u16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_add_u32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_addc_u32_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`ssrc2<amdgpu_synid8_ssrc64_1>`
+    v_alignbit_b32          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_alignbyte_b32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_and_b32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_ashrrev_i16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u16<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`
+    v_ashrrev_i32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`
+    v_ashrrev_i64           :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src64_1>`
+    v_bcnt_u32_b32          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_bfe_i32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`src2<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`
+    v_bfe_u32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_bfi_b32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_bfm_b32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_bfrev_b32_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_ceil_f16_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_ceil_f32_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ceil_f64_e64          :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_clrexcp_e64
+    v_cmp_class_f16_e64     :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmp_class_f32_e64     :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmp_class_f64_e64     :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmp_eq_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_eq_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_eq_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_eq_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_eq_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_eq_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_eq_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_eq_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_eq_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_f_f16_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_f_f32_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_f_f64_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_f_i16_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_f_i32_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_f_i64_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_f_u16_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_f_u32_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_f_u64_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_ge_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ge_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ge_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ge_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ge_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ge_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_ge_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ge_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ge_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_gt_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_gt_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_gt_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_gt_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_gt_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_gt_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_gt_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_gt_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_gt_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_le_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_le_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_le_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_le_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_le_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_le_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_le_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_le_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_le_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_lg_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lg_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lg_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_lt_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_lt_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_lt_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_lt_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_lt_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_ne_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ne_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ne_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_ne_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ne_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_ne_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_neq_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_neq_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_neq_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nge_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nge_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nge_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ngt_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ngt_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ngt_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nle_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nle_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nle_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlg_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlg_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlg_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlt_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlt_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlt_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_o_f16_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_o_f32_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_o_f64_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_t_i16_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_t_i32_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_t_i64_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_t_u16_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_t_u32_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmp_t_u64_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmp_tru_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_tru_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_tru_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_u_f16_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_u_f32_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_u_f64_e64         :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_class_f16_e64    :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmpx_class_f32_e64    :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmpx_class_f64_e64    :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmpx_eq_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_eq_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_eq_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_eq_i16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_eq_i32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_eq_i64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_eq_u16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_eq_u32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_eq_u64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_f_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_f_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_f_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_f_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_f_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_f_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_f_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_f_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_f_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_ge_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ge_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ge_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ge_i16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ge_i32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ge_i64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_ge_u16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ge_u32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ge_u64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_gt_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_gt_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_gt_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_gt_i16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_gt_i32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_gt_i64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_gt_u16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_gt_u32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_gt_u64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_le_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_le_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_le_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_le_i16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_le_i32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_le_i64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_le_u16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_le_u32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_le_u64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_lg_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lg_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lg_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_f16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_f32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_f64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_i16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_lt_i32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_lt_i64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_lt_u16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_lt_u32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_lt_u64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_ne_i16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ne_i32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ne_i64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_ne_u16_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ne_u32_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_ne_u64_e64       :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_neq_f16_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_neq_f32_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_neq_f64_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nge_f16_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nge_f32_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nge_f64_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ngt_f16_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ngt_f32_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ngt_f64_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nle_f16_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nle_f32_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nle_f64_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlg_f16_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlg_f32_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlg_f64_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlt_f16_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlt_f32_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlt_f64_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_o_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_o_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_o_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_t_i16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_t_i32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_t_i64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_t_u16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_t_u32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cmpx_t_u64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`
+    v_cmpx_tru_f16_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_tru_f32_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_tru_f64_e64      :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_u_f16_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_u_f32_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_u_f64_e64        :ref:`sdst<amdgpu_synid8_sdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cndmask_b32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`ssrc2<amdgpu_synid8_ssrc64_1>`
+    v_cos_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_cos_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubeid_f32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubema_f32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubesc_f32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubetc_f32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f16_f32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f16_i16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_f16_u16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_f32_f16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_f64_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_i32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_u32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte0_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte1_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte2_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte3_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_f32_e64       :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_i32_e64       :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_u32_e64       :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_flr_i32_f32_e64   :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_i16_f16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_i32_f32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_i32_f64_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_off_f32_i4_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_pk_i16_i32        :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cvt_pk_u16_u32        :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_cvt_pk_u8_f32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,   :ref:`src2<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`
+    v_cvt_pkaccum_u8_f32    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`
+    v_cvt_pknorm_i16_f32    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_pknorm_u16_f32    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_pkrtz_f16_f32     :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_rpi_i32_f32_e64   :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_u16_f16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_u32_f32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_cvt_u32_f64_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_div_fixup_f16         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>`
+    v_div_fixup_f32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fixup_f64         :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fmas_f32          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fmas_f64          :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_scale_f32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`,     :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_div_scale_f64         :ref:`vdst<amdgpu_synid8_vdst64_0>`,      :ref:`vcc<amdgpu_synid8_vcc_64>`,     :ref:`src0<amdgpu_synid8_src64_1>`,        :ref:`src1<amdgpu_synid8_src64_1>`,       :ref:`src2<amdgpu_synid8_src64_1>`
+    v_exp_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_exp_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_exp_legacy_f32_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ffbh_i32_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_ffbh_u32_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_ffbl_b32_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_floor_f16_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_floor_f32_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_floor_f64_e64         :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fma_f16               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>`
+    v_fma_f32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fma_f64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fract_f16_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_fract_f32_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fract_f64_e64         :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_frexp_exp_i16_f16_e64 :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_frexp_exp_i32_f32_e64 :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_frexp_exp_i32_f64_e64 :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`
+    v_frexp_mant_f16_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_frexp_mant_f32_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_frexp_mant_f64_e64    :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_mov_f32_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`param<amdgpu_synid8_param>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p1_f32_e64     :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p1ll_f16       :ref:`vdst<amdgpu_synid8_vdst32_0>`::ref:`f32<amdgpu_synid8_type_dev>`,           :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid8_type_dev>`,  :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`                    :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p1lv_f16       :ref:`vdst<amdgpu_synid8_vdst32_0>`::ref:`f32<amdgpu_synid8_type_dev>`,           :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid8_type_dev>`, :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`vsrc2<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`::ref:`f16x2<amdgpu_synid8_type_dev>`   :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p2_f16         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid8_type_dev>`, :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`vsrc2<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid8_type_dev>`     :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_interp_p2_f32_e64     :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`vsrc<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`attr<amdgpu_synid8_attr>`::ref:`b32<amdgpu_synid8_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ldexp_f16_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`i16<amdgpu_synid8_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_ldexp_f32             :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`i32<amdgpu_synid8_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ldexp_f64             :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`i32<amdgpu_synid8_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_lerp_u8               :ref:`vdst<amdgpu_synid8_vdst32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,           :ref:`src0<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`src2<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_log_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_log_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_log_legacy_f32_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_lshlrev_b16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u16<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`
+    v_lshlrev_b32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`
+    v_lshlrev_b64           :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src64_1>`
+    v_lshrrev_b16_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u16<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`
+    v_lshrrev_b32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`
+    v_lshrrev_b64           :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src64_1>`
+    v_mac_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_mac_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_f16               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_f32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_i16               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`            :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_i32_i24           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`::ref:`i32<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_i64_i32           :ref:`vdst<amdgpu_synid8_vdst64_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src64_1>`::ref:`i64<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_legacy_f32        :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_u16               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`            :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_u32_u24           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_u64_u32           :ref:`vdst<amdgpu_synid8_vdst64_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src64_1>`::ref:`u64<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_max3_f32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max3_i32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_max3_u32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_max_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_max_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max_f64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max_i16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_max_i32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_max_u16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_max_u32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mbcnt_hi_u32_b32      :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mbcnt_lo_u32_b32      :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_med3_f32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_med3_i32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_med3_u32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_min3_f32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min3_i32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_min3_u32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_min_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_min_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min_f64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min_i16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_min_i32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_min_u16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_min_u32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mov_b32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_mov_fed_b32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_movreld_b32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_movrels_b32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`vsrc<amdgpu_synid8_vsrc32_0>`
+    v_movrelsd_b32_e64      :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`vsrc<amdgpu_synid8_vsrc32_0>`
+    v_mqsad_pk_u16_u8       :ref:`vdst<amdgpu_synid8_vdst64_0>`::ref:`b64<amdgpu_synid8_type_dev>`,           :ref:`src0<amdgpu_synid8_src64_1>`::ref:`b64<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`src2<amdgpu_synid8_src64_1>`::ref:`b64<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mqsad_u32_u8          :ref:`vdst<amdgpu_synid8_vdst128_0>`::ref:`b128<amdgpu_synid8_type_dev>`,          :ref:`src0<amdgpu_synid8_src64_1>`::ref:`b64<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`vsrc2<amdgpu_synid8_vsrc128_0>`::ref:`b128<amdgpu_synid8_type_dev>`      :ref:`clamp<amdgpu_synid_clamp>`
+    v_msad_u8               :ref:`vdst<amdgpu_synid8_vdst32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,           :ref:`src0<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`src2<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mul_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_mul_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_f64               :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_hi_i32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mul_hi_i32_i24_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mul_hi_u32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mul_hi_u32_u24_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mul_i32_i24_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mul_legacy_f32_e64    :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_lo_u16_e64        :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mul_lo_u32            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_mul_u32_u24_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_nop_e64
+    v_not_b32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`
+    v_or_b32_e64            :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_perm_b32              :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`
+    v_qsad_pk_u16_u8        :ref:`vdst<amdgpu_synid8_vdst64_0>`::ref:`b64<amdgpu_synid8_type_dev>`,           :ref:`src0<amdgpu_synid8_src64_1>`::ref:`b64<amdgpu_synid8_type_dev>`,    :ref:`src1<amdgpu_synid8_src32_1>`::ref:`b32<amdgpu_synid8_type_dev>`,   :ref:`src2<amdgpu_synid8_src64_1>`::ref:`b64<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_rcp_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_rcp_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_f64_e64           :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_iflag_f32_e64     :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_readlane_b32          :ref:`sdst<amdgpu_synid8_sdst32_2>`,               :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`,       :ref:`ssrc1<amdgpu_synid8_ssrc32_3>`
+    v_rndne_f16_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_rndne_f32_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rndne_f64_e64         :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_rsq_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_f64_e64           :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sad_hi_u8             :ref:`vdst<amdgpu_synid8_vdst32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,           :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u8x4<amdgpu_synid8_type_dev>`,   :ref:`src1<amdgpu_synid8_src32_1>`::ref:`u8x4<amdgpu_synid8_type_dev>`,  :ref:`src2<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_sad_u16               :ref:`vdst<amdgpu_synid8_vdst32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,           :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u16x2<amdgpu_synid8_type_dev>`,  :ref:`src1<amdgpu_synid8_src32_1>`::ref:`u16x2<amdgpu_synid8_type_dev>`, :ref:`src2<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_sad_u32               :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`src2<amdgpu_synid8_src32_1>`            :ref:`clamp<amdgpu_synid_clamp>`
+    v_sad_u8                :ref:`vdst<amdgpu_synid8_vdst32_0>`::ref:`u32<amdgpu_synid8_type_dev>`,           :ref:`src0<amdgpu_synid8_src32_1>`::ref:`u8x4<amdgpu_synid8_type_dev>`,   :ref:`src1<amdgpu_synid8_src32_1>`::ref:`u8x4<amdgpu_synid8_type_dev>`,  :ref:`src2<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_sin_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_sin_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sqrt_f16_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_sqrt_f32_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sqrt_f64_e64          :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sub_f16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_sub_f32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sub_u16_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_sub_u32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_subb_u32_e64          :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`ssrc2<amdgpu_synid8_ssrc64_1>`
+    v_subbrev_u32_e64       :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`,       :ref:`ssrc2<amdgpu_synid8_ssrc64_1>`
+    v_subrev_f16_e64        :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_subrev_f32_e64        :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_subrev_u16_e64        :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_subrev_u32_e64        :ref:`vdst<amdgpu_synid8_vdst32_0>`,      :ref:`sdst<amdgpu_synid8_sdst64_0>`,    :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+    v_trig_preop_f64        :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src0<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid8_src32_1>`::ref:`u32<amdgpu_synid8_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_trunc_f16_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_trunc_f32_e64         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src<amdgpu_synid8_src32_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_trunc_f64_e64         :ref:`vdst<amdgpu_synid8_vdst64_0>`,               :ref:`src<amdgpu_synid8_src64_1>`::ref:`m<amdgpu_synid8_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_writelane_b32         :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`ssrc0<amdgpu_synid8_ssrc32_4>`,       :ref:`ssrc1<amdgpu_synid8_ssrc32_3>`
+    v_xor_b32_e64           :ref:`vdst<amdgpu_synid8_vdst32_0>`,               :ref:`src0<amdgpu_synid8_src32_1>`,        :ref:`src1<amdgpu_synid8_src32_1>`
+
+VOPC
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**             **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_cmp_class_f16                :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmp_class_f16_sdwa           :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`b32<amdgpu_synid8_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_class_f32                :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmp_class_f32_sdwa           :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`b32<amdgpu_synid8_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_class_f64                :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmp_eq_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_eq_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_eq_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_eq_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_eq_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_eq_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_eq_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_eq_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_eq_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_f_f16                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_f_f16_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_f32                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_f_f32_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_f64                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_f_i16                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_f_i16_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_i32                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_f_i32_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_i64                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_f_u16                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_f_u16_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_u32                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_f_u32_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_u64                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_ge_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ge_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ge_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_ge_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ge_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ge_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_ge_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ge_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ge_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_gt_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_gt_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_gt_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_gt_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_gt_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_gt_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_gt_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_gt_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_gt_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_le_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_le_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_le_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_le_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_le_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_le_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_le_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_le_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_le_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_lg_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lg_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lg_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lg_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lg_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_lt_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lt_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lt_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_lt_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lt_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lt_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_lt_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lt_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_lt_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_ne_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ne_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ne_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_ne_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ne_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ne_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_neq_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_neq_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_neq_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_neq_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_neq_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_nge_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nge_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nge_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nge_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nge_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_ngt_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ngt_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ngt_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_ngt_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ngt_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_nle_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nle_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nle_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nle_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nle_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_nlg_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nlg_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlg_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nlg_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlg_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_nlt_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nlt_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlt_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_nlt_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlt_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_o_f16                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_o_f16_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_o_f32                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_o_f32_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_o_f64                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_t_i16                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_t_i16_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_i32                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_t_i32_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_i64                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_t_u16                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_t_u16_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_u32                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_t_u32_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_u64                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_tru_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_tru_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_tru_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_tru_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_tru_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmp_u_f16                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_u_f16_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_u_f32                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmp_u_f32_sdwa               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_u_f64                    :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_class_f16               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmpx_class_f16_sdwa          :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`b32<amdgpu_synid8_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_class_f32               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmpx_class_f32_sdwa          :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`::ref:`b32<amdgpu_synid8_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_class_f64               :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`b32<amdgpu_synid8_type_dev>`
+    v_cmpx_eq_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_eq_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_eq_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_eq_i16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_eq_i16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_i32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_eq_i32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_i64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_eq_u16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_eq_u16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_u32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_eq_u32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_u64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_f_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_f_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_f_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_f_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_f_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_f_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_f_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_f_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_f_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_ge_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ge_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ge_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_ge_i16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ge_i16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_i32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ge_i32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_i64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_ge_u16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ge_u16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_u32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ge_u32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_u64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_gt_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_gt_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_gt_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_gt_i16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_gt_i16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_i32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_gt_i32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_i64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_gt_u16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_gt_u16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_u32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_gt_u32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_u64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_le_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_le_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_le_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_le_i16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_le_i16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_i32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_le_i32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_i64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_le_u16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_le_u16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_u32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_le_u32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_u64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_lg_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lg_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lg_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lg_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lg_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_lt_f16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lt_f16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_f32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lt_f32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_f64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_lt_i16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lt_i16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_i32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lt_i32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_i64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_lt_u16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lt_u16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_u32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_lt_u32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_u64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_ne_i16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ne_i16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_i32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ne_i32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_i64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_ne_u16                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ne_u16_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_u32                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ne_u32_sdwa             :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_u64                  :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_neq_f16                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_neq_f16_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_neq_f32                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_neq_f32_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_neq_f64                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_nge_f16                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nge_f16_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nge_f32                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nge_f32_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nge_f64                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_ngt_f16                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ngt_f16_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ngt_f32                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_ngt_f32_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ngt_f64                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_nle_f16                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nle_f16_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nle_f32                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nle_f32_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nle_f64                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_nlg_f16                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nlg_f16_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlg_f32                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nlg_f32_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlg_f64                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_nlt_f16                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nlt_f16_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlt_f32                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_nlt_f32_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlt_f64                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_o_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_o_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_o_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_o_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_o_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_t_i16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_t_i16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_i32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_t_i32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_i64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_t_u16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_t_u16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_u32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_t_u32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_u64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_tru_f16                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_tru_f16_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_tru_f32                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_tru_f32_sdwa            :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_tru_f64                 :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+    v_cmpx_u_f16                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_u_f16_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_u_f32                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src32_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`
+    v_cmpx_u_f32_sdwa              :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`vsrc0<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`,  :ref:`vsrc1<amdgpu_synid8_vsrc32_0>`::ref:`m<amdgpu_synid8_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_u_f64                   :ref:`vcc<amdgpu_synid8_vcc_64>`,      :ref:`src0<amdgpu_synid8_src64_0>`,     :ref:`vsrc1<amdgpu_synid8_vsrc64_0>`
+
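+The ``_sdwa`` rows above differ from the plain VOPC forms only in their
+operand modifiers. As an illustrative, hand-written example (not part of the
+generated tables; the register choices, modifier values and exact spellings
+such as ``src0_sel:WORD_0`` assume LLVM's AMDGPU assembler syntax), the
+``v_cmp_neq_f32`` entries might be written as:
+
+.. parsed-literal::
+
+    v_cmp_neq_f32 vcc, v0, v1
+    v_cmp_neq_f32_sdwa vcc, -v0, v1 src0_sel:WORD_0 src1_sel:DWORD
+
+Here ``-v0`` instantiates the ``m`` (abs/neg) modifier that the table shows
+attached to the SDWA source operands.
+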
+.. |---| unicode:: U+02014 .. em dash
+
+
+.. toctree::
+    :hidden:
+
+    gfx8_attr
+    gfx8_bimm16
+    gfx8_bimm32
+    gfx8_fimm16
+    gfx8_fimm32
+    gfx8_hwreg
+    gfx8_imm4
+    gfx8_label
+    gfx8_msg
+    gfx8_param
+    gfx8_perm_smem
+    gfx8_simm16
+    gfx8_tgt
+    gfx8_uimm16
+    gfx8_waitcnt
+    gfx8_addr_buf
+    gfx8_addr_ds
+    gfx8_addr_flat
+    gfx8_addr_mimg
+    gfx8_base_smem_addr
+    gfx8_base_smem_buf
+    gfx8_data_buf_atomic128
+    gfx8_data_buf_atomic32
+    gfx8_data_buf_atomic64
+    gfx8_data_buf_d16_128
+    gfx8_data_buf_d16_32
+    gfx8_data_buf_d16_64
+    gfx8_data_buf_d16_96
+    gfx8_data_mimg_atomic_cmp
+    gfx8_data_mimg_atomic_reg
+    gfx8_data_mimg_store
+    gfx8_data_mimg_store_d16
+    gfx8_dst_buf_128
+    gfx8_dst_buf_64
+    gfx8_dst_buf_96
+    gfx8_dst_buf_d16_128
+    gfx8_dst_buf_d16_32
+    gfx8_dst_buf_d16_64
+    gfx8_dst_buf_d16_96
+    gfx8_dst_buf_lds
+    gfx8_dst_flat_atomic32
+    gfx8_dst_flat_atomic64
+    gfx8_dst_mimg_gather4
+    gfx8_dst_mimg_regular
+    gfx8_dst_mimg_regular_d16
+    gfx8_offset_buf
+    gfx8_offset_smem_load
+    gfx8_offset_smem_store
+    gfx8_rsrc_buf
+    gfx8_rsrc_mimg
+    gfx8_samp_mimg
+    gfx8_sdata128_0
+    gfx8_sdata32_0
+    gfx8_sdata64_0
+    gfx8_sdst128_0
+    gfx8_sdst256_0
+    gfx8_sdst32_0
+    gfx8_sdst32_1
+    gfx8_sdst32_2
+    gfx8_sdst512_0
+    gfx8_sdst64_0
+    gfx8_sdst64_1
+    gfx8_src32_0
+    gfx8_src32_1
+    gfx8_src64_0
+    gfx8_src64_1
+    gfx8_src_exp
+    gfx8_ssrc32_0
+    gfx8_ssrc32_1
+    gfx8_ssrc32_2
+    gfx8_ssrc32_3
+    gfx8_ssrc32_4
+    gfx8_ssrc64_0
+    gfx8_ssrc64_1
+    gfx8_ssrc64_2
+    gfx8_ssrc64_3
+    gfx8_vcc_64
+    gfx8_vdata128_0
+    gfx8_vdata32_0
+    gfx8_vdata64_0
+    gfx8_vdata96_0
+    gfx8_vdst128_0
+    gfx8_vdst32_0
+    gfx8_vdst64_0
+    gfx8_vdst96_0
+    gfx8_vsrc128_0
+    gfx8_vsrc32_0
+    gfx8_vsrc64_0
+    gfx8_mod_dpp_sdwa_abs_neg
+    gfx8_mod_sdwa_sext
+    gfx8_mod_vop3_abs_neg
+    gfx8_opt
+    gfx8_ret
+    gfx8_type_dev
diff --git a/docs/AMDGPU/AMDGPUAsmGFX9.rst b/docs/AMDGPU/AMDGPUAsmGFX9.rst
new file mode 100644
index 0000000..9dd3b9d
--- /dev/null
+++ b/docs/AMDGPU/AMDGPUAsmGFX9.rst
@@ -0,0 +1,2102 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+============================
+Syntax of GFX9 Instructions
+============================
+
+.. contents::
+  :local:
+
+Notation
+========
+
+Notation used in this document is explained :ref:`here<amdgpu_syn_instruction_notation>`.
+
+Introduction
+============
+
+An overview of generic syntax and other features of AMDGPU instructions may be found :ref:`in this document<amdgpu_syn_instructions>`.
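+
+As a quick orientation for the tables that follow (an illustrative,
+hand-written example rather than generated output; the concrete registers and
+the ``offset:16``/``gds`` spellings assume LLVM's AMDGPU assembler syntax),
+an entry such as ``ds_add_rtn_u32 vdst, vaddr, vdata offset16 gds``
+corresponds to assembler text like:
+
+.. parsed-literal::
+
+    ds_add_rtn_u32 v5, v1, v2 offset:16 gds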
+
+Instructions
+============
+
+
+DS
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**         **SRC0**      **SRC1**      **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    ds_add_f32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_rtn_f32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_rtn_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_rtn_u64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_f32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_u32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_src2_u64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_u32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_add_u64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_b32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_b64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_rtn_b32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_rtn_b64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_src2_b32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_and_src2_b64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_append                      :ref:`vdst<amdgpu_synid9_vdst32_0>`                                           :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_bpermute_b32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>`
+    ds_cmpst_b32                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_b64                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_f32                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_f64                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_b32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_b64               :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_f32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_cmpst_rtn_f64               :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_condxchg32_rtn_b64          :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_consume                     :ref:`vdst<amdgpu_synid9_vdst32_0>`                                           :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_rtn_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_rtn_u64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_src2_u32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_src2_u64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_u32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_dec_u64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_barrier                             :ref:`vdata<amdgpu_synid9_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_init                                :ref:`vdata<amdgpu_synid9_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_br                             :ref:`vdata<amdgpu_synid9_vdata32_0>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_p                                                                 :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_release_all                                                       :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_gws_sema_v                                                                 :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_rtn_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_rtn_u64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_src2_u32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_src2_u64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_u32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_inc_u64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_f32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_f64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_i32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_i64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_f32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_f64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_i32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_i64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_rtn_u64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_f32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_f64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_i32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_i64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_u32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_src2_u64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_u32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_max_u64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_f32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_f64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_i32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_i64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_f32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_f64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_i32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_i64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_rtn_u64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_f32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_f64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_i32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_i64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_u32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_src2_u64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_u32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_min_u64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_b32                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_b64                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_rtn_b32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_mskor_rtn_b64               :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_nop
+    ds_or_b32                                  :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_b64                                  :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_rtn_b32                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_rtn_b64                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_src2_b32                             :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_or_src2_b64                             :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_ordered_count               :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_permute_b32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>`
+    ds_read2_b32                   :ref:`vdst<amdgpu_synid9_vdst64_0>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2_b64                   :ref:`vdst<amdgpu_synid9_vdst128_0>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2st64_b32               :ref:`vdst<amdgpu_synid9_vdst64_0>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read2st64_b64               :ref:`vdst<amdgpu_synid9_vdst128_0>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b128                   :ref:`vdst<amdgpu_synid9_vdst128_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b32                    :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b64                    :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_b96                    :ref:`vdst<amdgpu_synid9_vdst96_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i16                    :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i8                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i8_d16                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_i8_d16_hi              :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u16                    :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u16_d16                :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u16_d16_hi             :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u8                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u8_d16                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_read_u8_d16_hi              :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_rtn_u32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_rtn_u64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_src2_u32                           :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_src2_u64                           :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_u32                                :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_rsub_u64                                :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_rtn_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_rtn_u64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_src2_u32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_src2_u64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_u32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_sub_u64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_swizzle_b32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`pattern<amdgpu_synid_sw_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrap_rtn_b32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2_b32                              :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2_b64                              :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2st64_b32                          :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write2st64_b64                          :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b128                              :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata128_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b16                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b16_d16_hi                        :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b32                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b64                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b8                                :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b8_d16_hi                         :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_b96                               :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata96_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_src2_b32                          :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_write_src2_b64                          :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2_rtn_b32             :ref:`vdst<amdgpu_synid9_vdst64_0>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2_rtn_b64             :ref:`vdst<amdgpu_synid9_vdst128_0>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2st64_rtn_b32         :ref:`vdst<amdgpu_synid9_vdst64_0>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata32_0>`,   :ref:`vdata1<amdgpu_synid9_vdata32_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg2st64_rtn_b64         :ref:`vdst<amdgpu_synid9_vdst128_0>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata0<amdgpu_synid9_vdata64_0>`,   :ref:`vdata1<amdgpu_synid9_vdata64_0>`         :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg_rtn_b32              :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_wrxchg_rtn_b64              :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_b32                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_b64                                 :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_rtn_b32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_rtn_b64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,       :ref:`vaddr<amdgpu_synid9_addr_ds>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                    :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_src2_b32                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+    ds_xor_src2_b64                            :ref:`vaddr<amdgpu_synid9_addr_ds>`                              :ref:`offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
+
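+As a hedged, illustrative sketch only (the register numbers and the offset
+value below are assumptions for the example, not part of the generated table
+above), a typical GFX9 LDS write/read pair using the DS syntax documented
+here might look like:
+
+.. code-block:: none
+
+    ds_write_b32 v0, v1 offset:16   // store v1 to LDS at address v0+16
+    ds_read_b32  v2, v0 offset:16   // read the same LDS word back into v2
+    s_waitcnt lgkmcnt(0)            // wait for LDS traffic before using v2
+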
+EXP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**      **SRC2**      **SRC3**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    exp                            :ref:`tgt<amdgpu_synid9_tgt>`,      :ref:`vsrc0<amdgpu_synid9_src_exp>`,    :ref:`vsrc1<amdgpu_synid9_src_exp>`,    :ref:`vsrc2<amdgpu_synid9_src_exp>`,    :ref:`vsrc3<amdgpu_synid9_src_exp>`          :ref:`done<amdgpu_synid_done>` :ref:`compr<amdgpu_synid_compr>` :ref:`vm<amdgpu_synid_vm>`
+
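+As a hedged illustration (the source registers chosen here are assumptions),
+a final color export to render target 0 using the syntax above could be
+written as:
+
+.. code-block:: none
+
+    exp mrt0 v0, v1, v2, v3 done vm   // export RGBA, final export, set valid mask
+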
+FLAT
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**           **SRC0**      **SRC1**         **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    flat_atomic_add                :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_add_x2             :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_and                :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_and_x2             :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_cmpswap            :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`b32x2<amdgpu_synid9_type_dev>`                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_cmpswap_x2         :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata128_0>`::ref:`b64x2<amdgpu_synid9_type_dev>`                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_dec                :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_dec_x2             :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_inc                :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_inc_x2             :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_or                 :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_or_x2              :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smax               :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`s32<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smax_x2            :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`s64<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smin               :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`s32<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_smin_x2            :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`s64<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_sub                :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_sub_x2             :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_swap               :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_swap_x2            :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umax               :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umax_x2            :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umin               :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_umin_x2            :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`                   :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_xor                :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_atomic_xor_x2             :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dword                :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx2              :ref:`vdst<amdgpu_synid9_vdst64_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx3              :ref:`vdst<amdgpu_synid9_vdst96_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_dwordx4              :ref:`vdst<amdgpu_synid9_vdst128_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sbyte                :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sbyte_d16            :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sbyte_d16_hi         :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_short_d16            :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_short_d16_hi         :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_sshort               :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ubyte                :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ubyte_d16            :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ubyte_d16_hi         :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_load_ushort               :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_addr_flat>`                                 :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_byte                              :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_byte_d16_hi                       :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dword                             :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx2                           :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx3                           :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata96_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_dwordx4                           :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata128_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_short                             :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    flat_store_short_d16_hi                      :ref:`vaddr<amdgpu_synid9_addr_flat>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`                       :ref:`offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_add              :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_add_x2           :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_and              :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_and_x2           :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_cmpswap          :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_cmpswap_x2       :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata128_0>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_dec              :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_dec_x2           :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_inc              :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_inc_x2           :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_or               :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_or_x2            :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_smax             :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_smax_x2          :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_smin             :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_smin_x2          :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`s64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_sub              :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_sub_x2           :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_swap             :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_swap_x2          :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_umax             :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_umax_x2          :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_umin             :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_umin_x2          :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`::ref:`u64<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_xor              :ref:`vdst<amdgpu_synid9_dst_flat_atomic32>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_atomic_xor_x2           :ref:`vdst<amdgpu_synid9_dst_flat_atomic64>`::ref:`opt<amdgpu_synid9_opt>`,     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_dword              :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_dwordx2            :ref:`vdst<amdgpu_synid9_vdst64_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_dwordx3            :ref:`vdst<amdgpu_synid9_vdst96_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_dwordx4            :ref:`vdst<amdgpu_synid9_vdst128_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_sbyte              :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_sbyte_d16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_sbyte_d16_hi       :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_short_d16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_short_d16_hi       :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_sshort             :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_ubyte              :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_ubyte_d16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_ubyte_d16_hi       :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_load_ushort             :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_global>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_byte                            :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_byte_d16_hi                     :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_dword                           :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_dwordx2                         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_dwordx3                         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata96_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_dwordx4                         :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata128_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_short                           :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    global_store_short_d16_hi                    :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_global>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_dword             :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_dwordx2           :ref:`vdst<amdgpu_synid9_vdst64_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_dwordx3           :ref:`vdst<amdgpu_synid9_vdst96_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_dwordx4           :ref:`vdst<amdgpu_synid9_vdst128_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_sbyte             :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_sbyte_d16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_sbyte_d16_hi      :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_short_d16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_short_d16_hi      :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_sshort            :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_ubyte             :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_ubyte_d16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_ubyte_d16_hi      :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_load_ushort            :ref:`vdst<amdgpu_synid9_vdst32_0>`,         :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`                       :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_byte                           :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_byte_d16_hi                    :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_dword                          :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_dwordx2                        :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata64_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_dwordx3                        :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata96_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_dwordx4                        :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata128_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_short                          :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    scratch_store_short_d16_hi                   :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>`,    :ref:`vdata<amdgpu_synid9_vdata32_0>`,       :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>`          :ref:`offset13s<amdgpu_synid_flat_offset13s>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+
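+The table above gives syntax only; the following hand-written GFX9 snippet is
+a minimal illustration of how the FLAT global operands and modifiers line up.
+The register choices, offsets, and modifier selections here are arbitrary
+assumptions for the example, not taken from the generated tables:
+
+.. code-block:: nasm
+
+    ; load a dword through a 64-bit VGPR address; "off" means no SGPR base
+    global_load_dword  v1, v[2:3], off offset:16
+    ; same load via a 64-bit SGPR base plus a 32-bit VGPR offset
+    global_load_dword  v1, v2, s[4:5] offset:-8
+    ; 32-bit unsigned-max atomic; glc requests the pre-op value back in v1
+    global_atomic_umax v1, v[2:3], v4, off glc
+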
+MIMG
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**            **DST**      **SRC0**       **SRC1**     **SRC2**       **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    image_atomic_add                :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_and                :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_cmpswap            :ref:`vdata<amdgpu_synid9_data_mimg_atomic_cmp>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_dec                :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_inc                :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_or                 :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_smax               :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_smin               :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_sub                :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_swap               :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_umax               :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_umin               :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_atomic_xor                :ref:`vdata<amdgpu_synid9_data_mimg_atomic_reg>`::ref:`dst<amdgpu_synid9_ret>`, :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_gather4          :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b        :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b_cl     :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b_cl_o   :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_b_o      :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c        :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b      :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b_cl   :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b_cl_o :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_b_o    :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_cl     :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_cl_o   :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_l      :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_l_o    :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_lz     :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_lz_o   :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_c_o      :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_cl       :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_cl_o     :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_l        :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_l_o      :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_lz       :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_lz_o     :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_gather4_o        :ref:`vdst<amdgpu_synid9_dst_mimg_gather4>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_get_lod          :ref:`vdst<amdgpu_synid9_dst_mimg_regular>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_get_resinfo      :ref:`vdst<amdgpu_synid9_dst_mimg_regular>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load             :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_load_mip         :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_load_mip_pck     :ref:`vdst<amdgpu_synid9_dst_mimg_regular>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_mip_pck_sgn :ref:`vdst<amdgpu_synid9_dst_mimg_regular>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_pck         :ref:`vdst<amdgpu_synid9_dst_mimg_regular>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_load_pck_sgn     :ref:`vdst<amdgpu_synid9_dst_mimg_regular>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_sample           :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b         :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b_cl      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b_cl_o    :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_b_o       :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c         :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b       :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b_cl    :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b_cl_o  :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_b_o     :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd_cl   :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd_cl_o :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cd_o    :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cl      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_cl_o    :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d       :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d_cl    :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d_cl_o  :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_d_o     :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_l       :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_l_o     :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_lz      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_lz_o    :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_c_o       :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd        :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd_cl     :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd_cl_o   :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cd_o      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cl        :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_cl_o      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d         :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d_cl      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d_cl_o    :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_d_o       :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_l         :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_l_o       :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_lz        :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_lz_o      :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_sample_o         :ref:`vdst<amdgpu_synid9_dst_mimg_regular_d16>`,    :ref:`vaddr<amdgpu_synid9_addr_mimg>`,     :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`,   :ref:`ssamp<amdgpu_synid9_samp_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_store                     :ref:`vdata<amdgpu_synid9_data_mimg_store_d16>`,     :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_store_mip                 :ref:`vdata<amdgpu_synid9_data_mimg_store_d16>`,     :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
+    image_store_mip_pck             :ref:`vdata<amdgpu_synid9_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+    image_store_pck                 :ref:`vdata<amdgpu_synid9_data_mimg_store>`,     :ref:`vaddr<amdgpu_synid9_addr_mimg>`,   :ref:`srsrc<amdgpu_synid9_rsrc_mimg>`      :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`a16<amdgpu_synid_a16>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
+
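+As with the table above, a short hand-written sample may clarify how the MIMG
+operands correspond to the columns. The registers, dmask values, and texture
+dimensionality below are illustrative assumptions, not part of the generated
+tables:
+
+.. code-block:: nasm
+
+    ; sample a 2D texture: 4-component result (dmask:0xf), 2 coordinate VGPRs,
+    ; 8-dword resource descriptor, 4-dword sampler descriptor
+    image_sample     v[0:3], v[4:5], s[8:15], s[16:19] dmask:0xf
+    ; image atomic add on a single component; glc returns the pre-op value in v0
+    image_atomic_add v0, v1, s[8:15] dmask:0x1 unorm glc
+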
+MUBUF
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                  **DST**   **SRC0**             **SRC1**    **SRC2**    **SRC3**    **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    buffer_atomic_add                  :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_add_x2               :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_and                  :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_and_x2               :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_cmpswap              :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_cmpswap_x2           :ref:`vdata<amdgpu_synid9_data_buf_atomic128>`::ref:`dst<amdgpu_synid9_ret>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_dec                  :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_dec_x2               :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_inc                  :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_inc_x2               :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_or                   :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_or_x2                :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smax                 :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smax_x2              :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smin                 :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_smin_x2              :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_sub                  :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_sub_x2               :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_swap                 :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_swap_x2              :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umax                 :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umax_x2              :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umin                 :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_umin_x2              :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_xor                  :ref:`vdata<amdgpu_synid9_data_buf_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_atomic_xor_x2               :ref:`vdata<amdgpu_synid9_data_buf_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dword            :ref:`vdst<amdgpu_synid9_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_dwordx2          :ref:`vdst<amdgpu_synid9_dst_buf_64>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dwordx3          :ref:`vdst<amdgpu_synid9_dst_buf_96>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_dwordx4          :ref:`vdst<amdgpu_synid9_dst_buf_128>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_hi_x  :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_x     :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_xy    :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_xyz   :ref:`vdst<amdgpu_synid9_dst_buf_64>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_d16_xyzw  :ref:`vdst<amdgpu_synid9_dst_buf_64>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_x         :ref:`vdst<amdgpu_synid9_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_format_xy        :ref:`vdst<amdgpu_synid9_dst_buf_64>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_xyz       :ref:`vdst<amdgpu_synid9_dst_buf_96>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_format_xyzw      :ref:`vdst<amdgpu_synid9_dst_buf_128>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_sbyte            :ref:`vdst<amdgpu_synid9_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_sbyte_d16        :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_sbyte_d16_hi     :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_short_d16        :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_short_d16_hi     :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_sshort           :ref:`vdst<amdgpu_synid9_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_ubyte            :ref:`vdst<amdgpu_synid9_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_load_ubyte_d16        :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_ubyte_d16_hi     :ref:`vdst<amdgpu_synid9_dst_buf_32>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_load_ushort           :ref:`vdst<amdgpu_synid9_dst_buf_lds>`, :ref:`vaddr<amdgpu_synid9_addr_buf>`,           :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>`         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_store_byte                  :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_byte_d16_hi           :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dword                 :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx2               :ref:`vdata<amdgpu_synid9_vdata64_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx3               :ref:`vdata<amdgpu_synid9_vdata96_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_dwordx4               :ref:`vdata<amdgpu_synid9_vdata128_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_hi_x       :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_x          :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_xy         :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_xyz        :ref:`vdata<amdgpu_synid9_vdata64_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_d16_xyzw       :ref:`vdata<amdgpu_synid9_vdata64_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_x              :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xy             :ref:`vdata<amdgpu_synid9_vdata64_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xyz            :ref:`vdata<amdgpu_synid9_vdata96_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_format_xyzw           :ref:`vdata<amdgpu_synid9_vdata128_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_lds_dword             :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,           :ref:`soffset<amdgpu_synid9_offset_buf>`                 :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`lds<amdgpu_synid_lds>`
+    buffer_store_short                 :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_store_short_d16_hi          :ref:`vdata<amdgpu_synid9_vdata32_0>`,           :ref:`vaddr<amdgpu_synid9_addr_buf>`,  :ref:`srsrc<amdgpu_synid9_rsrc_buf>`,  :ref:`soffset<amdgpu_synid9_offset_buf>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
+    buffer_wbinvl1
+    buffer_wbinvl1_vol
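+
+As an illustrative sketch (not part of the generated tables above), a
+returning 32-bit buffer atomic with hypothetical register assignments
+could be written as follows; setting ``glc`` requests that the
+pre-operation memory value be returned in ``vdata``::
+
+    // Hypothetical operands: v1 holds the buffer index, s[4:7] the
+    // buffer resource descriptor, s0 the scalar offset. With glc set,
+    // the old memory value is returned in v0.
+    buffer_atomic_add v0, v1, s[4:7], s0 idxen offset:16 glc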
+
+SMEM
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**             **SRC1**      **SRC2**           **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_atc_probe                              :ref:`imm3<amdgpu_synid9_perm_smem>`,            :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`
+    s_atc_probe_buffer                       :ref:`imm3<amdgpu_synid9_perm_smem>`,            :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`
+    s_atomic_add                             :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_add_x2                          :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_and                             :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_and_x2                          :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_cmpswap                         :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_cmpswap_x2                      :ref:`sdata<amdgpu_synid9_data_smem_atomic128>`::ref:`dst<amdgpu_synid9_ret>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_dec                             :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_dec_x2                          :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_inc                             :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_inc_x2                          :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_or                              :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_or_x2                           :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_smax                            :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_smax_x2                         :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_smin                            :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_smin_x2                         :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_sub                             :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_sub_x2                          :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_swap                            :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_swap_x2                         :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_umax                            :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_umax_x2                         :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_umin                            :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_umin_x2                         :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_xor                             :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_atomic_xor_x2                          :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_add                      :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_add_x2                   :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_and                      :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_and_x2                   :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_cmpswap                  :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`b32x2<amdgpu_synid9_type_dev>`, :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_cmpswap_x2               :ref:`sdata<amdgpu_synid9_data_smem_atomic128>`::ref:`dst<amdgpu_synid9_ret>`::ref:`b64x2<amdgpu_synid9_type_dev>`, :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_dec                      :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_dec_x2                   :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_inc                      :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_inc_x2                   :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_or                       :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_or_x2                    :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_smax                     :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_smax_x2                  :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_smin                     :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_smin_x2                  :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`s64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_sub                      :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_sub_x2                   :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_swap                     :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_swap_x2                  :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_umax                     :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_umax_x2                  :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_umin                     :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_umin_x2                  :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`::ref:`u64<amdgpu_synid9_type_dev>`,   :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_xor                      :ref:`sdata<amdgpu_synid9_data_smem_atomic32>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_atomic_xor_x2                   :ref:`sdata<amdgpu_synid9_data_smem_atomic64>`::ref:`dst<amdgpu_synid9_ret>`,       :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dword            :ref:`sdst<amdgpu_synid9_sdst32_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_buf>`,           :ref:`soffset<amdgpu_synid9_offset_smem_buf>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx16         :ref:`sdst<amdgpu_synid9_sdst512_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_buf>`,           :ref:`soffset<amdgpu_synid9_offset_smem_buf>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx2          :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_buf>`,           :ref:`soffset<amdgpu_synid9_offset_smem_buf>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx4          :ref:`sdst<amdgpu_synid9_sdst128_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_buf>`,           :ref:`soffset<amdgpu_synid9_offset_smem_buf>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_load_dwordx8          :ref:`sdst<amdgpu_synid9_sdst256_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_buf>`,           :ref:`soffset<amdgpu_synid9_offset_smem_buf>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_store_dword                     :ref:`sdata<amdgpu_synid9_sdata32_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_store_dwordx2                   :ref:`sdata<amdgpu_synid9_sdata64_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_buffer_store_dwordx4                   :ref:`sdata<amdgpu_synid9_sdata128_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_buf>`,    :ref:`soffset<amdgpu_synid9_offset_smem_buf>`        :ref:`glc<amdgpu_synid_glc>`
+    s_dcache_discard                         :ref:`sbase<amdgpu_synid9_base_smem_addr>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`
+    s_dcache_discard_x2                      :ref:`sbase<amdgpu_synid9_base_smem_addr>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`
+    s_dcache_inv
+    s_dcache_inv_vol
+    s_dcache_wb
+    s_dcache_wb_vol
+    s_load_dword                   :ref:`sdst<amdgpu_synid9_sdst32_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_addr>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx16                :ref:`sdst<amdgpu_synid9_sdst512_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_addr>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx2                 :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_addr>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx4                 :ref:`sdst<amdgpu_synid9_sdst128_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_addr>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_load_dwordx8                 :ref:`sdst<amdgpu_synid9_sdst256_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_addr>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_memrealtime                  :ref:`sdst<amdgpu_synid9_sdst64_0>`
+    s_memtime                      :ref:`sdst<amdgpu_synid9_sdst64_0>`
+    s_scratch_load_dword           :ref:`sdst<amdgpu_synid9_sdst32_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_scratch>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_scratch_load_dwordx2         :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_scratch>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_scratch_load_dwordx4         :ref:`sdst<amdgpu_synid9_sdst128_0>`,     :ref:`sbase<amdgpu_synid9_base_smem_scratch>`,           :ref:`soffset<amdgpu_synid9_offset_smem_plain>`                  :ref:`glc<amdgpu_synid_glc>`
+    s_scratch_store_dword                    :ref:`sdata<amdgpu_synid9_sdata32_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_scratch>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_scratch_store_dwordx2                  :ref:`sdata<amdgpu_synid9_sdata64_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_scratch>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_scratch_store_dwordx4                  :ref:`sdata<amdgpu_synid9_sdata128_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_scratch>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_store_dword                            :ref:`sdata<amdgpu_synid9_sdata32_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_store_dwordx2                          :ref:`sdata<amdgpu_synid9_sdata64_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
+    s_store_dwordx4                          :ref:`sdata<amdgpu_synid9_sdata128_0>`,           :ref:`sbase<amdgpu_synid9_base_smem_addr>`,    :ref:`soffset<amdgpu_synid9_offset_smem_plain>`        :ref:`glc<amdgpu_synid_glc>`
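+
+For example (a hypothetical sketch, not generated output), a scalar load
+of four dwords through a buffer resource and a plain scalar load from a
+64-bit base address might look like::
+
+    // s[4:7] is assumed to hold a buffer resource descriptor;
+    // 0x10 is an immediate byte offset.
+    s_buffer_load_dwordx4 s[8:11], s[4:7], 0x10
+    // s[2:3] is assumed to hold a 64-bit byte address.
+    s_load_dword s0, s[2:3], 0x0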
+
+SOP1
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_abs_i32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_and_saveexec_b64             :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_andn1_saveexec_b64           :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_andn1_wrexec_b64             :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_andn2_saveexec_b64           :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_andn2_wrexec_b64             :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_bcnt0_i32_b32                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_bcnt0_i32_b64                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_bcnt1_i32_b32                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_bcnt1_i32_b64                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_bitreplicate_b64_b32         :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_bitset0_b32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_bitset0_b64                  :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    s_bitset1_b32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_bitset1_b64                  :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    s_brev_b32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_brev_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_cbranch_join                           :ref:`ssrc<amdgpu_synid9_ssrc32_1>`
+    s_cmov_b32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_cmov_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_ff0_i32_b32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_ff0_i32_b64                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_ff1_i32_b32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_ff1_i32_b64                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_flbit_i32                    :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_flbit_i32_b32                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_flbit_i32_b64                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_flbit_i32_i64                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_getpc_b64                    :ref:`sdst<amdgpu_synid9_sdst64_1>`
+    s_mov_b32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_mov_b64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_mov_fed_b32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_movreld_b32                  :ref:`sdst<amdgpu_synid9_sdst32_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_movreld_b64                  :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_movrels_b32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_1>`
+    s_movrels_b64                  :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_1>`
+    s_nand_saveexec_b64            :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_nor_saveexec_b64             :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_not_b32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_not_b64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_or_saveexec_b64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_orn1_saveexec_b64            :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_orn2_saveexec_b64            :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_quadmask_b32                 :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_quadmask_b64                 :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_rfe_b64                                :ref:`ssrc<amdgpu_synid9_ssrc64_1>`
+    s_set_gpr_idx_idx                        :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_setpc_b64                              :ref:`ssrc<amdgpu_synid9_ssrc64_1>`
+    s_sext_i32_i16                 :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_sext_i32_i8                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_swappc_b64                   :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_1>`
+    s_wqm_b32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc32_0>`
+    s_wqm_b64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_xnor_saveexec_b64            :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
+    s_xor_saveexec_b64             :ref:`sdst<amdgpu_synid9_sdst64_0>`,     :ref:`ssrc<amdgpu_synid9_ssrc64_0>`
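+
+A common SOP1 idiom (sketched here with assumed registers) is saving and
+narrowing the EXEC mask before entering a divergent region::
+
+    // Save the current EXEC mask into s[2:3], then set
+    // EXEC = VCC & EXEC; SCC is set if the new mask is non-zero.
+    s_and_saveexec_b64 s[2:3], vcc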
+
+SOP2
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**         **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_absdiff_i32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_add_i32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_add_u32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_addc_u32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_and_b32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_and_b64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_andn2_b32                    :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_andn2_b64                    :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_ashr_i32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_ashr_i64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_bfe_i32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_bfe_i64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_bfe_u32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_bfe_u64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_bfm_b32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_bfm_b64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    s_cbranch_g_fork                         :ref:`ssrc0<amdgpu_synid9_ssrc64_2>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_2>`
+    s_cselect_b32                  :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cselect_b64                  :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_lshl1_add_u32                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_lshl2_add_u32                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_lshl3_add_u32                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_lshl4_add_u32                :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_lshl_b32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_lshl_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_lshr_b32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_lshr_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_max_i32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_max_u32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_min_i32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_min_u32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_mul_hi_i32                   :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_mul_hi_u32                   :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_mul_i32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_nand_b32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_nand_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_nor_b32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_nor_b64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_or_b32                       :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_or_b64                       :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_orn2_b32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_orn2_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_pack_hh_b32_b16              :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`::ref:`b16x2<amdgpu_synid9_type_dev>`, :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`b16x2<amdgpu_synid9_type_dev>`
+    s_pack_lh_b32_b16              :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`b16x2<amdgpu_synid9_type_dev>`
+    s_pack_ll_b32_b16              :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_rfe_restore_b64                        :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    s_sub_i32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_sub_u32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_subb_u32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_xnor_b32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_xnor_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_xor_b32                      :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_xor_b64                      :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+
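+For example, a 64-bit scalar bitwise XOR of two SGPR pairs might be written
+as follows (an illustrative snippet; register choices are hypothetical, not
+taken from the generated listing):
+
+.. parsed-literal::
+
+    s_xor_b64 s[0:1], s[2:3], s[4:5]
+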
+SOPC
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_bitcmp0_b32                  :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_bitcmp0_b64                  :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_bitcmp1_b32                  :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_bitcmp1_b64                  :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`
+    s_cmp_eq_i32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_eq_u32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_eq_u64                   :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_cmp_ge_i32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_ge_u32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_gt_i32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_gt_u32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_le_i32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_le_u32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_lg_i32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_lg_u32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_lg_u64                   :ref:`ssrc0<amdgpu_synid9_ssrc64_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc64_0>`
+    s_cmp_lt_i32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_cmp_lt_u32                   :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+    s_set_gpr_idx_on               :ref:`ssrc<amdgpu_synid9_ssrc32_0>`,     :ref:`imm4<amdgpu_synid9_imm4>`
+    s_setvskip                     :ref:`ssrc0<amdgpu_synid9_ssrc32_0>`,    :ref:`ssrc1<amdgpu_synid9_ssrc32_0>`
+
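+Each SOPC instruction compares its two operands and writes the result to
+SCC. A minimal illustrative use (hypothetical registers):
+
+.. parsed-literal::
+
+    s_cmp_eq_u32 s0, s1
+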
+SOPK
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_addk_i32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_call_b64                     :ref:`sdst<amdgpu_synid9_sdst64_1>`,     :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_i_fork                         :ref:`ssrc<amdgpu_synid9_ssrc64_3>`,     :ref:`label<amdgpu_synid9_label>`
+    s_cmovk_i32                    :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_cmpk_eq_i32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_cmpk_eq_u32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_uimm16>`
+    s_cmpk_ge_i32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_cmpk_ge_u32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_uimm16>`
+    s_cmpk_gt_i32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_cmpk_gt_u32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_uimm16>`
+    s_cmpk_le_i32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_cmpk_le_u32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_uimm16>`
+    s_cmpk_lg_i32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_cmpk_lg_u32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_uimm16>`
+    s_cmpk_lt_i32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_cmpk_lt_u32                            :ref:`ssrc<amdgpu_synid9_ssrc32_2>`,     :ref:`imm16<amdgpu_synid9_uimm16>`
+    s_getreg_b32                   :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`hwreg<amdgpu_synid9_hwreg>`
+    s_movk_i32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_mulk_i32                     :ref:`sdst<amdgpu_synid9_sdst32_1>`,     :ref:`imm16<amdgpu_synid9_simm16>`
+    s_setreg_b32                   :ref:`hwreg<amdgpu_synid9_hwreg>`,    :ref:`ssrc<amdgpu_synid9_ssrc32_2>`
+    s_setreg_imm32_b32             :ref:`hwreg<amdgpu_synid9_hwreg>`,    :ref:`imm32<amdgpu_synid9_bimm32>`
+
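+SOPK instructions carry a 16-bit immediate encoded directly in the
+instruction word. For instance (illustrative operand values):
+
+.. parsed-literal::
+
+    s_movk_i32 s0, 0x1234
+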
+SOPP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **SRC**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    s_barrier
+    s_branch                       :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_cdbgsys              :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_cdbgsys_and_user     :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_cdbgsys_or_user      :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_cdbguser             :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_execnz               :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_execz                :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_scc0                 :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_scc1                 :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_vccnz                :ref:`label<amdgpu_synid9_label>`
+    s_cbranch_vccz                 :ref:`label<amdgpu_synid9_label>`
+    s_decperflevel                 :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_endpgm
+    s_endpgm_ordered_ps_done
+    s_endpgm_saved
+    s_icache_inv
+    s_incperflevel                 :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_nop                          :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_sendmsg                      :ref:`msg<amdgpu_synid9_msg>`
+    s_sendmsghalt                  :ref:`msg<amdgpu_synid9_msg>`
+    s_set_gpr_idx_mode             :ref:`imm4<amdgpu_synid9_imm4>`
+    s_set_gpr_idx_off
+    s_sethalt                      :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_setkill                      :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_setprio                      :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_sleep                        :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_trap                         :ref:`imm16<amdgpu_synid9_bimm16>`
+    s_ttracedata
+    s_waitcnt                      :ref:`waitcnt<amdgpu_synid9_waitcnt>`
+    s_wakeup
+
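+SOPP instructions take either no operands or a single immediate-style
+operand. An illustrative sequence at the end of a kernel, draining
+outstanding memory counters before terminating the wavefront:
+
+.. parsed-literal::
+
+    s_waitcnt vmcnt(0) lgkmcnt(0)
+    s_endpgm
+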
+VINTRP
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**       **SRC1**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_interp_mov_f32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`param<amdgpu_synid9_param>`::ref:`b32<amdgpu_synid9_type_dev>`, :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_interp_p1_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`,      :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_interp_p2_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`,      :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`
+
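+The VINTRP instructions interpolate per-vertex parameter data selected by
+an attribute operand. An illustrative first-pass interpolation
+(hypothetical registers; v1 is assumed to hold the i coordinate):
+
+.. parsed-literal::
+
+    v_interp_p1_f32 v0, v1, attr0.x
+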
+VOP1
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                     **DST**       **SRC**            **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_bfrev_b32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_bfrev_b32_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_bfrev_b32_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ceil_f16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_ceil_f16_dpp                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ceil_f16_sdwa                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ceil_f32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_ceil_f32_dpp                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ceil_f32_sdwa                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ceil_f64                      :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_clrexcp
+    v_cos_f16                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cos_f16_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cos_f16_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cos_f32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cos_f32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cos_f32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f16_f32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f16_f32_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f16_f32_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f16_i16                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f16_i16_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f16_i16_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f16_u16                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f16_u16_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f16_u16_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_f16                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f32_f16_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_f16_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_f64                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_cvt_f32_i32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f32_i32_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_i32_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_u32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f32_u32_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_u32_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte0                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f32_ubyte0_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte0_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte1                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f32_ubyte1_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte1_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte2                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f32_ubyte2_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte2_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f32_ubyte3                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f32_ubyte3_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_f32_ubyte3_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_f64_f32                   :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f64_i32                   :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_f64_u32                   :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_flr_i32_f32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_flr_i32_f32_dpp           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_flr_i32_f32_sdwa          :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_i16_f16                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_i16_f16_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_i16_f16_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_i32_f32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_i32_f32_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_i32_f32_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_i32_f64                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_cvt_norm_i16_f16              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_norm_i16_f16_dpp          :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_norm_i16_f16_sdwa         :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_norm_u16_f16              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_norm_u16_f16_dpp          :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_norm_u16_f16_sdwa         :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_off_f32_i4                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_off_f32_i4_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_off_f32_i4_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_rpi_i32_f32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_rpi_i32_f32_dpp           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_rpi_i32_f32_sdwa          :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_u16_f16                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_u16_f16_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_u16_f16_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_u32_f32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_cvt_u32_f32_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cvt_u32_f32_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_cvt_u32_f64                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_exp_f16                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_exp_f16_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_exp_f16_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_exp_f32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_exp_f32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_exp_f32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_exp_legacy_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_exp_legacy_f32_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_exp_legacy_f32_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ffbh_i32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_ffbh_i32_dpp                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ffbh_i32_sdwa                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ffbh_u32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_ffbh_u32_dpp                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ffbh_u32_sdwa                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_ffbl_b32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_ffbl_b32_dpp                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ffbl_b32_sdwa                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_floor_f16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_floor_f16_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_floor_f16_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_floor_f32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_floor_f32_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_floor_f32_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_floor_f64                     :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_fract_f16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_fract_f16_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_fract_f16_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_fract_f32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_fract_f32_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_fract_f32_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_fract_f64                     :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_frexp_exp_i16_f16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_frexp_exp_i16_f16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_exp_i16_f16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_exp_i32_f32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_frexp_exp_i32_f32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_exp_i32_f32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_exp_i32_f64             :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_frexp_mant_f16                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_frexp_mant_f16_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_mant_f16_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_mant_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_frexp_mant_f32_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_frexp_mant_f32_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_frexp_mant_f64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_log_f16                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_log_f16_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_log_f16_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_log_f32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_log_f32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_log_f32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_log_legacy_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_log_legacy_f32_dpp            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_log_legacy_f32_sdwa           :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_mov_b32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_mov_b32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mov_b32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_mov_fed_b32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_mov_fed_b32_dpp               :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mov_fed_b32_sdwa              :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_nop
+    v_not_b32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_not_b32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_not_b32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rcp_f16                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_rcp_f16_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rcp_f16_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rcp_f32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_rcp_f32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rcp_f32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rcp_f64                       :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_rcp_iflag_f32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_rcp_iflag_f32_dpp             :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rcp_iflag_f32_sdwa            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_readfirstlane_b32             :ref:`sdst<amdgpu_synid9_sdst32_2>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`
+    v_rndne_f16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_rndne_f16_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rndne_f16_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rndne_f32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_rndne_f32_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rndne_f32_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rndne_f64                     :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_rsq_f16                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_rsq_f16_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rsq_f16_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rsq_f32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_rsq_f32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_rsq_f32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_rsq_f64                       :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_sat_pk_u8_i16                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_sat_pk_u8_i16_dpp             :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sat_pk_u8_i16_sdwa            :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_screen_partition_4se_b32      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_screen_partition_4se_b32_dpp  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_screen_partition_4se_b32_sdwa :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sin_f16                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_sin_f16_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sin_f16_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sin_f32                       :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_sin_f32_dpp                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sin_f32_sdwa                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sqrt_f16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_sqrt_f16_dpp                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sqrt_f16_sdwa                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sqrt_f32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_sqrt_f32_dpp                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sqrt_f32_sdwa                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_sqrt_f64                      :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+    v_swap_b32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`
+    v_trunc_f16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_trunc_f16_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_trunc_f16_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_trunc_f32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`
+    v_trunc_f32_dpp                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_trunc_f32_sdwa                :ref:`vdst<amdgpu_synid9_vdst32_0>`,     :ref:`src<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
+    v_trunc_f64                     :ref:`vdst<amdgpu_synid9_vdst64_0>`,     :ref:`src<amdgpu_synid9_src64_0>`
+
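+The listing above is generated from the instruction definitions. As an
+illustration only (not part of the generated tables; the registers and
+modifier values below are arbitrary choices), a VOP1 opcode and its
+DPP and SDWA variants might be written as:
+
+.. parsed-literal::
+
+    v_sqrt_f32      v0, v1                                       // plain VOP1 form
+    v_sqrt_f32_dpp  v0, v1 row_shl:1 row_mask:0xf bank_mask:0xf  // DPP: shift lanes left by 1 within each row
+    v_sqrt_f32_sdwa v0, v1 dst_sel:DWORD src0_sel:WORD_0         // SDWA: read only the low word of v1
+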
+VOP2
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**           **DST0**  **DST1** **SRC0**        **SRC1**        **SRC2**  **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_add_co_u32          :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_add_co_u32_dpp      :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_co_u32_sdwa     :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_add_f16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_add_f16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_f16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_add_f32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_add_f32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_f32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_add_u16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_add_u16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_u16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_add_u32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_add_u32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_add_u32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_addc_co_u32         :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`
+    v_addc_co_u32_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_addc_co_u32_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_and_b32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_and_b32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_and_b32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_ashrrev_i16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`u16<amdgpu_synid9_type_dev>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_ashrrev_i16_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`u16<amdgpu_synid9_type_dev>`,  :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ashrrev_i16_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`u16<amdgpu_synid9_type_dev>`, :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_ashrrev_i32         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_ashrrev_i32_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,  :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ashrrev_i32_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cndmask_b32         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`
+    v_cndmask_b32_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_cndmask_b32_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_ldexp_f16           :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`i16<amdgpu_synid9_type_dev>`
+    v_ldexp_f16_dpp       :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`i16<amdgpu_synid9_type_dev>`         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_ldexp_f16_sdwa      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`i16<amdgpu_synid9_type_dev>`       :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshlrev_b16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`u16<amdgpu_synid9_type_dev>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_lshlrev_b16_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`u16<amdgpu_synid9_type_dev>`,  :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshlrev_b16_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`u16<amdgpu_synid9_type_dev>`, :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshlrev_b32         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_lshlrev_b32_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,  :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshlrev_b32_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshrrev_b16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`u16<amdgpu_synid9_type_dev>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_lshrrev_b16_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`u16<amdgpu_synid9_type_dev>`,  :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshrrev_b16_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`u16<amdgpu_synid9_type_dev>`, :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_lshrrev_b32         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_lshrrev_b32_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,  :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_lshrrev_b32_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`u32<amdgpu_synid9_type_dev>`, :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mac_f16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mac_f16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mac_f32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mac_f32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_madak_f16           :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`imm32<amdgpu_synid9_fimm16>`
+    v_madak_f32           :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`imm32<amdgpu_synid9_fimm32>`
+    v_madmk_f16           :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`imm32<amdgpu_synid9_fimm16>`,      :ref:`vsrc2<amdgpu_synid9_vsrc32_0>`
+    v_madmk_f32           :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`imm32<amdgpu_synid9_fimm32>`,      :ref:`vsrc2<amdgpu_synid9_vsrc32_0>`
+    v_max_f16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_max_f16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_f16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_f32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_max_f32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_f32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_i16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_max_i16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_i16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_i32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_max_i32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_i32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_u16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_max_u16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_u16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_max_u32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_max_u32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_max_u32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_f16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_min_f16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_f16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_f32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_min_f32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_f32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_i16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_min_i16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_i16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_i32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_min_i32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_i32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_u16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_min_u16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_u16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_min_u32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_min_u32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_min_u32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_f16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_f16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_f16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_f32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_f32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_f32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_hi_i32_i24      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_hi_i32_i24_dpp  :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_hi_i32_i24_sdwa :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_hi_u32_u24      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_hi_u32_u24_dpp  :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_hi_u32_u24_sdwa :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_i32_i24         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_i32_i24_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_i32_i24_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_legacy_f32      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_legacy_f32_dpp  :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_legacy_f32_sdwa :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_lo_u16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_lo_u16_dpp      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_lo_u16_sdwa     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_mul_u32_u24         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_mul_u32_u24_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_mul_u32_u24_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_or_b32              :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_or_b32_dpp          :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_or_b32_sdwa         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_co_u32          :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_sub_co_u32_dpp      :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_co_u32_sdwa     :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_f16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_sub_f16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_f16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_f32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_sub_f32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_f32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_u16             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_sub_u16_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_u16_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_sub_u32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_sub_u32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_sub_u32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subb_co_u32         :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`
+    v_subb_co_u32_dpp     :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subb_co_u32_sdwa    :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subbrev_co_u32      :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`
+    v_subbrev_co_u32_dpp  :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subbrev_co_u32_sdwa :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,    :ref:`vcc<amdgpu_synid9_vcc_64>`   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_co_u32       :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_subrev_co_u32_dpp   :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_co_u32_sdwa  :ref:`vdst<amdgpu_synid9_vdst32_0>`, :ref:`vcc<amdgpu_synid9_vcc_64>`, :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_f16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_subrev_f16_dpp      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_f16_sdwa     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_f32          :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_subrev_f32_dpp      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,    :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_f32_sdwa     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_u16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_subrev_u16_dpp      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_u16_sdwa     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_subrev_u32          :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_subrev_u32_dpp      :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_subrev_u32_sdwa     :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_xor_b32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`,       :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_xor_b32_dpp         :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,      :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`             :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
+    v_xor_b32_sdwa        :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`           :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+
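+As an illustration of the VOP2 carry forms above (not part of the
+generated tables; the registers below are arbitrary), v_add_co_u32
+writes its carry-out to vcc and v_addc_co_u32 consumes vcc as carry-in,
+so a 64-bit addition might be sketched as:
+
+.. parsed-literal::
+
+    v_add_co_u32  v0, vcc, v2, v4       // low dword: carry-out written to vcc
+    v_addc_co_u32 v1, vcc, v3, v5, vcc  // high dword: adds the carry-in from vcc
+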
+VOP3
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST0**       **DST1**     **SRC0**         **SRC1**        **SRC2**            **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_add3_u32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_add_co_u32_e64               :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_add_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_add_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_add_f64                      :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_add_i16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`                        :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_add_i32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_add_lshl_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_add_u16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_add_u32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_addc_co_u32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`ssrc2<amdgpu_synid9_ssrc64_1>`
+    v_alignbit_b32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_alignbyte_b32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_and_b32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_and_or_b32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_ashrrev_i16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u16<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`
+    v_ashrrev_i32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`
+    v_ashrrev_i64                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src64_1>`
+    v_bcnt_u32_b32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_bfe_i32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`src2<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`
+    v_bfe_u32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_bfi_b32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_bfm_b32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_bfrev_b32_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_ceil_f16_e64                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_ceil_f32_e64                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ceil_f64_e64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_clrexcp_e64
+    v_cmp_class_f16_e64            :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmp_class_f32_e64            :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmp_class_f64_e64            :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmp_eq_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_eq_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_eq_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_eq_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_eq_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_eq_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_eq_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_eq_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_eq_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_f_f16_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_f_f32_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_f_f64_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_f_i16_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_f_i32_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_f_i64_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_f_u16_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_f_u32_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_f_u64_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_ge_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ge_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ge_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ge_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ge_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ge_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_ge_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ge_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ge_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_gt_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_gt_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_gt_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_gt_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_gt_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_gt_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_gt_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_gt_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_gt_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_le_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_le_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_le_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_le_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_le_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_le_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_le_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_le_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_le_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_lg_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lg_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lg_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_lt_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_lt_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_lt_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_lt_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_lt_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_lt_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_ne_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ne_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ne_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_ne_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ne_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_ne_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_neq_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_neq_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_neq_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nge_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nge_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nge_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ngt_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ngt_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_ngt_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nle_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nle_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nle_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlg_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlg_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlg_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlt_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlt_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_nlt_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_o_f16_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_o_f32_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_o_f64_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_t_i16_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_t_i32_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_t_i64_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_t_u16_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_t_u32_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmp_t_u64_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmp_tru_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_tru_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_tru_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_u_f16_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_u_f32_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmp_u_f64_e64                :ref:`sdst<amdgpu_synid9_sdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_class_f16_e64           :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmpx_class_f32_e64           :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmpx_class_f64_e64           :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmpx_eq_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_eq_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_eq_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_eq_i16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_eq_i32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_eq_i64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_eq_u16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_eq_u32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_eq_u64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_f_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_f_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_f_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_f_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_f_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_f_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_f_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_f_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_f_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_ge_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ge_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ge_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ge_i16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ge_i32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ge_i64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_ge_u16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ge_u32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ge_u64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_gt_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_gt_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_gt_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_gt_i16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_gt_i32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_gt_i64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_gt_u16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_gt_u32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_gt_u64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_le_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_le_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_le_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_le_i16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_le_i32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_le_i64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_le_u16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_le_u32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_le_u64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_lg_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lg_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lg_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_f16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_f32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_f64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_lt_i16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_lt_i32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_lt_i64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_lt_u16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_lt_u32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_lt_u64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_ne_i16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ne_i32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ne_i64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_ne_u16_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ne_u32_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_ne_u64_e64              :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_neq_f16_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_neq_f32_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_neq_f64_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nge_f16_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nge_f32_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nge_f64_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ngt_f16_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ngt_f32_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_ngt_f64_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nle_f16_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nle_f32_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nle_f64_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlg_f16_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlg_f32_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlg_f64_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlt_f16_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlt_f32_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_nlt_f64_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_o_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_o_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_o_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_t_i16_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_t_i32_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_t_i64_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_t_u16_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_t_u32_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cmpx_t_u64_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`
+    v_cmpx_tru_f16_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_tru_f32_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_tru_f64_e64             :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_u_f16_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_u_f32_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cmpx_u_f64_e64               :ref:`sdst<amdgpu_synid9_sdst64_1>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cndmask_b32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`ssrc2<amdgpu_synid9_ssrc64_1>`
+    v_cos_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_cos_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubeid_f32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubema_f32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubesc_f32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cubetc_f32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f16_f32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f16_i16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_f16_u16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_f32_f16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_f64_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_i32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_u32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte0_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte1_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte2_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f32_ubyte3_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_f32_e64              :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_i32_e64              :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_f64_u32_e64              :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_flr_i32_f32_e64          :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_cvt_i16_f16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_i32_f32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_i32_f64_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_norm_i16_f16_e64         :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_cvt_norm_u16_f16_e64         :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_cvt_off_f32_i4_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`                                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_cvt_pk_i16_i32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cvt_pk_u16_u32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_cvt_pk_u8_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`src2<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`
+    v_cvt_pkaccum_u8_f32           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`
+    v_cvt_pknorm_i16_f16           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_cvt_pknorm_i16_f32           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_cvt_pknorm_u16_f16           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_cvt_pknorm_u16_f32           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_cvt_pkrtz_f16_f32            :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_cvt_rpi_i32_f32_e64          :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_cvt_u16_f16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_u32_f32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_cvt_u32_f64_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_div_fixup_f16                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_div_fixup_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fixup_f64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fixup_legacy_f16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>`
+    v_div_fmas_f32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_fmas_f64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_div_scale_f32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`,     :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_div_scale_f64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,      :ref:`vcc<amdgpu_synid9_vcc_64>`,     :ref:`src0<amdgpu_synid9_src64_1>`,        :ref:`src1<amdgpu_synid9_src64_1>`,       :ref:`src2<amdgpu_synid9_src64_1>`
+    v_exp_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_exp_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_exp_legacy_f32_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ffbh_i32_e64                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_ffbh_u32_e64                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_ffbl_b32_e64                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_floor_f16_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_floor_f32_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_floor_f64_e64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fma_f16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_fma_f32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fma_f64                      :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fma_legacy_f16               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>`
+    v_fract_f16_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_fract_f32_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_fract_f64_e64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_frexp_exp_i16_f16_e64        :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_frexp_exp_i32_f32_e64        :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_frexp_exp_i32_f64_e64        :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`
+    v_frexp_mant_f16_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_frexp_mant_f32_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_frexp_mant_f64_e64           :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_mov_f32_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`param<amdgpu_synid9_param>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p1_f32_e64            :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p1ll_f16              :ref:`vdst<amdgpu_synid9_vdst32_0>`::ref:`f32<amdgpu_synid9_type_dev>`,           :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid9_type_dev>`,  :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`                    :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p1lv_f16              :ref:`vdst<amdgpu_synid9_vdst32_0>`::ref:`f32<amdgpu_synid9_type_dev>`,           :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid9_type_dev>`, :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`vsrc2<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`::ref:`f16x2<amdgpu_synid9_type_dev>`   :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p2_f16                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid9_type_dev>`, :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`vsrc2<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid9_type_dev>`     :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_interp_p2_f32_e64            :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`vsrc<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_interp_p2_legacy_f16         :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid9_type_dev>`, :ref:`attr<amdgpu_synid9_attr>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`vsrc2<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`::ref:`f32<amdgpu_synid9_type_dev>`     :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_ldexp_f16_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`i16<amdgpu_synid9_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_ldexp_f32                    :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`i32<amdgpu_synid9_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_ldexp_f64                    :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`i32<amdgpu_synid9_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_lerp_u8                      :ref:`vdst<amdgpu_synid9_vdst32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,           :ref:`src0<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`src2<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_log_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_log_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_log_legacy_f32_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_lshl_add_u32                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_lshl_or_b32                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,   :ref:`src2<amdgpu_synid9_src32_1>`
+    v_lshlrev_b16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u16<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`
+    v_lshlrev_b32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`
+    v_lshlrev_b64                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src64_1>`
+    v_lshrrev_b16_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u16<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`
+    v_lshrrev_b32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`
+    v_lshrrev_b64                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src64_1>`
+    v_mac_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_mac_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_f16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_f32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_i16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_i32_i16                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`::ref:`i32<amdgpu_synid9_type_dev>`        :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_i32_i24                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`::ref:`i32<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_i64_i32                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src64_1>`::ref:`i64<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_legacy_f16               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_legacy_f32               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mad_legacy_i16               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_legacy_u16               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_u16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_u32_u16                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`        :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_u32_u24                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_u64_u32                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src64_1>`::ref:`u64<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_max3_f16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_max3_f32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max3_i16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_max3_i32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_max3_u16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_max3_u32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_max_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_max_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max_f64                      :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_max_i16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_max_i32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_max_u16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_max_u32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mbcnt_hi_u32_b32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mbcnt_lo_u32_b32             :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_med3_f16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_med3_f32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_med3_i16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_med3_i32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_med3_u16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_med3_u32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_min3_f16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_min3_f32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,     :ref:`src2<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min3_i16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_min3_i32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_min3_u16                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_min3_u32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_min_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_min_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min_f64                      :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_min_i16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_min_i32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_min_u16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_min_u32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mov_b32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_mov_fed_b32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_mqsad_pk_u16_u8              :ref:`vdst<amdgpu_synid9_vdst64_0>`::ref:`b64<amdgpu_synid9_type_dev>`,           :ref:`src0<amdgpu_synid9_src64_1>`::ref:`b64<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`src2<amdgpu_synid9_src64_1>`::ref:`b64<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mqsad_u32_u8                 :ref:`vdst<amdgpu_synid9_vdst128_0>`::ref:`b128<amdgpu_synid9_type_dev>`,          :ref:`src0<amdgpu_synid9_src64_1>`::ref:`b64<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`vsrc2<amdgpu_synid9_vsrc128_0>`::ref:`b128<amdgpu_synid9_type_dev>`      :ref:`clamp<amdgpu_synid_clamp>`
+    v_msad_u8                      :ref:`vdst<amdgpu_synid9_vdst32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,           :ref:`src0<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`src2<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_mul_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_mul_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_f64                      :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_hi_i32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mul_hi_i32_i24_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mul_hi_u32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mul_hi_u32_u24_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mul_i32_i24_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mul_legacy_f32_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_mul_lo_u16_e64               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mul_lo_u32                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_mul_u32_u24_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_nop_e64
+    v_not_b32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_or3_b32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_or_b32_e64                   :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_pack_b32_f16                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`op_sel<amdgpu_synid_vop3_op_sel>`
+    v_perm_b32                     :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_qsad_pk_u16_u8               :ref:`vdst<amdgpu_synid9_vdst64_0>`::ref:`b64<amdgpu_synid9_type_dev>`,           :ref:`src0<amdgpu_synid9_src64_1>`::ref:`b64<amdgpu_synid9_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`b32<amdgpu_synid9_type_dev>`,   :ref:`src2<amdgpu_synid9_src64_1>`::ref:`b64<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_rcp_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_rcp_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_f64_e64                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rcp_iflag_f32_e64            :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_readlane_b32                 :ref:`sdst<amdgpu_synid9_sdst32_2>`,               :ref:`vsrc0<amdgpu_synid9_vsrc32_0>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_3>`
+    v_rndne_f16_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_rndne_f32_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rndne_f64_e64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_rsq_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_rsq_f64_e64                  :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sad_hi_u8                    :ref:`vdst<amdgpu_synid9_vdst32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,           :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u8x4<amdgpu_synid9_type_dev>`,   :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u8x4<amdgpu_synid9_type_dev>`,  :ref:`src2<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_sad_u16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,           :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u16x2<amdgpu_synid9_type_dev>`,  :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u16x2<amdgpu_synid9_type_dev>`, :ref:`src2<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_sad_u32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`            :ref:`clamp<amdgpu_synid_clamp>`
+    v_sad_u8                       :ref:`vdst<amdgpu_synid9_vdst32_0>`::ref:`u32<amdgpu_synid9_type_dev>`,           :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u8x4<amdgpu_synid9_type_dev>`,   :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u8x4<amdgpu_synid9_type_dev>`,  :ref:`src2<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`        :ref:`clamp<amdgpu_synid_clamp>`
+    v_sat_pk_u8_i16_e64            :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_screen_partition_4se_b32_e64 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`
+    v_sin_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_sin_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sqrt_f16_e64                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_sqrt_f32_e64                 :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sqrt_f64_e64                 :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sub_co_u32_e64               :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_sub_f16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_sub_f32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_sub_i16                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`                        :ref:`op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_sub_i32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_sub_u16_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_sub_u32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_subb_co_u32_e64              :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`ssrc2<amdgpu_synid9_ssrc64_1>`
+    v_subbrev_co_u32_e64           :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`ssrc2<amdgpu_synid9_ssrc64_1>`
+    v_subrev_co_u32_e64            :ref:`vdst<amdgpu_synid9_vdst32_0>`,      :ref:`sdst<amdgpu_synid9_sdst64_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_subrev_f16_e64               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>`
+    v_subrev_f32_e64               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_subrev_u16_e64               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_subrev_u32_e64               :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+    v_trig_preop_f64               :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src0<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`,      :ref:`src1<amdgpu_synid9_src32_1>`::ref:`u32<amdgpu_synid9_type_dev>`                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_trunc_f16_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>`
+    v_trunc_f32_e64                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src<amdgpu_synid9_src32_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_trunc_f64_e64                :ref:`vdst<amdgpu_synid9_vdst64_0>`,               :ref:`src<amdgpu_synid9_src64_1>`::ref:`m<amdgpu_synid9_mod_vop3_abs_neg>`                                    :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
+    v_writelane_b32                :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`ssrc0<amdgpu_synid9_ssrc32_4>`,       :ref:`ssrc1<amdgpu_synid9_ssrc32_3>`
+    v_xad_u32                      :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`,       :ref:`src2<amdgpu_synid9_src32_1>`
+    v_xor_b32_e64                  :ref:`vdst<amdgpu_synid9_vdst32_0>`,               :ref:`src0<amdgpu_synid9_src32_1>`,        :ref:`src1<amdgpu_synid9_src32_1>`
+
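+An illustrative sketch of the VOP3 (_e64) syntax listed above, assuming
+arbitrary register choices: ``-`` and ``|...|`` are the neg/abs input
+modifiers, ``clamp`` and ``mul:2`` the output modifiers::
+
+    v_fma_f32 v0, -v1, |v2|, -v3 clamp  // neg/abs input modifiers; result clamped
+    v_sqrt_f32_e64 v4, -v5 mul:2        // omod output modifier scales the result by 2
+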
+VOP3P
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**           **DST**      **SRC0**        **SRC1**     **SRC2**       **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_mad_mix_f32         :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`, :ref:`src2<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`    :ref:`m_op_sel<amdgpu_synid_mad_mix_op_sel>` :ref:`m_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_mixhi_f16       :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`, :ref:`src2<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`    :ref:`m_op_sel<amdgpu_synid_mad_mix_op_sel>` :ref:`m_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_mad_mixlo_f16       :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`,    :ref:`src1<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`, :ref:`src2<amdgpu_synid9_src32_1>`::ref:`fx<amdgpu_synid9_mad_type_dev>`    :ref:`m_op_sel<amdgpu_synid_mad_mix_op_sel>` :ref:`m_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_add_f16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_add_i16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_add_u16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_ashrrev_i16      :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u16x2<amdgpu_synid9_type_dev>`, :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_fma_f16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`,    :ref:`src2<amdgpu_synid9_src32_1>`       :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_lshlrev_b16      :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u16x2<amdgpu_synid9_type_dev>`, :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_lshrrev_b16      :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`::ref:`u16x2<amdgpu_synid9_type_dev>`, :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_mad_i16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`,    :ref:`src2<amdgpu_synid9_src32_1>`       :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_mad_u16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`,    :ref:`src2<amdgpu_synid9_src32_1>`       :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_max_f16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_max_i16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_max_u16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_min_f16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_min_i16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_min_u16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_mul_f16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_mul_lo_u16       :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
+    v_pk_sub_i16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+    v_pk_sub_u16          :ref:`vdst<amdgpu_synid9_vdst32_0>`,    :ref:`src0<amdgpu_synid9_src32_1>`,       :ref:`src1<amdgpu_synid9_src32_1>`                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
+
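+An illustrative sketch of the VOP3P modifier syntax, again with arbitrary
+registers: the index lists select (``op_sel``/``op_sel_hi``) or negate
+(``neg_lo``/``neg_hi``) the 16-bit halves of each packed source operand::
+
+    v_pk_add_f16 v0, v1, v2 op_sel:[0,1] op_sel_hi:[1,0] neg_lo:[1,0] clamp
+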
+VOPC
+-----------------------
+
+.. parsed-literal::
+
+    **INSTRUCTION**                    **DST**       **SRC0**      **SRC1**             **MODIFIERS**
+    \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+    v_cmp_class_f16                :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmp_class_f16_sdwa           :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`b32<amdgpu_synid9_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_class_f32                :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmp_class_f32_sdwa           :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`b32<amdgpu_synid9_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_class_f64                :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmp_eq_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_eq_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_eq_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_eq_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_eq_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_eq_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_eq_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_eq_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_eq_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_eq_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_f_f16                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_f_f16_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_f32                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_f_f32_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_f64                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_f_i16                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_f_i16_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_i32                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_f_i32_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_i64                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_f_u16                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_f_u16_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_u32                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_f_u32_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_f_u64                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_ge_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ge_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ge_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_ge_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ge_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ge_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_ge_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ge_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ge_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ge_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_gt_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_gt_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_gt_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_gt_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_gt_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_gt_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_gt_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_gt_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_gt_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_gt_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_le_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_le_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_le_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_le_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_le_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_le_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_le_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_le_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_le_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_le_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_lg_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lg_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lg_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lg_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lg_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_lt_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lt_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lt_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_lt_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lt_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lt_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_lt_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lt_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_lt_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_lt_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_ne_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ne_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ne_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_ne_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ne_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ne_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ne_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_neq_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_neq_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_neq_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_neq_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_neq_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_nge_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nge_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nge_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nge_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nge_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_ngt_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ngt_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ngt_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_ngt_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_ngt_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_nle_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nle_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nle_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nle_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nle_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_nlg_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nlg_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlg_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nlg_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlg_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_nlt_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nlt_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlt_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_nlt_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_nlt_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_o_f16                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_o_f16_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_o_f32                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_o_f32_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_o_f64                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_t_i16                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_t_i16_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_i32                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_t_i32_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_i64                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_t_u16                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_t_u16_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_u32                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_t_u32_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_t_u64                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_tru_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_tru_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_tru_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_tru_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_tru_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmp_u_f16                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_u_f16_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_u_f32                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmp_u_f32_sdwa               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmp_u_f64                    :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_class_f16               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmpx_class_f16_sdwa          :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`b32<amdgpu_synid9_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_class_f32               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmpx_class_f32_sdwa          :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`::ref:`b32<amdgpu_synid9_type_dev>`      :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_class_f64               :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`b32<amdgpu_synid9_type_dev>`
+    v_cmpx_eq_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_eq_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_eq_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_eq_i16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_eq_i16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_i32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_eq_i32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_i64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_eq_u16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_eq_u16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_u32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_eq_u32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_eq_u64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_f_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_f_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_f_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_f_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_f_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_f_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_f_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_f_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_f_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_f_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_ge_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ge_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ge_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_ge_i16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ge_i16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_i32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ge_i32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_i64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_ge_u16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ge_u16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_u32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ge_u32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ge_u64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_gt_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_gt_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_gt_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_gt_i16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_gt_i16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_i32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_gt_i32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_i64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_gt_u16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_gt_u16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_u32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_gt_u32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_gt_u64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_le_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_le_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_le_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_le_i16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_le_i16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_i32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_le_i32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_i64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_le_u16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_le_u16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_u32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_le_u32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_le_u64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_lg_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lg_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lg_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lg_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lg_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_lt_f16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lt_f16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_f32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lt_f32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_f64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_lt_i16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lt_i16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_i32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lt_i32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_i64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_lt_u16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lt_u16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_u32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_lt_u32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_lt_u64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_ne_i16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ne_i16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_i32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ne_i32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_i64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_ne_u16                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ne_u16_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_u32                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ne_u32_sdwa             :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ne_u64                  :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_neq_f16                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_neq_f16_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_neq_f32                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_neq_f32_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_neq_f64                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_nge_f16                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nge_f16_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nge_f32                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nge_f32_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nge_f64                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_ngt_f16                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ngt_f16_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ngt_f32                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_ngt_f32_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_ngt_f64                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_nle_f16                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nle_f16_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nle_f32                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nle_f32_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nle_f64                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_nlg_f16                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nlg_f16_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlg_f32                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nlg_f32_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlg_f64                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_nlt_f16                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nlt_f16_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlt_f32                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_nlt_f32_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_nlt_f64                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_o_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_o_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_o_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_o_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_o_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_t_i16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_t_i16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_i32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_t_i32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_i64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_t_u16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_t_u16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_u32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_t_u32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_sdwa_sext>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_t_u64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_tru_f16                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_tru_f16_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_tru_f32                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_tru_f32_sdwa            :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_tru_f64                 :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+    v_cmpx_u_f16                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_u_f16_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_u_f32                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`
+    v_cmpx_u_f32_sdwa              :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`,   :ref:`vsrc1<amdgpu_synid9_vsrc32_0>`::ref:`m<amdgpu_synid9_mod_dpp_sdwa_abs_neg>`          :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
+    v_cmpx_u_f64                   :ref:`vcc<amdgpu_synid9_vcc_64>`,      :ref:`src0<amdgpu_synid9_src64_0>`,     :ref:`vsrc1<amdgpu_synid9_vsrc64_0>`
+
+.. |---| unicode:: U+02014 .. em dash
+
+
+.. toctree::
+    :hidden:
+
+    gfx9_attr
+    gfx9_bimm16
+    gfx9_bimm32
+    gfx9_fimm16
+    gfx9_fimm32
+    gfx9_hwreg
+    gfx9_imm4
+    gfx9_label
+    gfx9_msg
+    gfx9_param
+    gfx9_perm_smem
+    gfx9_simm16
+    gfx9_tgt
+    gfx9_uimm16
+    gfx9_waitcnt
+    gfx9_addr_buf
+    gfx9_addr_ds
+    gfx9_addr_flat
+    gfx9_addr_mimg
+    gfx9_base_smem_addr
+    gfx9_base_smem_buf
+    gfx9_base_smem_scratch
+    gfx9_data_buf_atomic128
+    gfx9_data_buf_atomic32
+    gfx9_data_buf_atomic64
+    gfx9_data_mimg_atomic_cmp
+    gfx9_data_mimg_atomic_reg
+    gfx9_data_mimg_store
+    gfx9_data_mimg_store_d16
+    gfx9_data_smem_atomic128
+    gfx9_data_smem_atomic32
+    gfx9_data_smem_atomic64
+    gfx9_dst_buf_128
+    gfx9_dst_buf_32
+    gfx9_dst_buf_64
+    gfx9_dst_buf_96
+    gfx9_dst_buf_lds
+    gfx9_dst_flat_atomic32
+    gfx9_dst_flat_atomic64
+    gfx9_dst_mimg_gather4
+    gfx9_dst_mimg_regular
+    gfx9_dst_mimg_regular_d16
+    gfx9_offset_buf
+    gfx9_offset_smem_buf
+    gfx9_offset_smem_plain
+    gfx9_rsrc_buf
+    gfx9_rsrc_mimg
+    gfx9_saddr_flat_global
+    gfx9_saddr_flat_scratch
+    gfx9_samp_mimg
+    gfx9_sdata128_0
+    gfx9_sdata32_0
+    gfx9_sdata64_0
+    gfx9_sdst128_0
+    gfx9_sdst256_0
+    gfx9_sdst32_0
+    gfx9_sdst32_1
+    gfx9_sdst32_2
+    gfx9_sdst512_0
+    gfx9_sdst64_0
+    gfx9_sdst64_1
+    gfx9_src32_0
+    gfx9_src32_1
+    gfx9_src64_0
+    gfx9_src64_1
+    gfx9_src_exp
+    gfx9_ssrc32_0
+    gfx9_ssrc32_1
+    gfx9_ssrc32_2
+    gfx9_ssrc32_3
+    gfx9_ssrc32_4
+    gfx9_ssrc64_0
+    gfx9_ssrc64_1
+    gfx9_ssrc64_2
+    gfx9_ssrc64_3
+    gfx9_vaddr_flat_global
+    gfx9_vaddr_flat_scratch
+    gfx9_vcc_64
+    gfx9_vdata128_0
+    gfx9_vdata32_0
+    gfx9_vdata64_0
+    gfx9_vdata96_0
+    gfx9_vdst128_0
+    gfx9_vdst32_0
+    gfx9_vdst64_0
+    gfx9_vdst96_0
+    gfx9_vsrc128_0
+    gfx9_vsrc32_0
+    gfx9_vsrc64_0
+    gfx9_mad_type_dev
+    gfx9_mod_dpp_sdwa_abs_neg
+    gfx9_mod_sdwa_sext
+    gfx9_mod_vop3_abs_neg
+    gfx9_opt
+    gfx9_ret
+    gfx9_type_dev
diff --git a/docs/AMDGPU/gfx7_addr_buf.rst b/docs/AMDGPU/gfx7_addr_buf.rst
new file mode 100644
index 0000000..22dc7d3
--- /dev/null
+++ b/docs/AMDGPU/gfx7_addr_buf.rst
@@ -0,0 +1,34 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_addr_buf:
+
+vaddr
+===========================
+
+This is an optional operand which may specify a 64-bit address, offset and/or index.
+
+*Size:* 0, 1 or 2 dwords. Size is controlled by modifiers :ref:`addr64<amdgpu_synid_addr64>`, :ref:`offen<amdgpu_synid_offen>` and :ref:`idxen<amdgpu_synid_idxen>` (see the examples below):
+
+* If only :ref:`addr64<amdgpu_synid_addr64>` is specified, this operand supplies a 64-bit address. Size is 2 dwords.
+* If only :ref:`idxen<amdgpu_synid_idxen>` is specified, this operand supplies an index. Size is 1 dword.
+* If only :ref:`offen<amdgpu_synid_offen>` is specified, this operand supplies an offset. Size is 1 dword.
+* If both :ref:`idxen<amdgpu_synid_idxen>` and :ref:`offen<amdgpu_synid_offen>` are specified, the index is in the first register and the offset is in the second. Size is 2 dwords.
+* If none of these modifiers are specified, this operand must be set to :ref:`off<amdgpu_synid_off>`.
+* All other combinations of these modifiers are illegal.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`off<amdgpu_synid_off>`
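+
+The examples below sketch these modifier combinations; they are illustrative only, and the register choices are arbitrary:
+
+.. parsed-literal::
+
+    buffer_load_dword v1, off, s[4:7], s1                 // no modifiers: vaddr is off
+    buffer_load_dword v1, v2, s[4:7], s1 offen            // offset only
+    buffer_load_dword v1, v2, s[4:7], s1 idxen            // index only
+    buffer_load_dword v1, v[2:3], s[4:7], s1 idxen offen  // index, then offset
+    buffer_load_dword v1, v[2:3], s[4:7], s1 addr64       // 64-bit address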
diff --git a/docs/AMDGPU/gfx7_addr_ds.rst b/docs/AMDGPU/gfx7_addr_ds.rst
new file mode 100644
index 0000000..c9cab7d
--- /dev/null
+++ b/docs/AMDGPU/gfx7_addr_ds.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_addr_ds:
+
+vaddr
+===========================
+
+An offset from the start of GDS/LDS memory.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_addr_flat.rst b/docs/AMDGPU/gfx7_addr_flat.rst
new file mode 100644
index 0000000..93deea6
--- /dev/null
+++ b/docs/AMDGPU/gfx7_addr_flat.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_addr_flat:
+
+vaddr
+===========================
+
+A 64-bit flat address.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_addr_mimg.rst b/docs/AMDGPU/gfx7_addr_mimg.rst
new file mode 100644
index 0000000..76eb484
--- /dev/null
+++ b/docs/AMDGPU/gfx7_addr_mimg.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_addr_mimg:
+
+vaddr
+===========================
+
+Image address, which includes one- to four-dimensional coordinates and other data used to locate a position in the image.
+
+*Size:* 1, 2, 3, 4, 8 or 16 dwords. The actual size depends on the opcode and the specific image being handled.
+
+    Note 1. Image format and dimensions are encoded in the image resource constant but not in the instruction.
+
+    Note 2. The actual image address size may vary from 1 to 13 dwords, but the assembler currently supports only a limited set of register sequences.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_attr.rst b/docs/AMDGPU/gfx7_attr.rst
new file mode 100644
index 0000000..219b774
--- /dev/null
+++ b/docs/AMDGPU/gfx7_attr.rst
@@ -0,0 +1,30 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_attr:
+
+attr
+===========================
+
+Interpolation attribute and channel:
+
+    ============== ===================================
+    Syntax         Description
+    ============== ===================================
+    attr{0..32}.x  Attribute 0..32 with *x* channel.
+    attr{0..32}.y  Attribute 0..32 with *y* channel.
+    attr{0..32}.z  Attribute 0..32 with *z* channel.
+    attr{0..32}.w  Attribute 0..32 with *w* channel.
+    ============== ===================================
+
+Examples:
+
+.. parsed-literal::
+
+    v_interp_p1_f32 v1, v0, attr0.x
+    v_interp_p1_f32 v1, v0, attr32.w
+
diff --git a/docs/AMDGPU/gfx7_base_smem_addr.rst b/docs/AMDGPU/gfx7_base_smem_addr.rst
new file mode 100644
index 0000000..9cc3cc7
--- /dev/null
+++ b/docs/AMDGPU/gfx7_base_smem_addr.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_base_smem_addr:
+
+sbase
+===========================
+
+A 64-bit base address for scalar memory operations.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx7_base_smem_buf.rst b/docs/AMDGPU/gfx7_base_smem_buf.rst
new file mode 100644
index 0000000..416cac7
--- /dev/null
+++ b/docs/AMDGPU/gfx7_base_smem_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_base_smem_buf:
+
+sbase
+===========================
+
+A 128-bit buffer resource constant for scalar memory operations which provides a base address, a size and a stride.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx7_bimm16.rst b/docs/AMDGPU/gfx7_bimm16.rst
new file mode 100644
index 0000000..eb43f9b
--- /dev/null
+++ b/docs/AMDGPU/gfx7_bimm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_bimm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits.
+
diff --git a/docs/AMDGPU/gfx7_bimm32.rst b/docs/AMDGPU/gfx7_bimm32.rst
new file mode 100644
index 0000000..4d8f89d
--- /dev/null
+++ b/docs/AMDGPU/gfx7_bimm32.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_bimm32:
+
+imm32
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 32 bits.
+
diff --git a/docs/AMDGPU/gfx7_data_buf_atomic128.rst b/docs/AMDGPU/gfx7_data_buf_atomic128.rst
new file mode 100644
index 0000000..33ff26c
--- /dev/null
+++ b/docs/AMDGPU/gfx7_data_buf_atomic128.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_data_buf_atomic128:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the operation.
+
+*Size:* 4 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_data_buf_atomic32.rst b/docs/AMDGPU/gfx7_data_buf_atomic32.rst
new file mode 100644
index 0000000..df4a6e4
--- /dev/null
+++ b/docs/AMDGPU/gfx7_data_buf_atomic32.rst
@@ -0,0 +1,28 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_data_buf_atomic32:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the operation (see the example below).
+
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
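+
+The example below sketches both forms; it is illustrative only, and the register choices are arbitrary:
+
+.. parsed-literal::
+
+    buffer_atomic_add v5, v1, s[8:11], s3 idxen        // no return value
+    buffer_atomic_add v5, v1, s[8:11], s3 idxen glc    // v5 receives the pre-operation value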
diff --git a/docs/AMDGPU/gfx7_data_buf_atomic64.rst b/docs/AMDGPU/gfx7_data_buf_atomic64.rst
new file mode 100644
index 0000000..4892e41
--- /dev/null
+++ b/docs/AMDGPU/gfx7_data_buf_atomic64.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_data_buf_atomic64:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the operation.
+
+*Size:* 2 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_data_mimg_atomic_cmp.rst b/docs/AMDGPU/gfx7_data_mimg_atomic_cmp.rst
new file mode 100644
index 0000000..82c3337
--- /dev/null
+++ b/docs/AMDGPU/gfx7_data_mimg_atomic_cmp.rst
@@ -0,0 +1,27 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_data_mimg_atomic_cmp:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the operation.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify 2 data elements for 32-bit-per-pixel surfaces or 4 data elements for 64-bit-per-pixel surfaces. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+  Note. The surface data format is indicated in the image resource constant but not in the instruction.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_data_mimg_atomic_reg.rst b/docs/AMDGPU/gfx7_data_mimg_atomic_reg.rst
new file mode 100644
index 0000000..729548d
--- /dev/null
+++ b/docs/AMDGPU/gfx7_data_mimg_atomic_reg.rst
@@ -0,0 +1,26 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_data_mimg_atomic_reg:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the operation.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify 1 data element for 32-bit-per-pixel surfaces or 2 data elements for 64-bit-per-pixel surfaces. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+  Note. The surface data format is indicated in the image resource constant but not in the instruction.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_data_mimg_store.rst b/docs/AMDGPU/gfx7_data_mimg_store.rst
new file mode 100644
index 0000000..1858547
--- /dev/null
+++ b/docs/AMDGPU/gfx7_data_mimg_store.rst
@@ -0,0 +1,18 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_data_mimg_store:
+
+vdata
+===========================
+
+Image data to be stored by an *image_store* instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` which may specify from 1 to 4 data elements. Each data element occupies 1 dword.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_dst_buf_128.rst b/docs/AMDGPU/gfx7_dst_buf_128.rst
new file mode 100644
index 0000000..701616e
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_buf_128.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_buf_128:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 4 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_dst_buf_64.rst b/docs/AMDGPU/gfx7_dst_buf_64.rst
new file mode 100644
index 0000000..de62a56
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_buf_64.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_buf_64:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 2 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_dst_buf_96.rst b/docs/AMDGPU/gfx7_dst_buf_96.rst
new file mode 100644
index 0000000..1abfcc0
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_buf_96.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_buf_96:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 3 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_dst_buf_lds.rst b/docs/AMDGPU/gfx7_dst_buf_lds.rst
new file mode 100644
index 0000000..435f6bb
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_buf_lds.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_buf_lds:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+If :ref:`lds<amdgpu_synid_lds>` is specified, this operand is ignored by the hardware, and the data are stored directly into LDS.
+
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+    Note that :ref:`tfe<amdgpu_synid_tfe>` and :ref:`lds<amdgpu_synid_lds>` cannot be used together.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_dst_flat_atomic32.rst b/docs/AMDGPU/gfx7_dst_flat_atomic32.rst
new file mode 100644
index 0000000..4a85656
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_flat_atomic32.rst
@@ -0,0 +1,26 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_flat_atomic32:
+
+vdst
+===========================
+
+Data returned by a 32-bit atomic flat instruction.
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified; see the example below.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
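+
+The example below sketches both forms; it is illustrative only, and the register choices are arbitrary:
+
+.. parsed-literal::
+
+    flat_atomic_add v[3:4], v5             // no glc: no vdst operand
+    flat_atomic_add v1, v[3:4], v5 glc     // glc: v1 receives the pre-operation value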
diff --git a/docs/AMDGPU/gfx7_dst_flat_atomic64.rst b/docs/AMDGPU/gfx7_dst_flat_atomic64.rst
new file mode 100644
index 0000000..cb1fddd
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_flat_atomic64.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_flat_atomic64:
+
+vdst
+===========================
+
+Data returned by a 64-bit atomic flat instruction.
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_dst_mimg_gather4.rst b/docs/AMDGPU/gfx7_dst_mimg_gather4.rst
new file mode 100644
index 0000000..17fcff5
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_mimg_gather4.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_mimg_gather4:
+
+vdst
+===========================
+
+Image data to be loaded by an *image_gather4* instruction.
+
+*Size:* 4 data elements by default. Each data element occupies 1 dword. :ref:`tfe<amdgpu_synid_tfe>` adds one more dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_dst_mimg_regular.rst b/docs/AMDGPU/gfx7_dst_mimg_regular.rst
new file mode 100644
index 0000000..aa165b8
--- /dev/null
+++ b/docs/AMDGPU/gfx7_dst_mimg_regular.rst
@@ -0,0 +1,20 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_dst_mimg_regular:
+
+vdst
+===========================
+
+Image data to be loaded by an image instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify from 1 to 4 data elements. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_fimm32.rst b/docs/AMDGPU/gfx7_fimm32.rst
new file mode 100644
index 0000000..70c8189
--- /dev/null
+++ b/docs/AMDGPU/gfx7_fimm32.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_fimm32:
+
+imm32
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>` or a :ref:`floating-point_number<amdgpu_synid_floating-point_number>`. The value is converted to *f32* as described :ref:`here<amdgpu_synid_lit_conv>`.
+
diff --git a/docs/AMDGPU/gfx7_hwreg.rst b/docs/AMDGPU/gfx7_hwreg.rst
new file mode 100644
index 0000000..1e2d964
--- /dev/null
+++ b/docs/AMDGPU/gfx7_hwreg.rst
@@ -0,0 +1,60 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_hwreg:
+
+hwreg
+===========================
+
+Bits of a hardware register being accessed.
+
+The bits of this operand have the following meaning:
+
+    ============ ===================================
+    Bits         Description
+    ============ ===================================
+    5:0          Register *id*.
+    10:6         First bit *offset* (0..31).
+    15:11        *Size* in bits (1..32).
+    ============ ===================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>` or using the syntax described below.
+
+    ==================================== ============================================================================
+    Syntax                               Description
+    ==================================== ============================================================================
+    hwreg({0..63})                       All bits of a register indicated by its *id*.
+    hwreg(<*name*>)                      All bits of a register indicated by its *name*.
+    hwreg({0..63}, {0..31}, {1..32})     Register bits indicated by register *id*, first bit *offset* and *size*.
+    hwreg(<*name*>, {0..31}, {1..32})    Register bits indicated by register *name*, first bit *offset* and *size*.
+    ==================================== ============================================================================
+
+Register *id*, *offset* and *size* must be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`.
+
+Defined register *names* include:
+
+    =================== ==========================================
+    Name                Description
+    =================== ==========================================
+    HW_REG_MODE         Shader writeable mode bits.
+    HW_REG_STATUS       Shader read-only status.
+    HW_REG_TRAPSTS      Trap status.
+    HW_REG_HW_ID        Id of wave, simd, compute unit, etc.
+    HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.
+    HW_REG_LDS_ALLOC    Per-wave LDS allocation.
+    HW_REG_IB_STS       Counters of outstanding instructions.
+    =================== ==========================================
+
+Examples:
+
+.. parsed-literal::
+
+    s_getreg_b32 s2, 0x6
+    s_getreg_b32 s2, hwreg(15)
+    s_getreg_b32 s2, hwreg(51, 1, 31)
+    s_getreg_b32 s2, hwreg(HW_REG_LDS_ALLOC, 0, 1)
+
diff --git a/docs/AMDGPU/gfx7_label.rst b/docs/AMDGPU/gfx7_label.rst
new file mode 100644
index 0000000..ed2f3a4
--- /dev/null
+++ b/docs/AMDGPU/gfx7_label.rst
@@ -0,0 +1,30 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_label:
+
+label
+===========================
+
+A branch target which is a 16-bit signed integer treated as a PC-relative dword offset.
+
+This operand may be specified as:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>`. The number is truncated to 16 bits.
+* An :ref:`absolute_expression<amdgpu_synid_absolute_expression>` which must start with an :ref:`integer_number<amdgpu_synid_integer_number>`. The value of the expression is truncated to 16 bits.
+* A :ref:`symbol<amdgpu_synid_symbol>` (for example, a label). The value is handled as a 16-bit PC-relative dword offset to be resolved by a linker.
+
+Examples:
+
+.. parsed-literal::
+
+  offset = 30
+  s_branch loop_end
+  s_branch 2 + offset
+  s_branch 32
+  loop_end:
+
diff --git a/docs/AMDGPU/gfx7_mod.rst b/docs/AMDGPU/gfx7_mod.rst
new file mode 100644
index 0000000..fcaa6ca
--- /dev/null
+++ b/docs/AMDGPU/gfx7_mod.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_mod:
+
+m
+===========================
+
+This operand may be used with the floating-point operand modifiers :ref:`abs<amdgpu_synid_abs>` and :ref:`neg<amdgpu_synid_neg>`.
+
diff --git a/docs/AMDGPU/gfx7_msg.rst b/docs/AMDGPU/gfx7_msg.rst
new file mode 100644
index 0000000..5476053
--- /dev/null
+++ b/docs/AMDGPU/gfx7_msg.rst
@@ -0,0 +1,72 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_msg:
+
+msg
+===========================
+
+A 16-bit message code. The bits of this operand have the following meaning:
+
+    ============ ======================================================
+    Bits         Description
+    ============ ======================================================
+    3:0          Message *type*.
+    6:4          Optional *operation*.
+    9:7          Optional *parameters*.
+    15:10        Unused.
+    ============ ======================================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>` or using the syntax described below:
+
+    ======================================== ========================================================================
+    Syntax                                   Description
+    ======================================== ========================================================================
+    sendmsg(<*type*>)                        A message identified by its *type*.
+    sendmsg(<*type*>, <*op*>)                A message identified by its *type* and *operation*.
+    sendmsg(<*type*>, <*op*>, <*stream*>)    A message identified by its *type* and *operation* with a stream *id*.
+    ======================================== ========================================================================
+
+*Type* may be specified using message *name* or message *id*.
+
+*Op* may be specified using operation *name* or operation *id*.
+
+Stream *id* is an integer in the range 0..3.
+
+Message *id*, operation *id* and stream *id* must be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`.
+
+Each message type supports specific operations:
+
+    ================= ========== ============================== ============ ==========
+    Message name      Message Id Supported Operations           Operation Id Stream Id
+    ================= ========== ============================== ============ ==========
+    MSG_INTERRUPT     1          \-                             \-           \-
+    MSG_GS            2          GS_OP_CUT                      1            Optional
+    \                            GS_OP_EMIT                     2            Optional
+    \                            GS_OP_EMIT_CUT                 3            Optional
+    MSG_GS_DONE       3          GS_OP_NOP                      0            \-
+    \                            GS_OP_CUT                      1            Optional
+    \                            GS_OP_EMIT                     2            Optional
+    \                            GS_OP_EMIT_CUT                 3            Optional
+    MSG_SYSMSG        15         SYSMSG_OP_ECC_ERR_INTERRUPT    1            \-
+    \                            SYSMSG_OP_REG_RD               2            \-
+    \                            SYSMSG_OP_HOST_TRAP_ACK        3            \-
+    \                            SYSMSG_OP_TTRACE_PC            4            \-
+    ================= ========== ============================== ============ ==========
+
+Examples:
+
+.. parsed-literal::
+
+    s_sendmsg 0x12
+    s_sendmsg sendmsg(MSG_INTERRUPT)
+    s_sendmsg sendmsg(2, GS_OP_CUT)
+    s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT)
+    s_sendmsg sendmsg(MSG_GS, 2)
+    s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_EMIT_CUT, 1)
+    s_sendmsg sendmsg(MSG_SYSMSG, SYSMSG_OP_TTRACE_PC)
+
diff --git a/docs/AMDGPU/gfx7_offset_buf.rst b/docs/AMDGPU/gfx7_offset_buf.rst
new file mode 100644
index 0000000..c36df06
--- /dev/null
+++ b/docs/AMDGPU/gfx7_offset_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_offset_buf:
+
+soffset
+===========================
+
+An unsigned byte offset.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx7_offset_smem.rst b/docs/AMDGPU/gfx7_offset_smem.rst
new file mode 100644
index 0000000..85ed5f1
--- /dev/null
+++ b/docs/AMDGPU/gfx7_offset_smem.rst
@@ -0,0 +1,28 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_offset_smem:
+
+soffset
+===========================
+
+An unsigned offset added to the base address to compute the memory address; see the examples below.
+
+* If the offset is specified as a register, it supplies an unsigned byte offset, but the 2 LSBs are ignored.
+* If the offset is specified as a :ref:`uimm32<amdgpu_synid_uimm32>`, it supplies a 32-bit unsigned byte offset, but the 2 LSBs are ignored.
+* If the offset is specified as a :ref:`uimm8<amdgpu_synid_uimm8>`, it supplies an 8-bit unsigned dword offset.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`uimm8<amdgpu_synid_uimm8>`, :ref:`uimm32<amdgpu_synid_uimm32>`
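+
+Examples (illustrative only; register and offset values are arbitrary):
+
+.. parsed-literal::
+
+    s_load_dword s5, s[2:3], 0x4    // uimm8: 8-bit unsigned dword offset
+    s_load_dword s5, s[2:3], s8     // register: byte offset, 2 LSBs ignored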
diff --git a/docs/AMDGPU/gfx7_opt.rst b/docs/AMDGPU/gfx7_opt.rst
new file mode 100644
index 0000000..1a48733
--- /dev/null
+++ b/docs/AMDGPU/gfx7_opt.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_opt:
+
+opt
+===========================
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
diff --git a/docs/AMDGPU/gfx7_param.rst b/docs/AMDGPU/gfx7_param.rst
new file mode 100644
index 0000000..13e533b
--- /dev/null
+++ b/docs/AMDGPU/gfx7_param.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_param:
+
+param
+===========================
+
+Interpolation parameter to read:
+
+    ============ ===================================
+    Syntax       Description
+    ============ ===================================
+    p0           Parameter *P0*.
+    p10          Parameter *P10*.
+    p20          Parameter *P20*.
+    ============ ===================================
+
diff --git a/docs/AMDGPU/gfx7_ret.rst b/docs/AMDGPU/gfx7_ret.rst
new file mode 100644
index 0000000..25301c2
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ret.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ret:
+
+dst
+===========================
+
+This is an input operand. It may optionally serve as a destination if :ref:`glc<amdgpu_synid_glc>` is specified.
+
diff --git a/docs/AMDGPU/gfx7_rsrc_buf.rst b/docs/AMDGPU/gfx7_rsrc_buf.rst
new file mode 100644
index 0000000..7ebcebc
--- /dev/null
+++ b/docs/AMDGPU/gfx7_rsrc_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_rsrc_buf:
+
+srsrc
+===========================
+
+Buffer resource constant which defines the address and characteristics of the buffer in memory.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx7_rsrc_mimg.rst b/docs/AMDGPU/gfx7_rsrc_mimg.rst
new file mode 100644
index 0000000..b0e40fe
--- /dev/null
+++ b/docs/AMDGPU/gfx7_rsrc_mimg.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_rsrc_mimg:
+
+srsrc
+===========================
+
+Image resource constant which defines the location of the image buffer in memory, its dimensions, tiling, and data format.
+
+*Size:* 8 dwords by default, 4 dwords if :ref:`r128<amdgpu_synid_r128>` is specified.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx7_samp_mimg.rst b/docs/AMDGPU/gfx7_samp_mimg.rst
new file mode 100644
index 0000000..738cad4
--- /dev/null
+++ b/docs/AMDGPU/gfx7_samp_mimg.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_samp_mimg:
+
+ssamp
+===========================
+
+Sampler constant used to specify filtering options applied to the image data after it is read.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx7_sdst128_0.rst b/docs/AMDGPU/gfx7_sdst128_0.rst
new file mode 100644
index 0000000..735a30ce
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst128_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx7_sdst256_0.rst b/docs/AMDGPU/gfx7_sdst256_0.rst
new file mode 100644
index 0000000..c4e6f9f
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst256_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst256_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx7_sdst32_0.rst b/docs/AMDGPU/gfx7_sdst32_0.rst
new file mode 100644
index 0000000..183d89f
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst32_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx7_sdst32_1.rst b/docs/AMDGPU/gfx7_sdst32_1.rst
new file mode 100644
index 0000000..2e49e20
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst32_1:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx7_sdst32_2.rst b/docs/AMDGPU/gfx7_sdst32_2.rst
new file mode 100644
index 0000000..8212e5d
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst32_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst32_2:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx7_sdst512_0.rst b/docs/AMDGPU/gfx7_sdst512_0.rst
new file mode 100644
index 0000000..d8c64b1
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst512_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst512_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 16 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`
diff --git a/docs/AMDGPU/gfx7_sdst64_0.rst b/docs/AMDGPU/gfx7_sdst64_0.rst
new file mode 100644
index 0000000..af60880
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst64_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx7_sdst64_1.rst b/docs/AMDGPU/gfx7_sdst64_1.rst
new file mode 100644
index 0000000..207df73
--- /dev/null
+++ b/docs/AMDGPU/gfx7_sdst64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_sdst64_1:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx7_simm16.rst b/docs/AMDGPU/gfx7_simm16.rst
new file mode 100644
index 0000000..66e560e
--- /dev/null
+++ b/docs/AMDGPU/gfx7_simm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_simm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits and then sign-extended to 32 bits.
+
diff --git a/docs/AMDGPU/gfx7_src32_0.rst b/docs/AMDGPU/gfx7_src32_0.rst
new file mode 100644
index 0000000..22ff73d
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src32_0:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx7_src32_1.rst b/docs/AMDGPU/gfx7_src32_1.rst
new file mode 100644
index 0000000..0059459
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src32_1:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`iconst<amdgpu_synid_iconst>`
diff --git a/docs/AMDGPU/gfx7_src32_2.rst b/docs/AMDGPU/gfx7_src32_2.rst
new file mode 100644
index 0000000..b939c45
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src32_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src32_2:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx7_src32_3.rst b/docs/AMDGPU/gfx7_src32_3.rst
new file mode 100644
index 0000000..83aa9ca
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src32_3.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src32_3:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx7_src64_0.rst b/docs/AMDGPU/gfx7_src64_0.rst
new file mode 100644
index 0000000..a19b6ee
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src64_0:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx7_src64_1.rst b/docs/AMDGPU/gfx7_src64_1.rst
new file mode 100644
index 0000000..c81864c
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src64_1:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx7_src64_2.rst b/docs/AMDGPU/gfx7_src64_2.rst
new file mode 100644
index 0000000..189245e
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src64_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src64_2:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`iconst<amdgpu_synid_iconst>`
diff --git a/docs/AMDGPU/gfx7_src_exp.rst b/docs/AMDGPU/gfx7_src_exp.rst
new file mode 100644
index 0000000..32f71a8
--- /dev/null
+++ b/docs/AMDGPU/gfx7_src_exp.rst
@@ -0,0 +1,28 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_src_exp:
+
+vsrc
+===========================
+
+Data to copy to export buffers. This is an optional operand; it must be specified as :ref:`off<amdgpu_synid_off>` if not used.
+
+The :ref:`compr<amdgpu_synid_compr>` modifier indicates the use of compressed (16-bit) data and reduces the number of source operands from 4 to 2:
+
+* src0 and src1 must specify the first register (or :ref:`off<amdgpu_synid_off>`).
+* src2 and src3 must specify the second register (or :ref:`off<amdgpu_synid_off>`).
+
+An example:
+
+.. parsed-literal::
+
+  exp mrtz v3, v3, off, off compr
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`off<amdgpu_synid_off>`
diff --git a/docs/AMDGPU/gfx7_ssrc32_0.rst b/docs/AMDGPU/gfx7_ssrc32_0.rst
new file mode 100644
index 0000000..843db24
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc32_0:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx7_ssrc32_1.rst b/docs/AMDGPU/gfx7_ssrc32_1.rst
new file mode 100644
index 0000000..6e626d8
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc32_1:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx7_ssrc32_2.rst b/docs/AMDGPU/gfx7_ssrc32_2.rst
new file mode 100644
index 0000000..c7ff032
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc32_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc32_2:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx7_ssrc32_3.rst b/docs/AMDGPU/gfx7_ssrc32_3.rst
new file mode 100644
index 0000000..68a2415
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc32_3.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc32_3:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`iconst<amdgpu_synid_iconst>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx7_ssrc32_4.rst b/docs/AMDGPU/gfx7_ssrc32_4.rst
new file mode 100644
index 0000000..669ae4e
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc32_4.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc32_4:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`iconst<amdgpu_synid_iconst>`
diff --git a/docs/AMDGPU/gfx7_ssrc64_0.rst b/docs/AMDGPU/gfx7_ssrc64_0.rst
new file mode 100644
index 0000000..283e7ea
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc64_0:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx7_ssrc64_1.rst b/docs/AMDGPU/gfx7_ssrc64_1.rst
new file mode 100644
index 0000000..42dc8c5
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc64_1:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx7_ssrc64_2.rst b/docs/AMDGPU/gfx7_ssrc64_2.rst
new file mode 100644
index 0000000..344147f
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc64_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc64_2:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx7_ssrc64_3.rst b/docs/AMDGPU/gfx7_ssrc64_3.rst
new file mode 100644
index 0000000..173f550
--- /dev/null
+++ b/docs/AMDGPU/gfx7_ssrc64_3.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_ssrc64_3:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx7_tgt.rst b/docs/AMDGPU/gfx7_tgt.rst
new file mode 100644
index 0000000..c407c0c
--- /dev/null
+++ b/docs/AMDGPU/gfx7_tgt.rst
@@ -0,0 +1,31 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_tgt:
+
+tgt
+===========================
+
+An export target:
+
+    ============== ===================================
+    Syntax         Description
+    ============== ===================================
+    pos{0..3}      Copy vertex position 0..3.
+    param{0..31}   Copy vertex parameter 0..31.
+    mrt{0..7}      Copy pixel color to the MRTs 0..7.
+    mrtz           Copy pixel depth (Z) data.
+    null           Copy nothing.
+    ============== ===================================
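+
+Examples (an illustrative sketch; the source registers are arbitrary):
+
+.. parsed-literal::
+
+    exp mrt0 v0, v1, v2, v3
+    exp pos0 v4, v5, v6, v7 done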
+
diff --git a/docs/AMDGPU/gfx7_type_dev.rst b/docs/AMDGPU/gfx7_type_dev.rst
new file mode 100644
index 0000000..6eab0e1
--- /dev/null
+++ b/docs/AMDGPU/gfx7_type_dev.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_type_dev:
+
+Type deviation
+===========================
+
+The *type* of this operand differs from the *type* :ref:`implied by the opcode<amdgpu_syn_instruction_type>`; this tag specifies the actual operand *type*.
+
diff --git a/docs/AMDGPU/gfx7_uimm16.rst b/docs/AMDGPU/gfx7_uimm16.rst
new file mode 100644
index 0000000..bd0d4c2
--- /dev/null
+++ b/docs/AMDGPU/gfx7_uimm16.rst
@@ -0,0 +1,16 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_uimm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits and then zero-extended to 32 bits.
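+
+For example, 0x1FFFF is truncated to 0xFFFF and then zero-extended to 0x0000FFFF.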
+
diff --git a/docs/AMDGPU/gfx7_vcc_64.rst b/docs/AMDGPU/gfx7_vcc_64.rst
new file mode 100644
index 0000000..b1285e0
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vcc_64.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vcc_64:
+
+vcc
+===========================
+
+Vector condition code.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`vcc<amdgpu_synid_vcc>`
diff --git a/docs/AMDGPU/gfx7_vdata128_0.rst b/docs/AMDGPU/gfx7_vdata128_0.rst
new file mode 100644
index 0000000..5ed2b82
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdata128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdata128_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vdata32_0.rst b/docs/AMDGPU/gfx7_vdata32_0.rst
new file mode 100644
index 0000000..1615abb4
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdata32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdata32_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vdata64_0.rst b/docs/AMDGPU/gfx7_vdata64_0.rst
new file mode 100644
index 0000000..fceea9f
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdata64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdata64_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vdata96_0.rst b/docs/AMDGPU/gfx7_vdata96_0.rst
new file mode 100644
index 0000000..b9fe599
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdata96_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdata96_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vdst128_0.rst b/docs/AMDGPU/gfx7_vdst128_0.rst
new file mode 100644
index 0000000..c18652e
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdst128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdst128_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vdst32_0.rst b/docs/AMDGPU/gfx7_vdst32_0.rst
new file mode 100644
index 0000000..e936203
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdst32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdst32_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vdst64_0.rst b/docs/AMDGPU/gfx7_vdst64_0.rst
new file mode 100644
index 0000000..4cacaa0
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdst64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdst64_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vdst96_0.rst b/docs/AMDGPU/gfx7_vdst96_0.rst
new file mode 100644
index 0000000..3c5bf88
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vdst96_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vdst96_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vsrc128_0.rst b/docs/AMDGPU/gfx7_vsrc128_0.rst
new file mode 100644
index 0000000..9752379
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vsrc128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vsrc128_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vsrc32_0.rst b/docs/AMDGPU/gfx7_vsrc32_0.rst
new file mode 100644
index 0000000..93b12b0
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vsrc32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vsrc32_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_vsrc64_0.rst b/docs/AMDGPU/gfx7_vsrc64_0.rst
new file mode 100644
index 0000000..d8c9d45
--- /dev/null
+++ b/docs/AMDGPU/gfx7_vsrc64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_vsrc64_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx7_waitcnt.rst b/docs/AMDGPU/gfx7_waitcnt.rst
new file mode 100644
index 0000000..3f5e07d
--- /dev/null
+++ b/docs/AMDGPU/gfx7_waitcnt.rst
@@ -0,0 +1,57 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid7_waitcnt:
+
+waitcnt
+===========================
+
+Counts of outstanding instructions to wait for.
+
+The bits of this operand have the following meaning:
+
+    ============ ======================================================
+    Bits         Description
+    ============ ======================================================
+    3:0          VM_CNT: vector memory operations count.
+    6:4          EXP_CNT: export count.
+    12:8         LGKM_CNT: LDS, GDS, Constant and Message count.
+    ============ ======================================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>`
+or as a combination of the following symbolic helpers:
+
+    ====================== ======================================================================
+    Syntax                 Description
+    ====================== ======================================================================
+    vmcnt(<*N*>)           VM_CNT value. *N* must not exceed the largest VM_CNT value.
+    expcnt(<*N*>)          EXP_CNT value. *N* must not exceed the largest EXP_CNT value.
+    lgkmcnt(<*N*>)         LGKM_CNT value. *N* must not exceed the largest LGKM_CNT value.
+    vmcnt_sat(<*N*>)       VM_CNT value computed as min(*N*, the largest VM_CNT value).
+    expcnt_sat(<*N*>)      EXP_CNT value computed as min(*N*, the largest EXP_CNT value).
+    lgkmcnt_sat(<*N*>)     LGKM_CNT value computed as min(*N*, the largest LGKM_CNT value).
+    ====================== ======================================================================
+
+These helpers may be specified in any order. Ampersands and commas may be used as optional separators.
+
+*N* is either an
+:ref:`integer number<amdgpu_synid_integer_number>` or an
+:ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+
+Examples:
+
+.. parsed-literal::
+
+    s_waitcnt 0
+    s_waitcnt vmcnt(1)
+    s_waitcnt expcnt(2) lgkmcnt(3)
+    s_waitcnt vmcnt(1) expcnt(2) lgkmcnt(3)
+    s_waitcnt vmcnt(1), expcnt(2), lgkmcnt(3)
+    s_waitcnt vmcnt(1) & lgkmcnt_sat(100) & expcnt(2)
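+
+Given the bit layout above, *s_waitcnt vmcnt(1) expcnt(2) lgkmcnt(3)* is equivalent to the plain immediate form *s_waitcnt 0x321*, since (3 << 8) | (2 << 4) | 1 = 0x321.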
+
diff --git a/docs/AMDGPU/gfx8_addr_buf.rst b/docs/AMDGPU/gfx8_addr_buf.rst
new file mode 100644
index 0000000..74aa275
--- /dev/null
+++ b/docs/AMDGPU/gfx8_addr_buf.rst
@@ -0,0 +1,30 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_addr_buf:
+
+vaddr
+===========================
+
+This is an optional operand which may specify an offset, an index, or both.
+
+*Size:* 0, 1 or 2 dwords. Size is controlled by modifiers :ref:`offen<amdgpu_synid_offen>` and :ref:`idxen<amdgpu_synid_idxen>`:
+
+* If only :ref:`idxen<amdgpu_synid_idxen>` is specified, this operand supplies an index. Size is 1 dword.
+* If only :ref:`offen<amdgpu_synid_offen>` is specified, this operand supplies an offset. Size is 1 dword.
+* If both modifiers are specified, the index is in the first register and the offset is in the second. Size is 2 dwords.
+* If none of these modifiers are specified, this operand must be set to :ref:`off<amdgpu_synid_off>`.
+
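+Examples (an illustrative sketch; the register choices are arbitrary):
+
+.. parsed-literal::
+
+    buffer_load_dword v0, off, s[8:11], s1                // no offset or index
+    buffer_load_dword v0, v1, s[8:11], s1 offen           // v1 supplies an offset
+    buffer_load_dword v0, v[1:2], s[8:11], s1 idxen offen // v1 = index, v2 = offset
+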
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`off<amdgpu_synid_off>`
diff --git a/docs/AMDGPU/gfx8_addr_ds.rst b/docs/AMDGPU/gfx8_addr_ds.rst
new file mode 100644
index 0000000..7115ff0
--- /dev/null
+++ b/docs/AMDGPU/gfx8_addr_ds.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_addr_ds:
+
+vaddr
+===========================
+
+An offset from the start of GDS/LDS memory.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_addr_flat.rst b/docs/AMDGPU/gfx8_addr_flat.rst
new file mode 100644
index 0000000..53dfcc3
--- /dev/null
+++ b/docs/AMDGPU/gfx8_addr_flat.rst
@@ -0,0 +1,23 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_addr_flat:
+
+vaddr
+===========================
+
+A 64-bit flat address.
+
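+For example (illustrative; the register choices are arbitrary):
+
+.. parsed-literal::
+
+    flat_load_dword v0, v[1:2]
+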
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_addr_mimg.rst b/docs/AMDGPU/gfx8_addr_mimg.rst
new file mode 100644
index 0000000..f1052ba
--- /dev/null
+++ b/docs/AMDGPU/gfx8_addr_mimg.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_addr_mimg:
+
+vaddr
+===========================
+
+Image address which includes from one to four dimensional coordinates and other data used to locate a position in the image.
+
+*Size:* 1, 2, 3, 4, 8 or 16 dwords. The actual size depends on the opcode and the specific image being handled.
+
+    Note 1. Image format and dimensions are encoded in the image resource constant but not in the instruction.
+
+    Note 2. The actual image address size may vary from 1 to 13 dwords, but the assembler currently supports only a limited range of register sequences.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_attr.rst b/docs/AMDGPU/gfx8_attr.rst
new file mode 100644
index 0000000..12fa2cd
--- /dev/null
+++ b/docs/AMDGPU/gfx8_attr.rst
@@ -0,0 +1,30 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_attr:
+
+attr
+===========================
+
+Interpolation attribute and channel:
+
+    ============== ===================================
+    Syntax         Description
+    ============== ===================================
+    attr{0..32}.x  Attribute 0..32 with *x* channel.
+    attr{0..32}.y  Attribute 0..32 with *y* channel.
+    attr{0..32}.z  Attribute 0..32 with *z* channel.
+    attr{0..32}.w  Attribute 0..32 with *w* channel.
+    ============== ===================================
+
+Examples:
+
+.. parsed-literal::
+
+    v_interp_p1_f32 v1, v0, attr0.x
+    v_interp_p1_f32 v1, v0, attr32.w
+
diff --git a/docs/AMDGPU/gfx8_base_smem_addr.rst b/docs/AMDGPU/gfx8_base_smem_addr.rst
new file mode 100644
index 0000000..81ef255
--- /dev/null
+++ b/docs/AMDGPU/gfx8_base_smem_addr.rst
@@ -0,0 +1,23 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_base_smem_addr:
+
+sbase
+===========================
+
+A 64-bit base address for scalar memory operations.
+
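+For example (illustrative), *sbase* below is the register pair s[2:3]:
+
+.. parsed-literal::
+
+    s_load_dword s4, s[2:3], 0x10
+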
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_base_smem_buf.rst b/docs/AMDGPU/gfx8_base_smem_buf.rst
new file mode 100644
index 0000000..fb243d0
--- /dev/null
+++ b/docs/AMDGPU/gfx8_base_smem_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_base_smem_buf:
+
+sbase
+===========================
+
+A 128-bit buffer resource constant for scalar memory operations which provides a base address, a size and a stride.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx8_bimm16.rst b/docs/AMDGPU/gfx8_bimm16.rst
new file mode 100644
index 0000000..ed50e55
--- /dev/null
+++ b/docs/AMDGPU/gfx8_bimm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_bimm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits.
+
diff --git a/docs/AMDGPU/gfx8_bimm32.rst b/docs/AMDGPU/gfx8_bimm32.rst
new file mode 100644
index 0000000..d03c27b
--- /dev/null
+++ b/docs/AMDGPU/gfx8_bimm32.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_bimm32:
+
+imm32
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 32 bits.
+
diff --git a/docs/AMDGPU/gfx8_data_buf_atomic128.rst b/docs/AMDGPU/gfx8_data_buf_atomic128.rst
new file mode 100644
index 0000000..40b6d3a
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_buf_atomic128.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_buf_atomic128:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+It may optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand returns the memory value as it was before the operation.
+
+*Size:* 4 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_buf_atomic32.rst b/docs/AMDGPU/gfx8_data_buf_atomic32.rst
new file mode 100644
index 0000000..5112182
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_buf_atomic32.rst
@@ -0,0 +1,27 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_buf_atomic32:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+It may optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand returns the memory value as it was before the operation.
+
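+For example (an illustrative sketch), with :ref:`glc<amdgpu_synid_glc>` the pre-operation memory value is returned into the same *vdata* register:
+
+.. parsed-literal::
+
+    buffer_atomic_add v0, off, s[8:11], s1 glc
+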
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_buf_atomic64.rst b/docs/AMDGPU/gfx8_data_buf_atomic64.rst
new file mode 100644
index 0000000..107998e
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_buf_atomic64.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_buf_atomic64:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+It may optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand returns the memory value as it was before the operation.
+
+*Size:* 2 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_buf_d16_128.rst b/docs/AMDGPU/gfx8_data_buf_d16_128.rst
new file mode 100644
index 0000000..3c98a36
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_buf_d16_128.rst
@@ -0,0 +1,20 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_buf_d16_128:
+
+vdata
+===========================
+
+16-bit data to be stored by a buffer instruction.
+
+*Size:* depends on GFX8 GPU revision:
+
+* 4 dwords for GFX8.0. This H/W does not support packing.
+* 2 dwords for GFX8.1+. This H/W supports data packing.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_buf_d16_32.rst b/docs/AMDGPU/gfx8_data_buf_d16_32.rst
new file mode 100644
index 0000000..6432875
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_buf_d16_32.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_buf_d16_32:
+
+vdata
+===========================
+
+16-bit data to be stored by a buffer instruction.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_buf_d16_64.rst b/docs/AMDGPU/gfx8_data_buf_d16_64.rst
new file mode 100644
index 0000000..932ce69
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_buf_d16_64.rst
@@ -0,0 +1,20 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_buf_d16_64:
+
+vdata
+===========================
+
+16-bit data to be stored by a buffer instruction.
+
+*Size:* depends on GFX8 GPU revision:
+
+* 2 dwords for GFX8.0. This H/W does not support packing.
+* 1 dword for GFX8.1+. This H/W supports data packing.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_buf_d16_96.rst b/docs/AMDGPU/gfx8_data_buf_d16_96.rst
new file mode 100644
index 0000000..b9e6915
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_buf_d16_96.rst
@@ -0,0 +1,20 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_buf_d16_96:
+
+vdata
+===========================
+
+16-bit data to be stored by a buffer instruction.
+
+*Size:* depends on GFX8 GPU revision:
+
+* 3 dwords for GFX8.0. This H/W does not support packing.
+* 2 dwords for GFX8.1+. This H/W supports data packing.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_mimg_atomic_cmp.rst b/docs/AMDGPU/gfx8_data_mimg_atomic_cmp.rst
new file mode 100644
index 0000000..80222ea
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_mimg_atomic_cmp.rst
@@ -0,0 +1,27 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_mimg_atomic_cmp:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+It may optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand returns the memory value as it was before the operation.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify 2 data elements for 32-bit-per-pixel surfaces or 4 data elements for 64-bit-per-pixel surfaces. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+  Note. The surface data format is indicated in the image resource constant but not in the instruction.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_mimg_atomic_reg.rst b/docs/AMDGPU/gfx8_data_mimg_atomic_reg.rst
new file mode 100644
index 0000000..8baf926
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_mimg_atomic_reg.rst
@@ -0,0 +1,26 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_mimg_atomic_reg:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+It may optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand returns the memory value as it was before the operation.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify 1 data element for 32-bit-per-pixel surfaces or 2 data elements for 64-bit-per-pixel surfaces. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+  Note. The surface data format is indicated in the image resource constant but not in the instruction.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_mimg_store.rst b/docs/AMDGPU/gfx8_data_mimg_store.rst
new file mode 100644
index 0000000..65a1a49
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_mimg_store.rst
@@ -0,0 +1,18 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_mimg_store:
+
+vdata
+===========================
+
+Image data to be stored by an *image_store* instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` which may specify from 1 to 4 data elements. Each data element occupies 1 dword.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_data_mimg_store_d16.rst b/docs/AMDGPU/gfx8_data_mimg_store_d16.rst
new file mode 100644
index 0000000..7524c88
--- /dev/null
+++ b/docs/AMDGPU/gfx8_data_mimg_store_d16.rst
@@ -0,0 +1,24 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_data_mimg_store_d16:
+
+vdata
+===========================
+
+Image data to be stored by an *image_store* instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`d16<amdgpu_synid_d16>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify from 1 to 4 data elements. Each data element occupies either 32 bits or 16 bits depending on :ref:`d16<amdgpu_synid_d16>`.
+* :ref:`d16<amdgpu_synid_d16>` has different meaning for GFX8.0 and GFX8.1:
+
+  * For GFX8.0 this modifier does not affect the size of data elements in registers. Data in registers are stored in the low 16 bits; the high 16 bits are unused. There is no packing.
+  * Starting from GFX8.1 this modifier specifies that data elements in registers are packed; each value occupies 16 bits.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_128.rst b/docs/AMDGPU/gfx8_dst_buf_128.rst
new file mode 100644
index 0000000..d076c70
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_128.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_128:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 4 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_64.rst b/docs/AMDGPU/gfx8_dst_buf_64.rst
new file mode 100644
index 0000000..f5da65b
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_64.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_64:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 2 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_96.rst b/docs/AMDGPU/gfx8_dst_buf_96.rst
new file mode 100644
index 0000000..0012c1a
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_96.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_96:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 3 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_d16_128.rst b/docs/AMDGPU/gfx8_dst_buf_d16_128.rst
new file mode 100644
index 0000000..0f5318d
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_d16_128.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_d16_128:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer and converted to a 16-bit format.
+
+*Size:* depends on GFX8 GPU revision and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* 4 dwords for GFX8.0. This H/W does not support packing.
+* 2 dwords for GFX8.1+. This H/W supports data packing.
+* :ref:`tfe<amdgpu_synid_tfe>` adds one dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_d16_32.rst b/docs/AMDGPU/gfx8_dst_buf_d16_32.rst
new file mode 100644
index 0000000..6288c2d
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_d16_32.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_d16_32:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer and converted to a 16-bit format.
+
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_d16_64.rst b/docs/AMDGPU/gfx8_dst_buf_d16_64.rst
new file mode 100644
index 0000000..b46310f
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_d16_64.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_d16_64:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer and converted to a 16-bit format.
+
+*Size:* depends on GFX8 GPU revision and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* 2 dwords for GFX8.0. This H/W does not support packing.
+* 1 dword for GFX8.1+. This H/W supports data packing.
+* :ref:`tfe<amdgpu_synid_tfe>` adds one dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_d16_96.rst b/docs/AMDGPU/gfx8_dst_buf_d16_96.rst
new file mode 100644
index 0000000..15e7e89
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_d16_96.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_d16_96:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer and converted to a 16-bit format.
+
+*Size:* depends on GFX8 GPU revision and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* 3 dwords for GFX8.0. This H/W does not support packing.
+* 2 dwords for GFX8.1+. This H/W supports data packing.
+* :ref:`tfe<amdgpu_synid_tfe>` adds one dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_buf_lds.rst b/docs/AMDGPU/gfx8_dst_buf_lds.rst
new file mode 100644
index 0000000..b1cb145
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_buf_lds.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_buf_lds:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+If :ref:`lds<amdgpu_synid_lds>` is specified, this operand is ignored by the H/W and data are stored directly into LDS.
+
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+    Note that :ref:`tfe<amdgpu_synid_tfe>` and :ref:`lds<amdgpu_synid_lds>` cannot be used together.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_flat_atomic32.rst b/docs/AMDGPU/gfx8_dst_flat_atomic32.rst
new file mode 100644
index 0000000..a8ae464
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_flat_atomic32.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_flat_atomic32:
+
+vdst
+===========================
+
+Data returned by a 32-bit atomic flat instruction.
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_flat_atomic64.rst b/docs/AMDGPU/gfx8_dst_flat_atomic64.rst
new file mode 100644
index 0000000..5b46e88
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_flat_atomic64.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_flat_atomic64:
+
+vdst
+===========================
+
+Data returned by a 64-bit atomic flat instruction.
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_mimg_gather4.rst b/docs/AMDGPU/gfx8_dst_mimg_gather4.rst
new file mode 100644
index 0000000..6fc0192
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_mimg_gather4.rst
@@ -0,0 +1,26 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_mimg_gather4:
+
+vdst
+===========================
+
+Image data to be loaded by an *image_gather4* instruction.
+
+*Size:* 4 data elements by default. Each data element occupies either 32 bits or 16 bits depending on :ref:`d16<amdgpu_synid_d16>`.
+
+:ref:`d16<amdgpu_synid_d16>` and :ref:`tfe<amdgpu_synid_tfe>` affect operand size as follows:
+
+* :ref:`d16<amdgpu_synid_d16>` has different meaning for GFX8.0 and GFX8.1:
+
+  * For GFX8.0 this modifier does not affect the size of data elements in registers. Data in registers are stored in the low 16 bits; the high 16 bits are unused. There is no packing.
+  * Starting from GFX8.1 this modifier specifies that data elements in registers are packed; each value occupies 16 bits.
+
+* :ref:`tfe<amdgpu_synid_tfe>` adds one dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_mimg_regular.rst b/docs/AMDGPU/gfx8_dst_mimg_regular.rst
new file mode 100644
index 0000000..be1037a
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_mimg_regular.rst
@@ -0,0 +1,26 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_mimg_regular:
+
+vdst
+===========================
+
+Image data to be loaded by an image instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify from 1 to 4 data elements. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
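+For example (illustrative), a *dmask* of 0x7 selects 3 data elements, so *vdst* occupies 3 dwords:
+
+.. parsed-literal::
+
+    image_load v[4:6], v[0:3], s[8:15] dmask:0x7
+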
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_dst_mimg_regular_d16.rst b/docs/AMDGPU/gfx8_dst_mimg_regular_d16.rst
new file mode 100644
index 0000000..4eb7037
--- /dev/null
+++ b/docs/AMDGPU/gfx8_dst_mimg_regular_d16.rst
@@ -0,0 +1,26 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_dst_mimg_regular_d16:
+
+vdst
+===========================
+
+Image data to be loaded by an image instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>`, :ref:`tfe<amdgpu_synid_tfe>` and :ref:`d16<amdgpu_synid_d16>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify from 1 to 4 data elements. Each data element occupies either 32 bits or 16 bits depending on :ref:`d16<amdgpu_synid_d16>`.
+* :ref:`d16<amdgpu_synid_d16>` has different meaning for GFX8.0 and GFX8.1:
+
+  * For GFX8.0 this modifier does not affect the size of data elements in registers. Data in registers are stored in the low 16 bits; the high 16 bits are unused. There is no packing.
+  * Starting from GFX8.1 this modifier specifies that data elements in registers are packed; each value occupies 16 bits.
+
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_fimm16.rst b/docs/AMDGPU/gfx8_fimm16.rst
new file mode 100644
index 0000000..5e387f5
--- /dev/null
+++ b/docs/AMDGPU/gfx8_fimm16.rst
@@ -0,0 +1,16 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_fimm16:
+
+imm32
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>` or a :ref:`floating-point_number<amdgpu_synid_floating-point_number>`. The number is converted to *f16* as described :ref:`here<amdgpu_synid_lit_conv>`.
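+
+For example, 1.0 is encoded as the 16-bit value 0x3C00.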
+
diff --git a/docs/AMDGPU/gfx8_fimm32.rst b/docs/AMDGPU/gfx8_fimm32.rst
new file mode 100644
index 0000000..e29e770
--- /dev/null
+++ b/docs/AMDGPU/gfx8_fimm32.rst
@@ -0,0 +1,16 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_fimm32:
+
+imm32
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>` or a :ref:`floating-point_number<amdgpu_synid_floating-point_number>`. The value is converted to *f32* as described :ref:`here<amdgpu_synid_lit_conv>`.
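+
+For example, -2.0 is encoded as 0xC0000000.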
+
diff --git a/docs/AMDGPU/gfx8_hwreg.rst b/docs/AMDGPU/gfx8_hwreg.rst
new file mode 100644
index 0000000..ffa1ea5
--- /dev/null
+++ b/docs/AMDGPU/gfx8_hwreg.rst
@@ -0,0 +1,60 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_hwreg:
+
+hwreg
+===========================
+
+Bits of a hardware register being accessed.
+
+The bits of this operand have the following meaning:
+
+    ============ ===================================
+    Bits         Description
+    ============ ===================================
+    5:0          Register *id*.
+    10:6         First bit *offset* (0..31).
+    15:11        *Size* in bits (1..32).
+    ============ ===================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>` or using the syntax described below.
+
+    ==================================== ============================================================================
+    Syntax                               Description
+    ==================================== ============================================================================
+    hwreg({0..63})                       All bits of a register indicated by its *id*.
+    hwreg(<*name*>)                      All bits of a register indicated by its *name*.
+    hwreg({0..63}, {0..31}, {1..32})     Register bits indicated by register *id*, first bit *offset* and *size*.
+    hwreg(<*name*>, {0..31}, {1..32})    Register bits indicated by register *name*, first bit *offset* and *size*.
+    ==================================== ============================================================================
+
+Register *id*, *offset* and *size* must be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`.
+
+Defined register *names* include:
+
+    =================== ==========================================
+    Name                Description
+    =================== ==========================================
+    HW_REG_MODE         Shader writeable mode bits.
+    HW_REG_STATUS       Shader read-only status.
+    HW_REG_TRAPSTS      Trap status.
+    HW_REG_HW_ID        Id of wave, simd, compute unit, etc.
+    HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.
+    HW_REG_LDS_ALLOC    Per-wave LDS allocation.
+    HW_REG_IB_STS       Counters of outstanding instructions.
+    =================== ==========================================
+
+Examples:
+
+.. parsed-literal::
+
+    s_getreg_b32 s2, 0x6
+    s_getreg_b32 s2, hwreg(15)
+    s_getreg_b32 s2, hwreg(51, 1, 31)
+    s_getreg_b32 s2, hwreg(HW_REG_LDS_ALLOC, 0, 1)
+
diff --git a/docs/AMDGPU/gfx8_imm4.rst b/docs/AMDGPU/gfx8_imm4.rst
new file mode 100644
index 0000000..a03de76
--- /dev/null
+++ b/docs/AMDGPU/gfx8_imm4.rst
@@ -0,0 +1,25 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_imm4:
+
+imm4
+===========================
+
+A positive :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 4 bits.
+
+This operand is a mask which controls the indexing mode for operands of subsequent instructions. A bit value of 1 enables indexing of the corresponding operand; a value of 0 disables it.
+
+    ============ ========================================
+    Bit          Meaning
+    ============ ========================================
+    0            Enables or disables *src0* indexing.
+    1            Enables or disables *src1* indexing.
+    2            Enables or disables *src2* indexing.
+    3            Enables or disables *dst* indexing.
+    ============ ========================================
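+
+For example, the value 9 (binary 1001) enables *src0* and *dst* indexing. A hypothetical illustration:
+
+.. parsed-literal::
+
+    s_set_gpr_idx_mode 9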
+
diff --git a/docs/AMDGPU/gfx8_label.rst b/docs/AMDGPU/gfx8_label.rst
new file mode 100644
index 0000000..99e384e
--- /dev/null
+++ b/docs/AMDGPU/gfx8_label.rst
@@ -0,0 +1,30 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_label:
+
+label
+===========================
+
+A branch target which is a 16-bit signed integer treated as a PC-relative dword offset.
+
+This operand may be specified as:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>`. The number is truncated to 16 bits.
+* An :ref:`absolute_expression<amdgpu_synid_absolute_expression>` which must start with an :ref:`integer_number<amdgpu_synid_integer_number>`. The value of the expression is truncated to 16 bits.
+* A :ref:`symbol<amdgpu_synid_symbol>` (for example, a label). The value is handled as a 16-bit PC-relative dword offset to be resolved by a linker.
+
+Examples:
+
+.. parsed-literal::
+
+  offset = 30
+  s_branch loop_end
+  s_branch 2 + offset
+  s_branch 32
+  loop_end:
+
diff --git a/docs/AMDGPU/gfx8_mod_dpp_sdwa_abs_neg.rst b/docs/AMDGPU/gfx8_mod_dpp_sdwa_abs_neg.rst
new file mode 100644
index 0000000..be7b4b5
--- /dev/null
+++ b/docs/AMDGPU/gfx8_mod_dpp_sdwa_abs_neg.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_mod_dpp_sdwa_abs_neg:
+
+m
+===========================
+
+This operand may be used with floating point operand modifiers :ref:`abs<amdgpu_synid_abs>` and :ref:`neg<amdgpu_synid_neg>`.
+
diff --git a/docs/AMDGPU/gfx8_mod_sdwa_sext.rst b/docs/AMDGPU/gfx8_mod_sdwa_sext.rst
new file mode 100644
index 0000000..b48c521
--- /dev/null
+++ b/docs/AMDGPU/gfx8_mod_sdwa_sext.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_mod_sdwa_sext:
+
+m
+===========================
+
+This operand may be used with integer operand modifier :ref:`sext<amdgpu_synid_sext>`.
+
diff --git a/docs/AMDGPU/gfx8_mod_vop3_abs_neg.rst b/docs/AMDGPU/gfx8_mod_vop3_abs_neg.rst
new file mode 100644
index 0000000..960e8b1
--- /dev/null
+++ b/docs/AMDGPU/gfx8_mod_vop3_abs_neg.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_mod_vop3_abs_neg:
+
+m
+===========================
+
+This operand may be used with floating point operand modifiers :ref:`abs<amdgpu_synid_abs>` and :ref:`neg<amdgpu_synid_neg>`.
+
diff --git a/docs/AMDGPU/gfx8_msg.rst b/docs/AMDGPU/gfx8_msg.rst
new file mode 100644
index 0000000..313d8e6
--- /dev/null
+++ b/docs/AMDGPU/gfx8_msg.rst
@@ -0,0 +1,72 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_msg:
+
+msg
+===========================
+
+A 16-bit message code. The bits of this operand have the following meaning:
+
+    ============ ======================================================
+    Bits         Description
+    ============ ======================================================
+    3:0          Message *type*.
+    6:4          Optional *operation*.
+    9:7          Optional *parameters*.
+    15:10        Unused.
+    ============ ======================================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>` or using the syntax described below:
+
+    ======================================== ========================================================================
+    Syntax                                   Description
+    ======================================== ========================================================================
+    sendmsg(<*type*>)                        A message identified by its *type*.
+    sendmsg(<*type*>, <*op*>)                A message identified by its *type* and *operation*.
+    sendmsg(<*type*>, <*op*>, <*stream*>)    A message identified by its *type* and *operation* with a stream *id*.
+    ======================================== ========================================================================
+
+*Type* may be specified using message *name* or message *id*.
+
+*Op* may be specified using operation *name* or operation *id*.
+
+Stream *id* is an integer in the range 0..3.
+
+Message *id*, operation *id* and stream *id* must be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`.
+
+Each message type supports specific operations:
+
+    ================= ========== ============================== ============ ==========
+    Message name      Message Id Supported Operations           Operation Id Stream Id
+    ================= ========== ============================== ============ ==========
+    MSG_INTERRUPT     1          \-                             \-           \-
+    MSG_GS            2          GS_OP_CUT                      1            Optional
+    \                            GS_OP_EMIT                     2            Optional
+    \                            GS_OP_EMIT_CUT                 3            Optional
+    MSG_GS_DONE       3          GS_OP_NOP                      0            \-
+    \                            GS_OP_CUT                      1            Optional
+    \                            GS_OP_EMIT                     2            Optional
+    \                            GS_OP_EMIT_CUT                 3            Optional
+    MSG_SYSMSG        15         SYSMSG_OP_ECC_ERR_INTERRUPT    1            \-
+    \                            SYSMSG_OP_REG_RD               2            \-
+    \                            SYSMSG_OP_HOST_TRAP_ACK        3            \-
+    \                            SYSMSG_OP_TTRACE_PC            4            \-
+    ================= ========== ============================== ============ ==========
+
+Examples:
+
+.. parsed-literal::
+
+    s_sendmsg 0x12
+    s_sendmsg sendmsg(MSG_INTERRUPT)
+    s_sendmsg sendmsg(2, GS_OP_CUT)
+    s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT)
+    s_sendmsg sendmsg(MSG_GS, 2)
+    s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_EMIT_CUT, 1)
+    s_sendmsg sendmsg(MSG_SYSMSG, SYSMSG_OP_TTRACE_PC)
+
diff --git a/docs/AMDGPU/gfx8_offset_buf.rst b/docs/AMDGPU/gfx8_offset_buf.rst
new file mode 100644
index 0000000..42c4524
--- /dev/null
+++ b/docs/AMDGPU/gfx8_offset_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_offset_buf:
+
+soffset
+===========================
+
+An unsigned byte offset.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx8_offset_smem_load.rst b/docs/AMDGPU/gfx8_offset_smem_load.rst
new file mode 100644
index 0000000..5c30a87
--- /dev/null
+++ b/docs/AMDGPU/gfx8_offset_smem_load.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_offset_smem_load:
+
+soffset
+===========================
+
+An unsigned byte offset added to the base address to compute the memory address.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`uimm20<amdgpu_synid_uimm20>`
diff --git a/docs/AMDGPU/gfx8_offset_smem_store.rst b/docs/AMDGPU/gfx8_offset_smem_store.rst
new file mode 100644
index 0000000..9ff90f9
--- /dev/null
+++ b/docs/AMDGPU/gfx8_offset_smem_store.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_offset_smem_store:
+
+soffset
+===========================
+
+An unsigned byte offset added to the base address to compute the memory address.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`m0<amdgpu_synid_m0>`, :ref:`uimm20<amdgpu_synid_uimm20>`
diff --git a/docs/AMDGPU/gfx8_opt.rst b/docs/AMDGPU/gfx8_opt.rst
new file mode 100644
index 0000000..417d7fa
--- /dev/null
+++ b/docs/AMDGPU/gfx8_opt.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_opt:
+
+opt
+===========================
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
diff --git a/docs/AMDGPU/gfx8_param.rst b/docs/AMDGPU/gfx8_param.rst
new file mode 100644
index 0000000..0bd8854
--- /dev/null
+++ b/docs/AMDGPU/gfx8_param.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_param:
+
+param
+===========================
+
+Interpolation parameter to read:
+
+    ============ ===================================
+    Syntax       Description
+    ============ ===================================
+    p0           Parameter *P0*.
+    p10          Parameter *P10*.
+    p20          Parameter *P20*.
+    ============ ===================================
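+
+For example (an illustrative sketch):
+
+.. parsed-literal::
+
+    v_interp_mov_f32 v1, p10, attr0.x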
+
diff --git a/docs/AMDGPU/gfx8_perm_smem.rst b/docs/AMDGPU/gfx8_perm_smem.rst
new file mode 100644
index 0000000..0035ac8
--- /dev/null
+++ b/docs/AMDGPU/gfx8_perm_smem.rst
@@ -0,0 +1,24 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_perm_smem:
+
+imm3
+===========================
+
+A bit mask which indicates request permissions.
+
+This operand must be specified as an :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 7 bits, but only the 3 low bits are significant.
+
+    ============ ==============================
+    Bit Number   Description
+    ============ ==============================
+    0            Request *read* permission.
+    1            Request *write* permission.
+    2            Request *execute* permission.
+    ============ ==============================
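+
+For example, the value 3 requests *read* and *write* permissions. A hypothetical illustration, assuming the usual SMEM operand order of data, base and offset:
+
+.. parsed-literal::
+
+    s_atc_probe 0x3, s[4:5], 0x0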
+
diff --git a/docs/AMDGPU/gfx8_ret.rst b/docs/AMDGPU/gfx8_ret.rst
new file mode 100644
index 0000000..91fdaf3
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ret.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ret:
+
+dst
+===========================
+
+This is an input operand. It may optionally serve as a destination if :ref:`glc<amdgpu_synid_glc>` is specified.
+
diff --git a/docs/AMDGPU/gfx8_rsrc_buf.rst b/docs/AMDGPU/gfx8_rsrc_buf.rst
new file mode 100644
index 0000000..ecdb0a0
--- /dev/null
+++ b/docs/AMDGPU/gfx8_rsrc_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_rsrc_buf:
+
+srsrc
+===========================
+
+Buffer resource constant which defines the address and characteristics of the buffer in memory.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx8_rsrc_mimg.rst b/docs/AMDGPU/gfx8_rsrc_mimg.rst
new file mode 100644
index 0000000..3ca2559
--- /dev/null
+++ b/docs/AMDGPU/gfx8_rsrc_mimg.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_rsrc_mimg:
+
+srsrc
+===========================
+
+Image resource constant which defines the location of the image buffer in memory, its dimensions, tiling, and data format.
+
+*Size:* 8 dwords by default, 4 dwords if :ref:`r128<amdgpu_synid_r128>` is specified.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx8_samp_mimg.rst b/docs/AMDGPU/gfx8_samp_mimg.rst
new file mode 100644
index 0000000..c4b2712
--- /dev/null
+++ b/docs/AMDGPU/gfx8_samp_mimg.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_samp_mimg:
+
+ssamp
+===========================
+
+Sampler constant used to specify filtering options applied to the image data after it is read.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx8_sdata128_0.rst b/docs/AMDGPU/gfx8_sdata128_0.rst
new file mode 100644
index 0000000..a52703c
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdata128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdata128_0:
+
+sdata
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx8_sdata32_0.rst b/docs/AMDGPU/gfx8_sdata32_0.rst
new file mode 100644
index 0000000..9ccd7bd
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdata32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdata32_0:
+
+sdata
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_sdata64_0.rst b/docs/AMDGPU/gfx8_sdata64_0.rst
new file mode 100644
index 0000000..8718449
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdata64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdata64_0:
+
+sdata
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_sdst128_0.rst b/docs/AMDGPU/gfx8_sdst128_0.rst
new file mode 100644
index 0000000..277e3db
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst128_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx8_sdst256_0.rst b/docs/AMDGPU/gfx8_sdst256_0.rst
new file mode 100644
index 0000000..2e54b9b
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst256_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst256_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx8_sdst32_0.rst b/docs/AMDGPU/gfx8_sdst32_0.rst
new file mode 100644
index 0000000..44e6cdc
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst32_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_sdst32_1.rst b/docs/AMDGPU/gfx8_sdst32_1.rst
new file mode 100644
index 0000000..7156225
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst32_1:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx8_sdst32_2.rst b/docs/AMDGPU/gfx8_sdst32_2.rst
new file mode 100644
index 0000000..af446d3
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst32_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst32_2:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_sdst512_0.rst b/docs/AMDGPU/gfx8_sdst512_0.rst
new file mode 100644
index 0000000..95b82a7
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst512_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst512_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 16 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`
diff --git a/docs/AMDGPU/gfx8_sdst64_0.rst b/docs/AMDGPU/gfx8_sdst64_0.rst
new file mode 100644
index 0000000..9195778
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst64_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_sdst64_1.rst b/docs/AMDGPU/gfx8_sdst64_1.rst
new file mode 100644
index 0000000..165e0c0
--- /dev/null
+++ b/docs/AMDGPU/gfx8_sdst64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_sdst64_1:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx8_simm16.rst b/docs/AMDGPU/gfx8_simm16.rst
new file mode 100644
index 0000000..730f239
--- /dev/null
+++ b/docs/AMDGPU/gfx8_simm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_simm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits and then sign-extended to 32 bits.
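+
+For example, s_movk_i32 takes this operand (an illustrative sketch):
+
+.. parsed-literal::
+
+    s_movk_i32 s0, 0x3141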
+
diff --git a/docs/AMDGPU/gfx8_src32_0.rst b/docs/AMDGPU/gfx8_src32_0.rst
new file mode 100644
index 0000000..a9c11fe
--- /dev/null
+++ b/docs/AMDGPU/gfx8_src32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_src32_0:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx8_src32_1.rst b/docs/AMDGPU/gfx8_src32_1.rst
new file mode 100644
index 0000000..67dcdc8
--- /dev/null
+++ b/docs/AMDGPU/gfx8_src32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_src32_1:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx8_src64_0.rst b/docs/AMDGPU/gfx8_src64_0.rst
new file mode 100644
index 0000000..573fd68
--- /dev/null
+++ b/docs/AMDGPU/gfx8_src64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_src64_0:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx8_src64_1.rst b/docs/AMDGPU/gfx8_src64_1.rst
new file mode 100644
index 0000000..d2c78b7
--- /dev/null
+++ b/docs/AMDGPU/gfx8_src64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_src64_1:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx8_src_exp.rst b/docs/AMDGPU/gfx8_src_exp.rst
new file mode 100644
index 0000000..10449b4
--- /dev/null
+++ b/docs/AMDGPU/gfx8_src_exp.rst
@@ -0,0 +1,28 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_src_exp:
+
+vsrc
+===========================
+
+Data to copy to export buffers. This is an optional operand; it must be specified as :ref:`off<amdgpu_synid_off>` if not used.
+
+The :ref:`compr<amdgpu_synid_compr>` modifier indicates use of compressed (16-bit) data and reduces the number of source operands from 4 to 2:
+
+* src0 and src1 must specify the first register (or :ref:`off<amdgpu_synid_off>`).
+* src2 and src3 must specify the second register (or :ref:`off<amdgpu_synid_off>`).
+
+An example:
+
+.. parsed-literal::
+
+  exp mrtz v3, v3, off, off compr
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`off<amdgpu_synid_off>`
diff --git a/docs/AMDGPU/gfx8_ssrc32_0.rst b/docs/AMDGPU/gfx8_ssrc32_0.rst
new file mode 100644
index 0000000..82d18b1
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc32_0:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx8_ssrc32_1.rst b/docs/AMDGPU/gfx8_ssrc32_1.rst
new file mode 100644
index 0000000..203d9c5
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc32_1:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_ssrc32_2.rst b/docs/AMDGPU/gfx8_ssrc32_2.rst
new file mode 100644
index 0000000..9b893e9
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc32_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc32_2:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx8_ssrc32_3.rst b/docs/AMDGPU/gfx8_ssrc32_3.rst
new file mode 100644
index 0000000..131765f
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc32_3.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc32_3:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`iconst<amdgpu_synid_iconst>`
diff --git a/docs/AMDGPU/gfx8_ssrc32_4.rst b/docs/AMDGPU/gfx8_ssrc32_4.rst
new file mode 100644
index 0000000..02d90a4
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc32_4.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc32_4:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx8_ssrc64_0.rst b/docs/AMDGPU/gfx8_ssrc64_0.rst
new file mode 100644
index 0000000..b8389dc
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc64_0:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx8_ssrc64_1.rst b/docs/AMDGPU/gfx8_ssrc64_1.rst
new file mode 100644
index 0000000..c4fddf4
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc64_1:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`
diff --git a/docs/AMDGPU/gfx8_ssrc64_2.rst b/docs/AMDGPU/gfx8_ssrc64_2.rst
new file mode 100644
index 0000000..209dffc
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc64_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc64_2:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx8_ssrc64_3.rst b/docs/AMDGPU/gfx8_ssrc64_3.rst
new file mode 100644
index 0000000..9ab5436
--- /dev/null
+++ b/docs/AMDGPU/gfx8_ssrc64_3.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_ssrc64_3:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`trap<amdgpu_synid_trap>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx8_tgt.rst b/docs/AMDGPU/gfx8_tgt.rst
new file mode 100644
index 0000000..1be54a7
--- /dev/null
+++ b/docs/AMDGPU/gfx8_tgt.rst
@@ -0,0 +1,24 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_tgt:
+
+tgt
+===========================
+
+An export target:
+
+    ============== ===================================
+    Syntax         Description
+    ============== ===================================
+    pos{0..3}      Copy vertex position 0..3.
+    param{0..31}   Copy vertex parameter 0..31.
+    mrt{0..7}      Copy pixel color to the MRTs 0..7.
+    mrtz           Copy pixel depth (Z) data.
+    null           Copy nothing.
+    ============== ===================================
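+
+Examples (illustrative sketches):
+
+.. parsed-literal::
+
+    exp pos0 v0, v1, v2, v3
+    exp mrt0 v0, v0, v0, v0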
+
diff --git a/docs/AMDGPU/gfx8_type_dev.rst b/docs/AMDGPU/gfx8_type_dev.rst
new file mode 100644
index 0000000..2f5b36f
--- /dev/null
+++ b/docs/AMDGPU/gfx8_type_dev.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_type_dev:
+
+Type deviation
+===========================
+
+The *type* of this operand differs from the *type* :ref:`implied by the opcode<amdgpu_syn_instruction_type>`. This tag specifies the actual operand *type*.
+
diff --git a/docs/AMDGPU/gfx8_uimm16.rst b/docs/AMDGPU/gfx8_uimm16.rst
new file mode 100644
index 0000000..a20abcc
--- /dev/null
+++ b/docs/AMDGPU/gfx8_uimm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_uimm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits and then zero-extended to 32 bits.
+
diff --git a/docs/AMDGPU/gfx8_vcc_64.rst b/docs/AMDGPU/gfx8_vcc_64.rst
new file mode 100644
index 0000000..e31df0e
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vcc_64.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vcc_64:
+
+vcc
+===========================
+
+Vector condition code.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`vcc<amdgpu_synid_vcc>`
diff --git a/docs/AMDGPU/gfx8_vdata128_0.rst b/docs/AMDGPU/gfx8_vdata128_0.rst
new file mode 100644
index 0000000..bf7e3db
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdata128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdata128_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vdata32_0.rst b/docs/AMDGPU/gfx8_vdata32_0.rst
new file mode 100644
index 0000000..b89d65b
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdata32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdata32_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vdata64_0.rst b/docs/AMDGPU/gfx8_vdata64_0.rst
new file mode 100644
index 0000000..4380544
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdata64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdata64_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vdata96_0.rst b/docs/AMDGPU/gfx8_vdata96_0.rst
new file mode 100644
index 0000000..b8ad22d
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdata96_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdata96_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vdst128_0.rst b/docs/AMDGPU/gfx8_vdst128_0.rst
new file mode 100644
index 0000000..1eccc95
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdst128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdst128_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vdst32_0.rst b/docs/AMDGPU/gfx8_vdst32_0.rst
new file mode 100644
index 0000000..781fcb6
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdst32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdst32_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vdst64_0.rst b/docs/AMDGPU/gfx8_vdst64_0.rst
new file mode 100644
index 0000000..af2dfe9
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdst64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdst64_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vdst96_0.rst b/docs/AMDGPU/gfx8_vdst96_0.rst
new file mode 100644
index 0000000..4895b65
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vdst96_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vdst96_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vsrc128_0.rst b/docs/AMDGPU/gfx8_vsrc128_0.rst
new file mode 100644
index 0000000..25b1794
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vsrc128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vsrc128_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vsrc32_0.rst b/docs/AMDGPU/gfx8_vsrc32_0.rst
new file mode 100644
index 0000000..524f36a
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vsrc32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vsrc32_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_vsrc64_0.rst b/docs/AMDGPU/gfx8_vsrc64_0.rst
new file mode 100644
index 0000000..7c2c39f
--- /dev/null
+++ b/docs/AMDGPU/gfx8_vsrc64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_vsrc64_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx8_waitcnt.rst b/docs/AMDGPU/gfx8_waitcnt.rst
new file mode 100644
index 0000000..4bad594
--- /dev/null
+++ b/docs/AMDGPU/gfx8_waitcnt.rst
@@ -0,0 +1,55 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid8_waitcnt:
+
+waitcnt
+===========================
+
+Counts of outstanding instructions to wait for.
+
+The bits of this operand have the following meaning:
+
+    ============ ======================================================
+    Bits         Description
+    ============ ======================================================
+    3:0          VM_CNT: vector memory operations count.
+    6:4          EXP_CNT: export count.
+    11:8         LGKM_CNT: LDS, GDS, Constant and Message count.
+    ============ ======================================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>`
+or as a combination of the following symbolic helpers:
+
+    ====================== ======================================================================
+    Syntax                 Description
+    ====================== ======================================================================
+    vmcnt(<*N*>)           VM_CNT value. *N* must not exceed the largest VM_CNT value.
+    expcnt(<*N*>)          EXP_CNT value. *N* must not exceed the largest EXP_CNT value.
+    lgkmcnt(<*N*>)         LGKM_CNT value. *N* must not exceed the largest LGKM_CNT value.
+    vmcnt_sat(<*N*>)       VM_CNT value computed as min(*N*, the largest VM_CNT value).
+    expcnt_sat(<*N*>)      EXP_CNT value computed as min(*N*, the largest EXP_CNT value).
+    lgkmcnt_sat(<*N*>)     LGKM_CNT value computed as min(*N*, the largest LGKM_CNT value).
+    ====================== ======================================================================
+
+These helpers may be specified in any order. Ampersands and commas may be used as optional separators.
+
+*N* is either an
+:ref:`integer number<amdgpu_synid_integer_number>` or an
+:ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+
+Examples:
+
+.. parsed-literal::
+
+    s_waitcnt 0
+    s_waitcnt vmcnt(1)
+    s_waitcnt expcnt(2) lgkmcnt(3)
+    s_waitcnt vmcnt(1) expcnt(2) lgkmcnt(3)
+    s_waitcnt vmcnt(1), expcnt(2), lgkmcnt(3)
+    s_waitcnt vmcnt(1) & lgkmcnt_sat(100) & expcnt(2)
+
diff --git a/docs/AMDGPU/gfx9_addr_buf.rst b/docs/AMDGPU/gfx9_addr_buf.rst
new file mode 100644
index 0000000..c253640
--- /dev/null
+++ b/docs/AMDGPU/gfx9_addr_buf.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_addr_buf:
+
+vaddr
+===========================
+
+This is an optional operand which may supply an offset and/or an index.
+
+*Size:* 0, 1 or 2 dwords. Size is controlled by modifiers :ref:`offen<amdgpu_synid_offen>` and :ref:`idxen<amdgpu_synid_idxen>`:
+
+* If only :ref:`idxen<amdgpu_synid_idxen>` is specified, this operand supplies an index. Size is 1 dword.
+* If only :ref:`offen<amdgpu_synid_offen>` is specified, this operand supplies an offset. Size is 1 dword.
+* If both modifiers are specified, index is in the first register and offset is in the second. Size is 2 dwords.
+* If neither modifier is specified, this operand must be set to :ref:`off<amdgpu_synid_off>`.
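+
+For example (illustrative sketches; the register numbers are arbitrary):
+
+.. parsed-literal::
+
+    buffer_load_dword v5, v0, s[8:11], s3 offen
+    buffer_load_dword v5, v[0:1], s[8:11], s3 idxen offen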
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`off<amdgpu_synid_off>`
diff --git a/docs/AMDGPU/gfx9_addr_ds.rst b/docs/AMDGPU/gfx9_addr_ds.rst
new file mode 100644
index 0000000..1174246
--- /dev/null
+++ b/docs/AMDGPU/gfx9_addr_ds.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_addr_ds:
+
+vaddr
+===========================
+
+An offset from the start of GDS/LDS memory.
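+
+For example (an illustrative sketch):
+
+.. parsed-literal::
+
+    ds_read_b32 v5, v1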
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_addr_flat.rst b/docs/AMDGPU/gfx9_addr_flat.rst
new file mode 100644
index 0000000..c748d07
--- /dev/null
+++ b/docs/AMDGPU/gfx9_addr_flat.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_addr_flat:
+
+vaddr
+===========================
+
+A 64-bit flat address.
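+
+For example (an illustrative sketch):
+
+.. parsed-literal::
+
+    flat_load_dword v5, v[1:2]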
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_addr_mimg.rst b/docs/AMDGPU/gfx9_addr_mimg.rst
new file mode 100644
index 0000000..eb6ca88
--- /dev/null
+++ b/docs/AMDGPU/gfx9_addr_mimg.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_addr_mimg:
+
+vaddr
+===========================
+
+Image address which includes from one to four dimensional coordinates and other data used to locate a position in the image.
+
+*Size:* 1, 2, 3, 4, 8 or 16 dwords. The actual size depends on the opcode, the specific image being handled and :ref:`a16<amdgpu_synid_a16>`.
+
+  Note 1. Image format and dimensions are encoded in the image resource constant but not in the instruction.
+
+  Note 2. The actual image address size may vary from 1 to 13 dwords, but the assembler currently supports a limited range of register sequences.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_attr.rst b/docs/AMDGPU/gfx9_attr.rst
new file mode 100644
index 0000000..faffcc7
--- /dev/null
+++ b/docs/AMDGPU/gfx9_attr.rst
@@ -0,0 +1,30 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_attr:
+
+attr
+===========================
+
+Interpolation attribute and channel:
+
+    ============== ===================================
+    Syntax         Description
+    ============== ===================================
+    attr{0..32}.x  Attribute 0..32 with *x* channel.
+    attr{0..32}.y  Attribute 0..32 with *y* channel.
+    attr{0..32}.z  Attribute 0..32 with *z* channel.
+    attr{0..32}.w  Attribute 0..32 with *w* channel.
+    ============== ===================================
+
+Examples:
+
+.. parsed-literal::
+
+    v_interp_p1_f32 v1, v0, attr0.x
+    v_interp_p1_f32 v1, v0, attr32.w
+
diff --git a/docs/AMDGPU/gfx9_base_smem_addr.rst b/docs/AMDGPU/gfx9_base_smem_addr.rst
new file mode 100644
index 0000000..63c2cbd
--- /dev/null
+++ b/docs/AMDGPU/gfx9_base_smem_addr.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_base_smem_addr:
+
+sbase
+===========================
+
+A 64-bit base address for scalar memory operations.
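+
+For example (an illustrative sketch):
+
+.. parsed-literal::
+
+    s_load_dword s5, s[2:3], 0x0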
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_base_smem_buf.rst b/docs/AMDGPU/gfx9_base_smem_buf.rst
new file mode 100644
index 0000000..191ecba
--- /dev/null
+++ b/docs/AMDGPU/gfx9_base_smem_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_base_smem_buf:
+
+sbase
+===========================
+
+A 128-bit buffer resource constant for scalar memory operations which provides a base address, a size and a stride.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
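+
+A hand-written example (registers are arbitrary):
+
+.. parsed-literal::
+
+    s_buffer_load_dword s0, s[4:7], 0x0      // s[4:7] holds the buffer resource constant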
diff --git a/docs/AMDGPU/gfx9_base_smem_scratch.rst b/docs/AMDGPU/gfx9_base_smem_scratch.rst
new file mode 100644
index 0000000..83fd760
--- /dev/null
+++ b/docs/AMDGPU/gfx9_base_smem_scratch.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_base_smem_scratch:
+
+sbase
+===========================
+
+This operand is ignored by H/W and :ref:`flat_scratch<amdgpu_synid_flat_scratch>` is supplied instead.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_bimm16.rst b/docs/AMDGPU/gfx9_bimm16.rst
new file mode 100644
index 0000000..2c9dc5c
--- /dev/null
+++ b/docs/AMDGPU/gfx9_bimm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_bimm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits.
+
diff --git a/docs/AMDGPU/gfx9_bimm32.rst b/docs/AMDGPU/gfx9_bimm32.rst
new file mode 100644
index 0000000..e9b8967
--- /dev/null
+++ b/docs/AMDGPU/gfx9_bimm32.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_bimm32:
+
+imm32
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 32 bits.
+
diff --git a/docs/AMDGPU/gfx9_data_buf_atomic128.rst b/docs/AMDGPU/gfx9_data_buf_atomic128.rst
new file mode 100644
index 0000000..11c7c73
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_buf_atomic128.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_buf_atomic128:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* 4 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_data_buf_atomic32.rst b/docs/AMDGPU/gfx9_data_buf_atomic32.rst
new file mode 100644
index 0000000..7b7d88b
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_buf_atomic32.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_buf_atomic32:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
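+
+Hand-written examples (registers are arbitrary):
+
+.. parsed-literal::
+
+    buffer_atomic_add v0, off, s[4:7], s0          // no data returned
+    buffer_atomic_add v0, off, s[4:7], s0 glc      // v0 receives the pre-operation value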
diff --git a/docs/AMDGPU/gfx9_data_buf_atomic64.rst b/docs/AMDGPU/gfx9_data_buf_atomic64.rst
new file mode 100644
index 0000000..71b2b4b
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_buf_atomic64.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_buf_atomic64:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* 2 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_data_mimg_atomic_cmp.rst b/docs/AMDGPU/gfx9_data_mimg_atomic_cmp.rst
new file mode 100644
index 0000000..08fe297
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_mimg_atomic_cmp.rst
@@ -0,0 +1,27 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_mimg_atomic_cmp:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify 2 data elements for 32-bit-per-pixel surfaces or 4 data elements for 64-bit-per-pixel surfaces. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+  Note. The surface data format is indicated in the image resource constant but not in the instruction.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_data_mimg_atomic_reg.rst b/docs/AMDGPU/gfx9_data_mimg_atomic_reg.rst
new file mode 100644
index 0000000..2037dfd
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_mimg_atomic_reg.rst
@@ -0,0 +1,26 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_mimg_atomic_reg:
+
+vdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify 1 data element for 32-bit-per-pixel surfaces or 2 data elements for 64-bit-per-pixel surfaces. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+  Note. The surface data format is indicated in the image resource constant but not in the instruction.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_data_mimg_store.rst b/docs/AMDGPU/gfx9_data_mimg_store.rst
new file mode 100644
index 0000000..5e2b8b6
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_mimg_store.rst
@@ -0,0 +1,18 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_mimg_store:
+
+vdata
+===========================
+
+Image data to store by an *image_store* instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` which may specify from 1 to 4 data elements. Each data element occupies 1 dword.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
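+
+A hand-written example (registers and dmask are arbitrary):
+
+.. parsed-literal::
+
+    image_store v[0:3], v[4:7], s[8:15] dmask:0xf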
diff --git a/docs/AMDGPU/gfx9_data_mimg_store_d16.rst b/docs/AMDGPU/gfx9_data_mimg_store_d16.rst
new file mode 100644
index 0000000..5c521f8
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_mimg_store_d16.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_mimg_store_d16:
+
+vdata
+===========================
+
+Image data to store by an *image_store* instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`d16<amdgpu_synid_d16>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify from 1 to 4 data elements. Each data element occupies either 32 bits or 16 bits depending on :ref:`d16<amdgpu_synid_d16>`.
+* :ref:`d16<amdgpu_synid_d16>` specifies that data in registers are packed; each value occupies 16 bits.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_data_smem_atomic128.rst b/docs/AMDGPU/gfx9_data_smem_atomic128.rst
new file mode 100644
index 0000000..6773618
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_smem_atomic128.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_smem_atomic128:
+
+sdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_data_smem_atomic32.rst b/docs/AMDGPU/gfx9_data_smem_atomic32.rst
new file mode 100644
index 0000000..9ad25f3
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_smem_atomic32.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_smem_atomic32:
+
+sdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
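+
+A hand-written example, assuming *s_atomic_add* (registers and offset are arbitrary):
+
+.. parsed-literal::
+
+    s_atomic_add s4, s[2:3], 0x10 glc      // s4 receives the pre-operation value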
diff --git a/docs/AMDGPU/gfx9_data_smem_atomic64.rst b/docs/AMDGPU/gfx9_data_smem_atomic64.rst
new file mode 100644
index 0000000..6f67bff
--- /dev/null
+++ b/docs/AMDGPU/gfx9_data_smem_atomic64.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_data_smem_atomic64:
+
+sdata
+===========================
+
+Input data for an atomic instruction.
+
+May optionally serve as output data:
+
+* If :ref:`glc<amdgpu_synid_glc>` is specified, this operand receives the memory value as it was before the atomic operation.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_dst_buf_128.rst b/docs/AMDGPU/gfx9_dst_buf_128.rst
new file mode 100644
index 0000000..691be0f
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_buf_128.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_buf_128:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 4 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_dst_buf_32.rst b/docs/AMDGPU/gfx9_dst_buf_32.rst
new file mode 100644
index 0000000..5ee1aa2
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_buf_32.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_buf_32:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
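+
+A hand-written example (registers are arbitrary):
+
+.. parsed-literal::
+
+    buffer_load_dword v0, off, s[4:7], s0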
diff --git a/docs/AMDGPU/gfx9_dst_buf_64.rst b/docs/AMDGPU/gfx9_dst_buf_64.rst
new file mode 100644
index 0000000..6e27264
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_buf_64.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_buf_64:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 2 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_dst_buf_96.rst b/docs/AMDGPU/gfx9_dst_buf_96.rst
new file mode 100644
index 0000000..8011edc
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_buf_96.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_buf_96:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+*Size:* 3 dwords by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_dst_buf_lds.rst b/docs/AMDGPU/gfx9_dst_buf_lds.rst
new file mode 100644
index 0000000..0445619
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_buf_lds.rst
@@ -0,0 +1,21 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_buf_lds:
+
+vdst
+===========================
+
+Instruction output: data read from a memory buffer.
+
+If :ref:`lds<amdgpu_synid_lds>` is specified, this operand is ignored by H/W and data are stored directly into LDS.
+
+*Size:* 1 dword by default. :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+    Note that :ref:`tfe<amdgpu_synid_tfe>` and :ref:`lds<amdgpu_synid_lds>` cannot be used together.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
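+
+A hand-written example (registers are arbitrary); vdst must still be supplied even though the data are stored into LDS:
+
+.. parsed-literal::
+
+    buffer_load_dword v1, off, s[4:7], s0 lds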
diff --git a/docs/AMDGPU/gfx9_dst_flat_atomic32.rst b/docs/AMDGPU/gfx9_dst_flat_atomic32.rst
new file mode 100644
index 0000000..94a7fda
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_flat_atomic32.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_flat_atomic32:
+
+vdst
+===========================
+
+Data returned by a 32-bit atomic flat instruction.
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
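+
+Hand-written examples (registers are arbitrary):
+
+.. parsed-literal::
+
+    flat_atomic_add v[0:1], v2             // no vdst: glc is not used
+    flat_atomic_add v3, v[0:1], v2 glc     // v3 receives the pre-operation value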
diff --git a/docs/AMDGPU/gfx9_dst_flat_atomic64.rst b/docs/AMDGPU/gfx9_dst_flat_atomic64.rst
new file mode 100644
index 0000000..7f684a7
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_flat_atomic64.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_flat_atomic64:
+
+vdst
+===========================
+
+Data returned by a 64-bit atomic flat instruction.
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_dst_mimg_gather4.rst b/docs/AMDGPU/gfx9_dst_mimg_gather4.rst
new file mode 100644
index 0000000..3bb6f30
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_mimg_gather4.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_mimg_gather4:
+
+vdst
+===========================
+
+Image data to load by an *image_gather4* instruction.
+
+*Size:* 4 data elements by default. Each data element occupies either 32 bits or 16 bits depending on :ref:`d16<amdgpu_synid_d16>`.
+
+:ref:`d16<amdgpu_synid_d16>` and :ref:`tfe<amdgpu_synid_tfe>` affect operand size as follows:
+
+* :ref:`d16<amdgpu_synid_d16>` specifies that data elements in registers are packed; each value occupies 16 bits.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
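+
+A hand-written example (registers are arbitrary; the single dmask bit selects the gathered channel):
+
+.. parsed-literal::
+
+    image_gather4 v[0:3], v[4:7], s[8:15], s[16:19] dmask:0x1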
diff --git a/docs/AMDGPU/gfx9_dst_mimg_regular.rst b/docs/AMDGPU/gfx9_dst_mimg_regular.rst
new file mode 100644
index 0000000..1a7b848
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_mimg_regular.rst
@@ -0,0 +1,20 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_mimg_regular:
+
+vdst
+===========================
+
+Image data to load by an image instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>` and :ref:`tfe<amdgpu_synid_tfe>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify from 1 to 4 data elements. Each data element occupies 1 dword.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_dst_mimg_regular_d16.rst b/docs/AMDGPU/gfx9_dst_mimg_regular_d16.rst
new file mode 100644
index 0000000..a155639
--- /dev/null
+++ b/docs/AMDGPU/gfx9_dst_mimg_regular_d16.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_dst_mimg_regular_d16:
+
+vdst
+===========================
+
+Image data to load by an image instruction.
+
+*Size:* depends on :ref:`dmask<amdgpu_synid_dmask>`, :ref:`tfe<amdgpu_synid_tfe>` and :ref:`d16<amdgpu_synid_d16>`:
+
+* :ref:`dmask<amdgpu_synid_dmask>` may specify from 1 to 4 data elements. Each data element occupies either 32 bits or 16 bits depending on :ref:`d16<amdgpu_synid_d16>`.
+* :ref:`d16<amdgpu_synid_d16>` specifies that data elements in registers are packed; each value occupies 16 bits.
+* :ref:`tfe<amdgpu_synid_tfe>` adds 1 dword if specified.
+
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_fimm16.rst b/docs/AMDGPU/gfx9_fimm16.rst
new file mode 100644
index 0000000..a438b45
--- /dev/null
+++ b/docs/AMDGPU/gfx9_fimm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_fimm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>` or a :ref:`floating-point_number<amdgpu_synid_floating-point_number>`. The value is converted to *f16* as described :ref:`here<amdgpu_synid_lit_conv>`.
+
diff --git a/docs/AMDGPU/gfx9_fimm32.rst b/docs/AMDGPU/gfx9_fimm32.rst
new file mode 100644
index 0000000..11103e7
--- /dev/null
+++ b/docs/AMDGPU/gfx9_fimm32.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_fimm32:
+
+imm32
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>` or a :ref:`floating-point_number<amdgpu_synid_floating-point_number>`. The value is converted to *f32* as described :ref:`here<amdgpu_synid_lit_conv>`.
+
diff --git a/docs/AMDGPU/gfx9_hwreg.rst b/docs/AMDGPU/gfx9_hwreg.rst
new file mode 100644
index 0000000..7ebb38b
--- /dev/null
+++ b/docs/AMDGPU/gfx9_hwreg.rst
@@ -0,0 +1,61 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_hwreg:
+
+hwreg
+===========================
+
+Bits of a hardware register being accessed.
+
+The bits of this operand have the following meaning:
+
+    ============ ===================================
+    Bits         Description
+    ============ ===================================
+    5:0          Register *id*.
+    10:6         First bit *offset* (0..31).
+    15:11        *Size* in bits (1..32).
+    ============ ===================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>` or using the syntax described below.
+
+    ==================================== ============================================================================
+    Syntax                               Description
+    ==================================== ============================================================================
+    hwreg({0..63})                       All bits of a register indicated by its *id*.
+    hwreg(<*name*>)                      All bits of a register indicated by its *name*.
+    hwreg({0..63}, {0..31}, {1..32})     Register bits indicated by register *id*, first bit *offset* and *size*.
+    hwreg(<*name*>, {0..31}, {1..32})    Register bits indicated by register *name*, first bit *offset* and *size*.
+    ==================================== ============================================================================
+
+Register *id*, *offset* and *size* must be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`.
+
+Defined register *names* include:
+
+    =================== ==========================================
+    Name                Description
+    =================== ==========================================
+    HW_REG_MODE         Shader writeable mode bits.
+    HW_REG_STATUS       Shader read-only status.
+    HW_REG_TRAPSTS      Trap status.
+    HW_REG_HW_ID        Id of wave, simd, compute unit, etc.
+    HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.
+    HW_REG_LDS_ALLOC    Per-wave LDS allocation.
+    HW_REG_IB_STS       Counters of outstanding instructions.
+    HW_REG_SH_MEM_BASES Memory aperture.
+    =================== ==========================================
+
+Examples:
+
+.. parsed-literal::
+
+    s_getreg_b32 s2, 0x6
+    s_getreg_b32 s2, hwreg(15)
+    s_getreg_b32 s2, hwreg(51, 1, 31)
+    s_getreg_b32 s2, hwreg(HW_REG_LDS_ALLOC, 0, 1)
+
diff --git a/docs/AMDGPU/gfx9_imm4.rst b/docs/AMDGPU/gfx9_imm4.rst
new file mode 100644
index 0000000..b1c97fb
--- /dev/null
+++ b/docs/AMDGPU/gfx9_imm4.rst
@@ -0,0 +1,25 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_imm4:
+
+imm4
+===========================
+
+A positive :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 4 bits.
+
+This operand is a mask that controls the indexing mode for operands of subsequent instructions: value 1 enables indexing and value 0 disables it.
+
+    ============ ========================================
+    Bit          Meaning
+    ============ ========================================
+    0            Enables or disables *src0* indexing.
+    1            Enables or disables *src1* indexing.
+    2            Enables or disables *src2* indexing.
+    3            Enables or disables *dst* indexing.
+    ============ ========================================
+
diff --git a/docs/AMDGPU/gfx9_label.rst b/docs/AMDGPU/gfx9_label.rst
new file mode 100644
index 0000000..3277172
--- /dev/null
+++ b/docs/AMDGPU/gfx9_label.rst
@@ -0,0 +1,30 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_label:
+
+label
+===========================
+
+A branch target which is a 16-bit signed integer treated as a PC-relative dword offset.
+
+This operand may be specified as:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>`. The number is truncated to 16 bits.
+* An :ref:`absolute_expression<amdgpu_synid_absolute_expression>` which must start with an :ref:`integer_number<amdgpu_synid_integer_number>`. The value of the expression is truncated to 16 bits.
+* A :ref:`symbol<amdgpu_synid_symbol>` (for example, a label). The value is handled as a 16-bit PC-relative dword offset to be resolved by a linker.
+
+Examples:
+
+.. parsed-literal::
+
+  offset = 30
+  s_branch loop_end
+  s_branch 2 + offset
+  s_branch 32
+  loop_end:
+
diff --git a/docs/AMDGPU/gfx9_mad_type_dev.rst b/docs/AMDGPU/gfx9_mad_type_dev.rst
new file mode 100644
index 0000000..0602f08
--- /dev/null
+++ b/docs/AMDGPU/gfx9_mad_type_dev.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_mad_type_dev:
+
+fx
+===========================
+
+This is an *f32* or *f16* operand depending on instruction modifiers:
+
+* The operand size is controlled by :ref:`m_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>`.
+* The location of a 16-bit operand is controlled by :ref:`m_op_sel<amdgpu_synid_mad_mix_op_sel>`.
+
diff --git a/docs/AMDGPU/gfx9_mod_dpp_sdwa_abs_neg.rst b/docs/AMDGPU/gfx9_mod_dpp_sdwa_abs_neg.rst
new file mode 100644
index 0000000..ccbad8a
--- /dev/null
+++ b/docs/AMDGPU/gfx9_mod_dpp_sdwa_abs_neg.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_mod_dpp_sdwa_abs_neg:
+
+m
+===========================
+
+This operand may be used with floating-point operand modifiers :ref:`abs<amdgpu_synid_abs>` and :ref:`neg<amdgpu_synid_neg>`.
+
diff --git a/docs/AMDGPU/gfx9_mod_sdwa_sext.rst b/docs/AMDGPU/gfx9_mod_sdwa_sext.rst
new file mode 100644
index 0000000..e832097
--- /dev/null
+++ b/docs/AMDGPU/gfx9_mod_sdwa_sext.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_mod_sdwa_sext:
+
+m
+===========================
+
+This operand may be used with integer operand modifier :ref:`sext<amdgpu_synid_sext>`.
+
diff --git a/docs/AMDGPU/gfx9_mod_vop3_abs_neg.rst b/docs/AMDGPU/gfx9_mod_vop3_abs_neg.rst
new file mode 100644
index 0000000..2dac4b1
--- /dev/null
+++ b/docs/AMDGPU/gfx9_mod_vop3_abs_neg.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_mod_vop3_abs_neg:
+
+m
+===========================
+
+This operand may be used with floating-point operand modifiers :ref:`abs<amdgpu_synid_abs>` and :ref:`neg<amdgpu_synid_neg>`.
+
diff --git a/docs/AMDGPU/gfx9_msg.rst b/docs/AMDGPU/gfx9_msg.rst
new file mode 100644
index 0000000..f18cff4
--- /dev/null
+++ b/docs/AMDGPU/gfx9_msg.rst
@@ -0,0 +1,72 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_msg:
+
+msg
+===========================
+
+A 16-bit message code. The bits of this operand have the following meaning:
+
+    ============ ======================================================
+    Bits         Description
+    ============ ======================================================
+    3:0          Message *type*.
+    6:4          Optional *operation*.
+    9:7          Optional *parameters*.
+    15:10        Unused.
+    ============ ======================================================
+
+This operand may be specified as a positive 16-bit :ref:`integer_number<amdgpu_synid_integer_number>` or using the syntax described below:
+
+    ======================================== ========================================================================
+    Syntax                                   Description
+    ======================================== ========================================================================
+    sendmsg(<*type*>)                        A message identified by its *type*.
+    sendmsg(<*type*>, <*op*>)                A message identified by its *type* and *operation*.
+    sendmsg(<*type*>, <*op*>, <*stream*>)    A message identified by its *type* and *operation* with a stream *id*.
+    ======================================== ========================================================================
+
+*Type* may be specified using message *name* or message *id*.
+
+*Op* may be specified using operation *name* or operation *id*.
+
+Stream *id* is an integer in the range 0..3.
+
+Message *id*, operation *id* and stream *id* must be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`.
+
+Each message type supports specific operations:
+
+    ================= ========== ============================== ============ ==========
+    Message name      Message Id Supported Operations           Operation Id Stream Id
+    ================= ========== ============================== ============ ==========
+    MSG_INTERRUPT     1          \-                             \-           \-
+    MSG_GS            2          GS_OP_CUT                      1            Optional
+    \                            GS_OP_EMIT                     2            Optional
+    \                            GS_OP_EMIT_CUT                 3            Optional
+    MSG_GS_DONE       3          GS_OP_NOP                      0            \-
+    \                            GS_OP_CUT                      1            Optional
+    \                            GS_OP_EMIT                     2            Optional
+    \                            GS_OP_EMIT_CUT                 3            Optional
+    MSG_SYSMSG        15         SYSMSG_OP_ECC_ERR_INTERRUPT    1            \-
+    \                            SYSMSG_OP_REG_RD               2            \-
+    \                            SYSMSG_OP_HOST_TRAP_ACK        3            \-
+    \                            SYSMSG_OP_TTRACE_PC            4            \-
+    ================= ========== ============================== ============ ==========
+
+Examples:
+
+.. parsed-literal::
+
+    s_sendmsg 0x12
+    s_sendmsg sendmsg(MSG_INTERRUPT)
+    s_sendmsg sendmsg(2, GS_OP_CUT)
+    s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT)
+    s_sendmsg sendmsg(MSG_GS, 2)
+    s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_EMIT_CUT, 1)
+    s_sendmsg sendmsg(MSG_SYSMSG, SYSMSG_OP_TTRACE_PC)
+
diff --git a/docs/AMDGPU/gfx9_offset_buf.rst b/docs/AMDGPU/gfx9_offset_buf.rst
new file mode 100644
index 0000000..ec6cc33
--- /dev/null
+++ b/docs/AMDGPU/gfx9_offset_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_offset_buf:
+
+soffset
+===========================
+
+An unsigned byte offset.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
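+
+Hand-written examples (registers are arbitrary):
+
+.. parsed-literal::
+
+    buffer_load_dword v0, off, s[4:7], s1      // offset supplied in an SGPR
+    buffer_load_dword v0, off, s[4:7], 0       // offset supplied as an inline constant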
diff --git a/docs/AMDGPU/gfx9_offset_smem_buf.rst b/docs/AMDGPU/gfx9_offset_smem_buf.rst
new file mode 100644
index 0000000..fdc4b09
--- /dev/null
+++ b/docs/AMDGPU/gfx9_offset_smem_buf.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_offset_smem_buf:
+
+soffset
+===========================
+
+An unsigned byte offset added to the base address to get the memory address.
+
+.. WARNING:: The assembler currently supports 20-bit offsets only. Use :ref:`uimm20<amdgpu_synid_uimm20>` instead of :ref:`uimm21<amdgpu_synid_uimm21>`.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`uimm21<amdgpu_synid_uimm21>`
diff --git a/docs/AMDGPU/gfx9_offset_smem_plain.rst b/docs/AMDGPU/gfx9_offset_smem_plain.rst
new file mode 100644
index 0000000..a58df55
--- /dev/null
+++ b/docs/AMDGPU/gfx9_offset_smem_plain.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_offset_smem_plain:
+
+soffset
+===========================
+
+An offset added to the base address to get the memory address.
+
+* If the offset is specified as a register, it supplies an unsigned byte offset.
+* If the offset is specified as a 21-bit immediate, it supplies a signed byte offset.
+
+.. WARNING:: The assembler currently supports 20-bit unsigned offsets only. Use :ref:`uimm20<amdgpu_synid_uimm20>` instead of :ref:`simm21<amdgpu_synid_simm21>`.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`simm21<amdgpu_synid_simm21>`
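+
+Hand-written examples (registers are arbitrary):
+
+.. parsed-literal::
+
+    s_load_dword s0, s[2:3], s4        // unsigned byte offset in a register
+    s_load_dword s0, s[2:3], 0x10      // signed byte offset as an immediate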
diff --git a/docs/AMDGPU/gfx9_opt.rst b/docs/AMDGPU/gfx9_opt.rst
new file mode 100644
index 0000000..50d73f9
--- /dev/null
+++ b/docs/AMDGPU/gfx9_opt.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_opt:
+
+opt
+===========================
+
+This is an optional operand. It must be used if and only if :ref:`glc<amdgpu_synid_glc>` is specified.
+
diff --git a/docs/AMDGPU/gfx9_param.rst b/docs/AMDGPU/gfx9_param.rst
new file mode 100644
index 0000000..2831a65
--- /dev/null
+++ b/docs/AMDGPU/gfx9_param.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_param:
+
+param
+===========================
+
+Interpolation parameter to read:
+
+    ============ ===================================
+    Syntax       Description
+    ============ ===================================
+    p0           Parameter *P0*.
+    p10          Parameter *P10*.
+    p20          Parameter *P20*.
+    ============ ===================================
+
diff --git a/docs/AMDGPU/gfx9_perm_smem.rst b/docs/AMDGPU/gfx9_perm_smem.rst
new file mode 100644
index 0000000..370fb0d
--- /dev/null
+++ b/docs/AMDGPU/gfx9_perm_smem.rst
@@ -0,0 +1,24 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_perm_smem:
+
+imm3
+===========================
+
+A bit mask which indicates request permissions.
+
+This operand must be specified as an :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 7 bits, but only the 3 low bits are significant.
+
+    ============ ==============================
+    Bit Number   Description
+    ============ ==============================
+    0            Request *read* permission.
+    1            Request *write* permission.
+    2            Request *execute* permission.
+    ============ ==============================
+
diff --git a/docs/AMDGPU/gfx9_ret.rst b/docs/AMDGPU/gfx9_ret.rst
new file mode 100644
index 0000000..7015be6
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ret.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ret:
+
+dst
+===========================
+
+This is an input operand. It may optionally serve as a destination if :ref:`glc<amdgpu_synid_glc>` is specified.
+
diff --git a/docs/AMDGPU/gfx9_rsrc_buf.rst b/docs/AMDGPU/gfx9_rsrc_buf.rst
new file mode 100644
index 0000000..3dc1775
--- /dev/null
+++ b/docs/AMDGPU/gfx9_rsrc_buf.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_rsrc_buf:
+
+srsrc
+===========================
+
+Buffer resource constant which defines the address and characteristics of the buffer in memory.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_rsrc_mimg.rst b/docs/AMDGPU/gfx9_rsrc_mimg.rst
new file mode 100644
index 0000000..29c90a1
--- /dev/null
+++ b/docs/AMDGPU/gfx9_rsrc_mimg.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_rsrc_mimg:
+
+srsrc
+===========================
+
+Image resource constant which defines the location of the image buffer in memory, its dimensions, tiling, and data format.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_saddr_flat_global.rst b/docs/AMDGPU/gfx9_saddr_flat_global.rst
new file mode 100644
index 0000000..7396df0
--- /dev/null
+++ b/docs/AMDGPU/gfx9_saddr_flat_global.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_saddr_flat_global:
+
+saddr
+===========================
+
+An optional 64-bit flat global address. Must be specified as :ref:`off<amdgpu_synid_off>` if not used.
+
+See :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>` for a description of the available addressing modes.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`off<amdgpu_synid_off>`
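+
+Hand-written examples (registers are arbitrary):
+
+.. parsed-literal::
+
+    global_load_dword v1, v2, s[4:5]      // 64-bit base in saddr plus 32-bit offset in vaddr
+    global_load_dword v1, v[3:4], off     // 64-bit address in vaddr; saddr is unused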
diff --git a/docs/AMDGPU/gfx9_saddr_flat_scratch.rst b/docs/AMDGPU/gfx9_saddr_flat_scratch.rst
new file mode 100644
index 0000000..5bdbf39
--- /dev/null
+++ b/docs/AMDGPU/gfx9_saddr_flat_scratch.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_saddr_flat_scratch:
+
+saddr
+===========================
+
+An optional 32-bit flat scratch offset. Must be specified as :ref:`off<amdgpu_synid_off>` if not used.
+
+Either this operand or :ref:`vaddr<amdgpu_synid9_vaddr_flat_scratch>` must be set to :ref:`off<amdgpu_synid_off>`.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`off<amdgpu_synid_off>`
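+
+Hand-written examples (registers are arbitrary):
+
+.. parsed-literal::
+
+    scratch_load_dword v1, off, s2      // offset supplied by saddr
+    scratch_load_dword v1, v2, off      // offset supplied by vaddr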
diff --git a/docs/AMDGPU/gfx9_samp_mimg.rst b/docs/AMDGPU/gfx9_samp_mimg.rst
new file mode 100644
index 0000000..f901142
--- /dev/null
+++ b/docs/AMDGPU/gfx9_samp_mimg.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_samp_mimg:
+
+ssamp
+===========================
+
+Sampler constant used to specify filtering options applied to the image data after it is read.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
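+
+A hand-written example (registers are arbitrary):
+
+.. parsed-literal::
+
+    image_sample v[0:3], v[4:7], s[8:15], s[16:19] dmask:0xf      // s[16:19] holds the sampler constant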
diff --git a/docs/AMDGPU/gfx9_sdata128_0.rst b/docs/AMDGPU/gfx9_sdata128_0.rst
new file mode 100644
index 0000000..d3609d4
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdata128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdata128_0:
+
+sdata
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdata32_0.rst b/docs/AMDGPU/gfx9_sdata32_0.rst
new file mode 100644
index 0000000..4de3635
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdata32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdata32_0:
+
+sdata
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdata64_0.rst b/docs/AMDGPU/gfx9_sdata64_0.rst
new file mode 100644
index 0000000..7cde210
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdata64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdata64_0:
+
+sdata
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdst128_0.rst b/docs/AMDGPU/gfx9_sdst128_0.rst
new file mode 100644
index 0000000..974df54
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst128_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdst256_0.rst b/docs/AMDGPU/gfx9_sdst256_0.rst
new file mode 100644
index 0000000..b492921
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst256_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst256_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdst32_0.rst b/docs/AMDGPU/gfx9_sdst32_0.rst
new file mode 100644
index 0000000..911c843
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst32_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdst32_1.rst b/docs/AMDGPU/gfx9_sdst32_1.rst
new file mode 100644
index 0000000..8f63024
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst32_1:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx9_sdst32_2.rst b/docs/AMDGPU/gfx9_sdst32_2.rst
new file mode 100644
index 0000000..04f3cda
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst32_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst32_2:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdst512_0.rst b/docs/AMDGPU/gfx9_sdst512_0.rst
new file mode 100644
index 0000000..7f7dab6
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst512_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst512_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 16 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdst64_0.rst b/docs/AMDGPU/gfx9_sdst64_0.rst
new file mode 100644
index 0000000..dc5f4c7
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst64_0:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_sdst64_1.rst b/docs/AMDGPU/gfx9_sdst64_1.rst
new file mode 100644
index 0000000..208d68b
--- /dev/null
+++ b/docs/AMDGPU/gfx9_sdst64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_sdst64_1:
+
+sdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx9_simm16.rst b/docs/AMDGPU/gfx9_simm16.rst
new file mode 100644
index 0000000..47b200a
--- /dev/null
+++ b/docs/AMDGPU/gfx9_simm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_simm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits and then sign-extended to 32 bits.
+
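+A hand-written example, assuming *s_movk_i32* takes an operand of this type:
+
+.. parsed-literal::
+
+    s_movk_i32 s0, 0x1234
+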
diff --git a/docs/AMDGPU/gfx9_src32_0.rst b/docs/AMDGPU/gfx9_src32_0.rst
new file mode 100644
index 0000000..288ccbb
--- /dev/null
+++ b/docs/AMDGPU/gfx9_src32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_src32_0:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
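+
+Hand-written examples, assuming *v_mov_b32* takes a *src* operand of this type:
+
+.. parsed-literal::
+
+    v_mov_b32 v0, s1                // SGPR source
+    v_mov_b32 v0, 0x3f800000        // 32-bit literal (1.0f)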
diff --git a/docs/AMDGPU/gfx9_src32_1.rst b/docs/AMDGPU/gfx9_src32_1.rst
new file mode 100644
index 0000000..a06764d
--- /dev/null
+++ b/docs/AMDGPU/gfx9_src32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_src32_1:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx9_src64_0.rst b/docs/AMDGPU/gfx9_src64_0.rst
new file mode 100644
index 0000000..f8ef842
--- /dev/null
+++ b/docs/AMDGPU/gfx9_src64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_src64_0:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx9_src64_1.rst b/docs/AMDGPU/gfx9_src64_1.rst
new file mode 100644
index 0000000..fe7b7fd
--- /dev/null
+++ b/docs/AMDGPU/gfx9_src64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_src64_1:
+
+src
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx9_src_exp.rst b/docs/AMDGPU/gfx9_src_exp.rst
new file mode 100644
index 0000000..91a5d53
--- /dev/null
+++ b/docs/AMDGPU/gfx9_src_exp.rst
@@ -0,0 +1,28 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_src_exp:
+
+vsrc
+===========================
+
+Data to copy to export buffers. This is an optional operand. Must be specified as :ref:`off<amdgpu_synid_off>` if not used.
+
+The :ref:`compr<amdgpu_synid_compr>` modifier indicates use of compressed (16-bit) data. This reduces the number of source operands from 4 to 2:
+
+* src0 and src1 must specify the first register (or :ref:`off<amdgpu_synid_off>`).
+* src2 and src3 must specify the second register (or :ref:`off<amdgpu_synid_off>`).
+
+An example:
+
+.. parsed-literal::
+
+  exp mrtz v3, v3, off, off compr
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`off<amdgpu_synid_off>`
diff --git a/docs/AMDGPU/gfx9_ssrc32_0.rst b/docs/AMDGPU/gfx9_ssrc32_0.rst
new file mode 100644
index 0000000..25d6b37
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc32_0:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx9_ssrc32_1.rst b/docs/AMDGPU/gfx9_ssrc32_1.rst
new file mode 100644
index 0000000..caea764
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc32_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc32_1:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_ssrc32_2.rst b/docs/AMDGPU/gfx9_ssrc32_2.rst
new file mode 100644
index 0000000..034f20e
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc32_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc32_2:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx9_ssrc32_3.rst b/docs/AMDGPU/gfx9_ssrc32_3.rst
new file mode 100644
index 0000000..1b08cad
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc32_3.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc32_3:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`iconst<amdgpu_synid_iconst>`
diff --git a/docs/AMDGPU/gfx9_ssrc32_4.rst b/docs/AMDGPU/gfx9_ssrc32_4.rst
new file mode 100644
index 0000000..7e55427
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc32_4.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc32_4:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx9_ssrc64_0.rst b/docs/AMDGPU/gfx9_ssrc64_0.rst
new file mode 100644
index 0000000..b2f86e1
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc64_0:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/docs/AMDGPU/gfx9_ssrc64_1.rst b/docs/AMDGPU/gfx9_ssrc64_1.rst
new file mode 100644
index 0000000..02be6c5
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc64_1.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc64_1:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`
diff --git a/docs/AMDGPU/gfx9_ssrc64_2.rst b/docs/AMDGPU/gfx9_ssrc64_2.rst
new file mode 100644
index 0000000..72d2c46
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc64_2.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc64_2:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`, :ref:`constant<amdgpu_synid_constant>`
diff --git a/docs/AMDGPU/gfx9_ssrc64_3.rst b/docs/AMDGPU/gfx9_ssrc64_3.rst
new file mode 100644
index 0000000..3414802
--- /dev/null
+++ b/docs/AMDGPU/gfx9_ssrc64_3.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_ssrc64_3:
+
+ssrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`flat_scratch<amdgpu_synid_flat_scratch>`, :ref:`xnack<amdgpu_synid_xnack>`, :ref:`vcc<amdgpu_synid_vcc>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`exec<amdgpu_synid_exec>`
diff --git a/docs/AMDGPU/gfx9_tgt.rst b/docs/AMDGPU/gfx9_tgt.rst
new file mode 100644
index 0000000..3ba8bae
--- /dev/null
+++ b/docs/AMDGPU/gfx9_tgt.rst
@@ -0,0 +1,24 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_tgt:
+
+tgt
+===========================
+
+An export target:
+
+    ============== ===================================
+    Syntax         Description
+    ============== ===================================
+    pos{0..3}      Copy vertex position 0..3.
+    param{0..31}   Copy vertex parameter 0..31.
+    mrt{0..7}      Copy pixel color to the MRTs 0..7.
+    mrtz           Copy pixel depth (Z) data.
+    null           Copy nothing.
+    ============== ===================================
+
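+For illustration (register numbers are arbitrary; the *done* and *vm* modifiers follow the standard export syntax), these targets might be used as follows:
+
+.. parsed-literal::
+
+    exp pos0 v0, v1, v2, v3 done
+    exp param0 v4, v5, v6, v7
+    exp mrt0 v8, v9, v10, v11 vm
+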
diff --git a/docs/AMDGPU/gfx9_type_dev.rst b/docs/AMDGPU/gfx9_type_dev.rst
new file mode 100644
index 0000000..ae2b0bf
--- /dev/null
+++ b/docs/AMDGPU/gfx9_type_dev.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_type_dev:
+
+Type deviation
+===========================
+
+The *type* of this operand differs from the *type* :ref:`implied by the opcode<amdgpu_syn_instruction_type>`. This tag specifies the actual operand *type*.
+
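+For illustration only (the notation is assumed here, not quoted from an instruction listing), a tag such as:
+
+.. parsed-literal::
+
+    src0:f16
+
+would indicate that *src0* holds 16-bit floating-point data regardless of the *type* implied by the opcode.
+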
diff --git a/docs/AMDGPU/gfx9_uimm16.rst b/docs/AMDGPU/gfx9_uimm16.rst
new file mode 100644
index 0000000..4d1fe1d
--- /dev/null
+++ b/docs/AMDGPU/gfx9_uimm16.rst
@@ -0,0 +1,14 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_uimm16:
+
+imm16
+===========================
+
+An :ref:`integer_number<amdgpu_synid_integer_number>`. The value is truncated to 16 bits and then zero-extended to 32 bits.
+
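+For illustration, a couple of arbitrarily chosen values (examples, not taken from the hardware spec):
+
+.. parsed-literal::
+
+    0x00018000 -> 0x8000 -> 0x00008000    // zero-extended regardless of bit 15
+    0xFFFFFFFF -> 0xFFFF -> 0x0000FFFF
+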
diff --git a/docs/AMDGPU/gfx9_vaddr_flat_global.rst b/docs/AMDGPU/gfx9_vaddr_flat_global.rst
new file mode 100644
index 0000000..e08e8fb
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vaddr_flat_global.rst
@@ -0,0 +1,22 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vaddr_flat_global:
+
+vaddr
+===========================
+
+A 64-bit flat global address or a 32-bit offset, depending on the addressing mode (as shown in the sketch below):
+
+* Address = :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>` + :ref:`offset13s<amdgpu_synid_flat_offset13s>`. :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>` is a 64-bit address. This mode is indicated by :ref:`saddr<amdgpu_synid9_saddr_flat_global>` set to :ref:`off<amdgpu_synid_off>`.
+* Address = :ref:`saddr<amdgpu_synid9_saddr_flat_global>` + :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>` + :ref:`offset13s<amdgpu_synid_flat_offset13s>`. :ref:`vaddr<amdgpu_synid9_vaddr_flat_global>` is a 32-bit offset. This mode is used when :ref:`saddr<amdgpu_synid9_saddr_flat_global>` is not :ref:`off<amdgpu_synid_off>`.
+
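+A minimal sketch of the two modes (register numbers are arbitrary; note the assembler caveat in the warning below):
+
+.. parsed-literal::
+
+    global_load_dword v0, v[1:2], off    // 64-bit address in vaddr, saddr is off
+    global_load_dword v0, v1, s[2:3]     // 32-bit offset in vaddr, base address in saddr
+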
+.. WARNING:: The assembler currently expects a 64-bit *vaddr* regardless of the addressing mode. This has to be fixed.
+
+*Size:* 1 or 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vaddr_flat_scratch.rst b/docs/AMDGPU/gfx9_vaddr_flat_scratch.rst
new file mode 100644
index 0000000..a72cf06
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vaddr_flat_scratch.rst
@@ -0,0 +1,19 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vaddr_flat_scratch:
+
+vaddr
+===========================
+
+An optional 32-bit flat scratch offset. Must be specified as :ref:`off<amdgpu_synid_off>` if not used.
+
+Either this operand or :ref:`saddr<amdgpu_synid9_saddr_flat_scratch>` must be set to :ref:`off<amdgpu_synid_off>`.
+
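+A minimal sketch of the two forms (register numbers are arbitrary):
+
+.. parsed-literal::
+
+    scratch_load_dword v0, v1, off     // offset in vaddr, saddr is off
+    scratch_load_dword v0, off, s1     // offset in saddr, vaddr is off
+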
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`off<amdgpu_synid_off>`
diff --git a/docs/AMDGPU/gfx9_vcc_64.rst b/docs/AMDGPU/gfx9_vcc_64.rst
new file mode 100644
index 0000000..788306b
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vcc_64.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vcc_64:
+
+vcc
+===========================
+
+Vector condition code.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`vcc<amdgpu_synid_vcc>`
diff --git a/docs/AMDGPU/gfx9_vdata128_0.rst b/docs/AMDGPU/gfx9_vdata128_0.rst
new file mode 100644
index 0000000..3d2b490
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdata128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdata128_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vdata32_0.rst b/docs/AMDGPU/gfx9_vdata32_0.rst
new file mode 100644
index 0000000..e285769
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdata32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdata32_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vdata64_0.rst b/docs/AMDGPU/gfx9_vdata64_0.rst
new file mode 100644
index 0000000..fcd1d0f
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdata64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdata64_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vdata96_0.rst b/docs/AMDGPU/gfx9_vdata96_0.rst
new file mode 100644
index 0000000..8a8cb76
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdata96_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdata96_0:
+
+vdata
+===========================
+
+Instruction input.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vdst128_0.rst b/docs/AMDGPU/gfx9_vdst128_0.rst
new file mode 100644
index 0000000..b8bbf89
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdst128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdst128_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vdst32_0.rst b/docs/AMDGPU/gfx9_vdst32_0.rst
new file mode 100644
index 0000000..ccee55f
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdst32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdst32_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vdst64_0.rst b/docs/AMDGPU/gfx9_vdst64_0.rst
new file mode 100644
index 0000000..60bf384
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdst64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdst64_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vdst96_0.rst b/docs/AMDGPU/gfx9_vdst96_0.rst
new file mode 100644
index 0000000..834d32a4
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vdst96_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vdst96_0:
+
+vdst
+===========================
+
+Instruction output.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vsrc128_0.rst b/docs/AMDGPU/gfx9_vsrc128_0.rst
new file mode 100644
index 0000000..3e8c6de
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vsrc128_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vsrc128_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vsrc32_0.rst b/docs/AMDGPU/gfx9_vsrc32_0.rst
new file mode 100644
index 0000000..a056f3a
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vsrc32_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vsrc32_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_vsrc64_0.rst b/docs/AMDGPU/gfx9_vsrc64_0.rst
new file mode 100644
index 0000000..b91b340
--- /dev/null
+++ b/docs/AMDGPU/gfx9_vsrc64_0.rst
@@ -0,0 +1,17 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_vsrc64_0:
+
+vsrc
+===========================
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/docs/AMDGPU/gfx9_waitcnt.rst b/docs/AMDGPU/gfx9_waitcnt.rst
new file mode 100644
index 0000000..015a51a
--- /dev/null
+++ b/docs/AMDGPU/gfx9_waitcnt.rst
@@ -0,0 +1,56 @@
+..
+    **************************************************
+    *                                                *
+    *   Automatically generated file, do not edit!   *
+    *                                                *
+    **************************************************
+
+.. _amdgpu_synid9_waitcnt:
+
+waitcnt
+===========================
+
+Counts of outstanding instructions to wait for.
+
+The bits of this operand have the following meaning:
+
+    ============ ======================================================
+    Bits         Description
+    ============ ======================================================
+    3:0          VM_CNT: vector memory operations count, lower bits.
+    6:4          EXP_CNT: export count.
+    11:8         LGKM_CNT: LDS, GDS, Constant and Message count.
+    15:14        VM_CNT: vector memory operations count, upper bits.
+    ============ ======================================================
+
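+For example, given the bit layout above and assuming the unused bits are left clear, a wait for VM_CNT=1, EXP_CNT=2 and LGKM_CNT=3 corresponds to the immediate value 0x321:
+
+.. parsed-literal::
+
+    (1 << 0) | (2 << 4) | (3 << 8) = 0x321
+    s_waitcnt 0x321
+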
+This operand may be specified as a non-negative 16-bit :ref:`integer_number<amdgpu_synid_integer_number>`
+or as a combination of the following symbolic helpers:
+
+    ====================== ======================================================================
+    Syntax                 Description
+    ====================== ======================================================================
+    vmcnt(<*N*>)           VM_CNT value. *N* must not exceed the largest VM_CNT value.
+    expcnt(<*N*>)          EXP_CNT value. *N* must not exceed the largest EXP_CNT value.
+    lgkmcnt(<*N*>)         LGKM_CNT value. *N* must not exceed the largest LGKM_CNT value.
+    vmcnt_sat(<*N*>)       VM_CNT value computed as min(*N*, the largest VM_CNT value).
+    expcnt_sat(<*N*>)      EXP_CNT value computed as min(*N*, the largest EXP_CNT value).
+    lgkmcnt_sat(<*N*>)     LGKM_CNT value computed as min(*N*, the largest LGKM_CNT value).
+    ====================== ======================================================================
+
+These helpers may be specified in any order. Ampersands and commas may be used as optional separators.
+
+*N* is either an
+:ref:`integer number<amdgpu_synid_integer_number>` or an
+:ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+
+Examples:
+
+.. parsed-literal::
+
+    s_waitcnt 0
+    s_waitcnt vmcnt(1)
+    s_waitcnt expcnt(2) lgkmcnt(3)
+    s_waitcnt vmcnt(1) expcnt(2) lgkmcnt(3)
+    s_waitcnt vmcnt(1), expcnt(2), lgkmcnt(3)
+    s_waitcnt vmcnt(1) & lgkmcnt_sat(100) & expcnt(2)
+
diff --git a/docs/AMDGPUAsmGFX7.rst b/docs/AMDGPUAsmGFX7.rst
deleted file mode 100644
index 8973c50..0000000
--- a/docs/AMDGPUAsmGFX7.rst
+++ /dev/null
@@ -1,1255 +0,0 @@
-..
-    **************************************************
-    *                                                *
-    *   Automatically generated file, do not edit!   *
-    *                                                *
-    **************************************************
-
-===========================
-Syntax of GFX7 Instructions
-===========================
-
-.. contents::
-  :local:
-
-
-DS
-===========================
-
-.. parsed-literal::
-
-    ds_add_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_b32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_b64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_rtn_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_rtn_b64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_src2_b32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_src2_b64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_append                      dst                            :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_b32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_b64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_f32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_f64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_b32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_b64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_f32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_f64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_condxchg32_rtn_b64          dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_consume                     dst                            :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_barrier                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_init                    src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_br                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_p                  src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_release_all        src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_v                  src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_f64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_i32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_i64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_f64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_i32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_i64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_f64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_i32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_i64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_f64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_i32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_i64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_f64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_i32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_i64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_f64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_i32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_i64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_b32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_b64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_rtn_b32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_rtn_b64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_nop                         src0
-    ds_or_b32                      src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_b64                      src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_rtn_b32                  dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_rtn_b64                  dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_src2_b32                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_src2_b64                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_ordered_count               dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2_b32                   dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2_b64                   dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2st64_b32               dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2st64_b64               dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b128                   dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b32                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b64                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b96                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i16                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i8                     dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u16                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u8                     dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_rtn_u32                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_rtn_u64                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_src2_u32               src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_src2_u64               src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_u32                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_u64                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_swizzle_b32                 dst, src0                      :ref:`sw_offset16<amdgpu_synid_sw_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrap_rtn_b32                dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2_b32                  src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2_b64                  src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2st64_b32              src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2st64_b64              src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b128                  src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b16                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b32                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b64                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b8                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b96                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_src2_b32              src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_src2_b64              src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2_rtn_b32             dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2_rtn_b64             dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2st64_rtn_b32         dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2st64_rtn_b64         dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg_rtn_b32              dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg_rtn_b64              dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_b32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_b64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_rtn_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_rtn_b64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_src2_b32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_src2_b64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-
-EXP
-===========================
-
-.. parsed-literal::
-
-    exp                            dst, src0, src1, src2, src3    :ref:`done<amdgpu_synid_done>` :ref:`compr<amdgpu_synid_compr>` :ref:`vm<amdgpu_synid_vm>`
-
-FLAT
-===========================
-
-.. parsed-literal::
-
-    flat_atomic_add                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_add_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_and                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_and_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_cmpswap            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_cmpswap_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_dec                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_dec_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_fcmpswap           dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_fcmpswap_x2        dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_fmax               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_fmax_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_fmin               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_fmin_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_inc                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_inc_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_or                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_or_x2              dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smax               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smax_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smin               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smin_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_sub                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_sub_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_swap               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_swap_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umax               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umax_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umin               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umin_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_xor                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_xor_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dword                dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx2              dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx3              dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx4              dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sbyte                dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sshort               dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ubyte                dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ushort               dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_byte                src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dword               src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx2             src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx3             src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx4             src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_short               src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-
-MIMG
-===========================
-
-.. parsed-literal::
-
-    image_atomic_add               src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_and               src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_cmpswap           src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_dec               src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_inc               src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_or                src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_smax              src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_smin              src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_sub               src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_swap              src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_umax              src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_umin              src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_xor               src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4                  dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_b                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_b_cl             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_b_cl_o           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_b_o              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_b              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_b_cl           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_b_cl_o         dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_b_o            dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_cl             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_cl_o           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_l              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_l_o            dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_lz             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_lz_o           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_c_o              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_cl               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_cl_o             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_l                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_l_o              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_lz               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_lz_o             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4_o                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_get_lod                  dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_get_resinfo              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load                     dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_mip                 dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_mip_pck             dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_mip_pck_sgn         dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_pck                 dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_pck_sgn             dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample                   dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_b                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_b_cl              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c_b               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c_b_cl            dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c_cd              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c_cl              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c_d               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c_l               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_c_lz              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_cl                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_l                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample_lz                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_store                    src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_store_mip                src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_store_mip_pck            src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_store_pck                src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-
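-As a reading aid, a row such as ``image_load dst, src0, src1`` maps onto
-concrete assembly with *dst* as the destination VGPRs, *src0* the address
-VGPRs and *src1* the 256-bit image resource descriptor, followed by any of
-the listed modifiers. A minimal sketch (the register choices and the 2D
-addressing below are illustrative, not prescribed by the table):
-
-.. parsed-literal::
-
-    image_load v[0:3], v[4:5], s[8:15] dmask:0xf unorm  ; load 4 components
-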
-MUBUF
-===========================
-
-.. parsed-literal::
-
-    buffer_atomic_add              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_add_x2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_and              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_and_x2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_cmpswap          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_cmpswap_x2       src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_dec              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_dec_x2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_inc              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_inc_x2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_or               src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_or_x2            src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smax             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smax_x2          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smin             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smin_x2          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_sub              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_sub_x2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_swap             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_swap_x2          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umax             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umax_x2          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umin             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umin_x2          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_xor              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_xor_x2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dword              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_dwordx2            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dwordx3            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dwordx4            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_x           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_format_xy          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_xyz         dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_xyzw        dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_sbyte              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_sshort             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_ubyte              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_ushort             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_store_byte              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dword             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx3           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx4           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_x          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xy         src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xyz        src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xyzw       src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_short             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`addr64<amdgpu_synid_addr64>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_wbinvl1
-    buffer_wbinvl1_vol
-
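-Here *dst* is the data VGPR(s), *src0* the address VGPR, *src1* the 128-bit
-buffer resource and *src2* the SGPR soffset. A minimal sketch (register
-choices are illustrative; ``offen`` and ``glc`` are taken from the modifier
-columns above):
-
-.. parsed-literal::
-
-    buffer_load_dword v0, v1, s[4:7], s0 offen glc  ; VGPR-offset load
-    s_waitcnt vmcnt(0)                              ; wait for the result
-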
-SMRD
-===========================
-
-.. parsed-literal::
-
-    s_buffer_load_dword            dst, src0, src1
-    s_buffer_load_dwordx16         dst, src0, src1
-    s_buffer_load_dwordx2          dst, src0, src1
-    s_buffer_load_dwordx4          dst, src0, src1
-    s_buffer_load_dwordx8          dst, src0, src1
-    s_dcache_inv
-    s_dcache_inv_vol
-    s_load_dword                   dst, src0, src1
-    s_load_dwordx16                dst, src0, src1
-    s_load_dwordx2                 dst, src0, src1
-    s_load_dwordx4                 dst, src0, src1
-    s_load_dwordx8                 dst, src0, src1
-    s_memtime                      dst
-
-SOP1
-===========================
-
-.. parsed-literal::
-
-    s_abs_i32                      dst, src0
-    s_and_saveexec_b64             dst, src0
-    s_andn2_saveexec_b64           dst, src0
-    s_bcnt0_i32_b32                dst, src0
-    s_bcnt0_i32_b64                dst, src0
-    s_bcnt1_i32_b32                dst, src0
-    s_bcnt1_i32_b64                dst, src0
-    s_bitset0_b32                  dst, src0
-    s_bitset0_b64                  dst, src0
-    s_bitset1_b32                  dst, src0
-    s_bitset1_b64                  dst, src0
-    s_brev_b32                     dst, src0
-    s_brev_b64                     dst, src0
-    s_cbranch_join                 src0
-    s_cmov_b32                     dst, src0
-    s_cmov_b64                     dst, src0
-    s_ff0_i32_b32                  dst, src0
-    s_ff0_i32_b64                  dst, src0
-    s_ff1_i32_b32                  dst, src0
-    s_ff1_i32_b64                  dst, src0
-    s_flbit_i32                    dst, src0
-    s_flbit_i32_b32                dst, src0
-    s_flbit_i32_b64                dst, src0
-    s_flbit_i32_i64                dst, src0
-    s_getpc_b64                    dst
-    s_mov_b32                      dst, src0
-    s_mov_b64                      dst, src0
-    s_mov_fed_b32                  dst, src0
-    s_movreld_b32                  dst, src0
-    s_movreld_b64                  dst, src0
-    s_movrels_b32                  dst, src0
-    s_movrels_b64                  dst, src0
-    s_nand_saveexec_b64            dst, src0
-    s_nor_saveexec_b64             dst, src0
-    s_not_b32                      dst, src0
-    s_not_b64                      dst, src0
-    s_or_saveexec_b64              dst, src0
-    s_orn2_saveexec_b64            dst, src0
-    s_quadmask_b32                 dst, src0
-    s_quadmask_b64                 dst, src0
-    s_rfe_b64                      src0
-    s_setpc_b64                    src0
-    s_sext_i32_i16                 dst, src0
-    s_sext_i32_i8                  dst, src0
-    s_swappc_b64                   dst, src0
-    s_wqm_b32                      dst, src0
-    s_wqm_b64                      dst, src0
-    s_xnor_saveexec_b64            dst, src0
-    s_xor_saveexec_b64             dst, src0
-
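-Several SOP1 opcodes operate on EXEC implicitly; for the ``*_saveexec_b64``
-family, *dst* receives the previous EXEC value. For instance (register
-choice illustrative):
-
-.. parsed-literal::
-
-    s_and_saveexec_b64 s[2:3], vcc   ; s[2:3] <- EXEC, then EXEC <- vcc & EXEC
-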
-SOP2
-===========================
-
-.. parsed-literal::
-
-    s_absdiff_i32                  dst, src0, src1
-    s_add_i32                      dst, src0, src1
-    s_add_u32                      dst, src0, src1
-    s_addc_u32                     dst, src0, src1
-    s_and_b32                      dst, src0, src1
-    s_and_b64                      dst, src0, src1
-    s_andn2_b32                    dst, src0, src1
-    s_andn2_b64                    dst, src0, src1
-    s_ashr_i32                     dst, src0, src1
-    s_ashr_i64                     dst, src0, src1
-    s_bfe_i32                      dst, src0, src1
-    s_bfe_i64                      dst, src0, src1
-    s_bfe_u32                      dst, src0, src1
-    s_bfe_u64                      dst, src0, src1
-    s_bfm_b32                      dst, src0, src1
-    s_bfm_b64                      dst, src0, src1
-    s_cbranch_g_fork               src0, src1
-    s_cselect_b32                  dst, src0, src1
-    s_cselect_b64                  dst, src0, src1
-    s_lshl_b32                     dst, src0, src1
-    s_lshl_b64                     dst, src0, src1
-    s_lshr_b32                     dst, src0, src1
-    s_lshr_b64                     dst, src0, src1
-    s_max_i32                      dst, src0, src1
-    s_max_u32                      dst, src0, src1
-    s_min_i32                      dst, src0, src1
-    s_min_u32                      dst, src0, src1
-    s_mul_i32                      dst, src0, src1
-    s_nand_b32                     dst, src0, src1
-    s_nand_b64                     dst, src0, src1
-    s_nor_b32                      dst, src0, src1
-    s_nor_b64                      dst, src0, src1
-    s_or_b32                       dst, src0, src1
-    s_or_b64                       dst, src0, src1
-    s_orn2_b32                     dst, src0, src1
-    s_orn2_b64                     dst, src0, src1
-    s_sub_i32                      dst, src0, src1
-    s_sub_u32                      dst, src0, src1
-    s_subb_u32                     dst, src0, src1
-    s_xnor_b32                     dst, src0, src1
-    s_xnor_b64                     dst, src0, src1
-    s_xor_b32                      dst, src0, src1
-    s_xor_b64                      dst, src0, src1
-
-SOPC
-===========================
-
-.. parsed-literal::
-
-    s_bitcmp0_b32                  src0, src1
-    s_bitcmp0_b64                  src0, src1
-    s_bitcmp1_b32                  src0, src1
-    s_bitcmp1_b64                  src0, src1
-    s_cmp_eq_i32                   src0, src1
-    s_cmp_eq_u32                   src0, src1
-    s_cmp_ge_i32                   src0, src1
-    s_cmp_ge_u32                   src0, src1
-    s_cmp_gt_i32                   src0, src1
-    s_cmp_gt_u32                   src0, src1
-    s_cmp_le_i32                   src0, src1
-    s_cmp_le_u32                   src0, src1
-    s_cmp_lg_i32                   src0, src1
-    s_cmp_lg_u32                   src0, src1
-    s_cmp_lt_i32                   src0, src1
-    s_cmp_lt_u32                   src0, src1
-    s_setvskip                     src0, src1
-
-SOPK
-===========================
-
-.. parsed-literal::
-
-    s_addk_i32                     dst, src0
-    s_cbranch_i_fork               src0, src1
-    s_cmovk_i32                    dst, src0
-    s_cmpk_eq_i32                  src0, src1
-    s_cmpk_eq_u32                  src0, src1
-    s_cmpk_ge_i32                  src0, src1
-    s_cmpk_ge_u32                  src0, src1
-    s_cmpk_gt_i32                  src0, src1
-    s_cmpk_gt_u32                  src0, src1
-    s_cmpk_le_i32                  src0, src1
-    s_cmpk_le_u32                  src0, src1
-    s_cmpk_lg_i32                  src0, src1
-    s_cmpk_lg_u32                  src0, src1
-    s_cmpk_lt_i32                  src0, src1
-    s_cmpk_lt_u32                  src0, src1
-    s_getreg_b32                   dst, src0
-    s_movk_i32                     dst, src0
-    s_mulk_i32                     dst, src0
-    s_setreg_b32                   dst, src0
-    s_setreg_imm32_b32             dst, src0
-
-SOPP
-===========================
-
-.. parsed-literal::
-
-    s_barrier
-    s_branch                       src0
-    s_cbranch_cdbgsys              src0
-    s_cbranch_cdbgsys_and_user     src0
-    s_cbranch_cdbgsys_or_user      src0
-    s_cbranch_cdbguser             src0
-    s_cbranch_execnz               src0
-    s_cbranch_execz                src0
-    s_cbranch_scc0                 src0
-    s_cbranch_scc1                 src0
-    s_cbranch_vccnz                src0
-    s_cbranch_vccz                 src0
-    s_decperflevel                 src0
-    s_endpgm
-    s_icache_inv
-    s_incperflevel                 src0
-    s_nop                          src0
-    s_sendmsg                      src0
-    s_sendmsghalt                  src0
-    s_sethalt                      src0
-    s_setkill                      src0
-    s_setprio                      src0
-    s_sleep                        src0
-    s_trap                         src0
-    s_ttracedata
-    s_waitcnt                      src0
-
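-SOPC compares have no explicit destination: they set the scalar condition
-code SCC, which the conditional SOPP branches above consume. A minimal
-sketch (the label name is illustrative):
-
-.. parsed-literal::
-
-    s_cmp_eq_u32 s0, s1      ; SCC <- (s0 == s1)
-    s_cbranch_scc1 equal     ; taken when SCC is set
-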
-VINTRP
-===========================
-
-.. parsed-literal::
-
-    v_interp_mov_f32               dst, src0, src1
-    v_interp_p1_f32                dst, src0, src1
-    v_interp_p2_f32                dst, src0, src1
-
-VOP1
-===========================
-
-.. parsed-literal::
-
-    v_bfrev_b32                    dst, src0
-    v_ceil_f32                     dst, src0
-    v_ceil_f64                     dst, src0
-    v_clrexcp
-    v_cos_f32                      dst, src0
-    v_cvt_f16_f32                  dst, src0
-    v_cvt_f32_f16                  dst, src0
-    v_cvt_f32_f64                  dst, src0
-    v_cvt_f32_i32                  dst, src0
-    v_cvt_f32_u32                  dst, src0
-    v_cvt_f32_ubyte0               dst, src0
-    v_cvt_f32_ubyte1               dst, src0
-    v_cvt_f32_ubyte2               dst, src0
-    v_cvt_f32_ubyte3               dst, src0
-    v_cvt_f64_f32                  dst, src0
-    v_cvt_f64_i32                  dst, src0
-    v_cvt_f64_u32                  dst, src0
-    v_cvt_flr_i32_f32              dst, src0
-    v_cvt_i32_f32                  dst, src0
-    v_cvt_i32_f64                  dst, src0
-    v_cvt_off_f32_i4               dst, src0
-    v_cvt_rpi_i32_f32              dst, src0
-    v_cvt_u32_f32                  dst, src0
-    v_cvt_u32_f64                  dst, src0
-    v_exp_f32                      dst, src0
-    v_exp_legacy_f32               dst, src0
-    v_ffbh_i32                     dst, src0
-    v_ffbh_u32                     dst, src0
-    v_ffbl_b32                     dst, src0
-    v_floor_f32                    dst, src0
-    v_floor_f64                    dst, src0
-    v_fract_f32                    dst, src0
-    v_fract_f64                    dst, src0
-    v_frexp_exp_i32_f32            dst, src0
-    v_frexp_exp_i32_f64            dst, src0
-    v_frexp_mant_f32               dst, src0
-    v_frexp_mant_f64               dst, src0
-    v_log_clamp_f32                dst, src0
-    v_log_f32                      dst, src0
-    v_log_legacy_f32               dst, src0
-    v_mov_b32                      dst, src0
-    v_mov_fed_b32                  dst, src0
-    v_movreld_b32                  dst, src0
-    v_movrels_b32                  dst, src0
-    v_movrelsd_b32                 dst, src0
-    v_nop
-    v_not_b32                      dst, src0
-    v_rcp_clamp_f32                dst, src0
-    v_rcp_clamp_f64                dst, src0
-    v_rcp_f32                      dst, src0
-    v_rcp_f64                      dst, src0
-    v_rcp_iflag_f32                dst, src0
-    v_rcp_legacy_f32               dst, src0
-    v_readfirstlane_b32            dst, src0
-    v_rndne_f32                    dst, src0
-    v_rndne_f64                    dst, src0
-    v_rsq_clamp_f32                dst, src0
-    v_rsq_clamp_f64                dst, src0
-    v_rsq_f32                      dst, src0
-    v_rsq_f64                      dst, src0
-    v_rsq_legacy_f32               dst, src0
-    v_sin_f32                      dst, src0
-    v_sqrt_f32                     dst, src0
-    v_sqrt_f64                     dst, src0
-    v_trunc_f32                    dst, src0
-    v_trunc_f64                    dst, src0
-
-VOP2
-===========================
-
-.. parsed-literal::
-
-    v_add_f32                      dst, src0, src1
-    v_add_i32                      dst0, dst1, src0, src1
-    v_addc_u32                     dst0, dst1, src0, src1, src2
-    v_and_b32                      dst, src0, src1
-    v_ashr_i32                     dst, src0, src1
-    v_ashrrev_i32                  dst, src0, src1
-    v_bcnt_u32_b32                 dst, src0, src1
-    v_bfm_b32                      dst, src0, src1
-    v_cndmask_b32                  dst, src0, src1, src2
-    v_cvt_pk_i16_i32               dst, src0, src1
-    v_cvt_pk_u16_u32               dst, src0, src1
-    v_cvt_pkaccum_u8_f32           dst, src0, src1
-    v_cvt_pknorm_i16_f32           dst, src0, src1
-    v_cvt_pknorm_u16_f32           dst, src0, src1
-    v_cvt_pkrtz_f16_f32            dst, src0, src1
-    v_ldexp_f32                    dst, src0, src1
-    v_lshl_b32                     dst, src0, src1
-    v_lshlrev_b32                  dst, src0, src1
-    v_lshr_b32                     dst, src0, src1
-    v_lshrrev_b32                  dst, src0, src1
-    v_mac_f32                      dst, src0, src1
-    v_mac_legacy_f32               dst, src0, src1
-    v_madak_f32                    dst, src0, src1, src2
-    v_madmk_f32                    dst, src0, src1, src2
-    v_max_f32                      dst, src0, src1
-    v_max_i32                      dst, src0, src1
-    v_max_legacy_f32               dst, src0, src1
-    v_max_u32                      dst, src0, src1
-    v_mbcnt_hi_u32_b32             dst, src0, src1
-    v_mbcnt_lo_u32_b32             dst, src0, src1
-    v_min_f32                      dst, src0, src1
-    v_min_i32                      dst, src0, src1
-    v_min_legacy_f32               dst, src0, src1
-    v_min_u32                      dst, src0, src1
-    v_mul_f32                      dst, src0, src1
-    v_mul_hi_i32_i24               dst, src0, src1
-    v_mul_hi_u32_u24               dst, src0, src1
-    v_mul_i32_i24                  dst, src0, src1
-    v_mul_legacy_f32               dst, src0, src1
-    v_mul_u32_u24                  dst, src0, src1
-    v_or_b32                       dst, src0, src1
-    v_readlane_b32                 dst, src0, src1
-    v_sub_f32                      dst, src0, src1
-    v_sub_i32                      dst0, dst1, src0, src1
-    v_subb_u32                     dst0, dst1, src0, src1, src2
-    v_subbrev_u32                  dst0, dst1, src0, src1, src2
-    v_subrev_f32                   dst, src0, src1
-    v_subrev_i32                   dst0, dst1, src0, src1
-    v_writelane_b32                dst, src0, src1
-    v_xor_b32                      dst, src0, src1
-
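-Most VOP2 opcodes also have a 64-bit VOP3 encoding, listed in the next
-section with an ``_e64`` suffix; the wider encoding adds modifiers such as
-:ref:`clamp<amdgpu_synid_clamp>` and :ref:`omod<amdgpu_synid_omod>`. A
-minimal sketch (register choices are illustrative):
-
-.. parsed-literal::
-
-    v_add_f32     v0, v1, v2         ; compact VOP2 encoding
-    v_add_f32_e64 v0, v1, v2 clamp   ; VOP3 encoding with result clamping
-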
-VOP3
-===========================
-
-.. parsed-literal::
-
-    v_add_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_i32_e64                  dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_addc_u32_e64                 dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_alignbit_b32                 dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_alignbyte_b32                dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_and_b32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashr_i32_e64                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashr_i64                     dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashrrev_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bcnt_u32_b32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bfe_i32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfe_u32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfi_b32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfm_b32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bfrev_b32_e64                dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f32_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f64_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_clrexcp_e64                                                 :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_eq_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_eq_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_f_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_f_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_ge_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_ge_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_gt_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_gt_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_le_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_le_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_lg_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_lg_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_lt_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_lt_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_neq_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_neq_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nge_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nge_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_ngt_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_ngt_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nle_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nle_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nlg_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nlg_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nlt_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_nlt_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_o_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_o_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_tru_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_tru_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_u_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmps_u_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_eq_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_eq_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_f_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_f_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_ge_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_ge_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_gt_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_gt_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_le_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_le_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_lg_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_lg_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_lt_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_lt_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_neq_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_neq_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nge_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nge_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_ngt_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_ngt_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nle_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nle_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nlg_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nlg_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nlt_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_nlt_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_o_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_o_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_tru_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_tru_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_u_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpsx_u_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f32_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f64_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f32_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f64_e64             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cndmask_b32_e64              dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_cos_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubeid_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubema_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubesc_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubetc_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f16_f32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_f16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_f64_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_i32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_u32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte0_e64           dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte1_e64           dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte2_e64           dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte3_e64           dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_f32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_i32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_u32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_flr_i32_f32_e64          dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i32_f32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i32_f64_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_off_f32_i4_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_i16_i32_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_u16_u32_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_u8_f32                dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pkaccum_u8_f32_e64       dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_i16_f32_e64       dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_u16_f32_e64       dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pkrtz_f16_f32_e64        dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_rpi_i32_f32_e64          dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u32_f32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u32_f64_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f32                dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f64                dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fmas_f32                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fmas_f64                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_scale_f32                dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_div_scale_f64                dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_exp_f32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_exp_legacy_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ffbh_i32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ffbh_u32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ffbl_b32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f64                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i32_f32_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i32_f64_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f32_e64           dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f64_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f64                    dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_lerp_u8                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_log_clamp_f32_e64            dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_log_f32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_log_legacy_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_lshl_b32_e64                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshl_b64                     dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshlrev_b32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshr_b32_e64                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshr_b64                     dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshrrev_b32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mac_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mac_legacy_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_f32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i32_i24                  dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i64_i32                  dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_mad_legacy_f32               dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u32_u24                  dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u64_u32                  dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_max3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_max3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_max_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_i32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_max_legacy_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mbcnt_hi_u32_b32_e64         dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mbcnt_lo_u32_b32_e64         dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_med3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_med3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_med3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_i32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_min_legacy_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mov_b32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_mov_fed_b32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_movreld_b32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_movrels_b32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_movrelsd_b32_e64             dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_mqsad_pk_u16_u8              dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_mqsad_u32_u8                 dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_msad_u8                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_i32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_i32_i24_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_u32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_u32_u24_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_i32_i24_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_legacy_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_lo_i32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_lo_u32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_u32_u24_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mullit_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_nop_e64                                                     :ref:`omod<amdgpu_synid_omod>`
-    v_not_b32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_or_b32_e64                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_qsad_pk_u16_u8               dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_clamp_f32_e64            dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_clamp_f64_e64            dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f64_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_iflag_f32_e64            dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_legacy_f32_e64           dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_clamp_f32_e64            dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_clamp_f64_e64            dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f64_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_legacy_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_hi_u8                    dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u16                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u8                       dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_sin_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f32_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f64_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_i32_e64                  dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_subb_u32_e64                 dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_subbrev_u32_e64              dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_i32_e64               dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_trig_preop_f64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_xor_b32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-
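-The _e64 suffix selects the 64-bit VOP3 encoding, which is what exposes the
-:ref:`clamp<amdgpu_synid_clamp>` and :ref:`omod<amdgpu_synid_omod>` output
-modifiers listed above. A minimal usage sketch (illustrative only: the
-register choices and the clamp/mul:2 modifier spellings follow common LLVM
-assembler conventions and are assumptions, not part of the generated
-listing)::
-
-    v_mul_f32_e64 v0, v1, v2 clamp mul:2
-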
-VOPC
-===========================
-
-.. parsed-literal::
-
-    v_cmp_class_f32                dst, src0, src1
-    v_cmp_class_f64                dst, src0, src1
-    v_cmp_eq_f32                   dst, src0, src1
-    v_cmp_eq_f64                   dst, src0, src1
-    v_cmp_eq_i32                   dst, src0, src1
-    v_cmp_eq_i64                   dst, src0, src1
-    v_cmp_eq_u32                   dst, src0, src1
-    v_cmp_eq_u64                   dst, src0, src1
-    v_cmp_f_f32                    dst, src0, src1
-    v_cmp_f_f64                    dst, src0, src1
-    v_cmp_f_i32                    dst, src0, src1
-    v_cmp_f_i64                    dst, src0, src1
-    v_cmp_f_u32                    dst, src0, src1
-    v_cmp_f_u64                    dst, src0, src1
-    v_cmp_ge_f32                   dst, src0, src1
-    v_cmp_ge_f64                   dst, src0, src1
-    v_cmp_ge_i32                   dst, src0, src1
-    v_cmp_ge_i64                   dst, src0, src1
-    v_cmp_ge_u32                   dst, src0, src1
-    v_cmp_ge_u64                   dst, src0, src1
-    v_cmp_gt_f32                   dst, src0, src1
-    v_cmp_gt_f64                   dst, src0, src1
-    v_cmp_gt_i32                   dst, src0, src1
-    v_cmp_gt_i64                   dst, src0, src1
-    v_cmp_gt_u32                   dst, src0, src1
-    v_cmp_gt_u64                   dst, src0, src1
-    v_cmp_le_f32                   dst, src0, src1
-    v_cmp_le_f64                   dst, src0, src1
-    v_cmp_le_i32                   dst, src0, src1
-    v_cmp_le_i64                   dst, src0, src1
-    v_cmp_le_u32                   dst, src0, src1
-    v_cmp_le_u64                   dst, src0, src1
-    v_cmp_lg_f32                   dst, src0, src1
-    v_cmp_lg_f64                   dst, src0, src1
-    v_cmp_lt_f32                   dst, src0, src1
-    v_cmp_lt_f64                   dst, src0, src1
-    v_cmp_lt_i32                   dst, src0, src1
-    v_cmp_lt_i64                   dst, src0, src1
-    v_cmp_lt_u32                   dst, src0, src1
-    v_cmp_lt_u64                   dst, src0, src1
-    v_cmp_ne_i32                   dst, src0, src1
-    v_cmp_ne_i64                   dst, src0, src1
-    v_cmp_ne_u32                   dst, src0, src1
-    v_cmp_ne_u64                   dst, src0, src1
-    v_cmp_neq_f32                  dst, src0, src1
-    v_cmp_neq_f64                  dst, src0, src1
-    v_cmp_nge_f32                  dst, src0, src1
-    v_cmp_nge_f64                  dst, src0, src1
-    v_cmp_ngt_f32                  dst, src0, src1
-    v_cmp_ngt_f64                  dst, src0, src1
-    v_cmp_nle_f32                  dst, src0, src1
-    v_cmp_nle_f64                  dst, src0, src1
-    v_cmp_nlg_f32                  dst, src0, src1
-    v_cmp_nlg_f64                  dst, src0, src1
-    v_cmp_nlt_f32                  dst, src0, src1
-    v_cmp_nlt_f64                  dst, src0, src1
-    v_cmp_o_f32                    dst, src0, src1
-    v_cmp_o_f64                    dst, src0, src1
-    v_cmp_t_i32                    dst, src0, src1
-    v_cmp_t_i64                    dst, src0, src1
-    v_cmp_t_u32                    dst, src0, src1
-    v_cmp_t_u64                    dst, src0, src1
-    v_cmp_tru_f32                  dst, src0, src1
-    v_cmp_tru_f64                  dst, src0, src1
-    v_cmp_u_f32                    dst, src0, src1
-    v_cmp_u_f64                    dst, src0, src1
-    v_cmps_eq_f32                  dst, src0, src1
-    v_cmps_eq_f64                  dst, src0, src1
-    v_cmps_f_f32                   dst, src0, src1
-    v_cmps_f_f64                   dst, src0, src1
-    v_cmps_ge_f32                  dst, src0, src1
-    v_cmps_ge_f64                  dst, src0, src1
-    v_cmps_gt_f32                  dst, src0, src1
-    v_cmps_gt_f64                  dst, src0, src1
-    v_cmps_le_f32                  dst, src0, src1
-    v_cmps_le_f64                  dst, src0, src1
-    v_cmps_lg_f32                  dst, src0, src1
-    v_cmps_lg_f64                  dst, src0, src1
-    v_cmps_lt_f32                  dst, src0, src1
-    v_cmps_lt_f64                  dst, src0, src1
-    v_cmps_neq_f32                 dst, src0, src1
-    v_cmps_neq_f64                 dst, src0, src1
-    v_cmps_nge_f32                 dst, src0, src1
-    v_cmps_nge_f64                 dst, src0, src1
-    v_cmps_ngt_f32                 dst, src0, src1
-    v_cmps_ngt_f64                 dst, src0, src1
-    v_cmps_nle_f32                 dst, src0, src1
-    v_cmps_nle_f64                 dst, src0, src1
-    v_cmps_nlg_f32                 dst, src0, src1
-    v_cmps_nlg_f64                 dst, src0, src1
-    v_cmps_nlt_f32                 dst, src0, src1
-    v_cmps_nlt_f64                 dst, src0, src1
-    v_cmps_o_f32                   dst, src0, src1
-    v_cmps_o_f64                   dst, src0, src1
-    v_cmps_tru_f32                 dst, src0, src1
-    v_cmps_tru_f64                 dst, src0, src1
-    v_cmps_u_f32                   dst, src0, src1
-    v_cmps_u_f64                   dst, src0, src1
-    v_cmpsx_eq_f32                 dst, src0, src1
-    v_cmpsx_eq_f64                 dst, src0, src1
-    v_cmpsx_f_f32                  dst, src0, src1
-    v_cmpsx_f_f64                  dst, src0, src1
-    v_cmpsx_ge_f32                 dst, src0, src1
-    v_cmpsx_ge_f64                 dst, src0, src1
-    v_cmpsx_gt_f32                 dst, src0, src1
-    v_cmpsx_gt_f64                 dst, src0, src1
-    v_cmpsx_le_f32                 dst, src0, src1
-    v_cmpsx_le_f64                 dst, src0, src1
-    v_cmpsx_lg_f32                 dst, src0, src1
-    v_cmpsx_lg_f64                 dst, src0, src1
-    v_cmpsx_lt_f32                 dst, src0, src1
-    v_cmpsx_lt_f64                 dst, src0, src1
-    v_cmpsx_neq_f32                dst, src0, src1
-    v_cmpsx_neq_f64                dst, src0, src1
-    v_cmpsx_nge_f32                dst, src0, src1
-    v_cmpsx_nge_f64                dst, src0, src1
-    v_cmpsx_ngt_f32                dst, src0, src1
-    v_cmpsx_ngt_f64                dst, src0, src1
-    v_cmpsx_nle_f32                dst, src0, src1
-    v_cmpsx_nle_f64                dst, src0, src1
-    v_cmpsx_nlg_f32                dst, src0, src1
-    v_cmpsx_nlg_f64                dst, src0, src1
-    v_cmpsx_nlt_f32                dst, src0, src1
-    v_cmpsx_nlt_f64                dst, src0, src1
-    v_cmpsx_o_f32                  dst, src0, src1
-    v_cmpsx_o_f64                  dst, src0, src1
-    v_cmpsx_tru_f32                dst, src0, src1
-    v_cmpsx_tru_f64                dst, src0, src1
-    v_cmpsx_u_f32                  dst, src0, src1
-    v_cmpsx_u_f64                  dst, src0, src1
-    v_cmpx_class_f32               dst, src0, src1
-    v_cmpx_class_f64               dst, src0, src1
-    v_cmpx_eq_f32                  dst, src0, src1
-    v_cmpx_eq_f64                  dst, src0, src1
-    v_cmpx_eq_i32                  dst, src0, src1
-    v_cmpx_eq_i64                  dst, src0, src1
-    v_cmpx_eq_u32                  dst, src0, src1
-    v_cmpx_eq_u64                  dst, src0, src1
-    v_cmpx_f_f32                   dst, src0, src1
-    v_cmpx_f_f64                   dst, src0, src1
-    v_cmpx_f_i32                   dst, src0, src1
-    v_cmpx_f_i64                   dst, src0, src1
-    v_cmpx_f_u32                   dst, src0, src1
-    v_cmpx_f_u64                   dst, src0, src1
-    v_cmpx_ge_f32                  dst, src0, src1
-    v_cmpx_ge_f64                  dst, src0, src1
-    v_cmpx_ge_i32                  dst, src0, src1
-    v_cmpx_ge_i64                  dst, src0, src1
-    v_cmpx_ge_u32                  dst, src0, src1
-    v_cmpx_ge_u64                  dst, src0, src1
-    v_cmpx_gt_f32                  dst, src0, src1
-    v_cmpx_gt_f64                  dst, src0, src1
-    v_cmpx_gt_i32                  dst, src0, src1
-    v_cmpx_gt_i64                  dst, src0, src1
-    v_cmpx_gt_u32                  dst, src0, src1
-    v_cmpx_gt_u64                  dst, src0, src1
-    v_cmpx_le_f32                  dst, src0, src1
-    v_cmpx_le_f64                  dst, src0, src1
-    v_cmpx_le_i32                  dst, src0, src1
-    v_cmpx_le_i64                  dst, src0, src1
-    v_cmpx_le_u32                  dst, src0, src1
-    v_cmpx_le_u64                  dst, src0, src1
-    v_cmpx_lg_f32                  dst, src0, src1
-    v_cmpx_lg_f64                  dst, src0, src1
-    v_cmpx_lt_f32                  dst, src0, src1
-    v_cmpx_lt_f64                  dst, src0, src1
-    v_cmpx_lt_i32                  dst, src0, src1
-    v_cmpx_lt_i64                  dst, src0, src1
-    v_cmpx_lt_u32                  dst, src0, src1
-    v_cmpx_lt_u64                  dst, src0, src1
-    v_cmpx_ne_i32                  dst, src0, src1
-    v_cmpx_ne_i64                  dst, src0, src1
-    v_cmpx_ne_u32                  dst, src0, src1
-    v_cmpx_ne_u64                  dst, src0, src1
-    v_cmpx_neq_f32                 dst, src0, src1
-    v_cmpx_neq_f64                 dst, src0, src1
-    v_cmpx_nge_f32                 dst, src0, src1
-    v_cmpx_nge_f64                 dst, src0, src1
-    v_cmpx_ngt_f32                 dst, src0, src1
-    v_cmpx_ngt_f64                 dst, src0, src1
-    v_cmpx_nle_f32                 dst, src0, src1
-    v_cmpx_nle_f64                 dst, src0, src1
-    v_cmpx_nlg_f32                 dst, src0, src1
-    v_cmpx_nlg_f64                 dst, src0, src1
-    v_cmpx_nlt_f32                 dst, src0, src1
-    v_cmpx_nlt_f64                 dst, src0, src1
-    v_cmpx_o_f32                   dst, src0, src1
-    v_cmpx_o_f64                   dst, src0, src1
-    v_cmpx_t_i32                   dst, src0, src1
-    v_cmpx_t_i64                   dst, src0, src1
-    v_cmpx_t_u32                   dst, src0, src1
-    v_cmpx_t_u64                   dst, src0, src1
-    v_cmpx_tru_f32                 dst, src0, src1
-    v_cmpx_tru_f64                 dst, src0, src1
-    v_cmpx_u_f32                   dst, src0, src1
-    v_cmpx_u_f64                   dst, src0, src1
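-
-A VOPC comparison writes a per-lane result mask; in the 32-bit encoding the
-destination is the VCC register pair. A minimal sketch (illustrative
-register choices, not part of the generated listing)::
-
-    v_cmp_lt_f32 vcc, v0, v1
-
-The v_cmpx opcodes additionally write the comparison result to the EXEC
-mask.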
diff --git a/docs/AMDGPUAsmGFX8.rst b/docs/AMDGPUAsmGFX8.rst
deleted file mode 100644
index 44c4843..0000000
--- a/docs/AMDGPUAsmGFX8.rst
+++ /dev/null
@@ -1,1672 +0,0 @@
-..
-    **************************************************
-    *                                                *
-    *   Automatically generated file, do not edit!   *
-    *                                                *
-    **************************************************
-
-===========================
-Syntax of GFX8 Instructions
-===========================
-
-.. contents::
-  :local:
-
-
-DS
-===========================
-
-.. parsed-literal::
-
-    ds_add_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_b32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_b64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_rtn_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_rtn_b64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_src2_b32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_src2_b64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_append                      dst                            :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_bpermute_b32                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>`
-    ds_cmpst_b32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_b64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_f32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_f64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_b32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_b64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_f32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_f64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_condxchg32_rtn_b64          dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_consume                     dst                            :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_barrier                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_init                    src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_br                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_p                                                 :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_release_all                                       :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_v                                                 :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_f64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_i32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_i64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_f64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_i32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_i64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_f64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_i32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_i64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_f64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_i32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_i64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_f64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_i32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_i64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_f64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_i32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_i64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_b32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_b64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_rtn_b32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_rtn_b64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_nop
-    ds_or_b32                      src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_b64                      src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_rtn_b32                  dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_rtn_b64                  dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_src2_b32                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_src2_b64                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_ordered_count               dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_permute_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>`
-    ds_read2_b32                   dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2_b64                   dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2st64_b32               dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2st64_b64               dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b128                   dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b32                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b64                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b96                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i16                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i8                     dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u16                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u8                     dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_rtn_u32                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_rtn_u64                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_src2_u32               src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_src2_u64               src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_u32                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_u64                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_swizzle_b32                 dst, src0                      :ref:`sw_offset16<amdgpu_synid_sw_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrap_rtn_b32                dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2_b32                  src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2_b64                  src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2st64_b32              src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2st64_b64              src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b128                  src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b16                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b32                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b64                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b8                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b96                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_src2_b32              src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_src2_b64              src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2_rtn_b32             dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2_rtn_b64             dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2st64_rtn_b32         dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2st64_rtn_b64         dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg_rtn_b32              dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg_rtn_b64              dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_b32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_b64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_rtn_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_rtn_b64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_src2_b32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_src2_b64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-
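-A DS (local data share) access pairs with an LGKM-count wait before the
-result is consumed. A minimal sketch (illustrative registers and offset;
-the offset:N spelling follows LLVM assembler conventions and is an
-assumption)::
-
-    ds_write_b32 v0, v1 offset:16
-    ds_read_b32  v2, v0 offset:16
-    s_waitcnt    lgkmcnt(0)
-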
-EXP
-===========================
-
-.. parsed-literal::
-
-    exp                            dst, src0, src1, src2, src3    :ref:`done<amdgpu_synid_done>` :ref:`compr<amdgpu_synid_compr>` :ref:`vm<amdgpu_synid_vm>`
-
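-The export destination names a hardware target such as a render target,
-position, or parameter slot rather than a register. A minimal sketch
-exporting a color to render target 0 (illustrative registers; done and vm
-mark the final export of a pixel shader)::
-
-    exp mrt0 v0, v1, v2, v3 done vm
-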
-FLAT
-===========================
-
-.. parsed-literal::
-
-    flat_atomic_add                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_add_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_and                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_and_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_cmpswap            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_cmpswap_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_dec                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_dec_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_inc                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_inc_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_or                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_or_x2              dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smax               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smax_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smin               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smin_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_sub                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_sub_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_swap               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_swap_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umax               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umax_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umin               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umin_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_xor                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_xor_x2             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dword                dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx2              dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx3              dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx4              dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sbyte                dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sshort               dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ubyte                dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ushort               dst, src0                      :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_byte                src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dword               src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx2             src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx3             src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx4             src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_short               src0, src1                     :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-
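-FLAT accesses take a 64-bit address in a VGPR pair and are tracked by both
-the vmcnt and lgkmcnt counters. A minimal sketch (illustrative registers;
-the glc modifier requests a globally coherent load)::
-
-    flat_load_dword  v2, v[0:1] glc
-    s_waitcnt        vmcnt(0) lgkmcnt(0)
-    flat_store_dword v[0:1], v2
-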
-MIMG
-===========================
-
-.. parsed-literal::
-
-    image_atomic_add               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_and               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_cmpswap           dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_dec               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_inc               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_or                dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_smax              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_smin              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_sub               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_swap              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_umax              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_umin              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_xor               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4                  dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_b                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_b_cl             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_b_cl_o           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_b_o              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_b              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_b_cl           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_b_cl_o         dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_b_o            dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_cl             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_cl_o           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_l              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_l_o            dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_lz             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_lz_o           dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_o              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_cl               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_cl_o             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_l                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_l_o              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_lz               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_lz_o             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_o                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_get_lod                  dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_get_resinfo              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load                     dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_load_mip                 dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_load_mip_pck             dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_mip_pck_sgn         dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_pck                 dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_pck_sgn             dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample                   dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_b                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_b_cl              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c_b               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c_b_cl            dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c_cl              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c_l               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c_lz              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_cl                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_l                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_lz                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_store                    src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_store_mip                src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_store_mip_pck            src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_store_pck                src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-
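-An illustrative sketch with placeholder registers (the trailing scalar
-operands are the image resource descriptor and, for sampling, the sampler
-descriptor)::
-
-    image_load   v[0:3], v[4:5], s[8:15] dmask:0xf unorm
-    image_sample v[0:3], v[4:5], s[8:15], s[16:19] dmask:0xf
-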
-MUBUF
-===========================
-
-.. parsed-literal::
-
-    buffer_atomic_add              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_add_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_and              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_and_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_cmpswap          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_cmpswap_x2       dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_dec              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_dec_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_inc              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_inc_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_or               dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_or_x2            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smax             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smax_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smin             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smin_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_sub              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_sub_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_swap             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_swap_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umax             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umax_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umin             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umin_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_xor              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_xor_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dword              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_dwordx2            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dwordx3            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dwordx4            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_x       dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_xy      dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_xyz     dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_xyzw    dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_x           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_format_xy          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_xyz         dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_xyzw        dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_sbyte              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_sshort             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_ubyte              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_ushort             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_store_byte              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dword             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx3           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx4           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_x      src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_xy     src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_xyz    src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_xyzw   src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_x          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xy         src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xyz        src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xyzw       src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_lds_dword         src0, src1                     :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`lds<amdgpu_synid_lds>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_short             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_wbinvl1
-    buffer_wbinvl1_vol
-
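-A hypothetical example (src1 is the 128-bit buffer resource descriptor and
-src2 the scalar byte offset; registers are placeholders)::
-
-    buffer_load_dword  v0, v1, s[4:7], s0 offen offset:16 glc
-    buffer_store_dword v0, v1, s[4:7], s0 offen
-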
-SMEM
-===========================
-
-.. parsed-literal::
-
-    s_atc_probe                    src0, src1, src2
-    s_atc_probe_buffer             src0, src1, src2
-    s_buffer_load_dword            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx16         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx2          dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx4          dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx8          dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_store_dword           src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_store_dwordx2         src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_store_dwordx4         src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_dcache_inv
-    s_dcache_inv_vol
-    s_dcache_wb
-    s_dcache_wb_vol
-    s_load_dword                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx16                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx2                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx4                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx8                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_memrealtime                  dst
-    s_memtime                      dst
-    s_store_dword                  src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_store_dwordx2                src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_store_dwordx4                src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-
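-For example, assuming arbitrary register assignments (the final operand is a
-byte offset)::
-
-    s_load_dword          s0, s[2:3], 0x10
-    s_buffer_load_dwordx4 s[4:7], s[8:11], 0x0 glc
-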
-SOP1
-===========================
-
-.. parsed-literal::
-
-    s_abs_i32                      dst, src0
-    s_and_saveexec_b64             dst, src0
-    s_andn2_saveexec_b64           dst, src0
-    s_bcnt0_i32_b32                dst, src0
-    s_bcnt0_i32_b64                dst, src0
-    s_bcnt1_i32_b32                dst, src0
-    s_bcnt1_i32_b64                dst, src0
-    s_bitset0_b32                  dst, src0
-    s_bitset0_b64                  dst, src0
-    s_bitset1_b32                  dst, src0
-    s_bitset1_b64                  dst, src0
-    s_brev_b32                     dst, src0
-    s_brev_b64                     dst, src0
-    s_cbranch_join                 src0
-    s_cmov_b32                     dst, src0
-    s_cmov_b64                     dst, src0
-    s_ff0_i32_b32                  dst, src0
-    s_ff0_i32_b64                  dst, src0
-    s_ff1_i32_b32                  dst, src0
-    s_ff1_i32_b64                  dst, src0
-    s_flbit_i32                    dst, src0
-    s_flbit_i32_b32                dst, src0
-    s_flbit_i32_b64                dst, src0
-    s_flbit_i32_i64                dst, src0
-    s_getpc_b64                    dst
-    s_mov_b32                      dst, src0
-    s_mov_b64                      dst, src0
-    s_mov_fed_b32                  dst, src0
-    s_movreld_b32                  dst, src0
-    s_movreld_b64                  dst, src0
-    s_movrels_b32                  dst, src0
-    s_movrels_b64                  dst, src0
-    s_nand_saveexec_b64            dst, src0
-    s_nor_saveexec_b64             dst, src0
-    s_not_b32                      dst, src0
-    s_not_b64                      dst, src0
-    s_or_saveexec_b64              dst, src0
-    s_orn2_saveexec_b64            dst, src0
-    s_quadmask_b32                 dst, src0
-    s_quadmask_b64                 dst, src0
-    s_rfe_b64                      src0
-    s_set_gpr_idx_idx              src0
-    s_setpc_b64                    src0
-    s_sext_i32_i16                 dst, src0
-    s_sext_i32_i8                  dst, src0
-    s_swappc_b64                   dst, src0
-    s_wqm_b32                      dst, src0
-    s_wqm_b64                      dst, src0
-    s_xnor_saveexec_b64            dst, src0
-    s_xor_saveexec_b64             dst, src0
-
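-An illustrative pair of scalar moves (placeholder registers; 64-bit operands
-use aligned SGPR pairs)::
-
-    s_mov_b32 s0, s1
-    s_not_b64 s[0:1], s[2:3]
-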
-SOP2
-===========================
-
-.. parsed-literal::
-
-    s_absdiff_i32                  dst, src0, src1
-    s_add_i32                      dst, src0, src1
-    s_add_u32                      dst, src0, src1
-    s_addc_u32                     dst, src0, src1
-    s_and_b32                      dst, src0, src1
-    s_and_b64                      dst, src0, src1
-    s_andn2_b32                    dst, src0, src1
-    s_andn2_b64                    dst, src0, src1
-    s_ashr_i32                     dst, src0, src1
-    s_ashr_i64                     dst, src0, src1
-    s_bfe_i32                      dst, src0, src1
-    s_bfe_i64                      dst, src0, src1
-    s_bfe_u32                      dst, src0, src1
-    s_bfe_u64                      dst, src0, src1
-    s_bfm_b32                      dst, src0, src1
-    s_bfm_b64                      dst, src0, src1
-    s_cbranch_g_fork               src0, src1
-    s_cselect_b32                  dst, src0, src1
-    s_cselect_b64                  dst, src0, src1
-    s_lshl_b32                     dst, src0, src1
-    s_lshl_b64                     dst, src0, src1
-    s_lshr_b32                     dst, src0, src1
-    s_lshr_b64                     dst, src0, src1
-    s_max_i32                      dst, src0, src1
-    s_max_u32                      dst, src0, src1
-    s_min_i32                      dst, src0, src1
-    s_min_u32                      dst, src0, src1
-    s_mul_i32                      dst, src0, src1
-    s_nand_b32                     dst, src0, src1
-    s_nand_b64                     dst, src0, src1
-    s_nor_b32                      dst, src0, src1
-    s_nor_b64                      dst, src0, src1
-    s_or_b32                       dst, src0, src1
-    s_or_b64                       dst, src0, src1
-    s_orn2_b32                     dst, src0, src1
-    s_orn2_b64                     dst, src0, src1
-    s_rfe_restore_b64              src0, src1
-    s_sub_i32                      dst, src0, src1
-    s_sub_u32                      dst, src0, src1
-    s_subb_u32                     dst, src0, src1
-    s_xnor_b32                     dst, src0, src1
-    s_xnor_b64                     dst, src0, src1
-    s_xor_b32                      dst, src0, src1
-    s_xor_b64                      dst, src0, src1
-
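-A short sketch with placeholder registers (src1 may also be an inline
-constant)::
-
-    s_add_u32  s0, s1, s2
-    s_lshl_b32 s0, s1, 4
-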
-SOPC
-===========================
-
-.. parsed-literal::
-
-    s_bitcmp0_b32                  src0, src1
-    s_bitcmp0_b64                  src0, src1
-    s_bitcmp1_b32                  src0, src1
-    s_bitcmp1_b64                  src0, src1
-    s_cmp_eq_i32                   src0, src1
-    s_cmp_eq_u32                   src0, src1
-    s_cmp_eq_u64                   src0, src1
-    s_cmp_ge_i32                   src0, src1
-    s_cmp_ge_u32                   src0, src1
-    s_cmp_gt_i32                   src0, src1
-    s_cmp_gt_u32                   src0, src1
-    s_cmp_le_i32                   src0, src1
-    s_cmp_le_u32                   src0, src1
-    s_cmp_lg_i32                   src0, src1
-    s_cmp_lg_u32                   src0, src1
-    s_cmp_lg_u64                   src0, src1
-    s_cmp_lt_i32                   src0, src1
-    s_cmp_lt_u32                   src0, src1
-    s_set_gpr_idx_on               src0, src1
-    s_setvskip                     src0, src1
-
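-SOPC compares have no destination operand: they set the scalar condition code
-(SCC), which a later conditional branch tests. A sketch with placeholder
-registers and a hypothetical label::
-
-    s_cmp_eq_i32   s0, s1
-    s_cbranch_scc1 taken
-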
-SOPK
-===========================
-
-.. parsed-literal::
-
-    s_addk_i32                     dst, src0
-    s_cbranch_i_fork               src0, src1
-    s_cmovk_i32                    dst, src0
-    s_cmpk_eq_i32                  src0, src1
-    s_cmpk_eq_u32                  src0, src1
-    s_cmpk_ge_i32                  src0, src1
-    s_cmpk_ge_u32                  src0, src1
-    s_cmpk_gt_i32                  src0, src1
-    s_cmpk_gt_u32                  src0, src1
-    s_cmpk_le_i32                  src0, src1
-    s_cmpk_le_u32                  src0, src1
-    s_cmpk_lg_i32                  src0, src1
-    s_cmpk_lg_u32                  src0, src1
-    s_cmpk_lt_i32                  src0, src1
-    s_cmpk_lt_u32                  src0, src1
-    s_getreg_b32                   dst, src0
-    s_movk_i32                     dst, src0
-    s_mulk_i32                     dst, src0
-    s_setreg_b32                   dst, src0
-    s_setreg_imm32_b32             dst, src0
-
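-An example with placeholder operands (SOPK instructions carry a 16-bit
-immediate)::
-
-    s_movk_i32    s0, 0x1234
-    s_cmpk_eq_i32 s0, 0x20
-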
-SOPP
-===========================
-
-.. parsed-literal::
-
-    s_barrier
-    s_branch                       src0
-    s_cbranch_cdbgsys              src0
-    s_cbranch_cdbgsys_and_user     src0
-    s_cbranch_cdbgsys_or_user      src0
-    s_cbranch_cdbguser             src0
-    s_cbranch_execnz               src0
-    s_cbranch_execz                src0
-    s_cbranch_scc0                 src0
-    s_cbranch_scc1                 src0
-    s_cbranch_vccnz                src0
-    s_cbranch_vccz                 src0
-    s_decperflevel                 src0
-    s_endpgm
-    s_endpgm_saved
-    s_icache_inv
-    s_incperflevel                 src0
-    s_nop                          src0
-    s_sendmsg                      src0
-    s_sendmsghalt                  src0
-    s_set_gpr_idx_mode             src0
-    s_set_gpr_idx_off
-    s_sethalt                      src0
-    s_setkill                      src0
-    s_setprio                      src0
-    s_sleep                        src0
-    s_trap                         src0
-    s_ttracedata
-    s_waitcnt                      src0
-    s_wakeup
-
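-For example (the counter syntax is assembler shorthand for the encoded src0
-immediate)::
-
-    s_waitcnt vmcnt(0) lgkmcnt(0)
-    s_endpgm
-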
-VINTRP
-===========================
-
-.. parsed-literal::
-
-    v_interp_mov_f32               dst, src0, src1
-    v_interp_p1_f32                dst, src0, src1
-    v_interp_p2_f32                dst, src0, src1
-
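-An illustrative sketch (attr0.x selects the attribute and channel to
-interpolate; registers are placeholders)::
-
-    v_interp_p1_f32 v2, v0, attr0.x
-    v_interp_p2_f32 v2, v1, attr0.x
-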
-VOP1
-===========================
-
-.. parsed-literal::
-
-    v_bfrev_b32                    dst, src0
-    v_bfrev_b32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_bfrev_b32_sdwa               dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ceil_f16                     dst, src0
-    v_ceil_f16_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ceil_f16_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ceil_f32                     dst, src0
-    v_ceil_f32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ceil_f32_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ceil_f64                     dst, src0
-    v_clrexcp
-    v_cos_f16                      dst, src0
-    v_cos_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cos_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cos_f32                      dst, src0
-    v_cos_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cos_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f16_f32                  dst, src0
-    v_cvt_f16_f32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f16_f32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f16_i16                  dst, src0
-    v_cvt_f16_i16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f16_i16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f16_u16                  dst, src0
-    v_cvt_f16_u16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f16_u16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_f16                  dst, src0
-    v_cvt_f32_f16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_f16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_f64                  dst, src0
-    v_cvt_f32_i32                  dst, src0
-    v_cvt_f32_i32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_i32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_u32                  dst, src0
-    v_cvt_f32_u32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_u32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte0               dst, src0
-    v_cvt_f32_ubyte0_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte0_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte1               dst, src0
-    v_cvt_f32_ubyte1_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte1_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte2               dst, src0
-    v_cvt_f32_ubyte2_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte2_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte3               dst, src0
-    v_cvt_f32_ubyte3_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte3_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f64_f32                  dst, src0
-    v_cvt_f64_i32                  dst, src0
-    v_cvt_f64_u32                  dst, src0
-    v_cvt_flr_i32_f32              dst, src0
-    v_cvt_flr_i32_f32_dpp          dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_flr_i32_f32_sdwa         dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_i16_f16                  dst, src0
-    v_cvt_i16_f16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_i16_f16_sdwa             dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_i32_f32                  dst, src0
-    v_cvt_i32_f32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_i32_f32_sdwa             dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_i32_f64                  dst, src0
-    v_cvt_off_f32_i4               dst, src0
-    v_cvt_off_f32_i4_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_off_f32_i4_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_rpi_i32_f32              dst, src0
-    v_cvt_rpi_i32_f32_dpp          dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_rpi_i32_f32_sdwa         dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_u16_f16                  dst, src0
-    v_cvt_u16_f16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_u16_f16_sdwa             dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_u32_f32                  dst, src0
-    v_cvt_u32_f32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_u32_f32_sdwa             dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_u32_f64                  dst, src0
-    v_exp_f16                      dst, src0
-    v_exp_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_exp_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_exp_f32                      dst, src0
-    v_exp_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_exp_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_exp_legacy_f32               dst, src0
-    v_exp_legacy_f32_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_exp_legacy_f32_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ffbh_i32                     dst, src0
-    v_ffbh_i32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ffbh_i32_sdwa                dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ffbh_u32                     dst, src0
-    v_ffbh_u32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ffbh_u32_sdwa                dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ffbl_b32                     dst, src0
-    v_ffbl_b32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ffbl_b32_sdwa                dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_floor_f16                    dst, src0
-    v_floor_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_floor_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_floor_f32                    dst, src0
-    v_floor_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_floor_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_floor_f64                    dst, src0
-    v_fract_f16                    dst, src0
-    v_fract_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_fract_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_fract_f32                    dst, src0
-    v_fract_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_fract_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_fract_f64                    dst, src0
-    v_frexp_exp_i16_f16            dst, src0
-    v_frexp_exp_i16_f16_dpp        dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_exp_i16_f16_sdwa       dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_exp_i32_f32            dst, src0
-    v_frexp_exp_i32_f32_dpp        dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_exp_i32_f32_sdwa       dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_exp_i32_f64            dst, src0
-    v_frexp_mant_f16               dst, src0
-    v_frexp_mant_f16_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_mant_f16_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_mant_f32               dst, src0
-    v_frexp_mant_f32_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_mant_f32_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_mant_f64               dst, src0
-    v_log_f16                      dst, src0
-    v_log_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_log_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_log_f32                      dst, src0
-    v_log_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_log_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_log_legacy_f32               dst, src0
-    v_log_legacy_f32_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_log_legacy_f32_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_mov_b32                      dst, src0
-    v_mov_b32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mov_b32_sdwa                 dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_mov_fed_b32                  dst, src0
-    v_mov_fed_b32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mov_fed_b32_sdwa             dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_movreld_b32                  dst, src0
-    v_movrels_b32                  dst, src0
-    v_movrelsd_b32                 dst, src0
-    v_nop
-    v_not_b32                      dst, src0
-    v_not_b32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_not_b32_sdwa                 dst, src0                      :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rcp_f16                      dst, src0
-    v_rcp_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rcp_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rcp_f32                      dst, src0
-    v_rcp_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rcp_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rcp_f64                      dst, src0
-    v_rcp_iflag_f32                dst, src0
-    v_rcp_iflag_f32_dpp            dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rcp_iflag_f32_sdwa           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_readfirstlane_b32            dst, src0
-    v_rndne_f16                    dst, src0
-    v_rndne_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rndne_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rndne_f32                    dst, src0
-    v_rndne_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rndne_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rndne_f64                    dst, src0
-    v_rsq_f16                      dst, src0
-    v_rsq_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rsq_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rsq_f32                      dst, src0
-    v_rsq_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rsq_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rsq_f64                      dst, src0
-    v_sin_f16                      dst, src0
-    v_sin_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sin_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sin_f32                      dst, src0
-    v_sin_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sin_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sqrt_f16                     dst, src0
-    v_sqrt_f16_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sqrt_f16_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sqrt_f32                     dst, src0
-    v_sqrt_f32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sqrt_f32_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sqrt_f64                     dst, src0
-    v_trunc_f16                    dst, src0
-    v_trunc_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_trunc_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_trunc_f32                    dst, src0
-    v_trunc_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_trunc_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_trunc_f64                    dst, src0
-
-VOP2
-===========================
-
-.. parsed-literal::
-
-    v_add_f16                      dst, src0, src1
-    v_add_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_add_f32                      dst, src0, src1
-    v_add_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_add_u16                      dst, src0, src1
-    v_add_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_u16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_add_u32                      dst0, dst1, src0, src1
-    v_add_u32_dpp                  dst0, dst1, src0, src1         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_u32_sdwa                 dst0, dst1, src0, src1         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_addc_u32                     dst0, dst1, src0, src1, src2
-    v_addc_u32_dpp                 dst0, dst1, src0, src1, src2   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_addc_u32_sdwa                dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_and_b32                      dst, src0, src1
-    v_and_b32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_and_b32_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_ashrrev_i16                  dst, src0, src1
-    v_ashrrev_i16_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ashrrev_i16_sdwa             dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_ashrrev_i32                  dst, src0, src1
-    v_ashrrev_i32_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ashrrev_i32_sdwa             dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cndmask_b32                  dst, src0, src1, src2
-    v_cndmask_b32_dpp              dst, src0, src1, src2          :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cndmask_b32_sdwa             dst, src0, src1, src2          :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_ldexp_f16                    dst, src0, src1
-    v_ldexp_f16_dpp                dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ldexp_f16_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshlrev_b16                  dst, src0, src1
-    v_lshlrev_b16_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshlrev_b16_sdwa             dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshlrev_b32                  dst, src0, src1
-    v_lshlrev_b32_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshlrev_b32_sdwa             dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshrrev_b16                  dst, src0, src1
-    v_lshrrev_b16_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshrrev_b16_sdwa             dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshrrev_b32                  dst, src0, src1
-    v_lshrrev_b32_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshrrev_b32_sdwa             dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mac_f16                      dst, src0, src1
-    v_mac_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mac_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mac_f32                      dst, src0, src1
-    v_mac_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mac_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_madak_f16                    dst, src0, src1, src2
-    v_madak_f32                    dst, src0, src1, src2
-    v_madmk_f16                    dst, src0, src1, src2
-    v_madmk_f32                    dst, src0, src1, src2
-    v_max_f16                      dst, src0, src1
-    v_max_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_f32                      dst, src0, src1
-    v_max_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_i16                      dst, src0, src1
-    v_max_i16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_i16_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_i32                      dst, src0, src1
-    v_max_i32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_i32_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_u16                      dst, src0, src1
-    v_max_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_u16_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_u32                      dst, src0, src1
-    v_max_u32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_u32_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_f16                      dst, src0, src1
-    v_min_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_f32                      dst, src0, src1
-    v_min_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_i16                      dst, src0, src1
-    v_min_i16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_i16_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_i32                      dst, src0, src1
-    v_min_i32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_i32_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_u16                      dst, src0, src1
-    v_min_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_u16_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_u32                      dst, src0, src1
-    v_min_u32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_u32_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_f16                      dst, src0, src1
-    v_mul_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_f32                      dst, src0, src1
-    v_mul_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_hi_i32_i24               dst, src0, src1
-    v_mul_hi_i32_i24_dpp           dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_hi_i32_i24_sdwa          dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_hi_u32_u24               dst, src0, src1
-    v_mul_hi_u32_u24_dpp           dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_hi_u32_u24_sdwa          dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_i32_i24                  dst, src0, src1
-    v_mul_i32_i24_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_i32_i24_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_legacy_f32               dst, src0, src1
-    v_mul_legacy_f32_dpp           dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_legacy_f32_sdwa          dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_lo_u16                   dst, src0, src1
-    v_mul_lo_u16_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_lo_u16_sdwa              dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_u32_u24                  dst, src0, src1
-    v_mul_u32_u24_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_u32_u24_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_or_b32                       dst, src0, src1
-    v_or_b32_dpp                   dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_or_b32_sdwa                  dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_f16                      dst, src0, src1
-    v_sub_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_f32                      dst, src0, src1
-    v_sub_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_u16                      dst, src0, src1
-    v_sub_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_u16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_u32                      dst0, dst1, src0, src1
-    v_sub_u32_dpp                  dst0, dst1, src0, src1         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_u32_sdwa                 dst0, dst1, src0, src1         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subb_u32                     dst0, dst1, src0, src1, src2
-    v_subb_u32_dpp                 dst0, dst1, src0, src1, src2   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subb_u32_sdwa                dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subbrev_u32                  dst0, dst1, src0, src1, src2
-    v_subbrev_u32_dpp              dst0, dst1, src0, src1, src2   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subbrev_u32_sdwa             dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_f16                   dst, src0, src1
-    v_subrev_f16_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_f32                   dst, src0, src1
-    v_subrev_f32_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_u16                   dst, src0, src1
-    v_subrev_u16_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_u16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_u32                   dst0, dst1, src0, src1
-    v_subrev_u32_dpp               dst0, dst1, src0, src1         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_u32_sdwa              dst0, dst1, src0, src1         :ref:`clamp<amdgpu_synid_clamp>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_xor_b32                      dst, src0, src1
-    v_xor_b32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_xor_b32_sdwa                 dst, src0, src1                :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-
-VOP3
-===========================
-
-.. parsed-literal::
-
-    v_add_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_add_u32_e64                  dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_addc_u32_e64                 dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_alignbit_b32                 dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_alignbyte_b32                dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_and_b32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashrrev_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashrrev_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashrrev_i64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bcnt_u32_b32                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bfe_i32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfe_u32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfi_b32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfm_b32                      dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bfrev_b32_e64                dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f16_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f32_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f64_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_clrexcp_e64                                                 :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f16_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f32_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f64_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f32_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f64_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f32_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f64_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f16_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f32_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f64_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cndmask_b32_e64              dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_cos_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cos_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubeid_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubema_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubesc_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubetc_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f16_f32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f16_i16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f16_u16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_f16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_f64_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_i32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_u32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte0_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte1_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte2_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte3_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_f32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_i32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_u32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_flr_i32_f32_e64          dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i16_f16_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i32_f32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i32_f64_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_off_f32_i4_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_i16_i32               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_u16_u32               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_u8_f32                dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pkaccum_u8_f32           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_i16_f32           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_u16_f32           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pkrtz_f16_f32            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_rpi_i32_f32_e64          dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u16_f16_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u32_f32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u32_f64_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f16                dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f32                dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f64                dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fmas_f32                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fmas_f64                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_scale_f32                dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_div_scale_f64                dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_exp_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_exp_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_exp_legacy_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ffbh_i32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ffbh_u32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ffbl_b32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f16                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f64                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i16_f16_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i32_f32_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i32_f64_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f16_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f64_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_mov_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p1_f32_e64            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p1ll_f16              dst, src0, src1                :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p1lv_f16              dst, src0, src1, src2          :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p2_f16                dst, src0, src1, src2          :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p2_f32_e64            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f32                    dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f64                    dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_lerp_u8                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_log_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_log_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_log_legacy_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_lshlrev_b16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshlrev_b32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshlrev_b64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshrrev_b16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshrrev_b32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshrrev_b64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mac_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mac_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_f16                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_f32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i16                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i32_i24                  dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i64_i32                  dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_legacy_f32               dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u16                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u32_u24                  dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u64_u32                  dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_max3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_max_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_i16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_max_i32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_max_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_max_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mbcnt_hi_u32_b32             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mbcnt_lo_u32_b32             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_med3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_med3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_med3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_i16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_min_i32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_min_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_min_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mov_b32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_mov_fed_b32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_movreld_b32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_movrels_b32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_movrelsd_b32_e64             dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_mqsad_pk_u16_u8              dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mqsad_u32_u8                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_msad_u8                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_i32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_i32_i24_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_u32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_u32_u24_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_i32_i24_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_legacy_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_lo_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_lo_u32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_u32_u24_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_nop_e64                                                     :ref:`omod<amdgpu_synid_omod>`
-    v_not_b32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_or_b32_e64                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_perm_b32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_qsad_pk_u16_u8               dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f64_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_iflag_f32_e64            dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_readlane_b32                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f64_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_hi_u8                    dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u16                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u8                       dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sin_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sin_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f16_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f32_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f64_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_sub_u32_e64                  dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_subb_u32_e64                 dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_subbrev_u32_e64              dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_u32_e64               dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_trig_preop_f64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_writelane_b32                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_xor_b32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-
-VOPC
-===========================
-
-.. parsed-literal::
-
-    v_cmp_class_f16                dst, src0, src1
-    v_cmp_class_f16_sdwa           dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_class_f32                dst, src0, src1
-    v_cmp_class_f32_sdwa           dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_class_f64                dst, src0, src1
-    v_cmp_eq_f16                   dst, src0, src1
-    v_cmp_eq_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_f32                   dst, src0, src1
-    v_cmp_eq_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_f64                   dst, src0, src1
-    v_cmp_eq_i16                   dst, src0, src1
-    v_cmp_eq_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_i32                   dst, src0, src1
-    v_cmp_eq_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_i64                   dst, src0, src1
-    v_cmp_eq_u16                   dst, src0, src1
-    v_cmp_eq_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_u32                   dst, src0, src1
-    v_cmp_eq_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_u64                   dst, src0, src1
-    v_cmp_f_f16                    dst, src0, src1
-    v_cmp_f_f16_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_f32                    dst, src0, src1
-    v_cmp_f_f32_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_f64                    dst, src0, src1
-    v_cmp_f_i16                    dst, src0, src1
-    v_cmp_f_i16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_i32                    dst, src0, src1
-    v_cmp_f_i32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_i64                    dst, src0, src1
-    v_cmp_f_u16                    dst, src0, src1
-    v_cmp_f_u16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_u32                    dst, src0, src1
-    v_cmp_f_u32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_u64                    dst, src0, src1
-    v_cmp_ge_f16                   dst, src0, src1
-    v_cmp_ge_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_f32                   dst, src0, src1
-    v_cmp_ge_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_f64                   dst, src0, src1
-    v_cmp_ge_i16                   dst, src0, src1
-    v_cmp_ge_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_i32                   dst, src0, src1
-    v_cmp_ge_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_i64                   dst, src0, src1
-    v_cmp_ge_u16                   dst, src0, src1
-    v_cmp_ge_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_u32                   dst, src0, src1
-    v_cmp_ge_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_u64                   dst, src0, src1
-    v_cmp_gt_f16                   dst, src0, src1
-    v_cmp_gt_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_f32                   dst, src0, src1
-    v_cmp_gt_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_f64                   dst, src0, src1
-    v_cmp_gt_i16                   dst, src0, src1
-    v_cmp_gt_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_i32                   dst, src0, src1
-    v_cmp_gt_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_i64                   dst, src0, src1
-    v_cmp_gt_u16                   dst, src0, src1
-    v_cmp_gt_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_u32                   dst, src0, src1
-    v_cmp_gt_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_u64                   dst, src0, src1
-    v_cmp_le_f16                   dst, src0, src1
-    v_cmp_le_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_f32                   dst, src0, src1
-    v_cmp_le_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_f64                   dst, src0, src1
-    v_cmp_le_i16                   dst, src0, src1
-    v_cmp_le_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_i32                   dst, src0, src1
-    v_cmp_le_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_i64                   dst, src0, src1
-    v_cmp_le_u16                   dst, src0, src1
-    v_cmp_le_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_u32                   dst, src0, src1
-    v_cmp_le_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_u64                   dst, src0, src1
-    v_cmp_lg_f16                   dst, src0, src1
-    v_cmp_lg_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lg_f32                   dst, src0, src1
-    v_cmp_lg_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lg_f64                   dst, src0, src1
-    v_cmp_lt_f16                   dst, src0, src1
-    v_cmp_lt_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_f32                   dst, src0, src1
-    v_cmp_lt_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_f64                   dst, src0, src1
-    v_cmp_lt_i16                   dst, src0, src1
-    v_cmp_lt_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_i32                   dst, src0, src1
-    v_cmp_lt_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_i64                   dst, src0, src1
-    v_cmp_lt_u16                   dst, src0, src1
-    v_cmp_lt_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_u32                   dst, src0, src1
-    v_cmp_lt_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_u64                   dst, src0, src1
-    v_cmp_ne_i16                   dst, src0, src1
-    v_cmp_ne_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_i32                   dst, src0, src1
-    v_cmp_ne_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_i64                   dst, src0, src1
-    v_cmp_ne_u16                   dst, src0, src1
-    v_cmp_ne_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_u32                   dst, src0, src1
-    v_cmp_ne_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_u64                   dst, src0, src1
-    v_cmp_neq_f16                  dst, src0, src1
-    v_cmp_neq_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_neq_f32                  dst, src0, src1
-    v_cmp_neq_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_neq_f64                  dst, src0, src1
-    v_cmp_nge_f16                  dst, src0, src1
-    v_cmp_nge_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nge_f32                  dst, src0, src1
-    v_cmp_nge_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nge_f64                  dst, src0, src1
-    v_cmp_ngt_f16                  dst, src0, src1
-    v_cmp_ngt_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ngt_f32                  dst, src0, src1
-    v_cmp_ngt_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ngt_f64                  dst, src0, src1
-    v_cmp_nle_f16                  dst, src0, src1
-    v_cmp_nle_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nle_f32                  dst, src0, src1
-    v_cmp_nle_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nle_f64                  dst, src0, src1
-    v_cmp_nlg_f16                  dst, src0, src1
-    v_cmp_nlg_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlg_f32                  dst, src0, src1
-    v_cmp_nlg_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlg_f64                  dst, src0, src1
-    v_cmp_nlt_f16                  dst, src0, src1
-    v_cmp_nlt_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlt_f32                  dst, src0, src1
-    v_cmp_nlt_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlt_f64                  dst, src0, src1
-    v_cmp_o_f16                    dst, src0, src1
-    v_cmp_o_f16_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_o_f32                    dst, src0, src1
-    v_cmp_o_f32_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_o_f64                    dst, src0, src1
-    v_cmp_t_i16                    dst, src0, src1
-    v_cmp_t_i16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_i32                    dst, src0, src1
-    v_cmp_t_i32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_i64                    dst, src0, src1
-    v_cmp_t_u16                    dst, src0, src1
-    v_cmp_t_u16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_u32                    dst, src0, src1
-    v_cmp_t_u32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_u64                    dst, src0, src1
-    v_cmp_tru_f16                  dst, src0, src1
-    v_cmp_tru_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_tru_f32                  dst, src0, src1
-    v_cmp_tru_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_tru_f64                  dst, src0, src1
-    v_cmp_u_f16                    dst, src0, src1
-    v_cmp_u_f16_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_u_f32                    dst, src0, src1
-    v_cmp_u_f32_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_u_f64                    dst, src0, src1
-    v_cmpx_class_f16               dst, src0, src1
-    v_cmpx_class_f16_sdwa          dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_class_f32               dst, src0, src1
-    v_cmpx_class_f32_sdwa          dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_class_f64               dst, src0, src1
-    v_cmpx_eq_f16                  dst, src0, src1
-    v_cmpx_eq_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_f32                  dst, src0, src1
-    v_cmpx_eq_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_f64                  dst, src0, src1
-    v_cmpx_eq_i16                  dst, src0, src1
-    v_cmpx_eq_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_i32                  dst, src0, src1
-    v_cmpx_eq_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_i64                  dst, src0, src1
-    v_cmpx_eq_u16                  dst, src0, src1
-    v_cmpx_eq_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_u32                  dst, src0, src1
-    v_cmpx_eq_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_u64                  dst, src0, src1
-    v_cmpx_f_f16                   dst, src0, src1
-    v_cmpx_f_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_f32                   dst, src0, src1
-    v_cmpx_f_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_f64                   dst, src0, src1
-    v_cmpx_f_i16                   dst, src0, src1
-    v_cmpx_f_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_i32                   dst, src0, src1
-    v_cmpx_f_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_i64                   dst, src0, src1
-    v_cmpx_f_u16                   dst, src0, src1
-    v_cmpx_f_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_u32                   dst, src0, src1
-    v_cmpx_f_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_u64                   dst, src0, src1
-    v_cmpx_ge_f16                  dst, src0, src1
-    v_cmpx_ge_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_f32                  dst, src0, src1
-    v_cmpx_ge_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_f64                  dst, src0, src1
-    v_cmpx_ge_i16                  dst, src0, src1
-    v_cmpx_ge_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_i32                  dst, src0, src1
-    v_cmpx_ge_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_i64                  dst, src0, src1
-    v_cmpx_ge_u16                  dst, src0, src1
-    v_cmpx_ge_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_u32                  dst, src0, src1
-    v_cmpx_ge_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_u64                  dst, src0, src1
-    v_cmpx_gt_f16                  dst, src0, src1
-    v_cmpx_gt_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_f32                  dst, src0, src1
-    v_cmpx_gt_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_f64                  dst, src0, src1
-    v_cmpx_gt_i16                  dst, src0, src1
-    v_cmpx_gt_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_i32                  dst, src0, src1
-    v_cmpx_gt_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_i64                  dst, src0, src1
-    v_cmpx_gt_u16                  dst, src0, src1
-    v_cmpx_gt_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_u32                  dst, src0, src1
-    v_cmpx_gt_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_u64                  dst, src0, src1
-    v_cmpx_le_f16                  dst, src0, src1
-    v_cmpx_le_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_f32                  dst, src0, src1
-    v_cmpx_le_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_f64                  dst, src0, src1
-    v_cmpx_le_i16                  dst, src0, src1
-    v_cmpx_le_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_i32                  dst, src0, src1
-    v_cmpx_le_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_i64                  dst, src0, src1
-    v_cmpx_le_u16                  dst, src0, src1
-    v_cmpx_le_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_u32                  dst, src0, src1
-    v_cmpx_le_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_u64                  dst, src0, src1
-    v_cmpx_lg_f16                  dst, src0, src1
-    v_cmpx_lg_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lg_f32                  dst, src0, src1
-    v_cmpx_lg_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lg_f64                  dst, src0, src1
-    v_cmpx_lt_f16                  dst, src0, src1
-    v_cmpx_lt_f16_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_f32                  dst, src0, src1
-    v_cmpx_lt_f32_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_f64                  dst, src0, src1
-    v_cmpx_lt_i16                  dst, src0, src1
-    v_cmpx_lt_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_i32                  dst, src0, src1
-    v_cmpx_lt_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_i64                  dst, src0, src1
-    v_cmpx_lt_u16                  dst, src0, src1
-    v_cmpx_lt_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_u32                  dst, src0, src1
-    v_cmpx_lt_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_u64                  dst, src0, src1
-    v_cmpx_ne_i16                  dst, src0, src1
-    v_cmpx_ne_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_i32                  dst, src0, src1
-    v_cmpx_ne_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_i64                  dst, src0, src1
-    v_cmpx_ne_u16                  dst, src0, src1
-    v_cmpx_ne_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_u32                  dst, src0, src1
-    v_cmpx_ne_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_u64                  dst, src0, src1
-    v_cmpx_neq_f16                 dst, src0, src1
-    v_cmpx_neq_f16_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_neq_f32                 dst, src0, src1
-    v_cmpx_neq_f32_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_neq_f64                 dst, src0, src1
-    v_cmpx_nge_f16                 dst, src0, src1
-    v_cmpx_nge_f16_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nge_f32                 dst, src0, src1
-    v_cmpx_nge_f32_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nge_f64                 dst, src0, src1
-    v_cmpx_ngt_f16                 dst, src0, src1
-    v_cmpx_ngt_f16_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ngt_f32                 dst, src0, src1
-    v_cmpx_ngt_f32_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ngt_f64                 dst, src0, src1
-    v_cmpx_nle_f16                 dst, src0, src1
-    v_cmpx_nle_f16_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nle_f32                 dst, src0, src1
-    v_cmpx_nle_f32_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nle_f64                 dst, src0, src1
-    v_cmpx_nlg_f16                 dst, src0, src1
-    v_cmpx_nlg_f16_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlg_f32                 dst, src0, src1
-    v_cmpx_nlg_f32_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlg_f64                 dst, src0, src1
-    v_cmpx_nlt_f16                 dst, src0, src1
-    v_cmpx_nlt_f16_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlt_f32                 dst, src0, src1
-    v_cmpx_nlt_f32_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlt_f64                 dst, src0, src1
-    v_cmpx_o_f16                   dst, src0, src1
-    v_cmpx_o_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_o_f32                   dst, src0, src1
-    v_cmpx_o_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_o_f64                   dst, src0, src1
-    v_cmpx_t_i16                   dst, src0, src1
-    v_cmpx_t_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_i32                   dst, src0, src1
-    v_cmpx_t_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_i64                   dst, src0, src1
-    v_cmpx_t_u16                   dst, src0, src1
-    v_cmpx_t_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_u32                   dst, src0, src1
-    v_cmpx_t_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_u64                   dst, src0, src1
-    v_cmpx_tru_f16                 dst, src0, src1
-    v_cmpx_tru_f16_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_tru_f32                 dst, src0, src1
-    v_cmpx_tru_f32_sdwa            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_tru_f64                 dst, src0, src1
-    v_cmpx_u_f16                   dst, src0, src1
-    v_cmpx_u_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_u_f32                   dst, src0, src1
-    v_cmpx_u_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_u_f64                   dst, src0, src1
diff --git a/docs/AMDGPUAsmGFX9.rst b/docs/AMDGPUAsmGFX9.rst
deleted file mode 100644
index 97c13f2..0000000
--- a/docs/AMDGPUAsmGFX9.rst
+++ /dev/null
@@ -1,1906 +0,0 @@
-..
-    **************************************************
-    *                                                *
-    *   Automatically generated file, do not edit!   *
-    *                                                *
-    **************************************************
-
-===========================
-Syntax of GFX9 Instructions
-===========================
-
-.. contents::
-  :local:
-
-
-DS
-===========================
-
-.. parsed-literal::
-
-    ds_add_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_add_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_b32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_b64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_rtn_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_rtn_b64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_src2_b32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_and_src2_b64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_append                      dst                            :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_bpermute_b32                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>`
-    ds_cmpst_b32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_b64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_f32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_f64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_b32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_b64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_f32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_cmpst_rtn_f64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_condxchg32_rtn_b64          dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_consume                     dst                            :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_dec_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_barrier                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_init                    src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_br                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_p                                                 :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_release_all                                       :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_gws_sema_v                                                 :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_inc_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_f64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_i32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_i64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_f64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_i32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_i64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_f64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_i32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_i64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_max_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_f32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_f64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_i32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_i64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_f32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_f64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_i32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_i64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_f32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_f64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_i32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_i64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_min_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_b32                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_b64                   src0, src1, src2               :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_rtn_b32               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_mskor_rtn_b64               dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_nop
-    ds_or_b32                      src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_b64                      src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_rtn_b32                  dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_rtn_b64                  dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_src2_b32                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_or_src2_b64                 src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_ordered_count               dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_permute_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>`
-    ds_read2_b32                   dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2_b64                   dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2st64_b32               dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read2st64_b64               dst, src0                      :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b128                   dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b32                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b64                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_b96                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i16                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i8                     dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i8_d16                 dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_i8_d16_hi              dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u16                    dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u16_d16                dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u16_d16_hi             dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u8                     dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u8_d16                 dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_read_u8_d16_hi              dst, src0                      :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_rtn_u32                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_rtn_u64                dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_src2_u32               src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_src2_u64               src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_u32                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_rsub_u64                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_rtn_u32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_rtn_u64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_src2_u32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_src2_u64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_u32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_sub_u64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_swizzle_b32                 dst, src0                      :ref:`sw_offset16<amdgpu_synid_sw_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrap_rtn_b32                dst, src0, src1, src2          :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2_b32                  src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2_b64                  src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2st64_b32              src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write2st64_b64              src0, src1, src2               :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b128                  src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b16                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b16_d16_hi            src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b32                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b64                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b8                    src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b8_d16_hi             src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_b96                   src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_src2_b32              src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_write_src2_b64              src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2_rtn_b32             dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2_rtn_b64             dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2st64_rtn_b32         dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg2st64_rtn_b64         dst, src0, src1, src2          :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`ds_offset8<amdgpu_synid_ds_offset8>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg_rtn_b32              dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_wrxchg_rtn_b64              dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_b32                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_b64                     src0, src1                     :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_rtn_b32                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_rtn_b64                 dst, src0, src1                :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_src2_b32                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-    ds_xor_src2_b64                src0                           :ref:`ds_offset16<amdgpu_synid_ds_offset16>` :ref:`gds<amdgpu_synid_gds>`
-
-EXP
-===========================
-
-.. parsed-literal::
-
-    exp                            dst, src0, src1, src2, src3    :ref:`done<amdgpu_synid_done>` :ref:`compr<amdgpu_synid_compr>` :ref:`vm<amdgpu_synid_vm>`
-
-FLAT
-===========================
-
-.. parsed-literal::
-
-    flat_atomic_add                dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_add_x2             dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_and                dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_and_x2             dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_cmpswap            dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_cmpswap_x2         dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_dec                dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_dec_x2             dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_inc                dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_inc_x2             dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_or                 dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_or_x2              dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smax               dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smax_x2            dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smin               dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_smin_x2            dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_sub                dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_sub_x2             dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_swap               dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_swap_x2            dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umax               dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umax_x2            dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umin               dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_umin_x2            dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_xor                dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_atomic_xor_x2             dst, src0, src1                :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dword                dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx2              dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx3              dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_dwordx4              dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sbyte                dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sbyte_d16            dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sbyte_d16_hi         dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_short_d16            dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_short_d16_hi         dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_sshort               dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ubyte                dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ubyte_d16            dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ubyte_d16_hi         dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_load_ushort               dst, src0                      :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_byte                src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_byte_d16_hi         src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dword               src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx2             src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx3             src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_dwordx4             src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_short               src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    flat_store_short_d16_hi        src0, src1                     :ref:`flat_offset12<amdgpu_synid_flat_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    global_atomic_add              dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_add_x2           dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_and              dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_and_x2           dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_cmpswap          dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_cmpswap_x2       dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_dec              dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_dec_x2           dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_inc              dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_inc_x2           dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_or               dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_or_x2            dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_smax             dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_smax_x2          dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_smin             dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_smin_x2          dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_sub              dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_sub_x2           dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_swap             dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_swap_x2          dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_umax             dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_umax_x2          dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_umin             dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_umin_x2          dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_xor              dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_atomic_xor_x2           dst, src0, src1, src2          :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_dword              dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_dwordx2            dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_dwordx3            dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_dwordx4            dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_sbyte              dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_sbyte_d16          dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_sbyte_d16_hi       dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_short_d16          dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_short_d16_hi       dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_sshort             dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_ubyte              dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_ubyte_d16          dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_ubyte_d16_hi       dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_load_ushort             dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_byte              src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_byte_d16_hi       src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_dword             src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_dwordx2           src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_dwordx3           src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_dwordx4           src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_short             src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    global_store_short_d16_hi      src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>`
-    scratch_load_dword             dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_dwordx2           dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_dwordx3           dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_dwordx4           dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_sbyte             dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_sbyte_d16         dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_sbyte_d16_hi      dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_short_d16         dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_short_d16_hi      dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_sshort            dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_ubyte             dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_ubyte_d16         dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_ubyte_d16_hi      dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_load_ushort            dst, src0, src1                :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_byte             src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_byte_d16_hi      src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_dword            src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_dwordx2          src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_dwordx3          src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_dwordx4          src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_short            src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    scratch_store_short_d16_hi     src0, src1, src2               :ref:`flat_offset13<amdgpu_synid_flat_offset13>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-
-MIMG
-===========================
-
-.. parsed-literal::
-
-    image_atomic_add               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_and               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_cmpswap           dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_dec               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_inc               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_or                dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_smax              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_smin              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_sub               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_swap              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_umax              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_umin              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_atomic_xor               dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_gather4                  dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_b                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_c_lz             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_cl               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_l                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_lz               dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_lz_o             dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_gather4_o                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_get_lod                  dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_get_resinfo              dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load                     dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_load_mip                 dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_load_mip_pck             dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_mip_pck_sgn         dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_pck                 dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_load_pck_sgn             dst, src0, src1                :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_sample                   dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_b                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_c_lz              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_cl                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_l                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_lz                dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_lz_o              dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_sample_o                 dst, src0, src1, src2          :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_store                    src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_store_mip                src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>` :ref:`d16<amdgpu_synid_d16>`
-    image_store_mip_pck            src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-    image_store_pck                src0, src1, src2               :ref:`dmask<amdgpu_synid_dmask>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`da<amdgpu_synid_da>`
-
-MUBUF
-===========================
-
-.. parsed-literal::
-
-    buffer_atomic_add              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_add_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_and              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_and_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_cmpswap          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_cmpswap_x2       dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_dec              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_dec_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_inc              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_inc_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_or               dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_or_x2            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smax             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smax_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smin             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_smin_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_sub              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_sub_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_swap             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_swap_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umax             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umax_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umin             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_umin_x2          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_xor              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_atomic_xor_x2           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dword              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_dwordx2            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dwordx3            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_dwordx4            dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_hi_x    dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_x       dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_xy      dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_xyz     dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_d16_xyzw    dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_x           dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_format_xy          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_xyz         dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_format_xyzw        dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_sbyte              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_sbyte_d16          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_sbyte_d16_hi       dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_short_d16          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_short_d16_hi       dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_sshort             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_ubyte              dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_load_ubyte_d16          dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_ubyte_d16_hi       dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_load_ushort             dst, src0, src1, src2          :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_store_byte              src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_byte_d16_hi       src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dword             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx2           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx3           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_dwordx4           src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_hi_x   src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_x      src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_xy     src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_xyz    src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_d16_xyzw   src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_x          src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xy         src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xyz        src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_format_xyzw       src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_lds_dword         src0, src1                     :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`lds<amdgpu_synid_lds>`
-    buffer_store_short             src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_store_short_d16_hi      src0, src1, src2, src3         :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`buf_offset12<amdgpu_synid_buf_offset12>` :ref:`glc<amdgpu_synid_glc>` :ref:`slc<amdgpu_synid_slc>`
-    buffer_wbinvl1
-    buffer_wbinvl1_vol
-
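-The fragment below ties a few of these buffer forms together. It is an
-illustrative sketch only: the registers, the buffer resource assumed to
-sit in s[8:11], and the cache hints are not taken from this reference.
-
-.. parsed-literal::
-
-    // Load a dword addressed by v0 from the buffer resource in s[8:11],
-    // wait for the result, then atomically swap v2 with that location.
-    buffer_load_dword v1, v0, s[8:11], 0 offen glc
-    s_waitcnt vmcnt(0)
-    buffer_atomic_swap v2, v0, s[8:11], 0 offen glc
-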
-SMEM
-===========================
-
-.. parsed-literal::
-
-    s_atc_probe                    src0, src1, src2
-    s_atc_probe_buffer             src0, src1, src2
-    s_atomic_add                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_add_x2                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_and                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_and_x2                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_cmpswap               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_cmpswap_x2            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_dec                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_dec_x2                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_inc                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_inc_x2                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_or                    dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_or_x2                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_smax                  dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_smax_x2               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_smin                  dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_smin_x2               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_sub                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_sub_x2                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_swap                  dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_swap_x2               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_umax                  dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_umax_x2               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_umin                  dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_umin_x2               dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_xor                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_atomic_xor_x2                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_add            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_add_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_and            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_and_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_cmpswap        dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_cmpswap_x2     dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_dec            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_dec_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_inc            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_inc_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_or             dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_or_x2          dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_smax           dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_smax_x2        dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_smin           dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_smin_x2        dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_sub            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_sub_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_swap           dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_swap_x2        dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_umax           dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_umax_x2        dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_umin           dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_umin_x2        dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_xor            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_atomic_xor_x2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dword            dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx16         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx2          dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx4          dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_load_dwordx8          dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_store_dword           src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_store_dwordx2         src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_buffer_store_dwordx4         src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_dcache_discard               src0, src1
-    s_dcache_discard_x2            src0, src1
-    s_dcache_inv
-    s_dcache_inv_vol
-    s_dcache_wb
-    s_dcache_wb_vol
-    s_load_dword                   dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx16                dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx2                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx4                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_load_dwordx8                 dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_memrealtime                  dst
-    s_memtime                      dst
-    s_scratch_load_dword           dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_scratch_load_dwordx2         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_scratch_load_dwordx4         dst, src0, src1                :ref:`glc<amdgpu_synid_glc>`
-    s_scratch_store_dword          src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_scratch_store_dwordx2        src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_scratch_store_dwordx4        src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_store_dword                  src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_store_dwordx2                src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-    s_store_dwordx4                src0, src1, src2               :ref:`glc<amdgpu_synid_glc>`
-
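-As a hypothetical illustration (the base registers and offsets are
-assumed, not taken from this reference), scalar memory reads look as
-follows:
-
-.. parsed-literal::
-
-    // Load two dwords from the address in s[4:5] plus a byte offset,
-    // and one dword through a buffer resource assumed in s[8:11].
-    s_load_dwordx2 s[0:1], s[4:5], 0x10
-    s_buffer_load_dword s2, s[8:11], 0x0
-    s_waitcnt lgkmcnt(0)          // wait for the scalar loads to complete
-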
-SOP1
-===========================
-
-.. parsed-literal::
-
-    s_abs_i32                      dst, src0
-    s_and_saveexec_b64             dst, src0
-    s_andn1_saveexec_b64           dst, src0
-    s_andn1_wrexec_b64             dst, src0
-    s_andn2_saveexec_b64           dst, src0
-    s_andn2_wrexec_b64             dst, src0
-    s_bcnt0_i32_b32                dst, src0
-    s_bcnt0_i32_b64                dst, src0
-    s_bcnt1_i32_b32                dst, src0
-    s_bcnt1_i32_b64                dst, src0
-    s_bitreplicate_b64_b32         dst, src0
-    s_bitset0_b32                  dst, src0
-    s_bitset0_b64                  dst, src0
-    s_bitset1_b32                  dst, src0
-    s_bitset1_b64                  dst, src0
-    s_brev_b32                     dst, src0
-    s_brev_b64                     dst, src0
-    s_cbranch_join                 src0
-    s_cmov_b32                     dst, src0
-    s_cmov_b64                     dst, src0
-    s_ff0_i32_b32                  dst, src0
-    s_ff0_i32_b64                  dst, src0
-    s_ff1_i32_b32                  dst, src0
-    s_ff1_i32_b64                  dst, src0
-    s_flbit_i32                    dst, src0
-    s_flbit_i32_b32                dst, src0
-    s_flbit_i32_b64                dst, src0
-    s_flbit_i32_i64                dst, src0
-    s_getpc_b64                    dst
-    s_mov_b32                      dst, src0
-    s_mov_b64                      dst, src0
-    s_mov_fed_b32                  dst, src0
-    s_movreld_b32                  dst, src0
-    s_movreld_b64                  dst, src0
-    s_movrels_b32                  dst, src0
-    s_movrels_b64                  dst, src0
-    s_nand_saveexec_b64            dst, src0
-    s_nor_saveexec_b64             dst, src0
-    s_not_b32                      dst, src0
-    s_not_b64                      dst, src0
-    s_or_saveexec_b64              dst, src0
-    s_orn1_saveexec_b64            dst, src0
-    s_orn2_saveexec_b64            dst, src0
-    s_quadmask_b32                 dst, src0
-    s_quadmask_b64                 dst, src0
-    s_rfe_b64                      src0
-    s_set_gpr_idx_idx              src0
-    s_setpc_b64                    src0
-    s_sext_i32_i16                 dst, src0
-    s_sext_i32_i8                  dst, src0
-    s_swappc_b64                   dst, src0
-    s_wqm_b32                      dst, src0
-    s_wqm_b64                      dst, src0
-    s_xnor_saveexec_b64            dst, src0
-    s_xor_saveexec_b64             dst, src0
-
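-A common SOP1 idiom is saving and restoring the execution mask; the
-sketch below assumes registers for illustration:
-
-.. parsed-literal::
-
-    s_and_saveexec_b64 s[2:3], vcc // narrow exec to vcc, keep the old mask
-    // ... code for the active lanes ...
-    s_mov_b64 exec, s[2:3]         // restore the full mask
-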
-SOP2
-===========================
-
-.. parsed-literal::
-
-    s_absdiff_i32                  dst, src0, src1
-    s_add_i32                      dst, src0, src1
-    s_add_u32                      dst, src0, src1
-    s_addc_u32                     dst, src0, src1
-    s_and_b32                      dst, src0, src1
-    s_and_b64                      dst, src0, src1
-    s_andn2_b32                    dst, src0, src1
-    s_andn2_b64                    dst, src0, src1
-    s_ashr_i32                     dst, src0, src1
-    s_ashr_i64                     dst, src0, src1
-    s_bfe_i32                      dst, src0, src1
-    s_bfe_i64                      dst, src0, src1
-    s_bfe_u32                      dst, src0, src1
-    s_bfe_u64                      dst, src0, src1
-    s_bfm_b32                      dst, src0, src1
-    s_bfm_b64                      dst, src0, src1
-    s_cbranch_g_fork               src0, src1
-    s_cselect_b32                  dst, src0, src1
-    s_cselect_b64                  dst, src0, src1
-    s_lshl1_add_u32                dst, src0, src1
-    s_lshl2_add_u32                dst, src0, src1
-    s_lshl3_add_u32                dst, src0, src1
-    s_lshl4_add_u32                dst, src0, src1
-    s_lshl_b32                     dst, src0, src1
-    s_lshl_b64                     dst, src0, src1
-    s_lshr_b32                     dst, src0, src1
-    s_lshr_b64                     dst, src0, src1
-    s_max_i32                      dst, src0, src1
-    s_max_u32                      dst, src0, src1
-    s_min_i32                      dst, src0, src1
-    s_min_u32                      dst, src0, src1
-    s_mul_hi_i32                   dst, src0, src1
-    s_mul_hi_u32                   dst, src0, src1
-    s_mul_i32                      dst, src0, src1
-    s_nand_b32                     dst, src0, src1
-    s_nand_b64                     dst, src0, src1
-    s_nor_b32                      dst, src0, src1
-    s_nor_b64                      dst, src0, src1
-    s_or_b32                       dst, src0, src1
-    s_or_b64                       dst, src0, src1
-    s_orn2_b32                     dst, src0, src1
-    s_orn2_b64                     dst, src0, src1
-    s_pack_hh_b32_b16              dst, src0, src1
-    s_pack_lh_b32_b16              dst, src0, src1
-    s_pack_ll_b32_b16              dst, src0, src1
-    s_rfe_restore_b64              src0, src1
-    s_sub_i32                      dst, src0, src1
-    s_sub_u32                      dst, src0, src1
-    s_subb_u32                     dst, src0, src1
-    s_xnor_b32                     dst, src0, src1
-    s_xnor_b64                     dst, src0, src1
-    s_xor_b32                      dst, src0, src1
-    s_xor_b64                      dst, src0, src1
-
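-For example (registers assumed for illustration), a 64-bit addition can
-be composed from two 32-bit SOP2 operations chained through SCC:
-
-.. parsed-literal::
-
-    s_add_u32  s0, s2, s4         // low half: SCC receives the carry-out
-    s_addc_u32 s1, s3, s5         // high half: consumes SCC as carry-in
-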
-SOPC
-===========================
-
-.. parsed-literal::
-
-    s_bitcmp0_b32                  src0, src1
-    s_bitcmp0_b64                  src0, src1
-    s_bitcmp1_b32                  src0, src1
-    s_bitcmp1_b64                  src0, src1
-    s_cmp_eq_i32                   src0, src1
-    s_cmp_eq_u32                   src0, src1
-    s_cmp_eq_u64                   src0, src1
-    s_cmp_ge_i32                   src0, src1
-    s_cmp_ge_u32                   src0, src1
-    s_cmp_gt_i32                   src0, src1
-    s_cmp_gt_u32                   src0, src1
-    s_cmp_le_i32                   src0, src1
-    s_cmp_le_u32                   src0, src1
-    s_cmp_lg_i32                   src0, src1
-    s_cmp_lg_u32                   src0, src1
-    s_cmp_lg_u64                   src0, src1
-    s_cmp_lt_i32                   src0, src1
-    s_cmp_lt_u32                   src0, src1
-    s_set_gpr_idx_on               src0, src1
-    s_setvskip                     src0, src1
-
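-SOPC instructions write only SCC, which a following SOPP branch can
-test; a minimal sketch (the label and registers are assumed):
-
-.. parsed-literal::
-
-    s_cmp_eq_u32 s0, s1           // SCC = (s0 == s1)
-    s_cbranch_scc1 BB_equal       // branch taken when SCC is set
-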
-SOPK
-===========================
-
-.. parsed-literal::
-
-    s_addk_i32                     dst, src0
-    s_call_b64                     dst, src0
-    s_cbranch_i_fork               src0, src1
-    s_cmovk_i32                    dst, src0
-    s_cmpk_eq_i32                  src0, src1
-    s_cmpk_eq_u32                  src0, src1
-    s_cmpk_ge_i32                  src0, src1
-    s_cmpk_ge_u32                  src0, src1
-    s_cmpk_gt_i32                  src0, src1
-    s_cmpk_gt_u32                  src0, src1
-    s_cmpk_le_i32                  src0, src1
-    s_cmpk_le_u32                  src0, src1
-    s_cmpk_lg_i32                  src0, src1
-    s_cmpk_lg_u32                  src0, src1
-    s_cmpk_lt_i32                  src0, src1
-    s_cmpk_lt_u32                  src0, src1
-    s_getreg_b32                   dst, src0
-    s_movk_i32                     dst, src0
-    s_mulk_i32                     dst, src0
-    s_setreg_b32                   dst, src0
-    s_setreg_imm32_b32             dst, src0
-
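-SOPK forms carry a 16-bit signed immediate inside the instruction word;
-a minimal sketch with assumed values:
-
-.. parsed-literal::
-
-    s_movk_i32    s0, 0x40        // s0 = sign-extended immediate (64)
-    s_cmpk_lt_i32 s0, 0x100      // SCC = (s0 < 256), signed compare
-    s_addk_i32    s0, 0x1         // s0 += 1
-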
-SOPP
-===========================
-
-.. parsed-literal::
-
-    s_barrier
-    s_branch                       src0
-    s_cbranch_cdbgsys              src0
-    s_cbranch_cdbgsys_and_user     src0
-    s_cbranch_cdbgsys_or_user      src0
-    s_cbranch_cdbguser             src0
-    s_cbranch_execnz               src0
-    s_cbranch_execz                src0
-    s_cbranch_scc0                 src0
-    s_cbranch_scc1                 src0
-    s_cbranch_vccnz                src0
-    s_cbranch_vccz                 src0
-    s_decperflevel                 src0
-    s_endpgm
-    s_endpgm_ordered_ps_done
-    s_endpgm_saved
-    s_icache_inv
-    s_incperflevel                 src0
-    s_nop                          src0
-    s_sendmsg                      src0
-    s_sendmsghalt                  src0
-    s_set_gpr_idx_mode             src0
-    s_set_gpr_idx_off
-    s_sethalt                      src0
-    s_setkill                      src0
-    s_setprio                      src0
-    s_sleep                        src0
-    s_trap                         src0
-    s_ttracedata
-    s_waitcnt                      src0
-    s_wakeup
-
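-A typical end-of-kernel sequence is built entirely from SOPP
-instructions (the counters waited on are illustrative):
-
-.. parsed-literal::
-
-    s_waitcnt vmcnt(0) lgkmcnt(0) // drain outstanding memory operations
-    s_barrier                     // synchronize the workgroup
-    s_endpgm                      // terminate the wavefront
-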
-VINTRP
-===========================
-
-.. parsed-literal::
-
-    v_interp_mov_f32               dst, src0, src1
-    v_interp_p1_f32                dst, src0, src1
-    v_interp_p2_f32                dst, src0, src1
-
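-Attribute interpolation is a two-pass sequence; in the sketch below the
-attribute and the barycentric registers v0/v1 are assumptions for
-illustration:
-
-.. parsed-literal::
-
-    v_interp_p1_f32 v2, v0, attr0.x // first pass with the i coordinate
-    v_interp_p2_f32 v2, v1, attr0.x // second pass with the j coordinate
-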
-VOP1
-===========================
-
-.. parsed-literal::
-
-    v_bfrev_b32                    dst, src0
-    v_bfrev_b32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_bfrev_b32_sdwa               dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ceil_f16                     dst, src0
-    v_ceil_f16_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ceil_f16_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ceil_f32                     dst, src0
-    v_ceil_f32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ceil_f32_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ceil_f64                     dst, src0
-    v_clrexcp
-    v_cos_f16                      dst, src0
-    v_cos_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cos_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cos_f32                      dst, src0
-    v_cos_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cos_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f16_f32                  dst, src0
-    v_cvt_f16_f32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f16_f32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f16_i16                  dst, src0
-    v_cvt_f16_i16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f16_i16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f16_u16                  dst, src0
-    v_cvt_f16_u16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f16_u16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_f16                  dst, src0
-    v_cvt_f32_f16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_f16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_f64                  dst, src0
-    v_cvt_f32_i32                  dst, src0
-    v_cvt_f32_i32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_i32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_u32                  dst, src0
-    v_cvt_f32_u32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_u32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte0               dst, src0
-    v_cvt_f32_ubyte0_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte0_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte1               dst, src0
-    v_cvt_f32_ubyte1_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte1_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte2               dst, src0
-    v_cvt_f32_ubyte2_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte2_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f32_ubyte3               dst, src0
-    v_cvt_f32_ubyte3_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_f32_ubyte3_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_f64_f32                  dst, src0
-    v_cvt_f64_i32                  dst, src0
-    v_cvt_f64_u32                  dst, src0
-    v_cvt_flr_i32_f32              dst, src0
-    v_cvt_flr_i32_f32_dpp          dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_flr_i32_f32_sdwa         dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_i16_f16                  dst, src0
-    v_cvt_i16_f16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_i16_f16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_i32_f32                  dst, src0
-    v_cvt_i32_f32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_i32_f32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_i32_f64                  dst, src0
-    v_cvt_norm_i16_f16             dst, src0
-    v_cvt_norm_i16_f16_dpp         dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_norm_i16_f16_sdwa        dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_norm_u16_f16             dst, src0
-    v_cvt_norm_u16_f16_dpp         dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_norm_u16_f16_sdwa        dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_off_f32_i4               dst, src0
-    v_cvt_off_f32_i4_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_off_f32_i4_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_rpi_i32_f32              dst, src0
-    v_cvt_rpi_i32_f32_dpp          dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_rpi_i32_f32_sdwa         dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_u16_f16                  dst, src0
-    v_cvt_u16_f16_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_u16_f16_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_u32_f32                  dst, src0
-    v_cvt_u32_f32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cvt_u32_f32_sdwa             dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_cvt_u32_f64                  dst, src0
-    v_exp_f16                      dst, src0
-    v_exp_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_exp_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_exp_f32                      dst, src0
-    v_exp_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_exp_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_exp_legacy_f32               dst, src0
-    v_exp_legacy_f32_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_exp_legacy_f32_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ffbh_i32                     dst, src0
-    v_ffbh_i32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ffbh_i32_sdwa                dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ffbh_u32                     dst, src0
-    v_ffbh_u32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ffbh_u32_sdwa                dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_ffbl_b32                     dst, src0
-    v_ffbl_b32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ffbl_b32_sdwa                dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_floor_f16                    dst, src0
-    v_floor_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_floor_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_floor_f32                    dst, src0
-    v_floor_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_floor_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_floor_f64                    dst, src0
-    v_fract_f16                    dst, src0
-    v_fract_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_fract_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_fract_f32                    dst, src0
-    v_fract_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_fract_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_fract_f64                    dst, src0
-    v_frexp_exp_i16_f16            dst, src0
-    v_frexp_exp_i16_f16_dpp        dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_exp_i16_f16_sdwa       dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_exp_i32_f32            dst, src0
-    v_frexp_exp_i32_f32_dpp        dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_exp_i32_f32_sdwa       dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_exp_i32_f64            dst, src0
-    v_frexp_mant_f16               dst, src0
-    v_frexp_mant_f16_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_mant_f16_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_mant_f32               dst, src0
-    v_frexp_mant_f32_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_frexp_mant_f32_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_frexp_mant_f64               dst, src0
-    v_log_f16                      dst, src0
-    v_log_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_log_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_log_f32                      dst, src0
-    v_log_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_log_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_log_legacy_f32               dst, src0
-    v_log_legacy_f32_dpp           dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_log_legacy_f32_sdwa          dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_mov_b32                      dst, src0
-    v_mov_b32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mov_b32_sdwa                 dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_mov_fed_b32                  dst, src0
-    v_mov_fed_b32_dpp              dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mov_fed_b32_sdwa             dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_nop
-    v_not_b32                      dst, src0
-    v_not_b32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_not_b32_sdwa                 dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rcp_f16                      dst, src0
-    v_rcp_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rcp_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rcp_f32                      dst, src0
-    v_rcp_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rcp_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rcp_f64                      dst, src0
-    v_rcp_iflag_f32                dst, src0
-    v_rcp_iflag_f32_dpp            dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rcp_iflag_f32_sdwa           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_readfirstlane_b32            dst, src0
-    v_rndne_f16                    dst, src0
-    v_rndne_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rndne_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rndne_f32                    dst, src0
-    v_rndne_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rndne_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rndne_f64                    dst, src0
-    v_rsq_f16                      dst, src0
-    v_rsq_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rsq_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rsq_f32                      dst, src0
-    v_rsq_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_rsq_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_rsq_f64                      dst, src0
-    v_sat_pk_u8_i16                dst, src0
-    v_sat_pk_u8_i16_dpp            dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sat_pk_u8_i16_sdwa           dst, src0                      :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_screen_partition_4se_b32     dst, src0
-    v_screen_partition_4se_b32_dpp dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_screen_partition_4se_b32_sdwa dst, src0                     :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sin_f16                      dst, src0
-    v_sin_f16_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sin_f16_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sin_f32                      dst, src0
-    v_sin_f32_dpp                  dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sin_f32_sdwa                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sqrt_f16                     dst, src0
-    v_sqrt_f16_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sqrt_f16_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sqrt_f32                     dst, src0
-    v_sqrt_f32_dpp                 dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sqrt_f32_sdwa                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_sqrt_f64                     dst, src0
-    v_swap_b32                     dst, src0
-    v_trunc_f16                    dst, src0
-    v_trunc_f16_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_trunc_f16_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_trunc_f32                    dst, src0
-    v_trunc_f32_dpp                dst, src0                      :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_trunc_f32_sdwa               dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>`
-    v_trunc_f64                    dst, src0
-
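-Illustrative only (not part of the generated listing above): a minimal
-sketch of how the plain, DPP, and SDWA forms of a VOP1 opcode are written
-in assembler source, with hypothetical register and modifier choices:
-
-.. parsed-literal::
-
-    v_sqrt_f32                     v0, v1
-    v_sqrt_f32_dpp                 v0, v1 row_shl:1 row_mask:0xf bank_mask:0xf
-    v_sqrt_f32_sdwa                v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
-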
-VOP2
-===========================
-
-.. parsed-literal::
-
-    v_add_co_u32                   dst0, dst1, src0, src1
-    v_add_co_u32_dpp               dst0, dst1, src0, src1         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_co_u32_sdwa              dst0, dst1, src0, src1         :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_add_f16                      dst, src0, src1
-    v_add_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_add_f32                      dst, src0, src1
-    v_add_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_add_u16                      dst, src0, src1
-    v_add_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_u16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_add_u32                      dst, src0, src1
-    v_add_u32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_add_u32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_addc_co_u32                  dst0, dst1, src0, src1, src2
-    v_addc_co_u32_dpp              dst0, dst1, src0, src1, src2   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_addc_co_u32_sdwa             dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_and_b32                      dst, src0, src1
-    v_and_b32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_and_b32_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_ashrrev_i16                  dst, src0, src1
-    v_ashrrev_i16_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ashrrev_i16_sdwa             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_ashrrev_i32                  dst, src0, src1
-    v_ashrrev_i32_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ashrrev_i32_sdwa             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cndmask_b32                  dst, src0, src1, src2
-    v_cndmask_b32_dpp              dst, src0, src1, src2          :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_cndmask_b32_sdwa             dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_ldexp_f16                    dst, src0, src1
-    v_ldexp_f16_dpp                dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_ldexp_f16_sdwa               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshlrev_b16                  dst, src0, src1
-    v_lshlrev_b16_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshlrev_b16_sdwa             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshlrev_b32                  dst, src0, src1
-    v_lshlrev_b32_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshlrev_b32_sdwa             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshrrev_b16                  dst, src0, src1
-    v_lshrrev_b16_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshrrev_b16_sdwa             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_lshrrev_b32                  dst, src0, src1
-    v_lshrrev_b32_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_lshrrev_b32_sdwa             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mac_f16                      dst, src0, src1
-    v_mac_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mac_f32                      dst, src0, src1
-    v_mac_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_madak_f16                    dst, src0, src1, src2
-    v_madak_f32                    dst, src0, src1, src2
-    v_madmk_f16                    dst, src0, src1, src2
-    v_madmk_f32                    dst, src0, src1, src2
-    v_max_f16                      dst, src0, src1
-    v_max_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_f32                      dst, src0, src1
-    v_max_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_i16                      dst, src0, src1
-    v_max_i16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_i16_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_i32                      dst, src0, src1
-    v_max_i32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_i32_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_u16                      dst, src0, src1
-    v_max_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_u16_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_max_u32                      dst, src0, src1
-    v_max_u32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_max_u32_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_f16                      dst, src0, src1
-    v_min_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_f32                      dst, src0, src1
-    v_min_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_i16                      dst, src0, src1
-    v_min_i16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_i16_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_i32                      dst, src0, src1
-    v_min_i32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_i32_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_u16                      dst, src0, src1
-    v_min_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_u16_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_min_u32                      dst, src0, src1
-    v_min_u32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_min_u32_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_f16                      dst, src0, src1
-    v_mul_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_f32                      dst, src0, src1
-    v_mul_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_hi_i32_i24               dst, src0, src1
-    v_mul_hi_i32_i24_dpp           dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_hi_i32_i24_sdwa          dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_hi_u32_u24               dst, src0, src1
-    v_mul_hi_u32_u24_dpp           dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_hi_u32_u24_sdwa          dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_i32_i24                  dst, src0, src1
-    v_mul_i32_i24_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_i32_i24_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_legacy_f32               dst, src0, src1
-    v_mul_legacy_f32_dpp           dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_legacy_f32_sdwa          dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_lo_u16                   dst, src0, src1
-    v_mul_lo_u16_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_lo_u16_sdwa              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_mul_u32_u24                  dst, src0, src1
-    v_mul_u32_u24_dpp              dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_mul_u32_u24_sdwa             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_or_b32                       dst, src0, src1
-    v_or_b32_dpp                   dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_or_b32_sdwa                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_co_u32                   dst0, dst1, src0, src1
-    v_sub_co_u32_dpp               dst0, dst1, src0, src1         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_co_u32_sdwa              dst0, dst1, src0, src1         :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_f16                      dst, src0, src1
-    v_sub_f16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_f16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_f32                      dst, src0, src1
-    v_sub_f32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_f32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_u16                      dst, src0, src1
-    v_sub_u16_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_u16_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_sub_u32                      dst, src0, src1
-    v_sub_u32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_sub_u32_sdwa                 dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subb_co_u32                  dst0, dst1, src0, src1, src2
-    v_subb_co_u32_dpp              dst0, dst1, src0, src1, src2   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subb_co_u32_sdwa             dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subbrev_co_u32               dst0, dst1, src0, src1, src2
-    v_subbrev_co_u32_dpp           dst0, dst1, src0, src1, src2   :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subbrev_co_u32_sdwa          dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_co_u32                dst0, dst1, src0, src1
-    v_subrev_co_u32_dpp            dst0, dst1, src0, src1         :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_co_u32_sdwa           dst0, dst1, src0, src1         :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_f16                   dst, src0, src1
-    v_subrev_f16_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_f16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_f32                   dst, src0, src1
-    v_subrev_f32_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_f32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_u16                   dst, src0, src1
-    v_subrev_u16_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_u16_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_subrev_u32                   dst, src0, src1
-    v_subrev_u32_dpp               dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_subrev_u32_sdwa              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_xor_b32                      dst, src0, src1
-    v_xor_b32_dpp                  dst, src0, src1                :ref:`dpp_ctrl<amdgpu_synid_dpp_ctrl>` :ref:`row_mask<amdgpu_synid_row_mask>` :ref:`bank_mask<amdgpu_synid_bank_mask>` :ref:`bound_ctrl<amdgpu_synid_bound_ctrl>`
-    v_xor_b32_sdwa                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>` :ref:`dst_sel<amdgpu_synid_dst_sel>` :ref:`dst_unused<amdgpu_synid_dst_unused>` :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-
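-Illustrative only (not part of the generated listing above): a minimal
-VOP2 usage sketch with hypothetical registers; for the ``*_co_*`` opcodes
-the extra ``dst1``/``src2`` operands carry the borrow/carry bits in VCC
-or an SGPR pair:
-
-.. parsed-literal::
-
-    v_add_f32                      v0, v1, v2
-    v_add_f32_dpp                  v0, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf
-    v_add_co_u32                   v0, vcc, v1, v2
-    v_addc_co_u32                  v0, vcc, v1, v2, vcc
-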
-VOP3
-===========================
-
-.. parsed-literal::
-
-    v_add3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_add_co_u32_e64               dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_add_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_i16                      dst, src0, src1                :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_add_i32                      dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_add_lshl_u32                 dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_add_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_add_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_addc_co_u32_e64              dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_alignbit_b32                 dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_alignbyte_b32                dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_and_b32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_and_or_b32                   dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_ashrrev_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashrrev_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_ashrrev_i64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bcnt_u32_b32                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bfe_i32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfe_u32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfi_b32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_bfm_b32                      dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_bfrev_b32_e64                dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f16_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f32_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ceil_f64_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_clrexcp_e64                                                 :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f16_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f32_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_class_f64_e64            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_eq_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f32_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_f64_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_i64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_f_u64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ge_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_gt_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_le_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lg_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_lt_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ne_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_neq_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nge_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_ngt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nle_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlg_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_nlt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f32_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_o_f64_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_i64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u16_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u32_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_t_u64_e64                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_tru_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f32_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmp_u_f64_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f16_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f32_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_class_f64_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_eq_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_f_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ge_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_gt_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_le_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lg_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f16_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f32_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_f64_e64              dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_lt_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_i64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ne_u64_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_neq_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nge_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_ngt_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nle_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlg_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_nlt_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_o_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_i64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_t_u64_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f16_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f32_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_tru_f64_e64             dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cmpx_u_f64_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cndmask_b32_e64              dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_cos_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cos_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubeid_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubema_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubesc_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cubetc_f32                   dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f16_f32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f16_i16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f16_u16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_f16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_f64_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_i32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_u32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte0_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte1_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte2_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f32_ubyte3_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_f32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_i32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_f64_u32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_flr_i32_f32_e64          dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i16_f16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i32_f32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_i32_f64_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_norm_i16_f16_e64         dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_norm_u16_f16_e64         dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_off_f32_i4_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_i16_i32               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_u16_u32               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pk_u8_f32                dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pkaccum_u8_f32           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_i16_f16           dst, src0, src1                :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_i16_f32           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_u16_f16           dst, src0, src1                :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pknorm_u16_f32           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_pkrtz_f16_f32            dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_rpi_i32_f32_e64          dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u16_f16_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u32_f32_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_cvt_u32_f64_e64              dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f16                dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f32                dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_f64                dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fixup_legacy_f16         dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fmas_f32                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_fmas_f64                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_div_scale_f32                dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_div_scale_f64                dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_exp_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_exp_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_exp_legacy_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ffbh_i32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ffbh_u32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_ffbl_b32_e64                 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_floor_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f16                      dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_f64                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fma_legacy_f16               dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_fract_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i16_f16_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i32_f32_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_exp_i32_f64_e64        dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f16_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_frexp_mant_f64_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_mov_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p1_f32_e64            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p1ll_f16              dst, src0, src1                :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p1lv_f16              dst, src0, src1, src2          :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p2_f16                dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p2_f32_e64            dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_interp_p2_legacy_f16         dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`high<amdgpu_synid_high>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f16_e64                dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f32                    dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_ldexp_f64                    dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_lerp_u8                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_log_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_log_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_log_legacy_f32_e64           dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_lshl_add_u32                 dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_lshl_or_b32                  dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_lshlrev_b16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshlrev_b32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshlrev_b64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshrrev_b16_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshrrev_b32_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_lshrrev_b64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mac_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mac_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_f16                      dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_f32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i16                      dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i32_i16                  dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i32_i24                  dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_i64_i32                  dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_legacy_f16               dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_legacy_f32               dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_legacy_i16               dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_legacy_u16               dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u16                      dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u32_u16                  dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u32_u24                  dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mad_u64_u32                  dst0, dst1, src0, src1, src2   :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_f16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_i16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_max3_u16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_max3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_max_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_max_i16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_max_i32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_max_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_max_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mbcnt_hi_u32_b32             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mbcnt_lo_u32_b32             dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_med3_f16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_med3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_med3_i16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_med3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_med3_u16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_med3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min3_f16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min3_f32                     dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min3_i16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_min3_i32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min3_u16                     dst, src0, src1, src2          :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_min3_u32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_min_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_min_i16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_min_i32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_min_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_min_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mov_b32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_mov_fed_b32_e64              dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_mqsad_pk_u16_u8              dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mqsad_u32_u8                 dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_msad_u8                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_f64                      dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_i32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_i32_i24_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_u32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_hi_u32_u24_e64           dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_i32_i24_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_legacy_f32_e64           dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_mul_lo_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_lo_u32                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_mul_u32_u24_e64              dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_nop_e64                                                     :ref:`omod<amdgpu_synid_omod>`
-    v_not_b32_e64                  dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_or3_b32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_or_b32_e64                   dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_pack_b32_f16                 dst, src0, src1                :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`omod<amdgpu_synid_omod>`
-    v_perm_b32                     dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_qsad_pk_u16_u8               dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_f64_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rcp_iflag_f32_e64            dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_readlane_b32                 dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rndne_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_rsq_f64_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_hi_u8                    dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u16                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u32                      dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sad_u8                       dst, src0, src1, src2          :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sat_pk_u8_i16_e64            dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_screen_partition_4se_b32_e64 dst, src0                      :ref:`omod<amdgpu_synid_omod>`
-    v_sin_f16_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sin_f32_e64                  dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f16_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f32_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sqrt_f64_e64                 dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_co_u32_e64               dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_sub_f16_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_f32_e64                  dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_i16                      dst, src0, src1                :ref:`vop3_op_sel<amdgpu_synid_vop3_op_sel>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_sub_i32                      dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_sub_u16_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_sub_u32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_subb_co_u32_e64              dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_subbrev_co_u32_e64           dst0, dst1, src0, src1, src2   :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_co_u32_e64            dst0, dst1, src0, src1         :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_f16_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_f32_e64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_u16_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_subrev_u32_e64               dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_trig_preop_f64               dst, src0, src1                :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f16_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f32_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_trunc_f64_e64                dst, src0                      :ref:`clamp<amdgpu_synid_clamp>` :ref:`omod<amdgpu_synid_omod>`
-    v_writelane_b32                dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-    v_xad_u32                      dst, src0, src1, src2          :ref:`omod<amdgpu_synid_omod>`
-    v_xor_b32_e64                  dst, src0, src1                :ref:`omod<amdgpu_synid_omod>`
-
-VOP3P
-===========================
-
-.. parsed-literal::
-
-    v_mad_mix_f32                  dst, src0, src1, src2          :ref:`mad_mix_op_sel<amdgpu_synid_mad_mix_op_sel>` :ref:`mad_mix_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_mad_mixhi_f16                dst, src0, src1, src2          :ref:`mad_mix_op_sel<amdgpu_synid_mad_mix_op_sel>` :ref:`mad_mix_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_mad_mixlo_f16                dst, src0, src1, src2          :ref:`mad_mix_op_sel<amdgpu_synid_mad_mix_op_sel>` :ref:`mad_mix_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_add_f16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_add_i16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_add_u16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_ashrrev_i16               dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_fma_f16                   dst, src0, src1, src2          :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_lshlrev_b16               dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_lshrrev_b16               dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_mad_i16                   dst, src0, src1, src2          :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_mad_u16                   dst, src0, src1, src2          :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_max_f16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_max_i16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_max_u16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_min_f16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_min_i16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_min_u16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_mul_f16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`neg_lo<amdgpu_synid_neg_lo>` :ref:`neg_hi<amdgpu_synid_neg_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_mul_lo_u16                dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`
-    v_pk_sub_i16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-    v_pk_sub_u16                   dst, src0, src1                :ref:`op_sel<amdgpu_synid_op_sel>` :ref:`op_sel_hi<amdgpu_synid_op_sel_hi>` :ref:`clamp<amdgpu_synid_clamp>`
-
-VOPC
-===========================
-
-.. parsed-literal::
-
-    v_cmp_class_f16                dst, src0, src1
-    v_cmp_class_f16_sdwa           dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_class_f32                dst, src0, src1
-    v_cmp_class_f32_sdwa           dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_class_f64                dst, src0, src1
-    v_cmp_eq_f16                   dst, src0, src1
-    v_cmp_eq_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_f32                   dst, src0, src1
-    v_cmp_eq_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_f64                   dst, src0, src1
-    v_cmp_eq_i16                   dst, src0, src1
-    v_cmp_eq_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_i32                   dst, src0, src1
-    v_cmp_eq_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_i64                   dst, src0, src1
-    v_cmp_eq_u16                   dst, src0, src1
-    v_cmp_eq_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_u32                   dst, src0, src1
-    v_cmp_eq_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_eq_u64                   dst, src0, src1
-    v_cmp_f_f16                    dst, src0, src1
-    v_cmp_f_f16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_f32                    dst, src0, src1
-    v_cmp_f_f32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_f64                    dst, src0, src1
-    v_cmp_f_i16                    dst, src0, src1
-    v_cmp_f_i16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_i32                    dst, src0, src1
-    v_cmp_f_i32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_i64                    dst, src0, src1
-    v_cmp_f_u16                    dst, src0, src1
-    v_cmp_f_u16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_u32                    dst, src0, src1
-    v_cmp_f_u32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_f_u64                    dst, src0, src1
-    v_cmp_ge_f16                   dst, src0, src1
-    v_cmp_ge_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_f32                   dst, src0, src1
-    v_cmp_ge_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_f64                   dst, src0, src1
-    v_cmp_ge_i16                   dst, src0, src1
-    v_cmp_ge_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_i32                   dst, src0, src1
-    v_cmp_ge_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_i64                   dst, src0, src1
-    v_cmp_ge_u16                   dst, src0, src1
-    v_cmp_ge_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_u32                   dst, src0, src1
-    v_cmp_ge_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ge_u64                   dst, src0, src1
-    v_cmp_gt_f16                   dst, src0, src1
-    v_cmp_gt_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_f32                   dst, src0, src1
-    v_cmp_gt_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_f64                   dst, src0, src1
-    v_cmp_gt_i16                   dst, src0, src1
-    v_cmp_gt_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_i32                   dst, src0, src1
-    v_cmp_gt_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_i64                   dst, src0, src1
-    v_cmp_gt_u16                   dst, src0, src1
-    v_cmp_gt_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_u32                   dst, src0, src1
-    v_cmp_gt_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_gt_u64                   dst, src0, src1
-    v_cmp_le_f16                   dst, src0, src1
-    v_cmp_le_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_f32                   dst, src0, src1
-    v_cmp_le_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_f64                   dst, src0, src1
-    v_cmp_le_i16                   dst, src0, src1
-    v_cmp_le_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_i32                   dst, src0, src1
-    v_cmp_le_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_i64                   dst, src0, src1
-    v_cmp_le_u16                   dst, src0, src1
-    v_cmp_le_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_u32                   dst, src0, src1
-    v_cmp_le_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_le_u64                   dst, src0, src1
-    v_cmp_lg_f16                   dst, src0, src1
-    v_cmp_lg_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lg_f32                   dst, src0, src1
-    v_cmp_lg_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lg_f64                   dst, src0, src1
-    v_cmp_lt_f16                   dst, src0, src1
-    v_cmp_lt_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_f32                   dst, src0, src1
-    v_cmp_lt_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_f64                   dst, src0, src1
-    v_cmp_lt_i16                   dst, src0, src1
-    v_cmp_lt_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_i32                   dst, src0, src1
-    v_cmp_lt_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_i64                   dst, src0, src1
-    v_cmp_lt_u16                   dst, src0, src1
-    v_cmp_lt_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_u32                   dst, src0, src1
-    v_cmp_lt_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_lt_u64                   dst, src0, src1
-    v_cmp_ne_i16                   dst, src0, src1
-    v_cmp_ne_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_i32                   dst, src0, src1
-    v_cmp_ne_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_i64                   dst, src0, src1
-    v_cmp_ne_u16                   dst, src0, src1
-    v_cmp_ne_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_u32                   dst, src0, src1
-    v_cmp_ne_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ne_u64                   dst, src0, src1
-    v_cmp_neq_f16                  dst, src0, src1
-    v_cmp_neq_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_neq_f32                  dst, src0, src1
-    v_cmp_neq_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_neq_f64                  dst, src0, src1
-    v_cmp_nge_f16                  dst, src0, src1
-    v_cmp_nge_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nge_f32                  dst, src0, src1
-    v_cmp_nge_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nge_f64                  dst, src0, src1
-    v_cmp_ngt_f16                  dst, src0, src1
-    v_cmp_ngt_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ngt_f32                  dst, src0, src1
-    v_cmp_ngt_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_ngt_f64                  dst, src0, src1
-    v_cmp_nle_f16                  dst, src0, src1
-    v_cmp_nle_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nle_f32                  dst, src0, src1
-    v_cmp_nle_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nle_f64                  dst, src0, src1
-    v_cmp_nlg_f16                  dst, src0, src1
-    v_cmp_nlg_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlg_f32                  dst, src0, src1
-    v_cmp_nlg_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlg_f64                  dst, src0, src1
-    v_cmp_nlt_f16                  dst, src0, src1
-    v_cmp_nlt_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlt_f32                  dst, src0, src1
-    v_cmp_nlt_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_nlt_f64                  dst, src0, src1
-    v_cmp_o_f16                    dst, src0, src1
-    v_cmp_o_f16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_o_f32                    dst, src0, src1
-    v_cmp_o_f32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_o_f64                    dst, src0, src1
-    v_cmp_t_i16                    dst, src0, src1
-    v_cmp_t_i16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_i32                    dst, src0, src1
-    v_cmp_t_i32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_i64                    dst, src0, src1
-    v_cmp_t_u16                    dst, src0, src1
-    v_cmp_t_u16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_u32                    dst, src0, src1
-    v_cmp_t_u32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_t_u64                    dst, src0, src1
-    v_cmp_tru_f16                  dst, src0, src1
-    v_cmp_tru_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_tru_f32                  dst, src0, src1
-    v_cmp_tru_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_tru_f64                  dst, src0, src1
-    v_cmp_u_f16                    dst, src0, src1
-    v_cmp_u_f16_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_u_f32                    dst, src0, src1
-    v_cmp_u_f32_sdwa               dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmp_u_f64                    dst, src0, src1
-    v_cmpx_class_f16               dst, src0, src1
-    v_cmpx_class_f16_sdwa          dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_class_f32               dst, src0, src1
-    v_cmpx_class_f32_sdwa          dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_class_f64               dst, src0, src1
-    v_cmpx_eq_f16                  dst, src0, src1
-    v_cmpx_eq_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_f32                  dst, src0, src1
-    v_cmpx_eq_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_f64                  dst, src0, src1
-    v_cmpx_eq_i16                  dst, src0, src1
-    v_cmpx_eq_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_i32                  dst, src0, src1
-    v_cmpx_eq_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_i64                  dst, src0, src1
-    v_cmpx_eq_u16                  dst, src0, src1
-    v_cmpx_eq_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_u32                  dst, src0, src1
-    v_cmpx_eq_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_eq_u64                  dst, src0, src1
-    v_cmpx_f_f16                   dst, src0, src1
-    v_cmpx_f_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_f32                   dst, src0, src1
-    v_cmpx_f_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_f64                   dst, src0, src1
-    v_cmpx_f_i16                   dst, src0, src1
-    v_cmpx_f_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_i32                   dst, src0, src1
-    v_cmpx_f_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_i64                   dst, src0, src1
-    v_cmpx_f_u16                   dst, src0, src1
-    v_cmpx_f_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_u32                   dst, src0, src1
-    v_cmpx_f_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_f_u64                   dst, src0, src1
-    v_cmpx_ge_f16                  dst, src0, src1
-    v_cmpx_ge_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_f32                  dst, src0, src1
-    v_cmpx_ge_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_f64                  dst, src0, src1
-    v_cmpx_ge_i16                  dst, src0, src1
-    v_cmpx_ge_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_i32                  dst, src0, src1
-    v_cmpx_ge_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_i64                  dst, src0, src1
-    v_cmpx_ge_u16                  dst, src0, src1
-    v_cmpx_ge_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_u32                  dst, src0, src1
-    v_cmpx_ge_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ge_u64                  dst, src0, src1
-    v_cmpx_gt_f16                  dst, src0, src1
-    v_cmpx_gt_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_f32                  dst, src0, src1
-    v_cmpx_gt_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_f64                  dst, src0, src1
-    v_cmpx_gt_i16                  dst, src0, src1
-    v_cmpx_gt_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_i32                  dst, src0, src1
-    v_cmpx_gt_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_i64                  dst, src0, src1
-    v_cmpx_gt_u16                  dst, src0, src1
-    v_cmpx_gt_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_u32                  dst, src0, src1
-    v_cmpx_gt_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_gt_u64                  dst, src0, src1
-    v_cmpx_le_f16                  dst, src0, src1
-    v_cmpx_le_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_f32                  dst, src0, src1
-    v_cmpx_le_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_f64                  dst, src0, src1
-    v_cmpx_le_i16                  dst, src0, src1
-    v_cmpx_le_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_i32                  dst, src0, src1
-    v_cmpx_le_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_i64                  dst, src0, src1
-    v_cmpx_le_u16                  dst, src0, src1
-    v_cmpx_le_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_u32                  dst, src0, src1
-    v_cmpx_le_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_le_u64                  dst, src0, src1
-    v_cmpx_lg_f16                  dst, src0, src1
-    v_cmpx_lg_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lg_f32                  dst, src0, src1
-    v_cmpx_lg_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lg_f64                  dst, src0, src1
-    v_cmpx_lt_f16                  dst, src0, src1
-    v_cmpx_lt_f16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_f32                  dst, src0, src1
-    v_cmpx_lt_f32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_f64                  dst, src0, src1
-    v_cmpx_lt_i16                  dst, src0, src1
-    v_cmpx_lt_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_i32                  dst, src0, src1
-    v_cmpx_lt_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_i64                  dst, src0, src1
-    v_cmpx_lt_u16                  dst, src0, src1
-    v_cmpx_lt_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_u32                  dst, src0, src1
-    v_cmpx_lt_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_lt_u64                  dst, src0, src1
-    v_cmpx_ne_i16                  dst, src0, src1
-    v_cmpx_ne_i16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_i32                  dst, src0, src1
-    v_cmpx_ne_i32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_i64                  dst, src0, src1
-    v_cmpx_ne_u16                  dst, src0, src1
-    v_cmpx_ne_u16_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_u32                  dst, src0, src1
-    v_cmpx_ne_u32_sdwa             dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ne_u64                  dst, src0, src1
-    v_cmpx_neq_f16                 dst, src0, src1
-    v_cmpx_neq_f16_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_neq_f32                 dst, src0, src1
-    v_cmpx_neq_f32_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_neq_f64                 dst, src0, src1
-    v_cmpx_nge_f16                 dst, src0, src1
-    v_cmpx_nge_f16_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nge_f32                 dst, src0, src1
-    v_cmpx_nge_f32_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nge_f64                 dst, src0, src1
-    v_cmpx_ngt_f16                 dst, src0, src1
-    v_cmpx_ngt_f16_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ngt_f32                 dst, src0, src1
-    v_cmpx_ngt_f32_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_ngt_f64                 dst, src0, src1
-    v_cmpx_nle_f16                 dst, src0, src1
-    v_cmpx_nle_f16_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nle_f32                 dst, src0, src1
-    v_cmpx_nle_f32_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nle_f64                 dst, src0, src1
-    v_cmpx_nlg_f16                 dst, src0, src1
-    v_cmpx_nlg_f16_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlg_f32                 dst, src0, src1
-    v_cmpx_nlg_f32_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlg_f64                 dst, src0, src1
-    v_cmpx_nlt_f16                 dst, src0, src1
-    v_cmpx_nlt_f16_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlt_f32                 dst, src0, src1
-    v_cmpx_nlt_f32_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_nlt_f64                 dst, src0, src1
-    v_cmpx_o_f16                   dst, src0, src1
-    v_cmpx_o_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_o_f32                   dst, src0, src1
-    v_cmpx_o_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_o_f64                   dst, src0, src1
-    v_cmpx_t_i16                   dst, src0, src1
-    v_cmpx_t_i16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_i32                   dst, src0, src1
-    v_cmpx_t_i32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_i64                   dst, src0, src1
-    v_cmpx_t_u16                   dst, src0, src1
-    v_cmpx_t_u16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_u32                   dst, src0, src1
-    v_cmpx_t_u32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_t_u64                   dst, src0, src1
-    v_cmpx_tru_f16                 dst, src0, src1
-    v_cmpx_tru_f16_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_tru_f32                 dst, src0, src1
-    v_cmpx_tru_f32_sdwa            dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_tru_f64                 dst, src0, src1
-    v_cmpx_u_f16                   dst, src0, src1
-    v_cmpx_u_f16_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_u_f32                   dst, src0, src1
-    v_cmpx_u_f32_sdwa              dst, src0, src1                :ref:`src0_sel<amdgpu_synid_src0_sel>` :ref:`src1_sel<amdgpu_synid_src1_sel>`
-    v_cmpx_u_f64                   dst, src0, src1
diff --git a/docs/AMDGPUInstructionNotation.rst b/docs/AMDGPUInstructionNotation.rst
new file mode 100644
index 0000000..7f23cf4
--- /dev/null
+++ b/docs/AMDGPUInstructionNotation.rst
@@ -0,0 +1,110 @@
+============================
+AMDGPU Instructions Notation
+============================
+
+.. contents::
+   :local:
+
+.. _amdgpu_syn_instruction_notation:
+
+Introduction
+============
+
+This is an overview of the notation used to describe the syntax of AMDGPU assembler instructions.
+
+This notation mimics the :ref:`syntax of assembler instructions<amdgpu_syn_instructions>`
+except that, instead of real operands and modifiers, it provides references to their descriptions.
+
+Instructions
+============
+
+Notation
+~~~~~~~~
+
+This is the notation used to describe AMDGPU instructions:
+
+    ``<``\ :ref:`opcode description<amdgpu_syn_opcode_notation>`\ ``>  <``\ :ref:`operands description<amdgpu_syn_instruction_operands_notation>`\ ``>  <``\ :ref:`modifiers description<amdgpu_syn_instruction_modifiers_notation>`\ ``>``
+
+.. _amdgpu_syn_opcode_notation:
+
+Opcode
+======
+
+Notation
+~~~~~~~~
+
+TBD
+
+.. _amdgpu_syn_instruction_operands_notation:
+
+Operands
+========
+
+An instruction may have zero or more *operands*. They are comma-separated in the description:
+
+    ``<``\ :ref:`description of operand 0<amdgpu_syn_instruction_operand_notation>`\ ``>, <``\ :ref:`description of operand 1<amdgpu_syn_instruction_operand_notation>`\ ``>, ...``
+
+The order of *operands* is fixed. *Operands* cannot be omitted
+except for special cases described below.
+
+.. _amdgpu_syn_instruction_operand_notation:
+
+Notation
+~~~~~~~~
+
+An operand is described using the following notation:
+
+    *<name><tag0><tag1>...*
+
+Where:
+
+* *name* is a link to a description of the operand.
+* *tags* are optional. They are used to indicate special operand properties:
+
+.. _amdgpu_syn_instruction_operand_tags:
+
+    ============== =================================================================================
+    Operand tag    Meaning
+    ============== =================================================================================
+    :opt           An optional operand.
+    :m             An operand which may be used with
+                   :ref:`VOP3 operand modifiers<amdgpu_synid_vop3_operand_modifiers>` or
+                   :ref:`SDWA operand modifiers<amdgpu_synid_sdwa_operand_modifiers>`.
+    :dst           An input operand which may also serve as a destination
+                   if :ref:`glc<amdgpu_synid_glc>` modifier is specified.
+    :fx            This is an *f32* or *f16* operand depending on
+                   :ref:`m_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>` modifier.
+    :<type>        The operand *type* differs from the *type*
+                   :ref:`implied by the opcode name<amdgpu_syn_instruction_type>`.
+                   This tag specifies the actual operand *type*.
+    ============== =================================================================================
+
+Examples:
+
+.. parsed-literal::
+
+    src1:m             // src1 operand may be used with operand modifiers
+    vdata:dst          // vdata operand may be used as both source and destination
+    vdst:u32           // vdst operand has u32 type
+
+.. _amdgpu_syn_instruction_modifiers_notation:
+
+Modifiers
+=========
+
+An instruction may have zero or more optional *modifiers*. They are space-separated in the description:
+
+    ``<``\ :ref:`description of modifier 0<amdgpu_syn_instruction_modifier_notation>`\ ``> <``\ :ref:`description of modifier 1<amdgpu_syn_instruction_modifier_notation>`\ ``> ...``
+
+The order of *modifiers* is fixed.
+
+.. _amdgpu_syn_instruction_modifier_notation:
+
+Notation
+~~~~~~~~
+
+A *modifier* is described using the following notation:
+
+    *<name>*
+
+Where *name* is a link to a description of the *modifier*.
diff --git a/docs/AMDGPUInstructionSyntax.rst b/docs/AMDGPUInstructionSyntax.rst
new file mode 100644
index 0000000..90ad54a
--- /dev/null
+++ b/docs/AMDGPUInstructionSyntax.rst
@@ -0,0 +1,170 @@
+=========================
+AMDGPU Instruction Syntax
+=========================
+
+.. contents::
+   :local:
+
+.. _amdgpu_syn_instructions:
+
+Instructions
+============
+
+Syntax
+~~~~~~
+
+An instruction has the following syntax:
+
+    ``<``\ *opcode mnemonic*\ ``>    <``\ *operand0*\ ``>, <``\ *operand1*\ ``>,...    <``\ *modifier0*\ ``> <``\ *modifier1*\ ``>...``
+
+:doc:`Operands<AMDGPUOperandSyntax>` are normally comma-separated while
+:doc:`modifiers<AMDGPUModifierSyntax>` are space-separated.
+
+The order of *operands* and *modifiers* is fixed.
+Most *modifiers* are optional and may be omitted.
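+
+For example, the following DS instruction (an illustrative sketch; the registers
+and values are arbitrary) consists of an opcode mnemonic, two comma-separated
+operands and two space-separated modifiers:
+
+.. parsed-literal::
+
+    ds_add_u32 v1, v2 offset:16 gds    // mnemonic, 2 operands, 2 modifiers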
+
+.. _amdgpu_syn_instruction_mnemo:
+
+Opcode Mnemonic
+~~~~~~~~~~~~~~~
+
+The opcode mnemonic describes the semantics of the operation and may include one or more suffixes, in this order:
+
+* :ref:`Destination operand type suffix<amdgpu_syn_instruction_type>`.
+* :ref:`Source operand type suffix<amdgpu_syn_instruction_type>`.
+* :ref:`Encoding suffix<amdgpu_syn_instruction_enc>`.
+
+.. _amdgpu_syn_instruction_type:
+
+Type and Size Suffixes
+~~~~~~~~~~~~~~~~~~~~~~
+
+Instructions which operate on data have an implied type for their *data* operands.
+This data type is specified as a suffix of the instruction mnemonic.
+
+Some instructions have two type suffixes:
+the first is the data type of the destination operand,
+the second is the data type of the source *data* operand(s).
+
+Note that the data type specified by an instruction does not apply
+to other kinds of operands such as *addresses*, *offsets* and so on.
+
+The following table enumerates the most frequently used type suffixes:
+
+    ============================================ ======================= =================
+    Type Suffixes                                Packed instruction?     Data Type
+    ============================================ ======================= =================
+    _b512, _b256, _b128, _b64, _b32, _b16, _b8   No                      Bits.
+    _u64, _u32, _u16, _u8                        No                      Unsigned integer.
+    _i64, _i32, _i16, _i8                        No                      Signed integer.
+    _f64, _f32, _f16                             No                      Floating-point.
+    _b16, _u16, _i16, _f16                       Yes                     Packed.
+    ============================================ ======================= =================
+
+Instructions which have no type suffixes are assumed to operate on typeless data.
+The data size is specified by size suffixes:
+
+    ================= =================== =====================================
+    Size Suffix       Implied data type   Required register size in dwords
+    ================= =================== =====================================
+    \-                b32                 1
+    x2                b64                 2
+    x3                b96                 3
+    x4                b128                4
+    x8                b256                8
+    x16               b512                16
+    x                 b32                 1
+    xy                b64                 2
+    xyz               b96                 3
+    xyzw              b128                4
+    d16_x             b16                 1
+    d16_xy            b16x2               2 for GFX8.0, 1 for GFX8.1 and GFX9
+    d16_xyz           b16x3               3 for GFX8.0, 2 for GFX8.1 and GFX9
+    d16_xyzw          b16x4               4 for GFX8.0, 2 for GFX8.1 and GFX9
+    ================= =================== =====================================
+
+.. WARNING::
+    There are exceptions to the rules described above.
+    Operands whose type differs from the type specified by the opcode are
+    :ref:`tagged<amdgpu_syn_instruction_operand_tags>` in the description.
+
+Examples of instructions with different types of source and destination operands:
+
+.. parsed-literal::
+
+    s_bcnt0_i32_b64
+    v_cvt_f32_u32
+
+Examples of instructions with one data type:
+
+.. parsed-literal::
+
+    v_max3_f32
+    v_max3_i16
+
+Examples of instructions which operate with packed data:
+
+.. parsed-literal::
+
+    v_pk_add_u16
+    v_pk_add_i16
+    v_pk_add_f16
+
+Examples of typeless instructions which operate on b128 data:
+
+.. parsed-literal::
+
+    buffer_store_dwordx4
+    flat_load_dwordx4
+
+.. _amdgpu_syn_instruction_enc:
+
+Encoding Suffixes
+~~~~~~~~~~~~~~~~~
+
+Most *VOP1*, *VOP2* and *VOPC* instructions have several variants:
+they may also be encoded in *VOP3*, *DPP* and *SDWA* formats.
+
+The assembler will automatically select the optimal encoding based on instruction operands.
+To force a specific encoding, add one of the following suffixes to the instruction opcode:
+
+    =================================================== =================
+    Encoding                                            Encoding Suffix
+    =================================================== =================
+    Native 32-bit encoding (*VOP1*, *VOP2* or *VOPC*)   _e32
+    *VOP3* (64-bit) encoding                            _e64
+    *DPP* encoding                                      _dpp
+    *SDWA* encoding                                     _sdwa
+    =================================================== =================
+
+These suffixes are used in this reference to indicate the assumed encoding.
+When no suffix is specified, the native encoding is implied.
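+
+For example, the following lines (an illustrative sketch; the registers are
+arbitrary) force the same addition into different encodings:
+
+.. parsed-literal::
+
+    v_add_f32_e32 v0, v1, v2    // force native 32-bit VOP2 encoding
+    v_add_f32_e64 v0, v1, v2    // force 64-bit VOP3 encoding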
+
+Operands
+========
+
+Syntax
+~~~~~~
+
+Syntax of most operands is described :doc:`in this document<AMDGPUOperandSyntax>`.
+
+For detailed information about operands, follow the *operand links* in the GPU-specific documents:
+
+* :doc:`GFX7<AMDGPU/AMDGPUAsmGFX7>`
+* :doc:`GFX8<AMDGPU/AMDGPUAsmGFX8>`
+* :doc:`GFX9<AMDGPU/AMDGPUAsmGFX9>`
+
+Modifiers
+=========
+
+Syntax
+~~~~~~
+
+Syntax of modifiers is described :doc:`in this document<AMDGPUModifierSyntax>`.
+
+Information about the modifiers supported by individual instructions may be found in the GPU-specific documents:
+
+* :doc:`GFX7<AMDGPU/AMDGPUAsmGFX7>`
+* :doc:`GFX8<AMDGPU/AMDGPUAsmGFX8>`
+* :doc:`GFX9<AMDGPU/AMDGPUAsmGFX9>`
+
diff --git a/docs/AMDGPUModifierSyntax.rst b/docs/AMDGPUModifierSyntax.rst
new file mode 100644
index 0000000..1a555b6
--- /dev/null
+++ b/docs/AMDGPUModifierSyntax.rst
@@ -0,0 +1,1248 @@
+======================================
+Syntax of AMDGPU Instruction Modifiers
+======================================
+
+.. contents::
+   :local:
+
+Conventions
+===========
+
+The following notation is used throughout this document:
+
+    =================== =============================================================
+    Notation            Description
+    =================== =============================================================
+    {0..N}              Any integer value in the range from 0 to N (inclusive).
+    <x>                 Syntax and meaning of *x* is explained elsewhere.
+    =================== =============================================================
+
+.. _amdgpu_syn_modifiers:
+
+Modifiers
+=========
+
+DS Modifiers
+------------
+
+.. _amdgpu_synid_ds_offset8:
+
+offset8
+~~~~~~~
+
+Specifies an immediate unsigned 8-bit offset, in bytes. The default value is 0.
+
+Used with DS instructions which have 2 addresses.
+
+    =================== =====================================================
+    Syntax              Description
+    =================== =====================================================
+    offset:{0..0xFF}    Specifies an unsigned 8-bit offset as a positive
+                        :ref:`integer number <amdgpu_synid_integer_number>`.
+    =================== =====================================================
+
+Examples:
+
+.. parsed-literal::
+
+  offset:255
+  offset:0xff
+
+.. _amdgpu_synid_ds_offset16:
+
+offset16
+~~~~~~~~
+
+Specifies an immediate unsigned 16-bit offset, in bytes. The default value is 0.
+
+Used with DS instructions which have 1 address.
+
+    ==================== ======================================================
+    Syntax               Description
+    ==================== ======================================================
+    offset:{0..0xFFFF}   Specifies an unsigned 16-bit offset as a positive
+                         :ref:`integer number <amdgpu_synid_integer_number>`.
+    ==================== ======================================================
+
+Examples:
+
+.. parsed-literal::
+
+  offset:65535
+  offset:0xffff
+
+.. _amdgpu_synid_sw_offset16:
+
+pattern
+~~~~~~~
+
+This is a special modifier which may be used with the *ds_swizzle_b32* instruction only.
+It specifies a swizzle pattern in numeric or symbolic form. The default value is 0.
+
+See AMD documentation for more information.
+
+    ======================================================= ===========================================================
+    Syntax                                                  Description
+    ======================================================= ===========================================================
+    offset:{0..0xFFFF}                                      Specifies a 16-bit swizzle pattern.
+    offset:swizzle(QUAD_PERM,{0..3},{0..3},{0..3},{0..3})   Specifies a quad permute mode pattern.
+
+                                                            Each number is a lane *id*.
+    offset:swizzle(BITMASK_PERM, "<mask>")                  Specifies a bitmask permute mode pattern.
+
+                                                            The pattern converts a 5-bit lane *id* to another
+                                                            lane *id* with which the lane interacts.
+
+                                                            *mask* is a 5-character sequence which
+                                                            specifies how to transform the bits of
+                                                            the lane *id*.
+
+                                                            The following characters are allowed:
+
+                                                            * "0" - set bit to 0.
+
+                                                            * "1" - set bit to 1.
+
+                                                            * "p" - preserve bit.
+
+                                                            * "i" - invert bit.
+
+    offset:swizzle(BROADCAST,{2..32},{0..N})                Specifies a broadcast mode.
+
+                                                            Broadcasts the value of any particular lane to
+                                                            all lanes in its group.
+
+                                                            The first numeric parameter is a group
+                                                            size and must be equal to 2, 4, 8, 16 or 32.
+
+                                                            The second numeric parameter is the index
+                                                            of the lane being broadcast.
+
+                                                            The index must be less than the group size.
+    offset:swizzle(SWAP,{1..16})                            Specifies a swap mode.
+
+                                                            Swaps the neighboring groups of
+                                                            1, 2, 4, 8 or 16 lanes.
+    offset:swizzle(REVERSE,{2..32})                         Specifies a reverse mode.
+
+                                                            Reverses the lanes for groups of 2, 4, 8, 16 or 32 lanes.
+    ======================================================= ===========================================================
+
+Numeric parameters may be specified as either :ref:`integer numbers<amdgpu_synid_integer_number>` or
+:ref:`absolute expressions<amdgpu_synid_absolute_expression>`.
+
+Examples:
+
+.. parsed-literal::
+
+  offset:255
+  offset:0xffff
+  offset:swizzle(QUAD_PERM, 0, 1, 2 ,3)
+  offset:swizzle(BITMASK_PERM, "01pi0")
+  offset:swizzle(BROADCAST, 2, 0)
+  offset:swizzle(SWAP, 8)
+  offset:swizzle(REVERSE, 30 + 2)
+
+.. _amdgpu_synid_gds:
+
+gds
+~~~
+
+Specifies whether to use GDS or LDS memory (LDS is the default).
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    gds                                      Use GDS memory.
+    ======================================== ================================================
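+
+For example (an illustrative sketch; the registers are arbitrary):
+
+.. parsed-literal::
+
+  ds_write_b32 v0, v1 gds    // write to GDS instead of LDS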
+
+
+EXP Modifiers
+-------------
+
+.. _amdgpu_synid_done:
+
+done
+~~~~
+
+Specifies if this is the last export from the shader to the target. By default, the current
+instruction does not finish an export sequence.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    done                                     Indicates the last export operation.
+    ======================================== ================================================
+
+.. _amdgpu_synid_compr:
+
+compr
+~~~~~
+
+Indicates if the data are compressed (data are not compressed by default).
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    compr                                    Data are compressed.
+    ======================================== ================================================
+
+.. _amdgpu_synid_vm:
+
+vm
+~~
+
+Specifies the valid mask flag state (off by default).
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    vm                                       Set valid mask flag.
+    ======================================== ================================================
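+
+Taken together, a final export might look like this (an illustrative sketch;
+the target and registers are arbitrary):
+
+.. parsed-literal::
+
+  exp mrt0 v1, v2, v3, v4 done vm    // last export; also sets the valid mask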
+
+FLAT Modifiers
+--------------
+
+.. _amdgpu_synid_flat_offset12:
+
+offset12
+~~~~~~~~
+
+Specifies an immediate unsigned 12-bit offset, in bytes. The default value is 0.
+
+Cannot be used with *global/scratch* opcodes. GFX9 only.
+
+    ================= ======================================================
+    Syntax            Description
+    ================= ======================================================
+    offset:{0..4095}  Specifies a 12-bit unsigned offset as a positive
+                      :ref:`integer number <amdgpu_synid_integer_number>`.
+    ================= ======================================================
+
+Examples:
+
+.. parsed-literal::
+
+  offset:4095
+  offset:0xff
+
+.. _amdgpu_synid_flat_offset13s:
+
+offset13s
+~~~~~~~~~
+
+Specifies an immediate signed 13-bit offset, in bytes. The default value is 0.
+
+Can be used with *global/scratch* opcodes only. GFX9 only.
+
+    ============================ =======================================================
+    Syntax                       Description
+    ============================ =======================================================
+    offset:{-4096..4095}         Specifies a 13-bit signed offset as an
+                                 :ref:`integer number <amdgpu_synid_integer_number>`.
+    ============================ =======================================================
+
+Examples:
+
+.. parsed-literal::
+
+  offset:-4000
+  offset:0x10
+
+glc
+~~~
+
+See a description :ref:`here<amdgpu_synid_glc>`.
+
+slc
+~~~
+
+See a description :ref:`here<amdgpu_synid_slc>`.
+
+tfe
+~~~
+
+See a description :ref:`here<amdgpu_synid_tfe>`.
+
+nv
+~~
+
+See a description :ref:`here<amdgpu_synid_nv>`.
+
+MIMG Modifiers
+--------------
+
+.. _amdgpu_synid_dmask:
+
+dmask
+~~~~~
+
+Specifies which channels (image components) are used by the operation. By default, no channels
+are used.
+
+    =============== =====================================================
+    Syntax          Description
+    =============== =====================================================
+    dmask:{0..15}   Specifies image channels as a positive
+                    :ref:`integer number <amdgpu_synid_integer_number>`.
+
+                    Each bit corresponds to one of 4 image
+                    components (RGBA).
+
+                    If a bit is set to 0, the corresponding
+                    component is not used; if it is set to 1,
+                    the component is used.
+    =============== =====================================================
+
+This modifier has some limitations depending on the instruction kind:
+
+    =================================================== ========================
+    Instruction Kind                                    Valid dmask Values
+    =================================================== ========================
+    32-bit atomic *cmpswap*                             0x3
+    32-bit atomic instructions except for *cmpswap*     0x1
+    64-bit atomic *cmpswap*                             0xF
+    64-bit atomic instructions except for *cmpswap*     0x3
+    *gather4*                                           0x1, 0x2, 0x4, 0x8
+    Other instructions                                  any value
+    =================================================== ========================
+
+Examples:
+
+.. parsed-literal::
+
+  dmask:0xf
+  dmask:0b1111
+  dmask:3
+
+.. _amdgpu_synid_unorm:
+
+unorm
+~~~~~
+
+Specifies whether the address is normalized or not (the address is normalized by default).
+
+    ======================== ========================================
+    Syntax                   Description
+    ======================== ========================================
+    unorm                    Force the address to be unnormalized.
+    ======================== ========================================
+
+glc
+~~~
+
+See a description :ref:`here<amdgpu_synid_glc>`.
+
+slc
+~~~
+
+See a description :ref:`here<amdgpu_synid_slc>`.
+
+.. _amdgpu_synid_r128:
+
+r128
+~~~~
+
+Specifies texture resource size. The default size is 256 bits.
+
+GFX7 and GFX8 only.
+
+    =================== ================================================
+    Syntax              Description
+    =================== ================================================
+    r128                Specifies a 128-bit texture resource size.
+    =================== ================================================
+
+.. WARNING:: Using this modifier should decrease the *rsrc* operand size from 8 to 4 dwords, but the assembler does not currently support this feature.
+
+tfe
+~~~
+
+See a description :ref:`here<amdgpu_synid_tfe>`.
+
+.. _amdgpu_synid_lwe:
+
+lwe
+~~~
+
+Specifies LOD warning status (LOD warning is disabled by default).
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    lwe                                      Enables LOD warning.
+    ======================================== ================================================
+
+.. _amdgpu_synid_da:
+
+da
+~~
+
+Specifies if an array index must be sent to TA. By default, the array index is not sent.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    da                                       Send an array-index to TA.
+    ======================================== ================================================
+
+.. _amdgpu_synid_d16:
+
+d16
+~~~
+
+Specifies data size: 16 or 32 bits (32 bits by default). Not supported by GFX7.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    d16                                      Enables 16-bit data mode.
+
+                                             On loads, convert data in memory to 16-bit
+                                             format before storing it in VGPRs.
+
+                                             For stores, convert 16-bit data in VGPRs to
+                                             32 bits before going to memory.
+
+                                             Note that GFX8.0 does not support data packing.
+                                             Each 16-bit data element occupies 1 VGPR.
+
+                                             GFX8.1 and GFX9 support data packing.
+                                             Each pair of 16-bit data elements 
+                                             occupies 1 VGPR.
+    ======================================== ================================================
+
+.. _amdgpu_synid_a16:
+
+a16
+~~~
+
+Specifies the size of image address components: 16 or 32 bits (32 bits by default). GFX9 only.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    a16                                      Enables 16-bit image address components.
+    ======================================== ================================================
+
+Miscellaneous Modifiers
+-----------------------
+
+.. _amdgpu_synid_glc:
+
+glc
+~~~
+
+This modifier has a different meaning for loads, stores, and atomic operations.
+The default value is off (0).
+
+See AMD documentation for details.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    glc                                      Set glc bit to 1.
+    ======================================== ================================================
+
+.. _amdgpu_synid_slc:
+
+slc
+~~~
+
+Specifies cache policy. The default value is off (0).
+
+See AMD documentation for details.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    slc                                      Set slc bit to 1.
+    ======================================== ================================================
+
+.. _amdgpu_synid_tfe:
+
+tfe
+~~~
+
+Controls access to partially resident textures. The default value is off (0).
+
+See AMD documentation for details.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    tfe                                      Set tfe bit to 1.
+    ======================================== ================================================
+
+.. _amdgpu_synid_nv:
+
+nv
+~~
+
+Specifies if the instruction operates on non-volatile memory. By default, memory is volatile.
+
+GFX9 only.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    nv                                       Indicates that the instruction operates
+                                             on non-volatile memory.
+    ======================================== ================================================
+
+MUBUF/MTBUF Modifiers
+---------------------
+
+.. _amdgpu_synid_idxen:
+
+idxen
+~~~~~
+
+Specifies whether address components include an index. By default, no index is used.
+
+Can be used together with :ref:`offen<amdgpu_synid_offen>`.
+
+Cannot be used with :ref:`addr64<amdgpu_synid_addr64>`.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    idxen                                    Address components include an index.
+    ======================================== ================================================
+
+.. _amdgpu_synid_offen:
+
+offen
+~~~~~
+
+Specifies whether address components include an offset. By default, no offset is used.
+
+Can be used together with :ref:`idxen<amdgpu_synid_idxen>`.
+
+Cannot be used with :ref:`addr64<amdgpu_synid_addr64>`.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    offen                                    Address components include an offset.
+    ======================================== ================================================
+
+.. _amdgpu_synid_addr64:
+
+addr64
+~~~~~~
+
+Specifies whether a 64-bit address is used. By default, a 64-bit address is not used.
+
+GFX7 only. Cannot be used with the :ref:`offen<amdgpu_synid_offen>` or
+:ref:`idxen<amdgpu_synid_idxen>` modifiers.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    addr64                                   A 64-bit address is used.
+    ======================================== ================================================
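+
+The following lines (an illustrative sketch; the registers are arbitrary) show
+MUBUF addressing with an index, with an offset, and with both:
+
+.. parsed-literal::
+
+  buffer_load_dword v0, v1, s[8:11], s3 idxen            // v1 holds an index
+  buffer_load_dword v0, v1, s[8:11], s3 offen            // v1 holds an offset
+  buffer_load_dword v0, v[1:2], s[8:11], s3 idxen offen  // v1 holds an index, v2 an offset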
+
+.. _amdgpu_synid_buf_offset12:
+
+offset12
+~~~~~~~~
+
+Specifies an immediate unsigned 12-bit offset, in bytes. The default value is 0.
+
+    =============================== ======================================================
+    Syntax                          Description
+    =============================== ======================================================
+    offset:{0..0xFFF}               Specifies a 12-bit unsigned offset as a positive
+                                    :ref:`integer number <amdgpu_synid_integer_number>`.
+    =============================== ======================================================
+
+Examples:
+
+.. parsed-literal::
+
+  offset:0
+  offset:0x10
+
+glc
+~~~
+
+See a description :ref:`here<amdgpu_synid_glc>`.
+
+slc
+~~~
+
+See a description :ref:`here<amdgpu_synid_slc>`.
+
+.. _amdgpu_synid_lds:
+
+lds
+~~~
+
+Specifies where to store the result: VGPRs or LDS (VGPRs by default).
+
+    ======================================== ===========================
+    Syntax                                   Description
+    ======================================== ===========================
+    lds                                      Store result in LDS.
+    ======================================== ===========================
+
+tfe
+~~~
+
+See a description :ref:`here<amdgpu_synid_tfe>`.
+
+.. _amdgpu_synid_dfmt:
+
+dfmt
+~~~~
+
+TBD
+
+.. _amdgpu_synid_nfmt:
+
+nfmt
+~~~~
+
+TBD
+
+SMRD/SMEM Modifiers
+-------------------
+
+glc
+~~~
+
+See a description :ref:`here<amdgpu_synid_glc>`.
+
+nv
+~~
+
+See a description :ref:`here<amdgpu_synid_nv>`.
+
+VINTRP Modifiers
+----------------
+
+.. _amdgpu_synid_high:
+
+high
+~~~~
+
+Specifies which half of the LDS word to use. The low half of the LDS word is used by default.
+GFX9 only.
+
+    ======================================== ================================
+    Syntax                                   Description
+    ======================================== ================================
+    high                                     Use high half of LDS word.
+    ======================================== ================================
+
+VOP1/VOP2 DPP Modifiers
+-----------------------
+
+GFX8 and GFX9 only.
+
+.. _amdgpu_synid_dpp_ctrl:
+
+dpp_ctrl
+~~~~~~~~
+
+Specifies how data are shared between threads. This is a mandatory modifier.
+There is no default value.
+
+Note. The lanes of a wavefront are organized in four banks and four rows.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    quad_perm:[{0..3},{0..3},{0..3},{0..3}]  Full permute of 4 threads.
+    row_mirror                               Mirror threads within row.
+    row_half_mirror                          Mirror threads within 1/2 row (8 threads).
+    row_bcast:15                             Broadcast thread 15 of each row to the next row.
+    row_bcast:31                             Broadcast thread 31 to rows 2 and 3.
+    wave_shl:1                               Wavefront left shift by 1 thread.
+    wave_rol:1                               Wavefront left rotate by 1 thread.
+    wave_shr:1                               Wavefront right shift by 1 thread.
+    wave_ror:1                               Wavefront right rotate by 1 thread.
+    row_shl:{1..15}                          Row shift left by 1-15 threads.
+    row_shr:{1..15}                          Row shift right by 1-15 threads.
+    row_ror:{1..15}                          Row rotate right by 1-15 threads.
+    ======================================== ================================================
+
+Note. Numeric parameters may be specified as either
+:ref:`integer numbers<amdgpu_synid_integer_number>` or
+:ref:`absolute expressions<amdgpu_synid_absolute_expression>`.
+
+Examples:
+
+.. parsed-literal::
+
+  quad_perm:[0, 1, 2, 3]
+  row_shl:3
+
+.. _amdgpu_synid_row_mask:
+
+row_mask
+~~~~~~~~
+
+Controls which rows are enabled for data sharing. By default, all rows are enabled.
+
+Note. The lanes of a wavefront are organized in four banks and four rows.
+
+    ======================================== =====================================================
+    Syntax                                   Description
+    ======================================== =====================================================
+    row_mask:{0..15}                         Specifies a *row mask* as a positive
+                                             :ref:`integer number <amdgpu_synid_integer_number>`.
+
+                                             Each of 4 bits in the mask controls one
+                                             row (0 - disabled, 1 - enabled).
+    ======================================== =====================================================
+
+Examples:
+
+.. parsed-literal::
+
+  row_mask:0xf
+  row_mask:0b1010
+  row_mask:0b1111
+
+.. _amdgpu_synid_bank_mask:
+
+bank_mask
+~~~~~~~~~
+
+Controls which banks are enabled for data sharing. By default, all banks are enabled.
+
+Note. The lanes of a wavefront are organized in four banks and four rows.
+
+    ======================================== =======================================================
+    Syntax                                   Description
+    ======================================== =======================================================
+    bank_mask:{0..15}                        Specifies a *bank mask* as a positive
+                                             :ref:`integer number <amdgpu_synid_integer_number>`.
+
+                                             Each of 4 bits in the mask controls one
+                                             bank (0 - disabled, 1 - enabled).
+    ======================================== =======================================================
+
+Examples:
+
+.. parsed-literal::
+
+  bank_mask:0x3
+  bank_mask:0b0011
+  bank_mask:0b1111
+
+.. _amdgpu_synid_bound_ctrl:
+
+bound_ctrl
+~~~~~~~~~~
+
+Controls data sharing when accessing an invalid lane. By default, data sharing with
+invalid lanes is disabled.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    bound_ctrl:0                             Enables data sharing with invalid lanes.
+
+                                             Accessing data from an invalid lane will
+                                             return zero.
+    ======================================== ================================================
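+
+A complete DPP instruction might combine these modifiers as follows
+(an illustrative sketch; the registers and masks are arbitrary):
+
+.. parsed-literal::
+
+  v_add_f32_dpp v0, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:0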
+
+VOP1/VOP2/VOPC SDWA Modifiers
+-----------------------------
+
+GFX8 and GFX9 only.
+
+clamp
+~~~~~
+
+See a description :ref:`here<amdgpu_synid_clamp>`.
+
+omod
+~~~~
+
+See a description :ref:`here<amdgpu_synid_omod>`.
+
+GFX9 only.
+
+.. _amdgpu_synid_dst_sel:
+
+dst_sel
+~~~~~~~
+
+Selects which bits in the destination are affected. By default, all bits are affected.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    dst_sel:DWORD                            Use bits 31:0.
+    dst_sel:BYTE_0                           Use bits 7:0.
+    dst_sel:BYTE_1                           Use bits 15:8.
+    dst_sel:BYTE_2                           Use bits 23:16.
+    dst_sel:BYTE_3                           Use bits 31:24.
+    dst_sel:WORD_0                           Use bits 15:0.
+    dst_sel:WORD_1                           Use bits 31:16.
+    ======================================== ================================================
+
+
+.. _amdgpu_synid_dst_unused:
+
+dst_unused
+~~~~~~~~~~
+
+Controls what to do with the bits in the destination which are not selected
+by :ref:`dst_sel<amdgpu_synid_dst_sel>`.
+By default, unused bits are preserved.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    dst_unused:UNUSED_PAD                    Pad with zeros.
+    dst_unused:UNUSED_SEXT                   Sign-extend upper bits, zero lower bits.
+    dst_unused:UNUSED_PRESERVE               Preserve bits.
+    ======================================== ================================================
+
+.. _amdgpu_synid_src0_sel:
+
+src0_sel
+~~~~~~~~
+
+Controls which bits of src0 are used. By default, all bits are used.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    src0_sel:DWORD                           Use bits 31:0.
+    src0_sel:BYTE_0                          Use bits 7:0.
+    src0_sel:BYTE_1                          Use bits 15:8.
+    src0_sel:BYTE_2                          Use bits 23:16.
+    src0_sel:BYTE_3                          Use bits 31:24.
+    src0_sel:WORD_0                          Use bits 15:0.
+    src0_sel:WORD_1                          Use bits 31:16.
+    ======================================== ================================================
+
+.. _amdgpu_synid_src1_sel:
+
+src1_sel
+~~~~~~~~
+
+Controls which bits of src1 are used. By default, all bits are used.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    src1_sel:DWORD                           Use bits 31:0.
+    src1_sel:BYTE_0                          Use bits 7:0.
+    src1_sel:BYTE_1                          Use bits 15:8.
+    src1_sel:BYTE_2                          Use bits 23:16.
+    src1_sel:BYTE_3                          Use bits 31:24.
+    src1_sel:WORD_0                          Use bits 15:0.
+    src1_sel:WORD_1                          Use bits 31:16.
+    ======================================== ================================================
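+
+Taken together, an SDWA instruction might select sub-dword source and destination
+bits as follows (an illustrative sketch; the registers are arbitrary):
+
+.. parsed-literal::
+
+  v_add_f32_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD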
+
+.. _amdgpu_synid_sdwa_operand_modifiers:
+
+VOP1/VOP2/VOPC SDWA Operand Modifiers
+-------------------------------------
+
+Operand modifiers are not used separately. They are applied to source operands.
+
+GFX8 and GFX9 only.
+
+abs
+~~~
+
+See a description :ref:`here<amdgpu_synid_abs>`.
+
+neg
+~~~
+
+See a description :ref:`here<amdgpu_synid_neg>`.
+
+.. _amdgpu_synid_sext:
+
+sext
+~~~~
+
+Sign-extends the value of a (sub-dword) operand to fill all 32 bits.
+Has no effect for 32-bit operands.
+
+Valid for integer operands only.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    sext(<operand>)                          Sign-extend operand value.
+    ======================================== ================================================
+
+Examples:
+
+.. parsed-literal::
+
+  sext(v4)
+  sext(v255)
+
+VOP3 Modifiers
+--------------
+
+.. _amdgpu_synid_vop3_op_sel:
+
+op_sel
+~~~~~~
+
+Selects the low [15:0] or high [31:16] operand bits for source and destination operands.
+By default, low bits are used for all operands.
+
+The number of values specified with the op_sel modifier must match the number of instruction
+operands (both source and destination). The first value controls src0, the second value
+controls src1, and so on; the last value controls the destination.
+The value 0 selects the low bits, while 1 selects the high bits.
+
+Note. The op_sel modifier affects 16-bit operands only. For 32-bit operands, the value specified
+by op_sel must be 0.
+
+GFX9 only.
+
+    ======================================== ============================================================
+    Syntax                                   Description
+    ======================================== ============================================================
+    op_sel:[{0..1},{0..1}]                   Select operand bits for instructions with 1 source operand.
+    op_sel:[{0..1},{0..1},{0..1}]            Select operand bits for instructions with 2 source operands.
+    op_sel:[{0..1},{0..1},{0..1},{0..1}]     Select operand bits for instructions with 3 source operands.
+    ======================================== ============================================================
+
+Examples:
+
+.. parsed-literal::
+
+  op_sel:[0,0]
+  op_sel:[0,1]
+
+.. _amdgpu_synid_clamp:
+
+clamp
+~~~~~
+
+The meaning of *clamp* depends on the instruction.
+
+For *v_cmp* instructions, the clamp modifier indicates that the compare signals
+if a floating point exception occurs. By default, signaling is disabled.
+Not supported by GFX7.
+
+For integer operations, the clamp modifier indicates that the result must be clamped
+to the largest and smallest representable values. By default, there is no clamping.
+Integer clamping is not supported by GFX7.
+
+For floating point operations, the clamp modifier indicates that the result must be clamped
+to the range [0.0, 1.0]. By default, there is no clamping.
+
+Note. The clamp modifier is applied after :ref:`output modifiers<amdgpu_synid_omod>` (if any).
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    clamp                                    Enables clamping (or signaling).
+    ======================================== ================================================
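+
+Example (an illustrative sketch; the registers are arbitrary):
+
+.. parsed-literal::
+
+  v_add_f32_e64 v0, v1, v2 clamp    // clamp f32 result to [0.0, 1.0]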
+
+.. _amdgpu_synid_omod:
+
+omod
+~~~~
+
+Specifies if an output modifier must be applied to the result.
+By default, no output modifiers are applied.
+
+Note. Output modifiers are applied before :ref:`clamping<amdgpu_synid_clamp>` (if any).
+
+Output modifiers are valid for f32 and f64 floating point results only.
+They must not be used with f16.
+
+Note. *v_cvt_f16_f32* is an exception. This instruction produces an f16 result
+but accepts output modifiers.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    mul:2                                    Multiply the result by 2.
+    mul:4                                    Multiply the result by 4.
+    div:2                                    Multiply the result by 0.5.
+    ======================================== ================================================
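+
+Examples (an illustrative sketch; the registers are arbitrary):
+
+.. parsed-literal::
+
+  v_add_f32_e64 v0, v1, v2 mul:2            // multiply f32 result by 2
+  v_add_f64 v[0:1], v[2:3], v[4:5] div:2    // multiply f64 result by 0.5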
+
+.. _amdgpu_synid_vop3_operand_modifiers:
+
+VOP3 Operand Modifiers
+----------------------
+
+Operand modifiers are not used separately. They are applied to source operands.
+
+.. _amdgpu_synid_abs:
+
+abs
+~~~
+
+Computes the absolute value of its operand. Applied before :ref:`neg<amdgpu_synid_neg>` (if any).
+Valid for floating point operands only.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    abs(<operand>)                           Get absolute value of operand.
+    \|<operand>|                             The same as above.
+    ======================================== ================================================
+
+Examples:
+
+.. parsed-literal::
+
+  abs(v36)
+  \|v36|
+
+.. _amdgpu_synid_neg:
+
+neg
+~~~
+
+Computes the negated value of its operand. Applied after :ref:`abs<amdgpu_synid_abs>` (if any).
+Valid for floating point operands only.
+
+    ======================================== ================================================
+    Syntax                                   Description
+    ======================================== ================================================
+    neg(<operand>)                           Get negative value of operand.
+    -<operand>                               The same as above.
+    ======================================== ================================================
+
+Examples:
+
+.. parsed-literal::
+
+  neg(v[0])
+  -v4
+
+VOP3P Modifiers
+---------------
+
+This section describes modifiers of *regular* VOP3P instructions.
+
+*v_mad_mix_f32*, *v_mad_mixhi_f16* and *v_mad_mixlo_f16*
+instructions use these modifiers :ref:`in a special manner<amdgpu_synid_mad_mix>`.
+
+GFX9 only.
+
+.. _amdgpu_synid_op_sel:
+
+op_sel
+~~~~~~
+
+Selects the low [15:0] or high [31:16] operand bits as input to the operation
+which results in the lower-half of the destination.
+By default, low bits are used for all operands.
+
+The number of values specified by the *op_sel* modifier must match the number of source
+operands. The first value controls src0, the second value controls src1, and so on.
+
+The value 0 selects the low bits, while 1 selects the high bits.
+
+    ================================= =============================================================
+    Syntax                            Description
+    ================================= =============================================================
+    op_sel:[{0..1}]                   Select operand bits for instructions with 1 source operand.
+    op_sel:[{0..1},{0..1}]            Select operand bits for instructions with 2 source operands.
+    op_sel:[{0..1},{0..1},{0..1}]     Select operand bits for instructions with 3 source operands.
+    ================================= =============================================================
+
+Examples:
+
+.. parsed-literal::
+
+  op_sel:[0,0]
+  op_sel:[0,1,0]
+
+.. _amdgpu_synid_op_sel_hi:
+
+op_sel_hi
+~~~~~~~~~
+
+Selects the low [15:0] or high [31:16] operand bits as input to the operation
+which results in the upper-half of the destination.
+By default, high bits are used for all operands.
+
+The number of values specified by the *op_sel_hi* modifier must match the number of source
+operands. The first value controls src0, the second value controls src1, and so on.
+
+The value 0 selects the low bits, while 1 selects the high bits.
+
+    =================================== =============================================================
+    Syntax                              Description
+    =================================== =============================================================
+    op_sel_hi:[{0..1}]                  Select operand bits for instructions with 1 source operand.
+    op_sel_hi:[{0..1},{0..1}]           Select operand bits for instructions with 2 source operands.
+    op_sel_hi:[{0..1},{0..1},{0..1}]    Select operand bits for instructions with 3 source operands.
+    =================================== =============================================================
+
+Examples:
+
+.. parsed-literal::
+
+  op_sel_hi:[0,0]
+  op_sel_hi:[0,0,1]
+
+.. _amdgpu_synid_neg_lo:
+
+neg_lo
+~~~~~~
+
+Specifies whether to change the sign of operand values selected by
+:ref:`op_sel<amdgpu_synid_op_sel>`. These values are then used
+as input to the operation which results in the lower-half of the destination.
+
+The number of values specified by this modifier must match the number of source
+operands. The first value controls src0, the second value controls src1, and so on.
+
+The value 0 indicates that the corresponding operand value is used unmodified;
+the value 1 indicates that the negated value of the operand must be used.
+
+By default, operand values are used unmodified.
+
+This modifier is valid for floating point operands only.
+
+    ================================ ==================================================================
+    Syntax                           Description
+    ================================ ==================================================================
+    neg_lo:[{0..1}]                  Select affected operands for instructions with 1 source operand.
+    neg_lo:[{0..1},{0..1}]           Select affected operands for instructions with 2 source operands.
+    neg_lo:[{0..1},{0..1},{0..1}]    Select affected operands for instructions with 3 source operands.
+    ================================ ==================================================================
+
+Examples:
+
+.. parsed-literal::
+
+  neg_lo:[0]
+  neg_lo:[0,1]
+
+.. _amdgpu_synid_neg_hi:
+
+neg_hi
+~~~~~~
+
+Specifies whether to change the sign of operand values selected by
+:ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`. These values are then used
+as input to the operation which results in the upper-half of the destination.
+
+The number of values specified by this modifier must match the number of source
+operands. The first value controls src0, the second value controls src1, and so on.
+
+The value 0 indicates that the corresponding operand value is used unmodified;
+the value 1 indicates that the negated value of the operand must be used.
+
+By default, operand values are used unmodified.
+
+This modifier is valid for floating point operands only.
+
+    =============================== ==================================================================
+    Syntax                          Description
+    =============================== ==================================================================
+    neg_hi:[{0..1}]                 Select affected operands for instructions with 1 source operand.
+    neg_hi:[{0..1},{0..1}]          Select affected operands for instructions with 2 source operands.
+    neg_hi:[{0..1},{0..1},{0..1}]   Select affected operands for instructions with 3 source operands.
+    =============================== ==================================================================
+
+Examples:
+
+.. parsed-literal::
+
+  neg_hi:[1,0]
+  neg_hi:[0,1,1]
+
+clamp
+~~~~~
+
+See a description :ref:`here<amdgpu_synid_clamp>`.
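+
+A *regular* VOP3P instruction might combine these modifiers as follows
+(an illustrative sketch; the registers and modifier values are arbitrary):
+
+.. parsed-literal::
+
+  v_pk_add_f16 v0, v1, v2 op_sel:[0,1] op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[0,1] clamp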
+
+.. _amdgpu_synid_mad_mix:
+
+VOP3P V_MAD_MIX Modifiers
+-------------------------
+
+*v_mad_mix_f32*, *v_mad_mixhi_f16* and *v_mad_mixlo_f16* instructions
+use *op_sel* and *op_sel_hi* modifiers 
+in a manner different from *regular* VOP3P instructions.
+
+See a description below.
+
+GFX9 only.
+
+.. _amdgpu_synid_mad_mix_op_sel:
+
+m_op_sel
+~~~~~~~~
+
+This modifier has meaning only for 16-bit source operands, as indicated by
+:ref:`m_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>`.
+It selects either the low [15:0] or high [31:16] operand bits
+as input to the operation.
+
+The number of values specified by the *op_sel* modifier must match the number of source
+operands. The first value controls src0, the second value controls src1, and so on.
+
+The value 0 indicates the low 16 bits; the value 1 indicates the high 16 bits.
+
+By default, low bits are used for all operands.
+
+    =============================== ================================================
+    Syntax                          Description
+    =============================== ================================================
+    op_sel:[{0..1},{0..1},{0..1}]   Select location of each 16-bit source operand.
+    =============================== ================================================
+
+Examples:
+
+.. parsed-literal::
+
+  op_sel:[0,1]
+
+.. _amdgpu_synid_mad_mix_op_sel_hi:
+
+m_op_sel_hi
+~~~~~~~~~~~
+
+Selects the size of source operands: either 32 bits or 16 bits.
+By default, 32 bits are used for all source operands.
+
+The number of values specified by the *op_sel_hi* modifier must match the number of source
+operands. The first value controls src0, the second value controls src1, and so on.
+
+The value 0 indicates 32 bits; the value 1 indicates 16 bits.
+
+The location of 16 bits in the operand may be specified by
+:ref:`m_op_sel<amdgpu_synid_mad_mix_op_sel>`.
+
+    ======================================== ====================================
+    Syntax                                   Description
+    ======================================== ====================================
+    op_sel_hi:[{0..1},{0..1},{0..1}]         Select size of each source operand.
+    ======================================== ====================================
+
+Examples:
+
+.. parsed-literal::
+
+  op_sel_hi:[1,1,1]
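+
+Taken together, a *v_mad_mix* instruction might use these modifiers as follows
+(an illustrative sketch; the registers are arbitrary). Here op_sel_hi:[1,1,1]
+marks all three sources as 16-bit, and op_sel:[0,0,0] takes each from the low bits:
+
+.. parsed-literal::
+
+  v_mad_mix_f32 v0, v1, v2, v3 op_sel:[0,0,0] op_sel_hi:[1,1,1]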
+
+abs
+~~~
+
+See a description :ref:`here<amdgpu_synid_abs>`.
+
+neg
+~~~
+
+See a description :ref:`here<amdgpu_synid_neg>`.
+
+clamp
+~~~~~
+
+See a description :ref:`here<amdgpu_synid_clamp>`.
diff --git a/docs/AMDGPUOperandSyntax.rst b/docs/AMDGPUOperandSyntax.rst
index 4f3536e..51e477a 100644
--- a/docs/AMDGPUOperandSyntax.rst
+++ b/docs/AMDGPUOperandSyntax.rst
@@ -1,6 +1,6 @@
-=================================================
-Syntax of AMDGPU Assembler Operands and Modifiers
-=================================================
+=====================================
+Syntax of AMDGPU Instruction Operands
+=====================================
 
 .. contents::
    :local:
@@ -8,1048 +8,1057 @@
 Conventions
 ===========
 
-The following conventions are used in syntax description:
+The following notation is used throughout this document:
 
-    =================== =============================================================
+    =================== =============================================================================
     Notation            Description
-    =================== =============================================================
+    =================== =============================================================================
     {0..N}              Any integer value in the range from 0 to N (inclusive).
-                        Unless stated otherwise, this value may be specified as
-                        either a literal or an llvm expression.
-    <x>                 Syntax and meaning of *<x>* is explained elsewhere.
-    =================== =============================================================
+    <x>                 Syntax and meaning of *x* are explained elsewhere.
+    =================== =============================================================================
 
 .. _amdgpu_syn_operands:
 
 Operands
 ========
 
-TBD
+.. _amdgpu_synid_v:
 
-.. _amdgpu_syn_modifiers:
+v
+-
 
-Modifiers
-=========
+Vector registers. There are 256 32-bit vector registers.
 
-DS Modifiers
+A sequence of *vector* registers may be used to operate on more than 32 bits of data.
+
+The assembler currently supports sequences of 1, 2, 3, 4, 8 and 16 *vector* registers.
+
+    =================================================== ====================================================================
+    Syntax                                              Description
+    =================================================== ====================================================================
+    **v**\<N>                                           A single 32-bit *vector* register.
+
+                                                        *N* must be a decimal integer number.
+    **v[**\ <N>\ **]**                                  A single 32-bit *vector* register.
+
+                                                        *N* may be specified as an
+                                                        :ref:`integer number<amdgpu_synid_integer_number>`
+                                                        or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+    **v[**\ <N>:<K>\ **]**                              A sequence of (\ *K-N+1*\ ) *vector* registers.
+
+                                                        *N* and *K* may be specified as
+                                                        :ref:`integer numbers<amdgpu_synid_integer_number>`
+                                                        or :ref:`absolute expressions<amdgpu_synid_absolute_expression>`.
+    **[v**\ <N>, \ **v**\ <N+1>, ... **v**\ <K>\ **]**  A sequence of (\ *K-N+1*\ ) *vector* registers.
+
+                                                        Register indices must be specified as decimal integer numbers.
+    =================================================== ====================================================================
+
+Note. *N* and *K* must satisfy the following conditions:
+
+* *N* <= *K*.
+* 0 <= *N* <= 255.
+* 0 <= *K* <= 255.
+* *K-N+1* must be equal to 1, 2, 3, 4, 8 or 16.
+
+Examples:
+
+.. parsed-literal::
+
+  v255
+  v[0]
+  v[0:1]
+  v[1:1]
+  v[0:3]
+  v[2*2]
+  v[1-1:2-1]
+  [v252]
+  [v252,v253,v254,v255]
+
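+For illustration, the sketch below (instruction and register choices are
+arbitrary) shows 64-bit operands consuming pairs of *vector* registers:
+
+.. parsed-literal::
+
+  v_add_f64 v[0:1], v[2:3], v[4:5]  // each operand occupies 2 registers
+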
+.. _amdgpu_synid_s:
+
+s
+-
+
+Scalar 32-bit registers. The number of available *scalar* registers depends on the GPU:
+
+    ======= ============================
+    GPU     Number of *scalar* registers
+    ======= ============================
+    GFX7    104
+    GFX8    102
+    GFX9    102
+    ======= ============================
+
+A sequence of *scalar* registers may be used to operate on more than 32 bits of data.
+The assembler currently supports sequences of 1, 2, 4, 8 and 16 *scalar* registers.
+
+Pairs of *scalar* registers must be even-aligned (the first register must be even).
+Sequences of 4 or more *scalar* registers must be quad-aligned.
+
+    ======================================================== ====================================================================
+    Syntax                                                   Description
+    ======================================================== ====================================================================
+    **s**\ <N>                                               A single 32-bit *scalar* register.
+
+                                                             *N* must be a decimal integer number.
+    **s[**\ <N>\ **]**                                       A single 32-bit *scalar* register.
+
+                                                             *N* may be specified as an
+                                                             :ref:`integer number<amdgpu_synid_integer_number>`
+                                                             or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+    **s[**\ <N>:<K>\ **]**                                   A sequence of (\ *K-N+1*\ ) *scalar* registers.
+
+                                                             *N* and *K* may be specified as
+                                                             :ref:`integer numbers<amdgpu_synid_integer_number>`
+                                                             or :ref:`absolute expressions<amdgpu_synid_absolute_expression>`.
+    **[s**\ <N>, \ **s**\ <N+1>, ... **s**\ <K>\ **]**       A sequence of (\ *K-N+1*\ ) *scalar* registers.
+
+                                                             Register indices must be specified as decimal integer numbers.
+    ======================================================== ====================================================================
+
+Note. *N* and *K* must satisfy the following conditions:
+
+* *N* must be properly aligned based on sequence size.
+* *N* <= *K*.
+* 0 <= *N* < *SMAX*\ , where *SMAX* is the number of available *scalar* registers.
+* 0 <= *K* < *SMAX*\ , where *SMAX* is the number of available *scalar* registers.
+* *K-N+1* must be equal to 1, 2, 4, 8 or 16.
+
+Examples:
+
+.. parsed-literal::
+
+  s0
+  s[0]
+  s[0:1]
+  s[1:1]
+  s[0:3]
+  s[2*2]
+  s[1-1:2-1]
+  [s4]
+  [s4,s5,s6,s7]
+
+Examples of *scalar* registers with an invalid alignment:
+
+.. parsed-literal::
+
+  s[1:2]
+  s[2:5]
+
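+For illustration (the instruction choice is arbitrary), a 64-bit move requires
+even-aligned pairs on both sides:
+
+.. parsed-literal::
+
+  s_mov_b64 s[2:3], s[4:5]  // both pairs start at an even register
+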
+.. _amdgpu_synid_trap:
+
+trap
+----
+
+A set of trap handler registers:
+
+* :ref:`ttmp<amdgpu_synid_ttmp>`
+* :ref:`tba<amdgpu_synid_tba>`
+* :ref:`tma<amdgpu_synid_tma>`
+
+.. _amdgpu_synid_ttmp:
+
+ttmp
+----
+
+Trap handler temporary scalar registers, 32 bits wide.
+The number of available *ttmp* registers depends on the GPU:
+
+    ======= ===========================
+    GPU     Number of *ttmp* registers
+    ======= ===========================
+    GFX7    12
+    GFX8    12
+    GFX9    16
+    ======= ===========================
+
+A sequence of *ttmp* registers may be used to operate on more than 32 bits of data.
+The assembler currently supports sequences of 1, 2, 4, 8 and 16 *ttmp* registers.
+
+Pairs of *ttmp* registers must be even-aligned (the first register must be even).
+Sequences of 4 or more *ttmp* registers must be quad-aligned.
+
+    ============================================================= ====================================================================
+    Syntax                                                        Description
+    ============================================================= ====================================================================
+    **ttmp**\ <N>                                                 A single 32-bit *ttmp* register.
+
+                                                                  *N* must be a decimal integer number.
+    **ttmp[**\ <N>\ **]**                                         A single 32-bit *ttmp* register.
+
+                                                                  *N* may be specified as an
+                                                                  :ref:`integer number<amdgpu_synid_integer_number>`
+                                                                  or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+    **ttmp[**\ <N>:<K>\ **]**                                     A sequence of (\ *K-N+1*\ ) *ttmp* registers.
+
+                                                                  *N* and *K* may be specified as
+                                                                  :ref:`integer numbers<amdgpu_synid_integer_number>`
+                                                                  or :ref:`absolute expressions<amdgpu_synid_absolute_expression>`.
+    **[ttmp**\ <N>, \ **ttmp**\ <N+1>, ... **ttmp**\ <K>\ **]**   A sequence of (\ *K-N+1*\ ) *ttmp* registers.
+
+                                                                  Register indices must be specified as decimal integer numbers.
+    ============================================================= ====================================================================
+
+Note. *N* and *K* must satisfy the following conditions:
+
+* *N* must be properly aligned based on sequence size.
+* *N* <= *K*.
+* 0 <= *N* < *TMAX*, where *TMAX* is the number of available *ttmp* registers.
+* 0 <= *K* < *TMAX*, where *TMAX* is the number of available *ttmp* registers.
+* *K-N+1* must be equal to 1, 2, 4, 8 or 16.
+
+Examples:
+
+.. parsed-literal::
+
+  ttmp0
+  ttmp[0]
+  ttmp[0:1]
+  ttmp[1:1]
+  ttmp[0:3]
+  ttmp[2*2]
+  ttmp[1-1:2-1]
+  [ttmp4]
+  [ttmp4,ttmp5,ttmp6,ttmp7]
+
+Examples of *ttmp* registers with an invalid alignment:
+
+.. parsed-literal::
+
+  ttmp[1:2]
+  ttmp[2:5]
+
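+For illustration (instruction choices are arbitrary), *ttmp* registers may be
+used wherever scalar register operands are accepted:
+
+.. parsed-literal::
+
+  s_mov_b32 ttmp0, 0         // a single ttmp register
+  s_mov_b64 ttmp[4:5], exec  // an even-aligned ttmp pair
+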
+.. _amdgpu_synid_tba:
+
+tba
+---
+
+Trap base address, 64 bits wide. Holds a pointer to the current trap handler program.
+
+    ================== ======================================================================= =============
+    Syntax             Description                                                             Availability
+    ================== ======================================================================= =============
+    tba                64-bit *trap base address* register.                                    GFX7, GFX8
+    [tba]              64-bit *trap base address* register (an alternative syntax).            GFX7, GFX8
+    [tba_lo,tba_hi]    64-bit *trap base address* register (an alternative syntax).            GFX7, GFX8
+    ================== ======================================================================= =============
+
+High and low 32 bits of *trap base address* may be accessed as separate registers:
+
+    ================== ======================================================================= =============
+    Syntax             Description                                                             Availability
+    ================== ======================================================================= =============
+    tba_lo             Low 32 bits of *trap base address* register.                            GFX7, GFX8
+    tba_hi             High 32 bits of *trap base address* register.                           GFX7, GFX8
+    [tba_lo]           Low 32 bits of *trap base address* register (an alternative syntax).    GFX7, GFX8
+    [tba_hi]           High 32 bits of *trap base address* register (an alternative syntax).   GFX7, GFX8
+    ================== ======================================================================= =============
+
+Note that *tba*, *tba_lo* and *tba_hi* are not accessible as assembler registers in GFX9,
+but *tba* may still be read and written via the *s_get_reg* and *s_set_reg* instructions.
+
+.. _amdgpu_synid_tma:
+
+tma
+---
+
+Trap memory address, 64 bits wide.
+
+    ================= ======================================================================= ==================
+    Syntax            Description                                                             Availability
+    ================= ======================================================================= ==================
+    tma               64-bit *trap memory address* register.                                  GFX7, GFX8
+    [tma]             64-bit *trap memory address* register (an alternative syntax).          GFX7, GFX8
+    [tma_lo,tma_hi]   64-bit *trap memory address* register (an alternative syntax).          GFX7, GFX8
+    ================= ======================================================================= ==================
+
+High and low 32 bits of *trap memory address* may be accessed as separate registers:
+
+    ================= ======================================================================= ==================
+    Syntax            Description                                                             Availability
+    ================= ======================================================================= ==================
+    tma_lo            Low 32 bits of *trap memory address* register.                          GFX7, GFX8
+    tma_hi            High 32 bits of *trap memory address* register.                         GFX7, GFX8
+    [tma_lo]          Low 32 bits of *trap memory address* register (an alternative syntax).  GFX7, GFX8
+    [tma_hi]          High 32 bits of *trap memory address* register (an alternative syntax). GFX7, GFX8
+    ================= ======================================================================= ==================
+
+Note that *tma*, *tma_lo* and *tma_hi* are not accessible as assembler registers in GFX9,
+but *tma* may still be read and written via the *s_get_reg* and *s_set_reg* instructions.
+
+.. _amdgpu_synid_flat_scratch:
+
+flat_scratch
 ------------
 
-.. _amdgpu_synid_ds_offset8:
+Flat scratch address, 64 bits wide. Holds the base address of scratch memory.
 
-ds_offset8
-~~~~~~~~~~
+    ================================== ================================================================
+    Syntax                             Description
+    ================================== ================================================================
+    flat_scratch                       64-bit *flat scratch* address register.
+    [flat_scratch]                     64-bit *flat scratch* address register (an alternative syntax).
+    [flat_scratch_lo,flat_scratch_hi]  64-bit *flat scratch* address register (an alternative syntax).
+    ================================== ================================================================
 
-Specifies an immediate unsigned 8-bit offset, in bytes. The default value is 0.
+High and low 32 bits of *flat scratch* address may be accessed as separate registers:
 
-Used with DS instructions which have 2 addresses.
+    ========================= =========================================================================
+    Syntax                    Description
+    ========================= =========================================================================
+    flat_scratch_lo           Low 32 bits of *flat scratch* address register.
+    flat_scratch_hi           High 32 bits of *flat scratch* address register.
+    [flat_scratch_lo]         Low 32 bits of *flat scratch* address register (an alternative syntax).
+    [flat_scratch_hi]         High 32 bits of *flat scratch* address register (an alternative syntax).
+    ========================= =========================================================================
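+
+A sketch of one possible initialization sequence, assuming the base address
+arrives in scalar registers s8 and s9 (the source registers are arbitrary):
+
+.. parsed-literal::
+
+  s_mov_b32 flat_scratch_lo, s8  // low 32 bits of the address
+  s_mov_b32 flat_scratch_hi, s9  // high 32 bits of the address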
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    offset:{0..0xFF}                         Specifies a 8-bit offset.
-    ======================================== ================================================
+.. _amdgpu_synid_xnack:
 
-.. _amdgpu_synid_ds_offset16:
+xnack
+-----
 
-ds_offset16
-~~~~~~~~~~~
+Xnack mask, 64 bits wide. Holds a mask indicating which threads
+have received an *XNACK* due to a vector memory operation.
 
-Specifies an immediate unsigned 16-bit offset, in bytes. The default value is 0.
+.. WARNING:: GFX7 does not support the *xnack* feature. Not all GFX8 and GFX9 :ref:`processors<amdgpu-processors>` support it.
 
-Used with DS instructions which have 1 address.
+\
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    offset:{0..0xFFFF}                       Specifies a 16-bit offset.
-    ======================================== ================================================
+    ============================== =====================================================
+    Syntax                         Description
+    ============================== =====================================================
+    xnack_mask                     64-bit *xnack mask* register.
+    [xnack_mask]                   64-bit *xnack mask* register (an alternative syntax).
+    [xnack_mask_lo,xnack_mask_hi]  64-bit *xnack mask* register (an alternative syntax).
+    ============================== =====================================================
 
-.. _amdgpu_synid_sw_offset16:
+High and low 32 bits of *xnack mask* may be accessed as separate registers:
 
-sw_offset16
-~~~~~~~~~~~
+    ===================== ==============================================================
+    Syntax                Description
+    ===================== ==============================================================
+    xnack_mask_lo         Low 32 bits of *xnack mask* register.
+    xnack_mask_hi         High 32 bits of *xnack mask* register.
+    [xnack_mask_lo]       Low 32 bits of *xnack mask* register (an alternative syntax).
+    [xnack_mask_hi]       High 32 bits of *xnack mask* register (an alternative syntax).
+    ===================== ==============================================================
 
-This is a special modifier which may be used with *ds_swizzle_b32* instruction only.
-Specifies a sizzle pattern in numeric or symbolic form. The default value is 0.
+.. _amdgpu_synid_vcc:
 
-See AMD documentation for more information.
+vcc
+---
 
-    ======================================================= ===================================================
-    Syntax                                                  Description
-    ======================================================= ===================================================
-    offset:{0..0xFFFF}                                      Specifies a 16-bit swizzle pattern
-                                                            in a numeric form.
-    offset:swizzle(QUAD_PERM,{0..3},{0..3},{0..3},{0..3})   Specifies a quad permute mode pattern; each
-                                                            number is a lane id.
-    offset:swizzle(BITMASK_PERM, "<mask>")                  Specifies a bitmask permute mode pattern
-                                                            which converts a 5-bit lane id to another
-                                                            lane id with which the lane interacts.
+Vector condition code, 64 bits wide. A bit mask with one bit per thread;
+it holds the result of a vector compare operation.
 
-                                                            <mask> is a 5 character sequence which
-                                                            specifies how to transform the bits of the
-                                                            lane id. The following characters are allowed:
+    ================ =========================================================================
+    Syntax           Description
+    ================ =========================================================================
+    vcc              64-bit *vector condition code* register.
+    [vcc]            64-bit *vector condition code* register (an alternative syntax).
+    [vcc_lo,vcc_hi]  64-bit *vector condition code* register (an alternative syntax).
+    ================ =========================================================================
 
-                                                              * "0" - set bit to 0.
+High and low 32 bits of *vector condition code* may be accessed as separate registers:
 
-                                                              * "1" - set bit to 1.
+    ================ =========================================================================
+    Syntax           Description
+    ================ =========================================================================
+    vcc_lo           Low 32 bits of *vector condition code* register.
+    vcc_hi           High 32 bits of *vector condition code* register.
+    [vcc_lo]         Low 32 bits of *vector condition code* register (an alternative syntax).
+    [vcc_hi]         High 32 bits of *vector condition code* register (an alternative syntax).
+    ================ =========================================================================
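+
+For illustration (operand choices are arbitrary), *vcc* is typically written
+by a vector compare and then tested by a scalar branch:
+
+.. parsed-literal::
+
+  v_cmp_gt_f32 vcc, v0, v1  // per-thread compare results go to vcc
+  s_cbranch_vccnz skip      // taken if vcc is not all zeros; "skip" is a label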
 
-                                                              * "p" - preserve bit.
+.. _amdgpu_synid_m0:
 
-                                                              * "i" - inverse bit.
+m0
+--
 
-    offset:swizzle(BROADCAST,{2..32},{0..N})                Specifies a broadcast mode.
-                                                            Broadcasts the value of any particular lane to
-                                                            all lanes in its group.
+A 32-bit memory register. It has various uses,
+including register indexing and bounds checking.
 
-                                                            The first numeric parameter is a group
-                                                            size and must be equal to 2, 4, 8, 16 or 32.
+    =========== ===================================================
+    Syntax      Description
+    =========== ===================================================
+    m0          A 32-bit *memory* register.
+    [m0]        A 32-bit *memory* register (an alternative syntax).
+    =========== ===================================================
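+
+For illustration, a common idiom (the register choices are arbitrary) sets
+*m0* before an LDS access so that bounds checking does not clamp the address:
+
+.. parsed-literal::
+
+  s_mov_b32 m0, -1    // maximal LDS limit
+  ds_read_b32 v1, v0  // LDS read, checked against m0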
 
-                                                            The second numeric parameter is an index of the
-                                                            lane being broadcasted. The index must not exceed
-                                                            group size.
-    offset:swizzle(SWAP,{1..16})                            Specifies a swap mode.
-                                                            Swaps the neighboring groups of
-                                                            1, 2, 4, 8 or 16 lanes.
-    offset:swizzle(REVERSE,{2..32})                         Specifies a reverse mode. Reverses
-                                                            the lanes for groups of 2, 4, 8, 16 or 32 lanes.
-    ======================================================= ===================================================
+.. _amdgpu_synid_exec:
 
-.. _amdgpu_synid_gds:
+exec
+----
 
-gds
-~~~
+Execute mask, 64 bits wide. A bit mask with one bit per thread,
+which is applied to vector instructions: it controls which threads execute
+the instruction and which ignore it.
 
-Specifies whether to use GDS or LDS memory (LDS is the default).
+    ===================== =================================================================
+    Syntax                Description
+    ===================== =================================================================
+    exec                  64-bit *execute mask* register.
+    [exec]                64-bit *execute mask* register (an alternative syntax).
+    [exec_lo,exec_hi]     64-bit *execute mask* register (an alternative syntax).
+    ===================== =================================================================
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    gds                                      Use GDS memory.
-    ======================================== ================================================
+High and low 32 bits of *execute mask* may be accessed as separate registers:
 
+    ===================== =================================================================
+    Syntax                Description
+    ===================== =================================================================
+    exec_lo               Low 32 bits of *execute mask* register.
+    exec_hi               High 32 bits of *execute mask* register.
+    [exec_lo]             Low 32 bits of *execute mask* register (an alternative syntax).
+    [exec_hi]             High 32 bits of *execute mask* register (an alternative syntax).
+    ===================== =================================================================
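+
+For illustration (register choices are arbitrary), *exec* may be saved and
+narrowed like any other 64-bit scalar operand:
+
+.. parsed-literal::
+
+  s_mov_b64 s[0:1], exec     // save the execute mask
+  s_and_b64 exec, exec, vcc  // keep only threads whose vcc bit is set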
 
-EXP Modifiers
--------------
+.. _amdgpu_synid_vccz:
 
-.. _amdgpu_synid_done:
+vccz
+----
 
-done
-~~~~
+A single bit flag indicating that :ref:`vcc<amdgpu_synid_vcc>` is all zeros.
 
-Specifies if this is the last export from the shader to the target. By default, current
-instruction does not finish an export sequence.
+.. WARNING:: This operand is not currently supported by the AMDGPU assembler.
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    done                                     Indicates the last export operation.
-    ======================================== ================================================
+.. _amdgpu_synid_execz:
 
-.. _amdgpu_synid_compr:
+execz
+-----
 
-compr
-~~~~~
+A single bit flag indicating that :ref:`exec<amdgpu_synid_exec>` is all zeros.
 
-Indicates if the data are compressed (not compressed by default).
+.. WARNING:: This operand is not currently supported by the AMDGPU assembler.
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    compr                                    Data are compressed.
-    ======================================== ================================================
+.. _amdgpu_synid_scc:
 
-.. _amdgpu_synid_vm:
+scc
+---
 
-vm
-~~
+A single bit flag indicating the result of a scalar compare operation.
 
-Specifies valid mask flag state (off by default).
+.. WARNING:: This operand is not currently supported by the AMDGPU assembler.
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    vm                                       Set valid mask flag.
-    ======================================== ================================================
+lds_direct
+----------
 
-FLAT Modifiers
---------------
+A special operand which supplies a 32-bit value
+fetched from *LDS* memory using :ref:`m0<amdgpu_synid_m0>` as an address.
 
-.. _amdgpu_synid_flat_offset12:
+.. WARNING:: This operand is not currently supported by the AMDGPU assembler.
 
-flat_offset12
-~~~~~~~~~~~~~
+.. _amdgpu_synid_constant:
 
-Specifies an immediate unsigned 12-bit offset, in bytes. The default value is 0.
+constant
+--------
 
-Cannot be used with *global/scratch* opcodes. GFX9 only.
+A set of integer and floating-point *inline constants*:
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    offset:{0..4095}                         Specifies a 12-bit unsigned offset.
-    ======================================== ================================================
+* :ref:`iconst<amdgpu_synid_iconst>`
+* :ref:`fconst<amdgpu_synid_fconst>`
 
-.. _amdgpu_synid_flat_offset13:
+These operands are encoded as part of the instruction.
 
-flat_offset13
-~~~~~~~~~~~~~
+If a number may be encoded as either
+a :ref:`literal<amdgpu_synid_literal>` or
+an :ref:`inline constant<amdgpu_synid_constant>`,
+the assembler selects the latter encoding because it is more efficient.
 
-Specifies an immediate signed 13-bit offset, in bytes. The default value is 0.
+.. _amdgpu_synid_iconst:
 
-Can be used with *global/scratch* opcodes only. GFX9 only.
+iconst
+------
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    offset:{-4096..+4095}                    Specifies a 13-bit signed offset.
-    ======================================== ================================================
+An :ref:`integer number<amdgpu_synid_integer_number>`
+encoded as an *inline constant*.
 
-glc
-~~~
+Only a small set of integer numbers may be encoded as *inline constants*;
+they are enumerated in the table below.
+All other integer numbers must be encoded as :ref:`literals<amdgpu_synid_literal>`.
 
-See a description :ref:`here<amdgpu_synid_glc>`.
+Integer *inline constants* are converted to
+:ref:`expected operand type<amdgpu_syn_instruction_type>`
+as described :ref:`here<amdgpu_synid_int_const_conv>`.
 
-slc
-~~~
+    ================================== ====================================
+    Value                              Note
+    ================================== ====================================
+    {0..64}                            Positive integer inline constants.
+    {-16..-1}                          Negative integer inline constants.
+    ================================== ====================================
 
-See a description :ref:`here<amdgpu_synid_slc>`.
+.. WARNING:: GFX7 does not support inline constants for *f16* operands.
 
-tfe
-~~~
+There are also symbolic inline constants which provide read-only access to hardware registers.
 
-See a description :ref:`here<amdgpu_synid_tfe>`.
+.. WARNING:: These inline constants are not currently supported by the AMDGPU assembler.
 
-nv
-~~
+\
 
-See a description :ref:`here<amdgpu_synid_nv>`.
+    ======================== ================================================ =============
+    Syntax                   Note                                             Availability
+    ======================== ================================================ =============
+    shared_base              Base address of shared memory region.            GFX9
+    shared_limit             Address of the end of shared memory region.      GFX9
+    private_base             Base address of private memory region.           GFX9
+    private_limit            Address of the end of private memory region.     GFX9
+    pops_exiting_wave_id     A dedicated counter for POPS.                    GFX9
+    ======================== ================================================ =============
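+
+For illustration (the instruction choice is arbitrary), the first move below
+fits an *inline constant* while the second value is out of inline range and
+must be encoded as a :ref:`literal<amdgpu_synid_literal>`:
+
+.. parsed-literal::
+
+  s_mov_b32 s0, -16  // inline constant, no extra dword
+  s_mov_b32 s0, 65   // encoded as a 32-bit literal dword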
 
-MIMG Modifiers
---------------
+.. _amdgpu_synid_fconst:
 
-.. _amdgpu_synid_dmask:
+fconst
+------
 
-dmask
-~~~~~
+A :ref:`floating-point number<amdgpu_synid_floating-point_number>`
+encoded as an *inline constant*.
 
-Specifies which channels (image components) are used by the operation. By default, no channels
-are used.
+Only a small set of floating-point numbers may be encoded as *inline constants*;
+they are enumerated in the table below.
+All other floating-point numbers must be encoded as :ref:`literals<amdgpu_synid_literal>`.
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    dmask:{0..15}                            Each bit corresponds to one of 4 image
-                                             components (RGBA). If the specified bit value
-                                             is 0, the component is not used, value 1 means
-                                             that the component is used.
-    ======================================== ================================================
+Floating-point *inline constants* are converted to
+:ref:`expected operand type<amdgpu_syn_instruction_type>`
+as described :ref:`here<amdgpu_synid_fp_const_conv>`.
 
-This modifier has some limitations depending on instruction kind:
+    ================================== ===================================================== ==================
+    Value                              Note                                                  Availability
+    ================================== ===================================================== ==================
+    0.0                                The same as integer constant 0.                       All GPUs
+    0.5                                Floating-point constant 0.5                           All GPUs
+    1.0                                Floating-point constant 1.0                           All GPUs
+    2.0                                Floating-point constant 2.0                           All GPUs
+    4.0                                Floating-point constant 4.0                           All GPUs
+    -0.5                               Floating-point constant -0.5                          All GPUs
+    -1.0                               Floating-point constant -1.0                          All GPUs
+    -2.0                               Floating-point constant -2.0                          All GPUs
+    -4.0                               Floating-point constant -4.0                          All GPUs
+    0.1592                             1.0/(2.0*pi). Use only for 16-bit operands.           GFX8, GFX9
+    0.15915494                         1.0/(2.0*pi). Use only for 16- and 32-bit operands.   GFX8, GFX9
+    0.159154943091895317852646485335   1.0/(2.0*pi).                                         GFX8, GFX9
+    ================================== ===================================================== ==================
 
-    ======================================== ================================================
-    Instruction Kind                         Valid dmask Values
-    ======================================== ================================================
-    32-bit atomic cmpswap                    0x3
-    other 32-bit atomic instructions         0x1
-    64-bit atomic cmpswap                    0xF
-    other 64-bit atomic instructions         0x3
-    GATHER4                                  0x1, 0x2, 0x4, 0x8
-    Other instructions                       any value
-    ======================================== ================================================
+.. WARNING:: GFX7 does not support inline constants for *f16* operands.
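+
+For illustration (the instruction choice is arbitrary):
+
+.. parsed-literal::
+
+  v_add_f32 v0, 0.5, v1  // 0.5 is an inline constant
+  v_add_f32 v0, 0.3, v1  // 0.3 is not, so it is encoded as a literal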
 
-.. _amdgpu_synid_unorm:
+.. _amdgpu_synid_literal:
 
-unorm
-~~~~~
+literal
+-------
 
-Specifies whether address is normalized or not (normalized by default).
+A literal is a 64-bit value which is encoded as a separate 32-bit dword in the instruction stream.
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    unorm                                    Force address to be un-normalized.
-    ======================================== ================================================
+If a number may be encoded as either
+a :ref:`literal<amdgpu_synid_literal>` or
+an :ref:`inline constant<amdgpu_synid_constant>`,
+the assembler selects the latter encoding because it is more efficient.
 
-glc
-~~~
+Literals may be specified as :ref:`integer numbers<amdgpu_synid_integer_number>`,
+:ref:`floating-point numbers<amdgpu_synid_floating-point_number>` or
+:ref:`expressions<amdgpu_synid_expression>`
+(expressions are currently supported for 32-bit operands only).
 
-See a description :ref:`here<amdgpu_synid_glc>`.
+A 64-bit literal value is converted by the assembler
+to an :ref:`expected operand type<amdgpu_syn_instruction_type>`
+as described :ref:`here<amdgpu_synid_lit_conv>`.
 
-slc
-~~~
+An instruction may use only one literal, but several operands may refer to the same literal.
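+
+For illustration (the instruction choice is arbitrary), a value outside the
+:ref:`inline constant<amdgpu_synid_constant>` range is emitted as an extra
+dword in the instruction stream:
+
+.. parsed-literal::
+
+  s_mov_b32 s0, 0x1234ABCD  // encoded as a 32-bit literal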
 
-See a description :ref:`here<amdgpu_synid_slc>`.
+.. _amdgpu_synid_uimm8:
 
-.. _amdgpu_synid_r128:
+uimm8
+-----
 
-r128
-~~~~
+An 8-bit positive :ref:`integer number<amdgpu_synid_integer_number>`.
+The value is encoded as part of the opcode, so it requires no additional dword
+in the instruction stream.
 
-Specifies texture resource size. The default size is 256 bits.
+.. _amdgpu_synid_uimm32:
 
-GFX7 and GFX8 only.
+uimm32
+------
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    r128                                     Specifies 128 bits texture resource size.
-    ======================================== ================================================
+A 32-bit positive :ref:`integer number<amdgpu_synid_integer_number>`.
+The value is stored as a separate 32-bit dword in the instruction stream.
 
-tfe
-~~~
+.. _amdgpu_synid_uimm20:
 
-See a description :ref:`here<amdgpu_synid_tfe>`.
+uimm20
+------
 
-.. _amdgpu_synid_lwe:
+A 20-bit positive :ref:`integer number<amdgpu_synid_integer_number>`.
 
-lwe
-~~~
+.. _amdgpu_synid_uimm21:
 
-Specifies LOD warning status (LOD warning is disabled by default).
+uimm21
+------
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    lwe                                      Enables LOD warning.
-    ======================================== ================================================
+A 21-bit positive :ref:`integer number<amdgpu_synid_integer_number>`.
 
-.. _amdgpu_synid_da:
+.. WARNING:: The assembler currently supports only 20-bit offsets. Use :ref:`uimm20<amdgpu_synid_uimm20>` as a replacement.
 
-da
-~~
+.. _amdgpu_synid_simm21:
 
-Specifies if an array index must be sent to TA. By default, array index is not sent.
+simm21
+------
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    da                                       Send an array-index to TA.
-    ======================================== ================================================
+A 21-bit signed :ref:`integer number<amdgpu_synid_integer_number>`.
 
-.. _amdgpu_synid_d16:
+.. WARNING:: The assembler currently supports only 20-bit unsigned offsets. Use :ref:`uimm20<amdgpu_synid_uimm20>` as a replacement.
 
-d16
-~~~
+.. _amdgpu_synid_off:
 
-Specifies data size: 16 or 32 bits (32 bits by default). Not supported by GFX7.
+off
+---
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    d16                                      Enables 16-bits data mode.
+A special entity which indicates that the value of this operand is not used.
 
-                                             On loads, convert data in memory to 16-bit
-                                             format before storing it in VGPRs.
+    ================================== ===================================================
+    Syntax                             Description
+    ================================== ===================================================
+    off                                Indicates an unused operand.
+    ================================== ===================================================
 
-                                             For stores, convert 16-bit data in VGPRs to
-                                             32 bits before going to memory.
 
-                                             Note that 16-bit data are stored in VGPRs
-                                             unpacked in GFX8.0. In GFX8.1 and GFX9 16-bit
-                                             data are packed.
-    ======================================== ================================================
+.. _amdgpu_synid_number:
 
-.. _amdgpu_synid_a16:
+Numbers
+=======
 
-a16
-~~~
+.. _amdgpu_synid_integer_number:
 
-Specifies size of image address components: 16 or 32 bits (32 bits by default). GFX9 only.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    a16                                      Enables 16-bits image address components.
-    ======================================== ================================================
-
-Miscellaneous Modifiers
------------------------
-
-.. _amdgpu_synid_glc:
-
-glc
-~~~
-
-This modifier has different meaning for loads, stores, and atomic operations.
-The default value is off (0).
-
-See AMD documentation for details.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    glc                                      Set glc bit to 1.
-    ======================================== ================================================
-
-.. _amdgpu_synid_slc:
-
-slc
-~~~
-
-Specifies cache policy. The default value is off (0).
-
-See AMD documentation for details.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    slc                                      Set slc bit to 1.
-    ======================================== ================================================
-
-.. _amdgpu_synid_tfe:
-
-tfe
-~~~
-
-Controls access to partially resident textures. The default value is off (0).
-
-See AMD documentation for details.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    tfe                                      Set tfe bit to 1.
-    ======================================== ================================================
-
-.. _amdgpu_synid_nv:
-
-nv
-~~
-
-Specifies if instruction is operating on non-volatile memory. By default, memory is volatile.
-
-GFX9 only.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    nv                                       Indicates that instruction operates on
-                                             non-volatile memory.
-    ======================================== ================================================
-
-MUBUF/MTBUF Modifiers
----------------------
-
-.. _amdgpu_synid_idxen:
-
-idxen
-~~~~~
-
-Specifies whether address components include an index. By default, no components are used.
-
-Can be used together with :ref:`offen<amdgpu_synid_offen>`.
-
-Cannot be used with :ref:`addr64<amdgpu_synid_addr64>`.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    idxen                                    Address components include an index.
-    ======================================== ================================================
-
-.. _amdgpu_synid_offen:
-
-offen
-~~~~~
-
-Specifies whether address components include an offset. By default, no components are used.
-
-Can be used together with :ref:`idxen<amdgpu_synid_idxen>`.
-
-Cannot be used with :ref:`addr64<amdgpu_synid_addr64>`.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    offen                                    Address components include an offset.
-    ======================================== ================================================
-
-.. _amdgpu_synid_addr64:
-
-addr64
-~~~~~~
-
-Specifies whether a 64-bit address is used. By default, no address is used.
-
-GFX7 only. Cannot be used with :ref:`offen<amdgpu_synid_offen>` and
-:ref:`idxen<amdgpu_synid_idxen>` modifiers.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    addr64                                   A 64-bit address is used.
-    ======================================== ================================================
-
-.. _amdgpu_synid_buf_offset12:
-
-buf_offset12
-~~~~~~~~~~~~
-
-Specifies an immediate unsigned 12-bit offset, in bytes. The default value is 0.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    offset:{0..0xFFF}                        Specifies a 12-bit unsigned offset.
-    ======================================== ================================================
-
-glc
-~~~
-
-See a description :ref:`here<amdgpu_synid_glc>`.
-
-slc
-~~~
-
-See a description :ref:`here<amdgpu_synid_slc>`.
-
-.. _amdgpu_synid_lds:
-
-lds
-~~~
-
-Specifies where to store the result: VGPRs or LDS (VGPRs by default).
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    lds                                      Store result in LDS.
-    ======================================== ================================================
-
-tfe
-~~~
-
-See a description :ref:`here<amdgpu_synid_tfe>`.
-
-.. _amdgpu_synid_dfmt:
-
-dfmt
-~~~~
-
-TBD
-
-.. _amdgpu_synid_nfmt:
-
-nfmt
-~~~~
-
-TBD
-
-SMRD/SMEM Modifiers
--------------------
-
-glc
-~~~
-
-See a description :ref:`here<amdgpu_synid_glc>`.
-
-nv
-~~
-
-See a description :ref:`here<amdgpu_synid_nv>`.
-
-VINTRP Modifiers
-----------------
-
-.. _amdgpu_synid_high:
-
-high
-~~~~
-
-Specifies which half of the LDS word to use. Low half of LDS word is used by default.
-GFX9 only.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    high                                     Use high half of LDS word.
-    ======================================== ================================================
-
-VOP1/VOP2 DPP Modifiers
------------------------
-
-GFX8 and GFX9 only.
-
-.. _amdgpu_synid_dpp_ctrl:
-
-dpp_ctrl
-~~~~~~~~
-
-Specifies how data are shared between threads. This is a mandatory modifier.
-There is no default value.
-
-Note. The lanes of a wavefront are organized in four banks and four rows.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    quad_perm:[{0..3},{0..3},{0..3},{0..3}]  Full permute of 4 threads.
-    row_mirror                               Mirror threads within row.
-    row_half_mirror                          Mirror threads within 1/2 row (8 threads).
-    row_bcast:15                             Broadcast 15th thread of each row to next row.
-    row_bcast:31                             Broadcast thread 31 to rows 2 and 3.
-    wave_shl:1                               Wavefront left shift by 1 thread.
-    wave_rol:1                               Wavefront left rotate by 1 thread.
-    wave_shr:1                               Wavefront right shift by 1 thread.
-    wave_ror:1                               Wavefront right rotate by 1 thread.
-    row_shl:{1..15}                          Row shift left by 1-15 threads.
-    row_shr:{1..15}                          Row shift right by 1-15 threads.
-    row_ror:{1..15}                          Row rotate right by 1-15 threads.
-    ======================================== ================================================
-
-.. _amdgpu_synid_row_mask:
-
-row_mask
-~~~~~~~~
-
-Controls which rows are enabled for data sharing. By default, all rows are enabled.
-
-Note. The lanes of a wavefront are organized in four banks and four rows.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    row_mask:{0..15}                         Each of 4 bits in the mask controls one
-                                             row (0 - disabled, 1 - enabled).
-    ======================================== ================================================
-
-.. _amdgpu_synid_bank_mask:
-
-bank_mask
-~~~~~~~~~
-
-Controls which banks are enabled for data sharing. By default, all banks are enabled.
-
-Note. The lanes of a wavefront are organized in four banks and four rows.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    bank_mask:{0..15}                        Each of 4 bits in the mask controls one
-                                             bank (0 - disabled, 1 - enabled).
-    ======================================== ================================================
-
-.. _amdgpu_synid_bound_ctrl:
-
-bound_ctrl
-~~~~~~~~~~
-
-Controls data sharing when accessing an invalid lane. By default, data sharing with
-invalid lanes is disabled.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    bound_ctrl:0                             Enables data sharing with invalid lanes.
-                                             Accessing data from an invalid lane will
-                                             return zero.
-    ======================================== ================================================
-
-VOP1/VOP2/VOPC SDWA Modifiers
------------------------------
-
-GFX8 and GFX9 only.
-
-clamp
-~~~~~
-
-See a description :ref:`here<amdgpu_synid_clamp>`.
-
-omod
-~~~~
-
-See a description :ref:`here<amdgpu_synid_omod>`.
-
-GFX9 only.
-
-.. _amdgpu_synid_dst_sel:
-
-dst_sel
-~~~~~~~
-
-Selects which bits in the destination are affected. By default, all bits are affected.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    dst_sel:DWORD                            Use bits 31:0.
-    dst_sel:BYTE_0                           Use bits 7:0.
-    dst_sel:BYTE_1                           Use bits 15:8.
-    dst_sel:BYTE_2                           Use bits 23:16.
-    dst_sel:BYTE_3                           Use bits 31:24.
-    dst_sel:WORD_0                           Use bits 15:0.
-    dst_sel:WORD_1                           Use bits 31:16.
-    ======================================== ================================================
-
-
-.. _amdgpu_synid_dst_unused:
-
-dst_unused
-~~~~~~~~~~
-
-Controls what to do with the bits in the destination which are not selected
-by :ref:`dst_sel<amdgpu_synid_dst_sel>`.
-By default, unused bits are preserved.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    dst_unused:UNUSED_PAD                    Pad with zeros.
-    dst_unused:UNUSED_SEXT                   Sign-extend upper bits, zero lower bits.
-    dst_unused:UNUSED_PRESERVE               Preserve bits.
-    ======================================== ================================================
-
-.. _amdgpu_synid_src0_sel:
-
-src0_sel
-~~~~~~~~
-
-Controls which bits in the src0 are used. By default, all bits are used.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    src0_sel:DWORD                           Use bits 31:0.
-    src0_sel:BYTE_0                          Use bits 7:0.
-    src0_sel:BYTE_1                          Use bits 15:8.
-    src0_sel:BYTE_2                          Use bits 23:16.
-    src0_sel:BYTE_3                          Use bits 31:24.
-    src0_sel:WORD_0                          Use bits 15:0.
-    src0_sel:WORD_1                          Use bits 31:16.
-    ======================================== ================================================
-
-.. _amdgpu_synid_src1_sel:
-
-src1_sel
-~~~~~~~~
-
-Controls which bits in the src1 are used. By default, all bits are used.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    src1_sel:DWORD                           Use bits 31:0.
-    src1_sel:BYTE_0                          Use bits 7:0.
-    src1_sel:BYTE_1                          Use bits 15:8.
-    src1_sel:BYTE_2                          Use bits 23:16.
-    src1_sel:BYTE_3                          Use bits 31:24.
-    src1_sel:WORD_0                          Use bits 15:0.
-    src1_sel:WORD_1                          Use bits 31:16.
-    ======================================== ================================================
-
-VOP1/VOP2/VOPC SDWA Operand Modifiers
--------------------------------------
-
-Operand modifiers are not used separately. They are applied to source operands.
-
-GFX8 and GFX9 only.
-
-abs
-~~~
-
-See a description :ref:`here<amdgpu_synid_abs>`.
-
-neg
-~~~
-
-See a description :ref:`here<amdgpu_synid_neg>`.
-
-.. _amdgpu_synid_sext:
-
-sext
-~~~~
-
-Sign-extends value of a (sub-dword) operand to fill all 32 bits.
-Has no effect for 32-bit operands.
-
-Valid for integer operands only.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    sext(<operand>)                          Sign-extend operand value.
-    ======================================== ================================================
-
-VOP3 Modifiers
---------------
-
-.. _amdgpu_synid_vop3_op_sel:
-
-vop3_op_sel
-~~~~~~~~~~~
-
-Selects the low [15:0] or high [31:16] operand bits for source and destination operands.
-By default, low bits are used for all operands.
-
-The number of values specified with the op_sel modifier must match the number of instruction
-operands (both source and destination). First value controls src0, second value controls src1
-and so on, except that the last value controls destination.
-The value 0 selects the low bits, while 1 selects the high bits.
-
-Note. op_sel modifier affects 16-bit operands only. For 32-bit operands the value specified
-by op_sel must be 0.
-
-GFX9 only.
-
-    ======================================== ============================================================
-    Syntax                                   Description
-    ======================================== ============================================================
-    op_sel:[{0..1},{0..1}]                   Select operand bits for instructions with 1 source operand.
-    op_sel:[{0..1},{0..1},{0..1}]            Select operand bits for instructions with 2 source operands.
-    op_sel:[{0..1},{0..1},{0..1},{0..1}]     Select operand bits for instructions with 3 source operands.
-    ======================================== ============================================================
-
-.. _amdgpu_synid_clamp:
-
-clamp
-~~~~~
-
-Clamp meaning depends on instruction.
-
-For *v_cmp* instructions, clamp modifier indicates that the compare signals
-if a floating point exception occurs. By default, signaling is disabled.
-Not supported by GFX7.
-
-For integer operations, clamp modifier indicates that the result must be clamped
-to the largest and smallest representable value. By default, there is no clamping.
-Integer clamping is not supported by GFX7.
-
-For floating point operations, clamp modifier indicates that the result must be clamped
-to the range [0.0, 1.0]. By default, there is no clamping.
-
-Note. Clamp modifier is applied after :ref:`output modifiers<amdgpu_synid_omod>` (if any).
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    clamp                                    Enables clamping (or signaling).
-    ======================================== ================================================
-
-.. _amdgpu_synid_omod:
-
-omod
-~~~~
-
-Specifies if an output modifier must be applied to the result.
-By default, no output modifiers are applied.
-
-Note. Output modifiers are applied before :ref:`clamping<amdgpu_synid_clamp>` (if any).
-
-Output modifiers are valid for f32 and f64 floating point results only.
-They must not be used with f16.
-
-Note. *v_cvt_f16_f32* is an exception. This instruction produces f16 result
-but accepts output modifiers.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    mul:2                                    Multiply the result by 2.
-    mul:4                                    Multiply the result by 4.
-    div:2                                    Multiply the result by 0.5.
-    ======================================== ================================================
-
-VOP3 Operand Modifiers
-----------------------
-
-Operand modifiers are not used separately. They are applied to source operands.
-
-.. _amdgpu_synid_abs:
-
-abs
-~~~
-
-Computes absolute value of its operand. Applied before :ref:`neg<amdgpu_synid_neg>` (if any).
-Valid for floating point operands only.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    abs(<operand>)                           Get absolute value of operand.
-    \|<operand>|                             The same as above.
-    ======================================== ================================================
-
-.. _amdgpu_synid_neg:
-
-neg
-~~~
-
-Computes negative value of its operand. Applied after :ref:`abs<amdgpu_synid_abs>` (if any).
-Valid for floating point operands only.
-
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    neg(<operand>)                           Get negative value of operand.
-    -<operand>                               The same as above.
-    ======================================== ================================================
-
-VOP3P Modifiers
+Integer Numbers
 ---------------
 
-This section describes modifiers of regular VOP3P instructions.
-*v_mad_mix* modifiers are described :ref:`in a separate section<amdgpu_synid_mad_mix>`.
+Integer numbers are 64 bits wide.
+They may be specified in binary, octal, hexadecimal and decimal formats:
 
-GFX9 only.
+    ============== ====================================
+    Format         Syntax
+    ============== ====================================
+    Decimal        [-]?[1-9][0-9]*
+    Binary         [-]?0b[01]+
+    Octal          [-]?0[0-7]+
+    Hexadecimal    [-]?0x[0-9a-fA-F]+
+    \              [-]?[0x]?[0-9][0-9a-fA-F]*[hH]
+    ============== ====================================
 
-.. _amdgpu_synid_op_sel:
+Examples:
 
-op_sel
-~~~~~~
+.. parsed-literal::
 
-Selects the low [15:0] or high [31:16] operand bits as input to the operation
-which results in the lower-half of the destination.
-By default, low bits are used for all operands.
+  -1234
+  0b1010
+  010
+  0xff
+  0ffh
 
-The number of values specified with the op_sel modifier must match the number of source
-operands. First value controls src0, second value controls src1 and so on.
-The value 0 selects the low bits, while 1 selects the high bits.
+.. _amdgpu_synid_floating-point_number:
 
-    ======================================== =============================================================
-    Syntax                                   Description
-    ======================================== =============================================================
-    op_sel:[{0..1}]                          Select operand bits for instructions with 1 source operand.
-    op_sel:[{0..1},{0..1}]                   Select operand bits for instructions with 2 source operands.
-    op_sel:[{0..1},{0..1},{0..1}]            Select operand bits for instructions with 3 source operands.
-    ======================================== =============================================================
+Floating-Point Numbers
+----------------------
 
-.. _amdgpu_synid_op_sel_hi:
+All floating-point numbers are handled as double (64 bits wide).
 
-op_sel_hi
-~~~~~~~~~
+Floating-point numbers may be specified in hexadecimal and decimal formats:
 
-Selects the low [15:0] or high [31:16] operand bits as input to the operation
-which results in the upper-half of the destination.
-By default, high bits are used for all operands.
+    ============== ======================================================== ========================================================
+    Format         Syntax                                                   Note
+    ============== ======================================================== ========================================================
+    Decimal        [-]?[0-9]*[.]?[0-9]*([eE][+-]?[0-9]+)?                   Must include either a decimal separator or an exponent.
+    Hexadecimal    [-]?0x[0-9a-fA-F]*(.[0-9a-fA-F]*)?[pP][+-]?[0-9a-fA-F]+
+    ============== ======================================================== ========================================================
 
-The number of values specified with the op_sel_hi modifier must match the number of source
-operands. First value controls src0, second value controls src1 and so on.
-The value 0 selects the low bits, while 1 selects the high bits.
+Examples:
 
-    ======================================== =============================================================
-    Syntax                                   Description
-    ======================================== =============================================================
-    op_sel_hi:[{0..1}]                       Select operand bits for instructions with 1 source operand.
-    op_sel_hi:[{0..1},{0..1}]                Select operand bits for instructions with 2 source operands.
-    op_sel_hi:[{0..1},{0..1},{0..1}]         Select operand bits for instructions with 3 source operands.
-    ======================================== =============================================================
+.. parsed-literal::
 
-.. _amdgpu_synid_neg_lo:
+ -1.234
+ 234e2
+ -0x1afp-10
+ 0x.1afp10
 
-neg_lo
-~~~~~~
+.. _amdgpu_synid_expression:
 
-Specifies whether to change sign of operand values selected by
-:ref:`op_sel<amdgpu_synid_op_sel>`. These values are then used
-as input to the operation which results in the upper-half of the destination.
+Expressions
+===========
 
-The number of values specified with this modifier must match the number of source
-operands. First value controls src0, second value controls src1 and so on.
+An expression specifies an address or a numeric value.
+There are two kinds of expressions:
 
-The value 0 indicates that the corresponding operand value is used unmodified,
-the value 1 indicates that negative value of the operand must be used.
+* :ref:`Absolute<amdgpu_synid_absolute_expression>`.
+* :ref:`Relocatable<amdgpu_synid_relocatable_expression>`.
 
-By default, operand values are used unmodified.
+.. _amdgpu_synid_absolute_expression:
 
-This modifier is valid for floating point operands only.
+Absolute Expressions
+--------------------
 
-    ======================================== ==================================================================
-    Syntax                                   Description
-    ======================================== ==================================================================
-    neg_lo:[{0..1}]                          Select affected operands for instructions with 1 source operand.
-    neg_lo:[{0..1},{0..1}]                   Select affected operands for instructions with 2 source operands.
-    neg_lo:[{0..1},{0..1},{0..1}]            Select affected operands for instructions with 3 source operands.
-    ======================================== ==================================================================
+The value of an absolute expression remains the same after program relocation.
+Absolute expressions must not include unassigned or relocatable values
+such as labels.
 
-.. _amdgpu_synid_neg_hi:
+Examples:
 
-neg_hi
-~~~~~~
+.. parsed-literal::
 
-Specifies whether to change sign of operand values selected by
-:ref:`op_sel_hi<amdgpu_synid_op_sel_hi>`. These values are then used
-as input to the operation which results in the upper-half of the destination.
+    x = -1
+    y = x + 10
 
-The number of values specified with this modifier must match the number of source
-operands. First value controls src0, second value controls src1 and so on.
+.. _amdgpu_synid_relocatable_expression:
 
-The value 0 indicates that the corresponding operand value is used unmodified,
-the value 1 indicates that negative value of the operand must be used.
+Relocatable Expressions
+-----------------------
 
-By default, operand values are used unmodified.
+The value of a relocatable expression depends on program relocation.
 
-This modifier is valid for floating point operands only.
+Note that use of relocatable expressions is limited to branch targets
+and 32-bit :ref:`literals<amdgpu_synid_literal>`.
 
-    ======================================== ==================================================================
-    Syntax                                   Description
-    ======================================== ==================================================================
-    neg_hi:[{0..1}]                          Select affected operands for instructions with 1 source operand.
-    neg_hi:[{0..1},{0..1}]                   Select affected operands for instructions with 2 source operands.
-    neg_hi:[{0..1},{0..1},{0..1}]            Select affected operands for instructions with 3 source operands.
-    ======================================== ==================================================================
+Additional information about relocation may be found :ref:`here<amdgpu-relocation-records>`.
 
-clamp
-~~~~~
+Examples:
 
-See a description :ref:`here<amdgpu_synid_clamp>`.
+.. parsed-literal::
 
-.. _amdgpu_synid_mad_mix:
+    y = x + 10 // x is not yet defined. Undefined symbols are assumed to be PC-relative.
+    z = .
 
-VOP3P V_MAD_MIX Modifiers
--------------------------
+Expression Data Type
+--------------------
 
-These instructions use VOP3P format but have different modifiers.
+Expressions and operands of expressions are interpreted as 64-bit integers.
 
-GFX9 only.
+Expressions may include 64-bit :ref:`floating-point numbers<amdgpu_synid_floating-point_number>` (double).
+However, these operands are also handled as 64-bit integers,
+using the binary representation of the specified floating-point numbers.
+No conversion from floating-point to integer is performed.
 
-.. _amdgpu_synid_mad_mix_op_sel:
+Examples:
 
-mad_mix_op_sel
-~~~~~~~~~~~~~~
+.. parsed-literal::
 
-This operand has meaning only for 16-bit source operands as indicated by
-:ref:`mad_mix_op_sel_hi<amdgpu_synid_mad_mix_op_sel_hi>`.
-It specifies to select either the low [15:0] or high [31:16] operand bits
-as input to the operation.
+    x = 0.1    // x is assigned an integer 4591870180066957722 which is a binary representation of 0.1.
+    y = x + x  // y is a sum of two integer values; it is not equal to 0.2!
 
-The value 0 indicates the low bits, the value 1 indicates the high 16 bits.
-By default, low bits are used for all operands.
+Syntax
+------
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    op_sel:[{0..1},{0..1},{0..1}]            Select location of each 16-bit source operand.
-    ======================================== ================================================
+Expressions are composed of
+:ref:`symbols<amdgpu_synid_symbol>`,
+:ref:`integer numbers<amdgpu_synid_integer_number>`,
+:ref:`floating-point numbers<amdgpu_synid_floating-point_number>`,
+:ref:`binary operators<amdgpu_synid_expression_bin_op>`,
+:ref:`unary operators<amdgpu_synid_expression_un_op>` and subexpressions.
 
-.. _amdgpu_synid_mad_mix_op_sel_hi:
+Expressions may also use ".", which refers to the current PC (program counter).
 
-mad_mix_op_sel_hi
-~~~~~~~~~~~~~~~~~
+The syntax of expressions is shown below::
 
-Selects the size of source operands: either 32 bits or 16 bits.
-By default, 32 bits are used for all source operands.
+    expr ::= expr binop expr | primaryexpr ;
 
-The value 0 indicates 32 bits, the value 1 indicates 16 bits.
-The location of 16 bits in the operand may be specified by
-:ref:`mad_mix_op_sel<amdgpu_synid_mad_mix_op_sel>`.
+    primaryexpr ::= '(' expr ')' | symbol | number | '.' | unop primaryexpr ;
 
-    ======================================== ================================================
-    Syntax                                   Description
-    ======================================== ================================================
-    op_sel_hi:[{0..1},{0..1},{0..1}]         Select size of each source operand.
-    ======================================== ================================================
+    binop ::= '&&'
+            | '||'
+            | '|'
+            | '^'
+            | '&'
+            | '!'
+            | '=='
+            | '!='
+            | '<>'
+            | '<'
+            | '<='
+            | '>'
+            | '>='
+            | '<<'
+            | '>>'
+            | '+'
+            | '-'
+            | '*'
+            | '/'
+            | '%' ;
 
-abs
-~~~
+    unop ::= '~'
+           | '+'
+           | '-'
+           | '!' ;
 
-See a description :ref:`here<amdgpu_synid_abs>`.
+.. _amdgpu_synid_expression_bin_op:
 
-neg
-~~~
+Binary Operators
+----------------
 
-See a description :ref:`here<amdgpu_synid_neg>`.
+Binary operators are described in the following table.
+They operate on and produce 64-bit integers.
+Operators with higher priority are performed first.
 
-clamp
-~~~~~
+    ========== ========= ===============================================
+    Operator   Priority  Meaning
+    ========== ========= ===============================================
+       \*         5      Integer multiplication.
+       /          5      Integer division.
+       %          5      Integer signed remainder.
+       \+         4      Integer addition.
+       \-         4      Integer subtraction.
+       <<         3      Integer shift left.
+       >>         3      Logical shift right.
+       ==         2      Equality comparison.
+       !=         2      Inequality comparison.
+       <>         2      Inequality comparison.
+       <          2      Signed less than comparison.
+       <=         2      Signed less than or equal comparison.
+       >          2      Signed greater than comparison.
+       >=         2      Signed greater than or equal comparison.
+      \|          1      Bitwise or.
+       ^          1      Bitwise xor.
+       &          1      Bitwise and.
+       &&         0      Logical and.
+       ||         0      Logical or.
+    ========== ========= ===============================================
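+
+For example, a short sketch following the priorities in the table:
+
+.. parsed-literal::
+
+    x = 2 + 3 \* 4     // x = 14: multiplication (priority 5) is applied first
+    y = (2 + 3) \* 4   // y = 20: a parenthesized subexpression is evaluated first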
 
-See a description :ref:`here<amdgpu_synid_clamp>`.
+.. _amdgpu_synid_expression_un_op:
+
+Unary Operators
+---------------
+
+Unary operators are described in the following table.
+They operate on and produce 64-bit integers.
+
+    ========== ===============================================
+    Operator   Meaning
+    ========== ===============================================
+       !       Logical negation.
+       ~       Bitwise negation.
+       \+      Integer unary plus.
+       \-      Integer unary minus.
+    ========== ===============================================
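+
+A short sketch, assuming the semantics defined in the table:
+
+.. parsed-literal::
+
+    x = ~0       // x = 0xffffffffffffffff (bitwise negation)
+    y = !1       // y = 0 (logical negation)
+    z = -(+5)    // z = -5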
+
+.. _amdgpu_synid_symbol:
+
+Symbols
+-------
+
+A symbol is a named 64-bit value, representing a relocatable
+address or an absolute (non-relocatable) number.
+
+Symbol names have the following syntax:
+    ``[a-zA-Z_.][a-zA-Z0-9_$.@]*``
+
+The table below provides several examples of syntax used for symbol definition.
+
+    ================ ==========================================================
+    Syntax           Meaning
+    ================ ==========================================================
+    .globl <S>       Declares a global symbol S without assigning it a value.
+    .set <S>, <E>    Assigns the value of an expression E to a symbol S.
+    <S> = <E>        Assigns the value of an expression E to a symbol S.
+    <S>:             Declares a label S and assigns it the current PC value.
+    ================ ==========================================================
+
+A symbol may be used before it is declared or assigned;
+unassigned symbols are assumed to be PC-relative.
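+
+The following hypothetical fragment combines these forms:
+
+.. parsed-literal::
+
+    .globl entry       // declare a global symbol "entry"
+    .set size, 0x100   // size = 0x100
+    base = . + 16      // an expression using the current PC
+    entry:             // a label; entry is assigned the current PC value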
+
+Additional information about symbols may be found :ref:`here<amdgpu-symbols>`.
+
+.. _amdgpu_synid_conv:
+
+Conversions
+===========
+
+This section describes what happens when a 64-bit
+:ref:`integer number<amdgpu_synid_integer_number>`, a
+:ref:`floating-point number<amdgpu_synid_floating-point_number>` or a
+:ref:`symbol<amdgpu_synid_symbol>`
+is used for an operand which has a different type or size.
+
+Depending on the operand kind, this conversion is performed by either the assembler or AMDGPU H/W:
+
+* Values encoded as :ref:`inline constants<amdgpu_synid_constant>` are handled by H/W.
+* Values encoded as :ref:`literals<amdgpu_synid_literal>` are converted by the assembler.
+
+.. _amdgpu_synid_const_conv:
+
+Inline Constants
+----------------
+
+.. _amdgpu_synid_int_const_conv:
+
+Integer Inline Constants
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Integer :ref:`inline constants<amdgpu_synid_constant>`
+may be thought of as 64-bit
+:ref:`integer numbers<amdgpu_synid_integer_number>`;
+when used as operands they are truncated to the size of
+:ref:`expected operand type<amdgpu_syn_instruction_type>`.
+No data type conversions are performed.
+
+Examples:
+
+.. parsed-literal::
+
+    // GFX9
+
+    v_add_u16 v0, -1, 0    // v0 = 0xFFFF
+    v_add_f16 v0, -1, 0    // v0 = 0xFFFF (NaN)
+
+    v_add_u32 v0, -1, 0    // v0 = 0xFFFFFFFF
+    v_add_f32 v0, -1, 0    // v0 = 0xFFFFFFFF (NaN)
+
+.. _amdgpu_synid_fp_const_conv:
+
+Floating-Point Inline Constants
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Floating-point :ref:`inline constants<amdgpu_synid_constant>`
+may be thought of as 64-bit
+:ref:`floating-point numbers<amdgpu_synid_floating-point_number>`;
+when used as operands they are converted to a floating-point number of
+:ref:`expected operand size<amdgpu_syn_instruction_type>`.
+
+Examples:
+
+.. parsed-literal::
+
+    // GFX9
+
+    v_add_f16 v0, 1.0, 0    // v0 = 0x3C00 (1.0)
+    v_add_u16 v0, 1.0, 0    // v0 = 0x3C00
+
+    v_add_f32 v0, 1.0, 0    // v0 = 0x3F800000 (1.0)
+    v_add_u32 v0, 1.0, 0    // v0 = 0x3F800000
+
+
+.. _amdgpu_synid_lit_conv:
+
+Literals
+--------
+
+.. _amdgpu_synid_int_lit_conv:
+
+Integer Literals
+~~~~~~~~~~~~~~~~
+
+Integer :ref:`literals<amdgpu_synid_literal>`
+are specified as 64-bit :ref:`integer numbers<amdgpu_synid_integer_number>`.
+
+When used as operands they are converted to
+:ref:`expected operand type<amdgpu_syn_instruction_type>` as described below.
+
+    ============== ============== =============== ====================================================================
+    Expected type  Condition      Result          Note
+    ============== ============== =============== ====================================================================
+    i16, u16, b16  cond(num,16)   num.u16         Truncate to 16 bits.
+    i32, u32, b32  cond(num,32)   num.u32         Truncate to 32 bits.
+    i64            cond(num,32)   {-1,num.i32}    Truncate to 32 bits and then sign-extend the result to 64 bits.
+    u64, b64       cond(num,32)   { 0,num.u32}    Truncate to 32 bits and then zero-extend the result to 64 bits.
+    f16            cond(num,16)   num.u16         Use low 16 bits as an f16 value.
+    f32            cond(num,32)   num.u32         Use low 32 bits as an f32 value.
+    f64            cond(num,32)   {num.u32,0}     Use low 32 bits of the number as high 32 bits
+                                                  of the result; low 32 bits of the result are zeroed.
+    ============== ============== =============== ====================================================================
+
+The condition *cond(X,S)* indicates if a 64-bit number *X*
+can be converted to a smaller size *S* by truncation of upper bits.
+There are two cases when the conversion is possible:
+
+* The truncated bits are all 0.
+* The truncated bits are all 1 and the value after truncation has its MSB set.
+
+Examples of valid literals:
+
+.. parsed-literal::
+
+    // GFX9
+                                             // Literal value after conversion:
+    v_add_u16 v0, 0xff00, v0                 //   0xff00
+    v_add_u16 v0, 0xffffffffffffff00, v0     //   0xff00
+    v_add_u16 v0, -256, v0                   //   0xff00
+                                             // Literal value after conversion:
+    s_bfe_i64 s[0:1], 0xffefffff, s3         //   0xffffffffffefffff
+    s_bfe_u64 s[0:1], 0xffefffff, s3         //   0x00000000ffefffff
+    v_ceil_f64_e32 v[0:1], 0xffefffff        //   0xffefffff00000000 (-1.7976922776554302e308)
+
+Examples of invalid literals:
+
+.. parsed-literal::
+
+    // GFX9
+
+    v_add_u16 v0, 0x1ff00, v0               // truncated bits are not all 0 or 1
+    v_add_u16 v0, 0xffffffffffff00ff, v0    // truncated bits do not match MSB of the result
+
+.. _amdgpu_synid_fp_lit_conv:
+
+Floating-Point Literals
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Floating-point :ref:`literals<amdgpu_synid_literal>` are specified as 64-bit
+:ref:`floating-point numbers<amdgpu_synid_floating-point_number>`.
+
+When used as operands they are converted to
+:ref:`expected operand type<amdgpu_syn_instruction_type>` as described below.
+
+    ============== ============== ================= =================================================================
+    Expected type  Condition      Result            Note
+    ============== ============== ================= =================================================================
+    i16, u16, b16  cond(num,16)   f16(num)          Convert to f16 and use bits of the result as an integer value.
+    i32, u32, b32  cond(num,32)   f32(num)          Convert to f32 and use bits of the result as an integer value.
+    i64, u64, b64  false          \-                Conversion disabled because its semantics would be unclear.
+    f16            cond(num,16)   f16(num)          Convert to f16.
+    f32            cond(num,32)   f32(num)          Convert to f32.
+    f64            true           {num.u32.hi,0}    Use high 32 bits of the number as high 32 bits of the result;
+                                                    zero-fill low 32 bits of the result.
+
+                                                    Note that the result may differ from the original number.
+    ============== ============== ================= =================================================================
+
+The condition *cond(X,S)* indicates if an f64 number *X* can be converted
+to a smaller *S*-bit floating-point type without overflow or underflow.
+Loss of precision is allowed.
+
+Examples of valid literals:
+
+.. parsed-literal::
+
+    // GFX9
+
+    v_add_f16 v1, 65500.0, v2
+    v_add_f32 v1, 65600.0, v2
+
+    // Literal value before conversion: 1.7976931348623157e308 (0x7fefffffffffffff)
+    // Literal value after conversion:  1.7976922776554302e308 (0x7fefffff00000000)
+    v_ceil_f64 v[0:1], 1.7976931348623157e308
+
+Examples of invalid literals:
+
+.. parsed-literal::
+
+    // GFX9
+
+    v_add_f16 v1, 65600.0, v2    // overflow
+
+.. _amdgpu_synid_exp_conv:
+
+Expressions
+~~~~~~~~~~~
+
+Expressions operate on and produce 64-bit integers.
+
+When used as operands they are truncated to
+:ref:`expected operand size<amdgpu_syn_instruction_type>`.
+No data type conversions are performed.
+
+Examples:
+
+.. parsed-literal::
+
+    // GFX9
+
+    x = 0.1
+    v_sqrt_f32 v0, x           // v0 = [low 32 bits of 0.1 (double)]
+    v_sqrt_f32 v0, (0.1 + 0)   // the same as above
+    v_sqrt_f32 v0, 0.1         // v0 = [0.1 (double) converted to float]
+
diff --git a/docs/AMDGPUUsage.rst b/docs/AMDGPUUsage.rst
index 5ca4975..c60d60a 100644
--- a/docs/AMDGPUUsage.rst
+++ b/docs/AMDGPUUsage.rst
@@ -4558,21 +4558,26 @@
 .. toctree::
    :hidden:
 
-   AMDGPUAsmGFX7
-   AMDGPUAsmGFX8
-   AMDGPUAsmGFX9
+   AMDGPU/AMDGPUAsmGFX7
+   AMDGPU/AMDGPUAsmGFX8
+   AMDGPU/AMDGPUAsmGFX9
+   AMDGPUModifierSyntax
    AMDGPUOperandSyntax
+   AMDGPUInstructionSyntax
+   AMDGPUInstructionNotation
 
-An instruction has the following syntax:
+An instruction has the following :doc:`syntax<AMDGPUInstructionSyntax>`:
 
-    *<opcode> <operand0>, <operand1>,... <modifier0> <modifier1>...*
+    ``<``\ *opcode*\ ``>    <``\ *operand0*\ ``>, <``\ *operand1*\ ``>,...    <``\ *modifier0*\ ``> <``\ *modifier1*\ ``>...``
 
-Note that operands are normally comma-separated while modifiers are space-separated.
+:doc:`Operands<AMDGPUOperandSyntax>` are normally comma-separated while
+:doc:`modifiers<AMDGPUModifierSyntax>` are space-separated.
 
-The order of operands and modifiers is fixed. Most modifiers are optional and may be omitted.
+The order of *operands* and *modifiers* is fixed.
+Most *modifiers* are optional and may be omitted.
 
-See detailed instruction syntax description for :doc:`GFX7<AMDGPUAsmGFX7>`,
-:doc:`GFX8<AMDGPUAsmGFX8>` and :doc:`GFX9<AMDGPUAsmGFX9>`.
+See detailed instruction syntax description for :doc:`GFX7<AMDGPU/AMDGPUAsmGFX7>`,
+:doc:`GFX8<AMDGPU/AMDGPUAsmGFX8>` and :doc:`GFX9<AMDGPU/AMDGPUAsmGFX9>`.
 
 Note that features under development are not included in this description.
 
@@ -4583,22 +4588,12 @@
 Operands
 ~~~~~~~~
 
-The following syntax for register operands is supported:
-
-* SGPR registers: s0, ... or s[0], ...
-* VGPR registers: v0, ... or v[0], ...
-* TTMP registers: ttmp0, ... or ttmp[0], ...
-* Special registers: exec (exec_lo, exec_hi), vcc (vcc_lo, vcc_hi), flat_scratch (flat_scratch_lo, flat_scratch_hi)
-* Special trap registers: tba (tba_lo, tba_hi), tma (tma_lo, tma_hi)
-* Register pairs, quads, etc: s[2:3], v[10:11], ttmp[5:6], s[4:7], v[12:15], ttmp[4:7], s[8:15], ...
-* Register lists: [s0, s1], [ttmp0, ttmp1, ttmp2, ttmp3]
-* Register index expressions: v[2*2], s[1-1:2-1]
-* 'off' indicates that an operand is not enabled
+Detailed description of operands may be found :doc:`here<AMDGPUOperandSyntax>`.
 
 Modifiers
 ~~~~~~~~~
 
-Detailed description of modifiers may be found :doc:`here<AMDGPUOperandSyntax>`.
+Detailed description of modifiers may be found :doc:`here<AMDGPUModifierSyntax>`.
 
 Instruction Examples
 ~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/AdvancedBuilds.rst b/docs/AdvancedBuilds.rst
index d2a2ef5..695dcfb 100644
--- a/docs/AdvancedBuilds.rst
+++ b/docs/AdvancedBuilds.rst
@@ -51,6 +51,15 @@
   $ cmake -G Ninja -DCLANG_ENABLE_BOOTSTRAP=On -DCLANG_BOOTSTRAP_PASSTHROUGH="CMAKE_INSTALL_PREFIX;CMAKE_VERBOSE_MAKEFILE" <path to source>
   $ ninja stage2
 
+CMake options starting with ``BOOTSTRAP_`` will be passed only to the stage2 build.
+This gives the opportunity to use Clang-specific build flags.
+For example, the following CMake call will enable ``-fno-addrsig`` only during
+the stage2 build for C and C++.
+
+.. code-block:: console
+
+  $ cmake [..]  -DBOOTSTRAP_CMAKE_CXX_FLAGS='-fno-addrsig' -DBOOTSTRAP_CMAKE_C_FLAGS='-fno-addrsig' [..]
+
 The clang build system refers to builds as stages. A stage1 build is a standard
 build using the compiler installed on the host, and a stage2 build is built
 using the stage1 compiler. This nomenclature holds up to more stages too. In
diff --git a/docs/CMake.rst b/docs/CMake.rst
index f4f67db..a5a574e 100644
--- a/docs/CMake.rst
+++ b/docs/CMake.rst
@@ -774,7 +774,7 @@
 
 Note if you intend for this pass to be merged into the LLVM source tree at some
 point in the future it might make more sense to use LLVM's internal
-``add_llvm_loadable_module`` function instead by...
+``add_llvm_library`` function with the MODULE argument instead by...
 
 
 Adding the following to ``<project dir>/CMakeLists.txt`` (after
@@ -789,7 +789,7 @@
 
 .. code-block:: cmake
 
-  add_llvm_loadable_module(LLVMPassname
+  add_llvm_library(LLVMPassname MODULE
     Pass.cpp
     )
 
diff --git a/docs/CommandGuide/FileCheck.rst b/docs/CommandGuide/FileCheck.rst
index 8f71095..721d2c2 100644
--- a/docs/CommandGuide/FileCheck.rst
+++ b/docs/CommandGuide/FileCheck.rst
@@ -80,9 +80,16 @@
   -verify``. With this option FileCheck will verify that input does not contain
   warnings not covered by any ``CHECK:`` patterns.
 
+.. option:: --dump-input <mode>
+
+  Dump input to stderr, adding annotations representing currently enabled
+  diagnostics.  Do this either 'always', on 'fail', or 'never'.  Specify 'help'
+  to explain the dump format and quit.
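+
+  For example, a minimal invocation (file names here are placeholders):
+
+  .. code-block:: console
+
+     $ FileCheck --dump-input=fail checks.txt < input.txt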
+
 .. option:: --dump-input-on-failure
 
-  When the check fails, dump all of the original input.
+  When the check fails, dump all of the original input.  This option is
+  deprecated in favor of ``--dump-input=fail``.
 
 .. option:: --enable-var-scope
 
diff --git a/docs/CommandGuide/llc.rst b/docs/CommandGuide/llc.rst
index 11dfc90..da096f1 100644
--- a/docs/CommandGuide/llc.rst
+++ b/docs/CommandGuide/llc.rst
@@ -87,9 +87,9 @@
 
    llvm-as < /dev/null | llc -march=xyz -mattr=help
 
-.. option:: --disable-fp-elim
+.. option:: --frame-pointer
 
- Disable frame pointer elimination optimization.
+ Specify the effect of frame pointer elimination optimization (all, non-leaf, none).
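+
+ For example, a hypothetical invocation that keeps no frame pointers
+ (``test.ll`` is a placeholder input file):
+
+ .. code-block:: console
+
+    llc --frame-pointer=none test.ll -o test.s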
 
 .. option:: --disable-excess-fp-precision
 
diff --git a/docs/CommandGuide/llvm-objdump.rst b/docs/CommandGuide/llvm-objdump.rst
index 0d02915..c3e7c16 100644
--- a/docs/CommandGuide/llvm-objdump.rst
+++ b/docs/CommandGuide/llvm-objdump.rst
@@ -16,12 +16,19 @@
 
 COMMANDS
 --------
-At least one of the following commands are required, and some commands can be combined with other commands:
+At least one of the following commands is required, and some commands can be
+combined with other commands:
 
-.. option:: -disassemble
+.. option:: -d, -disassemble
 
-  Display assembler mnemonics for the machine instructions
- 
+  Display assembler mnemonics for the machine instructions. Disassembles all
+  text sections found in the input file(s).
+
+.. option:: -D, -disassemble-all
+
+  Display assembler mnemonics for the machine instructions. Disassembles all
+  sections found in the input file(s).
+
 .. option:: -help
 
   Display usage information and exit. Does not stack with other commands.
@@ -45,14 +52,14 @@
 .. option:: -version
 
   Display the version of this program. Does not stack with other commands.
-  
+
 OPTIONS
 -------
 :program:`llvm-objdump` supports the following options:
 
 .. option:: -arch=<architecture>
 
-  Specify the architecture to disassemble. see -version for available
+  Specify the architecture to disassemble. See ``-version`` for available
   architectures.
 
 .. option:: -cfg
@@ -68,14 +75,15 @@
 
   Print line information from debug info if available.
 
-.. option:: -macho
+.. option:: -m, -macho
 
-  Use Mach-O specific object file parser.
+  Use Mach-O specific object file parser. Commands and other options may behave
+  differently when used with ``-macho``.
 
 .. option:: -mattr=<a1,+a2,-a3,...>
 
   Target specific attributes.
-  
+
 .. option:: -mc-x86-disable-arith-relaxation
 
   Disable relaxation of arithmetic instruction for X86.
@@ -83,26 +91,26 @@
 .. option:: -stats
 
   Enable statistics output from program.
-  
+
 .. option:: -triple=<string>
 
-  Target triple to disassemble for, see -version for available targets.
-  
+  Target triple to disassemble for; see ``-version`` for available targets.
+
 .. option:: -x86-asm-syntax=<style>
 
   When used with the ``-disassemble`` option, choose style of code to emit from
   X86 backend. Supported values are:
 
    .. option:: att
-   
+
     AT&T-style assembly
-   
+
    .. option:: intel
-   
+
     Intel-style assembly
 
-   
-  The default disassembly style is **att**. 
+
+  The default disassembly style is **att**.
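+
+  For example, a sketch (``a.out`` is a placeholder object file):
+
+  .. code-block:: console
+
+     $ llvm-objdump -disassemble -x86-asm-syntax=intel a.out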
 
 BUGS
 ----
diff --git a/docs/CommandGuide/llvm-profdata.rst b/docs/CommandGuide/llvm-profdata.rst
index 96c91e3..f66fb49 100644
--- a/docs/CommandGuide/llvm-profdata.rst
+++ b/docs/CommandGuide/llvm-profdata.rst
@@ -203,7 +203,7 @@
  annotations.
 
 .. option:: -topn=n
-	     
+
  Instruct the profile dumper to show the top ``n`` functions with the
  hottest basic blocks in the summary section. By default, the topn functions
  are not dumped.
@@ -216,6 +216,16 @@
 
  Show the profiled sizes of the memory intrinsic calls for shown functions.
 
+.. option:: -value-cutoff=n
+
+ Show only those functions whose max count values are greater than or equal
+ to ``n``. By default, the value-cutoff is set to 0.
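+
+ For example, a hypothetical run that shows only functions whose max count is
+ at least 100 (``foo.profdata`` is a placeholder):
+
+ .. code-block:: console
+
+    llvm-profdata show -value-cutoff=100 foo.profdata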
+
+.. option:: -list-below-cutoff
+
+ Only output names of functions whose max count values are below the cutoff
+ value.
+
 EXIT STATUS
 -----------
 
diff --git a/docs/CommandGuide/llvm-symbolizer.rst b/docs/CommandGuide/llvm-symbolizer.rst
index 7bcad1c..3c7a26e 100644
--- a/docs/CommandGuide/llvm-symbolizer.rst
+++ b/docs/CommandGuide/llvm-symbolizer.rst
@@ -68,7 +68,7 @@
 OPTIONS
 -------
 
-.. option:: -obj
+.. option:: -obj, -exe, -e
 
   Path to object file to be symbolized.
 
@@ -83,7 +83,7 @@
  Prefer function names stored in symbol table to function names
  in debug info sections. Defaults to true.
 
-.. option:: -demangle
+.. option:: -demangle, -C
 
  Print demangled function names. Defaults to true.
 
@@ -106,11 +106,11 @@
  location, look for the debug info at the .dSYM path provided via the
  ``-dsym-hint`` flag. This flag can be used multiple times.
 
-.. option:: -print-address
+.. option:: -print-address, -addresses, -a
 
  Print address before the source code location. Defaults to false.
 
-.. option:: -pretty-print
+.. option:: -pretty-print, -p
 
  Print human readable output. If ``-inlining`` is specified, enclosing scope is
  prefixed by (inlined by). Refer to listed examples.
diff --git a/docs/DeveloperPolicy.rst b/docs/DeveloperPolicy.rst
index 9125197..0991294 100644
--- a/docs/DeveloperPolicy.rst
+++ b/docs/DeveloperPolicy.rst
@@ -22,7 +22,7 @@
 
 #. Make life as simple and easy for contributors as possible.
 
-#. Keep the top of Subversion trees as stable as possible.
+#. Keep the tip of tree as stable as possible.
 
 #. Establish awareness of the project's :ref:`copyright, license, and patent
    policies <copyright-license-patents>` with contributors to the project.
@@ -80,23 +80,19 @@
 When making a patch for review, the goal is to make it as easy for the reviewer
 to read it as possible.  As such, we recommend that you:
 
-#. Make your patch against the Subversion trunk, not a branch, and not an old
-   version of LLVM.  This makes it easy to apply the patch.  For information on
-   how to check out SVN trunk, please see the `Getting Started
-   Guide <GettingStarted.html#checkout>`_.
+#. Make your patch against git master, not a branch, and not an old version
+   of LLVM.  This makes it easy to apply the patch.  For information on how to
+   clone from git, please see the :ref:`Getting Started Guide
+   <checkout>`.
 
 #. Similarly, patches should be submitted soon after they are generated.  Old
    patches may not apply correctly if the underlying code changes between the
    time the patch was created and the time it is applied.
 
-#. Patches should be made with ``svn diff``, or similar. If you use a
+#. Patches should be made with ``git format-patch``, or similar. If you use a
    different tool, make sure it uses the ``diff -u`` format and that it
    doesn't contain clutter which makes it hard to read.
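+
+   For example, one way to generate a patch for the most recent commit:
+
+   .. code-block:: console
+
+      % git format-patch HEAD~1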
 
-#. If you are modifying generated files, such as the top-level ``configure``
-   script, please separate out those changes into a separate patch from the rest
-   of your changes.
-
 Once your patch is ready, submit it by emailing it to the appropriate project's
 commit mailing list (or commit it directly if applicable). Alternatively, some
 patches get sent to the project's development list or component of the LLVM bug
@@ -187,9 +183,9 @@
 problem, we have a notion of an 'owner' for a piece of the code.  The sole
 responsibility of a code owner is to ensure that a commit to their area of the
 code is appropriately reviewed, either by themself or by someone else.  The list
-of current code owners can be found in the file
-`CODE_OWNERS.TXT <http://git.llvm.org/klaus/llvm/blob/master/CODE_OWNERS.TXT>`_
-in the root of the LLVM source tree.
+of current code owners can be found in the file `CODE_OWNERS.TXT
+<https://github.com/llvm/llvm-project/blob/master/llvm/CODE_OWNERS.TXT>`_ in the
+root of the LLVM source tree.
 
 Note that code ownership is completely different than reviewers: anyone can
 review a piece of code, and we welcome code review from anyone who is
diff --git a/docs/ExtendingLLVM.rst b/docs/ExtendingLLVM.rst
index 87f48c9..389dfad 100644
--- a/docs/ExtendingLLVM.rst
+++ b/docs/ExtendingLLVM.rst
@@ -253,10 +253,6 @@
    add enum ``LLVMTypeKind`` and modify
    ``LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty)`` for the new type
 
-#. ``llvm/include/llvm/IR/TypeBuilder.h``:
-
-   add new class to represent new type in the hierarchy
-
 #. ``llvm/lib/AsmParser/LLLexer.cpp``:
 
    add ability to parse in the type from text assembly
@@ -299,10 +295,6 @@
    add enum ``LLVMTypeKind`` and modify
    `LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty)` for the new type
 
-#. ``llvm/include/llvm/IR/TypeBuilder.h``:
-
-   add new class to represent new class in the hierarchy
-
 #. ``llvm/lib/AsmParser/LLLexer.cpp``:
 
    modify ``lltok::Kind LLLexer::LexIdentifier()`` to add ability to
diff --git a/docs/GettingStarted.rst b/docs/GettingStarted.rst
index ff6b696..b714cc6 100644
--- a/docs/GettingStarted.rst
+++ b/docs/GettingStarted.rst
@@ -8,23 +8,22 @@
 Overview
 ========
 
-Welcome to LLVM! In order to get started, you first need to know some basic
-information.
+Welcome to the LLVM project! In order to get started, you first need to know
+some basic information.
 
-First, LLVM comes in three pieces. The first piece is the LLVM suite. This
-contains all of the tools, libraries, and header files needed to use LLVM.  It
-contains an assembler, disassembler, bitcode analyzer and bitcode optimizer.  It
-also contains basic regression tests that can be used to test the LLVM tools and
-the Clang front end.
+First, the LLVM project has multiple components. The core of the project is
+itself called "LLVM". This contains all of the tools, libraries, and header
+files needed to process an intermediate representation and convert it into
+object files.  It contains an assembler, disassembler, bitcode analyzer and
+bitcode optimizer.  It also contains basic regression tests.
 
-The second piece is the `Clang <http://clang.llvm.org/>`_ front end.  This
-component compiles C, C++, Objective C, and Objective C++ code into LLVM
-bitcode. Once compiled into LLVM bitcode, a program can be manipulated with the
-LLVM tools from the LLVM suite.
+Another piece is the `Clang <http://clang.llvm.org/>`_ front end.  This
+component compiles C, C++, Objective C, and Objective C++ code into LLVM bitcode
+-- and from there into object files, using LLVM.
 
-There is a third, optional piece called Test Suite.  It is a suite of programs
-with a testing harness that can be used to further test LLVM's functionality
-and performance.
+There are other components as well:
+the `libc++ C++ standard library <https://libcxx.llvm.org>`_,
+the `LLD linker <https://lld.llvm.org>`_, and more.
 
 Getting Started Quickly (A Summary)
 ===================================
@@ -39,90 +38,38 @@
 #. Read the documentation.
 #. Remember that you were warned twice about reading the documentation.
 
-   * In particular, the *relative paths specified are important*.
+#. Check out LLVM (including related subprojects like Clang):
 
-#. Checkout LLVM:
+   * ``git clone https://github.com/llvm/llvm-project.git``
+   * Or, on Windows, ``git clone --config core.autocrlf=false
+     https://github.com/llvm/llvm-project.git``
 
-   * ``cd where-you-want-llvm-to-live``
-   * ``svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm``
+#. Configure and build LLVM and Clang:
 
-#. Checkout Clang:
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/tools``
-   * ``svn co http://llvm.org/svn/llvm-project/cfe/trunk clang``
-
-#. Checkout Extra Clang Tools **[Optional]**:
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/tools/clang/tools``
-   * ``svn co http://llvm.org/svn/llvm-project/clang-tools-extra/trunk extra``
-
-#. Checkout LLD linker **[Optional]**:
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/tools``
-   * ``svn co http://llvm.org/svn/llvm-project/lld/trunk lld``
-
-#. Checkout Polly Loop Optimizer **[Optional]**:
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/tools``
-   * ``svn co http://llvm.org/svn/llvm-project/polly/trunk polly``
-
-#. Checkout Compiler-RT (required to build the sanitizers) **[Optional]**:
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/projects``
-   * ``svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk compiler-rt``
-
-#. Checkout Libomp (required for OpenMP support) **[Optional]**:
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/projects``
-   * ``svn co http://llvm.org/svn/llvm-project/openmp/trunk openmp``
-
-#. Checkout libcxx and libcxxabi **[Optional]**:
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/projects``
-   * ``svn co http://llvm.org/svn/llvm-project/libcxx/trunk libcxx``
-   * ``svn co http://llvm.org/svn/llvm-project/libcxxabi/trunk libcxxabi``
-
-#. Get the Test Suite Source Code **[Optional]**
-
-   * ``cd where-you-want-llvm-to-live``
-   * ``cd llvm/projects``
-   * ``svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite``
-
-#. Configure and build LLVM and Clang:
-
-   *Warning:* Make sure you've checked out *all of* the source code
-   before trying to configure with cmake.  cmake does not pickup newly
-   added source directories in incremental builds.
-
-   The build uses `CMake <CMake.html>`_. LLVM requires CMake 3.4.3 to build. It
-   is generally recommended to use a recent CMake, especially if you're
-   generating Ninja build files. This is because the CMake project is constantly
-   improving the quality of the generators, and the Ninja generator gets a lot
-   of attention.
-
-   * ``cd where you want to build llvm``
+   * ``cd llvm-project``
    * ``mkdir build``
    * ``cd build``
-   * ``cmake -G <generator> [options] <path to llvm sources>``
+   * ``cmake -G <generator> [options] ../llvm``
 
      Some common generators are:
 
-     * ``Unix Makefiles`` --- for generating make-compatible parallel makefiles.
      * ``Ninja`` --- for generating `Ninja <https://ninja-build.org>`_
        build files. Most llvm developers use Ninja.
+     * ``Unix Makefiles`` --- for generating make-compatible parallel makefiles.
      * ``Visual Studio`` --- for generating Visual Studio projects and
        solutions.
      * ``Xcode`` --- for generating Xcode projects.
 
      Some Common options:
 
+     * ``-DLLVM_ENABLE_PROJECTS='...'`` --- semicolon-separated list of the LLVM
+       subprojects you'd like to additionally build. Can include any of: clang,
+       libcxx, libcxxabi, libunwind, lldb, compiler-rt, lld, polly, or
+       debuginfo-tests.
+
+       For example, to build LLVM, Clang, libcxx, and libcxxabi, use
+       ``-DLLVM_ENABLE_PROJECTS="clang;libcxx;libcxxabi"``.
+
      * ``-DCMAKE_INSTALL_PREFIX=directory`` --- Specify for *directory* the full
        pathname of where you want the LLVM tools and libraries to be installed
        (default ``/usr/local``).
@@ -135,16 +82,18 @@
 
    * Run your build tool of choice!
 
-     * The default target (i.e. ``make``) will build all of LLVM
+     * The default target (i.e. ``ninja`` or ``make``) will build all of LLVM.
 
-     * The ``check-all`` target (i.e. ``make check-all``) will run the
+     * The ``check-all`` target (i.e. ``ninja check-all``) will run the
        regression tests to ensure everything is in working order.
 
      * CMake will generate build targets for each tool and library, and most
        LLVM sub-projects generate their own ``check-<project>`` target.
 
-     * Running a serial build will be *slow*.  Make sure you run a
-       parallel build; for ``make``, use ``make -j``.
+     * Running a serial build will be *slow*.  Make sure you run a parallel
+       build. That's already done by default in Ninja; for ``make``, use
+       ``make -j NNN`` (with an appropriate value of NNN, e.g. the number of
+       CPUs you have).
 
    * For more information see `CMake <CMake.html>`_
 
@@ -172,7 +121,7 @@
 ================== ===================== =============
 Linux              x86\ :sup:`1`         GCC, Clang
 Linux              amd64                 GCC, Clang
-Linux              ARM\ :sup:`4`         GCC, Clang
+Linux              ARM                   GCC, Clang
 Linux              PowerPC               GCC, Clang
 Solaris            V9 (Ultrasparc)       GCC
 FreeBSD            x86\ :sup:`1`         GCC, Clang
@@ -192,7 +141,6 @@
   #. Code generation supported for 32-bit ABI only
   #. To use LLVM modules on Win32-based system, you may configure LLVM
      with ``-DBUILD_SHARED_LIBS=On``.
-  #. MCJIT not working well pre-v7, old JIT engine not supported any more.
 
 Note that Debug builds require a lot of time and disk space.  An LLVM-only build
 will need about 1-3 GB of space.  A full build of LLVM and Clang will need around
@@ -433,10 +381,9 @@
 ---------------------------
 
 If you have the LLVM distribution, you will need to unpack it before you can
-begin to compile it.  LLVM is distributed as a set of two files: the LLVM suite
-and the LLVM GCC front end compiled for your platform.  There is an additional
-test suite that is optional.  Each file is a TAR archive that is compressed with
-the gzip program.
+begin to compile it.  LLVM is distributed as a number of different
+subprojects. Each one has its own download which is a TAR archive that is
+compressed with the gzip program.
 
 The files are as follows, with *x.y* marking the version number:
 
@@ -444,18 +391,132 @@
 
   Source release for the LLVM libraries and tools.
 
-``llvm-test-x.y.tar.gz``
+``cfe-x.y.tar.gz``
 
-  Source release for the LLVM test-suite.
+  Source release for the Clang frontend.
 
 .. _checkout:
 
-Checkout LLVM from Subversion
------------------------------
+Checkout LLVM from Git
+----------------------
 
-If you have access to our Subversion repository, you can get a fresh copy of the
-entire source code.  All you need to do is check it out from Subversion as
-follows:
+You can also check out the source code for LLVM from Git. While the LLVM
+project's official source-code repository is Subversion, we are in the process
+of migrating to Git. We currently recommend that all developers use Git for
+day-to-day development.
+
+.. note::
+
+  Passing ``--config core.autocrlf=false`` should not be required in
+  the future after we adjust the .gitattribute settings correctly, but
+  is required for Windows users at the time of this writing.
+
+Simply run:
+
+.. code-block:: console
+
+  % git clone https://github.com/llvm/llvm-project.git
+
+or on Windows,
+
+.. code-block:: console
+
+  % git clone --config core.autocrlf=false https://github.com/llvm/llvm-project.git
+
+This will create an '``llvm-project``' directory in the current directory and
+fully populate it with all of the source code, test directories, and local
+copies of documentation files for LLVM and all the related subprojects. Note
+that unlike the tarballs, which contain each subproject in a separate file, the
+git repository contains all of the projects together.
+
+If you want to get a specific release (as opposed to the most recent revision),
+you can check out a tag after cloning the repository. E.g., ``git checkout
+llvmorg-6.0.1`` inside the ``llvm-project`` directory created by the above
+command.  Use ``git tag -l`` to list all of them.
+
+Sending patches
+^^^^^^^^^^^^^^^
+
+Please read `Developer Policy <DeveloperPolicy.html#one-off-patches>`_, too.
+
+We don't currently accept GitHub pull requests, so you'll need to send patches
+either by emailing them to llvm-commits, or, preferably, via :ref:`Phabricator
+<phabricator-reviews>`.
+
+You'll generally want to make sure your branch has a single commit,
+corresponding to the review you wish to send, up-to-date with the upstream
+``origin/master`` branch, and doesn't contain merges. Once you have that, you
+can use ``git show`` or ``git format-patch`` to output the diff, and attach it
+to a Phabricator review (or to an email message).
+
+However, using the "Arcanist" tool is often easier. After `installing
+arcanist`_, you can upload the latest commit using:
+
+.. code-block:: console
+
+  % arc diff HEAD~1
+
+Additionally, before sending a patch for review, please also try to ensure it's
+formatted properly. We use ``clang-format`` for this, which has git integration
+through the ``git-clang-format`` script. On some systems, it may already be
+installed (or be installable via your package manager). If so, you can simply
+run it -- the following command will format only the code changed in the most
+recent commit:
+
+.. code-block:: console
+
+  % git clang-format HEAD~1
+
+Note that this modifies the files, but doesn't commit them -- you'll likely want
+to run
+
+.. code-block:: console
+
+  % git commit --amend -a
+
+in order to update the last commit with all pending changes.
+
+.. note::
+  If you don't already have ``clang-format`` or ``git clang-format`` installed
+  on your system, the ``clang-format`` binary will be built alongside clang, and
+  the git integration can be run from
+  ``clang/tools/clang-format/git-clang-format``.
+
+
+.. _commit_from_git:
+
+For developers to commit changes from Git
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A helper script is provided in ``llvm/utils/git-svn/git-llvm``. After you add it
+to your path, you can push committed changes upstream with ``git llvm
+push``. While this creates a Subversion checkout and patches it under the hood,
+it does not require you to interact with Subversion directly.
+
+.. code-block:: console
+
+  % export PATH=$PATH:$TOP_LEVEL_DIR/llvm-project/llvm/utils/git-svn/
+  % git llvm push
+
+Within a couple of minutes after pushing to Subversion, the svn commit will have
+been converted back to a Git commit, and made its way into the official Git
+repository. At that point, ``git pull`` should get back the changes as they were
+committed.
+
+You'll likely want to ``git pull --rebase`` to get the official Git commit
+downloaded back to your repository. The SVN revision numbers of each commit can
+be found at the end of the commit message, e.g. ``llvm-svn: 350914``.
+
+You may also find the ``-n`` flag useful, like ``git llvm push -n``. This runs
+through all the steps of committing *without* actually doing the commit, and
+tells you what it would have done. That can be useful if you're unsure whether
+the right thing will happen.
+
+Checkout via SVN (deprecated)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Until we have fully migrated to Git, you may also get a fresh copy of
+the code from the official Subversion repository.
 
 * ``cd where-you-want-llvm-to-live``
 * Read-Only: ``svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm``
@@ -475,305 +536,13 @@
 * Release 1.1 through 2.8: **RELEASE_11** and so on
 * Release 1.0: **RELEASE_1**
 
-If you would like to get the LLVM test suite (a separate package as of 1.4), you
-get it from the Subversion repository:
-
-.. code-block:: console
-
-  % cd llvm/projects
-  % svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite
-
-By placing it in the ``llvm/projects``, it will be automatically configured by
-the LLVM cmake configuration.
-
-Git Mirror
-----------
-
-Git mirrors are available for a number of LLVM subprojects. These mirrors sync
-automatically with each Subversion commit and contain all necessary git-svn
-marks (so, you can recreate git-svn metadata locally). Note that right now
-mirrors reflect only ``trunk`` for each project.
-
-.. note::
-
-  On Windows, first you will want to do ``git config --global core.autocrlf
-  false`` before you clone. This goes a long way toward ensuring that
-  line-endings will be handled correctly (the LLVM project mostly uses Linux
-  line-endings).
-
-You can do the read-only Git clone of LLVM via:
-
-.. code-block:: console
-
-  % git clone https://git.llvm.org/git/llvm.git/
-
-If you want to check out clang too, run:
-
-.. code-block:: console
-
-  % cd llvm/tools
-  % git clone https://git.llvm.org/git/clang.git/
-
-If you want to check out compiler-rt (required to build the sanitizers), run:
-
-.. code-block:: console
-
-  % cd llvm/projects
-  % git clone https://git.llvm.org/git/compiler-rt.git/
-
-If you want to check out libomp (required for OpenMP support), run:
-
-.. code-block:: console
-
-  % cd llvm/projects
-  % git clone https://git.llvm.org/git/openmp.git/
-
-If you want to check out libcxx and libcxxabi (optional), run:
-
-.. code-block:: console
-
-  % cd llvm/projects
-  % git clone https://git.llvm.org/git/libcxx.git/
-  % git clone https://git.llvm.org/git/libcxxabi.git/
-
-If you want to check out the Test Suite Source Code (optional), run:
-
-.. code-block:: console
-
-  % cd llvm/projects
-  % git clone https://git.llvm.org/git/test-suite.git/
-
-Since the upstream repository is in Subversion, you should use ``git
-pull --rebase`` instead of ``git pull`` to avoid generating a non-linear history
-in your clone.  To configure ``git pull`` to pass ``--rebase`` by default on the
-master branch, run the following command:
-
-.. code-block:: console
-
-  % git config branch.master.rebase true
-
-Sending patches with Git
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-Please read `Developer Policy <DeveloperPolicy.html#one-off-patches>`_, too.
-
-Assume ``master`` points the upstream and ``mybranch`` points your working
-branch, and ``mybranch`` is rebased onto ``master``.  At first you may check
-sanity of whitespaces:
-
-.. code-block:: console
-
-  % git diff --check master..mybranch
-
-The easiest way to generate a patch is as below:
-
-.. code-block:: console
-
-  % git diff master..mybranch > /path/to/mybranch.diff
-
-It is a little different from svn-generated diff. git-diff-generated diff has
-prefixes like ``a/`` and ``b/``. Don't worry, most developers might know it
-could be accepted with ``patch -p1 -N``.
-
-But you may generate patchset with git-format-patch. It generates by-each-commit
-patchset. To generate patch files to attach to your article:
-
-.. code-block:: console
-
-  % git format-patch --no-attach master..mybranch -o /path/to/your/patchset
-
-If you would like to send patches directly, you may use git-send-email or
-git-imap-send. Here is an example to generate the patchset in Gmail's [Drafts].
-
-.. code-block:: console
-
-  % git format-patch --attach master..mybranch --stdout | git imap-send
-
-Then, your .git/config should have [imap] sections.
-
-.. code-block:: ini
-
-  [imap]
-        host = imaps://imap.gmail.com
-        user = your.gmail.account@gmail.com
-        pass = himitsu!
-        port = 993
-        sslverify = false
-  ; in English
-        folder = "[Gmail]/Drafts"
-  ; example for Japanese, "Modified UTF-7" encoded.
-        folder = "[Gmail]/&Tgtm+DBN-"
-  ; example for Traditional Chinese
-        folder = "[Gmail]/&g0l6Pw-"
-
-.. _developers-work-with-git-svn:
-
-For developers to work with git-svn
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To set up clone from which you can submit code using ``git-svn``, run:
-
-.. code-block:: console
-
-  % git clone https://git.llvm.org/git/llvm.git/
-  % cd llvm
-  % git svn init https://llvm.org/svn/llvm-project/llvm/trunk --username=<username>
-  % git config svn-remote.svn.fetch :refs/remotes/origin/master
-  % git svn rebase -l  # -l avoids fetching ahead of the git mirror.
-
-  # If you have clang too:
-  % cd tools
-  % git clone https://git.llvm.org/git/clang.git/
-  % cd clang
-  % git svn init https://llvm.org/svn/llvm-project/cfe/trunk --username=<username>
-  % git config svn-remote.svn.fetch :refs/remotes/origin/master
-  % git svn rebase -l
-
-Likewise for compiler-rt, libomp and test-suite.
-
-To update this clone without generating git-svn tags that conflict with the
-upstream Git repo, run:
-
-.. code-block:: console
-
-  % git fetch && (cd tools/clang && git fetch)  # Get matching revisions of both trees.
-  % git checkout master
-  % git svn rebase -l
-  % (cd tools/clang &&
-     git checkout master &&
-     git svn rebase -l)
-
-Likewise for compiler-rt, libomp and test-suite.
-
-This leaves your working directories on their master branches, so you'll need to
-``checkout`` each working branch individually and ``rebase`` it on top of its
-parent branch.
-
-For those who wish to be able to update an llvm repo/revert patches easily using
-git-svn, please look in the directory for the scripts ``git-svnup`` and
-``git-svnrevert``.
-
-To perform the aforementioned update steps go into your source directory and
-just type ``git-svnup`` or ``git svnup`` and everything will just work.
-
-If one wishes to revert a commit with git-svn, but do not want the git hash to
-escape into the commit message, one can use the script ``git-svnrevert`` or
-``git svnrevert`` which will take in the git hash for the commit you want to
-revert, look up the appropriate svn revision, and output a message where all
-references to the git hash have been replaced with the svn revision.
-
-To commit back changes via git-svn, use ``git svn dcommit``:
-
-.. code-block:: console
-
-  % git svn dcommit
-
-Note that git-svn will create one SVN commit for each Git commit you have pending,
-so squash and edit each commit before executing ``dcommit`` to make sure they all
-conform to the coding standards and the developers' policy.
-
-On success, ``dcommit`` will rebase against the HEAD of SVN, so to avoid conflict,
-please make sure your current branch is up-to-date (via fetch/rebase) before
-proceeding.
-
-The git-svn metadata can get out of sync after you mess around with branches and
-``dcommit``. When that happens, ``git svn dcommit`` stops working, complaining
-about files with uncommitted changes. The fix is to rebuild the metadata:
-
-.. code-block:: console
-
-  % rm -rf .git/svn
-  % git svn rebase -l
-
-Please, refer to the Git-SVN manual (``man git-svn``) for more information.
-
-For developers to work with a git monorepo
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. note::
-
-   This set-up is using an unofficial mirror hosted on GitHub, use with caution.
-
-To set up a clone of all the llvm projects using a unified repository:
-
-.. code-block:: console
-
-  % export TOP_LEVEL_DIR=`pwd`
-  % git clone https://github.com/llvm-project/llvm-project-20170507/ llvm-project
-  % cd llvm-project
-  % git config branch.master.rebase true
-
-You can configure various build directory from this clone, starting with a build
-of LLVM alone:
-
-.. code-block:: console
-
-  % cd $TOP_LEVEL_DIR
-  % mkdir llvm-build && cd llvm-build
-  % cmake -GNinja ../llvm-project/llvm
-
-Or lldb:
-
-.. code-block:: console
-
-  % cd $TOP_LEVEL_DIR
-  % mkdir lldb-build && cd lldb-build
-  % cmake -GNinja ../llvm-project/llvm -DLLVM_ENABLE_PROJECTS=lldb
-
-Or a combination of multiple projects:
-
-.. code-block:: console
-
-  % cd $TOP_LEVEL_DIR
-  % mkdir clang-build && cd clang-build
-  % cmake -GNinja ../llvm-project/llvm -DLLVM_ENABLE_PROJECTS="clang;libcxx;libcxxabi"
-
-A helper script is provided in ``llvm/utils/git-svn/git-llvm``. After you add it
-to your path, you can push committed changes upstream with ``git llvm push``.
-
-.. code-block:: console
-
-  % export PATH=$PATH:$TOP_LEVEL_DIR/llvm-project/llvm/utils/git-svn/
-  % git llvm push
-
-While this is using SVN under the hood, it does not require any interaction from
-you with git-svn.
-After a few minutes, ``git pull`` should get back the changes as they were
-committed. Note that a current limitation is that ``git`` does not directly
-record file rename, and thus it is propagated to SVN as a combination of
-delete-add instead of a file rename.
-
-The SVN revision of each monorepo commit can be found in the commit notes.  git
-does not fetch notes by default. The following commands will fetch the notes and
-configure git to fetch future notes. Use ``git notes show $commit`` to look up
-the SVN revision of a git commit. The notes show up ``git log``, and searching
-the log is currently the recommended way to look up the git commit for a given
-SVN revision.
-
-.. code-block:: console
-
-  % git config --add remote.origin.fetch +refs/notes/commits:refs/notes/commits
-  % git fetch
-
-If you are using `arc` to interact with Phabricator, you need to manually put it
-at the root of the checkout:
-
-.. code-block:: console
-
-  % cd $TOP_LEVEL_DIR
-  % cp llvm/.arcconfig ./
-  % mkdir -p .git/info/
-  % echo .arcconfig >> .git/info/exclude
-
-
 Local LLVM Configuration
 ------------------------
 
-Once checked out from the Subversion repository, the LLVM suite source code must
-be configured before being built. This process uses CMake.
-Unlinke the normal ``configure`` script, CMake
-generates the build files in whatever format you request as well as various
-``*.inc`` files, and ``llvm/include/Config/config.h``.
+Once the repository is checked out, the LLVM suite source code must be
+configured before being built. This process uses CMake.  Unlike the normal
+``configure`` script, CMake generates the build files in whatever format you
+request as well as various ``*.inc`` files, and
+``llvm/include/Config/config.h``.
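+
+For example, an illustrative configure invocation (the generator and options
+here are just one reasonable choice, not the only correct one):
+
+.. code-block:: console
+
+  % cmake -G Ninja -DCMAKE_BUILD_TYPE=Release ../llvm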
 
 Variables are passed to ``cmake`` on the command line using the format
 ``-D<variable name>=<value>``. The following variables are some common options
@@ -797,19 +566,26 @@
 |                         | running the install action of the build files.     |
 +-------------------------+----------------------------------------------------+
 | LLVM_TARGETS_TO_BUILD   | A semicolon delimited list controlling which       |
-|                         | targets will be built and linked into llc. This is |
-|                         | equivalent to the ``--enable-targets`` option in   |
-|                         | the configure script. The default list is defined  |
-|                         | as ``LLVM_ALL_TARGETS``, and can be set to include |
+|                         | targets will be built and linked into llvm.        |
+|                         | The default list is defined as                     |
+|                         | ``LLVM_ALL_TARGETS``, and can be set to include    |
 |                         | out-of-tree targets. The default value includes:   |
 |                         | ``AArch64, AMDGPU, ARM, BPF, Hexagon, Mips,        |
 |                         | MSP430, NVPTX, PowerPC, Sparc, SystemZ, X86,       |
 |                         | XCore``.                                           |
+|                         |                                                    |
 +-------------------------+----------------------------------------------------+
 | LLVM_ENABLE_DOXYGEN     | Build doxygen-based documentation from the source  |
 |                         | code This is disabled by default because it is     |
 |                         | slow and generates a lot of output.                |
 +-------------------------+----------------------------------------------------+
+| LLVM_ENABLE_PROJECTS    | A semicolon-delimited list selecting which of the  |
+|                         | other LLVM subprojects to additionally build. (Only|
+|                         | effective when using a side-by-side project layout |
+|                         | e.g. via git). The default list is empty. Can      |
+|                         | include: clang, libcxx, libcxxabi, libunwind, lldb,|
+|                         | compiler-rt, lld, polly, or debuginfo-tests.       |
++-------------------------+----------------------------------------------------+
 | LLVM_ENABLE_SPHINX      | Build sphinx-based documentation from the source   |
 |                         | code. This is disabled by default because it is    |
 |                         | slow and generates a lot of output. Sphinx version |
@@ -1030,10 +806,10 @@
 
 ``llvm/include/llvm/Config``
 
-  Header files configured by the ``configure`` script.
-  They wrap "standard" UNIX and C header files.  Source code can include these
-  header files which automatically take care of the conditional #includes that
-  the ``configure`` script generates.
+  Header files configured by ``cmake``.  They wrap "standard" UNIX and
+  C header files.  Source code can include these header files which
+  automatically take care of the conditional #includes that ``cmake``
+  generates.
 
 ``llvm/lib``
 ------------
@@ -1105,10 +881,11 @@
 ``test-suite``
 --------------
 
-A comprehensive correctness, performance, and benchmarking test suite for LLVM.
-Comes in a separate Subversion module because not every LLVM user is interested
-in such a comprehensive suite. For details see the :doc:`Testing Guide
-<TestingGuide>` document.
+A comprehensive correctness, performance, and benchmarking test suite
+for LLVM.  This comes in a `separate git repository
+<https://github.com/llvm/llvm-test-suite>`_, because it contains a
+large amount of third-party code under a variety of licenses. For
+details see the :doc:`Testing Guide <TestingGuide>` document.
 
 .. _tools:
 
@@ -1322,3 +1099,5 @@
 * `LLVM Homepage <http://llvm.org/>`_
 * `LLVM Doxygen Tree <http://llvm.org/doxygen/>`_
 * `Starting a Project that Uses LLVM <http://llvm.org/docs/Projects.html>`_
+
+.. _installing arcanist: https://secure.phabricator.com/book/phabricator/article/arcanist_quick_start/
diff --git a/docs/HowToCrossCompileBuiltinsOnArm.rst b/docs/HowToCrossCompileBuiltinsOnArm.rst
index 4b4d563..6ad93c7 100644
--- a/docs/HowToCrossCompileBuiltinsOnArm.rst
+++ b/docs/HowToCrossCompileBuiltinsOnArm.rst
@@ -19,33 +19,46 @@
 Prerequisites
 =============
 
-In this use case we'll be using CMake on a Debian-based Linux system,
+In this use case we'll be using cmake on a Debian-based Linux system,
 cross-compiling from an x86_64 host to a hard-float Armv7-A target. We'll be
 using as many of the LLVM tools as we can, but it is possible to use GNU
 equivalents.
 
  * ``A build of LLVM/clang for the llvm-tools and llvm-config``
+ * ``A clang executable with support for the ARM target``
+ * ``compiler-rt sources``
  * ``The qemu-arm user mode emulator``
  * ``An arm-linux-gnueabihf sysroot``
 
+In this example we will be using Ninja.
+
 See https://compiler-rt.llvm.org/ for more information about the dependencies
 on clang and LLVM.
 
+See https://llvm.org/docs/GettingStarted.html for information about obtaining
+the source for LLVM and compiler-rt. Note that the getting started guide
+places compiler-rt in the projects subdirectory, but this is not essential.
+If you are using the BaremetalARM.cmake cache for v6-M, v7-M and v7E-M, then
+compiler-rt must be placed in the runtimes directory.
+
 ``qemu-arm`` should be available as a package for your Linux distribution.
 
 The most complicated of the prerequisites to satisfy is the arm-linux-gnueabihf
-sysroot. The :doc:`HowToCrossCompileLLVM` has information about how to use the
-Linux distributions multiarch support to fulfill the dependencies for building
-LLVM. Alternatively, as building and testing just the compiler-rt builtins
-requires fewer dependencies than LLVM, it is possible to use the Linaro
-arm-linux-gnueabihf gcc installation as our sysroot.
+sysroot. In theory it is possible to use the Linux distribution's multiarch
+support to fulfill the build dependencies, but unfortunately, because
+/usr/local/include is added to the include path, some host includes are
+selected. The easiest way to supply a sysroot is to download the
+arm-linux-gnueabihf toolchain. This can be found at:
+
+* https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads for gcc 8 and above
+* https://releases.linaro.org/components/toolchain/binaries/ for gcc 4.9 to 7.3
 
 Building compiler-rt builtins for Arm
 =====================================
 We will be doing a standalone build of compiler-rt using the following cmake
 options.
 
-* ``path/to/llvm/projects/compiler-rt``
+* ``path/to/compiler-rt``
+* ``-G Ninja``
 * ``-DCOMPILER_RT_BUILD_BUILTINS=ON``
 * ``-DCOMPILER_RT_BUILD_SANITIZERS=OFF``
 * ``-DCOMPILER_RT_BUILD_XRAY=OFF``
@@ -57,22 +70,38 @@
 * ``-DCMAKE_RANLIB=/path/to/llvm-ranlib``
 * ``-DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=lld"``
 * ``-DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"``
+* ``-DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"``
 * ``-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON``
 * ``-DLLVM_CONFIG_PATH=/path/to/llvm-config``
 * ``-DCMAKE_C_FLAGS="build-c-flags"``
+* ``-DCMAKE_ASM_FLAGS="build-c-flags"``
 
-The build-c-flags need to be sufficient to pass the C-make compiler check and
-to compile compiler-rt. When using a GCC 7 Linaro arm-linux-gnueabihf
-installation the following flags are needed:
+The ``build-c-flags`` need to be sufficient to pass the cmake compiler check,
+compile compiler-rt, and if you are running the tests, compile and link the
+tests. When cross-compiling with clang we will need to pass sufficient
+information to generate code for the Arm architecture we are targeting. We will
+need to select the Arm target, select the Armv7-A architecture, and choose
+between using Arm or Thumb instructions. For example:
 
 * ``--target=arm-linux-gnueabihf``
-* ``--march=armv7a``
+* ``-march=armv7a``
+* ``-mthumb``
+
+When using a GCC arm-linux-gnueabihf toolchain the following flags are
+needed to pick up the includes and libraries:
+
 * ``--gcc-toolchain=/path/to/dir/toolchain``
 * ``--sysroot=/path/to/toolchain/arm-linux-gnueabihf/libc``
 
-Depending on how your sysroot is laid out, you may not need ``--gcc-toolchain``.
-For example if you have added armhf as an architecture using your Linux
-distributions multiarch support then you should be able to use ``--sysroot=/``.
+In this example we will be adding all of the command line options to both
+``CMAKE_C_FLAGS`` and ``CMAKE_ASM_FLAGS``. There are cmake flags to pass some
+of these options individually, which can be used to simplify the
+``build-c-flags``:
+
+* ``-DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"``
+* ``-DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"``
+* ``-DCMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN=/path/to/dir/toolchain``
+* ``-DCMAKE_SYSROOT=/path/to/dir/toolchain/arm-linux-gnueabihf/libc``
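+
+Putting the options together, the configure step might look like the following
+(a sketch only; every path is a placeholder for your own installation):
+
+.. code-block:: console
+
+  % cmake path/to/compiler-rt -G Ninja \
+      -DCOMPILER_RT_BUILD_BUILTINS=ON \
+      -DCOMPILER_RT_BUILD_SANITIZERS=OFF \
+      -DCOMPILER_RT_BUILD_XRAY=OFF \
+      -DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf" \
+      -DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf" \
+      -DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON \
+      -DLLVM_CONFIG_PATH=/path/to/llvm-config \
+      -DCMAKE_C_FLAGS="build-c-flags" \
+      -DCMAKE_ASM_FLAGS="build-c-flags"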
 
 Once cmake has completed the builtins can be built with ``ninja builtins``
 
@@ -90,12 +119,72 @@
 The ``/path/to/armhf/sysroot`` should be the same as the one passed to
 ``--sysroot`` in the "build-c-flags".
 
-The "test-c-flags" can be the same as the "build-c-flags", with the addition
-of ``"-fuse-ld=lld`` if you wish to use lld to link the tests.
+The "test-c-flags" need to include the target, architecture, gcc-toolchain,
+sysroot and arm/thumb state. The additional cmake defines such as
+``CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN`` do not apply when building the tests. If
+you have put all of these in "build-c-flags" then these can be repeated. If you
+wish to use lld to link the tests then add ``"-fuse-ld=lld``.
 
 Once cmake has completed the tests can be built and run using
 ``ninja check-builtins``
 
+Troubleshooting
+===============
+
+The cmake try compile stage fails
+---------------------------------
+At an early stage cmake will attempt to compile and link a simple C program to
+test if the toolchain is working.
+
+This stage can often fail at link time if the ``--sysroot`` and
+``--gcc-toolchain`` options are not passed to the compiler. Check the
+``CMAKE_C_FLAGS`` and ``CMAKE_C_COMPILER_TARGET`` flags.
+
+It can be useful to build a simple example outside of cmake with your toolchain
+to make sure it is working. For example: ``clang --target=arm-linux-gnueabihf -march=armv7a --gcc-toolchain=/path/to/gcc-toolchain --sysroot=/path/to/gcc-toolchain/arm-linux-gnueabihf/libc helloworld.c``
+
+Clang uses the host header files
+--------------------------------
+On Debian-based systems it is possible to install multiarch support for
+arm-linux-gnueabi and arm-linux-gnueabihf. In many cases clang can successfully
+use this multiarch support when --gcc-toolchain and --sysroot are not supplied.
+Unfortunately clang adds ``/usr/local/include`` before
+``/usr/include/arm-linux-gnueabihf``, leading to errors when compiling the
+host's header files.
+
+The multiarch support is not sufficient to build the builtins; you will need to
+use a separate arm-linux-gnueabihf toolchain.
+
+No target passed to clang
+-------------------------
+If clang is not given a target it will typically use the host target. This will
+not understand the Arm assembly language files, resulting in error messages
+such as ``error: unknown directive .syntax unified``.
+
+You can check the clang invocation in the error message to see if there is no
+``--target`` or if it is set incorrectly. The cause is usually
+``CMAKE_ASM_FLAGS`` not containing ``--target`` or
+``CMAKE_ASM_COMPILER_TARGET`` not being present.
+
+Arm architecture not given
+--------------------------
+With ``--target=arm-linux-gnueabihf`` alone, clang will default to the v4t Arm
+architecture, which cannot assemble the barrier instructions used in the
+synch_and_fetch source files.
+
+The cause is usually a missing ``-march=armv7a`` in the ``CMAKE_ASM_FLAGS``.
+
+Compiler-rt builds but the tests fail to build
+----------------------------------------------
+The flags used to build the tests are not the same as those used to build the
+builtins. The C flags are provided by ``COMPILER_RT_TEST_COMPILER_CFLAGS``, and
+the ``CMAKE_C_COMPILER_TARGET``, ``CMAKE_ASM_COMPILER_TARGET``,
+``CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN`` and ``CMAKE_SYSROOT`` flags are not
+applied.
+
+Make sure that ``COMPILER_RT_TEST_COMPILER_CFLAGS`` contains all the necessary
+information.
+
+
 Modifications for other Targets
 ===============================
 
@@ -112,6 +201,8 @@
 instructions, and ``-mfloat-abi=soft -mfpu=none`` for software floating-point
 emulation.
 
+You will need to use an arm-linux-gnueabi GNU toolchain for soft-float.
+
 AArch64 Target
 --------------
 The instructions for Arm can be used for AArch64 by substituting AArch64
@@ -125,39 +216,24 @@
 
 Armv6-m, Armv7-m and Armv7E-M targets
 -------------------------------------
-If you wish to build, but not test compiler-rt for Armv6-M, Armv7-M or Armv7E-M
-then the easiest way is to use the BaremetalARM.cmake recipe in
-clang/cmake/caches.
-
-You will need a bare metal sysroot such as that provided by the GNU ARM
-Embedded toolchain.
-
-The libraries can be built with the cmake options:
-
-* ``-DBAREMETAL_ARMV6M_SYSROOT=/path/to/bare/metal/sysroot``
-* ``-DBAREMETAL_ARMV7M_SYSROOT=/path/to/bare/metal/sysroot``
-* ``-DBAREMETAL_ARMV7EM_SYSROOT=/path/to/bare/metal/sysroot``
-* ``-C /path/to/llvm/source/tools/clang/cmake/caches/BaremetalARM.cmake``
-
-**Note** that for the recipe to work the compiler-rt source must be checked out
-into the directory llvm/runtimes and not llvm/projects.
-
 To build and test the libraries using a similar method to Armv7-A is possible
 but more difficult. The main problems are:
 
 * There isn't a ``qemu-arm`` user-mode emulator for bare-metal systems. The ``qemu-system-arm`` can be used but this is significantly more difficult to setup.
-* The target to compile compiler-rt have the suffix -none-eabi. This uses the BareMetal driver in clang and by default won't find the libraries needed to pass the cmake compiler check.
+* The targets used to compile compiler-rt have the suffix -none-eabi. This uses the BareMetal driver in clang and by default won't find the libraries needed to pass the cmake compiler check.
 
 As the Armv6-M, Armv7-M and Armv7E-M builds of compiler-rt only use instructions
 that are supported on Armv7-A we can still get most of the value of running the
 tests using the same ``qemu-arm`` that we used for Armv7-A by building and
 running the test cases for Armv7-A but using the builtins compiled for
-Armv6-M, Armv7-M or Armv7E-M. This will not catch instructions that are
-supported on Armv7-A but not Armv6-M, Armv7-M and Armv7E-M.
+Armv6-M, Armv7-M or Armv7E-M. This will test that the builtins can be linked
+into a binary and that the tests execute correctly, but it will not catch
+cases where the builtins use instructions that are supported on Armv7-A but
+not Armv6-M, Armv7-M and Armv7E-M.
 
-To get the cmake compile test to pass the libraries needed to successfully link
-the test application will need to be manually added to ``CMAKE_CFLAGS``.
-Alternatively if you are using version 3.6 or above of cmake you can use
+To get the cmake compile test to pass you will need to pass the libraries
+needed to successfully link the cmake test via ``CMAKE_C_FLAGS``. It is
+strongly recommended that you use version 3.6 or above of cmake so you can use
 ``CMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY`` to skip the link step.
 
 * ``-DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY``
@@ -169,6 +245,7 @@
 * ``-DCOMPILER_RT_BUILD_PROFILE=OFF``
 * ``-DCMAKE_C_COMPILER=${host_install_dir}/bin/clang``
 * ``-DCMAKE_C_COMPILER_TARGET="your *-none-eabi target"``
+* ``-DCMAKE_ASM_COMPILER_TARGET="your *-none-eabi target"``
 * ``-DCMAKE_AR=/path/to/llvm-ar``
 * ``-DCMAKE_NM=/path/to/llvm-nm``
 * ``-DCMAKE_RANLIB=/path/to/llvm-ranlib``
@@ -176,7 +253,7 @@
 * ``-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON``
 * ``-DLLVM_CONFIG_PATH=/path/to/llvm-config``
 * ``-DCMAKE_C_FLAGS="build-c-flags"``
-* ``-DCMAKE_ASM_FLAGS="${arm_cflags}"``
+* ``-DCMAKE_ASM_FLAGS="build-c-flags"``
 * ``-DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armv7-A/sysroot"``
 * ``-DCOMPILER_RT_INCLUDE_TESTS=ON``
 * ``-DCOMPILER_RT_TEST_COMPILER="/path/to/clang"``
@@ -186,16 +263,28 @@
 Armv7-A we must include ``"-mthumb -mfloat-abi=soft -mfpu=none"`` in the
 test-c-flags. We must use an Armv7-A soft-float abi sysroot for ``qemu-arm``.
 
-Unfortunately at time of writing the Armv7-M and Armv7E-M builds of
-compiler-rt will always include assembler files including floating point
-instructions. This means that building for a cpu without a floating point unit
-requires something like removing the arm_Thumb1_VFPv2_SOURCES from the
-arm_Thumb1_SOURCES in builtins/CMakeLists.txt. The float-abi of the compiler-rt
-library must be matched by the float abi of the Armv7-A sysroot used by
-qemu-arm.
-
 Depending on the linker used for the test cases you may encounter BuildAttribute
 mismatches between the M-profile objects from compiler-rt and the A-profile
-objects from the test. The lld linker does not check the BuildAttributes so it
-can be used to link the tests by adding -fuse-ld=lld to the
+objects from the test. The lld linker does not check the profile
+BuildAttribute, so it can be used to link the tests by adding ``-fuse-ld=lld``
+to the
 ``COMPILER_RT_TEST_COMPILER_CFLAGS``.
+
+Alternative using a cmake cache
+-------------------------------
+If you wish to build, but not test, compiler-rt for Armv6-M, Armv7-M or
+Armv7E-M, the easiest way is to use the BaremetalARM.cmake recipe in
+clang/cmake/caches.
+
+You will need a bare metal sysroot such as that provided by the GNU ARM
+Embedded toolchain.
+
+The libraries can be built with the cmake options:
+
+* ``-DBAREMETAL_ARMV6M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
+* ``-DBAREMETAL_ARMV7M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
+* ``-DBAREMETAL_ARMV7EM_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
+* ``-C /path/to/llvm/source/tools/clang/cmake/caches/BaremetalARM.cmake``
+* ``/path/to/llvm``
+
+**Note** that for the recipe to work the compiler-rt source must be checked out
+into the directory ``llvm/runtimes``. You will also need clang and lld checked
+out.
+
diff --git a/docs/LangRef.rst b/docs/LangRef.rst
index ea879a5..496a0f7 100644
--- a/docs/LangRef.rst
+++ b/docs/LangRef.rst
@@ -5140,7 +5140,7 @@
 conjunction with ``llvm.loop`` loop identification metadata. The
 ``llvm.loop.vectorize`` and ``llvm.loop.interleave`` metadata are only
 optimization hints and the optimizer will only interleave and vectorize loops if
-it believes it is safe to do so. The ``llvm.mem.parallel_loop_access`` metadata
+it believes it is safe to do so. The ``llvm.loop.parallel_accesses`` metadata
 which contains information about loop-carried memory dependencies can be helpful
 in determining the safety of these transformations.
 
@@ -5443,89 +5443,119 @@
 loop distribution pass. See
 :ref:`Transformation Metadata <transformation-metadata>` for details.
 
-'``llvm.mem``'
-^^^^^^^^^^^^^^^
+'``llvm.access.group``' Metadata
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Metadata types used to annotate memory accesses with information helpful
-for optimizations are prefixed with ``llvm.mem``.
+``llvm.access.group`` metadata can be attached to any instruction that
+potentially accesses memory. It can point to a single distinct metadata
+node, which we call an access group. This node represents all memory access
+instructions referring to it via ``llvm.access.group``. When an
+instruction belongs to multiple access groups, it can also point to a
+list of access groups, as illustrated by the following example.
 
-'``llvm.mem.parallel_loop_access``' Metadata
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: llvm
 
-The ``llvm.mem.parallel_loop_access`` metadata refers to a loop identifier,
-or metadata containing a list of loop identifiers for nested loops.
-The metadata is attached to memory accessing instructions and denotes that
-no loop carried memory dependence exist between it and other instructions denoted
-with the same loop identifier. The metadata on memory reads also implies that
-if conversion (i.e. speculative execution within a loop iteration) is safe.
+   %val = load i32, i32* %arrayidx, !llvm.access.group !0
+   ...
+   !0 = !{!1, !2}
+   !1 = distinct !{}
+   !2 = distinct !{}
 
-Precisely, given two instructions ``m1`` and ``m2`` that both have the
-``llvm.mem.parallel_loop_access`` metadata, with ``L1`` and ``L2`` being the
-set of loops associated with that metadata, respectively, then there is no loop
-carried dependence between ``m1`` and ``m2`` for loops in both ``L1`` and
-``L2``.
+It is illegal for the list node to be empty since it might be confused
+with an access group.
 
-As a special case, if all memory accessing instructions in a loop have
-``llvm.mem.parallel_loop_access`` metadata that refers to that loop, then the
-loop has no loop carried memory dependences and is considered to be a parallel
-loop.
+The access group metadata node must be 'distinct' to avoid collapsing
+multiple access groups by content. An access group metadata node must
+always be empty, which distinguishes an access group
+metadata node from a list of access groups. Being empty avoids the
+situation where the content must be updated which, because metadata is
+immutable by design, would require finding and updating all references
+to the access group node.
 
-Note that if not all memory access instructions have such metadata referring to
-the loop, then the loop is considered not being trivially parallel. Additional
+The access group can be used to refer to a memory access instruction
+without pointing to it directly (which is not possible in global
+metadata). Currently, the only metadata making use of it is
+``llvm.loop.parallel_accesses``.
+
+'``llvm.loop.parallel_accesses``' Metadata
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``llvm.loop.parallel_accesses`` metadata refers to one or more
+access group metadata nodes (see ``llvm.access.group``). It denotes that
+no loop-carried memory dependences exist between the memory accesses
+belonging to those access groups in the loop that carries this metadata.
+
+Let ``m1`` and ``m2`` be two instructions that carry ``llvm.access.group``
+metadata referring to the access groups ``g1`` and ``g2`` respectively
+(which might be identical). If a loop contains both access groups
+in its ``llvm.loop.parallel_accesses`` metadata, then the compiler can
+assume that there is no dependency between ``m1`` and ``m2`` carried by
+this loop. Instructions that belong to multiple access groups are
+considered to have this property if at least one of the access groups
+matches the ``llvm.loop.parallel_accesses`` list.
+
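+For example (a minimal sketch), a memory access that belongs to two access
+groups is considered parallel in a loop whose ``llvm.loop.parallel_accesses``
+list contains either one of them:
+
+.. code-block:: llvm
+
+   for.body:
+     ...
+     %val = load i32, i32* %p, !llvm.access.group !3
+     ...
+     br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
+   ...
+   !0 = distinct !{!0, !{!"llvm.loop.parallel_accesses", !1}}
+   !1 = distinct !{}
+   !2 = distinct !{}
+   !3 = !{!1, !2} ; %val is in both !1 and !2; !1 is listed, so the access
+                  ; is considered parallel in this loop
+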
+If all memory-accessing instructions in a loop belong to access groups
+referred to by that loop's ``llvm.loop.parallel_accesses`` metadata, then the
+loop has no loop-carried memory dependences and is considered to be a
+parallel loop.
+
+Note that if not all memory access instructions belong to an access
+group referred to by ``llvm.loop.parallel_accesses``, then the loop must
+not be considered trivially parallel. Additional
 memory dependence analysis is required to make that determination. As a fail
 safe mechanism, this causes loops that were originally parallel to be considered
 sequential (if optimization passes that are unaware of the parallel semantics
 insert new memory instructions into the loop body).
 
 Example of a loop that is considered parallel due to its correct use of
-both ``llvm.loop`` and ``llvm.mem.parallel_loop_access``
-metadata types that refer to the same loop identifier metadata.
+both ``llvm.access.group`` and ``llvm.loop.parallel_accesses``
+metadata types.
 
 .. code-block:: llvm
 
    for.body:
      ...
-     %val0 = load i32, i32* %arrayidx, !llvm.mem.parallel_loop_access !0
+     %val0 = load i32, i32* %arrayidx, !llvm.access.group !1
      ...
-     store i32 %val0, i32* %arrayidx1, !llvm.mem.parallel_loop_access !0
+     store i32 %val0, i32* %arrayidx1, !llvm.access.group !1
      ...
      br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
 
    for.end:
    ...
-   !0 = !{!0}
+   !0 = distinct !{!0, !{!"llvm.loop.parallel_accesses", !1}}
+   !1 = distinct !{}
 
-It is also possible to have nested parallel loops. In that case the
-memory accesses refer to a list of loop identifier metadata nodes instead of
-the loop identifier metadata node directly:
+It is also possible to have nested parallel loops:
 
 .. code-block:: llvm
 
    outer.for.body:
      ...
-     %val1 = load i32, i32* %arrayidx3, !llvm.mem.parallel_loop_access !2
+     %val1 = load i32, i32* %arrayidx3, !llvm.access.group !4
      ...
      br label %inner.for.body
 
    inner.for.body:
      ...
-     %val0 = load i32, i32* %arrayidx1, !llvm.mem.parallel_loop_access !0
+     %val0 = load i32, i32* %arrayidx1, !llvm.access.group !3
      ...
-     store i32 %val0, i32* %arrayidx2, !llvm.mem.parallel_loop_access !0
+     store i32 %val0, i32* %arrayidx2, !llvm.access.group !3
      ...
      br i1 %exitcond, label %inner.for.end, label %inner.for.body, !llvm.loop !1
 
    inner.for.end:
      ...
-     store i32 %val1, i32* %arrayidx4, !llvm.mem.parallel_loop_access !2
+     store i32 %val1, i32* %arrayidx4, !llvm.access.group !4
      ...
      br i1 %exitcond, label %outer.for.end, label %outer.for.body, !llvm.loop !2
 
    outer.for.end:                                          ; preds = %for.body
    ...
-   !0 = !{!1, !2} ; a list of loop identifiers
-   !1 = !{!1} ; an identifier for the inner loop
-   !2 = !{!2} ; an identifier for the outer loop
+   !1 = distinct !{!1, !{!"llvm.loop.parallel_accesses", !3}}     ; metadata for the inner loop
+   !2 = distinct !{!2, !{!"llvm.loop.parallel_accesses", !3, !4}} ; metadata for the outer loop
+   !3 = distinct !{} ; access group for instructions in the inner loop (which are implicitly contained in outer loop as well)
+   !4 = distinct !{} ; access group for instructions in the outer, but not the inner loop
 
 '``irr_loop``' Metadata
 ^^^^^^^^^^^^^^^^^^^^^^^
@@ -6636,7 +6666,7 @@
 #. The optional :ref:`Parameter Attributes <paramattrs>` list for return
    values. Only '``zeroext``', '``signext``', and '``inreg``' attributes
    are valid here.
-#. The optional addrspace attribute can be used to indicate the adress space
+#. The optional addrspace attribute can be used to indicate the address space
    of the called function. If it is not specified, the program address space
    from the :ref:`datalayout string<langref_datalayout>` will be used.
 #. '``ty``': the type of the call instruction itself which is also the
@@ -6941,7 +6971,7 @@
 
 The argument to the '``fneg``' instruction must be a
 :ref:`floating-point <t_floating>` or :ref:`vector <t_vector>` of
-floating-point values. 
+floating-point values.
 
 Semantics:
 """"""""""
@@ -9772,7 +9802,7 @@
 #. The optional :ref:`Parameter Attributes <paramattrs>` list for return
    values. Only '``zeroext``', '``signext``', and '``inreg``' attributes
    are valid here.
-#. The optional addrspace attribute can be used to indicate the adress space
+#. The optional addrspace attribute can be used to indicate the address space
    of the called function. If it is not specified, the program address space
    from the :ref:`datalayout string<langref_datalayout>` will be used.
 #. '``ty``': the type of the call instruction itself which is also the
@@ -14957,13 +14987,13 @@
 Overview:
 """""""""
 
-The '``llvm.experimental.constrained.maxnum``' intrinsic returns the maximum 
+The '``llvm.experimental.constrained.maxnum``' intrinsic returns the maximum
 of the two arguments.
 
 Arguments:
 """"""""""
 
-The first two arguments and the return value are floating-point numbers 
+The first two arguments and the return value are floating-point numbers
 of the same type.
 
 The third and forth arguments specify the rounding mode and exception
@@ -15031,7 +15061,7 @@
 Overview:
 """""""""
 
-The '``llvm.experimental.constrained.ceil``' intrinsic returns the ceiling of the 
+The '``llvm.experimental.constrained.ceil``' intrinsic returns the ceiling of the
 first operand.
 
 Arguments:
@@ -15067,7 +15097,7 @@
 Overview:
 """""""""
 
-The '``llvm.experimental.constrained.floor``' intrinsic returns the floor of the 
+The '``llvm.experimental.constrained.floor``' intrinsic returns the floor of the
 first operand.
 
 Arguments:
@@ -15084,7 +15114,7 @@
 """"""""""
 
 This function returns the same values as the libm ``floor`` functions
-would and handles error conditions in the same way. 
+would and handles error conditions in the same way.
 
 
 '``llvm.experimental.constrained.round``' Intrinsic
@@ -15103,7 +15133,7 @@
 Overview:
 """""""""
 
-The '``llvm.experimental.constrained.round``' intrinsic returns the first 
+The '``llvm.experimental.constrained.round``' intrinsic returns the first
 operand rounded to the nearest integer.
 
 Arguments:
@@ -15139,8 +15169,8 @@
 Overview:
 """""""""
 
-The '``llvm.experimental.constrained.trunc``' intrinsic returns the first 
-operand rounded to the nearest integer not larger in magnitude than the 
+The '``llvm.experimental.constrained.trunc``' intrinsic returns the first
+operand rounded to the nearest integer not larger in magnitude than the
 operand.
 
 Arguments:
diff --git a/docs/Phabricator.rst b/docs/Phabricator.rst
index 640e161..ca23ab3 100644
--- a/docs/Phabricator.rst
+++ b/docs/Phabricator.rst
@@ -1,3 +1,5 @@
+.. _phabricator-reviews:
+
 =============================
 Code Reviews with Phabricator
 =============================
@@ -16,7 +18,7 @@
 Sign up
 -------
 
-To get started with Phabricator, navigate to `http://reviews.llvm.org`_ and
+To get started with Phabricator, navigate to `https://reviews.llvm.org`_ and
 click the power icon in the top right. You can register with a GitHub account,
 a Google account, or you can create your own profile.
 
@@ -151,7 +153,7 @@
   Differential Revision: <URL>
 
 where ``<URL>`` is the URL for the code review, starting with
-``http://reviews.llvm.org/``.
+``https://reviews.llvm.org/``.
 
 This allows people reading the version history to see the review for
 context. This also allows Phabricator to detect the commit, close the
@@ -162,11 +164,11 @@
 ``Differential Revision`` line (as the last line) to the commit message
 yourself.
 
-Using the Arcanist tool can simplify the process of committing reviewed code
-as it will retrieve reviewers, the ``Differential Revision``, etc from the review
-and place it in the commit message. Several methods of using Arcanist to commit
-code are given below. If you do not wish to use Arcanist then simply commit
-the reviewed patch as you would normally.
+Using the Arcanist tool can simplify the process of committing reviewed code as
+it will retrieve reviewers, the ``Differential Revision``, etc. from the review
+and place them in the commit message. You may also commit an accepted change
+directly using ``git llvm push``, per the section in the :ref:`getting started
+guide <commit_from_git>`.
 
 Note that if you commit the change without using Arcanist and forget to add the
 ``Differential Revision`` line to your commit message then it is recommended
@@ -174,24 +176,9 @@
 the SVN revision number in the Comment, set the Action to "Close Revision" and
 click Submit.  Note the review must have been Accepted first.
 
-Subversion and Arcanist
-^^^^^^^^^^^^^^^^^^^^^^^
 
-On a clean Subversion working copy run the following (where ``<Revision>`` is
-the Phabricator review number):
-
-::
-
-  arc patch D<Revision>
-  arc commit --revision D<Revision>
-
-The first command will take the latest version of the reviewed patch and apply it to the working
-copy. The second command will commit this revision to trunk.
-
-git-svn and Arcanist
-^^^^^^^^^^^^^^^^^^^^
-
-This presumes that the git repository has been configured as described in :ref:`developers-work-with-git-svn`.
+Committing someone's change from Phabricator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 On a clean Git repository on an up to date ``master`` branch run the
 following (where ``<Revision>`` is the Phabricator review number):
@@ -205,17 +192,31 @@
 current ``master`` and will create a commit corresponding to ``D<Revision>`` with a
 commit message derived from information in the Phabricator review.
 
-Check you are happy with the commit message and amend it if necessary. Now switch to
-the ``master`` branch and add the new commit to it and commit it to trunk. This
-can be done by running the following:
+Check you are happy with the commit message and amend it if necessary. Then,
+make sure the commit is up-to-date, and commit it. This can be done by running
+the following:
 
 ::
 
-  git checkout master
-  git merge --ff-only arcpatch-D<Revision>
-  git svn dcommit
+  git pull --rebase origin master
+  git show # Ensure the patch looks correct.
+  ninja check-$whatever # Rerun the appropriate tests if needed.
+  git llvm push
 
+Subversion and Arcanist (deprecated)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
+To download a change from Phabricator and commit it with Subversion, you should
+first make sure you have a clean working directory. Then run the following
+(where ``<Revision>`` is the Phabricator review number):
+
+::
+
+  arc patch D<Revision>
+  arc commit --revision D<Revision>
+
+The first command will take the latest version of the reviewed patch and apply
+it to the working copy. The second command will commit this revision to trunk.
 
 Abandoning a change
 -------------------
@@ -241,9 +242,9 @@
 note that it is a derivative of an existing open source project, and so not
 trivially a good fit for an official LLVM project.
 
-.. _LLVM's Phabricator: http://reviews.llvm.org
-.. _`http://reviews.llvm.org`: http://reviews.llvm.org
-.. _Code Repository Browser: http://reviews.llvm.org/diffusion/
+.. _LLVM's Phabricator: https://reviews.llvm.org
+.. _`https://reviews.llvm.org`: https://reviews.llvm.org
+.. _Code Repository Browser: https://reviews.llvm.org/diffusion/
 .. _Arcanist Quick Start: https://secure.phabricator.com/book/phabricator/article/arcanist_quick_start/
 .. _Arcanist User Guide: https://secure.phabricator.com/book/phabricator/article/arcanist/
 .. _llvm-reviews GitHub project: https://github.com/r4nt/llvm-reviews/
diff --git a/docs/ProgrammersManual.rst b/docs/ProgrammersManual.rst
index 88c5670..0903a22 100644
--- a/docs/ProgrammersManual.rst
+++ b/docs/ProgrammersManual.rst
@@ -2905,37 +2905,6 @@
   GV->eraseFromParent();
 
 
-.. _create_types:
-
-How to Create Types
--------------------
-
-In generating IR, you may need some complex types.  If you know these types
-statically, you can use ``TypeBuilder<...>::get()``, defined in
-``llvm/Support/TypeBuilder.h``, to retrieve them.  ``TypeBuilder`` has two forms
-depending on whether you're building types for cross-compilation or native
-library use.  ``TypeBuilder<T, true>`` requires that ``T`` be independent of the
-host environment, meaning that it's built out of types from the ``llvm::types``
-(`doxygen <http://llvm.org/doxygen/namespacellvm_1_1types.html>`__) namespace
-and pointers, functions, arrays, etc. built of those.  ``TypeBuilder<T, false>``
-additionally allows native C types whose size may depend on the host compiler.
-For example,
-
-.. code-block:: c++
-
-  FunctionType *ft = TypeBuilder<types::i<8>(types::i<32>*), true>::get();
-
-is easier to read and write than the equivalent
-
-.. code-block:: c++
-
-  std::vector<const Type*> params;
-  params.push_back(PointerType::getUnqual(Type::Int32Ty));
-  FunctionType *ft = FunctionType::get(Type::Int8Ty, params, false);
-
-See the `class comment
-<http://llvm.org/doxygen/TypeBuilder_8h_source.html#l00001>`_ for more details.
-
 .. _threading:
 
 Threads and LLVM
diff --git a/docs/ReleaseNotes.rst b/docs/ReleaseNotes.rst
index a3500ba..db9cf51 100644
--- a/docs/ReleaseNotes.rst
+++ b/docs/ReleaseNotes.rst
@@ -43,6 +43,11 @@
 * The **llvm-cov** tool can now export lcov trace files using the
   `-format=lcov` option of the `export` command.
 
+* The add_llvm_loadable_module CMake macro has been removed.  The
+  add_llvm_library macro with the MODULE argument now provides the same
+  functionality.  See `Writing an LLVM Pass
+  <WritingAnLLVMPass.html#setting-up-the-build-environment>`_.
+
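+  For example, a pass plugin previously declared with the removed macro can now
+  be declared like this (a minimal sketch based on the in-tree Hello example):
+
+  .. code-block:: cmake
+
+    add_llvm_library( LLVMHello MODULE
+      Hello.cpp
+
+      PLUGIN_TOOL
+      opt
+      )
+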
 .. NOTE
    If you would like to document a larger change, then you can add a
    subsection about it right here. You can copy the following boilerplate
@@ -77,7 +82,8 @@
 Changes to the X86 Target
 -------------------------
 
- During this release ...
+* A machine model for the AMD bdver2 (Piledriver) CPU was added. It is used to
+  support instruction scheduling and other instruction cost heuristics.
 
 Changes to the AMDGPU Target
 -----------------------------
@@ -89,6 +95,19 @@
 
  During this release ...
 
+Changes to the WebAssembly Target
+---------------------------------
+
+The WebAssembly target is no longer "experimental"! It's now built by default,
+rather than needing to be enabled with LLVM_EXPERIMENTAL_TARGETS_TO_BUILD.
+
+The object file format and core C ABI are now considered stable. That said,
+the object file format has an ABI versioning capability, and one anticipated
+use for it will be to add support for returning small structs as multiple
+return values, once the underlying WebAssembly platform itself supports it.
+Additionally, multithreading support is not yet included in the stable ABI.
+
+
 Changes to the OCaml bindings
 -----------------------------
 
diff --git a/docs/SourceLevelDebugging.rst b/docs/SourceLevelDebugging.rst
index dab300a..1a5ed2f 100644
--- a/docs/SourceLevelDebugging.rst
+++ b/docs/SourceLevelDebugging.rst
@@ -478,7 +478,7 @@
 
   ;; Define the compile unit.
   !1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2,
-                               producer: "clang version 4.0.0 (http://llvm.org/git/clang.git ae4deadbea242e8ea517eef662c30443f75bd086) (http://llvm.org/git/llvm.git 818b4c1539df3e51dc7e62c89ead4abfd348827d)",
+                               producer: "clang version 4.0.0",
                                isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug,
                                enums: !3, globals: !4)
 
@@ -506,7 +506,7 @@
   !7 = !{i32 2, !"Debug Info Version", i32 3}
 
   ;; Compiler identification
-  !8 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git ae4deadbea242e8ea517eef662c30443f75bd086) (http://llvm.org/git/llvm.git 818b4c1539df3e51dc7e62c89ead4abfd348827d)"}
+  !8 = !{!"clang version 4.0.0"}
 
 
 The align value in DIGlobalVariable description specifies variable alignment in
diff --git a/docs/TestSuiteGuide.md b/docs/TestSuiteGuide.md
index fe7a278..0d9bbac 100644
--- a/docs/TestSuiteGuide.md
+++ b/docs/TestSuiteGuide.md
@@ -17,7 +17,7 @@
 
    ```bash
    % mkdir venv
-   % virtualenv -p python2.7 venv
+   % virtualenv venv
    % . venv/bin/activate
    % pip install svn+http://llvm.org/svn/llvm-project/llvm/trunk/utils/lit
    % lit --version
diff --git a/docs/TypeMetadata.rst b/docs/TypeMetadata.rst
index 0382d0c..84cf05b 100644
--- a/docs/TypeMetadata.rst
+++ b/docs/TypeMetadata.rst
@@ -223,4 +223,4 @@
       ret void
     }
 
-.. _GlobalLayoutBuilder: http://git.llvm.org/klaus/llvm/blob/master/include/llvm/Transforms/IPO/LowerTypeTests.h
+.. _GlobalLayoutBuilder: https://github.com/llvm/llvm-project/blob/master/llvm/include/llvm/Transforms/IPO/LowerTypeTests.h
diff --git a/docs/Vectorizers.rst b/docs/Vectorizers.rst
index 42e8d02..fd1525c 100644
--- a/docs/Vectorizers.rst
+++ b/docs/Vectorizers.rst
@@ -311,7 +311,7 @@
 Vectorization of function calls
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The Loop Vectorize can vectorize intrinsic math functions.
+The Loop Vectorizer can vectorize intrinsic math functions.
 See the table below for a list of these functions.
 
 +-----+-----+---------+
@@ -328,6 +328,11 @@
 |     |     | fmuladd |
 +-----+-----+---------+
 
+Note that the optimizer may not be able to vectorize math library functions
+that correspond to these intrinsics if the library calls access external state
+such as ``errno``. To allow better optimization of C/C++ math library
+functions, use ``-fno-math-errno``.
+
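+For example, with ``-fno-math-errno`` a loop like the following (an
+illustrative C++ snippet) can have its ``std::sqrt`` calls mapped to the
+``llvm.sqrt`` intrinsic and vectorized:
+
+.. code-block:: c++
+
+  #include <cmath>
+
+  void f(float *a, const float *b, int n) {
+    for (int i = 0; i < n; ++i)
+      a[i] = std::sqrt(b[i]); // no errno update required
+  }
+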
 The loop vectorizer knows about special instructions on the target and will
 vectorize a loop containing a function call that maps to the instructions. For
 example, the loop below will be vectorized on Intel x86 if the SSE4.1 roundps
@@ -369,7 +374,7 @@
 -----------
 
 This section shows the execution time of Clang on a simple benchmark:
-`gcc-loops <http://llvm.org/viewvc/llvm-project/test-suite/trunk/SingleSource/UnitTests/Vectorizer/>`_.
+`gcc-loops <https://github.com/llvm/llvm-test-suite/tree/master/SingleSource/UnitTests/Vectorizer>`_.
 This benchmark is a collection of loops from the GCC autovectorization
 `page <http://gcc.gnu.org/projects/tree-ssa/vectorization.html>`_ by Dorit Nuzman.
 
diff --git a/docs/WritingAnLLVMPass.rst b/docs/WritingAnLLVMPass.rst
index 41f4007..19dc6c1 100644
--- a/docs/WritingAnLLVMPass.rst
+++ b/docs/WritingAnLLVMPass.rst
@@ -55,7 +55,7 @@
 
 .. code-block:: cmake
 
-  add_llvm_loadable_module( LLVMHello
+  add_llvm_library( LLVMHello MODULE
     Hello.cpp
   
     PLUGIN_TOOL
diff --git a/docs/conf.py b/docs/conf.py
index 91c8656..7fdcb3d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -9,6 +9,7 @@
 #
 # All configuration values have a default; values that are commented out
 # serve to show the default.
+from __future__ import print_function
 
 import sys, os
 from datetime import date
@@ -219,7 +220,7 @@
 # Automatically derive the list of man pages from the contents of the command
 # guide subdirectory.
 basedir = os.path.dirname(__file__)
-man_page_authors = "Maintained by The LLVM Team (http://llvm.org/)."
+man_page_authors = "Maintained by the LLVM Team (https://llvm.org/)."
 command_guide_subpath = 'CommandGuide'
 command_guide_path = os.path.join(basedir, command_guide_subpath)
 for name in os.listdir(command_guide_path):
@@ -234,14 +235,14 @@
         header = f.readline().rstrip('\n')
 
         if len(header) != len(title):
-            print >>sys.stderr, (
+            print((
                 "error: invalid header in %r (does not match title)" % (
-                    file_subpath,))
+                    file_subpath,)), file=sys.stderr)
         if ' - ' not in title:
-            print >>sys.stderr, (
+            print((
                 ("error: invalid title in %r "
                  "(expected '<name> - <description>')") % (
-                    file_subpath,))
+                    file_subpath,)), file=sys.stderr)
 
         # Split the name out of the title.
         name,description = title.split(' - ', 1)
diff --git a/docs/index.rst b/docs/index.rst
index 698e676..4527fe5 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -232,7 +232,7 @@
 
 `Documentation for Go bindings <http://godoc.org/llvm.org/llvm/bindings/go/llvm>`_
 
-`ViewVC Repository Browser <http://llvm.org/viewvc/>`_
+`GitHub Source Repository Browser <https://github.com/llvm/llvm-project/>`_
    ..
 
 :doc:`CompilerWriterInfo`
diff --git a/docs/llvm-objdump.1 b/docs/llvm-objdump.1
new file mode 100644
index 0000000..72d1b93
--- /dev/null
+++ b/docs/llvm-objdump.1
@@ -0,0 +1,197 @@
+.\" This file is distributed under the University of Illinois Open Source
+.\" License. See LICENSE.TXT for details.
+.\"
+.Dd December 19, 2018
+.Dt LLVM-OBJDUMP 1
+.Os
+.Sh NAME
+.Nm llvm-objdump
+.Nd LLVM object file dumper
+.Sh SYNOPSIS
+.Nm llvm-objdump
+.Op Ar options
+.Ar objfile ...
+.Sh DESCRIPTION
+.Nm
+prints the contents of object files and final linked images named on the
+command line.
+If no file name is specified,
+.Nm
+will attempt to read from
+.Pa a.out .
+If
+.Pa -
+is used as a file name,
+.Nm
+will process a file on its standard input stream.
+.Nm
+accepts many of the same command line arguments as GNU objdump.
+.Sh OPTIONS
+.Ss General Options
+.Bl -tag -width indent
+.It Fl -aarch64-neon-syntax Ns = Ns Ar value
+Choose style of NEON code to emit from AArch64 backend.
+.Ar value
+may be one of:
+.Bl -tag -width indent
+.It generic
+Generic NEON assembly
+.It apple
+Apple-style NEON assembly
+.El
+.It Fl -arch Ns = Ns Ar value
+Choose architecture(s) from a Mach-O file to dump.
+.It Fl -arch-name Ns = Ns Ar arch
+Target arch to disassemble for.
+See
+.Fl -version
+for available targets.
+.It Fl -bind
+Display mach-o binding info.
+.It Fl -color
+Use colored syntax highlighting.
+Default autodetect.
+.It Fl -disassemble
+Display assembler mnemonics for machine instructions.
+.It Fl -disassemble-all
+Display assembler mnemonics for the machine instruction in all sections.
+.It Fl -dsym Ns = Ns Ar file
+Use
+.Ar file
+for debug info.
+.It Fl -dwarf Ns = Ns Ar sections
+Dump DWARF debug sections.
+.Bl -tag -width indent
+.It frames
+.Dv .debug_frame
+.El
+.It Fl -exports-trie
+Display mach-o exported symbols.
+.It Fl -fault-map-section
+Display contents of faultmap section.
+.It Fl -filter-print-funcs Ns = Ns Ar functions
+Only print IR for functions whose name match
+.Ar functions
+for all print-[before|after][-all] options.
+.It Fl -full-leading-addr
+Print full leading address.
+.It Fl g
+Print line information from debug info if available.
+.It Fl h , -headers , -section-headers
+Display summaries of the headers for each section.
+.It Fl -help
+Display available options.
+Use
+.Fl -help-hidden
+for more.
+.It Fl -lazy-bind
+Display mach-o lazy binding info.
+.It Fl -line-numbers
+Display source line numbers with disassembly.
+Implies disassemble object.
+.It Fl -macho
+Use MachO specific object file parser.
+.It Fl -mattr Ns = Ns Ar attribute ...
+Target specific attributes.
+.It Fl -mcpu Ns = Ns Ar CPU
+Target a specific cpu type.
+Use
+.Fl mcpu Ns = Ns help
+for details.
+.It Fl -no-leading-addr
+Print no leading address.
+.It Fl -no-leading-headers
+Print no leading headers.
+.It Fl -no-show-raw-insn
+When disassembling instructions, do not print the instruction bytes.
+.It Fl -print-imm-hex
+Use hex format for immediate values.
+.It Fl -private-header
+Display only the first format specific file header.
+.It Fl -private-headers
+Display format specific file headers.
+.It Fl r
+Display the relocation entries in the file.
+.It Fl -raw-clang-ast
+Dump the raw binary contents of the clang AST section.
+.It Fl -rebase
+Display mach-o rebasing info.
+.It Fl -reverse-iterate
+Reverse iterate.
+.It Fl s
+Display the content of each section.
+.It Fl -section Ns = Ns Ar section
+Operate on the specified sections only.
+With
+.Fl -macho
+dump segment,section.
+.It Fl -source
+Display source inline with disassembly.
+Implies disassemble object.
+.It Fl -start-address Ns = Ns Ar address
+Disassemble beginning at
+.Ar address .
+.It Fl -stop-address Ns = Ns Ar address
+Stop disassembly at
+.Ar address .
+.It Fl t
+Display the symbol table.
+.It Fl -triple Ns = Ns Ar triple
+Target triple to disassemble for.
+See
+.Fl -version
+for available targets.
+.It Fl -unwind-info
+Display unwind information.
+.It Fl -version
+Display the version of this program.
+.It Fl -weak-bind
+Display mach-o weak binding info.
+.It Fl -x86-asm-syntax Ns = Ns Ar syntax
+Choose style of code to emit from X86 backend.
+.Bl -tag -width indent
+.It att
+Emit AT&T-style assembly.
+.It intel
+Emit Intel-style assembly.
+.El
+.El
+.Ss Mach-O Options
+There are a number of options specific to the Mach-O format.
+These are used in combination with the
+.Fl -macho
+option.
+.Bl -tag -width indent
+.It Fl -archive-headers
+Print archive headers for Mach-O archives.
+.It Fl -archive-member-offsets
+Print the offset to each archive member for Mach-O archives.
+Requires
+.Fl -macho
+and
+.Fl -archive-headers .
+.It Fl -data-in-code
+Print the data in code table for Mach-O objects.
+.It Fl -dis-symname Ns = Ns Ar symbol
+Disassemble just
+.Ar symbol 's
+instructions.
+.It Fl -dylib-id
+Print the shared library's id for the dylib Mach-O file.
+.It Fl -dylibs-used
+Print the shared libraries used for linked Mach-O files.
+.It Fl -indirect-symbols
+Print indirect symbol table for Mach-O objects.
+.It Fl -info-plist
+Print the info plist section as strings for Mach-O objects.
+.It Fl -link-opt-hints
+Print the linker optimization hints for Mach-O objects.
+.It Fl -no-symbolic-operands
+Do not print symbolic operands when disassembling.
+.It Fl -non-verbose
+Print the info for Mach-O objects in non-verbose or numeric form.
+.It Fl -objc-meta-data
+Print the Objective-C runtime meta data for Mach-O files.
+.It Fl -universal-headers
+Print Mach-O universal headers.
+.El
diff --git a/examples/Kaleidoscope/MCJIT/cached/genk-timing.py b/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
index 96dd6db..87bbfbf 100644
--- a/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
+++ b/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import sys
 import random
 
@@ -173,7 +175,7 @@
 
 def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
     """ Generate a random Kaleidoscope script based on the given parameters """
-    print "Generating " + filename
+    print("Generating " + filename)
     print("  %d functions, %d elements per function, %d functions between execution" %
           (numFuncs, elementsPerFunc, funcsBetweenExec))
     print("  Call weighting = %f" % callWeighting)
@@ -200,7 +202,7 @@
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print "  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
     timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
 
 # Execution begins here
@@ -216,4 +218,4 @@
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
     filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
     generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
-print "All done!"
+print("All done!")
diff --git a/examples/Kaleidoscope/MCJIT/cached/split-lib.py b/examples/Kaleidoscope/MCJIT/cached/split-lib.py
index 5cdcc6d..1aa80ee 100644
--- a/examples/Kaleidoscope/MCJIT/cached/split-lib.py
+++ b/examples/Kaleidoscope/MCJIT/cached/split-lib.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
     def __init__(self, scriptname, outputname):
@@ -47,7 +49,7 @@
   infile = open(inputname, "r")
   libfile = open(libname, "w")
   callfile = open(callname, "w")
-  print "Splitting %s into %s and %s" % (inputname, callname, libname)
+  print("Splitting %s into %s and %s" % (inputname, callname, libname))
   for line in infile:
     if not line.startswith("#"):
       if line.startswith("print"):
@@ -67,4 +69,4 @@
 
 for script in script_list:
   splitScript(script, libGenScript, timingScript)
-print "All done!"
+print("All done!")
diff --git a/examples/Kaleidoscope/MCJIT/complete/genk-timing.py b/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
index 72591fe..c3b4d23 100644
--- a/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
+++ b/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import sys
 import random
 
@@ -178,7 +180,7 @@
 
 def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
     """ Generate a random Kaleidoscope script based on the given parameters """
-    print "Generating " + filename
+    print("Generating " + filename)
     print("  %d functions, %d elements per function, %d functions between execution" %
           (numFuncs, elementsPerFunc, funcsBetweenExec))
     print("  Call weighting = %f" % callWeighting)
@@ -205,7 +207,7 @@
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print "  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
     timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
 
 # Execution begins here
@@ -221,4 +223,4 @@
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
     filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
     generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
-print "All done!"
+print("All done!")
diff --git a/examples/Kaleidoscope/MCJIT/complete/split-lib.py b/examples/Kaleidoscope/MCJIT/complete/split-lib.py
index f6bec02..61c9a5b 100644
--- a/examples/Kaleidoscope/MCJIT/complete/split-lib.py
+++ b/examples/Kaleidoscope/MCJIT/complete/split-lib.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
     def __init__(self, scriptname, outputname):
@@ -47,7 +49,7 @@
   infile = open(inputname, "r")
   libfile = open(libname, "w")
   callfile = open(callname, "w")
-  print "Splitting %s into %s and %s" % (inputname, callname, libname)
+  print("Splitting %s into %s and %s" % (inputname, callname, libname))
   for line in infile:
     if not line.startswith("#"):
       if line.startswith("print"):
@@ -67,4 +69,4 @@
 
 for script in script_list:
   splitScript(script, libGenScript, timingScript)
-print "All done!"
+print("All done!")
diff --git a/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py b/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
index 96dd6db..87bbfbf 100644
--- a/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
+++ b/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import sys
 import random
 
@@ -173,7 +175,7 @@
 
 def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
     """ Generate a random Kaleidoscope script based on the given parameters """
-    print "Generating " + filename
+    print("Generating " + filename)
     print("  %d functions, %d elements per function, %d functions between execution" %
           (numFuncs, elementsPerFunc, funcsBetweenExec))
     print("  Call weighting = %f" % callWeighting)
@@ -200,7 +202,7 @@
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print "  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
     timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
 
 # Execution begins here
@@ -216,4 +218,4 @@
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
     filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
     generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
-print "All done!"
+print("All done!")
diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h
index 0784275..06de058 100644
--- a/include/llvm-c/Core.h
+++ b/include/llvm-c/Core.h
@@ -521,6 +521,23 @@
                                  void *OpaqueHandle);
 
 /**
+ * Retrieve whether the given context is set to discard all value names.
+ *
+ * @see LLVMContext::shouldDiscardValueNames()
+ */
+LLVMBool LLVMContextShouldDiscardValueNames(LLVMContextRef C);
+
+/**
+ * Set whether the given context discards all value names.
+ *
+ * If true, only the names of GlobalValue objects will be available in the IR.
+ * This can be used to save memory and runtime, especially in release mode.
+ *
+ * @see LLVMContext::setDiscardValueNames()
+ */
+void LLVMContextSetDiscardValueNames(LLVMContextRef C, LLVMBool Discard);
+
+/**
  * Destroy a context instance.
  *
  * This should be called for every call to LLVMContextCreate() or memory
@@ -2068,9 +2085,14 @@
 LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
 LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
                           LLVMValueRef *ConstantIndices, unsigned NumIndices);
+LLVMValueRef LLVMConstGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
+                           LLVMValueRef *ConstantIndices, unsigned NumIndices);
 LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
                                   LLVMValueRef *ConstantIndices,
                                   unsigned NumIndices);
+LLVMValueRef LLVMConstInBoundsGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
+                                   LLVMValueRef *ConstantIndices,
+                                   unsigned NumIndices);
 LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
 LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
 LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
@@ -2790,6 +2812,14 @@
 LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn);
 
 /**
+ * Create a new basic block without inserting it into a function.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
+LLVMBasicBlockRef LLVMCreateBasicBlockInContext(LLVMContextRef C,
+                                                const char *Name);
+
+/**
  * Append a basic block to the end of a function.
  *
  * @see llvm::BasicBlock::Create()
@@ -3075,6 +3105,13 @@
                                        const char *K, unsigned KLen);
 
 /**
+ * Obtain the function type called by this instruction.
+ *
+ * @see llvm::CallBase::getFunctionType()
+ */
+LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef C);
+
+/**
  * Obtain the pointer to the function invoked by this instruction.
  *
  * This expects an LLVMValueRef that corresponds to a llvm::CallInst or
@@ -3366,10 +3403,16 @@
                              LLVMBasicBlockRef Else, unsigned NumCases);
 LLVMValueRef LLVMBuildIndirectBr(LLVMBuilderRef B, LLVMValueRef Addr,
                                  unsigned NumDests);
+// LLVMBuildInvoke is deprecated in favor of LLVMBuildInvoke2, in preparation
+// for opaque pointer types.
 LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef, LLVMValueRef Fn,
                              LLVMValueRef *Args, unsigned NumArgs,
                              LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
                              const char *Name);
+LLVMValueRef LLVMBuildInvoke2(LLVMBuilderRef, LLVMTypeRef Ty, LLVMValueRef Fn,
+                              LLVMValueRef *Args, unsigned NumArgs,
+                              LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+                              const char *Name);
 LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef);
 
 /* Exception Handling */
@@ -3559,9 +3602,15 @@
 LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef, LLVMTypeRef Ty,
                                   LLVMValueRef Val, const char *Name);
 LLVMValueRef LLVMBuildFree(LLVMBuilderRef, LLVMValueRef PointerVal);
+// LLVMBuildLoad is deprecated in favor of LLVMBuildLoad2, in preparation for
+// opaque pointer types.
 LLVMValueRef LLVMBuildLoad(LLVMBuilderRef, LLVMValueRef PointerVal,
                            const char *Name);
+LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef, LLVMTypeRef Ty,
+                            LLVMValueRef PointerVal, const char *Name);
 LLVMValueRef LLVMBuildStore(LLVMBuilderRef, LLVMValueRef Val, LLVMValueRef Ptr);
+// LLVMBuildGEP, LLVMBuildInBoundsGEP, and LLVMBuildStructGEP are deprecated in
+// favor of LLVMBuild*GEP2, in preparation for opaque pointer types.
 LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
                           LLVMValueRef *Indices, unsigned NumIndices,
                           const char *Name);
@@ -3570,6 +3619,15 @@
                                   const char *Name);
 LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
                                 unsigned Idx, const char *Name);
+LLVMValueRef LLVMBuildGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+                           LLVMValueRef Pointer, LLVMValueRef *Indices,
+                           unsigned NumIndices, const char *Name);
+LLVMValueRef LLVMBuildInBoundsGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+                                   LLVMValueRef Pointer, LLVMValueRef *Indices,
+                                   unsigned NumIndices, const char *Name);
+LLVMValueRef LLVMBuildStructGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+                                 LLVMValueRef Pointer, unsigned Idx,
+                                 const char *Name);
 LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
                                    const char *Name);
 LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
@@ -3616,11 +3674,16 @@
                            LLVMTypeRef DestTy, const char *Name);
 LLVMValueRef LLVMBuildPointerCast(LLVMBuilderRef, LLVMValueRef Val,
                                   LLVMTypeRef DestTy, const char *Name);
-LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef, LLVMValueRef Val, /*Signed cast!*/
-                              LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildIntCast2(LLVMBuilderRef, LLVMValueRef Val,
+                               LLVMTypeRef DestTy, LLVMBool IsSigned,
+                               const char *Name);
 LLVMValueRef LLVMBuildFPCast(LLVMBuilderRef, LLVMValueRef Val,
                              LLVMTypeRef DestTy, const char *Name);
 
+/** Deprecated: This cast is always signed. Use LLVMBuildIntCast2 instead. */
+LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef, LLVMValueRef Val, /*Signed cast!*/
+                              LLVMTypeRef DestTy, const char *Name);
+
 /* Comparisons */
 LLVMValueRef LLVMBuildICmp(LLVMBuilderRef, LLVMIntPredicate Op,
                            LLVMValueRef LHS, LLVMValueRef RHS,
@@ -3631,9 +3694,14 @@
 
 /* Miscellaneous instructions */
 LLVMValueRef LLVMBuildPhi(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
+// LLVMBuildCall is deprecated in favor of LLVMBuildCall2, in preparation for
+// opaque pointer types.
 LLVMValueRef LLVMBuildCall(LLVMBuilderRef, LLVMValueRef Fn,
                            LLVMValueRef *Args, unsigned NumArgs,
                            const char *Name);
+LLVMValueRef LLVMBuildCall2(LLVMBuilderRef, LLVMTypeRef, LLVMValueRef Fn,
+                            LLVMValueRef *Args, unsigned NumArgs,
+                            const char *Name);
 LLVMValueRef LLVMBuildSelect(LLVMBuilderRef, LLVMValueRef If,
                              LLVMValueRef Then, LLVMValueRef Else,
                              const char *Name);
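The explicitly-typed *2 entry points added above all take the callee or pointee type as an extra leading argument, so callers keep working once pointer types become opaque. A minimal sketch of the new call path, assuming Ctx, M, Builder, and a function value Caller were created elsewhere (those names are placeholders, not part of this header):

    // Build i32 callee(i32) and call it through the typed API.
    LLVMTypeRef I32 = LLVMInt32TypeInContext(Ctx);
    LLVMTypeRef FnTy = LLVMFunctionType(I32, &I32, 1, /*IsVarArg=*/0);
    LLVMValueRef Fn = LLVMAddFunction(M, "callee", FnTy);

    LLVMBasicBlockRef BB = LLVMAppendBasicBlockInContext(Ctx, Caller, "entry");
    LLVMPositionBuilderAtEnd(Builder, BB);

    LLVMValueRef Arg = LLVMConstInt(I32, 42, /*SignExtend=*/0);
    // LLVMBuildCall2 carries FnTy explicitly instead of recovering it from
    // Fn's (soon to be opaque) pointer type.
    LLVMValueRef Ret = LLVMBuildCall2(Builder, FnTy, Fn, &Arg, 1, "ret");

The same shape applies to LLVMBuildLoad2, LLVMBuildGEP2, and LLVMBuildInvoke2: each is the old builder with one extra LLVMTypeRef.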
diff --git a/include/llvm-c/DebugInfo.h b/include/llvm-c/DebugInfo.h
index bc341da..87a7203 100644
--- a/include/llvm-c/DebugInfo.h
+++ b/include/llvm-c/DebugInfo.h
@@ -54,7 +54,8 @@
   LLVMDIFlagMainSubprogram = 1 << 21,
   LLVMDIFlagTypePassByValue = 1 << 22,
   LLVMDIFlagTypePassByReference = 1 << 23,
-  LLVMDIFlagFixedEnum = 1 << 24,
+  LLVMDIFlagEnumClass = 1 << 24,
+  LLVMDIFlagFixedEnum = LLVMDIFlagEnumClass, // Deprecated.
   LLVMDIFlagThunk = 1 << 25,
   LLVMDIFlagTrivial = 1 << 26,
   LLVMDIFlagBigEndian = 1 << 27,
diff --git a/include/llvm-c/TargetMachine.h b/include/llvm-c/TargetMachine.h
index bcfa001..c06e9ed 100644
--- a/include/llvm-c/TargetMachine.h
+++ b/include/llvm-c/TargetMachine.h
@@ -39,7 +39,10 @@
     LLVMRelocDefault,
     LLVMRelocStatic,
     LLVMRelocPIC,
-    LLVMRelocDynamicNoPic
+    LLVMRelocDynamicNoPic,
+    LLVMRelocROPI,
+    LLVMRelocRWPI,
+    LLVMRelocROPI_RWPI
 } LLVMRelocMode;
 
 typedef enum {
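LLVMRelocROPI, LLVMRelocRWPI, and LLVMRelocROPI_RWPI expose ARM's read-only/read-write position independence modes through the C API. A hedged sketch of selecting one; T is assumed to be an LLVMTargetRef obtained earlier (for example via LLVMGetTargetFromTriple), and the triple is only illustrative:

    LLVMTargetMachineRef TM = LLVMCreateTargetMachine(
        T, "armv7-none-eabi", /*CPU=*/"", /*Features=*/"",
        LLVMCodeGenLevelDefault, LLVMRelocROPI, LLVMCodeModelDefault);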
diff --git a/include/llvm/ADT/IntervalMap.h b/include/llvm/ADT/IntervalMap.h
index b75c492..2af6104 100644
--- a/include/llvm/ADT/IntervalMap.h
+++ b/include/llvm/ADT/IntervalMap.h
@@ -1134,6 +1134,19 @@
     I.find(x);
     return I;
   }
+
+  /// overlaps(a, b) - Return true if the intervals in this map overlap with the
+  /// interval [a;b].
+  bool overlaps(KeyT a, KeyT b) {
+    assert(Traits::nonEmpty(a, b));
+    const_iterator I = find(a);
+    if (!I.valid())
+      return false;
+    // [a;b] and [x;y] overlap iff x<=b and a<=y. The find() call guarantees the
+    // second part (y = find(a).stop()), so it is sufficient to check the first
+    // one.
+    return !Traits::stopLess(b, I.start());
+  }
 };
 
 /// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
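Because find(a) already positions the iterator on the first interval whose stop is not less than a, overlaps() needs only one extra comparison and stays O(log n). A small usage sketch with illustrative key/value types:

    llvm::IntervalMap<unsigned, unsigned>::Allocator Alloc;
    llvm::IntervalMap<unsigned, unsigned> Map(Alloc);
    Map.insert(10, 20, /*Value=*/1);   // closed interval [10;20]
    bool A = Map.overlaps(15, 30);     // true: [15;30] intersects [10;20]
    bool B = Map.overlaps(21, 30);     // false: begins past the stored stop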
diff --git a/include/llvm/ADT/PointerIntPair.h b/include/llvm/ADT/PointerIntPair.h
index 884d051..6d1b53a 100644
--- a/include/llvm/ADT/PointerIntPair.h
+++ b/include/llvm/ADT/PointerIntPair.h
@@ -42,6 +42,8 @@
           typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
           typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
 class PointerIntPair {
+  // Used by MSVC visualizer and generally helpful for debugging/visualizing.
+  using InfoTy = Info;
   intptr_t Value = 0;
 
 public:
diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h
index 8685f0e..f66ca7c 100644
--- a/include/llvm/ADT/STLExtras.h
+++ b/include/llvm/ADT/STLExtras.h
@@ -75,6 +75,12 @@
   using type =
       typename std::add_pointer<typename std::add_const<T>::type>::type;
 };
+
+template <typename T> struct make_const_ref {
+  using type = typename std::add_lvalue_reference<
+      typename std::add_const<T>::type>::type;
+};
+
 //===----------------------------------------------------------------------===//
 //     Extra additions to <functional>
 //===----------------------------------------------------------------------===//
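make_const_ref composes add_const and add_lvalue_reference, mirroring the make_const_ptr trait directly above it:

    static_assert(
        std::is_same<llvm::make_const_ref<int>::type, const int &>::value,
        "make_const_ref<T>::type is const T &");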
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index 04b64e2..e06a68e 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -60,7 +60,6 @@
     mips64,         // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6
     mips64el,       // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el
     msp430,         // MSP430: msp430
-    nios2,          // NIOSII: nios2
     ppc,            // PPC: powerpc
     ppc64,          // PPC64: powerpc64, ppu
     ppc64le,        // PPC64LE: powerpc64le
@@ -187,7 +186,8 @@
     AMDPAL,     // AMD PAL Runtime
     HermitCore, // HermitCore Unikernel/Multikernel
     Hurd,       // GNU/Hurd
-    LastOSType = Hurd
+    WASI,       // Experimental WebAssembly OS
+    LastOSType = WASI
   };
   enum EnvironmentType {
     UnknownEnvironment,
@@ -588,6 +588,11 @@
     return getOS() == Triple::Hurd;
   }
 
+  /// Tests whether the OS is WASI.
+  bool isOSWASI() const {
+    return getOS() == Triple::WASI;
+  }
+
   /// Tests whether the OS uses glibc.
   bool isOSGlibc() const {
     return (getOS() == Triple::Linux || getOS() == Triple::KFreeBSD ||
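Assuming the matching parser change in lib/Support/Triple.cpp (not shown in this diff), a wasm triple naming WASI classifies as expected:

    llvm::Triple T("wasm32-unknown-wasi"); // illustrative spelling
    bool IsWASI = T.isOSWASI();            // expected to be true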
diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h
index 2efcd9d..e2a2ac0 100644
--- a/include/llvm/Analysis/AliasAnalysis.h
+++ b/include/llvm/Analysis/AliasAnalysis.h
@@ -43,7 +43,6 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
@@ -363,7 +362,8 @@
 
   /// A convenience wrapper around the \c isMustAlias helper interface.
   bool isMustAlias(const Value *V1, const Value *V2) {
-    return alias(V1, 1, V2, 1) == MustAlias;
+    return alias(V1, LocationSize::precise(1), V2, LocationSize::precise(1)) ==
+           MustAlias;
   }
 
   /// Checks whether the given location points to constant memory, or if
@@ -381,15 +381,15 @@
   /// \name Simple mod/ref information
   /// @{
 
-  /// Get the ModRef info associated with a pointer argument of a callsite. The
+  /// Get the ModRef info associated with a pointer argument of a call. The
   /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
   /// that these bits do not necessarily account for the overall behavior of
   /// the function, but rather only provide additional per-argument
   /// information. This never sets ModRefInfo::Must.
-  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
 
   /// Return the behavior of the given call site.
-  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+  FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
 
   /// Return the behavior when calling the given function.
   FunctionModRefBehavior getModRefBehavior(const Function *F);
@@ -405,8 +405,8 @@
   /// property (e.g. calls to 'sin' and 'cos').
   ///
   /// This property corresponds to the GCC 'const' attribute.
-  bool doesNotAccessMemory(ImmutableCallSite CS) {
-    return getModRefBehavior(CS) == FMRB_DoesNotAccessMemory;
+  bool doesNotAccessMemory(const CallBase *Call) {
+    return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory;
   }
 
   /// Checks if the specified function is known to never read or write memory.
@@ -433,8 +433,8 @@
   /// absence of interfering store instructions, such as CSE of strlen calls.
   ///
   /// This property corresponds to the GCC 'pure' attribute.
-  bool onlyReadsMemory(ImmutableCallSite CS) {
-    return onlyReadsMemory(getModRefBehavior(CS));
+  bool onlyReadsMemory(const CallBase *Call) {
+    return onlyReadsMemory(getModRefBehavior(Call));
   }
 
   /// Checks if the specified function is known to only read from non-volatile
@@ -499,36 +499,12 @@
 
   /// getModRefInfo (for call sites) - Return information about whether
   /// a particular call site modifies or reads the specified memory location.
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
 
   /// getModRefInfo (for call sites) - A convenience wrapper.
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
+  ModRefInfo getModRefInfo(const CallBase *Call, const Value *P,
                            LocationSize Size) {
-    return getModRefInfo(CS, MemoryLocation(P, Size));
-  }
-
-  /// getModRefInfo (for calls) - Return information about whether
-  /// a particular call modifies or reads the specified memory location.
-  ModRefInfo getModRefInfo(const CallInst *C, const MemoryLocation &Loc) {
-    return getModRefInfo(ImmutableCallSite(C), Loc);
-  }
-
-  /// getModRefInfo (for calls) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const CallInst *C, const Value *P,
-                           LocationSize Size) {
-    return getModRefInfo(C, MemoryLocation(P, Size));
-  }
-
-  /// getModRefInfo (for invokes) - Return information about whether
-  /// a particular invoke modifies or reads the specified memory location.
-  ModRefInfo getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) {
-    return getModRefInfo(ImmutableCallSite(I), Loc);
-  }
-
-  /// getModRefInfo (for invokes) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P,
-                           LocationSize Size) {
-    return getModRefInfo(I, MemoryLocation(P, Size));
+    return getModRefInfo(Call, MemoryLocation(P, Size));
   }
 
   /// getModRefInfo (for loads) - Return information about whether
@@ -568,7 +544,7 @@
 
   /// getModRefInfo (for cmpxchges) - A convenience wrapper.
   ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
-                           unsigned Size) {
+                           LocationSize Size) {
     return getModRefInfo(CX, MemoryLocation(P, Size));
   }
 
@@ -578,7 +554,7 @@
 
   /// getModRefInfo (for atomicrmws) - A convenience wrapper.
   ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,
-                           unsigned Size) {
+                           LocationSize Size) {
     return getModRefInfo(RMW, MemoryLocation(P, Size));
   }
 
@@ -625,8 +601,8 @@
   ModRefInfo getModRefInfo(const Instruction *I,
                            const Optional<MemoryLocation> &OptLoc) {
     if (OptLoc == None) {
-      if (auto CS = ImmutableCallSite(I)) {
-        return createModRefInfo(getModRefBehavior(CS));
+      if (const auto *Call = dyn_cast<CallBase>(I)) {
+        return createModRefInfo(getModRefBehavior(Call));
       }
     }
 
@@ -660,12 +636,12 @@
 
   /// Return information about whether a call and an instruction may refer to
   /// the same memory locations.
-  ModRefInfo getModRefInfo(Instruction *I, ImmutableCallSite Call);
+  ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call);
 
   /// Return information about whether two call sites may refer to the same set
   /// of memory locations. See the AA documentation for details:
   ///   http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
-  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
 
   /// Return information about whether a particular call site modifies
   /// or reads the specified memory location \p MemLoc before instruction \p I
@@ -776,25 +752,25 @@
   /// that these bits do not necessarily account for the overall behavior of
   /// the function, but rather only provide additional per-argument
   /// information.
-  virtual ModRefInfo getArgModRefInfo(ImmutableCallSite CS,
+  virtual ModRefInfo getArgModRefInfo(const CallBase *Call,
                                       unsigned ArgIdx) = 0;
 
   /// Return the behavior of the given call site.
-  virtual FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) = 0;
+  virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0;
 
   /// Return the behavior when calling the given function.
   virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
 
   /// getModRefInfo (for call sites) - Return information about whether
   /// a particular call site modifies or reads the specified memory location.
-  virtual ModRefInfo getModRefInfo(ImmutableCallSite CS,
+  virtual ModRefInfo getModRefInfo(const CallBase *Call,
                                    const MemoryLocation &Loc) = 0;
 
   /// Return information about whether two call sites may refer to the same set
   /// of memory locations. See the AA documentation for details:
   ///   http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
-  virtual ModRefInfo getModRefInfo(ImmutableCallSite CS1,
-                                   ImmutableCallSite CS2) = 0;
+  virtual ModRefInfo getModRefInfo(const CallBase *Call1,
+                                   const CallBase *Call2) = 0;
 
   /// @}
 };
@@ -826,26 +802,26 @@
     return Result.pointsToConstantMemory(Loc, OrLocal);
   }
 
-  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override {
-    return Result.getArgModRefInfo(CS, ArgIdx);
+  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override {
+    return Result.getArgModRefInfo(Call, ArgIdx);
   }
 
-  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
-    return Result.getModRefBehavior(CS);
+  FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override {
+    return Result.getModRefBehavior(Call);
   }
 
   FunctionModRefBehavior getModRefBehavior(const Function *F) override {
     return Result.getModRefBehavior(F);
   }
 
-  ModRefInfo getModRefInfo(ImmutableCallSite CS,
+  ModRefInfo getModRefInfo(const CallBase *Call,
                            const MemoryLocation &Loc) override {
-    return Result.getModRefInfo(CS, Loc);
+    return Result.getModRefInfo(Call, Loc);
   }
 
-  ModRefInfo getModRefInfo(ImmutableCallSite CS1,
-                           ImmutableCallSite CS2) override {
-    return Result.getModRefInfo(CS1, CS2);
+  ModRefInfo getModRefInfo(const CallBase *Call1,
+                           const CallBase *Call2) override {
+    return Result.getModRefInfo(Call1, Call2);
   }
 };
 
@@ -900,25 +876,28 @@
                  : CurrentResult.pointsToConstantMemory(Loc, OrLocal);
     }
 
-    ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
-      return AAR ? AAR->getArgModRefInfo(CS, ArgIdx) : CurrentResult.getArgModRefInfo(CS, ArgIdx);
+    ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
+      return AAR ? AAR->getArgModRefInfo(Call, ArgIdx)
+                 : CurrentResult.getArgModRefInfo(Call, ArgIdx);
     }
 
-    FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
-      return AAR ? AAR->getModRefBehavior(CS) : CurrentResult.getModRefBehavior(CS);
+    FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
+      return AAR ? AAR->getModRefBehavior(Call)
+                 : CurrentResult.getModRefBehavior(Call);
     }
 
     FunctionModRefBehavior getModRefBehavior(const Function *F) {
       return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F);
     }
 
-    ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
-      return AAR ? AAR->getModRefInfo(CS, Loc)
-                 : CurrentResult.getModRefInfo(CS, Loc);
+    ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
+      return AAR ? AAR->getModRefInfo(Call, Loc)
+                 : CurrentResult.getModRefInfo(Call, Loc);
     }
 
-    ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
-      return AAR ? AAR->getModRefInfo(CS1, CS2) : CurrentResult.getModRefInfo(CS1, CS2);
+    ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
+      return AAR ? AAR->getModRefInfo(Call1, Call2)
+                 : CurrentResult.getModRefInfo(Call1, Call2);
     }
   };
 
@@ -950,11 +929,11 @@
     return false;
   }
 
-  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
     return ModRefInfo::ModRef;
   }
 
-  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+  FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
     return FMRB_UnknownModRefBehavior;
   }
 
@@ -962,11 +941,11 @@
     return FMRB_UnknownModRefBehavior;
   }
 
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
+  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
     return ModRefInfo::ModRef;
   }
 
-  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
     return ModRefInfo::ModRef;
   }
 };
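Dropping ImmutableCallSite also drops the separate CallInst/InvokeInst convenience overloads: both instruction kinds derive from CallBase, so a single dyn_cast covers every call site. A sketch of the updated caller-side pattern, with AA and Loc assumed in scope:

    // Before: if (auto CS = ImmutableCallSite(I)) ...
    if (const auto *Call = llvm::dyn_cast<llvm::CallBase>(I)) {
      llvm::ModRefInfo MRI = AA.getModRefInfo(Call, Loc);
      (void)MRI;
    }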
diff --git a/include/llvm/Analysis/BasicAliasAnalysis.h b/include/llvm/Analysis/BasicAliasAnalysis.h
index 6344e84..820d7ac 100644
--- a/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -21,7 +21,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/MemoryLocation.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
 #include <algorithm>
@@ -84,18 +84,18 @@
 
   AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
 
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
 
-  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
 
   /// Chases pointers until we find a (constant global) or not.
   bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
 
   /// Get the location associated with a pointer argument of a callsite.
-  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
 
   /// Returns the behavior when calling the given call site.
-  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+  FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
 
   /// Returns the behavior when calling the given function. For use when the
   /// call site is not known.
@@ -115,7 +115,7 @@
     unsigned ZExtBits;
     unsigned SExtBits;
 
-    int64_t Scale;
+    APInt Scale;
 
     bool operator==(const VariableGEPIndex &Other) const {
       return V == Other.V && ZExtBits == Other.ZExtBits &&
@@ -133,10 +133,10 @@
     // Base pointer of the GEP
     const Value *Base;
     // Total constant offset w.r.t the base from indexing into structs
-    int64_t StructOffset;
+    APInt StructOffset;
     // Total constant offset w.r.t the base from indexing through
     // pointers/arrays/vectors
-    int64_t OtherOffset;
+    APInt OtherOffset;
     // Scaled variable (non-constant) indices.
     SmallVector<VariableGEPIndex, 4> VarIndices;
   };
@@ -189,7 +189,7 @@
   bool
   constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
                           LocationSize V1Size, LocationSize V2Size,
-                          int64_t BaseOffset, AssumptionCache *AC,
+                          APInt BaseOffset, AssumptionCache *AC,
                           DominatorTree *DT);
 
   bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
diff --git a/include/llvm/Analysis/BlockFrequencyInfo.h b/include/llvm/Analysis/BlockFrequencyInfo.h
index ca12db6..0b26187 100644
--- a/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -56,7 +56,7 @@
 
   const Function *getFunction() const;
   const BranchProbabilityInfo *getBPI() const;
-  void view() const;
+  void view(StringRef = "BlockFrequencyDAGs") const;
 
   /// getblockFreq - Return block frequency. Return 0 if we don't have the
   /// information. Please note that initial frequency is equal to ENTRY_FREQ. It
diff --git a/include/llvm/Analysis/DemandedBits.h b/include/llvm/Analysis/DemandedBits.h
index f751d2e..4c4e3f6 100644
--- a/include/llvm/Analysis/DemandedBits.h
+++ b/include/llvm/Analysis/DemandedBits.h
@@ -57,14 +57,17 @@
   /// Return true if, during analysis, I could not be reached.
   bool isInstructionDead(Instruction *I);
 
+  /// Return true if this use is dead, i.e. none of its bits are demanded.
+  bool isUseDead(Use *U);
+
   void print(raw_ostream &OS);
 
 private:
   void performAnalysis();
   void determineLiveOperandBits(const Instruction *UserI,
-    const Instruction *I, unsigned OperandNo,
+    const Value *Val, unsigned OperandNo,
     const APInt &AOut, APInt &AB,
-    KnownBits &Known, KnownBits &Known2);
+    KnownBits &Known, KnownBits &Known2, bool &KnownBitsComputed);
 
   Function &F;
   AssumptionCache &AC;
@@ -75,6 +78,9 @@
   // The set of visited instructions (non-integer-typed only).
   SmallPtrSet<Instruction*, 32> Visited;
   DenseMap<Instruction *, APInt> AliveBits;
+  // Uses with no demanded bits. If the user also has no demanded bits, the use
+  // might not be stored explicitly in this set, to save memory during analysis.
+  SmallPtrSet<Use *, 16> DeadUses;
 };
 
 class DemandedBitsWrapperPass : public FunctionPass {
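isUseDead complements isInstructionDead at use granularity. A sketch of querying it, with DB assumed to be a computed DemandedBits instance and I an instruction of interest:

    for (llvm::Use &U : I->operands())
      if (DB.isUseDead(&U))
        ; // none of the bits flowing through this operand are demanded by I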
diff --git a/include/llvm/Analysis/DependenceAnalysis.h b/include/llvm/Analysis/DependenceAnalysis.h
index c8ec737..69d0e2c 100644
--- a/include/llvm/Analysis/DependenceAnalysis.h
+++ b/include/llvm/Analysis/DependenceAnalysis.h
@@ -936,6 +936,17 @@
     friend struct AnalysisInfoMixin<DependenceAnalysis>;
   }; // class DependenceAnalysis
 
+  /// Printer pass to dump DA results.
+  struct DependenceAnalysisPrinterPass
+      : public PassInfoMixin<DependenceAnalysisPrinterPass> {
+    DependenceAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+    PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+
+  private:
+    raw_ostream &OS;
+  }; // class DependenceAnalysisPrinterPass
+
   /// Legacy pass manager pass to access dependence information
   class DependenceAnalysisWrapperPass : public FunctionPass {
   public:
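The printer pass follows the usual new-pass-manager shape. A hedged sketch of wiring it up (analysis registration boilerplate omitted):

    llvm::FunctionPassManager FPM;
    FPM.addPass(llvm::DependenceAnalysisPrinterPass(llvm::errs()));
    // FPM.run(F, FAM) then dumps the DA results for F to stderr.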
diff --git a/include/llvm/Analysis/GlobalsModRef.h b/include/llvm/Analysis/GlobalsModRef.h
index 09cef68..3a664ca 100644
--- a/include/llvm/Analysis/GlobalsModRef.h
+++ b/include/llvm/Analysis/GlobalsModRef.h
@@ -88,7 +88,7 @@
   AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
 
   using AAResultBase::getModRefInfo;
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
 
   /// getModRefBehavior - Return the behavior of the specified function if
   /// called from the specified call site.  The call site may be null in which
@@ -98,7 +98,7 @@
   /// getModRefBehavior - Return the behavior of the specified function if
   /// called from the specified call site.  The call site may be null in which
   /// case the most generic behavior of this function should be returned.
-  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+  FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
 
 private:
   FunctionInfo *getFunctionInfo(const Function *F);
@@ -113,7 +113,7 @@
   void CollectSCCMembership(CallGraph &CG);
 
   bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V);
-  ModRefInfo getModRefInfoForArgument(ImmutableCallSite CS,
+  ModRefInfo getModRefInfoForArgument(const CallBase *Call,
                                       const GlobalValue *GV);
 };
 
diff --git a/include/llvm/Analysis/IndirectCallSiteVisitor.h b/include/llvm/Analysis/IndirectCallSiteVisitor.h
deleted file mode 100644
index a30b59f..0000000
--- a/include/llvm/Analysis/IndirectCallSiteVisitor.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- IndirectCallSiteVisitor.h - indirect call-sites visitor -----------===//
-//
-//                      The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements defines a visitor class and a helper function that find
-// all indirect call-sites in a function.
-
-#ifndef LLVM_ANALYSIS_INDIRECTCALLSITEVISITOR_H
-#define LLVM_ANALYSIS_INDIRECTCALLSITEVISITOR_H
-
-#include "llvm/IR/InstVisitor.h"
-#include <vector>
-
-namespace llvm {
-// Visitor class that finds all indirect call sites.
-struct PGOIndirectCallSiteVisitor
-    : public InstVisitor<PGOIndirectCallSiteVisitor> {
-  std::vector<Instruction *> IndirectCallInsts;
-  PGOIndirectCallSiteVisitor() {}
-
-  void visitCallSite(CallSite CS) {
-    if (CS.isIndirectCall())
-      IndirectCallInsts.push_back(CS.getInstruction());
-  }
-};
-
-// Helper function that finds all indirect call sites.
-inline std::vector<Instruction *> findIndirectCallSites(Function &F) {
-  PGOIndirectCallSiteVisitor ICV;
-  ICV.visit(F);
-  return ICV.IndirectCallInsts;
-}
-}
-
-#endif
diff --git a/include/llvm/Analysis/IndirectCallVisitor.h b/include/llvm/Analysis/IndirectCallVisitor.h
new file mode 100644
index 0000000..d00cf63
--- /dev/null
+++ b/include/llvm/Analysis/IndirectCallVisitor.h
@@ -0,0 +1,39 @@
+//===-- IndirectCallVisitor.h - indirect call visitor ---------------------===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a visitor class and a helper function that find all
+// indirect call sites in a function.
+
+#ifndef LLVM_ANALYSIS_INDIRECTCALLVISITOR_H
+#define LLVM_ANALYSIS_INDIRECTCALLVISITOR_H
+
+#include "llvm/IR/InstVisitor.h"
+#include <vector>
+
+namespace llvm {
+// Visitor class that finds all indirect calls.
+struct PGOIndirectCallVisitor : public InstVisitor<PGOIndirectCallVisitor> {
+  std::vector<Instruction *> IndirectCalls;
+  PGOIndirectCallVisitor() {}
+
+  void visitCallBase(CallBase &Call) {
+    if (Call.isIndirectCall())
+      IndirectCalls.push_back(&Call);
+  }
+};
+
+// Helper function that finds all indirect call sites.
+inline std::vector<Instruction *> findIndirectCalls(Function &F) {
+  PGOIndirectCallVisitor ICV;
+  ICV.visit(F);
+  return ICV.IndirectCalls;
+}
+} // namespace llvm
+
+#endif
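Usage is unchanged apart from the renames; the visitor now hooks visitCallBase instead of the removed visitCallSite:

    // F is an llvm::Function; collects every indirect call or invoke in it.
    std::vector<llvm::Instruction *> Indirect = llvm::findIndirectCalls(F);
    for (llvm::Instruction *I : Indirect) {
      // e.g. candidates for PGO indirect-call promotion
    }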
diff --git a/include/llvm/Analysis/InstructionPrecedenceTracking.h b/include/llvm/Analysis/InstructionPrecedenceTracking.h
index b754557..073e6ec 100644
--- a/include/llvm/Analysis/InstructionPrecedenceTracking.h
+++ b/include/llvm/Analysis/InstructionPrecedenceTracking.h
@@ -75,8 +75,14 @@
   virtual ~InstructionPrecedenceTracking() = default;
 
 public:
-  /// Clears cached information about this particular block.
-  void invalidateBlock(const BasicBlock *BB);
+  /// Notifies this tracking that we are going to insert a new instruction \p
+  /// Inst into the basic block \p BB. It makes all necessary updates to internal
+  /// caches to keep them consistent.
+  void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);
+
+  /// Notifies this tracking that we are going to remove the instruction \p Inst.
+  /// It makes all necessary updates to internal caches to keep them consistent.
+  void removeInstruction(const Instruction *Inst);
 
   /// Invalidates all information from this tracking.
   void clear();
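This pair of notifications replaces the coarse per-block invalidation. A hypothetical hoist that keeps the tracking in sync (IPT, I, and BB assumed in scope); both calls announce the change before it happens, per the comments above:

    IPT.removeInstruction(I);       // about to take I out of its block
    IPT.insertInstructionTo(I, BB); // about to re-insert it into BB
    I->moveBefore(BB->getTerminator());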
diff --git a/include/llvm/Analysis/LoopAccessAnalysis.h b/include/llvm/Analysis/LoopAccessAnalysis.h
index cf24d9c..4ed00e2 100644
--- a/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -97,6 +97,19 @@
   /// Set of potential dependent memory accesses.
   typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
 
+  /// Type to keep track of the status of the dependence check. The order of
+  /// the elements is important and has to be from most permissive to least
+  /// permissive.
+  enum class VectorizationSafetyStatus {
+    // Can vectorize safely without RT checks. All dependences are known to be
+    // safe.
+    Safe,
+    // Can possibly vectorize with RT checks to overcome unknown dependencies.
+    PossiblySafeWithRtChecks,
+    // Cannot vectorize due to known unsafe dependencies.
+    Unsafe,
+  };
+
 /// Dependence between memory access instructions.
   struct Dependence {
     /// The type of the dependence.
@@ -146,7 +159,7 @@
     Instruction *getDestination(const LoopAccessInfo &LAI) const;
 
     /// Dependence types that don't prevent vectorization.
-    static bool isSafeForVectorization(DepType Type);
+    static VectorizationSafetyStatus isSafeForVectorization(DepType Type);
 
     /// Lexically forward dependence.
     bool isForward() const;
@@ -164,8 +177,8 @@
 
   MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
       : PSE(PSE), InnermostLoop(L), AccessIdx(0), MaxSafeRegisterWidth(-1U),
-        ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
-        RecordDependences(true) {}
+        FoundNonConstantDistanceDependence(false),
+        Status(VectorizationSafetyStatus::Safe), RecordDependences(true) {}
 
   /// Register the location (instructions are given increasing numbers)
   /// of a write access.
@@ -193,7 +206,9 @@
 
   /// No memory dependence was encountered that would inhibit
   /// vectorization.
-  bool isSafeForVectorization() const { return SafeForVectorization; }
+  bool isSafeForVectorization() const {
+    return Status == VectorizationSafetyStatus::Safe;
+  }
 
   /// The maximum number of bytes of a vector register we can vectorize
   /// the accesses safely with.
@@ -205,7 +220,10 @@
 
 /// In some cases when the dependency check fails we can still
   /// vectorize the loop with a dynamic array access check.
-  bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
+  bool shouldRetryWithRuntimeCheck() const {
+    return FoundNonConstantDistanceDependence &&
+           Status == VectorizationSafetyStatus::PossiblySafeWithRtChecks;
+  }
 
   /// Returns the memory dependences.  If null is returned we exceeded
   /// the MaxDependences threshold and this information is not
@@ -267,11 +285,12 @@
 
   /// If we see a non-constant dependence distance we can still try to
   /// vectorize this loop with runtime checks.
-  bool ShouldRetryWithRuntimeCheck;
+  bool FoundNonConstantDistanceDependence;
 
-  /// No memory dependence was encountered that would inhibit
-  /// vectorization.
-  bool SafeForVectorization;
+  /// Result of the dependence checks, indicating whether the checked
+  /// dependences are safe for vectorization, require RT checks or are known to
+  /// be unsafe.
+  VectorizationSafetyStatus Status;
 
   //// True if Dependences reflects the dependences in the
   //// loop.  If false we exceeded MaxDependences and
@@ -304,6 +323,11 @@
   /// \return false if we shouldn't vectorize at all or avoid larger
   /// vectorization factors by limiting MaxSafeDepDistBytes.
   bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
+
+  /// Updates the current safety status with \p S. We can go from Safe to
+  /// either PossiblySafeWithRtChecks or Unsafe and from
+  /// PossiblySafeWithRtChecks to Unsafe.
+  void mergeInStatus(VectorizationSafetyStatus S);
 };
 
 /// Holds information about the memory runtime legality checks to verify
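Since the enumerators are declared from most to least permissive, merging safety states can be a plain maximum. A hypothetical sketch of what mergeInStatus reduces to (the real implementation lives in LoopAccessAnalysis.cpp, which this diff does not show):

    void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
      if (Status < S)
        Status = S; // Safe -> PossiblySafeWithRtChecks -> Unsafe, never back
    }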
diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h
index 30b29d6..7287354 100644
--- a/include/llvm/Analysis/LoopInfo.h
+++ b/include/llvm/Analysis/LoopInfo.h
@@ -408,6 +408,12 @@
   /// Verify loop structure of this loop and all nested loops.
   void verifyLoopNest(DenseSet<const LoopT *> *Loops) const;
 
+  /// Returns true if the loop is annotated parallel.
+  ///
+  /// Derived classes can override this method using static template
+  /// polymorphism.
+  bool isAnnotatedParallel() const { return false; }
+
   /// Print loop with all the BBs inside it.
   void print(raw_ostream &OS, unsigned Depth = 0, bool Verbose = false) const;
 
@@ -989,6 +995,26 @@
 /// Function to print a loop's contents as LLVM's text IR assembly.
 void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
 
+/// Find and return the loop attribute node for the attribute @p Name in
+/// @p LoopID. Return nullptr if there is no such attribute.
+MDNode *findOptionMDForLoopID(MDNode *LoopID, StringRef Name);
+
+/// Find string metadata for a loop.
+///
+/// Returns the MDNode where the first operand is the metadata's name. The
+/// following operands are the metadata's values. If no metadata with @p Name is
+/// found, return nullptr.
+MDNode *findOptionMDForLoop(const Loop *TheLoop, StringRef Name);
+
+/// Return whether an MDNode might represent an access group.
+///
+/// Access group metadata nodes have to be distinct and empty. Being
+/// always-empty ensures that it never needs to be changed (which -- because
+/// MDNodes are designed immutable -- would require creating a new MDNode). Note
+/// that this is not a sufficient condition: not every distinct and empty MDNode
+/// represents an access group.
+bool isValidAsAccessGroup(MDNode *AccGroup);
+
 } // End llvm namespace
 
 #endif
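Per the comment on isValidAsAccessGroup, an access group is any distinct, zero-operand MDNode, so creating one is a one-liner (Ctx assumed to be an LLVMContext in scope):

    llvm::MDNode *AccGroup = llvm::MDNode::getDistinct(Ctx, {});
    assert(llvm::isValidAsAccessGroup(AccGroup) && "distinct + empty qualifies");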
diff --git a/include/llvm/Analysis/LoopInfoImpl.h b/include/llvm/Analysis/LoopInfoImpl.h
index d3054b7..2b80791 100644
--- a/include/llvm/Analysis/LoopInfoImpl.h
+++ b/include/llvm/Analysis/LoopInfoImpl.h
@@ -392,7 +392,10 @@
 template <class BlockT, class LoopT>
 void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth,
                                     bool Verbose) const {
-  OS.indent(Depth * 2) << "Loop at depth " << getLoopDepth() << " containing: ";
+  OS.indent(Depth * 2);
+  if (static_cast<const LoopT *>(this)->isAnnotatedParallel())
+    OS << "Parallel ";
+  OS << "Loop at depth " << getLoopDepth() << " containing: ";
 
   BlockT *H = getHeader();
   for (unsigned i = 0; i < getBlocks().size(); ++i) {
diff --git a/include/llvm/Analysis/MemoryDependenceAnalysis.h b/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 52340b0..958d4fe 100644
--- a/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -37,7 +37,6 @@
 namespace llvm {
 
 class AssumptionCache;
-class CallSite;
 class DominatorTree;
 class Function;
 class Instruction;
@@ -398,7 +397,7 @@
   /// invalidated on the next non-local query or when an instruction is
   /// removed.  Clients must copy this data if they want it around longer than
   /// that.
-  const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS);
+  const NonLocalDepInfo &getNonLocalCallDependency(CallBase *QueryCall);
 
   /// Perform a full dependency query for an access to the QueryInst's
   /// specified memory location, returning the set of instructions that either
@@ -482,9 +481,9 @@
   void releaseMemory();
 
 private:
-  MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
-                                         BasicBlock::iterator ScanIt,
-                                         BasicBlock *BB);
+  MemDepResult getCallDependencyFrom(CallBase *Call, bool isReadOnlyCall,
+                                     BasicBlock::iterator ScanIt,
+                                     BasicBlock *BB);
   bool getNonLocalPointerDepFromBB(Instruction *QueryInst,
                                    const PHITransAddr &Pointer,
                                    const MemoryLocation &Loc, bool isLoad,
diff --git a/include/llvm/Analysis/MemoryLocation.h b/include/llvm/Analysis/MemoryLocation.h
index cf839c5..fca18c1 100644
--- a/include/llvm/Analysis/MemoryLocation.h
+++ b/include/llvm/Analysis/MemoryLocation.h
@@ -16,9 +16,9 @@
 #ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
 #define LLVM_ANALYSIS_MEMORYLOCATION_H
 
-#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Instructions.h"
 #include "llvm/IR/Metadata.h"
 
 namespace llvm {
@@ -135,6 +135,9 @@
     return (Value & ImpreciseBit) == 0;
   }
 
+  // Convenience method to check if this LocationSize's value is 0.
+  bool isZero() const { return hasValue() && getValue() == 0; }
+
   bool operator==(const LocationSize &Other) const {
     return Value == Other.Value;
   }
@@ -231,11 +234,11 @@
   static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
 
   /// Return a location representing a particular argument of a call.
-  static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
+  static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
                                        const TargetLibraryInfo *TLI);
-  static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
+  static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
                                        const TargetLibraryInfo &TLI) {
-    return getForArgument(CS, ArgIdx, &TLI);
+    return getForArgument(Call, ArgIdx, &TLI);
   }
 
   explicit MemoryLocation(const Value *Ptr = nullptr,
diff --git a/include/llvm/Analysis/MemorySSA.h b/include/llvm/Analysis/MemorySSA.h
index 62008371..17e2d0c 100644
--- a/include/llvm/Analysis/MemorySSA.h
+++ b/include/llvm/Analysis/MemorySSA.h
@@ -404,6 +404,7 @@
 
   void resetOptimized() {
     OptimizedID = INVALID_MEMORYACCESS_ID;
+    setOperand(1, nullptr);
   }
 
   void print(raw_ostream &OS) const;
@@ -703,6 +704,7 @@
   ~MemorySSA();
 
   MemorySSAWalker *getWalker();
+  MemorySSAWalker *getSkipSelfWalker();
 
   /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
   /// access associated with it. If passed a basic block gets the memory phi
@@ -828,7 +830,9 @@
                                       const MemoryUseOrDef *Template = nullptr);
 
 private:
+  class ClobberWalkerBase;
   class CachingWalker;
+  class SkipSelfWalker;
   class OptimizeUses;
 
   CachingWalker *getWalkerImpl();
@@ -882,7 +886,9 @@
   mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
 
   // Memory SSA building info
+  std::unique_ptr<ClobberWalkerBase> WalkerBase;
   std::unique_ptr<CachingWalker> Walker;
+  std::unique_ptr<SkipSelfWalker> SkipWalker;
   unsigned NextID;
 };
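getSkipSelfWalker() returns a walker that, per its name, skips the starting access itself when computing a clobber, which is useful when the query access is itself a MemoryDef. A sketch of the query, with MSSA and MA assumed in scope:

    llvm::MemorySSAWalker *W = MSSA.getSkipSelfWalker();
    llvm::MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);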
 
diff --git a/include/llvm/Analysis/MemorySSAUpdater.h b/include/llvm/Analysis/MemorySSAUpdater.h
index 098876e..169d5bd 100644
--- a/include/llvm/Analysis/MemorySSAUpdater.h
+++ b/include/llvm/Analysis/MemorySSAUpdater.h
@@ -35,6 +35,7 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopIterator.h"
 #include "llvm/Analysis/MemorySSA.h"
 #include "llvm/IR/BasicBlock.h"
diff --git a/include/llvm/Analysis/MustExecute.h b/include/llvm/Analysis/MustExecute.h
index b973447..ad3222c 100644
--- a/include/llvm/Analysis/MustExecute.h
+++ b/include/llvm/Analysis/MustExecute.h
@@ -151,9 +151,9 @@
       const;
 
   /// Inform the safety info that we are planning to insert a new instruction
-  /// into the basic block \p BB. It will make all cache updates to keep it
-  /// correct after this insertion.
-  void insertInstructionTo(const BasicBlock *BB);
+  /// \p Inst into the basic block \p BB. It will make all cache updates to keep
+  /// it correct after this insertion.
+  void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);
 
   /// Inform safety info that we are planning to remove the instruction \p Inst
   /// from its block. It will make all cache updates to keep it correct after
diff --git a/include/llvm/Analysis/ObjCARCAliasAnalysis.h b/include/llvm/Analysis/ObjCARCAliasAnalysis.h
index 559c77c..58a6704 100644
--- a/include/llvm/Analysis/ObjCARCAliasAnalysis.h
+++ b/include/llvm/Analysis/ObjCARCAliasAnalysis.h
@@ -60,7 +60,7 @@
   FunctionModRefBehavior getModRefBehavior(const Function *F);
 
   using AAResultBase::getModRefInfo;
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
 };
 
 /// Analysis pass providing a never-invalidated alias analysis result.
diff --git a/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/include/llvm/Analysis/ObjCARCAnalysisUtils.h
index 07beb0b..1f497fa 100644
--- a/include/llvm/Analysis/ObjCARCAnalysisUtils.h
+++ b/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -51,25 +51,25 @@
 /// on.
 inline bool ModuleHasARC(const Module &M) {
   return
-    M.getNamedValue("objc_retain") ||
-    M.getNamedValue("objc_release") ||
-    M.getNamedValue("objc_autorelease") ||
-    M.getNamedValue("objc_retainAutoreleasedReturnValue") ||
-    M.getNamedValue("objc_unsafeClaimAutoreleasedReturnValue") ||
-    M.getNamedValue("objc_retainBlock") ||
-    M.getNamedValue("objc_autoreleaseReturnValue") ||
-    M.getNamedValue("objc_autoreleasePoolPush") ||
-    M.getNamedValue("objc_loadWeakRetained") ||
-    M.getNamedValue("objc_loadWeak") ||
-    M.getNamedValue("objc_destroyWeak") ||
-    M.getNamedValue("objc_storeWeak") ||
-    M.getNamedValue("objc_initWeak") ||
-    M.getNamedValue("objc_moveWeak") ||
-    M.getNamedValue("objc_copyWeak") ||
-    M.getNamedValue("objc_retainedObject") ||
-    M.getNamedValue("objc_unretainedObject") ||
-    M.getNamedValue("objc_unretainedPointer") ||
-    M.getNamedValue("clang.arc.use");
+    M.getNamedValue("llvm.objc.retain") ||
+    M.getNamedValue("llvm.objc.release") ||
+    M.getNamedValue("llvm.objc.autorelease") ||
+    M.getNamedValue("llvm.objc.retainAutoreleasedReturnValue") ||
+    M.getNamedValue("llvm.objc.unsafeClaimAutoreleasedReturnValue") ||
+    M.getNamedValue("llvm.objc.retainBlock") ||
+    M.getNamedValue("llvm.objc.autoreleaseReturnValue") ||
+    M.getNamedValue("llvm.objc.autoreleasePoolPush") ||
+    M.getNamedValue("llvm.objc.loadWeakRetained") ||
+    M.getNamedValue("llvm.objc.loadWeak") ||
+    M.getNamedValue("llvm.objc.destroyWeak") ||
+    M.getNamedValue("llvm.objc.storeWeak") ||
+    M.getNamedValue("llvm.objc.initWeak") ||
+    M.getNamedValue("llvm.objc.moveWeak") ||
+    M.getNamedValue("llvm.objc.copyWeak") ||
+    M.getNamedValue("llvm.objc.retainedObject") ||
+    M.getNamedValue("llvm.objc.unretainedObject") ||
+    M.getNamedValue("llvm.objc.unretainedPointer") ||
+    M.getNamedValue("llvm.objc.clang.arc.use");
 }
 
 /// This is a wrapper around getUnderlyingObject which also knows how to
diff --git a/include/llvm/Analysis/ObjCARCInstKind.h b/include/llvm/Analysis/ObjCARCInstKind.h
index 0b92d8b..018ea1f 100644
--- a/include/llvm/Analysis/ObjCARCInstKind.h
+++ b/include/llvm/Analysis/ObjCARCInstKind.h
@@ -11,6 +11,7 @@
 #define LLVM_ANALYSIS_OBJCARCINSTKIND_H
 
 #include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Instructions.h"
 
 namespace llvm {
@@ -48,7 +49,7 @@
   CopyWeak,                 ///< objc_copyWeak (derived)
   DestroyWeak,              ///< objc_destroyWeak (derived)
   StoreStrong,              ///< objc_storeStrong (derived)
-  IntrinsicUser,            ///< clang.arc.use
+  IntrinsicUser,            ///< llvm.objc.clang.arc.use
   CallOrUser,               ///< could call objc_release and/or "use" pointers
   Call,                     ///< could call objc_release
   User,                     ///< could "use" a pointer
diff --git a/include/llvm/Analysis/ScopedNoAliasAA.h b/include/llvm/Analysis/ScopedNoAliasAA.h
index 508968e..1356c6e 100644
--- a/include/llvm/Analysis/ScopedNoAliasAA.h
+++ b/include/llvm/Analysis/ScopedNoAliasAA.h
@@ -16,7 +16,7 @@
 #define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
 
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
 #include <memory>
@@ -41,8 +41,8 @@
   }
 
   AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
-  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
 
 private:
   bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
diff --git a/include/llvm/Analysis/SyntheticCountsUtils.h b/include/llvm/Analysis/SyntheticCountsUtils.h
index 87f4a01..db80bef0 100644
--- a/include/llvm/Analysis/SyntheticCountsUtils.h
+++ b/include/llvm/Analysis/SyntheticCountsUtils.h
@@ -36,16 +36,17 @@
   using EdgeRef = typename CGT::EdgeRef;
   using SccTy = std::vector<NodeRef>;
 
-  using GetRelBBFreqTy = function_ref<Optional<Scaled64>(EdgeRef)>;
-  using GetCountTy = function_ref<uint64_t(NodeRef)>;
-  using AddCountTy = function_ref<void(NodeRef, uint64_t)>;
+  // Not all EdgeRefs carry information about the source of the edge, so the
+  // NodeRef corresponding to the EdgeRef's source is passed explicitly.
+  using GetProfCountTy = function_ref<Optional<Scaled64>(NodeRef, EdgeRef)>;
+  using AddCountTy = function_ref<void(NodeRef, Scaled64)>;
 
-  static void propagate(const CallGraphType &CG, GetRelBBFreqTy GetRelBBFreq,
-                        GetCountTy GetCount, AddCountTy AddCount);
+  static void propagate(const CallGraphType &CG, GetProfCountTy GetProfCount,
+                        AddCountTy AddCount);
 
 private:
-  static void propagateFromSCC(const SccTy &SCC, GetRelBBFreqTy GetRelBBFreq,
-                               GetCountTy GetCount, AddCountTy AddCount);
+  static void propagateFromSCC(const SccTy &SCC, GetProfCountTy GetProfCount,
+                               AddCountTy AddCount);
 };
 } // namespace llvm
 
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 6ddea8b..223175d 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -581,13 +581,17 @@
   struct MemCmpExpansionOptions {
     // The list of available load sizes (in bytes), sorted in decreasing order.
     SmallVector<unsigned, 8> LoadSizes;
+    // Set to true to allow overlapping loads. For example, a 7-byte compare
+    // can be done with two overlapping 4-byte compares instead of a
+    // 4+2+1-byte sequence. This requires that every size in LoadSizes can be
+    // loaded unaligned.
+    bool AllowOverlappingLoads = false;
   };
   const MemCmpExpansionOptions *enableMemCmpExpansion(bool IsZeroCmp) const;
 
   /// Enable matching of interleaved access groups.
   bool enableInterleavedAccessVectorization() const;
 
-  /// Enable matching of interleaved access groups that contain predicated 
+  /// Enable matching of interleaved access groups that contain predicated
   /// accesses or gaps and are therefore vectorized using masked
   /// vector loads/stores.
   bool enableMaskedInterleavedAccessVectorization() const;
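
As a concrete illustration of AllowOverlappingLoads, here is a minimal,
hypothetical C++ sketch of an equality-only 7-byte memcmp expansion using two
overlapping 4-byte loads; memcpy stands in for the unaligned loads the option
requires, and nothing here is LLVM's actual expansion code.

    #include <cstdint>
    #include <cstring>

    static std::uint32_t loadU32(const void *P) {
      std::uint32_t V;
      std::memcpy(&V, P, sizeof(V)); // unaligned-safe 4-byte load
      return V;
    }

    // Returns true iff two 7-byte regions are equal. Bytes [0,4) and [3,7)
    // are covered by two loads that overlap at byte 3.
    bool equal7(const void *A, const void *B) {
      const char *CA = static_cast<const char *>(A);
      const char *CB = static_cast<const char *>(B);
      return loadU32(CA) == loadU32(CB) && loadU32(CA + 3) == loadU32(CB + 3);
    }
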
@@ -772,7 +776,7 @@
   /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
   /// The index and subtype parameters are used by the subvector insertion and
   /// extraction shuffle kinds to show the insert/extract point and the type of
-  /// the subvector being inserted/extracted. 
+  /// the subvector being inserted/extracted.
   /// NOTE: For subvector extractions Tp represents the source type.
   int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
                      Type *SubTp = nullptr) const;
@@ -930,6 +934,14 @@
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
 
+  /// \returns True if the caller and callee agree on how \p Args will be passed
+  /// to the callee.
+  /// \param[out] Args The list of compatible arguments.  The implementation may
+  /// filter out any incompatible args from this list.
+  bool areFunctionArgsABICompatible(const Function *Caller,
+                                    const Function *Callee,
+                                    SmallPtrSetImpl<Argument *> &Args) const;
+
   /// The type of load/store indexing.
   enum MemIndexedMode {
     MIM_Unindexed,  ///< No indexing.
@@ -1175,6 +1187,9 @@
       unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
   virtual bool areInlineCompatible(const Function *Caller,
                                    const Function *Callee) const = 0;
+  virtual bool
+  areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
+                               SmallPtrSetImpl<Argument *> &Args) const = 0;
   virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
   virtual bool isIndexedStoreLegal(MemIndexedMode Mode,Type *Ty) const = 0;
   virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
@@ -1553,6 +1568,11 @@
                            const Function *Callee) const override {
     return Impl.areInlineCompatible(Caller, Callee);
   }
+  bool areFunctionArgsABICompatible(
+      const Function *Caller, const Function *Callee,
+      SmallPtrSetImpl<Argument *> &Args) const override {
+    return Impl.areFunctionArgsABICompatible(Caller, Callee, Args);
+  }
   bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
     return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
   }
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index 5e79c5c..c9a234d 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -526,6 +526,14 @@
             Callee->getFnAttribute("target-features"));
   }
 
+  bool areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
+                                    SmallPtrSetImpl<Argument *> &Args) const {
+    return (Caller->getFnAttribute("target-cpu") ==
+            Callee->getFnAttribute("target-cpu")) &&
+           (Caller->getFnAttribute("target-features") ==
+            Callee->getFnAttribute("target-features"));
+  }
+
   bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
                           const DataLayout &DL) const {
     return false;
diff --git a/include/llvm/Analysis/TypeBasedAliasAnalysis.h b/include/llvm/Analysis/TypeBasedAliasAnalysis.h
index 7fcfdb3..d2e6df2 100644
--- a/include/llvm/Analysis/TypeBasedAliasAnalysis.h
+++ b/include/llvm/Analysis/TypeBasedAliasAnalysis.h
@@ -17,7 +17,7 @@
 #define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
 
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
 #include <memory>
@@ -43,10 +43,10 @@
 
   AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
   bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
-  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+  FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
   FunctionModRefBehavior getModRefBehavior(const Function *F);
-  ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
-  ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
+  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
 
 private:
   bool Aliases(const MDNode *A, const MDNode *B) const;
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 7c2e9a6..f46fdfc 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -297,10 +297,10 @@
 
   /// This function returns call pointer argument that is considered the same by
   /// aliasing rules. You CAN'T use it to replace one value with another.
-  const Value *getArgumentAliasingToReturnedPointer(ImmutableCallSite CS);
-  inline Value *getArgumentAliasingToReturnedPointer(CallSite CS) {
-    return const_cast<Value *>(
-        getArgumentAliasingToReturnedPointer(ImmutableCallSite(CS)));
+  const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call);
+  inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call) {
+    return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
+        const_cast<const CallBase *>(Call)));
   }
 
   // {launder,strip}.invariant.group returns pointer that aliases its argument,
@@ -309,7 +309,7 @@
   // considered as capture. The arguments are not marked as returned neither,
   // because it would make it useless.
   bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
-      ImmutableCallSite CS);
+      const CallBase *Call);
 
   /// This method strips off any GEP address adjustments and pointer casts from
   /// the specified value, returning the original object being addressed. Note
diff --git a/include/llvm/Analysis/VectorUtils.h b/include/llvm/Analysis/VectorUtils.h
index 5d44a52..be4d4f1 100644
--- a/include/llvm/Analysis/VectorUtils.h
+++ b/include/llvm/Analysis/VectorUtils.h
@@ -117,8 +117,24 @@
                          DemandedBits &DB,
                          const TargetTransformInfo *TTI=nullptr);
 
+/// Compute the union of two access-group lists.
+///
+/// If the list contains just one access group, it is returned directly. If the
+/// list is empty, returns nullptr.
+MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);
+
+/// Compute the access-group list of access groups that @p Inst1 and @p Inst2
+/// are both in. If either instruction does not access memory at all, it is
+/// considered to be in every list.
+///
+/// If the list contains just one access group, it is returned directly. If the
+/// list is empty, returns nullptr.
+MDNode *intersectAccessGroups(const Instruction *Inst1,
+                              const Instruction *Inst2);
+
 /// Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath,
-/// MD_nontemporal].  For K in Kinds, we get the MDNode for K from each of the
+/// MD_nontemporal, MD_access_group].
+/// For K in Kinds, we get the MDNode for K from each of the
 /// elements of VL, compute their "intersection" (i.e., the most generic
 /// metadata value that covers all of the individual values), and set I's
 /// metadata for M equal to the intersection value.
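
To illustrate the new access-group helpers, here is a hedged sketch of how a
transform that fuses two memory instructions might combine their metadata;
mergeAccessGroupMetadata is a hypothetical helper, not part of this patch.

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"

    void mergeAccessGroupMetadata(llvm::Instruction *Combined,
                                  const llvm::Instruction *A,
                                  const llvm::Instruction *B) {
      // The fused instruction may only claim access groups both originals
      // were in; a nullptr result simply clears the metadata.
      llvm::MDNode *Common = llvm::intersectAccessGroups(A, B);
      Combined->setMetadata(llvm::LLVMContext::MD_access_group, Common);
    }
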
@@ -142,7 +158,7 @@
 
 /// Create a mask with replicated elements.
 ///
-/// This function creates a shuffle mask for replicating each of the \p VF 
+/// This function creates a shuffle mask for replicating each of the \p VF
 /// elements in a vector \p ReplicationFactor times. It can be used to
 /// transform a mask of \p VF elements into a mask of
 /// \p VF * \p ReplicationFactor elements used by a predicated
diff --git a/include/llvm/BinaryFormat/Dwarf.def b/include/llvm/BinaryFormat/Dwarf.def
index 512cc64..6ad3cb5 100644
--- a/include/llvm/BinaryFormat/Dwarf.def
+++ b/include/llvm/BinaryFormat/Dwarf.def
@@ -18,7 +18,8 @@
     defined HANDLE_DW_VIRTUALITY || defined HANDLE_DW_DEFAULTED ||             \
     defined HANDLE_DW_CC || defined HANDLE_DW_LNS || defined HANDLE_DW_LNE ||  \
     defined HANDLE_DW_LNCT || defined HANDLE_DW_MACRO ||                       \
-    defined HANDLE_DW_RLE || defined HANDLE_DW_CFA ||                          \
+    defined HANDLE_DW_RLE ||                                                   \
+    (defined HANDLE_DW_CFA && defined HANDLE_DW_CFA_PRED) ||                   \
     defined HANDLE_DW_APPLE_PROPERTY || defined HANDLE_DW_UT ||                \
     defined HANDLE_DWARF_SECTION || defined HANDLE_DW_IDX ||                   \
     defined HANDLE_DW_END)
@@ -42,7 +43,7 @@
 #endif
 
 #ifndef HANDLE_DW_LANG
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)
 #endif
 
 #ifndef HANDLE_DW_ATE
@@ -85,6 +86,10 @@
 #define HANDLE_DW_CFA(ID, NAME)
 #endif
 
+#ifndef HANDLE_DW_CFA_PRED
+#define HANDLE_DW_CFA_PRED(ID, NAME, PRED)
+#endif
+
 #ifndef HANDLE_DW_APPLE_PROPERTY
 #define HANDLE_DW_APPLE_PROPERTY(ID, NAME)
 #endif
@@ -627,50 +632,50 @@
 HANDLE_DW_OP(0xfc, GNU_const_index, 0, GNU)
 
 // DWARF languages.
-HANDLE_DW_LANG(0x0001, C89, 2, DWARF)
-HANDLE_DW_LANG(0x0002, C, 2, DWARF)
-HANDLE_DW_LANG(0x0003, Ada83, 2, DWARF)
-HANDLE_DW_LANG(0x0004, C_plus_plus, 2, DWARF)
-HANDLE_DW_LANG(0x0005, Cobol74, 2, DWARF)
-HANDLE_DW_LANG(0x0006, Cobol85, 2, DWARF)
-HANDLE_DW_LANG(0x0007, Fortran77, 2, DWARF)
-HANDLE_DW_LANG(0x0008, Fortran90, 2, DWARF)
-HANDLE_DW_LANG(0x0009, Pascal83, 2, DWARF)
-HANDLE_DW_LANG(0x000a, Modula2, 2, DWARF)
+HANDLE_DW_LANG(0x0001, C89, 0, 2, DWARF)
+HANDLE_DW_LANG(0x0002, C, 0, 2, DWARF)
+HANDLE_DW_LANG(0x0003, Ada83, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0004, C_plus_plus, 0, 2, DWARF)
+HANDLE_DW_LANG(0x0005, Cobol74, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0006, Cobol85, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0007, Fortran77, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0008, Fortran90, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0009, Pascal83, 1, 2, DWARF)
+HANDLE_DW_LANG(0x000a, Modula2, 1, 2, DWARF)
 // New in DWARF v3:
-HANDLE_DW_LANG(0x000b, Java, 3, DWARF)
-HANDLE_DW_LANG(0x000c, C99, 3, DWARF)
-HANDLE_DW_LANG(0x000d, Ada95, 3, DWARF)
-HANDLE_DW_LANG(0x000e, Fortran95, 3, DWARF)
-HANDLE_DW_LANG(0x000f, PLI, 3, DWARF)
-HANDLE_DW_LANG(0x0010, ObjC, 3, DWARF)
-HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 3, DWARF)
-HANDLE_DW_LANG(0x0012, UPC, 3, DWARF)
-HANDLE_DW_LANG(0x0013, D, 3, DWARF)
+HANDLE_DW_LANG(0x000b, Java, 0, 3, DWARF)
+HANDLE_DW_LANG(0x000c, C99, 0, 3, DWARF)
+HANDLE_DW_LANG(0x000d, Ada95, 1, 3, DWARF)
+HANDLE_DW_LANG(0x000e, Fortran95, 1, 3, DWARF)
+HANDLE_DW_LANG(0x000f, PLI, 1, 3, DWARF)
+HANDLE_DW_LANG(0x0010, ObjC, 0, 3, DWARF)
+HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 0, 3, DWARF)
+HANDLE_DW_LANG(0x0012, UPC, 0, 3, DWARF)
+HANDLE_DW_LANG(0x0013, D, 0, 3, DWARF)
 // New in DWARF v4:
-HANDLE_DW_LANG(0x0014, Python, 4, DWARF)
+HANDLE_DW_LANG(0x0014, Python, 0, 4, DWARF)
 // New in DWARF v5:
-HANDLE_DW_LANG(0x0015, OpenCL, 5, DWARF)
-HANDLE_DW_LANG(0x0016, Go, 5, DWARF)
-HANDLE_DW_LANG(0x0017, Modula3, 5, DWARF)
-HANDLE_DW_LANG(0x0018, Haskell, 5, DWARF)
-HANDLE_DW_LANG(0x0019, C_plus_plus_03, 5, DWARF)
-HANDLE_DW_LANG(0x001a, C_plus_plus_11, 5, DWARF)
-HANDLE_DW_LANG(0x001b, OCaml, 5, DWARF)
-HANDLE_DW_LANG(0x001c, Rust, 5, DWARF)
-HANDLE_DW_LANG(0x001d, C11, 5, DWARF)
-HANDLE_DW_LANG(0x001e, Swift, 5, DWARF)
-HANDLE_DW_LANG(0x001f, Julia, 5, DWARF)
-HANDLE_DW_LANG(0x0020, Dylan, 5, DWARF)
-HANDLE_DW_LANG(0x0021, C_plus_plus_14, 5, DWARF)
-HANDLE_DW_LANG(0x0022, Fortran03, 5, DWARF)
-HANDLE_DW_LANG(0x0023, Fortran08, 5, DWARF)
-HANDLE_DW_LANG(0x0024, RenderScript, 5, DWARF)
-HANDLE_DW_LANG(0x0025, BLISS, 5, DWARF)
+HANDLE_DW_LANG(0x0015, OpenCL, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0016, Go, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0017, Modula3, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0018, Haskell, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0019, C_plus_plus_03, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001a, C_plus_plus_11, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001b, OCaml, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001c, Rust, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001d, C11, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001e, Swift, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001f, Julia, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0020, Dylan, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0021, C_plus_plus_14, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0022, Fortran03, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0023, Fortran08, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0024, RenderScript, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0025, BLISS, 0, 5, DWARF)
 // Vendor extensions:
-HANDLE_DW_LANG(0x8001, Mips_Assembler, 0, MIPS)
-HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, GOOGLE)
-HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, BORLAND)
+HANDLE_DW_LANG(0x8001, Mips_Assembler, None, 0, MIPS)
+HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, 0, GOOGLE)
+HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, 0, BORLAND)
 
 // DWARF attribute type encodings.
 HANDLE_DW_ATE(0x01, address, 2, DWARF)
@@ -831,9 +836,10 @@
 HANDLE_DW_CFA(0x15, val_offset_sf)
 HANDLE_DW_CFA(0x16, val_expression)
 // Vendor extensions:
-HANDLE_DW_CFA(0x1d, MIPS_advance_loc8)
-HANDLE_DW_CFA(0x2d, GNU_window_save)
-HANDLE_DW_CFA(0x2e, GNU_args_size)
+HANDLE_DW_CFA_PRED(0x1d, MIPS_advance_loc8, SELECT_MIPS64)
+HANDLE_DW_CFA_PRED(0x2d, GNU_window_save, SELECT_SPARC)
+HANDLE_DW_CFA_PRED(0x2d, AARCH64_negate_ra_state, SELECT_AARCH64)
+HANDLE_DW_CFA_PRED(0x2e, GNU_args_size, SELECT_X86)
 
 // Apple Objective-C Property Attributes.
 // Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind!
@@ -916,6 +922,7 @@
 #undef HANDLE_DW_MACRO
 #undef HANDLE_DW_RLE
 #undef HANDLE_DW_CFA
+#undef HANDLE_DW_CFA_PRED
 #undef HANDLE_DW_APPLE_PROPERTY
 #undef HANDLE_DW_UT
 #undef HANDLE_DWARF_SECTION
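
The HANDLE_DW_CFA_PRED rows above can drive an arch-aware name lookup. A
minimal sketch follows, assuming the SELECT_* predicates test the triple's
architecture (the exact arch sets are a guess), and using a lowercase
callFrameString so as not to claim this is the in-tree implementation. An
if-chain is used instead of a switch because GNU_window_save and
AARCH64_negate_ra_state share opcode 0x2d and would collide as case labels.

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Triple.h"

    llvm::StringRef callFrameString(unsigned Encoding,
                                    llvm::Triple::ArchType Arch) {
      using llvm::Triple;
    #define SELECT_AARCH64 (Arch == Triple::aarch64)
    #define SELECT_MIPS64 (Arch == Triple::mips64 || Arch == Triple::mips64el)
    #define SELECT_SPARC (Arch == Triple::sparc || Arch == Triple::sparcv9)
    #define SELECT_X86 (Arch == Triple::x86 || Arch == Triple::x86_64)
    #define HANDLE_DW_CFA(ID, NAME) \
      if (Encoding == ID)           \
        return "DW_CFA_" #NAME;
    #define HANDLE_DW_CFA_PRED(ID, NAME, PRED) \
      if (Encoding == ID && (PRED))            \
        return "DW_CFA_" #NAME;
    #include "llvm/BinaryFormat/Dwarf.def" // self-undefs the handlers
    #undef SELECT_AARCH64
    #undef SELECT_MIPS64
    #undef SELECT_SPARC
    #undef SELECT_X86
      return llvm::StringRef(); // unknown encoding, or wrong arch for it
    }
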
diff --git a/include/llvm/BinaryFormat/Dwarf.h b/include/llvm/BinaryFormat/Dwarf.h
index 330e31c..525a04d 100644
--- a/include/llvm/BinaryFormat/Dwarf.h
+++ b/include/llvm/BinaryFormat/Dwarf.h
@@ -26,6 +26,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Format.h"
 #include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/ADT/Triple.h"
 
 namespace llvm {
 class StringRef;
@@ -183,7 +184,8 @@
 };
 
 enum SourceLanguage {
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) DW_LANG_##NAME = ID,
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
+  DW_LANG_##NAME = ID,
 #include "llvm/BinaryFormat/Dwarf.def"
   DW_LANG_lo_user = 0x8000,
   DW_LANG_hi_user = 0xffff
@@ -272,6 +274,7 @@
 /// Call frame instruction encodings.
 enum CallFrameInfo {
 #define HANDLE_DW_CFA(ID, NAME) DW_CFA_##NAME = ID,
+#define HANDLE_DW_CFA_PRED(ID, NAME, ARCH) DW_CFA_##NAME = ID,
 #include "llvm/BinaryFormat/Dwarf.def"
   DW_CFA_extended = 0x00,
 
@@ -430,7 +433,7 @@
 StringRef LNExtendedString(unsigned Encoding);
 StringRef MacinfoString(unsigned Encoding);
 StringRef RangeListEncodingString(unsigned Encoding);
-StringRef CallFrameString(unsigned Encoding);
+StringRef CallFrameString(unsigned Encoding, Triple::ArchType Arch);
 StringRef ApplePropertyString(unsigned);
 StringRef UnitTypeString(unsigned);
 StringRef AtomTypeString(unsigned Atom);
@@ -488,6 +491,8 @@
 unsigned LanguageVendor(SourceLanguage L);
 /// @}
 
+Optional<unsigned> LanguageLowerBound(SourceLanguage L);
+
 /// A helper struct providing information about the byte size of DW_FORM
 /// values that vary in size depending on the DWARF version, address byte
 /// size, or DWARF32/DWARF64.
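
The new LOWER_BOUND column in Dwarf.def exists so that LanguageLowerBound can
be generated rather than hand-written. A plausible sketch, assuming the usual
X-macro pattern (rows whose bound is None, such as Mips_Assembler, yield an
empty Optional); the in-tree implementation may differ.

    #include "llvm/ADT/Optional.h"
    #include "llvm/BinaryFormat/Dwarf.h"

    llvm::Optional<unsigned> languageLowerBound(llvm::dwarf::SourceLanguage Lang) {
      using namespace llvm;
      using namespace llvm::dwarf;
      switch (Lang) {
      default:
        return None;
      // Each row expands to "case DW_LANG_<NAME>: return <LOWER_BOUND>;",
      // where LOWER_BOUND (0, 1, or None) converts to Optional<unsigned>.
    #define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR) \
      case DW_LANG_##NAME:                                         \
        return LOWER_BOUND;
    #include "llvm/BinaryFormat/Dwarf.def"
      }
    }
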
diff --git a/include/llvm/BinaryFormat/MachO.h b/include/llvm/BinaryFormat/MachO.h
index c5294c7..b3d6098 100644
--- a/include/llvm/BinaryFormat/MachO.h
+++ b/include/llvm/BinaryFormat/MachO.h
@@ -486,7 +486,10 @@
   PLATFORM_IOS = 2,
   PLATFORM_TVOS = 3,
   PLATFORM_WATCHOS = 4,
-  PLATFORM_BRIDGEOS = 5
+  PLATFORM_BRIDGEOS = 5,
+  PLATFORM_IOSSIMULATOR = 7,
+  PLATFORM_TVOSSIMULATOR = 8,
+  PLATFORM_WATCHOSSIMULATOR = 9
 };
 
 // Values for tools enum in build_tool_version.
diff --git a/include/llvm/BinaryFormat/Wasm.h b/include/llvm/BinaryFormat/Wasm.h
index 9b8ca28..d9f0f94 100644
--- a/include/llvm/BinaryFormat/Wasm.h
+++ b/include/llvm/BinaryFormat/Wasm.h
@@ -26,7 +26,7 @@
 // Wasm binary format version
 const uint32_t WasmVersion = 0x1;
 // Wasm linking metadata version
-const uint32_t WasmMetadataVersion = 0x1;
+const uint32_t WasmMetadataVersion = 0x2;
 // Wasm uses a 64k page size
 const uint32_t WasmPageSize = 65536;
 
@@ -188,19 +188,20 @@
 };
 
 enum : unsigned {
-  WASM_SEC_CUSTOM = 0,   // Custom / User-defined section
-  WASM_SEC_TYPE = 1,     // Function signature declarations
-  WASM_SEC_IMPORT = 2,   // Import declarations
-  WASM_SEC_FUNCTION = 3, // Function declarations
-  WASM_SEC_TABLE = 4,    // Indirect function table and other tables
-  WASM_SEC_MEMORY = 5,   // Memory attributes
-  WASM_SEC_GLOBAL = 6,   // Global declarations
-  WASM_SEC_EXPORT = 7,   // Exports
-  WASM_SEC_START = 8,    // Start function declaration
-  WASM_SEC_ELEM = 9,     // Elements section
-  WASM_SEC_CODE = 10,    // Function bodies (code)
-  WASM_SEC_DATA = 11,    // Data segments
-  WASM_SEC_EVENT = 13    // Event declarations
+  WASM_SEC_CUSTOM = 0,     // Custom / User-defined section
+  WASM_SEC_TYPE = 1,       // Function signature declarations
+  WASM_SEC_IMPORT = 2,     // Import declarations
+  WASM_SEC_FUNCTION = 3,   // Function declarations
+  WASM_SEC_TABLE = 4,      // Indirect function table and other tables
+  WASM_SEC_MEMORY = 5,     // Memory attributes
+  WASM_SEC_GLOBAL = 6,     // Global declarations
+  WASM_SEC_EXPORT = 7,     // Exports
+  WASM_SEC_START = 8,      // Start function declaration
+  WASM_SEC_ELEM = 9,       // Elements section
+  WASM_SEC_CODE = 10,      // Function bodies (code)
+  WASM_SEC_DATA = 11,      // Data segments
+  WASM_SEC_DATACOUNT = 12, // Data segment count
+  WASM_SEC_EVENT = 13      // Event declarations
 };
 
 // Type immediate encodings used in various contexts.
@@ -210,7 +211,7 @@
   WASM_TYPE_F32 = 0x7D,
   WASM_TYPE_F64 = 0x7C,
   WASM_TYPE_V128 = 0x7B,
-  WASM_TYPE_ANYFUNC = 0x70,
+  WASM_TYPE_FUNCREF = 0x70,
   WASM_TYPE_EXCEPT_REF = 0x68,
   WASM_TYPE_FUNC = 0x60,
   WASM_TYPE_NORESULT = 0x40, // for blocks with no result values
@@ -228,7 +229,7 @@
 // Opcodes used in initializer expressions.
 enum : unsigned {
   WASM_OPCODE_END = 0x0b,
-  WASM_OPCODE_GET_GLOBAL = 0x23,
+  WASM_OPCODE_GLOBAL_GET = 0x23,
   WASM_OPCODE_I32_CONST = 0x41,
   WASM_OPCODE_I64_CONST = 0x42,
   WASM_OPCODE_F32_CONST = 0x43,
diff --git a/include/llvm/Bitcode/BitcodeReader.h b/include/llvm/Bitcode/BitcodeReader.h
index ce8bdd9..0d7cc14 100644
--- a/include/llvm/Bitcode/BitcodeReader.h
+++ b/include/llvm/Bitcode/BitcodeReader.h
@@ -51,6 +51,7 @@
   struct BitcodeLTOInfo {
     bool IsThinLTO;
     bool HasSummary;
+    bool EnableSplitLTOUnit;
   };
 
   /// Represents a module in a bitcode file.
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index 3016888..413901d 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -138,6 +138,9 @@
 
   static char ID;
 
+protected:
+  /// The protected HandlerInfo struct and Handlers list permit targets that
+  /// extend AsmPrinter to add their own handlers.
   struct HandlerInfo {
     AsmPrinterHandler *Handler;
     const char *TimerName;
diff --git a/include/llvm/CodeGen/AsmPrinterHandler.h b/include/llvm/CodeGen/AsmPrinterHandler.h
new file mode 100644
index 0000000..a8b1320
--- /dev/null
+++ b/include/llvm/CodeGen/AsmPrinterHandler.h
@@ -0,0 +1,74 @@
+//===-- llvm/CodeGen/AsmPrinterHandler.h -----------------------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a generic interface for AsmPrinter handlers,
+// like debug and EH info emitters.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ASMPRINTERHANDLER_H
+#define LLVM_CODEGEN_ASMPRINTERHANDLER_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MCSymbol;
+
+typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm);
+
+/// Collects and handles AsmPrinter objects required to build debug
+/// or EH information.
+class AsmPrinterHandler {
+public:
+  virtual ~AsmPrinterHandler();
+
+  /// For symbols that have a size designated (e.g. common symbols),
+  /// this tracks that size.
+  virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0;
+
+  /// Emit all sections that should come after the content.
+  virtual void endModule() = 0;
+
+  /// Gather pre-function debug information.
+  /// Every beginFunction(MF) call should be followed by an endFunction(MF)
+  /// call.
+  virtual void beginFunction(const MachineFunction *MF) = 0;
+
+  // Emit any end-of-function markers (like .cfi_endproc). This is called
+  // before endFunction and cannot switch sections.
+  virtual void markFunctionEnd();
+
+  /// Gather post-function debug information.
+  /// Please note that some AsmPrinter implementations may not call
+  /// beginFunction at all.
+  virtual void endFunction(const MachineFunction *MF) = 0;
+
+  virtual void beginFragment(const MachineBasicBlock *MBB,
+                             ExceptionSymbolProvider ESP) {}
+  virtual void endFragment() {}
+
+  /// Emit target-specific EH funclet machinery.
+  virtual void beginFunclet(const MachineBasicBlock &MBB,
+                            MCSymbol *Sym = nullptr) {}
+  virtual void endFunclet() {}
+
+  /// Process beginning of an instruction.
+  virtual void beginInstruction(const MachineInstr *MI) = 0;
+
+  /// Process end of an instruction.
+  virtual void endInstruction() = 0;
+};
+} // End of namespace llvm
+
+#endif
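
To show the shape of the interface, here is a minimal do-nothing handler; the
class itself is hypothetical, but a real handler (a debug-info or EH emitter)
overrides the same set of pure virtuals, while markFunctionEnd, the fragment
hooks, and the funclet hooks keep their default implementations.

    #include "llvm/CodeGen/AsmPrinterHandler.h"

    namespace {
    class NullHandler : public llvm::AsmPrinterHandler {
    public:
      void setSymbolSize(const llvm::MCSymbol *Sym, uint64_t Size) override {}
      void endModule() override {}
      void beginFunction(const llvm::MachineFunction *MF) override {}
      void endFunction(const llvm::MachineFunction *MF) override {}
      void beginInstruction(const llvm::MachineInstr *MI) override {}
      void endInstruction() override {}
    };
    } // namespace
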
diff --git a/include/llvm/CodeGen/CommandFlags.inc b/include/llvm/CodeGen/CommandFlags.inc
index 6535e06..568d329 100644
--- a/include/llvm/CodeGen/CommandFlags.inc
+++ b/include/llvm/CodeGen/CommandFlags.inc
@@ -114,10 +114,16 @@
                clEnumValN(TargetMachine::CGFT_Null, "null",
                           "Emit nothing, for performance testing")));
 
-static cl::opt<bool>
-    DisableFPElim("disable-fp-elim",
-                  cl::desc("Disable frame pointer elimination optimization"),
-                  cl::init(false));
+static cl::opt<llvm::FramePointer::FP> FramePointerUsage(
+    "frame-pointer", cl::desc("Specify frame pointer elimination optimization"),
+    cl::init(llvm::FramePointer::None),
+    cl::values(
+        clEnumValN(llvm::FramePointer::All, "all",
+                   "Disable frame pointer elimination"),
+        clEnumValN(llvm::FramePointer::NonLeaf, "non-leaf",
+                   "Disable frame pointer elimination for non-leaf frame"),
+        clEnumValN(llvm::FramePointer::None, "none",
+                   "Enable frame pointer elimination")));
 
 static cl::opt<bool> EnableUnsafeFPMath(
     "enable-unsafe-fp-math",
@@ -368,9 +374,14 @@
       NewAttrs.addAttribute("target-cpu", CPU);
     if (!Features.empty())
       NewAttrs.addAttribute("target-features", Features);
-    if (DisableFPElim.getNumOccurrences() > 0)
-      NewAttrs.addAttribute("no-frame-pointer-elim",
-                            DisableFPElim ? "true" : "false");
+    if (FramePointerUsage.getNumOccurrences() > 0) {
+      if (FramePointerUsage == llvm::FramePointer::All)
+        NewAttrs.addAttribute("frame-pointer", "all");
+      else if (FramePointerUsage == llvm::FramePointer::NonLeaf)
+        NewAttrs.addAttribute("frame-pointer", "non-leaf");
+      else if (FramePointerUsage == llvm::FramePointer::None)
+        NewAttrs.addAttribute("frame-pointer", "none");
+    }
     if (DisableTailCalls.getNumOccurrences() > 0)
       NewAttrs.addAttribute("disable-tail-calls",
                             toStringRef(DisableTailCalls));
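
The replacement attribute is a string taking the values "all", "non-leaf", or
"none". As a hedged sketch of how a consumer might interpret it (the helper
below is hypothetical; only the attribute name and values come from this
patch):

    #include "llvm/IR/Function.h"

    static bool mustKeepFramePointer(const llvm::Function &F, bool IsLeaf) {
      llvm::StringRef Val = F.getFnAttribute("frame-pointer").getValueAsString();
      if (Val == "all")
        return true;    // never eliminate the frame pointer
      if (Val == "non-leaf")
        return !IsLeaf; // keep it only in functions that make calls
      return false;     // "none" or attribute absent: elimination allowed
    }
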
diff --git a/include/llvm/CodeGen/DbgEntityHistoryCalculator.h b/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
new file mode 100644
index 0000000..befc28f
--- /dev/null
+++ b/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
@@ -0,0 +1,87 @@
+//===- llvm/CodeGen/DbgEntityHistoryCalculator.h ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DBGVALUEHISTORYCALCULATOR_H
+#define LLVM_CODEGEN_DBGVALUEHISTORYCALCULATOR_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include <utility>
+
+namespace llvm {
+
+class DILocalVariable;
+class MachineFunction;
+class MachineInstr;
+class TargetRegisterInfo;
+
+// For each user variable, keep a list of instruction ranges where this variable
+// is accessible. The variables are listed in order of appearance.
+class DbgValueHistoryMap {
+  // Each instruction range starts with a DBG_VALUE instruction, specifying the
+  // location of a variable, which is assumed to be valid until the end of the
+  // range. If end is not specified, location is valid until the start
+  // instruction of the next instruction range, or until the end of the
+  // function.
+public:
+  using InstrRange = std::pair<const MachineInstr *, const MachineInstr *>;
+  using InstrRanges = SmallVector<InstrRange, 4>;
+  using InlinedEntity = std::pair<const DINode *, const DILocation *>;
+  using InstrRangesMap = MapVector<InlinedEntity, InstrRanges>;
+
+private:
+  InstrRangesMap VarInstrRanges;
+
+public:
+  void startInstrRange(InlinedEntity Var, const MachineInstr &MI);
+  void endInstrRange(InlinedEntity Var, const MachineInstr &MI);
+
+  // Returns the register currently describing @Var. If @Var is currently
+  // inaccessible or is not described by a register, returns 0.
+  unsigned getRegisterForVar(InlinedEntity Var) const;
+
+  bool empty() const { return VarInstrRanges.empty(); }
+  void clear() { VarInstrRanges.clear(); }
+  InstrRangesMap::const_iterator begin() const { return VarInstrRanges.begin(); }
+  InstrRangesMap::const_iterator end() const { return VarInstrRanges.end(); }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  LLVM_DUMP_METHOD void dump() const;
+#endif
+};
+
+/// For each inlined instance of a source-level label, keep the corresponding
+/// DBG_LABEL instruction. The DBG_LABEL instruction could be used to generate
+/// a temporary (assembler) label before it.
+class DbgLabelInstrMap {
+public:
+  using InlinedEntity = std::pair<const DINode *, const DILocation *>;
+  using InstrMap = MapVector<InlinedEntity, const MachineInstr *>;
+
+private:
+  InstrMap LabelInstr;
+
+public:
+  void addInstr(InlinedEntity Label, const MachineInstr &MI);
+
+  bool empty() const { return LabelInstr.empty(); }
+  void clear() { LabelInstr.clear(); }
+  InstrMap::const_iterator begin() const { return LabelInstr.begin(); }
+  InstrMap::const_iterator end() const { return LabelInstr.end(); }
+};
+
+void calculateDbgEntityHistory(const MachineFunction *MF,
+                               const TargetRegisterInfo *TRI,
+                               DbgValueHistoryMap &DbgValues,
+                               DbgLabelInstrMap &DbgLabels);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_DBGVALUEHISTORYCALCULATOR_H
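
A short, hypothetical usage sketch of the calculator and maps above; the
traversal shape follows the header's types, but the function itself is purely
illustrative.

    #include "llvm/CodeGen/DbgEntityHistoryCalculator.h"

    void walkVariableHistory(const llvm::MachineFunction *MF,
                             const llvm::TargetRegisterInfo *TRI) {
      llvm::DbgValueHistoryMap DbgValues;
      llvm::DbgLabelInstrMap DbgLabels;
      llvm::calculateDbgEntityHistory(MF, TRI, DbgValues, DbgLabels);
      for (const auto &Entry : DbgValues) {
        // Entry.first is the (DINode, inlined-at location) InlinedEntity.
        for (const auto &Range : Entry.second) {
          const llvm::MachineInstr *Begin = Range.first; // opening DBG_VALUE
          const llvm::MachineInstr *End = Range.second;  // null: open-ended
          (void)Begin;
          (void)End;
        }
      }
    }
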
diff --git a/include/llvm/CodeGen/DebugHandlerBase.h b/include/llvm/CodeGen/DebugHandlerBase.h
new file mode 100644
index 0000000..4f0d14d
--- /dev/null
+++ b/include/llvm/CodeGen/DebugHandlerBase.h
@@ -0,0 +1,138 @@
+//===-- llvm/CodeGen/DebugHandlerBase.h -----------------------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common functionality for different debug information format backends.
+// LLVM currently supports DWARF and CodeView.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DEBUGHANDLERBASE_H
+#define LLVM_CODEGEN_DEBUGHANDLERBASE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/CodeGen/AsmPrinterHandler.h"
+#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
+#include "llvm/CodeGen/LexicalScopes.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class MachineInstr;
+class MachineModuleInfo;
+
+/// Represents the location at which a variable is stored.
+struct DbgVariableLocation {
+  /// Base register.
+  unsigned Register;
+
+  /// Chain of offsetted loads necessary to load the value if it lives in
+  /// memory. Every load except for the last is pointer-sized.
+  SmallVector<int64_t, 1> LoadChain;
+
+  /// Present if the location is part of a larger variable.
+  llvm::Optional<llvm::DIExpression::FragmentInfo> FragmentInfo;
+
+  /// Extract a VariableLocation from a MachineInstr.
+  /// This will only work if Instruction is a debug value instruction
+  /// and the associated DIExpression is in one of the supported forms.
+  /// If these requirements are not met, the returned Optional will not
+  /// have a value.
+  static Optional<DbgVariableLocation>
+  extractFromMachineInstruction(const MachineInstr &Instruction);
+};
+
+/// Base class for debug information backends. Common functionality related
+/// to tracking which variables and scopes are alive at a given PC lives here.
+class DebugHandlerBase : public AsmPrinterHandler {
+protected:
+  DebugHandlerBase(AsmPrinter *A);
+
+  /// Target of debug info emission.
+  AsmPrinter *Asm;
+
+  /// Collected machine module information.
+  MachineModuleInfo *MMI;
+
+  /// Previous instruction's location information. This is used to determine
+  /// where to place labels indicating scope boundaries in debug info.
+  /// We track the previous instruction's source location (if not line 0),
+  /// whether it was a label, and its parent BB.
+  DebugLoc PrevInstLoc;
+  MCSymbol *PrevLabel = nullptr;
+  const MachineBasicBlock *PrevInstBB = nullptr;
+
+  /// This location indicates the end of the function prologue and the
+  /// beginning of the function body.
+  DebugLoc PrologEndLoc;
+
+  /// If nonnull, stores the current machine instruction we're processing.
+  const MachineInstr *CurMI = nullptr;
+
+  LexicalScopes LScopes;
+
+  /// History of DBG_VALUE and clobber instructions for each user
+  /// variable.  Variables are listed in order of appearance.
+  DbgValueHistoryMap DbgValues;
+
+  /// Mapping of inlined labels to DBG_LABEL machine instructions.
+  DbgLabelInstrMap DbgLabels;
+
+  /// Maps an instruction to the label emitted before it.
+  /// FIXME: Make this private from DwarfDebug, we have the necessary accessors
+  /// for it.
+  DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn;
+
+  /// Maps an instruction to the label emitted after it.
+  DenseMap<const MachineInstr *, MCSymbol *> LabelsAfterInsn;
+
+  /// Identify instructions that mark the beginning or end of a scope.
+  void identifyScopeMarkers();
+
+  /// Ensure that a label will be emitted before MI.
+  void requestLabelBeforeInsn(const MachineInstr *MI) {
+    LabelsBeforeInsn.insert(std::make_pair(MI, nullptr));
+  }
+
+  /// Ensure that a label will be emitted after MI.
+  void requestLabelAfterInsn(const MachineInstr *MI) {
+    LabelsAfterInsn.insert(std::make_pair(MI, nullptr));
+  }
+
+  virtual void beginFunctionImpl(const MachineFunction *MF) = 0;
+  virtual void endFunctionImpl(const MachineFunction *MF) = 0;
+  virtual void skippedNonDebugFunction() {}
+
+  // AsmPrinterHandler overrides.
+public:
+  void beginInstruction(const MachineInstr *MI) override;
+  void endInstruction() override;
+
+  void beginFunction(const MachineFunction *MF) override;
+  void endFunction(const MachineFunction *MF) override;
+
+  /// Return Label preceding the instruction.
+  MCSymbol *getLabelBeforeInsn(const MachineInstr *MI);
+
+  /// Return Label immediately following the instruction.
+  MCSymbol *getLabelAfterInsn(const MachineInstr *MI);
+
+  /// Return the function-local offset of an instruction. A label for the
+  /// instruction \p MI should exist (\ref getLabelAfterInsn).
+  const MCExpr *getFunctionLocalOffsetAfterInsn(const MachineInstr *MI);
+
+  /// If this type is derived from a base type then return base type size.
+  static uint64_t getBaseTypeSize(const DITypeRef TyRef);
+};
+
+}
+
+#endif
diff --git a/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/include/llvm/CodeGen/GlobalISel/CSEInfo.h
new file mode 100644
index 0000000..ce2d285
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -0,0 +1,237 @@
+//===- llvm/CodeGen/GlobalISel/CSEInfo.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Provides analysis for continuously CSEing during GISel passes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+/// A class that wraps MachineInstrs and derives from FoldingSetNode in order to
+/// be uniqued in a CSEMap. The tradeoff here is extra memory allocations for
+/// UniqueMachineInstr vs making MachineInstr bigger.
+class UniqueMachineInstr : public FoldingSetNode {
+  friend class GISelCSEInfo;
+  const MachineInstr *MI;
+  explicit UniqueMachineInstr(const MachineInstr *MI) : MI(MI) {}
+
+public:
+  void Profile(FoldingSetNodeID &ID);
+};
+
+// Class representing configuration that can be applied during CSE analysis.
+// Currently it only exposes the shouldCSEOpc hook, which each pass can
+// override.
+class CSEConfig {
+public:
+  virtual ~CSEConfig() = default;
+  // Hook for defining which Generic instructions should be CSEd.
+  // GISelCSEInfo currently only calls this hook when dealing with generic
+  // opcodes.
+  virtual bool shouldCSEOpc(unsigned Opc);
+};
+
+// TODO: Find a better place for this.
+// Commonly used for O0 config.
+class CSEConfigConstantOnly : public CSEConfig {
+public:
+  virtual ~CSEConfigConstantOnly() = default;
+  virtual bool shouldCSEOpc(unsigned Opc) override;
+};
+
+/// The CSE Analysis object.
+/// This installs itself as a delegate to the MachineFunction to track
+/// new instructions as well as deletions. It will not, however, be able to
+/// track instruction mutations; in such cases, recordNewInstruction should be
+/// called (e.g. inside MachineIRBuilder::recordInsertion).
+/// Also, because an instruction can be inserted before any of its operands
+/// have been added, instructions are uniqued and inserted lazily.
+/// CSEInfo should assert when trying to enter an incomplete instruction into
+/// the CSEMap. CSEability has opcode-level granularity; for now, only generic
+/// instructions are CSEable.
+class GISelCSEInfo : public GISelChangeObserver {
+  // Make it accessible only to CSEMIRBuilder.
+  friend class CSEMIRBuilder;
+
+  BumpPtrAllocator UniqueInstrAllocator;
+  FoldingSet<UniqueMachineInstr> CSEMap;
+  MachineRegisterInfo *MRI = nullptr;
+  MachineFunction *MF = nullptr;
+  std::unique_ptr<CSEConfig> CSEOpt;
+  /// Keep a cache of UniqueInstrs for each MachineInstr. In GISel,
+  /// instructions are often mutated (so that their profile ID changes
+  /// completely). Whenever a mutation happens, invalidate the
+  /// UniqueMachineInstr for the MachineInstr.
+  DenseMap<const MachineInstr *, UniqueMachineInstr *> InstrMapping;
+
+  /// Store instructions that are not fully formed in TemporaryInsts.
+  /// Also because CSE insertion happens lazily, we can remove insts from this
+  /// list and avoid inserting and then removing from the CSEMap.
+  GISelWorkList<8> TemporaryInsts;
+
+  // Only used in asserts.
+  DenseMap<unsigned, unsigned> OpcodeHitTable;
+
+  bool isUniqueMachineInstValid(const UniqueMachineInstr &UMI) const;
+
+  void invalidateUniqueMachineInstr(UniqueMachineInstr *UMI);
+
+  UniqueMachineInstr *getNodeIfExists(FoldingSetNodeID &ID,
+                                      MachineBasicBlock *MBB, void *&InsertPos);
+
+  /// Allocate and construct a new UniqueMachineInstr for MI and return.
+  UniqueMachineInstr *getUniqueInstrForMI(const MachineInstr *MI);
+
+  void insertNode(UniqueMachineInstr *UMI, void *InsertPos = nullptr);
+
+  /// Get the MachineInstr(Unique) if it exists already in the CSEMap and the
+  /// same MachineBasicBlock.
+  MachineInstr *getMachineInstrIfExists(FoldingSetNodeID &ID,
+                                        MachineBasicBlock *MBB,
+                                        void *&InsertPos);
+
+  /// Use this method to allocate a new UniqueMachineInstr for MI and insert it
+  /// into the CSEMap. MI should return true for shouldCSE(MI->getOpcode())
+  void insertInstr(MachineInstr *MI, void *InsertPos = nullptr);
+
+public:
+  GISelCSEInfo() = default;
+
+  virtual ~GISelCSEInfo();
+
+  void setMF(MachineFunction &MF);
+
+  /// Records a newly created inst in a list and lazily inserts it into the
+  /// CSEMap. Sometimes this method might be called with a partially
+  /// constructed MachineInstr (right after BuildMI, before any operands have
+  /// been added); in such cases, the hashing of the instruction is deferred
+  /// to a later stage.
+  void recordNewInstruction(MachineInstr *MI);
+
+  /// Use this callback to inform CSE about a newly fully created instruction.
+  void handleRecordedInst(MachineInstr *MI);
+
+  /// Use this callback to insert all the recorded instructions. At this point,
+  /// all of these insts need to be fully constructed and should not be missing
+  /// any operands.
+  void handleRecordedInsts();
+
+  /// Remove this inst from the CSE map. If this inst has not been inserted yet,
+  /// it will be removed from the Tempinsts list if it exists.
+  void handleRemoveInst(MachineInstr *MI);
+
+  void releaseMemory();
+
+  void setCSEConfig(std::unique_ptr<CSEConfig> Opt) { CSEOpt = std::move(Opt); }
+
+  bool shouldCSE(unsigned Opc) const;
+
+  void analyze(MachineFunction &MF);
+
+  void countOpcodeHit(unsigned Opc);
+
+  void print();
+
+  // Observer API
+  void erasingInstr(MachineInstr &MI) override;
+  void createdInstr(MachineInstr &MI) override;
+  void changingInstr(MachineInstr &MI) override;
+  void changedInstr(MachineInstr &MI) override;
+};
+
+class TargetRegisterClass;
+class RegisterBank;
+
+// Simple builder class to easily profile properties about MIs.
+class GISelInstProfileBuilder {
+  FoldingSetNodeID &ID;
+  const MachineRegisterInfo &MRI;
+
+public:
+  GISelInstProfileBuilder(FoldingSetNodeID &ID, const MachineRegisterInfo &MRI)
+      : ID(ID), MRI(MRI) {}
+  // Profiling methods.
+  const GISelInstProfileBuilder &addNodeIDOpcode(unsigned Opc) const;
+  const GISelInstProfileBuilder &addNodeIDRegType(const LLT &Ty) const;
+  const GISelInstProfileBuilder &addNodeIDRegType(const unsigned) const;
+
+  const GISelInstProfileBuilder &
+  addNodeIDRegType(const TargetRegisterClass *RC) const;
+  const GISelInstProfileBuilder &addNodeIDRegType(const RegisterBank *RB) const;
+
+  const GISelInstProfileBuilder &addNodeIDRegNum(unsigned Reg) const;
+
+  const GISelInstProfileBuilder &addNodeIDImmediate(int64_t Imm) const;
+  const GISelInstProfileBuilder &
+  addNodeIDMBB(const MachineBasicBlock *MBB) const;
+
+  const GISelInstProfileBuilder &
+  addNodeIDMachineOperand(const MachineOperand &MO) const;
+
+  const GISelInstProfileBuilder &addNodeIDFlag(unsigned Flag) const;
+  const GISelInstProfileBuilder &addNodeID(const MachineInstr *MI) const;
+};
+
+/// Simple wrapper that does the following:
+/// 1) Lazily evaluates the MachineFunction to compute CSEable instructions.
+/// 2) Allows configuring which instructions are CSEd through a CSEConfig
+///    object. Provides a get method that takes a CSEConfig object.
+class GISelCSEAnalysisWrapper {
+  GISelCSEInfo Info;
+  MachineFunction *MF = nullptr;
+  bool AlreadyComputed = false;
+
+public:
+  /// Takes a CSEConfig object that defines what opcodes get CSEd.
+  /// If a CSEConfig is already set and the CSE analysis has been preserved,
+  /// the new CSEOpt will not be used (pass ReCompute = true to force using
+  /// the new CSEOpt).
+  GISelCSEInfo &get(std::unique_ptr<CSEConfig> CSEOpt, bool ReCompute = false);
+  void setMF(MachineFunction &MFunc) { MF = &MFunc; }
+  void setComputed(bool Computed) { AlreadyComputed = Computed; }
+  void releaseMemory() { Info.releaseMemory(); }
+};
+
+/// The actual analysis pass wrapper.
+class GISelCSEAnalysisWrapperPass : public MachineFunctionPass {
+  GISelCSEAnalysisWrapper Wrapper;
+
+public:
+  static char ID;
+  GISelCSEAnalysisWrapperPass() : MachineFunctionPass(ID) {
+    initializeGISelCSEAnalysisWrapperPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  const GISelCSEAnalysisWrapper &getCSEWrapper() const { return Wrapper; }
+  GISelCSEAnalysisWrapper &getCSEWrapper() { return Wrapper; }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void releaseMemory() override {
+    Wrapper.releaseMemory();
+    Wrapper.setComputed(false);
+  }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h b/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
new file mode 100644
index 0000000..a8fb736
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
@@ -0,0 +1,110 @@
+//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.h  --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a version of MachineIRBuilder which CSEs insts within
+/// a MachineBasicBlock.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H
+#define LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+
+namespace llvm {
+
+/// Defines a builder that does CSE of MachineInstructions using GISelCSEInfo.
+/// Example usage:
+///
+///   GISelCSEInfo *Info =
+///       &getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEInfo();
+///   CSEMIRBuilder CB(Builder.getState());
+///   CB.setCSEInfo(Info);
+///   auto A = CB.buildConstant(s32, 42);
+///   auto B = CB.buildConstant(s32, 42);
+///   assert(A == B);
+///   unsigned CReg = MRI.createGenericVirtualRegister(s32);
+///   auto C = CB.buildConstant(CReg, 42);
+///   assert(C->getOpcode() == TargetOpcode::COPY);
+///
+/// Explicitly passing in a register would materialize a copy if possible.
+/// CSEMIRBuilder also does trivial constant folding for binary ops.
+class CSEMIRBuilder : public MachineIRBuilder {
+
+  /// Returns true if A dominates B (within the same basic block).
+  /// Both iterators must be in the same basic block.
+  //
+  // TODO: Another approach for checking dominance is having two iterators and
+  // making them go towards each other until they meet or reach begin/end. Which
+  // approach is better? Should this even change dynamically? For G_CONSTANTS
+  // most of which will be at the top of the BB, the top down approach would be
+  // a better choice. Does IRTranslator placing constants at the beginning still
+  // make sense? Should this change based on Opcode?
+  bool dominates(MachineBasicBlock::const_iterator A,
+                 MachineBasicBlock::const_iterator B) const;
+
+  /// For a given ID, find a MachineInstr in the CSE Map. If found, check
+  /// whether it dominates the current insertion point and, if not, move it
+  /// just before the current insertion point and return it. If not found,
+  /// return a null MachineInstrBuilder.
+  MachineInstrBuilder getDominatingInstrForID(FoldingSetNodeID &ID,
+                                              void *&NodeInsertPos);
+  /// Simple check if we can CSE (we have the CSEInfo) or if this Opcode is
+  /// safe to CSE.
+  bool canPerformCSEForOpc(unsigned Opc) const;
+
+  void profileDstOp(const DstOp &Op, GISelInstProfileBuilder &B) const;
+
+  void profileDstOps(ArrayRef<DstOp> Ops, GISelInstProfileBuilder &B) const {
+    for (const DstOp &Op : Ops)
+      profileDstOp(Op, B);
+  }
+
+  void profileSrcOp(const SrcOp &Op, GISelInstProfileBuilder &B) const;
+
+  void profileSrcOps(ArrayRef<SrcOp> Ops, GISelInstProfileBuilder &B) const {
+    for (const SrcOp &Op : Ops)
+      profileSrcOp(Op, B);
+  }
+
+  void profileMBBOpcode(GISelInstProfileBuilder &B, unsigned Opc) const;
+
+  void profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
+                         ArrayRef<SrcOp> SrcOps, Optional<unsigned> Flags,
+                         GISelInstProfileBuilder &B) const;
+
+  // Takes a MachineInstrBuilder and inserts it into the CSEMap using the
+  // NodeInsertPos.
+  MachineInstrBuilder memoizeMI(MachineInstrBuilder MIB, void *NodeInsertPos);
+
+  // If we can CSE an instruction but still need to materialize to a VReg,
+  // we emit a copy from the CSE'd inst to the VReg.
+  MachineInstrBuilder generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
+                                               MachineInstrBuilder &MIB);
+
+  // If we can CSE an instruction but still need to materialize to a VReg,
+  // check if we can generate copies. It's not possible to return a single
+  // MIB while emitting copies to multiple vregs.
+  bool checkCopyToDefsPossible(ArrayRef<DstOp> DstOps);
+
+public:
+  // Pull in base class constructors.
+  using MachineIRBuilder::MachineIRBuilder;
+  // Unhide buildInstr
+  MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+                                 ArrayRef<SrcOp> SrcOps,
+                                 Optional<unsigned> Flag = None) override;
+  // Bring in the other overload from the base class.
+  using MachineIRBuilder::buildConstant;
+
+  MachineInstrBuilder buildConstant(const DstOp &Res,
+                                    const ConstantInt &Val) override;
+
+  // Bring in the other overload from the base class.
+  using MachineIRBuilder::buildFConstant;
+  MachineInstrBuilder buildFConstant(const DstOp &Res,
+                                     const ConstantFP &Val) override;
+};
+} // namespace llvm
+#endif
diff --git a/include/llvm/CodeGen/GlobalISel/CallLowering.h b/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 32980ce..ab498e8 100644
--- a/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -40,6 +40,7 @@
 class CallLowering {
   const TargetLowering *TLI;
 
+  virtual void anchor();
 public:
   struct ArgInfo {
     unsigned Reg;
@@ -108,6 +109,9 @@
     MachineIRBuilder &MIRBuilder;
     MachineRegisterInfo &MRI;
     CCAssignFn *AssignFn;
+
+  private:
+    virtual void anchor();
   };
 
 protected:
diff --git a/include/llvm/CodeGen/GlobalISel/Combiner.h b/include/llvm/CodeGen/GlobalISel/Combiner.h
index 36a33de..b097c78 100644
--- a/include/llvm/CodeGen/GlobalISel/Combiner.h
+++ b/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -21,6 +21,7 @@
 namespace llvm {
 class MachineRegisterInfo;
 class CombinerInfo;
+class GISelCSEInfo;
 class TargetPassConfig;
 class MachineFunction;
 
@@ -28,14 +29,17 @@
 public:
   Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);
 
-  bool combineMachineInstrs(MachineFunction &MF);
+  /// If CSEInfo is not null, the Combiner will set up an observer for
+  /// CSEInfo and instantiate a CSEMIRBuilder. Pass nullptr if CSE is not
+  /// needed.
+  bool combineMachineInstrs(MachineFunction &MF, GISelCSEInfo *CSEInfo);
 
 protected:
   CombinerInfo &CInfo;
 
   MachineRegisterInfo *MRI = nullptr;
   const TargetPassConfig *TPC;
-  MachineIRBuilder Builder;
+  std::unique_ptr<MachineIRBuilder> Builder;
 };
 
 } // End namespace llvm.
diff --git a/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h b/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
index 118c65e..220a571 100644
--- a/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
@@ -15,57 +15,6 @@
 
 namespace llvm {
 
-static Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
-                                         const unsigned Op2,
-                                         const MachineRegisterInfo &MRI) {
-  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
-  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
-  if (MaybeOp1Cst && MaybeOp2Cst) {
-    LLT Ty = MRI.getType(Op1);
-    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
-    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
-    switch (Opcode) {
-    default:
-      break;
-    case TargetOpcode::G_ADD:
-      return C1 + C2;
-    case TargetOpcode::G_AND:
-      return C1 & C2;
-    case TargetOpcode::G_ASHR:
-      return C1.ashr(C2);
-    case TargetOpcode::G_LSHR:
-      return C1.lshr(C2);
-    case TargetOpcode::G_MUL:
-      return C1 * C2;
-    case TargetOpcode::G_OR:
-      return C1 | C2;
-    case TargetOpcode::G_SHL:
-      return C1 << C2;
-    case TargetOpcode::G_SUB:
-      return C1 - C2;
-    case TargetOpcode::G_XOR:
-      return C1 ^ C2;
-    case TargetOpcode::G_UDIV:
-      if (!C2.getBoolValue())
-        break;
-      return C1.udiv(C2);
-    case TargetOpcode::G_SDIV:
-      if (!C2.getBoolValue())
-        break;
-      return C1.sdiv(C2);
-    case TargetOpcode::G_UREM:
-      if (!C2.getBoolValue())
-        break;
-      return C1.urem(C2);
-    case TargetOpcode::G_SREM:
-      if (!C2.getBoolValue())
-        break;
-      return C1.srem(C2);
-    }
-  }
-  return None;
-}
-
 /// An MIRBuilder which does trivial constant folding of binary ops.
 /// Calls to buildInstr will also try to constant fold binary ops.
 class ConstantFoldingMIRBuilder : public MachineIRBuilder {
diff --git a/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h b/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
index d21c733..c8e8a7a 100644
--- a/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
+++ b/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
@@ -15,6 +15,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_GISELCHANGEOBSERVER_H
 
 #include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/MachineFunction.h"
 
 namespace llvm {
 class MachineInstr;
@@ -32,13 +33,13 @@
   virtual ~GISelChangeObserver() {}
 
   /// An instruction is about to be erased.
-  virtual void erasingInstr(const MachineInstr &MI) = 0;
+  virtual void erasingInstr(MachineInstr &MI) = 0;
   /// An instruction was created and inserted into the function.
-  virtual void createdInstr(const MachineInstr &MI) = 0;
+  virtual void createdInstr(MachineInstr &MI) = 0;
   /// This instruction is about to be mutated in some way.
-  virtual void changingInstr(const MachineInstr &MI) = 0;
+  virtual void changingInstr(MachineInstr &MI) = 0;
   /// This instruction was mutated in some way.
-  virtual void changedInstr(const MachineInstr &MI) = 0;
+  virtual void changedInstr(MachineInstr &MI) = 0;
 
   /// All the instructions using the given register are being changed.
   /// For convenience, finishedChangingAllUsesOfReg() will report the completion
@@ -51,5 +52,60 @@
 
 };
 
+/// Simple wrapper observer that takes several observers and calls
+/// each one for each event. If there are multiple observers (say CSE,
+/// Legalizer, Combiner), it's sufficient to register this one with the
+/// machine function as the delegate.
+class GISelObserverWrapper : public MachineFunction::Delegate,
+                             public GISelChangeObserver {
+  SmallVector<GISelChangeObserver *, 4> Observers;
+
+public:
+  GISelObserverWrapper() = default;
+  GISelObserverWrapper(ArrayRef<GISelChangeObserver *> Obs)
+      : Observers(Obs.begin(), Obs.end()) {}
+  // Adds an observer.
+  void addObserver(GISelChangeObserver *O) { Observers.push_back(O); }
+  // Removes an observer from the list; does nothing if the observer is not
+  // present.
+  void removeObserver(GISelChangeObserver *O) {
+    auto It = std::find(Observers.begin(), Observers.end(), O);
+    if (It != Observers.end())
+      Observers.erase(It);
+  }
+  // API for Observer.
+  void erasingInstr(MachineInstr &MI) override {
+    for (auto &O : Observers)
+      O->erasingInstr(MI);
+  }
+  void createdInstr(MachineInstr &MI) override {
+    for (auto &O : Observers)
+      O->createdInstr(MI);
+  }
+  void changingInstr(MachineInstr &MI) override {
+    for (auto &O : Observers)
+      O->changingInstr(MI);
+  }
+  void changedInstr(MachineInstr &MI) override {
+    for (auto &O : Observers)
+      O->changedInstr(MI);
+  }
+  // API for MachineFunction::Delegate
+  void MF_HandleInsertion(MachineInstr &MI) override { createdInstr(MI); }
+  void MF_HandleRemoval(MachineInstr &MI) override { erasingInstr(MI); }
+};
+
+/// A simple RAII-based delegate installer.
+/// Use this in a scope to install a delegate to the MachineFunction and reset
+/// it at the end of the scope.
+class RAIIDelegateInstaller {
+  MachineFunction &MF;
+  MachineFunction::Delegate *Delegate;
+
+public:
+  RAIIDelegateInstaller(MachineFunction &MF, MachineFunction::Delegate *Del);
+  ~RAIIDelegateInstaller();
+};
+
 } // namespace llvm
 #endif
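
Illustrative sketch (not part of the patch): with the declarations above, a
set of observers can be fanned out through GISelObserverWrapper and installed
on a MachineFunction for the lifetime of a scope via RAIIDelegateInstaller.
The function name is hypothetical.

    // Insertions and removals reported by MF reach both observers through
    // the MF_HandleInsertion/MF_HandleRemoval delegate hooks.
    void runWithObservers(MachineFunction &MF, GISelChangeObserver &CSE,
                          GISelChangeObserver &Legalizer) {
      GISelObserverWrapper WrapperObserver({&CSE, &Legalizer});
      RAIIDelegateInstaller InstallDelegate(MF, &WrapperObserver);
      // ... create/erase instructions here ...
    } // ~RAIIDelegateInstaller resets the delegate.
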
diff --git a/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
index b32c5af..1571841 100644
--- a/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
+++ b/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -18,6 +18,7 @@
 
 namespace llvm {
 
+class MachineInstr;
 class MachineFunction;
 
 // Worklist which mostly works similarly to InstCombineWorkList, but on
@@ -25,23 +26,15 @@
 // erasing an element doesn't move all elements over one place - instead just
 // nulls out the element of the vector.
 //
-// This worklist operates on instructions within a particular function. This is
-// important for acquiring the rights to modify/replace instructions a
-// GISelChangeObserver reports as the observer doesn't have the right to make
-// changes to the instructions it sees so we use our access to the
-// MachineFunction to establish that it's ok to add a given instruction to the
-// worklist.
-//
 // FIXME: Does it make sense to factor out common code with the
 // InstCombineWorkList?
 template<unsigned N>
 class GISelWorkList {
-  MachineFunction *MF;
   SmallVector<MachineInstr *, N> Worklist;
   DenseMap<MachineInstr *, unsigned> WorklistMap;
 
 public:
-  GISelWorkList(MachineFunction *MF) : MF(MF) {}
+  GISelWorkList() = default;
 
   bool empty() const { return WorklistMap.empty(); }
 
@@ -49,27 +42,8 @@
 
   /// Add the specified instruction to the worklist if it isn't already in it.
   void insert(MachineInstr *I) {
-    // It would be safe to add this instruction to the worklist regardless but
-    // for consistency with the const version, check that the instruction we're
-    // adding would have been accepted if we were given a const pointer instead.
-    insert(const_cast<const MachineInstr *>(I));
-  }
-
-  void insert(const MachineInstr *I) {
-    // Confirm we'd be able to find the non-const pointer we want to schedule if
-    // we wanted to. We have the right to schedule work that may modify any
-    // instruction in MF.
-    assert(I->getParent() && "Expected parent BB");
-    assert(I->getParent()->getParent() && "Expected parent function");
-    assert((!MF || I->getParent()->getParent() == MF) &&
-           "Expected parent function to be current function or not given");
-
-    // But don't actually do the search since we can derive it from the const
-    // pointer.
-    MachineInstr *NonConstI = const_cast<MachineInstr *>(I);
-    if (WorklistMap.try_emplace(NonConstI, Worklist.size()).second) {
-      Worklist.push_back(NonConstI);
-    }
+    if (WorklistMap.try_emplace(I, Worklist.size()).second)
+      Worklist.push_back(I);
   }
 
   /// Remove I from the worklist if it exists.
@@ -83,6 +57,11 @@
     WorklistMap.erase(It);
   }
 
+  void clear() {
+    Worklist.clear();
+    WorklistMap.clear();
+  }
+
   MachineInstr *pop_back_val() {
     MachineInstr *I;
     do {
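
For illustration only, a condensed standalone sketch of the null-out erase
scheme this worklist uses (simplified names; the real class is templated over
the inline size N):

    struct TinyGISelWorkList {
      SmallVector<MachineInstr *, 8> Worklist;
      DenseMap<MachineInstr *, unsigned> WorklistMap;

      void insert(MachineInstr *I) {
        if (WorklistMap.try_emplace(I, Worklist.size()).second)
          Worklist.push_back(I);
      }
      void erase(MachineInstr *I) {
        auto It = WorklistMap.find(I);
        if (It == WorklistMap.end())
          return;                       // not on the worklist
        Worklist[It->second] = nullptr; // null out instead of shifting
        WorklistMap.erase(It);
      }
      MachineInstr *pop_back_val() {
        MachineInstr *I;
        do {
          I = Worklist.pop_back_val(); // skip slots nulled out by erase()
        } while (!I);
        WorklistMap.erase(I);
        return I;
      }
    };
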
diff --git a/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 04629f6..d1770bf 100644
--- a/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -21,11 +21,11 @@
 
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Types.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Support/Allocator.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Allocator.h"
 #include <memory>
 #include <utility>
 
@@ -444,11 +444,11 @@
   // I.e., compared to regular MIBuilder, this one also inserts the instruction
   // in the current block; it can create blocks, etc.; basically a kind of
   // IRBuilder, but for Machine IR.
-  MachineIRBuilder CurBuilder;
+  std::unique_ptr<MachineIRBuilder> CurBuilder;
 
   // Builder set to the entry block (just after ABI lowering instructions). Used
   // as a convenient location for Constants.
-  MachineIRBuilder EntryBuilder;
+  std::unique_ptr<MachineIRBuilder> EntryBuilder;
 
   // The MachineFunction currently being translated.
   MachineFunction *MF;
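
A sketch of why the builders became unique_ptr: the translator can pick the
concrete builder type at runtime. EnableCSE is an illustrative flag, and this
assumes CSEMIRBuilder is constructible like its base class:

    if (EnableCSE)
      CurBuilder = llvm::make_unique<CSEMIRBuilder>();    // CSE-ing builder
    else
      CurBuilder = llvm::make_unique<MachineIRBuilder>(); // plain builder
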
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 4d3e4a3..9b4ecf9 100644
--- a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -49,9 +49,10 @@
     UnableToLegalize,
   };
 
-  LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer);
+  LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
+                  MachineIRBuilder &B);
   LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
-                  GISelChangeObserver &Observer);
+                  GISelChangeObserver &Observer, MachineIRBuilder &B);
 
   /// Replace \p MI by a sequence of legal instructions that can implement the
   /// same operation. Note that this means \p MI may be deleted, so any iterator
@@ -90,7 +91,7 @@
 
   /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
   /// functions.
-  MachineIRBuilder MIRBuilder;
+  MachineIRBuilder &MIRBuilder;
 
   /// Expose LegalizerInfo so the clients can re-use.
   const LegalizerInfo &getLegalizerInfo() const { return LI; }
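
Because the helper now borrows its builder, callers own it and pass it in. A
minimal sketch, assuming the helper is driven through its existing
legalizeInstrStep() entry point (the wrapper function is hypothetical):

    void legalizeOne(MachineFunction &MF, const LegalizerInfo &LI,
                     GISelChangeObserver &Observer, MachineIRBuilder &B,
                     MachineInstr &MI) {
      // Helper.MIRBuilder aliases B, so CSE state installed on B is shared
      // with everything else emitted through B.
      LegalizerHelper Helper(MF, LI, Observer, B);
      (void)Helper.legalizeInstrStep(MI);
    }
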
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 1745b97..37de8f0 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
 #define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
 
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
 #include "llvm/CodeGen/GlobalISel/Types.h"
 
 #include "llvm/CodeGen/LowLevelType.h"
@@ -52,6 +53,8 @@
   /// @}
 
   GISelChangeObserver *Observer;
+
+  GISelCSEInfo *CSEInfo;
 };
 
 class DstOp {
@@ -81,8 +84,6 @@
     }
   }
 
-  DstType getType() const { return Ty; }
-
   LLT getLLTTy(const MachineRegisterInfo &MRI) const {
     switch (Ty) {
     case DstType::Ty_RC:
@@ -95,6 +96,20 @@
     llvm_unreachable("Unrecognised DstOp::DstType enum");
   }
 
+  unsigned getReg() const {
+    assert(Ty == DstType::Ty_Reg && "Not a register");
+    return Reg;
+  }
+
+  const TargetRegisterClass *getRegClass() const {
+    switch (Ty) {
+    case DstType::Ty_RC:
+      return RC;
+    default:
+      llvm_unreachable("Not a RC Operand");
+    }
+  }
+
   DstType getDstOpKind() const { return Ty; }
 
 private:
@@ -220,16 +235,25 @@
 
   /// Getter for MRI
   MachineRegisterInfo *getMRI() { return State.MRI; }
+  const MachineRegisterInfo *getMRI() const { return State.MRI; }
 
   /// Getter for the State
   MachineIRBuilderState &getState() { return State; }
 
   /// Getter for the basic block we currently build.
-  MachineBasicBlock &getMBB() {
+  const MachineBasicBlock &getMBB() const {
     assert(State.MBB && "MachineBasicBlock is not set");
     return *State.MBB;
   }
 
+  MachineBasicBlock &getMBB() {
+    return const_cast<MachineBasicBlock &>(
+        const_cast<const MachineIRBuilder *>(this)->getMBB());
+  }
+
+  GISelCSEInfo *getCSEInfo() { return State.CSEInfo; }
+  const GISelCSEInfo *getCSEInfo() const { return State.CSEInfo; }
+
   /// Current insertion point for new instructions.
   MachineBasicBlock::iterator getInsertPt() { return State.II; }
 
@@ -239,10 +263,12 @@
   void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
   /// @}
 
+  void setCSEInfo(GISelCSEInfo *Info);
+
   /// \name Setters for the insertion point.
   /// @{
   /// Set the MachineFunction where to build instructions.
-  void setMF(MachineFunction &);
+  void setMF(MachineFunction &MF);
 
   /// Set the insertion point to the end of \p MBB.
   /// \pre \p MBB must be contained by getMF().
@@ -534,7 +560,8 @@
   ///      type.
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val);
+  virtual MachineInstrBuilder buildConstant(const DstOp &Res,
+                                            const ConstantInt &Val);
 
   /// Build and insert \p Res = G_CONSTANT \p Val
   ///
@@ -555,7 +582,8 @@
   /// \pre \p Res must be a generic virtual register with scalar type.
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val);
+  virtual MachineInstrBuilder buildFConstant(const DstOp &Res,
+                                             const ConstantFP &Val);
 
   MachineInstrBuilder buildFConstant(const DstOp &Res, double Val);
 
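
With buildConstant/buildFConstant now virtual and CSEInfo carried in the
builder state, a CSE-aware subclass can intercept constant creation. A hedged
sketch of the intended use (the helper name is hypothetical):

    void emitConstantWithCSE(MachineIRBuilder &B, GISelCSEInfo *Info,
                             const DstOp &Res, const ConstantInt &Val) {
      B.setCSEInfo(Info);        // may be null, which disables CSE
      B.buildConstant(Res, Val); // virtual: a CSE-ing builder may return an
                                 // existing equivalent G_CONSTANT instead
    }
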
diff --git a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 82fd7ed..c33b32b 100644
--- a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -103,8 +103,8 @@
   /// Currently the TableGen-like file would look like:
   /// \code
   /// PartialMapping[] = {
-  /// /*32-bit add*/ {0, 32, GPR},
-  /// /*2x32-bit add*/ {0, 32, GPR}, {0, 32, GPR}, // <-- Same entry 3x
+  /// /*32-bit add*/    {0, 32, GPR}, // Scalar entry repeated for first vec elt.
+  /// /*2x32-bit add*/  {0, 32, GPR}, {32, 32, GPR},
   /// /*<2x32-bit> vadd*/ {0, 64, VPR}
   /// }; // PartialMapping duplicated.
   ///
@@ -118,14 +118,15 @@
   /// With the array of pointer, we would have:
   /// \code
   /// PartialMapping[] = {
-  /// /*32-bit add*/ {0, 32, GPR},
+  /// /*32-bit add lower */ {0, 32, GPR},
+  /// /*32-bit add upper */ {32, 32, GPR},
   /// /*<2x32-bit> vadd*/ {0, 64, VPR}
   /// }; // No more duplication.
   ///
   /// BreakDowns[] = {
   /// /*AddBreakDown*/ &PartialMapping[0],
-  /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[0],
-  /// /*VAddBreakDown*/ &PartialMapping[1]
+  /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
+  /// /*VAddBreakDown*/ &PartialMapping[2]
   /// }; // Addresses of PartialMapping duplicated (smaller).
   ///
   /// ValueMapping[] {
diff --git a/include/llvm/CodeGen/GlobalISel/Utils.h b/include/llvm/CodeGen/GlobalISel/Utils.h
index 51e3a27..82b791d 100644
--- a/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -108,5 +108,10 @@
 /// fallback.
 void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
 
+/// If \p Op1 and \p Op2 are both defined by G_CONSTANTs, apply \p Opcode to
+/// them and return the folded result; otherwise return None.
+Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
+                                  const unsigned Op2,
+                                  const MachineRegisterInfo &MRI);
 } // End namespace llvm.
 #endif
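
A small sketch of calling the now-public folder from a combine (hypothetical
helper; it succeeds only when both operands are constant vregs):

    bool tryFoldGAdd(unsigned Op1, unsigned Op2,
                     const MachineRegisterInfo &MRI) {
      Optional<APInt> Folded =
          ConstantFoldBinOp(TargetOpcode::G_ADD, Op1, Op2, MRI);
      if (!Folded)
        return false; // at least one operand is not a G_CONSTANT
      // ... materialize *Folded as a G_CONSTANT instead of the G_ADD ...
      return true;
    }
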
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index 35305bd..25edf5b 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -372,16 +372,18 @@
 
   public:
     virtual ~Delegate() = default;
-    virtual void MF_HandleInsertion(const MachineInstr &MI) = 0;
-    virtual void MF_HandleRemoval(const MachineInstr &MI) = 0;
+    /// Callback after an insertion. This should not modify the MI directly.
+    virtual void MF_HandleInsertion(MachineInstr &MI) = 0;
+    /// Callback before a removal. This should not modify the MI directly.
+    virtual void MF_HandleRemoval(MachineInstr &MI) = 0;
   };
 
 private:
   Delegate *TheDelegate = nullptr;
 
   // Callbacks for insertion and removal.
-  void handleInsertion(const MachineInstr &MI);
-  void handleRemoval(const MachineInstr &MI);
+  void handleInsertion(MachineInstr &MI);
+  void handleRemoval(MachineInstr &MI);
   friend struct ilist_traits<MachineInstr>;
 
 public:
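
A minimal conforming delegate, given the contract above (it only counts
events and, per the comments, never mutates MI):

    struct CountingDelegate final : public MachineFunction::Delegate {
      unsigned NumInserted = 0;
      unsigned NumRemoved = 0;
      void MF_HandleInsertion(MachineInstr &MI) override { ++NumInserted; }
      void MF_HandleRemoval(MachineInstr &MI) override { ++NumRemoved; }
    };
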
diff --git a/include/llvm/CodeGen/MachinePipeliner.h b/include/llvm/CodeGen/MachinePipeliner.h
new file mode 100644
index 0000000..38cb33e
--- /dev/null
+++ b/include/llvm/CodeGen/MachinePipeliner.h
@@ -0,0 +1,609 @@
+//===- MachinePipeliner.h - Machine Software Pipeliner Pass -------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An implementation of the Swing Modulo Scheduling (SMS) software pipeliner.
+//
+// Software pipelining (SWP) is an instruction scheduling technique for loops
+// that overlaps loop iterations and exploits ILP via a compiler transformation.
+//
+// Swing Modulo Scheduling is an implementation of software pipelining
+// that generates schedules that are near optimal in terms of initiation
+// interval, register requirements, and stage count. See the papers:
+//
+// "Swing Modulo Scheduling: A Lifetime-Sensitive Approach", by J. Llosa,
+// A. Gonzalez, E. Ayguade, and M. Valero. In PACT '96 Proceedings of the 1996
+// Conference on Parallel Architectures and Compilation Techniques.
+//
+// "Lifetime-Sensitive Modulo Scheduling in a Production Environment", by J.
+// Llosa, E. Ayguade, A. Gonzalez, M. Valero, and J. Eckhardt. In IEEE
+// Transactions on Computers, Vol. 50, No. 3, 2001.
+//
+// "An Implementation of Swing Modulo Scheduling With Extensions for
+// Superblocks", by T. Lattner, Master's Thesis, University of Illinois at
+// Urbana-Champaign, 2005.
+//
+//
+// The SMS algorithm consists of three main steps after computing the minimal
+// initiation interval (MII).
+// 1) Analyze the dependence graph and compute information about each
+//    instruction in the graph.
+// 2) Order the nodes (instructions) by priority based upon the heuristics
+//    described in the algorithm.
+// 3) Attempt to schedule the nodes in the specified order using the MII.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_CODEGEN_MACHINEPIPELINER_H
+#define LLVM_LIB_CODEGEN_MACHINEPIPELINER_H
+
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+namespace llvm {
+
+class NodeSet;
+class SMSchedule;
+
+extern cl::opt<bool> SwpEnableCopyToPhi;
+
+/// The main class in the implementation of the target independent
+/// software pipeliner pass.
+class MachinePipeliner : public MachineFunctionPass {
+public:
+  MachineFunction *MF = nullptr;
+  const MachineLoopInfo *MLI = nullptr;
+  const MachineDominatorTree *MDT = nullptr;
+  const InstrItineraryData *InstrItins = nullptr;
+  const TargetInstrInfo *TII = nullptr;
+  RegisterClassInfo RegClassInfo;
+
+#ifndef NDEBUG
+  static int NumTries;
+#endif
+
+  /// Cache the target analysis information about the loop.
+  struct LoopInfo {
+    MachineBasicBlock *TBB = nullptr;
+    MachineBasicBlock *FBB = nullptr;
+    SmallVector<MachineOperand, 4> BrCond;
+    MachineInstr *LoopInductionVar = nullptr;
+    MachineInstr *LoopCompare = nullptr;
+  };
+  LoopInfo LI;
+
+  static char ID;
+
+  MachinePipeliner() : MachineFunctionPass(ID) {
+    initializeMachinePipelinerPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addPreserved<AAResultsWrapperPass>();
+    AU.addRequired<MachineLoopInfo>();
+    AU.addRequired<MachineDominatorTree>();
+    AU.addRequired<LiveIntervals>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+private:
+  void preprocessPhiNodes(MachineBasicBlock &B);
+  bool canPipelineLoop(MachineLoop &L);
+  bool scheduleLoop(MachineLoop &L);
+  bool swingModuloScheduler(MachineLoop &L);
+};
+
+/// This class builds the dependence graph for the instructions in a loop,
+/// and attempts to schedule the instructions using the SMS algorithm.
+class SwingSchedulerDAG : public ScheduleDAGInstrs {
+  MachinePipeliner &Pass;
+  /// The minimum initiation interval between iterations for this schedule.
+  unsigned MII = 0;
+  /// Set to true if a valid pipelined schedule is found for the loop.
+  bool Scheduled = false;
+  MachineLoop &Loop;
+  LiveIntervals &LIS;
+  const RegisterClassInfo &RegClassInfo;
+
+  /// A topological ordering of the SUnits, which is needed for changing
+  /// dependences and iterating over the SUnits.
+  ScheduleDAGTopologicalSort Topo;
+
+  struct NodeInfo {
+    int ASAP = 0;
+    int ALAP = 0;
+    int ZeroLatencyDepth = 0;
+    int ZeroLatencyHeight = 0;
+
+    NodeInfo() = default;
+  };
+  /// Computed properties for each node in the graph.
+  std::vector<NodeInfo> ScheduleInfo;
+
+  enum OrderKind { BottomUp = 0, TopDown = 1 };
+  /// Computed node ordering for scheduling.
+  SetVector<SUnit *> NodeOrder;
+
+  using NodeSetType = SmallVector<NodeSet, 8>;
+  using ValueMapTy = DenseMap<unsigned, unsigned>;
+  using MBBVectorTy = SmallVectorImpl<MachineBasicBlock *>;
+  using InstrMapTy = DenseMap<MachineInstr *, MachineInstr *>;
+
+  /// Instructions to change when emitting the final schedule.
+  DenseMap<SUnit *, std::pair<unsigned, int64_t>> InstrChanges;
+
+  /// We may create a new instruction, so remember it because it
+  /// must be deleted when the pass is finished.
+  SmallPtrSet<MachineInstr *, 4> NewMIs;
+
+  /// Ordered list of DAG postprocessing steps.
+  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
+
+  /// Helper class to implement Johnson's circuit finding algorithm.
+  class Circuits {
+    std::vector<SUnit> &SUnits;
+    SetVector<SUnit *> Stack;
+    BitVector Blocked;
+    SmallVector<SmallPtrSet<SUnit *, 4>, 10> B;
+    SmallVector<SmallVector<int, 4>, 16> AdjK;
+    // Node to Index from ScheduleDAGTopologicalSort
+    std::vector<int> *Node2Idx;
+    unsigned NumPaths = 0;
+    static unsigned MaxPaths;
+
+  public:
+    Circuits(std::vector<SUnit> &SUs, ScheduleDAGTopologicalSort &Topo)
+        : SUnits(SUs), Blocked(SUs.size()), B(SUs.size()), AdjK(SUs.size()) {
+      Node2Idx = new std::vector<int>(SUs.size());
+      unsigned Idx = 0;
+      for (const auto &NodeNum : Topo)
+        Node2Idx->at(NodeNum) = Idx++;
+    }
+
+    ~Circuits() { delete Node2Idx; }
+
+    /// Reset the data structures used in the circuit algorithm.
+    void reset() {
+      Stack.clear();
+      Blocked.reset();
+      B.assign(SUnits.size(), SmallPtrSet<SUnit *, 4>());
+      NumPaths = 0;
+    }
+
+    void createAdjacencyStructure(SwingSchedulerDAG *DAG);
+    bool circuit(int V, int S, NodeSetType &NodeSets, bool HasBackedge = false);
+    void unblock(int U);
+  };
+
+  struct CopyToPhiMutation : public ScheduleDAGMutation {
+    void apply(ScheduleDAGInstrs *DAG) override;
+  };
+
+public:
+  SwingSchedulerDAG(MachinePipeliner &P, MachineLoop &L, LiveIntervals &lis,
+                    const RegisterClassInfo &rci)
+      : ScheduleDAGInstrs(*P.MF, P.MLI, false), Pass(P), Loop(L), LIS(lis),
+        RegClassInfo(rci), Topo(SUnits, &ExitSU) {
+    P.MF->getSubtarget().getSMSMutations(Mutations);
+    if (SwpEnableCopyToPhi)
+      Mutations.push_back(llvm::make_unique<CopyToPhiMutation>());
+  }
+
+  void schedule() override;
+  void finishBlock() override;
+
+  /// Return true if the loop kernel has been scheduled.
+  bool hasNewSchedule() { return Scheduled; }
+
+  /// Return the earliest time an instruction may be scheduled.
+  int getASAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ASAP; }
+
+  /// Return the latest time an instruction may be scheduled.
+  int getALAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ALAP; }
+
+  /// The mobility function, which is the number of slots in which
+  /// an instruction may be scheduled.
+  int getMOV(SUnit *Node) { return getALAP(Node) - getASAP(Node); }
+
+  /// The depth, in the dependence graph, for a node.
+  unsigned getDepth(SUnit *Node) { return Node->getDepth(); }
+
+  /// The maximum unweighted length of a path from an arbitrary node to the
+  /// given node in which each edge has latency 0.
+  int getZeroLatencyDepth(SUnit *Node) {
+    return ScheduleInfo[Node->NodeNum].ZeroLatencyDepth;
+  }
+
+  /// The height, in the dependence graph, for a node.
+  unsigned getHeight(SUnit *Node) { return Node->getHeight(); }
+
+  /// The maximum unweighted length of a path from the given node to an
+  /// arbitrary node in which each edge has latency 0.
+  int getZeroLatencyHeight(SUnit *Node) {
+    return ScheduleInfo[Node->NodeNum].ZeroLatencyHeight;
+  }
+
+  /// Return true if the dependence is a back-edge in the data dependence graph.
+  /// Since the DAG doesn't contain cycles, we represent a cycle in the graph
+  /// using an anti dependence from a Phi to an instruction.
+  bool isBackedge(SUnit *Source, const SDep &Dep) {
+    if (Dep.getKind() != SDep::Anti)
+      return false;
+    return Source->getInstr()->isPHI() || Dep.getSUnit()->getInstr()->isPHI();
+  }
+
+  bool isLoopCarriedDep(SUnit *Source, const SDep &Dep, bool isSucc = true);
+
+  /// The distance function, which indicates that operation V of iteration I
+  /// depends on operation U of iteration I-distance.
+  unsigned getDistance(SUnit *U, SUnit *V, const SDep &Dep) {
+    // Instructions that feed a Phi have a distance of 1. Computing larger
+    // values for arrays requires data dependence information.
+    if (V->getInstr()->isPHI() && Dep.getKind() == SDep::Anti)
+      return 1;
+    return 0;
+  }
+
+  /// Set the Minimum Initiation Interval for this schedule attempt.
+  void setMII(unsigned mii) { MII = mii; }
+
+  void applyInstrChange(MachineInstr *MI, SMSchedule &Schedule);
+
+  void fixupRegisterOverlaps(std::deque<SUnit *> &Instrs);
+
+  /// Return the new base register that was stored away for the changed
+  /// instruction.
+  unsigned getInstrBaseReg(SUnit *SU) {
+    DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
+        InstrChanges.find(SU);
+    if (It != InstrChanges.end())
+      return It->second.first;
+    return 0;
+  }
+
+  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
+    Mutations.push_back(std::move(Mutation));
+  }
+
+  static bool classof(const ScheduleDAGInstrs *DAG) { return true; }
+
+private:
+  void addLoopCarriedDependences(AliasAnalysis *AA);
+  void updatePhiDependences();
+  void changeDependences();
+  unsigned calculateResMII();
+  unsigned calculateRecMII(NodeSetType &RecNodeSets);
+  void findCircuits(NodeSetType &NodeSets);
+  void fuseRecs(NodeSetType &NodeSets);
+  void removeDuplicateNodes(NodeSetType &NodeSets);
+  void computeNodeFunctions(NodeSetType &NodeSets);
+  void registerPressureFilter(NodeSetType &NodeSets);
+  void colocateNodeSets(NodeSetType &NodeSets);
+  void checkNodeSets(NodeSetType &NodeSets);
+  void groupRemainingNodes(NodeSetType &NodeSets);
+  void addConnectedNodes(SUnit *SU, NodeSet &NewSet,
+                         SetVector<SUnit *> &NodesAdded);
+  void computeNodeOrder(NodeSetType &NodeSets);
+  void checkValidNodeOrder(const NodeSetType &Circuits) const;
+  bool schedulePipeline(SMSchedule &Schedule);
+  void generatePipelinedLoop(SMSchedule &Schedule);
+  void generateProlog(SMSchedule &Schedule, unsigned LastStage,
+                      MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
+                      MBBVectorTy &PrologBBs);
+  void generateEpilog(SMSchedule &Schedule, unsigned LastStage,
+                      MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
+                      MBBVectorTy &EpilogBBs, MBBVectorTy &PrologBBs);
+  void generateExistingPhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
+                            MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
+                            SMSchedule &Schedule, ValueMapTy *VRMap,
+                            InstrMapTy &InstrMap, unsigned LastStageNum,
+                            unsigned CurStageNum, bool IsLast);
+  void generatePhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
+                    MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
+                    SMSchedule &Schedule, ValueMapTy *VRMap,
+                    InstrMapTy &InstrMap, unsigned LastStageNum,
+                    unsigned CurStageNum, bool IsLast);
+  void removeDeadInstructions(MachineBasicBlock *KernelBB,
+                              MBBVectorTy &EpilogBBs);
+  void splitLifetimes(MachineBasicBlock *KernelBB, MBBVectorTy &EpilogBBs,
+                      SMSchedule &Schedule);
+  void addBranches(MBBVectorTy &PrologBBs, MachineBasicBlock *KernelBB,
+                   MBBVectorTy &EpilogBBs, SMSchedule &Schedule,
+                   ValueMapTy *VRMap);
+  bool computeDelta(MachineInstr &MI, unsigned &Delta);
+  void updateMemOperands(MachineInstr &NewMI, MachineInstr &OldMI,
+                         unsigned Num);
+  MachineInstr *cloneInstr(MachineInstr *OldMI, unsigned CurStageNum,
+                           unsigned InstStageNum);
+  MachineInstr *cloneAndChangeInstr(MachineInstr *OldMI, unsigned CurStageNum,
+                                    unsigned InstStageNum,
+                                    SMSchedule &Schedule);
+  void updateInstruction(MachineInstr *NewMI, bool LastDef,
+                         unsigned CurStageNum, unsigned InstrStageNum,
+                         SMSchedule &Schedule, ValueMapTy *VRMap);
+  MachineInstr *findDefInLoop(unsigned Reg);
+  unsigned getPrevMapVal(unsigned StageNum, unsigned PhiStage, unsigned LoopVal,
+                         unsigned LoopStage, ValueMapTy *VRMap,
+                         MachineBasicBlock *BB);
+  void rewritePhiValues(MachineBasicBlock *NewBB, unsigned StageNum,
+                        SMSchedule &Schedule, ValueMapTy *VRMap,
+                        InstrMapTy &InstrMap);
+  void rewriteScheduledInstr(MachineBasicBlock *BB, SMSchedule &Schedule,
+                             InstrMapTy &InstrMap, unsigned CurStageNum,
+                             unsigned PhiNum, MachineInstr *Phi,
+                             unsigned OldReg, unsigned NewReg,
+                             unsigned PrevReg = 0);
+  bool canUseLastOffsetValue(MachineInstr *MI, unsigned &BasePos,
+                             unsigned &OffsetPos, unsigned &NewBase,
+                             int64_t &NewOffset);
+  void postprocessDAG();
+};
+
+/// A NodeSet contains a set of SUnit DAG nodes with additional information
+/// that assigns a priority to the set.
+class NodeSet {
+  SetVector<SUnit *> Nodes;
+  bool HasRecurrence = false;
+  unsigned RecMII = 0;
+  int MaxMOV = 0;
+  unsigned MaxDepth = 0;
+  unsigned Colocate = 0;
+  SUnit *ExceedPressure = nullptr;
+  unsigned Latency = 0;
+
+public:
+  using iterator = SetVector<SUnit *>::const_iterator;
+
+  NodeSet() = default;
+  NodeSet(iterator S, iterator E) : Nodes(S, E), HasRecurrence(true) {
+    Latency = 0;
+    for (unsigned i = 0, e = Nodes.size(); i < e; ++i)
+      for (const SDep &Succ : Nodes[i]->Succs)
+        if (Nodes.count(Succ.getSUnit()))
+          Latency += Succ.getLatency();
+  }
+
+  bool insert(SUnit *SU) { return Nodes.insert(SU); }
+
+  void insert(iterator S, iterator E) { Nodes.insert(S, E); }
+
+  template <typename UnaryPredicate> bool remove_if(UnaryPredicate P) {
+    return Nodes.remove_if(P);
+  }
+
+  unsigned count(SUnit *SU) const { return Nodes.count(SU); }
+
+  bool hasRecurrence() { return HasRecurrence; }
+
+  unsigned size() const { return Nodes.size(); }
+
+  bool empty() const { return Nodes.empty(); }
+
+  SUnit *getNode(unsigned i) const { return Nodes[i]; }
+
+  void setRecMII(unsigned mii) { RecMII = mii; }
+
+  void setColocate(unsigned c) { Colocate = c; }
+
+  void setExceedPressure(SUnit *SU) { ExceedPressure = SU; }
+
+  bool isExceedSU(SUnit *SU) { return ExceedPressure == SU; }
+
+  int compareRecMII(NodeSet &RHS) { return RecMII - RHS.RecMII; }
+
+  int getRecMII() { return RecMII; }
+
+  /// Summarize node functions for the entire node set.
+  void computeNodeSetInfo(SwingSchedulerDAG *SSD) {
+    for (SUnit *SU : *this) {
+      MaxMOV = std::max(MaxMOV, SSD->getMOV(SU));
+      MaxDepth = std::max(MaxDepth, SSD->getDepth(SU));
+    }
+  }
+
+  unsigned getLatency() { return Latency; }
+
+  unsigned getMaxDepth() { return MaxDepth; }
+
+  void clear() {
+    Nodes.clear();
+    RecMII = 0;
+    HasRecurrence = false;
+    MaxMOV = 0;
+    MaxDepth = 0;
+    Colocate = 0;
+    ExceedPressure = nullptr;
+  }
+
+  operator SetVector<SUnit *> &() { return Nodes; }
+
+  /// Sort the node sets by importance. First, rank them by recurrence MII,
+  /// then by mobility (least mobile done first), and finally by depth.
+  /// Each node set may contain a colocate value which is used as the first
+  /// tie breaker, if it's set.
+  bool operator>(const NodeSet &RHS) const {
+    if (RecMII == RHS.RecMII) {
+      if (Colocate != 0 && RHS.Colocate != 0 && Colocate != RHS.Colocate)
+        return Colocate < RHS.Colocate;
+      if (MaxMOV == RHS.MaxMOV)
+        return MaxDepth > RHS.MaxDepth;
+      return MaxMOV < RHS.MaxMOV;
+    }
+    return RecMII > RHS.RecMII;
+  }
+
+  bool operator==(const NodeSet &RHS) const {
+    return RecMII == RHS.RecMII && MaxMOV == RHS.MaxMOV &&
+           MaxDepth == RHS.MaxDepth;
+  }
+
+  bool operator!=(const NodeSet &RHS) const { return !operator==(RHS); }
+
+  iterator begin() { return Nodes.begin(); }
+  iterator end() { return Nodes.end(); }
+  void print(raw_ostream &os) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  LLVM_DUMP_METHOD void dump() const;
+#endif
+};
+
+/// This class represents the scheduled code.  The main data structure is a
+/// map from scheduled cycle to instructions.  During scheduling, the
+/// data structure explicitly represents all stages/iterations.  When
+/// the algorithm finishes, the schedule is collapsed into a single stage,
+/// which represents instructions from different loop iterations.
+///
+/// The SMS algorithm allows negative values for cycles, so the first cycle
+/// in the schedule is the smallest cycle value.
+class SMSchedule {
+private:
+  /// Map from execution cycle to instructions.
+  DenseMap<int, std::deque<SUnit *>> ScheduledInstrs;
+
+  /// Map from instruction to execution cycle.
+  std::map<SUnit *, int> InstrToCycle;
+
+  /// Map from each register to the max difference between its uses and its
+  /// def. The first element in the pair is the max difference in stages; the
+  /// second is true if the register defines a Phi value and the loop value is
+  /// scheduled before the Phi.
+  std::map<unsigned, std::pair<unsigned, bool>> RegToStageDiff;
+
+  /// Keep track of the first cycle value in the schedule.  It starts
+  /// as zero, but the algorithm allows negative values.
+  int FirstCycle = 0;
+
+  /// Keep track of the last cycle value in the schedule.
+  int LastCycle = 0;
+
+  /// The initiation interval (II) for the schedule.
+  int InitiationInterval = 0;
+
+  /// Target machine information.
+  const TargetSubtargetInfo &ST;
+
+  /// Virtual register information.
+  MachineRegisterInfo &MRI;
+
+  std::unique_ptr<DFAPacketizer> Resources;
+
+public:
+  SMSchedule(MachineFunction *mf)
+      : ST(mf->getSubtarget()), MRI(mf->getRegInfo()),
+        Resources(ST.getInstrInfo()->CreateTargetScheduleState(ST)) {}
+
+  void reset() {
+    ScheduledInstrs.clear();
+    InstrToCycle.clear();
+    RegToStageDiff.clear();
+    FirstCycle = 0;
+    LastCycle = 0;
+    InitiationInterval = 0;
+  }
+
+  /// Set the initiation interval for this schedule.
+  void setInitiationInterval(int ii) { InitiationInterval = ii; }
+
+  /// Return the first cycle in the completed schedule.  This
+  /// can be a negative value.
+  int getFirstCycle() const { return FirstCycle; }
+
+  /// Return the last cycle in the finalized schedule.
+  int getFinalCycle() const { return FirstCycle + InitiationInterval - 1; }
+
+  /// Return the cycle of the earliest scheduled instruction in the dependence
+  /// chain.
+  int earliestCycleInChain(const SDep &Dep);
+
+  /// Return the cycle of the latest scheduled instruction in the dependence
+  /// chain.
+  int latestCycleInChain(const SDep &Dep);
+
+  void computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
+                    int *MinEnd, int *MaxStart, int II, SwingSchedulerDAG *DAG);
+  bool insert(SUnit *SU, int StartCycle, int EndCycle, int II);
+
+  /// Iterators for the cycle to instruction map.
+  using sched_iterator = DenseMap<int, std::deque<SUnit *>>::iterator;
+  using const_sched_iterator =
+      DenseMap<int, std::deque<SUnit *>>::const_iterator;
+
+  /// Return true if the instruction is scheduled at the specified stage.
+  bool isScheduledAtStage(SUnit *SU, unsigned StageNum) {
+    return (stageScheduled(SU) == (int)StageNum);
+  }
+
+  /// Return the stage for a scheduled instruction.  Return -1 if
+  /// the instruction has not been scheduled.
+  int stageScheduled(SUnit *SU) const {
+    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
+    if (it == InstrToCycle.end())
+      return -1;
+    return (it->second - FirstCycle) / InitiationInterval;
+  }
+
+  /// Return the cycle for a scheduled instruction. This function normalizes
+  /// the first cycle to be 0.
+  unsigned cycleScheduled(SUnit *SU) const {
+    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
+    assert(it != InstrToCycle.end() && "Instruction hasn't been scheduled.");
+    return (it->second - FirstCycle) % InitiationInterval;
+  }
+
+  /// Return the maximum stage count needed for this schedule.
+  unsigned getMaxStageCount() {
+    return (LastCycle - FirstCycle) / InitiationInterval;
+  }
+
+  /// Return the max. number of stages/iterations that can occur between a
+  /// register definition and its uses.
+  unsigned getStagesForReg(int Reg, unsigned CurStage) {
+    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
+    if (CurStage > getMaxStageCount() && Stages.first == 0 && Stages.second)
+      return 1;
+    return Stages.first;
+  }
+
+  /// The number of stages for a Phi is a little different from that of
+  /// other instructions. The minimum value computed in RegToStageDiff is 1
+  /// because we assume the Phi is needed for at least 1 iteration.
+  /// This is not the case if the loop value is scheduled prior to the
+  /// Phi in the same stage.  This function returns the number of stages
+  /// or iterations needed between the Phi definition and any uses.
+  unsigned getStagesForPhi(int Reg) {
+    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
+    if (Stages.second)
+      return Stages.first;
+    return Stages.first - 1;
+  }
+
+  /// Return the instructions that are scheduled at the specified cycle.
+  std::deque<SUnit *> &getInstructions(int cycle) {
+    return ScheduledInstrs[cycle];
+  }
+
+  bool isValidSchedule(SwingSchedulerDAG *SSD);
+  void finalizeSchedule(SwingSchedulerDAG *SSD);
+  void orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
+                       std::deque<SUnit *> &Insts);
+  bool isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi);
+  bool isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD, MachineInstr *Def,
+                             MachineOperand &MO);
+  void print(raw_ostream &os) const;
+  void dump() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_CODEGEN_MACHINEPIPELINER_H
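
Two illustrative notes on the header above, not part of the patch. First, the
stage/cycle arithmetic in SMSchedule with concrete numbers: for
FirstCycle = -2 and InitiationInterval = 3, an instruction placed at raw
cycle 4 gets stageScheduled = (4 - (-2)) / 3 = 2 and
cycleScheduled = (4 - (-2)) % 3 = 0, i.e. it runs in the first slot of the II
window, two stages after the earliest instruction. Second, NodeSet::operator>
is what lets the scheduler rank recurrence sets most-constrained-first:

    // Highest RecMII first, then colocation, then least mobility (MaxMOV),
    // then greatest depth -- exactly the order operator> encodes.
    void rankNodeSets(SmallVectorImpl<NodeSet> &NodeSets) {
      std::stable_sort(NodeSets.begin(), NodeSets.end(),
                       std::greater<NodeSet>());
    }
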
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
index 60e2581..acf1ebb 100644
--- a/include/llvm/CodeGen/Passes.h
+++ b/include/llvm/CodeGen/Passes.h
@@ -389,9 +389,10 @@
   ///
   ModulePass *createLowerEmuTLSPass();
 
-  /// This pass lowers the \@llvm.load.relative intrinsic to instructions.
-  /// This is unsafe to do earlier because a pass may combine the constant
-  /// initializer into the load, which may result in an overflowing evaluation.
+  /// This pass lowers the \@llvm.load.relative and \@llvm.objc.* intrinsics to
+  /// instructions.  This is unsafe to do earlier because a pass may combine the
+  /// constant initializer into the load, which may result in an overflowing
+  /// evaluation.
   ModulePass *createPreISelIntrinsicLoweringPass();
 
   /// GlobalMerge - This pass merges internal (by default) globals into structs
diff --git a/include/llvm/CodeGen/PreISelIntrinsicLowering.h b/include/llvm/CodeGen/PreISelIntrinsicLowering.h
index 7a007eb..b7f83e5 100644
--- a/include/llvm/CodeGen/PreISelIntrinsicLowering.h
+++ b/include/llvm/CodeGen/PreISelIntrinsicLowering.h
@@ -7,7 +7,8 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This pass implements IR lowering for the llvm.load.relative intrinsic.
+// This pass implements IR lowering for the llvm.load.relative and llvm.objc.*
+// intrinsics.
 //
 //===----------------------------------------------------------------------===//
 #ifndef LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index 8093fda..67fe87f 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -308,6 +308,9 @@
         : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
 
     void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
+
+   private:
+    virtual void anchor();
   };
 
   /// When true, additional steps are taken to
@@ -1128,6 +1131,13 @@
   /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
   SDValue expandVACopy(SDNode *Node);
 
+  /// Returns a GlobalAddress of the function from the current module with
+  /// name matching the given ExternalSymbol. Additionally, the matched
+  /// function can be returned through \p TargetFunction.
+  /// Panics if the function does not exist.
+  SDValue getSymbolFunctionGlobalAddress(SDValue Op,
+                                         Function **TargetFunction = nullptr);
+
   /// *Mutate* the specified node in-place to have the
   /// specified operands.  If the resultant node already exists in the DAG,
   /// this does not modify the specified node, instead it returns the node that
@@ -1432,18 +1442,6 @@
   KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
                              unsigned Depth = 0) const;
 
-  /// \copydoc SelectionDAG::computeKnownBits(SDValue,unsigned)
-  void computeKnownBits(SDValue Op, KnownBits &Known,
-                        unsigned Depth = 0) const {
-    Known = computeKnownBits(Op, Depth);
-  }
-
-  /// \copydoc SelectionDAG::computeKnownBits(SDValue,const APInt&,unsigned)
-  void computeKnownBits(SDValue Op, KnownBits &Known, const APInt &DemandedElts,
-                        unsigned Depth = 0) const {
-    Known = computeKnownBits(Op, DemandedElts, Depth);
-  }
-
   /// Used to represent the possible overflow behavior of an operation.
   /// Never: the operation cannot overflow.
   /// Always: the operation will always overflow.
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index 2931717..10f2841 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -2487,15 +2487,18 @@
 
   /// Attempt to match a unary predicate against a scalar/splat constant or
   /// every element of a constant BUILD_VECTOR.
+  /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
   bool matchUnaryPredicate(SDValue Op,
-                           std::function<bool(ConstantSDNode *)> Match);
+                           std::function<bool(ConstantSDNode *)> Match,
+                           bool AllowUndefs = false);
 
   /// Attempt to match a binary predicate against a pair of scalar/splat
   /// constants or every element of a pair of constant BUILD_VECTORs.
+  /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
   bool matchBinaryPredicate(
       SDValue LHS, SDValue RHS,
-      std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match);
-
+      std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
+      bool AllowUndefs = false);
 } // end namespace ISD
 
 } // end namespace llvm
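
A sketch of the new AllowUndefs mode (hypothetical helper): the predicate
must now tolerate the nullptr that UNDEF lanes produce.

    // True iff every lane of Op is the all-ones constant or UNDEF.
    bool isAllOnesOrUndef(SDValue Op) {
      return ISD::matchUnaryPredicate(
          Op, [](ConstantSDNode *C) { return !C || C->isAllOnesValue(); },
          /*AllowUndefs=*/true);
    }
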
diff --git a/include/llvm/CodeGen/TargetFrameLowering.h b/include/llvm/CodeGen/TargetFrameLowering.h
index f8effee..b4d1da9 100644
--- a/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/include/llvm/CodeGen/TargetFrameLowering.h
@@ -207,8 +207,11 @@
     return false;
   }
 
-  /// Return true if the target needs to disable frame pointer elimination.
-  virtual bool noFramePointerElim(const MachineFunction &MF) const;
+  /// Return true if the target wants to keep the frame pointer regardless of
+  /// the function attribute "frame-pointer".
+  virtual bool keepFramePointer(const MachineFunction &MF) const {
+    return false;
+  }
 
   /// hasFP - Return true if the specified function should have a dedicated
   /// frame pointer register. For most targets this is true only if the function
diff --git a/include/llvm/CodeGen/TargetLowering.h b/include/llvm/CodeGen/TargetLowering.h
index 8cb9ac8..23dbaac 100644
--- a/include/llvm/CodeGen/TargetLowering.h
+++ b/include/llvm/CodeGen/TargetLowering.h
@@ -2155,6 +2155,8 @@
     case ISD::UADDO:
     case ISD::ADDC:
     case ISD::ADDE:
+    case ISD::SADDSAT:
+    case ISD::UADDSAT:
     case ISD::FMINNUM:
     case ISD::FMAXNUM:
     case ISD::FMINIMUM:
@@ -2407,6 +2409,12 @@
     return false;
   }
 
+  /// Try to convert an extract element of a vector binary operation into an
+  /// extract element followed by a scalar operation.
+  virtual bool shouldScalarizeBinop(SDValue VecOp) const {
+    return false;
+  }
+
   // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
   // even if the vector itself has multiple uses.
   virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
@@ -2889,32 +2897,28 @@
   bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                         TargetLoweringOpt &TLO) const;
 
-  /// Helper for SimplifyDemandedBits that can simplify an operation with
-  /// multiple uses.  This function simplifies operand \p OpIdx of \p User and
-  /// then updates \p User with the simplified version. No other uses of
-  /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
-  /// function behaves exactly like function SimplifyDemandedBits declared
-  /// below except that it also updates the DAG by calling
-  /// DCI.CommitTargetLoweringOpt.
-  bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
-                            DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
-
-  /// Look at Op.  At this point, we know that only the DemandedMask bits of the
+  /// Look at Op.  At this point, we know that only the DemandedBits bits of the
   /// result of Op are ever used downstream.  If we can use this information to
   /// simplify Op, create a new simplified DAG node and return true, returning
   /// the original and new nodes in Old and New.  Otherwise, analyze the
   /// expression and return a mask of KnownOne and KnownZero bits for the
   /// expression (used to simplify the caller).  The KnownZero/One bits may only
-  /// be accurate for those bits in the DemandedMask.
+  /// be accurate for those bits in the Demanded masks.
   /// \p AssumeSingleUse When this parameter is true, this function will
   ///    attempt to simplify \p Op even if there are multiple uses.
   ///    Callers are responsible for correctly updating the DAG based on the
   ///    results of this function, because simply replacing TLO.Old
   ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
   ///    has multiple uses.
-  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
-                            KnownBits &Known,
-                            TargetLoweringOpt &TLO,
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
+                            const APInt &DemandedElts, KnownBits &Known,
+                            TargetLoweringOpt &TLO, unsigned Depth = 0,
+                            bool AssumeSingleUse = false) const;
+
+  /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
+  /// Adds Op back to the worklist upon success.
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
+                            KnownBits &Known, TargetLoweringOpt &TLO,
                             unsigned Depth = 0,
                             bool AssumeSingleUse = false) const;
 
@@ -2985,13 +2989,14 @@
       SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
       APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
 
-  /// Attempt to simplify any target nodes based on the demanded bits,
+  /// Attempt to simplify any target nodes based on the demanded bits/elts,
   /// returning true on success. Otherwise, analyze the
   /// expression and return a mask of KnownOne and KnownZero bits for the
   /// expression (used to simplify the caller).  The KnownZero/One bits may only
-  /// be accurate for those bits in the DemandedMask.
+  /// be accurate for those bits in the Demanded masks.
   virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                                  const APInt &DemandedBits,
+                                                 const APInt &DemandedElts,
                                                  KnownBits &Known,
                                                  TargetLoweringOpt &TLO,
                                                  unsigned Depth = 0) const;
@@ -3083,6 +3088,15 @@
     return true;
   }
 
+  /// Return true if it is profitable to fold a pair of shifts into a mask.
+  /// This is usually true on most targets. But some targets, like Thumb1,
+  /// have immediate shift instructions but no immediate "and" instruction;
+  /// this makes the fold unprofitable.
+  virtual bool shouldFoldShiftPairToMask(const SDNode *N,
+                                         CombineLevel Level) const {
+    return true;
+  }
+
   // Return true if it is profitable to combine a BUILD_VECTOR with a stride-pattern
   // to a shuffle and a truncate.
   // Example of such a combine:
@@ -3775,6 +3789,14 @@
   /// \returns True, if the expansion was successful, false otherwise
   bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
 
+  /// Expand ABS nodes. Expands vector/scalar ABS nodes; vector expansion
+  /// can only succeed if all operations are legal/custom.
+  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size - 1)), (SRA x, type_size - 1))
+  /// \param N Node to expand
+  /// \param Result output after conversion
+  /// \returns True, if the expansion was successful, false otherwise
+  bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
   /// Turn load of vector type into a load of the individual elements.
   /// \param LD load to expand
   /// \returns MERGE_VALUEs of the scalar loads with their chains.
@@ -3814,8 +3836,7 @@
 
   /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
   /// method accepts integers as its arguments.
-  SDValue getExpandedSaturationAdditionSubtraction(SDNode *Node,
-                                                   SelectionDAG &DAG) const;
+  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
 
   /// Method for building the DAG expansion of ISD::SMULFIX. This method accepts
   /// integers as its arguments.
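
The identity behind expandABS, checked in plain C++ for 32 bits (unsigned
intermediates avoid signed-overflow UB; an arithmetic right shift is assumed,
and, like the node expansion, this wraps for INT32_MIN):

    int32_t absViaShifts(int32_t X) {
      uint32_t UX = (uint32_t)X;
      uint32_t Sign = (uint32_t)(X >> 31); // 0 if X >= 0, all-ones otherwise
      return (int32_t)((UX + Sign) ^ Sign); // (x + sra) ^ sra == |x|
    }
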
diff --git a/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h b/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h
index 6bfd1b1..3713fe1 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h
@@ -51,6 +51,9 @@
 /// Given a symbol P for which symbolOpensScope(P) == true, return the
 /// corresponding end offset.
 uint32_t getScopeEndOffset(const CVSymbol &Symbol);
+/// Given a symbol P for which symbolOpensScope(P) == true, return the
+/// offset of its parent scope record.
+uint32_t getScopeParentOffset(const CVSymbol &Symbol);
 
 CVSymbolArray limitSymbolArrayToScope(const CVSymbolArray &Symbols,
                                       uint32_t ScopeBegin);
diff --git a/include/llvm/DebugInfo/CodeView/TypeRecord.h b/include/llvm/DebugInfo/CodeView/TypeRecord.h
index e7b5293..7b4a30e 100644
--- a/include/llvm/DebugInfo/CodeView/TypeRecord.h
+++ b/include/llvm/DebugInfo/CodeView/TypeRecord.h
@@ -95,6 +95,11 @@
     return MP == MethodKind::IntroducingVirtual ||
            MP == MethodKind::PureIntroducingVirtual;
   }
+
+  /// Is this method static.
+  bool isStatic() const {
+    return getMethodKind() == MethodKind::Static;
+  }
 };
 
 // Does not correspond to any tag, this is the tail of an LF_POINTER record
diff --git a/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h b/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
index a84f074..0b9f54e 100644
--- a/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
+++ b/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
@@ -84,20 +84,20 @@
                             MergingTypeTableBuilder &DestTypes,
                             SmallVectorImpl<TypeIndex> &SourceToDest,
                             const CVTypeArray &IdsAndTypes,
-                            Optional<EndPrecompRecord> &EndPrecomp);
+                            Optional<uint32_t> &PCHSignature);
 
 Error mergeTypeAndIdRecords(GlobalTypeTableBuilder &DestIds,
                             GlobalTypeTableBuilder &DestTypes,
                             SmallVectorImpl<TypeIndex> &SourceToDest,
                             const CVTypeArray &IdsAndTypes,
                             ArrayRef<GloballyHashedType> Hashes,
-                            Optional<EndPrecompRecord> &EndPrecomp);
+                            Optional<uint32_t> &PCHSignature);
 
 Error mergeTypeRecords(GlobalTypeTableBuilder &Dest,
                        SmallVectorImpl<TypeIndex> &SourceToDest,
                        const CVTypeArray &Types,
                        ArrayRef<GloballyHashedType> Hashes,
-                       Optional<EndPrecompRecord> &EndPrecomp);
+                       Optional<uint32_t> &PCHSignature);
 
 Error mergeIdRecords(GlobalTypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
                      SmallVectorImpl<TypeIndex> &SourceToDest,
diff --git a/include/llvm/DebugInfo/DIContext.h b/include/llvm/DebugInfo/DIContext.h
index bbdd5e0..85e9640 100644
--- a/include/llvm/DebugInfo/DIContext.h
+++ b/include/llvm/DebugInfo/DIContext.h
@@ -81,7 +81,7 @@
 public:
   DIInliningInfo() = default;
 
-  DILineInfo getFrame(unsigned Index) const {
+  const DILineInfo &getFrame(unsigned Index) const {
     assert(Index < Frames.size());
     return Frames[Index];
   }
@@ -98,6 +98,11 @@
   void addFrame(const DILineInfo &Frame) {
     Frames.push_back(Frame);
   }
+
+  void resize(unsigned i) {
+    Frames.resize(i);
+  }
+
 };
 
 /// Container for description of a global variable.
diff --git a/include/llvm/DebugInfo/DWARF/DWARFContext.h b/include/llvm/DebugInfo/DWARF/DWARFContext.h
index 221f1f7..dbb6be0 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -360,6 +360,10 @@
   /// Dump Error as warning message to stderr.
   static void dumpWarning(Error Warning);
 
+  Triple::ArchType getArch() const {
+    return getDWARFObj().getFile()->getArch();
+  }
+
 private:
   /// Return the compile unit which contains instruction with provided
   /// address.
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h b/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
index ff1c7fb..7dc07d7 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
@@ -13,6 +13,7 @@
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/iterator.h"
 #include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Triple.h"
 #include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
 #include "llvm/DebugInfo/DWARF/DWARFExpression.h"
 #include "llvm/Support/Error.h"
@@ -59,9 +60,11 @@
   unsigned size() const { return (unsigned)Instructions.size(); }
   bool empty() const { return Instructions.empty(); }
 
-  CFIProgram(uint64_t CodeAlignmentFactor, int64_t DataAlignmentFactor)
+  CFIProgram(uint64_t CodeAlignmentFactor, int64_t DataAlignmentFactor,
+             Triple::ArchType Arch)
       : CodeAlignmentFactor(CodeAlignmentFactor),
-        DataAlignmentFactor(DataAlignmentFactor) {}
+        DataAlignmentFactor(DataAlignmentFactor),
+        Arch(Arch) {}
 
   /// Parse and store a sequence of CFI instructions from Data,
   /// starting at *Offset and ending at EndOffset. *Offset is updated
@@ -76,6 +79,7 @@
   std::vector<Instruction> Instructions;
   const uint64_t CodeAlignmentFactor;
   const int64_t DataAlignmentFactor;
+  Triple::ArchType Arch;
 
   /// Convenience method to add a new instruction with the given opcode.
   void addInstruction(uint8_t Opcode) {
@@ -130,8 +134,9 @@
   enum FrameKind { FK_CIE, FK_FDE };
 
   FrameEntry(FrameKind K, uint64_t Offset, uint64_t Length, uint64_t CodeAlign,
-             int64_t DataAlign)
-      : Kind(K), Offset(Offset), Length(Length), CFIs(CodeAlign, DataAlign) {}
+             int64_t DataAlign, Triple::ArchType Arch)
+      : Kind(K), Offset(Offset), Length(Length),
+        CFIs(CodeAlign, DataAlign, Arch) {}
 
   virtual ~FrameEntry() {}
 
@@ -168,9 +173,9 @@
       int64_t DataAlignmentFactor, uint64_t ReturnAddressRegister,
       SmallString<8> AugmentationData, uint32_t FDEPointerEncoding,
       uint32_t LSDAPointerEncoding, Optional<uint64_t> Personality,
-      Optional<uint32_t> PersonalityEnc)
+      Optional<uint32_t> PersonalityEnc, Triple::ArchType Arch)
       : FrameEntry(FK_CIE, Offset, Length, CodeAlignmentFactor,
-                   DataAlignmentFactor),
+                   DataAlignmentFactor, Arch),
         Version(Version), Augmentation(std::move(Augmentation)),
         AddressSize(AddressSize), SegmentDescriptorSize(SegmentDescriptorSize),
         CodeAlignmentFactor(CodeAlignmentFactor),
@@ -224,10 +229,11 @@
   // is obtained lazily once it's actually required.
   FDE(uint64_t Offset, uint64_t Length, int64_t LinkedCIEOffset,
       uint64_t InitialLocation, uint64_t AddressRange, CIE *Cie,
-      Optional<uint64_t> LSDAAddress)
+      Optional<uint64_t> LSDAAddress, Triple::ArchType Arch)
       : FrameEntry(FK_FDE, Offset, Length,
                    Cie ? Cie->getCodeAlignmentFactor() : 0,
-                   Cie ? Cie->getDataAlignmentFactor() : 0),
+                   Cie ? Cie->getDataAlignmentFactor() : 0,
+                   Arch),
         LinkedCIEOffset(LinkedCIEOffset), InitialLocation(InitialLocation),
         AddressRange(AddressRange), LinkedCIE(Cie), LSDAAddress(LSDAAddress) {}
 
@@ -256,6 +262,7 @@
 
 /// A parsed .debug_frame or .eh_frame section
 class DWARFDebugFrame {
+  const Triple::ArchType Arch;
   // True if this is parsing an eh_frame section.
   const bool IsEH;
   // Not zero for sane pointer values coming out of eh_frame
@@ -272,7 +279,8 @@
   // it is a .debug_frame section. EHFrameAddress should be different
   // than zero for correct parsing of .eh_frame addresses when they
   // use a PC-relative encoding.
-  DWARFDebugFrame(bool IsEH = false, uint64_t EHFrameAddress = 0);
+  DWARFDebugFrame(Triple::ArchType Arch,
+                  bool IsEH = false, uint64_t EHFrameAddress = 0);
   ~DWARFDebugFrame();
 
   /// Dump the section data into the given stream.
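
A minimal caller sketch of the reworked constructor above, assuming an already-loaded object file; the helper name and surrounding setup are assumptions, not part of this patch:

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/Triple.h"
    #include "llvm/DebugInfo/DWARF/DWARFDebugFrame.h"
    #include "llvm/Object/ObjectFile.h"
    #include <memory>
    using namespace llvm;

    // Sketch: thread the object file's architecture into the frame parser so
    // parsed CFI programs can be dumped with arch-aware register names.
    std::unique_ptr<DWARFDebugFrame>
    makeEHFrameParser(const object::ObjectFile &Obj, uint64_t EHFrameAddress) {
      Triple::ArchType Arch = Obj.makeTriple().getArch();
      return llvm::make_unique<DWARFDebugFrame>(Arch, /*IsEH=*/true,
                                                EHFrameAddress);
    }
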
diff --git a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index edf9442..727e853 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -61,7 +61,6 @@
 
   dwarf::Form getForm() const { return Form; }
   uint64_t getRawUValue() const { return Value.uval; }
-  uint64_t getSectionIndex() const { return Value.SectionIndex; }
   void setForm(dwarf::Form F) { Form = F; }
   void setUValue(uint64_t V) { Value.uval = V; }
   void setSValue(int64_t V) { Value.sval = V; }
@@ -75,6 +74,10 @@
   bool isFormClass(FormClass FC) const;
   const DWARFUnit *getUnit() const { return U; }
   void dump(raw_ostream &OS, DIDumpOptions DumpOpts = DIDumpOptions()) const;
+  void dumpSectionedAddress(raw_ostream &OS, DIDumpOptions DumpOpts,
+                            SectionedAddress SA) const;
+  static void dumpAddressSection(const DWARFObject &Obj, raw_ostream &OS,
+                                 DIDumpOptions DumpOpts, uint64_t SectionIndex);
 
   /// Extracts a value in \p Data at offset \p *OffsetPtr. The information
   /// in \p FormParams is needed to interpret some forms. The optional
diff --git a/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index 9909def..79c3ce1 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -410,7 +410,7 @@
     return None;
   }
 
-  void collectAddressRanges(DWARFAddressRangesVector &CURanges);
+  Expected<DWARFAddressRangesVector> collectAddressRanges();
 
   /// Returns subprogram DIE with address range encompassing the provided
   /// address. The pointer is alive as long as parsed compile unit DIEs are not
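
Since collectAddressRanges() now returns Expected instead of filling an out-parameter, callers must consume the error on the failure path. A sketch of the new calling convention (the helper is hypothetical):

    #include "llvm/DebugInfo/DWARF/DWARFUnit.h"
    #include "llvm/Support/Error.h"
    using namespace llvm;

    // Sketch: Expected<T> makes the failure path explicit; an unhandled error
    // asserts in debug builds, so it must be taken and consumed (or reported).
    static uint64_t countRanges(DWARFUnit &CU) {
      Expected<DWARFAddressRangesVector> RangesOrErr = CU.collectAddressRanges();
      if (!RangesOrErr) {
        consumeError(RangesOrErr.takeError());
        return 0;
      }
      return RangesOrErr->size();
    }
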
diff --git a/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
index 19b0ebd..8d590df 100644
--- a/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
@@ -44,6 +44,8 @@
   symbols(bool *HadError) const;
 
   const codeview::CVSymbolArray &getSymbolArray() const { return SymbolArray; }
+  const codeview::CVSymbolArray
+  getSymbolArrayForScope(uint32_t ScopeBegin) const;
 
   BinarySubstreamRef getSymbolsSubstream() const;
   BinarySubstreamRef getC11LinesSubstream() const;
diff --git a/include/llvm/Demangle/MicrosoftDemangle.h b/include/llvm/Demangle/MicrosoftDemangle.h
index b186758..97b918f 100644
--- a/include/llvm/Demangle/MicrosoftDemangle.h
+++ b/include/llvm/Demangle/MicrosoftDemangle.h
@@ -245,7 +245,7 @@
   FuncClass demangleFunctionClass(StringView &MangledName);
   CallingConv demangleCallingConvention(StringView &MangledName);
   StorageClass demangleVariableStorageClass(StringView &MangledName);
-  void demangleThrowSpecification(StringView &MangledName);
+  bool demangleThrowSpecification(StringView &MangledName);
   wchar_t demangleWcharLiteral(StringView &MangledName);
   uint8_t demangleCharLiteral(StringView &MangledName);
 
diff --git a/include/llvm/Demangle/MicrosoftDemangleNodes.h b/include/llvm/Demangle/MicrosoftDemangleNodes.h
index 1d0b66a..9e3478e 100644
--- a/include/llvm/Demangle/MicrosoftDemangleNodes.h
+++ b/include/llvm/Demangle/MicrosoftDemangleNodes.h
@@ -53,6 +53,7 @@
 enum OutputFlags {
   OF_Default = 0,
   OF_NoCallingConvention = 1,
+  OF_NoTagSpecifier = 2,
 };
 
 // Types
@@ -235,6 +236,8 @@
 
   virtual void output(OutputStream &OS, OutputFlags Flags) const = 0;
 
+  std::string toString(OutputFlags Flags = OF_Default) const;
+
 private:
   NodeKind Kind;
 };
@@ -320,6 +323,9 @@
 
   // Function parameters
   NodeArrayNode *Params = nullptr;
+
+  // True if the function type is noexcept
+  bool IsNoexcept = false;
 };
 
 struct IdentifierNode : public Node {
diff --git a/include/llvm/ExecutionEngine/Orc/OrcABISupport.h b/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
index 49e7b53..a70fc37 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
@@ -287,7 +287,7 @@
 public:
   static const unsigned PointerSize = 8;
   static const unsigned TrampolineSize = 40;
-  static const unsigned ResolverCodeSize = 0x11C;
+  static const unsigned ResolverCodeSize = 0x120;
 
   using IndirectStubsInfo = GenericIndirectStubsInfo<32>;
   using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
diff --git a/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
index 401f6e3..6f90f03 100644
--- a/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -443,11 +443,14 @@
 private:
   ExecutionSession &ES;
 
-  std::map<VModuleKey, std::unique_ptr<LinkedObject>> LinkedObjects;
   ResourcesGetter GetResources;
   NotifyLoadedFtor NotifyLoaded;
   NotifyFinalizedFtor NotifyFinalized;
   NotifyFreedFtor NotifyFreed;
+
+  // NB!  `LinkedObjects` needs to be destroyed before `NotifyFreed` because
+  // `~ConcreteLinkedObject` calls `NotifyFreed`.
+  std::map<VModuleKey, std::unique_ptr<LinkedObject>> LinkedObjects;
   bool ProcessAllSections = false;
 };
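
The NB above leans on the C++ rule that non-static data members are destroyed in reverse declaration order, so the member declared last is destroyed first. A standalone illustration with hypothetical stand-in types:

    #include <cstdio>

    struct Notifier { ~Notifier() { std::puts("Notifier destroyed"); } };
    struct Object { ~Object() { std::puts("Object destroyed"); } };

    // Destruction runs bottom-up: Obj is destroyed while Notify is still
    // alive, mirroring why `LinkedObjects` must be declared after
    // `NotifyFreed`.
    struct Holder {
      Notifier Notify; // destroyed second
      Object Obj;      // destroyed first, may still use Notify
    };

    int main() { Holder H; } // prints "Object destroyed", then "Notifier destroyed"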
 
diff --git a/include/llvm/IR/DIBuilder.h b/include/llvm/IR/DIBuilder.h
index 1fe7b36..443332b 100644
--- a/include/llvm/IR/DIBuilder.h
+++ b/include/llvm/IR/DIBuilder.h
@@ -502,11 +502,11 @@
     /// \param Elements       Enumeration elements.
     /// \param UnderlyingType Underlying type of a C++11/ObjC fixed enum.
     /// \param UniqueIdentifier A unique identifier for the enum.
-    /// \param IsFixed Boolean flag indicate if this is C++11/ObjC fixed enum.
+    /// \param IsScoped Boolean flag indicating whether this is a C++11/ObjC
+    /// 'enum class'.
     DICompositeType *createEnumerationType(
         DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
         uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
-        DIType *UnderlyingType, StringRef UniqueIdentifier = "", bool IsFixed = false);
+        DIType *UnderlyingType, StringRef UniqueIdentifier = "", bool IsScoped = false);
 
     /// Create subroutine type.
     /// \param ParameterTypes  An array of subroutine parameter types. This
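
As an illustration of the renamed flag, a producer describing the C++11 type 'enum class Color : int' would pass IsScoped as follows; a sketch, with the file names and values invented:

    #include "llvm/BinaryFormat/Dwarf.h"
    #include "llvm/IR/DIBuilder.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Sketch: emit debug metadata for 'enum class Color : int'.
    DICompositeType *emitColorEnum(Module &M) {
      DIBuilder DIB(M);
      DIFile *File = DIB.createFile("color.cpp", "/tmp");
      DIType *Int32Ty = DIB.createBasicType("int", 32, dwarf::DW_ATE_signed);
      Metadata *Enumerators[] = {DIB.createEnumerator("Red", 0),
                                 DIB.createEnumerator("Green", 1)};
      return DIB.createEnumerationType(
          File, "Color", File, /*LineNumber=*/3, /*SizeInBits=*/32,
          /*AlignInBits=*/32, DIB.getOrCreateArray(Enumerators),
          /*UnderlyingType=*/Int32Ty, /*UniqueIdentifier=*/"",
          /*IsScoped=*/true);
    }
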
diff --git a/include/llvm/IR/DataLayout.h b/include/llvm/IR/DataLayout.h
index d796a65..c144d1c 100644
--- a/include/llvm/IR/DataLayout.h
+++ b/include/llvm/IR/DataLayout.h
@@ -334,6 +334,9 @@
   /// the backends/clients are updated.
   unsigned getPointerSize(unsigned AS = 0) const;
 
+  /// Returns the maximum pointer size over all address spaces.
+  unsigned getMaxPointerSize() const;
+
   // Index size used for address calculation.
   unsigned getIndexSize(unsigned AS) const;
 
@@ -361,6 +364,11 @@
     return getPointerSize(AS) * 8;
   }
 
+  /// Returns the maximum pointer size over all address spaces.
+  unsigned getMaxPointerSizeInBits() const {
+    return getMaxPointerSize() * 8;
+  }
+
   /// Size in bits of index used for address calculation in getelementptr.
   unsigned getIndexSizeInBits(unsigned AS) const {
     return getIndexSize(AS) * 8;
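
The new accessors are the per-address-space maximum of the existing getPointerSize(), with the usual bytes-to-bits scaling. A self-contained sketch using an illustrative two-address-space layout string:

    #include "llvm/IR/DataLayout.h"
    #include <cassert>
    using namespace llvm;

    int main() {
      // 32-bit pointers in addrspace(0), 64-bit pointers in addrspace(1).
      DataLayout DL("p:32:32-p1:64:64");
      assert(DL.getPointerSize(0) == 4 && DL.getPointerSize(1) == 8);
      assert(DL.getMaxPointerSize() == 8);        // bytes, max over all spaces
      assert(DL.getMaxPointerSizeInBits() == 64); // the same, scaled by 8
    }
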
diff --git a/include/llvm/IR/DebugInfoFlags.def b/include/llvm/IR/DebugInfoFlags.def
index a0a9c6b..ce117aa 100644
--- a/include/llvm/IR/DebugInfoFlags.def
+++ b/include/llvm/IR/DebugInfoFlags.def
@@ -54,7 +54,7 @@
 HANDLE_DI_FLAG((1 << 21), MainSubprogram)
 HANDLE_DI_FLAG((1 << 22), TypePassByValue)
 HANDLE_DI_FLAG((1 << 23), TypePassByReference)
-HANDLE_DI_FLAG((1 << 24), FixedEnum)
+HANDLE_DI_FLAG((1 << 24), EnumClass)
 HANDLE_DI_FLAG((1 << 25), Thunk)
 HANDLE_DI_FLAG((1 << 26), Trivial)
 HANDLE_DI_FLAG((1 << 27), BigEndian)
diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h
index 0830b5c..a461d1b 100644
--- a/include/llvm/IR/DebugInfoMetadata.h
+++ b/include/llvm/IR/DebugInfoMetadata.h
@@ -1430,6 +1430,9 @@
 
   /// Reverse transformation as getPrefixEncodingFromUnsigned.
   static unsigned getUnsignedFromPrefixEncoding(unsigned U) {
+    if (U & 1)
+      return 0;
+    U >>= 1;
     return (U & 0x20) ? (((U >> 1) & 0xfe0) | (U & 0x1f)) : (U & 0x1f);
   }
 
@@ -1448,6 +1451,14 @@
                         getRawInlinedAt(), isImplicitCode());
   }
 
+  static unsigned encodeComponent(unsigned C) {
+    return (C == 0) ? 1U : (getPrefixEncodingFromUnsigned(C) << 1);
+  }
+
+  static unsigned encodingBits(unsigned C) {
+    return (C == 0) ? 1 : (C > 0x1f ? 14 : 7);
+  }
+
 public:
   // Disallow replacing operands.
   void replaceOperandWith(unsigned I, Metadata *New) = delete;
@@ -1518,20 +1529,35 @@
   /// order. If the lowest bit is 1, the current component is empty, and the
   /// next component will start in the next bit. Otherwise, the current
   /// component is non-empty, and its content starts in the next bit. The
-  /// length of each components is either 5 bit or 12 bit: if the 7th bit
+  /// value of each component is either 5 bits or 12 bits: if the 7th bit
   /// is 0, the bit 2~6 (5 bits) are used to represent the component; if the
   /// 7th bit is 1, the bit 2~6 (5 bits) and 8~14 (7 bits) are combined to
-  /// represent the component.
+  /// represent the component. Thus, the number of bits used for a component
+  /// is either 0 (if it and all the next components are empty); 1 if it is
+  /// empty; 7 if its value is up to and including 0x1f (lsb and msb are both
+  /// 0); or 14 if its value is up to and including 0xfff. Note that the last
+  /// component is also capped at 0xfff, even in the case when both first
+  /// components are 0, and we'd technically have 29 bits available.
+  ///
+  /// For precise control over the data being encoded in the discriminator,
+  /// use encodeDiscriminator/decodeDiscriminator.
+  ///
+  /// Read the documentation for {get|set}BaseDiscriminator and
+  /// cloneWithDuplicationFactor before using them, as they have side effects.
 
   inline unsigned getDiscriminator() const;
 
   /// Returns a new DILocation with updated \p Discriminator.
   inline const DILocation *cloneWithDiscriminator(unsigned Discriminator) const;
 
-  /// Returns a new DILocation with updated base discriminator \p BD.
-  inline const DILocation *setBaseDiscriminator(unsigned BD) const;
+  /// Returns a new DILocation with updated base discriminator \p BD. Only the
+  /// base discriminator is set in the new DILocation; the other encoded values
+  /// are elided.
+  /// If the discriminator cannot be encoded, the function returns None.
+  inline Optional<const DILocation *> setBaseDiscriminator(unsigned BD) const;
 
-  /// Returns the duplication factor stored in the discriminator.
+  /// Returns the duplication factor stored in the discriminator, or 1 if no
+  /// duplication factor (or 0) is encoded.
   inline unsigned getDuplicationFactor() const;
 
   /// Returns the copy identifier stored in the discriminator.
@@ -1540,9 +1566,11 @@
   /// Returns the base discriminator stored in the discriminator.
   inline unsigned getBaseDiscriminator() const;
 
-  /// Returns a new DILocation with duplication factor \p DF encoded in the
-  /// discriminator.
-  inline const DILocation *cloneWithDuplicationFactor(unsigned DF) const;
+  /// Returns a new DILocation with duplication factor \p DF * current
+  /// duplication factor encoded in the discriminator. The current duplication
+  /// factor is as defined by getDuplicationFactor().
+  /// Returns None if encoding failed.
+  inline Optional<const DILocation *>
+  cloneWithDuplicationFactor(unsigned DF) const;
 
   /// When two instructions are combined into a single instruction we also
   /// need to combine the original locations into a single location.
@@ -1563,19 +1591,31 @@
 
   /// Returns the base discriminator for a given encoded discriminator \p D.
   static unsigned getBaseDiscriminatorFromDiscriminator(unsigned D) {
-    if ((D & 1) == 0)
-      return getUnsignedFromPrefixEncoding(D >> 1);
-    else
-      return 0;
+    return getUnsignedFromPrefixEncoding(D);
   }
 
-  /// Returns the duplication factor for a given encoded discriminator \p D.
+  /// Raw encoding of the discriminator. APIs such as setBaseDiscriminator and
+  /// cloneWithDuplicationFactor have side effects; this API, in conjunction
+  /// with cloneWithDiscriminator, may be used to encode precisely the values
+  /// provided. \p BD is the base discriminator, \p DF the duplication factor,
+  /// and \p CI the copy index.
+  /// Returns None if the values cannot be encoded in 32 bits, for example
+  /// when BD or DF is larger than 12 bits; otherwise, returns the encoded
+  /// value.
+  static Optional<unsigned> encodeDiscriminator(unsigned BD, unsigned DF,
+                                                unsigned CI);
+
+  /// Raw decoder for values in an encoded discriminator D.
+  static void decodeDiscriminator(unsigned D, unsigned &BD, unsigned &DF,
+                                  unsigned &CI);
+
+  /// Returns the duplication factor for a given encoded discriminator \p D, or
+  /// 1 if the encoded duplication factor is absent or 0.
   static unsigned getDuplicationFactorFromDiscriminator(unsigned D) {
     D = getNextComponentInDiscriminator(D);
-    if (D == 0 || (D & 1))
+    unsigned Ret = getUnsignedFromPrefixEncoding(D);
+    if (Ret == 0)
       return 1;
-    else
-      return getUnsignedFromPrefixEncoding(D >> 1);
+    return Ret;
   }
 
   /// Returns the copy identifier for a given encoded discriminator \p D.
@@ -1999,28 +2039,24 @@
   return getCopyIdentifierFromDiscriminator(getDiscriminator());
 }
 
-const DILocation *DILocation::setBaseDiscriminator(unsigned D) const {
+Optional<const DILocation *> DILocation::setBaseDiscriminator(unsigned D) const {
   if (D == 0)
     return this;
-  else
-    return cloneWithDiscriminator(getPrefixEncodingFromUnsigned(D) << 1);
+  if (D > 0xfff)
+    return None;
+  return cloneWithDiscriminator(encodeComponent(D));
 }
 
-const DILocation *DILocation::cloneWithDuplicationFactor(unsigned DF) const {
+Optional<const DILocation *>
+DILocation::cloneWithDuplicationFactor(unsigned DF) const {
   DF *= getDuplicationFactor();
   if (DF <= 1)
     return this;
 
   unsigned BD = getBaseDiscriminator();
-  unsigned CI = getCopyIdentifier() << (DF > 0x1f ? 14 : 7);
-  unsigned D = CI | (getPrefixEncodingFromUnsigned(DF) << 1);
-
-  if (BD == 0)
-    D = (D << 1) | 1;
-  else
-    D = (D << (BD > 0x1f ? 14 : 7)) | (getPrefixEncodingFromUnsigned(BD) << 1);
-
-  return cloneWithDiscriminator(D);
+  unsigned CI = getCopyIdentifier();
+  if (Optional<unsigned> D = encodeDiscriminator(BD, DF, CI))
+    return cloneWithDiscriminator(*D);
+  return None;
 }
 
 class DINamespace : public DIScope {
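
To make the component scheme documented above concrete, here is a self-contained sketch that mirrors (rather than calls) the encoding helpers, round-tripping every 12-bit value:

    #include <cassert>

    // One discriminator component, as documented above: lsb 1 means "empty";
    // otherwise 5 value bits follow, and if the continuation bit (the 7th bit
    // overall) is set, 7 more value bits follow, for 12 value bits in total.
    static unsigned encodeComponent(unsigned C) {
      if (C == 0)
        return 1; // the 1-bit "empty" marker
      unsigned Prefix = C > 0x1f ? (((C & 0xfe0) << 1) | (C & 0x1f) | 0x20) : C;
      return Prefix << 1; // lsb 0: non-empty
    }

    // Behaviorally identical to getUnsignedFromPrefixEncoding in this patch.
    static unsigned decodeComponent(unsigned U) {
      if (U & 1)
        return 0;
      U >>= 1;
      return (U & 0x20) ? (((U >> 1) & 0xfe0) | (U & 0x1f)) : (U & 0x1f);
    }

    int main() {
      for (unsigned C = 0; C <= 0xfff; ++C) // 0xfff: the 12-bit cap
        assert(decodeComponent(encodeComponent(C)) == C);
    }
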
diff --git a/include/llvm/IR/DiagnosticInfo.h b/include/llvm/IR/DiagnosticInfo.h
index b5ed2c7..3a55a7d 100644
--- a/include/llvm/IR/DiagnosticInfo.h
+++ b/include/llvm/IR/DiagnosticInfo.h
@@ -101,6 +101,7 @@
   /// Severity gives the severity of the diagnostic.
   const DiagnosticSeverity Severity;
 
+  virtual void anchor();
 public:
   DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity)
       : Kind(Kind), Severity(Severity) {}
@@ -210,6 +211,7 @@
 };
 
 class DiagnosticInfoStackSize : public DiagnosticInfoResourceLimit {
+  virtual void anchor() override;
 public:
   DiagnosticInfoStackSize(const Function &Fn, uint64_t StackSize,
                           DiagnosticSeverity Severity = DS_Warning,
@@ -360,6 +362,7 @@
 
 /// Common features for diagnostics with an associated location.
 class DiagnosticInfoWithLocationBase : public DiagnosticInfo {
+  virtual void anchor() override;
 public:
   /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
   /// the location information to use in the diagnostic.
@@ -598,6 +601,7 @@
 /// Common features for diagnostics dealing with optimization remarks
 /// that are used by IR passes.
 class DiagnosticInfoIROptimization : public DiagnosticInfoOptimizationBase {
+  virtual void anchor() override;
 public:
   /// \p PassName is the name of the pass emitting this diagnostic. \p
   /// RemarkName is a textual identifier for the remark (single-word,
@@ -818,6 +822,7 @@
 /// Diagnostic information for optimization analysis remarks related to
 /// floating-point non-commutativity.
 class OptimizationRemarkAnalysisFPCommute : public OptimizationRemarkAnalysis {
+  virtual void anchor();
 public:
   /// \p PassName is the name of the pass emitting this diagnostic. If this name
   /// matches the regular expression given in -Rpass-analysis=, then the
@@ -859,6 +864,7 @@
 /// Diagnostic information for optimization analysis remarks related to
 /// pointer aliasing.
 class OptimizationRemarkAnalysisAliasing : public OptimizationRemarkAnalysis {
+  virtual void anchor();
 public:
   /// \p PassName is the name of the pass emitting this diagnostic. If this name
   /// matches the regular expression given in -Rpass-analysis=, then the
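
The anchor() declarations added throughout this file follow the usual key-function idiom: one out-of-line virtual member pins a class's vtable (and RTTI) to a single object file instead of letting every translation unit emit weak copies. A minimal sketch with hypothetical names:

    // Widget.h
    struct Widget {
      virtual ~Widget() = default;
      virtual void anchor(); // declared in the header...
    };

    // Widget.cpp -- ...defined in exactly one TU, which therefore owns the
    // vtable.
    void Widget::anchor() {}
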
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index e89c443..fac2ff4 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -889,19 +889,59 @@
   }
 
   /// Create an invoke instruction.
-  InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
+                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
+                           ArrayRef<Value *> Args,
+                           ArrayRef<OperandBundleDef> OpBundles,
+                           const Twine &Name = "") {
+    return Insert(
+        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles),
+        Name);
+  }
+  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
+                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
+                           ArrayRef<Value *> Args = None,
+                           const Twine &Name = "") {
+    return Insert(InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args),
+                  Name);
+  }
+
+  InvokeInst *CreateInvoke(Function *Callee, BasicBlock *NormalDest,
+                           BasicBlock *UnwindDest, ArrayRef<Value *> Args,
+                           ArrayRef<OperandBundleDef> OpBundles,
+                           const Twine &Name = "") {
+    return CreateInvoke(Callee->getFunctionType(), Callee, NormalDest,
+                        UnwindDest, Args, OpBundles, Name);
+  }
+
+  InvokeInst *CreateInvoke(Function *Callee, BasicBlock *NormalDest,
                            BasicBlock *UnwindDest,
                            ArrayRef<Value *> Args = None,
                            const Twine &Name = "") {
-    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
-                  Name);
+    return CreateInvoke(Callee->getFunctionType(), Callee, NormalDest,
+                        UnwindDest, Args, Name);
   }
+
+  // Deprecated [opaque pointer types]
   InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
                            BasicBlock *UnwindDest, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> OpBundles,
                            const Twine &Name = "") {
-    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args,
-                                     OpBundles), Name);
+    return CreateInvoke(
+        cast<FunctionType>(
+            cast<PointerType>(Callee->getType())->getElementType()),
+        Callee, NormalDest, UnwindDest, Args, OpBundles, Name);
+  }
+
+  // Deprecated [opaque pointer types]
+  InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+                           BasicBlock *UnwindDest,
+                           ArrayRef<Value *> Args = None,
+                           const Twine &Name = "") {
+    return CreateInvoke(
+        cast<FunctionType>(
+            cast<PointerType>(Callee->getType())->getElementType()),
+        Callee, NormalDest, UnwindDest, Args, Name);
   }
 
   ResumeInst *CreateResume(Value *Exn) {
@@ -1312,22 +1352,35 @@
     return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize), Name);
   }
 
-  /// Provided to resolve 'CreateLoad(Ptr, "...")' correctly, instead of
+  /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
   /// converting the string to 'bool' for the isVolatile parameter.
-  LoadInst *CreateLoad(Value *Ptr, const char *Name) {
-    return Insert(new LoadInst(Ptr), Name);
-  }
-
-  LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
-    return Insert(new LoadInst(Ptr), Name);
+  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
+    return Insert(new LoadInst(Ty, Ptr), Name);
   }
 
   LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
     return Insert(new LoadInst(Ty, Ptr), Name);
   }
 
+  LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
+                       const Twine &Name = "") {
+    return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile), Name);
+  }
+
+  // Deprecated [opaque pointer types]
+  LoadInst *CreateLoad(Value *Ptr, const char *Name) {
+    return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
+  }
+
+  // Deprecated [opaque pointer types]
+  LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
+    return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
+  }
+
+  // Deprecated [opaque pointer types]
   LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
-    return Insert(new LoadInst(Ptr, nullptr, isVolatile), Name);
+    return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
+                      Name);
   }
 
   StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
@@ -1337,22 +1390,41 @@
   /// Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
   /// correctly, instead of converting the string to 'bool' for the isVolatile
   /// parameter.
-  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
-    LoadInst *LI = CreateLoad(Ptr, Name);
+  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
+                              const char *Name) {
+    LoadInst *LI = CreateLoad(Ty, Ptr, Name);
     LI->setAlignment(Align);
     return LI;
   }
+  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
+                              const Twine &Name = "") {
+    LoadInst *LI = CreateLoad(Ty, Ptr, Name);
+    LI->setAlignment(Align);
+    return LI;
+  }
+  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
+                              bool isVolatile, const Twine &Name = "") {
+    LoadInst *LI = CreateLoad(Ty, Ptr, isVolatile, Name);
+    LI->setAlignment(Align);
+    return LI;
+  }
+
+  // Deprecated [opaque pointer types]
+  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
+    return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
+                             Align, Name);
+  }
+  // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
                               const Twine &Name = "") {
-    LoadInst *LI = CreateLoad(Ptr, Name);
-    LI->setAlignment(Align);
-    return LI;
+    return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
+                             Align, Name);
   }
+  // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
                               const Twine &Name = "") {
-    LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
-    LI->setAlignment(Align);
-    return LI;
+    return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
+                             Align, isVolatile, Name);
   }
 
   StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
@@ -1491,50 +1563,69 @@
     return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
   }
 
-  Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
+  Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
+                            const Twine &Name = "") {
     Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
 
     if (auto *PC = dyn_cast<Constant>(Ptr))
-      return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idx), Name);
+      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
 
-    return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idx), Name);
+    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
+  }
+
+  Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
+    return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
+  }
+
+  Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
+                                    const Twine &Name = "") {
+    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
+
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
   }
 
   Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
                                     const Twine &Name = "") {
-    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
-
-    if (auto *PC = dyn_cast<Constant>(Ptr))
-      return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idx), Name);
-
-    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
+    return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
   }
 
-  Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
-                    const Twine &Name = "") {
+  Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+                            const Twine &Name = "") {
     Value *Idxs[] = {
       ConstantInt::get(Type::getInt64Ty(Context), Idx0),
       ConstantInt::get(Type::getInt64Ty(Context), Idx1)
     };
 
     if (auto *PC = dyn_cast<Constant>(Ptr))
-      return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idxs), Name);
+      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
 
-    return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idxs), Name);
+    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
+  }
+
+  Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+                            const Twine &Name = "") {
+    return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
+  }
+
+  Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
+                                    uint64_t Idx1, const Twine &Name = "") {
+    Value *Idxs[] = {
+      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
+      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
+    };
+
+    if (auto *PC = dyn_cast<Constant>(Ptr))
+      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
+
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
   }
 
   Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                                     const Twine &Name = "") {
-    Value *Idxs[] = {
-      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
-      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
-    };
-
-    if (auto *PC = dyn_cast<Constant>(Ptr))
-      return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idxs),
-                    Name);
-
-    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idxs), Name);
+    return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
   }
 
   Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
@@ -1880,15 +1971,8 @@
     return Insert(PHINode::Create(Ty, NumReservedValues), Name);
   }
 
-  CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
-                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
-    auto *PTy = cast<PointerType>(Callee->getType());
-    auto *FTy = cast<FunctionType>(PTy->getElementType());
-    return CreateCall(FTy, Callee, Args, Name, FPMathTag);
-  }
-
   CallInst *CreateCall(FunctionType *FTy, Value *Callee,
-                       ArrayRef<Value *> Args, const Twine &Name = "",
+                       ArrayRef<Value *> Args = None, const Twine &Name = "",
                        MDNode *FPMathTag = nullptr) {
     CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
     if (isa<FPMathOperator>(CI))
@@ -1896,20 +1980,44 @@
     return Insert(CI, Name);
   }
 
-  CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
+  CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
                        ArrayRef<OperandBundleDef> OpBundles,
                        const Twine &Name = "", MDNode *FPMathTag = nullptr) {
-    CallInst *CI = CallInst::Create(Callee, Args, OpBundles);
+    CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
     if (isa<FPMathOperator>(CI))
       CI = cast<CallInst>(setFPAttrs(CI, FPMathTag, FMF));
     return Insert(CI, Name);
   }
 
-  CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args,
+  CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args = None,
                        const Twine &Name = "", MDNode *FPMathTag = nullptr) {
     return CreateCall(Callee->getFunctionType(), Callee, Args, Name, FPMathTag);
   }
 
+  CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args,
+                       ArrayRef<OperandBundleDef> OpBundles,
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+    return CreateCall(Callee->getFunctionType(), Callee, Args, OpBundles, Name,
+                      FPMathTag);
+  }
+
+  // Deprecated [opaque pointer types]
+  CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+    return CreateCall(
+        cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
+        Args, Name, FPMathTag);
+  }
+
+  // Deprecated [opaque pointer types]
+  CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
+                       ArrayRef<OperandBundleDef> OpBundles,
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+    return CreateCall(
+        cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
+        Args, OpBundles, Name, FPMathTag);
+  }
+
   Value *CreateSelect(Value *C, Value *True, Value *False,
                       const Twine &Name = "", Instruction *MDFrom = nullptr) {
     if (auto *CC = dyn_cast<Constant>(C))
@@ -2126,11 +2234,12 @@
 private:
   /// Helper function that creates an assume intrinsic call that
   /// represents an alignment assumption on the provided Ptr, Mask, Type
-  /// and Offset.
+  /// and Offset. It may sometimes be useful to apply further logic based on
+  /// this alignment check; to allow that, the check can be returned via
+  /// 'TheCheck'.
   CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                             Value *PtrValue, Value *Mask,
-                                            Type *IntPtrTy,
-                                            Value *OffsetValue) {
+                                            Type *IntPtrTy, Value *OffsetValue,
+                                            Value **TheCheck) {
     Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
 
     if (OffsetValue) {
@@ -2149,6 +2258,9 @@
     Value *Zero = ConstantInt::get(IntPtrTy, 0);
     Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
     Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+    if (TheCheck)
+      *TheCheck = InvCond;
+
     return CreateAssumption(InvCond);
   }
 
@@ -2159,9 +2271,13 @@
   /// An optional offset can be provided, and if it is provided, the offset
   /// must be subtracted from the provided pointer to get the pointer with the
   /// specified alignment.
+  ///
+  /// It may sometimes be useful to apply further logic based on this
+  /// alignment check; to allow that, the check can be returned via 'TheCheck'.
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       unsigned Alignment,
-                                      Value *OffsetValue = nullptr) {
+                                      Value *OffsetValue = nullptr,
+                                      Value **TheCheck = nullptr) {
     assert(isa<PointerType>(PtrValue->getType()) &&
            "trying to create an alignment assumption on a non-pointer?");
     auto *PtrTy = cast<PointerType>(PtrValue->getType());
@@ -2169,7 +2285,7 @@
 
     Value *Mask = ConstantInt::get(IntPtrTy, Alignment > 0 ? Alignment - 1 : 0);
     return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
-                                           OffsetValue);
+                                           OffsetValue, TheCheck);
   }
 
   /// Create an assume intrinsic call that represents an alignment
@@ -2179,11 +2295,15 @@
   /// must be subtracted from the provided pointer to get the pointer with the
   /// specified alignment.
   ///
+  /// It may sometimes be useful to apply further logic based on this
+  /// alignment check; to allow that, the check can be returned via 'TheCheck'.
+  ///
   /// This overload handles the condition where the Alignment is dependent
   /// on an existing value rather than a static value.
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       Value *Alignment,
-                                      Value *OffsetValue = nullptr) {
+                                      Value *OffsetValue = nullptr,
+                                      Value **TheCheck = nullptr) {
     assert(isa<PointerType>(PtrValue->getType()) &&
            "trying to create an alignment assumption on a non-pointer?");
     auto *PtrTy = cast<PointerType>(PtrValue->getType());
@@ -2201,7 +2321,7 @@
                                ConstantInt::get(IntPtrTy, 0), "mask");
 
     return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
-                                           OffsetValue);
+                                           OffsetValue, TheCheck);
   }
 };
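
With the Value*-taking forms above kept only for migration, new code should hand IRBuilder the pointee or function type explicitly instead of having it recovered from the pointer's element type. A sketch of the preferred spellings (the function built here is illustrative):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Sketch: build 'i32 @loader(i32*)' with the explicitly-typed overloads,
    // which keep working once pointer types stop carrying an element type.
    Function *buildLoader(Module &M) {
      LLVMContext &Ctx = M.getContext();
      Type *I32 = Type::getInt32Ty(Ctx);
      auto *FTy =
          FunctionType::get(I32, {I32->getPointerTo()}, /*isVarArg=*/false);
      Function *F =
          Function::Create(FTy, Function::ExternalLinkage, "loader", &M);
      IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
      Value *Ptr = &*F->arg_begin();
      B.CreateRet(B.CreateLoad(I32, Ptr, "v")); // load type passed explicitly
      return F;
    }

The same applies to calls: prefer CreateCall(Callee->getFunctionType(), Callee, Args) or the new Function*-taking overloads over the deprecated Value*-only forms.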
 
diff --git a/include/llvm/IR/InstVisitor.h b/include/llvm/IR/InstVisitor.h
index 5e9b18d..c5b4c6f 100644
--- a/include/llvm/IR/InstVisitor.h
+++ b/include/llvm/IR/InstVisitor.h
@@ -268,17 +268,23 @@
   RetTy visitCmpInst(CmpInst &I)                  { DELEGATE(Instruction);}
   RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}
 
-  // Provide a special visitor for a 'callsite' that visits both calls and
-  // invokes. When unimplemented, properly delegates to either the terminator or
-  // regular instruction visitor.
+  // The next level of delegation for `CallBase` is slightly more complex in
+  // order to support visiting cases where the call is also a terminator.
+  RetTy visitCallBase(CallBase &I) {
+    if (isa<InvokeInst>(I))
+      return static_cast<SubClass *>(this)->visitTerminator(I);
+
+    DELEGATE(Instruction);
+  }
+
+  // Provide a legacy visitor for a 'callsite' that visits both calls and
+  // invokes.
+  //
+  // Prefer overriding the type-system-based `CallBase` visitor instead.
   RetTy visitCallSite(CallSite CS) {
     assert(CS);
     Instruction &I = *CS.getInstruction();
-    if (CS.isCall())
-      DELEGATE(Instruction);
-
-    assert(CS.isInvoke());
-    return static_cast<SubClass *>(this)->visitTerminator(I);
+    DELEGATE(CallBase);
   }
 
   // If the user wants a 'default' case, they can choose to override this
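
A visitor that cares about all call sites can now override the single CallBase hook rather than pairing visitCallInst/visitInvokeInst or going through the legacy CallSite path. A sketch, assuming the companion delegations from visitCallInst and visitInvokeInst down to CallBase elsewhere in this change:

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstVisitor.h"
    using namespace llvm;

    // Sketch: one override observes every call site, calls and invokes alike.
    struct CallSiteCounter : InstVisitor<CallSiteCounter> {
      unsigned NumCallSites = 0;
      void visitCallBase(CallBase &CB) { ++NumCallSites; }
    };

    static unsigned countCallSites(Function &F) {
      CallSiteCounter Counter;
      Counter.visit(F);
      return Counter.NumCallSites;
    }
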
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index 4611a61..3f384a6 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -46,6 +46,10 @@
 
 namespace llvm {
 
+namespace Intrinsic {
+enum ID : unsigned;
+}
+
 //===----------------------------------------------------------------------===//
 //                          UnaryInstruction Class
 //===----------------------------------------------------------------------===//
@@ -1040,6 +1044,9 @@
     return I->getOpcode() == Instruction::Call ||
            I->getOpcode() == Instruction::Invoke;
   }
+  static bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
 
   FunctionType *getFunctionType() const { return FTy; }
 
@@ -1048,6 +1055,47 @@
     this->FTy = FTy;
   }
 
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// data_operands_begin/data_operands_end - Return iterators over
+  /// the call / invoke argument list and bundle operands.  For invokes, this is
+  /// the set of instruction operands except the invoke target and the two
+  /// successor blocks; and for calls this is the set of instruction operands
+  /// except the call target.
+  User::op_iterator data_operands_begin() { return op_begin(); }
+  User::const_op_iterator data_operands_begin() const {
+    return const_cast<CallBase *>(this)->data_operands_begin();
+  }
+  User::op_iterator data_operands_end() {
+    // Walk from the end of the operands over the called operand and any
+    // subclass operands.
+    return op_end() - getNumSubclassExtraOperands() - 1;
+  }
+  User::const_op_iterator data_operands_end() const {
+    return const_cast<CallBase *>(this)->data_operands_end();
+  }
+  iterator_range<User::op_iterator> data_ops() {
+    return make_range(data_operands_begin(), data_operands_end());
+  }
+  iterator_range<User::const_op_iterator> data_ops() const {
+    return make_range(data_operands_begin(), data_operands_end());
+  }
+  bool data_operands_empty() const {
+    return data_operands_end() == data_operands_begin();
+  }
+  unsigned data_operands_size() const {
+    return std::distance(data_operands_begin(), data_operands_end());
+  }
+
+  bool isDataOperand(const Use *U) const {
+    assert(this == U->getUser() &&
+           "Only valid to query with a use of this instruction!");
+    return data_operands_begin() <= U && U < data_operands_end();
+  }
+  bool isDataOperand(Value::const_user_iterator UI) const {
+    return isDataOperand(&UI.getUse());
+  }
+
   /// Return the iterator pointing to the beginning of the argument list.
   User::op_iterator arg_begin() { return op_begin(); }
   User::const_op_iterator arg_begin() const {
@@ -1056,25 +1104,33 @@
 
   /// Return the iterator pointing to the end of the argument list.
   User::op_iterator arg_end() {
-    // Walk from the end of the operands over the called operand, the subclass
-    // operands, and any operands for bundles to find the end of the argument
+    // From the end of the data operands, walk backwards past the bundle
     // operands.
-    return op_end() - getNumTotalBundleOperands() -
-           getNumSubclassExtraOperands() - 1;
+    return data_operands_end() - getNumTotalBundleOperands();
   }
   User::const_op_iterator arg_end() const {
     return const_cast<CallBase *>(this)->arg_end();
   }
 
   /// Iteration adapter for range-for loops.
+  iterator_range<User::op_iterator> args() {
+    return make_range(arg_begin(), arg_end());
+  }
+  iterator_range<User::const_op_iterator> args() const {
+    return make_range(arg_begin(), arg_end());
+  }
+  bool arg_empty() const { return arg_end() == arg_begin(); }
+  unsigned arg_size() const { return arg_end() - arg_begin(); }
+
+  // Legacy API names that duplicate the above and will be removed once users
+  // are migrated.
   iterator_range<User::op_iterator> arg_operands() {
     return make_range(arg_begin(), arg_end());
   }
   iterator_range<User::const_op_iterator> arg_operands() const {
     return make_range(arg_begin(), arg_end());
   }
-
-  unsigned getNumArgOperands() const { return arg_end() - arg_begin(); }
+  unsigned getNumArgOperands() const { return arg_size(); }
 
   Value *getArgOperand(unsigned i) const {
     assert(i < getNumArgOperands() && "Out of bounds!");
@@ -1096,7 +1152,20 @@
     return User::getOperandUse(i);
   }
 
-  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+  bool isArgOperand(const Use *U) const {
+    assert(this == U->getUser() &&
+           "Only valid to query with a use of this instruction!");
+    return arg_begin() <= U && U < arg_end();
+  }
+  bool isArgOperand(Value::const_user_iterator UI) const {
+    return isArgOperand(&UI.getUse());
+  }
+
+  /// Returns true if this call passes the given Value* as an argument to
+  /// the called function.
+  bool hasArgument(const Value *V) const {
+    return llvm::any_of(args(), [V](const Value *Arg) { return Arg == V; });
+  }
 
   Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
 
@@ -1113,6 +1182,28 @@
     return dyn_cast_or_null<Function>(getCalledOperand());
   }
 
+  /// Return true if the callsite is an indirect call.
+  bool isIndirectCall() const;
+
+  /// Determine whether the passed iterator points to the callee operand's Use.
+  bool isCallee(Value::const_user_iterator UI) const {
+    return isCallee(&UI.getUse());
+  }
+
+  /// Determine whether this Use is the callee operand's Use.
+  bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
+
+  /// Helper to get the caller (the parent function).
+  Function *getCaller();
+  const Function *getCaller() const {
+    return const_cast<CallBase *>(this)->getCaller();
+  }
+
+  /// Returns the intrinsic ID of the intrinsic called or
+  /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
+  /// this is an indirect call.
+  Intrinsic::ID getIntrinsicID() const;
+
   void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
 
   /// Sets the function called, including updating the function type.
@@ -1304,6 +1395,55 @@
     return bundleOperandHasAttr(i - 1, Kind);
   }
 
+  /// Determine whether this data operand is not captured.
+  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+  // better indicate that this may return a conservative answer.
+  bool doesNotCapture(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
+  }
+
+  /// Determine whether this argument is passed by value.
+  bool isByValArgument(unsigned ArgNo) const {
+    return paramHasAttr(ArgNo, Attribute::ByVal);
+  }
+
+  /// Determine whether this argument is passed in an alloca.
+  bool isInAllocaArgument(unsigned ArgNo) const {
+    return paramHasAttr(ArgNo, Attribute::InAlloca);
+  }
+
+  /// Determine whether this argument is passed by value or in an alloca.
+  bool isByValOrInAllocaArgument(unsigned ArgNo) const {
+    return paramHasAttr(ArgNo, Attribute::ByVal) ||
+           paramHasAttr(ArgNo, Attribute::InAlloca);
+  }
+
+  /// Determine if there is an inalloca argument. Only the last argument can
+  /// have the inalloca attribute.
+  bool hasInAllocaArgument() const {
+    return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
+  }
+
+  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+  // better indicate that this may return a conservative answer.
+  bool doesNotAccessMemory(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+  }
+
+  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+  // better indicate that this may return a conservative answer.
+  bool onlyReadsMemory(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
+           dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+  }
+
+  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+  // better indicate that this may return a conservative answer.
+  bool doesNotReadMemory(unsigned OpNo) const {
+    return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
+           dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+  }
+
   /// Extract the alignment of the return value.
   unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
 
@@ -1324,6 +1464,11 @@
     return Attrs.getDereferenceableOrNullBytes(i);
   }
 
+  /// Return true if the return value is known to be not null.
+  /// This may be because it has the nonnull attribute, or because at least
+  /// one byte is dereferenceable and the pointer is in addrspace(0).
+  bool isReturnNonNull() const;
+
   /// Determine if the return value is marked with NoAlias attribute.
   bool returnDoesNotAlias() const {
     return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
@@ -1477,6 +1622,16 @@
            Idx < getBundleOperandsEndIndex();
   }
 
+  /// Returns true if the use is a bundle operand.
+  bool isBundleOperand(const Use *U) const {
+    assert(this == U->getUser() &&
+           "Only valid to query with a use of this instruction!");
+    return hasOperandBundles() && isBundleOperand(U - op_begin());
+  }
+  bool isBundleOperand(Value::const_user_iterator UI) const {
+    return isBundleOperand(&UI.getUse());
+  }
+
   /// Return the total number of operands (not operand bundles) used by
   /// every operand bundle in this OperandBundleUser.
   unsigned getNumTotalBundleOperands() const {
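
The unified range accessors make the common argument walks identical for calls and invokes; for instance (a sketch):

    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Sketch: args() excludes the callee, bundle operands, and (for invokes)
    // the successor blocks, so this counts actual pointer arguments only.
    static unsigned countPointerArgs(const CallBase &CB) {
      unsigned N = 0;
      for (const Value *A : CB.args())
        if (A->getType()->isPointerTy())
          ++N;
      return N;
    }
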
diff --git a/include/llvm/IR/Instruction.h b/include/llvm/IR/Instruction.h
index 7a81f70..5e78cb1e 100644
--- a/include/llvm/IR/Instruction.h
+++ b/include/llvm/IR/Instruction.h
@@ -582,6 +582,10 @@
     }
   }
 
+  /// Return true if the instruction is an llvm.lifetime.start or
+  /// llvm.lifetime.end marker.
+  bool isLifetimeStartOrEnd() const;
+
   /// Return a pointer to the next non-debug instruction in the same basic
   /// block as 'this', or nullptr if no such instruction exists.
   const Instruction *getNextNonDebugInstruction() const;
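
A typical use replaces hand-rolled intrinsic-ID checks when scanning a block. A sketch (the helper is hypothetical):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Sketch: count instructions while ignoring llvm.lifetime.start/end
    // markers, which previously required matching the intrinsic IDs by hand.
    static unsigned countNonLifetimeInsts(const BasicBlock &BB) {
      unsigned N = 0;
      for (const Instruction &I : BB)
        if (!I.isLifetimeStartOrEnd())
          ++N;
      return N;
    }
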
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index 7700e7c..0ff8f56 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -175,47 +175,58 @@
   LoadInst *cloneImpl() const;
 
 public:
-  LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
-  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
-  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr);
-  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
-           Instruction *InsertBefore = nullptr)
-      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
-                 NameStr, isVolatile, InsertBefore) {}
-  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
+           Instruction *InsertBefore = nullptr);
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
            BasicBlock *InsertAtEnd);
-  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
-           Instruction *InsertBefore = nullptr)
-      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
-                 NameStr, isVolatile, Align, InsertBefore) {}
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
            unsigned Align, Instruction *InsertBefore = nullptr);
-  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
            unsigned Align, BasicBlock *InsertAtEnd);
-  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
-           AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
-           Instruction *InsertBefore = nullptr)
-      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
-                 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
            unsigned Align, AtomicOrdering Order,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
-  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
            unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);
-  LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
-  LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
-  LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
-           bool isVolatile = false, Instruction *InsertBefore = nullptr);
-  explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
-                    bool isVolatile = false,
+
+  // Deprecated [opaque pointer types]
+  explicit LoadInst(Value *Ptr, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr)
-      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
-                 NameStr, isVolatile, InsertBefore) {}
-  LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
-           BasicBlock *InsertAtEnd);
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 InsertBefore) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd)
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 InsertAtEnd) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+           Instruction *InsertBefore = nullptr)
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 isVolatile, InsertBefore) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+           BasicBlock *InsertAtEnd)
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 isVolatile, InsertAtEnd) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+           Instruction *InsertBefore = nullptr)
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 isVolatile, Align, InsertBefore) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+           BasicBlock *InsertAtEnd)
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 isVolatile, Align, InsertAtEnd) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+           AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
+           Instruction *InsertBefore = nullptr)
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 isVolatile, Align, Order, SSID, InsertBefore) {}
+  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+           AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd)
+      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+                 isVolatile, Align, Order, SSID, InsertAtEnd) {}
 
   /// Return true if this is a load from a volatile memory location.
   bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
@@ -1433,36 +1444,25 @@
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore);
 
-  inline CallInst(Value *Func, ArrayRef<Value *> Args,
-                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
-                  Instruction *InsertBefore)
-      : CallInst(cast<FunctionType>(
-                     cast<PointerType>(Func->getType())->getElementType()),
-                 Func, Args, Bundles, NameStr, InsertBefore) {}
-
-  inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
-                  Instruction *InsertBefore)
-      : CallInst(Func, Args, None, NameStr, InsertBefore) {}
+  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+                  const Twine &NameStr, Instruction *InsertBefore)
+      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
 
   /// Construct a CallInst from a range of arguments.
-  inline CallInst(Value *Func, ArrayRef<Value *> Args,
+  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd);
 
-  explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
+  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
+                    Instruction *InsertBefore);
 
-  CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
+  CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
+           BasicBlock *InsertAtEnd);
 
-  void init(Value *Func, ArrayRef<Value *> Args,
-            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
-    init(cast<FunctionType>(
-             cast<PointerType>(Func->getType())->getElementType()),
-         Func, Args, Bundles, NameStr);
-  }
   void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
-  void init(Value *Func, const Twine &NameStr);
+  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
 
   /// Compute the number of operands to allocate.
   static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
@@ -1478,21 +1478,9 @@
   CallInst *cloneImpl() const;
 
 public:
-  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
-                          ArrayRef<OperandBundleDef> Bundles = None,
-                          const Twine &NameStr = "",
+  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                           Instruction *InsertBefore = nullptr) {
-    return Create(cast<FunctionType>(
-                      cast<PointerType>(Func->getType())->getElementType()),
-                  Func, Args, Bundles, NameStr, InsertBefore);
-  }
-
-  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
-                          const Twine &NameStr,
-                          Instruction *InsertBefore = nullptr) {
-    return Create(cast<FunctionType>(
-                      cast<PointerType>(Func->getType())->getElementType()),
-                  Func, Args, None, NameStr, InsertBefore);
+    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
   }
 
   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
@@ -1514,7 +1502,18 @@
         CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
   }
 
-  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
+                          BasicBlock *InsertAtEnd) {
+    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
+  }
+
+  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return new (ComputeNumOperands(Args.size()))
+        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
+  }
+
+  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> Bundles,
                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
     const int NumOperands =
@@ -1522,23 +1521,80 @@
     const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
 
     return new (NumOperands, DescriptorBytes)
-        CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
+        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
   }
 
+  static CallInst *Create(Function *Func, const Twine &NameStr = "",
+                          Instruction *InsertBefore = nullptr) {
+    return Create(Func->getFunctionType(), Func, NameStr, InsertBefore);
+  }
+
+  static CallInst *Create(Function *Func, ArrayRef<Value *> Args,
+                          const Twine &NameStr = "",
+                          Instruction *InsertBefore = nullptr) {
+    return Create(Func->getFunctionType(), Func, Args, NameStr, InsertBefore);
+  }
+
+  static CallInst *Create(Function *Func, const Twine &NameStr,
+                          BasicBlock *InsertAtEnd) {
+    return Create(Func->getFunctionType(), Func, NameStr, InsertAtEnd);
+  }
+
+  static CallInst *Create(Function *Func, ArrayRef<Value *> Args,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return Create(Func->getFunctionType(), Func, Args, NameStr, InsertAtEnd);
+  }
+
+  // Deprecated [opaque pointer types]
+  static CallInst *Create(Value *Func, const Twine &NameStr = "",
+                          Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, NameStr, InsertBefore);
+  }
+
+  // Deprecated [opaque pointer types]
+  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+                          const Twine &NameStr,
+                          Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, Args, NameStr, InsertBefore);
+  }
+
+  // Deprecated [opaque pointer types]
+  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+                          ArrayRef<OperandBundleDef> Bundles = None,
+                          const Twine &NameStr = "",
+                          Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, Args, Bundles, NameStr, InsertBefore);
+  }
+
+  // Deprecated [opaque pointer types]
+  static CallInst *Create(Value *Func, const Twine &NameStr,
+                          BasicBlock *InsertAtEnd) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, NameStr, InsertAtEnd);
+  }
+
+  // Deprecated [opaque pointer types]
   static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
-    return new (ComputeNumOperands(Args.size()))
-        CallInst(Func, Args, None, NameStr, InsertAtEnd);
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, Args, NameStr, InsertAtEnd);
   }
 
-  static CallInst *Create(Value *F, const Twine &NameStr = "",
-                          Instruction *InsertBefore = nullptr) {
-    return new (ComputeNumOperands(0)) CallInst(F, NameStr, InsertBefore);
-  }
-
-  static CallInst *Create(Value *F, const Twine &NameStr,
-                          BasicBlock *InsertAtEnd) {
-    return new (ComputeNumOperands(0)) CallInst(F, NameStr, InsertAtEnd);
+  // Deprecated [opaque pointer types]
+  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+                          ArrayRef<OperandBundleDef> Bundles,
+                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, Args, Bundles, NameStr, InsertAtEnd);
   }
 
   /// Create a clone of \p CI with a different set of operand bundles and
@@ -1647,18 +1703,15 @@
   }
 };
 
-CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
+CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                    BasicBlock *InsertAtEnd)
-    : CallBase(cast<FunctionType>(
-                   cast<PointerType>(Func->getType())->getElementType())
-                   ->getReturnType(),
-               Instruction::Call,
+    : CallBase(Ty->getReturnType(), Instruction::Call,
                OperandTraits<CallBase>::op_end(this) -
                    (Args.size() + CountBundleInputs(Bundles) + 1),
                unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
                InsertAtEnd) {
-  init(Func, Args, Bundles, NameStr);
+  init(Ty, Func, Args, Bundles, NameStr);
 }
 
 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
@@ -3561,36 +3614,17 @@
   /// Construct an InvokeInst given a range of arguments.
   ///
   /// Construct an InvokeInst from a range of arguments
-  inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
-                    ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
-                    int NumOperands, const Twine &NameStr,
-                    Instruction *InsertBefore)
-      : InvokeInst(cast<FunctionType>(
-                       cast<PointerType>(Func->getType())->getElementType()),
-                   Func, IfNormal, IfException, Args, Bundles, NumOperands,
-                   NameStr, InsertBefore) {}
-
   inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                     BasicBlock *IfException, ArrayRef<Value *> Args,
                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                     const Twine &NameStr, Instruction *InsertBefore);
-  /// Construct an InvokeInst given a range of arguments.
-  ///
-  /// Construct an InvokeInst from a range of arguments
-  inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
-                    ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
-                    int NumOperands, const Twine &NameStr,
-                    BasicBlock *InsertAtEnd);
 
-  void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
-            ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
-            const Twine &NameStr) {
-    init(cast<FunctionType>(
-             cast<PointerType>(Func->getType())->getElementType()),
-         Func, IfNormal, IfException, Args, Bundles, NameStr);
-  }
+  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+                    BasicBlock *IfException, ArrayRef<Value *> Args,
+                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
+                    const Twine &NameStr, BasicBlock *InsertAtEnd);
 
-  void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
+  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
             BasicBlock *IfException, ArrayRef<Value *> Args,
             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
 
@@ -3608,27 +3642,6 @@
   InvokeInst *cloneImpl() const;
 
 public:
-  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
-                            BasicBlock *IfException, ArrayRef<Value *> Args,
-                            const Twine &NameStr,
-                            Instruction *InsertBefore = nullptr) {
-    return Create(cast<FunctionType>(
-                      cast<PointerType>(Func->getType())->getElementType()),
-                  Func, IfNormal, IfException, Args, None, NameStr,
-                  InsertBefore);
-  }
-
-  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
-                            BasicBlock *IfException, ArrayRef<Value *> Args,
-                            ArrayRef<OperandBundleDef> Bundles = None,
-                            const Twine &NameStr = "",
-                            Instruction *InsertBefore = nullptr) {
-    return Create(cast<FunctionType>(
-                      cast<PointerType>(Func->getType())->getElementType()),
-                  Func, IfNormal, IfException, Args, Bundles, NameStr,
-                  InsertBefore);
-  }
-
   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                             BasicBlock *IfException, ArrayRef<Value *> Args,
                             const Twine &NameStr,
@@ -3653,16 +3666,16 @@
                    NameStr, InsertBefore);
   }
 
-  static InvokeInst *Create(Value *Func,
-                            BasicBlock *IfNormal, BasicBlock *IfException,
-                            ArrayRef<Value *> Args, const Twine &NameStr,
-                            BasicBlock *InsertAtEnd) {
+  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
     int NumOperands = ComputeNumOperands(Args.size());
-    return new (NumOperands) InvokeInst(Func, IfNormal, IfException, Args, None,
-                                        NumOperands, NameStr, InsertAtEnd);
+    return new (NumOperands)
+        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
+                   NameStr, InsertAtEnd);
   }
 
-  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                             BasicBlock *IfException, ArrayRef<Value *> Args,
                             ArrayRef<OperandBundleDef> Bundles,
                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
@@ -3671,10 +3684,85 @@
     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
 
     return new (NumOperands, DescriptorBytes)
-        InvokeInst(Func, IfNormal, IfException, Args, Bundles, NumOperands,
+        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                    NameStr, InsertAtEnd);
   }
 
+  static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            const Twine &NameStr,
+                            Instruction *InsertBefore = nullptr) {
+    return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+                  None, NameStr, InsertBefore);
+  }
+
+  static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> Bundles = None,
+                            const Twine &NameStr = "",
+                            Instruction *InsertBefore = nullptr) {
+    return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+                  Bundles, NameStr, InsertBefore);
+  }
+
+  static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+                  NameStr, InsertAtEnd);
+  }
+
+  static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> Bundles,
+                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+                  Bundles, NameStr, InsertAtEnd);
+  }
+
+  // Deprecated [opaque pointer types]
+  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            const Twine &NameStr,
+                            Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, IfNormal, IfException, Args, None, NameStr,
+                  InsertBefore);
+  }
+
+  // Deprecated [opaque pointer types]
+  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> Bundles = None,
+                            const Twine &NameStr = "",
+                            Instruction *InsertBefore = nullptr) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, IfNormal, IfException, Args, Bundles, NameStr,
+                  InsertBefore);
+  }
+
+  // Deprecated [opaque pointer types]
+  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, IfNormal, IfException, Args, NameStr, InsertAtEnd);
+  }
+
+  // Deprecated [opaque pointer types]
+  static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+                            BasicBlock *IfException, ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> Bundles,
+                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
+    return Create(cast<FunctionType>(
+                      cast<PointerType>(Func->getType())->getElementType()),
+                  Func, IfNormal, IfException, Args, Bundles, NameStr,
+                  InsertAtEnd);
+  }
+
   /// Create a clone of \p II with a different set of operand bundles and
   /// insert it before \p InsertPt.
   ///
@@ -3753,17 +3841,14 @@
   init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
 }
 
-InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
+InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                        BasicBlock *IfException, ArrayRef<Value *> Args,
                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                        const Twine &NameStr, BasicBlock *InsertAtEnd)
-    : CallBase(cast<FunctionType>(
-                   cast<PointerType>(Func->getType())->getElementType())
-                   ->getReturnType(),
-               Instruction::Invoke,
+    : CallBase(Ty->getReturnType(), Instruction::Invoke,
                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
                InsertAtEnd) {
-  init(Func, IfNormal, IfException, Args, Bundles, NameStr);
+  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
 }
 
 //===----------------------------------------------------------------------===//
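Taken together, the hunks above migrate CallInst/InvokeInst creation to an explicitly supplied FunctionType (or a Function *, which carries its own type), demoting the pointer-element-type deduction to deprecated overloads ahead of opaque pointers. A minimal sketch of the surviving style; the helper names here are illustrative, not part of the patch:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Indirect call: the FunctionType is passed explicitly instead of being
// recovered via cast<PointerType>(Callee->getType())->getElementType().
static CallInst *emitIndirectCall(FunctionType *FTy, Value *Callee,
                                  ArrayRef<Value *> Args, BasicBlock *BB) {
  return CallInst::Create(FTy, Callee, Args, "call", BB);
}

// Direct call: the Function* overloads forward F->getFunctionType(), so no
// element-type digging is needed at all.
static CallInst *emitDirectCall(Function *F, ArrayRef<Value *> Args,
                                BasicBlock *BB) {
  return CallInst::Create(F, Args, "call", BB);
}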
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index f503d3e..64603d8 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -414,6 +414,13 @@
 def int_localrecover : Intrinsic<[llvm_ptr_ty],
                                  [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
                                  [IntrNoMem]>;
+
+// Given the frame pointer passed into an SEH filter function, returns a
+// pointer to the local variable area suitable for use with llvm.localrecover.
+def int_eh_recoverfp : Intrinsic<[llvm_ptr_ty],
+                                 [llvm_ptr_ty, llvm_ptr_ty],
+                                 [IntrNoMem]>;
+
 // Note: we treat stacksave/stackrestore as writemem because we don't otherwise
 // model their dependencies on allocas.
 def int_stacksave     : Intrinsic<[llvm_ptr_ty]>,
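The new llvm.eh.recoverfp generalizes the x86-only llvm.x86.seh.recoverfp removed further down. A hedged sketch of emitting it from C++, assuming the Intrinsic::eh_recoverfp enumerator generated from this definition and pre-existing ParentFn/EntryFramePtr values:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Returns a pointer into the parent frame's local variable area, suitable as
// an operand for llvm.localrecover (per the comment in the definition above).
static Value *emitRecoverFP(IRBuilder<> &B, Module &M, Function *ParentFn,
                            Value *EntryFramePtr) {
  Function *RecoverFP = Intrinsic::getDeclaration(&M, Intrinsic::eh_recoverfp);
  Value *Parent = B.CreateBitCast(ParentFn, B.getInt8PtrTy());
  return B.CreateCall(RecoverFP, {Parent, EntryFramePtr});
}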
diff --git a/include/llvm/IR/IntrinsicsAMDGPU.td b/include/llvm/IR/IntrinsicsAMDGPU.td
index 3ea364c..7913ce8 100644
--- a/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -590,7 +590,7 @@
                              AMDGPUDimProps dim,
                              AMDGPUSampleVariant sample> : AMDGPUDimProfile<opmod, dim> {
   let IsSample = 1;
-  let RetTypes = [llvm_anyfloat_ty];
+  let RetTypes = [llvm_any_ty];
   let ExtraAddrArgs = sample.ExtraAddrArgs;
   let Gradients = sample.Gradients;
   let LodClampMip = sample.LodOrClamp;
@@ -683,11 +683,11 @@
   }
 
   defm int_amdgcn_image_load
-    : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_anyfloat_ty], [], [IntrReadMem],
+    : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
                                   [SDNPMemOperand]>,
       AMDGPUImageDMaskIntrinsic;
   defm int_amdgcn_image_load_mip
-    : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_anyfloat_ty], [],
+    : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
                                      [IntrReadMem], [SDNPMemOperand], 1>,
       AMDGPUImageDMaskIntrinsic;
 
diff --git a/include/llvm/IR/IntrinsicsWebAssembly.td b/include/llvm/IR/IntrinsicsWebAssembly.td
index ff5964c..b015650 100644
--- a/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -24,18 +24,6 @@
                                      [llvm_i32_ty, LLVMMatchType<0>],
                                      []>;
 
-// These are the old names.
-def int_wasm_mem_size : Intrinsic<[llvm_anyint_ty],
-                                  [llvm_i32_ty],
-                                  [IntrReadMem]>;
-def int_wasm_mem_grow : Intrinsic<[llvm_anyint_ty],
-                                  [llvm_i32_ty, LLVMMatchType<0>],
-                                  []>;
-
-// These are the old old names. They also lack the immediate field.
-def int_wasm_current_memory : Intrinsic<[llvm_anyint_ty], [], [IntrReadMem]>;
-def int_wasm_grow_memory : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], []>;
-
 //===----------------------------------------------------------------------===//
 // Saturating float-to-int conversions
 //===----------------------------------------------------------------------===//
diff --git a/include/llvm/IR/IntrinsicsX86.td b/include/llvm/IR/IntrinsicsX86.td
index a59dbe7..8d8cc8e 100644
--- a/include/llvm/IR/IntrinsicsX86.td
+++ b/include/llvm/IR/IntrinsicsX86.td
@@ -27,12 +27,6 @@
 
   // Marks the EH guard slot node created in LLVM IR prior to code generation.
   def int_x86_seh_ehguard : Intrinsic<[], [llvm_ptr_ty], []>;
-
-  // Given a pointer to the end of an EH registration object, returns the true
-  // parent frame address that can be used with llvm.localrecover.
-  def int_x86_seh_recoverfp : Intrinsic<[llvm_ptr_ty],
-                                        [llvm_ptr_ty, llvm_ptr_ty],
-                                        [IntrNoMem]>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -364,18 +358,6 @@
 
 // Integer arithmetic ops.
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_sse2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb128">,
-              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
-                         llvm_v16i8_ty], [IntrNoMem, Commutative]>;
-  def int_x86_sse2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw128">,
-              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
-                         llvm_v8i16_ty], [IntrNoMem, Commutative]>;
-  def int_x86_sse2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb128">,
-              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
-                         llvm_v16i8_ty], [IntrNoMem]>;
-  def int_x86_sse2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw128">,
-              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
-                         llvm_v8i16_ty], [IntrNoMem]>;
   def int_x86_sse2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw128">,
               Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                          llvm_v8i16_ty], [IntrNoMem, Commutative]>;
@@ -1324,21 +1306,12 @@
 
 // BITALG bits shuffle
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_avx512_mask_vpshufbitqmb_128 :
-    GCCBuiltin<"__builtin_ia32_vpshufbitqmb128_mask">,
-    Intrinsic<[llvm_i16_ty],
-              [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
-              [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshufbitqmb_256 :
-    GCCBuiltin<"__builtin_ia32_vpshufbitqmb256_mask">,
-    Intrinsic<[llvm_i32_ty],
-              [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
-              [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshufbitqmb_512 :
-    GCCBuiltin<"__builtin_ia32_vpshufbitqmb512_mask">,
-    Intrinsic<[llvm_i64_ty],
-              [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
-              [IntrNoMem]>;
+  def int_x86_avx512_vpshufbitqmb_128 :
+    Intrinsic<[llvm_v16i1_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_vpshufbitqmb_256 :
+    Intrinsic<[llvm_v32i1_ty], [llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_vpshufbitqmb_512 :
+    Intrinsic<[llvm_v64i1_ty], [llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>;
 }
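The BITALG intrinsics now return the vXi1 mask directly instead of taking and returning scalar integer masks; callers wanting the old iN shape can bitcast. A sketch under that assumption (the enumerator name follows the usual int_ → Intrinsic:: mapping; the helper is illustrative):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// 128-bit VPSHUFBITQMB: the intrinsic now yields <16 x i1>; bitcasting to i16
// reproduces the scalar mask the removed *_mask variant used to return.
static Value *emitShufBitQMB128(IRBuilder<> &B, Module &M, Value *Src,
                                Value *Bits) {
  Function *Fn =
      Intrinsic::getDeclaration(&M, Intrinsic::x86_avx512_vpshufbitqmb_128);
  Value *Mask = B.CreateCall(Fn, {Src, Bits}); // <16 x i1>
  return B.CreateBitCast(Mask, B.getInt16Ty());
}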
 
 //===----------------------------------------------------------------------===//
@@ -1346,18 +1319,6 @@
 
 // Integer arithmetic ops.
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_avx2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb256">,
-              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
-                         llvm_v32i8_ty], [IntrNoMem, Commutative]>;
-  def int_x86_avx2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw256">,
-              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
-                         llvm_v16i16_ty], [IntrNoMem, Commutative]>;
-  def int_x86_avx2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb256">,
-              Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
-                         llvm_v32i8_ty], [IntrNoMem]>;
-  def int_x86_avx2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw256">,
-              Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
-                         llvm_v16i16_ty], [IntrNoMem]>;
   def int_x86_avx2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw256">,
               Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                          llvm_v16i16_ty], [IntrNoMem, Commutative]>;
@@ -1494,18 +1455,15 @@
               Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                          llvm_i32_ty], [IntrNoMem]>;
 
-  def int_x86_avx512_mask_pmultishift_qb_128:
-        GCCBuiltin<"__builtin_ia32_vpmultishiftqb128_mask">,
-        Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
-                   llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmultishift_qb_256:
-        GCCBuiltin<"__builtin_ia32_vpmultishiftqb256_mask">,
-        Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
-                   llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmultishift_qb_512:
-        GCCBuiltin<"__builtin_ia32_vpmultishiftqb512_mask">,
-        Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
-                   llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+  def int_x86_avx512_pmultishift_qb_128:
+        GCCBuiltin<"__builtin_ia32_vpmultishiftqb128">,
+        Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_pmultishift_qb_256:
+        GCCBuiltin<"__builtin_ia32_vpmultishiftqb256">,
+        Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_pmultishift_qb_512:
+        GCCBuiltin<"__builtin_ia32_vpmultishiftqb512">,
+        Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>;
 }
 
 // Pack ops.
@@ -1715,83 +1673,6 @@
   def int_x86_avx512_psrav_w_512 : GCCBuiltin<"__builtin_ia32_psrav32hi">,
               Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                         [IntrNoMem]>;
-
-  def int_x86_avx512_prorv_d_128 : GCCBuiltin<"__builtin_ia32_prorvd128">,
-              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
-                         llvm_v4i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prorv_d_256 : GCCBuiltin<"__builtin_ia32_prorvd256">,
-              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
-                         llvm_v8i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prorv_d_512 : GCCBuiltin<"__builtin_ia32_prorvd512">,
-              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
-                         llvm_v16i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prorv_q_128 : GCCBuiltin<"__builtin_ia32_prorvq128">,
-              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
-                         llvm_v2i64_ty], [IntrNoMem]>;
-  def int_x86_avx512_prorv_q_256 : GCCBuiltin<"__builtin_ia32_prorvq256">,
-              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
-                         llvm_v4i64_ty], [IntrNoMem]>;
-  def int_x86_avx512_prorv_q_512 : GCCBuiltin<"__builtin_ia32_prorvq512">,
-              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
-                         llvm_v8i64_ty], [IntrNoMem]>;
-
-   def int_x86_avx512_prol_d_128 : GCCBuiltin<"__builtin_ia32_prold128">,
-              Intrinsic<[llvm_v4i32_ty] , [llvm_v4i32_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prol_d_256 : GCCBuiltin<"__builtin_ia32_prold256">,
-              Intrinsic<[llvm_v8i32_ty] , [llvm_v8i32_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prol_d_512 : GCCBuiltin<"__builtin_ia32_prold512">,
-              Intrinsic<[llvm_v16i32_ty] , [llvm_v16i32_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prol_q_128 : GCCBuiltin<"__builtin_ia32_prolq128">,
-              Intrinsic<[llvm_v2i64_ty] , [llvm_v2i64_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prol_q_256 : GCCBuiltin<"__builtin_ia32_prolq256">,
-              Intrinsic<[llvm_v4i64_ty] , [llvm_v4i64_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prol_q_512 : GCCBuiltin<"__builtin_ia32_prolq512">,
-              Intrinsic<[llvm_v8i64_ty] , [llvm_v8i64_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-
-
-  def int_x86_avx512_prolv_d_128 : GCCBuiltin<"__builtin_ia32_prolvd128">,
-              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
-                         llvm_v4i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prolv_d_256 : GCCBuiltin<"__builtin_ia32_prolvd256">,
-              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
-                         llvm_v8i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prolv_d_512 : GCCBuiltin<"__builtin_ia32_prolvd512">,
-              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
-                         llvm_v16i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_prolv_q_128 : GCCBuiltin<"__builtin_ia32_prolvq128">,
-              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
-                         llvm_v2i64_ty], [IntrNoMem]>;
-  def int_x86_avx512_prolv_q_256 : GCCBuiltin<"__builtin_ia32_prolvq256">,
-              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
-                         llvm_v4i64_ty], [IntrNoMem]>;
-  def int_x86_avx512_prolv_q_512 : GCCBuiltin<"__builtin_ia32_prolvq512">,
-              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
-                         llvm_v8i64_ty], [IntrNoMem]>;
-  def int_x86_avx512_pror_d_128 : GCCBuiltin<"__builtin_ia32_prord128">,
-              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_pror_d_256 : GCCBuiltin<"__builtin_ia32_prord256">,
-              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_pror_d_512 : GCCBuiltin<"__builtin_ia32_prord512">,
-              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_pror_q_128 : GCCBuiltin<"__builtin_ia32_prorq128">,
-              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_pror_q_256 : GCCBuiltin<"__builtin_ia32_prorq256">,
-              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_pror_q_512 : GCCBuiltin<"__builtin_ia32_prorq512">,
-              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
-                         llvm_i32_ty], [IntrNoMem]>;
-
 }
 
 // Gather ops
@@ -2163,32 +2044,6 @@
               Intrinsic<[llvm_v16i8_ty],
                         [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
                         [IntrNoMem]>;
-
-  def int_x86_xop_vprotb : GCCBuiltin<"__builtin_ia32_vprotb">,
-              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
-                        [IntrNoMem]>;
-  def int_x86_xop_vprotd : GCCBuiltin<"__builtin_ia32_vprotd">,
-              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
-                        [IntrNoMem]>;
-  def int_x86_xop_vprotq : GCCBuiltin<"__builtin_ia32_vprotq">,
-              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
-                        [IntrNoMem]>;
-  def int_x86_xop_vprotw : GCCBuiltin<"__builtin_ia32_vprotw">,
-              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
-                        [IntrNoMem]>;
-  def int_x86_xop_vprotbi : GCCBuiltin<"__builtin_ia32_vprotbi">,
-              Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i8_ty],
-                        [IntrNoMem]>;
-  def int_x86_xop_vprotdi : GCCBuiltin<"__builtin_ia32_vprotdi">,
-              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i8_ty],
-                        [IntrNoMem]>;
-  def int_x86_xop_vprotqi : GCCBuiltin<"__builtin_ia32_vprotqi">,
-              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
-                        [IntrNoMem]>;
-  def int_x86_xop_vprotwi : GCCBuiltin<"__builtin_ia32_vprotwi">,
-              Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i8_ty],
-                        [IntrNoMem]>;
-
   def int_x86_xop_vpshab :
               GCCBuiltin<"__builtin_ia32_vpshab">,
               Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
@@ -3677,18 +3532,6 @@
 }
 // Integer arithmetic ops
 let TargetPrefix = "x86" in {
-  def int_x86_avx512_padds_b_512 : GCCBuiltin<"__builtin_ia32_paddsb512">,
-          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
-                    [IntrNoMem]>;
-  def int_x86_avx512_padds_w_512 : GCCBuiltin<"__builtin_ia32_paddsw512">,
-          Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
-                     [IntrNoMem]>;
-  def int_x86_avx512_psubs_b_512 : GCCBuiltin<"__builtin_ia32_psubsb512">,
-          Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
-                     [IntrNoMem]>;
-  def int_x86_avx512_psubs_w_512 : GCCBuiltin<"__builtin_ia32_psubsw512">,
-          Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
-                     [IntrNoMem]>;
   def int_x86_avx512_pmulhu_w_512 : GCCBuiltin<"__builtin_ia32_pmulhuw512">,
               Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                          llvm_v32i16_ty], [IntrNoMem, Commutative]>;
@@ -3720,6 +3563,7 @@
 
 // Gather and Scatter ops
 let TargetPrefix = "x86" in {
+  // NOTE: These are deprecated in favor of the versions that take a vXi1 mask.
   def int_x86_avx512_gather_dpd_512  : GCCBuiltin<"__builtin_ia32_gathersiv8df">,
           Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
                      llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
@@ -3852,6 +3696,7 @@
           [IntrReadMem, IntrArgMemOnly]>;
 
 // scatter
+  // NOTE: These are deprecated in favor of the versions that take a vXi1 mask.
   def int_x86_avx512_scatter_dpd_512  : GCCBuiltin<"__builtin_ia32_scattersiv8df">,
           Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
                         llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
@@ -4012,6 +3857,239 @@
                      llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
 }
 
+// AVX512 gather/scatter intrinsics that use vXi1 masks.
+let TargetPrefix = "x86" in {
+  def int_x86_avx512_mask_gather_dpd_512  :
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+                     llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_gather_dps_512  :
+          Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
+                     llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_gather_qpd_512  :
+          Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_gather_qps_512  :
+          Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+
+
+  def int_x86_avx512_mask_gather_dpq_512  :
+          Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_gather_dpi_512  :
+          Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
+                     llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_gather_qpq_512  :
+          Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+  def int_x86_avx512_mask_gather_qpi_512  :
+          Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
+                     llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+                    [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div2_df :
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div2_di :
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div4_df :
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div4_di :
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div4_sf :
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div4_si :
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div8_sf :
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3div8_si :
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv2_df :
+          Intrinsic<[llvm_v2f64_ty],
+          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv2_di :
+          Intrinsic<[llvm_v2i64_ty],
+          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv4_df :
+          Intrinsic<[llvm_v4f64_ty],
+          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv4_di :
+          Intrinsic<[llvm_v4i64_ty],
+          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv4_sf :
+          Intrinsic<[llvm_v4f32_ty],
+          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv4_si :
+          Intrinsic<[llvm_v4i32_ty],
+          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv8_sf :
+          Intrinsic<[llvm_v8f32_ty],
+          [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_gather3siv8_si :
+          Intrinsic<[llvm_v8i32_ty],
+          [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+          [IntrReadMem, IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatter_dpd_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+                        llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_scatter_dps_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
+                       llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_scatter_qpd_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+                     llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_scatter_qps_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+                     llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+
+
+  def int_x86_avx512_mask_scatter_dpq_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+                         llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_scatter_dpi_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
+                     llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_scatter_qpq_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,llvm_v8i64_ty, llvm_v8i64_ty,
+                         llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+  def int_x86_avx512_mask_scatter_qpi_512  :
+          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i64_ty, llvm_v8i32_ty,
+                         llvm_i32_ty],
+                    [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv2_df :
+        Intrinsic<[],
+        [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
+        [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv2_di :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv4_df :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv4_di :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv4_sf :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv4_si :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv8_sf :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scatterdiv8_si :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv2_df :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv2_di :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv4_df :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv4_di :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv4_sf :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv4_si :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv8_sf :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+
+  def int_x86_avx512_mask_scattersiv8_si :
+          Intrinsic<[],
+          [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
+          [IntrArgMemOnly]>;
+}
+
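These definitions mirror the deprecated gathers/scatters above, with the scalar i8/i16 mask operand replaced by a vXi1 vector. A hedged sketch of calling the 512-bit dpd gather with an all-true mask; the helper and its argument shapes are illustrative:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Value *emitGatherDPD512(IRBuilder<> &B, Module &M, Value *PassThru,
                               Value *BasePtr, Value *Index /* <8 x i32> */) {
  Function *Gather = Intrinsic::getDeclaration(
      &M, Intrinsic::x86_avx512_mask_gather_dpd_512);
  // The mask operand is now <8 x i1> rather than an i8 bitmask.
  Value *AllTrue =
      Constant::getAllOnesValue(VectorType::get(B.getInt1Ty(), 8));
  Value *Scale = B.getInt32(8); // byte stride between elements
  return B.CreateCall(Gather, {PassThru, BasePtr, Index, AllTrue, Scale});
}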
 // AVX-512 conflict detection instruction
 // Instructions that count the number of leading zero bits
 let TargetPrefix = "x86" in {
@@ -4213,237 +4291,6 @@
                    llvm_i8_ty], [IntrNoMem]>;
 }
 
-// VBMI2 Concat & Shift
-let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_avx512_vpshld_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldq512">,
-        Intrinsic<[llvm_v8i64_ty],
-                  [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshld_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldq256">,
-        Intrinsic<[llvm_v4i64_ty],
-                  [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshld_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldq128">,
-        Intrinsic<[llvm_v2i64_ty],
-                  [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_vpshld_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldd512">,
-        Intrinsic<[llvm_v16i32_ty],
-                  [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshld_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldd256">,
-        Intrinsic<[llvm_v8i32_ty],
-                  [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshld_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldd128">,
-        Intrinsic<[llvm_v4i32_ty],
-                  [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_vpshld_w_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldw512">,
-        Intrinsic<[llvm_v32i16_ty],
-                  [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshld_w_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldw256">,
-        Intrinsic<[llvm_v16i16_ty],
-                  [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshld_w_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldw128">,
-        Intrinsic<[llvm_v8i16_ty],
-                  [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_vpshrd_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdq512">,
-        Intrinsic<[llvm_v8i64_ty],
-                  [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshrd_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdq256">,
-        Intrinsic<[llvm_v4i64_ty],
-                  [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshrd_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdq128">,
-        Intrinsic<[llvm_v2i64_ty],
-                  [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_vpshrd_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdd512">,
-        Intrinsic<[llvm_v16i32_ty],
-                  [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshrd_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdd256">,
-        Intrinsic<[llvm_v8i32_ty],
-                  [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshrd_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdd128">,
-        Intrinsic<[llvm_v4i32_ty],
-                  [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_vpshrd_w_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdw512">,
-        Intrinsic<[llvm_v32i16_ty],
-                  [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshrd_w_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdw256">,
-        Intrinsic<[llvm_v16i16_ty],
-                  [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_vpshrd_w_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdw128">,
-        Intrinsic<[llvm_v8i16_ty],
-                  [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_mask_vpshldv_w_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldvw128_mask">,
-        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
-                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_w_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldvw128_maskz">,
-        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
-                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshldv_w_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldvw256_mask">,
-        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
-                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_w_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldvw256_maskz">,
-        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
-                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshldv_w_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldvw512_mask">,
-        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
-                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_w_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldvw512_maskz">,
-        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
-                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_mask_vpshldv_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldvq128_mask">,
-        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
-                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldvq128_maskz">,
-        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
-                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshldv_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldvq256_mask">,
-        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
-                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldvq256_maskz">,
-        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
-                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshldv_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldvq512_mask">,
-        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
-                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldvq512_maskz">,
-        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
-                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_mask_vpshldv_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldvd128_mask">,
-        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
-                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpshldvd128_maskz">,
-        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
-                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshldv_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldvd256_mask">,
-        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
-                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpshldvd256_maskz">,
-        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
-                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshldv_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldvd512_mask">,
-        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
-                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshldv_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpshldvd512_maskz">,
-        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
-                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_mask_vpshrdv_w_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvw128_mask">,
-        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
-                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_w_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvw128_maskz">,
-        Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
-                   llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshrdv_w_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvw256_mask">,
-        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
-                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_w_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvw256_maskz">,
-        Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
-                   llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshrdv_w_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvw512_mask">,
-        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
-                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_w_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvw512_maskz">,
-        Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
-                   llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_mask_vpshrdv_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvq128_mask">,
-        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
-                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_q_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvq128_maskz">,
-        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
-                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshrdv_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvq256_mask">,
-        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
-                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_q_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvq256_maskz">,
-        Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
-                   llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshrdv_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvq512_mask">,
-        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
-                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_q_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvq512_maskz">,
-        Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
-                   llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_mask_vpshrdv_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvd128_mask">,
-        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
-                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_d_128 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvd128_maskz">,
-        Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
-                   llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshrdv_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvd256_mask">,
-        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
-                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_d_256 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvd256_maskz">,
-        Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
-                   llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vpshrdv_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvd512_mask">,
-        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
-                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vpshrdv_d_512 :
-        GCCBuiltin<"__builtin_ia32_vpshrdvd512_maskz">,
-        Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
-                   llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
-}
-
 // truncate
 let TargetPrefix = "x86" in {
   def int_x86_avx512_mask_pmov_qb_128 :
diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h
index ebd4455..bd7097b 100644
--- a/include/llvm/IR/LLVMContext.h
+++ b/include/llvm/IR/LLVMContext.h
@@ -102,6 +102,7 @@
     MD_associated = 22,               // "associated"
     MD_callees = 23,                  // "callees"
     MD_irr_loop = 24,                 // "irr_loop"
+    MD_access_group = 25,             // "llvm.access.group"
   };
 
   /// Known operand bundle tag IDs, which always have the same value.  All
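MD_access_group is the fixed kind ID for the new llvm.access.group metadata, which ties a memory access to the access group(s) that a loop's parallelism metadata can then reference. A sketch of tagging an instruction, assuming (as in this scheme) that an access group is just a distinct metadata node with no operands:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

static void putInNewAccessGroup(Instruction *MemOp, LLVMContext &Ctx) {
  // An access group is identified by a distinct MDNode with no operands;
  // loop metadata then refers back to the same node.
  MDNode *Group = MDNode::getDistinct(Ctx, {});
  MemOp->setMetadata(LLVMContext::MD_access_group, Group);
}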
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index ef4a4a9..9ef35f1 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -367,6 +367,11 @@
     return getOrInsertFunction(Name, AttributeList{}, RetTy, Args...);
   }
 
+  // Reject an argument ordering that would otherwise compile but misbehave.
+  template <typename... ArgsTy>
+  Constant *getOrInsertFunction(StringRef Name, AttributeList AttributeList,
+                                FunctionType *Invalid, ArgsTy... Args) = delete;
+
   /// Look up the specified function in the module symbol table. If it does not
   /// exist, return null.
   Function *getFunction(StringRef Name) const;
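The deleted overload exists only to reject, at compile time, a FunctionType passed where the variadic overload expects a return type; previously such a call compiled and deduced the wrong thing. An illustrative contrast (names are placeholders):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Constant *declareHelper(Module &M, Type *RetTy, Type *ArgTy) {
  // OK: the variadic overload assembles the FunctionType from RetTy/ArgTy.
  return M.getOrInsertFunction("helper", AttributeList{}, RetTy, ArgTy);
}
// By contrast, something like
//   M.getOrInsertFunction("helper", AttributeList{}, FTy);
// with FTy a FunctionType* now selects the deleted overload and is rejected
// at compile time, instead of quietly treating FTy as the return type.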
@@ -403,11 +408,15 @@
   }
 
   /// Look up the specified global in the module symbol table.
-  ///   1. If it does not exist, add a declaration of the global and return it.
-  ///   2. Else, the global exists but has the wrong type: return the function
-  ///      with a constantexpr cast to the right type.
-  ///   3. Finally, if the existing global is the correct declaration, return
-  ///      the existing global.
+  /// If it does not exist, invoke a callback to create a declaration of the
+  /// global and return it. The global is constantexpr casted to the expected
+  /// type if necessary.
+  Constant *
+  getOrInsertGlobal(StringRef Name, Type *Ty,
+                    function_ref<GlobalVariable *()> CreateGlobalCallback);
+
+  /// Look up the specified global in the module symbol table. If required, this
+  /// overload constructs the global variable using its constructor's defaults.
   Constant *getOrInsertGlobal(StringRef Name, Type *Ty);
 
 /// @}
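The callback overload keeps the usual lookup-and-cast behavior while letting the caller decide linkage, initializer, and other properties when the global must actually be created. A minimal sketch, with an illustrative global name:

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Constant *getOrCreateCounter(Module &M, Type *Int32Ty) {
  return M.getOrInsertGlobal("counter", Int32Ty, [&] {
    // Invoked only when "counter" is absent from the symbol table.
    return new GlobalVariable(M, Int32Ty, /*isConstant=*/false,
                              GlobalValue::InternalLinkage,
                              Constant::getNullValue(Int32Ty), "counter");
  });
}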
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index 6653795..a1acee4 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -831,6 +831,13 @@
   /// union.
   bool HaveGVs;
 
+  // True if the index was created for a module compiled with -fsplit-lto-unit.
+  bool EnableSplitLTOUnit;
+
+  // True if some of the modules were compiled with -fsplit-lto-unit and
+  // some were not. Set when the combined index is created during the thin link.
+  bool PartiallySplitLTOUnits = false;
+
   std::set<std::string> CfiFunctionDefs;
   std::set<std::string> CfiFunctionDecls;
 
@@ -850,7 +857,9 @@
 
 public:
   // See HaveGVs variable comment.
-  ModuleSummaryIndex(bool HaveGVs) : HaveGVs(HaveGVs), Saver(Alloc) {}
+  ModuleSummaryIndex(bool HaveGVs, bool EnableSplitLTOUnit = false)
+      : HaveGVs(HaveGVs), EnableSplitLTOUnit(EnableSplitLTOUnit), Saver(Alloc) {
+  }
 
   bool haveGVs() const { return HaveGVs; }
 
@@ -940,6 +949,12 @@
     SkipModuleByDistributedBackend = true;
   }
 
+  bool enableSplitLTOUnit() const { return EnableSplitLTOUnit; }
+  void setEnableSplitLTOUnit() { EnableSplitLTOUnit = true; }
+
+  bool partiallySplitLTOUnits() const { return PartiallySplitLTOUnits; }
+  void setPartiallySplitLTOUnits() { PartiallySplitLTOUnits = true; }
+
   bool isGlobalValueLive(const GlobalValueSummary *GVS) const {
     return !WithGlobalValueDeadStripping || GVS->isLive();
   }
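A hedged sketch of how these flags might be driven during the thin link (the per-module index loop and the SawSplit/SawUnsplit bookkeeping are hypothetical):

  ModuleSummaryIndex Combined(/*HaveGVs=*/false);
  bool SawSplit = false, SawUnsplit = false;
  for (const ModuleSummaryIndex &Index : PerModuleIndexes) { // hypothetical
    if (Index.enableSplitLTOUnit()) {
      SawSplit = true;
      Combined.setEnableSplitLTOUnit();
    } else
      SawUnsplit = true;
  }
  if (SawSplit && SawUnsplit)
    Combined.setPartiallySplitLTOUnits(); // the inputs disagreed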
diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h
index e1e7c72..120fc25 100644
--- a/include/llvm/IR/PatternMatch.h
+++ b/include/llvm/IR/PatternMatch.h
@@ -31,7 +31,6 @@
 
 #include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/APInt.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/InstrTypes.h"
@@ -1486,8 +1485,10 @@
   Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
 
   template <typename OpTy> bool match(OpTy *V) {
-    CallSite CS(V);
-    return CS.isCall() && Val.match(CS.getArgument(OpI));
+    // FIXME: Should likely be switched to use `CallBase`.
+    if (const auto *CI = dyn_cast<CallInst>(V))
+      return Val.match(CI->getArgOperand(OpI));
+    return false;
   }
 };
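For reference, this matcher is normally reached through the m_Argument<OpIdx> helper; a minimal sketch (V is some llvm::Value*), which matches only direct calls exactly as before:

  using namespace llvm::PatternMatch;
  ConstantInt *CI = nullptr;
  // True when V is a CallInst whose argument #0 is a constant integer.
  bool ConstArg0 = match(V, m_Argument<0>(m_ConstantInt(CI)));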
 
diff --git a/include/llvm/IR/TypeBuilder.h b/include/llvm/IR/TypeBuilder.h
deleted file mode 100644
index d2c6f00..0000000
--- a/include/llvm/IR/TypeBuilder.h
+++ /dev/null
@@ -1,407 +0,0 @@
-//===---- llvm/TypeBuilder.h - Builder for LLVM types -----------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TypeBuilder class, which is used as a convenient way to
-// create LLVM types with a consistent and simplified interface.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_IR_TYPEBUILDER_H
-#define LLVM_IR_TYPEBUILDER_H
-
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/LLVMContext.h"
-#include <climits>
-
-namespace llvm {
-
-/// TypeBuilder - This provides a uniform API for looking up types
-/// known at compile time.  To support cross-compilation, we define a
-/// series of tag types in the llvm::types namespace, like i<N>,
-/// ieee_float, ppc_fp128, etc.  TypeBuilder<T, false> allows T to be
-/// any of these, a native C type (whose size may depend on the host
-/// compiler), or a pointer, function, or struct type built out of
-/// these.  TypeBuilder<T, true> removes native C types from this set
-/// to guarantee that its result is suitable for cross-compilation.
-/// We define the primitive types, pointer types, and functions up to
-/// 5 arguments here, but to use this class with your own types,
-/// you'll need to specialize it.  For example, say you want to call a
-/// function defined externally as:
-///
-/// \code{.cpp}
-///
-///   struct MyType {
-///     int32 a;
-///     int32 *b;
-///     void *array[1];  // Intended as a flexible array.
-///   };
-///   int8 AFunction(struct MyType *value);
-///
-/// \endcode
-///
-/// You'll want to use
-///   Function::Create(TypeBuilder<types::i<8>(MyType*), true>::get(), ...)
-/// to declare the function, but when you first try this, your compiler will
-/// complain that TypeBuilder<MyType, true>::get() doesn't exist. To fix this,
-/// write:
-///
-/// \code{.cpp}
-///
-///   namespace llvm {
-///   template<bool xcompile> class TypeBuilder<MyType, xcompile> {
-///   public:
-///     static StructType *get(LLVMContext &Context) {
-///       // If you cache this result, be sure to cache it separately
-///       // for each LLVMContext.
-///       return StructType::get(
-///         TypeBuilder<types::i<32>, xcompile>::get(Context),
-///         TypeBuilder<types::i<32>*, xcompile>::get(Context),
-///         TypeBuilder<types::i<8>*[], xcompile>::get(Context),
-///         nullptr);
-///     }
-///
-///     // You may find this a convenient place to put some constants
-///     // to help with getelementptr.  They don't have any effect on
-///     // the operation of TypeBuilder.
-///     enum Fields {
-///       FIELD_A,
-///       FIELD_B,
-///       FIELD_ARRAY
-///     };
-///   }
-///   }  // namespace llvm
-///
-/// \endcode
-///
-/// TypeBuilder cannot handle recursive types or types you only know at runtime.
-/// If you try to give it a recursive type, it will deadlock, infinitely
-/// recurse, or do something similarly undesirable.
-template<typename T, bool cross_compilable> class TypeBuilder {};
-
-// Types for use with cross-compilable TypeBuilders.  These correspond
-// exactly with an LLVM-native type.
-namespace types {
-/// i<N> corresponds to the LLVM IntegerType with N bits.
-template<uint32_t num_bits> class i {};
-
-// The following classes represent the LLVM floating types.
-class ieee_float {};
-class ieee_double {};
-class x86_fp80 {};
-class fp128 {};
-class ppc_fp128 {};
-// X86 MMX.
-class x86_mmx {};
-}  // namespace types
-
-// LLVM doesn't have const or volatile types.
-template<typename T, bool cross> class TypeBuilder<const T, cross>
-  : public TypeBuilder<T, cross> {};
-template<typename T, bool cross> class TypeBuilder<volatile T, cross>
-  : public TypeBuilder<T, cross> {};
-template<typename T, bool cross> class TypeBuilder<const volatile T, cross>
-  : public TypeBuilder<T, cross> {};
-
-// Pointers
-template<typename T, bool cross> class TypeBuilder<T*, cross> {
-public:
-  static PointerType *get(LLVMContext &Context) {
-    return PointerType::getUnqual(TypeBuilder<T,cross>::get(Context));
-  }
-};
-
-/// There is no support for references
-template<typename T, bool cross> class TypeBuilder<T&, cross> {};
-
-// Arrays
-template<typename T, size_t N, bool cross> class TypeBuilder<T[N], cross> {
-public:
-  static ArrayType *get(LLVMContext &Context) {
-    return ArrayType::get(TypeBuilder<T, cross>::get(Context), N);
-  }
-};
-/// LLVM uses an array of length 0 to represent an unknown-length array.
-template<typename T, bool cross> class TypeBuilder<T[], cross> {
-public:
-  static ArrayType *get(LLVMContext &Context) {
-    return ArrayType::get(TypeBuilder<T, cross>::get(Context), 0);
-  }
-};
-
-// Define the C integral types only for TypeBuilder<T, false>.
-//
-// C integral types do not have a defined size. It would be nice to use the
-// stdint.h-defined typedefs that do have defined sizes, but we'd run into the
-// following problem:
-//
-// On an ILP32 machine, stdint.h might define:
-//
-//   typedef int int32_t;
-//   typedef long long int64_t;
-//   typedef long size_t;
-//
-// If we defined TypeBuilder<int32_t> and TypeBuilder<int64_t>, then any use of
-// TypeBuilder<size_t> would fail.  We couldn't define TypeBuilder<size_t> in
-// addition to the defined-size types because we'd get duplicate definitions on
-// platforms where stdint.h instead defines:
-//
-//   typedef int int32_t;
-//   typedef long long int64_t;
-//   typedef int size_t;
-//
-// So we define all the primitive C types and nothing else.
-#define DEFINE_INTEGRAL_TYPEBUILDER(T) \
-template<> class TypeBuilder<T, false> { \
-public: \
-  static IntegerType *get(LLVMContext &Context) { \
-    return IntegerType::get(Context, sizeof(T) * CHAR_BIT); \
-  } \
-}; \
-template<> class TypeBuilder<T, true> { \
-  /* We provide a definition here so users don't accidentally */ \
-  /* define these types to work. */ \
-}
-DEFINE_INTEGRAL_TYPEBUILDER(char);
-DEFINE_INTEGRAL_TYPEBUILDER(signed char);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned char);
-DEFINE_INTEGRAL_TYPEBUILDER(short);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned short);
-DEFINE_INTEGRAL_TYPEBUILDER(int);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned int);
-DEFINE_INTEGRAL_TYPEBUILDER(long);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned long);
-#ifdef _MSC_VER
-DEFINE_INTEGRAL_TYPEBUILDER(__int64);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned __int64);
-#else /* _MSC_VER */
-DEFINE_INTEGRAL_TYPEBUILDER(long long);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned long long);
-#endif /* _MSC_VER */
-#undef DEFINE_INTEGRAL_TYPEBUILDER
-
-template<uint32_t num_bits, bool cross>
-class TypeBuilder<types::i<num_bits>, cross> {
-public:
-  static IntegerType *get(LLVMContext &C) {
-    return IntegerType::get(C, num_bits);
-  }
-};
-
-template<> class TypeBuilder<float, false> {
-public:
-  static Type *get(LLVMContext& C) {
-    return Type::getFloatTy(C);
-  }
-};
-template<> class TypeBuilder<float, true> {};
-
-template<> class TypeBuilder<double, false> {
-public:
-  static Type *get(LLVMContext& C) {
-    return Type::getDoubleTy(C);
-  }
-};
-template<> class TypeBuilder<double, true> {};
-
-template<bool cross> class TypeBuilder<types::ieee_float, cross> {
-public:
-  static Type *get(LLVMContext& C) { return Type::getFloatTy(C); }
-};
-template<bool cross> class TypeBuilder<types::ieee_double, cross> {
-public:
-  static Type *get(LLVMContext& C) { return Type::getDoubleTy(C); }
-};
-template<bool cross> class TypeBuilder<types::x86_fp80, cross> {
-public:
-  static Type *get(LLVMContext& C) { return Type::getX86_FP80Ty(C); }
-};
-template<bool cross> class TypeBuilder<types::fp128, cross> {
-public:
-  static Type *get(LLVMContext& C) { return Type::getFP128Ty(C); }
-};
-template<bool cross> class TypeBuilder<types::ppc_fp128, cross> {
-public:
-  static Type *get(LLVMContext& C) { return Type::getPPC_FP128Ty(C); }
-};
-template<bool cross> class TypeBuilder<types::x86_mmx, cross> {
-public:
-  static Type *get(LLVMContext& C) { return Type::getX86_MMXTy(C); }
-};
-
-template<bool cross> class TypeBuilder<void, cross> {
-public:
-  static Type *get(LLVMContext &C) {
-    return Type::getVoidTy(C);
-  }
-};
-
-/// void* is disallowed in LLVM types, but it occurs often enough in C code that
-/// we special case it.
-template<> class TypeBuilder<void*, false>
-  : public TypeBuilder<types::i<8>*, false> {};
-template<> class TypeBuilder<const void*, false>
-  : public TypeBuilder<types::i<8>*, false> {};
-template<> class TypeBuilder<volatile void*, false>
-  : public TypeBuilder<types::i<8>*, false> {};
-template<> class TypeBuilder<const volatile void*, false>
-  : public TypeBuilder<types::i<8>*, false> {};
-
-template<typename R, bool cross> class TypeBuilder<R(), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context), false);
-  }
-};
-template<typename R, typename A1, bool cross> class TypeBuilder<R(A1), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                             params, false);
-  }
-};
-template<typename R, typename A1, typename A2, bool cross>
-class TypeBuilder<R(A1, A2), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                             params, false);
-  }
-};
-template<typename R, typename A1, typename A2, typename A3, bool cross>
-class TypeBuilder<R(A1, A2, A3), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-      TypeBuilder<A3, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                             params, false);
-  }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
-         bool cross>
-class TypeBuilder<R(A1, A2, A3, A4), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-      TypeBuilder<A3, cross>::get(Context),
-      TypeBuilder<A4, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                             params, false);
-  }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
-         typename A5, bool cross>
-class TypeBuilder<R(A1, A2, A3, A4, A5), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-      TypeBuilder<A3, cross>::get(Context),
-      TypeBuilder<A4, cross>::get(Context),
-      TypeBuilder<A5, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                             params, false);
-  }
-};
-
-template<typename R, bool cross> class TypeBuilder<R(...), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context), true);
-  }
-};
-template<typename R, typename A1, bool cross>
-class TypeBuilder<R(A1, ...), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context), params, true);
-  }
-};
-template<typename R, typename A1, typename A2, bool cross>
-class TypeBuilder<R(A1, A2, ...), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                                   params, true);
-  }
-};
-template<typename R, typename A1, typename A2, typename A3, bool cross>
-class TypeBuilder<R(A1, A2, A3, ...), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-      TypeBuilder<A3, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                                   params, true);
-  }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
-         bool cross>
-class TypeBuilder<R(A1, A2, A3, A4, ...), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-      TypeBuilder<A3, cross>::get(Context),
-      TypeBuilder<A4, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                             params, true);
-  }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
-         typename A5, bool cross>
-class TypeBuilder<R(A1, A2, A3, A4, A5, ...), cross> {
-public:
-  static FunctionType *get(LLVMContext &Context) {
-    Type *params[] = {
-      TypeBuilder<A1, cross>::get(Context),
-      TypeBuilder<A2, cross>::get(Context),
-      TypeBuilder<A3, cross>::get(Context),
-      TypeBuilder<A4, cross>::get(Context),
-      TypeBuilder<A5, cross>::get(Context),
-    };
-    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
-                                   params, true);
-  }
-};
-
-}  // namespace llvm
-
-#endif
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index 206089c..037c0db 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -85,6 +85,7 @@
 void initializeBranchRelaxationPass(PassRegistry&);
 void initializeBreakCriticalEdgesPass(PassRegistry&);
 void initializeBreakFalseDepsPass(PassRegistry&);
+void initializeCanonicalizeAliasesLegacyPassPass(PassRegistry &);
 void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&);
 void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&);
 void initializeCFGPrinterLegacyPassPass(PassRegistry&);
@@ -198,6 +199,7 @@
 void initializeLegacyLICMPassPass(PassRegistry&);
 void initializeLegacyLoopSinkPassPass(PassRegistry&);
 void initializeLegalizerPass(PassRegistry&);
+void initializeGISelCSEAnalysisWrapperPassPass(PassRegistry &);
 void initializeLibCallsShrinkWrapLegacyPassPass(PassRegistry&);
 void initializeLintPass(PassRegistry&);
 void initializeLiveDebugValuesPass(PassRegistry&);
@@ -273,7 +275,7 @@
 void initializeMemoryDependenceWrapperPassPass(PassRegistry&);
 void initializeMemorySSAPrinterLegacyPassPass(PassRegistry&);
 void initializeMemorySSAWrapperPassPass(PassRegistry&);
-void initializeMemorySanitizerPass(PassRegistry&);
+void initializeMemorySanitizerLegacyPassPass(PassRegistry&);
 void initializeMergeFunctionsPass(PassRegistry&);
 void initializeMergeICmpsPass(PassRegistry&);
 void initializeMergedLoadStoreMotionLegacyPassPass(PassRegistry&);
@@ -390,7 +392,7 @@
 void initializeTargetLibraryInfoWrapperPassPass(PassRegistry&);
 void initializeTargetPassConfigPass(PassRegistry&);
 void initializeTargetTransformInfoWrapperPassPass(PassRegistry&);
-void initializeThreadSanitizerPass(PassRegistry&);
+void initializeThreadSanitizerLegacyPassPass(PassRegistry&);
 void initializeTwoAddressInstructionPassPass(PassRegistry&);
 void initializeTypeBasedAAWrapperPassPass(PassRegistry&);
 void initializeUnifyFunctionExitNodesPass(PassRegistry&);
diff --git a/include/llvm/LTO/LTO.h b/include/llvm/LTO/LTO.h
index 1539087..534d9b6 100644
--- a/include/llvm/LTO/LTO.h
+++ b/include/llvm/LTO/LTO.h
@@ -400,6 +400,9 @@
   Error runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache);
 
   mutable bool CalledGetMaxTasks = false;
+
+  // Use Optional to distinguish false from not yet initialized.
+  Optional<bool> EnableSplitLTOUnit;
 };
 
 /// The resolution for a symbol. The linker must provide a SymbolResolution for
diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h
index a31caee..0851c2f 100644
--- a/include/llvm/LinkAllPasses.h
+++ b/include/llvm/LinkAllPasses.h
@@ -230,7 +230,8 @@
       llvm::TargetLibraryInfo TLI(TLII);
       llvm::AliasAnalysis AA(TLI);
       llvm::AliasSetTracker X(AA);
-      X.add(nullptr, 0, llvm::AAMDNodes()); // for -print-alias-sets
+      X.add(nullptr, llvm::LocationSize::unknown(),
+            llvm::AAMDNodes()); // for -print-alias-sets
       (void) llvm::AreStatisticsEnabled();
       (void) llvm::sys::RunningOnValgrind();
     }
diff --git a/include/llvm/MC/MCCodeView.h b/include/llvm/MC/MCCodeView.h
index 2678cf4..cef03a4 100644
--- a/include/llvm/MC/MCCodeView.h
+++ b/include/llvm/MC/MCCodeView.h
@@ -194,7 +194,7 @@
   void encodeInlineLineTable(MCAsmLayout &Layout,
                              MCCVInlineLineTableFragment &F);
 
-  void
+  MCFragment *
   emitDefRange(MCObjectStreamer &OS,
                ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
                StringRef FixedSizePortion);
diff --git a/include/llvm/MC/MCDwarf.h b/include/llvm/MC/MCDwarf.h
index 2bfaf19..7b96e9a 100644
--- a/include/llvm/MC/MCDwarf.h
+++ b/include/llvm/MC/MCDwarf.h
@@ -430,6 +430,7 @@
     OpUndefined,
     OpRegister,
     OpWindowSave,
+    OpNegateRAState,
     OpGnuArgsSize
   };
 
@@ -509,6 +510,11 @@
     return MCCFIInstruction(OpWindowSave, L, 0, 0, "");
   }
 
+  /// .cfi_negate_ra_state AArch64 negate RA state.
+  static MCCFIInstruction createNegateRAState(MCSymbol *L) {
+    return MCCFIInstruction(OpNegateRAState, L, 0, 0, "");
+  }
+
   /// .cfi_restore says that the rule for Register is now the same as it
   /// was at the beginning of the function, after all initial instructions added
   /// by .cfi_startproc were executed.
@@ -593,6 +599,7 @@
   bool IsSignalFrame = false;
   bool IsSimple = false;
   unsigned RAReg = static_cast<unsigned>(INT_MAX);
+  bool IsBKeyFrame = false;
 };
 
 class MCDwarfFrameEmitter {
diff --git a/include/llvm/MC/MCParser/MCTargetAsmParser.h b/include/llvm/MC/MCParser/MCTargetAsmParser.h
index bb97942..ccf13a6 100644
--- a/include/llvm/MC/MCParser/MCTargetAsmParser.h
+++ b/include/llvm/MC/MCParser/MCTargetAsmParser.h
@@ -490,6 +490,9 @@
                                               MCContext &Ctx) {
     return nullptr;
   }
+
+  // For any checks or cleanups at the end of parsing.
+  virtual void onEndOfFile() {}
 };
 
 } // end namespace llvm
diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h
index c9b9174..f613d3a 100644
--- a/include/llvm/MC/MCStreamer.h
+++ b/include/llvm/MC/MCStreamer.h
@@ -806,6 +806,8 @@
                                        Optional<StringRef> Source,
                                        unsigned CUID = 0);
 
+  virtual void EmitCFIBKeyFrame();
+
   /// This implements the DWARF2 '.loc fileno lineno ...' assembler
   /// directive.
   virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
@@ -900,6 +902,7 @@
   virtual void EmitCFIUndefined(int64_t Register);
   virtual void EmitCFIRegister(int64_t Register1, int64_t Register2);
   virtual void EmitCFIWindowSave();
+  virtual void EmitCFINegateRAState();
 
   virtual void EmitWinCFIStartProc(const MCSymbol *Symbol, SMLoc Loc = SMLoc());
   virtual void EmitWinCFIEndProc(SMLoc Loc = SMLoc());
diff --git a/include/llvm/MCA/Context.h b/include/llvm/MCA/Context.h
new file mode 100644
index 0000000..6b2bee0
--- /dev/null
+++ b/include/llvm/MCA/Context.h
@@ -0,0 +1,69 @@
+//===---------------------------- Context.h ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a class for holding ownership of various simulated
+/// hardware units.  A Context also provides a utility routine for constructing
+/// a default out-of-order pipeline with fetch, dispatch, execute, and retire
+/// stages.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_CONTEXT_H
+#define LLVM_MCA_CONTEXT_H
+
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/MCA/InstrBuilder.h"
+#include "llvm/MCA/Pipeline.h"
+#include "llvm/MCA/SourceMgr.h"
+#include <memory>
+
+namespace llvm {
+namespace mca {
+
+/// This is a convenience struct to hold the parameters necessary for creating
+/// the pre-built "default" out-of-order pipeline.
+struct PipelineOptions {
+  PipelineOptions(unsigned DW, unsigned RFS, unsigned LQS, unsigned SQS,
+                  bool NoAlias)
+      : DispatchWidth(DW), RegisterFileSize(RFS), LoadQueueSize(LQS),
+        StoreQueueSize(SQS), AssumeNoAlias(NoAlias) {}
+  unsigned DispatchWidth;
+  unsigned RegisterFileSize;
+  unsigned LoadQueueSize;
+  unsigned StoreQueueSize;
+  bool AssumeNoAlias;
+};
+
+class Context {
+  SmallVector<std::unique_ptr<HardwareUnit>, 4> Hardware;
+  const MCRegisterInfo &MRI;
+  const MCSubtargetInfo &STI;
+
+public:
+  Context(const MCRegisterInfo &R, const MCSubtargetInfo &S) : MRI(R), STI(S) {}
+  Context(const Context &C) = delete;
+  Context &operator=(const Context &C) = delete;
+
+  void addHardwareUnit(std::unique_ptr<HardwareUnit> H) {
+    Hardware.push_back(std::move(H));
+  }
+
+  /// Construct a basic pipeline for simulating an out-of-order pipeline.
+  /// This pipeline consists of Fetch, Dispatch, Execute, and Retire stages.
+  std::unique_ptr<Pipeline> createDefaultPipeline(const PipelineOptions &Opts,
+                                                  InstrBuilder &IB,
+                                                  SourceMgr &SrcMgr);
+};
+
+} // namespace mca
+} // namespace llvm
+#endif // LLVM_MCA_CONTEXT_H
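A sketch of the intended usage, assuming MRI, STI, IB, and SrcMgr were already built by the driver; the option values below are arbitrary examples (0 means unbounded for the register file and the queues):

  mca::Context Ctx(MRI, STI);
  mca::PipelineOptions Opts(/*DispatchWidth=*/4, /*RegisterFileSize=*/0,
                            /*LoadQueueSize=*/0, /*StoreQueueSize=*/0,
                            /*AssumeNoAlias=*/true);
  std::unique_ptr<mca::Pipeline> P =
      Ctx.createDefaultPipeline(Opts, IB, SrcMgr);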
diff --git a/include/llvm/MCA/HWEventListener.h b/include/llvm/MCA/HWEventListener.h
new file mode 100644
index 0000000..3b32b2c
--- /dev/null
+++ b/include/llvm/MCA/HWEventListener.h
@@ -0,0 +1,156 @@
+//===----------------------- HWEventListener.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the main interface for hardware event listeners.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_HWEVENTLISTENER_H
+#define LLVM_MCA_HWEVENTLISTENER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+// An HWInstructionEvent represents state changes of instructions that
+// listeners might be interested in. Listeners can choose to ignore any event
+// they are not interested in.
+class HWInstructionEvent {
+public:
+  // This is the list of event types that are shared by all targets, that
+  // generic subtarget-agnostic classes (e.g., Pipeline, HWInstructionEvent,
+  // ...) and generic Views can manipulate.
+  // Subtargets are free to define additional event types that are going to be
+  // handled by generic components as opaque values, but can still be
+  // emitted by subtarget-specific pipeline stages (e.g., ExecuteStage,
+  // DispatchStage, ...) and interpreted by subtarget-specific EventListener
+  // implementations.
+  enum GenericEventType {
+    Invalid = 0,
+    // Events generated by the Retire Control Unit.
+    Retired,
+    // Events generated by the Scheduler.
+    Ready,
+    Issued,
+    Executed,
+    // Events generated by the Dispatch logic.
+    Dispatched,
+
+    LastGenericEventType,
+  };
+
+  HWInstructionEvent(unsigned type, const InstRef &Inst)
+      : Type(type), IR(Inst) {}
+
+  // The event type. The exact meaning depends on the subtarget.
+  const unsigned Type;
+
+  // The instruction this event was generated for.
+  const InstRef &IR;
+};
+
+class HWInstructionIssuedEvent : public HWInstructionEvent {
+public:
+  using ResourceRef = std::pair<uint64_t, uint64_t>;
+  HWInstructionIssuedEvent(const InstRef &IR,
+                           ArrayRef<std::pair<ResourceRef, ResourceCycles>> UR)
+      : HWInstructionEvent(HWInstructionEvent::Issued, IR), UsedResources(UR) {}
+
+  ArrayRef<std::pair<ResourceRef, ResourceCycles>> UsedResources;
+};
+
+class HWInstructionDispatchedEvent : public HWInstructionEvent {
+public:
+  HWInstructionDispatchedEvent(const InstRef &IR, ArrayRef<unsigned> Regs,
+                               unsigned UOps)
+      : HWInstructionEvent(HWInstructionEvent::Dispatched, IR),
+        UsedPhysRegs(Regs), MicroOpcodes(UOps) {}
+  // Number of physical registers allocated for this instruction. There is one
+  // entry per register file.
+  ArrayRef<unsigned> UsedPhysRegs;
+  // Number of micro opcodes dispatched.
+  // This field is often set to the total number of micro-opcodes specified by
+  // the instruction descriptor of IR.
+  // The only exception is when IR declares a number of micro opcodes
+  // which exceeds the processor DispatchWidth, and - by construction - it
+  // requires multiple cycles to be fully dispatched. In that particular case,
+  // the dispatch logic would generate more than one dispatch event (one per
+  // cycle), and each event would declare how many micro opcodes have effectively
+  // been dispatched to the schedulers.
+  unsigned MicroOpcodes;
+};
+
+class HWInstructionRetiredEvent : public HWInstructionEvent {
+public:
+  HWInstructionRetiredEvent(const InstRef &IR, ArrayRef<unsigned> Regs)
+      : HWInstructionEvent(HWInstructionEvent::Retired, IR),
+        FreedPhysRegs(Regs) {}
+  // Number of register writes that have been architecturally committed. There
+  // is one entry per register file.
+  ArrayRef<unsigned> FreedPhysRegs;
+};
+
+// A HWStallEvent represents a pipeline stall caused by the lack of hardware
+// resources.
+class HWStallEvent {
+public:
+  enum GenericEventType {
+    Invalid = 0,
+    // Generic stall events generated by the DispatchStage.
+    RegisterFileStall,
+    RetireControlUnitStall,
+    // Generic stall events generated by the Scheduler.
+    DispatchGroupStall,
+    SchedulerQueueFull,
+    LoadQueueFull,
+    StoreQueueFull,
+    LastGenericEvent
+  };
+
+  HWStallEvent(unsigned type, const InstRef &Inst) : Type(type), IR(Inst) {}
+
+  // The exact meaning of the stall event type depends on the subtarget.
+  const unsigned Type;
+
+  // The instruction this event was generated for.
+  const InstRef &IR;
+};
+
+class HWEventListener {
+public:
+  // Generic events generated by the pipeline.
+  virtual void onCycleBegin() {}
+  virtual void onCycleEnd() {}
+
+  virtual void onEvent(const HWInstructionEvent &Event) {}
+  virtual void onEvent(const HWStallEvent &Event) {}
+
+  using ResourceRef = std::pair<uint64_t, uint64_t>;
+  virtual void onResourceAvailable(const ResourceRef &RRef) {}
+
+  // Events generated by the Scheduler when buffered resources are
+  // consumed/freed for an instruction.
+  virtual void onReservedBuffers(const InstRef &Inst,
+                                 ArrayRef<unsigned> Buffers) {}
+  virtual void onReleasedBuffers(const InstRef &Inst,
+                                 ArrayRef<unsigned> Buffers) {}
+
+  virtual ~HWEventListener() {}
+
+private:
+  virtual void anchor();
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_HWEVENTLISTENER_H
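To illustrate the listener interface, a minimal subclass that counts cycles and retirements (a sketch; RetireCounter is not part of this patch):

  class RetireCounter : public mca::HWEventListener {
    unsigned Cycles = 0;
    unsigned Retired = 0;

  public:
    void onCycleEnd() override { ++Cycles; }
    void onEvent(const mca::HWInstructionEvent &E) override {
      // Generic event types can be inspected directly; subtarget-specific
      // types would be treated here as opaque values.
      if (E.Type == mca::HWInstructionEvent::Retired)
        ++Retired;
    }
  };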
diff --git a/include/llvm/MCA/HardwareUnits/HardwareUnit.h b/include/llvm/MCA/HardwareUnits/HardwareUnit.h
new file mode 100644
index 0000000..104a200
--- /dev/null
+++ b/include/llvm/MCA/HardwareUnits/HardwareUnit.h
@@ -0,0 +1,33 @@
+//===-------------------------- HardwareUnit.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a base class for describing a simulated hardware
+/// unit.  These units are used to construct a simulated backend.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_HARDWAREUNIT_H
+#define LLVM_MCA_HARDWAREUNIT_H
+
+namespace llvm {
+namespace mca {
+
+class HardwareUnit {
+  HardwareUnit(const HardwareUnit &H) = delete;
+  HardwareUnit &operator=(const HardwareUnit &H) = delete;
+
+public:
+  HardwareUnit() = default;
+  virtual ~HardwareUnit();
+};
+
+} // namespace mca
+} // namespace llvm
+#endif // LLVM_MCA_HARDWAREUNIT_H
diff --git a/include/llvm/MCA/HardwareUnits/LSUnit.h b/include/llvm/MCA/HardwareUnits/LSUnit.h
new file mode 100644
index 0000000..e217fc5
--- /dev/null
+++ b/include/llvm/MCA/HardwareUnits/LSUnit.h
@@ -0,0 +1,207 @@
+//===------------------------- LSUnit.h --------------------------*- C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A Load/Store unit class that models load/store queues and that implements
+/// a simple weak memory consistency model.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_LSUNIT_H
+#define LLVM_MCA_LSUNIT_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+
+namespace llvm {
+namespace mca {
+
+class InstRef;
+class Scheduler;
+
+/// A Load/Store Unit implementing load and store queues.
+///
+/// This class implements a load queue and a store queue to emulate the
+/// out-of-order execution of memory operations.
+/// Each load (or store) consumes an entry in the load (or store) queue.
+///
+/// Rules are:
+/// 1) A younger load is allowed to pass an older load only if there are no
+///    stores or barriers in between the two loads.
+/// 2) A younger store is not allowed to pass an older store.
+/// 3) A younger store is not allowed to pass an older load.
+/// 4) A younger load is allowed to pass an older store only if the load does
+///    not alias with the store.
+///
+/// This class optimistically assumes that loads don't alias store operations.
+/// Under this assumption, younger loads are always allowed to pass older
+/// stores (this would only affect rule 4).
+/// Essentially, this class doesn't perform any sort of alias analysis to
+/// identify aliasing loads and stores.
+///
+/// To enforce aliasing between loads and stores, flag `AssumeNoAlias` must be
+/// set to `false` by the constructor of LSUnit.
+///
+/// Note that this class doesn't know about the existence of different memory
+/// types for memory operations (example: write-through, write-combining, etc.).
+/// Derived classes are responsible for implementing that extra knowledge, and
+/// provide different sets of rules for loads and stores by overriding method
+/// `isReady()`.
+/// To emulate a write-combining memory type, rule 2 must be relaxed in a
+/// derived class to enable the reordering of non-aliasing store operations.
+///
+/// No assumptions are made by this class on the size of the store buffer.  This
+/// class doesn't know how to identify cases where store-to-load forwarding may
+/// occur.
+///
+/// LSUnit doesn't attempt to predict whether a load or store hits or misses
+/// the L1 cache. To be more specific, LSUnit doesn't know anything about
+/// cache hierarchy and memory types.
+/// It only knows if an instruction "mayLoad" and/or "mayStore". For loads, the
+/// scheduling model provides an "optimistic" load-to-use latency (which usually
+/// matches the load-to-use latency for when there is a hit in the L1D).
+/// Derived classes may expand this knowledge.
+///
+/// Class MCInstrDesc in LLVM doesn't know about serializing operations, nor
+/// memory-barrier like instructions.
+/// LSUnit conservatively assumes that an instruction which `mayLoad` and has
+/// `unmodeled side effects` behaves like a "soft" load barrier. That means it
+/// serializes loads without forcing a flush of the load queue.
+/// Similarly, instructions that both `mayStore` and have `unmodeled side
+/// effects` are treated like store barriers. A full memory
+/// barrier is a 'mayLoad' and 'mayStore' instruction with unmodeled side
+/// effects. This is obviously inaccurate, but this is the best that we can do
+/// at the moment.
+///
+/// Each load/store barrier consumes one entry in the load/store queue. A
+/// load/store barrier enforces ordering of loads/stores:
+///  - A younger load cannot pass a load barrier.
+///  - A younger store cannot pass a store barrier.
+///
+/// A younger load has to wait for the memory load barrier to execute.
+/// A load/store barrier is "executed" when it becomes the oldest entry in
+/// the load/store queue(s). That also means all the older loads/stores have
+/// already been executed.
+class LSUnit : public HardwareUnit {
+  // Load queue size.
+  // LQ_Size == 0 means that there are infinite slots in the load queue.
+  unsigned LQ_Size;
+
+  // Store queue size.
+  // SQ_Size == 0 means that there are infinite slots in the store queue.
+  unsigned SQ_Size;
+
+  // If true, loads will never alias with stores. This is the default.
+  bool NoAlias;
+
+  // When a `MayLoad` instruction is dispatched to the schedulers for execution,
+  // the LSUnit reserves an entry in the `LoadQueue` for it.
+  //
+  // LoadQueue keeps track of all the loads that are in-flight. A load
+  // instruction is eventually removed from the LoadQueue when it reaches
+  // completion stage. That means a load leaves the queue when it is 'executed',
+  // and its value can be forwarded on the data path to outside units.
+  //
+  // This class doesn't know about the latency of a load instruction. So, it
+  // conservatively/pessimistically assumes that the latency of a load opcode
+  // matches the instruction latency.
+  //
+  // FIXME: In the absence of cache misses (i.e. L1I/L1D/iTLB/dTLB hits/misses),
+  // and load/store conflicts, the latency of a load is determined by the depth
+  // of the load pipeline. So, we could use field `LoadLatency` in the
+  // MCSchedModel to model that latency.
+  // Field `LoadLatency` often matches the so-called 'load-to-use' latency from
+  // L1D, and it usually already accounts for any extra latency due to data
+  // forwarding.
+  // When doing throughput analysis, `LoadLatency` is likely to
+  // be a better predictor of load latency than instruction latency. This is
+  // particularly true when simulating code with temporal/spatial locality of
+  // memory accesses.
+  // Using `LoadLatency` (instead of the instruction latency) is also expected
+  // to improve the load queue allocation for long latency instructions with
+  // folded memory operands (See PR39829).
+  //
+  // FIXME: On some processors, load/store operations are split into multiple
+  // uOps. For example, X86 AMD Jaguar natively supports 128-bit data types, but
+  // not 256-bit data types. So, a 256-bit load is effectively split into two
+  // 128-bit loads, and each split load consumes one 'LoadQueue' entry. For
+  // simplicity, this class optimistically assumes that a load instruction only
+  // consumes one entry in the LoadQueue.  Similarly, store instructions only
+  // consume a single entry in the StoreQueue.
+  // In future, we should reassess the quality of this design, and consider
+  // alternative approaches that let instructions specify the number of
+  // load/store queue entries which they consume at dispatch stage (See
+  // PR39830).
+  SmallSet<unsigned, 16> LoadQueue;
+  SmallSet<unsigned, 16> StoreQueue;
+
+  void assignLQSlot(unsigned Index);
+  void assignSQSlot(unsigned Index);
+  bool isReadyNoAlias(unsigned Index) const;
+
+  // An instruction that is both 'mayStore' and 'HasUnmodeledSideEffects' is
+  // conservatively treated as a store barrier. It forces older stores to be
+  // executed before newer stores are issued.
+  SmallSet<unsigned, 8> StoreBarriers;
+
+  // An instruction that is both 'MayLoad' and 'HasUnmodeledSideEffects' is
+  // conservatively treated as a load barrier. It forces older loads to execute
+  // before newer loads are issued.
+  SmallSet<unsigned, 8> LoadBarriers;
+
+  bool isSQEmpty() const { return StoreQueue.empty(); }
+  bool isLQEmpty() const { return LoadQueue.empty(); }
+  bool isSQFull() const { return SQ_Size != 0 && StoreQueue.size() == SQ_Size; }
+  bool isLQFull() const { return LQ_Size != 0 && LoadQueue.size() == LQ_Size; }
+
+public:
+  LSUnit(const MCSchedModel &SM, unsigned LQ = 0, unsigned SQ = 0,
+         bool AssumeNoAlias = false);
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+
+  enum Status { LSU_AVAILABLE = 0, LSU_LQUEUE_FULL, LSU_SQUEUE_FULL };
+
+  // Returns LSU_AVAILABLE if there are enough load/store queue entries to serve
+  // IR. It also returns LSU_AVAILABLE if IR is not a memory operation.
+  Status isAvailable(const InstRef &IR) const;
+
+  // Allocates load/store queue resources for IR.
+  //
+  // This method assumes that a previous call to `isAvailable(IR)` returned
+  // LSU_AVAILABLE, and that IR is a memory operation.
+  void dispatch(const InstRef &IR);
+
+  // By default, rules are:
+  // 1. A store may not pass a previous store.
+  // 2. A load may not pass a previous store unless flag 'NoAlias' is set.
+  // 3. A load may pass a previous load.
+  // 4. A store may not pass a previous load (regardless of flag 'NoAlias').
+  // 5. A load has to wait until an older load barrier is fully executed.
+  // 6. A store has to wait until an older store barrier is fully executed.
+  virtual bool isReady(const InstRef &IR) const;
+
+  // Load and store instructions are tracked by their corresponding queues from
+  // dispatch until the "instruction executed" event.
+  // Only when a load instruction reaches the 'Executed' stage, its value
+  // becomes available to the users. At that point, the load no longer needs to
+  // be tracked by the load queue.
+  // FIXME: For simplicity, we optimistically assume a similar behavior for
+  // store instructions. In practice, store operations don't tend to leave the
+  // store queue until they reach the 'Retired' stage (See PR39830).
+  void onInstructionExecuted(const InstRef &IR);
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_LSUNIT_H
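The calling protocol implied by the comments above, sketched (LSU is an LSUnit and IR refers to an in-flight memory operation):

  switch (LSU.isAvailable(IR)) {
  case mca::LSUnit::LSU_AVAILABLE:
    LSU.dispatch(IR); // reserve load/store queue entries for IR
    break;
  case mca::LSUnit::LSU_LQUEUE_FULL:
  case mca::LSUnit::LSU_SQUEUE_FULL:
    break; // stall; retry on a later cycle once entries have been freed
  }
  // Later, when IR is ready per the ordering rules and finishes executing:
  LSU.onInstructionExecuted(IR); // releases its queue entries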
diff --git a/include/llvm/MCA/HardwareUnits/RegisterFile.h b/include/llvm/MCA/HardwareUnits/RegisterFile.h
new file mode 100644
index 0000000..c23ab03
--- /dev/null
+++ b/include/llvm/MCA/HardwareUnits/RegisterFile.h
@@ -0,0 +1,239 @@
+//===--------------------- RegisterFile.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a register mapping file class.  This class is responsible
+/// for managing hardware register files and the tracking of data dependencies
+/// between registers.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_REGISTER_FILE_H
+#define LLVM_MCA_REGISTER_FILE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+class ReadState;
+class WriteState;
+class WriteRef;
+
+/// Manages hardware register files, and tracks register definitions for
+/// register renaming purposes.
+class RegisterFile : public HardwareUnit {
+  const MCRegisterInfo &MRI;
+
+  // class RegisterMappingTracker is a physical register file (PRF) descriptor.
+  // There is one RegisterMappingTracker for every PRF definition in the
+  // scheduling model.
+  //
+  // An instance of RegisterMappingTracker tracks the number of physical
+    // registers available for renaming. It also tracks the number of register
+  // moves eliminated per cycle.
+  struct RegisterMappingTracker {
+    // The total number of physical registers that are available in this
+    // register file for register renaming purposes.  A value of zero for this
+    // field means: this register file has an unbounded number of physical
+    // registers.
+    const unsigned NumPhysRegs;
+    // Number of physical registers that are currently in use.
+    unsigned NumUsedPhysRegs;
+
+    // Maximum number of register moves that can be eliminated by this PRF every
+    // cycle. A value of zero means that there is no limit in the number of
+    // moves which can be eliminated every cycle.
+    const unsigned MaxMoveEliminatedPerCycle;
+
+    // Number of register moves eliminated during this cycle.
+    //
+    // This value is increased by one every time a register move is eliminated.
+    // Every new cycle, this value is reset to zero.
+    // A move can be eliminated only if MaxMoveEliminatedPerCycle is zero, or if
+    // NumMoveEliminated is less than MaxMoveEliminatedPerCycle.
+    unsigned NumMoveEliminated;
+
+    // If set, move elimination is restricted to zero-register moves only.
+    bool AllowZeroMoveEliminationOnly;
+
+    RegisterMappingTracker(unsigned NumPhysRegisters,
+                           unsigned MaxMoveEliminated = 0U,
+                           bool AllowZeroMoveElimOnly = false)
+        : NumPhysRegs(NumPhysRegisters), NumUsedPhysRegs(0),
+          MaxMoveEliminatedPerCycle(MaxMoveEliminated), NumMoveEliminated(0U),
+          AllowZeroMoveEliminationOnly(AllowZeroMoveElimOnly) {}
+  };
+
+  // A vector of register file descriptors.  This set always contains at least
+  // one entry. Entry at index #0 is reserved.  That entry describes a register
+  // file with an unbounded number of physical registers that "sees" all the
+  // hardware registers declared by the target (i.e. all the register
+  // definitions in the target specific `XYZRegisterInfo.td` - where `XYZ` is
+  // the target name).
+  //
+  // Users can limit the number of physical registers that are available in
+  // register file #0 by specifying the command line flag
+  // `-register-file-size=<uint>`.
+  SmallVector<RegisterMappingTracker, 4> RegisterFiles;
+
+  // This type is used to propagate information about the owner of a register,
+  // and the cost of allocating it in the PRF. Register cost is defined as the
+  // number of physical registers consumed by the PRF to allocate a user
+  // register.
+  //
+  // For example: on X86 BtVer2, a YMM register consumes 2 128-bit physical
+  // registers. So, the cost of allocating a YMM register in BtVer2 is 2.
+  using IndexPlusCostPairTy = std::pair<unsigned, unsigned>;
+
+  // Struct RegisterRenamingInfo is used to map logical registers to register
+  // files.
+  //
+  // There is a RegisterRenamingInfo object for every logical register defined
+  // by the target. RegisterRenamingInfo objects are stored into vector
+  // `RegisterMappings`, and MCPhysReg IDs can be used to reference
+  // elements in that vector.
+  //
+  // Each RegisterRenamingInfo is owned by a PRF, and field `IndexPlusCost`
+  // specifies both the owning PRF, as well as the number of physical registers
+  // consumed at register renaming stage.
+  //
+  // Field `AllowMoveElimination` is set for registers that are used as
+  // destination by optimizable register moves.
+  //
+  // Field `AliasRegID` is set by writes from register moves that have been
+  // eliminated at register renaming stage. A move eliminated at register
+  // renaming stage is effectively bypassed, and its write aliases the source
+  // register definition.
+  struct RegisterRenamingInfo {
+    IndexPlusCostPairTy IndexPlusCost;
+    MCPhysReg RenameAs;
+    MCPhysReg AliasRegID;
+    bool AllowMoveElimination;
+    RegisterRenamingInfo()
+        : IndexPlusCost(std::make_pair(0U, 1U)), RenameAs(0U), AliasRegID(0U),
+          AllowMoveElimination(false) {}
+  };
+
+  // RegisterMapping objects are mainly used to track physical register
+  // definitions and resolve data dependencies.
+  //
+  // Every register declared by the Target is associated with an instance of
+  // RegisterMapping. RegisterMapping objects keep track of writes to a logical
+  // register.  That information is used by class RegisterFile to resolve data
+  // dependencies, and correctly set latencies for register uses.
+  //
+  // This implementation does not allow overlapping register files. The only
+  // register file that is allowed to overlap with other register files is
+  // register file #0. If we exclude register #0, every register is "owned" by
+  // at most one register file.
+  using RegisterMapping = std::pair<WriteRef, RegisterRenamingInfo>;
+
+  // There is one entry per each register defined by the target.
+  std::vector<RegisterMapping> RegisterMappings;
+
+  // Used to track zero registers. There is one bit for each register defined by
+  // the target. Bits are set for registers that are known to be zero.
+  APInt ZeroRegisters;
+
+  // This method creates a new register file descriptor.
+  // The new register file owns all of the registers declared by register
+  // classes in the 'RegisterClasses' set.
+  //
+  // Processor models allow the definition of RegisterFile(s) via tablegen. For
+  // example, this is a tablegen definition for an x86 register file for
+  // XMM[0-15] and YMM[0-15] that allows up to 60 renames (each rename costs 1
+  // physical register).
+  //
+  //    def FPRegisterFile : RegisterFile<60, [VR128RegClass, VR256RegClass]>
+  //
+  // Here FPRegisterFile contains all the registers defined by register class
+  // VR128RegClass and VR256RegClass. FPRegisterFile implements 60
+  // registers which can be used for register renaming purposes.
+  void addRegisterFile(const MCRegisterFileDesc &RF,
+                       ArrayRef<MCRegisterCostEntry> Entries);
+
+  // Consumes physical registers in each register file specified by the
+  // `IndexPlusCostPairTy`. This method is called from `addRegisterMapping()`.
+  void allocatePhysRegs(const RegisterRenamingInfo &Entry,
+                        MutableArrayRef<unsigned> UsedPhysRegs);
+
+  // Releases previously allocated physical registers from the register file(s).
+  // This method is called from `invalidateRegisterMapping()`.
+  void freePhysRegs(const RegisterRenamingInfo &Entry,
+                    MutableArrayRef<unsigned> FreedPhysRegs);
+
+  // Collects writes that are in a RAW dependency with RS.
+  // This method is called from `addRegisterRead()`.
+  void collectWrites(const ReadState &RS,
+                     SmallVectorImpl<WriteRef> &Writes) const;
+
+  // Create an instance of RegisterMappingTracker for every register file
+  // specified by the processor model.
+  // If no register file is specified, then this method creates a default
+  // register file with an unbounded number of physical registers.
+  void initialize(const MCSchedModel &SM, unsigned NumRegs);
+
+public:
+  RegisterFile(const MCSchedModel &SM, const MCRegisterInfo &mri,
+               unsigned NumRegs = 0);
+
+  // This method updates the register mappings inserting a new register
+  // definition. This method is also responsible for updating the number of
+  // allocated physical registers in each register file modified by the write.
+  // No physical register is allocated if this write is from a zero-idiom.
+  void addRegisterWrite(WriteRef Write, MutableArrayRef<unsigned> UsedPhysRegs);
+
+  // Collect writes that are in a data dependency with RS, and update RS
+  // internal state.
+  void addRegisterRead(ReadState &RS, SmallVectorImpl<WriteRef> &Writes) const;
+
+  // Removes write \param WS from the register mappings.
+  // Physical registers may be released to reflect this update.
+  // No registers are released if this write is from a zero-idiom.
+  void removeRegisterWrite(const WriteState &WS,
+                           MutableArrayRef<unsigned> FreedPhysRegs);
+
+  // Returns true if a move from RS to WS can be eliminated.
+  // On success, it updates WriteState by setting flag `WS.isEliminated`.
+  // If RS is a read from a zero register, and WS is eliminated, then
+  // `WS.WritesZero` is also set, so that method addRegisterWrite() would not
+  // reserve a physical register for it.
+  bool tryEliminateMove(WriteState &WS, ReadState &RS);
+
+  // Checks if there are enough physical registers in the register files.
+  // Returns a "response mask" where each bit represents the response from a
+  // different register file.  A mask of all zeroes means that all register
+  // files are available.  Otherwise, the mask can be used to identify which
+  // register file was busy.  This semantic allows us to classify dispatch
+  // stalls caused by the lack of register file resources.
+  //
+  // Current implementation can simulate up to 32 register files (including the
+  // special register file at index #0).
+  unsigned isAvailable(ArrayRef<unsigned> Regs) const;
+
+  // Returns the number of PRFs implemented by this processor.
+  unsigned getNumRegisterFiles() const { return RegisterFiles.size(); }
+
+  // Notify each PRF that a new cycle just started.
+  void cycleStart();
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_REGISTER_FILE_H
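A sketch of decoding the response mask returned by isAvailable() (the stall-reporting helper is hypothetical):

  unsigned Response = PRF.isAvailable(WrittenRegs);
  for (unsigned I = 0, E = PRF.getNumRegisterFiles(); I != E; ++I)
    if (Response & (1U << I))
      notifyRegisterFileStall(I); // hypothetical: PRF #I had no free registers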
diff --git a/include/llvm/MCA/HardwareUnits/ResourceManager.h b/include/llvm/MCA/HardwareUnits/ResourceManager.h
new file mode 100644
index 0000000..549a46c
--- /dev/null
+++ b/include/llvm/MCA/HardwareUnits/ResourceManager.h
@@ -0,0 +1,410 @@
+//===--------------------- ResourceManager.h --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The classes here represent processor resource units and their management
+/// strategy.  These classes are managed by the Scheduler.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_RESOURCE_MANAGER_H
+#define LLVM_MCA_RESOURCE_MANAGER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+/// Used to describe the internal state of a processor resource.
+///
+/// A processor resource is available if it is not reserved, and there are
+/// available slots in the buffer.  A processor resource is unavailable if it
+/// is either reserved, or the associated buffer is full. A processor resource
+/// with a buffer size of -1 is always available if it is not reserved.
+///
+/// Values of type ResourceStateEvent are returned by method
+/// ResourceState::isBufferAvailable(), which is used to query the internal
+/// state of a resource.
+///
+/// The naming convention for resource state events is:
+///  * Event names start with prefix RS_
+///  * Prefix RS_ is followed by a string describing the actual resource state.
+enum ResourceStateEvent {
+  RS_BUFFER_AVAILABLE,
+  RS_BUFFER_UNAVAILABLE,
+  RS_RESERVED
+};
+
+/// Resource allocation strategy used by hardware scheduler resources.
+class ResourceStrategy {
+  ResourceStrategy(const ResourceStrategy &) = delete;
+  ResourceStrategy &operator=(const ResourceStrategy &) = delete;
+
+public:
+  ResourceStrategy() {}
+  virtual ~ResourceStrategy();
+
+  /// Selects a processor resource unit from a ReadyMask.
+  virtual uint64_t select(uint64_t ReadyMask) = 0;
+
+  /// Called by the ResourceManager when a processor resource group, or a
+  /// processor resource with multiple units has become unavailable.
+  ///
+  /// The default strategy uses this information to bias its selection logic.
+  virtual void used(uint64_t ResourceMask) {}
+};
+
+/// Default resource allocation strategy used by processor resource groups and
+/// processor resources with multiple units.
+class DefaultResourceStrategy final : public ResourceStrategy {
+  /// A Mask of resource unit identifiers.
+  ///
+  /// There is one bit set for every available resource unit.
+  /// It defaults to the value of field ResourceSizeMask in ResourceState.
+  const uint64_t ResourceUnitMask;
+
+  /// A simple round-robin selector for processor resource units.
+  /// Each bit of this mask identifies a sub resource within a group.
+  ///
+  /// As an example, let's assume that this is a default policy for a
+  /// processor resource group composed of the following three units:
+  ///   ResourceA -- 0b001
+  ///   ResourceB -- 0b010
+  ///   ResourceC -- 0b100
+  ///
+  /// Field NextInSequenceMask is used to select the next unit from the set of
+  /// resource units. It defaults to the value of field `ResourceUnitMask` (in
+  /// this example, it defaults to mask '0b111').
+  ///
+  /// The round-robin selector would firstly select 'ResourceC', then
+  /// 'ResourceB', and eventually 'ResourceA'.  When a resource R is used, the
+  /// corresponding bit in NextInSequenceMask is cleared.  For example, if
+  /// 'ResourceC' is selected, then the new value of NextInSequenceMask becomes
+  /// 0b011.
+  ///
+  /// When NextInSequenceMask becomes zero, it is automatically reset to the
+  /// default value (i.e. ResourceUnitMask).
+  uint64_t NextInSequenceMask;
+
+  /// This field is used to track resource units that are used (i.e. selected)
+  /// by groups other than the one associated with this strategy object.
+  ///
+  /// In LLVM, processor resource groups are allowed to partially (or fully)
+  /// overlap. That means the same unit may be visible to multiple groups.
+  /// This field keeps track of uses that have originated from outside of
+  /// this group. The idea is to bias the selection strategy, so that resources
+  /// that haven't been used by other groups get prioritized.
+  ///
+  /// The end goal is to (try to) keep the resource distribution as uniform as
+  /// possible. By construction, this mask only tracks one level of resource
+  /// usage. Therefore, this strategy is expected to be less accurate when the
+  /// same units are used multiple times by other groups within a single round
+  /// of select.
+  ///
+  /// Note: an LRU selector would have better accuracy at the cost of being
+  /// slightly more expensive (mostly in terms of runtime cost). Methods
+  /// 'select' and 'used' are always in the hot execution path of llvm-mca.
+  /// Therefore, a slow implementation of 'select' would have a negative impact
+  /// on the overall performance of the tool.
+  uint64_t RemovedFromNextInSequence;
+
+public:
+  DefaultResourceStrategy(uint64_t UnitMask)
+      : ResourceStrategy(), ResourceUnitMask(UnitMask),
+        NextInSequenceMask(UnitMask), RemovedFromNextInSequence(0) {}
+  virtual ~DefaultResourceStrategy() = default;
+
+  uint64_t select(uint64_t ReadyMask) override;
+  void used(uint64_t Mask) override;
+};
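+
+// A minimal sketch of the round-robin selection described above. This is an
+// assumption based on the comments, not the actual definition from
+// lib/MCA/HardwareUnits/ResourceManager.cpp:
+//
+//   uint64_t DefaultResourceStrategy::select(uint64_t ReadyMask) {
+//     uint64_t CandidateMask = ReadyMask & NextInSequenceMask;
+//     if (!CandidateMask) {
+//       // The sequence is exhausted; restart it from the full unit mask.
+//       NextInSequenceMask = ResourceUnitMask;
+//       CandidateMask = ReadyMask & NextInSequenceMask;
+//     }
+//     // Pick the most significant ready unit still in the sequence
+//     // ('ResourceC' before 'ResourceB' before 'ResourceA' in the example).
+//     return llvm::PowerOf2Floor(CandidateMask);
+//   }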
+
+/// A processor resource descriptor.
+///
+/// There is an instance of this class for every processor resource defined by
+/// the machine scheduling model.
+/// Objects of class ResourceState dynamically track the usage of processor
+/// resource units.
+class ResourceState {
+  /// An index to the MCProcResourceDesc entry in the processor model.
+  const unsigned ProcResourceDescIndex;
+  /// A resource mask. This is generated by the tool with the help of
+  /// function `mca::computeProcResourceMasks' (see Support.h).
+  ///
+  /// Field ResourceMask only has one bit set if this resource state describes a
+  /// processor resource unit (i.e. this is not a group). That means we can
+  /// quickly check if a resource is a group by simply counting the number of
+  /// bits that are set in the mask.
+  ///
+  /// The most significant bit of a mask (MSB) uniquely identifies a resource.
+  /// Remaining bits are used to describe the composition of a group (Group).
+  ///
+  /// Example (little endian):
+  ///            Resource |  Mask      |  MSB       |  Group
+  ///            ---------+------------+------------+------------
+  ///            A        |  0b000001  |  0b000001  |  0b000000
+  ///                     |            |            |
+  ///            B        |  0b000010  |  0b000010  |  0b000000
+  ///                     |            |            |
+  ///            C        |  0b010000  |  0b010000  |  0b000000
+  ///                     |            |            |
+  ///            D        |  0b110010  |  0b100000  |  0b010010
+  ///
+  /// In this example, resources A, B and C are processor resource units.
+  /// Only resource D is a group resource, and it contains resources B and C.
+  /// That is because MSB(B) and MSB(C) are both contained within Group(D).
+  const uint64_t ResourceMask;
+
+  /// A ProcResource can have multiple units.
+  ///
+  /// For processor resource groups this field is a mask of contained resource
+  /// units. It is obtained from ResourceMask by clearing the highest set bit.
+  /// The number of resource units in a group can be simply computed as the
+  /// population count of this field.
+  ///
+  /// For normal (i.e. non-group) resources, the number of bits set in this mask
+  /// is equivalent to the number of units declared by the processor model (see
+  /// field 'NumUnits' in 'ProcResourceUnits').
+  uint64_t ResourceSizeMask;
+
+  /// A mask of ready units.
+  uint64_t ReadyMask;
+
+  /// Buffered resources will have this field set to a number greater than
+  /// zero. A buffered resource behaves like a reservation station
+  /// implementing its own buffer for out-of-order execution.
+  ///
+  /// A BufferSize of 1 is used by scheduler resources that force in-order
+  /// execution.
+  ///
+  /// A BufferSize of 0 is used to model in-order issue/dispatch resources.
+  /// Since in-order issue/dispatch resources don't implement buffers, dispatch
+  /// events coincide with issue events.
+  /// Also, no other instruction can be dispatched/issued while this resource is
+  /// in use. Only when all the "resource cycles" are consumed (after the issue
+  /// event) can a new instruction be dispatched.
+  const int BufferSize;
+
+  /// Available slots in the buffer (zero, if this is not a buffered resource).
+  unsigned AvailableSlots;
+
+  /// This field is set if this resource is currently reserved.
+  ///
+  /// Resources can be reserved for a number of cycles.
+  /// Instructions can still be dispatched to reserved resources. However,
+  /// instructions dispatched to a reserved resource cannot be issued to the
+  /// underlying units (i.e. pipelines) until the resource is released.
+  bool Unavailable;
+
+  const bool IsAGroup;
+
+  /// Checks for the availability of unit 'SubResMask' in the group.
+  bool isSubResourceReady(uint64_t SubResMask) const {
+    return ReadyMask & SubResMask;
+  }
+
+public:
+  ResourceState(const MCProcResourceDesc &Desc, unsigned Index, uint64_t Mask);
+
+  unsigned getProcResourceID() const { return ProcResourceDescIndex; }
+  uint64_t getResourceMask() const { return ResourceMask; }
+  uint64_t getReadyMask() const { return ReadyMask; }
+  int getBufferSize() const { return BufferSize; }
+
+  bool isBuffered() const { return BufferSize > 0; }
+  bool isInOrder() const { return BufferSize == 1; }
+
+  /// Returns true if this is an in-order dispatch/issue resource.
+  bool isADispatchHazard() const { return BufferSize == 0; }
+  bool isReserved() const { return Unavailable; }
+
+  void setReserved() { Unavailable = true; }
+  void clearReserved() { Unavailable = false; }
+
+  /// Returns true if this resource is not reserved, and if there are at least
+  /// `NumUnits` available units.
+  bool isReady(unsigned NumUnits = 1) const;
+
+  bool isAResourceGroup() const { return IsAGroup; }
+
+  bool containsResource(uint64_t ID) const { return ResourceMask & ID; }
+
+  void markSubResourceAsUsed(uint64_t ID) {
+    assert(isSubResourceReady(ID));
+    ReadyMask ^= ID;
+  }
+
+  void releaseSubResource(uint64_t ID) {
+    assert(!isSubResourceReady(ID));
+    ReadyMask ^= ID;
+  }
+
+  unsigned getNumUnits() const {
+    return isAResourceGroup() ? 1U : countPopulation(ResourceSizeMask);
+  }
+
+  /// Checks if there is an available slot in the resource buffer.
+  ///
+  /// Returns RS_BUFFER_AVAILABLE if this is not a buffered resource, or if
+  /// there is a slot available.
+  ///
+  /// Returns RS_RESERVED if this buffered resource is a dispatch hazard, and it
+  /// is reserved.
+  ///
+  /// Returns RS_BUFFER_UNAVAILABLE if there are no available slots.
+  ResourceStateEvent isBufferAvailable() const;
+
+  /// Reserve a slot in the buffer.
+  void reserveBuffer() {
+    if (AvailableSlots)
+      AvailableSlots--;
+  }
+
+  /// Release a slot in the buffer.
+  void releaseBuffer() {
+    if (BufferSize > 0)
+      AvailableSlots++;
+    assert(AvailableSlots <= static_cast<unsigned>(BufferSize));
+  }
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
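+
+// A small worked example of the mask decomposition documented above, using
+// resource D (0b110010). PowerOf2Floor and countPopulation are from
+// llvm/Support/MathExtras.h:
+//
+//   uint64_t Mask = 0b110010;                  // Resource D.
+//   uint64_t MSB = llvm::PowerOf2Floor(Mask);  // 0b100000 uniquely identifies D.
+//   uint64_t Group = Mask ^ MSB;               // 0b010010, i.e. units B and C.
+//   bool IsAGroup = llvm::countPopulation(Mask) > 1;  // true.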
+
+/// A resource unit identifier.
+///
+/// This is used to identify a specific processor resource unit using a pair
+/// of indices where the 'first' index is a processor resource mask, and the
+/// 'second' index is an index for a "sub-resource" (i.e. unit).
+typedef std::pair<uint64_t, uint64_t> ResourceRef;
+
+// First: a MCProcResourceDesc index identifying a buffered resource.
+// Second: max number of buffer entries used in this resource.
+typedef std::pair<unsigned, unsigned> BufferUsageEntry;
+
+/// A resource manager for processor resource units and groups.
+///
+/// This class owns all the ResourceState objects, and it is responsible for
+/// acting on requests from a Scheduler by updating the internal state of
+/// ResourceState objects.
+/// This class doesn't know about instruction itineraries and functional units.
+/// In the future, it can be extended to support itineraries too through the
+/// same public interface.
+class ResourceManager {
+  // Set of resources available on the subtarget.
+  //
+  // There is an instance of ResourceState for every resource declared by the
+  // target scheduling model.
+  //
+  // Elements of this vector are ordered by resource kind. In particular,
+  // resource units take precedence over resource groups.
+  //
+  // The index of a processor resource in this vector depends on the value of
+  // its mask (see the description of field ResourceState::ResourceMask).  In
+  // particular, it is computed as the position of the most significant bit set
+  // (MSB) in the mask plus one (since we want to ignore the invalid resource
+  // descriptor at index zero).
+  //
+  // Example (little endian):
+  //
+  //             Resource | Mask    |  MSB    | Index
+  //             ---------+---------+---------+-------
+  //                 A    | 0b00001 | 0b00001 |   1
+  //                      |         |         |
+  //                 B    | 0b00100 | 0b00100 |   3
+  //                      |         |         |
+  //                 C    | 0b10010 | 0b10000 |   5
+  //
+  //
+  // The same index is also used to address elements within vector `Strategies`
+  // and vector `Resource2Groups`.
+  std::vector<std::unique_ptr<ResourceState>> Resources;
+  std::vector<std::unique_ptr<ResourceStrategy>> Strategies;
+
+  // Used to quickly identify groups that own a particular resource unit.
+  std::vector<uint64_t> Resource2Groups;
+
+  // A table to map processor resource IDs to processor resource masks.
+  SmallVector<uint64_t, 8> ProcResID2Mask;
+
+  // Keeps track of which resources are busy, and how many cycles are left
+  // before those become usable again.
+  SmallDenseMap<ResourceRef, unsigned> BusyResources;
+
+  // Returns the actual resource unit that will be used.
+  ResourceRef selectPipe(uint64_t ResourceID);
+
+  void use(const ResourceRef &RR);
+  void release(const ResourceRef &RR);
+
+  unsigned getNumUnits(uint64_t ResourceID) const;
+
+  // Overrides the selection strategy for the processor resource with the given
+  // mask.
+  void setCustomStrategyImpl(std::unique_ptr<ResourceStrategy> S,
+                             uint64_t ResourceMask);
+
+public:
+  ResourceManager(const MCSchedModel &SM);
+  virtual ~ResourceManager() = default;
+
+  // Overrides the selection strategy for the resource at index ResourceID in
+  // the MCProcResourceDesc table.
+  void setCustomStrategy(std::unique_ptr<ResourceStrategy> S,
+                         unsigned ResourceID) {
+    assert(ResourceID < ProcResID2Mask.size() &&
+           "Invalid resource index in input!");
+    return setCustomStrategyImpl(std::move(S), ProcResID2Mask[ResourceID]);
+  }
+
+  // Returns RS_BUFFER_AVAILABLE if buffered resources are not reserved, and if
+  // there are enough available slots in the buffers.
+  ResourceStateEvent canBeDispatched(ArrayRef<uint64_t> Buffers) const;
+
+  // Returns the processor resource identifier associated with this Mask.
+  unsigned resolveResourceMask(uint64_t Mask) const;
+
+  // Consume a slot in every buffered resource from array 'Buffers'. Resource
+  // units that are dispatch hazards (i.e. BufferSize=0) are marked as reserved.
+  void reserveBuffers(ArrayRef<uint64_t> Buffers);
+
+  // Release buffer entries previously allocated by method reserveBuffers.
+  void releaseBuffers(ArrayRef<uint64_t> Buffers);
+
+  // Reserve a processor resource. A reserved resource is not available for
+  // instruction issue until it is released.
+  void reserveResource(uint64_t ResourceID);
+
+  // Release a previously reserved processor resource.
+  void releaseResource(uint64_t ResourceID);
+
+  bool canBeIssued(const InstrDesc &Desc) const;
+
+  void issueInstruction(
+      const InstrDesc &Desc,
+      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);
+
+  void cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed);
+
+#ifndef NDEBUG
+  void dump() const {
+    for (const std::unique_ptr<ResourceState> &Resource : Resources)
+      Resource->dump();
+  }
+#endif
+};
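+
+// A usage sketch for overriding a selection strategy. 'SM' is a subtarget
+// scheduling model and 'MyStrategy' is a hypothetical class derived from
+// ResourceStrategy:
+//
+//   ResourceManager RM(SM);
+//   RM.setCustomStrategy(llvm::make_unique<MyStrategy>(), /*ResourceID=*/3);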
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_RESOURCE_MANAGER_H
diff --git a/include/llvm/MCA/HardwareUnits/RetireControlUnit.h b/include/llvm/MCA/HardwareUnits/RetireControlUnit.h
new file mode 100644
index 0000000..71360e9
--- /dev/null
+++ b/include/llvm/MCA/HardwareUnits/RetireControlUnit.h
@@ -0,0 +1,104 @@
+//===---------------------- RetireControlUnit.h -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file simulates the hardware responsible for retiring instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_RETIRE_CONTROL_UNIT_H
+#define LLVM_MCA_RETIRE_CONTROL_UNIT_H
+
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/MCA/Instruction.h"
+#include <vector>
+
+namespace llvm {
+namespace mca {
+
+/// This class tracks which instructions are in-flight (i.e., dispatched but not
+/// retired) in the OoO backend.
+///
+/// This class checks on every cycle if/which instructions can be retired.
+/// Instructions are retired in program order.
+/// In the event of an instruction being retired, the pipeline that owns
+/// this RetireControlUnit (RCU) gets notified.
+///
+/// When an instruction retires, its register updates are architecturally
+/// committed, and any physical registers previously allocated for the
+/// retired instruction are freed.
+struct RetireControlUnit : public HardwareUnit {
+  // A RUToken is created by the RCU for every instruction dispatched to the
+  // schedulers.  These "tokens" are managed by the RCU in its token Queue.
+  //
+  // On every cycle ('cycleEvent'), the RCU iterates through the token queue
+  // looking for any token with its 'Executed' flag set.  If a token has that
+  // flag set, then the instruction has reached the write-back stage and will
+  // be retired by the RCU.
+  //
+  // 'NumSlots' represents the number of entries consumed by the instruction in
+  // the reorder buffer. Those entries will become available again once the
+  // instruction is retired.
+  //
+  // Note that the size of the reorder buffer is defined by the scheduling
+  // model via field 'NumMicroOpBufferSize'.
+  struct RUToken {
+    InstRef IR;
+    unsigned NumSlots; // Slots reserved to this instruction.
+    bool Executed;     // True if the instruction is past the WB stage.
+  };
+
+private:
+  unsigned NextAvailableSlotIdx;
+  unsigned CurrentInstructionSlotIdx;
+  unsigned AvailableSlots;
+  unsigned MaxRetirePerCycle; // 0 means no limit.
+  std::vector<RUToken> Queue;
+
+public:
+  RetireControlUnit(const MCSchedModel &SM);
+
+  bool isEmpty() const { return AvailableSlots == Queue.size(); }
+  bool isAvailable(unsigned Quantity = 1) const {
+    // Some instructions may declare a number of uOps which exceeds the size
+    // of the reorder buffer. To avoid problems, cap the amount of slots to
+    // the size of the reorder buffer.
+    Quantity = std::min(Quantity, static_cast<unsigned>(Queue.size()));
+
+    // Further normalize the number of micro opcodes for instructions that
+    // declare zero micro opcodes. This should match the behavior of method
+    // reserveSlot().
+    Quantity = std::max(Quantity, 1U);
+    return AvailableSlots >= Quantity;
+  }
+
+  unsigned getMaxRetirePerCycle() const { return MaxRetirePerCycle; }
+
+  // Reserves a number of slots, and returns a new token.
+  unsigned reserveSlot(const InstRef &IS, unsigned NumMicroOps);
+
+  // Return the current token from the RCU's circular token queue.
+  const RUToken &peekCurrentToken() const;
+
+  // Advance the pointer to the next token in the circular token queue.
+  void consumeCurrentToken();
+
+  // Update the RCU token to represent the executed state.
+  void onInstructionExecuted(unsigned TokenID);
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_RETIRE_CONTROL_UNIT_H
diff --git a/include/llvm/MCA/HardwareUnits/Scheduler.h b/include/llvm/MCA/HardwareUnits/Scheduler.h
new file mode 100644
index 0000000..351ea48
--- /dev/null
+++ b/include/llvm/MCA/HardwareUnits/Scheduler.h
@@ -0,0 +1,214 @@
+//===--------------------- Scheduler.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A scheduler for Processor Resource Units and Processor Resource Groups.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_SCHEDULER_H
+#define LLVM_MCA_SCHEDULER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/MCA/HardwareUnits/LSUnit.h"
+#include "llvm/MCA/HardwareUnits/ResourceManager.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+class SchedulerStrategy {
+public:
+  SchedulerStrategy() = default;
+  virtual ~SchedulerStrategy();
+
+  /// Returns true if Lhs should take priority over Rhs.
+  ///
+  /// This method is used by class Scheduler to select the "best" ready
+  /// instruction to issue to the underlying pipelines.
+  virtual bool compare(const InstRef &Lhs, const InstRef &Rhs) const = 0;
+};
+
+/// Default instruction selection strategy used by class Scheduler.
+class DefaultSchedulerStrategy : public SchedulerStrategy {
+  /// This method ranks instructions based on their age, and the number of known
+  /// users. The lower the rank value, the better.
+  int computeRank(const InstRef &Lhs) const {
+    return Lhs.getSourceIndex() - Lhs.getInstruction()->getNumUsers();
+  }
+
+public:
+  DefaultSchedulerStrategy() = default;
+  virtual ~DefaultSchedulerStrategy();
+
+  bool compare(const InstRef &Lhs, const InstRef &Rhs) const override {
+    int LhsRank = computeRank(Lhs);
+    int RhsRank = computeRank(Rhs);
+
+    /// Prioritize older instructions over younger instructions to minimize the
+    /// pressure on the reorder buffer.
+    if (LhsRank == RhsRank)
+      return Lhs.getSourceIndex() < Rhs.getSourceIndex();
+    return LhsRank < RhsRank;
+  }
+};
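+
+// A worked example of the rank computed above (lower rank wins):
+//
+//   InstA: source index 4, 3 known users -> rank = 4 - 3 = 1
+//   InstB: source index 2, 0 known users -> rank = 2 - 0 = 2
+//
+// compare(InstA, InstB) returns true, so the younger but more heavily used
+// InstA is issued first; ties are broken in favor of the older instruction.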
+
+/// Class Scheduler is responsible for issuing instructions to pipeline
+/// resources.
+///
+/// Internally, it delegates to a ResourceManager the management of processor
+/// resources. This class is also responsible for tracking the progress of
+/// instructions from the dispatch stage, until the write-back stage.
+///
+/// An instruction dispatched to the Scheduler is initially placed into either
+/// the 'WaitSet' or the 'ReadySet' depending on the availability of the input
+/// operands.
+///
+/// An instruction is moved from the WaitSet to the ReadySet when register
+/// operands become available, and all memory dependencies are met.
+/// Instructions that are moved from the WaitSet to the ReadySet transition
+/// in state from 'IS_AVAILABLE' to 'IS_READY'.
+///
+/// On every cycle, the Scheduler checks if it can promote instructions from the
+/// WaitSet to the ReadySet.
+///
+/// An Instruction is moved from the ReadySet to the IssuedSet when it is issued
+/// to a (one or more) pipeline(s). This event also causes an instruction state
+/// transition (i.e. from state IS_READY, to state IS_EXECUTING). An Instruction
+/// leaves the IssuedSet when it reaches the write-back stage.
+class Scheduler : public HardwareUnit {
+  LSUnit &LSU;
+
+  // Instruction selection strategy for this Scheduler.
+  std::unique_ptr<SchedulerStrategy> Strategy;
+
+  // Hardware resources that are managed by this scheduler.
+  std::unique_ptr<ResourceManager> Resources;
+
+  std::vector<InstRef> WaitSet;
+  std::vector<InstRef> ReadySet;
+  std::vector<InstRef> IssuedSet;
+
+  /// Verify the given selection strategy and set the Strategy member
+  /// accordingly.  If no strategy is provided, the DefaultSchedulerStrategy is
+  /// used.
+  void initializeStrategy(std::unique_ptr<SchedulerStrategy> S);
+
+  /// Issue an instruction without updating the ready queue.
+  void issueInstructionImpl(
+      InstRef &IR,
+      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);
+
+  // Identify instructions that have finished executing, and remove them from
+  // the IssuedSet. References to executed instructions are added to input
+  // vector 'Executed'.
+  void updateIssuedSet(SmallVectorImpl<InstRef> &Executed);
+
+  // Try to promote instructions from WaitSet to ReadySet.
+  // Add promoted instructions to the 'Ready' vector in input.
+  void promoteToReadySet(SmallVectorImpl<InstRef> &Ready);
+
+public:
+  Scheduler(const MCSchedModel &Model, LSUnit &Lsu)
+      : Scheduler(Model, Lsu, nullptr) {}
+
+  Scheduler(const MCSchedModel &Model, LSUnit &Lsu,
+            std::unique_ptr<SchedulerStrategy> SelectStrategy)
+      : Scheduler(make_unique<ResourceManager>(Model), Lsu,
+                  std::move(SelectStrategy)) {}
+
+  Scheduler(std::unique_ptr<ResourceManager> RM, LSUnit &Lsu,
+            std::unique_ptr<SchedulerStrategy> SelectStrategy)
+      : LSU(Lsu), Resources(std::move(RM)) {
+    initializeStrategy(std::move(SelectStrategy));
+  }
+
+  // Stalls generated by the scheduler.
+  enum Status {
+    SC_AVAILABLE,
+    SC_LOAD_QUEUE_FULL,
+    SC_STORE_QUEUE_FULL,
+    SC_BUFFERS_FULL,
+    SC_DISPATCH_GROUP_STALL,
+  };
+
+  /// Checks if the instruction in 'IR' can be dispatched, and returns an
+  /// answer in the form of a Status value.
+  ///
+  /// The DispatchStage is responsible for querying the Scheduler before
+  /// dispatching new instructions. This routine is used for performing such
+  /// a query. It returns SC_AVAILABLE if the instruction 'IR' can be
+  /// dispatched; otherwise it returns a Status value describing the stall type.
+  /// Internally, it also checks if the load/store unit is available.
+  Status isAvailable(const InstRef &IR) const;
+
+  /// Reserves buffer and LSUnit queue resources that are necessary to issue
+  /// this instruction.
+  ///
+  /// This operation cannot fail; it assumes that a previous call to method
+  /// `isAvailable(IR)` returned `SC_AVAILABLE`.
+  void dispatch(const InstRef &IR);
+
+  /// Returns true if IR is ready to be executed by the underlying pipelines.
+  /// This method assumes that IR has been previously dispatched.
+  bool isReady(const InstRef &IR) const;
+
+  /// Issues an instruction and populates a vector of used pipeline resources,
+  /// and a vector of instructions that transitioned to the ready state as a
+  /// result of this event.
+  void issueInstruction(
+      InstRef &IR,
+      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Used,
+      SmallVectorImpl<InstRef> &Ready);
+
+  /// Returns true if IR has to be issued immediately, or if IR is a zero
+  /// latency instruction.
+  bool mustIssueImmediately(const InstRef &IR) const;
+
+  /// This routine notifies the Scheduler that a new cycle just started.
+  ///
+  /// It notifies the underlying ResourceManager that a new cycle just started.
+  /// Vector `Freed` is populated with a ResourceRef for every resource that
+  /// has changed state and is now available to new instructions.
+  /// Instructions executed are added to vector Executed, while vector Ready is
+  /// populated with instructions that have become ready in this new cycle.
+  void cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
+                  SmallVectorImpl<InstRef> &Ready,
+                  SmallVectorImpl<InstRef> &Executed);
+
+  /// Convert a resource mask into a valid llvm processor resource identifier.
+  unsigned getResourceID(uint64_t Mask) const {
+    return Resources->resolveResourceMask(Mask);
+  }
+
+  /// Select the next instruction to issue from the ReadySet. Returns an invalid
+  /// instruction reference if there are no ready instructions, or if processor
+  /// resources are not available.
+  InstRef select();
+
+#ifndef NDEBUG
+  // Dumps the internal state of this scheduler.
+  void dump() const;
+
+  // This routine performs a sanity check.  This routine should only be called
+  // when we know that 'IR' is not in the scheduler's instruction queues.
+  void sanityCheck(const InstRef &IR) const {
+    assert(find(WaitSet, IR) == WaitSet.end() && "Already in the wait set!");
+    assert(find(ReadySet, IR) == ReadySet.end() && "Already in the ready set!");
+    assert(find(IssuedSet, IR) == IssuedSet.end() && "Already executing!");
+  }
+#endif // !NDEBUG
+};
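+
+// A sketch of the per-cycle protocol described above; everything except the
+// Scheduler API ('S') is hypothetical driver code:
+//
+//   SmallVector<ResourceRef, 8> Freed;
+//   SmallVector<InstRef, 4> Ready, Executed;
+//   S.cycleEvent(Freed, Ready, Executed);
+//   while (InstRef IR = S.select()) {
+//     SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> Used;
+//     SmallVector<InstRef, 4> Promoted;
+//     S.issueInstruction(IR, Used, Promoted);
+//   }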
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_SCHEDULER_H
diff --git a/include/llvm/MCA/InstrBuilder.h b/include/llvm/MCA/InstrBuilder.h
new file mode 100644
index 0000000..5f998db
--- /dev/null
+++ b/include/llvm/MCA/InstrBuilder.h
@@ -0,0 +1,77 @@
+//===--------------------- InstrBuilder.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A builder class for instructions that are statically analyzed by llvm-mca.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_INSTRBUILDER_H
+#define LLVM_MCA_INSTRBUILDER_H
+
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Support.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+/// A builder class that knows how to construct Instruction objects.
+///
+/// Every llvm-mca Instruction is described by an object of class InstrDesc.
+/// An InstrDesc describes which registers are read/written by the instruction,
+/// as well as the instruction latency and hardware resources consumed.
+///
+/// This class is used by the tool to construct Instructions and instruction
+/// descriptors (i.e. InstrDesc objects).
+/// Information from the machine scheduling model is used to identify processor
+/// resources that are consumed by an instruction.
+class InstrBuilder {
+  const MCSubtargetInfo &STI;
+  const MCInstrInfo &MCII;
+  const MCRegisterInfo &MRI;
+  const MCInstrAnalysis *MCIA;
+  SmallVector<uint64_t, 8> ProcResourceMasks;
+
+  DenseMap<unsigned short, std::unique_ptr<const InstrDesc>> Descriptors;
+  DenseMap<const MCInst *, std::unique_ptr<const InstrDesc>> VariantDescriptors;
+
+  bool FirstCallInst;
+  bool FirstReturnInst;
+
+  Expected<const InstrDesc &> createInstrDescImpl(const MCInst &MCI);
+  Expected<const InstrDesc &> getOrCreateInstrDesc(const MCInst &MCI);
+
+  InstrBuilder(const InstrBuilder &) = delete;
+  InstrBuilder &operator=(const InstrBuilder &) = delete;
+
+  void populateWrites(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
+  void populateReads(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
+  Error verifyInstrDesc(const InstrDesc &ID, const MCInst &MCI) const;
+
+public:
+  InstrBuilder(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
+               const MCRegisterInfo &RI, const MCInstrAnalysis *IA);
+
+  void clear() {
+    VariantDescriptors.shrink_and_clear();
+    FirstCallInst = true;
+    FirstReturnInst = true;
+  }
+
+  Expected<std::unique_ptr<Instruction>> createInstruction(const MCInst &MCI);
+};
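+
+// A usage sketch, assuming the usual MC layer objects (STI, MCII, MRI, MCIA)
+// and an already parsed MCInst 'Inst':
+//
+//   InstrBuilder IB(STI, MCII, MRI, MCIA);
+//   Expected<std::unique_ptr<Instruction>> InstOrErr = IB.createInstruction(Inst);
+//   if (!InstOrErr)
+//     return InstOrErr.takeError();
+//   std::unique_ptr<Instruction> MCAInst = std::move(*InstOrErr);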
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_INSTRBUILDER_H
diff --git a/include/llvm/MCA/Instruction.h b/include/llvm/MCA/Instruction.h
new file mode 100644
index 0000000..b91610c
--- /dev/null
+++ b/include/llvm/MCA/Instruction.h
@@ -0,0 +1,551 @@
+//===--------------------- Instruction.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines abstractions used by the Pipeline to model register reads,
+/// register writes and instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_INSTRUCTION_H
+#define LLVM_MCA_INSTRUCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MathExtras.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/raw_ostream.h"
+#endif
+
+#include <memory>
+
+namespace llvm {
+
+namespace mca {
+
+constexpr int UNKNOWN_CYCLES = -512;
+
+/// A register write descriptor.
+struct WriteDescriptor {
+  // Operand index. The index is negative for implicit writes only.
+  // For implicit writes, the actual operand index is computed performing
+  // a bitwise not of the OpIndex.
+  int OpIndex;
+  // Write latency. Number of cycles before write-back stage.
+  unsigned Latency;
+  // This field is set to a value different than zero only if this
+  // is an implicit definition.
+  unsigned RegisterID;
+  // Instruction itineraries would set this field to the SchedClass ID.
+  // Otherwise, it defaults to the WriteResourceID from the MCWriteLatencyEntry
+  // element associated to this write.
+  // When computing read latencies, this value is matched against the
+  // "ReadAdvance" information. The hardware backend may implement
+  // dedicated forwarding paths to quickly propagate write results to dependent
+  // instructions waiting in the reservation station (effectively bypassing the
+  // write-back stage).
+  unsigned SClassOrWriteResourceID;
+  // True only if this is a write obtained from an optional definition.
+  // Optional definitions are allowed to reference regID zero (i.e. "no
+  // register").
+  bool IsOptionalDef;
+
+  bool isImplicitWrite() const { return OpIndex < 0; }
+};
+
+/// A register read descriptor.
+struct ReadDescriptor {
+  // A MCOperand index. This is used by the Dispatch logic to identify register
+  // reads. Implicit reads have negative indices. The actual operand index of an
+  // implicit read is the bitwise not of field OpIndex.
+  int OpIndex;
+  // The actual "UseIdx". This is used to query the ReadAdvance table. Explicit
+  // uses always come first in the sequence of uses.
+  unsigned UseIndex;
+  // This field is only set if this is an implicit read.
+  unsigned RegisterID;
+  // Scheduling Class Index. It is used to query the scheduling model for the
+  // MCSchedClassDesc object.
+  unsigned SchedClassID;
+
+  bool isImplicitRead() const { return OpIndex < 0; }
+};
+
+class ReadState;
+
+/// Tracks uses of a register definition (e.g. register write).
+///
+/// Each implicit/explicit register write is associated with an instance of
+/// this class. A WriteState object tracks the dependent users of a
+/// register write. It also tracks how many cycles are left before the write
+/// back stage.
+class WriteState {
+  const WriteDescriptor *WD;
+  // On instruction issue, this field is set equal to the write latency.
+  // Before instruction issue, this field defaults to -512, a special
+  // value that represents an "unknown" number of cycles.
+  int CyclesLeft;
+
+  // Actual register defined by this write. This field is only used
+  // to speed up queries on the register file.
+  // For implicit writes, this field always matches the value of
+  // field RegisterID from WD.
+  unsigned RegisterID;
+
+  // Physical register file that serves register RegisterID.
+  unsigned PRFID;
+
+  // True if this write implicitly clears the upper portion of RegisterID's
+  // super-registers.
+  bool ClearsSuperRegs;
+
+  // True if this write is from a dependency breaking zero-idiom instruction.
+  bool WritesZero;
+
+  // True if this write has been eliminated at register renaming stage.
+  // Example: a register move doesn't consume scheduler/pipeline resources if
+  // it is eliminated at register renaming stage. It still consumes
+  // decode bandwidth, and ROB entries.
+  bool IsEliminated;
+
+  // This field is set if this is a partial register write, and it has a false
+  // dependency on any previous write of the same register (or a portion of it).
+  // DependentWrite must be able to complete before this write completes, so
+  // that we don't break the WAW, and the two writes can be merged together.
+  const WriteState *DependentWrite;
+
+  // A partial write that is in a false dependency with this write.
+  WriteState *PartialWrite;
+
+  unsigned DependentWriteCyclesLeft;
+
+  // A list of dependent reads. A dependent read is added to the set only if
+  // CyclesLeft is "unknown". As soon as CyclesLeft is known, each user in the
+  // set gets notified with the actual CyclesLeft.
+
+  // The 'second' element of a pair is a "ReadAdvance" number of cycles.
+  SmallVector<std::pair<ReadState *, int>, 4> Users;
+
+public:
+  WriteState(const WriteDescriptor &Desc, unsigned RegID,
+             bool clearsSuperRegs = false, bool writesZero = false)
+      : WD(&Desc), CyclesLeft(UNKNOWN_CYCLES), RegisterID(RegID), PRFID(0),
+        ClearsSuperRegs(clearsSuperRegs), WritesZero(writesZero),
+        IsEliminated(false), DependentWrite(nullptr), PartialWrite(nullptr),
+        DependentWriteCyclesLeft(0) {}
+
+  WriteState(const WriteState &Other) = default;
+  WriteState &operator=(const WriteState &Other) = default;
+
+  int getCyclesLeft() const { return CyclesLeft; }
+  unsigned getWriteResourceID() const { return WD->SClassOrWriteResourceID; }
+  unsigned getRegisterID() const { return RegisterID; }
+  unsigned getRegisterFileID() const { return PRFID; }
+  unsigned getLatency() const { return WD->Latency; }
+
+  void addUser(ReadState *Use, int ReadAdvance);
+  void addUser(WriteState *Use);
+
+  unsigned getDependentWriteCyclesLeft() const {
+    return DependentWriteCyclesLeft;
+  }
+
+  unsigned getNumUsers() const {
+    unsigned NumUsers = Users.size();
+    if (PartialWrite)
+      ++NumUsers;
+    return NumUsers;
+  }
+
+  bool clearsSuperRegisters() const { return ClearsSuperRegs; }
+  bool isWriteZero() const { return WritesZero; }
+  bool isEliminated() const { return IsEliminated; }
+  bool isExecuted() const {
+    return CyclesLeft != UNKNOWN_CYCLES && CyclesLeft <= 0;
+  }
+
+  const WriteState *getDependentWrite() const { return DependentWrite; }
+  void setDependentWrite(WriteState *Other) { DependentWrite = Other; }
+  void writeStartEvent(unsigned Cycles) {
+    DependentWriteCyclesLeft = Cycles;
+    DependentWrite = nullptr;
+  }
+
+  void setWriteZero() { WritesZero = true; }
+  void setEliminated() {
+    assert(Users.empty() && "Write is in an inconsistent state.");
+    CyclesLeft = 0;
+    IsEliminated = true;
+  }
+
+  void setPRF(unsigned PRF) { PRFID = PRF; }
+
+  // On every cycle, update CyclesLeft and notify dependent users.
+  void cycleEvent();
+  void onInstructionIssued();
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
+
+/// Tracks register operand latency in cycles.
+///
+/// A read may be dependent on more than one write. This occurs when some
+/// writes only partially update the register associated to this read.
+class ReadState {
+  const ReadDescriptor *RD;
+  // Physical register identifier associated with this read.
+  unsigned RegisterID;
+  // Physical register file that serves register RegisterID.
+  unsigned PRFID;
+  // Number of writes that contribute to the definition of RegisterID.
+  // In the absence of partial register updates, the number of DependentWrites
+  // cannot be more than one.
+  unsigned DependentWrites;
+  // Number of cycles left before RegisterID can be read. This value depends on
+  // the latency of all the dependent writes. It defaults to UNKNOWN_CYCLES.
+  // It gets set to the value of field TotalCycles only when the 'CyclesLeft' of
+  // every dependent write is known.
+  int CyclesLeft;
+  // This field is updated on every writeStartEvent(). When the number of
+  // dependent writes (i.e. field DependentWrites) is zero, this value is
+  // propagated to field CyclesLeft.
+  unsigned TotalCycles;
+  // This field is set to true only if there are no dependent writes, and
+  // there are no cycles left to wait.
+  bool IsReady;
+  // True if this is a read from a known zero register.
+  bool IsZero;
+  // True if this register read is from a dependency-breaking instruction.
+  bool IndependentFromDef;
+
+public:
+  ReadState(const ReadDescriptor &Desc, unsigned RegID)
+      : RD(&Desc), RegisterID(RegID), PRFID(0), DependentWrites(0),
+        CyclesLeft(UNKNOWN_CYCLES), TotalCycles(0), IsReady(true),
+        IsZero(false), IndependentFromDef(false) {}
+
+  const ReadDescriptor &getDescriptor() const { return *RD; }
+  unsigned getSchedClass() const { return RD->SchedClassID; }
+  unsigned getRegisterID() const { return RegisterID; }
+  unsigned getRegisterFileID() const { return PRFID; }
+
+  bool isReady() const { return IsReady; }
+  bool isImplicitRead() const { return RD->isImplicitRead(); }
+
+  bool isIndependentFromDef() const { return IndependentFromDef; }
+  void setIndependentFromDef() { IndependentFromDef = true; }
+
+  void cycleEvent();
+  void writeStartEvent(unsigned Cycles);
+  void setDependentWrites(unsigned Writes) {
+    DependentWrites = Writes;
+    IsReady = !Writes;
+  }
+
+  bool isReadZero() const { return IsZero; }
+  void setReadZero() { IsZero = true; }
+  void setPRF(unsigned ID) { PRFID = ID; }
+};
+
+/// A sequence of cycles.
+///
+/// This class can be used as a building block to construct ranges of cycles.
+class CycleSegment {
+  unsigned Begin; // Inclusive.
+  unsigned End;   // Exclusive.
+  bool Reserved;  // Resources associated to this segment must be reserved.
+
+public:
+  CycleSegment(unsigned StartCycle, unsigned EndCycle, bool IsReserved = false)
+      : Begin(StartCycle), End(EndCycle), Reserved(IsReserved) {}
+
+  bool contains(unsigned Cycle) const { return Cycle >= Begin && Cycle < End; }
+  bool startsAfter(const CycleSegment &CS) const { return End <= CS.Begin; }
+  bool endsBefore(const CycleSegment &CS) const { return Begin >= CS.End; }
+  bool overlaps(const CycleSegment &CS) const {
+    return !startsAfter(CS) && !endsBefore(CS);
+  }
+  bool isExecuting() const { return Begin == 0 && End != 0; }
+  bool isExecuted() const { return End == 0; }
+  bool operator<(const CycleSegment &Other) const {
+    return Begin < Other.Begin;
+  }
+  CycleSegment &operator--(void) {
+    if (Begin)
+      Begin--;
+    if (End)
+      End--;
+    return *this;
+  }
+
+  bool isValid() const { return Begin <= End; }
+  unsigned size() const { return End - Begin; }
+  void subtract(unsigned Cycles) {
+    assert(End >= Cycles);
+    End -= Cycles;
+  }
+
+  unsigned begin() const { return Begin; }
+  unsigned end() const { return End; }
+  void setEnd(unsigned NewEnd) { End = NewEnd; }
+  bool isReserved() const { return Reserved; }
+  void setReserved() { Reserved = true; }
+};
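+
+// A worked example of CycleSegment arithmetic: segment [2, 5) contains cycles
+// 2, 3 and 4, and pre-decrement shifts the whole segment towards zero:
+//
+//   CycleSegment CS(2, 5);
+//   CS.contains(4);   // true (End is exclusive, so 5 is not contained).
+//   CS.size();        // 3 cycles.
+//   --CS;             // CS is now [1, 4).
+//   // CS.isExecuted() becomes true only once End reaches zero.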
+
+/// Helper used by class InstrDesc to describe how hardware resources
+/// are used.
+///
+/// This class describes how many resource units of a specific resource kind
+/// (and how many cycles) are "used" by an instruction.
+struct ResourceUsage {
+  CycleSegment CS;
+  unsigned NumUnits;
+  ResourceUsage(CycleSegment Cycles, unsigned Units = 1)
+      : CS(Cycles), NumUnits(Units) {}
+  unsigned size() const { return CS.size(); }
+  bool isReserved() const { return CS.isReserved(); }
+  void setReserved() { CS.setReserved(); }
+};
+
+/// An instruction descriptor.
+struct InstrDesc {
+  SmallVector<WriteDescriptor, 4> Writes; // Implicit writes are at the end.
+  SmallVector<ReadDescriptor, 4> Reads;   // Implicit reads are at the end.
+
+  // For every resource used by an instruction of this kind, this vector
+  // reports the number of "consumed cycles".
+  SmallVector<std::pair<uint64_t, ResourceUsage>, 4> Resources;
+
+  // A list of buffered resources consumed by this instruction.
+  SmallVector<uint64_t, 4> Buffers;
+
+  unsigned MaxLatency;
+  // Number of MicroOps for this instruction.
+  unsigned NumMicroOps;
+
+  bool MayLoad;
+  bool MayStore;
+  bool HasSideEffects;
+  bool BeginGroup;
+  bool EndGroup;
+
+  // True if all buffered resources are in-order, and there is at least one
+  // buffer which is a dispatch hazard (BufferSize = 0).
+  bool MustIssueImmediately;
+
+  // A zero latency instruction doesn't consume any scheduler resources.
+  bool isZeroLatency() const { return !MaxLatency && Resources.empty(); }
+
+  InstrDesc() = default;
+  InstrDesc(const InstrDesc &Other) = delete;
+  InstrDesc &operator=(const InstrDesc &Other) = delete;
+};
+
+/// Base class for instructions consumed by the simulation pipeline.
+///
+/// This class tracks data dependencies as well as generic properties
+/// of the instruction.
+class InstructionBase {
+  const InstrDesc &Desc;
+
+  // This field is set for instructions that are candidates for move
+  // elimination. For more information about move elimination, see the
+  // definition of RegisterMappingTracker in RegisterFile.h
+  bool IsOptimizableMove;
+
+  // Output dependencies.
+  // One entry per each implicit and explicit register definition.
+  SmallVector<WriteState, 4> Defs;
+
+  // Input dependencies.
+  // One entry per each implicit and explicit register use.
+  SmallVector<ReadState, 4> Uses;
+
+public:
+  InstructionBase(const InstrDesc &D) : Desc(D), IsOptimizableMove(false) {}
+
+  SmallVectorImpl<WriteState> &getDefs() { return Defs; }
+  const ArrayRef<WriteState> getDefs() const { return Defs; }
+  SmallVectorImpl<ReadState> &getUses() { return Uses; }
+  const ArrayRef<ReadState> getUses() const { return Uses; }
+  const InstrDesc &getDesc() const { return Desc; }
+
+  unsigned getLatency() const { return Desc.MaxLatency; }
+
+  bool hasDependentUsers() const {
+    return any_of(Defs,
+                  [](const WriteState &Def) { return Def.getNumUsers() > 0; });
+  }
+
+  unsigned getNumUsers() const {
+    unsigned NumUsers = 0;
+    for (const WriteState &Def : Defs)
+      NumUsers += Def.getNumUsers();
+    return NumUsers;
+  }
+
+  // Returns true if this instruction is a candidate for move elimination.
+  bool isOptimizableMove() const { return IsOptimizableMove; }
+  void setOptimizableMove() { IsOptimizableMove = true; }
+};
+
+/// An instruction propagated through the simulated instruction pipeline.
+///
+/// This class is used to monitor changes to the internal state of instructions
+/// that are sent to the various components of the simulated hardware pipeline.
+class Instruction : public InstructionBase {
+  enum InstrStage {
+    IS_INVALID,   // Instruction in an invalid state.
+    IS_AVAILABLE, // Instruction dispatched but operands are not ready.
+    IS_READY,     // Instruction dispatched and operands ready.
+    IS_EXECUTING, // Instruction issued.
+    IS_EXECUTED,  // Instruction executed. Values are written back.
+    IS_RETIRED    // Instruction retired.
+  };
+
+  // The current instruction stage.
+  enum InstrStage Stage;
+
+  // This value defaults to the instruction latency. This instruction is
+  // considered executed when field CyclesLeft goes to zero.
+  int CyclesLeft;
+
+  // Retire Unit token ID for this instruction.
+  unsigned RCUTokenID;
+
+public:
+  Instruction(const InstrDesc &D)
+      : InstructionBase(D), Stage(IS_INVALID), CyclesLeft(UNKNOWN_CYCLES),
+        RCUTokenID(0) {}
+
+  unsigned getRCUTokenID() const { return RCUTokenID; }
+  int getCyclesLeft() const { return CyclesLeft; }
+
+  // Transition to the dispatch stage, and assign a RCUToken to this
+  // instruction. The RCUToken is used to track the completion of every
+  // register write performed by this instruction.
+  void dispatch(unsigned RCUTokenID);
+
+  // Instruction issued. Transition to the IS_EXECUTING state, and update
+  // all the definitions.
+  void execute();
+
+  // Force a transition from the IS_AVAILABLE state to the IS_READY state if
+  // input operands are all ready. State transitions normally occur at the
+  // beginning of a new cycle (see method cycleEvent()). However, the scheduler
+  // may decide to promote instructions from the wait queue to the ready queue
+  // as the result of another issue event.  This method is called every time
+  // the instruction might have changed state.
+  void update();
+
+  bool isDispatched() const { return Stage == IS_AVAILABLE; }
+  bool isReady() const { return Stage == IS_READY; }
+  bool isExecuting() const { return Stage == IS_EXECUTING; }
+  bool isExecuted() const { return Stage == IS_EXECUTED; }
+  bool isRetired() const { return Stage == IS_RETIRED; }
+
+  bool isEliminated() const {
+    return isReady() && getDefs().size() &&
+           all_of(getDefs(),
+                  [](const WriteState &W) { return W.isEliminated(); });
+  }
+
+  // Forces a transition from state IS_AVAILABLE to state IS_EXECUTED.
+  void forceExecuted();
+
+  void retire() {
+    assert(isExecuted() && "Instruction is in an invalid state!");
+    Stage = IS_RETIRED;
+  }
+
+  void cycleEvent();
+};
+
+/// An InstRef pairs a SourceMgr index with an Instruction pointer. The index
+/// is used as a unique identifier for the instruction, and llvm-mca uses it
+/// as a key throughout the tool.
+class InstRef {
+  std::pair<unsigned, Instruction *> Data;
+
+public:
+  InstRef() : Data(std::make_pair(0, nullptr)) {}
+  InstRef(unsigned Index, Instruction *I) : Data(std::make_pair(Index, I)) {}
+
+  bool operator==(const InstRef &Other) const { return Data == Other.Data; }
+
+  unsigned getSourceIndex() const { return Data.first; }
+  Instruction *getInstruction() { return Data.second; }
+  const Instruction *getInstruction() const { return Data.second; }
+
+  /// Returns true if this references a valid instruction.
+  operator bool() const { return Data.second != nullptr; }
+
+  /// Invalidate this reference.
+  void invalidate() { Data.second = nullptr; }
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const { OS << getSourceIndex(); }
+#endif
+};
+
+#ifndef NDEBUG
+inline raw_ostream &operator<<(raw_ostream &OS, const InstRef &IR) {
+  IR.print(OS);
+  return OS;
+}
+#endif
+
+/// A reference to a register write.
+///
+/// This class is mainly used by the register file to describe register
+/// mappings. It correlates a register write to the source index of the
+/// defining instruction.
+class WriteRef {
+  std::pair<unsigned, WriteState *> Data;
+  static const unsigned INVALID_IID;
+
+public:
+  WriteRef() : Data(INVALID_IID, nullptr) {}
+  WriteRef(unsigned SourceIndex, WriteState *WS) : Data(SourceIndex, WS) {}
+
+  unsigned getSourceIndex() const { return Data.first; }
+  const WriteState *getWriteState() const { return Data.second; }
+  WriteState *getWriteState() { return Data.second; }
+  void invalidate() { Data.second = nullptr; }
+  bool isWriteZero() const {
+    assert(isValid() && "Invalid null WriteState found!");
+    return getWriteState()->isWriteZero();
+  }
+
+  /// Returns true if this register write has been executed, and the new
+  /// register value is therefore available to users.
+  bool isAvailable() const {
+    if (getSourceIndex() == INVALID_IID)
+      return false;
+    const WriteState *WS = getWriteState();
+    return !WS || WS->isExecuted();
+  }
+
+  bool isValid() const { return Data.first != INVALID_IID && Data.second; }
+  bool operator==(const WriteRef &Other) const { return Data == Other.Data; }
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_INSTRUCTION_H
diff --git a/include/llvm/MCA/Pipeline.h b/include/llvm/MCA/Pipeline.h
new file mode 100644
index 0000000..acd2560
--- /dev/null
+++ b/include/llvm/MCA/Pipeline.h
@@ -0,0 +1,79 @@
+//===--------------------- Pipeline.h ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements an ordered container of stages that simulate the
+/// pipeline of a hardware backend.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_PIPELINE_H
+#define LLVM_MCA_PIPELINE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Stages/Stage.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+class HWEventListener;
+
+/// A pipeline for a specific subtarget.
+///
+/// It emulates an out-of-order execution of instructions. Instructions are
+/// fetched from a MCInst sequence managed by an initial 'Fetch' stage.
+/// Instructions are first fetched, then dispatched to the schedulers, and
+/// then executed.
+///
+/// This class tracks the lifetime of an instruction from the moment where
+/// it gets dispatched to the schedulers, to the moment where it finishes
+/// executing and register writes are architecturally committed.
+/// In particular, it monitors changes in the state of every instruction
+/// in flight.
+///
+/// Instructions are executed in a loop of iterations. The number of iterations
+/// is defined by the SourceMgr object, which is managed by the initial stage
+/// of the instruction pipeline.
+///
+/// The Pipeline entry point is method 'run()', which executes cycles in a loop
+/// until there are no new instructions to dispatch and every instruction has
+/// been retired.
+///
+/// Internally, the Pipeline collects statistical information in the form of
+/// histograms. For example, it tracks how the dispatch group size changes
+/// over time.
+class Pipeline {
+  Pipeline(const Pipeline &P) = delete;
+  Pipeline &operator=(const Pipeline &P) = delete;
+
+  /// An ordered list of stages that define this instruction pipeline.
+  SmallVector<std::unique_ptr<Stage>, 8> Stages;
+  std::set<HWEventListener *> Listeners;
+  unsigned Cycles;
+
+  Error runCycle();
+  bool hasWorkToProcess();
+  void notifyCycleBegin();
+  void notifyCycleEnd();
+
+public:
+  Pipeline() : Cycles(0) {}
+  void appendStage(std::unique_ptr<Stage> S);
+
+  /// Returns the total number of simulated cycles.
+  Expected<unsigned> run();
+
+  void addEventListener(HWEventListener *Listener);
+};
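+
+// A sketch of stage composition; constructor arguments are elided because they
+// depend on the surrounding driver:
+//
+//   Pipeline P;
+//   P.appendStage(llvm::make_unique<EntryStage>(/*...*/));
+//   P.appendStage(llvm::make_unique<DispatchStage>(/*...*/));
+//   P.appendStage(llvm::make_unique<ExecuteStage>(/*...*/));
+//   Expected<unsigned> Cycles = P.run();
+//   if (!Cycles)
+//     return Cycles.takeError();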
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_PIPELINE_H
diff --git a/include/llvm/MCA/SourceMgr.h b/include/llvm/MCA/SourceMgr.h
new file mode 100644
index 0000000..5e0ca64
--- /dev/null
+++ b/include/llvm/MCA/SourceMgr.h
@@ -0,0 +1,57 @@
+//===--------------------- SourceMgr.h --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements class SourceMgr. Class SourceMgr abstracts the input
+/// code sequence (a sequence of MCInst), and assigns unique identifiers to
+/// every instruction in the sequence.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_SOURCEMGR_H
+#define LLVM_MCA_SOURCEMGR_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+namespace mca {
+
+class Instruction;
+
+typedef std::pair<unsigned, const Instruction &> SourceRef;
+
+class SourceMgr {
+  using UniqueInst = std::unique_ptr<Instruction>;
+  ArrayRef<UniqueInst> Sequence;
+  unsigned Current;
+  const unsigned Iterations;
+  static const unsigned DefaultIterations = 100;
+
+public:
+  SourceMgr(ArrayRef<UniqueInst> S, unsigned Iter)
+      : Sequence(S), Current(0), Iterations(Iter ? Iter : DefaultIterations) {}
+
+  unsigned getNumIterations() const { return Iterations; }
+  unsigned size() const { return Sequence.size(); }
+  bool hasNext() const { return Current < (Iterations * Sequence.size()); }
+  void updateNext() { ++Current; }
+
+  SourceRef peekNext() const {
+    assert(hasNext() && "Already at end of sequence!");
+    return SourceRef(Current, *Sequence[Current % Sequence.size()]);
+  }
+
+  using const_iterator = ArrayRef<UniqueInst>::const_iterator;
+  const_iterator begin() const { return Sequence.begin(); }
+  const_iterator end() const { return Sequence.end(); }
+};
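+
+// A worked example of the iteration semantics: with a sequence of 3
+// instructions and Iter == 2, hasNext() holds for Current in [0, 6), and
+// peekNext() wraps around the sequence modulo its size:
+//
+//   SourceMgr SM(Insts, /*Iter=*/2);   // 'Insts' holds 3 instructions.
+//   while (SM.hasNext()) {
+//     SourceRef SR = SM.peekNext();    // SR.first takes values 0 through 5.
+//     SM.updateNext();
+//   }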
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_SOURCEMGR_H
diff --git a/include/llvm/MCA/Stages/DispatchStage.h b/include/llvm/MCA/Stages/DispatchStage.h
new file mode 100644
index 0000000..f015cd7
--- /dev/null
+++ b/include/llvm/MCA/Stages/DispatchStage.h
@@ -0,0 +1,93 @@
+//===----------------------- DispatchStage.h --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file models the dispatch component of an instruction pipeline.
+///
+/// The DispatchStage is responsible for updating instruction dependencies
+/// and communicating to the simulated instruction scheduler that an instruction
+/// is ready to be scheduled for execution.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_DISPATCH_STAGE_H
+#define LLVM_MCA_DISPATCH_STAGE_H
+
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+// Implements the hardware dispatch logic.
+//
+// This class is responsible for the dispatch stage, in which instructions are
+// dispatched in groups to the Scheduler.  An instruction can be dispatched if
+// the following conditions are met:
+//  1) There are enough entries in the reorder buffer (see class
+//     RetireControlUnit) to write the opcodes associated with the instruction.
+//  2) There are enough physical registers to rename output register operands.
+//  3) There are enough entries available in the used buffered resource(s).
+//
+// The number of micro opcodes that can be dispatched in one cycle is limited by
+// the value of field 'DispatchWidth'. A "dynamic dispatch stall" occurs when
+// processor resources are not available. Dispatch stall events are counted
+// during the entire execution of the code, and displayed by the performance
+// report when flag '-dispatch-stats' is specified.
+//
+// If the number of micro opcodes exceeds DispatchWidth, then the instruction
+// is dispatched in multiple cycles.
+class DispatchStage final : public Stage {
+  unsigned DispatchWidth;
+  unsigned AvailableEntries;
+  unsigned CarryOver;
+  InstRef CarriedOver;
+  const MCSubtargetInfo &STI;
+  RetireControlUnit &RCU;
+  RegisterFile &PRF;
+
+  bool checkRCU(const InstRef &IR) const;
+  bool checkPRF(const InstRef &IR) const;
+  bool canDispatch(const InstRef &IR) const;
+  Error dispatch(InstRef IR);
+
+  void updateRAWDependencies(ReadState &RS, const MCSubtargetInfo &STI);
+
+  void notifyInstructionDispatched(const InstRef &IR,
+                                   ArrayRef<unsigned> UsedPhysRegs,
+                                   unsigned uOps) const;
+
+public:
+  DispatchStage(const MCSubtargetInfo &Subtarget, const MCRegisterInfo &MRI,
+                unsigned MaxDispatchWidth, RetireControlUnit &R,
+                RegisterFile &F)
+      : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
+        CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {}
+
+  bool isAvailable(const InstRef &IR) const override;
+
+  // The dispatch logic internally doesn't buffer instructions. So there is
+  // never any work to do at the beginning of a cycle.
+  bool hasWorkToComplete() const override { return false; }
+  Error cycleStart() override;
+  Error execute(InstRef &IR) override;
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
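+
+// A worked example of the carry-over behavior described above:
+//
+//   DispatchWidth = 4, instruction declares NumMicroOps = 6.
+//   Cycle 0: 4 micro opcodes are dispatched; CarryOver = 6 - 4 = 2.
+//   Cycle 1: the remaining 2 micro opcodes are dispatched before any new
+//            instruction, leaving 2 entries of dispatch bandwidth.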
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_DISPATCH_STAGE_H
diff --git a/include/llvm/MCA/Stages/EntryStage.h b/include/llvm/MCA/Stages/EntryStage.h
new file mode 100644
index 0000000..cd9a65b
--- /dev/null
+++ b/include/llvm/MCA/Stages/EntryStage.h
@@ -0,0 +1,52 @@
+//===---------------------- EntryStage.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the Entry stage of an instruction pipeline.  Its sole
+/// purpose in life is to pick instructions in sequence and move them to the
+/// next pipeline stage.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_ENTRY_STAGE_H
+#define LLVM_MCA_ENTRY_STAGE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MCA/SourceMgr.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+class EntryStage final : public Stage {
+  InstRef CurrentInstruction;
+  SmallVector<std::unique_ptr<Instruction>, 16> Instructions;
+  SourceMgr &SM;
+  unsigned NumRetired;
+
+  // Updates the program counter, and sets 'CurrentInstruction'.
+  void getNextInstruction();
+
+  EntryStage(const EntryStage &Other) = delete;
+  EntryStage &operator=(const EntryStage &Other) = delete;
+
+public:
+  EntryStage(SourceMgr &SM) : CurrentInstruction(), SM(SM), NumRetired(0) { }
+
+  bool isAvailable(const InstRef &IR) const override;
+  bool hasWorkToComplete() const override;
+  Error execute(InstRef &IR) override;
+  Error cycleStart() override;
+  Error cycleEnd() override;
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_ENTRY_STAGE_H
diff --git a/include/llvm/MCA/Stages/ExecuteStage.h b/include/llvm/MCA/Stages/ExecuteStage.h
new file mode 100644
index 0000000..8cb287e
--- /dev/null
+++ b/include/llvm/MCA/Stages/ExecuteStage.h
@@ -0,0 +1,80 @@
+//===---------------------- ExecuteStage.h ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the execution stage of a default instruction pipeline.
+///
+/// The ExecuteStage is responsible for managing the hardware scheduler
+/// and issuing notifications that an instruction has been executed.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_EXECUTE_STAGE_H
+#define LLVM_MCA_EXECUTE_STAGE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+class ExecuteStage final : public Stage {
+  Scheduler &HWS;
+
+  Error issueInstruction(InstRef &IR);
+
+  // Called at the beginning of each cycle to issue already dispatched
+  // instructions to the underlying pipelines.
+  Error issueReadyInstructions();
+
+  // Used to notify listeners about instructions eliminated at the register
+  // renaming stage.
+  Error handleInstructionEliminated(InstRef &IR);
+
+  ExecuteStage(const ExecuteStage &Other) = delete;
+  ExecuteStage &operator=(const ExecuteStage &Other) = delete;
+
+public:
+  ExecuteStage(Scheduler &S) : Stage(), HWS(S) {}
+
+  // This stage works under the assumption that the Pipeline will eventually
+  // execute a retire stage. We don't need to check if pipelines and/or
+  // schedulers have instructions to process, because those instructions are
+  // also tracked by the retire control unit. That means,
+  // RetireControlUnit::hasWorkToComplete() is responsible for checking if there
+  // are still instructions in-flight in the out-of-order backend.
+  bool hasWorkToComplete() const override { return false; }
+  bool isAvailable(const InstRef &IR) const override;
+
+  // Notifies the scheduler that a new cycle just started.
+  //
+  // This method is also responsible for notifying listeners about instruction
+  // state changes and about processor resources freed by the scheduler.
+  // Instructions that transitioned to the 'Executed' state are automatically
+  // moved to the next stage (i.e. RetireStage).
+  Error cycleStart() override;
+  Error execute(InstRef &IR) override;
+
+  void notifyInstructionIssued(
+      const InstRef &IR,
+      MutableArrayRef<std::pair<ResourceRef, ResourceCycles>> Used) const;
+  void notifyInstructionExecuted(const InstRef &IR) const;
+  void notifyInstructionReady(const InstRef &IR) const;
+  void notifyResourceAvailable(const ResourceRef &RR) const;
+
+  // Notify listeners that buffered resources have been consumed or freed.
+  void notifyReservedOrReleasedBuffers(const InstRef &IR, bool Reserved) const;
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_EXECUTE_STAGE_H
diff --git a/include/llvm/MCA/Stages/InstructionTables.h b/include/llvm/MCA/Stages/InstructionTables.h
new file mode 100644
index 0000000..34e338f
--- /dev/null
+++ b/include/llvm/MCA/Stages/InstructionTables.h
@@ -0,0 +1,46 @@
+//===--------------------- InstructionTables.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements a custom stage to generate instruction tables.
+/// See the description of command-line flag -instruction-tables in
+/// docs/CommandGuide/llvm-mca.rst
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_INSTRUCTIONTABLES_H
+#define LLVM_MCA_INSTRUCTIONTABLES_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Stages/Stage.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+class InstructionTables final : public Stage {
+  const MCSchedModel &SM;
+  SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> UsedResources;
+  SmallVector<uint64_t, 8> Masks;
+
+public:
+  InstructionTables(const MCSchedModel &Model)
+      : Stage(), SM(Model), Masks(Model.getNumProcResourceKinds()) {
+    computeProcResourceMasks(Model, Masks);
+  }
+
+  bool hasWorkToComplete() const override { return false; }
+  Error execute(InstRef &IR) override;
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_INSTRUCTIONTABLES_H
diff --git a/include/llvm/MCA/Stages/RetireStage.h b/include/llvm/MCA/Stages/RetireStage.h
new file mode 100644
index 0000000..2051ce5
--- /dev/null
+++ b/include/llvm/MCA/Stages/RetireStage.h
@@ -0,0 +1,48 @@
+//===---------------------- RetireStage.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the retire stage of a default instruction pipeline.
+/// The RetireStage represents the process logic that interacts with the
+/// simulated RetireControlUnit hardware.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_RETIRE_STAGE_H
+#define LLVM_MCA_RETIRE_STAGE_H
+
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+class RetireStage final : public Stage {
+  // Owner will go away when we move listeners/eventing to the stages.
+  RetireControlUnit &RCU;
+  RegisterFile &PRF;
+
+  RetireStage(const RetireStage &Other) = delete;
+  RetireStage &operator=(const RetireStage &Other) = delete;
+
+public:
+  RetireStage(RetireControlUnit &R, RegisterFile &F)
+      : Stage(), RCU(R), PRF(F) {}
+
+  bool hasWorkToComplete() const override { return !RCU.isEmpty(); }
+  Error cycleStart() override;
+  Error execute(InstRef &IR) override;
+  void notifyInstructionRetired(const InstRef &IR) const;
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_RETIRE_STAGE_H
diff --git a/include/llvm/MCA/Stages/Stage.h b/include/llvm/MCA/Stages/Stage.h
new file mode 100644
index 0000000..fc7ab56
--- /dev/null
+++ b/include/llvm/MCA/Stages/Stage.h
@@ -0,0 +1,88 @@
+//===---------------------- Stage.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a stage.
+/// A chain of stages composes an instruction pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_STAGE_H
+#define LLVM_MCA_STAGE_H
+
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/Support/Error.h"
+#include <set>
+
+namespace llvm {
+namespace mca {
+
+class InstRef;
+
+class Stage {
+  Stage *NextInSequence;
+  std::set<HWEventListener *> Listeners;
+
+  Stage(const Stage &Other) = delete;
+  Stage &operator=(const Stage &Other) = delete;
+
+protected:
+  const std::set<HWEventListener *> &getListeners() const { return Listeners; }
+
+public:
+  Stage() : NextInSequence(nullptr) {}
+  virtual ~Stage();
+
+  /// Returns true if this stage can execute \p IR during this cycle.
+  virtual bool isAvailable(const InstRef &IR) const { return true; }
+
+  /// Returns true if some instructions are still executing in this stage.
+  virtual bool hasWorkToComplete() const = 0;
+
+  /// Called once at the start of each cycle.  This can be used as a setup
+  /// phase to prepare for the executions during the cycle.
+  virtual Error cycleStart() { return ErrorSuccess(); }
+
+  /// Called once at the end of each cycle.
+  virtual Error cycleEnd() { return ErrorSuccess(); }
+
+  /// The primary action that this stage performs on instruction IR.
+  virtual Error execute(InstRef &IR) = 0;
+
+  void setNextInSequence(Stage *NextStage) {
+    assert(!NextInSequence && "This stage already has a NextInSequence!");
+    NextInSequence = NextStage;
+  }
+
+  bool checkNextStage(const InstRef &IR) const {
+    return NextInSequence && NextInSequence->isAvailable(IR);
+  }
+
+  /// Called when an instruction is ready to move to the next pipeline stage.
+  ///
+  /// Stages are responsible for moving instructions to their immediate
+  /// successor stages.
+  Error moveToTheNextStage(InstRef &IR) {
+    assert(checkNextStage(IR) && "Next stage is not ready!");
+    return NextInSequence->execute(IR);
+  }
+
+  /// Add a listener to receive callbacks during the execution of this stage.
+  void addListener(HWEventListener *Listener);
+
+  /// Notify listeners of a particular hardware event.
+  template <typename EventT> void notifyEvent(const EventT &Event) const {
+    for (HWEventListener *Listener : Listeners)
+      Listener->onEvent(Event);
+  }
+};
+
+} // namespace mca
+} // namespace llvm
+#endif // LLVM_MCA_STAGE_H
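
A minimal sketch (not part of this patch) of how stages chain through
setNextInSequence/moveToTheNextStage; the ForwardingStage class is purely
hypothetical and only exercises the interface declared above.

    #include "llvm/MCA/Stages/Stage.h"

    namespace {
    // Hypothetical stage that simply forwards instructions to its successor.
    class ForwardingStage final : public llvm::mca::Stage {
    public:
      bool hasWorkToComplete() const override { return false; }
      llvm::Error execute(llvm::mca::InstRef &IR) override {
        // Hand IR to the next stage only if it can accept it this cycle.
        if (checkNextStage(IR))
          return moveToTheNextStage(IR);
        return llvm::ErrorSuccess();
      }
    };
    } // namespace

    // Wiring: First.setNextInSequence(&Second); a successful First.execute(IR)
    // then pushes IR into Second, and listeners added via addListener observe
    // events published with notifyEvent.
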
diff --git a/include/llvm/MCA/Support.h b/include/llvm/MCA/Support.h
new file mode 100644
index 0000000..7b0c5bf
--- /dev/null
+++ b/include/llvm/MCA/Support.h
@@ -0,0 +1,119 @@
+//===--------------------- Support.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Helper functions used by various pipeline components.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_SUPPORT_H
+#define LLVM_MCA_SUPPORT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+template <typename T>
+class InstructionError : public ErrorInfo<InstructionError<T>> {
+public:
+  static char ID;
+  std::string Message;
+  const T &Inst;
+
+  InstructionError(std::string M, const T &MCI)
+      : Message(std::move(M)), Inst(MCI) {}
+
+  void log(raw_ostream &OS) const override { OS << Message; }
+
+  std::error_code convertToErrorCode() const override {
+    return inconvertibleErrorCode();
+  }
+};
+
+template <typename T> char InstructionError<T>::ID;
+
+/// This class represents the number of cycles per resource (fractions of
+/// cycles).  That quantity is managed here as a ratio, and accessed via the
+/// double cast-operator below.  The two quantities, number of cycles and
+/// number of resources, are kept separate.  This is used by the
+/// ResourcePressureView to calculate the average resource cycles
+/// per instruction/iteration.
+class ResourceCycles {
+  unsigned Numerator, Denominator;
+
+public:
+  ResourceCycles() : Numerator(0), Denominator(1) {}
+  ResourceCycles(unsigned Cycles, unsigned ResourceUnits = 1)
+      : Numerator(Cycles), Denominator(ResourceUnits) {}
+
+  operator double() const {
+    assert(Denominator && "Invalid denominator (must be non-zero).");
+    return (Denominator == 1) ? Numerator : (double)Numerator / Denominator;
+  }
+
+  // Add the components of RHS to this instance.  Instead of calculating
+  // the final value here, we keep track of the numerator and denominator
+  // separately, to reduce floating point error.
+  ResourceCycles &operator+=(const ResourceCycles &RHS) {
+    if (Denominator == RHS.Denominator)
+      Numerator += RHS.Numerator;
+    else {
+      // Create a common denominator for LHS and RHS by calculating the least
+      // common multiple from the GCD.
+      unsigned GCD = GreatestCommonDivisor64(Denominator, RHS.Denominator);
+      unsigned LCM = (Denominator * RHS.Denominator) / GCD;
+      unsigned LHSNumerator = Numerator * (LCM / Denominator);
+      unsigned RHSNumerator = RHS.Numerator * (LCM / RHS.Denominator);
+      Numerator = LHSNumerator + RHSNumerator;
+      Denominator = LCM;
+    }
+    return *this;
+  }
+};
+
+/// Populates vector Masks with processor resource masks.
+///
+/// The number of bits set in a mask depends on the processor resource type.
+/// Each processor resource mask has at least one bit set. For groups, the
+/// number of bits set in the mask is equal to the cardinality of the group plus
+/// one. Excluding the most significant bit, the remaining bits in the mask
+/// identify processor resources that are part of the group.
+///
+/// Example:
+///
+///  ResourceA  -- Mask: 0b001
+///  ResourceB  -- Mask: 0b010
+///  ResourceAB -- Mask: 0b100 U (ResourceA::Mask | ResourceB::Mask) == 0b111
+///
+/// ResourceAB is a processor resource group containing ResourceA and ResourceB.
+/// Each resource mask uniquely identifies a resource; both ResourceA and
+/// ResourceB only have one bit set.
+/// ResourceAB is a group; excluding the most significant bit in the mask, the
+/// remaining bits identify the composition of the group.
+///
+/// Resource masks are used by the ResourceManager to solve set membership
+/// problems with simple bit manipulation operations.
+void computeProcResourceMasks(const MCSchedModel &SM,
+                              MutableArrayRef<uint64_t> Masks);
+
+/// Compute the reciprocal block throughput from a set of processor resource
+/// cycles. The reciprocal block throughput is computed as the MAX between:
+///  - NumMicroOps / DispatchWidth
+///  - ProcResourceCycles / #ProcResourceUnits  (for every consumed resource).
+double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
+                               unsigned NumMicroOps,
+                               ArrayRef<unsigned> ProcResourceUsage);
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_SUPPORT_H
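
A worked example of the exact fractional accumulation performed by
ResourceCycles::operator+= above (the helper function name is illustrative):
1 cycle over 2 units plus 1 cycle over 3 units is kept as 5/6 and only
rounded when read through the double conversion operator.

    #include "llvm/MCA/Support.h"

    double accumulateSketch() {
      llvm::mca::ResourceCycles RC(1, 2);    // 1 cycle over 2 units (1/2)
      RC += llvm::mca::ResourceCycles(1, 3); // LCM(2,3)=6 -> 3/6 + 2/6 = 5/6
      return RC;                             // ~0.8333 via operator double()
    }
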
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index 0547846..b753d26 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -971,6 +971,9 @@
       return nullptr;
     return reinterpret_cast<const dos_header *>(base());
   }
+  std::error_code getCOFFHeader(const coff_file_header *&Res) const;
+  std::error_code
+  getCOFFBigObjHeader(const coff_bigobj_file_header *&Res) const;
   std::error_code getPE32Header(const pe32_header *&Res) const;
   std::error_code getPE32PlusHeader(const pe32plus_header *&Res) const;
   std::error_code getDataDirectory(uint32_t index,
@@ -1019,6 +1022,8 @@
 
   ArrayRef<uint8_t> getSymbolAuxData(COFFSymbolRef Symbol) const;
 
+  uint32_t getSymbolIndex(COFFSymbolRef Symbol) const;
+
   size_t getSymbolTableEntrySize() const {
     if (COFFHeader)
       return sizeof(coff_symbol16);
diff --git a/include/llvm/Object/Error.h b/include/llvm/Object/Error.h
index eb93833..a15f8b9 100644
--- a/include/llvm/Object/Error.h
+++ b/include/llvm/Object/Error.h
@@ -50,6 +50,7 @@
 /// Currently inherits from ECError for easy interoperability with
 /// std::error_code, but this will be removed in the future.
 class BinaryError : public ErrorInfo<BinaryError, ECError> {
+  virtual void anchor();
 public:
   static char ID;
   BinaryError() {
diff --git a/include/llvm/Object/MachO.h b/include/llvm/Object/MachO.h
index 159c176..c2f4f40 100644
--- a/include/llvm/Object/MachO.h
+++ b/include/llvm/Object/MachO.h
@@ -356,7 +356,7 @@
   basic_symbol_iterator symbol_end() const override;
 
   // MachO specific.
-  basic_symbol_iterator getSymbolByIndex(unsigned Index) const;
+  symbol_iterator getSymbolByIndex(unsigned Index) const;
   uint64_t getSymbolIndex(DataRefImpl Symb) const;
 
   section_iterator section_begin() const override;
@@ -616,6 +616,9 @@
     case MachO::PLATFORM_TVOS: return "tvos";
     case MachO::PLATFORM_WATCHOS: return "watchos";
     case MachO::PLATFORM_BRIDGEOS: return "bridgeos";
+    case MachO::PLATFORM_IOSSIMULATOR: return "iossimulator";
+    case MachO::PLATFORM_TVOSSIMULATOR: return "tvossimulator";
+    case MachO::PLATFORM_WATCHOSSIMULATOR: return "watchossimulator";
     default:
       std::string ret;
       raw_string_ostream ss(ret);
diff --git a/include/llvm/Object/RelocVisitor.h b/include/llvm/Object/RelocVisitor.h
index 23e796c..9a978de 100644
--- a/include/llvm/Object/RelocVisitor.h
+++ b/include/llvm/Object/RelocVisitor.h
@@ -129,6 +129,8 @@
     case ELF::R_X86_64_NONE:
       return 0;
     case ELF::R_X86_64_64:
+    case ELF::R_X86_64_DTPOFF32:
+    case ELF::R_X86_64_DTPOFF64:
       return Value + getELFAddend(R);
     case ELF::R_X86_64_PC32:
       return Value + getELFAddend(R) - R.getOffset();
diff --git a/include/llvm/Object/Wasm.h b/include/llvm/Object/Wasm.h
index 2edb938..ed85765 100644
--- a/include/llvm/Object/Wasm.h
+++ b/include/llvm/Object/Wasm.h
@@ -283,6 +283,49 @@
   uint32_t EventSection = 0;
 };
 
+class WasmSectionOrderChecker {
+public:
+  // We define orders for all core wasm sections and known custom sections.
+  enum : int {
+    // Core sections
+    // The order of standard sections is precisely given by the spec.
+    WASM_SEC_ORDER_TYPE = 1,
+    WASM_SEC_ORDER_IMPORT = 2,
+    WASM_SEC_ORDER_FUNCTION = 3,
+    WASM_SEC_ORDER_TABLE = 4,
+    WASM_SEC_ORDER_MEMORY = 5,
+    WASM_SEC_ORDER_GLOBAL = 6,
+    WASM_SEC_ORDER_EVENT = 7,
+    WASM_SEC_ORDER_EXPORT = 8,
+    WASM_SEC_ORDER_START = 9,
+    WASM_SEC_ORDER_ELEM = 10,
+    WASM_SEC_ORDER_DATACOUNT = 11,
+    WASM_SEC_ORDER_CODE = 12,
+    WASM_SEC_ORDER_DATA = 13,
+
+    // Custom sections
+    // "dylink" should be the very first section in the module
+    WASM_SEC_ORDER_DYLINK = 0,
+    // "linking" section requires DATA section in order to validate data symbols
+    WASM_SEC_ORDER_LINKING = 100,
+    // Must come after "linking" section in order to validate reloc indexes.
+    WASM_SEC_ORDER_RELOC = 101,
+    // "name" section must appear after DATA. Comes after "linking" to allow
+    // symbol table to set default function name.
+    WASM_SEC_ORDER_NAME = 102,
+    // "producers" section must appear after "name" section.
+    WASM_SEC_ORDER_PRODUCERS = 103
+  };
+
+  bool isValidSectionOrder(unsigned ID, StringRef CustomSectionName = "");
+
+private:
+  int LastOrder = -1; // Order of the most recently seen known section
+
+  // Returns -1 for unknown sections.
+  int getSectionOrder(unsigned ID, StringRef CustomSectionName = "");
+};
+
 } // end namespace object
 
 inline raw_ostream &operator<<(raw_ostream &OS, const object::WasmSymbol &Sym) {
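
A hedged sketch of driving the checker above. It assumes the core section IDs
from llvm/BinaryFormat/Wasm.h (WASM_SEC_TYPE = 1, WASM_SEC_IMPORT = 2); the
function itself is illustrative, not part of the patch.

    #include "llvm/BinaryFormat/Wasm.h"
    #include "llvm/Object/Wasm.h"

    bool checkOrderSketch() {
      llvm::object::WasmSectionOrderChecker Checker;
      // TYPE (order 1) followed by IMPORT (order 2) is accepted...
      bool OK = Checker.isValidSectionOrder(llvm::wasm::WASM_SEC_TYPE) &&
                Checker.isValidSectionOrder(llvm::wasm::WASM_SEC_IMPORT);
      // ...but a TYPE section appearing again after IMPORT must be rejected.
      return OK && !Checker.isValidSectionOrder(llvm::wasm::WASM_SEC_TYPE);
    }
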
diff --git a/include/llvm/ObjectYAML/COFFYAML.h b/include/llvm/ObjectYAML/COFFYAML.h
index 78f021f..253c627 100644
--- a/include/llvm/ObjectYAML/COFFYAML.h
+++ b/include/llvm/ObjectYAML/COFFYAML.h
@@ -58,7 +58,13 @@
 struct Relocation {
   uint32_t VirtualAddress;
   uint16_t Type;
+
+  // Normally a Relocation can refer to the symbol via its name.
+  // It can also use a direct symbol table index instead (with no name
+  // specified), which allows disambiguating between multiple symbols with
+  // the same name, or crafting intentionally broken files for testing.
   StringRef SymbolName;
+  Optional<uint32_t> SymbolTableIndex;
 };
 
 struct Section {
diff --git a/include/llvm/ObjectYAML/ELFYAML.h b/include/llvm/ObjectYAML/ELFYAML.h
index 92081f0..f2b0c35 100644
--- a/include/llvm/ObjectYAML/ELFYAML.h
+++ b/include/llvm/ObjectYAML/ELFYAML.h
@@ -68,6 +68,7 @@
   ELF_ELFCLASS Class;
   ELF_ELFDATA Data;
   ELF_ELFOSABI OSABI;
+  llvm::yaml::Hex8 ABIVersion;
   ELF_ET Type;
   ELF_EM Machine;
   ELF_EF Flags;
diff --git a/include/llvm/Passes/StandardInstrumentations.h b/include/llvm/Passes/StandardInstrumentations.h
index b6a73b0..8c6f5e1 100644
--- a/include/llvm/Passes/StandardInstrumentations.h
+++ b/include/llvm/Passes/StandardInstrumentations.h
@@ -16,14 +16,48 @@
 #ifndef LLVM_PASSES_STANDARDINSTRUMENTATIONS_H
 #define LLVM_PASSES_STANDARDINSTRUMENTATIONS_H
 
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/PassInstrumentation.h"
 #include "llvm/IR/PassTimingInfo.h"
 
+#include <string>
+#include <utility>
+
 namespace llvm {
 
+class Module;
+
+/// Instrumentation to print IR before/after passes.
+///
+/// Needs state to be able to print the module after a pass that invalidates
+/// the IR unit (typically a Loop or SCC).
+class PrintIRInstrumentation {
+public:
+  PrintIRInstrumentation() = default;
+  ~PrintIRInstrumentation();
+
+  void registerCallbacks(PassInstrumentationCallbacks &PIC);
+
+private:
+  bool printBeforePass(StringRef PassID, Any IR);
+  void printAfterPass(StringRef PassID, Any IR);
+  void printAfterPassInvalidated(StringRef PassID);
+
+  using PrintModuleDesc = std::tuple<const Module *, std::string, StringRef>;
+
+  void pushModuleDesc(StringRef PassID, Any IR);
+  PrintModuleDesc popModuleDesc(StringRef PassID);
+
+  /// Stack of Module descriptions, with enough information to print the
+  /// module after a given pass.
+  SmallVector<PrintModuleDesc, 2> ModuleDescStack;
+  bool StoreModuleDesc = false;
+};
+
 /// This class provides an interface to register all the standard pass
 /// instrumentations and manages their state (if any).
 class StandardInstrumentations {
+  PrintIRInstrumentation PrintIR;
   TimePassesHandler TimePasses;
 
 public:
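
A hedged sketch of wiring the print instrumentation above into the new pass
manager's callback registry; the surrounding pipeline setup is assumed and not
shown by this patch, and the helper name is illustrative.

    #include "llvm/Passes/StandardInstrumentations.h"

    void wirePrintingSketch(llvm::PassInstrumentationCallbacks &PIC) {
      // Must outlive the pipeline run: it owns the module-description stack
      // consulted by printAfterPassInvalidated.
      static llvm::PrintIRInstrumentation PrintIR;
      PrintIR.registerCallbacks(PIC);
    }
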
diff --git a/include/llvm/ProfileData/SampleProfReader.h b/include/llvm/ProfileData/SampleProfReader.h
index 3c477cc..5cc729e 100644
--- a/include/llvm/ProfileData/SampleProfReader.h
+++ b/include/llvm/ProfileData/SampleProfReader.h
@@ -548,6 +548,9 @@
       : SampleProfileReader(std::move(B), C, Underlying->getFormat()) {
     Profiles = std::move(Underlying->getProfiles());
     Summary = takeSummary(*Underlying);
+    // Keep the underlying reader alive; the profile data may contain
+    // StringRefs referencing names in its name table.
+    UnderlyingReader = std::move(Underlying);
   }
 
   /// Create a remapped sample profile from the given remapping file and
@@ -569,6 +572,7 @@
 private:
   SymbolRemappingReader Remappings;
   DenseMap<SymbolRemappingReader::Key, FunctionSamples*> SampleMap;
+  std::unique_ptr<SampleProfileReader> UnderlyingReader;
 };
 
 } // end namespace sampleprof
diff --git a/include/llvm/Support/AArch64TargetParser.def b/include/llvm/Support/AArch64TargetParser.def
index 6a7d16a..e03297b 100644
--- a/include/llvm/Support/AArch64TargetParser.def
+++ b/include/llvm/Support/AArch64TargetParser.def
@@ -73,6 +73,8 @@
 AARCH64_ARCH_EXT_NAME("rng",      AArch64::AEK_RAND,     "+rand",  "-rand")
 AARCH64_ARCH_EXT_NAME("memtag",   AArch64::AEK_MTE,      "+mte",   "-mte")
 AARCH64_ARCH_EXT_NAME("ssbs",     AArch64::AEK_SSBS,     "+ssbs",  "-ssbs")
+AARCH64_ARCH_EXT_NAME("sb",       AArch64::AEK_SB,       "+sb",    "-sb")
+AARCH64_ARCH_EXT_NAME("predres",  AArch64::AEK_PREDRES,  "+predres", "-predres")
 #undef AARCH64_ARCH_EXT_NAME
 
 #ifndef AARCH64_CPU_NAME
@@ -100,8 +102,8 @@
                 (AArch64::AEK_CRC))
 AARCH64_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
-AARCH64_CPU_NAME("exynos-m4", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
-                (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("exynos-m4", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+                (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD))
 AARCH64_CPU_NAME("falkor", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC | AArch64::AEK_RDM))
 AARCH64_CPU_NAME("saphira", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
diff --git a/include/llvm/Support/AArch64TargetParser.h b/include/llvm/Support/AArch64TargetParser.h
index aea4062..76b77d4 100644
--- a/include/llvm/Support/AArch64TargetParser.h
+++ b/include/llvm/Support/AArch64TargetParser.h
@@ -48,6 +48,8 @@
   AEK_RAND =        1 << 18,
   AEK_MTE =         1 << 19,
   AEK_SSBS =        1 << 20,
+  AEK_SB =          1 << 21,
+  AEK_PREDRES =     1 << 22,
 };
 
 enum class ArchKind {
diff --git a/include/llvm/Support/ARMTargetParser.def b/include/llvm/Support/ARMTargetParser.def
index adf6439..9e844e2 100644
--- a/include/llvm/Support/ARMTargetParser.def
+++ b/include/llvm/Support/ARMTargetParser.def
@@ -158,6 +158,7 @@
 ARM_ARCH_EXT_NAME("maverick", ARM::AEK_MAVERICK, nullptr,  nullptr)
 ARM_ARCH_EXT_NAME("xscale",   ARM::AEK_XSCALE,   nullptr,  nullptr)
 ARM_ARCH_EXT_NAME("fp16fml",  ARM::AEK_FP16FML,  "+fp16fml", "-fp16fml")
+ARM_ARCH_EXT_NAME("sb",       ARM::AEK_SB,       "+sb",      "-sb")
 #undef ARM_ARCH_EXT_NAME
 
 #ifndef ARM_HW_DIV_NAME
@@ -265,7 +266,8 @@
 ARM_CPU_NAME("exynos-m1", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
 ARM_CPU_NAME("exynos-m2", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
 ARM_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
-ARM_CPU_NAME("exynos-m4", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("exynos-m4", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
 ARM_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
 // Non-standard Arch names.
 ARM_CPU_NAME("iwmmxt", IWMMXT, FK_NONE, true, ARM::AEK_NONE)
diff --git a/include/llvm/Support/ARMTargetParser.h b/include/llvm/Support/ARMTargetParser.h
index e41675a..71acc0d 100644
--- a/include/llvm/Support/ARMTargetParser.h
+++ b/include/llvm/Support/ARMTargetParser.h
@@ -45,6 +45,7 @@
   AEK_SHA2    =     1 << 15,
   AEK_AES     =     1 << 16,
   AEK_FP16FML =     1 << 17,
+  AEK_SB      =     1 << 18,
   // Unsupported extensions.
   AEK_OS = 0x8000000,
   AEK_IWMMXT = 0x10000000,
diff --git a/include/llvm/Support/CodeGen.h b/include/llvm/Support/CodeGen.h
index ce3a623..22e7416 100644
--- a/include/llvm/Support/CodeGen.h
+++ b/include/llvm/Support/CodeGen.h
@@ -57,6 +57,11 @@
     };
   }
 
+  // Specify effect of frame pointer elimination optimization.
+  namespace FramePointer {
+    enum FP {All, NonLeaf, None};
+  }
+
 }  // end llvm namespace
 
 #endif
diff --git a/include/llvm/Support/CommandLine.h b/include/llvm/Support/CommandLine.h
index cd3543c..a8ad893 100644
--- a/include/llvm/Support/CommandLine.h
+++ b/include/llvm/Support/CommandLine.h
@@ -156,6 +156,9 @@
 // enabled, and used, the value for the flag comes from the suffix of the
 // argument.
 //
+// AlwaysPrefix - Only allow the behavior enabled by the Prefix flag and reject
+// the Option=Value form.
+//
 // Grouping - With this option enabled, multiple letter options are allowed to
 // bunch together with only a single hyphen for the whole group.  This allows
 // emulation of the behavior that ls uses for example: ls -la === ls -l -a
@@ -165,7 +168,8 @@
   NormalFormatting = 0x00, // Nothing special
   Positional = 0x01,       // Is a positional argument, no '-' required
   Prefix = 0x02,           // Can this option directly prefix its value?
-  Grouping = 0x03          // Can this option group with other options?
+  AlwaysPrefix = 0x03,     // Can this option only directly prefix its value?
+  Grouping = 0x04          // Can this option group with other options?
 };
 
 enum MiscFlags {             // Miscellaneous flags to adjust argument
@@ -265,7 +269,7 @@
   // detail representing the non-value
   unsigned Value : 2;
   unsigned HiddenFlag : 2; // enum OptionHidden
-  unsigned Formatting : 2; // enum FormattingFlags
+  unsigned Formatting : 3; // enum FormattingFlags
   unsigned Misc : 3;
   unsigned Position = 0;       // Position of last occurrence of the option
   unsigned AdditionalVals = 0; // Greater than 0 for multi-valued option.
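
The Prefix/AlwaysPrefix distinction above, in a hedged sketch where both
option names are hypothetical: per the comments in this patch, the first
option accepts -o<file> and -o=<file>, while the second accepts only the
directly prefixed -load<path> form and rejects -load=<path>.

    #include "llvm/Support/CommandLine.h"
    #include <string>

    static llvm::cl::opt<std::string>
        OutFile("o", llvm::cl::Prefix,
                llvm::cl::desc("Output file: -o<file> or -o=<file>"));
    static llvm::cl::opt<std::string>
        LoadPlugin("load", llvm::cl::AlwaysPrefix,
                   llvm::cl::desc("Plugin path: -load<path> only"));
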
diff --git a/include/llvm/Support/Error.h b/include/llvm/Support/Error.h
index 4962812..ee2cbee 100644
--- a/include/llvm/Support/Error.h
+++ b/include/llvm/Support/Error.h
@@ -1066,6 +1066,8 @@
 class ECError : public ErrorInfo<ECError> {
   friend Error errorCodeToError(std::error_code);
 
+  virtual void anchor() override;
+
 public:
   void setErrorCode(std::error_code EC) { this->EC = EC; }
   std::error_code convertToErrorCode() const override { return EC; }
diff --git a/include/llvm/Support/FileCheck.h b/include/llvm/Support/FileCheck.h
index 1c2ac2b..4061a26 100644
--- a/include/llvm/Support/FileCheck.h
+++ b/include/llvm/Support/FileCheck.h
@@ -82,6 +82,8 @@
 };
 }
 
+struct FileCheckDiag;
+
 class FileCheckPattern {
   SMLoc PatternLoc;
 
@@ -125,7 +127,8 @@
                          const StringMap<StringRef> &VariableTable,
                          SMRange MatchRange = None) const;
   void PrintFuzzyMatch(const SourceMgr &SM, StringRef Buffer,
-                       const StringMap<StringRef> &VariableTable) const;
+                       const StringMap<StringRef> &VariableTable,
+                       std::vector<FileCheckDiag> *Diags) const;
 
   bool hasVariable() const {
     return !(VariableUses.empty() && VariableDefs.empty());
@@ -146,6 +149,59 @@
 };
 
 //===----------------------------------------------------------------------===//
+/// Summary of a FileCheck diagnostic.
+//===----------------------------------------------------------------------===//
+
+struct FileCheckDiag {
+  /// What is the FileCheck directive for this diagnostic?
+  Check::FileCheckType CheckTy;
+  /// Where is the FileCheck directive for this diagnostic?
+  unsigned CheckLine, CheckCol;
+  /// What type of match result does this diagnostic describe?
+  ///
+  /// A directive's supplied pattern is said to be either expected or excluded
+  /// depending on whether the pattern must have or must not have a match in
+  /// order for the directive to succeed.  For example, a CHECK directive's
+  /// pattern is expected, and a CHECK-NOT directive's pattern is excluded.
+  /// All match result types whose names end with "Excluded" are for excluded
+  /// patterns, and all others are for expected patterns.
+  ///
+  /// There might be more than one match result for a single pattern.  For
+  /// example, there might be several discarded matches
+  /// (MatchFoundButDiscarded) before either a good match
+  /// (MatchFoundAndExpected) or a failure to match (MatchNoneButExpected),
+  /// and there might be a fuzzy match (MatchFuzzy) after the latter.
+  enum MatchType {
+    /// Indicates a good match for an expected pattern.
+    MatchFoundAndExpected,
+    /// Indicates a match for an excluded pattern.
+    MatchFoundButExcluded,
+    /// Indicates a match for an expected pattern, but the match is on the
+    /// wrong line.
+    MatchFoundButWrongLine,
+    /// Indicates a discarded match for an expected pattern.
+    MatchFoundButDiscarded,
+    /// Indicates no match for an excluded pattern.
+    MatchNoneAndExcluded,
+    /// Indicates no match for an expected pattern, but this might follow good
+    /// matches when multiple matches are expected for the pattern, or it might
+    /// follow discarded matches for the pattern.
+    MatchNoneButExpected,
+    /// Indicates a fuzzy match that serves as a suggestion for the next
+    /// intended match for an expected pattern with too few or no good matches.
+    MatchFuzzy,
+  } MatchTy;
+  /// The search range if MatchTy is MatchNoneAndExcluded or
+  /// MatchNoneButExpected, or the match range otherwise.
+  unsigned InputStartLine;
+  unsigned InputStartCol;
+  unsigned InputEndLine;
+  unsigned InputEndCol;
+  FileCheckDiag(const SourceMgr &SM, const Check::FileCheckType &CheckTy,
+                SMLoc CheckLoc, MatchType MatchTy, SMRange InputRange);
+};
+
+//===----------------------------------------------------------------------===//
 // Check Strings.
 //===----------------------------------------------------------------------===//
 
@@ -169,18 +225,20 @@
 
   size_t Check(const SourceMgr &SM, StringRef Buffer, bool IsLabelScanMode,
                size_t &MatchLen, StringMap<StringRef> &VariableTable,
-               FileCheckRequest &Req) const;
+               FileCheckRequest &Req, std::vector<FileCheckDiag> *Diags) const;
 
   bool CheckNext(const SourceMgr &SM, StringRef Buffer) const;
   bool CheckSame(const SourceMgr &SM, StringRef Buffer) const;
   bool CheckNot(const SourceMgr &SM, StringRef Buffer,
                 const std::vector<const FileCheckPattern *> &NotStrings,
                 StringMap<StringRef> &VariableTable,
-                const FileCheckRequest &Req) const;
+                const FileCheckRequest &Req,
+                std::vector<FileCheckDiag> *Diags) const;
   size_t CheckDag(const SourceMgr &SM, StringRef Buffer,
                   std::vector<const FileCheckPattern *> &NotStrings,
                   StringMap<StringRef> &VariableTable,
-                  const FileCheckRequest &Req) const;
+                  const FileCheckRequest &Req,
+                  std::vector<FileCheckDiag> *Diags) const;
 };
 
 /// FileCheck class takes the request and exposes various methods that
@@ -217,7 +275,8 @@
   ///
   /// Returns false if the input fails to satisfy the checks.
   bool CheckInput(SourceMgr &SM, StringRef Buffer,
-                  ArrayRef<FileCheckString> CheckStrings);
+                  ArrayRef<FileCheckString> CheckStrings,
+                  std::vector<FileCheckDiag> *Diags = nullptr);
 };
 } // namespace llvm
 #endif
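
With the new Diags out-parameter, callers can observe every match result
instead of only the overall pass/fail bit. A hedged sketch of such a caller
(the helper name is hypothetical):

    #include "llvm/Support/FileCheck.h"
    #include <vector>

    static unsigned countGoodMatches(llvm::FileCheck &FC, llvm::SourceMgr &SM,
                                     llvm::StringRef Buffer,
                                     llvm::ArrayRef<llvm::FileCheckString> CS) {
      std::vector<llvm::FileCheckDiag> Diags;
      FC.CheckInput(SM, Buffer, CS, &Diags);
      unsigned Good = 0;
      for (const llvm::FileCheckDiag &D : Diags)
        if (D.MatchTy == llvm::FileCheckDiag::MatchFoundAndExpected)
          ++Good;
      return Good;
    }
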
diff --git a/include/llvm/Support/FileSystem.h b/include/llvm/Support/FileSystem.h
index 827e2e9..d2042f5 100644
--- a/include/llvm/Support/FileSystem.h
+++ b/include/llvm/Support/FileSystem.h
@@ -302,10 +302,7 @@
 /// relative/../path => <current-directory>/relative/../path
 ///
 /// @param path A path that is modified to be an absolute path.
-/// @returns errc::success if \a path has been made absolute, otherwise a
-///          platform-specific error_code.
-std::error_code make_absolute(const Twine &current_directory,
-                              SmallVectorImpl<char> &path);
+void make_absolute(const Twine &current_directory, SmallVectorImpl<char> &path);
 
 /// Make \a path an absolute path.
 ///
diff --git a/include/llvm/Support/FormatVariadicDetails.h b/include/llvm/Support/FormatVariadicDetails.h
index 56dda43..e8bd90f 100644
--- a/include/llvm/Support/FormatVariadicDetails.h
+++ b/include/llvm/Support/FormatVariadicDetails.h
@@ -21,6 +21,8 @@
 
 namespace detail {
 class format_adapter {
+  virtual void anchor();
+
 protected:
   virtual ~format_adapter() {}
 
diff --git a/include/llvm/Support/LowLevelTypeImpl.h b/include/llvm/Support/LowLevelTypeImpl.h
index a0a5a52..2a1075c 100644
--- a/include/llvm/Support/LowLevelTypeImpl.h
+++ b/include/llvm/Support/LowLevelTypeImpl.h
@@ -147,6 +147,7 @@
   bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
 
   friend struct DenseMapInfo<LLT>;
+  friend class GISelInstProfileBuilder;
 
 private:
   /// LLT is packed into 64 bits as follows:
@@ -231,6 +232,11 @@
             maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo);
     }
   }
+
+  uint64_t getUniqueRAWLLTData() const {
+    return ((uint64_t)RawData) << 2 | ((uint64_t)IsPointer) << 1 |
+           ((uint64_t)IsVector);
+  }
 };
 
 inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
@@ -250,8 +256,7 @@
     return Invalid;
   }
   static inline unsigned getHashValue(const LLT &Ty) {
-    uint64_t Val = ((uint64_t)Ty.RawData) << 2 | ((uint64_t)Ty.IsPointer) << 1 |
-                   ((uint64_t)Ty.IsVector);
+    uint64_t Val = Ty.getUniqueRAWLLTData();
     return DenseMapInfo<uint64_t>::getHashValue(Val);
   }
   static bool isEqual(const LLT &LHS, const LLT &RHS) {
diff --git a/include/llvm/Support/TargetOpcodes.def b/include/llvm/Support/TargetOpcodes.def
index 3245276..3e8193a 100644
--- a/include/llvm/Support/TargetOpcodes.def
+++ b/include/llvm/Support/TargetOpcodes.def
@@ -518,6 +518,9 @@
 /// Generic byte swap.
 HANDLE_TARGET_OPCODE(G_BSWAP)
 
+/// Floating point ceil.
+HANDLE_TARGET_OPCODE(G_FCEIL)
+
 /// Generic AddressSpaceCast.
 HANDLE_TARGET_OPCODE(G_ADDRSPACE_CAST)
 
diff --git a/include/llvm/Support/VirtualFileSystem.h b/include/llvm/Support/VirtualFileSystem.h
index b3326bb..61c3d2f 100644
--- a/include/llvm/Support/VirtualFileSystem.h
+++ b/include/llvm/Support/VirtualFileSystem.h
@@ -24,6 +24,7 @@
 #include "llvm/Support/Chrono.h"
 #include "llvm/Support/ErrorOr.h"
 #include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
 #include "llvm/Support/SourceMgr.h"
 #include <cassert>
 #include <cstdint>
@@ -374,12 +375,17 @@
                               SmallVectorImpl<char> &Output) const override {
     return FS->getRealPath(Path, Output);
   }
+  std::error_code isLocal(const Twine &Path, bool &Result) override {
+    return FS->isLocal(Path, Result);
+  }
 
 protected:
   FileSystem &getUnderlyingFS() { return *FS; }
 
 private:
   IntrusiveRefCntPtr<FileSystem> FS;
+
+  virtual void anchor();
 };
 
 namespace detail {
@@ -490,6 +496,230 @@
   std::string RPath;
 };
 
+class VFSFromYamlDirIterImpl;
+class RedirectingFileSystemParser;
+
+/// A virtual file system parsed from a YAML file.
+///
+/// Currently, this class allows creating virtual directories and mapping
+/// virtual file paths to existing external files, available in \c ExternalFS.
+///
+/// The basic structure of the parsed file is:
+/// \verbatim
+/// {
+///   'version': <version number>,
+///   <optional configuration>
+///   'roots': [
+///              <directory entries>
+///            ]
+/// }
+/// \endverbatim
+///
+/// All configuration options are optional.
+///   'case-sensitive': <boolean, default=true>
+///   'use-external-names': <boolean, default=true>
+///   'overlay-relative': <boolean, default=false>
+///   'fallthrough': <boolean, default=true>
+///
+/// Virtual directories are represented as
+/// \verbatim
+/// {
+///   'type': 'directory',
+///   'name': <string>,
+///   'contents': [ <file or directory entries> ]
+/// }
+/// \endverbatim
+///
+/// The default attributes for virtual directories are:
+/// \verbatim
+/// MTime = now() when created
+/// Perms = 0777
+/// User = Group = 0
+/// Size = 0
+/// UniqueID = unspecified unique value
+/// \endverbatim
+///
+/// Re-mapped files are represented as
+/// \verbatim
+/// {
+///   'type': 'file',
+///   'name': <string>,
+///   'use-external-name': <boolean> # Optional
+///   'external-contents': <path to external file>
+/// }
+/// \endverbatim
+///
+/// and inherit their attributes from the external contents.
+///
+/// In both cases, the 'name' field may contain multiple path components (e.g.
+/// /path/to/file). However, any directory that contains more than one child
+/// must be uniquely represented by a directory entry.
+class RedirectingFileSystem : public vfs::FileSystem {
+public:
+  enum EntryKind { EK_Directory, EK_File };
+
+  /// A single file or directory in the VFS.
+  class Entry {
+    EntryKind Kind;
+    std::string Name;
+
+  public:
+    Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
+    virtual ~Entry() = default;
+
+    StringRef getName() const { return Name; }
+    EntryKind getKind() const { return Kind; }
+  };
+
+  class RedirectingDirectoryEntry : public Entry {
+    std::vector<std::unique_ptr<Entry>> Contents;
+    Status S;
+
+  public:
+    RedirectingDirectoryEntry(StringRef Name,
+                              std::vector<std::unique_ptr<Entry>> Contents,
+                              Status S)
+        : Entry(EK_Directory, Name), Contents(std::move(Contents)),
+          S(std::move(S)) {}
+    RedirectingDirectoryEntry(StringRef Name, Status S)
+        : Entry(EK_Directory, Name), S(std::move(S)) {}
+
+    Status getStatus() { return S; }
+
+    void addContent(std::unique_ptr<Entry> Content) {
+      Contents.push_back(std::move(Content));
+    }
+
+    Entry *getLastContent() const { return Contents.back().get(); }
+
+    using iterator = decltype(Contents)::iterator;
+
+    iterator contents_begin() { return Contents.begin(); }
+    iterator contents_end() { return Contents.end(); }
+
+    static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
+  };
+
+  class RedirectingFileEntry : public Entry {
+  public:
+    enum NameKind { NK_NotSet, NK_External, NK_Virtual };
+
+  private:
+    std::string ExternalContentsPath;
+    NameKind UseName;
+
+  public:
+    RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
+                         NameKind UseName)
+        : Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
+          UseName(UseName) {}
+
+    StringRef getExternalContentsPath() const { return ExternalContentsPath; }
+
+    /// Whether to use the external path as the name for this file.
+    bool useExternalName(bool GlobalUseExternalName) const {
+      return UseName == NK_NotSet ? GlobalUseExternalName
+                                  : (UseName == NK_External);
+    }
+
+    NameKind getUseName() const { return UseName; }
+
+    static bool classof(const Entry *E) { return E->getKind() == EK_File; }
+  };
+
+private:
+  friend class VFSFromYamlDirIterImpl;
+  friend class RedirectingFileSystemParser;
+
+  /// The root(s) of the virtual file system.
+  std::vector<std::unique_ptr<Entry>> Roots;
+
+  /// The file system to use for external references.
+  IntrusiveRefCntPtr<FileSystem> ExternalFS;
+
+  /// If IsRelativeOverlay is set, this represents the directory
+  /// path that should be prefixed to each 'external-contents' entry
+  /// when reading from YAML files.
+  std::string ExternalContentsPrefixDir;
+
+  /// @name Configuration
+  /// @{
+
+  /// Whether to perform case-sensitive comparisons.
+  ///
+  /// Currently, case-insensitive matching only works correctly with ASCII.
+  bool CaseSensitive = true;
+
+  /// IsRelativeOverlay marks whether the ExternalContentsPrefixDir path must
+  /// be prefixed to every 'external-contents' entry when reading from YAML files.
+  bool IsRelativeOverlay = false;
+
+  /// Whether to use the value of 'external-contents' for the names of files.
+  /// This global value is overridable on a per-file basis.
+  bool UseExternalNames = true;
+
+  /// Whether to attempt a file lookup in the external file system when a path
+  /// is not found in the VFS.
+  bool IsFallthrough = true;
+  /// @}
+
+  /// Virtual file paths and external files may be canonicalized to remove
+  /// "..", "." and "./" components. FIXME: some unittests currently fail on
+  /// win32 when using remove_dots and remove_leading_dotslash on paths.
+  bool UseCanonicalizedPaths =
+#ifdef _WIN32
+      false;
+#else
+      true;
+#endif
+
+  RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
+      : ExternalFS(std::move(ExternalFS)) {}
+
+  /// Looks up the path <tt>[Start, End)</tt> in \p From, possibly
+  /// recursing into the contents of \p From if it is a directory.
+  ErrorOr<Entry *> lookupPath(llvm::sys::path::const_iterator Start,
+                              llvm::sys::path::const_iterator End,
+                              Entry *From) const;
+
+  /// Get the status of a given \c Entry.
+  ErrorOr<Status> status(const Twine &Path, Entry *E);
+
+public:
+  /// Looks up \p Path in \c Roots.
+  ErrorOr<Entry *> lookupPath(const Twine &Path) const;
+
+  /// Parses \p Buffer, which is expected to be in YAML format and
+  /// returns a virtual file system representing its contents.
+  static RedirectingFileSystem *
+  create(std::unique_ptr<MemoryBuffer> Buffer,
+         SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
+         void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS);
+
+  ErrorOr<Status> status(const Twine &Path) override;
+  ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
+
+  std::error_code getRealPath(const Twine &Path,
+                              SmallVectorImpl<char> &Output) const override;
+
+  llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
+
+  std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
+
+  std::error_code isLocal(const Twine &Path, bool &Result) override;
+
+  directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
+
+  void setExternalContentsPrefixDir(StringRef PrefixDir);
+
+  StringRef getExternalContentsPrefixDir() const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  LLVM_DUMP_METHOD void dump() const;
+  LLVM_DUMP_METHOD void dumpEntry(Entry *E, int NumSpaces = 0) const;
+#endif
+};
+
 /// Collect all pairs of <virtual path, real path> entries from the
 /// \p YAMLFilePath. This is used by the module dependency collector to forward
 /// the entries into the reproducer output VFS YAML file.
@@ -523,6 +753,8 @@
     OverlayDir.assign(OverlayDirectory.str());
   }
 
+  const std::vector<YAMLVFSEntry> &getMappings() const { return Mappings; }
+
   void write(llvm::raw_ostream &OS);
 };
 
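
A hedged sketch of the create factory above, building an overlay from an
in-memory YAML buffer; the mapping, the helper name, and the assumption that
RedirectingFileSystem lives in llvm::vfs are illustrative.

    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/VirtualFileSystem.h"

    llvm::vfs::RedirectingFileSystem *
    makeOverlaySketch(llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> Real) {
      // Map the virtual path /v/a.h onto the real file /tmp/a.h.
      const char *Yaml = "{ 'version': 0, 'roots': [\n"
                         "  { 'type': 'file', 'name': '/v/a.h',\n"
                         "    'external-contents': '/tmp/a.h' }]}";
      return llvm::vfs::RedirectingFileSystem::create(
          llvm::MemoryBuffer::getMemBuffer(Yaml), /*DiagHandler=*/nullptr,
          /*YAMLFilePath=*/"", /*DiagContext=*/nullptr, std::move(Real));
    }
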
diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h
index 9a86f3a..d062e71 100644
--- a/include/llvm/Support/raw_ostream.h
+++ b/include/llvm/Support/raw_ostream.h
@@ -554,6 +554,8 @@
   raw_ostream &OS;
   SmallVector<char, 0> Buffer;
 
+  virtual void anchor() override;
+
 public:
   buffer_ostream(raw_ostream &OS) : raw_svector_ostream(Buffer), OS(OS) {}
   ~buffer_ostream() override { OS << str(); }
diff --git a/include/llvm/Target/GenericOpcodes.td b/include/llvm/Target/GenericOpcodes.td
index 775221a..045fe25 100644
--- a/include/llvm/Target/GenericOpcodes.td
+++ b/include/llvm/Target/GenericOpcodes.td
@@ -547,6 +547,13 @@
   let hasSideEffects = 0;
 }
 
+// Floating point ceiling of a value.
+def G_FCEIL : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+}
+
 //------------------------------------------------------------------------------
 // Opcodes for LLVM Intrinsics
 //------------------------------------------------------------------------------
diff --git a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index af26375..31d2636 100644
--- a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -89,6 +89,7 @@
 def : GINodeEquiv<G_CTTZ_ZERO_UNDEF, cttz_zero_undef>;
 def : GINodeEquiv<G_CTPOP, ctpop>;
 def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
+def : GINodeEquiv<G_FCEIL, fceil>;
 
 // Broadly speaking G_LOAD is equivalent to ISD::LOAD but there are some
 // complications that tablegen must take care of. For example, Predicates such
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h
index 3624cd2..017cab0 100644
--- a/include/llvm/Transforms/Instrumentation.h
+++ b/include/llvm/Transforms/Instrumentation.h
@@ -152,17 +152,9 @@
                                              bool UseGlobalsGC = true,
                                              bool UseOdrIndicator = true);
 
-// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
-FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
-                                        bool Recover = false,
-                                        bool EnableKmsan = false);
-
 FunctionPass *createHWAddressSanitizerPass(bool CompileKernel = false,
                                            bool Recover = false);
 
-// Insert ThreadSanitizer (race detection) instrumentation
-FunctionPass *createThreadSanitizerPass();
-
 // Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
 ModulePass *createDataFlowSanitizerPass(
     const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
@@ -230,7 +222,6 @@
   assert(Scaled <= std::numeric_limits<uint32_t>::max() && "overflow 32-bits");
   return Scaled;
 }
-
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_INSTRUMENTATION_H
diff --git a/include/llvm/Transforms/Instrumentation/MemorySanitizer.h b/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
new file mode 100644
index 0000000..54f0e2f
--- /dev/null
+++ b/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
@@ -0,0 +1,48 @@
+//===- Transforms/Instrumentation/MemorySanitizer.h - MSan Pass -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the memory sanitizer pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
+FunctionPass *createMemorySanitizerLegacyPassPass(int TrackOrigins = 0,
+                                                  bool Recover = false,
+                                                  bool EnableKmsan = false);
+
+/// A function pass for msan instrumentation.
+///
+/// Instruments functions to detect uninitialized reads. This function pass
+/// inserts calls to runtime library functions. If the functions aren't declared
+/// yet, the pass inserts the declarations. Otherwise the existing globals are
+/// used.
+struct MemorySanitizerPass : public PassInfoMixin<MemorySanitizerPass> {
+  MemorySanitizerPass(int TrackOrigins = 0, bool Recover = false,
+                      bool EnableKmsan = false)
+      : TrackOrigins(TrackOrigins), Recover(Recover), EnableKmsan(EnableKmsan) {
+  }
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+
+private:
+  int TrackOrigins;
+  bool Recover;
+  bool EnableKmsan;
+};
+} // namespace llvm
+
+#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H */
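
For comparison with the legacy factory above, a hedged sketch of scheduling
the new-PM pass on a function pipeline; the pipeline setup and helper name
are assumed, not shown by this patch.

    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"

    void addMsanSketch(llvm::FunctionPassManager &FPM) {
      // Track origins one level deep, no recovery, userspace (non-kernel) MSan.
      FPM.addPass(llvm::MemorySanitizerPass(/*TrackOrigins=*/1,
                                            /*Recover=*/false,
                                            /*EnableKmsan=*/false));
    }
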
diff --git a/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h b/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
new file mode 100644
index 0000000..701e2e6
--- /dev/null
+++ b/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
@@ -0,0 +1,33 @@
+//===- Transforms/Instrumentation/ThreadSanitizer.h - TSan Pass -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the thread sanitizer pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+// Insert ThreadSanitizer (race detection) instrumentation
+FunctionPass *createThreadSanitizerLegacyPassPass();
+
+/// A function pass for tsan instrumentation.
+///
+/// Instruments functions to detect race conditions. This function pass inserts
+/// calls to runtime library functions. If the functions aren't declared yet, the
+/// pass inserts the declarations. Otherwise the existing globals are used.
+struct ThreadSanitizerPass : public PassInfoMixin<ThreadSanitizerPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // namespace llvm
+#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H */
diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h
index 6df2f9a..8fcf929 100644
--- a/include/llvm/Transforms/Scalar.h
+++ b/include/llvm/Transforms/Scalar.h
@@ -183,11 +183,12 @@
 //
 // LoopUnroll - This pass is a simple loop unrolling pass.
 //
-Pass *createLoopUnrollPass(int OptLevel = 2, int Threshold = -1, int Count = -1,
+Pass *createLoopUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false,
+                           int Threshold = -1, int Count = -1,
                            int AllowPartial = -1, int Runtime = -1,
                            int UpperBound = -1, int AllowPeeling = -1);
 // Create an unrolling pass for full unrolling that uses exact trip count only.
-Pass *createSimpleLoopUnrollPass(int OptLevel = 2);
+Pass *createSimpleLoopUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false);
 
 //===----------------------------------------------------------------------===//
 //
@@ -470,6 +471,7 @@
 
 ///===---------------------------------------------------------------------===//
 ModulePass *createNameAnonGlobalPass();
+ModulePass *createCanonicalizeAliasesPass();
 
 //===----------------------------------------------------------------------===//
 //
diff --git a/include/llvm/Transforms/Scalar/GVN.h b/include/llvm/Transforms/Scalar/GVN.h
index 784de7f..9827678 100644
--- a/include/llvm/Transforms/Scalar/GVN.h
+++ b/include/llvm/Transforms/Scalar/GVN.h
@@ -27,6 +27,7 @@
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/Compiler.h"
 #include <cstdint>
@@ -180,7 +181,12 @@
 
   // Map the block to reversed postorder traversal number. It is used to
   // find back edge easily.
-  DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
+  DenseMap<AssertingVH<BasicBlock>, uint32_t> BlockRPONumber;
+
+  // This is set 'true' initially and also when new blocks have been added to
+  // the function being analyzed. This boolean is used to control the updating
+  // of BlockRPONumber prior to accessing the contents of BlockRPONumber.
+  bool InvalidBlockRPONumbers = true;
 
   using LoadDepVect = SmallVector<NonLocalDepResult, 64>;
   using AvailValInBlkVect = SmallVector<gvn::AvailableValueInBlock, 64>;
diff --git a/include/llvm/Transforms/Scalar/JumpThreading.h b/include/llvm/Transforms/Scalar/JumpThreading.h
index 7851894..9894345 100644
--- a/include/llvm/Transforms/Scalar/JumpThreading.h
+++ b/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -139,7 +139,11 @@
   bool ProcessImpliedCondition(BasicBlock *BB);
 
   bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
+  void UnfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, SelectInst *SI,
+                         PHINode *SIUse, unsigned Idx);
+
   bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
+  bool TryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB);
   bool TryToUnfoldSelectInCurrBB(BasicBlock *BB);
 
   bool ProcessGuards(BasicBlock *BB);
diff --git a/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/include/llvm/Transforms/Scalar/LoopUnrollPass.h
index 20c9a26..e38e983 100644
--- a/include/llvm/Transforms/Scalar/LoopUnrollPass.h
+++ b/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -24,8 +24,14 @@
 class LoopFullUnrollPass : public PassInfoMixin<LoopFullUnrollPass> {
   const int OptLevel;
 
+  /// If false, use a cost model to determine whether unrolling of a loop is
+  /// profitable. If true, only loops that explicitly request unrolling via
+  /// metadata are considered. All other loops are skipped.
+  const bool OnlyWhenForced;
+
 public:
-  explicit LoopFullUnrollPass(int OptLevel = 2) : OptLevel(OptLevel) {}
+  explicit LoopFullUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false)
+      : OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced) {}
 
   PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                         LoopStandardAnalysisResults &AR, LPMUpdater &U);
@@ -50,7 +56,13 @@
   Optional<bool> AllowUpperBound;
   int OptLevel;
 
-  LoopUnrollOptions(int OptLevel = 2) : OptLevel(OptLevel) {}
+  /// If false, use a cost model to determine whether unrolling of a loop is
+  /// profitable. If true, only loops that explicitly request unrolling via
+  /// metadata are considered. All other loops are skipped.
+  bool OnlyWhenForced;
+
+  LoopUnrollOptions(int OptLevel = 2, bool OnlyWhenForced = false)
+      : OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced) {}
 
   /// Enables or disables partial unrolling. When disabled, only full unrolling
   /// is allowed.
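
For the new pass manager the same knob travels through LoopUnrollOptions. A
minimal sketch, assuming the usual chained setters such as setPartial are
available on this class:

    #include "llvm/Transforms/Scalar/LoopUnrollPass.h"

    llvm::LoopUnrollOptions makeForcedOnlyUnrollOptions() {
      // Only loops with explicit unroll metadata are considered; all other
      // loops are skipped regardless of what the cost model would say.
      return llvm::LoopUnrollOptions(/*OptLevel=*/2, /*OnlyWhenForced=*/true)
          .setPartial(false);
    }
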
diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h
index a0fc188..5b16a2c 100644
--- a/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -43,6 +43,13 @@
 /// Delete the specified block, which must have no predecessors.
 void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);
 
+/// Delete the specified blocks in \p BBs. The set of deleted blocks must have
+/// no predecessors that are not being deleted themselves. \p BBs must contain
+/// no duplicate blocks. If there are loops among this set of blocks, all
+/// relevant loop info updates should be done before this function is called.
+void DeleteDeadBlocks(SmallVectorImpl<BasicBlock *> &BBs,
+                      DomTreeUpdater *DTU = nullptr);
+
 /// We know that BB has one predecessor. If there are any single-entry PHI nodes
 /// in it, fold them away. This handles the case when all entries to the PHI
 /// nodes in a block are guaranteed equal, such as when the block has exactly
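
A sketch of the intended usage of DeleteDeadBlocks, restating its
preconditions (dropDeadRegion and the two blocks are illustrative):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"

    void dropDeadRegion(llvm::BasicBlock *A, llvm::BasicBlock *B) {
      // Preconditions: A and B have no predecessors outside {A, B}, the
      // vector holds no duplicates, and loop info was updated beforehand.
      llvm::SmallVector<llvm::BasicBlock *, 2> Dead = {A, B};
      llvm::DeleteDeadBlocks(Dead, /*DTU=*/nullptr);
    }
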
diff --git a/include/llvm/Transforms/Utils/CanonicalizeAliases.h b/include/llvm/Transforms/Utils/CanonicalizeAliases.h
new file mode 100644
index 0000000..f232637
--- /dev/null
+++ b/include/llvm/Transforms/Utils/CanonicalizeAliases.h
@@ -0,0 +1,32 @@
+//===-- CanonicalizeAliases.h - Alias Canonicalization Pass -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a pass that canonicalizes aliases.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
+#define LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that canonicalizes aliases.
+class CanonicalizeAliasesPass : public PassInfoMixin<CanonicalizeAliasesPass> {
+public:
+  CanonicalizeAliasesPass() = default;
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
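
The pass is exposed to both pass managers: the class above for the new one,
and the createCanonicalizeAliasesPass() factory added to Transforms/Scalar.h
earlier in this patch for the legacy one. A minimal scheduling sketch:

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Scalar.h"
    #include "llvm/Transforms/Utils/CanonicalizeAliases.h"

    void scheduleCanonicalizeAliases(llvm::legacy::PassManager &LegacyPM,
                                     llvm::ModulePassManager &MPM) {
      LegacyPM.add(llvm::createCanonicalizeAliasesPass()); // Legacy PM.
      MPM.addPass(llvm::CanonicalizeAliasesPass());        // New PM.
    }
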
diff --git a/include/llvm/Transforms/Utils/CodeExtractor.h b/include/llvm/Transforms/Utils/CodeExtractor.h
index 498ff5a..fee79fd 100644
--- a/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -27,6 +27,7 @@
 class BlockFrequency;
 class BlockFrequencyInfo;
 class BranchProbabilityInfo;
+class CallInst;
 class DominatorTree;
 class Function;
 class Instruction;
@@ -164,10 +165,9 @@
         DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
         BranchProbabilityInfo *BPI);
 
-    void emitCallAndSwitchStatement(Function *newFunction,
-                                    BasicBlock *newHeader,
-                                    ValueSet &inputs,
-                                    ValueSet &outputs);
+    CallInst *emitCallAndSwitchStatement(Function *newFunction,
+                                         BasicBlock *newHeader,
+                                         ValueSet &inputs, ValueSet &outputs);
   };
 
 } // end namespace llvm
diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h
index faf9131..8c2527b 100644
--- a/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/include/llvm/Transforms/Utils/LoopUtils.h
@@ -41,6 +41,7 @@
 class DataLayout;
 class Loop;
 class LoopInfo;
+class MemorySSAUpdater;
 class OptimizationRemarkEmitter;
 class PredicatedScalarEvolution;
 class PredIteratorCache;
@@ -109,7 +110,7 @@
 /// arguments. Diagnostics are emitted via \p ORE. It returns changed status.
 bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
                 TargetLibraryInfo *, TargetTransformInfo *, Loop *,
-                AliasSetTracker *, ICFLoopSafetyInfo *,
+                AliasSetTracker *, MemorySSAUpdater *, ICFLoopSafetyInfo *,
                 OptimizationRemarkEmitter *ORE);
 
 /// Walk the specified region of the CFG (defined by all blocks
@@ -122,7 +123,8 @@
 /// ORE. It returns changed status.
 bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
                  TargetLibraryInfo *, Loop *, AliasSetTracker *,
-                 ICFLoopSafetyInfo *, OptimizationRemarkEmitter *ORE);
+                 MemorySSAUpdater *, ICFLoopSafetyInfo *,
+                 OptimizationRemarkEmitter *ORE);
 
 /// This function deletes dead loops. The caller of this function needs to
 /// guarantee that the loop is in fact dead.
@@ -168,7 +170,7 @@
 /// If it has a value (e.g. {"llvm.distribute", 1} return the value as an
 /// operand or null otherwise.  If the string metadata is not found return
 /// Optional's not-a-value.
-Optional<const MDOperand *> findStringMetadataForLoop(Loop *TheLoop,
+Optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
                                                       StringRef Name);
 
 /// Find named metadata for a loop with an integer value.
@@ -274,7 +276,7 @@
 /// If \p ORE is set use it to emit optimization remarks.
 bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                         Loop *CurLoop, AliasSetTracker *CurAST,
-                        bool TargetExecutesOncePerLoop,
+                        MemorySSAUpdater *MSSAU, bool TargetExecutesOncePerLoop,
                         OptimizationRemarkEmitter *ORE = nullptr);
 
 /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
@@ -321,6 +323,23 @@
 /// Flag set: NSW, NUW, exact, and all of fast-math.
 void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr);
 
+/// Returns true if we can prove that \p S is defined and always negative in
+/// loop \p L.
+bool isKnownNegativeInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);
+
+/// Returns true if we can prove that \p S is defined and always non-negative in
+/// loop \p L.
+bool isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
+                              ScalarEvolution &SE);
+
+/// Returns true if \p S is defined and never is equal to signed/unsigned max.
+bool cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+                       bool Signed);
+
+/// Returns true if \p S is defined and never is equal to signed/unsigned min.
+bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+                       bool Signed);
+
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
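
A sketch of how the new SCEV range helpers compose, for example to argue that
decrementing an induction variable cannot wrap (decrementCannotWrapSigned is
an illustrative name):

    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/Transforms/Utils/LoopUtils.h"

    bool decrementCannotWrapSigned(const llvm::SCEV *IV, const llvm::Loop *L,
                                   llvm::ScalarEvolution &SE) {
      // If IV never equals the signed minimum inside L, then IV - 1 cannot
      // wrap around in signed arithmetic.
      return llvm::cannotBeMinInLoop(IV, L, SE, /*Signed=*/true);
    }
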
diff --git a/include/llvm/Transforms/Utils/ModuleUtils.h b/include/llvm/Transforms/Utils/ModuleUtils.h
index 14615c2..fee492b 100644
--- a/include/llvm/Transforms/Utils/ModuleUtils.h
+++ b/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -58,6 +58,24 @@
     ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
     StringRef VersionCheckName = StringRef());
 
+/// Creates the sanitizer constructor function lazily. If the constructor and
+/// init functions already exist, this function returns them. Otherwise it
+/// calls \c createSanitizerCtorAndInitFunctions and invokes the
+/// FunctionsCreatedCallback, passing the new Ctor and Init functions.
+///
+/// \return Returns a pair of pointers to the constructor and init functions,
+/// respectively.
+std::pair<Function *, Function *> getOrCreateSanitizerCtorAndInitFunctions(
+    Module &M, StringRef CtorName, StringRef InitName,
+    ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+    function_ref<void(Function *, Function *)> FunctionsCreatedCallback,
+    StringRef VersionCheckName = StringRef());
+
+// Creates and returns a sanitizer init function without arguments if it
+// doesn't exist, and adds it to the global constructors list. Otherwise it
+// returns the existing function.
+Function *getOrCreateInitFunction(Module &M, StringRef Name);
+
 /// Rename all the anon globals in the module using a hash computed from
 /// the list of public globals in the module.
 bool nameUnamedGlobals(Module &M);
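
A sketch of the lazy-creation API for a hypothetical sanitizer (the "mysan"
names are invented for illustration); the callback fires only when the pair is
newly created:

    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/ModuleUtils.h"
    #include <tuple>

    void ensureMySanCtor(llvm::Module &M) {
      llvm::Function *Ctor, *Init;
      std::tie(Ctor, Init) = llvm::getOrCreateSanitizerCtorAndInitFunctions(
          M, /*CtorName=*/"mysan.module_ctor", /*InitName=*/"__mysan_init",
          /*InitArgTypes=*/{}, /*InitArgs=*/{},
          [&M](llvm::Function *NewCtor, llvm::Function *NewInit) {
            // First creation only: register the new ctor to run at startup.
            llvm::appendToGlobalCtors(M, NewCtor, /*Priority=*/0);
          });
    }
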
diff --git a/include/llvm/Transforms/Vectorize.h b/include/llvm/Transforms/Vectorize.h
index 950af7f..70f9a2e 100644
--- a/include/llvm/Transforms/Vectorize.h
+++ b/include/llvm/Transforms/Vectorize.h
@@ -110,8 +110,8 @@
 //
 // LoopVectorize - Create a loop vectorization pass.
 //
-Pass *createLoopVectorizePass(bool NoUnrolling = false,
-                              bool AlwaysVectorize = true);
+Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced = false,
+                              bool VectorizeOnlyWhenForced = false);
 
 //===----------------------------------------------------------------------===//
 //
diff --git a/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index 124a456..5c7bba0 100644
--- a/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -95,7 +95,7 @@
     FK_Enabled = 1,    ///< Forcing enabled.
   };
 
-  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
+  LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
                      OptimizationRemarkEmitter &ORE);
 
   /// Mark the loop L as already vectorized by setting the width to 1.
@@ -105,7 +105,8 @@
     writeHintsToMetadata(Hints);
   }
 
-  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const;
+  bool allowVectorization(Function *F, Loop *L,
+                          bool VectorizeOnlyWhenForced) const;
 
   /// Dumps all the hint information.
   void emitRemarkWithHints() const;
diff --git a/include/llvm/Transforms/Vectorize/LoopVectorize.h b/include/llvm/Transforms/Vectorize/LoopVectorize.h
index d79d846..d9c4f7b 100644
--- a/include/llvm/Transforms/Vectorize/LoopVectorize.h
+++ b/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -78,12 +78,13 @@
 
 /// The LoopVectorize Pass.
 struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
-  bool DisableUnrolling = false;
+  /// If false, consider all loops for interleaving.
+  /// If true, only loops that explicitly request interleaving are considered.
+  bool InterleaveOnlyWhenForced = false;
 
-  /// If true, consider all loops for vectorization.
-  /// If false, only loops that explicitly request vectorization are
-  /// considered.
-  bool AlwaysVectorize = true;
+  /// If false, consider all loops for vectorization.
+  /// If true, only loops that explicitly request vectorization are considered.
+  bool VectorizeOnlyWhenForced = false;
 
   ScalarEvolution *SE;
   LoopInfo *LI;
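
The renamed flags invert the sense of the old pair: AlwaysVectorize == true
corresponds to VectorizeOnlyWhenForced == false, and DisableUnrolling maps to
InterleaveOnlyWhenForced. A sketch of an opt-in-only configuration, assuming
the struct remains default-constructible as declared above:

    #include "llvm/Transforms/Vectorize/LoopVectorize.h"

    llvm::LoopVectorizePass makeOptInVectorizer() {
      llvm::LoopVectorizePass LVP;
      // Touch only loops carrying explicit vectorize/interleave metadata.
      LVP.InterleaveOnlyWhenForced = true;
      LVP.VectorizeOnlyWhenForced = true;
      return LVP;
    }
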
diff --git a/include/llvm/module.modulemap b/include/llvm/module.modulemap
index 3b6e2a8..bcc1253 100644
--- a/include/llvm/module.modulemap
+++ b/include/llvm/module.modulemap
@@ -7,7 +7,11 @@
   textual header "Analysis/TargetLibraryInfo.def"
 }
 
-module LLVM_AsmParser { requires cplusplus umbrella "AsmParser" module * { export * } }
+module LLVM_AsmParser {
+  requires cplusplus
+  umbrella "AsmParser"
+  module * { export * }
+}
 
 // A module covering CodeGen/ and Target/. These are intertwined
 // and codependent, and thus notionally form a single module.
@@ -27,14 +31,20 @@
     textual header "CodeGen/CommandFlags.inc"
     textual header "CodeGen/DIEValue.def"
   }
-
-  module Target {
-    umbrella "Target"
-    module * { export * }
-  }
 }
 
-module LLVM_Bitcode { requires cplusplus umbrella "Bitcode" module * { export * } }
+// FIXME: Make this a submodule of LLVM_Backend again.
+//        Doing so causes a linker error in clang-format.
+module LLVM_Backend_Target {
+  umbrella "Target"
+  module * { export * }
+}
+
+module LLVM_Bitcode {
+  requires cplusplus
+  umbrella "Bitcode"
+  module * { export * }
+}
 
 
 module LLVM_BinaryFormat {
@@ -244,9 +254,23 @@
   textual header "IR/RuntimeLibcalls.def"
 }
 
-module LLVM_IRReader { requires cplusplus umbrella "IRReader" module * { export * } }
-module LLVM_LineEditor { requires cplusplus umbrella "LineEditor" module * { export * } }
-module LLVM_LTO { requires cplusplus umbrella "LTO" module * { export * } }
+module LLVM_IRReader {
+  requires cplusplus
+  umbrella "IRReader"
+  module * { export * }
+}
+
+module LLVM_LineEditor {
+  requires cplusplus
+  umbrella "LineEditor"
+  module * { export * }
+}
+
+module LLVM_LTO {
+  requires cplusplus
+  umbrella "LTO"
+  module * { export * }
+}
 
 module LLVM_MC {
   requires cplusplus
@@ -273,7 +297,11 @@
   module * { export * }
 }
 
-module LLVM_Option { requires cplusplus umbrella "Option" module * { export * } }
+module LLVM_Option {
+  requires cplusplus
+  umbrella "Option"
+  module * { export * }
+}
 
 module LLVM_ProfileData {
   requires cplusplus
@@ -291,7 +319,11 @@
   export *
 }
 
-module LLVM_TableGen { requires cplusplus umbrella "TableGen" module * { export * } }
+module LLVM_TableGen {
+  requires cplusplus
+  umbrella "TableGen"
+  module * { export * }
+}
 
 module LLVM_Transforms {
   requires cplusplus
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 8ed4839..3446aef 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -40,7 +40,6 @@
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Module.h"
@@ -118,11 +117,11 @@
   return false;
 }
 
-ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
   ModRefInfo Result = ModRefInfo::ModRef;
 
   for (const auto &AA : AAs) {
-    Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx));
+    Result = intersectModRef(Result, AA->getArgModRefInfo(Call, ArgIdx));
 
     // Early-exit the moment we reach the bottom of the lattice.
     if (isNoModRef(Result))
@@ -132,11 +131,11 @@
   return Result;
 }
 
-ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
+ModRefInfo AAResults::getModRefInfo(Instruction *I, const CallBase *Call2) {
   // We may have two calls.
-  if (auto CS = ImmutableCallSite(I)) {
+  if (const auto *Call1 = dyn_cast<CallBase>(I)) {
     // Check if the two calls modify the same memory.
-    return getModRefInfo(CS, Call);
+    return getModRefInfo(Call1, Call2);
   } else if (I->isFenceLike()) {
     // If this is a fence, just return ModRef.
     return ModRefInfo::ModRef;
@@ -146,19 +145,19 @@
     // is that if the call references what this instruction
     // defines, it must be clobbered by this location.
     const MemoryLocation DefLoc = MemoryLocation::get(I);
-    ModRefInfo MR = getModRefInfo(Call, DefLoc);
+    ModRefInfo MR = getModRefInfo(Call2, DefLoc);
     if (isModOrRefSet(MR))
       return setModAndRef(MR);
   }
   return ModRefInfo::NoModRef;
 }
 
-ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
+ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
                                     const MemoryLocation &Loc) {
   ModRefInfo Result = ModRefInfo::ModRef;
 
   for (const auto &AA : AAs) {
-    Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc));
+    Result = intersectModRef(Result, AA->getModRefInfo(Call, Loc));
 
     // Early-exit the moment we reach the bottom of the lattice.
     if (isNoModRef(Result))
@@ -167,7 +166,7 @@
 
   // Try to refine the mod-ref info further using other API entry points to the
   // aggregate set of AA results.
-  auto MRB = getModRefBehavior(CS);
+  auto MRB = getModRefBehavior(Call);
   if (MRB == FMRB_DoesNotAccessMemory ||
       MRB == FMRB_OnlyAccessesInaccessibleMem)
     return ModRefInfo::NoModRef;
@@ -181,15 +180,16 @@
     bool IsMustAlias = true;
     ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
     if (doesAccessArgPointees(MRB)) {
-      for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
+      for (auto AI = Call->arg_begin(), AE = Call->arg_end(); AI != AE; ++AI) {
         const Value *Arg = *AI;
         if (!Arg->getType()->isPointerTy())
           continue;
-        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
-        MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
+        unsigned ArgIdx = std::distance(Call->arg_begin(), AI);
+        MemoryLocation ArgLoc =
+            MemoryLocation::getForArgument(Call, ArgIdx, TLI);
         AliasResult ArgAlias = alias(ArgLoc, Loc);
         if (ArgAlias != NoAlias) {
-          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
+          ModRefInfo ArgMask = getArgModRefInfo(Call, ArgIdx);
           AllArgsMask = unionModRef(AllArgsMask, ArgMask);
         }
         // Conservatively clear IsMustAlias unless only MustAlias is found.
@@ -213,12 +213,12 @@
   return Result;
 }
 
-ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
-                                    ImmutableCallSite CS2) {
+ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
+                                    const CallBase *Call2) {
   ModRefInfo Result = ModRefInfo::ModRef;
 
   for (const auto &AA : AAs) {
-    Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2));
+    Result = intersectModRef(Result, AA->getModRefInfo(Call1, Call2));
 
     // Early-exit the moment we reach the bottom of the lattice.
     if (isNoModRef(Result))
@@ -228,59 +228,61 @@
   // Try to refine the mod-ref info further using other API entry points to the
   // aggregate set of AA results.
 
-  // If CS1 or CS2 are readnone, they don't interact.
-  auto CS1B = getModRefBehavior(CS1);
-  if (CS1B == FMRB_DoesNotAccessMemory)
+  // If Call1 or Call2 are readnone, they don't interact.
+  auto Call1B = getModRefBehavior(Call1);
+  if (Call1B == FMRB_DoesNotAccessMemory)
     return ModRefInfo::NoModRef;
 
-  auto CS2B = getModRefBehavior(CS2);
-  if (CS2B == FMRB_DoesNotAccessMemory)
+  auto Call2B = getModRefBehavior(Call2);
+  if (Call2B == FMRB_DoesNotAccessMemory)
     return ModRefInfo::NoModRef;
 
   // If they both only read from memory, there is no dependence.
-  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
+  if (onlyReadsMemory(Call1B) && onlyReadsMemory(Call2B))
     return ModRefInfo::NoModRef;
 
-  // If CS1 only reads memory, the only dependence on CS2 can be
-  // from CS1 reading memory written by CS2.
-  if (onlyReadsMemory(CS1B))
+  // If Call1 only reads memory, the only dependence on Call2 can be
+  // from Call1 reading memory written by Call2.
+  if (onlyReadsMemory(Call1B))
     Result = clearMod(Result);
-  else if (doesNotReadMemory(CS1B))
+  else if (doesNotReadMemory(Call1B))
     Result = clearRef(Result);
 
-  // If CS2 only access memory through arguments, accumulate the mod/ref
-  // information from CS1's references to the memory referenced by
-  // CS2's arguments.
-  if (onlyAccessesArgPointees(CS2B)) {
-    if (!doesAccessArgPointees(CS2B))
+  // If Call2 only accesses memory through arguments, accumulate the mod/ref
+  // information from Call1's references to the memory referenced by
+  // Call2's arguments.
+  if (onlyAccessesArgPointees(Call2B)) {
+    if (!doesAccessArgPointees(Call2B))
       return ModRefInfo::NoModRef;
     ModRefInfo R = ModRefInfo::NoModRef;
     bool IsMustAlias = true;
-    for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
+    for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
       const Value *Arg = *I;
       if (!Arg->getType()->isPointerTy())
         continue;
-      unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
-      auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);
+      unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
+      auto Call2ArgLoc =
+          MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);
 
-      // ArgModRefCS2 indicates what CS2 might do to CS2ArgLoc, and the
-      // dependence of CS1 on that location is the inverse:
-      // - If CS2 modifies location, dependence exists if CS1 reads or writes.
-      // - If CS2 only reads location, dependence exists if CS1 writes.
-      ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx);
+      // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
+      // dependence of Call1 on that location is the inverse:
+      // - If Call2 modifies location, dependence exists if Call1 reads or
+      //   writes.
+      // - If Call2 only reads location, dependence exists if Call1 writes.
+      ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
       ModRefInfo ArgMask = ModRefInfo::NoModRef;
-      if (isModSet(ArgModRefCS2))
+      if (isModSet(ArgModRefC2))
         ArgMask = ModRefInfo::ModRef;
-      else if (isRefSet(ArgModRefCS2))
+      else if (isRefSet(ArgModRefC2))
         ArgMask = ModRefInfo::Mod;
 
-      // ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use
+      // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use
       // above ArgMask to update dependence info.
-      ModRefInfo ModRefCS1 = getModRefInfo(CS1, CS2ArgLoc);
-      ArgMask = intersectModRef(ArgMask, ModRefCS1);
+      ModRefInfo ModRefC1 = getModRefInfo(Call1, Call2ArgLoc);
+      ArgMask = intersectModRef(ArgMask, ModRefC1);
 
       // Conservatively clear IsMustAlias unless only MustAlias is found.
-      IsMustAlias &= isMustSet(ModRefCS1);
+      IsMustAlias &= isMustSet(ModRefC1);
 
       R = intersectModRef(unionModRef(R, ArgMask), Result);
       if (R == Result) {
@@ -298,31 +300,32 @@
     return IsMustAlias ? setMust(R) : clearMust(R);
   }
 
-  // If CS1 only accesses memory through arguments, check if CS2 references
-  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
-  if (onlyAccessesArgPointees(CS1B)) {
-    if (!doesAccessArgPointees(CS1B))
+  // If Call1 only accesses memory through arguments, check if Call2 references
+  // any of the memory referenced by Call1's arguments. If not, return NoModRef.
+  if (onlyAccessesArgPointees(Call1B)) {
+    if (!doesAccessArgPointees(Call1B))
       return ModRefInfo::NoModRef;
     ModRefInfo R = ModRefInfo::NoModRef;
     bool IsMustAlias = true;
-    for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
+    for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
       const Value *Arg = *I;
       if (!Arg->getType()->isPointerTy())
         continue;
-      unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
-      auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);
+      unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
+      auto Call1ArgLoc =
+          MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);
 
-      // ArgModRefCS1 indicates what CS1 might do to CS1ArgLoc; if CS1 might
-      // Mod CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If
-      // CS1 might Ref, then we care only about a Mod by CS2.
-      ModRefInfo ArgModRefCS1 = getArgModRefInfo(CS1, CS1ArgIdx);
-      ModRefInfo ModRefCS2 = getModRefInfo(CS2, CS1ArgLoc);
-      if ((isModSet(ArgModRefCS1) && isModOrRefSet(ModRefCS2)) ||
-          (isRefSet(ArgModRefCS1) && isModSet(ModRefCS2)))
-        R = intersectModRef(unionModRef(R, ArgModRefCS1), Result);
+      // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
+      // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
+      // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
+      ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx);
+      ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc);
+      if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) ||
+          (isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
+        R = intersectModRef(unionModRef(R, ArgModRefC1), Result);
 
       // Conservatively clear IsMustAlias unless only MustAlias is found.
-      IsMustAlias &= isMustSet(ModRefCS2);
+      IsMustAlias &= isMustSet(ModRefC2);
 
       if (R == Result) {
         // On early exit, not all args were checked, cannot set Must.
@@ -342,11 +345,11 @@
   return Result;
 }
 
-FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
+FunctionModRefBehavior AAResults::getModRefBehavior(const CallBase *Call) {
   FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
 
   for (const auto &AA : AAs) {
-    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));
+    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(Call));
 
     // Early-exit the moment we reach the bottom of the lattice.
     if (Result == FMRB_DoesNotAccessMemory)
@@ -558,8 +561,8 @@
       isa<Constant>(Object))
     return ModRefInfo::ModRef;
 
-  ImmutableCallSite CS(I);
-  if (!CS.getInstruction() || CS.getInstruction() == Object)
+  const auto *Call = dyn_cast<CallBase>(I);
+  if (!Call || Call == Object)
     return ModRefInfo::ModRef;
 
   if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
@@ -572,14 +575,14 @@
   ModRefInfo R = ModRefInfo::NoModRef;
   bool IsMustAlias = true;
   // Set flag only if no May found and all operands processed.
-  for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
+  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
        CI != CE; ++CI, ++ArgNo) {
     // Only look at the no-capture or byval pointer arguments.  If this
     // pointer were passed to arguments that were neither of these, then it
     // couldn't be no-capture.
     if (!(*CI)->getType()->isPointerTy() ||
-        (!CS.doesNotCapture(ArgNo) &&
-         ArgNo < CS.getNumArgOperands() && !CS.isByValArgument(ArgNo)))
+        (!Call->doesNotCapture(ArgNo) && ArgNo < Call->getNumArgOperands() &&
+         !Call->isByValArgument(ArgNo)))
       continue;
 
     AliasResult AR = alias(MemoryLocation(*CI), MemoryLocation(Object));
@@ -591,9 +594,9 @@
       IsMustAlias = false;
     if (AR == NoAlias)
       continue;
-    if (CS.doesNotAccessMemory(ArgNo))
+    if (Call->doesNotAccessMemory(ArgNo))
       continue;
-    if (CS.onlyReadsMemory(ArgNo)) {
+    if (Call->onlyReadsMemory(ArgNo)) {
       R = ModRefInfo::Ref;
       continue;
     }
@@ -775,8 +778,8 @@
 }
 
 bool llvm::isNoAliasCall(const Value *V) {
-  if (auto CS = ImmutableCallSite(V))
-    return CS.hasRetAttr(Attribute::NoAlias);
+  if (const auto *Call = dyn_cast<CallBase>(V))
+    return Call->hasRetAttr(Attribute::NoAlias);
   return false;
 }
 
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index 764ae91..85dd4fe 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -66,11 +66,10 @@
   }
 }
 
-static inline void PrintModRefResults(const char *Msg, bool P, CallSite CSA,
-                                      CallSite CSB, Module *M) {
+static inline void PrintModRefResults(const char *Msg, bool P, CallBase *CallA,
+                                      CallBase *CallB, Module *M) {
   if (PrintAll || P) {
-    errs() << "  " << Msg << ": " << *CSA.getInstruction() << " <-> "
-           << *CSB.getInstruction() << '\n';
+    errs() << "  " << Msg << ": " << *CallA << " <-> " << *CallB << '\n';
   }
 }
 
@@ -98,7 +97,7 @@
   ++FunctionCount;
 
   SetVector<Value *> Pointers;
-  SmallSetVector<CallSite, 16> CallSites;
+  SmallSetVector<CallBase *, 16> Calls;
   SetVector<Value *> Loads;
   SetVector<Value *> Stores;
 
@@ -114,16 +113,16 @@
     if (EvalAAMD && isa<StoreInst>(&*I))
       Stores.insert(&*I);
     Instruction &Inst = *I;
-    if (auto CS = CallSite(&Inst)) {
-      Value *Callee = CS.getCalledValue();
+    if (auto *Call = dyn_cast<CallBase>(&Inst)) {
+      Value *Callee = Call->getCalledValue();
       // Skip actual functions for direct function calls.
       if (!isa<Function>(Callee) && isInterestingPointer(Callee))
         Pointers.insert(Callee);
       // Consider formals.
-      for (Use &DataOp : CS.data_ops())
+      for (Use &DataOp : Call->data_ops())
         if (isInterestingPointer(DataOp))
           Pointers.insert(DataOp);
-      CallSites.insert(CS);
+      Calls.insert(Call);
     } else {
       // Consider all operands.
       for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end();
@@ -136,19 +135,21 @@
   if (PrintAll || PrintNoAlias || PrintMayAlias || PrintPartialAlias ||
       PrintMustAlias || PrintNoModRef || PrintMod || PrintRef || PrintModRef)
     errs() << "Function: " << F.getName() << ": " << Pointers.size()
-           << " pointers, " << CallSites.size() << " call sites\n";
+           << " pointers, " << Calls.size() << " call sites\n";
 
   // Iterate over the worklist and run the full (n^2)/2 disambiguations.
   for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
        I1 != E; ++I1) {
-    uint64_t I1Size = MemoryLocation::UnknownSize;
+    auto I1Size = LocationSize::unknown();
     Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
-    if (I1ElTy->isSized()) I1Size = DL.getTypeStoreSize(I1ElTy);
+    if (I1ElTy->isSized())
+      I1Size = LocationSize::precise(DL.getTypeStoreSize(I1ElTy));
 
     for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
-      uint64_t I2Size = MemoryLocation::UnknownSize;
-      Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
-      if (I2ElTy->isSized()) I2Size = DL.getTypeStoreSize(I2ElTy);
+      auto I2Size = LocationSize::unknown();
+      Type *I2ElTy = cast<PointerType>((*I2)->getType())->getElementType();
+      if (I2ElTy->isSized())
+        I2Size = LocationSize::precise(DL.getTypeStoreSize(I2ElTy));
 
       AliasResult AR = AA.alias(*I1, I1Size, *I2, I2Size);
       switch (AR) {
@@ -228,49 +229,48 @@
   }
 
   // Mod/ref alias analysis: compare all pairs of calls and values
-  for (CallSite C : CallSites) {
-    Instruction *I = C.getInstruction();
-
+  for (CallBase *Call : Calls) {
     for (auto Pointer : Pointers) {
-      uint64_t Size = MemoryLocation::UnknownSize;
+      auto Size = LocationSize::unknown();
       Type *ElTy = cast<PointerType>(Pointer->getType())->getElementType();
-      if (ElTy->isSized()) Size = DL.getTypeStoreSize(ElTy);
+      if (ElTy->isSized())
+        Size = LocationSize::precise(DL.getTypeStoreSize(ElTy));
 
-      switch (AA.getModRefInfo(C, Pointer, Size)) {
+      switch (AA.getModRefInfo(Call, Pointer, Size)) {
       case ModRefInfo::NoModRef:
-        PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer,
+        PrintModRefResults("NoModRef", PrintNoModRef, Call, Pointer,
                            F.getParent());
         ++NoModRefCount;
         break;
       case ModRefInfo::Mod:
-        PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent());
+        PrintModRefResults("Just Mod", PrintMod, Call, Pointer, F.getParent());
         ++ModCount;
         break;
       case ModRefInfo::Ref:
-        PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent());
+        PrintModRefResults("Just Ref", PrintRef, Call, Pointer, F.getParent());
         ++RefCount;
         break;
       case ModRefInfo::ModRef:
-        PrintModRefResults("Both ModRef", PrintModRef, I, Pointer,
+        PrintModRefResults("Both ModRef", PrintModRef, Call, Pointer,
                            F.getParent());
         ++ModRefCount;
         break;
       case ModRefInfo::Must:
-        PrintModRefResults("Must", PrintMust, I, Pointer, F.getParent());
+        PrintModRefResults("Must", PrintMust, Call, Pointer, F.getParent());
         ++MustCount;
         break;
       case ModRefInfo::MustMod:
-        PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, I, Pointer,
+        PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, Call, Pointer,
                            F.getParent());
         ++MustModCount;
         break;
       case ModRefInfo::MustRef:
-        PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, I, Pointer,
+        PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, Call, Pointer,
                            F.getParent());
         ++MustRefCount;
         break;
       case ModRefInfo::MustModRef:
-        PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, I,
+        PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, Call,
                            Pointer, F.getParent());
         ++MustModRefCount;
         break;
@@ -279,44 +279,46 @@
   }
 
   // Mod/ref alias analysis: compare all pairs of calls
-  for (auto C = CallSites.begin(), Ce = CallSites.end(); C != Ce; ++C) {
-    for (auto D = CallSites.begin(); D != Ce; ++D) {
-      if (D == C)
+  for (CallBase *CallA : Calls) {
+    for (CallBase *CallB : Calls) {
+      if (CallA == CallB)
         continue;
-      switch (AA.getModRefInfo(*C, *D)) {
+      switch (AA.getModRefInfo(CallA, CallB)) {
       case ModRefInfo::NoModRef:
-        PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
+        PrintModRefResults("NoModRef", PrintNoModRef, CallA, CallB,
+                           F.getParent());
         ++NoModRefCount;
         break;
       case ModRefInfo::Mod:
-        PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
+        PrintModRefResults("Just Mod", PrintMod, CallA, CallB, F.getParent());
         ++ModCount;
         break;
       case ModRefInfo::Ref:
-        PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
+        PrintModRefResults("Just Ref", PrintRef, CallA, CallB, F.getParent());
         ++RefCount;
         break;
       case ModRefInfo::ModRef:
-        PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
+        PrintModRefResults("Both ModRef", PrintModRef, CallA, CallB,
+                           F.getParent());
         ++ModRefCount;
         break;
       case ModRefInfo::Must:
-        PrintModRefResults("Must", PrintMust, *C, *D, F.getParent());
+        PrintModRefResults("Must", PrintMust, CallA, CallB, F.getParent());
         ++MustCount;
         break;
       case ModRefInfo::MustMod:
-        PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, *C, *D,
+        PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, CallA, CallB,
                            F.getParent());
         ++MustModCount;
         break;
       case ModRefInfo::MustRef:
-        PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, *C, *D,
+        PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, CallA, CallB,
                            F.getParent());
         ++MustRefCount;
         break;
       case ModRefInfo::MustModRef:
-        PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, *C, *D,
-                           F.getParent());
+        PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, CallA,
+                           CallB, F.getParent());
         ++MustModRefCount;
         break;
       }
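
The evaluator now wraps sizes in LocationSize instead of raw uint64_t. The
recurring idiom, extracted as a sketch (sizeOfPointee is an illustrative
name):

    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"

    llvm::LocationSize sizeOfPointee(llvm::Type *ElTy,
                                     const llvm::DataLayout &DL) {
      // Start pessimistic; refine only when the type has a known store size.
      auto Size = llvm::LocationSize::unknown();
      if (ElTy->isSized())
        Size = llvm::LocationSize::precise(DL.getTypeStoreSize(ElTy));
      return Size;
    }
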
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index c152b0d..f6ad704 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -16,7 +16,6 @@
 #include "llvm/Analysis/GuardUtils.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Config/llvm-config.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
@@ -236,7 +235,8 @@
 
   for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i) {
     if (auto *UnknownInst = getUnknownInst(i)) {
-      ImmutableCallSite C1(UnknownInst), C2(Inst);
+      const auto *C1 = dyn_cast<CallBase>(UnknownInst);
+      const auto *C2 = dyn_cast<CallBase>(Inst);
       if (!C1 || !C2 || isModOrRefSet(AA.getModRefInfo(C1, C2)) ||
           isModOrRefSet(AA.getModRefInfo(C2, C1)))
         return true;
@@ -446,44 +446,44 @@
     return add(MTI);
 
   // Handle all calls with known mod/ref sets generically.
-  CallSite CS(I);
-  if (CS && CS.onlyAccessesArgMemory()) {
-    auto getAccessFromModRef = [](ModRefInfo MRI) {
-      if (isRefSet(MRI) && isModSet(MRI))
-        return AliasSet::ModRefAccess;
-      else if (isModSet(MRI))
-        return AliasSet::ModAccess;
-      else if (isRefSet(MRI))
-        return AliasSet::RefAccess;
-      else
-        return AliasSet::NoAccess;
-     
-    };
-    
-    ModRefInfo CallMask = createModRefInfo(AA.getModRefBehavior(CS));
+  if (auto *Call = dyn_cast<CallBase>(I))
+    if (Call->onlyAccessesArgMemory()) {
+      auto getAccessFromModRef = [](ModRefInfo MRI) {
+        if (isRefSet(MRI) && isModSet(MRI))
+          return AliasSet::ModRefAccess;
+        else if (isModSet(MRI))
+          return AliasSet::ModAccess;
+        else if (isRefSet(MRI))
+          return AliasSet::RefAccess;
+        else
+          return AliasSet::NoAccess;
+      };
 
-    // Some intrinsics are marked as modifying memory for control flow
-    // modelling purposes, but don't actually modify any specific memory
-    // location. 
-    using namespace PatternMatch;
-    if (I->use_empty() && match(I, m_Intrinsic<Intrinsic::invariant_start>()))
-      CallMask = clearMod(CallMask);
+      ModRefInfo CallMask = createModRefInfo(AA.getModRefBehavior(Call));
 
-    for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
-      const Value *Arg = *AI;
-      if (!Arg->getType()->isPointerTy())
-        continue;
-      unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
-      MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx,
-                                                             nullptr);
-      ModRefInfo ArgMask = AA.getArgModRefInfo(CS, ArgIdx);
-      ArgMask = intersectModRef(CallMask, ArgMask);
-      if (!isNoModRef(ArgMask))
-        addPointer(ArgLoc, getAccessFromModRef(ArgMask));
+      // Some intrinsics are marked as modifying memory for control flow
+      // modelling purposes, but don't actually modify any specific memory
+      // location.
+      using namespace PatternMatch;
+      if (Call->use_empty() &&
+          match(Call, m_Intrinsic<Intrinsic::invariant_start>()))
+        CallMask = clearMod(CallMask);
+
+      for (auto IdxArgPair : enumerate(Call->args())) {
+        int ArgIdx = IdxArgPair.index();
+        const Value *Arg = IdxArgPair.value();
+        if (!Arg->getType()->isPointerTy())
+          continue;
+        MemoryLocation ArgLoc =
+            MemoryLocation::getForArgument(Call, ArgIdx, nullptr);
+        ModRefInfo ArgMask = AA.getArgModRefInfo(Call, ArgIdx);
+        ArgMask = intersectModRef(CallMask, ArgMask);
+        if (!isNoModRef(ArgMask))
+          addPointer(ArgLoc, getAccessFromModRef(ArgMask));
+      }
+      return;
     }
-    return;
-  }
-  
+
   return addUnknown(I);
 }
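
The rewritten argument walk above also swaps manual std::distance bookkeeping
for llvm::enumerate. That idiom in isolation (forEachPointerArg is an
illustrative name):

    #include "llvm/ADT/STLExtras.h" // llvm::enumerate
    #include "llvm/IR/InstrTypes.h"

    void forEachPointerArg(llvm::CallBase *Call) {
      for (auto IdxArgPair : llvm::enumerate(Call->args())) {
        auto ArgIdx = IdxArgPair.index();
        const llvm::Value *Arg = IdxArgPair.value();
        if (!Arg->getType()->isPointerTy())
          continue;
        (void)ArgIdx; // ...use ArgIdx with APIs like getArgModRefInfo...
      }
    }
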
 
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index b7aa395..332eeaa 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -31,7 +31,6 @@
 #include "llvm/Analysis/PhiValues.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
@@ -68,6 +67,16 @@
 /// Enable analysis of recursive PHI nodes.
 static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                           cl::init(false));
+
+/// By default, even on 32-bit architectures we use 64-bit integers for
+/// calculations. This allows us to decompose indexing expressions calculated
+/// using i64 values (e.g., long long in C) more aggressively, which is common
+/// enough to worry about.
+static cl::opt<bool> ForceAtLeast64Bits("basicaa-force-at-least-64b",
+                                        cl::Hidden, cl::init(true));
+static cl::opt<bool> DoubleCalcBits("basicaa-double-calc-bits",
+                                    cl::Hidden, cl::init(false));
+
 /// SearchLimitReached / SearchTimes shows how often the limit of
 /// to decompose GEPs is reached. It will affect the precision
 /// of basic alias analysis.
@@ -134,7 +143,7 @@
 /// Returns true if the pointer is one which would have been considered an
 /// escape by isNonEscapingLocalObject.
 static bool isEscapeSource(const Value *V) {
-  if (ImmutableCallSite(V))
+  if (isa<CallBase>(V))
     return true;
 
   if (isa<Argument>(V))
@@ -381,13 +390,22 @@
 }
 
 /// To ensure a pointer offset fits in an integer of size PointerSize
-/// (in bits) when that size is smaller than 64. This is an issue in
-/// particular for 32b programs with negative indices that rely on two's
-/// complement wrap-arounds for precise alias information.
-static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
-  assert(PointerSize <= 64 && "Invalid PointerSize!");
-  unsigned ShiftBits = 64 - PointerSize;
-  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
+/// (in bits) when that size is smaller than the maximum pointer size. This is
+/// an issue, in particular, for 32b pointers with negative indices that rely
+/// on two's complement wrap-arounds for precise alias information, where the
+/// maximum pointer size is 64b.
+static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
+  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
+  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
+  return (Offset << ShiftBits).ashr(ShiftBits);
+}
+
+static unsigned getMaxPointerSize(const DataLayout &DL) {
+  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
+  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
+  if (DoubleCalcBits) MaxPointerSize *= 2;
+
+  return MaxPointerSize;
 }
 
 /// If V is a symbolic pointer expression, decompose it into a base pointer
@@ -410,8 +428,7 @@
   unsigned MaxLookup = MaxLookupSearchDepth;
   SearchTimes++;
 
-  Decomposed.StructOffset = 0;
-  Decomposed.OtherOffset = 0;
+  unsigned MaxPointerSize = getMaxPointerSize(DL);
   Decomposed.VarIndices.clear();
   do {
     // See if this is a bitcast or GEP.
@@ -436,7 +453,7 @@
 
     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
     if (!GEPOp) {
-      if (auto CS = ImmutableCallSite(V)) {
+      if (const auto *Call = dyn_cast<CallBase>(V)) {
         // CaptureTracking can know about special capturing properties of some
         // intrinsics like launder.invariant.group, that can't be expressed with
         // the attributes, but have properties like returning aliasing pointer.
@@ -446,7 +463,7 @@
         // because it should be in sync with CaptureTracking. Not using it may
         // cause weird miscompilations where 2 aliasing pointers are assumed to
         // noalias.
-        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
+        if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
           V = RP;
           continue;
         }
@@ -501,13 +518,15 @@
         if (CIdx->isZero())
           continue;
         Decomposed.OtherOffset +=
-          DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
+          (DL.getTypeAllocSize(GTI.getIndexedType()) *
+            CIdx->getValue().sextOrSelf(MaxPointerSize))
+              .sextOrTrunc(MaxPointerSize);
         continue;
       }
 
       GepHasConstantOffset = false;
 
-      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
+      APInt Scale(MaxPointerSize, DL.getTypeAllocSize(GTI.getIndexedType()));
       unsigned ZExtBits = 0, SExtBits = 0;
 
       // If the integer type is smaller than the pointer size, it is implicitly
@@ -519,20 +538,34 @@
       // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
       APInt IndexScale(Width, 0), IndexOffset(Width, 0);
       bool NSW = true, NUW = true;
+      const Value *OrigIndex = Index;
       Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                   SExtBits, DL, 0, AC, DT, NSW, NUW);
 
-      // All GEP math happens in the width of the pointer type,
-      // so we can truncate the value to 64-bits as we don't handle
-      // currently pointers larger than 64 bits and we would crash
-      // later. TODO: Make `Scale` an APInt to avoid this problem.
-      if (IndexScale.getBitWidth() > 64)
-        IndexScale = IndexScale.sextOrTrunc(64);
-
       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
-      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
-      Scale *= IndexScale.getSExtValue();
+
+      // It can be the case that, even though C1*V+C2 does not overflow for
+      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
+      // decompose the expression in this way.
+      //
+      // FIXME: C1*Scale and the other operations in the decomposed
+      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
+      // possibility.
+      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
+                                 Scale.sext(MaxPointerSize*2);
+      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
+        Index = OrigIndex;
+        IndexScale = 1;
+        IndexOffset = 0;
+
+        ZExtBits = SExtBits = 0;
+        if (PointerSize > Width)
+          SExtBits += PointerSize - Width;
+      } else {
+        Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
+        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
+      }
 
       // If we already had an occurrence of this index variable, merge this
       // scale into it.  For example, we want to handle:
@@ -552,9 +585,8 @@
       // pointer size.
       Scale = adjustToPointerSize(Scale, PointerSize);
 
-      if (Scale) {
-        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
-                                  static_cast<int64_t>(Scale)};
+      if (!!Scale) {
+        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
         Decomposed.VarIndices.push_back(Entry);
       }
     }
@@ -640,8 +672,8 @@
 }
 
 /// Returns the behavior when calling the given call site.
-FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
-  if (CS.doesNotAccessMemory())
+FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
+  if (Call->doesNotAccessMemory())
     // Can't do better than this.
     return FMRB_DoesNotAccessMemory;
 
@@ -649,23 +681,23 @@
 
   // If the callsite knows it only reads memory, don't return worse
   // than that.
-  if (CS.onlyReadsMemory())
+  if (Call->onlyReadsMemory())
     Min = FMRB_OnlyReadsMemory;
-  else if (CS.doesNotReadMemory())
+  else if (Call->doesNotReadMemory())
     Min = FMRB_DoesNotReadMemory;
 
-  if (CS.onlyAccessesArgMemory())
+  if (Call->onlyAccessesArgMemory())
     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
-  else if (CS.onlyAccessesInaccessibleMemory())
+  else if (Call->onlyAccessesInaccessibleMemory())
     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
-  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
+  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
 
-  // If CS has operand bundles then aliasing attributes from the function it
-  // calls do not directly apply to the CallSite.  This can be made more
-  // precise in the future.
-  if (!CS.hasOperandBundles())
-    if (const Function *F = CS.getCalledFunction())
+  // If the call has operand bundles then aliasing attributes from the function
+  // it calls do not directly apply to the call.  This can be made more precise
+  // in the future.
+  if (!Call->hasOperandBundles())
+    if (const Function *F = Call->getCalledFunction())
       Min =
           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
 
@@ -698,9 +730,9 @@
 }
 
 /// Returns true if this is a writeonly (i.e. Mod only) parameter.
-static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
+static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                              const TargetLibraryInfo &TLI) {
-  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
+  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
     return true;
 
   // We can bound the aliasing properties of memset_pattern16 just as we can
@@ -710,7 +742,8 @@
   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
   // attributes.
   LibFunc F;
-  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
+  if (Call->getCalledFunction() &&
+      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
       F == LibFunc_memset_pattern16 && TLI.has(F))
     if (ArgIdx == 0)
       return true;
@@ -722,23 +755,23 @@
   return false;
 }
 
-ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
+ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                            unsigned ArgIdx) {
   // Checking for known builtin intrinsics and target library functions.
-  if (isWriteOnlyParam(CS, ArgIdx, TLI))
+  if (isWriteOnlyParam(Call, ArgIdx, TLI))
     return ModRefInfo::Mod;
 
-  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
+  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
     return ModRefInfo::Ref;
 
-  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
+  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
     return ModRefInfo::NoModRef;
 
-  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
+  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
 }
 
-static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
-  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
+  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
   return II && II->getIntrinsicID() == IID;
 }
 
@@ -794,9 +827,9 @@
 /// Since we only look at local properties of this function, we really can't
 /// say much about this query.  We do, however, use simple "address taken"
 /// analysis on local objects.
-ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
+ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                         const MemoryLocation &Loc) {
-  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
+  assert(notDifferentParent(Call, Loc.Ptr) &&
          "AliasAnalysis query involving multiple functions!");
 
   const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
@@ -807,15 +840,21 @@
   // contents of the alloca into argument registers or stack slots, so there is
   // no lifetime issue.
   if (isa<AllocaInst>(Object))
-    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
+    if (const CallInst *CI = dyn_cast<CallInst>(Call))
       if (CI->isTailCall() &&
           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
         return ModRefInfo::NoModRef;
 
+  // Stack restore is able to modify unescaped dynamic allocas, so assume it
+  // may modify them even though they do not escape.
+  if (auto *AI = dyn_cast<AllocaInst>(Object))
+    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
+      return ModRefInfo::Mod;
+
   // If the pointer is to a locally allocated object that does not escape,
   // then the call can not mod/ref the pointer unless the call takes the pointer
   // as an argument, and itself doesn't capture it.
-  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
+  if (!isa<Constant>(Object) && Call != Object &&
       isNonEscapingLocalObject(Object)) {
 
     // Optimistically assume that call doesn't touch Object and check this
@@ -824,19 +863,20 @@
     bool IsMustAlias = true;
 
     unsigned OperandNo = 0;
-    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
+    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
          CI != CE; ++CI, ++OperandNo) {
       // Only look at the no-capture or byval pointer arguments.  If this
       // pointer were passed to arguments that were neither of these, then it
       // couldn't be no-capture.
       if (!(*CI)->getType()->isPointerTy() ||
-          (!CS.doesNotCapture(OperandNo) &&
-           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
+          (!Call->doesNotCapture(OperandNo) &&
+           OperandNo < Call->getNumArgOperands() &&
+           !Call->isByValArgument(OperandNo)))
         continue;
 
       // Call doesn't access memory through this operand, so we don't care
       // if it aliases with Object.
-      if (CS.doesNotAccessMemory(OperandNo))
+      if (Call->doesNotAccessMemory(OperandNo))
         continue;
 
       // If this is a no-capture pointer argument, see if we can tell that it
@@ -850,12 +890,12 @@
         continue;
       // Operand aliases 'Object', but call doesn't modify it. Strengthen
       // initial assumption and keep looking in case if there are more aliases.
-      if (CS.onlyReadsMemory(OperandNo)) {
+      if (Call->onlyReadsMemory(OperandNo)) {
         Result = setRef(Result);
         continue;
       }
       // Operand aliases 'Object' but call only writes into it.
-      if (CS.doesNotReadMemory(OperandNo)) {
+      if (Call->doesNotReadMemory(OperandNo)) {
         Result = setMod(Result);
         continue;
       }
@@ -879,17 +919,16 @@
     }
   }
 
-  // If the CallSite is to malloc or calloc, we can assume that it doesn't
+  // If the call is to malloc or calloc, we can assume that it doesn't
   // modify any IR visible value.  This is only valid because we assume these
   // routines do not read values visible in the IR.  TODO: Consider special
   // casing realloc and strdup routines which access only their arguments as
   // well.  Or alternatively, replace all of this with inaccessiblememonly once
   // that's implemented fully.
-  auto *Inst = CS.getInstruction();
-  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
+  if (isMallocOrCallocLikeFn(Call, &TLI)) {
     // Be conservative if the accessed pointer may alias the allocation -
     // fallback to the generic handling below.
-    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
+    if (getBestAAResults().alias(MemoryLocation(Call), Loc) == NoAlias)
       return ModRefInfo::NoModRef;
   }
 
@@ -897,7 +936,7 @@
   // operands, i.e., source and destination of any given memcpy must no-alias.
   // If Loc must-aliases either one of these two locations, then it necessarily
   // no-aliases the other.
-  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
+  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
     AliasResult SrcAA, DestAA;
 
     if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
@@ -921,7 +960,7 @@
   // While the assume intrinsic is marked as arbitrarily writing so that
   // proper control dependencies will be maintained, it never aliases any
   // particular memory location.
-  if (isIntrinsicCall(CS, Intrinsic::assume))
+  if (isIntrinsicCall(Call, Intrinsic::assume))
     return ModRefInfo::NoModRef;
 
   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
@@ -931,7 +970,7 @@
   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
   // heap state at the point the guard is issued needs to be consistent in case
   // the guard invokes the "deopt" continuation.
-  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
+  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
     return ModRefInfo::Ref;
 
   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
@@ -957,20 +996,20 @@
   // The transformation will cause the second store to be ignored (based on
   // rules of invariant.start) and print 40, while the first program always
   // prints 50.
-  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
+  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
     return ModRefInfo::Ref;
 
   // The AAResultBase base class has some smarts, let's use them.
-  return AAResultBase::getModRefInfo(CS, Loc);
+  return AAResultBase::getModRefInfo(Call, Loc);
 }
 
-ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
-                                        ImmutableCallSite CS2) {
+ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
+                                        const CallBase *Call2) {
   // While the assume intrinsic is marked as arbitrarily writing so that
   // proper control dependencies will be maintained, it never aliases any
   // particular memory location.
-  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
-      isIntrinsicCall(CS2, Intrinsic::assume))
+  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
+      isIntrinsicCall(Call2, Intrinsic::assume))
     return ModRefInfo::NoModRef;
 
   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
@@ -984,18 +1023,18 @@
   // NB! This function is *not* commutative, so we special case two
   // possibilities for guard intrinsics.
 
-  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
-    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
+  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
+    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
                ? ModRefInfo::Ref
                : ModRefInfo::NoModRef;
 
-  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
-    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
+  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
+    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
                ? ModRefInfo::Mod
                : ModRefInfo::NoModRef;
 
   // The AAResultBase base class has some smarts, let's use them.
-  return AAResultBase::getModRefInfo(CS1, CS2);
+  return AAResultBase::getModRefInfo(Call1, Call2);
 }
 
 /// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
@@ -1033,8 +1072,12 @@
 
   // If the last (struct) indices are constants and are equal, the other indices
   // might also be dynamically equal, so the GEPs can alias.
-  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
-    return MayAlias;
+  if (C1 && C2) {
+    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
+    if (C1->getValue().sextOrSelf(BitWidth) ==
+        C2->getValue().sextOrSelf(BitWidth))
+      return MayAlias;
+  }
 
   // Find the last-indexed type of the GEP, i.e., the type you'd get if
   // you stripped the last index.
@@ -1117,6 +1160,10 @@
     return MayAlias;
   }
 
+  if (C1->getValue().getActiveBits() > 64 ||
+      C2->getValue().getActiveBits() > 64)
+    return MayAlias;
+
   // We know that:
   // - both GEPs begin indexing from the exact same pointer;
   // - the last indices in both GEPs are constants, indexing into a struct;
@@ -1197,8 +1244,8 @@
       !DecompObject.VarIndices.empty())
     return false;
 
-  int64_t ObjectBaseOffset = DecompObject.StructOffset +
-                             DecompObject.OtherOffset;
+  APInt ObjectBaseOffset = DecompObject.StructOffset +
+                           DecompObject.OtherOffset;
 
   // If the GEP has no variable indices, we know the precise offset
   // from the base, then use it. If the GEP has variable indices,
@@ -1206,10 +1253,11 @@
   // false in that case.
   if (!DecompGEP.VarIndices.empty())
     return false;
-  int64_t GEPBaseOffset = DecompGEP.StructOffset;
+
+  APInt GEPBaseOffset = DecompGEP.StructOffset;
   GEPBaseOffset += DecompGEP.OtherOffset;
 
-  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
+  return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
 }
 
 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
@@ -1224,13 +1272,17 @@
                         LocationSize V2Size, const AAMDNodes &V2AAInfo,
                         const Value *UnderlyingV1, const Value *UnderlyingV2) {
   DecomposedGEP DecompGEP1, DecompGEP2;
+  unsigned MaxPointerSize = getMaxPointerSize(DL);
+  DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
+  DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
+
   bool GEP1MaxLookupReached =
     DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
   bool GEP2MaxLookupReached =
     DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
 
-  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
-  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
+  APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
+  APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
 
   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
          "DecomposeGEPExpression returned a result different from "
@@ -1348,9 +1400,9 @@
   // that the objects are partially overlapping.  If the difference is
   // greater, we know they do not overlap.
   if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
-    if (GEP1BaseOffset >= 0) {
+    if (GEP1BaseOffset.sge(0)) {
       if (V2Size != LocationSize::unknown()) {
-        if ((uint64_t)GEP1BaseOffset < V2Size.getValue())
+        if (GEP1BaseOffset.ult(V2Size.getValue()))
           return PartialAlias;
         return NoAlias;
       }
@@ -1365,7 +1417,7 @@
       // stripped a gep with negative index ('gep <ptr>, -1, ...).
       if (V1Size != LocationSize::unknown() &&
           V2Size != LocationSize::unknown()) {
-        if (-(uint64_t)GEP1BaseOffset < V1Size.getValue())
+        if ((-GEP1BaseOffset).ult(V1Size.getValue()))
           return PartialAlias;
         return NoAlias;
       }
@@ -1373,7 +1425,7 @@
   }
 
   if (!DecompGEP1.VarIndices.empty()) {
-    uint64_t Modulo = 0;
+    APInt Modulo(MaxPointerSize, 0);
     bool AllPositive = true;
     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
 
@@ -1381,7 +1433,7 @@
       // Grab the least significant bit set in any of the scales. We
       // don't need std::abs here (even if the scale's negative) as we'll
       // be ^'ing Modulo with itself later.
-      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;
+      Modulo |= DecompGEP1.VarIndices[i].Scale;
 
       if (AllPositive) {
         // If the Value could change between cycles, then any reasoning about
@@ -1402,9 +1454,9 @@
         // If the variable begins with a zero then we know it's
         // positive, regardless of whether the value is signed or
         // unsigned.
-        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
+        APInt Scale = DecompGEP1.VarIndices[i].Scale;
         AllPositive =
-            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
+            (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
       }
     }
 
@@ -1413,18 +1465,18 @@
     // We can compute the difference between the two addresses
     // mod Modulo. Check whether that difference guarantees that the
     // two locations do not alias.
-    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
+    APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
     if (V1Size != LocationSize::unknown() &&
-        V2Size != LocationSize::unknown() && ModOffset >= V2Size.getValue() &&
-        V1Size.getValue() <= Modulo - ModOffset)
+        V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
+        (Modulo - ModOffset).uge(V1Size.getValue()))
       return NoAlias;
 
     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
-    if (AllPositive && GEP1BaseOffset > 0 &&
+    if (AllPositive && GEP1BaseOffset.sgt(0) &&
         V2Size != LocationSize::unknown() &&
-        V2Size.getValue() <= (uint64_t)GEP1BaseOffset)
+        GEP1BaseOffset.uge(V2Size.getValue()))
       return NoAlias;
 
     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
@@ -1638,7 +1690,7 @@
                                       const Value *O1, const Value *O2) {
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are.
-  if (V1Size == 0 || V2Size == 0)
+  if (V1Size.isZero() || V2Size.isZero())
     return NoAlias;
 
   // Strip off any casts if they exist.
@@ -1830,7 +1882,7 @@
   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
     const Value *V = Src[i].V;
     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
-    int64_t Scale = Src[i].Scale;
+    APInt Scale = Src[i].Scale;
 
     // Find V in Dest.  This is N^2, but pointer indices almost never have more
     // than a few variable indexes.
@@ -1850,7 +1902,7 @@
     }
 
     // If we didn't consume this entry, add it to the end of the Dest list.
-    if (Scale) {
+    if (!!Scale) {
       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
       Dest.push_back(Entry);
     }
@@ -1859,7 +1911,7 @@
 
 bool BasicAAResult::constantOffsetHeuristic(
     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
-    LocationSize MaybeV1Size, LocationSize MaybeV2Size, int64_t BaseOffset,
+    LocationSize MaybeV1Size, LocationSize MaybeV2Size, APInt BaseOffset,
     AssumptionCache *AC, DominatorTree *DT) {
   if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
       MaybeV2Size == LocationSize::unknown())
@@ -1904,14 +1956,15 @@
   // the minimum distance between %i and %i + 5 is 3.
   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
   MinDiff = APIntOps::umin(MinDiff, Wrapped);
-  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
+  APInt MinDiffBytes =
+    MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
 
   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
   // V2Size can fit in the MinDiffBytes gap.
-  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
-         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
+  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
+         MinDiffBytes.uge(V2Size + BaseOffset.abs());
 }
 
 //===----------------------------------------------------------------------===//
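
[Illustrative note, not part of the patch] The BasicAliasAnalysis hunks above
replace int64_t offset arithmetic with APInt sized to the maximum pointer
width: int64_t comparisons hide their signedness and products can wrap
silently, while APInt forces an explicit signed/unsigned choice at every
comparison. A minimal C++ sketch, assuming a hypothetical 64-bit pointer
width; the function name is illustrative:

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

// Mirrors the GEP1BaseOffset checks above with explicit signedness.
void offsetComparisonSketch() {
  APInt GEP1BaseOffset(64, -8, /*isSigned=*/true); // negative GEP offset
  APInt V2Size(64, 16);                            // access size in bytes
  assert(!GEP1BaseOffset.sge(0));        // signed test: offset is negative
  assert((-GEP1BaseOffset).ult(V2Size)); // unsigned test on the magnitude
}
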
diff --git a/lib/Analysis/BlockFrequencyInfo.cpp b/lib/Analysis/BlockFrequencyInfo.cpp
index 41c2958..ef27c36 100644
--- a/lib/Analysis/BlockFrequencyInfo.cpp
+++ b/lib/Analysis/BlockFrequencyInfo.cpp
@@ -252,8 +252,8 @@
 
 /// Pop up a ghostview window with the current block frequency propagation
 /// rendered using dot.
-void BlockFrequencyInfo::view() const {
-  ViewGraph(const_cast<BlockFrequencyInfo *>(this), "BlockFrequencyDAGs");
+void BlockFrequencyInfo::view(StringRef title) const {
+  ViewGraph(const_cast<BlockFrequencyInfo *>(this), title);
 }
 
 const Function *BlockFrequencyInfo::getFunction() const {
diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp
index 7c74c65..669f4f2 100644
--- a/lib/Analysis/CaptureTracking.cpp
+++ b/lib/Analysis/CaptureTracking.cpp
@@ -23,7 +23,6 @@
 #include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/OrderedBasicBlock.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
@@ -170,7 +169,7 @@
   (void)StoreCaptures;
 
   SimpleCaptureTracker SCT(ReturnCaptures);
-  PointerMayBeCaptured(V, &SCT);
+  PointerMayBeCaptured(V, &SCT, MaxUsesToExplore);
   return SCT.Captured;
 }
 
@@ -194,7 +193,8 @@
   bool UseNewOBB = OBB == nullptr;
 
   if (!DT)
-    return PointerMayBeCaptured(V, ReturnCaptures, StoreCaptures);
+    return PointerMayBeCaptured(V, ReturnCaptures, StoreCaptures,
+                                MaxUsesToExplore);
   if (UseNewOBB)
     OBB = new OrderedBasicBlock(I->getParent());
 
@@ -202,7 +202,7 @@
   // with StoreCaptures.
 
   CapturesBefore CB(ReturnCaptures, I, DT, IncludeI, OBB);
-  PointerMayBeCaptured(V, &CB);
+  PointerMayBeCaptured(V, &CB, MaxUsesToExplore);
 
   if (UseNewOBB)
     delete OBB;
@@ -239,11 +239,12 @@
     switch (I->getOpcode()) {
     case Instruction::Call:
     case Instruction::Invoke: {
-      CallSite CS(I);
+      auto *Call = cast<CallBase>(I);
       // Not captured if the callee is readonly, doesn't return a copy through
       // its return value and doesn't unwind (a readonly function can leak bits
       // by throwing an exception or not depending on the input value).
-      if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy())
+      if (Call->onlyReadsMemory() && Call->doesNotThrow() &&
+          Call->getType()->isVoidTy())
         break;
 
       // The pointer is not captured if returned pointer is not captured.
@@ -251,14 +252,14 @@
       // marked with nocapture do not capture. This means that places like
       // GetUnderlyingObject in ValueTracking or DecomposeGEPExpression
       // in BasicAA also need to know about this property.
-      if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS)) {
-        AddUses(I);
+      if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call)) {
+        AddUses(Call);
         break;
       }
 
       // Volatile operations effectively capture the memory location that they
       // load and store to.
-      if (auto *MI = dyn_cast<MemIntrinsic>(I))
+      if (auto *MI = dyn_cast<MemIntrinsic>(Call))
         if (MI->isVolatile())
           if (Tracker->captured(U))
             return;
@@ -270,13 +271,14 @@
       // that loading a value from a pointer does not cause the pointer to be
       // captured, even though the loaded value might be the pointer itself
       // (think of self-referential objects).
-      CallSite::data_operand_iterator B =
-        CS.data_operands_begin(), E = CS.data_operands_end();
-      for (CallSite::data_operand_iterator A = B; A != E; ++A)
-        if (A->get() == V && !CS.doesNotCapture(A - B))
+      for (auto IdxOpPair : enumerate(Call->data_ops())) {
+        int Idx = IdxOpPair.index();
+        Value *A = IdxOpPair.value();
+        if (A == V && !Call->doesNotCapture(Idx))
           // The parameter is not marked 'nocapture' - captured.
           if (Tracker->captured(U))
             return;
+      }
       break;
     }
     case Instruction::Load:
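
[Illustrative note, not part of the patch] The CaptureTracking hunk above
replaces the manual "A - B" iterator arithmetic with llvm::enumerate(). A
minimal sketch of that idiom on a plain vector; all names here are
hypothetical:

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

void enumerateSketch() {
  std::vector<int> DataOps = {10, 20, 30};
  for (auto IdxOpPair : llvm::enumerate(DataOps)) {
    // index() supplies the operand number directly; value() is the element,
    // matching the Call->data_ops() loop above.
    llvm::errs() << IdxOpPair.index() << " -> " << IdxOpPair.value() << "\n";
  }
}
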
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 6180886..5da29d6 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -1629,6 +1629,18 @@
   return false;
 }
 
+static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
+  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
+    C = &CI->getValue();
+    return true;
+  }
+  if (isa<UndefValue>(Op)) {
+    C = nullptr;
+    return true;
+  }
+  return false;
+}
+
 Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
                                  ArrayRef<Constant *> Operands,
                                  const TargetLibraryInfo *TLI,
@@ -1643,8 +1655,10 @@
       return nullptr;
     }
     if (isa<UndefValue>(Operands[0])) {
-      // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN
-      if (IntrinsicID == Intrinsic::cos)
+      // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
+      // ctpop() is between 0 and bitwidth, pick 0 for undef.
+      if (IntrinsicID == Intrinsic::cos ||
+          IntrinsicID == Intrinsic::ctpop)
         return Constant::getNullValue(Ty);
       if (IntrinsicID == Intrinsic::bswap ||
           IntrinsicID == Intrinsic::bitreverse ||
@@ -1995,62 +2009,92 @@
       return nullptr;
     }
 
-    if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
-      if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
+    if (Operands[0]->getType()->isIntegerTy() &&
+        Operands[1]->getType()->isIntegerTy()) {
+      const APInt *C0, *C1;
+      if (!getConstIntOrUndef(Operands[0], C0) ||
+          !getConstIntOrUndef(Operands[1], C1))
+        return nullptr;
+
+      switch (IntrinsicID) {
+      default: break;
+      case Intrinsic::smul_with_overflow:
+      case Intrinsic::umul_with_overflow:
+        // Even if both operands are undef, we cannot fold muls to undef
+        // in the general case. For example, on i2 there are no inputs
+        // that would produce { i2 -1, i1 true } as the result.
+        if (!C0 || !C1)
+          return Constant::getNullValue(Ty);
+        LLVM_FALLTHROUGH;
+      case Intrinsic::sadd_with_overflow:
+      case Intrinsic::uadd_with_overflow:
+      case Intrinsic::ssub_with_overflow:
+      case Intrinsic::usub_with_overflow: {
+        if (!C0 || !C1)
+          return UndefValue::get(Ty);
+
+        APInt Res;
+        bool Overflow;
         switch (IntrinsicID) {
-        default: break;
+        default: llvm_unreachable("Invalid case");
         case Intrinsic::sadd_with_overflow:
+          Res = C0->sadd_ov(*C1, Overflow);
+          break;
         case Intrinsic::uadd_with_overflow:
+          Res = C0->uadd_ov(*C1, Overflow);
+          break;
         case Intrinsic::ssub_with_overflow:
+          Res = C0->ssub_ov(*C1, Overflow);
+          break;
         case Intrinsic::usub_with_overflow:
+          Res = C0->usub_ov(*C1, Overflow);
+          break;
         case Intrinsic::smul_with_overflow:
-        case Intrinsic::umul_with_overflow: {
-          APInt Res;
-          bool Overflow;
-          switch (IntrinsicID) {
-          default: llvm_unreachable("Invalid case");
-          case Intrinsic::sadd_with_overflow:
-            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::uadd_with_overflow:
-            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::ssub_with_overflow:
-            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::usub_with_overflow:
-            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::smul_with_overflow:
-            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::umul_with_overflow:
-            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
-            break;
-          }
-          Constant *Ops[] = {
-            ConstantInt::get(Ty->getContext(), Res),
-            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
-          };
-          return ConstantStruct::get(cast<StructType>(Ty), Ops);
+          Res = C0->smul_ov(*C1, Overflow);
+          break;
+        case Intrinsic::umul_with_overflow:
+          Res = C0->umul_ov(*C1, Overflow);
+          break;
         }
-        case Intrinsic::uadd_sat:
-          return ConstantInt::get(Ty, Op1->getValue().uadd_sat(Op2->getValue()));
-        case Intrinsic::sadd_sat:
-          return ConstantInt::get(Ty, Op1->getValue().sadd_sat(Op2->getValue()));
-        case Intrinsic::usub_sat:
-          return ConstantInt::get(Ty, Op1->getValue().usub_sat(Op2->getValue()));
-        case Intrinsic::ssub_sat:
-          return ConstantInt::get(Ty, Op1->getValue().ssub_sat(Op2->getValue()));
-        case Intrinsic::cttz:
-          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
-            return UndefValue::get(Ty);
-          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
-        case Intrinsic::ctlz:
-          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
-            return UndefValue::get(Ty);
-          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
-        }
+        Constant *Ops[] = {
+          ConstantInt::get(Ty->getContext(), Res),
+          ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
+        };
+        return ConstantStruct::get(cast<StructType>(Ty), Ops);
+      }
+      case Intrinsic::uadd_sat:
+      case Intrinsic::sadd_sat:
+        if (!C0 && !C1)
+          return UndefValue::get(Ty);
+        if (!C0 || !C1)
+          return Constant::getAllOnesValue(Ty);
+        if (IntrinsicID == Intrinsic::uadd_sat)
+          return ConstantInt::get(Ty, C0->uadd_sat(*C1));
+        else
+          return ConstantInt::get(Ty, C0->sadd_sat(*C1));
+      case Intrinsic::usub_sat:
+      case Intrinsic::ssub_sat:
+        if (!C0 && !C1)
+          return UndefValue::get(Ty);
+        if (!C0 || !C1)
+          return Constant::getNullValue(Ty);
+        if (IntrinsicID == Intrinsic::usub_sat)
+          return ConstantInt::get(Ty, C0->usub_sat(*C1));
+        else
+          return ConstantInt::get(Ty, C0->ssub_sat(*C1));
+      case Intrinsic::cttz:
+      case Intrinsic::ctlz:
+        assert(C1 && "Must be constant int");
+
+        // cttz(0, 1) and ctlz(0, 1) are undef.
+        if (C1->isOneValue() && (!C0 || C0->isNullValue()))
+          return UndefValue::get(Ty);
+        if (!C0)
+          return Constant::getNullValue(Ty);
+        if (IntrinsicID == Intrinsic::cttz)
+          return ConstantInt::get(Ty, C0->countTrailingZeros());
+        else
+          return ConstantInt::get(Ty, C0->countLeadingZeros());
       }
 
       return nullptr;
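
[Illustrative note, not part of the patch] The with.overflow folds above lean
on APInt's checked arithmetic, which returns the wrapped result and reports
overflow through an out-parameter, exactly the { result, overflow } pair the
intrinsic yields. A minimal sketch for i8:

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

void saddOverflowSketch() {
  bool Overflow;
  APInt A = APInt::getSignedMaxValue(8); // i8 127
  APInt Res = A.sadd_ov(APInt(8, 1), Overflow);
  // llvm.sadd.with.overflow(i8 127, i8 1) folds to { i8 -128, i1 true }.
  assert(Overflow && Res.getSExtValue() == -128);
}
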
@@ -2136,26 +2180,33 @@
   }
 
   if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
-    auto *C0 = dyn_cast<ConstantInt>(Operands[0]);
-    auto *C1 = dyn_cast<ConstantInt>(Operands[1]);
-    auto *C2 = dyn_cast<ConstantInt>(Operands[2]);
-    if (!(C0 && C1 && C2))
+    const APInt *C0, *C1, *C2;
+    if (!getConstIntOrUndef(Operands[0], C0) ||
+        !getConstIntOrUndef(Operands[1], C1) ||
+        !getConstIntOrUndef(Operands[2], C2))
       return nullptr;
 
+    bool IsRight = IntrinsicID == Intrinsic::fshr;
+    if (!C2)
+      return Operands[IsRight ? 1 : 0];
+    if (!C0 && !C1)
+      return UndefValue::get(Ty);
+
     // The shift amount is interpreted as modulo the bitwidth. If the shift
     // amount is effectively 0, avoid UB due to oversized inverse shift below.
-    unsigned BitWidth = C0->getBitWidth();
-    unsigned ShAmt = C2->getValue().urem(BitWidth);
-    bool IsRight = IntrinsicID == Intrinsic::fshr;
+    unsigned BitWidth = C2->getBitWidth();
+    unsigned ShAmt = C2->urem(BitWidth);
     if (!ShAmt)
-      return IsRight ? C1 : C0;
+      return Operands[IsRight ? 1 : 0];
 
-    // (X << ShlAmt) | (Y >> LshrAmt)
-    const APInt &X = C0->getValue();
-    const APInt &Y = C1->getValue();
+    // (C0 << ShlAmt) | (C1 >> LshrAmt)
     unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
     unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
-    return ConstantInt::get(Ty->getContext(), X.shl(ShlAmt) | Y.lshr(LshrAmt));
+    if (!C0)
+      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
+    if (!C1)
+      return ConstantInt::get(Ty, C0->shl(ShlAmt));
+    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
   }
 
   return nullptr;
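
[Illustrative note, not part of the patch] A minimal sketch of the
funnel-shift arithmetic folded above, on plain 8-bit integers; fshl8 is a
hypothetical helper:

#include <cassert>
#include <cstdint>

// fshl concatenates C0:C1, shifts left by ShAmt (mod width), keeps the top 8.
uint8_t fshl8(uint8_t C0, uint8_t C1, unsigned ShAmt) {
  ShAmt %= 8;
  if (!ShAmt)
    return C0; // zero shift: return C0, avoiding the oversized inverse shift
  return (uint8_t)((C0 << ShAmt) | (C1 >> (8 - ShAmt)));
}

void fshlSketch() {
  assert(fshl8(0xAB, 0xCD, 4) == 0xBC); // low nibble of C0, high nibble of C1
}
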
diff --git a/lib/Analysis/DemandedBits.cpp b/lib/Analysis/DemandedBits.cpp
index 0382787..34f785f 100644
--- a/lib/Analysis/DemandedBits.cpp
+++ b/lib/Analysis/DemandedBits.cpp
@@ -21,8 +21,7 @@
 
 #include "llvm/Analysis/DemandedBits.h"
 #include "llvm/ADT/APInt.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -85,8 +84,9 @@
 }
 
 void DemandedBits::determineLiveOperandBits(
-    const Instruction *UserI, const Instruction *I, unsigned OperandNo,
-    const APInt &AOut, APInt &AB, KnownBits &Known, KnownBits &Known2) {
+    const Instruction *UserI, const Value *Val, unsigned OperandNo,
+    const APInt &AOut, APInt &AB, KnownBits &Known, KnownBits &Known2,
+    bool &KnownBitsComputed) {
   unsigned BitWidth = AB.getBitWidth();
 
   // We're called once per operand, but for some instructions, we need to
@@ -97,7 +97,11 @@
   // provided here.
   auto ComputeKnownBits =
       [&](unsigned BitWidth, const Value *V1, const Value *V2) {
-        const DataLayout &DL = I->getModule()->getDataLayout();
+        if (KnownBitsComputed)
+          return;
+        KnownBitsComputed = true;
+
+        const DataLayout &DL = UserI->getModule()->getDataLayout();
         Known = KnownBits(BitWidth);
         computeKnownBits(V1, Known, DL, 0, &AC, UserI, &DT);
 
@@ -129,7 +133,7 @@
           // We need some output bits, so we need all bits of the
           // input to the left of, and including, the leftmost bit
           // known to be one.
-          ComputeKnownBits(BitWidth, I, nullptr);
+          ComputeKnownBits(BitWidth, Val, nullptr);
           AB = APInt::getHighBitsSet(BitWidth,
                  std::min(BitWidth, Known.countMaxLeadingZeros()+1));
         }
@@ -139,7 +143,7 @@
           // We need some output bits, so we need all bits of the
           // input to the right of, and including, the rightmost bit
           // known to be one.
-          ComputeKnownBits(BitWidth, I, nullptr);
+          ComputeKnownBits(BitWidth, Val, nullptr);
           AB = APInt::getLowBitsSet(BitWidth,
                  std::min(BitWidth, Known.countMaxTrailingZeros()+1));
         }
@@ -234,14 +238,11 @@
     // other operand are dead (unless they're both zero, in which
     // case they can't both be dead, so just mark the LHS bits as
     // dead).
-    if (OperandNo == 0) {
-      ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
+    ComputeKnownBits(BitWidth, UserI->getOperand(0), UserI->getOperand(1));
+    if (OperandNo == 0)
       AB &= ~Known2.Zero;
-    } else {
-      if (!isa<Instruction>(UserI->getOperand(0)))
-        ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
+    else
       AB &= ~(Known.Zero & ~Known2.Zero);
-    }
     break;
   case Instruction::Or:
     AB = AOut;
@@ -250,14 +251,11 @@
     // other operand are dead (unless they're both one, in which
     // case they can't both be dead, so just mark the LHS bits as
     // dead).
-    if (OperandNo == 0) {
-      ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
+    ComputeKnownBits(BitWidth, UserI->getOperand(0), UserI->getOperand(1));
+    if (OperandNo == 0)
       AB &= ~Known2.One;
-    } else {
-      if (!isa<Instruction>(UserI->getOperand(0)))
-        ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
+    else
       AB &= ~(Known.One & ~Known2.One);
-    }
     break;
   case Instruction::Xor:
   case Instruction::PHI:
@@ -314,8 +312,9 @@
 
   Visited.clear();
   AliveBits.clear();
+  DeadUses.clear();
 
-  SmallVector<Instruction*, 128> Worklist;
+  SmallSetVector<Instruction*, 16> Worklist;
 
   // Collect the set of "root" instructions that are known live.
   for (Instruction &I : instructions(F)) {
@@ -330,7 +329,7 @@
     Type *T = I.getType();
     if (T->isIntOrIntVectorTy()) {
       if (AliveBits.try_emplace(&I, T->getScalarSizeInBits(), 0).second)
-        Worklist.push_back(&I);
+        Worklist.insert(&I);
 
       continue;
     }
@@ -341,7 +340,7 @@
         Type *T = J->getType();
         if (T->isIntOrIntVectorTy())
           AliveBits[J] = APInt::getAllOnesValue(T->getScalarSizeInBits());
-        Worklist.push_back(J);
+        Worklist.insert(J);
       }
     }
     // To save memory, we don't add I to the Visited set here. Instead, we
@@ -358,7 +357,8 @@
     APInt AOut;
     if (UserI->getType()->isIntOrIntVectorTy()) {
       AOut = AliveBits[UserI];
-      LLVM_DEBUG(dbgs() << " Alive Out: " << AOut);
+      LLVM_DEBUG(dbgs() << " Alive Out: 0x"
+                        << Twine::utohexstr(AOut.getLimitedValue()));
     }
     LLVM_DEBUG(dbgs() << "\n");
 
@@ -366,26 +366,40 @@
       Visited.insert(UserI);
 
     KnownBits Known, Known2;
+    bool KnownBitsComputed = false;
     // Compute the set of alive bits for each operand. These are anded into the
     // existing set, if any, and if that changes the set of alive bits, the
     // operand is added to the work-list.
     for (Use &OI : UserI->operands()) {
-      if (Instruction *I = dyn_cast<Instruction>(OI)) {
-        Type *T = I->getType();
-        if (T->isIntOrIntVectorTy()) {
-          unsigned BitWidth = T->getScalarSizeInBits();
-          APInt AB = APInt::getAllOnesValue(BitWidth);
-          if (UserI->getType()->isIntOrIntVectorTy() && !AOut &&
-              !isAlwaysLive(UserI)) {
-            AB = APInt(BitWidth, 0);
-          } else {
-            // If all bits of the output are dead, then all bits of the input
-            // Bits of each operand that are used to compute alive bits of the
-            // output are alive, all others are dead.
-            determineLiveOperandBits(UserI, I, OI.getOperandNo(), AOut, AB,
-                                     Known, Known2);
-          }
+      // We also want to detect dead uses of arguments, but will only store
+      // demanded bits for instructions.
+      Instruction *I = dyn_cast<Instruction>(OI);
+      if (!I && !isa<Argument>(OI))
+        continue;
 
+      Type *T = OI->getType();
+      if (T->isIntOrIntVectorTy()) {
+        unsigned BitWidth = T->getScalarSizeInBits();
+        APInt AB = APInt::getAllOnesValue(BitWidth);
+        if (UserI->getType()->isIntOrIntVectorTy() && !AOut &&
+            !isAlwaysLive(UserI)) {
+          // If all bits of the output are dead, then all bits of the input
+          // are also dead.
+          AB = APInt(BitWidth, 0);
+        } else {
+          // Bits of each operand that are used to compute alive bits of the
+          // output are alive, all others are dead.
+          determineLiveOperandBits(UserI, OI, OI.getOperandNo(), AOut, AB,
+                                   Known, Known2, KnownBitsComputed);
+
+          // Keep track of uses which have no demanded bits.
+          if (AB.isNullValue())
+            DeadUses.insert(&OI);
+          else
+            DeadUses.erase(&OI);
+        }
+
+        if (I) {
           // If we've added to the set of alive bits (or the operand has not
           // been previously visited), then re-queue the operand to be visited
           // again.
@@ -397,11 +411,11 @@
           APInt ABNew = AB | ABPrev;
           if (ABNew != ABPrev || ABI == AliveBits.end()) {
             AliveBits[I] = std::move(ABNew);
-            Worklist.push_back(I);
+            Worklist.insert(I);
           }
-        } else if (!Visited.count(I)) {
-          Worklist.push_back(I);
         }
+      } else if (I && !Visited.count(I)) {
+        Worklist.insert(I);
       }
     }
   }
@@ -426,6 +440,31 @@
     !isAlwaysLive(I);
 }
 
+bool DemandedBits::isUseDead(Use *U) {
+  // We only track integer uses, everything else is assumed live.
+  if (!(*U)->getType()->isIntOrIntVectorTy())
+    return false;
+
+  // Uses by always-live instructions are never dead.
+  Instruction *UserI = cast<Instruction>(U->getUser());
+  if (isAlwaysLive(UserI))
+    return false;
+
+  performAnalysis();
+  if (DeadUses.count(U))
+    return true;
+
+  // If no output bits are demanded, no input bits are demanded and the use
+  // is dead. These uses might not be explicitly present in the DeadUses map.
+  if (UserI->getType()->isIntOrIntVectorTy()) {
+    auto Found = AliveBits.find(UserI);
+    if (Found != AliveBits.end() && Found->second.isNullValue())
+      return true;
+  }
+
+  return false;
+}
+
 void DemandedBits::print(raw_ostream &OS) {
   performAnalysis();
   for (auto &KV : AliveBits) {
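
[Illustrative note, not part of the patch] The DemandedBits worklist above
moves from SmallVector to SmallSetVector so that an instruction re-queued
several times in one round is only processed once. A minimal sketch of the
deduplication this buys:

#include "llvm/ADT/SetVector.h"
#include <cassert>

void worklistSketch() {
  llvm::SmallSetVector<int, 16> Worklist;
  Worklist.insert(1);
  Worklist.insert(2);
  Worklist.insert(1);           // duplicate insert is a no-op
  assert(Worklist.size() == 2); // a SmallVector push_back would hold three
}
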
diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp
index b544ae5..3f4dfa5 100644
--- a/lib/Analysis/DependenceAnalysis.cpp
+++ b/lib/Analysis/DependenceAnalysis.cpp
@@ -194,6 +194,13 @@
   dumpExampleDependence(OS, info.get());
 }
 
+PreservedAnalyses
+DependenceAnalysisPrinterPass::run(Function &F, FunctionAnalysisManager &FAM) {
+  OS << "'Dependence Analysis' for function '" << F.getName() << "':\n";
+  dumpExampleDependence(OS, &FAM.getResult<DependenceAnalysis>(F));
+  return PreservedAnalyses::all();
+}
+
 //===----------------------------------------------------------------------===//
 // Dependence methods
 
diff --git a/lib/Analysis/GlobalsModRef.cpp b/lib/Analysis/GlobalsModRef.cpp
index 2c50360..b28abca 100644
--- a/lib/Analysis/GlobalsModRef.cpp
+++ b/lib/Analysis/GlobalsModRef.cpp
@@ -255,11 +255,11 @@
 }
 
 FunctionModRefBehavior
-GlobalsAAResult::getModRefBehavior(ImmutableCallSite CS) {
+GlobalsAAResult::getModRefBehavior(const CallBase *Call) {
   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
 
-  if (!CS.hasOperandBundles())
-    if (const Function *F = CS.getCalledFunction())
+  if (!Call->hasOperandBundles())
+    if (const Function *F = Call->getCalledFunction())
       if (FunctionInfo *FI = getFunctionInfo(F)) {
         if (!isModOrRefSet(FI->getModRefInfo()))
           Min = FMRB_DoesNotAccessMemory;
@@ -267,7 +267,7 @@
           Min = FMRB_OnlyReadsMemory;
       }
 
-  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(CS) & Min);
+  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(Call) & Min);
 }
 
 /// Returns the function info for the function, or null if we don't have
@@ -366,14 +366,14 @@
     } else if (Operator::getOpcode(I) == Instruction::BitCast) {
       if (AnalyzeUsesOfPointer(I, Readers, Writers, OkayStoreDest))
         return true;
-    } else if (auto CS = CallSite(I)) {
+    } else if (auto *Call = dyn_cast<CallBase>(I)) {
       // Make sure that this is just the function being called, not that it
       // is being passed into the function.
-      if (CS.isDataOperand(&U)) {
+      if (Call->isDataOperand(&U)) {
         // Detect calls to free.
-        if (CS.isArgOperand(&U) && isFreeCall(I, &TLI)) {
+        if (Call->isArgOperand(&U) && isFreeCall(I, &TLI)) {
           if (Writers)
-            Writers->insert(CS->getParent()->getParent());
+            Writers->insert(Call->getParent()->getParent());
         } else {
           return true; // Argument of an unknown call.
         }
@@ -576,15 +576,15 @@
 
         // We handle calls specially because the graph-relevant aspects are
         // handled above.
-        if (auto CS = CallSite(&I)) {
-          if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) {
+        if (auto *Call = dyn_cast<CallBase>(&I)) {
+          if (isAllocationFn(Call, &TLI) || isFreeCall(Call, &TLI)) {
             // FIXME: It is completely unclear why this is necessary and not
             // handled by the above graph code.
             FI.addModRefInfo(ModRefInfo::ModRef);
-          } else if (Function *Callee = CS.getCalledFunction()) {
+          } else if (Function *Callee = Call->getCalledFunction()) {
             // The callgraph doesn't include intrinsic calls.
             if (Callee->isIntrinsic()) {
-              if (isa<DbgInfoIntrinsic>(I))
+              if (isa<DbgInfoIntrinsic>(Call))
                 // Don't let dbg intrinsics affect alias info.
                 continue;
 
@@ -885,16 +885,16 @@
   return AAResultBase::alias(LocA, LocB);
 }
 
-ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
+ModRefInfo GlobalsAAResult::getModRefInfoForArgument(const CallBase *Call,
                                                      const GlobalValue *GV) {
-  if (CS.doesNotAccessMemory())
+  if (Call->doesNotAccessMemory())
     return ModRefInfo::NoModRef;
   ModRefInfo ConservativeResult =
-      CS.onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef;
+      Call->onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef;
 
   // Iterate through all the arguments to the called function. If any argument
   // is based on GV, return the conservative result.
-  for (auto &A : CS.args()) {
+  for (auto &A : Call->args()) {
     SmallVector<Value*, 4> Objects;
     GetUnderlyingObjects(A, Objects, DL);
 
@@ -914,7 +914,7 @@
   return ModRefInfo::NoModRef;
 }
 
-ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
+ModRefInfo GlobalsAAResult::getModRefInfo(const CallBase *Call,
                                           const MemoryLocation &Loc) {
   ModRefInfo Known = ModRefInfo::ModRef;
 
@@ -923,15 +923,15 @@
   if (const GlobalValue *GV =
           dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL)))
     if (GV->hasLocalLinkage())
-      if (const Function *F = CS.getCalledFunction())
+      if (const Function *F = Call->getCalledFunction())
         if (NonAddressTakenGlobals.count(GV))
           if (const FunctionInfo *FI = getFunctionInfo(F))
             Known = unionModRef(FI->getModRefInfoForGlobal(*GV),
-                                getModRefInfoForArgument(CS, GV));
+                                getModRefInfoForArgument(Call, GV));
 
   if (!isModOrRefSet(Known))
     return ModRefInfo::NoModRef; // No need to query other mod/ref analyses
-  return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc));
+  return intersectModRef(Known, AAResultBase::getModRefInfo(Call, Loc));
 }
 
 GlobalsAAResult::GlobalsAAResult(const DataLayout &DL,
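
[Illustrative note, not part of the patch] The GlobalsModRef hunks above
combine results with unionModRef/intersectModRef. A minimal sketch of those
two lattice operations, assuming the ModRefInfo helpers from
llvm/Analysis/AliasAnalysis.h:

#include "llvm/Analysis/AliasAnalysis.h"
#include <cassert>
using namespace llvm;

void modRefSketch() {
  // union accumulates effects: a read plus a write is a read-write.
  assert(isModAndRefSet(unionModRef(ModRefInfo::Ref, ModRefInfo::Mod)));
  // intersect refines a conservative answer with what another analysis proved.
  assert(intersectModRef(ModRefInfo::ModRef, ModRefInfo::Ref) ==
         ModRefInfo::Ref);
}
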
diff --git a/lib/Analysis/IndirectCallPromotionAnalysis.cpp b/lib/Analysis/IndirectCallPromotionAnalysis.cpp
index 4659c0a..d6e6e76 100644
--- a/lib/Analysis/IndirectCallPromotionAnalysis.cpp
+++ b/lib/Analysis/IndirectCallPromotionAnalysis.cpp
@@ -15,7 +15,7 @@
 
 #include "llvm/Analysis/IndirectCallPromotionAnalysis.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/IndirectCallSiteVisitor.h"
+#include "llvm/Analysis/IndirectCallVisitor.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/InstIterator.h"
 #include "llvm/IR/InstVisitor.h"
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index a3347db..6ddb3cb 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -1731,6 +1731,13 @@
   // Update the threshold based on callsite properties
   updateThreshold(CS, F);
 
+  // While Threshold depends on commandline options that can take negative
+  // values, we want to enforce the invariant that the computed threshold and
+  // bonuses are non-negative.
+  assert(Threshold >= 0);
+  assert(SingleBBBonus >= 0);
+  assert(VectorBonus >= 0);
+
   // Speculatively apply all possible bonuses to Threshold. If cost exceeds
   // this Threshold any time, and cost cannot decrease, we can stop processing
   // the rest of the function body.
diff --git a/lib/Analysis/InstructionPrecedenceTracking.cpp b/lib/Analysis/InstructionPrecedenceTracking.cpp
index b98975b..816126f 100644
--- a/lib/Analysis/InstructionPrecedenceTracking.cpp
+++ b/lib/Analysis/InstructionPrecedenceTracking.cpp
@@ -99,9 +99,17 @@
 }
 #endif
 
-void InstructionPrecedenceTracking::invalidateBlock(const BasicBlock *BB) {
+void InstructionPrecedenceTracking::insertInstructionTo(const Instruction *Inst,
+                                                        const BasicBlock *BB) {
+  if (isSpecialInstruction(Inst))
+    FirstSpecialInsts.erase(BB);
   OI.invalidateBlock(BB);
-  FirstSpecialInsts.erase(BB);
+}
+
+void InstructionPrecedenceTracking::removeInstruction(const Instruction *Inst) {
+  if (isSpecialInstruction(Inst))
+    FirstSpecialInsts.erase(Inst->getParent());
+  OI.invalidateBlock(Inst->getParent());
 }
 
 void InstructionPrecedenceTracking::clear() {
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 120a024..ccf907c 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -2630,6 +2630,70 @@
   }
 }
 
+/// Some intrinsics with a constant operand have an easy-to-compute range of
+/// outputs. This can be used to fold a comparison to always true or always
+/// false.
+static void setLimitsForIntrinsic(IntrinsicInst &II, APInt &Lower,
+                                  APInt &Upper) {
+  unsigned Width = Lower.getBitWidth();
+  const APInt *C;
+  switch (II.getIntrinsicID()) {
+  case Intrinsic::uadd_sat:
+    // uadd.sat(x, C) produces [C, UINT_MAX].
+    if (match(II.getOperand(0), m_APInt(C)) ||
+        match(II.getOperand(1), m_APInt(C)))
+      Lower = *C;
+    break;
+  case Intrinsic::sadd_sat:
+    if (match(II.getOperand(0), m_APInt(C)) ||
+        match(II.getOperand(1), m_APInt(C))) {
+      if (C->isNegative()) {
+        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
+        Lower = APInt::getSignedMinValue(Width);
+        Upper = APInt::getSignedMaxValue(Width) + *C + 1;
+      } else {
+        // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
+        Lower = APInt::getSignedMinValue(Width) + *C;
+        Upper = APInt::getSignedMaxValue(Width) + 1;
+      }
+    }
+    break;
+  case Intrinsic::usub_sat:
+    // usub.sat(C, x) produces [0, C].
+    if (match(II.getOperand(0), m_APInt(C)))
+      Upper = *C + 1;
+    // usub.sat(x, C) produces [0, UINT_MAX - C].
+    else if (match(II.getOperand(1), m_APInt(C)))
+      Upper = APInt::getMaxValue(Width) - *C + 1;
+    break;
+  case Intrinsic::ssub_sat:
+    if (match(II.getOperand(0), m_APInt(C))) {
+      if (C->isNegative()) {
+        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
+        Lower = APInt::getSignedMinValue(Width);
+        Upper = *C - APInt::getSignedMinValue(Width) + 1;
+      } else {
+        // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
+        Lower = *C - APInt::getSignedMaxValue(Width);
+        Upper = APInt::getSignedMaxValue(Width) + 1;
+      }
+    } else if (match(II.getOperand(1), m_APInt(C))) {
+      if (C->isNegative()) {
+        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
+        Lower = APInt::getSignedMinValue(Width) - *C;
+        Upper = APInt::getSignedMaxValue(Width) + 1;
+      } else {
+        // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
+        Lower = APInt::getSignedMinValue(Width);
+        Upper = APInt::getSignedMaxValue(Width) - *C + 1;
+      }
+    }
+    break;
+  default:
+    break;
+  }
+}
+
 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
                                        Value *RHS, const InstrInfoQuery &IIQ) {
   Type *ITy = GetCompareTy(RHS); // The return type.
@@ -2663,6 +2727,8 @@
   APInt Upper = APInt(Width, 0);
   if (auto *BO = dyn_cast<BinaryOperator>(LHS))
     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
+  else if (auto *II = dyn_cast<IntrinsicInst>(LHS))
+    setLimitsForIntrinsic(*II, Lower, Upper);
 
   ConstantRange LHS_CR =
       Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
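
[Illustrative note, not part of the patch] setLimitsForIntrinsic above derives
ranges such as [C, UINT_MAX] for uadd.sat(x, C), so a compare against that
range can fold outright. A minimal sketch of the lower bound for i8:

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

void uaddSatRangeSketch() {
  APInt C(8, 42);
  // Saturating add may clamp at UINT8_MAX but can never land below C, so
  // "icmp ult (uadd.sat(x, 42)), 42" simplifies to false for every x.
  assert(APInt(8, 250).uadd_sat(C).uge(C)); // clamped case
  assert(APInt(8, 0).uadd_sat(C).uge(C));   // non-clamped case
}
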
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index db919bd..5d0a627 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -330,12 +330,12 @@
       // Check that the memcpy arguments don't overlap. The AliasAnalysis API
       // isn't expressive enough for what we really want to do. Known partial
       // overlap is not distinguished from the case where nothing is known.
-      uint64_t Size = 0;
+      auto Size = LocationSize::unknown();
       if (const ConstantInt *Len =
               dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                               /*OffsetOk=*/false)))
         if (Len->getValue().isIntN(32))
-          Size = Len->getValue().getZExtValue();
+          Size = LocationSize::precise(Len->getValue().getZExtValue());
       Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
                  MustAlias,
              "Undefined behavior: memcpy source and destination overlap", &I);
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index d319d4c..8129795 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -107,8 +107,8 @@
     return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                               DL, CtxI, DT, Visited);
 
-  if (auto CS = ImmutableCallSite(V))
-    if (auto *RP = getArgumentAliasingToReturnedPointer(CS))
+  if (const auto *Call = dyn_cast<CallBase>(V))
+    if (auto *RP = getArgumentAliasingToReturnedPointer(Call))
       return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                 Visited);
 
@@ -345,7 +345,7 @@
   const DataLayout &DL = ScanBB->getModule()->getDataLayout();
 
   // Try to get the store size for the type.
-  uint64_t AccessSize = DL.getTypeStoreSize(AccessTy);
+  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));
 
   Value *StrippedPtr = Ptr->stripPointerCasts();
 
diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index bc01f04..7f3480f 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -342,7 +342,7 @@
   //
   // The above case requires that we have an UnknownDependence between
   // accesses to the same underlying object. This cannot happen unless
-  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
+  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
   // is also false. In this case we will use the fallback path and create
   // separate checking groups for all pointers.
 
@@ -556,7 +556,7 @@
   /// perform dependency checking.
   ///
   /// Note that this can later be cleared if we retry memcheck analysis without
-  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
+  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
   bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
 
   /// We decided that no dependence analysis would be used.  Reset the state.
@@ -604,8 +604,8 @@
   ///
   /// Note that, this is different from isDependencyCheckNeeded.  When we retry
   /// memcheck analysis without dependency checking
-  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
-  /// while this remains set if we have potentially dependent accesses.
+  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
+  /// cleared while this remains set if we have potentially dependent accesses.
   bool IsRTCheckAnalysisNeeded;
 
   /// The SCEV predicate containing all the SCEV-related assumptions.
@@ -1221,18 +1221,20 @@
   return X == PtrSCEVB;
 }
 
-bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
+MemoryDepChecker::VectorizationSafetyStatus
+MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
   switch (Type) {
   case NoDep:
   case Forward:
   case BackwardVectorizable:
-    return true;
+    return VectorizationSafetyStatus::Safe;
 
   case Unknown:
+    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
   case ForwardButPreventsForwarding:
   case Backward:
   case BackwardVectorizableButPreventsForwarding:
-    return false;
+    return VectorizationSafetyStatus::Unsafe;
   }
   llvm_unreachable("unexpected DepType!");
 }
@@ -1317,6 +1319,11 @@
   return false;
 }
 
+void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
+  if (Status < S)
+    Status = S;
+}
+
 /// Given a non-constant (unknown) dependence-distance \p Dist between two
 /// memory accesses, that have the same stride whose absolute value is given
 /// in \p Stride, and that have the same type size \p TypeByteSize,
@@ -1485,7 +1492,7 @@
       return Dependence::NoDep;
 
     LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
-    ShouldRetryWithRuntimeCheck = true;
+    FoundNonConstantDistanceDependence = true;
     return Dependence::Unknown;
   }
 
@@ -1652,7 +1659,7 @@
 
             Dependence::DepType Type =
                 isDependent(*A.first, A.second, *B.first, B.second, Strides);
-            SafeForVectorization &= Dependence::isSafeForVectorization(Type);
+            mergeInStatus(Dependence::isSafeForVectorization(Type));
 
             // Gather dependences unless we accumulated MaxDependences
             // dependences.  In that case return as soon as we find the first
@@ -1669,7 +1676,7 @@
                            << "Too many dependences, stopped recording\n");
               }
             }
-            if (!RecordDependences && !SafeForVectorization)
+            if (!RecordDependences && !isSafeForVectorization())
               return false;
           }
         ++OI;
@@ -1679,7 +1686,7 @@
   }
 
   LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
-  return SafeForVectorization;
+  return isSafeForVectorization();
 }
 
 SmallVector<Instruction *, 4>
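
[Illustrative note, not part of the patch] mergeInStatus above folds each
dependence result into a three-value lattice rather than the old boolean. A
minimal sketch of the max-merge, using a hypothetical local enum that mirrors
VectorizationSafetyStatus:

#include <algorithm>
#include <cassert>

enum class Status { Safe, PossiblySafeWithRtChecks, Unsafe };

void mergeSketch() {
  Status S = Status::Safe;
  S = std::max(S, Status::PossiblySafeWithRtChecks); // unknown distance seen
  S = std::max(S, Status::Safe); // a later safe dep cannot undo the downgrade
  assert(S == Status::PossiblySafeWithRtChecks);     // runtime checks may help
}
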
diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp
index 6c779bf..ef2b125 100644
--- a/lib/Analysis/LoopInfo.cpp
+++ b/lib/Analysis/LoopInfo.cpp
@@ -293,16 +293,50 @@
   if (!DesiredLoopIdMetadata)
     return false;
 
+  MDNode *ParallelAccesses =
+      findOptionMDForLoop(this, "llvm.loop.parallel_accesses");
+  SmallPtrSet<MDNode *, 4>
+      ParallelAccessGroups; // For scalable 'contains' check.
+  if (ParallelAccesses) {
+    for (const MDOperand &MD : drop_begin(ParallelAccesses->operands(), 1)) {
+      MDNode *AccGroup = cast<MDNode>(MD.get());
+      assert(isValidAsAccessGroup(AccGroup) &&
+             "List item must be an access group");
+      ParallelAccessGroups.insert(AccGroup);
+    }
+  }
+
   // The loop branch contains the parallel loop metadata. In order to ensure
   // that any parallel-loop-unaware optimization pass hasn't added loop-carried
   // dependencies (thus converted the loop back to a sequential loop), check
-  // that all the memory instructions in the loop contain parallelism metadata
-  // that point to the same unique "loop id metadata" the loop branch does.
+  // that all the memory instructions in the loop belong to an access group that
+  // is parallel to this loop.
   for (BasicBlock *BB : this->blocks()) {
     for (Instruction &I : *BB) {
       if (!I.mayReadOrWriteMemory())
         continue;
 
+      if (MDNode *AccessGroup = I.getMetadata(LLVMContext::MD_access_group)) {
+        auto ContainsAccessGroup = [&ParallelAccessGroups](MDNode *AG) -> bool {
+          if (AG->getNumOperands() == 0) {
+            assert(isValidAsAccessGroup(AG) && "Item must be an access group");
+            return ParallelAccessGroups.count(AG);
+          }
+
+          for (const MDOperand &AccessListItem : AG->operands()) {
+            MDNode *AccGroup = cast<MDNode>(AccessListItem.get());
+            assert(isValidAsAccessGroup(AccGroup) &&
+                   "List item must be an access group");
+            if (ParallelAccessGroups.count(AccGroup))
+              return true;
+          }
+          return false;
+        };
+
+        if (ContainsAccessGroup(AccessGroup))
+          continue;
+      }
+
       // The memory instruction can refer to the loop identifier metadata
       // directly or indirectly through another list metadata (in case of
       // nested parallel loops). The loop identifier metadata refers to
@@ -693,6 +727,40 @@
   }
 }
 
+MDNode *llvm::findOptionMDForLoopID(MDNode *LoopID, StringRef Name) {
+  // No loop metadata node, no loop properties.
+  if (!LoopID)
+    return nullptr;
+
+  // First operand should refer to the metadata node itself, for legacy reasons.
+  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
+  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
+
+  // Iterate over the metadata node operands and look for MDString metadata.
+  for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
+    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
+    if (!MD || MD->getNumOperands() < 1)
+      continue;
+    MDString *S = dyn_cast<MDString>(MD->getOperand(0));
+    if (!S)
+      continue;
+    // Return the operand node if MDString holds expected metadata.
+    if (Name.equals(S->getString()))
+      return MD;
+  }
+
+  // Loop property not found.
+  return nullptr;
+}
+
+MDNode *llvm::findOptionMDForLoop(const Loop *TheLoop, StringRef Name) {
+  return findOptionMDForLoopID(TheLoop->getLoopID(), Name);
+}
+
+bool llvm::isValidAsAccessGroup(MDNode *Node) {
+  return Node->getNumOperands() == 0 && Node->isDistinct();
+}
+
 //===----------------------------------------------------------------------===//
 // LoopInfo implementation
 //
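
[Illustrative note, not part of the patch] Per isValidAsAccessGroup above, an
access group is a distinct MDNode with zero operands, and an instruction is
considered parallel to the loop when its access group appears in the loop's
llvm.loop.parallel_accesses list. A minimal sketch of constructing such a
node:

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include <cassert>
using namespace llvm;

void accessGroupSketch() {
  LLVMContext Ctx;
  MDNode *AG = MDNode::getDistinct(Ctx, None); // distinct, no operands
  assert(AG->getNumOperands() == 0 && AG->isDistinct());
}
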
diff --git a/lib/Analysis/MemDepPrinter.cpp b/lib/Analysis/MemDepPrinter.cpp
index 5a6bbd7..907b321 100644
--- a/lib/Analysis/MemDepPrinter.cpp
+++ b/lib/Analysis/MemDepPrinter.cpp
@@ -13,7 +13,6 @@
 #include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/InstIterator.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -106,9 +105,9 @@
     if (!Res.isNonLocal()) {
       Deps[Inst].insert(std::make_pair(getInstTypePair(Res),
                                        static_cast<BasicBlock *>(nullptr)));
-    } else if (auto CS = CallSite(Inst)) {
+    } else if (auto *Call = dyn_cast<CallBase>(Inst)) {
       const MemoryDependenceResults::NonLocalDepInfo &NLDI =
-        MDA.getNonLocalCallDependency(CS);
+          MDA.getNonLocalCallDependency(Call);
 
       DepSet &InstDeps = Deps[Inst];
       for (const NonLocalDepEntry &I : NLDI) {
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 2fe012d..e22182b 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -31,7 +31,6 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -182,8 +181,8 @@
 }
 
 /// Private helper for finding the local dependencies of a call site.
-MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
-    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
+MemDepResult MemoryDependenceResults::getCallDependencyFrom(
+    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
     BasicBlock *BB) {
   unsigned Limit = BlockScanLimit;
 
@@ -205,21 +204,21 @@
     ModRefInfo MR = GetLocation(Inst, Loc, TLI);
     if (Loc.Ptr) {
       // A simple instruction.
-      if (isModOrRefSet(AA.getModRefInfo(CS, Loc)))
+      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
         return MemDepResult::getClobber(Inst);
       continue;
     }
 
-    if (auto InstCS = CallSite(Inst)) {
+    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
       // If these two calls do not interfere, look past it.
-      if (isNoModRef(AA.getModRefInfo(CS, InstCS))) {
-        // If the two calls are the same, return InstCS as a Def, so that
-        // CS can be found redundant and eliminated.
+      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
+        // If the two calls are the same, return Inst as a Def, so that
+        // Call can be found redundant and eliminated.
         if (isReadOnlyCall && !isModSet(MR) &&
-            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
+            Call->isIdenticalToWhenDefined(CallB))
           return MemDepResult::getDef(Inst);
 
-        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
+        // Otherwise if the two calls don't interact (e.g. CallB is readnone)
         // keep scanning.
         continue;
       } else
@@ -750,11 +749,10 @@
 
       LocalCache = getPointerDependencyFrom(
           MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
-    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
-      CallSite QueryCS(QueryInst);
-      bool isReadOnly = AA.onlyReadsMemory(QueryCS);
-      LocalCache = getCallSiteDependencyFrom(
-          QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
+    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
+      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
+      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
+                                         ScanPos->getIterator(), QueryParent);
     } else
       // Non-memory instruction.
       LocalCache = MemDepResult::getUnknown();
@@ -780,11 +778,11 @@
 #endif
 
 const MemoryDependenceResults::NonLocalDepInfo &
-MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
-  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
+MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
+  assert(getDependency(QueryCall).isNonLocal() &&
          "getNonLocalCallDependency should only be used on calls with "
          "non-local deps!");
-  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
+  PerInstNLInfo &CacheP = NonLocalDeps[QueryCall];
   NonLocalDepInfo &Cache = CacheP.first;
 
   // This is the set of blocks that need to be recomputed.  In the cached case,
@@ -814,14 +812,14 @@
     //     << Cache.size() << " cached: " << *QueryInst;
   } else {
     // Seed DirtyBlocks with each of the preds of QueryInst's block.
-    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
+    BasicBlock *QueryBB = QueryCall->getParent();
     for (BasicBlock *Pred : PredCache.get(QueryBB))
       DirtyBlocks.push_back(Pred);
     ++NumUncacheNonLocal;
   }
 
   // isReadonlyCall - If this is a read-only call, we can be more aggressive.
-  bool isReadonlyCall = AA.onlyReadsMemory(QueryCS);
+  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);
 
   SmallPtrSet<BasicBlock *, 32> Visited;
 
@@ -865,8 +863,8 @@
       if (Instruction *Inst = ExistingResult->getResult().getInst()) {
         ScanPos = Inst->getIterator();
         // We're removing QueryInst's use of Inst.
-        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
-                             QueryCS.getInstruction());
+        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
+                                            QueryCall);
       }
     }
 
@@ -874,8 +872,7 @@
     MemDepResult Dep;
 
     if (ScanPos != DirtyBB->begin()) {
-      Dep =
-          getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
+      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
     } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
       // No dependence found.  If this is the entry block of the function, it is
       // a clobber, otherwise it is unknown.
@@ -897,7 +894,7 @@
       // Keep the ReverseNonLocalDeps map up to date so we can efficiently
       // update this when we remove instructions.
       if (Instruction *Inst = Dep.getInst())
-        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
+        ReverseNonLocalDeps[Inst].insert(QueryCall);
     } else {
 
       // If the block *is* completely transparent to the load, we need to check
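
With the renamed getCallDependencyFrom and the new getNonLocalCallDependency signature, clients pass the CallBase pointer directly instead of wrapping it in a CallSite. A hedged sketch of the query pattern (MDA and Inst are assumed to exist in the caller):

    // Sketch: MDA is assumed to be a MemoryDependenceResults and Inst an
    // instruction under inspection; both are assumptions of this example.
    if (auto *Call = llvm::dyn_cast<llvm::CallBase>(Inst)) {
      llvm::MemDepResult Local = MDA.getDependency(Call);
      if (Local.isNonLocal())
        for (const llvm::NonLocalDepEntry &E :
             MDA.getNonLocalCallDependency(Call))
          (void)E.getResult();
    }
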
diff --git a/lib/Analysis/MemoryLocation.cpp b/lib/Analysis/MemoryLocation.cpp
index c0605f6..27e8d72 100644
--- a/lib/Analysis/MemoryLocation.cpp
+++ b/lib/Analysis/MemoryLocation.cpp
@@ -37,8 +37,9 @@
   LI->getAAMetadata(AATags);
   const auto &DL = LI->getModule()->getDataLayout();
 
-  return MemoryLocation(LI->getPointerOperand(),
-                        DL.getTypeStoreSize(LI->getType()), AATags);
+  return MemoryLocation(
+      LI->getPointerOperand(),
+      LocationSize::precise(DL.getTypeStoreSize(LI->getType())), AATags);
 }
 
 MemoryLocation MemoryLocation::get(const StoreInst *SI) {
@@ -47,7 +48,8 @@
   const auto &DL = SI->getModule()->getDataLayout();
 
   return MemoryLocation(SI->getPointerOperand(),
-                        DL.getTypeStoreSize(SI->getValueOperand()->getType()),
+                        LocationSize::precise(DL.getTypeStoreSize(
+                            SI->getValueOperand()->getType())),
                         AATags);
 }
 
@@ -64,9 +66,10 @@
   CXI->getAAMetadata(AATags);
   const auto &DL = CXI->getModule()->getDataLayout();
 
-  return MemoryLocation(
-      CXI->getPointerOperand(),
-      DL.getTypeStoreSize(CXI->getCompareOperand()->getType()), AATags);
+  return MemoryLocation(CXI->getPointerOperand(),
+                        LocationSize::precise(DL.getTypeStoreSize(
+                            CXI->getCompareOperand()->getType())),
+                        AATags);
 }
 
 MemoryLocation MemoryLocation::get(const AtomicRMWInst *RMWI) {
@@ -75,7 +78,8 @@
   const auto &DL = RMWI->getModule()->getDataLayout();
 
   return MemoryLocation(RMWI->getPointerOperand(),
-                        DL.getTypeStoreSize(RMWI->getValOperand()->getType()),
+                        LocationSize::precise(DL.getTypeStoreSize(
+                            RMWI->getValOperand()->getType())),
                         AATags);
 }
 
@@ -88,9 +92,9 @@
 }
 
 MemoryLocation MemoryLocation::getForSource(const AnyMemTransferInst *MTI) {
-  uint64_t Size = MemoryLocation::UnknownSize;
+  auto Size = LocationSize::unknown();
   if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
-    Size = C->getValue().getZExtValue();
+    Size = LocationSize::precise(C->getValue().getZExtValue());
 
   // memcpy/memmove can have AA tags. For memcpy, they apply
   // to both the source and the destination.
@@ -109,9 +113,9 @@
 }
 
 MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
-  uint64_t Size = MemoryLocation::UnknownSize;
+  auto Size = LocationSize::unknown();
   if (ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength()))
-    Size = C->getValue().getZExtValue();
+    Size = LocationSize::precise(C->getValue().getZExtValue());
 
   // memcpy/memmove can have AA tags. For memcpy, they apply
   // to both the source and the destination.
@@ -121,15 +125,15 @@
   return MemoryLocation(MI->getRawDest(), Size, AATags);
 }
 
-MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS,
+MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
                                               unsigned ArgIdx,
                                               const TargetLibraryInfo *TLI) {
   AAMDNodes AATags;
-  CS->getAAMetadata(AATags);
-  const Value *Arg = CS.getArgument(ArgIdx);
+  Call->getAAMetadata(AATags);
+  const Value *Arg = Call->getArgOperand(ArgIdx);
 
   // We may be able to produce an exact size for known intrinsics.
-  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
     const DataLayout &DL = II->getModule()->getDataLayout();
 
     switch (II->getIntrinsicID()) {
@@ -141,7 +145,8 @@
       assert((ArgIdx == 0 || ArgIdx == 1) &&
              "Invalid argument index for memory intrinsic");
       if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
-        return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
+                              AATags);
       break;
 
     case Intrinsic::lifetime_start:
@@ -149,27 +154,37 @@
     case Intrinsic::invariant_start:
       assert(ArgIdx == 1 && "Invalid argument index");
       return MemoryLocation(
-          Arg, cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AATags);
+          Arg,
+          LocationSize::precise(
+              cast<ConstantInt>(II->getArgOperand(0))->getZExtValue()),
+          AATags);
 
     case Intrinsic::invariant_end:
       // The first argument to an invariant.end is a "descriptor" type (e.g. a
       // pointer to an empty struct) which is never actually dereferenced.
       if (ArgIdx == 0)
-        return MemoryLocation(Arg, 0, AATags);
+        return MemoryLocation(Arg, LocationSize::precise(0), AATags);
       assert(ArgIdx == 2 && "Invalid argument index");
       return MemoryLocation(
-          Arg, cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AATags);
+          Arg,
+          LocationSize::precise(
+              cast<ConstantInt>(II->getArgOperand(1))->getZExtValue()),
+          AATags);
 
     case Intrinsic::arm_neon_vld1:
       assert(ArgIdx == 0 && "Invalid argument index");
       // LLVM's vld1 and vst1 intrinsics currently only support a single
       // vector register.
-      return MemoryLocation(Arg, DL.getTypeStoreSize(II->getType()), AATags);
+      return MemoryLocation(
+          Arg, LocationSize::precise(DL.getTypeStoreSize(II->getType())),
+          AATags);
 
     case Intrinsic::arm_neon_vst1:
       assert(ArgIdx == 0 && "Invalid argument index");
-      return MemoryLocation(
-          Arg, DL.getTypeStoreSize(II->getArgOperand(1)->getType()), AATags);
+      return MemoryLocation(Arg,
+                            LocationSize::precise(DL.getTypeStoreSize(
+                                II->getArgOperand(1)->getType())),
+                            AATags);
     }
   }
 
@@ -178,18 +193,20 @@
   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
   // whenever possible.
   LibFunc F;
-  if (TLI && CS.getCalledFunction() &&
-      TLI->getLibFunc(*CS.getCalledFunction(), F) &&
+  if (TLI && Call->getCalledFunction() &&
+      TLI->getLibFunc(*Call->getCalledFunction(), F) &&
       F == LibFunc_memset_pattern16 && TLI->has(F)) {
     assert((ArgIdx == 0 || ArgIdx == 1) &&
            "Invalid argument index for memset_pattern16");
     if (ArgIdx == 1)
-      return MemoryLocation(Arg, 16, AATags);
-    if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
-      return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+      return MemoryLocation(Arg, LocationSize::precise(16), AATags);
+    if (const ConstantInt *LenCI =
+            dyn_cast<ConstantInt>(Call->getArgOperand(2)))
+      return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
+                            AATags);
   }
   // FIXME: Handle memset_pattern4 and memset_pattern8 also.
 
-  return MemoryLocation(CS.getArgument(ArgIdx), LocationSize::unknown(),
+  return MemoryLocation(Call->getArgOperand(ArgIdx), LocationSize::unknown(),
                         AATags);
 }
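
Every fixed byte count above is now wrapped in LocationSize::precise and every unknown one in LocationSize::unknown(), so a MemoryLocation carries explicit precision instead of a raw uint64_t. A small sketch of the constructors involved (the helper names are illustrative):

    #include "llvm/Analysis/MemoryLocation.h"

    // Sketch: build locations with an explicit size precision.
    llvm::MemoryLocation exactLoc(const llvm::Value *Ptr, uint64_t Bytes) {
      return llvm::MemoryLocation(Ptr, llvm::LocationSize::precise(Bytes));
    }
    llvm::MemoryLocation unknownLoc(const llvm::Value *Ptr) {
      return llvm::MemoryLocation(Ptr, llvm::LocationSize::unknown());
    }
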
diff --git a/lib/Analysis/MemorySSA.cpp b/lib/Analysis/MemorySSA.cpp
index 3d98fca..6a5567e 100644
--- a/lib/Analysis/MemorySSA.cpp
+++ b/lib/Analysis/MemorySSA.cpp
@@ -30,7 +30,6 @@
 #include "llvm/Config/llvm-config.h"
 #include "llvm/IR/AssemblyAnnotationWriter.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instruction.h"
@@ -131,9 +130,9 @@
       : MemoryLocOrCall(MUD->getMemoryInst()) {}
 
   MemoryLocOrCall(Instruction *Inst) {
-    if (ImmutableCallSite(Inst)) {
+    if (auto *C = dyn_cast<CallBase>(Inst)) {
       IsCall = true;
-      CS = ImmutableCallSite(Inst);
+      Call = C;
     } else {
       IsCall = false;
       // There is no such thing as a memorylocation for a fence inst, and it is
@@ -145,9 +144,9 @@
 
   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
 
-  ImmutableCallSite getCS() const {
+  const CallBase *getCall() const {
     assert(IsCall);
-    return CS;
+    return Call;
   }
 
   MemoryLocation getLoc() const {
@@ -162,16 +161,17 @@
     if (!IsCall)
       return Loc == Other.Loc;
 
-    if (CS.getCalledValue() != Other.CS.getCalledValue())
+    if (Call->getCalledValue() != Other.Call->getCalledValue())
       return false;
 
-    return CS.arg_size() == Other.CS.arg_size() &&
-           std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin());
+    return Call->arg_size() == Other.Call->arg_size() &&
+           std::equal(Call->arg_begin(), Call->arg_end(),
+                      Other.Call->arg_begin());
   }
 
 private:
   union {
-    ImmutableCallSite CS;
+    const CallBase *Call;
     MemoryLocation Loc;
   };
 };
@@ -197,9 +197,9 @@
 
     hash_code hash =
         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
-                                      MLOC.getCS().getCalledValue()));
+                                      MLOC.getCall()->getCalledValue()));
 
-    for (const Value *Arg : MLOC.getCS().args())
+    for (const Value *Arg : MLOC.getCall()->args())
       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
     return hash;
   }
@@ -258,7 +258,7 @@
                                              AliasAnalysis &AA) {
   Instruction *DefInst = MD->getMemoryInst();
   assert(DefInst && "Defining instruction not actually an instruction");
-  ImmutableCallSite UseCS(UseInst);
+  const auto *UseCall = dyn_cast<CallBase>(UseInst);
   Optional<AliasResult> AR;
 
   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
@@ -271,7 +271,7 @@
     // context.
     switch (II->getIntrinsicID()) {
     case Intrinsic::lifetime_start:
-      if (UseCS)
+      if (UseCall)
         return {false, NoAlias};
       AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
       return {AR != NoAlias, AR};
@@ -285,8 +285,8 @@
     }
   }
 
-  if (UseCS) {
-    ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
+  if (UseCall) {
+    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
     AR = isMustSet(I) ? MustAlias : MayAlias;
     return {isModOrRefSet(I), AR};
   }
@@ -332,11 +332,12 @@
   // The MemoryAccess we actually got called with, used to test local domination
   const MemoryAccess *OriginalAccess = nullptr;
   Optional<AliasResult> AR = MayAlias;
+  bool SkipSelfAccess = false;
 
   UpwardsMemoryQuery() = default;
 
   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
-      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
+      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
     if (!IsCall)
       StartingLoc = MemoryLocation::get(Inst);
   }
@@ -535,13 +536,13 @@
   ///
   /// This does not test for whether StopAt is a clobber
   UpwardsWalkResult
-  walkToPhiOrClobber(DefPath &Desc,
-                     const MemoryAccess *StopAt = nullptr) const {
+  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
+                     const MemoryAccess *SkipStopAt = nullptr) const {
     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
 
     for (MemoryAccess *Current : def_chain(Desc.Last)) {
       Desc.Last = Current;
-      if (Current == StopAt)
+      if (Current == StopAt || Current == SkipStopAt)
         return {Current, false, MayAlias};
 
       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
@@ -619,9 +620,16 @@
       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
         continue;
 
-      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
+      const MemoryAccess *SkipStopWhere = nullptr;
+      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
+        assert(isa<MemoryDef>(Query->OriginalAccess));
+        SkipStopWhere = Query->OriginalAccess;
+      }
+
+      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
+                                                 /*SkipStopAt=*/SkipStopWhere);
       if (Res.IsKnownClobber) {
-        assert(Res.Result != StopWhere);
+        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
         // If this wasn't a cache hit, we hit a clobber when walking. That's a
         // failure.
         TerminatedPath Term{Res.Result, PathIndex};
@@ -633,10 +641,13 @@
         continue;
       }
 
-      if (Res.Result == StopWhere) {
+      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
         // We've hit our target. Save this path off for if we want to continue
-        // walking.
-        NewPaused.push_back(PathIndex);
+        // walking. If we are in the mode of skipping the OriginalAccess and
+        // we've reached back to the OriginalAccess, do not save the path; we
+        // have just looped back to self.
+        if (Res.Result != SkipStopWhere)
+          NewPaused.push_back(PathIndex);
         continue;
       }
 
@@ -907,7 +918,8 @@
     }
 
 #ifdef EXPENSIVE_CHECKS
-    checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
+    if (!Q.SkipSelfAccess)
+      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
 #endif
     return Result;
   }
@@ -935,28 +947,76 @@
 
 namespace llvm {
 
+class MemorySSA::ClobberWalkerBase {
+  ClobberWalker Walker;
+  MemorySSA *MSSA;
+
+public:
+  ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
+      : Walker(*M, *A, *D), MSSA(M) {}
+
+  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
+                                              const MemoryLocation &);
+  // The second (bool) argument defines whether the clobber search should skip
+  // the original queried access. If true, there will be a follow-up query
+  // searching for a clobber access past "self". Note that the Optimized access
+  // is not updated if a new clobber is found by this SkipSelf search. If this
+  // additional query becomes heavily used we may decide to cache the result.
+  // Walker instantiations will decide how to set the SkipSelf bool.
+  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
+  void verify(const MemorySSA *MSSA) { Walker.verify(MSSA); }
+};
+
 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
 /// longer does caching on its own, but the name has been retained for the
 /// moment.
 class MemorySSA::CachingWalker final : public MemorySSAWalker {
-  ClobberWalker Walker;
-
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
+  ClobberWalkerBase *Walker;
 
 public:
-  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
+  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
+      : MemorySSAWalker(M), Walker(W) {}
   ~CachingWalker() override = default;
 
   using MemorySSAWalker::getClobberingMemoryAccess;
 
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
-                                          const MemoryLocation &) override;
-  void invalidateInfo(MemoryAccess *) override;
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          const MemoryLocation &Loc) override;
+
+  void invalidateInfo(MemoryAccess *MA) override {
+    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
+      MUD->resetOptimized();
+  }
 
   void verify(const MemorySSA *MSSA) override {
     MemorySSAWalker::verify(MSSA);
-    Walker.verify(MSSA);
+    Walker->verify(MSSA);
+  }
+};
+
+class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
+  ClobberWalkerBase *Walker;
+
+public:
+  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
+      : MemorySSAWalker(M), Walker(W) {}
+  ~SkipSelfWalker() override = default;
+
+  using MemorySSAWalker::getClobberingMemoryAccess;
+
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          const MemoryLocation &Loc) override;
+
+  void invalidateInfo(MemoryAccess *MA) override {
+    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
+      MUD->resetOptimized();
+  }
+
+  void verify(const MemorySSA *MSSA) override {
+    MemorySSAWalker::verify(MSSA);
+    Walker->verify(MSSA);
   }
 };
 
@@ -1095,7 +1155,7 @@
 
 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
     : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
-      NextID(0) {
+      SkipWalker(nullptr), NextID(0) {
   buildMemorySSA();
 }
 
@@ -1426,10 +1486,25 @@
   if (Walker)
     return Walker.get();
 
-  Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
+  if (!WalkerBase)
+    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+
+  Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
   return Walker.get();
 }
 
+MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
+  if (SkipWalker)
+    return SkipWalker.get();
+
+  if (!WalkerBase)
+    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+
+  SkipWalker = llvm::make_unique<SkipSelfWalker>(this, WalkerBase.get());
+  return SkipWalker.get();
+}
+
 // This is a helper function used by the creation routines. It places NewAccess
 // into the access and defs lists for a given basic block, at the given
 // insertion point.
@@ -2131,25 +2206,11 @@
 
 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
 
-MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
-                                        DominatorTree *D)
-    : MemorySSAWalker(M), Walker(*M, *A, *D) {}
-
-void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
-  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
-    MUD->resetOptimized();
-}
-
-/// Walk the use-def chains starting at \p MA and find
+/// Walk the use-def chains starting at \p StartingAccess and find
 /// the MemoryAccess that actually clobbers Loc.
 ///
 /// \returns our clobbering memory access
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
-    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
-  return Walker.findClobber(StartingAccess, Q);
-}
-
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
+MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
     MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
   if (isa<MemoryPhi>(StartingAccess))
     return StartingAccess;
@@ -2162,7 +2223,7 @@
 
   // Conservatively, fences are always clobbers, so don't perform the walk if we
   // hit a fence.
-  if (!ImmutableCallSite(I) && I->isFenceLike())
+  if (!isa<CallBase>(I) && I->isFenceLike())
     return StartingUseOrDef;
 
   UpwardsMemoryQuery Q;
@@ -2173,11 +2234,12 @@
 
   // Unlike the other function, do not walk to the def of a def, because we are
   // handed something we already believe is the clobbering access.
+  // We never set SkipSelf to true in Q in this method.
   MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                      ? StartingUseOrDef->getDefiningAccess()
                                      : StartingUseOrDef;
 
-  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
+  MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
@@ -2186,23 +2248,29 @@
 }
 
 MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
+                                                            bool SkipSelf) {
   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
   // If this is a MemoryPhi, we can't do anything.
   if (!StartingAccess)
     return MA;
 
+  bool IsOptimized = false;
+
   // If this is an already optimized use or def, return the optimized result.
   // Note: Currently, we store the optimized def result in a separate field,
   // since we can't use the defining access.
-  if (StartingAccess->isOptimized())
-    return StartingAccess->getOptimized();
+  if (StartingAccess->isOptimized()) {
+    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
+      return StartingAccess->getOptimized();
+    IsOptimized = true;
+  }
 
   const Instruction *I = StartingAccess->getMemoryInst();
   // We can't sanely do anything with fences, since they conservatively clobber
   // all memory and have no locations to get pointers from to try to
   // disambiguate.
-  if (!ImmutableCallSite(I) && I->isFenceLike())
+  if (!isa<CallBase>(I) && I->isFenceLike())
     return StartingAccess;
 
   UpwardsMemoryQuery Q(I, StartingAccess);
@@ -2214,33 +2282,71 @@
     return LiveOnEntry;
   }
 
-  // Start with the thing we already think clobbers this location
-  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
+  MemoryAccess *OptimizedAccess;
+  if (!IsOptimized) {
+    // Start with the thing we already think clobbers this location
+    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
 
-  // At this point, DefiningAccess may be the live on entry def.
-  // If it is, we will not get a better result.
-  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
-    StartingAccess->setOptimized(DefiningAccess);
-    StartingAccess->setOptimizedAccessType(None);
-    return DefiningAccess;
-  }
+    // At this point, DefiningAccess may be the live on entry def.
+    // If it is, we will not get a better result.
+    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
+      StartingAccess->setOptimized(DefiningAccess);
+      StartingAccess->setOptimizedAccessType(None);
+      return DefiningAccess;
+    }
 
-  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
+    OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
+    StartingAccess->setOptimized(OptimizedAccess);
+    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
+      StartingAccess->setOptimizedAccessType(None);
+    else if (Q.AR == MustAlias)
+      StartingAccess->setOptimizedAccessType(MustAlias);
+  } else
+    OptimizedAccess = StartingAccess->getOptimized();
+
   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
-  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *Result << "\n");
+  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
+  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
+  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
 
-  StartingAccess->setOptimized(Result);
-  if (MSSA->isLiveOnEntryDef(Result))
-    StartingAccess->setOptimizedAccessType(None);
-  else if (Q.AR == MustAlias)
-    StartingAccess->setOptimizedAccessType(MustAlias);
+  MemoryAccess *Result;
+  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
+      isa<MemoryDef>(StartingAccess)) {
+    assert(isa<MemoryDef>(Q.OriginalAccess));
+    Q.SkipSelfAccess = true;
+    Result = Walker.findClobber(OptimizedAccess, Q);
+  } else
+    Result = OptimizedAccess;
+
+  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
+  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
 
   return Result;
 }
 
 MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+  return Walker->getClobberingMemoryAccessBase(MA, false);
+}
+
+MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
+                                                    const MemoryLocation &Loc) {
+  return Walker->getClobberingMemoryAccessBase(MA, Loc);
+}
+
+MemoryAccess *
+MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+  return Walker->getClobberingMemoryAccessBase(MA, true);
+}
+
+MemoryAccess *
+MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA,
+                                                     const MemoryLocation &Loc) {
+  return Walker->getClobberingMemoryAccessBase(MA, Loc);
+}
+
+MemoryAccess *
 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
     return Use->getDefiningAccess();
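
Both walkers now delegate to one shared ClobberWalkerBase, differing only in the SkipSelf flag they pass down. A hedged usage sketch, assuming MSSA is a built MemorySSA and MD a MemoryDef inside it:

    // Sketch: ask for a clobber past the def itself. The plain walker can
    // stop when the search loops back to MD; the skip-self walker keeps
    // walking past the original MemoryDef in that case.
    llvm::MemorySSAWalker *W = MSSA.getSkipSelfWalker();
    llvm::MemoryAccess *Clobber = W->getClobberingMemoryAccess(MD);
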
diff --git a/lib/Analysis/ModuleSummaryAnalysis.cpp b/lib/Analysis/ModuleSummaryAnalysis.cpp
index 6bda1d1..87f76d4 100644
--- a/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -457,7 +457,11 @@
     std::function<BlockFrequencyInfo *(const Function &F)> GetBFICallback,
     ProfileSummaryInfo *PSI) {
   assert(PSI);
-  ModuleSummaryIndex Index(/*HaveGVs=*/true);
+  bool EnableSplitLTOUnit = false;
+  if (auto *MD = mdconst::extract_or_null<ConstantInt>(
+          M.getModuleFlag("EnableSplitLTOUnit")))
+    EnableSplitLTOUnit = MD->getZExtValue();
+  ModuleSummaryIndex Index(/*HaveGVs=*/true, EnableSplitLTOUnit);
 
   // Identify the local values in the llvm.used and llvm.compiler.used sets,
   // which should not be exported as they would then require renaming and
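
The index construction now reads the per-module "EnableSplitLTOUnit" flag. A sketch of how a frontend might set it so the summary records the split (the Error merge behavior shown is an assumption of this example):

    #include "llvm/IR/Module.h"

    // Sketch: mark a module so buildModuleSummaryIndex sees
    // EnableSplitLTOUnit = 1 when it builds the index.
    void markSplitLTOUnit(llvm::Module &M) {
      M.addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit", 1);
    }
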
diff --git a/lib/Analysis/MustExecute.cpp b/lib/Analysis/MustExecute.cpp
index 281b8f5..180c38d 100644
--- a/lib/Analysis/MustExecute.cpp
+++ b/lib/Analysis/MustExecute.cpp
@@ -83,16 +83,15 @@
   computeBlockColors(CurLoop);
 }
 
-void ICFLoopSafetyInfo::insertInstructionTo(const BasicBlock *BB) {
-  ICF.invalidateBlock(BB);
-  MW.invalidateBlock(BB);
+void ICFLoopSafetyInfo::insertInstructionTo(const Instruction *Inst,
+                                            const BasicBlock *BB) {
+  ICF.insertInstructionTo(Inst, BB);
+  MW.insertInstructionTo(Inst, BB);
 }
 
 void ICFLoopSafetyInfo::removeInstruction(const Instruction *Inst) {
-  // TODO: So far we just conservatively drop cache, but maybe we can not do it
-  // when Inst is not an ICF instruction. Follow-up on that.
-  ICF.invalidateBlock(Inst->getParent());
-  MW.invalidateBlock(Inst->getParent());
+  ICF.removeInstruction(Inst);
+  MW.removeInstruction(Inst);
 }
 
 void LoopSafetyInfo::computeBlockColors(const Loop *CurLoop) {
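
The safety info is now maintained per instruction rather than invalidated per block. A transform that moves code would be expected to bracket the motion with these hooks; a sketch under that assumption (moveInstruction is an illustrative helper, not from the patch):

    #include "llvm/Analysis/MustExecute.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"

    // Sketch: keep ICFLoopSafetyInfo consistent across code motion.
    void moveInstruction(llvm::Instruction *I, llvm::BasicBlock *Dest,
                         llvm::ICFLoopSafetyInfo &SafetyInfo) {
      SafetyInfo.removeInstruction(I);         // forget I's old position
      I->moveBefore(Dest->getTerminator());    // the actual motion
      SafetyInfo.insertInstructionTo(I, Dest); // record I in its new block
    }
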
diff --git a/lib/Analysis/ObjCARCAliasAnalysis.cpp b/lib/Analysis/ObjCARCAliasAnalysis.cpp
index 096ea66..95ae1a6 100644
--- a/lib/Analysis/ObjCARCAliasAnalysis.cpp
+++ b/lib/Analysis/ObjCARCAliasAnalysis.cpp
@@ -106,12 +106,12 @@
   return AAResultBase::getModRefBehavior(F);
 }
 
-ModRefInfo ObjCARCAAResult::getModRefInfo(ImmutableCallSite CS,
+ModRefInfo ObjCARCAAResult::getModRefInfo(const CallBase *Call,
                                           const MemoryLocation &Loc) {
   if (!EnableARCOpts)
-    return AAResultBase::getModRefInfo(CS, Loc);
+    return AAResultBase::getModRefInfo(Call, Loc);
 
-  switch (GetBasicARCInstKind(CS.getInstruction())) {
+  switch (GetBasicARCInstKind(Call)) {
   case ARCInstKind::Retain:
   case ARCInstKind::RetainRV:
   case ARCInstKind::Autorelease:
@@ -128,7 +128,7 @@
     break;
   }
 
-  return AAResultBase::getModRefInfo(CS, Loc);
+  return AAResultBase::getModRefInfo(Call, Loc);
 }
 
 ObjCARCAAResult ObjCARCAA::run(Function &F, FunctionAnalysisManager &AM) {
diff --git a/lib/Analysis/ObjCARCInstKind.cpp b/lib/Analysis/ObjCARCInstKind.cpp
index f268e2a..31c4327 100644
--- a/lib/Analysis/ObjCARCInstKind.cpp
+++ b/lib/Analysis/ObjCARCInstKind.cpp
@@ -85,97 +85,73 @@
 }
 
 ARCInstKind llvm::objcarc::GetFunctionClass(const Function *F) {
-  Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
 
-  // No (mandatory) arguments.
-  if (AI == AE)
-    return StringSwitch<ARCInstKind>(F->getName())
-        .Case("objc_autoreleasePoolPush", ARCInstKind::AutoreleasepoolPush)
-        .Case("clang.arc.use", ARCInstKind::IntrinsicUser)
-        .Default(ARCInstKind::CallOrUser);
-
-  // One argument.
-  const Argument *A0 = &*AI++;
-  if (AI == AE) {
-    // Argument is a pointer.
-    PointerType *PTy = dyn_cast<PointerType>(A0->getType());
-    if (!PTy)
-      return ARCInstKind::CallOrUser;
-
-    Type *ETy = PTy->getElementType();
-    // Argument is i8*.
-    if (ETy->isIntegerTy(8))
-      return StringSwitch<ARCInstKind>(F->getName())
-          .Case("objc_retain", ARCInstKind::Retain)
-          .Case("objc_retainAutoreleasedReturnValue", ARCInstKind::RetainRV)
-          .Case("objc_unsafeClaimAutoreleasedReturnValue", ARCInstKind::ClaimRV)
-          .Case("objc_retainBlock", ARCInstKind::RetainBlock)
-          .Case("objc_release", ARCInstKind::Release)
-          .Case("objc_autorelease", ARCInstKind::Autorelease)
-          .Case("objc_autoreleaseReturnValue", ARCInstKind::AutoreleaseRV)
-          .Case("objc_autoreleasePoolPop", ARCInstKind::AutoreleasepoolPop)
-          .Case("objc_retainedObject", ARCInstKind::NoopCast)
-          .Case("objc_unretainedObject", ARCInstKind::NoopCast)
-          .Case("objc_unretainedPointer", ARCInstKind::NoopCast)
-          .Case("objc_retain_autorelease", ARCInstKind::FusedRetainAutorelease)
-          .Case("objc_retainAutorelease", ARCInstKind::FusedRetainAutorelease)
-          .Case("objc_retainAutoreleaseReturnValue",
-                ARCInstKind::FusedRetainAutoreleaseRV)
-          .Case("objc_sync_enter", ARCInstKind::User)
-          .Case("objc_sync_exit", ARCInstKind::User)
-          .Default(ARCInstKind::CallOrUser);
-
-    // Argument is i8**
-    if (PointerType *Pte = dyn_cast<PointerType>(ETy))
-      if (Pte->getElementType()->isIntegerTy(8))
-        return StringSwitch<ARCInstKind>(F->getName())
-            .Case("objc_loadWeakRetained", ARCInstKind::LoadWeakRetained)
-            .Case("objc_loadWeak", ARCInstKind::LoadWeak)
-            .Case("objc_destroyWeak", ARCInstKind::DestroyWeak)
-            .Default(ARCInstKind::CallOrUser);
-
-    // Anything else with one argument.
+  Intrinsic::ID ID = F->getIntrinsicID();
+  switch (ID) {
+  default:
     return ARCInstKind::CallOrUser;
+  case Intrinsic::objc_autorelease:
+    return ARCInstKind::Autorelease;
+  case Intrinsic::objc_autoreleasePoolPop:
+    return ARCInstKind::AutoreleasepoolPop;
+  case Intrinsic::objc_autoreleasePoolPush:
+    return ARCInstKind::AutoreleasepoolPush;
+  case Intrinsic::objc_autoreleaseReturnValue:
+    return ARCInstKind::AutoreleaseRV;
+  case Intrinsic::objc_copyWeak:
+    return ARCInstKind::CopyWeak;
+  case Intrinsic::objc_destroyWeak:
+    return ARCInstKind::DestroyWeak;
+  case Intrinsic::objc_initWeak:
+    return ARCInstKind::InitWeak;
+  case Intrinsic::objc_loadWeak:
+    return ARCInstKind::LoadWeak;
+  case Intrinsic::objc_loadWeakRetained:
+    return ARCInstKind::LoadWeakRetained;
+  case Intrinsic::objc_moveWeak:
+    return ARCInstKind::MoveWeak;
+  case Intrinsic::objc_release:
+    return ARCInstKind::Release;
+  case Intrinsic::objc_retain:
+    return ARCInstKind::Retain;
+  case Intrinsic::objc_retainAutorelease:
+    return ARCInstKind::FusedRetainAutorelease;
+  case Intrinsic::objc_retainAutoreleaseReturnValue:
+    return ARCInstKind::FusedRetainAutoreleaseRV;
+  case Intrinsic::objc_retainAutoreleasedReturnValue:
+    return ARCInstKind::RetainRV;
+  case Intrinsic::objc_retainBlock:
+    return ARCInstKind::RetainBlock;
+  case Intrinsic::objc_storeStrong:
+    return ARCInstKind::StoreStrong;
+  case Intrinsic::objc_storeWeak:
+    return ARCInstKind::StoreWeak;
+  case Intrinsic::objc_clang_arc_use:
+    return ARCInstKind::IntrinsicUser;
+  case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
+    return ARCInstKind::ClaimRV;
+  case Intrinsic::objc_retainedObject:
+    return ARCInstKind::NoopCast;
+  case Intrinsic::objc_unretainedObject:
+    return ARCInstKind::NoopCast;
+  case Intrinsic::objc_unretainedPointer:
+    return ARCInstKind::NoopCast;
+  case Intrinsic::objc_retain_autorelease:
+    return ARCInstKind::FusedRetainAutorelease;
+  case Intrinsic::objc_sync_enter:
+    return ARCInstKind::User;
+  case Intrinsic::objc_sync_exit:
+    return ARCInstKind::User;
+  case Intrinsic::objc_arc_annotation_topdown_bbstart:
+  case Intrinsic::objc_arc_annotation_topdown_bbend:
+  case Intrinsic::objc_arc_annotation_bottomup_bbstart:
+  case Intrinsic::objc_arc_annotation_bottomup_bbend:
+    // Ignore annotation calls. This is important to stop the
+    // optimizer from treating annotations as uses, which would
+    // make the state of the pointers they are attempting to
+    // elucidate incorrect.
+    return ARCInstKind::None;
   }
-
-  // Two arguments, first is i8**.
-  const Argument *A1 = &*AI++;
-  if (AI == AE)
-    if (PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
-      if (PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))
-        if (Pte->getElementType()->isIntegerTy(8))
-          if (PointerType *PTy1 = dyn_cast<PointerType>(A1->getType())) {
-            Type *ETy1 = PTy1->getElementType();
-            // Second argument is i8*
-            if (ETy1->isIntegerTy(8))
-              return StringSwitch<ARCInstKind>(F->getName())
-                  .Case("objc_storeWeak", ARCInstKind::StoreWeak)
-                  .Case("objc_initWeak", ARCInstKind::InitWeak)
-                  .Case("objc_storeStrong", ARCInstKind::StoreStrong)
-                  .Default(ARCInstKind::CallOrUser);
-            // Second argument is i8**.
-            if (PointerType *Pte1 = dyn_cast<PointerType>(ETy1))
-              if (Pte1->getElementType()->isIntegerTy(8))
-                return StringSwitch<ARCInstKind>(F->getName())
-                    .Case("objc_moveWeak", ARCInstKind::MoveWeak)
-                    .Case("objc_copyWeak", ARCInstKind::CopyWeak)
-                    // Ignore annotation calls. This is important to stop the
-                    // optimizer from treating annotations as uses which would
-                    // make the state of the pointers they are attempting to
-                    // elucidate to be incorrect.
-                    .Case("llvm.arc.annotation.topdown.bbstart",
-                          ARCInstKind::None)
-                    .Case("llvm.arc.annotation.topdown.bbend",
-                          ARCInstKind::None)
-                    .Case("llvm.arc.annotation.bottomup.bbstart",
-                          ARCInstKind::None)
-                    .Case("llvm.arc.annotation.bottomup.bbend",
-                          ARCInstKind::None)
-                    .Default(ARCInstKind::CallOrUser);
-          }
-
-  // Anything else.
-  return ARCInstKind::CallOrUser;
 }
 
 // A whitelist of intrinsics that we know do not use objc pointers or decrement
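
Classification now switches on the function's intrinsic ID instead of matching names against hand-written argument shapes, so a declaration's pointer types no longer affect the result. A minimal sketch of the consequence (isRetain is illustrative):

    // Sketch: llvm.objc.retain now classifies by intrinsic ID alone; the
    // old code additionally required the argument to be i8*.
    bool isRetain(const llvm::Function *F) {
      return F->getIntrinsicID() == llvm::Intrinsic::objc_retain;
    }
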
diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 61f6254..289d4f8 100644
--- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -27,7 +27,7 @@
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are. This allows the code below to ignore this special
   // case.
-  if (LocA.Size == 0 || LocB.Size == 0)
+  if (LocA.Size.isZero() || LocB.Size.isZero())
     return NoAlias;
 
   // This is SCEVAAResult. Get the SCEVs!
@@ -82,10 +82,10 @@
   Value *BO = GetBaseValue(BS);
   if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
     if (alias(MemoryLocation(AO ? AO : LocA.Ptr,
-                             AO ? +MemoryLocation::UnknownSize : LocA.Size,
+                             AO ? LocationSize::unknown() : LocA.Size,
                              AO ? AAMDNodes() : LocA.AATags),
               MemoryLocation(BO ? BO : LocB.Ptr,
-                             BO ? +MemoryLocation::UnknownSize : LocB.Size,
+                             BO ? LocationSize::unknown() : LocB.Size,
                              BO ? AAMDNodes() : LocB.AATags)) == NoAlias)
       return NoAlias;
 
diff --git a/lib/Analysis/ScopedNoAliasAA.cpp b/lib/Analysis/ScopedNoAliasAA.cpp
index f12275a..9a581fe 100644
--- a/lib/Analysis/ScopedNoAliasAA.cpp
+++ b/lib/Analysis/ScopedNoAliasAA.cpp
@@ -95,39 +95,36 @@
   return AAResultBase::alias(LocA, LocB);
 }
 
-ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS,
+ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call,
                                                 const MemoryLocation &Loc) {
   if (!EnableScopedNoAlias)
-    return AAResultBase::getModRefInfo(CS, Loc);
+    return AAResultBase::getModRefInfo(Call, Loc);
 
-  if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata(
-                                              LLVMContext::MD_noalias)))
+  if (!mayAliasInScopes(Loc.AATags.Scope,
+                        Call->getMetadata(LLVMContext::MD_noalias)))
     return ModRefInfo::NoModRef;
 
-  if (!mayAliasInScopes(
-          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
-          Loc.AATags.NoAlias))
+  if (!mayAliasInScopes(Call->getMetadata(LLVMContext::MD_alias_scope),
+                        Loc.AATags.NoAlias))
     return ModRefInfo::NoModRef;
 
-  return AAResultBase::getModRefInfo(CS, Loc);
+  return AAResultBase::getModRefInfo(Call, Loc);
 }
 
-ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS1,
-                                                ImmutableCallSite CS2) {
+ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call1,
+                                                const CallBase *Call2) {
   if (!EnableScopedNoAlias)
-    return AAResultBase::getModRefInfo(CS1, CS2);
+    return AAResultBase::getModRefInfo(Call1, Call2);
 
-  if (!mayAliasInScopes(
-          CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
-          CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
+  if (!mayAliasInScopes(Call1->getMetadata(LLVMContext::MD_alias_scope),
+                        Call2->getMetadata(LLVMContext::MD_noalias)))
     return ModRefInfo::NoModRef;
 
-  if (!mayAliasInScopes(
-          CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
-          CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
+  if (!mayAliasInScopes(Call2->getMetadata(LLVMContext::MD_alias_scope),
+                        Call1->getMetadata(LLVMContext::MD_noalias)))
     return ModRefInfo::NoModRef;
 
-  return AAResultBase::getModRefInfo(CS1, CS2);
+  return AAResultBase::getModRefInfo(Call1, Call2);
 }
 
 static void collectMDInDomain(const MDNode *List, const MDNode *Domain,
diff --git a/lib/Analysis/StackSafetyAnalysis.cpp b/lib/Analysis/StackSafetyAnalysis.cpp
index 49d9b3f..66b0384 100644
--- a/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/lib/Analysis/StackSafetyAnalysis.cpp
@@ -323,11 +323,8 @@
       case Instruction::Invoke: {
         ImmutableCallSite CS(I);
 
-        if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
-          if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
-              II->getIntrinsicID() == Intrinsic::lifetime_end)
-            break;
-        }
+        if (I->isLifetimeStartOrEnd())
+          break;
 
         if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
           US.updateRange(getMemIntrinsicAccessRange(MI, UI, Ptr));
diff --git a/lib/Analysis/SyntheticCountsUtils.cpp b/lib/Analysis/SyntheticCountsUtils.cpp
index 386396b..c2d7bb1 100644
--- a/lib/Analysis/SyntheticCountsUtils.cpp
+++ b/lib/Analysis/SyntheticCountsUtils.cpp
@@ -26,8 +26,7 @@
 // Given an SCC, propagate entry counts along the edge of the SCC nodes.
 template <typename CallGraphType>
 void SyntheticCountsUtils<CallGraphType>::propagateFromSCC(
-    const SccTy &SCC, GetRelBBFreqTy GetRelBBFreq, GetCountTy GetCount,
-    AddCountTy AddCount) {
+    const SccTy &SCC, GetProfCountTy GetProfCount, AddCountTy AddCount) {
 
   DenseSet<NodeRef> SCCNodes;
   SmallVector<std::pair<NodeRef, EdgeRef>, 8> SCCEdges, NonSCCEdges;
@@ -54,17 +53,13 @@
   // This ensures that the order of
   // traversal of nodes within the SCC doesn't affect the final result.
 
-  DenseMap<NodeRef, uint64_t> AdditionalCounts;
+  DenseMap<NodeRef, Scaled64> AdditionalCounts;
   for (auto &E : SCCEdges) {
-    auto OptRelFreq = GetRelBBFreq(E.second);
-    if (!OptRelFreq)
+    auto OptProfCount = GetProfCount(E.first, E.second);
+    if (!OptProfCount)
       continue;
-    Scaled64 RelFreq = OptRelFreq.getValue();
-    auto Caller = E.first;
     auto Callee = CGT::edge_dest(E.second);
-    RelFreq *= Scaled64(GetCount(Caller), 0);
-    uint64_t AdditionalCount = RelFreq.toInt<uint64_t>();
-    AdditionalCounts[Callee] += AdditionalCount;
+    AdditionalCounts[Callee] += OptProfCount.getValue();
   }
 
   // Update the counts for the nodes in the SCC.
@@ -73,14 +68,11 @@
 
   // Now update the counts for nodes outside the SCC.
   for (auto &E : NonSCCEdges) {
-    auto OptRelFreq = GetRelBBFreq(E.second);
-    if (!OptRelFreq)
+    auto OptProfCount = GetProfCount(E.first, E.second);
+    if (!OptProfCount)
       continue;
-    Scaled64 RelFreq = OptRelFreq.getValue();
-    auto Caller = E.first;
     auto Callee = CGT::edge_dest(E.second);
-    RelFreq *= Scaled64(GetCount(Caller), 0);
-    AddCount(Callee, RelFreq.toInt<uint64_t>());
+    AddCount(Callee, OptProfCount.getValue());
   }
 }
 
@@ -94,8 +86,7 @@
 
 template <typename CallGraphType>
 void SyntheticCountsUtils<CallGraphType>::propagate(const CallGraphType &CG,
-                                                    GetRelBBFreqTy GetRelBBFreq,
-                                                    GetCountTy GetCount,
+                                                    GetProfCountTy GetProfCount,
                                                     AddCountTy AddCount) {
   std::vector<SccTy> SCCs;
 
@@ -107,7 +98,7 @@
   // The scc iterator returns the scc in bottom-up order, so reverse the SCCs
   // and call propagateFromSCC.
   for (auto &SCC : reverse(SCCs))
-    propagateFromSCC(SCC, GetRelBBFreq, GetCount, AddCount);
+    propagateFromSCC(SCC, GetProfCount, AddCount);
 }
 
 template class llvm::SyntheticCountsUtils<const CallGraph *>;
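
The separate relative-frequency and caller-count callbacks are folded into one GetProfCount callback that returns the already-scaled contribution of an edge, or None when no profile is available. A hedged sketch of a caller (Counts and the None-returning lambda are placeholders for real profile logic):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/ADT/Optional.h"
    #include "llvm/Analysis/CallGraph.h"
    #include "llvm/Analysis/SyntheticCountsUtils.h"

    using SCCUtils = llvm::SyntheticCountsUtils<const llvm::CallGraph *>;

    void propagateCounts(
        const llvm::CallGraph &CG,
        llvm::DenseMap<const llvm::CallGraphNode *, SCCUtils::Scaled64>
            &Counts) {
      SCCUtils::propagate(
          &CG,
          [&](auto Caller, auto Edge) -> llvm::Optional<SCCUtils::Scaled64> {
            // A real client returns the caller's count scaled by the edge's
            // relative block frequency; None means "no profile here".
            return llvm::None;
          },
          [&](auto Callee, SCCUtils::Scaled64 N) { Counts[Callee] += N; });
    }
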
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index 79fe6dc..9151d46 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -625,6 +625,12 @@
   return TTIImpl->areInlineCompatible(Caller, Callee);
 }
 
+bool TargetTransformInfo::areFunctionArgsABICompatible(
+    const Function *Caller, const Function *Callee,
+    SmallPtrSetImpl<Argument *> &Args) const {
+  return TTIImpl->areFunctionArgsABICompatible(Caller, Callee, Args);
+}
+
 bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
   return TTIImpl->isIndexedLoadLegal(Mode, Ty);
diff --git a/lib/Analysis/TypeBasedAliasAnalysis.cpp b/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 25a154e..83974da 100644
--- a/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -399,20 +399,20 @@
 }
 
 FunctionModRefBehavior
-TypeBasedAAResult::getModRefBehavior(ImmutableCallSite CS) {
+TypeBasedAAResult::getModRefBehavior(const CallBase *Call) {
   if (!EnableTBAA)
-    return AAResultBase::getModRefBehavior(CS);
+    return AAResultBase::getModRefBehavior(Call);
 
   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
 
   // If this is an "immutable" type, we can assume the call doesn't write
   // to memory.
-  if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
+  if (const MDNode *M = Call->getMetadata(LLVMContext::MD_tbaa))
     if ((!isStructPathTBAA(M) && TBAANode(M).isTypeImmutable()) ||
         (isStructPathTBAA(M) && TBAAStructTagNode(M).isTypeImmutable()))
       Min = FMRB_OnlyReadsMemory;
 
-  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(CS) & Min);
+  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(Call) & Min);
 }
 
 FunctionModRefBehavior TypeBasedAAResult::getModRefBehavior(const Function *F) {
@@ -420,33 +420,30 @@
   return AAResultBase::getModRefBehavior(F);
 }
 
-ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS,
+ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call,
                                             const MemoryLocation &Loc) {
   if (!EnableTBAA)
-    return AAResultBase::getModRefInfo(CS, Loc);
+    return AAResultBase::getModRefInfo(Call, Loc);
 
   if (const MDNode *L = Loc.AATags.TBAA)
-    if (const MDNode *M =
-            CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
+    if (const MDNode *M = Call->getMetadata(LLVMContext::MD_tbaa))
       if (!Aliases(L, M))
         return ModRefInfo::NoModRef;
 
-  return AAResultBase::getModRefInfo(CS, Loc);
+  return AAResultBase::getModRefInfo(Call, Loc);
 }
 
-ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS1,
-                                            ImmutableCallSite CS2) {
+ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call1,
+                                            const CallBase *Call2) {
   if (!EnableTBAA)
-    return AAResultBase::getModRefInfo(CS1, CS2);
+    return AAResultBase::getModRefInfo(Call1, Call2);
 
-  if (const MDNode *M1 =
-          CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
-    if (const MDNode *M2 =
-            CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
+  if (const MDNode *M1 = Call1->getMetadata(LLVMContext::MD_tbaa))
+    if (const MDNode *M2 = Call2->getMetadata(LLVMContext::MD_tbaa))
       if (!Aliases(M1, M2))
         return ModRefInfo::NoModRef;
 
-  return AAResultBase::getModRefInfo(CS1, CS2);
+  return AAResultBase::getModRefInfo(Call1, Call2);
 }
 
 bool MDNode::isTBAAVtableAccess() const {
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 6f01960..0446426 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -2023,10 +2023,10 @@
       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
         return true;
 
-    if (auto CS = ImmutableCallSite(V)) {
-      if (CS.isReturnNonNull())
+    if (const auto *Call = dyn_cast<CallBase>(V)) {
+      if (Call->isReturnNonNull())
         return true;
-      if (const auto *RP = getArgumentAliasingToReturnedPointer(CS))
+      if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
         return isKnownNonZero(RP, Depth, Q);
     }
   }
@@ -3389,7 +3389,14 @@
       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
         break;
 
-      ByteOffset += GEPOffset.getSExtValue();
+      APInt OrigByteOffset(ByteOffset);
+      ByteOffset += GEPOffset.sextOrTrunc(ByteOffset.getBitWidth());
+      if (ByteOffset.getMinSignedBits() > 64) {
+        // Stop traversal if the pointer offset wouldn't fit into int64_t
+        // (this should be removed if Offset is updated to an APInt)
+        ByteOffset = OrigByteOffset;
+        break;
+      }
 
       Ptr = GEP->getPointerOperand();
     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
@@ -3617,21 +3624,21 @@
   return Len == ~0ULL ? 1 : Len;
 }
 
-const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) {
-  assert(CS &&
-         "getArgumentAliasingToReturnedPointer only works on nonnull CallSite");
-  if (const Value *RV = CS.getReturnedArgOperand())
+const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
+  assert(Call &&
+         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
+  if (const Value *RV = Call->getReturnedArgOperand())
     return RV;
   // This can be used only as an aliasing property.
-  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS))
-    return CS.getArgOperand(0);
+  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
+    return Call->getArgOperand(0);
   return nullptr;
 }
 
 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
-    ImmutableCallSite CS) {
-  return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
-         CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
+    const CallBase *Call) {
+  return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
+         Call->getIntrinsicID() == Intrinsic::strip_invariant_group;
 }
 
 /// \p PN defines a loop-variant pointer to an object.  Check if the
@@ -3679,7 +3686,7 @@
       // An alloca can't be further simplified.
       return V;
     } else {
-      if (auto CS = CallSite(V)) {
+      if (auto *Call = dyn_cast<CallBase>(V)) {
         // CaptureTracking can know about special capturing properties of some
         // intrinsics like launder.invariant.group, that can't be expressed with
         // the attributes, but have properties like returning aliasing pointer.
@@ -3689,7 +3696,7 @@
         // because it should be in sync with CaptureTracking. Not using it may
         // cause weird miscompilations where two aliasing pointers are
         // assumed to be noalias.
-        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
+        if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
           V = RP;
           continue;
         }
@@ -3822,8 +3829,7 @@
     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
     if (!II) return false;
 
-    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
-        II->getIntrinsicID() != Intrinsic::lifetime_end)
+    if (!II->isLifetimeStartOrEnd())
       return false;
   }
   return true;
@@ -5236,21 +5242,16 @@
   return IsMatchingOps || IsSwappedOps;
 }
 
-/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
-/// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
-/// BRHS" is false.  Otherwise, return None if we can't infer anything.
+/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
+/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
+/// Otherwise, return None if we can't infer anything.
 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
-                                                    const Value *ALHS,
-                                                    const Value *ARHS,
                                                     CmpInst::Predicate BPred,
-                                                    const Value *BLHS,
-                                                    const Value *BRHS,
-                                                    bool IsSwappedOps) {
-  // Canonicalize the operands so they're matching.
-  if (IsSwappedOps) {
-    std::swap(BLHS, BRHS);
+                                                    bool AreSwappedOps) {
+  // Canonicalize the predicate as if the operands were not commuted.
+  if (AreSwappedOps)
     BPred = ICmpInst::getSwappedPredicate(BPred);
-  }
+
   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
     return true;
   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
@@ -5259,15 +5260,14 @@
   return None;
 }
 
-/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
-/// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
-/// C2" is false.  Otherwise, return None if we can't infer anything.
+/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
+/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
+/// Otherwise, return None if we can't infer anything.
 static Optional<bool>
-isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
+isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
                                  const ConstantInt *C1,
                                  CmpInst::Predicate BPred,
-                                 const Value *BLHS, const ConstantInt *C2) {
-  assert(ALHS == BLHS && "LHS operands must match.");
+                                 const ConstantInt *C2) {
   ConstantRange DomCR =
       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
   ConstantRange CR =
@@ -5299,10 +5299,10 @@
   ICmpInst::Predicate BPred = RHS->getPredicate();
 
   // Can we infer anything when the two compares have matching operands?
-  bool IsSwappedOps;
-  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
+  bool AreSwappedOps;
+  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
-            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
+            APred, BPred, AreSwappedOps))
       return Implication;
     // No amount of additional analysis will infer the second condition, so
     // early exit.
@@ -5313,8 +5313,7 @@
   // constants (not necessarily matching)?
   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
-            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
-            cast<ConstantInt>(BRHS)))
+            APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
       return Implication;
     // No amount of additional analysis will infer the second condition, so
     // early exit.
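
The constant-operand case reduces to range reasoning: the first compare carves out the exact set of values X can take, and the second compare is implied true when that set lies entirely inside the second compare's range, implied false when the two are disjoint. A worked sketch: does X > 5 imply X > 3?

    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/InstrTypes.h"

    // Sketch: DomCR is every X with X > 5; CR is every X with X > 3.
    // CR contains DomCR, so the implication holds.
    bool impliedExample() {
      llvm::APInt C1(32, 5), C2(32, 3);
      llvm::ConstantRange DomCR = llvm::ConstantRange::makeExactICmpRegion(
          llvm::CmpInst::ICMP_SGT, C1);
      llvm::ConstantRange CR = llvm::ConstantRange::makeExactICmpRegion(
          llvm::CmpInst::ICMP_SGT, C2);
      return CR.contains(DomCR);
    }
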
diff --git a/lib/Analysis/VectorUtils.cpp b/lib/Analysis/VectorUtils.cpp
index 9ebb016..5656a19 100644
--- a/lib/Analysis/VectorUtils.cpp
+++ b/lib/Analysis/VectorUtils.cpp
@@ -74,6 +74,10 @@
   case Intrinsic::fmuladd:
   case Intrinsic::powi:
   case Intrinsic::canonicalize:
+  case Intrinsic::sadd_sat:
+  case Intrinsic::ssub_sat:
+  case Intrinsic::uadd_sat:
+  case Intrinsic::usub_sat:
     return true;
   default:
     return false;
@@ -464,16 +468,100 @@
   return MinBWs;
 }
 
+/// Add all access groups in @p AccGroups to @p List.
+template <typename ListT>
+static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
+  // Interpret an access group as a list containing itself.
+  if (AccGroups->getNumOperands() == 0) {
+    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
+    List.insert(AccGroups);
+    return;
+  }
+
+  for (auto &AccGroupListOp : AccGroups->operands()) {
+    auto *Item = cast<MDNode>(AccGroupListOp.get());
+    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
+    List.insert(Item);
+  }
+}
+
+MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
+  if (!AccGroups1)
+    return AccGroups2;
+  if (!AccGroups2)
+    return AccGroups1;
+  if (AccGroups1 == AccGroups2)
+    return AccGroups1;
+
+  SmallSetVector<Metadata *, 4> Union;
+  addToAccessGroupList(Union, AccGroups1);
+  addToAccessGroupList(Union, AccGroups2);
+
+  if (Union.size() == 0)
+    return nullptr;
+  if (Union.size() == 1)
+    return cast<MDNode>(Union.front());
+
+  LLVMContext &Ctx = AccGroups1->getContext();
+  return MDNode::get(Ctx, Union.getArrayRef());
+}
+
+MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
+                                    const Instruction *Inst2) {
+  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
+  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();
+
+  if (!MayAccessMem1 && !MayAccessMem2)
+    return nullptr;
+  if (!MayAccessMem1)
+    return Inst2->getMetadata(LLVMContext::MD_access_group);
+  if (!MayAccessMem2)
+    return Inst1->getMetadata(LLVMContext::MD_access_group);
+
+  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
+  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
+  if (!MD1 || !MD2)
+    return nullptr;
+  if (MD1 == MD2)
+    return MD1;
+
+  // Use a set for a scalable 'contains' check.
+  SmallPtrSet<Metadata *, 4> AccGroupSet2;
+  addToAccessGroupList(AccGroupSet2, MD2);
+
+  SmallVector<Metadata *, 4> Intersection;
+  if (MD1->getNumOperands() == 0) {
+    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
+    if (AccGroupSet2.count(MD1))
+      Intersection.push_back(MD1);
+  } else {
+    for (const MDOperand &Node : MD1->operands()) {
+      auto *Item = cast<MDNode>(Node.get());
+      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
+      if (AccGroupSet2.count(Item))
+        Intersection.push_back(Item);
+    }
+  }
+
+  if (Intersection.size() == 0)
+    return nullptr;
+  if (Intersection.size() == 1)
+    return cast<MDNode>(Intersection.front());
+
+  LLVMContext &Ctx = Inst1->getContext();
+  return MDNode::get(Ctx, Intersection);
+}
+
 /// \returns \p I after propagating metadata from \p VL.
 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
   Instruction *I0 = cast<Instruction>(VL[0]);
   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
   I0->getAllMetadataOtherThanDebugLoc(Metadata);
 
-  for (auto Kind :
-       {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
-        LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
-        LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load}) {
+  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
+                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
+                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
+                    LLVMContext::MD_access_group}) {
     MDNode *MD = I0->getMetadata(Kind);
 
     for (int J = 1, E = VL.size(); MD && J != E; ++J) {
@@ -494,6 +582,9 @@
       case LLVMContext::MD_invariant_load:
         MD = MDNode::intersect(MD, IMD);
         break;
+      case LLVMContext::MD_access_group:
+        MD = intersectAccessGroups(Inst, IJ);
+        break;
       default:
         llvm_unreachable("unhandled metadata");
       }
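
An access group is a distinct, operand-less MDNode attached to memory instructions via !llvm.access.group; an instruction belonging to several groups instead carries a list node whose operands are the groups. The helpers above treat a lone group as the singleton list containing itself, which is why both shapes are accepted. A small free-standing sketch of the union semantics (hypothetical example code, not part of this patch):

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    using namespace llvm;

    void accessGroupUnionExample(LLVMContext &Ctx) {
      // Access groups are distinct MDNodes with no operands.
      MDNode *G1 = MDNode::getDistinct(Ctx, {});
      MDNode *G2 = MDNode::getDistinct(Ctx, {});
      // A list of access groups is a uniqued node over those groups.
      MDNode *List = MDNode::get(Ctx, {G1, G2});
      // Uniting a single group with a list deduplicates: the result
      // is a list node containing G1 and G2 exactly once.
      MDNode *U = uniteAccessGroups(G1, List);
      (void)U;
    }

intersectAccessGroups is the dual operation, used below by propagateMetadata so a vectorized instruction only keeps the groups common to all scalar instructions it replaces.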
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index f887372..ee63450 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -7519,7 +7519,7 @@
   return false;
 }
 
-auto FwdVIRef = (GlobalValueSummaryMapTy::value_type *)-8;
+static const auto FwdVIRef = (GlobalValueSummaryMapTy::value_type *)-8;
 
 static void resolveFwdRef(ValueInfo *Fwd, ValueInfo &Resolved) {
   bool ReadOnly = Fwd->isReadOnly();
diff --git a/lib/BinaryFormat/Dwarf.cpp b/lib/BinaryFormat/Dwarf.cpp
index 5984de7..46f8056 100644
--- a/lib/BinaryFormat/Dwarf.cpp
+++ b/lib/BinaryFormat/Dwarf.cpp
@@ -13,6 +13,7 @@
 
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
 #include "llvm/Support/ErrorHandling.h"
 
 using namespace llvm;
@@ -300,7 +301,7 @@
   switch (Language) {
   default:
     return StringRef();
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)                              \
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
   case DW_LANG_##NAME:                                                         \
     return "DW_LANG_" #NAME;
 #include "llvm/BinaryFormat/Dwarf.def"
@@ -309,7 +310,7 @@
 
 unsigned llvm::dwarf::getLanguage(StringRef LanguageString) {
   return StringSwitch<unsigned>(LanguageString)
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)                              \
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
   .Case("DW_LANG_" #NAME, DW_LANG_##NAME)
 #include "llvm/BinaryFormat/Dwarf.def"
       .Default(0);
@@ -319,7 +320,7 @@
   switch (Lang) {
   default:
     return 0;
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)                              \
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
   case DW_LANG_##NAME:                                                         \
     return VERSION;
 #include "llvm/BinaryFormat/Dwarf.def"
@@ -330,13 +331,24 @@
   switch (Lang) {
   default:
     return 0;
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)                              \
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
   case DW_LANG_##NAME:                                                         \
     return DWARF_VENDOR_##VENDOR;
 #include "llvm/BinaryFormat/Dwarf.def"
   }
 }
 
+Optional<unsigned> llvm::dwarf::LanguageLowerBound(dwarf::SourceLanguage Lang) {
+  switch (Lang) {
+  default:
+    return None;
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
+  case DW_LANG_##NAME:                                                         \
+    return LOWER_BOUND;
+#include "llvm/BinaryFormat/Dwarf.def"
+  }
+}
+
 StringRef llvm::dwarf::CaseString(unsigned Case) {
   switch (Case) {
   case DW_ID_case_sensitive:
@@ -455,14 +467,32 @@
   }
 }
 
-StringRef llvm::dwarf::CallFrameString(unsigned Encoding) {
+StringRef llvm::dwarf::CallFrameString(unsigned Encoding,
+    Triple::ArchType Arch) {
+  assert(Arch != llvm::Triple::ArchType::UnknownArch);
+#define SELECT_AARCH64 (Arch == llvm::Triple::aarch64_be || Arch == llvm::Triple::aarch64)
+#define SELECT_MIPS64 (Arch == llvm::Triple::mips64)
+#define SELECT_SPARC (Arch == llvm::Triple::sparc || Arch == llvm::Triple::sparcv9)
+#define SELECT_X86 (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
+#define HANDLE_DW_CFA(ID, NAME)
+#define HANDLE_DW_CFA_PRED(ID, NAME, PRED) \
+  if (ID == Encoding && PRED) \
+    return "DW_CFA_" #NAME;
+#include "llvm/BinaryFormat/Dwarf.def"
+
   switch (Encoding) {
   default:
     return StringRef();
+#define HANDLE_DW_CFA_PRED(ID, NAME, PRED)
 #define HANDLE_DW_CFA(ID, NAME)                                                \
   case DW_CFA_##NAME:                                                          \
     return "DW_CFA_" #NAME;
 #include "llvm/BinaryFormat/Dwarf.def"
+
+#undef SELECT_X86
+#undef SELECT_SPARC
+#undef SELECT_MIPS64
+#undef SELECT_AARCH64
   }
 }
 
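The .def file now distinguishes vendor-specific call-frame opcodes with HANDLE_DW_CFA_PRED, whose third argument is one of the SELECT_* predicates above; the arch check runs before the generic switch so that opcodes whose numeric values collide across vendors resolve by target. As an illustration, an entry along the lines of HANDLE_DW_CFA_PRED(0x2d, AARCH64_negate_ra_state, SELECT_AARCH64) (Dwarf.def defines the authoritative list) would expand to:

    // Expansion of one predicated branch inside CallFrameString:
    if (0x2d == Encoding &&
        (Arch == llvm::Triple::aarch64_be || Arch == llvm::Triple::aarch64))
      return "DW_CFA_AARCH64_negate_ra_state";

so on AArch64 the 0x2d encoding prints as the return-address-state opcode rather than the name another vendor assigns to the same value.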
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 846ce3a..fe051e7 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -3720,16 +3720,16 @@
           return error("EXTRACTVAL: Invalid type");
         if ((unsigned)Index != Index)
           return error("Invalid value");
-        if (IsStruct && Index >= CurTy->subtypes().size())
+        if (IsStruct && Index >= CurTy->getStructNumElements())
           return error("EXTRACTVAL: Invalid struct index");
         if (IsArray && Index >= CurTy->getArrayNumElements())
           return error("EXTRACTVAL: Invalid array index");
         EXTRACTVALIdx.push_back((unsigned)Index);
 
         if (IsStruct)
-          CurTy = CurTy->subtypes()[Index];
+          CurTy = CurTy->getStructElementType(Index);
         else
-          CurTy = CurTy->subtypes()[0];
+          CurTy = CurTy->getArrayElementType();
       }
 
       I = ExtractValueInst::Create(Agg, EXTRACTVALIdx);
@@ -3762,16 +3762,16 @@
           return error("INSERTVAL: Invalid type");
         if ((unsigned)Index != Index)
           return error("Invalid value");
-        if (IsStruct && Index >= CurTy->subtypes().size())
+        if (IsStruct && Index >= CurTy->getStructNumElements())
           return error("INSERTVAL: Invalid struct index");
         if (IsArray && Index >= CurTy->getArrayNumElements())
           return error("INSERTVAL: Invalid array index");
 
         INSERTVALIdx.push_back((unsigned)Index);
         if (IsStruct)
-          CurTy = CurTy->subtypes()[Index];
+          CurTy = CurTy->getStructElementType(Index);
         else
-          CurTy = CurTy->subtypes()[0];
+          CurTy = CurTy->getArrayElementType();
       }
 
       if (CurTy != Val->getType())
@@ -5294,18 +5294,30 @@
       break;
     case bitc::FS_FLAGS: {  // [flags]
       uint64_t Flags = Record[0];
-      // Scan flags (set only on the combined index).
-      assert(Flags <= 0x3 && "Unexpected bits in flag");
+      // Scan flags.
+      assert(Flags <= 0x1f && "Unexpected bits in flag");
 
       // 1 bit: WithGlobalValueDeadStripping flag.
+      // Set on combined index only.
       if (Flags & 0x1)
         TheIndex.setWithGlobalValueDeadStripping();
       // 1 bit: SkipModuleByDistributedBackend flag.
+      // Set on combined index only.
       if (Flags & 0x2)
         TheIndex.setSkipModuleByDistributedBackend();
       // 1 bit: HasSyntheticEntryCounts flag.
+      // Set on combined index only.
       if (Flags & 0x4)
         TheIndex.setHasSyntheticEntryCounts();
+      // 1 bit: DisableSplitLTOUnit flag.
+      // Set on per-module indexes. It is up to the client to validate
+      // the consistency of this flag across modules being linked.
+      if (Flags & 0x8)
+        TheIndex.setEnableSplitLTOUnit();
+      // 1 bit: PartiallySplitLTOUnits flag.
+      // Set on combined index only.
+      if (Flags & 0x10)
+        TheIndex.setPartiallySplitLTOUnits();
       break;
     }
     case bitc::FS_VALUE_GUID: { // [valueid, refguid]
@@ -5917,6 +5929,46 @@
   return std::move(Index);
 }
 
+static Expected<bool> getEnableSplitLTOUnitFlag(BitstreamCursor &Stream,
+                                                unsigned ID) {
+  if (Stream.EnterSubBlock(ID))
+    return error("Invalid record");
+  SmallVector<uint64_t, 64> Record;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      // If no flags record is found, conservatively return true to mimic
+      // behavior before this flag was added.
+      return true;
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Look for the FS_FLAGS record.
+    Record.clear();
+    auto BitCode = Stream.readRecord(Entry.ID, Record);
+    switch (BitCode) {
+    default: // Default behavior: ignore.
+      break;
+    case bitc::FS_FLAGS: { // [flags]
+      uint64_t Flags = Record[0];
+      // Scan flags.
+      assert(Flags <= 0x1f && "Unexpected bits in flag");
+
+      return Flags & 0x8;
+    }
+    }
+  }
+  llvm_unreachable("Exit infinite loop");
+}
+
 // Check if the given bitcode buffer contains a global value summary block.
 Expected<BitcodeLTOInfo> BitcodeModule::getLTOInfo() {
   BitstreamCursor Stream(Buffer);
@@ -5932,14 +5984,27 @@
     case BitstreamEntry::Error:
       return error("Malformed block");
     case BitstreamEntry::EndBlock:
-      return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/false};
+      return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/false,
+                            /*EnableSplitLTOUnit=*/false};
 
     case BitstreamEntry::SubBlock:
-      if (Entry.ID == bitc::GLOBALVAL_SUMMARY_BLOCK_ID)
-        return BitcodeLTOInfo{/*IsThinLTO=*/true, /*HasSummary=*/true};
+      if (Entry.ID == bitc::GLOBALVAL_SUMMARY_BLOCK_ID) {
+        Expected<bool> EnableSplitLTOUnit =
+            getEnableSplitLTOUnitFlag(Stream, Entry.ID);
+        if (!EnableSplitLTOUnit)
+          return EnableSplitLTOUnit.takeError();
+        return BitcodeLTOInfo{/*IsThinLTO=*/true, /*HasSummary=*/true,
+                              *EnableSplitLTOUnit};
+      }
 
-      if (Entry.ID == bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID)
-        return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/true};
+      if (Entry.ID == bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID) {
+        Expected<bool> EnableSplitLTOUnit =
+            getEnableSplitLTOUnitFlag(Stream, Entry.ID);
+        if (!EnableSplitLTOUnit)
+          return EnableSplitLTOUnit.takeError();
+        return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/true,
+                              *EnableSplitLTOUnit};
+      }
 
       // Ignore other sub-blocks.
       if (Stream.SkipBlock())
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index 68d79ed..ba4f932 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -3618,6 +3618,13 @@
 
   Stream.EmitRecord(bitc::FS_VERSION, ArrayRef<uint64_t>{INDEX_VERSION});
 
+  // Write the index flags.
+  uint64_t Flags = 0;
+  // Bits 1-3 are set only in the combined index, skip them.
+  if (Index->enableSplitLTOUnit())
+    Flags |= 0x8;
+  Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef<uint64_t>{Flags});
+
   if (Index->begin() == Index->end()) {
     Stream.ExitBlock();
     return;
@@ -3734,6 +3741,10 @@
     Flags |= 0x2;
   if (Index.hasSyntheticEntryCounts())
     Flags |= 0x4;
+  if (Index.enableSplitLTOUnit())
+    Flags |= 0x8;
+  if (Index.partiallySplitLTOUnits())
+    Flags |= 0x10;
   Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef<uint64_t>{Flags});
 
   for (const auto &GVI : valueIds()) {
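
Together, the reader and writer sides above pin down the FS_FLAGS bit assignments for summary indexes: 0x1, 0x2, 0x4, and 0x10 are meaningful only in the combined index, while 0x8 travels with each per-module summary so LTO clients can tell whether a module was built with a split LTO unit. A compact restatement of the layout (the enumerator names are illustrative; the values are the ones used above):

    #include <cstdint>

    // FS_FLAGS bit layout for the module/combined summary index record.
    enum SummaryFlagBits : uint64_t {
      WithGlobalValueDeadStripping   = 0x1,  // combined index only
      SkipModuleByDistributedBackend = 0x2,  // combined index only
      HasSyntheticEntryCounts        = 0x4,  // combined index only
      EnableSplitLTOUnit             = 0x8,  // per-module indexes
      PartiallySplitLTOUnits         = 0x10, // combined index only
    };
    static_assert((WithGlobalValueDeadStripping |
                   SkipModuleByDistributedBackend | HasSyntheticEntryCounts |
                   EnableSplitLTOUnit | PartiallySplitLTOUnits) == 0x1f,
                  "five bits total, matching assert(Flags <= 0x1f) above");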
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index d45dc7c..f25c7a0 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -12,6 +12,7 @@
 add_subdirectory(Analysis)
 add_subdirectory(LTO)
 add_subdirectory(MC)
+add_subdirectory(MCA)
 add_subdirectory(Object)
 add_subdirectory(ObjectYAML)
 add_subdirectory(Option)
diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp
index 27dce7f..797f05e 100644
--- a/lib/CodeGen/Analysis.cpp
+++ b/lib/CodeGen/Analysis.cpp
@@ -546,6 +546,21 @@
     CalleeAttrs.removeAttribute(Attribute::SExt);
   }
 
+  // Drop sext and zext return attributes if the result is not used.
+  // This enables tail calls for code like:
+  //
+  // define void @caller() {
+  // entry:
+  //   %unused_result = tail call zeroext i1 @callee()
+  //   br label %retlabel
+  // retlabel:
+  //   ret void
+  // }
+  if (I->use_empty()) {
+    CalleeAttrs.removeAttribute(Attribute::SExt);
+    CalleeAttrs.removeAttribute(Attribute::ZExt);
+  }
+
   // If they're still different, there's some facet we don't understand
   // (currently only "inreg", but in future who knows). It may be OK but the
   // only safe option is to reject the tail call.
diff --git a/lib/CodeGen/AsmPrinter/AddressPool.cpp b/lib/CodeGen/AsmPrinter/AddressPool.cpp
index f8143b9..042243b 100644
--- a/lib/CodeGen/AsmPrinter/AddressPool.cpp
+++ b/lib/CodeGen/AsmPrinter/AddressPool.cpp
@@ -31,9 +31,13 @@
                   + sizeof(uint8_t)  // address_size
                   + sizeof(uint8_t)  // segment_selector_size
                   + AddrSize * Pool.size(); // entries
+  Asm.OutStreamer->AddComment("Length of contribution");
   Asm.emitInt32(Length); // TODO: Support DWARF64 format.
+  Asm.OutStreamer->AddComment("DWARF version number");
   Asm.emitInt16(Asm.getDwarfVersion());
+  Asm.OutStreamer->AddComment("Address size");
   Asm.emitInt8(AddrSize);
+  Asm.OutStreamer->AddComment("Segment selector size");
   Asm.emitInt8(0); // TODO: Support non-zero segment_selector_size.
 }
 
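The four AddComment calls label the fields of the .debug_addr contribution header as emitted here; note that unit_length excludes its own four bytes. A worked example of the length computation, assuming 8-byte addresses and three pool entries:

    // unit_length = version(2) + address_size(1) + segment_selector_size(1)
    //             + entries; it does not count the length field itself.
    unsigned AddrSize = 8;
    unsigned NumEntries = 3;
    unsigned Length = 2 + 1 + 1 + AddrSize * NumEntries; // == 28 bytes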
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 6647346..7070451 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -12,7 +12,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/AsmPrinter.h"
-#include "AsmPrinterHandler.h"
 #include "CodeViewDebug.h"
 #include "DwarfDebug.h"
 #include "DwarfException.h"
@@ -36,6 +35,7 @@
 #include "llvm/BinaryFormat/COFF.h"
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/BinaryFormat/ELF.h"
+#include "llvm/CodeGen/AsmPrinterHandler.h"
 #include "llvm/CodeGen/GCMetadata.h"
 #include "llvm/CodeGen/GCMetadataPrinter.h"
 #include "llvm/CodeGen/GCStrategy.h"
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index 6055884..afce3ad 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -212,6 +212,9 @@
   case MCCFIInstruction::OpWindowSave:
     OutStreamer->EmitCFIWindowSave();
     break;
+  case MCCFIInstruction::OpNegateRAState:
+    OutStreamer->EmitCFINegateRAState();
+    break;
   case MCCFIInstruction::OpSameValue:
     OutStreamer->EmitCFISameValue(Inst.getRegister());
     break;
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h b/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h
deleted file mode 100644
index f5ac95a..0000000
--- a/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//===-- lib/CodeGen/AsmPrinter/AsmPrinterHandler.h -------------*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a generic interface for AsmPrinter handlers,
-// like debug and EH info emitters.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_ASMPRINTERHANDLER_H
-#define LLVM_LIB_CODEGEN_ASMPRINTER_ASMPRINTERHANDLER_H
-
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-
-class AsmPrinter;
-class MachineBasicBlock;
-class MachineFunction;
-class MachineInstr;
-class MCSymbol;
-
-typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm);
-
-/// Collects and handles AsmPrinter objects required to build debug
-/// or EH information.
-class AsmPrinterHandler {
-public:
-  virtual ~AsmPrinterHandler();
-
-  /// For symbols that have a size designated (e.g. common symbols),
-  /// this tracks that size.
-  virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0;
-
-  /// Emit all sections that should come after the content.
-  virtual void endModule() = 0;
-
-  /// Gather pre-function debug information.
-  /// Every beginFunction(MF) call should be followed by an endFunction(MF)
-  /// call.
-  virtual void beginFunction(const MachineFunction *MF) = 0;
-
-  // Emit any of function marker (like .cfi_endproc). This is called
-  // before endFunction and cannot switch sections.
-  virtual void markFunctionEnd();
-
-  /// Gather post-function debug information.
-  /// Please note that some AsmPrinter implementations may not call
-  /// beginFunction at all.
-  virtual void endFunction(const MachineFunction *MF) = 0;
-
-  virtual void beginFragment(const MachineBasicBlock *MBB,
-                             ExceptionSymbolProvider ESP) {}
-  virtual void endFragment() {}
-
-  /// Emit target-specific EH funclet machinery.
-  virtual void beginFunclet(const MachineBasicBlock &MBB,
-                            MCSymbol *Sym = nullptr) {}
-  virtual void endFunclet() {}
-
-  /// Process beginning of an instruction.
-  virtual void beginInstruction(const MachineInstr *MI) = 0;
-
-  /// Process end of an instruction.
-  virtual void endInstruction() = 0;
-};
-} // End of namespace llvm
-
-#endif
diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 03d8777..8cabad4 100644
--- a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -44,6 +44,7 @@
 #include "llvm/DebugInfo/CodeView/CodeView.h"
 #include "llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h"
 #include "llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h"
+#include "llvm/DebugInfo/CodeView/EnumTables.h"
 #include "llvm/DebugInfo/CodeView/Line.h"
 #include "llvm/DebugInfo/CodeView/SymbolRecord.h"
 #include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
@@ -123,6 +124,8 @@
   TheCPU =
       mapArchToCVCPUType(Triple(MMI->getModule()->getTargetTriple()).getArch());
 
+  collectGlobalVariableInfo();
+
   // Check if we should emit type record hashes.
   ConstantInt *GH = mdconst::extract_or_null<ConstantInt>(
       MMI->getModule()->getModuleFlag("CodeViewGHash"));
@@ -731,14 +734,7 @@
 }
 
 void CodeViewDebug::emitCompilerInformation() {
-  MCContext &Context = MMI->getContext();
-  MCSymbol *CompilerBegin = Context.createTempSymbol(),
-           *CompilerEnd = Context.createTempSymbol();
-  OS.AddComment("Record length");
-  OS.emitAbsoluteSymbolDiff(CompilerEnd, CompilerBegin, 2);
-  OS.EmitLabel(CompilerBegin);
-  OS.AddComment("Record kind: S_COMPILE3");
-  OS.EmitIntValue(SymbolKind::S_COMPILE3, 2);
+  MCSymbol *CompilerEnd = beginSymbolRecord(SymbolKind::S_COMPILE3);
   uint32_t Flags = 0;
 
   NamedMDNode *CUs = MMI->getModule()->getNamedMetadata("llvm.dbg.cu");
@@ -777,7 +773,7 @@
   OS.AddComment("Null-terminated compiler version string");
   emitNullTerminatedSymbolName(OS, CompilerVersion);
 
-  OS.EmitLabel(CompilerEnd);
+  endSymbolRecord(CompilerEnd);
 }
 
 static TypeIndex getStringIdTypeIdx(GlobalTypeTableBuilder &TypeTable,
@@ -813,14 +809,12 @@
 
   // Make a new .debug$S subsection for the S_BUILDINFO record, which points
   // from the module symbols into the type stream.
-  MCSymbol *BuildInfoEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
-  OS.AddComment("Record length");
-  OS.EmitIntValue(6, 2);
-  OS.AddComment("Record kind: S_BUILDINFO");
-  OS.EmitIntValue(unsigned(SymbolKind::S_BUILDINFO), 2);
+  MCSymbol *BISubsecEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
+  MCSymbol *BIEnd = beginSymbolRecord(SymbolKind::S_BUILDINFO);
   OS.AddComment("LF_BUILDINFO index");
   OS.EmitIntValue(BuildInfoIndex.getIndex(), 4);
-  endCVSubsection(BuildInfoEnd);
+  endSymbolRecord(BIEnd);
+  endCVSubsection(BISubsecEnd);
 }
 
 void CodeViewDebug::emitInlineeLinesSubsection() {
@@ -860,18 +854,11 @@
 void CodeViewDebug::emitInlinedCallSite(const FunctionInfo &FI,
                                         const DILocation *InlinedAt,
                                         const InlineSite &Site) {
-  MCSymbol *InlineBegin = MMI->getContext().createTempSymbol(),
-           *InlineEnd = MMI->getContext().createTempSymbol();
-
   assert(TypeIndices.count({Site.Inlinee, nullptr}));
   TypeIndex InlineeIdx = TypeIndices[{Site.Inlinee, nullptr}];
 
   // SymbolRecord
-  OS.AddComment("Record length");
-  OS.emitAbsoluteSymbolDiff(InlineEnd, InlineBegin, 2);   // RecordLength
-  OS.EmitLabel(InlineBegin);
-  OS.AddComment("Record kind: S_INLINESITE");
-  OS.EmitIntValue(SymbolKind::S_INLINESITE, 2); // RecordKind
+  MCSymbol *InlineEnd = beginSymbolRecord(SymbolKind::S_INLINESITE);
 
   OS.AddComment("PtrParent");
   OS.EmitIntValue(0, 4);
@@ -886,7 +873,7 @@
   OS.EmitCVInlineLinetableDirective(Site.SiteFuncId, FileId, StartLineNum,
                                     FI.Begin, FI.End);
 
-  OS.EmitLabel(InlineEnd);
+  endSymbolRecord(InlineEnd);
 
   emitLocalVariableList(FI, Site.InlinedLocals);
 
@@ -899,10 +886,7 @@
   }
 
   // Close the scope.
-  OS.AddComment("Record length");
-  OS.EmitIntValue(2, 2);                                  // RecordLength
-  OS.AddComment("Record kind: S_INLINESITE_END");
-  OS.EmitIntValue(SymbolKind::S_INLINESITE_END, 2); // RecordKind
+  emitEndSymbolRecord(SymbolKind::S_INLINESITE_END);
 }
 
 void CodeViewDebug::switchToDebugSectionForSymbol(const MCSymbol *GVSym) {
@@ -937,13 +921,7 @@
   MCSymbol *SymbolsEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
 
   // Emit S_THUNK32
-  MCSymbol *ThunkRecordBegin = MMI->getContext().createTempSymbol(),
-           *ThunkRecordEnd   = MMI->getContext().createTempSymbol();
-  OS.AddComment("Record length");
-  OS.emitAbsoluteSymbolDiff(ThunkRecordEnd, ThunkRecordBegin, 2);
-  OS.EmitLabel(ThunkRecordBegin);
-  OS.AddComment("Record kind: S_THUNK32");
-  OS.EmitIntValue(unsigned(SymbolKind::S_THUNK32), 2);
+  MCSymbol *ThunkRecordEnd = beginSymbolRecord(SymbolKind::S_THUNK32);
   OS.AddComment("PtrParent");
   OS.EmitIntValue(0, 4);
   OS.AddComment("PtrEnd");
@@ -961,17 +939,13 @@
   OS.AddComment("Function name");
   emitNullTerminatedSymbolName(OS, FuncName);
   // Additional fields specific to the thunk ordinal would go here.
-  OS.EmitLabel(ThunkRecordEnd);
+  endSymbolRecord(ThunkRecordEnd);
 
   // Local variables/inlined routines are purposely omitted here.  The point of
   // marking this as a thunk is so Visual Studio will NOT stop in this routine.
 
   // Emit S_PROC_ID_END
-  const unsigned RecordLengthForSymbolEnd = 2;
-  OS.AddComment("Record length");
-  OS.EmitIntValue(RecordLengthForSymbolEnd, 2);
-  OS.AddComment("Record kind: S_PROC_ID_END");
-  OS.EmitIntValue(unsigned(SymbolKind::S_PROC_ID_END), 2);
+  emitEndSymbolRecord(SymbolKind::S_PROC_ID_END);
 
   endCVSubsection(SymbolsEnd);
 }
@@ -1014,19 +988,9 @@
   OS.AddComment("Symbol subsection for " + Twine(FuncName));
   MCSymbol *SymbolsEnd = beginCVSubsection(DebugSubsectionKind::Symbols);
   {
-    MCSymbol *ProcRecordBegin = MMI->getContext().createTempSymbol(),
-             *ProcRecordEnd = MMI->getContext().createTempSymbol();
-    OS.AddComment("Record length");
-    OS.emitAbsoluteSymbolDiff(ProcRecordEnd, ProcRecordBegin, 2);
-    OS.EmitLabel(ProcRecordBegin);
-
-    if (GV->hasLocalLinkage()) {
-      OS.AddComment("Record kind: S_LPROC32_ID");
-      OS.EmitIntValue(unsigned(SymbolKind::S_LPROC32_ID), 2);
-    } else {
-      OS.AddComment("Record kind: S_GPROC32_ID");
-      OS.EmitIntValue(unsigned(SymbolKind::S_GPROC32_ID), 2);
-    }
+    SymbolKind ProcKind = GV->hasLocalLinkage() ? SymbolKind::S_LPROC32_ID
+                                                : SymbolKind::S_GPROC32_ID;
+    MCSymbol *ProcRecordEnd = beginSymbolRecord(ProcKind);
 
     // These fields are filled in by tools like CVPACK which run after the fact.
     OS.AddComment("PtrParent");
@@ -1055,15 +1019,9 @@
     OS.AddComment("Function name");
     // Truncate the name so we won't overflow the record length field.
     emitNullTerminatedSymbolName(OS, FuncName);
-    OS.EmitLabel(ProcRecordEnd);
+    endSymbolRecord(ProcRecordEnd);
 
-    MCSymbol *FrameProcBegin = MMI->getContext().createTempSymbol(),
-             *FrameProcEnd = MMI->getContext().createTempSymbol();
-    OS.AddComment("Record length");
-    OS.emitAbsoluteSymbolDiff(FrameProcEnd, FrameProcBegin, 2);
-    OS.EmitLabel(FrameProcBegin);
-    OS.AddComment("Record kind: S_FRAMEPROC");
-    OS.EmitIntValue(unsigned(SymbolKind::S_FRAMEPROC), 2);
+    MCSymbol *FrameProcEnd = beginSymbolRecord(SymbolKind::S_FRAMEPROC);
     // Subtract out the CSR size since MSVC excludes that and we include it.
     OS.AddComment("FrameSize");
     OS.EmitIntValue(FI.FrameSize - FI.CSRSize, 4);
@@ -1079,9 +1037,10 @@
     OS.EmitIntValue(0, 2);
     OS.AddComment("Flags (defines frame register)");
     OS.EmitIntValue(uint32_t(FI.FrameProcOpts), 4);
-    OS.EmitLabel(FrameProcEnd);
+    endSymbolRecord(FrameProcEnd);
 
     emitLocalVariableList(FI, FI.Locals);
+    emitGlobalVariableList(FI.Globals);
     emitLexicalBlockList(FI.ChildBlocks, FI);
 
     // Emit inlined call site information. Only emit functions inlined directly
@@ -1097,13 +1056,7 @@
     for (auto Annot : FI.Annotations) {
       MCSymbol *Label = Annot.first;
       MDTuple *Strs = cast<MDTuple>(Annot.second);
-      MCSymbol *AnnotBegin = MMI->getContext().createTempSymbol(),
-               *AnnotEnd = MMI->getContext().createTempSymbol();
-      OS.AddComment("Record length");
-      OS.emitAbsoluteSymbolDiff(AnnotEnd, AnnotBegin, 2);
-      OS.EmitLabel(AnnotBegin);
-      OS.AddComment("Record kind: S_ANNOTATION");
-      OS.EmitIntValue(SymbolKind::S_ANNOTATION, 2);
+      MCSymbol *AnnotEnd = beginSymbolRecord(SymbolKind::S_ANNOTATION);
       OS.EmitCOFFSecRel32(Label, /*Offset=*/0);
       // FIXME: Make sure we don't overflow the max record size.
       OS.EmitCOFFSectionIndex(Label);
@@ -1115,17 +1068,14 @@
         assert(Str.data()[Str.size()] == '\0' && "non-nullterminated MDString");
         OS.EmitBytes(StringRef(Str.data(), Str.size() + 1));
       }
-      OS.EmitLabel(AnnotEnd);
+      endSymbolRecord(AnnotEnd);
     }
 
     if (SP != nullptr)
       emitDebugInfoForUDTs(LocalUDTs);
 
     // We're done with this function.
-    OS.AddComment("Record length");
-    OS.EmitIntValue(0x0002, 2);
-    OS.AddComment("Record kind: S_PROC_ID_END");
-    OS.EmitIntValue(unsigned(SymbolKind::S_PROC_ID_END), 2);
+    emitEndSymbolRecord(SymbolKind::S_PROC_ID_END);
   }
   endCVSubsection(SymbolsEnd);
 
@@ -1888,9 +1838,19 @@
   SmallVector<TypeIndex, 8> ArgTypeIndices;
   TypeIndex ReturnTypeIndex = getTypeIndex(ReturnAndArgs[Index++]);
 
+  // If the first argument is a pointer type and this isn't a static method,
+  // treat it as the special 'this' parameter, which is encoded separately from
+  // the arguments.
   TypeIndex ThisTypeIndex;
-  if (!IsStaticMethod && ReturnAndArgs.size() > 1)
-    ThisTypeIndex = getTypeIndexForThisPtr(ReturnAndArgs[Index++], Ty);
+  if (!IsStaticMethod && ReturnAndArgs.size() > Index) {
+    if (const DIDerivedType *PtrTy =
+            dyn_cast_or_null<DIDerivedType>(ReturnAndArgs[Index].resolve())) {
+      if (PtrTy->getTag() == dwarf::DW_TAG_pointer_type) {
+        ThisTypeIndex = getTypeIndexForThisPtr(PtrTy, Ty);
+        Index++;
+      }
+    }
+  }
 
   while (Index < ReturnAndArgs.size())
     ArgTypeIndices.push_back(getTypeIndex(ReturnAndArgs[Index++]));
@@ -2099,6 +2059,7 @@
   GlobalUDTs.clear();
   TypeIndices.clear();
   CompleteTypeIndices.clear();
+  ScopeGlobals.clear();
 }
 
 void CodeViewDebug::collectMemberInfo(ClassInfo &Info,
@@ -2445,9 +2406,10 @@
 }
 
 codeview::TypeIndex
-CodeViewDebug::getTypeIndexForThisPtr(DITypeRef TypeRef,
+CodeViewDebug::getTypeIndexForThisPtr(const DIDerivedType *PtrTy,
                                       const DISubroutineType *SubroutineTy) {
-  const DIType *Ty = TypeRef.resolve();
+  assert(PtrTy->getTag() == dwarf::DW_TAG_pointer_type &&
+         "this type must be a pointer type");
 
   PointerOptions Options = PointerOptions::None;
   if (SubroutineTy->getFlags() & DINode::DIFlags::FlagLValueReference)
@@ -2460,13 +2422,13 @@
   // so that the TypeIndex for the this pointer can be shared with the type
   // index for other pointers to this class type.  If there is a ref qualifier
   // then we lookup the pointer using the subroutine as the parent type.
-  auto I = TypeIndices.find({Ty, SubroutineTy});
+  auto I = TypeIndices.find({PtrTy, SubroutineTy});
   if (I != TypeIndices.end())
     return I->second;
 
   TypeLoweringScope S(*this);
-  TypeIndex TI = lowerTypePointer(cast<DIDerivedType>(Ty), Options);
-  return recordTypeIndexForDINode(Ty, TI, SubroutineTy);
+  TypeIndex TI = lowerTypePointer(PtrTy, Options);
+  return recordTypeIndexForDINode(PtrTy, TI, SubroutineTy);
 }
 
 TypeIndex CodeViewDebug::getTypeIndexForReferenceTo(DITypeRef TypeRef) {
@@ -2595,14 +2557,7 @@
 void CodeViewDebug::emitLocalVariable(const FunctionInfo &FI,
                                       const LocalVariable &Var) {
   // LocalSym record, see SymbolRecord.h for more info.
-  MCSymbol *LocalBegin = MMI->getContext().createTempSymbol(),
-           *LocalEnd = MMI->getContext().createTempSymbol();
-  OS.AddComment("Record length");
-  OS.emitAbsoluteSymbolDiff(LocalEnd, LocalBegin, 2);
-  OS.EmitLabel(LocalBegin);
-
-  OS.AddComment("Record kind: S_LOCAL");
-  OS.EmitIntValue(unsigned(SymbolKind::S_LOCAL), 2);
+  MCSymbol *LocalEnd = beginSymbolRecord(SymbolKind::S_LOCAL);
 
   LocalSymFlags Flags = LocalSymFlags::None;
   if (Var.DIVar->isParameter())
@@ -2619,7 +2574,7 @@
   OS.EmitIntValue(static_cast<uint16_t>(Flags), 2);
   // Truncate the name so we won't overflow the record length field.
   emitNullTerminatedSymbolName(OS, Var.DIVar->getName());
-  OS.EmitLabel(LocalEnd);
+  endSymbolRecord(LocalEnd);
 
   // Calculate the on disk prefix of the appropriate def range record. The
   // records and on disk formats are described in SymbolRecords.h. BytePrefix
@@ -2691,15 +2646,7 @@
 /// lexical block scope.
 void CodeViewDebug::emitLexicalBlock(const LexicalBlock &Block,
                                      const FunctionInfo& FI) {
-  MCSymbol *RecordBegin = MMI->getContext().createTempSymbol(),
-           *RecordEnd   = MMI->getContext().createTempSymbol();
-
-  // Lexical block symbol record.
-  OS.AddComment("Record length");
-  OS.emitAbsoluteSymbolDiff(RecordEnd, RecordBegin, 2);   // Record Length
-  OS.EmitLabel(RecordBegin);
-  OS.AddComment("Record kind: S_BLOCK32");
-  OS.EmitIntValue(SymbolKind::S_BLOCK32, 2);              // Record Kind
+  MCSymbol *RecordEnd = beginSymbolRecord(SymbolKind::S_BLOCK32);
   OS.AddComment("PtrParent");
   OS.EmitIntValue(0, 4);                                  // PtrParent
   OS.AddComment("PtrEnd");
@@ -2712,19 +2659,17 @@
   OS.EmitCOFFSectionIndex(FI.Begin);                      // Func Symbol
   OS.AddComment("Lexical block name");
   emitNullTerminatedSymbolName(OS, Block.Name);           // Name
-  OS.EmitLabel(RecordEnd);
+  endSymbolRecord(RecordEnd);
 
   // Emit variables local to this lexical block.
   emitLocalVariableList(FI, Block.Locals);
+  emitGlobalVariableList(Block.Globals);
 
   // Emit lexical blocks contained within this block.
   emitLexicalBlockList(Block.Children, FI);
 
   // Close the lexical block scope.
-  OS.AddComment("Record length");
-  OS.EmitIntValue(2, 2);                                  // Record Length
-  OS.AddComment("Record kind: S_END");
-  OS.EmitIntValue(SymbolKind::S_END, 2);                  // Record Kind
+  emitEndSymbolRecord(SymbolKind::S_END);
 }
 
 /// Convenience routine for collecting lexical block information for a list
@@ -2732,9 +2677,10 @@
 void CodeViewDebug::collectLexicalBlockInfo(
         SmallVectorImpl<LexicalScope *> &Scopes,
         SmallVectorImpl<LexicalBlock *> &Blocks,
-        SmallVectorImpl<LocalVariable> &Locals) {
+        SmallVectorImpl<LocalVariable> &Locals,
+        SmallVectorImpl<CVGlobalVariable> &Globals) {
   for (LexicalScope *Scope : Scopes)
-    collectLexicalBlockInfo(*Scope, Blocks, Locals);
+    collectLexicalBlockInfo(*Scope, Blocks, Locals, Globals);
 }
 
 /// Populate the lexical blocks and local variable lists of the parent with
@@ -2742,45 +2688,58 @@
 void CodeViewDebug::collectLexicalBlockInfo(
     LexicalScope &Scope,
     SmallVectorImpl<LexicalBlock *> &ParentBlocks,
-    SmallVectorImpl<LocalVariable> &ParentLocals) {
+    SmallVectorImpl<LocalVariable> &ParentLocals,
+    SmallVectorImpl<CVGlobalVariable> &ParentGlobals) {
   if (Scope.isAbstractScope())
     return;
 
-  auto LocalsIter = ScopeVariables.find(&Scope);
-  if (LocalsIter == ScopeVariables.end()) {
-    // This scope does not contain variables and can be eliminated.
-    collectLexicalBlockInfo(Scope.getChildren(), ParentBlocks, ParentLocals);
-    return;
-  }
-  SmallVectorImpl<LocalVariable> &Locals = LocalsIter->second;
-
+  // Gather information about the lexical scope including local variables,
+  // global variables, and address ranges.
+  bool IgnoreScope = false;
+  auto LI = ScopeVariables.find(&Scope);
+  SmallVectorImpl<LocalVariable> *Locals =
+      LI != ScopeVariables.end() ? &LI->second : nullptr;
+  auto GI = ScopeGlobals.find(Scope.getScopeNode());
+  SmallVectorImpl<CVGlobalVariable> *Globals =
+      GI != ScopeGlobals.end() ? GI->second.get() : nullptr;
   const DILexicalBlock *DILB = dyn_cast<DILexicalBlock>(Scope.getScopeNode());
-  if (!DILB) {
-    // This scope is not a lexical block and can be eliminated, but keep any
-    // local variables it contains.
-    ParentLocals.append(Locals.begin(), Locals.end());
-    collectLexicalBlockInfo(Scope.getChildren(), ParentBlocks, ParentLocals);
-    return;
-  }
-
   const SmallVectorImpl<InsnRange> &Ranges = Scope.getRanges();
-  if (Ranges.size() != 1 || !getLabelAfterInsn(Ranges.front().second)) {
-    // This lexical block scope has too many address ranges to represent in the
-    // current CodeView format or does not have a valid address range.
-    // Eliminate this lexical scope and promote any locals it contains to the
-    // parent scope.
-    //
-    // For lexical scopes with multiple address ranges you may be tempted to
-    // construct a single range covering every instruction where the block is
-    // live and everything in between.  Unfortunately, Visual Studio only
-    // displays variables from the first matching lexical block scope.  If the
-    // first lexical block contains exception handling code or cold code which
-    // is moved to the bottom of the routine creating a single range covering
-    // nearly the entire routine, then it will hide all other lexical blocks
-    // and the variables they contain.
-    //
-    ParentLocals.append(Locals.begin(), Locals.end());
-    collectLexicalBlockInfo(Scope.getChildren(), ParentBlocks, ParentLocals);
+
+  // Ignore lexical scopes which do not contain variables.
+  if (!Locals && !Globals)
+    IgnoreScope = true;
+
+  // Ignore lexical scopes which are not lexical blocks.
+  if (!DILB)
+    IgnoreScope = true;
+
+  // Ignore scopes which have too many address ranges to represent in the
+  // current CodeView format or do not have a valid address range.
+  //
+  // For lexical scopes with multiple address ranges you may be tempted to
+  // construct a single range covering every instruction where the block is
+  // live and everything in between.  Unfortunately, Visual Studio only
+  // displays variables from the first matching lexical block scope.  If the
+  // first lexical block contains exception handling code or cold code which
+  // is moved to the bottom of the routine creating a single range covering
+  // nearly the entire routine, then it will hide all other lexical blocks
+  // and the variables they contain.
+  if (Ranges.size() != 1 || !getLabelAfterInsn(Ranges.front().second))
+    IgnoreScope = true;
+
+  if (IgnoreScope) {
+    // This scope can be safely ignored and eliminating it will reduce the
+    // size of the debug information. Be sure to collect any variable and scope
+    // information from the this scope or any of its children and collapse them
+    // into the parent scope.
+    if (Locals)
+      ParentLocals.append(Locals->begin(), Locals->end());
+    if (Globals)
+      ParentGlobals.append(Globals->begin(), Globals->end());
+    collectLexicalBlockInfo(Scope.getChildren(),
+                            ParentBlocks,
+                            ParentLocals,
+                            ParentGlobals);
     return;
   }
 
@@ -2791,8 +2750,8 @@
   if (!BlockInsertion.second)
     return;
 
-  // Create a lexical block containing the local variables and collect the
-  // the lexical block information for the children.
+  // Create a lexical block containing the variables and collect the
+  // lexical block information for the children.
   const InsnRange &Range = Ranges.front();
   assert(Range.first && Range.second);
   LexicalBlock &Block = BlockInsertion.first->second;
@@ -2801,9 +2760,15 @@
   assert(Block.Begin && "missing label for scope begin");
   assert(Block.End && "missing label for scope end");
   Block.Name = DILB->getName();
-  Block.Locals = std::move(Locals);
+  if (Locals)
+    Block.Locals = std::move(*Locals);
+  if (Globals)
+    Block.Globals = std::move(*Globals);
   ParentBlocks.push_back(&Block);
-  collectLexicalBlockInfo(Scope.getChildren(), Block.Children, Block.Locals);
+  collectLexicalBlockInfo(Scope.getChildren(),
+                          Block.Children,
+                          Block.Locals,
+                          Block.Globals);
 }
 
 void CodeViewDebug::endFunctionImpl(const MachineFunction *MF) {
@@ -2815,7 +2780,10 @@
 
   // Build the lexical block structure to emit for this routine.
   if (LexicalScope *CFS = LScopes.getCurrentFunctionScope())
-    collectLexicalBlockInfo(*CFS, CurFn->ChildBlocks, CurFn->Locals);
+    collectLexicalBlockInfo(*CFS,
+                            CurFn->ChildBlocks,
+                            CurFn->Locals,
+                            CurFn->Globals);
 
   // Clear the scope and variable information from the map which will not be
   // valid after we have finished processing this routine.  This also prepares
@@ -2882,30 +2850,57 @@
   OS.EmitValueToAlignment(4);
 }
 
+static StringRef getSymbolName(SymbolKind SymKind) {
+  for (const EnumEntry<SymbolKind> &EE : getSymbolTypeNames())
+    if (EE.Value == SymKind)
+      return EE.Name;
+  return "";
+}
+
+MCSymbol *CodeViewDebug::beginSymbolRecord(SymbolKind SymKind) {
+  MCSymbol *BeginLabel = MMI->getContext().createTempSymbol(),
+           *EndLabel = MMI->getContext().createTempSymbol();
+  OS.AddComment("Record length");
+  OS.emitAbsoluteSymbolDiff(EndLabel, BeginLabel, 2);
+  OS.EmitLabel(BeginLabel);
+  if (OS.isVerboseAsm())
+    OS.AddComment("Record kind: " + getSymbolName(SymKind));
+  OS.EmitIntValue(unsigned(SymKind), 2);
+  return EndLabel;
+}
+
+void CodeViewDebug::endSymbolRecord(MCSymbol *SymEnd) {
+  // MSVC does not pad out symbol records to four bytes, but LLVM does to avoid
+  // an extra copy of every symbol record in LLD. This increases object file
+  // size by less than 1% in the clang build, and is compatible with the Visual
+  // C++ linker.
+  OS.EmitValueToAlignment(4);
+  OS.EmitLabel(SymEnd);
+}
+
+void CodeViewDebug::emitEndSymbolRecord(SymbolKind EndKind) {
+  OS.AddComment("Record length");
+  OS.EmitIntValue(2, 2);
+  if (OS.isVerboseAsm())
+    OS.AddComment("Record kind: " + getSymbolName(EndKind));
+  OS.EmitIntValue(unsigned(EndKind), 2); // Record Kind
+}
+
 void CodeViewDebug::emitDebugInfoForUDTs(
     ArrayRef<std::pair<std::string, const DIType *>> UDTs) {
   for (const auto &UDT : UDTs) {
     const DIType *T = UDT.second;
     assert(shouldEmitUdt(T));
 
-    MCSymbol *UDTRecordBegin = MMI->getContext().createTempSymbol(),
-             *UDTRecordEnd = MMI->getContext().createTempSymbol();
-    OS.AddComment("Record length");
-    OS.emitAbsoluteSymbolDiff(UDTRecordEnd, UDTRecordBegin, 2);
-    OS.EmitLabel(UDTRecordBegin);
-
-    OS.AddComment("Record kind: S_UDT");
-    OS.EmitIntValue(unsigned(SymbolKind::S_UDT), 2);
-
+    MCSymbol *UDTRecordEnd = beginSymbolRecord(SymbolKind::S_UDT);
     OS.AddComment("Type");
     OS.EmitIntValue(getCompleteTypeIndex(T).getIndex(), 4);
-
     emitNullTerminatedSymbolName(OS, UDT.first);
-    OS.EmitLabel(UDTRecordEnd);
+    endSymbolRecord(UDTRecordEnd);
   }
 }
 
-void CodeViewDebug::emitDebugInfoForGlobals() {
+void CodeViewDebug::collectGlobalVariableInfo() {
   DenseMap<const DIGlobalVariableExpression *, const GlobalVariable *>
       GlobalMap;
   for (const GlobalVariable &GV : MMI->getModule()->globals()) {
@@ -2918,42 +2913,56 @@
   NamedMDNode *CUs = MMI->getModule()->getNamedMetadata("llvm.dbg.cu");
   for (const MDNode *Node : CUs->operands()) {
     const auto *CU = cast<DICompileUnit>(Node);
-
-    // First, emit all globals that are not in a comdat in a single symbol
-    // substream. MSVC doesn't like it if the substream is empty, so only open
-    // it if we have at least one global to emit.
-    switchToDebugSectionForSymbol(nullptr);
-    MCSymbol *EndLabel = nullptr;
     for (const auto *GVE : CU->getGlobalVariables()) {
-      if (const auto *GV = GlobalMap.lookup(GVE))
-        if (!GV->hasComdat() && !GV->isDeclarationForLinker()) {
-          if (!EndLabel) {
-            OS.AddComment("Symbol subsection for globals");
-            EndLabel = beginCVSubsection(DebugSubsectionKind::Symbols);
-          }
-          // FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
-          emitDebugInfoForGlobal(GVE->getVariable(), GV, Asm->getSymbol(GV));
-        }
+      const auto *GV = GlobalMap.lookup(GVE);
+      if (!GV || GV->isDeclarationForLinker())
+        continue;
+      const DIGlobalVariable *DIGV = GVE->getVariable();
+      DIScope *Scope = DIGV->getScope();
+      SmallVector<CVGlobalVariable, 1> *VariableList;
+      if (Scope && isa<DILocalScope>(Scope)) {
+        // Locate a global variable list for this scope, creating one if
+        // necessary.
+        auto Insertion = ScopeGlobals.insert(
+            {Scope, std::unique_ptr<GlobalVariableList>()});
+        if (Insertion.second)
+          Insertion.first->second = llvm::make_unique<GlobalVariableList>();
+        VariableList = Insertion.first->second.get();
+      } else if (GV->hasComdat())
+        // Emit this global variable into a COMDAT section.
+        VariableList = &ComdatVariables;
+      else
+        // Emit this global variable in a single global symbol section.
+        VariableList = &GlobalVariables;
+      CVGlobalVariable CVGV = {DIGV, GV};
+      VariableList->emplace_back(std::move(CVGV));
     }
-    if (EndLabel)
-      endCVSubsection(EndLabel);
+  }
+}
 
-    // Second, emit each global that is in a comdat into its own .debug$S
-    // section along with its own symbol substream.
-    for (const auto *GVE : CU->getGlobalVariables()) {
-      if (const auto *GV = GlobalMap.lookup(GVE)) {
-        if (GV->hasComdat()) {
-          MCSymbol *GVSym = Asm->getSymbol(GV);
-          OS.AddComment("Symbol subsection for " +
-                        Twine(GlobalValue::dropLLVMManglingEscape(GV->getName())));
-          switchToDebugSectionForSymbol(GVSym);
-          EndLabel = beginCVSubsection(DebugSubsectionKind::Symbols);
-          // FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
-          emitDebugInfoForGlobal(GVE->getVariable(), GV, GVSym);
-          endCVSubsection(EndLabel);
-        }
-      }
-    }
+void CodeViewDebug::emitDebugInfoForGlobals() {
+  // First, emit all globals that are not in a comdat in a single symbol
+  // substream. MSVC doesn't like it if the substream is empty, so only open
+  // it if we have at least one global to emit.
+  switchToDebugSectionForSymbol(nullptr);
+  if (!GlobalVariables.empty()) {
+    OS.AddComment("Symbol subsection for globals");
+    MCSymbol *EndLabel = beginCVSubsection(DebugSubsectionKind::Symbols);
+    emitGlobalVariableList(GlobalVariables);
+    endCVSubsection(EndLabel);
+  }
+
+  // Second, emit each global that is in a comdat into its own .debug$S
+  // section along with its own symbol substream.
+  for (const CVGlobalVariable &CVGV : ComdatVariables) {
+    MCSymbol *GVSym = Asm->getSymbol(CVGV.GV);
+    OS.AddComment("Symbol subsection for " +
+            Twine(GlobalValue::dropLLVMManglingEscape(CVGV.GV->getName())));
+    switchToDebugSectionForSymbol(GVSym);
+    MCSymbol *EndLabel = beginCVSubsection(DebugSubsectionKind::Symbols);
+    // FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
+    emitDebugInfoForGlobal(CVGV.DIGV, CVGV.GV, GVSym);
+    endCVSubsection(EndLabel);
   }
 }
 
@@ -2969,34 +2978,26 @@
   }
 }
 
+// Emit each global variable in the specified array.
+void CodeViewDebug::emitGlobalVariableList(ArrayRef<CVGlobalVariable> Globals) {
+  for (const CVGlobalVariable &CVGV : Globals) {
+    MCSymbol *GVSym = Asm->getSymbol(CVGV.GV);
+    // FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
+    emitDebugInfoForGlobal(CVGV.DIGV, CVGV.GV, GVSym);
+  }
+}
+
 void CodeViewDebug::emitDebugInfoForGlobal(const DIGlobalVariable *DIGV,
                                            const GlobalVariable *GV,
                                            MCSymbol *GVSym) {
-  // DataSym record, see SymbolRecord.h for more info.
-  // FIXME: Thread local data, etc
-  MCSymbol *DataBegin = MMI->getContext().createTempSymbol(),
-           *DataEnd = MMI->getContext().createTempSymbol();
-  const unsigned FixedLengthOfThisRecord = 12;
-  OS.AddComment("Record length");
-  OS.emitAbsoluteSymbolDiff(DataEnd, DataBegin, 2);
-  OS.EmitLabel(DataBegin);
-  if (DIGV->isLocalToUnit()) {
-    if (GV->isThreadLocal()) {
-      OS.AddComment("Record kind: S_LTHREAD32");
-      OS.EmitIntValue(unsigned(SymbolKind::S_LTHREAD32), 2);
-    } else {
-      OS.AddComment("Record kind: S_LDATA32");
-      OS.EmitIntValue(unsigned(SymbolKind::S_LDATA32), 2);
-    }
-  } else {
-    if (GV->isThreadLocal()) {
-      OS.AddComment("Record kind: S_GTHREAD32");
-      OS.EmitIntValue(unsigned(SymbolKind::S_GTHREAD32), 2);
-    } else {
-      OS.AddComment("Record kind: S_GDATA32");
-      OS.EmitIntValue(unsigned(SymbolKind::S_GDATA32), 2);
-    }
-  }
+  // DataSym record, see SymbolRecord.h for more info. Thread local data
+  // happens to have the same format as global data.
+  SymbolKind DataSym = GV->isThreadLocal()
+                           ? (DIGV->isLocalToUnit() ? SymbolKind::S_LTHREAD32
+                                                    : SymbolKind::S_GTHREAD32)
+                           : (DIGV->isLocalToUnit() ? SymbolKind::S_LDATA32
+                                                    : SymbolKind::S_GDATA32);
+  MCSymbol *DataEnd = beginSymbolRecord(DataSym);
   OS.AddComment("Type");
   OS.EmitIntValue(getCompleteTypeIndex(DIGV->getType()).getIndex(), 4);
   OS.AddComment("DataOffset");
@@ -3004,6 +3005,7 @@
   OS.AddComment("Segment");
   OS.EmitCOFFSectionIndex(GVSym);
   OS.AddComment("Name");
-  emitNullTerminatedSymbolName(OS, DIGV->getName(), FixedLengthOfThisRecord);
-  OS.EmitLabel(DataEnd);
+  const unsigned LengthOfDataRecord = 12;
+  emitNullTerminatedSymbolName(OS, DIGV->getName(), LengthOfDataRecord);
+  endSymbolRecord(DataEnd);
 }
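
Every CodeView symbol record in this file now funnels through the beginSymbolRecord / endSymbolRecord pair introduced above, which bracket the record with a length field, a kind field, and four-byte alignment. The caller pattern, sketched with a hypothetical record body:

    // Inside a CodeViewDebug member function (illustrative fields only).
    MCSymbol *RecordEnd = beginSymbolRecord(SymbolKind::S_LOCAL);
    OS.AddComment("Type");
    OS.EmitIntValue(/*TypeIndex=*/0x1234, 4); // ...remaining payload...
    endSymbolRecord(RecordEnd); // pads to 4 bytes, binds the length label

Parameterless terminators (S_END, S_PROC_ID_END, S_INLINESITE_END) instead go through emitEndSymbolRecord, which hard-codes the two-byte length and needs no labels.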
diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.h b/lib/CodeGen/AsmPrinter/CodeViewDebug.h
index e64197a..21557ed 100644
--- a/lib/CodeGen/AsmPrinter/CodeViewDebug.h
+++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.h
@@ -14,14 +14,14 @@
 #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_CODEVIEWDEBUG_H
 #define LLVM_LIB_CODEGEN_ASMPRINTER_CODEVIEWDEBUG_H
 
-#include "DbgEntityHistoryCalculator.h"
-#include "DebugHandlerBase.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
+#include "llvm/CodeGen/DebugHandlerBase.h"
 #include "llvm/DebugInfo/CodeView/CodeView.h"
 #include "llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h"
 #include "llvm/DebugInfo/CodeView/TypeIndex.h"
@@ -99,6 +99,11 @@
     bool UseReferenceType = false;
   };
 
+  struct CVGlobalVariable {
+    const DIGlobalVariable *DIGV;
+    const GlobalVariable *GV;
+  };
+
   struct InlineSite {
     SmallVector<LocalVariable, 1> InlinedLocals;
     SmallVector<const DILocation *, 1> ChildSites;
@@ -112,6 +117,7 @@
   // Combines information from DILexicalBlock and LexicalScope.
   struct LexicalBlock {
     SmallVector<LocalVariable, 1> Locals;
+    SmallVector<CVGlobalVariable, 1> Globals;
     SmallVector<LexicalBlock *, 1> Children;
     const MCSymbol *Begin;
     const MCSymbol *End;
@@ -134,6 +140,7 @@
     SmallVector<const DILocation *, 1> ChildSites;
 
     SmallVector<LocalVariable, 1> Locals;
+    SmallVector<CVGlobalVariable, 1> Globals;
 
     std::unordered_map<const DILexicalBlockBase*, LexicalBlock> LexicalBlocks;
 
@@ -183,6 +190,17 @@
   // and LexicalBlocks.
   DenseMap<const LexicalScope *, SmallVector<LocalVariable, 1>> ScopeVariables;
 
+  // Map to separate global variables according to the lexical scope they
+  // belong in. A null local scope represents the global scope.
+  typedef SmallVector<CVGlobalVariable, 1> GlobalVariableList;
+  DenseMap<const DIScope *, std::unique_ptr<GlobalVariableList>> ScopeGlobals;
+
+  // Array of global variables which need to be emitted into a COMDAT section.
+  SmallVector<CVGlobalVariable, 1> ComdatVariables;
+
+  // Array of non-COMDAT global variables.
+  SmallVector<CVGlobalVariable, 1> GlobalVariables;
+
   /// The set of comdat .debug$S sections that we've seen so far. Each section
   /// must start with a magic version number that must only be emitted once.
   /// This set tracks which sections we've already opened.
@@ -288,13 +306,13 @@
 
   void emitDebugInfoForFunction(const Function *GV, FunctionInfo &FI);
 
-  void emitDebugInfoForGlobals();
-
   void emitDebugInfoForRetainedTypes();
 
   void
   emitDebugInfoForUDTs(ArrayRef<std::pair<std::string, const DIType *>> UDTs);
 
+  void emitDebugInfoForGlobals();
+  void emitGlobalVariableList(ArrayRef<CVGlobalVariable> Globals);
   void emitDebugInfoForGlobal(const DIGlobalVariable *DIGV,
                               const GlobalVariable *GV, MCSymbol *GVSym);
 
@@ -302,14 +320,24 @@
   /// Returns an end label for use with endCVSubsection when the subsection is
   /// finished.
   MCSymbol *beginCVSubsection(codeview::DebugSubsectionKind Kind);
-
   void endCVSubsection(MCSymbol *EndLabel);
 
+  /// Opens a symbol record of the given kind. Returns an end label for use with
+  /// endSymbolRecord.
+  MCSymbol *beginSymbolRecord(codeview::SymbolKind Kind);
+  void endSymbolRecord(MCSymbol *SymEnd);
+
+  /// Emits an S_END, S_INLINESITE_END, or S_PROC_ID_END record. These records
+  /// are empty, so we emit them with a simpler assembly sequence that doesn't
+  /// involve labels.
+  void emitEndSymbolRecord(codeview::SymbolKind EndKind);
+
   void emitInlinedCallSite(const FunctionInfo &FI, const DILocation *InlinedAt,
                            const InlineSite &Site);
 
   using InlinedEntity = DbgValueHistoryMap::InlinedEntity;
 
+  void collectGlobalVariableInfo();
   void collectVariableInfo(const DISubprogram *SP);
 
   void collectVariableInfoFromMFTable(DenseSet<InlinedEntity> &Processed);
@@ -318,10 +346,12 @@
   // scopes, and populate it with local variables.
   void collectLexicalBlockInfo(SmallVectorImpl<LexicalScope *> &Scopes,
                                SmallVectorImpl<LexicalBlock *> &Blocks,
-                               SmallVectorImpl<LocalVariable> &Locals);
+                               SmallVectorImpl<LocalVariable> &Locals,
+                               SmallVectorImpl<CVGlobalVariable> &Globals);
   void collectLexicalBlockInfo(LexicalScope &Scope,
                                SmallVectorImpl<LexicalBlock *> &ParentBlocks,
-                               SmallVectorImpl<LocalVariable> &ParentLocals);
+                               SmallVectorImpl<LocalVariable> &ParentLocals,
+                               SmallVectorImpl<CVGlobalVariable> &ParentGlobals);
 
   /// Records information about a local variable in the appropriate scope. In
   /// particular, locals from inlined code live inside the inlining site.
@@ -347,7 +377,7 @@
                                    DITypeRef ClassTyRef = DITypeRef());
 
   codeview::TypeIndex
-  getTypeIndexForThisPtr(DITypeRef TypeRef,
+  getTypeIndexForThisPtr(const DIDerivedType *PtrTy,
                          const DISubroutineType *SubroutineTy);
 
   codeview::TypeIndex getTypeIndexForReferenceTo(DITypeRef TypeRef);
diff --git a/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp b/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
index bb2fa7d..0986782 100644
--- a/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
+++ b/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "DbgEntityHistoryCalculator.h"
+#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
diff --git a/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.h b/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.h
deleted file mode 100644
index 660d2ff..0000000
--- a/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//===- llvm/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.h -----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DBGVALUEHISTORYCALCULATOR_H
-#define LLVM_LIB_CODEGEN_ASMPRINTER_DBGVALUEHISTORYCALCULATOR_H
-
-#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/IR/DebugInfoMetadata.h"
-#include <utility>
-
-namespace llvm {
-
-class DILocalVariable;
-class MachineFunction;
-class MachineInstr;
-class TargetRegisterInfo;
-
-// For each user variable, keep a list of instruction ranges where this variable
-// is accessible. The variables are listed in order of appearance.
-class DbgValueHistoryMap {
-  // Each instruction range starts with a DBG_VALUE instruction, specifying the
-  // location of a variable, which is assumed to be valid until the end of the
-  // range. If end is not specified, location is valid until the start
-  // instruction of the next instruction range, or until the end of the
-  // function.
-public:
-  using InstrRange = std::pair<const MachineInstr *, const MachineInstr *>;
-  using InstrRanges = SmallVector<InstrRange, 4>;
-  using InlinedEntity = std::pair<const DINode *, const DILocation *>;
-  using InstrRangesMap = MapVector<InlinedEntity, InstrRanges>;
-
-private:
-  InstrRangesMap VarInstrRanges;
-
-public:
-  void startInstrRange(InlinedEntity Var, const MachineInstr &MI);
-  void endInstrRange(InlinedEntity Var, const MachineInstr &MI);
-
-  // Returns register currently describing @Var. If @Var is currently
-  // unaccessible or is not described by a register, returns 0.
-  unsigned getRegisterForVar(InlinedEntity Var) const;
-
-  bool empty() const { return VarInstrRanges.empty(); }
-  void clear() { VarInstrRanges.clear(); }
-  InstrRangesMap::const_iterator begin() const { return VarInstrRanges.begin(); }
-  InstrRangesMap::const_iterator end() const { return VarInstrRanges.end(); }
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-  LLVM_DUMP_METHOD void dump() const;
-#endif
-};
-
-/// For each inlined instance of a source-level label, keep the corresponding
-/// DBG_LABEL instruction. The DBG_LABEL instruction could be used to generate
-/// a temporary (assembler) label before it.
-class DbgLabelInstrMap {
-public:
-  using InlinedEntity = std::pair<const DINode *, const DILocation *>;
-  using InstrMap = MapVector<InlinedEntity, const MachineInstr *>;
-
-private:
-  InstrMap LabelInstr;
-
-public:
-  void  addInstr(InlinedEntity Label, const MachineInstr &MI);
-
-  bool empty() const { return LabelInstr.empty(); }
-  void clear() { LabelInstr.clear(); }
-  InstrMap::const_iterator begin() const { return LabelInstr.begin(); }
-  InstrMap::const_iterator end() const { return LabelInstr.end(); }
-};
-
-void calculateDbgEntityHistory(const MachineFunction *MF,
-                               const TargetRegisterInfo *TRI,
-                               DbgValueHistoryMap &DbgValues,
-                               DbgLabelInstrMap &DbgLabels);
-
-} // end namespace llvm
-
-#endif // LLVM_LIB_CODEGEN_ASMPRINTER_DBGVALUEHISTORYCALCULATOR_H
diff --git a/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp b/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
index a362dd4..551cd36 100644
--- a/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
+++ b/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
@@ -12,7 +12,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "DebugHandlerBase.h"
+#include "llvm/CodeGen/DebugHandlerBase.h"
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/CodeGen/AsmPrinter.h"
diff --git a/lib/CodeGen/AsmPrinter/DebugHandlerBase.h b/lib/CodeGen/AsmPrinter/DebugHandlerBase.h
deleted file mode 100644
index cdf8dc7..0000000
--- a/lib/CodeGen/AsmPrinter/DebugHandlerBase.h
+++ /dev/null
@@ -1,138 +0,0 @@
-//===-- llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.h --------*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Common functionality for different debug information format backends.
-// LLVM currently supports DWARF and CodeView.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DEBUGHANDLERBASE_H
-#define LLVM_LIB_CODEGEN_ASMPRINTER_DEBUGHANDLERBASE_H
-
-#include "AsmPrinterHandler.h"
-#include "DbgEntityHistoryCalculator.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/CodeGen/LexicalScopes.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/IR/DebugInfoMetadata.h"
-
-namespace llvm {
-
-class AsmPrinter;
-class MachineInstr;
-class MachineModuleInfo;
-
-/// Represents the location at which a variable is stored.
-struct DbgVariableLocation {
-  /// Base register.
-  unsigned Register;
-
-  /// Chain of offsetted loads necessary to load the value if it lives in
-  /// memory. Every load except for the last is pointer-sized.
-  SmallVector<int64_t, 1> LoadChain;
-
-  /// Present if the location is part of a larger variable.
-  llvm::Optional<llvm::DIExpression::FragmentInfo> FragmentInfo;
-
-  /// Extract a VariableLocation from a MachineInstr.
-  /// This will only work if Instruction is a debug value instruction
-  /// and the associated DIExpression is in one of the supported forms.
-  /// If these requirements are not met, the returned Optional will not
-  /// have a value.
-  static Optional<DbgVariableLocation>
-  extractFromMachineInstruction(const MachineInstr &Instruction);
-};
-
-/// Base class for debug information backends. Common functionality related to
-/// tracking which variables and scopes are alive at a given PC live here.
-class DebugHandlerBase : public AsmPrinterHandler {
-protected:
-  DebugHandlerBase(AsmPrinter *A);
-
-  /// Target of debug info emission.
-  AsmPrinter *Asm;
-
-  /// Collected machine module information.
-  MachineModuleInfo *MMI;
-
-  /// Previous instruction's location information. This is used to
-  /// determine label location to indicate scope boundaries in debug info.
-  /// We track the previous instruction's source location (if not line 0),
-  /// whether it was a label, and its parent BB.
-  DebugLoc PrevInstLoc;
-  MCSymbol *PrevLabel = nullptr;
-  const MachineBasicBlock *PrevInstBB = nullptr;
-
-  /// This location indicates end of function prologue and beginning of
-  /// function body.
-  DebugLoc PrologEndLoc;
-
-  /// If nonnull, stores the current machine instruction we're processing.
-  const MachineInstr *CurMI = nullptr;
-
-  LexicalScopes LScopes;
-
-  /// History of DBG_VALUE and clobber instructions for each user
-  /// variable.  Variables are listed in order of appearance.
-  DbgValueHistoryMap DbgValues;
-
-  /// Mapping of inlined labels and DBG_LABEL machine instruction.
-  DbgLabelInstrMap DbgLabels;
-
-  /// Maps instruction with label emitted before instruction.
-  /// FIXME: Make this private from DwarfDebug, we have the necessary accessors
-  /// for it.
-  DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn;
-
-  /// Maps instruction with label emitted after instruction.
-  DenseMap<const MachineInstr *, MCSymbol *> LabelsAfterInsn;
-
-  /// Indentify instructions that are marking the beginning of or
-  /// ending of a scope.
-  void identifyScopeMarkers();
-
-  /// Ensure that a label will be emitted before MI.
-  void requestLabelBeforeInsn(const MachineInstr *MI) {
-    LabelsBeforeInsn.insert(std::make_pair(MI, nullptr));
-  }
-
-  /// Ensure that a label will be emitted after MI.
-  void requestLabelAfterInsn(const MachineInstr *MI) {
-    LabelsAfterInsn.insert(std::make_pair(MI, nullptr));
-  }
-
-  virtual void beginFunctionImpl(const MachineFunction *MF) = 0;
-  virtual void endFunctionImpl(const MachineFunction *MF) = 0;
-  virtual void skippedNonDebugFunction() {}
-
-  // AsmPrinterHandler overrides.
-public:
-  void beginInstruction(const MachineInstr *MI) override;
-  void endInstruction() override;
-
-  void beginFunction(const MachineFunction *MF) override;
-  void endFunction(const MachineFunction *MF) override;
-
-  /// Return Label preceding the instruction.
-  MCSymbol *getLabelBeforeInsn(const MachineInstr *MI);
-
-  /// Return Label immediately following the instruction.
-  MCSymbol *getLabelAfterInsn(const MachineInstr *MI);
-
-  /// Return the function-local offset of an instruction. A label for the
-  /// instruction \p MI should exist (\ref getLabelAfterInsn).
-  const MCExpr *getFunctionLocalOffsetAfterInsn(const MachineInstr *MI);
-
-  /// If this type is derived from a base type then return base type size.
-  static uint64_t getBaseTypeSize(const DITypeRef TyRef);
-};
-
-}
-
-#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index d93c7f6..1dca3f0 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -1125,3 +1125,12 @@
   return getCUNode()->getEmissionKind() == DICompileUnit::LineTablesOnly ||
          (DD->useSplitDwarf() && !Skeleton);
 }
+
+void DwarfCompileUnit::addAddrTableBase() {
+  const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
+  MCSymbol *Label = DD->getAddressPool().getLabel();
+  addSectionLabel(getUnitDie(),
+                  getDwarfVersion() >= 5 ? dwarf::DW_AT_addr_base
+                                         : dwarf::DW_AT_GNU_addr_base,
+                  Label, TLOF.getDwarfAddrSection()->getBeginSymbol());
+}
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index 13679c3..9ec22f6 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -14,7 +14,6 @@
 #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H
 #define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H
 
-#include "DbgEntityHistoryCalculator.h"
 #include "DwarfDebug.h"
 #include "DwarfUnit.h"
 #include "llvm/ADT/ArrayRef.h"
@@ -23,6 +22,7 @@
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
 #include "llvm/CodeGen/DIE.h"
 #include "llvm/CodeGen/LexicalScopes.h"
 #include "llvm/IR/DebugInfoMetadata.h"
@@ -243,6 +243,9 @@
 
   void emitHeader(bool UseOffsets) override;
 
+  /// Add the DW_AT_addr_base attribute to the unit DIE.
+  void addAddrTableBase();
+
   MCSymbol *getLabelBegin() const {
     assert(getSection());
     return LabelBegin;
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 774f7d1..1de2ffb 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -568,37 +568,10 @@
   U.addFlag(D, dwarf::DW_AT_GNU_pubnames);
 }
 
-// Create new DwarfCompileUnit for the given metadata node with tag
-// DW_TAG_compile_unit.
-DwarfCompileUnit &
-DwarfDebug::getOrCreateDwarfCompileUnit(const DICompileUnit *DIUnit) {
-  if (auto *CU = CUMap.lookup(DIUnit))
-    return *CU;
-  StringRef FN = DIUnit->getFilename();
-  CompilationDir = DIUnit->getDirectory();
-
-  auto OwnedUnit = llvm::make_unique<DwarfCompileUnit>(
-      InfoHolder.getUnits().size(), DIUnit, Asm, this, &InfoHolder);
-  DwarfCompileUnit &NewCU = *OwnedUnit;
+void DwarfDebug::finishUnitAttributes(const DICompileUnit *DIUnit,
+                                      DwarfCompileUnit &NewCU) {
   DIE &Die = NewCU.getUnitDie();
-  InfoHolder.addUnit(std::move(OwnedUnit));
-  if (useSplitDwarf()) {
-    NewCU.setSkeleton(constructSkeletonCU(NewCU));
-    NewCU.addString(Die, dwarf::DW_AT_GNU_dwo_name,
-                  Asm->TM.Options.MCOptions.SplitDwarfFile);
-  }
-
-  for (auto *IE : DIUnit->getImportedEntities())
-    NewCU.addImportedEntity(IE);
-
-  // LTO with assembly output shares a single line table amongst multiple CUs.
-  // To avoid the compilation directory being ambiguous, let the line table
-  // explicitly describe the directory of all files, never relying on the
-  // compilation directory.
-  if (!Asm->OutStreamer->hasRawTextSupport() || SingleCU)
-    Asm->OutStreamer->emitDwarfFile0Directive(
-        CompilationDir, FN, NewCU.getMD5AsBytes(DIUnit->getFile()),
-        DIUnit->getSource(), NewCU.getUniqueID());
+  StringRef FN = DIUnit->getFilename();
 
   StringRef Producer = DIUnit->getProducer();
   StringRef Flags = DIUnit->getFlags();
@@ -640,11 +613,6 @@
                     dwarf::DW_FORM_data1, RVer);
   }
 
-  if (useSplitDwarf())
-    NewCU.setSection(Asm->getObjFileLowering().getDwarfInfoDWOSection());
-  else
-    NewCU.setSection(Asm->getObjFileLowering().getDwarfInfoSection());
-
   if (DIUnit->getDWOId()) {
     // This CU is either a clang module DWO or a skeleton CU.
     NewCU.addUInt(Die, dwarf::DW_AT_GNU_dwo_id, dwarf::DW_FORM_data8,
@@ -654,9 +622,44 @@
       NewCU.addString(Die, dwarf::DW_AT_GNU_dwo_name,
                       DIUnit->getSplitDebugFilename());
   }
+}
+
+// Create new DwarfCompileUnit for the given metadata node with tag
+// DW_TAG_compile_unit.
+DwarfCompileUnit &
+DwarfDebug::getOrCreateDwarfCompileUnit(const DICompileUnit *DIUnit) {
+  if (auto *CU = CUMap.lookup(DIUnit))
+    return *CU;
+
+  CompilationDir = DIUnit->getDirectory();
+
+  auto OwnedUnit = llvm::make_unique<DwarfCompileUnit>(
+      InfoHolder.getUnits().size(), DIUnit, Asm, this, &InfoHolder);
+  DwarfCompileUnit &NewCU = *OwnedUnit;
+  InfoHolder.addUnit(std::move(OwnedUnit));
+
+  for (auto *IE : DIUnit->getImportedEntities())
+    NewCU.addImportedEntity(IE);
+
+  // LTO with assembly output shares a single line table amongst multiple CUs.
+  // To avoid the compilation directory being ambiguous, let the line table
+  // explicitly describe the directory of all files, never relying on the
+  // compilation directory.
+  if (!Asm->OutStreamer->hasRawTextSupport() || SingleCU)
+    Asm->OutStreamer->emitDwarfFile0Directive(
+        CompilationDir, DIUnit->getFilename(),
+        NewCU.getMD5AsBytes(DIUnit->getFile()), DIUnit->getSource(),
+        NewCU.getUniqueID());
+
+  if (useSplitDwarf()) {
+    NewCU.setSkeleton(constructSkeletonCU(NewCU));
+    NewCU.setSection(Asm->getObjFileLowering().getDwarfInfoDWOSection());
+  } else {
+    finishUnitAttributes(DIUnit, NewCU);
+    NewCU.setSection(Asm->getObjFileLowering().getDwarfInfoSection());
+  }
 
   CUMap.insert({DIUnit, &NewCU});
-  CUDieMap.insert({&Die, &NewCU});
+  CUDieMap.insert({&NewCU.getUnitDie(), &NewCU});
   return NewCU;
 }
 
@@ -851,7 +854,12 @@
     // If we're splitting the dwarf out now that we've got the entire
     // CU then add the dwo id to it.
     auto *SkCU = TheCU.getSkeleton();
-    if (useSplitDwarf()) {
+    if (useSplitDwarf() && !empty(TheCU.getUnitDie().children())) {
+      finishUnitAttributes(TheCU.getCUNode(), TheCU);
+      TheCU.addString(TheCU.getUnitDie(), dwarf::DW_AT_GNU_dwo_name,
+                      Asm->TM.Options.MCOptions.SplitDwarfFile);
+      SkCU->addString(SkCU->getUnitDie(), dwarf::DW_AT_GNU_dwo_name,
+                      Asm->TM.Options.MCOptions.SplitDwarfFile);
       // Emit a unique identifier for this CU.
       uint64_t ID =
           DIEHash(Asm).computeCUSignature(DWOName, TheCU.getUnitDie());
@@ -870,6 +878,8 @@
         SkCU->addSectionLabel(SkCU->getUnitDie(), dwarf::DW_AT_GNU_ranges_base,
                               Sym, Sym);
       }
+    } else if (SkCU) {
+      finishUnitAttributes(SkCU->getCUNode(), *SkCU);
     }
 
     // If we have code split among multiple sections or non-contiguous
@@ -882,7 +892,9 @@
 
     // We don't keep track of which addresses are used in which CU so this
     // is a bit pessimistic under LTO.
-    if (!AddrPool.isEmpty())
+    if (!AddrPool.isEmpty() &&
+        (getDwarfVersion() >= 5 ||
+         (SkCU && !empty(TheCU.getUnitDie().children()))))
       U.addAddrTableBase();
 
     if (unsigned NumRanges = TheCU.getRanges().size()) {
@@ -1168,6 +1180,18 @@
     LLVM_DEBUG(dbgs() << "DotDebugLoc: " << *Begin << "\n");
 
     auto Value = getDebugLocValue(Begin);
+
+    // Omit entries with empty ranges as they do not have any effect in DWARF.
+    if (StartLabel == EndLabel) {
+      // If this is a fragment, we must still add the value to the list of
+      // open ranges, since it may describe non-overlapping parts of the
+      // variable.
+      if (DIExpr->isFragment())
+        OpenRanges.push_back(Value);
+      LLVM_DEBUG(dbgs() << "Omitting location list entry with empty range.\n");
+      continue;
+    }
+
     DebugLocEntry Loc(StartLabel, EndLabel, Value);
     bool couldMerge = false;
 
@@ -1906,6 +1930,7 @@
 void DebugLocEntry::finalize(const AsmPrinter &AP,
                              DebugLocStream::ListBuilder &List,
                              const DIBasicType *BT) {
+  assert(Begin != End && "unexpected location list entry with empty range");
   DebugLocStream::EntryBuilder Entry(List, Begin, End);
   BufferByteStreamer Streamer = Entry.getStreamer();
   DebugLocDwarfExpression DwarfExpr(AP.getDwarfVersion(), Streamer);
@@ -2351,8 +2376,8 @@
   }
 }
 
-void emitDebugRangesImpl(DwarfDebug &DD, AsmPrinter *Asm,
-                         const DwarfFile &Holder, MCSymbol *TableEnd) {
+static void emitDebugRangesImpl(DwarfDebug &DD, AsmPrinter *Asm,
+                                const DwarfFile &Holder, MCSymbol *TableEnd) {
   for (const RangeSpanList &List : Holder.getRangeLists())
     emitRangeList(DD, Asm, List);
 
@@ -2483,8 +2508,6 @@
 
 void DwarfDebug::initSkeletonUnit(const DwarfUnit &U, DIE &Die,
                                   std::unique_ptr<DwarfCompileUnit> NewU) {
-  NewU->addString(Die, dwarf::DW_AT_GNU_dwo_name,
-                  Asm->TM.Options.MCOptions.SplitDwarfFile);
 
   if (!CompilationDir.empty())
     NewU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.h b/lib/CodeGen/AsmPrinter/DwarfDebug.h
index c73d442..8a31e98 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -15,8 +15,6 @@
 #define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFDEBUG_H
 
 #include "AddressPool.h"
-#include "DbgEntityHistoryCalculator.h"
-#include "DebugHandlerBase.h"
 #include "DebugLocStream.h"
 #include "DwarfFile.h"
 #include "llvm/ADT/ArrayRef.h"
@@ -31,6 +29,8 @@
 #include "llvm/ADT/StringRef.h"
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/CodeGen/AccelTable.h"
+#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
+#include "llvm/CodeGen/DebugHandlerBase.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/IR/DebugLoc.h"
@@ -540,6 +540,8 @@
   /// Create new DwarfCompileUnit for the given metadata node with tag
   /// DW_TAG_compile_unit.
   DwarfCompileUnit &getOrCreateDwarfCompileUnit(const DICompileUnit *DIUnit);
+  void finishUnitAttributes(const DICompileUnit *DIUnit,
+                            DwarfCompileUnit &NewCU);
 
   /// Construct imported_module or imported_declaration DIE.
   void constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU,
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.h b/lib/CodeGen/AsmPrinter/DwarfExpression.h
index d47c4d1..91568ba 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -190,7 +190,7 @@
   /// DW_OP_stack_value.  Unfortunately, DW_OP_stack_value was not available
   /// until DWARF 4, so we will continue to generate DW_OP_constu <const> for
   /// DWARF 2 and DWARF 3. Technically, this is incorrect since DW_OP_const
-  /// <const> actually describes a value at a constant addess, not a constant
+  /// <const> actually describes a value at a constant address, not a constant
   /// value.  However, in the past there was no better way to describe a
   /// constant value, so the producers and consumers started to rely on
   /// heuristics to disambiguate the value vs. location status of the
diff --git a/lib/CodeGen/AsmPrinter/DwarfFile.cpp b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
index 4e410bb..78ccad4 100644
--- a/lib/CodeGen/AsmPrinter/DwarfFile.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
@@ -39,13 +39,17 @@
   if (TheU->getCUNode()->isDebugDirectivesOnly())
     return;
 
-  DIE &Die = TheU->getUnitDie();
-  MCSection *USection = TheU->getSection();
-  Asm->OutStreamer->SwitchSection(USection);
+  MCSection *S = TheU->getSection();
 
+  if (!S)
+    return;
+
+  Asm->OutStreamer->SwitchSection(S);
   TheU->emitHeader(UseOffsets);
+  Asm->emitDwarfDIE(TheU->getUnitDie());
 
-  Asm->emitDwarfDIE(Die);
+  if (MCSymbol *EndLabel = TheU->getEndLabel())
+    Asm->OutStreamer->EmitLabel(EndLabel);
 }
 
 // Compute the size and offset for each DIE.
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 2053395..80b365f 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -1361,7 +1361,7 @@
   if (DTy) {
     if (DD->getDwarfVersion() >= 3)
       addType(Buffer, DTy);
-    if (DD->getDwarfVersion() >= 4 && (CTy->getFlags() & DINode::FlagFixedEnum))
+    if (DD->getDwarfVersion() >= 4 && (CTy->getFlags() & DINode::FlagEnumClass))
       addFlag(Buffer, dwarf::DW_AT_enum_class);
   }
 
@@ -1553,7 +1553,14 @@
 void DwarfUnit::emitCommonHeader(bool UseOffsets, dwarf::UnitType UT) {
   // Emit size of content not including length itself
   Asm->OutStreamer->AddComment("Length of Unit");
-  Asm->emitInt32(getHeaderSize() + getUnitDie().getSize());
+  if (!DD->useSectionsAsReferences()) {
+    StringRef Prefix = isDwoUnit() ? "debug_info_dwo_" : "debug_info_";
+    MCSymbol *BeginLabel = Asm->createTempSymbol(Prefix + "start");
+    EndLabel = Asm->createTempSymbol(Prefix + "end");
+    Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);
+    Asm->OutStreamer->EmitLabel(BeginLabel);
+  } else
+    Asm->emitInt32(getHeaderSize() + getUnitDie().getSize());
 
   Asm->OutStreamer->AddComment("DWARF version number");
   unsigned Version = DD->getDwarfVersion();
@@ -1664,12 +1671,3 @@
                   DU->getLoclistsTableBaseSym(),
                   TLOF.getDwarfLoclistsSection()->getBeginSymbol());
 }
-
-void DwarfUnit::addAddrTableBase() {
-  const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-  MCSymbol *Label = DD->getAddressPool().getLabel();
-  addSectionLabel(getUnitDie(),
-                  getDwarfVersion() >= 5 ? dwarf::DW_AT_addr_base
-                                         : dwarf::DW_AT_GNU_addr_base,
-                  Label, TLOF.getDwarfAddrSection()->getBeginSymbol());
-}
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.h b/lib/CodeGen/AsmPrinter/DwarfUnit.h
index 860d165..a59ebb7 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -49,6 +49,9 @@
   /// Target of Dwarf emission.
   AsmPrinter *Asm;
 
+  /// Emitted at the end of the CU and used to compute the CU Length field.
+  MCSymbol *EndLabel = nullptr;
+
   // Holders for some common dwarf information.
   DwarfDebug *DD;
   DwarfFile *DU;
@@ -82,6 +85,7 @@
 public:
   // Accessors.
   AsmPrinter* getAsmPrinter() const { return Asm; }
+  MCSymbol *getEndLabel() const { return EndLabel; }
   uint16_t getLanguage() const { return CUNode->getSourceLanguage(); }
   const DICompileUnit *getCUNode() const { return CUNode; }
 
@@ -275,9 +279,6 @@
   /// Add the DW_AT_loclists_base attribute to the unit DIE.
   void addLoclistsBase();
 
-  /// Add the DW_AT_addr_base attribute to the unit DIE.
-  void addAddrTableBase();
-
   virtual DwarfCompileUnit &getCU() = 0;
 
   void constructTypeDIE(DIE &Buffer, const DICompositeType *CTy);
diff --git a/lib/CodeGen/AsmPrinter/EHStreamer.h b/lib/CodeGen/AsmPrinter/EHStreamer.h
index e3a6f8e9..ce912d0 100644
--- a/lib/CodeGen/AsmPrinter/EHStreamer.h
+++ b/lib/CodeGen/AsmPrinter/EHStreamer.h
@@ -14,8 +14,8 @@
 #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_EHSTREAMER_H
 #define LLVM_LIB_CODEGEN_ASMPRINTER_EHSTREAMER_H
 
-#include "AsmPrinterHandler.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/AsmPrinterHandler.h"
 #include "llvm/Support/Compiler.h"
 
 namespace llvm {
diff --git a/lib/CodeGen/AsmPrinter/WinCFGuard.h b/lib/CodeGen/AsmPrinter/WinCFGuard.h
index 124e8f0..28f119e 100644
--- a/lib/CodeGen/AsmPrinter/WinCFGuard.h
+++ b/lib/CodeGen/AsmPrinter/WinCFGuard.h
@@ -14,7 +14,7 @@
 #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_WINCFGUARD_H
 #define LLVM_LIB_CODEGEN_ASMPRINTER_WINCFGUARD_H
 
-#include "AsmPrinterHandler.h"
+#include "llvm/CodeGen/AsmPrinterHandler.h"
 #include "llvm/Support/Compiler.h"
 
 namespace llvm {
diff --git a/lib/CodeGen/AsmPrinter/WinException.cpp b/lib/CodeGen/AsmPrinter/WinException.cpp
index 2a97a2f..cf8e8c6 100644
--- a/lib/CodeGen/AsmPrinter/WinException.cpp
+++ b/lib/CodeGen/AsmPrinter/WinException.cpp
@@ -546,7 +546,7 @@
   };
 
   // Emit a label assignment with the SEH frame offset so we can use it for
-  // llvm.x86.seh.recoverfp.
+  // llvm.eh.recoverfp.
   StringRef FLinkageName =
       GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
   MCSymbol *ParentFrameOffset =
diff --git a/lib/CodeGen/AsmPrinter/WinException.h b/lib/CodeGen/AsmPrinter/WinException.h
index 728cde3..37c796f 100644
--- a/lib/CodeGen/AsmPrinter/WinException.h
+++ b/lib/CodeGen/AsmPrinter/WinException.h
@@ -68,7 +68,7 @@
       const MachineFunction *MF, const WinEHFuncInfo &FuncInfo,
       SmallVectorImpl<std::pair<const MCExpr *, int>> &IPToStateTable);
 
-  /// Emits the label used with llvm.x86.seh.recoverfp, which is used by
+  /// Emits the label used with llvm.eh.recoverfp, which is used by
   /// outlined funclets.
   void emitEHRegistrationOffsetLabel(const WinEHFuncInfo &FuncInfo,
                                      StringRef FLinkageName);
diff --git a/lib/CodeGen/CFIInstrInserter.cpp b/lib/CodeGen/CFIInstrInserter.cpp
index 4fd1194..c479985 100644
--- a/lib/CodeGen/CFIInstrInserter.cpp
+++ b/lib/CodeGen/CFIInstrInserter.cpp
@@ -207,6 +207,7 @@
       case MCCFIInstruction::OpUndefined:
       case MCCFIInstruction::OpRegister:
       case MCCFIInstruction::OpWindowSave:
+      case MCCFIInstruction::OpNegateRAState:
       case MCCFIInstruction::OpGnuArgsSize:
         break;
       }
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index 392e8bc..c35f866 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -5160,11 +5160,11 @@
       }
 
       // Generate a new GEP to replace the current one.
-      IRBuilder<> Builder(GEP);
+      LLVMContext &Ctx = GEP->getContext();
       Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
       Type *I8PtrTy =
-          Builder.getInt8PtrTy(GEP->getType()->getPointerAddressSpace());
-      Type *I8Ty = Builder.getInt8Ty();
+          Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
+      Type *I8Ty = Type::getInt8Ty(Ctx);
 
       if (!NewBaseGEP) {
         // Create a new base if we don't have one yet.  Find the insertion
@@ -5200,6 +5200,7 @@
         NewGEPBases.insert(NewBaseGEP);
       }
 
+      IRBuilder<> Builder(GEP);
       Value *NewGEP = NewBaseGEP;
       if (Offset == BaseOffset) {
         if (GEP->getType() != I8PtrTy)
diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp
index 098afd8..364e1f0 100644
--- a/lib/CodeGen/EarlyIfConversion.cpp
+++ b/lib/CodeGen/EarlyIfConversion.cpp
@@ -398,6 +398,13 @@
     return false;
   }
 
+  // Make sure the analyzed branch is conditional; one of the successors
+  // could be a landing pad. (Empty landing pads can be generated on Windows.)
+  if (Cond.empty()) {
+    LLVM_DEBUG(dbgs() << "AnalyzeBranch found an unconditional branch.\n");
+    return false;
+  }
+
   // AnalyzeBranch doesn't set FBB on a fall-through branch.
   // Make sure it is always set.
   FBB = TBB == Succ0 ? Succ1 : Succ0;
diff --git a/lib/CodeGen/ExpandMemCmp.cpp b/lib/CodeGen/ExpandMemCmp.cpp
index d7562cb..ee7683a 100644
--- a/lib/CodeGen/ExpandMemCmp.cpp
+++ b/lib/CodeGen/ExpandMemCmp.cpp
@@ -66,23 +66,18 @@
   // Represents the decomposition in blocks of the expansion. For example,
   // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
   // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
-  // TODO(courbet): Involve the target more in this computation. On X86, 7
-  // bytes can be done more efficiently with two overlaping 4-byte loads than
-  // covering the interval with [{4, 0},{2, 4},{1, 6}}.
   struct LoadEntry {
     LoadEntry(unsigned LoadSize, uint64_t Offset)
         : LoadSize(LoadSize), Offset(Offset) {
-      assert(Offset % LoadSize == 0 && "invalid load entry");
     }
 
-    uint64_t getGEPIndex() const { return Offset / LoadSize; }
-
     // The size of the load for this block, in bytes.
-    const unsigned LoadSize;
-    // The offset of this load WRT the base pointer, in bytes.
-    const uint64_t Offset;
+    unsigned LoadSize;
+    // The offset of this load from the base pointer, in bytes.
+    uint64_t Offset;
   };
-  SmallVector<LoadEntry, 8> LoadSequence;
+  using LoadEntryVector = SmallVector<LoadEntry, 8>;
+  LoadEntryVector LoadSequence;
 
   void createLoadCmpBlocks();
   void createResultBlock();
@@ -92,13 +87,23 @@
   void emitLoadCompareBlock(unsigned BlockIndex);
   void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                          unsigned &LoadIndex);
-  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned GEPIndex);
+  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
   void emitMemCmpResultBlock();
   Value *getMemCmpExpansionZeroCase();
   Value *getMemCmpEqZeroOneBlock();
   Value *getMemCmpOneBlock();
+  Value *getPtrToElementAtOffset(Value *Source, Type *LoadSizeType,
+                                 uint64_t OffsetBytes);
 
- public:
+  static LoadEntryVector
+  computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
+                            unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
+  static LoadEntryVector
+  computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
+                                 unsigned MaxNumLoads,
+                                 unsigned &NumLoadsNonOneByte);
+
+public:
   MemCmpExpansion(CallInst *CI, uint64_t Size,
                   const TargetTransformInfo::MemCmpExpansionOptions &Options,
                   unsigned MaxNumLoads, const bool IsUsedForZeroCmp,
@@ -110,6 +115,76 @@
   Value *getMemCmpExpansion();
 };
 
+MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
+    uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
+    const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
+  NumLoadsNonOneByte = 0;
+  LoadEntryVector LoadSequence;
+  uint64_t Offset = 0;
+  while (Size && !LoadSizes.empty()) {
+    const unsigned LoadSize = LoadSizes.front();
+    const uint64_t NumLoadsForThisSize = Size / LoadSize;
+    if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
+      // Do not expand if the total number of loads is larger than what the
+      // target allows. Note that it's important that we exit before completing
+      // the expansion to avoid using a ton of memory to store the expansion for
+      // large sizes.
+      return {};
+    }
+    if (NumLoadsForThisSize > 0) {
+      for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
+        LoadSequence.push_back({LoadSize, Offset});
+        Offset += LoadSize;
+      }
+      if (LoadSize > 1)
+        ++NumLoadsNonOneByte;
+      Size = Size % LoadSize;
+    }
+    LoadSizes = LoadSizes.drop_front();
+  }
+  return LoadSequence;
+}
+
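+// Worked example (illustrative only, not exercised by this patch): for
+// Size = 7 with LoadSizes = {4, 2, 1} and MaxNumLoads = 4, the greedy walk
+// above produces three loads:
+//
+//   unsigned NumNonOneByte = 0;
+//   auto Seq = computeGreedyLoadSequence(7, {4, 2, 1}, 4, NumNonOneByte);
+//   // Seq == [{4, 0}, {2, 4}, {1, 6}]; NumNonOneByte == 2.
+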
+MemCmpExpansion::LoadEntryVector
+MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
+                                                const unsigned MaxLoadSize,
+                                                const unsigned MaxNumLoads,
+                                                unsigned &NumLoadsNonOneByte) {
+  // These are already handled by the greedy approach.
+  if (Size < 2 || MaxLoadSize < 2)
+    return {};
+
+  // We try to do as many non-overlapping loads as possible starting from the
+  // beginning.
+  const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
+  assert(NumNonOverlappingLoads && "there must be at least one load");
+  // There remain 0 to (MaxLoadSize - 1) bytes to load; this will be done with
+  // an overlapping load.
+  Size = Size - NumNonOverlappingLoads * MaxLoadSize;
+  // Bail if we do not need an overlapping load; this is already handled by
+  // the greedy approach.
+  if (Size == 0)
+    return {};
+  // Bail if the number of loads (non-overlapping + potential overlapping one)
+  // is larger than the max allowed.
+  if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
+    return {};
+
+  // Add non-overlapping loads.
+  LoadEntryVector LoadSequence;
+  uint64_t Offset = 0;
+  for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
+    LoadSequence.push_back({MaxLoadSize, Offset});
+    Offset += MaxLoadSize;
+  }
+
+  // Add the last overlapping load.
+  assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
+  LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
+  NumLoadsNonOneByte = 1;
+  return LoadSequence;
+}
+
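+// Worked example (illustrative only): for the same Size = 7 with
+// MaxLoadSize = 4, the overlapping strategy needs only two loads, the second
+// of which re-reads bytes 3..6:
+//
+//   unsigned NumNonOneByte = 0;
+//   auto Seq = computeOverlappingLoadSequence(7, 4, 4, NumNonOneByte);
+//   // Seq == [{4, 0}, {4, 3}]; NumNonOneByte == 1.
+//
+// The constructor below picks whichever of the two sequences is shorter.
+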
 // Initialize the basic block structure required for expansion of a memcmp
 // call with the given maximum load size and memcmp size parameter.
 // This structure includes:
@@ -133,38 +208,31 @@
       Builder(CI) {
   assert(Size > 0 && "zero blocks");
   // Scale the max size down if the target can load more bytes than we need.
-  size_t LoadSizeIndex = 0;
-  while (LoadSizeIndex < Options.LoadSizes.size() &&
-         Options.LoadSizes[LoadSizeIndex] > Size) {
-    ++LoadSizeIndex;
+  llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
+  while (!LoadSizes.empty() && LoadSizes.front() > Size) {
+    LoadSizes = LoadSizes.drop_front();
   }
-  this->MaxLoadSize = Options.LoadSizes[LoadSizeIndex];
+  assert(!LoadSizes.empty() && "cannot load Size bytes");
+  MaxLoadSize = LoadSizes.front();
   // Compute the decomposition.
-  uint64_t CurSize = Size;
-  uint64_t Offset = 0;
-  while (CurSize && LoadSizeIndex < Options.LoadSizes.size()) {
-    const unsigned LoadSize = Options.LoadSizes[LoadSizeIndex];
-    assert(LoadSize > 0 && "zero load size");
-    const uint64_t NumLoadsForThisSize = CurSize / LoadSize;
-    if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
-      // Do not expand if the total number of loads is larger than what the
-      // target allows. Note that it's important that we exit before completing
-      // the expansion to avoid using a ton of memory to store the expansion for
-      // large sizes.
-      LoadSequence.clear();
-      return;
+  unsigned GreedyNumLoadsNonOneByte = 0;
+  LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, MaxNumLoads,
+                                           GreedyNumLoadsNonOneByte);
+  NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
+  assert(LoadSequence.size() <= MaxNumLoads && "broken invariant");
+  // If we allow overlapping loads and the load sequence is not already optimal,
+  // use overlapping loads.
+  if (Options.AllowOverlappingLoads &&
+      (LoadSequence.empty() || LoadSequence.size() > 2)) {
+    unsigned OverlappingNumLoadsNonOneByte = 0;
+    auto OverlappingLoads = computeOverlappingLoadSequence(
+        Size, MaxLoadSize, MaxNumLoads, OverlappingNumLoadsNonOneByte);
+    if (!OverlappingLoads.empty() &&
+        (LoadSequence.empty() ||
+         OverlappingLoads.size() < LoadSequence.size())) {
+      LoadSequence = OverlappingLoads;
+      NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
     }
-    if (NumLoadsForThisSize > 0) {
-      for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
-        LoadSequence.push_back({LoadSize, Offset});
-        Offset += LoadSize;
-      }
-      if (LoadSize > 1) {
-        ++NumLoadsNonOneByte;
-      }
-      CurSize = CurSize % LoadSize;
-    }
-    ++LoadSizeIndex;
   }
   assert(LoadSequence.size() <= MaxNumLoads && "broken invariant");
 }
@@ -189,30 +257,32 @@
                                    EndBlock->getParent(), EndBlock);
 }
 
+/// Return a pointer to an element of type `LoadSizeType` at offset
+/// `OffsetBytes`.
+Value *MemCmpExpansion::getPtrToElementAtOffset(Value *Source,
+                                                Type *LoadSizeType,
+                                                uint64_t OffsetBytes) {
+  if (OffsetBytes > 0) {
+    auto *ByteType = Type::getInt8Ty(CI->getContext());
+    Source = Builder.CreateGEP(
+        ByteType, Builder.CreateBitCast(Source, ByteType->getPointerTo()),
+        ConstantInt::get(ByteType, OffsetBytes));
+  }
+  return Builder.CreateBitCast(Source, LoadSizeType->getPointerTo());
+}
+
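+// Illustrative use (an assumption matching the call sites below): load an i32
+// that sits 8 bytes into the first memcmp buffer.
+//
+//   Type *I32 = Type::getInt32Ty(CI->getContext());
+//   Value *P = getPtrToElementAtOffset(CI->getArgOperand(0), I32, 8);
+//   Value *V = Builder.CreateLoad(I32, P);
+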
 // This function creates the IR instructions for loading and comparing 1 byte.
 // It loads 1 byte from each source of the memcmp parameters at the given
 // OffsetBytes. It then subtracts the two loaded values and adds this result to
 // the final phi node for selecting the memcmp result.
 void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
-                                               unsigned GEPIndex) {
-  Value *Source1 = CI->getArgOperand(0);
-  Value *Source2 = CI->getArgOperand(1);
-
+                                               unsigned OffsetBytes) {
   Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
   Type *LoadSizeType = Type::getInt8Ty(CI->getContext());
-  // Cast source to LoadSizeType*.
-  if (Source1->getType() != LoadSizeType)
-    Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo());
-  if (Source2->getType() != LoadSizeType)
-    Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo());
-
-  // Get the base address using the GEPIndex.
-  if (GEPIndex != 0) {
-    Source1 = Builder.CreateGEP(LoadSizeType, Source1,
-                                ConstantInt::get(LoadSizeType, GEPIndex));
-    Source2 = Builder.CreateGEP(LoadSizeType, Source2,
-                                ConstantInt::get(LoadSizeType, GEPIndex));
-  }
+  Value *Source1 =
+      getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType, OffsetBytes);
+  Value *Source2 =
+      getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType, OffsetBytes);
 
   Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
   Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
@@ -270,24 +340,10 @@
     IntegerType *LoadSizeType =
         IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
 
-    Value *Source1 = CI->getArgOperand(0);
-    Value *Source2 = CI->getArgOperand(1);
-
-    // Cast source to LoadSizeType*.
-    if (Source1->getType() != LoadSizeType)
-      Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo());
-    if (Source2->getType() != LoadSizeType)
-      Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo());
-
-    // Get the base address using a GEP.
-    if (CurLoadEntry.Offset != 0) {
-      Source1 = Builder.CreateGEP(
-          LoadSizeType, Source1,
-          ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex()));
-      Source2 = Builder.CreateGEP(
-          LoadSizeType, Source2,
-          ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex()));
-    }
+    Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
+                                             CurLoadEntry.Offset);
+    Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
+                                             CurLoadEntry.Offset);
 
     // Get a constant or load a value for each source address.
     Value *LoadSrc1 = nullptr;
@@ -378,8 +434,7 @@
   const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];
 
   if (CurLoadEntry.LoadSize == 1) {
-    MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex,
-                                              CurLoadEntry.getGEPIndex());
+    MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
     return;
   }
 
@@ -388,25 +443,12 @@
   Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
   assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
 
-  Value *Source1 = CI->getArgOperand(0);
-  Value *Source2 = CI->getArgOperand(1);
-
   Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
-  // Cast source to LoadSizeType*.
-  if (Source1->getType() != LoadSizeType)
-    Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo());
-  if (Source2->getType() != LoadSizeType)
-    Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo());
 
-  // Get the base address using a GEP.
-  if (CurLoadEntry.Offset != 0) {
-    Source1 = Builder.CreateGEP(
-        LoadSizeType, Source1,
-        ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex()));
-    Source2 = Builder.CreateGEP(
-        LoadSizeType, Source2,
-        ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex()));
-  }
+  Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
+                                           CurLoadEntry.Offset);
+  Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
+                                           CurLoadEntry.Offset);
 
   // Load LoadSizeType from the base address.
   Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
@@ -694,7 +736,6 @@
   if (SizeVal == 0) {
     return false;
   }
-
   // TTI call to check if target would like to expand memcmp. Also, get the
   // available load sizes.
   const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
diff --git a/lib/CodeGen/GlobalISel/CMakeLists.txt b/lib/CodeGen/GlobalISel/CMakeLists.txt
index 5f13692..da2fd3b 100644
--- a/lib/CodeGen/GlobalISel/CMakeLists.txt
+++ b/lib/CodeGen/GlobalISel/CMakeLists.txt
@@ -1,4 +1,6 @@
 add_llvm_library(LLVMGlobalISel
+        CSEInfo.cpp
+        CSEMIRBuilder.cpp
         CallLowering.cpp
         GlobalISel.cpp
         Combiner.cpp
diff --git a/lib/CodeGen/GlobalISel/CSEInfo.cpp b/lib/CodeGen/GlobalISel/CSEInfo.cpp
new file mode 100644
index 0000000..89c525c
--- /dev/null
+++ b/lib/CodeGen/GlobalISel/CSEInfo.cpp
@@ -0,0 +1,370 @@
+//===- CSEInfo.cpp --------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+#define DEBUG_TYPE "cseinfo"
+
+using namespace llvm;
+char llvm::GISelCSEAnalysisWrapperPass::ID = 0;
+INITIALIZE_PASS_BEGIN(GISelCSEAnalysisWrapperPass, DEBUG_TYPE,
+                      "Analysis containing CSE Info", false, true)
+INITIALIZE_PASS_END(GISelCSEAnalysisWrapperPass, DEBUG_TYPE,
+                    "Analysis containing CSE Info", false, true)
+
+/// -------- UniqueMachineInstr -------------//
+
+void UniqueMachineInstr::Profile(FoldingSetNodeID &ID) {
+  GISelInstProfileBuilder(ID, MI->getMF()->getRegInfo()).addNodeID(MI);
+}
+/// -----------------------------------------
+
+/// --------- CSEConfig ---------- ///
+bool CSEConfig::shouldCSEOpc(unsigned Opc) {
+  switch (Opc) {
+  default:
+    break;
+  case TargetOpcode::G_ADD:
+  case TargetOpcode::G_AND:
+  case TargetOpcode::G_ASHR:
+  case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_MUL:
+  case TargetOpcode::G_OR:
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_SUB:
+  case TargetOpcode::G_XOR:
+  case TargetOpcode::G_UDIV:
+  case TargetOpcode::G_SDIV:
+  case TargetOpcode::G_UREM:
+  case TargetOpcode::G_SREM:
+  case TargetOpcode::G_CONSTANT:
+  case TargetOpcode::G_FCONSTANT:
+  case TargetOpcode::G_ZEXT:
+  case TargetOpcode::G_SEXT:
+  case TargetOpcode::G_ANYEXT:
+  case TargetOpcode::G_UNMERGE_VALUES:
+  case TargetOpcode::G_TRUNC:
+    return true;
+  }
+  return false;
+}
+
+bool CSEConfigConstantOnly::shouldCSEOpc(unsigned Opc) {
+  return Opc == TargetOpcode::G_CONSTANT;
+}
+/// -----------------------------------------
+
+/// -------- GISelCSEInfo -------------//
+void GISelCSEInfo::setMF(MachineFunction &MF) {
+  this->MF = &MF;
+  this->MRI = &MF.getRegInfo();
+}
+
+GISelCSEInfo::~GISelCSEInfo() {}
+
+bool GISelCSEInfo::isUniqueMachineInstValid(
+    const UniqueMachineInstr &UMI) const {
+  // Should we check here and assert that the instruction has been fully
+  // constructed?
+  // FIXME: Any other checks required to be done here? Remove this method if
+  // none.
+  return true;
+}
+
+void GISelCSEInfo::invalidateUniqueMachineInstr(UniqueMachineInstr *UMI) {
+  bool Removed = CSEMap.RemoveNode(UMI);
+  (void)Removed;
+  assert(Removed && "Invalidation called on invalid UMI");
+  // FIXME: Should UMI be deallocated/destroyed?
+}
+
+UniqueMachineInstr *GISelCSEInfo::getNodeIfExists(FoldingSetNodeID &ID,
+                                                  MachineBasicBlock *MBB,
+                                                  void *&InsertPos) {
+  auto *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
+  if (Node) {
+    if (!isUniqueMachineInstValid(*Node)) {
+      invalidateUniqueMachineInstr(Node);
+      return nullptr;
+    }
+
+    if (Node->MI->getParent() != MBB)
+      return nullptr;
+  }
+  return Node;
+}
+
+void GISelCSEInfo::insertNode(UniqueMachineInstr *UMI, void *InsertPos) {
+  handleRecordedInsts();
+  assert(UMI);
+  UniqueMachineInstr *MaybeNewNode = UMI;
+  if (InsertPos)
+    CSEMap.InsertNode(UMI, InsertPos);
+  else
+    MaybeNewNode = CSEMap.GetOrInsertNode(UMI);
+  if (MaybeNewNode != UMI) {
+    // A similar node exists in the folding set. Let's ignore this one.
+    return;
+  }
+  assert(InstrMapping.count(UMI->MI) == 0 &&
+         "This instruction should not be in the map");
+  InstrMapping[UMI->MI] = MaybeNewNode;
+}
+
+UniqueMachineInstr *GISelCSEInfo::getUniqueInstrForMI(const MachineInstr *MI) {
+  assert(shouldCSE(MI->getOpcode()) && "Trying to CSE an unsupported Node");
+  auto *Node = new (UniqueInstrAllocator) UniqueMachineInstr(MI);
+  return Node;
+}
+
+void GISelCSEInfo::insertInstr(MachineInstr *MI, void *InsertPos) {
+  assert(MI);
+  // If it exists in temporary insts, remove it.
+  TemporaryInsts.remove(MI);
+  auto *Node = getUniqueInstrForMI(MI);
+  insertNode(Node, InsertPos);
+}
+
+MachineInstr *GISelCSEInfo::getMachineInstrIfExists(FoldingSetNodeID &ID,
+                                                    MachineBasicBlock *MBB,
+                                                    void *&InsertPos) {
+  handleRecordedInsts();
+  if (auto *Inst = getNodeIfExists(ID, MBB, InsertPos)) {
+    LLVM_DEBUG(dbgs() << "CSEInfo: Found Instr " << *Inst->MI << "\n";);
+    return const_cast<MachineInstr *>(Inst->MI);
+  }
+  return nullptr;
+}
+
+void GISelCSEInfo::countOpcodeHit(unsigned Opc) {
+#ifndef NDEBUG
+  if (OpcodeHitTable.count(Opc))
+    OpcodeHitTable[Opc] += 1;
+  else
+    OpcodeHitTable[Opc] = 1;
+#endif
+  // Without asserts, this is a no-op.
+}
+
+void GISelCSEInfo::recordNewInstruction(MachineInstr *MI) {
+  if (shouldCSE(MI->getOpcode())) {
+    TemporaryInsts.insert(MI);
+    LLVM_DEBUG(dbgs() << "CSEInfo: Recording new MI" << *MI << "\n";);
+  }
+}
+
+void GISelCSEInfo::handleRecordedInst(MachineInstr *MI) {
+  assert(shouldCSE(MI->getOpcode()) && "Invalid instruction for CSE");
+  auto *UMI = InstrMapping.lookup(MI);
+  LLVM_DEBUG(dbgs() << "CSEInfo: Handling recorded MI" << *MI << "\n";);
+  if (UMI) {
+    // Invalidate this MI.
+    invalidateUniqueMachineInstr(UMI);
+    InstrMapping.erase(MI);
+  }
+  // Now insert the new instruction.
+  if (UMI) {
+    // We'll reuse the same UniqueMachineInstr to avoid the new
+    // allocation.
+    *UMI = UniqueMachineInstr(MI);
+    insertNode(UMI, nullptr);
+  } else {
+    // This is a new instruction. Allocate a new UniqueMachineInstr and
+    // insert it.
+    insertInstr(MI);
+  }
+}
+
+void GISelCSEInfo::handleRemoveInst(MachineInstr *MI) {
+  if (auto *UMI = InstrMapping.lookup(MI)) {
+    invalidateUniqueMachineInstr(UMI);
+    InstrMapping.erase(MI);
+  }
+  TemporaryInsts.remove(MI);
+}
+
+void GISelCSEInfo::handleRecordedInsts() {
+  while (!TemporaryInsts.empty()) {
+    auto *MI = TemporaryInsts.pop_back_val();
+    handleRecordedInst(MI);
+  }
+}
+
+bool GISelCSEInfo::shouldCSE(unsigned Opc) const {
+  // Only GISel opcodes are CSEable.
+  if (!isPreISelGenericOpcode(Opc))
+    return false;
+  assert(CSEOpt.get() && "CSEConfig not set");
+  return CSEOpt->shouldCSEOpc(Opc);
+}
+
+void GISelCSEInfo::erasingInstr(MachineInstr &MI) { handleRemoveInst(&MI); }
+void GISelCSEInfo::createdInstr(MachineInstr &MI) { recordNewInstruction(&MI); }
+void GISelCSEInfo::changingInstr(MachineInstr &MI) {
+  // For now, perform erase, followed by insert.
+  erasingInstr(MI);
+  createdInstr(MI);
+}
+void GISelCSEInfo::changedInstr(MachineInstr &MI) { changingInstr(MI); }
+
+void GISelCSEInfo::analyze(MachineFunction &MF) {
+  setMF(MF);
+  for (auto &MBB : MF) {
+    if (MBB.empty())
+      continue;
+    for (MachineInstr &MI : MBB) {
+      if (!shouldCSE(MI.getOpcode()))
+        continue;
+      LLVM_DEBUG(dbgs() << "CSEInfo::Add MI: " << MI << "\n";);
+      insertInstr(&MI);
+    }
+  }
+}
+
+void GISelCSEInfo::releaseMemory() {
+  // print();
+  CSEMap.clear();
+  InstrMapping.clear();
+  UniqueInstrAllocator.Reset();
+  TemporaryInsts.clear();
+  CSEOpt.reset();
+  MRI = nullptr;
+  MF = nullptr;
+#ifndef NDEBUG
+  OpcodeHitTable.clear();
+#endif
+}
+
+void GISelCSEInfo::print() {
+#ifndef NDEBUG
+  for (auto &It : OpcodeHitTable) {
+    dbgs() << "CSE Count for Opc " << It.first << " : " << It.second << "\n";
+  }
+#endif
+}
+/// -----------------------------------------
+// ---- Profiling methods for FoldingSetNode --- //
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeID(const MachineInstr *MI) const {
+  addNodeIDMBB(MI->getParent());
+  addNodeIDOpcode(MI->getOpcode());
+  for (auto &Op : MI->operands())
+    addNodeIDMachineOperand(Op);
+  addNodeIDFlag(MI->getFlags());
+  return *this;
+}
+
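+// Illustrative lookup (an assumption mirroring getMachineInstrIfExists above;
+// MF, MI, MBB, and Info are hypothetical in-scope values):
+//
+//   FoldingSetNodeID ID;
+//   GISelInstProfileBuilder(ID, MF.getRegInfo()).addNodeID(&MI);
+//   void *InsertPos = nullptr;
+//   if (MachineInstr *Old = Info.getMachineInstrIfExists(ID, MBB, InsertPos))
+//     ; // Reuse Old instead of building a duplicate.
+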
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDOpcode(unsigned Opc) const {
+  ID.AddInteger(Opc);
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDRegType(const LLT &Ty) const {
+  uint64_t Val = Ty.getUniqueRAWLLTData();
+  ID.AddInteger(Val);
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDRegType(const TargetRegisterClass *RC) const {
+  ID.AddPointer(RC);
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDRegType(const RegisterBank *RB) const {
+  ID.AddPointer(RB);
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDImmediate(int64_t Imm) const {
+  ID.AddInteger(Imm);
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDRegNum(unsigned Reg) const {
+  ID.AddInteger(Reg);
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDRegType(const unsigned Reg) const {
+  addNodeIDMachineOperand(MachineOperand::CreateReg(Reg, false));
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDMBB(const MachineBasicBlock *MBB) const {
+  ID.AddPointer(MBB);
+  return *this;
+}
+
+const GISelInstProfileBuilder &
+GISelInstProfileBuilder::addNodeIDFlag(unsigned Flag) const {
+  if (Flag)
+    ID.AddInteger(Flag);
+  return *this;
+}
+
+const GISelInstProfileBuilder &GISelInstProfileBuilder::addNodeIDMachineOperand(
+    const MachineOperand &MO) const {
+  if (MO.isReg()) {
+    unsigned Reg = MO.getReg();
+    if (!MO.isDef())
+      addNodeIDRegNum(Reg);
+    LLT Ty = MRI.getType(Reg);
+    if (Ty.isValid())
+      addNodeIDRegType(Ty);
+    auto *RB = MRI.getRegBankOrNull(Reg);
+    if (RB)
+      addNodeIDRegType(RB);
+    auto *RC = MRI.getRegClassOrNull(Reg);
+    if (RC)
+      addNodeIDRegType(RC);
+    assert(!MO.isImplicit() && "Unhandled case");
+  } else if (MO.isImm())
+    ID.AddInteger(MO.getImm());
+  else if (MO.isCImm())
+    ID.AddPointer(MO.getCImm());
+  else if (MO.isFPImm())
+    ID.AddPointer(MO.getFPImm());
+  else if (MO.isPredicate())
+    ID.AddInteger(MO.getPredicate());
+  else
+    llvm_unreachable("Unhandled operand type");
+  // Handle other types
+  return *this;
+}
+
+GISelCSEInfo &GISelCSEAnalysisWrapper::get(std::unique_ptr<CSEConfig> CSEOpt,
+                                           bool Recompute) {
+  if (!AlreadyComputed || Recompute) {
+    Info.setCSEConfig(std::move(CSEOpt));
+    Info.analyze(*MF);
+    AlreadyComputed = true;
+  }
+  return Info;
+}
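+
+// Typical use from a pass (a sketch; assumes the wrapper pass was scheduled
+// and exposes its GISelCSEAnalysisWrapper via a getCSEWrapper() accessor):
+//
+//   auto &Wrapper = getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
+//   GISelCSEInfo &Info =
+//       Wrapper.get(llvm::make_unique<CSEConfig>(), /*Recompute=*/false);
+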
+void GISelCSEAnalysisWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.setPreservesAll();
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool GISelCSEAnalysisWrapperPass::runOnMachineFunction(MachineFunction &MF) {
+  releaseMemory();
+  Wrapper.setMF(MF);
+  return false;
+}
diff --git a/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
new file mode 100644
index 0000000..863efe0
--- /dev/null
+++ b/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -0,0 +1,231 @@
+//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.cpp - MIBuilder--*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the CSEMIRBuilder class which CSEs as it builds
+/// instructions.
+//===----------------------------------------------------------------------===//
+//
+
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+
+using namespace llvm;
+
+bool CSEMIRBuilder::dominates(MachineBasicBlock::const_iterator A,
+                              MachineBasicBlock::const_iterator B) const {
+  auto MBBEnd = getMBB().end();
+  if (B == MBBEnd)
+    return true;
+  assert(A->getParent() == B->getParent() &&
+         "Iterators should be in same block");
+  const MachineBasicBlock *BBA = A->getParent();
+  MachineBasicBlock::const_iterator I = BBA->begin();
+  for (; &*I != A && &*I != B; ++I)
+    ;
+  return &*I == A;
+}
+
+MachineInstrBuilder
+CSEMIRBuilder::getDominatingInstrForID(FoldingSetNodeID &ID,
+                                       void *&NodeInsertPos) {
+  GISelCSEInfo *CSEInfo = getCSEInfo();
+  assert(CSEInfo && "Can't get here without setting CSEInfo");
+  MachineBasicBlock *CurMBB = &getMBB();
+  MachineInstr *MI =
+      CSEInfo->getMachineInstrIfExists(ID, CurMBB, NodeInsertPos);
+  if (MI) {
+    auto CurrPos = getInsertPt();
+    if (!dominates(MI, CurrPos))
+      CurMBB->splice(CurrPos, CurMBB, MI);
+    return MachineInstrBuilder(getMF(), MI);
+  }
+  return MachineInstrBuilder();
+}
+
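+// Sketch of the intended CSE-at-build flow (assumed usage, not code in this
+// file; S32 is an LLT such as LLT::scalar(32), X and Y are vregs):
+//
+//   CSEMIRBuilder B(MF);
+//   B.setCSEInfo(&Info); // assumed MachineIRBuilder hook
+//   auto A1 = B.buildInstr(TargetOpcode::G_ADD, {S32}, {X, Y});
+//   auto A2 = B.buildInstr(TargetOpcode::G_ADD, {S32}, {X, Y});
+//   // A1 and A2 now wrap the same machine instruction.
+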
+bool CSEMIRBuilder::canPerformCSEForOpc(unsigned Opc) const {
+  const GISelCSEInfo *CSEInfo = getCSEInfo();
+  if (!CSEInfo || !CSEInfo->shouldCSE(Opc))
+    return false;
+  return true;
+}
+
+void CSEMIRBuilder::profileDstOp(const DstOp &Op,
+                                 GISelInstProfileBuilder &B) const {
+  switch (Op.getDstOpKind()) {
+  case DstOp::DstType::Ty_RC:
+    B.addNodeIDRegType(Op.getRegClass());
+    break;
+  default:
+    B.addNodeIDRegType(Op.getLLTTy(*getMRI()));
+    break;
+  }
+}
+
+void CSEMIRBuilder::profileSrcOp(const SrcOp &Op,
+                                 GISelInstProfileBuilder &B) const {
+  switch (Op.getSrcOpKind()) {
+  case SrcOp::SrcType::Ty_Predicate:
+    B.addNodeIDImmediate(static_cast<int64_t>(Op.getPredicate()));
+    break;
+  default:
+    B.addNodeIDRegType(Op.getReg());
+    break;
+  }
+}
+
+void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B,
+                                     unsigned Opc) const {
+  // First add the MBB (Local CSE).
+  B.addNodeIDMBB(&getMBB());
+  // Then add the opcode.
+  B.addNodeIDOpcode(Opc);
+}
+
+void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
+                                      ArrayRef<SrcOp> SrcOps,
+                                      Optional<unsigned> Flags,
+                                      GISelInstProfileBuilder &B) const {
+
+  profileMBBOpcode(B, Opc);
+  // Then add the DstOps.
+  profileDstOps(DstOps, B);
+  // Then add the SrcOps.
+  profileSrcOps(SrcOps, B);
+  // Add Flags if passed in.
+  if (Flags)
+    B.addNodeIDFlag(*Flags);
+}
+
+MachineInstrBuilder CSEMIRBuilder::memoizeMI(MachineInstrBuilder MIB,
+                                             void *NodeInsertPos) {
+  assert(canPerformCSEForOpc(MIB->getOpcode()) &&
+         "Attempting to CSE illegal op");
+  MachineInstr *MIBInstr = MIB;
+  getCSEInfo()->insertInstr(MIBInstr, NodeInsertPos);
+  return MIB;
+}
+
+bool CSEMIRBuilder::checkCopyToDefsPossible(ArrayRef<DstOp> DstOps) {
+  if (DstOps.size() == 1)
+    return true; // Always possible to emit a copy to just one vreg.
+
+  return std::all_of(DstOps.begin(), DstOps.end(), [](const DstOp &Op) {
+    DstOp::DstType DT = Op.getDstOpKind();
+    return DT == DstOp::DstType::Ty_LLT || DT == DstOp::DstType::Ty_RC;
+  });
+}
+
+MachineInstrBuilder
+CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
+                                        MachineInstrBuilder &MIB) {
+  assert(checkCopyToDefsPossible(DstOps) &&
+         "Impossible return a single MIB with copies to multiple defs");
+  if (DstOps.size() == 1) {
+    const DstOp &Op = DstOps[0];
+    if (Op.getDstOpKind() == DstOp::DstType::Ty_Reg)
+      return buildCopy(Op.getReg(), MIB->getOperand(0).getReg());
+  }
+  return MIB;
+}
+
+MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
+                                              ArrayRef<DstOp> DstOps,
+                                              ArrayRef<SrcOp> SrcOps,
+                                              Optional<unsigned> Flag) {
+  switch (Opc) {
+  default:
+    break;
+  case TargetOpcode::G_ADD:
+  case TargetOpcode::G_AND:
+  case TargetOpcode::G_ASHR:
+  case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_MUL:
+  case TargetOpcode::G_OR:
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_SUB:
+  case TargetOpcode::G_XOR:
+  case TargetOpcode::G_UDIV:
+  case TargetOpcode::G_SDIV:
+  case TargetOpcode::G_UREM:
+  case TargetOpcode::G_SREM: {
+    // Try to constant fold these.
+    assert(SrcOps.size() == 2 && "Invalid sources");
+    assert(DstOps.size() == 1 && "Invalid dsts");
+    if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(),
+                                                SrcOps[1].getReg(), *getMRI()))
+      return buildConstant(DstOps[0], Cst->getSExtValue());
+    break;
+  }
+  }
+  bool CanCopy = checkCopyToDefsPossible(DstOps);
+  if (!canPerformCSEForOpc(Opc))
+    return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag);
+  // If we can CSE this instruction, but doing so involves generating copies
+  // to multiple regs, give up. This frequently happens with UNMERGEs.
+  if (!CanCopy) {
+    auto MIB = MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag);
+    // CSEInfo would have tracked this instruction. Remove it from the temporary
+    // insts.
+    getCSEInfo()->handleRemoveInst(&*MIB);
+    return MIB;
+  }
+  FoldingSetNodeID ID;
+  GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
+  void *InsertPos = nullptr;
+  profileEverything(Opc, DstOps, SrcOps, Flag, ProfBuilder);
+  MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos);
+  if (MIB) {
+    // Handle generating copies here.
+    return generateCopiesIfRequired(DstOps, MIB);
+  }
+  // This instruction does not exist in the CSEInfo. Build it and CSE it.
+  MachineInstrBuilder NewMIB =
+      MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag);
+  return memoizeMI(NewMIB, InsertPos);
+}
+
+MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res,
+                                                 const ConstantInt &Val) {
+  constexpr unsigned Opc = TargetOpcode::G_CONSTANT;
+  if (!canPerformCSEForOpc(Opc))
+    return MachineIRBuilder::buildConstant(Res, Val);
+  FoldingSetNodeID ID;
+  GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
+  void *InsertPos = nullptr;
+  profileMBBOpcode(ProfBuilder, Opc);
+  profileDstOp(Res, ProfBuilder);
+  ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateCImm(&Val));
+  MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos);
+  if (MIB) {
+    // Handle generating copies here.
+    return generateCopiesIfRequired({Res}, MIB);
+  }
+  MachineInstrBuilder NewMIB = MachineIRBuilder::buildConstant(Res, Val);
+  return memoizeMI(NewMIB, InsertPos);
+}
+
+MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res,
+                                                  const ConstantFP &Val) {
+  constexpr unsigned Opc = TargetOpcode::G_FCONSTANT;
+  if (!canPerformCSEForOpc(Opc))
+    return MachineIRBuilder::buildFConstant(Res, Val);
+  FoldingSetNodeID ID;
+  GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
+  void *InsertPos = nullptr;
+  profileMBBOpcode(ProfBuilder, Opc);
+  profileDstOp(Res, ProfBuilder);
+  ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateFPImm(&Val));
+  MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos);
+  if (MIB) {
+    // Handle generating copies here.
+    return generateCopiesIfRequired({Res}, MIB);
+  }
+  MachineInstrBuilder NewMIB = MachineIRBuilder::buildFConstant(Res, Val);
+  return memoizeMI(NewMIB, InsertPos);
+}
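
To make the CSE behaviour above concrete: identical build requests collapse to
one instruction, and binary ops with two constant inputs fold away entirely. A
minimal sketch, assuming B is a CSEMIRBuilder already configured with
setMF/setMBB/setCSEInfo, and VRegA/VRegB are hypothetical s32 vregs:

    const LLT S32 = LLT::scalar(32);

    // The second identical request hits the FoldingSet: Add2 wraps the same
    // MachineInstr Add1 built (moved up if needed so that it dominates).
    auto Add1 = B.buildInstr(TargetOpcode::G_ADD, {S32}, {VRegA, VRegB});
    auto Add2 = B.buildInstr(TargetOpcode::G_ADD, {S32}, {VRegA, VRegB});

    // With two G_CONSTANT inputs, ConstantFoldBinOp fires: a G_CONSTANT 5 is
    // emitted (and itself CSE'd) instead of any G_ADD.
    auto C2 = B.buildConstant(S32, 2);
    auto C3 = B.buildConstant(S32, 3);
    auto Sum = B.buildInstr(TargetOpcode::G_ADD, {S32}, {C2, C3});
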
diff --git a/lib/CodeGen/GlobalISel/CallLowering.cpp b/lib/CodeGen/GlobalISel/CallLowering.cpp
index da972ea..724eced 100644
--- a/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -23,6 +23,8 @@
 
 using namespace llvm;
 
+void CallLowering::anchor() {}
+
 bool CallLowering::lowerCall(
     MachineIRBuilder &MIRBuilder, ImmutableCallSite CS, unsigned ResReg,
     ArrayRef<unsigned> ArgRegs, std::function<unsigned()> GetCalleeReg) const {
@@ -180,3 +182,5 @@
   }
   llvm_unreachable("unable to extend register");
 }
+
+void CallLowering::ValueHandler::anchor() {}
diff --git a/lib/CodeGen/GlobalISel/Combiner.cpp b/lib/CodeGen/GlobalISel/Combiner.cpp
index 90fd54e..45b0e36 100644
--- a/lib/CodeGen/GlobalISel/Combiner.cpp
+++ b/lib/CodeGen/GlobalISel/Combiner.cpp
@@ -13,7 +13,9 @@
 
 #include "llvm/CodeGen/GlobalISel/Combiner.h"
 #include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
 #include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -35,38 +37,33 @@
 /// instruction creation will schedule that instruction for a future visit.
 /// Other Combiner implementations may require more complex behaviour from
 /// their GISelChangeObserver subclass.
-class WorkListMaintainer : public GISelChangeObserver,
-                           public MachineFunction::Delegate {
+class WorkListMaintainer : public GISelChangeObserver {
   using WorkListTy = GISelWorkList<512>;
-  MachineFunction &MF;
   WorkListTy &WorkList;
   /// The instructions that have been created but we want to report once they
   /// have their operands. This is only maintained if debug output is requested.
   SmallPtrSet<const MachineInstr *, 4> CreatedInstrs;
 
 public:
-  WorkListMaintainer(MachineFunction &MF, WorkListTy &WorkList)
-      : GISelChangeObserver(), MF(MF), WorkList(WorkList) {
-    MF.setDelegate(this);
-  }
+  WorkListMaintainer(WorkListTy &WorkList)
+      : GISelChangeObserver(), WorkList(WorkList) {}
   virtual ~WorkListMaintainer() {
-    MF.resetDelegate(this);
   }
 
-  void erasingInstr(const MachineInstr &MI) override {
+  void erasingInstr(MachineInstr &MI) override {
     LLVM_DEBUG(dbgs() << "Erased: " << MI << "\n");
     WorkList.remove(&MI);
   }
-  void createdInstr(const MachineInstr &MI) override {
+  void createdInstr(MachineInstr &MI) override {
     LLVM_DEBUG(dbgs() << "Creating: " << MI << "\n");
     WorkList.insert(&MI);
     LLVM_DEBUG(CreatedInstrs.insert(&MI));
   }
-  void changingInstr(const MachineInstr &MI) override {
+  void changingInstr(MachineInstr &MI) override {
     LLVM_DEBUG(dbgs() << "Changing: " << MI << "\n");
     WorkList.insert(&MI);
   }
-  void changedInstr(const MachineInstr &MI) override {
+  void changedInstr(MachineInstr &MI) override {
     LLVM_DEBUG(dbgs() << "Changed: " << MI << "\n");
     WorkList.insert(&MI);
   }
@@ -79,13 +76,6 @@
     });
     LLVM_DEBUG(CreatedInstrs.clear());
   }
-
-  void MF_HandleInsertion(const MachineInstr &MI) override {
-    createdInstr(MI);
-  }
-  void MF_HandleRemoval(const MachineInstr &MI) override {
-    erasingInstr(MI);
-  }
 };
 }
 
@@ -94,15 +84,20 @@
   (void)this->TPC; // FIXME: Remove when used.
 }
 
-bool Combiner::combineMachineInstrs(MachineFunction &MF) {
+bool Combiner::combineMachineInstrs(MachineFunction &MF,
+                                    GISelCSEInfo *CSEInfo) {
   // If the ISel pipeline failed, do not bother running this pass.
   // FIXME: Should this be here or in individual combiner passes.
   if (MF.getProperties().hasProperty(
           MachineFunctionProperties::Property::FailedISel))
     return false;
 
+  Builder =
+      CSEInfo ? make_unique<CSEMIRBuilder>() : make_unique<MachineIRBuilder>();
   MRI = &MF.getRegInfo();
-  Builder.setMF(MF);
+  Builder->setMF(MF);
+  if (CSEInfo)
+    Builder->setCSEInfo(CSEInfo);
 
   LLVM_DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n');
 
@@ -110,14 +105,19 @@
 
   bool MFChanged = false;
   bool Changed;
+  MachineIRBuilder &B = *Builder.get();
 
   do {
     // Collect all instructions. Do a post order traversal for basic blocks and
     // insert with list bottom up, so while we pop_back_val, we'll traverse top
     // down RPOT.
     Changed = false;
-    GISelWorkList<512> WorkList(&MF);
-    WorkListMaintainer Observer(MF, WorkList);
+    GISelWorkList<512> WorkList;
+    WorkListMaintainer Observer(WorkList);
+    GISelObserverWrapper WrapperObserver(&Observer);
+    if (CSEInfo)
+      WrapperObserver.addObserver(CSEInfo);
+    RAIIDelegateInstaller DelInstall(MF, &WrapperObserver);
     for (MachineBasicBlock *MBB : post_order(&MF)) {
       if (MBB->empty())
         continue;
@@ -137,7 +137,7 @@
     while (!WorkList.empty()) {
       MachineInstr *CurrInst = WorkList.pop_back_val();
       LLVM_DEBUG(dbgs() << "\nTry combining " << *CurrInst;);
-      Changed |= CInfo.combine(Observer, *CurrInst, Builder);
+      Changed |= CInfo.combine(WrapperObserver, *CurrInst, B);
       Observer.reportFullyCreatedInstrs();
     }
     MFChanged |= Changed;
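
A sketch of the updated call site, assuming C is a Combiner instance and
CSEInfo is a GISelCSEInfo pointer obtained from the wrapper pass (or nullptr):

    // With a non-null CSEInfo the combiner builds through a CSEMIRBuilder and
    // keeps the analysis current via the wrapper observer; nullptr keeps the
    // previous plain MachineIRBuilder behaviour.
    bool Changed = C.combineMachineInstrs(MF, CSEInfo);
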
diff --git a/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp b/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
index 993a919..c693acb 100644
--- a/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
+++ b/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
@@ -29,3 +29,12 @@
     changedInstr(*ChangedMI);
 }
 
+RAIIDelegateInstaller::RAIIDelegateInstaller(MachineFunction &MF,
+                                             MachineFunction::Delegate *Del)
+    : MF(MF), Delegate(Del) {
+  // Register this as the delegate for handling insertions and deletions of
+  // instructions.
+  MF.setDelegate(Del);
+}
+
+RAIIDelegateInstaller::~RAIIDelegateInstaller() { MF.resetDelegate(Delegate); }
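
A usage sketch mirroring how the passes below wire this up (MF is the current
MachineFunction, WorkListObserver any GISelChangeObserver, CSEInfo optional):

    {
      GISelObserverWrapper WrapperObserver(&WorkListObserver);
      // Fan insert/erase/change notifications out to every observer.
      if (CSEInfo)
        WrapperObserver.addObserver(CSEInfo);
      RAIIDelegateInstaller DelInstall(MF, &WrapperObserver); // setDelegate
      // ... create, erase and mutate instructions here ...
    } // ~RAIIDelegateInstaller calls MF.resetDelegate(&WrapperObserver)
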
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 5098e15..95f6274 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -19,6 +19,7 @@
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -75,11 +76,16 @@
 
 using namespace llvm;
 
+static cl::opt<bool>
+    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
+                            cl::desc("Should enable CSE in irtranslator"),
+                            cl::Optional, cl::init(false));
 char IRTranslator::ID = 0;
 
 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                 false, false)
 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                 false, false)
 
@@ -105,20 +111,24 @@
 }
 
 #ifndef NDEBUG
+namespace {
 /// Verify that every instruction created has the same DILocation as the
 /// instruction being translated.
-class DILocationVerifier : MachineFunction::Delegate {
-  MachineFunction &MF;
+class DILocationVerifier : public GISelChangeObserver {
   const Instruction *CurrInst = nullptr;
 
 public:
-  DILocationVerifier(MachineFunction &MF) : MF(MF) { MF.setDelegate(this); }
-  ~DILocationVerifier() { MF.resetDelegate(this); }
+  DILocationVerifier() = default;
+  ~DILocationVerifier() = default;
 
   const Instruction *getCurrentInst() const { return CurrInst; }
   void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
 
-  void MF_HandleInsertion(const MachineInstr &MI) override {
+  void erasingInstr(MachineInstr &MI) override {}
+  void changingInstr(MachineInstr &MI) override {}
+  void changedInstr(MachineInstr &MI) override {}
+
+  void createdInstr(MachineInstr &MI) override {
     assert(getCurrentInst() && "Inserted instruction without a current MI");
 
     // Only print the check message if we're actually checking it.
@@ -129,14 +139,15 @@
     assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
            "Line info was not transferred to all instructions");
   }
-  void MF_HandleRemoval(const MachineInstr &MI) override {}
 };
+} // namespace
 #endif // ifndef NDEBUG
 
 
 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<StackProtector>();
   AU.addRequired<TargetPassConfig>();
+  AU.addRequired<GISelCSEAnalysisWrapperPass>();
   getSelectionDAGFallbackAnalysisUsage(AU);
   MachineFunctionPass::getAnalysisUsage(AU);
 }
@@ -354,8 +365,10 @@
   else if (Pred == CmpInst::FCMP_TRUE)
     MIRBuilder.buildCopy(
         Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
-  else
-    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
+  else {
+    auto FCmp = MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
+    FCmp->copyIRFlags(*CI);
+  }
 
   return true;
 }
@@ -588,8 +601,15 @@
   ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
   ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
 
-  for (unsigned i = 0; i < ResRegs.size(); ++i)
-    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
+  const SelectInst &SI = cast<SelectInst>(U);
+  const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition());
+  for (unsigned i = 0; i < ResRegs.size(); ++i) {
+    auto Select =
+        MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
+    if (Cmp && isa<FPMathOperator>(Cmp)) {
+      Select->copyIRFlags(*Cmp);
+    }
+  }
 
   return true;
 }
@@ -869,42 +889,56 @@
     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
   case Intrinsic::smul_with_overflow:
     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
-  case Intrinsic::pow:
-    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
+  case Intrinsic::pow: {
+    auto Pow = MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
         .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
+    Pow->copyIRFlags(CI);
     return true;
-  case Intrinsic::exp:
-    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
+  }
+  case Intrinsic::exp: {
+    auto Exp = MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    Exp->copyIRFlags(CI);
     return true;
-  case Intrinsic::exp2:
-    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
+  }
+  case Intrinsic::exp2: {
+    auto Exp2 = MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    Exp2->copyIRFlags(CI);
     return true;
-  case Intrinsic::log:
-    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
+  }
+  case Intrinsic::log: {
+    auto Log = MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    Log->copyIRFlags(CI);
     return true;
-  case Intrinsic::log2:
-    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
+  }
+  case Intrinsic::log2: {
+    auto Log2 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    Log2->copyIRFlags(CI);
     return true;
-  case Intrinsic::log10:
-    MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
+  }
+  case Intrinsic::log10: {
+    auto Log10 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    Log10->copyIRFlags(CI);
     return true;
-  case Intrinsic::fabs:
-    MIRBuilder.buildInstr(TargetOpcode::G_FABS)
+  }
+  case Intrinsic::fabs: {
+    auto Fabs = MIRBuilder.buildInstr(TargetOpcode::G_FABS)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    Fabs->copyIRFlags(CI);
     return true;
+  }
   case Intrinsic::trunc:
     MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
         .addDef(getOrCreateVReg(CI))
@@ -915,13 +949,15 @@
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
     return true;
-  case Intrinsic::fma:
-    MIRBuilder.buildInstr(TargetOpcode::G_FMA)
+  case Intrinsic::fma: {
+    auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA)
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
         .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
         .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
+    FMA->copyIRFlags(CI);
     return true;
+  }
   case Intrinsic::fmuladd: {
     const TargetMachine &TM = MF->getTarget();
     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
@@ -933,11 +969,14 @@
         TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
       // TODO: Revisit this to see if we should move this part of the
       // lowering to the combiner.
-      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
+      auto FMA =
+          MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
+      FMA->copyIRFlags(CI);
     } else {
       LLT Ty = getLLTForType(*CI.getType(), *DL);
       auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1});
-      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
+      FMul->copyIRFlags(CI);
+      auto FAdd =
+          MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
+      FAdd->copyIRFlags(CI);
     }
     return true;
   }
@@ -1012,6 +1051,11 @@
   }
   case Intrinsic::invariant_end:
     return true;
+  case Intrinsic::ceil:
+    MIRBuilder.buildInstr(TargetOpcode::G_FCEIL)
+        .addDef(getOrCreateVReg(CI))
+        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    return true;
   }
   return false;
 }
@@ -1518,12 +1562,14 @@
 
 void IRTranslator::finishPendingPhis() {
 #ifndef NDEBUG
-  DILocationVerifier Verifier(*MF);
+  DILocationVerifier Verifier;
+  GISelObserverWrapper WrapperObserver(&Verifier);
+  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
 #endif // ifndef NDEBUG
   for (auto &Phi : PendingPHIs) {
     const PHINode *PI = Phi.first;
     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
-    EntryBuilder.setDebugLoc(PI->getDebugLoc());
+    EntryBuilder->setDebugLoc(PI->getDebugLoc());
 #ifndef NDEBUG
     Verifier.setCurrentInst(PI);
 #endif // ifndef NDEBUG
@@ -1564,11 +1610,12 @@
 }
 
 bool IRTranslator::translate(const Instruction &Inst) {
-  CurBuilder.setDebugLoc(Inst.getDebugLoc());
-  EntryBuilder.setDebugLoc(Inst.getDebugLoc());
+  CurBuilder->setDebugLoc(Inst.getDebugLoc());
+  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
   switch(Inst.getOpcode()) {
-#define HANDLE_INST(NUM, OPCODE, CLASS) \
-    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
+#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
+  case Instruction::OPCODE:                                                    \
+    return translate##OPCODE(Inst, *CurBuilder.get());
 #include "llvm/IR/Instruction.def"
   default:
     return false;
@@ -1577,11 +1624,11 @@
 
 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
   if (auto CI = dyn_cast<ConstantInt>(&C))
-    EntryBuilder.buildConstant(Reg, *CI);
+    EntryBuilder->buildConstant(Reg, *CI);
   else if (auto CF = dyn_cast<ConstantFP>(&C))
-    EntryBuilder.buildFConstant(Reg, *CF);
+    EntryBuilder->buildFConstant(Reg, *CF);
   else if (isa<UndefValue>(C))
-    EntryBuilder.buildUndef(Reg);
+    EntryBuilder->buildUndef(Reg);
   else if (isa<ConstantPointerNull>(C)) {
     // As we are trying to build a constant val of 0 into a pointer,
     // insert a cast to make them correct with respect to types.
@@ -1589,9 +1636,9 @@
     auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
     auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
     unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
-    EntryBuilder.buildCast(Reg, ZeroReg);
+    EntryBuilder->buildCast(Reg, ZeroReg);
   } else if (auto GV = dyn_cast<GlobalValue>(&C))
-    EntryBuilder.buildGlobalValue(Reg, GV);
+    EntryBuilder->buildGlobalValue(Reg, GV);
   else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
     if (!CAZ->getType()->isVectorTy())
       return false;
@@ -1603,7 +1650,7 @@
       Constant &Elt = *CAZ->getElementValue(i);
       Ops.push_back(getOrCreateVReg(Elt));
     }
-    EntryBuilder.buildBuildVector(Reg, Ops);
+    EntryBuilder->buildBuildVector(Reg, Ops);
   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
     // Return the scalar if it is a <1 x Ty> vector.
     if (CV->getNumElements() == 1)
@@ -1613,11 +1660,12 @@
       Constant &Elt = *CV->getElementAsConstant(i);
       Ops.push_back(getOrCreateVReg(Elt));
     }
-    EntryBuilder.buildBuildVector(Reg, Ops);
+    EntryBuilder->buildBuildVector(Reg, Ops);
   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
     switch(CE->getOpcode()) {
-#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
-      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
+#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
+  case Instruction::OPCODE:                                                    \
+    return translate##OPCODE(*CE, *EntryBuilder.get());
 #include "llvm/IR/Instruction.def"
     default:
       return false;
@@ -1629,9 +1677,9 @@
     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
     }
-    EntryBuilder.buildBuildVector(Reg, Ops);
+    EntryBuilder->buildBuildVector(Reg, Ops);
   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
-    EntryBuilder.buildBlockAddress(Reg, BA);
+    EntryBuilder->buildBlockAddress(Reg, BA);
   } else
     return false;
 
@@ -1648,8 +1696,8 @@
   // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
   // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
   // destroying it twice (in ~IRTranslator() and ~LLVMContext())
-  EntryBuilder = MachineIRBuilder();
-  CurBuilder = MachineIRBuilder();
+  EntryBuilder.reset();
+  CurBuilder.reset();
 }
 
 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
@@ -1657,12 +1705,30 @@
   const Function &F = MF->getFunction();
   if (F.empty())
     return false;
+  GISelCSEAnalysisWrapper &Wrapper =
+      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
+  // Set the CSEConfig and run the analysis.
+  GISelCSEInfo *CSEInfo = nullptr;
+  TPC = &getAnalysis<TargetPassConfig>();
+  bool IsO0 = TPC->getOptLevel() == CodeGenOpt::Level::None;
+  // Disable CSE for O0.
+  bool EnableCSE = !IsO0 && EnableCSEInIRTranslator;
+  if (EnableCSE) {
+    EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
+    std::unique_ptr<CSEConfig> Config = make_unique<CSEConfig>();
+    CSEInfo = &Wrapper.get(std::move(Config));
+    EntryBuilder->setCSEInfo(CSEInfo);
+    CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
+    CurBuilder->setCSEInfo(CSEInfo);
+  } else {
+    EntryBuilder = make_unique<MachineIRBuilder>();
+    CurBuilder = make_unique<MachineIRBuilder>();
+  }
   CLI = MF->getSubtarget().getCallLowering();
-  CurBuilder.setMF(*MF);
-  EntryBuilder.setMF(*MF);
+  CurBuilder->setMF(*MF);
+  EntryBuilder->setMF(*MF);
   MRI = &MF->getRegInfo();
   DL = &F.getParent()->getDataLayout();
-  TPC = &getAnalysis<TargetPassConfig>();
   ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
 
   assert(PendingPHIs.empty() && "stale PHIs");
@@ -1681,7 +1747,7 @@
   // Setup a separate basic-block for the arguments and constants
   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
   MF->push_back(EntryBB);
-  EntryBuilder.setMBB(*EntryBB);
+  EntryBuilder->setMBB(*EntryBB);
 
   // Create all blocks, in IR order, to preserve the layout.
   for (const BasicBlock &BB: F) {
@@ -1718,7 +1784,7 @@
     }
   }
 
-  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
+  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                F.getSubprogram(), &F.getEntryBlock());
     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
@@ -1735,22 +1801,27 @@
       assert(VRegs.empty() && "VRegs already populated?");
       VRegs.push_back(VArg);
     } else {
-      unpackRegs(*ArgIt, VArg, EntryBuilder);
+      unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
     }
     ArgIt++;
   }
 
   // Need to visit defs before uses when translating instructions.
+  GISelObserverWrapper WrapperObserver;
+  if (EnableCSE && CSEInfo)
+    WrapperObserver.addObserver(CSEInfo);
   {
     ReversePostOrderTraversal<const Function *> RPOT(&F);
 #ifndef NDEBUG
-    DILocationVerifier Verifier(*MF);
+    DILocationVerifier Verifier;
+    WrapperObserver.addObserver(&Verifier);
 #endif // ifndef NDEBUG
+    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
     for (const BasicBlock *BB : RPOT) {
       MachineBasicBlock &MBB = getMBB(*BB);
       // Set the insertion point of all the following translations to
       // the end of this basic block.
-      CurBuilder.setMBB(MBB);
+      CurBuilder->setMBB(MBB);
 
       for (const Instruction &Inst : *BB) {
 #ifndef NDEBUG
@@ -1775,6 +1846,9 @@
         return false;
       }
     }
+#ifndef NDEBUG
+    WrapperObserver.removeObserver(&Verifier);
+#endif
   }
 
   finishPendingPhis();
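
The repeated pattern in the hunks above is worth stating once in isolation:
after building a generic instruction, the translator now mirrors the IR
instruction's flags onto it, so fast-math flags survive into MIR. A condensed
sketch, with MIRBuilder, Res, Op0, Op1 and the IR instruction I standing in
for the values at each call site:

    // For e.g. '%r = fadd fast float %a, %b' the G_FADD now carries 'fast'.
    auto FAdd = MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {Op0, Op1});
    FAdd->copyIRFlags(I); // copies FMF (and nuw/nsw/exact where applicable)
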
diff --git a/lib/CodeGen/GlobalISel/Legalizer.cpp b/lib/CodeGen/GlobalISel/Legalizer.cpp
index df2dcac..84131e5 100644
--- a/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -16,6 +16,8 @@
 #include "llvm/CodeGen/GlobalISel/Legalizer.h"
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
 #include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h"
@@ -33,11 +35,17 @@
 
 using namespace llvm;
 
+static cl::opt<bool>
+    EnableCSEInLegalizer("enable-cse-in-legalizer",
+                         cl::desc("Should enable CSE in Legalizer"),
+                         cl::Optional, cl::init(false));
+
 char Legalizer::ID = 0;
 INITIALIZE_PASS_BEGIN(Legalizer, DEBUG_TYPE,
                       "Legalize the Machine IR a function's Machine IR", false,
                       false)
 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
 INITIALIZE_PASS_END(Legalizer, DEBUG_TYPE,
                     "Legalize the Machine IR a function's Machine IR", false,
                     false)
@@ -48,6 +56,8 @@
 
 void Legalizer::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<TargetPassConfig>();
+  AU.addRequired<GISelCSEAnalysisWrapperPass>();
+  AU.addPreserved<GISelCSEAnalysisWrapperPass>();
   getSelectionDAGFallbackAnalysisUsage(AU);
   MachineFunctionPass::getAnalysisUsage(AU);
 }
@@ -73,6 +83,7 @@
 using InstListTy = GISelWorkList<256>;
 using ArtifactListTy = GISelWorkList<128>;
 
+namespace {
 class LegalizerWorkListManager : public GISelChangeObserver {
   InstListTy &InstList;
   ArtifactListTy &ArtifactList;
@@ -81,7 +92,7 @@
   LegalizerWorkListManager(InstListTy &Insts, ArtifactListTy &Arts)
       : InstList(Insts), ArtifactList(Arts) {}
 
-  void createdInstr(const MachineInstr &MI) override {
+  void createdInstr(MachineInstr &MI) override {
     // Only legalize pre-isel generic instructions.
     // Legalization process could generate Target specific pseudo
     // instructions with generic types. Don't record them
@@ -94,23 +105,24 @@
     LLVM_DEBUG(dbgs() << ".. .. New MI: " << MI);
   }
 
-  void erasingInstr(const MachineInstr &MI) override {
+  void erasingInstr(MachineInstr &MI) override {
     LLVM_DEBUG(dbgs() << ".. .. Erasing: " << MI);
     InstList.remove(&MI);
     ArtifactList.remove(&MI);
   }
 
-  void changingInstr(const MachineInstr &MI) override {
+  void changingInstr(MachineInstr &MI) override {
     LLVM_DEBUG(dbgs() << ".. .. Changing MI: " << MI);
   }
 
-  void changedInstr(const MachineInstr &MI) override {
+  void changedInstr(MachineInstr &MI) override {
     // When insts change, we want to revisit them to legalize them again.
     // We'll consider them the same as created.
     LLVM_DEBUG(dbgs() << ".. .. Changed MI: " << MI);
     createdInstr(MI);
   }
 };
+} // namespace
 
 bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
   // If the ISel pipeline failed, do not bother running that pass.
@@ -120,14 +132,16 @@
   LLVM_DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n');
   init(MF);
   const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
+  GISelCSEAnalysisWrapper &Wrapper =
+      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
   MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
 
   const size_t NumBlocks = MF.size();
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
   // Populate Insts
-  InstListTy InstList(&MF);
-  ArtifactListTy ArtifactList(&MF);
+  InstListTy InstList;
+  ArtifactListTy ArtifactList;
   ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
   // Perform legalization bottom up so we can DCE as we legalize.
   // Traverse BB in RPOT and within each basic block, add insts top down,
@@ -146,12 +160,34 @@
         InstList.insert(&MI);
     }
   }
+  std::unique_ptr<MachineIRBuilder> MIRBuilder;
+  GISelCSEInfo *CSEInfo = nullptr;
+  bool IsO0 = TPC.getOptLevel() == CodeGenOpt::Level::None;
+  // Disable CSE for O0.
+  bool EnableCSE = !IsO0 && EnableCSEInLegalizer;
+  if (EnableCSE) {
+    MIRBuilder = make_unique<CSEMIRBuilder>();
+    std::unique_ptr<CSEConfig> Config = make_unique<CSEConfig>();
+    CSEInfo = &Wrapper.get(std::move(Config));
+    MIRBuilder->setCSEInfo(CSEInfo);
+  } else
+    MIRBuilder = make_unique<MachineIRBuilder>();
+  // This observer keeps the worklist updated.
   LegalizerWorkListManager WorkListObserver(InstList, ArtifactList);
-  LegalizerHelper Helper(MF, WorkListObserver);
+  // We want both the WorkListObserver and the CSEInfo to observe all changes.
+  // Use the wrapper observer.
+  GISelObserverWrapper WrapperObserver(&WorkListObserver);
+  if (EnableCSE && CSEInfo)
+    WrapperObserver.addObserver(CSEInfo);
+  // Now install the observer as the delegate to MF.
+  // This will keep all the observers notified about new insertions/deletions.
+  RAIIDelegateInstaller DelInstall(MF, &WrapperObserver);
+  LegalizerHelper Helper(MF, WrapperObserver, *MIRBuilder.get());
   const LegalizerInfo &LInfo(Helper.getLegalizerInfo());
-  LegalizationArtifactCombiner ArtCombiner(Helper.MIRBuilder, MF.getRegInfo(), LInfo);
-  auto RemoveDeadInstFromLists = [&WorkListObserver](MachineInstr *DeadMI) {
-    WorkListObserver.erasingInstr(*DeadMI);
+  LegalizationArtifactCombiner ArtCombiner(*MIRBuilder.get(), MF.getRegInfo(),
+                                           LInfo);
+  auto RemoveDeadInstFromLists = [&WrapperObserver](MachineInstr *DeadMI) {
+    WrapperObserver.erasingInstr(*DeadMI);
   };
   bool Changed = false;
   do {
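
Since LegalizerHelper now borrows its builder instead of owning one, the
caller decides up front between a plain MachineIRBuilder and a CSEMIRBuilder.
A minimal sketch of driving the helper directly, assuming MF, Observer, MI and
an optional CSEInfo are in scope:

    auto Builder = make_unique<CSEMIRBuilder>(); // or MachineIRBuilder
    if (CSEInfo)
      Builder->setCSEInfo(CSEInfo);
    LegalizerHelper Helper(MF, Observer, *Builder);

    // Legalize a single instruction and report per-step progress.
    auto Result = Helper.legalizeInstrStep(MI);
    if (Result == LegalizerHelper::UnableToLegalize)
      report_fatal_error("unable to legalize instruction");
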
diff --git a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 890ef52..b3fc94c 100644
--- a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -31,16 +31,18 @@
 using namespace LegalizeActions;
 
 LegalizerHelper::LegalizerHelper(MachineFunction &MF,
-                                 GISelChangeObserver &Observer)
-    : MRI(MF.getRegInfo()), LI(*MF.getSubtarget().getLegalizerInfo()),
-      Observer(Observer) {
+                                 GISelChangeObserver &Observer,
+                                 MachineIRBuilder &Builder)
+    : MIRBuilder(Builder), MRI(MF.getRegInfo()),
+      LI(*MF.getSubtarget().getLegalizerInfo()), Observer(Observer) {
   MIRBuilder.setMF(MF);
   MIRBuilder.setChangeObserver(Observer);
 }
 
 LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
-                                 GISelChangeObserver &Observer)
-    : MRI(MF.getRegInfo()), LI(LI), Observer(Observer) {
+                                 GISelChangeObserver &Observer,
+                                 MachineIRBuilder &B)
+    : MIRBuilder(B), MRI(MF.getRegInfo()), LI(LI), Observer(Observer) {
   MIRBuilder.setMF(MF);
   MIRBuilder.setChangeObserver(Observer);
 }
@@ -88,17 +90,17 @@
 static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
   switch (Opcode) {
   case TargetOpcode::G_SDIV:
-    assert(Size == 32 && "Unsupported size");
-    return RTLIB::SDIV_I32;
+    assert((Size == 32 || Size == 64) && "Unsupported size");
+    return Size == 64 ? RTLIB::SDIV_I64 : RTLIB::SDIV_I32;
   case TargetOpcode::G_UDIV:
-    assert(Size == 32 && "Unsupported size");
-    return RTLIB::UDIV_I32;
+    assert((Size == 32 || Size == 64) && "Unsupported size");
+    return Size == 64 ? RTLIB::UDIV_I64 : RTLIB::UDIV_I32;
   case TargetOpcode::G_SREM:
-    assert(Size == 32 && "Unsupported size");
-    return RTLIB::SREM_I32;
+    assert((Size == 32 || Size == 64) && "Unsupported size");
+    return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32;
   case TargetOpcode::G_UREM:
-    assert(Size == 32 && "Unsupported size");
-    return RTLIB::UREM_I32;
+    assert((Size == 32 || Size == 64) && "Unsupported size");
+    return Size == 64 ? RTLIB::UREM_I64 : RTLIB::UREM_I32;
   case TargetOpcode::G_CTLZ_ZERO_UNDEF:
     assert(Size == 32 && "Unsupported size");
     return RTLIB::CTLZ_I32;
@@ -200,7 +202,7 @@
   case TargetOpcode::G_SREM:
   case TargetOpcode::G_UREM:
   case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
-    Type *HLTy = Type::getInt32Ty(Ctx);
+    Type *HLTy = IntegerType::get(Ctx, Size);
     auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
     if (Status != Legalized)
       return Status;
@@ -578,7 +580,9 @@
     MI.eraseFromParent();
     return Legalized;
   }
-  case TargetOpcode::G_OR: {
+  case TargetOpcode::G_AND:
+  case TargetOpcode::G_OR:
+  case TargetOpcode::G_XOR: {
     // Legalize bitwise operation:
     // A = BinOp<Ty> B, C
     // into:
@@ -617,7 +621,8 @@
 
     // Do the operation on each small part.
     for (int i = 0; i < NumParts; ++i)
-      MIRBuilder.buildOr(DstRegs[i], SrcsReg1[i], SrcsReg2[i]);
+      MIRBuilder.buildInstr(MI.getOpcode(), {DstRegs[i]},
+                            {SrcsReg1[i], SrcsReg2[i]});
 
     // Gather the destination registers into the final destination.
     unsigned DstReg = MI.getOperand(0).getReg();
@@ -774,15 +779,18 @@
     return Legalized;
 
   case TargetOpcode::G_SELECT:
-    if (TypeIdx != 0)
-      return UnableToLegalize;
-    // Perform operation at larger width (any extension is fine here, high bits
-    // don't affect the result) and then truncate the result back to the
-    // original type.
     Observer.changingInstr(MI);
-    widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
-    widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
-    widenScalarDst(MI, WideTy);
+    if (TypeIdx == 0) {
+      // Perform operation at larger width (any extension is fine here, high
+      // bits don't affect the result) and then truncate the result back to the
+      // original type.
+      widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
+      widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
+      widenScalarDst(MI, WideTy);
+    } else {
+      // Explicit extension is required here since high bits affect the result.
+      widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
+    }
     Observer.changedInstr(MI);
     return Legalized;
 
@@ -878,6 +886,12 @@
     Observer.changedInstr(MI);
     return Legalized;
   }
+  case TargetOpcode::G_IMPLICIT_DEF: {
+    Observer.changingInstr(MI);
+    widenScalarDst(MI, WideTy);
+    Observer.changedInstr(MI);
+    return Legalized;
+  }
   case TargetOpcode::G_BRCOND:
     Observer.changingInstr(MI);
     widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ANYEXT);
@@ -940,6 +954,15 @@
     widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
     Observer.changedInstr(MI);
     return Legalized;
+
+  case TargetOpcode::G_FCEIL:
+    if (TypeIdx != 0)
+      return UnableToLegalize;
+    Observer.changingInstr(MI);
+    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT);
+    widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
+    Observer.changedInstr(MI);
+    return Legalized;
   }
 }
 
@@ -1119,6 +1142,24 @@
   case TargetOpcode::G_CTTZ:
   case TargetOpcode::G_CTPOP:
     return lowerBitCount(MI, TypeIdx, Ty);
+  case TargetOpcode::G_UADDE: {
+    unsigned Res = MI.getOperand(0).getReg();
+    unsigned CarryOut = MI.getOperand(1).getReg();
+    unsigned LHS = MI.getOperand(2).getReg();
+    unsigned RHS = MI.getOperand(3).getReg();
+    unsigned CarryIn = MI.getOperand(4).getReg();
+
+    unsigned TmpRes = MRI.createGenericVirtualRegister(Ty);
+    unsigned ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
+
+    MIRBuilder.buildAdd(TmpRes, LHS, RHS);
+    MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
+    MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn);
+    MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS);
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
   }
 }
 
@@ -1133,6 +1174,32 @@
   switch (MI.getOpcode()) {
   default:
     return UnableToLegalize;
+  case TargetOpcode::G_IMPLICIT_DEF: {
+    SmallVector<unsigned, 2> DstRegs;
+
+    unsigned NarrowSize = NarrowTy.getSizeInBits();
+    unsigned DstReg = MI.getOperand(0).getReg();
+    unsigned Size = MRI.getType(DstReg).getSizeInBits();
+    int NumParts = Size / NarrowSize;
+    // FIXME: Don't know how to handle the situation where the small vectors
+    // aren't all the same size yet.
+    if (Size % NarrowSize != 0)
+      return UnableToLegalize;
+
+    for (int i = 0; i < NumParts; ++i) {
+      unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+      MIRBuilder.buildUndef(TmpReg);
+      DstRegs.push_back(TmpReg);
+    }
+
+    if (NarrowTy.isVector())
+      MIRBuilder.buildConcatVectors(DstReg, DstRegs);
+    else
+      MIRBuilder.buildBuildVector(DstReg, DstRegs);
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
   case TargetOpcode::G_ADD: {
     unsigned NarrowSize = NarrowTy.getSizeInBits();
     unsigned DstReg = MI.getOperand(0).getReg();
diff --git a/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 926af3f..fa36ede 100644
--- a/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -585,7 +585,7 @@
     for (const MachineBasicBlock &MBB : MF)
       for (const MachineInstr &MI : MBB)
         if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI))
-	  return &MI;
+          return &MI;
   }
   return nullptr;
 }
diff --git a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index c1109e6..1f56110 100644
--- a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -46,6 +46,8 @@
   State.II = MI.getIterator();
 }
 
+void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }
+
 void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator II) {
   assert(MBB.getParent() == &getMF() &&
diff --git a/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index 6bb48dc..dcc8b7c 100644
--- a/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -115,8 +115,8 @@
   // By default we assume we will have to repair something.
   OnlyAssign = false;
   // Each part of a break down needs to end up in a different register.
-  // In other word, Reg assignement does not match.
-  if (ValMapping.NumBreakDowns > 1)
+  // In other words, the Reg assignment does not match.
+  if (ValMapping.NumBreakDowns != 1)
     return false;
 
   const RegisterBank *CurRegBank = RBI->getRegBank(Reg, *MRI, *TRI);
@@ -528,7 +528,7 @@
 bool RegBankSelect::applyMapping(
     MachineInstr &MI, const RegisterBankInfo::InstructionMapping &InstrMapping,
     SmallVectorImpl<RegBankSelect::RepairingPlacement> &RepairPts) {
-  // OpdMapper will hold all the information needed for the rewritting.
+  // OpdMapper will hold all the information needed for the rewriting.
   RegisterBankInfo::OperandsMapper OpdMapper(MI, InstrMapping, *MRI);
 
   // First, place the repairing code.
@@ -714,18 +714,23 @@
     // - Terminators must be the last instructions:
     //   * Before, move the insert point before the first terminator.
     //   * After, we have to split the outcoming edges.
-    unsigned Reg = MO.getReg();
     if (Before) {
       // Check whether Reg is defined by any terminator.
-      MachineBasicBlock::iterator It = MI;
-      for (auto Begin = MI.getParent()->begin();
-           --It != Begin && It->isTerminator();)
-        if (It->modifiesRegister(Reg, &TRI)) {
-          // Insert the repairing code right after the definition.
-          addInsertPoint(*It, /*Before*/ false);
-          return;
-        }
-      addInsertPoint(*It, /*Before*/ true);
+      MachineBasicBlock::reverse_iterator It = MI;
+      auto REnd = MI.getParent()->rend();
+
+      for (; It != REnd && It->isTerminator(); ++It) {
+        assert(!It->modifiesRegister(MO.getReg(), &TRI) &&
+               "copy insertion in middle of terminators not handled");
+      }
+
+      if (It == REnd) {
+        addInsertPoint(*MI.getParent()->begin(), /*Before*/ true);
+        return;
+      }
+
+      // We are sure to be right before the first terminator.
+      addInsertPoint(*It, /*Before*/ false);
       return;
     }
     // Make sure Reg is not redefined by other terminators, otherwise
@@ -733,7 +738,8 @@
     for (MachineBasicBlock::iterator It = MI, End = MI.getParent()->end();
          ++It != End;)
       // The machine verifier should reject this kind of code.
-      assert(It->modifiesRegister(Reg, &TRI) && "Do not know where to split");
+      assert(!It->modifiesRegister(MO.getReg(), &TRI) &&
+             "Do not know where to split");
     // Split each outcoming edges.
     MachineBasicBlock &Src = *MI.getParent();
     for (auto &Succ : Src.successors())
diff --git a/lib/CodeGen/GlobalISel/Utils.cpp b/lib/CodeGen/GlobalISel/Utils.cpp
index 4d3a375..59cbf93 100644
--- a/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/lib/CodeGen/GlobalISel/Utils.cpp
@@ -235,6 +235,57 @@
   return APF;
 }
 
+Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
+                                        const unsigned Op2,
+                                        const MachineRegisterInfo &MRI) {
+  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
+  if (MaybeOp1Cst && MaybeOp2Cst) {
+    LLT Ty = MRI.getType(Op1);
+    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
+    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
+    switch (Opcode) {
+    default:
+      break;
+    case TargetOpcode::G_ADD:
+      return C1 + C2;
+    case TargetOpcode::G_AND:
+      return C1 & C2;
+    case TargetOpcode::G_ASHR:
+      return C1.ashr(C2);
+    case TargetOpcode::G_LSHR:
+      return C1.lshr(C2);
+    case TargetOpcode::G_MUL:
+      return C1 * C2;
+    case TargetOpcode::G_OR:
+      return C1 | C2;
+    case TargetOpcode::G_SHL:
+      return C1 << C2;
+    case TargetOpcode::G_SUB:
+      return C1 - C2;
+    case TargetOpcode::G_XOR:
+      return C1 ^ C2;
+    case TargetOpcode::G_UDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.udiv(C2);
+    case TargetOpcode::G_SDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.sdiv(C2);
+    case TargetOpcode::G_UREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.urem(C2);
+    case TargetOpcode::G_SREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.srem(C2);
+    }
+  }
+  return None;
+}
+
 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
   AU.addPreserved<StackProtector>();
 }
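
The folder's contract, restated for callers: it yields a value only when both
vregs are defined by G_CONSTANTs, and it deliberately returns None for a
division or remainder by zero instead of folding. A hypothetical caller
(MIRBuilder, Op1, Op2 and DstReg are illustrative names):

    if (Optional<APInt> Cst =
            ConstantFoldBinOp(TargetOpcode::G_SDIV, Op1, Op2, MRI))
      // Both inputs were constant and the divisor was non-zero.
      MIRBuilder.buildConstant(DstReg, Cst->getSExtValue());
    else
      MIRBuilder.buildInstr(TargetOpcode::G_SDIV, {DstReg}, {Op1, Op2});
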
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index 5666626..ceeba63 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -1444,7 +1444,7 @@
   Redefs.init(*TRI);
 
   if (MRI->tracksLiveness()) {
-    // Initialize liveins to the first BB. These are potentiall redefined by
+    // Initialize liveins to the first BB. These are potentially redefined by
     // predicated instructions.
     Redefs.addLiveIns(CvtMBB);
     Redefs.addLiveIns(NextMBB);
@@ -2148,7 +2148,7 @@
       // Calculate the edge probability for the edge from ToBBI.BB to Succ,
       // which is a portion of the edge probability from FromMBB to Succ. The
       // portion ratio is the edge probability from ToBBI.BB to FromMBB (if
-      // FromBBI is a successor of ToBBI.BB. See comment below for excepion).
+      // FromBBI is a successor of ToBBI.BB. See comment below for exception).
       NewProb = MBPI->getEdgeProbability(&FromMBB, Succ);
 
       // To2FromProb is 0 when FromMBB is not a successor of ToBBI.BB. This
diff --git a/lib/CodeGen/MIRParser/MILexer.cpp b/lib/CodeGen/MIRParser/MILexer.cpp
index 6d6d551..265877c 100644
--- a/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/lib/CodeGen/MIRParser/MILexer.cpp
@@ -220,6 +220,7 @@
       .Case("undefined", MIToken::kw_cfi_undefined)
       .Case("register", MIToken::kw_cfi_register)
       .Case("window_save", MIToken::kw_cfi_window_save)
+      .Case("negate_ra_sign_state", MIToken::kw_cfi_aarch64_negate_ra_sign_state)
       .Case("blockaddress", MIToken::kw_blockaddress)
       .Case("intrinsic", MIToken::kw_intrinsic)
       .Case("target-index", MIToken::kw_target_index)
diff --git a/lib/CodeGen/MIRParser/MILexer.h b/lib/CodeGen/MIRParser/MILexer.h
index a52f620..ceff7908 100644
--- a/lib/CodeGen/MIRParser/MILexer.h
+++ b/lib/CodeGen/MIRParser/MILexer.h
@@ -89,6 +89,7 @@
     kw_cfi_restore_state,
     kw_cfi_undefined,
     kw_cfi_window_save,
+    kw_cfi_aarch64_negate_ra_sign_state,
     kw_blockaddress,
     kw_intrinsic,
     kw_target_index,
diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp
index 080b945..6f2d8bb 100644
--- a/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1931,6 +1931,9 @@
   case MIToken::kw_cfi_window_save:
     CFIIndex = MF.addFrameInst(MCCFIInstruction::createWindowSave(nullptr));
     break;
+  case MIToken::kw_cfi_aarch64_negate_ra_sign_state:
+    CFIIndex = MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
+    break;
   case MIToken::kw_cfi_escape: {
     std::string Values;
     if (parseCFIEscapeValues(Values))
@@ -2223,6 +2226,7 @@
   case MIToken::kw_cfi_restore_state:
   case MIToken::kw_cfi_undefined:
   case MIToken::kw_cfi_window_save:
+  case MIToken::kw_cfi_aarch64_negate_ra_sign_state:
     return parseCFIOperand(Dest);
   case MIToken::kw_blockaddress:
     return parseBlockAddressOperand(Dest);
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index 624d336..4fee9c4 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -316,7 +316,7 @@
   /// A type for a block filter set.
   using BlockFilterSet = SmallSetVector<const MachineBasicBlock *, 16>;
 
-  /// Pair struct containing basic block and taildup profitiability
+  /// Pair struct containing basic block and taildup profitability
   struct BlockAndTailDupResult {
     MachineBasicBlock *BB;
     bool ShouldTailDup;
diff --git a/lib/CodeGen/MachineCombiner.cpp b/lib/CodeGen/MachineCombiner.cpp
index 0c6efff..f51b482 100644
--- a/lib/CodeGen/MachineCombiner.cpp
+++ b/lib/CodeGen/MachineCombiner.cpp
@@ -231,6 +231,8 @@
     // Get the first instruction that uses MO
     MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
     RI++;
+    if (RI == MRI->reg_end())
+      continue;
     MachineInstr *UseMO = RI->getParent();
     unsigned LatencyOp = 0;
     if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 3ded00b..3495319 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -139,12 +139,12 @@
   init();
 }
 
-void MachineFunction::handleInsertion(const MachineInstr &MI) {
+void MachineFunction::handleInsertion(MachineInstr &MI) {
   if (TheDelegate)
     TheDelegate->MF_HandleInsertion(MI);
 }
 
-void MachineFunction::handleRemoval(const MachineInstr &MI) {
+void MachineFunction::handleRemoval(MachineInstr &MI) {
   if (TheDelegate)
     TheDelegate->MF_HandleRemoval(MI);
 }
diff --git a/lib/CodeGen/MachineOperand.cpp b/lib/CodeGen/MachineOperand.cpp
index 4fe51f6..05e51e1 100644
--- a/lib/CodeGen/MachineOperand.cpp
+++ b/lib/CodeGen/MachineOperand.cpp
@@ -697,6 +697,11 @@
     if (MCSymbol *Label = CFI.getLabel())
       MachineOperand::printSymbol(OS, *Label);
     break;
+  case MCCFIInstruction::OpNegateRAState:
+    OS << "negate_ra_sign_state ";
+    if (MCSymbol *Label = CFI.getLabel())
+      MachineOperand::printSymbol(OS, *Label);
+    break;
   default:
     // TODO: Print the other CFI Operations.
     OS << "<unserializable cfi directive>";
diff --git a/lib/CodeGen/MachinePipeliner.cpp b/lib/CodeGen/MachinePipeliner.cpp
index de8c555..4d451bd 100644
--- a/lib/CodeGen/MachinePipeliner.cpp
+++ b/lib/CodeGen/MachinePipeliner.cpp
@@ -9,34 +9,6 @@
 //
 // An implementation of the Swing Modulo Scheduling (SMS) software pipeliner.
 //
-// Software pipelining (SWP) is an instruction scheduling technique for loops
-// that overlap loop iterations and exploits ILP via a compiler transformation.
-//
-// Swing Modulo Scheduling is an implementation of software pipelining
-// that generates schedules that are near optimal in terms of initiation
-// interval, register requirements, and stage count. See the papers:
-//
-// "Swing Modulo Scheduling: A Lifetime-Sensitive Approach", by J. Llosa,
-// A. Gonzalez, E. Ayguade, and M. Valero. In PACT '96 Proceedings of the 1996
-// Conference on Parallel Architectures and Compilation Techiniques.
-//
-// "Lifetime-Sensitive Modulo Scheduling in a Production Environment", by J.
-// Llosa, E. Ayguade, A. Gonzalez, M. Valero, and J. Eckhardt. In IEEE
-// Transactions on Computers, Vol. 50, No. 3, 2001.
-//
-// "An Implementation of Swing Modulo Scheduling With Extensions for
-// Superblocks", by T. Lattner, Master's Thesis, University of Illinois at
-// Urbana-Chambpain, 2005.
-//
-//
-// The SMS algorithm consists of three main steps after computing the minimal
-// initiation interval (MII).
-// 1) Analyze the dependence graph and compute information about each
-//    instruction in the graph.
-// 2) Order the nodes (instructions) by priority based upon the heuristics
-//    described in the algorithm.
-// 3) Attempt to schedule the nodes in the specified order using the MII.
-//
 // This SMS implementation is a target-independent back-end pass. When enabled,
 // the pass runs just prior to the register allocation pass, while the machine
 // IR is in SSA form. If software pipelining is successful, then the original
@@ -83,13 +55,11 @@
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachinePipeliner.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterClassInfo.h"
 #include "llvm/CodeGen/RegisterPressure.h"
 #include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/CodeGen/ScheduleDAGInstrs.h"
 #include "llvm/CodeGen/ScheduleDAGMutation.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
@@ -171,575 +141,15 @@
                                      cl::ReallyHidden, cl::init(false),
                                      cl::ZeroOrMore, cl::desc("Ignore RecMII"));
 
+namespace llvm {
+
 // A command line option to enable the CopyToPhi DAG mutation.
-static cl::opt<bool>
+cl::opt<bool>
     SwpEnableCopyToPhi("pipeliner-enable-copytophi", cl::ReallyHidden,
                        cl::init(true), cl::ZeroOrMore,
                        cl::desc("Enable CopyToPhi DAG Mutation"));
 
-namespace {
-
-class NodeSet;
-class SMSchedule;
-
-/// The main class in the implementation of the target independent
-/// software pipeliner pass.
-class MachinePipeliner : public MachineFunctionPass {
-public:
-  MachineFunction *MF = nullptr;
-  const MachineLoopInfo *MLI = nullptr;
-  const MachineDominatorTree *MDT = nullptr;
-  const InstrItineraryData *InstrItins;
-  const TargetInstrInfo *TII = nullptr;
-  RegisterClassInfo RegClassInfo;
-
-#ifndef NDEBUG
-  static int NumTries;
-#endif
-
-  /// Cache the target analysis information about the loop.
-  struct LoopInfo {
-    MachineBasicBlock *TBB = nullptr;
-    MachineBasicBlock *FBB = nullptr;
-    SmallVector<MachineOperand, 4> BrCond;
-    MachineInstr *LoopInductionVar = nullptr;
-    MachineInstr *LoopCompare = nullptr;
-  };
-  LoopInfo LI;
-
-  static char ID;
-
-  MachinePipeliner() : MachineFunctionPass(ID) {
-    initializeMachinePipelinerPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<AAResultsWrapperPass>();
-    AU.addPreserved<AAResultsWrapperPass>();
-    AU.addRequired<MachineLoopInfo>();
-    AU.addRequired<MachineDominatorTree>();
-    AU.addRequired<LiveIntervals>();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-
-private:
-  void preprocessPhiNodes(MachineBasicBlock &B);
-  bool canPipelineLoop(MachineLoop &L);
-  bool scheduleLoop(MachineLoop &L);
-  bool swingModuloScheduler(MachineLoop &L);
-};
-
-/// This class builds the dependence graph for the instructions in a loop,
-/// and attempts to schedule the instructions using the SMS algorithm.
-class SwingSchedulerDAG : public ScheduleDAGInstrs {
-  MachinePipeliner &Pass;
-  /// The minimum initiation interval between iterations for this schedule.
-  unsigned MII = 0;
-  /// Set to true if a valid pipelined schedule is found for the loop.
-  bool Scheduled = false;
-  MachineLoop &Loop;
-  LiveIntervals &LIS;
-  const RegisterClassInfo &RegClassInfo;
-
-  /// A toplogical ordering of the SUnits, which is needed for changing
-  /// dependences and iterating over the SUnits.
-  ScheduleDAGTopologicalSort Topo;
-
-  struct NodeInfo {
-    int ASAP = 0;
-    int ALAP = 0;
-    int ZeroLatencyDepth = 0;
-    int ZeroLatencyHeight = 0;
-
-    NodeInfo() = default;
-  };
-  /// Computed properties for each node in the graph.
-  std::vector<NodeInfo> ScheduleInfo;
-
-  enum OrderKind { BottomUp = 0, TopDown = 1 };
-  /// Computed node ordering for scheduling.
-  SetVector<SUnit *> NodeOrder;
-
-  using NodeSetType = SmallVector<NodeSet, 8>;
-  using ValueMapTy = DenseMap<unsigned, unsigned>;
-  using MBBVectorTy = SmallVectorImpl<MachineBasicBlock *>;
-  using InstrMapTy = DenseMap<MachineInstr *, MachineInstr *>;
-
-  /// Instructions to change when emitting the final schedule.
-  DenseMap<SUnit *, std::pair<unsigned, int64_t>> InstrChanges;
-
-  /// We may create a new instruction, so remember it because it
-  /// must be deleted when the pass is finished.
-  SmallPtrSet<MachineInstr *, 4> NewMIs;
-
-  /// Ordered list of DAG postprocessing steps.
-  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
-
-  /// Helper class to implement Johnson's circuit finding algorithm.
-  class Circuits {
-    std::vector<SUnit> &SUnits;
-    SetVector<SUnit *> Stack;
-    BitVector Blocked;
-    SmallVector<SmallPtrSet<SUnit *, 4>, 10> B;
-    SmallVector<SmallVector<int, 4>, 16> AdjK;
-    // Node to Index from ScheduleDAGTopologicalSort
-    std::vector<int> *Node2Idx;
-    unsigned NumPaths;
-    static unsigned MaxPaths;
-
-  public:
-    Circuits(std::vector<SUnit> &SUs, ScheduleDAGTopologicalSort &Topo)
-        : SUnits(SUs), Blocked(SUs.size()), B(SUs.size()), AdjK(SUs.size()) {
-      Node2Idx = new std::vector<int>(SUs.size());
-      unsigned Idx = 0;
-      for (const auto &NodeNum : Topo)
-        Node2Idx->at(NodeNum) = Idx++;
-    }
-
-    ~Circuits() { delete Node2Idx; }
-
-    /// Reset the data structures used in the circuit algorithm.
-    void reset() {
-      Stack.clear();
-      Blocked.reset();
-      B.assign(SUnits.size(), SmallPtrSet<SUnit *, 4>());
-      NumPaths = 0;
-    }
-
-    void createAdjacencyStructure(SwingSchedulerDAG *DAG);
-    bool circuit(int V, int S, NodeSetType &NodeSets, bool HasBackedge = false);
-    void unblock(int U);
-  };
-
-  struct CopyToPhiMutation : public ScheduleDAGMutation {
-    void apply(ScheduleDAGInstrs *DAG) override;
-  };
-
-public:
-  SwingSchedulerDAG(MachinePipeliner &P, MachineLoop &L, LiveIntervals &lis,
-                    const RegisterClassInfo &rci)
-      : ScheduleDAGInstrs(*P.MF, P.MLI, false), Pass(P), Loop(L), LIS(lis),
-        RegClassInfo(rci), Topo(SUnits, &ExitSU) {
-    P.MF->getSubtarget().getSMSMutations(Mutations);
-    if (SwpEnableCopyToPhi)
-      Mutations.push_back(llvm::make_unique<CopyToPhiMutation>());
-  }
-
-  void schedule() override;
-  void finishBlock() override;
-
-  /// Return true if the loop kernel has been scheduled.
-  bool hasNewSchedule() { return Scheduled; }
-
-  /// Return the earliest time an instruction may be scheduled.
-  int getASAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ASAP; }
-
-  /// Return the latest time an instruction my be scheduled.
-  int getALAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ALAP; }
-
-  /// The mobility function, which the number of slots in which
-  /// an instruction may be scheduled.
-  int getMOV(SUnit *Node) { return getALAP(Node) - getASAP(Node); }
-
-  /// The depth, in the dependence graph, for a node.
-  unsigned getDepth(SUnit *Node) { return Node->getDepth(); }
-
-  /// The maximum unweighted length of a path from an arbitrary node to the
-  /// given node in which each edge has latency 0
-  int getZeroLatencyDepth(SUnit *Node) {
-    return ScheduleInfo[Node->NodeNum].ZeroLatencyDepth;
-  }
-
-  /// The height, in the dependence graph, for a node.
-  unsigned getHeight(SUnit *Node) { return Node->getHeight(); }
-
-  /// The maximum unweighted length of a path from the given node to an
-  /// arbitrary node in which each edge has latency 0
-  int getZeroLatencyHeight(SUnit *Node) {
-    return ScheduleInfo[Node->NodeNum].ZeroLatencyHeight;
-  }
-
-  /// Return true if the dependence is a back-edge in the data dependence graph.
-  /// Since the DAG doesn't contain cycles, we represent a cycle in the graph
-  /// using an anti dependence from a Phi to an instruction.
-  bool isBackedge(SUnit *Source, const SDep &Dep) {
-    if (Dep.getKind() != SDep::Anti)
-      return false;
-    return Source->getInstr()->isPHI() || Dep.getSUnit()->getInstr()->isPHI();
-  }
-
-  bool isLoopCarriedDep(SUnit *Source, const SDep &Dep, bool isSucc = true);
-
-  /// The distance function, which indicates that operation V of iteration I
-  /// depends on operations U of iteration I-distance.
-  unsigned getDistance(SUnit *U, SUnit *V, const SDep &Dep) {
-    // Instructions that feed a Phi have a distance of 1. Computing larger
-    // values for arrays requires data dependence information.
-    if (V->getInstr()->isPHI() && Dep.getKind() == SDep::Anti)
-      return 1;
-    return 0;
-  }
-
-  /// Set the Minimum Initiation Interval for this schedule attempt.
-  void setMII(unsigned mii) { MII = mii; }
-
-  void applyInstrChange(MachineInstr *MI, SMSchedule &Schedule);
-
-  void fixupRegisterOverlaps(std::deque<SUnit *> &Instrs);
-
-  /// Return the new base register that was stored away for the changed
-  /// instruction.
-  unsigned getInstrBaseReg(SUnit *SU) {
-    DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
-        InstrChanges.find(SU);
-    if (It != InstrChanges.end())
-      return It->second.first;
-    return 0;
-  }
-
-  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
-    Mutations.push_back(std::move(Mutation));
-  }
-
-  static bool classof(const ScheduleDAGInstrs *DAG) { return true; }
-
-private:
-  void addLoopCarriedDependences(AliasAnalysis *AA);
-  void updatePhiDependences();
-  void changeDependences();
-  unsigned calculateResMII();
-  unsigned calculateRecMII(NodeSetType &RecNodeSets);
-  void findCircuits(NodeSetType &NodeSets);
-  void fuseRecs(NodeSetType &NodeSets);
-  void removeDuplicateNodes(NodeSetType &NodeSets);
-  void computeNodeFunctions(NodeSetType &NodeSets);
-  void registerPressureFilter(NodeSetType &NodeSets);
-  void colocateNodeSets(NodeSetType &NodeSets);
-  void checkNodeSets(NodeSetType &NodeSets);
-  void groupRemainingNodes(NodeSetType &NodeSets);
-  void addConnectedNodes(SUnit *SU, NodeSet &NewSet,
-                         SetVector<SUnit *> &NodesAdded);
-  void computeNodeOrder(NodeSetType &NodeSets);
-  void checkValidNodeOrder(const NodeSetType &Circuits) const;
-  bool schedulePipeline(SMSchedule &Schedule);
-  void generatePipelinedLoop(SMSchedule &Schedule);
-  void generateProlog(SMSchedule &Schedule, unsigned LastStage,
-                      MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
-                      MBBVectorTy &PrologBBs);
-  void generateEpilog(SMSchedule &Schedule, unsigned LastStage,
-                      MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
-                      MBBVectorTy &EpilogBBs, MBBVectorTy &PrologBBs);
-  void generateExistingPhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
-                            MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
-                            SMSchedule &Schedule, ValueMapTy *VRMap,
-                            InstrMapTy &InstrMap, unsigned LastStageNum,
-                            unsigned CurStageNum, bool IsLast);
-  void generatePhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
-                    MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
-                    SMSchedule &Schedule, ValueMapTy *VRMap,
-                    InstrMapTy &InstrMap, unsigned LastStageNum,
-                    unsigned CurStageNum, bool IsLast);
-  void removeDeadInstructions(MachineBasicBlock *KernelBB,
-                              MBBVectorTy &EpilogBBs);
-  void splitLifetimes(MachineBasicBlock *KernelBB, MBBVectorTy &EpilogBBs,
-                      SMSchedule &Schedule);
-  void addBranches(MBBVectorTy &PrologBBs, MachineBasicBlock *KernelBB,
-                   MBBVectorTy &EpilogBBs, SMSchedule &Schedule,
-                   ValueMapTy *VRMap);
-  bool computeDelta(MachineInstr &MI, unsigned &Delta);
-  void updateMemOperands(MachineInstr &NewMI, MachineInstr &OldMI,
-                         unsigned Num);
-  MachineInstr *cloneInstr(MachineInstr *OldMI, unsigned CurStageNum,
-                           unsigned InstStageNum);
-  MachineInstr *cloneAndChangeInstr(MachineInstr *OldMI, unsigned CurStageNum,
-                                    unsigned InstStageNum,
-                                    SMSchedule &Schedule);
-  void updateInstruction(MachineInstr *NewMI, bool LastDef,
-                         unsigned CurStageNum, unsigned InstrStageNum,
-                         SMSchedule &Schedule, ValueMapTy *VRMap);
-  MachineInstr *findDefInLoop(unsigned Reg);
-  unsigned getPrevMapVal(unsigned StageNum, unsigned PhiStage, unsigned LoopVal,
-                         unsigned LoopStage, ValueMapTy *VRMap,
-                         MachineBasicBlock *BB);
-  void rewritePhiValues(MachineBasicBlock *NewBB, unsigned StageNum,
-                        SMSchedule &Schedule, ValueMapTy *VRMap,
-                        InstrMapTy &InstrMap);
-  void rewriteScheduledInstr(MachineBasicBlock *BB, SMSchedule &Schedule,
-                             InstrMapTy &InstrMap, unsigned CurStageNum,
-                             unsigned PhiNum, MachineInstr *Phi,
-                             unsigned OldReg, unsigned NewReg,
-                             unsigned PrevReg = 0);
-  bool canUseLastOffsetValue(MachineInstr *MI, unsigned &BasePos,
-                             unsigned &OffsetPos, unsigned &NewBase,
-                             int64_t &NewOffset);
-  void postprocessDAG();
-};
-
-/// A NodeSet contains a set of SUnit DAG nodes with additional information
-/// that assigns a priority to the set.
-class NodeSet {
-  SetVector<SUnit *> Nodes;
-  bool HasRecurrence = false;
-  unsigned RecMII = 0;
-  int MaxMOV = 0;
-  unsigned MaxDepth = 0;
-  unsigned Colocate = 0;
-  SUnit *ExceedPressure = nullptr;
-  unsigned Latency = 0;
-
-public:
-  using iterator = SetVector<SUnit *>::const_iterator;
-
-  NodeSet() = default;
-  NodeSet(iterator S, iterator E) : Nodes(S, E), HasRecurrence(true) {
-    Latency = 0;
-    for (unsigned i = 0, e = Nodes.size(); i < e; ++i)
-      for (const SDep &Succ : Nodes[i]->Succs)
-        if (Nodes.count(Succ.getSUnit()))
-          Latency += Succ.getLatency();
-  }
-
-  bool insert(SUnit *SU) { return Nodes.insert(SU); }
-
-  void insert(iterator S, iterator E) { Nodes.insert(S, E); }
-
-  template <typename UnaryPredicate> bool remove_if(UnaryPredicate P) {
-    return Nodes.remove_if(P);
-  }
-
-  unsigned count(SUnit *SU) const { return Nodes.count(SU); }
-
-  bool hasRecurrence() { return HasRecurrence; };
-
-  unsigned size() const { return Nodes.size(); }
-
-  bool empty() const { return Nodes.empty(); }
-
-  SUnit *getNode(unsigned i) const { return Nodes[i]; };
-
-  void setRecMII(unsigned mii) { RecMII = mii; };
-
-  void setColocate(unsigned c) { Colocate = c; };
-
-  void setExceedPressure(SUnit *SU) { ExceedPressure = SU; }
-
-  bool isExceedSU(SUnit *SU) { return ExceedPressure == SU; }
-
-  int compareRecMII(NodeSet &RHS) { return RecMII - RHS.RecMII; }
-
-  int getRecMII() { return RecMII; }
-
-  /// Summarize node functions for the entire node set.
-  void computeNodeSetInfo(SwingSchedulerDAG *SSD) {
-    for (SUnit *SU : *this) {
-      MaxMOV = std::max(MaxMOV, SSD->getMOV(SU));
-      MaxDepth = std::max(MaxDepth, SSD->getDepth(SU));
-    }
-  }
-
-  unsigned getLatency() { return Latency; }
-
-  unsigned getMaxDepth() { return MaxDepth; }
-
-  void clear() {
-    Nodes.clear();
-    RecMII = 0;
-    HasRecurrence = false;
-    MaxMOV = 0;
-    MaxDepth = 0;
-    Colocate = 0;
-    ExceedPressure = nullptr;
-  }
-
-  operator SetVector<SUnit *> &() { return Nodes; }
-
-  /// Sort the node sets by importance. First, rank them by recurrence MII,
-  /// then by mobility (least mobile done first), and finally by depth.
-  /// Each node set may contain a colocate value which is used as the first
-  /// tie breaker, if it's set.
-  bool operator>(const NodeSet &RHS) const {
-    if (RecMII == RHS.RecMII) {
-      if (Colocate != 0 && RHS.Colocate != 0 && Colocate != RHS.Colocate)
-        return Colocate < RHS.Colocate;
-      if (MaxMOV == RHS.MaxMOV)
-        return MaxDepth > RHS.MaxDepth;
-      return MaxMOV < RHS.MaxMOV;
-    }
-    return RecMII > RHS.RecMII;
-  }
-
-  bool operator==(const NodeSet &RHS) const {
-    return RecMII == RHS.RecMII && MaxMOV == RHS.MaxMOV &&
-           MaxDepth == RHS.MaxDepth;
-  }
-
-  bool operator!=(const NodeSet &RHS) const { return !operator==(RHS); }
-
-  iterator begin() { return Nodes.begin(); }
-  iterator end() { return Nodes.end(); }
-
-  void print(raw_ostream &os) const {
-    os << "Num nodes " << size() << " rec " << RecMII << " mov " << MaxMOV
-       << " depth " << MaxDepth << " col " << Colocate << "\n";
-    for (const auto &I : Nodes)
-      os << "   SU(" << I->NodeNum << ") " << *(I->getInstr());
-    os << "\n";
-  }
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
-#endif
-};
-
-/// This class represents the scheduled code.  The main data structure is a
-/// map from scheduled cycle to instructions.  During scheduling, the
-/// data structure explicitly represents all stages/iterations.   When
-/// the algorithm finshes, the schedule is collapsed into a single stage,
-/// which represents instructions from different loop iterations.
-///
-/// The SMS algorithm allows negative values for cycles, so the first cycle
-/// in the schedule is the smallest cycle value.
-class SMSchedule {
-private:
-  /// Map from execution cycle to instructions.
-  DenseMap<int, std::deque<SUnit *>> ScheduledInstrs;
-
-  /// Map from instruction to execution cycle.
-  std::map<SUnit *, int> InstrToCycle;
-
-  /// Map for each register and the max difference between its uses and def.
-  /// The first element in the pair is the max difference in stages. The
-  /// second is true if the register defines a Phi value and loop value is
-  /// scheduled before the Phi.
-  std::map<unsigned, std::pair<unsigned, bool>> RegToStageDiff;
-
-  /// Keep track of the first cycle value in the schedule.  It starts
-  /// as zero, but the algorithm allows negative values.
-  int FirstCycle = 0;
-
-  /// Keep track of the last cycle value in the schedule.
-  int LastCycle = 0;
-
-  /// The initiation interval (II) for the schedule.
-  int InitiationInterval = 0;
-
-  /// Target machine information.
-  const TargetSubtargetInfo &ST;
-
-  /// Virtual register information.
-  MachineRegisterInfo &MRI;
-
-  std::unique_ptr<DFAPacketizer> Resources;
-
-public:
-  SMSchedule(MachineFunction *mf)
-      : ST(mf->getSubtarget()), MRI(mf->getRegInfo()),
-        Resources(ST.getInstrInfo()->CreateTargetScheduleState(ST)) {}
-
-  void reset() {
-    ScheduledInstrs.clear();
-    InstrToCycle.clear();
-    RegToStageDiff.clear();
-    FirstCycle = 0;
-    LastCycle = 0;
-    InitiationInterval = 0;
-  }
-
-  /// Set the initiation interval for this schedule.
-  void setInitiationInterval(int ii) { InitiationInterval = ii; }
-
-  /// Return the first cycle in the completed schedule.  This
-  /// can be a negative value.
-  int getFirstCycle() const { return FirstCycle; }
-
-  /// Return the last cycle in the finalized schedule.
-  int getFinalCycle() const { return FirstCycle + InitiationInterval - 1; }
-
-  /// Return the cycle of the earliest scheduled instruction in the dependence
-  /// chain.
-  int earliestCycleInChain(const SDep &Dep);
-
-  /// Return the cycle of the latest scheduled instruction in the dependence
-  /// chain.
-  int latestCycleInChain(const SDep &Dep);
-
-  void computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
-                    int *MinEnd, int *MaxStart, int II, SwingSchedulerDAG *DAG);
-  bool insert(SUnit *SU, int StartCycle, int EndCycle, int II);
-
-  /// Iterators for the cycle to instruction map.
-  using sched_iterator = DenseMap<int, std::deque<SUnit *>>::iterator;
-  using const_sched_iterator =
-      DenseMap<int, std::deque<SUnit *>>::const_iterator;
-
-  /// Return true if the instruction is scheduled at the specified stage.
-  bool isScheduledAtStage(SUnit *SU, unsigned StageNum) {
-    return (stageScheduled(SU) == (int)StageNum);
-  }
-
-  /// Return the stage for a scheduled instruction.  Return -1 if
-  /// the instruction has not been scheduled.
-  int stageScheduled(SUnit *SU) const {
-    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
-    if (it == InstrToCycle.end())
-      return -1;
-    return (it->second - FirstCycle) / InitiationInterval;
-  }
-
-  /// Return the cycle for a scheduled instruction. This function normalizes
-  /// the first cycle to be 0.
-  unsigned cycleScheduled(SUnit *SU) const {
-    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
-    assert(it != InstrToCycle.end() && "Instruction hasn't been scheduled.");
-    return (it->second - FirstCycle) % InitiationInterval;
-  }
-
-  /// Return the maximum stage count needed for this schedule.
-  unsigned getMaxStageCount() {
-    return (LastCycle - FirstCycle) / InitiationInterval;
-  }
-
-  /// Return the max. number of stages/iterations that can occur between a
-  /// register definition and its uses.
-  unsigned getStagesForReg(int Reg, unsigned CurStage) {
-    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
-    if (CurStage > getMaxStageCount() && Stages.first == 0 && Stages.second)
-      return 1;
-    return Stages.first;
-  }
-
-  /// The number of stages for a Phi is a little different than other
-  /// instructions. The minimum value computed in RegToStageDiff is 1
-  /// because we assume the Phi is needed for at least 1 iteration.
-  /// This is not the case if the loop value is scheduled prior to the
-  /// Phi in the same stage.  This function returns the number of stages
-  /// or iterations needed between the Phi definition and any uses.
-  unsigned getStagesForPhi(int Reg) {
-    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
-    if (Stages.second)
-      return Stages.first;
-    return Stages.first - 1;
-  }
-
-  /// Return the instructions that are scheduled at the specified cycle.
-  std::deque<SUnit *> &getInstructions(int cycle) {
-    return ScheduledInstrs[cycle];
-  }
-
-  bool isValidSchedule(SwingSchedulerDAG *SSD);
-  void finalizeSchedule(SwingSchedulerDAG *SSD);
-  void orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
-                       std::deque<SUnit *> &Insts);
-  bool isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi);
-  bool isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD, MachineInstr *Def,
-                             MachineOperand &MO);
-  void print(raw_ostream &os) const;
-  void dump() const;
-};
-
-} // end anonymous namespace
+} // end namespace llvm
 
 unsigned SwingSchedulerDAG::Circuits::MaxPaths = 5;
 char MachinePipeliner::ID = 0;
@@ -1557,7 +967,7 @@
       }
     }
   }
-  // Add back-eges in the adjacency matrix for the output dependences.
+  // Add back-edges in the adjacency matrix for the output dependences.
   for (auto &OD : OutputDeps)
     if (!Added.test(OD.second)) {
       AdjK[OD.first].push_back(OD.second);
@@ -2773,7 +2183,7 @@
       else if (PrologStage >= AccessStage + StageDiff + np &&
                VRMap[PrologStage - StageDiff - np].count(LoopVal) != 0)
         PhiOp1 = VRMap[PrologStage - StageDiff - np][LoopVal];
-      // Check if the Phi has already been scheduled, but the loop intruction
+      // Check if the Phi has already been scheduled, but the loop instruction
       // is either another Phi, or doesn't occur in the loop.
       else if (PrologStage >= AccessStage + StageDiff + np) {
         // If the Phi references another Phi, we need to examine the other
@@ -4300,6 +3710,14 @@
   LLVM_DEBUG(dump(););
 }
 
+void NodeSet::print(raw_ostream &os) const {
+  os << "Num nodes " << size() << " rec " << RecMII << " mov " << MaxMOV
+     << " depth " << MaxDepth << " col " << Colocate << "\n";
+  for (const auto &I : Nodes)
+    os << "   SU(" << I->NodeNum << ") " << *(I->getInstr());
+  os << "\n";
+}
+
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
 /// Print the schedule information to the given output.
 void SMSchedule::print(raw_ostream &os) const {
@@ -4318,4 +3736,9 @@
 
 /// Utility function used for debugging to print the schedule.
 LLVM_DUMP_METHOD void SMSchedule::dump() const { print(dbgs()); }
+LLVM_DUMP_METHOD void NodeSet::dump() const { print(dbgs()); }
+
 #endif
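
With the pipeliner classes now declared in llvm/CodeGen/MachinePipeliner.h and
placed in the llvm namespace, targets can hook into the scheduler from outside
this file. A minimal sketch, assuming a hypothetical WidgetSubtarget; only the
override is required:

#include "llvm/CodeGen/MachinePipeliner.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"

namespace {
// Hypothetical DAG mutation registered with the SMS scheduler.
struct WidgetSMSMutation : llvm::ScheduleDAGMutation {
  void apply(llvm::ScheduleDAGInstrs *DAG) override {
    // Inspect or adjust dependences before the SMS scheduler runs.
  }
};
} // end anonymous namespace

// The standard subtarget hook feeds mutations to SwingSchedulerDAG's
// constructor (see the getSMSMutations call in the ctor above):
// void WidgetSubtarget::getSMSMutations(
//     std::vector<std::unique_ptr<llvm::ScheduleDAGMutation>> &M) const {
//   M.push_back(llvm::make_unique<WidgetSMSMutation>());
// }
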
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index ec11586..90dad9d 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -2460,13 +2460,13 @@
 }
 
 /// Returns true if the current cycle plus remaining latency is greater than
-/// the cirtical path in the scheduling region.
+/// the critical path in the scheduling region.
 bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
                                                SchedBoundary &CurrZone,
                                                bool ComputeRemLatency,
                                                unsigned &RemLatency) const {
   // The current cycle is already greater than the critical path, so we are
-  // already latnecy limited and don't need to compute the remaining latency.
+  // already latency limited and don't need to compute the remaining latency.
   if (CurrZone.getCurrCycle() > Rem.CriticalPath)
     return true;
 
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index f4804e7..534d369 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -250,6 +250,7 @@
     void report_context(const LiveRange::Segment &S) const;
     void report_context(const VNInfo &VNI) const;
     void report_context(SlotIndex Pos) const;
+    void report_context(MCPhysReg PhysReg) const;
     void report_context_liverange(const LiveRange &LR) const;
     void report_context_lanemask(LaneBitmask LaneMask) const;
     void report_context_vreg(unsigned VReg) const;
@@ -540,6 +541,10 @@
   errs() << "- liverange:   " << LR << '\n';
 }
 
+void MachineVerifier::report_context(MCPhysReg PReg) const {
+  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
+}
+
 void MachineVerifier::report_context_vreg(unsigned VReg) const {
   errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
 }
@@ -619,6 +624,7 @@
       if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
           MBB->getIterator() != MBB->getParent()->begin()) {
         report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB);
+        report_context(LI.PhysReg);
       }
     }
   }
@@ -677,7 +683,7 @@
         // out the bottom of the function.
       } else if (MBB->succ_size() == LandingPadSuccs.size()) {
         // It's possible that the block legitimately ends with a noreturn
-        // call or an unreachable, in which case it won't actuall fall
+        // call or an unreachable, in which case it won't actually fall
         // out of the block.
       } else if (MBB->succ_size() != 1+LandingPadSuccs.size()) {
         report("MBB exits via unconditional fall-through but doesn't have "
diff --git a/lib/CodeGen/OptimizePHIs.cpp b/lib/CodeGen/OptimizePHIs.cpp
index befa842..770f6c5 100644
--- a/lib/CodeGen/OptimizePHIs.cpp
+++ b/lib/CodeGen/OptimizePHIs.cpp
@@ -90,10 +90,10 @@
 }
 
 /// IsSingleValuePHICycle - Check if MI is a PHI where all the source operands
-/// are copies of SingleValReg, possibly via copies through other PHIs.  If
+/// are copies of SingleValReg, possibly via copies through other PHIs. If
 /// SingleValReg is zero on entry, it is set to the register with the single
-/// non-copy value.  PHIsInCycle is a set used to keep track of the PHIs that
-/// have been scanned.
+/// non-copy value. PHIsInCycle is a set used to keep track of the PHIs that
+/// have been scanned. PHIs may be grouped by cycle, several cycles, or chains.
 bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
                                          unsigned &SingleValReg,
                                          InstrSet &PHIsInCycle) {
@@ -119,8 +119,10 @@
     if (SrcMI && SrcMI->isCopy() &&
         !SrcMI->getOperand(0).getSubReg() &&
         !SrcMI->getOperand(1).getSubReg() &&
-        TargetRegisterInfo::isVirtualRegister(SrcMI->getOperand(1).getReg()))
-      SrcMI = MRI->getVRegDef(SrcMI->getOperand(1).getReg());
+        TargetRegisterInfo::isVirtualRegister(SrcMI->getOperand(1).getReg())) {
+      SrcReg = SrcMI->getOperand(1).getReg();
+      SrcMI = MRI->getVRegDef(SrcReg);
+    }
     if (!SrcMI)
       return false;
 
@@ -129,7 +131,7 @@
         return false;
     } else {
       // Fail if there is more than one non-phi/non-move register.
-      if (SingleValReg != 0)
+      if (SingleValReg != 0 && SingleValReg != SrcReg)
         return false;
       SingleValReg = SrcReg;
     }
@@ -180,6 +182,9 @@
       if (!MRI->constrainRegClass(SingleValReg, MRI->getRegClass(OldReg)))
         continue;
 
+      // SingleValReg may have been taken from a COPY source, so clear any
+      // stale kill flags before extending its live range.
+      MRI->clearKillFlags(SingleValReg);
+
       MRI->replaceRegWith(OldReg, SingleValReg);
       MI->eraseFromParent();
       ++NumPHICycles;
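
The two fixes are easiest to see on a hand-written pseudo-MIR sketch (purely
illustrative, not a test case) where one value reaches the PHI cycle both
directly and through a COPY:

// %0 = ...                        ; the single underlying value
// %1 = COPY killed %0             ; note the kill flag
// %2 = PHI %0, %bb.0, %3, %bb.1
// %3 = PHI %1, %bb.0, %2, %bb.1
//
// Looking through the COPY, both PHI sources resolve to %0, so the relaxed
// `SingleValReg != SrcReg` test above accepts the cycle instead of bailing on
// the second source. And because replaceRegWith can extend %0's live range
// past the COPY's kill, the new clearKillFlags call is needed first.
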
diff --git a/lib/CodeGen/PreISelIntrinsicLowering.cpp b/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 8f88ef7..b0e9ac0 100644
--- a/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -7,13 +7,15 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This pass implements IR lowering for the llvm.load.relative intrinsic.
+// This pass implements IR lowering for the llvm.load.relative and llvm.objc.*
+// intrinsics.
 //
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/PreISelIntrinsicLowering.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Module.h"
@@ -55,11 +57,129 @@
   return Changed;
 }
 
+static bool lowerObjCCall(Function &F, const char *NewFn,
+                          bool setNonLazyBind = false) {
+  if (F.use_empty())
+    return false;
+
+  // If we haven't already looked up this function, check to see if the
+  // program already contains a function with this name.
+  Module *M = F.getParent();
+  Constant *FCache = M->getOrInsertFunction(NewFn, F.getFunctionType());
+
+  if (Function *Fn = dyn_cast<Function>(FCache)) {
+    Fn->setLinkage(F.getLinkage());
+    if (setNonLazyBind && !Fn->isWeakForLinker()) {
+      // If we have Native ARC, set nonlazybind attribute for these APIs for
+      // performance.
+      Fn->addFnAttr(Attribute::NonLazyBind);
+    }
+  }
+
+  for (auto I = F.use_begin(), E = F.use_end(); I != E;) {
+    auto *CI = cast<CallInst>(I->getUser());
+    assert(CI->getCalledFunction() && "Cannot lower an indirect call!");
+    ++I;
+
+    IRBuilder<> Builder(CI->getParent(), CI->getIterator());
+    SmallVector<Value *, 8> Args(CI->arg_begin(), CI->arg_end());
+    CallInst *NewCI = Builder.CreateCall(FCache, Args);
+    NewCI->setName(CI->getName());
+    NewCI->setTailCallKind(CI->getTailCallKind());
+    if (!CI->use_empty())
+      CI->replaceAllUsesWith(NewCI);
+    CI->eraseFromParent();
+  }
+
+  return true;
+}
+
 static bool lowerIntrinsics(Module &M) {
   bool Changed = false;
   for (Function &F : M) {
-    if (F.getName().startswith("llvm.load.relative."))
+    if (F.getName().startswith("llvm.load.relative.")) {
       Changed |= lowerLoadRelative(F);
+      continue;
+    }
+    switch (F.getIntrinsicID()) {
+    default:
+      break;
+    case Intrinsic::objc_autorelease:
+      Changed |= lowerObjCCall(F, "objc_autorelease");
+      break;
+    case Intrinsic::objc_autoreleasePoolPop:
+      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPop");
+      break;
+    case Intrinsic::objc_autoreleasePoolPush:
+      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPush");
+      break;
+    case Intrinsic::objc_autoreleaseReturnValue:
+      Changed |= lowerObjCCall(F, "objc_autoreleaseReturnValue");
+      break;
+    case Intrinsic::objc_copyWeak:
+      Changed |= lowerObjCCall(F, "objc_copyWeak");
+      break;
+    case Intrinsic::objc_destroyWeak:
+      Changed |= lowerObjCCall(F, "objc_destroyWeak");
+      break;
+    case Intrinsic::objc_initWeak:
+      Changed |= lowerObjCCall(F, "objc_initWeak");
+      break;
+    case Intrinsic::objc_loadWeak:
+      Changed |= lowerObjCCall(F, "objc_loadWeak");
+      break;
+    case Intrinsic::objc_loadWeakRetained:
+      Changed |= lowerObjCCall(F, "objc_loadWeakRetained");
+      break;
+    case Intrinsic::objc_moveWeak:
+      Changed |= lowerObjCCall(F, "objc_moveWeak");
+      break;
+    case Intrinsic::objc_release:
+      Changed |= lowerObjCCall(F, "objc_release", true);
+      break;
+    case Intrinsic::objc_retain:
+      Changed |= lowerObjCCall(F, "objc_retain", true);
+      break;
+    case Intrinsic::objc_retainAutorelease:
+      Changed |= lowerObjCCall(F, "objc_retainAutorelease");
+      break;
+    case Intrinsic::objc_retainAutoreleaseReturnValue:
+      Changed |= lowerObjCCall(F, "objc_retainAutoreleaseReturnValue");
+      break;
+    case Intrinsic::objc_retainAutoreleasedReturnValue:
+      Changed |= lowerObjCCall(F, "objc_retainAutoreleasedReturnValue");
+      break;
+    case Intrinsic::objc_retainBlock:
+      Changed |= lowerObjCCall(F, "objc_retainBlock");
+      break;
+    case Intrinsic::objc_storeStrong:
+      Changed |= lowerObjCCall(F, "objc_storeStrong");
+      break;
+    case Intrinsic::objc_storeWeak:
+      Changed |= lowerObjCCall(F, "objc_storeWeak");
+      break;
+    case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
+      Changed |= lowerObjCCall(F, "objc_unsafeClaimAutoreleasedReturnValue");
+      break;
+    case Intrinsic::objc_retainedObject:
+      Changed |= lowerObjCCall(F, "objc_retainedObject");
+      break;
+    case Intrinsic::objc_unretainedObject:
+      Changed |= lowerObjCCall(F, "objc_unretainedObject");
+      break;
+    case Intrinsic::objc_unretainedPointer:
+      Changed |= lowerObjCCall(F, "objc_unretainedPointer");
+      break;
+    case Intrinsic::objc_retain_autorelease:
+      Changed |= lowerObjCCall(F, "objc_retain_autorelease");
+      break;
+    case Intrinsic::objc_sync_enter:
+      Changed |= lowerObjCCall(F, "objc_sync_enter");
+      break;
+    case Intrinsic::objc_sync_exit:
+      Changed |= lowerObjCCall(F, "objc_sync_exit");
+      break;
+    }
   }
   return Changed;
 }
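
The switch is a pure name mapping from ObjC intrinsics to runtime entry
points. A condensed sketch of the correspondence (the table itself is
illustrative; the names come from the cases above):

#include <map>
#include <string>

// Summary of the intrinsic-to-runtime-call renaming performed above.
static const std::map<std::string, std::string> ObjCRuntimeNames = {
    {"llvm.objc.retain", "objc_retain"},     // also gets nonlazybind
    {"llvm.objc.release", "objc_release"},   // also gets nonlazybind
    {"llvm.objc.autorelease", "objc_autorelease"},
    {"llvm.objc.storeStrong", "objc_storeStrong"},
    {"llvm.objc.sync.enter", "objc_sync_enter"},
};
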
diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp
index f244028..81b21b4 100644
--- a/lib/CodeGen/RegAllocGreedy.cpp
+++ b/lib/CodeGen/RegAllocGreedy.cpp
@@ -318,7 +318,7 @@
 
     /// Track new eviction.
     /// The Evictor vreg has evicted the Evictee vreg from Physreg.
-    /// \param PhysReg The phisical register Evictee was evicted from.
+    /// \param PhysReg The physical register Evictee was evicted from.
     /// \param Evictor The evictor Vreg that evicted Evictee.
     /// \param Evictee The evictee Vreg.
     void addEviction(unsigned PhysReg, unsigned Evictor, unsigned Evictee) {
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index 771d50e..2a06d5e 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -176,7 +176,7 @@
     /// If one def has many copy like uses, and those copy uses are all
     /// rematerialized, the live interval update needed for those
     /// rematerializations will be delayed and done all at once instead
-    /// of being done multiple times. This is to save compile cost becuase
+    /// of being done multiple times. This is to save compile cost because
     /// live interval update is costly.
     void lateLiveIntervalUpdate();
 
@@ -1910,6 +1910,13 @@
     }
     LI.removeEmptySubRanges();
   }
+
+  // CP.getSrcReg()'s live interval has been merged into CP.getDstReg()'s live
+  // interval. Since CP.getSrcReg() is in the ToBeUpdated set and its live
+  // interval is not up-to-date, we need to update the merged live interval
+  // here.
+  if (ToBeUpdated.count(CP.getSrcReg()))
+    ShrinkMainRange = true;
+
   if (ShrinkMainRange) {
     LiveInterval &LI = LIS->getInterval(CP.getDstReg());
     shrinkToUses(&LI);
@@ -2501,8 +2508,10 @@
         // We normally expect IMPLICIT_DEF values to be live only until the end
         // of their block. If the value is really live longer and gets pruned in
         // another block, this flag is cleared again.
+        //
+        // Clearing the valid lanes is deferred until we know for sure that
+        // this instruction can actually be erased.
         V.ErasableImplicitDef = true;
-        V.ValidLanes &= ~V.WriteLanes;
       }
     }
   }
@@ -2557,20 +2566,25 @@
   Other.computeAssignment(V.OtherVNI->id, *this);
   Val &OtherV = Other.Vals[V.OtherVNI->id];
 
-  // Check if OtherV is an IMPLICIT_DEF that extends beyond its basic block.
-  // This shouldn't normally happen, but ProcessImplicitDefs can leave such
-  // IMPLICIT_DEF instructions behind, and there is nothing wrong with it
-  // technically.
-  //
-  // When it happens, treat that IMPLICIT_DEF as a normal value, and don't try
-  // to erase the IMPLICIT_DEF instruction.
-  if (OtherV.ErasableImplicitDef && DefMI &&
-      DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
-    LLVM_DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
-                      << " extends into "
-                      << printMBBReference(*DefMI->getParent())
-                      << ", keeping it.\n");
-    OtherV.ErasableImplicitDef = false;
+  if (OtherV.ErasableImplicitDef) {
+    // Check if OtherV is an IMPLICIT_DEF that extends beyond its basic block.
+    // This shouldn't normally happen, but ProcessImplicitDefs can leave such
+    // IMPLICIT_DEF instructions behind, and there is nothing wrong with it
+    // technically.
+    //
+    // When it happens, treat that IMPLICIT_DEF as a normal value, and don't try
+    // to erase the IMPLICIT_DEF instruction.
+    if (DefMI &&
+        DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
+      LLVM_DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
+                 << " extends into "
+                 << printMBBReference(*DefMI->getParent())
+                 << ", keeping it.\n");
+      OtherV.ErasableImplicitDef = false;
+    } else {
+      // We deferred clearing these lanes in case we needed to save them
+      OtherV.ValidLanes &= ~OtherV.WriteLanes;
+    }
   }
 
   // Allow overlapping PHI values. Any real interference would show up in a
@@ -2613,6 +2627,12 @@
     return CR_Erase;
   }
 
+  // The remaining checks apply to the lanes, which aren't tracked here. This
+  // was already decided to be OK via the following CR_Replace condition, so
+  // return CR_Replace.
+  if (SubRangeJoin)
+    return CR_Replace;
+
   // If the lanes written by this instruction were all undef in OtherVNI, it is
   // still safe to join the live ranges. This can't be done with a simple value
   // mapping, though - OtherVNI will map to multiple values:
@@ -2694,8 +2714,18 @@
     Val &OtherV = Other.Vals[V.OtherVNI->id];
     // We cannot erase an IMPLICIT_DEF if we don't have valid values for all
     // its lanes.
-    if ((OtherV.WriteLanes & ~V.ValidLanes).any() && TrackSubRegLiveness)
+    if (OtherV.ErasableImplicitDef &&
+        TrackSubRegLiveness &&
+        (OtherV.WriteLanes & ~V.ValidLanes).any()) {
+      LLVM_DEBUG(dbgs() << "Cannot erase implicit_def with missing values\n");
+
       OtherV.ErasableImplicitDef = false;
+      // The valid lanes written by the implicit_def were speculatively cleared
+      // before, so make this more conservative. It may be better to track
+      // this, but I haven't found a testcase where it matters.
+      OtherV.ValidLanes = LaneBitmask::getAll();
+    }
+
     OtherV.Pruned = true;
     LLVM_FALLTHROUGH;
   }
diff --git a/lib/CodeGen/SafeStack.cpp b/lib/CodeGen/SafeStack.cpp
index 7b1c7fe..c356fb5 100644
--- a/lib/CodeGen/SafeStack.cpp
+++ b/lib/CodeGen/SafeStack.cpp
@@ -324,11 +324,8 @@
       case Instruction::Invoke: {
         ImmutableCallSite CS(I);
 
-        if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
-          if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
-              II->getIntrinsicID() == Intrinsic::lifetime_end)
-            continue;
-        }
+        if (I->isLifetimeStartOrEnd())
+          continue;
 
         if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
           if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
diff --git a/lib/CodeGen/SafeStackColoring.cpp b/lib/CodeGen/SafeStackColoring.cpp
index b1914b5..726c380 100644
--- a/lib/CodeGen/SafeStackColoring.cpp
+++ b/lib/CodeGen/SafeStackColoring.cpp
@@ -46,11 +46,10 @@
 }
 
 bool StackColoring::readMarker(Instruction *I, bool *IsStart) {
-  auto *II = dyn_cast<IntrinsicInst>(I);
-  if (!II || (II->getIntrinsicID() != Intrinsic::lifetime_start &&
-              II->getIntrinsicID() != Intrinsic::lifetime_end))
+  if (!I->isLifetimeStartOrEnd())
     return false;
 
+  auto *II = cast<IntrinsicInst>(I);
   *IsStart = II->getIntrinsicID() == Intrinsic::lifetime_start;
   return true;
 }
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index cad130e..ff5505c 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -293,6 +293,8 @@
     SDValue visitADD(SDNode *N);
     SDValue visitADDLike(SDValue N0, SDValue N1, SDNode *LocReference);
     SDValue visitSUB(SDNode *N);
+    SDValue visitADDSAT(SDNode *N);
+    SDValue visitSUBSAT(SDNode *N);
     SDValue visitADDC(SDNode *N);
     SDValue visitUADDO(SDNode *N);
     SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N);
@@ -915,9 +917,11 @@
 
 // Determines if a BUILD_VECTOR is composed of all-constants possibly mixed with
 // undef's.
-static bool isAnyConstantBuildVector(const SDNode *N) {
-  return ISD::isBuildVectorOfConstantSDNodes(N) ||
-         ISD::isBuildVectorOfConstantFPSDNodes(N);
+static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques = false) {
+  if (V.getOpcode() != ISD::BUILD_VECTOR)
+    return false;
+  return isConstantOrConstantVector(V, NoOpaques) ||
+         ISD::isBuildVectorOfConstantFPSDNodes(V.getNode());
 }
 
 SDValue DAGCombiner::ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
@@ -1484,6 +1488,10 @@
   case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
   case ISD::ADD:                return visitADD(N);
   case ISD::SUB:                return visitSUB(N);
+  case ISD::SADDSAT:
+  case ISD::UADDSAT:            return visitADDSAT(N);
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT:            return visitSUBSAT(N);
   case ISD::ADDC:               return visitADDC(N);
   case ISD::UADDO:              return visitUADDO(N);
   case ISD::SUBC:               return visitSUBC(N);
@@ -2168,6 +2176,49 @@
   return SDValue();
 }
 
+SDValue DAGCombiner::visitADDSAT(SDNode *N) {
+  unsigned Opcode = N->getOpcode();
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  EVT VT = N0.getValueType();
+  SDLoc DL(N);
+
+  // fold vector ops
+  if (VT.isVector()) {
+    // TODO SimplifyVBinOp
+
+    // fold (add_sat x, 0) -> x, vector edition
+    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+      return N0;
+    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+      return N1;
+  }
+
+  // fold (add_sat x, undef) -> -1
+  if (N0.isUndef() || N1.isUndef())
+    return DAG.getAllOnesConstant(DL, VT);
+
+  if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
+    // canonicalize constant to RHS
+    if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
+      return DAG.getNode(Opcode, DL, VT, N1, N0);
+    // fold (add_sat c1, c2) -> c3
+    return DAG.FoldConstantArithmetic(Opcode, DL, VT, N0.getNode(),
+                                      N1.getNode());
+  }
+
+  // fold (add_sat x, 0) -> x
+  if (isNullConstant(N1))
+    return N0;
+
+  // If it cannot overflow, transform into an add.
+  if (Opcode == ISD::UADDSAT)
+    if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
+      return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
+
+  return SDValue();
+}
+
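
A scalar model of the saturating-add semantics these folds rely on (a sketch
on i8 values, not DAG code):

#include <cstdint>

// Saturating unsigned add on 8 bits: clamp at 0xFF instead of wrapping.
static uint8_t uadd_sat8(uint8_t A, uint8_t B) {
  unsigned Sum = unsigned(A) + unsigned(B);
  return Sum > 0xFF ? 0xFF : uint8_t(Sum);
}
// uadd_sat8(200, 100) == 255: the (add_sat c1, c2) -> c3 constant fold.
// uadd_sat8(X, 0)     == X:   the (add_sat x, 0) -> x identity.
// When computeOverflowKind proves the sum cannot overflow, the clamp is dead
// and UADDSAT degrades to a plain ADD, which is the final fold above.
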
 static SDValue getAsCarry(const TargetLowering &TLI, SDValue V) {
   bool Masked = false;
 
@@ -2730,6 +2781,43 @@
   return SDValue();
 }
 
+SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  EVT VT = N0.getValueType();
+  SDLoc DL(N);
+
+  // fold vector ops
+  if (VT.isVector()) {
+    // TODO SimplifyVBinOp
+
+    // fold (sub_sat x, 0) -> x, vector edition
+    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+      return N0;
+  }
+
+  // fold (sub_sat x, undef) -> 0
+  if (N0.isUndef() || N1.isUndef())
+    return DAG.getConstant(0, DL, VT);
+
+  // fold (sub_sat x, x) -> 0
+  if (N0 == N1)
+    return DAG.getConstant(0, DL, VT);
+
+  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
+      DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
+    // fold (sub_sat c1, c2) -> c3
+    return DAG.FoldConstantArithmetic(N->getOpcode(), DL, VT, N0.getNode(),
+                                      N1.getNode());
+  }
+
+  // fold (sub_sat x, 0) -> x
+  if (isNullConstant(N1))
+    return N0;
+
+  return SDValue();
+}
+
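
The undef folds of the two visitors differ for a principled reason: undef may
be refined to any value, so add_sat may pretend undef == -1 - x and produce
exactly all-ones with no saturation, while sub_sat may pretend undef == x and
produce exactly zero. A matching scalar model (sketch on i8 values):

#include <cstdint>

// Saturating unsigned subtract on 8 bits: clamp at 0 instead of wrapping.
static uint8_t usub_sat8(uint8_t A, uint8_t B) {
  return A > B ? uint8_t(A - B) : uint8_t(0);
}
// usub_sat8(X, X) == 0: the (sub_sat x, x) -> 0 fold.
// usub_sat8(X, 0) == X: the (sub_sat x, 0) -> x fold.
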
 SDValue DAGCombiner::visitSUBC(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -3161,8 +3249,19 @@
   if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
     return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1);
 
-  if (SDValue V = visitSDIVLike(N0, N1, N))
+  if (SDValue V = visitSDIVLike(N0, N1, N)) {
+    // If the corresponding remainder node exists, update its users with
+    // (Dividend - (Quotient * Divisor)).
+    if (SDNode *RemNode = DAG.getNodeIfExists(ISD::SREM, N->getVTList(),
+                                              { N0, N1 })) {
+      SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
+      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
+      AddToWorklist(Mul.getNode());
+      AddToWorklist(Sub.getNode());
+      CombineTo(RemNode, Sub);
+    }
     return V;
+  }
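
The expression handed to the SREM users is the standard division identity,
x % d == x - (x / d) * d for any d != 0:

// The identity behind CombineTo(RemNode, Sub), as plain C++.
static int rem_via_div(int X, int D) { return X - (X / D) * D; }
// e.g. rem_via_div(23, 7) == 2 == 23 % 7. The same rewrite is applied to the
// unsigned UDIV/UREM pair in the next hunk.
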
 
   // sdiv, srem -> sdivrem
   // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
@@ -3288,8 +3387,19 @@
   if (SDValue NewSel = foldBinOpIntoSelect(N))
     return NewSel;
 
-  if (SDValue V = visitUDIVLike(N0, N1, N))
+  if (SDValue V = visitUDIVLike(N0, N1, N)) {
+    // If the corresponding remainder node exists, update its users with
+    // (Dividend - (Quotient * Divisor)).
+    if (SDNode *RemNode = DAG.getNodeIfExists(ISD::UREM, N->getVTList(),
+                                              { N0, N1 })) {
+      SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
+      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
+      AddToWorklist(Mul.getNode());
+      AddToWorklist(Sub.getNode());
+      CombineTo(RemNode, Sub);
+    }
     return V;
+  }
 
   // sdiv, srem -> sdivrem
   // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
@@ -3408,6 +3518,11 @@
     SDValue OptimizedDiv =
         isSigned ? visitSDIVLike(N0, N1, N) : visitUDIVLike(N0, N1, N);
     if (OptimizedDiv.getNode()) {
+      // If the equivalent Div node also exists, update its users.
+      unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
+      if (SDNode *DivNode = DAG.getNodeIfExists(DivOpcode, N->getVTList(),
+                                                { N0, N1 }))
+        CombineTo(DivNode, OptimizedDiv);
       SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1);
       SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
       AddToWorklist(OptimizedDiv.getNode());
@@ -3731,13 +3846,12 @@
     if (!N0.hasOneUse() && !N1.hasOneUse())
       return SDValue();
     // We need matching integer source types.
-    // Do not hoist logic op inside of a vector extend, since it may combine
-    // into a vsetcc.
-    // TODO: Should the vector check apply to truncate though?
-    if (VT.isVector() || XVT != Y.getValueType())
+    if (XVT != Y.getValueType())
       return SDValue();
-    // Don't create an illegal op during or after legalization.
-    if (LegalOperations && !TLI.isOperationLegal(LogicOpcode, XVT))
+    // Don't create an illegal op during or after legalization. Don't ever
+    // create an unsupported vector op.
+    if ((VT.isVector() || LegalOperations) &&
+        !TLI.isOperationLegalOrCustom(LogicOpcode, XVT))
       return SDValue();
     // Avoid infinite looping with PromoteIntBinOp.
     // TODO: Should we apply desirable/legal constraints to all opcodes?
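
The guarded transform hoists a logic op above matching extends. On scalars the
equivalence it relies on looks like this (illustrative):

#include <cstdint>

// AND commutes with zero-extension, so the logic op can run in the narrow
// type: (and (zext x), (zext y)) == (zext (and x, y)).
static uint32_t extend_then_and(uint8_t X, uint8_t Y) {
  return uint32_t(X) & uint32_t(Y);
}
static uint32_t and_then_extend(uint8_t X, uint8_t Y) {
  return uint32_t(uint8_t(X & Y));
}
// The two agree for all X and Y. The relaxed check above now admits vector
// types too, but only when the narrow op is legal or custom for the target.
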
@@ -3755,11 +3869,8 @@
     // instructions without eliminating anything.
     if (!N0.hasOneUse() && !N1.hasOneUse())
       return SDValue();
-    // We need matching integer source types.
-    // Do not hoist logic op inside of a vector extend, since it may combine
-    // into a vsetcc.
-    // TODO: Should the vector check apply to truncate though?
-    if (VT.isVector() || XVT != Y.getValueType())
+    // We need matching source types.
+    if (XVT != Y.getValueType())
       return SDValue();
     // Don't create an illegal op during or after legalization.
     if (LegalOperations && !TLI.isOperationLegal(LogicOpcode, XVT))
@@ -4613,9 +4724,8 @@
     if (SDValue Res = ReduceLoadWidth(N)) {
       LoadSDNode *LN0 = N0->getOpcode() == ISD::ANY_EXTEND
         ? cast<LoadSDNode>(N0.getOperand(0)) : cast<LoadSDNode>(N0);
-
       AddToWorklist(N);
-      CombineTo(LN0, Res, Res.getValue(1));
+      DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 0), Res);
       return SDValue(N, 0);
     }
   }
@@ -5178,12 +5288,12 @@
     return ROR;
 
   // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
-  // iff (c1 & c2) != 0.
-  auto MatchIntersect = [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
-    return LHS->getAPIntValue().intersects(RHS->getAPIntValue());
+  // iff (c1 & c2) != 0 or c1/c2 are undef.
+  auto MatchIntersect = [](ConstantSDNode *C1, ConstantSDNode *C2) {
+    return !C1 || !C2 || C1->getAPIntValue().intersects(C2->getAPIntValue());
   };
   if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
-      ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect)) {
+      ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect, true)) {
     if (SDValue COR = DAG.FoldConstantArithmetic(
             ISD::OR, SDLoc(N1), VT, N1.getNode(), N0.getOperand(1).getNode())) {
       SDValue IOR = DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1);
@@ -5390,8 +5500,7 @@
   unsigned MaskLoBits = 0;
   if (Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
     if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
-      KnownBits Known;
-      DAG.computeKnownBits(Neg.getOperand(0), Known);
+      KnownBits Known = DAG.computeKnownBits(Neg.getOperand(0));
       unsigned Bits = Log2_64(EltSize);
       if (NegC->getAPIntValue().getActiveBits() <= Bits &&
           ((NegC->getAPIntValue() | Known.Zero).countTrailingOnes() >= Bits)) {
@@ -5413,8 +5522,7 @@
   // Pos'.  The truncation is redundant for the purpose of the equality.
   if (MaskLoBits && Pos.getOpcode() == ISD::AND) {
     if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) {
-      KnownBits Known;
-      DAG.computeKnownBits(Pos.getOperand(0), Known);
+      KnownBits Known = DAG.computeKnownBits(Pos.getOperand(0));
       if (PosC->getAPIntValue().getActiveBits() <= MaskLoBits &&
           ((PosC->getAPIntValue() | Known.Zero).countTrailingOnes() >=
            MaskLoBits))
@@ -6521,7 +6629,8 @@
   //                               (and (srl x, (sub c1, c2), MASK)
   // Only fold this if the inner shift has no other uses -- if it does, folding
   // this will increase the total number of instructions.
-  if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
+  if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
+      TLI.shouldFoldShiftPairToMask(N, Level)) {
     if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
       uint64_t c1 = N0C1->getZExtValue();
       if (c1 < OpSizeInBits) {
@@ -6869,8 +6978,7 @@
   // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
   if (N1C && N0.getOpcode() == ISD::CTLZ &&
       N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
-    KnownBits Known;
-    DAG.computeKnownBits(N0.getOperand(0), Known);
+    KnownBits Known = DAG.computeKnownBits(N0.getOperand(0));
 
     // If any of the input bits are KnownOne, then the input couldn't be all
     // zeros, thus the result of the srl will always be zero.
@@ -6966,8 +7074,10 @@
 
   // fold (fshl N0, N1, 0) -> N0
   // fold (fshr N0, N1, 0) -> N1
-  if (DAG.MaskedValueIsZero(N2, APInt::getAllOnesValue(BitWidth)))
-    return IsFSHL ? N0 : N1;
+  if (isPowerOf2_32(BitWidth))
+    if (DAG.MaskedValueIsZero(
+            N2, APInt(N2.getScalarValueSizeInBits(), BitWidth - 1)))
+      return IsFSHL ? N0 : N1;
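
The new isPowerOf2_32 guard matters because testing the shift amount against
the mask BitWidth - 1 only detects multiples of BitWidth when BitWidth is a
power of two:

// For BitWidth = 24 the masked test misfires: 32 & 23 == 0, yet 32 % 24 == 8.
static_assert((32 & (24 - 1)) == 0, "mask test claims a multiple of 24");
static_assert(32 % 24 != 0, "but 32 is not a multiple of 24");
// For power-of-two widths, amt & (BitWidth - 1) == amt % BitWidth, so a zero
// masked value really does mean the funnel shift is a no-op.
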
 
   // fold (fsh* N0, N1, c) -> (fsh* N0, N1, c % BitWidth)
   if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) {
@@ -8065,10 +8175,15 @@
   unsigned NumElts = VT.getVectorNumElements();
   SDLoc DL(N);
 
-  for (unsigned i=0; i != NumElts; ++i) {
-    SDValue Op = N0->getOperand(i);
-    if (Op->isUndef()) {
-      Elts.push_back(DAG.getUNDEF(SVT));
+  // For zero-extensions, UNDEF elements are still guaranteed to have their
+  // upper bits set to zero.
+  bool IsZext =
+      Opcode == ISD::ZERO_EXTEND || Opcode == ISD::ZERO_EXTEND_VECTOR_INREG;
+
+  for (unsigned i = 0; i != NumElts; ++i) {
+    SDValue Op = N0.getOperand(i);
+    if (Op.isUndef()) {
+      Elts.push_back(IsZext ? DAG.getConstant(0, DL, SVT) : DAG.getUNDEF(SVT));
       continue;
     }
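
The IsZext special case pins undef lanes to zero rather than propagating
undef; a hand-written shape of the fold (illustrative pseudo-DAG):

// zext <2 x i8> <i8 7, i8 undef> to <2 x i16>
//   --> build_vector <i16 7, i16 0>
// Folding the lane to plain undef would drop the zero-extension's guarantee
// that the lane's high bits are zero; materializing 0 is a legal refinement
// of the undef low bits that preserves it.
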
 
@@ -8700,7 +8815,7 @@
                          KnownBits &Known) {
   if (N->getOpcode() == ISD::TRUNCATE) {
     Op = N->getOperand(0);
-    DAG.computeKnownBits(Op, Known);
+    Known = DAG.computeKnownBits(Op);
     return true;
   }
 
@@ -8720,7 +8835,7 @@
   else
     return false;
 
-  DAG.computeKnownBits(Op, Known);
+  Known = DAG.computeKnownBits(Op);
 
   return (Known.Zero | 1).isAllOnesValue();
 }
@@ -9370,18 +9485,15 @@
     if (DAG.getDataLayout().isBigEndian())
       ShAmt = AdjustBigEndianShift(ShAmt);
 
-    // We're using a shifted mask, so the load now has an offset. This means we
-    // now need to shift right the mask to match the new load and then shift
-    // right the result of the AND.
-    const APInt &Mask = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
-    APInt ShiftedMask = Mask.lshr(ShAmt);
-    DAG.UpdateNodeOperands(N, Result, DAG.getConstant(ShiftedMask, DL, VT));
+    // We're using a shifted mask, so the load now has an offset. This means
+    // the data has been loaded into lower bytes of the register than it would
+    // have been before, so we need to shl the loaded value back into the
+    // correct position.
     SDValue ShiftC = DAG.getConstant(ShAmt, DL, VT);
-    SDValue Shifted = DAG.getNode(ISD::SHL, DL, VT, SDValue(N, 0),
-                                  ShiftC);
-    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Shifted);
-    DAG.UpdateNodeOperands(Shifted.getNode(), SDValue(N, 0), ShiftC);
+    Result = DAG.getNode(ISD::SHL, DL, VT, Result, ShiftC);
+    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
   }
+
   // Return the new loaded value.
   return Result;
 }
@@ -9413,12 +9525,15 @@
 
   // fold (sext_in_reg (sext x)) -> (sext x)
   // fold (sext_in_reg (aext x)) -> (sext x)
-  // if x is small enough.
+  // if x is small enough or if we know that x has more than 1 sign bit and the
+  // sign_extend_inreg is extending from one of them.
   if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
     SDValue N00 = N0.getOperand(0);
-    if (N00.getScalarValueSizeInBits() <= EVTBits &&
+    unsigned N00Bits = N00.getScalarValueSizeInBits();
+    if ((N00Bits <= EVTBits ||
+         (N00Bits - DAG.ComputeNumSignBits(N00)) < EVTBits) &&
         (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
-      return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
+      return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00);
   }
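
A worked instance of the new sign-bit condition (numbers illustrative):

// Suppose N00 : i32 = sext i13 %v, so ComputeNumSignBits(N00) = 32 - 13 + 1
// = 20, and the sext_in_reg extends from i16 (EVTBits = 16). Then
//   N00Bits - SignBits = 32 - 20 = 12 < 16 = EVTBits,
// i.e. bit 15 of N00 is already a copy of the sign bit, so the sext_in_reg
// is redundant and the node folds to a plain sign_extend of N00.
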
 
   // fold (sext_in_reg (*_extend_vector_inreg x)) -> (sext_vector_inreg x)
@@ -9641,8 +9756,7 @@
       (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::SHL, VT)) &&
       TLI.isTypeDesirableForOp(ISD::SHL, VT)) {
     SDValue Amt = N0.getOperand(1);
-    KnownBits Known;
-    DAG.computeKnownBits(Amt, Known);
+    KnownBits Known = DAG.computeKnownBits(Amt);
     unsigned Size = VT.getScalarSizeInBits();
     if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
       SDLoc SL(N);
@@ -9824,8 +9938,7 @@
   // because targets may prefer a wider type during later combines and invert
   // this transform.
   switch (N0.getOpcode()) {
-  // TODO: Add case for ADD - that will likely require a change in logic here
-  // or target-specific changes to avoid regressions.
+  case ISD::ADD:
   case ISD::SUB:
   case ISD::MUL:
   case ISD::AND:
@@ -10157,7 +10270,7 @@
   // float vectors bitcast to integer vectors) into shuffles.
   // bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1)
   if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() &&
-      N0->getOpcode() == ISD::VECTOR_SHUFFLE &&
+      N0->getOpcode() == ISD::VECTOR_SHUFFLE && N0.hasOneUse() &&
       VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() &&
       !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) {
     ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0);
@@ -15551,6 +15664,40 @@
   return SDValue(EVE, 0);
 }
 
+/// Transform a vector binary operation into a scalar binary operation by moving
+/// the math/logic after an extract element of a vector.
+static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG,
+                                       bool LegalOperations) {
+  SDValue Vec = ExtElt->getOperand(0);
+  SDValue Index = ExtElt->getOperand(1);
+  auto *IndexC = dyn_cast<ConstantSDNode>(Index);
+  if (!IndexC || !ISD::isBinaryOp(Vec.getNode()) || !Vec.hasOneUse())
+    return SDValue();
+
+  // Targets may want to avoid this to prevent an expensive register transfer.
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  if (!TLI.shouldScalarizeBinop(Vec))
+    return SDValue();
+
+  // Extracting an element of a vector constant is constant-folded, so this
+  // transform is just replacing a vector op with a scalar op while moving the
+  // extract.
+  SDValue Op0 = Vec.getOperand(0);
+  SDValue Op1 = Vec.getOperand(1);
+  if (isAnyConstantBuildVector(Op0, true) ||
+      isAnyConstantBuildVector(Op1, true)) {
+    // extractelt (binop X, C), IndexC --> binop (extractelt X, IndexC), C'
+    // extractelt (binop C, X), IndexC --> binop C', (extractelt X, IndexC)
+    SDLoc DL(ExtElt);
+    EVT VT = ExtElt->getValueType(0);
+    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Index);
+    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op1, Index);
+    return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1);
+  }
+
+  return SDValue();
+}
+
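
The shape of the transform on a concrete (illustrative) DAG:

// extractelt (add <4 x i32> %X, <i32 1, i32 2, i32 3, i32 4>), i32 2
//   --> add (extractelt <4 x i32> %X, i32 2), i32 3
// The vector add becomes a scalar add; extracting lane 2 of the constant
// operand constant-folds to 3 (the C' in the comments above), which is why
// one operand must be a constant or constant build vector.
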
 SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
   SDValue VecOp = N->getOperand(0);
   SDValue Index = N->getOperand(1);
@@ -15641,6 +15788,9 @@
     }
   }
 
+  if (SDValue BO = scalarizeExtractedBinop(N, DAG, LegalOperations))
+    return BO;
+
   // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
   // We only perform this optimization before the op legalization phase because
   // we may introduce new vector instructions which are not backed by TD
@@ -15733,14 +15883,13 @@
     ExtVT = BCVT.getVectorElementType();
   }
 
-  // (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size)
+  // extract (vector load $addr), i --> load $addr + i * size
   if (!LegalOperations && !IndexC && VecOp.hasOneUse() &&
       ISD::isNormalLoad(VecOp.getNode()) &&
-      !N->getOperand(1)->hasPredecessor(VecOp.getNode())) {
-    SDValue Index = N->getOperand(1);
-    if (auto *OrigLoad = dyn_cast<LoadSDNode>(VecOp))
-      if (!OrigLoad->isVolatile())
-        return scalarizeExtractedVectorLoad(N, VecVT, Index, OrigLoad);
+      !Index->hasPredecessor(VecOp.getNode())) {
+    auto *VecLoad = dyn_cast<LoadSDNode>(VecOp);
+    if (VecLoad && !VecLoad->isVolatile())
+      return scalarizeExtractedVectorLoad(N, VecVT, Index, VecLoad);
   }
 
   // Perform only after legalization to ensure build_vector / vector_shuffle
@@ -16042,6 +16191,78 @@
   return Shuffle;
 }
 
+static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {
+  assert(BV->getOpcode() == ISD::BUILD_VECTOR && "Expected build vector");
+
+  // First, determine where the build vector is not undef.
+  // TODO: We could extend this to handle zero elements as well as undefs.
+  int NumBVOps = BV->getNumOperands();
+  int ZextElt = -1;
+  for (int i = 0; i != NumBVOps; ++i) {
+    SDValue Op = BV->getOperand(i);
+    if (Op.isUndef())
+      continue;
+    if (ZextElt == -1)
+      ZextElt = i;
+    else
+      return SDValue();
+  }
+  // Bail out if there's no non-undef element.
+  if (ZextElt == -1)
+    return SDValue();
+
+  // The build vector contains some number of undef elements and exactly
+  // one other element. That other element must be a zero-extended scalar
+  // extracted from a vector at a constant index to turn this into a shuffle.
+  // TODO: This could be enhanced to allow ANY_EXTEND as well as ZERO_EXTEND.
+  SDValue Zext = BV->getOperand(ZextElt);
+  if (Zext.getOpcode() != ISD::ZERO_EXTEND || !Zext.hasOneUse() ||
+      Zext.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      !isa<ConstantSDNode>(Zext.getOperand(0).getOperand(1)))
+    return SDValue();
+
+  // The zero-extend must be a multiple of the source size.
+  SDValue Extract = Zext.getOperand(0);
+  unsigned DestSize = Zext.getValueSizeInBits();
+  unsigned SrcSize = Extract.getValueSizeInBits();
+  if (DestSize % SrcSize != 0)
+    return SDValue();
+
+  // Create a shuffle mask that will combine the extracted element with zeros
+  // and undefs.
+  int ZextRatio = DestSize / SrcSize;
+  int NumMaskElts = NumBVOps * ZextRatio;
+  SmallVector<int, 32> ShufMask(NumMaskElts, -1);
+  for (int i = 0; i != NumMaskElts; ++i) {
+    if (i / ZextRatio == ZextElt) {
+      // The low bits of the (potentially translated) extracted element map to
+      // the source vector. The high bits map to zero. We will use a zero vector
+      // as the 2nd source operand of the shuffle, so use the 1st element of
+      // that vector (mask value is number-of-elements) for the high bits.
+      if (i % ZextRatio == 0)
+        ShufMask[i] = Extract.getConstantOperandVal(1);
+      else
+        ShufMask[i] = NumMaskElts;
+    }
+
+    // Undef elements of the build vector remain undef because we initialize
+    // the shuffle mask with -1.
+  }
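+
+  // Illustrative example: NumBVOps = 4, ZextRatio = 2, ZextElt = 1, and
+  // extract index 3 produce the mask <-1,-1,3,8,-1,-1,-1,-1>.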
+
+  // Turn this into a shuffle with zero if that's legal.
+  EVT VecVT = Extract.getOperand(0).getValueType();
+  if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(ShufMask, VecVT))
+    return SDValue();
+
+  // buildvec undef, ..., (zext (extractelt V, IndexC)), undef... -->
+  // bitcast (shuffle V, ZeroVec, VectorMask)
+  SDLoc DL(BV);
+  SDValue ZeroVec = DAG.getConstant(0, DL, VecVT);
+  SDValue Shuf = DAG.getVectorShuffle(VecVT, DL, Extract.getOperand(0), ZeroVec,
+                                      ShufMask);
+  return DAG.getBitcast(BV->getValueType(0), Shuf);
+}
+
 // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
 // operations. If the types of the vectors we're extracting from allow it,
 // turn this into a vector_shuffle node.
@@ -16053,6 +16274,9 @@
   if (!isTypeLegal(VT))
     return SDValue();
 
+  if (SDValue V = reduceBuildVecToShuffleWithZero(N, DAG))
+    return V;
+
   // May only combine to shuffle after legalize if shuffle is legal.
   if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
     return SDValue();
@@ -16846,9 +17070,9 @@
   //    Vi if possible
   // Only operand 0 is checked as 'concat' assumes all inputs of the same
   // type.
-  if (V->getOpcode() == ISD::CONCAT_VECTORS &&
+  if (V.getOpcode() == ISD::CONCAT_VECTORS &&
       isa<ConstantSDNode>(N->getOperand(1)) &&
-      V->getOperand(0).getValueType() == NVT) {
+      V.getOperand(0).getValueType() == NVT) {
     unsigned Idx = N->getConstantOperandVal(1);
     unsigned NumElems = NVT.getVectorNumElements();
     assert((Idx % NumElems) == 0 &&
@@ -16859,9 +17083,9 @@
   V = peekThroughBitcasts(V);
 
   // If the input is a build vector. Try to make a smaller build vector.
-  if (V->getOpcode() == ISD::BUILD_VECTOR) {
+  if (V.getOpcode() == ISD::BUILD_VECTOR) {
     if (auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
-      EVT InVT = V->getValueType(0);
+      EVT InVT = V.getValueType();
       unsigned ExtractSize = NVT.getSizeInBits();
       unsigned EltSize = InVT.getScalarSizeInBits();
       // Only do this if we won't split any elements.
@@ -16894,16 +17118,16 @@
     }
   }
 
-  if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
+  if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
     // Handle only simple case where vector being inserted and vector
     // being extracted are of same size.
-    EVT SmallVT = V->getOperand(1).getValueType();
+    EVT SmallVT = V.getOperand(1).getValueType();
     if (!NVT.bitsEq(SmallVT))
       return SDValue();
 
     // Only handle cases where both indexes are constants.
-    ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
-    ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));
+    auto *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
+    auto *InsIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
 
     if (InsIdx && ExtIdx) {
       // Combine:
@@ -16913,11 +17137,11 @@
       //    otherwise => (extract_subvec V1, ExtIdx)
       if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() ==
           ExtIdx->getZExtValue() * NVT.getScalarSizeInBits())
-        return DAG.getBitcast(NVT, V->getOperand(1));
+        return DAG.getBitcast(NVT, V.getOperand(1));
       return DAG.getNode(
           ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT,
-          DAG.getBitcast(N->getOperand(0).getValueType(), V->getOperand(0)),
-          N->getOperand(1));
+          DAG.getBitcast(N->getOperand(0).getValueType(), V.getOperand(0)),
+          N->getOperand(1));
     }
   }
 
@@ -17027,8 +17251,8 @@
     if (!N1->hasOneUse())
       return SDValue();
 
-    bool N0AnyConst = isAnyConstantBuildVector(N0.getNode());
-    bool N1AnyConst = isAnyConstantBuildVector(N1.getNode());
+    bool N0AnyConst = isAnyConstantBuildVector(N0);
+    bool N1AnyConst = isAnyConstantBuildVector(N1);
     if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode()))
       return SDValue();
     if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode()))
@@ -17094,8 +17318,7 @@
 static SDValue combineShuffleToVectorExtend(ShuffleVectorSDNode *SVN,
                                             SelectionDAG &DAG,
                                             const TargetLowering &TLI,
-                                            bool LegalOperations,
-                                            bool LegalTypes) {
+                                            bool LegalOperations) {
   EVT VT = SVN->getValueType(0);
   bool IsBigEndian = DAG.getDataLayout().isBigEndian();
 
@@ -17131,7 +17354,9 @@
 
     EVT OutSVT = EVT::getIntegerVT(*DAG.getContext(), EltSizeInBits * Scale);
     EVT OutVT = EVT::getVectorVT(*DAG.getContext(), OutSVT, NumElts / Scale);
-    if (!LegalTypes || TLI.isTypeLegal(OutVT))
+    // Never create an illegal type. Only create unsupported operations if we
+    // are pre-legalization.
+    if (TLI.isTypeLegal(OutVT))
       if (!LegalOperations ||
           TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND_VECTOR_INREG, OutVT))
         return DAG.getBitcast(VT,
@@ -17441,7 +17666,7 @@
     return SDValue(N, 0);
 
   // Match shuffles that can be converted to any_vector_extend_in_reg.
-  if (SDValue V = combineShuffleToVectorExtend(SVN, DAG, TLI, LegalOperations, LegalTypes))
+  if (SDValue V = combineShuffleToVectorExtend(SVN, DAG, TLI, LegalOperations))
     return V;
 
   // Combine "truncate_vector_in_reg" style shuffles.
@@ -17770,6 +17995,14 @@
     return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
                        N1, N2);
 
+  // Eliminate an intermediate insert into an undef vector:
+  // insert_subvector undef, (insert_subvector undef, X, 0), N2 -->
+  // insert_subvector undef, X, N2
+  if (N0.isUndef() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      N1.getOperand(0).isUndef() && isNullConstant(N1.getOperand(2)))
+    return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0,
+                       N1.getOperand(1), N2);
+
   if (!isa<ConstantSDNode>(N2))
     return SDValue();
 
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index d5f066c..a9a3c44 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -547,6 +547,15 @@
   assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
          "Invalid iterator!");
   while (I != E) {
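+    // Keep the cached instruction positions valid: if any of them points at
+    // the instruction about to be erased, move it to the end of the erased
+    // range so it is not left dangling.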
+    if (LastFlushPoint == I)
+      LastFlushPoint = E;
+    if (SavedInsertPt == I)
+      SavedInsertPt = E;
+    if (EmitStartPt == I)
+      EmitStartPt = E.isValid() ? &*E : nullptr;
+    if (LastLocalValue == I)
+      LastLocalValue = E.isValid() ? &*E : nullptr;
+
     MachineInstr *Dead = &*I;
     ++I;
     Dead->eraseFromParent();
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index da6d973..6a61146 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -652,7 +652,9 @@
   const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
   MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
   unsigned NumOps = Node->getNumOperands();
-  // REG_SEQUENCE can "inherit" a chain from a subnode.
+  // If the input pattern has a chain, then the root of the corresponding
+  // output pattern will get a chain as well. That root can happen to be a
+  // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
   if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
     --NumOps; // Ignore chain if it exists.
 
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 9d6a693..d3aea37 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2645,6 +2645,10 @@
   SDValue Tmp1, Tmp2, Tmp3, Tmp4;
   bool NeedInvert;
   switch (Node->getOpcode()) {
+  case ISD::ABS:
+    if (TLI.expandABS(Node, Tmp1, DAG))
+      Results.push_back(Tmp1);
+    break;
   case ISD::CTPOP:
     if (TLI.expandCTPOP(Node, Tmp1, DAG))
       Results.push_back(Tmp1);
@@ -3283,14 +3287,12 @@
   case ISD::SADDSAT:
   case ISD::UADDSAT:
   case ISD::SSUBSAT:
-  case ISD::USUBSAT: {
-    Results.push_back(TLI.getExpandedSaturationAdditionSubtraction(Node, DAG));
+  case ISD::USUBSAT:
+    Results.push_back(TLI.expandAddSubSat(Node, DAG));
     break;
-  }
-  case ISD::SMULFIX: {
+  case ISD::SMULFIX:
     Results.push_back(TLI.getExpandedFixedPointMultiplication(Node, DAG));
     break;
-  }
   case ISD::SADDO:
   case ISD::SSUBO: {
     SDValue LHS = Node->getOperand(0);
@@ -3683,10 +3685,7 @@
     (void)Legalized;
     assert(Legalized && "Can't legalize BR_CC with legal condition!");
 
-    // If we expanded the SETCC by inverting the condition code, then wrap
-    // the existing SETCC in a NOT to restore the intended condition.
-    if (NeedInvert)
-      Tmp4 = DAG.getNOT(dl, Tmp4, Tmp4->getValueType(0));
+    assert(!NeedInvert && "Don't know how to invert BR_CC!");
 
     // If we expanded the SETCC by swapping LHS and RHS, create a new BR_CC
     // node.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 866744c..4644e95 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -2143,9 +2143,9 @@
   SDValue TrueVal = GetPromotedFloat(N->getOperand(2));
   SDValue FalseVal = GetPromotedFloat(N->getOperand(3));
 
-  return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
-                     N->getOperand(0), N->getOperand(1), TrueVal, FalseVal,
-                     N->getOperand(4));
+  return DAG.getNode(ISD::SELECT_CC, SDLoc(N),
+                     TrueVal.getNode()->getValueType(0), N->getOperand(0),
+                     N->getOperand(1), TrueVal, FalseVal, N->getOperand(4));
 }
 
 // Construct a SDNode that transforms the SINT or UINT operand to the promoted
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 96d1c3d..5fbc70f 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -140,6 +140,8 @@
   case ISD::SMULO:
   case ISD::UMULO:       Res = PromoteIntRes_XMULO(N, ResNo); break;
 
+  case ISD::ADDE:
+  case ISD::SUBE:
   case ISD::ADDCARRY:
   case ISD::SUBCARRY:    Res = PromoteIntRes_ADDSUBCARRY(N, ResNo); break;
 
@@ -439,8 +441,26 @@
 SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) {
   SDLoc dl(N);
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
-  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NVT, N->getOperand(0),
-                     N->getOperand(1));
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+
+  // If the input also needs to be promoted, do that first so we can get a
+  // good idea of the output type.
+  if (TLI.getTypeAction(*DAG.getContext(), Op0.getValueType())
+      == TargetLowering::TypePromoteInteger) {
+    SDValue In = GetPromotedInteger(Op0);
+
+    // If the new type is larger than NVT, use it. We probably won't need to
+    // promote it again.
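+    // Illustrative example: if Op0 : v2i8 were promoted to v2i64 and NVT is
+    // i32, we extract an i64 element and truncate the result to i32.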
+    EVT SVT = In.getValueType().getScalarType();
+    if (SVT.bitsGE(NVT)) {
+      SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, In, Op1);
+      return DAG.getAnyExtOrTrunc(Ext, dl, NVT);
+    }
+  }
+
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NVT, Op0, Op1);
 }
 
 SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
@@ -590,7 +610,7 @@
   SDLoc dl(N);
   SDValue Op1 = N->getOperand(0);
   SDValue Op2 = N->getOperand(1);
-  unsigned OldBits = Op1.getValueSizeInBits();
+  unsigned OldBits = Op1.getScalarValueSizeInBits();
 
   unsigned Opcode = N->getOpcode();
   unsigned ShiftOp;
@@ -612,7 +632,7 @@
   SDValue Op2Promoted = GetPromotedInteger(Op2);
 
   EVT PromotedType = Op1Promoted.getValueType();
-  unsigned NewBits = Op1Promoted.getValueSizeInBits();
+  unsigned NewBits = PromotedType.getScalarSizeInBits();
   unsigned SHLAmount = NewBits - OldBits;
   EVT SHVT = TLI.getShiftAmountTy(PromotedType, DAG.getDataLayout());
   SDValue ShiftAmount = DAG.getConstant(SHLAmount, dl, SHVT);
@@ -865,6 +885,9 @@
   return Res;
 }
 
+// Handle promotion for the ADDE/SUBE/ADDCARRY/SUBCARRY nodes. Notice that
+// the third operand of ADDE/SUBE nodes is a carry flag, whereas the third
+// operand of ADDCARRY/SUBCARRY nodes is a carry Boolean.
 SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBCARRY(SDNode *N, unsigned ResNo) {
   if (ResNo == 1)
     return PromoteIntRes_Overflow(N);
@@ -1711,8 +1734,7 @@
   SDLoc dl(N);
 
   APInt HighBitMask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
-  KnownBits Known;
-  DAG.computeKnownBits(N->getOperand(1), Known);
+  KnownBits Known = DAG.computeKnownBits(N->getOperand(1));
 
   // If we don't know anything about the high bits, exit.
   if (((Known.Zero|Known.One) & HighBitMask) == 0)
@@ -2555,7 +2577,7 @@
 
 void DAGTypeLegalizer::ExpandIntRes_ADDSUBSAT(SDNode *N, SDValue &Lo,
                                               SDValue &Hi) {
-  SDValue Result = TLI.getExpandedSaturationAdditionSubtraction(N, DAG);
+  SDValue Result = TLI.expandAddSubSat(N, DAG);
   SplitInteger(Result, Lo, Hi);
 }
 
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 4df02c6..4923a52 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -117,6 +117,12 @@
   /// the remaining lanes, finally bitcasting to the proper type.
   SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op);
 
+  /// Implement expand-based legalization of ABS vector operations.
+  /// If the following expansion is legal/custom then do it:
+  /// (ABS x) --> (XOR (ADD x, (SRA x, sizeof(x)-1)), (SRA x, sizeof(x)-1))
+  /// otherwise unroll the operation.
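+  /// For example, for i8 x = -5: the SRA yields -1, the ADD yields -6, and
+  /// XOR(-6, -1) yields 5.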
+  SDValue ExpandABS(SDValue Op);
+
   /// Expand bswap of vectors into a shuffle if legal.
   SDValue ExpandBSWAP(SDValue Op);
 
@@ -135,6 +141,7 @@
   SDValue ExpandFunnelShift(SDValue Op);
   SDValue ExpandROT(SDValue Op);
   SDValue ExpandFMINNUM_FMAXNUM(SDValue Op);
+  SDValue ExpandAddSubSat(SDValue Op);
   SDValue ExpandStrictFPOp(SDValue Op);
 
   /// Implements vector promotion.
@@ -351,8 +358,11 @@
   case ISD::SHL:
   case ISD::SRA:
   case ISD::SRL:
+  case ISD::FSHL:
+  case ISD::FSHR:
   case ISD::ROTL:
   case ISD::ROTR:
+  case ISD::ABS:
   case ISD::BSWAP:
   case ISD::BITREVERSE:
   case ISD::CTLZ:
@@ -747,6 +757,8 @@
     return ExpandFSUB(Op);
   case ISD::SETCC:
     return UnrollVSETCC(Op);
+  case ISD::ABS:
+    return ExpandABS(Op);
   case ISD::BITREVERSE:
     return ExpandBITREVERSE(Op);
   case ISD::CTPOP:
@@ -766,6 +778,11 @@
   case ISD::FMINNUM:
   case ISD::FMAXNUM:
     return ExpandFMINNUM_FMAXNUM(Op);
+  case ISD::USUBSAT:
+  case ISD::SSUBSAT:
+  case ISD::UADDSAT:
+  case ISD::SADDSAT:
+    return ExpandAddSubSat(Op);
   case ISD::STRICT_FADD:
   case ISD::STRICT_FSUB:
   case ISD::STRICT_FMUL:
@@ -1062,6 +1079,16 @@
   return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
 }
 
+SDValue VectorLegalizer::ExpandABS(SDValue Op) {
+  // Attempt to expand using TargetLowering.
+  SDValue Result;
+  if (TLI.expandABS(Op.getNode(), Result, DAG))
+    return Result;
+
+  // Otherwise go ahead and unroll.
+  return DAG.UnrollVectorOp(Op.getNode());
+}
+
 SDValue VectorLegalizer::ExpandFP_TO_UINT(SDValue Op) {
   // Attempt to expand using TargetLowering.
   SDValue Result;
@@ -1185,6 +1212,12 @@
   return DAG.UnrollVectorOp(Op.getNode());
 }
 
+SDValue VectorLegalizer::ExpandAddSubSat(SDValue Op) {
+  if (SDValue Expanded = TLI.expandAddSubSat(Op.getNode(), DAG))
+    return Expanded;
+  return DAG.UnrollVectorOp(Op.getNode());
+}
+
 SDValue VectorLegalizer::ExpandStrictFPOp(SDValue Op) {
   EVT VT = Op.getValueType();
   EVT EltVT = VT.getVectorElementType();
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index a40618b..f367e93 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2425,6 +2425,10 @@
   case ISD::SMAX:
   case ISD::UMIN:
   case ISD::UMAX:
+  case ISD::UADDSAT:
+  case ISD::SADDSAT:
+  case ISD::USUBSAT:
+  case ISD::SSUBSAT:
     Res = WidenVecRes_Binary(N);
     break;
 
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index ceb8689..647496c 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -87,6 +87,8 @@
 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
 
+void SelectionDAG::DAGNodeDeletedListener::anchor() {}
+
 #define DEBUG_TYPE "selectiondag"
 
 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
@@ -269,15 +271,24 @@
 }
 
 bool ISD::matchUnaryPredicate(SDValue Op,
-                              std::function<bool(ConstantSDNode *)> Match) {
+                              std::function<bool(ConstantSDNode *)> Match,
+                              bool AllowUndefs) {
+  // FIXME: Add support for scalar UNDEF cases?
   if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
     return Match(Cst);
 
+  // FIXME: Add support for vector UNDEF cases?
   if (ISD::BUILD_VECTOR != Op.getOpcode())
     return false;
 
   EVT SVT = Op.getValueType().getScalarType();
   for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
+    if (AllowUndefs && Op.getOperand(i).isUndef()) {
+      if (!Match(nullptr))
+        return false;
+      continue;
+    }
+
     auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
     if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
       return false;
@@ -287,26 +298,33 @@
 
 bool ISD::matchBinaryPredicate(
     SDValue LHS, SDValue RHS,
-    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match) {
+    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
+    bool AllowUndefs) {
   if (LHS.getValueType() != RHS.getValueType())
     return false;
 
+  // TODO: Add support for scalar UNDEF cases?
   if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
     if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
       return Match(LHSCst, RHSCst);
 
+  // TODO: Add support for vector UNDEF cases?
   if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
       ISD::BUILD_VECTOR != RHS.getOpcode())
     return false;
 
   EVT SVT = LHS.getValueType().getScalarType();
   for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
-    auto *LHSCst = dyn_cast<ConstantSDNode>(LHS.getOperand(i));
-    auto *RHSCst = dyn_cast<ConstantSDNode>(RHS.getOperand(i));
-    if (!LHSCst || !RHSCst)
+    SDValue LHSOp = LHS.getOperand(i);
+    SDValue RHSOp = RHS.getOperand(i);
+    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
+    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
+    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
+    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
+    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
       return false;
-    if (LHSCst->getValueType(0) != SVT ||
-        LHSCst->getValueType(0) != RHSCst->getValueType(0))
+    if (LHSOp.getValueType() != SVT ||
+        LHSOp.getValueType() != RHSOp.getValueType())
       return false;
     if (!Match(LHSCst, RHSCst))
       return false;
@@ -2102,6 +2120,15 @@
       return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
     break;
   }
+  case ISD::SIGN_EXTEND_INREG:
+    EVT ExVT = cast<VTSDNode>(V.getOperand(1))->getVT();
+    unsigned ExVTBits = ExVT.getScalarSizeInBits();
+
+    // If none of the extended bits are demanded, eliminate the sextinreg.
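+    // Illustrative example: (sextinreg X, i8) with Mask 0x00ff folds to X,
+    // since only the low 8 bits are demanded.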
+    if (Mask.getActiveBits() <= ExVTBits)
+      return V.getOperand(0);
+
+    break;
   }
   return SDValue();
 }
@@ -2679,6 +2706,39 @@
       Known.One.ashrInPlace(Shift);
     }
     break;
+  case ISD::FSHL:
+  case ISD::FSHR:
+    if (ConstantSDNode *C =
+            isConstOrDemandedConstSplat(Op.getOperand(2), DemandedElts)) {
+      unsigned Amt = C->getAPIntValue().urem(BitWidth);
+
+      // For fshl, 0-shift returns the 1st arg.
+      // For fshr, 0-shift returns the 2nd arg.
+      if (Amt == 0) {
+        Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
+                                 DemandedElts, Depth + 1);
+        break;
+      }
+
+      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
+      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
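+      // Illustrative example: fshl i8 X, Y, 3 merges the known bits of
+      // (X << 3) with the known bits of (Y >> 5).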
+      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+      Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+      if (Opcode == ISD::FSHL) {
+        Known.One <<= Amt;
+        Known.Zero <<= Amt;
+        Known2.One.lshrInPlace(BitWidth - Amt);
+        Known2.Zero.lshrInPlace(BitWidth - Amt);
+      } else {
+        Known.One <<= BitWidth - Amt;
+        Known.Zero <<= BitWidth - Amt;
+        Known2.One.lshrInPlace(Amt);
+        Known2.Zero.lshrInPlace(Amt);
+      }
+      Known.One |= Known2.One;
+      Known.Zero |= Known2.Zero;
+    }
+    break;
   case ISD::SIGN_EXTEND_INREG: {
     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
     unsigned EBits = EVT.getScalarSizeInBits();
@@ -2767,7 +2827,15 @@
     Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
     break;
   }
-  // TODO ISD::SIGN_EXTEND_VECTOR_INREG
+  case ISD::SIGN_EXTEND_VECTOR_INREG: {
+    EVT InVT = Op.getOperand(0).getValueType();
+    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
+    // If the sign bit is known to be zero or one, then sext will extend
+    // it to the top bits, else it will just zext.
+    Known = Known.sext(BitWidth);
+    break;
+  }
   case ISD::SIGN_EXTEND: {
     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     // If the sign bit is known to be zero or one, then sext will extend
@@ -3138,11 +3206,9 @@
   if (isNullConstant(N1))
     return OFK_Never;
 
-  KnownBits N1Known;
-  computeKnownBits(N1, N1Known);
+  KnownBits N1Known = computeKnownBits(N1);
   if (N1Known.Zero.getBoolValue()) {
-    KnownBits N0Known;
-    computeKnownBits(N0, N0Known);
+    KnownBits N0Known = computeKnownBits(N0);
 
     bool overflow;
     (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
@@ -3156,8 +3222,7 @@
     return OFK_Never;
 
   if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
-    KnownBits N0Known;
-    computeKnownBits(N0, N0Known);
+    KnownBits N0Known = computeKnownBits(N0);
 
     if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
       return OFK_Never;
@@ -3628,7 +3693,7 @@
     }
     return ComputeNumSignBits(Src, Depth + 1);
   }
-  case ISD::CONCAT_VECTORS:
+  case ISD::CONCAT_VECTORS: {
     // Determine the minimum number of sign bits across all demanded
     // elts of the input vectors. Early out if the result is already 1.
     Tmp = std::numeric_limits<unsigned>::max();
@@ -3646,6 +3711,40 @@
     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
     return Tmp;
   }
+  case ISD::INSERT_SUBVECTOR: {
+    // If we know the element index, demand any elements from the subvector
+    // and the remainder from the src it is inserted into; otherwise demand
+    // them all.
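+    // Illustrative example: insert_subvector v8i32:Src, v2i32:Sub, index 2
+    // takes demanded elements 2-3 from Sub and the rest from Src.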
+    SDValue Src = Op.getOperand(0);
+    SDValue Sub = Op.getOperand(1);
+    auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
+    if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
+      Tmp = std::numeric_limits<unsigned>::max();
+      uint64_t Idx = SubIdx->getZExtValue();
+      APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
+      if (!!DemandedSubElts) {
+        Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
+        if (Tmp == 1) return 1; // early-out
+      }
+      APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
+      APInt DemandedSrcElts = DemandedElts & ~SubMask;
+      if (!!DemandedSrcElts) {
+        Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
+        Tmp = std::min(Tmp, Tmp2);
+      }
+      assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
+      return Tmp;
+    }
+
+    // Not able to determine the index so just assume worst case.
+    Tmp = ComputeNumSignBits(Sub, Depth + 1);
+    if (Tmp == 1) return 1; // early-out
+    Tmp2 = ComputeNumSignBits(Src, Depth + 1);
+    Tmp = std::min(Tmp, Tmp2);
+    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
+    return Tmp;
+  }
+  }
 
   // If we are looking at the loaded value of the SDNode.
   if (Op.getResNo() == 0) {
@@ -4389,6 +4488,10 @@
   case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
   case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
   case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
+  case ISD::SADDSAT: return std::make_pair(C1.sadd_sat(C2), true);
+  case ISD::UADDSAT: return std::make_pair(C1.uadd_sat(C2), true);
+  case ISD::SSUBSAT: return std::make_pair(C1.ssub_sat(C2), true);
+  case ISD::USUBSAT: return std::make_pair(C1.usub_sat(C2), true);
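+  // Illustrative examples on i8: sadd_sat(100, 100) saturates to 127 and
+  // usub_sat(5, 10) saturates to 0.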
   case ISD::UDIV:
     if (!C2.getBoolValue())
       break;
@@ -4728,6 +4831,10 @@
   case ISD::SMAX:
   case ISD::UMIN:
   case ISD::UMAX:
+  case ISD::SADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::UADDSAT:
+  case ISD::USUBSAT:
     assert(VT.isInteger() && "This operator does not apply to FP types!");
     assert(N1.getValueType() == N2.getValueType() &&
            N1.getValueType() == VT && "Binary operator types must match!");
@@ -4809,8 +4916,8 @@
     assert(!EVT.isVector() &&
            "AssertSExt/AssertZExt type should be the vector element type "
            "rather than the vector type!");
-    assert(EVT.bitsLE(VT) && "Not extending!");
-    if (VT == EVT) return N1; // noop assertion.
+    assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
+    if (VT.getScalarType() == EVT) return N1; // noop assertion.
     break;
   }
   case ISD::SIGN_EXTEND_INREG: {
@@ -5075,6 +5182,8 @@
       case ISD::SDIV:
       case ISD::UREM:
       case ISD::SREM:
+      case ISD::SSUBSAT:
+      case ISD::USUBSAT:
         return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0
       }
     }
@@ -5098,8 +5207,12 @@
       return getUNDEF(VT);       // fold op(arg1, undef) -> undef
     case ISD::MUL:
     case ISD::AND:
+    case ISD::SSUBSAT:
+    case ISD::USUBSAT:
       return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0
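+    // Undef may take any value, so usubsat(X, undef) above can fold to 0
+    // (choose undef == X), and uaddsat(X, undef) below folds to all-ones
+    // (choose undef == all-ones).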
     case ISD::OR:
+    case ISD::SADDSAT:
+    case ISD::UADDSAT:
       return getAllOnesConstant(DL, VT);
     }
   }
@@ -6934,11 +7047,11 @@
     return X;
 
   // shift X, C >= bitwidth(X) --> undef
-  // All vector elements must be too big to avoid partial undefs.
+  // All vector elements must be too big (or undef) to avoid partial undefs.
   auto isShiftTooBig = [X](ConstantSDNode *Val) {
-    return Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
+    return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
   };
-  if (ISD::matchUnaryPredicate(Y, isShiftTooBig))
+  if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
     return getUNDEF(X.getValueType());
 
   return SDValue();
@@ -8409,6 +8522,32 @@
   return TokenFactor;
 }
 
+SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
+                                                     Function **OutFunction) {
+  assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
+
+  auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
+  auto *Module = MF->getFunction().getParent();
+  auto *Function = Module->getFunction(Symbol);
+
+  if (OutFunction != nullptr)
+    *OutFunction = Function;
+
+  if (Function != nullptr) {
+    auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
+    return getGlobalAddress(Function, SDLoc(Op), PtrTy);
+  }
+
+  std::string ErrorStr;
+  raw_string_ostream ErrorFormatter(ErrorStr);
+
+  ErrorFormatter << "Undefined external symbol ";
+  ErrorFormatter << '"' << Symbol << '"';
+  ErrorFormatter.flush();
+
+  report_fatal_error(ErrorStr);
+}
+
 //===----------------------------------------------------------------------===//
 //                              SDNode Class
 //===----------------------------------------------------------------------===//
@@ -9127,7 +9266,7 @@
 
 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
   assert(!Node->OperandList && "Node already has operands");
-  assert(std::numeric_limits<decltype(SDNode::NumOperands)>::max() >
+  assert(std::numeric_limits<decltype(SDNode::NumOperands)>::max() >=
              Vals.size() &&
          "too many operands to fit into SDNode");
   SDUse *Ops = OperandRecycler.allocate(
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index cf06c1f..871ab9b 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3694,8 +3694,11 @@
   if (isVolatile || NumValues > MaxParallelChains)
     // Serialize volatile loads with other side effects.
     Root = getRoot();
-  else if (AA && AA->pointsToConstantMemory(MemoryLocation(
-               SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
+  else if (AA &&
+           AA->pointsToConstantMemory(MemoryLocation(
+               SV,
+               LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
+               AAInfo))) {
     // Do not serialize (non-volatile) loads of constant memory with anything.
     Root = DAG.getEntryNode();
     ConstantMemory = true;
@@ -3806,9 +3809,12 @@
   Type *Ty = I.getType();
   AAMDNodes AAInfo;
   I.getAAMetadata(AAInfo);
-  assert((!AA || !AA->pointsToConstantMemory(MemoryLocation(
-             SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) &&
-         "load_from_swift_error should not be constant memory");
+  assert(
+      (!AA ||
+       !AA->pointsToConstantMemory(MemoryLocation(
+           SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
+           AAInfo))) &&
+      "load_from_swift_error should not be constant memory");
 
   SmallVector<EVT, 4> ValueVTs;
   SmallVector<uint64_t, 4> Offsets;
@@ -4095,8 +4101,12 @@
   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
 
   // Do not serialize masked loads of constant memory with anything.
-  bool AddToChain = !AA || !AA->pointsToConstantMemory(MemoryLocation(
-      PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
+  bool AddToChain =
+      !AA || !AA->pointsToConstantMemory(MemoryLocation(
+                 PtrOperand,
+                 LocationSize::precise(
+                     DAG.getDataLayout().getTypeStoreSize(I.getType())),
+                 AAInfo));
   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
 
   MachineMemOperand *MMO =
@@ -4137,10 +4147,12 @@
   const Value *BasePtr = Ptr;
   bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
   bool ConstantMemory = false;
-  if (UniformBase &&
-      AA && AA->pointsToConstantMemory(MemoryLocation(
-          BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
-          AAInfo))) {
+  if (UniformBase && AA &&
+      AA->pointsToConstantMemory(
+          MemoryLocation(BasePtr,
+                         LocationSize::precise(
+                             DAG.getDataLayout().getTypeStoreSize(I.getType())),
+                         AAInfo))) {
     // Do not serialize (non-volatile) loads of constant memory with anything.
     Root = DAG.getEntryNode();
     ConstantMemory = true;
@@ -5762,17 +5774,15 @@
     // avoid the select that is necessary in the general case to filter out
     // the 0-shift possibility that leads to UB.
     if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
-      // TODO: This should also be done if the operation is custom, but we have
-      // to make sure targets are handling the modulo shift amount as expected.
       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
-      if (TLI.isOperationLegal(RotateOpcode, VT)) {
+      if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
         return nullptr;
       }
 
       // Some targets only rotate one way. Try the opposite direction.
       RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
-      if (TLI.isOperationLegal(RotateOpcode, VT)) {
+      if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
         // Negate the shift amount because it is safe to ignore the high bits.
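+        // Illustrative example: rotl X, Z == rotr X, (bitwidth - Z), and
+        // (0 - Z) is congruent to (bitwidth - Z) modulo the power-of-2
+        // bitwidth.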
         SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
@@ -6363,56 +6373,6 @@
     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
     // delete it now.
     return nullptr;
-  case Intrinsic::objc_autorelease:
-    return "objc_autorelease";
-  case Intrinsic::objc_autoreleasePoolPop:
-    return "objc_autoreleasePoolPop";
-  case Intrinsic::objc_autoreleasePoolPush:
-    return "objc_autoreleasePoolPush";
-  case Intrinsic::objc_autoreleaseReturnValue:
-    return "objc_autoreleaseReturnValue";
-  case Intrinsic::objc_copyWeak:
-    return "objc_copyWeak";
-  case Intrinsic::objc_destroyWeak:
-    return "objc_destroyWeak";
-  case Intrinsic::objc_initWeak:
-    return "objc_initWeak";
-  case Intrinsic::objc_loadWeak:
-    return "objc_loadWeak";
-  case Intrinsic::objc_loadWeakRetained:
-    return "objc_loadWeakRetained";
-  case Intrinsic::objc_moveWeak:
-    return "objc_moveWeak";
-  case Intrinsic::objc_release:
-    return "objc_release";
-  case Intrinsic::objc_retain:
-    return "objc_retain";
-  case Intrinsic::objc_retainAutorelease:
-    return "objc_retainAutorelease";
-  case Intrinsic::objc_retainAutoreleaseReturnValue:
-    return "objc_retainAutoreleaseReturnValue";
-  case Intrinsic::objc_retainAutoreleasedReturnValue:
-    return "objc_retainAutoreleasedReturnValue";
-  case Intrinsic::objc_retainBlock:
-    return "objc_retainBlock";
-  case Intrinsic::objc_storeStrong:
-    return "objc_storeStrong";
-  case Intrinsic::objc_storeWeak:
-    return "objc_storeWeak";
-  case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
-    return "objc_unsafeClaimAutoreleasedReturnValue";
-  case Intrinsic::objc_retainedObject:
-    return "objc_retainedObject";
-  case Intrinsic::objc_unretainedObject:
-    return "objc_unretainedObject";
-  case Intrinsic::objc_unretainedPointer:
-    return "objc_unretainedPointer";
-  case Intrinsic::objc_retain_autorelease:
-    return "objc_retain_autorelease";
-  case Intrinsic::objc_sync_enter:
-    return "objc_sync_enter";
-  case Intrinsic::objc_sync_exit:
-    return "objc_sync_exit";
   }
 }
 
@@ -7383,10 +7343,11 @@
 ///
 ///   OpInfo describes the operand
 ///   RefOpInfo describes the matching operand if any, the operand otherwise
-static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
-                                 const SDLoc &DL, SDISelAsmOperandInfo &OpInfo,
+static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
+                                 SDISelAsmOperandInfo &OpInfo,
                                  SDISelAsmOperandInfo &RefOpInfo) {
   LLVMContext &Context = *DAG.getContext();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   MachineFunction &MF = DAG.getMachineFunction();
   SmallVector<unsigned, 4> Regs;
@@ -7394,11 +7355,19 @@
 
   // If this is a constraint for a single physreg, or a constraint for a
   // register class, find it.
-  std::pair<unsigned, const TargetRegisterClass *> PhysReg =
-      TLI.getRegForInlineAsmConstraint(&TRI, RefOpInfo.ConstraintCode,
-                                       RefOpInfo.ConstraintVT);
+  unsigned AssignedReg;
+  const TargetRegisterClass *RC;
+  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
+      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
+  // RC is unset only on failure. Return immediately.
+  if (!RC)
+    return;
 
-  unsigned NumRegs = 1;
+  // Get the actual register value type.  This is important, because the user
+  // may have asked for (e.g.) the AX register in i32 type.  We need to
+  // remember that AX is actually i16 to get the right extension.
+  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
+
   if (OpInfo.ConstraintVT != MVT::Other) {
     // If this is an FP operand in an integer register (or visa versa), or more
     // generally if the operand value disagrees with the register class we plan
@@ -7408,13 +7377,11 @@
     // Bitcast for output value is done at the end of visitInlineAsm().
     if ((OpInfo.Type == InlineAsm::isOutput ||
          OpInfo.Type == InlineAsm::isInput) &&
-        PhysReg.second &&
-        !TRI.isTypeLegalForClass(*PhysReg.second, OpInfo.ConstraintVT)) {
+        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
       // Try to convert to the first EVT that the reg class contains.  If the
       // types are identical size, use a bitcast to convert (e.g. two differing
       // vector types).  Note: output bitcast is done at the end of
       // visitInlineAsm().
-      MVT RegVT = *TRI.legalclasstypes_begin(*PhysReg.second);
       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
         // Exclude indirect inputs while they are unsupported because the code
         // to perform the load is missing and thus OpInfo.CallOperand still
@@ -7427,15 +7394,13 @@
         // use the corresponding integer type. This turns an f64 value into
         // i64, which can be passed with two i32 values on a 32-bit machine.
       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
-        RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
+        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
         if (OpInfo.Type == InlineAsm::isInput)
           OpInfo.CallOperand =
-              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
-        OpInfo.ConstraintVT = RegVT;
+              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
+        OpInfo.ConstraintVT = VT;
       }
     }
-
-    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
   }
 
   // No need to allocate a matching input constraint since the constraint it's
@@ -7443,59 +7408,38 @@
   if (OpInfo.isMatchingInputConstraint())
     return;
 
-  MVT RegVT;
   EVT ValueVT = OpInfo.ConstraintVT;
+  if (OpInfo.ConstraintVT == MVT::Other)
+    ValueVT = RegVT;
+
+  // Initialize NumRegs.
+  unsigned NumRegs = 1;
+  if (OpInfo.ConstraintVT != MVT::Other)
+    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
 
   // If this is a constraint for a specific physical register, like {r17},
   // assign it now.
-  if (unsigned AssignedReg = PhysReg.first) {
-    const TargetRegisterClass *RC = PhysReg.second;
-    if (OpInfo.ConstraintVT == MVT::Other)
-      ValueVT = *TRI.legalclasstypes_begin(*RC);
 
-    // Get the actual register value type.  This is important, because the user
-    // may have asked for (e.g.) the AX register in i32 type.  We need to
-    // remember that AX is actually i16 to get the right extension.
-    RegVT = *TRI.legalclasstypes_begin(*RC);
+  // If this is associated with a specific physical register, initialize the
+  // iterator to point at it. If virtual, make sure we allocate enough
+  // registers.
 
-    // This is an explicit reference to a physical register.
-    Regs.push_back(AssignedReg);
+  // Start at the beginning of the register class.
+  TargetRegisterClass::iterator I = RC->begin();
+  MachineRegisterInfo &RegInfo = MF.getRegInfo();
 
-    // If this is an expanded reference, add the rest of the regs to Regs.
-    if (NumRegs != 1) {
-      TargetRegisterClass::iterator I = RC->begin();
+  // If a specific physical register was requested, advance the iterator until
+  // it points at that register within the class.
+  if (AssignedReg) {
       for (; *I != AssignedReg; ++I)
-        assert(I != RC->end() && "Didn't find reg!");
-
-      // Already added the first reg.
-      --NumRegs; ++I;
-      for (; NumRegs; --NumRegs, ++I) {
-        assert(I != RC->end() && "Ran out of registers to allocate!");
-        Regs.push_back(*I);
-      }
-    }
-
-    OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
-    return;
+        assert(I != RC->end() && "AssignedReg should be member of RC");
   }
 
-  // Otherwise, if this was a reference to an LLVM register class, create vregs
-  // for this reference.
-  if (const TargetRegisterClass *RC = PhysReg.second) {
-    RegVT = *TRI.legalclasstypes_begin(*RC);
-    if (OpInfo.ConstraintVT == MVT::Other)
-      ValueVT = RegVT;
-
-    // Create the appropriate number of virtual registers.
-    MachineRegisterInfo &RegInfo = MF.getRegInfo();
-    for (; NumRegs; --NumRegs)
-      Regs.push_back(RegInfo.createVirtualRegister(RC));
-
-    OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
-    return;
+  for (; NumRegs; --NumRegs, ++I) {
+    assert(I != RC->end() && "Ran out of registers to allocate!");
+    auto R = (AssignedReg) ? *I : RegInfo.createVirtualRegister(RC);
+    Regs.push_back(R);
   }
 
-  // Otherwise, we couldn't allocate enough registers for this.
+  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
 }
 
 static unsigned
@@ -7516,21 +7460,6 @@
   return CurOp;
 }
 
-/// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT
-/// \return true if it has succeeded, false otherwise
-static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs,
-                              MVT RegVT, SelectionDAG &DAG) {
-  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
-  for (unsigned i = 0, e = NumRegs; i != e; ++i) {
-    if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
-      Regs.push_back(RegInfo.createVirtualRegister(RC));
-    else
-      return false;
-  }
-  return true;
-}
-
 namespace {
 
 class ExtraFlags {
@@ -7587,12 +7516,10 @@
 
   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
   unsigned ResNo = 0;   // ResNo - The result number of the next output.
-  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
-    ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
+  for (auto &T : TargetConstraints) {
+    ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
 
-    MVT OpVT = MVT::Other;
-
     // Compute the value type for each operand.
     if (OpInfo.Type == InlineAsm::isInput ||
         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
@@ -7606,39 +7533,37 @@
         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
       }
 
-      OpVT =
+      OpInfo.ConstraintVT =
           OpInfo
               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
               .getSimpleVT();
-    }
-
-    if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
+    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
       // The return value of the call is this value.  As such, there is no
       // corresponding argument.
       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
-        OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
-                                      STy->getElementType(ResNo));
+        OpInfo.ConstraintVT = TLI.getSimpleValueType(
+            DAG.getDataLayout(), STy->getElementType(ResNo));
       } else {
         assert(ResNo == 0 && "Asm only has one result!");
-        OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
+        OpInfo.ConstraintVT =
+            TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
       }
       ++ResNo;
+    } else {
+      OpInfo.ConstraintVT = MVT::Other;
     }
 
-    OpInfo.ConstraintVT = OpVT;
-
     if (!hasMemory)
       hasMemory = OpInfo.hasMemory(TLI);
 
     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
-    // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
-    auto TargetConstraint = TargetConstraints[i];
+    // FIXME: Could we compute this on OpInfo rather than T?
 
     // Compute the constraint code and ConstraintType to use.
-    TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
+    TLI.ComputeConstraintToUse(T, SDValue());
 
-    ExtraInfo.update(TargetConstraint);
+    ExtraInfo.update(T);
   }
 
   SDValue Chain, Flag;
@@ -7652,9 +7577,7 @@
 
   // Second pass over the constraints: compute which constraint option to use
   // and assign registers to constraints that want a specific physreg.
-  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
-    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
-
+  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
     // If this is an output operand with a matching input operand, look up the
     // matching input. If their types mismatch, e.g. one is an integer, the
     // other is floating point, or their sizes are different, flag it as an
@@ -7694,24 +7617,23 @@
     SDISelAsmOperandInfo &RefOpInfo =
         OpInfo.isMatchingInputConstraint()
             ? ConstraintOperands[OpInfo.getMatchedOperand()]
-            : ConstraintOperands[i];
+            : OpInfo;
     if (RefOpInfo.ConstraintType == TargetLowering::C_Register)
-      GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
+      GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
   }
 
   // Third pass - Loop over all of the operands, assigning virtual or physregs
   // to register class operands.
-  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
-    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
+  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
     SDISelAsmOperandInfo &RefOpInfo =
         OpInfo.isMatchingInputConstraint()
             ? ConstraintOperands[OpInfo.getMatchedOperand()]
-            : ConstraintOperands[i];
+            : OpInfo;
 
     // C_Register operands have already been allocated, Other/Memory don't need
     // to be.
     if (RefOpInfo.ConstraintType == TargetLowering::C_RegisterClass)
-      GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
+      GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
   }
 
   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
@@ -7738,9 +7660,7 @@
   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
   std::vector<std::pair<RegsForValue, Value *>> IndirectStoresToEmit;
 
-  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
-    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
-
+  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
     switch (OpInfo.Type) {
     case InlineAsm::isOutput:
       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
@@ -7818,9 +7738,13 @@
           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
           SmallVector<unsigned, 4> Regs;
 
-          if (!createVirtualRegs(Regs,
-                                 InlineAsm::getNumOperandRegisters(OpFlag),
-                                 RegVT, DAG)) {
+          if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
+            unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
+            MachineRegisterInfo &RegInfo =
+                DAG.getMachineFunction().getRegInfo();
+            for (unsigned i = 0; i != NumRegs; ++i)
+              Regs.push_back(RegInfo.createVirtualRegister(RC));
+          } else {
             emitInlineAsmError(CS, "inline asm error: This value type register "
                                    "class is not natively supported!");
             return;
@@ -7955,19 +7879,19 @@
     unsigned numRet;
     ArrayRef<Type *> ResultTypes;
     SmallVector<SDValue, 1> ResultValues(1);
-    if (CSResultType->isSingleValueType()) {
-      numRet = 1;
-      ResultValues[0] = Val;
-      ResultTypes = makeArrayRef(CSResultType);
-    } else {
-      numRet = CSResultType->getNumContainedTypes();
+    if (StructType *StructResult = dyn_cast<StructType>(CSResultType)) {
+      numRet = StructResult->getNumElements();
       assert(Val->getNumOperands() == numRet &&
              "Mismatch in number of output operands in asm result");
-      ResultTypes = CSResultType->subtypes();
+      ResultTypes = StructResult->elements();
       ArrayRef<SDUse> ValueUses = Val->ops();
       ResultValues.resize(numRet);
       std::transform(ValueUses.begin(), ValueUses.end(), ResultValues.begin(),
                      [](const SDUse &u) -> SDValue { return u.get(); });
+    } else {
+      numRet = 1;
+      ResultValues[0] = Val;
+      ResultTypes = makeArrayRef(CSResultType);
     }
     SmallVector<EVT, 1> ResultVTs(numRet);
     for (unsigned i = 0; i < numRet; i++) {
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 7d341da..af5c243 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -704,7 +704,7 @@
       continue;
 
     unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
-    CurDAG->computeKnownBits(Src, Known);
+    Known = CurDAG->computeKnownBits(Src);
     FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
   } while (!Worklist.empty());
 }
@@ -2211,9 +2211,7 @@
   // Otherwise, the DAG Combiner may have proven that the value coming in is
   // either already zero or is not demanded.  Check for known zero input bits.
   APInt NeededMask = DesiredMask & ~ActualMask;
-
-  KnownBits Known;
-  CurDAG->computeKnownBits(LHS, Known);
+  KnownBits Known = CurDAG->computeKnownBits(LHS);
 
   // If all the missing bits in the or are already known to be set, match!
   if (NeededMask.isSubsetOf(Known.One))
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index c2f1e37..a2f05c1 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -431,56 +431,6 @@
   return false;
 }
 
-bool
-TargetLowering::SimplifyDemandedBits(SDNode *User, unsigned OpIdx,
-                                     const APInt &DemandedBits,
-                                     DAGCombinerInfo &DCI,
-                                     TargetLoweringOpt &TLO) const {
-  SDValue Op = User->getOperand(OpIdx);
-  KnownBits Known;
-
-  if (!SimplifyDemandedBits(Op, DemandedBits, Known, TLO, 0, true))
-    return false;
-
-
-  // Old will not always be the same as Op.  For example:
-  //
-  // Demanded = 0xffffff
-  // Op = i64 truncate (i32 and x, 0xffffff)
-  // In this case simplify demand bits will want to replace the 'and' node
-  // with the value 'x', which will give us:
-  // Old = i32 and x, 0xffffff
-  // New = x
-  if (TLO.Old.hasOneUse()) {
-    // For the one use case, we just commit the change.
-    DCI.CommitTargetLoweringOpt(TLO);
-    return true;
-  }
-
-  // If Old has more than one use then it must be Op, because the
-  // AssumeSingleUse flag is not propogated to recursive calls of
-  // SimplifyDemanded bits, so the only node with multiple use that
-  // it will attempt to combine will be Op.
-  assert(TLO.Old == Op);
-
-  SmallVector <SDValue, 4> NewOps;
-  for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
-    if (i == OpIdx) {
-      NewOps.push_back(TLO.New);
-      continue;
-    }
-    NewOps.push_back(User->getOperand(i));
-  }
-  User = TLO.DAG.UpdateNodeOperands(User, NewOps);
-  // Op has less users now, so we may be able to perform additional combines
-  // with it.
-  DCI.AddToWorklist(Op.getNode());
-  // User's operands have been updated, so we may be able to do new combines
-  // with it.
-  DCI.AddToWorklist(User);
-  return true;
-}
-
 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                           DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -496,23 +446,41 @@
   return Simplified;
 }
 
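+/// Wrapper around the DemandedElts-aware overload below: demand all vector
+/// elements (or the single scalar lane) by default.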
+bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
+                                          KnownBits &Known,
+                                          TargetLoweringOpt &TLO,
+                                          unsigned Depth,
+                                          bool AssumeSingleUse) const {
+  EVT VT = Op.getValueType();
+  APInt DemandedElts = VT.isVector()
+                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
+                           : APInt(1, 1);
+  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
+                              AssumeSingleUse);
+}
+
 /// Look at Op. At this point, we know that only the OriginalDemandedBits of the
 /// result of Op are ever used downstream. If we can use this information to
 /// simplify Op, create a new simplified DAG node and return true, returning the
 /// original and new nodes in Old and New. Otherwise, analyze the expression and
 /// return a mask of Known bits for the expression (used to simplify the
 /// caller).  The Known bits may only be accurate for those bits in the
-/// DemandedMask.
-bool TargetLowering::SimplifyDemandedBits(SDValue Op,
-                                          const APInt &OriginalDemandedBits,
-                                          KnownBits &Known,
-                                          TargetLoweringOpt &TLO,
-                                          unsigned Depth,
-                                          bool AssumeSingleUse) const {
+/// OriginalDemandedBits and OriginalDemandedElts.
+bool TargetLowering::SimplifyDemandedBits(
+    SDValue Op, const APInt &OriginalDemandedBits,
+    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
+    unsigned Depth, bool AssumeSingleUse) const {
   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
   assert(Op.getScalarValueSizeInBits() == BitWidth &&
          "Mask size mismatches value type size!");
+
+  unsigned NumElts = OriginalDemandedElts.getBitWidth();
+  assert((!Op.getValueType().isVector() ||
+          NumElts == Op.getValueType().getVectorNumElements()) &&
+         "Unexpected vector size");
+
   APInt DemandedBits = OriginalDemandedBits;
+  APInt DemandedElts = OriginalDemandedElts;
   SDLoc dl(Op);
   auto &DL = TLO.DAG.getDataLayout();
 
@@ -532,18 +500,19 @@
     if (Depth != 0) {
       // If not at the root, Just compute the Known bits to
       // simplify things downstream.
-      TLO.DAG.computeKnownBits(Op, Known, Depth);
+      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
       return false;
     }
     // If this is the root being simplified, allow it to have multiple uses,
-    // just set the DemandedBits to all bits.
+    // just set the DemandedBits/Elts to all bits.
     DemandedBits = APInt::getAllOnesValue(BitWidth);
-  } else if (OriginalDemandedBits == 0) {
-    // Not demanding any bits from Op.
+    DemandedElts = APInt::getAllOnesValue(NumElts);
+  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
+    // Not demanding any bits/elts from Op.
     if (!Op.isUndef())
       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
     return false;
-  } else if (Depth == 6) {        // Limit search depth.
+  } else if (Depth == 6) { // Limit search depth.
     return false;
   }
 
@@ -573,18 +542,71 @@
       Known.One &= Known2.One;
       Known.Zero &= Known2.Zero;
     }
-    return false;   // Don't fall through, will infinitely loop.
-  case ISD::CONCAT_VECTORS:
+    return false; // Don't fall through, will infinitely loop.
+  case ISD::CONCAT_VECTORS: {
     Known.Zero.setAllBits();
     Known.One.setAllBits();
-    for (SDValue SrcOp : Op->ops()) {
-      if (SimplifyDemandedBits(SrcOp, DemandedBits, Known2, TLO, Depth + 1))
+    EVT SubVT = Op.getOperand(0).getValueType();
+    unsigned NumSubVecs = Op.getNumOperands();
+    unsigned NumSubElts = SubVT.getVectorNumElements();
+    for (unsigned i = 0; i != NumSubVecs; ++i) {
+      APInt DemandedSubElts =
+          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
+      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
+                               Known2, TLO, Depth + 1))
         return true;
-      // Known bits are the values that are shared by every subvector.
-      Known.One &= Known2.One;
-      Known.Zero &= Known2.Zero;
+      // Known bits are shared by every demanded subvector element.
+      if (!!DemandedSubElts) {
+        Known.One &= Known2.One;
+        Known.Zero &= Known2.Zero;
+      }
     }
     break;
+  }
+  case ISD::VECTOR_SHUFFLE: {
+    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
+
+    // Collect demanded elements from shuffle operands.
+    APInt DemandedLHS(NumElts, 0);
+    APInt DemandedRHS(NumElts, 0);
+    for (unsigned i = 0; i != NumElts; ++i) {
+      if (!DemandedElts[i])
+        continue;
+      int M = ShuffleMask[i];
+      if (M < 0) {
+        // For UNDEF elements, we don't know anything about the common state of
+        // the shuffle result.
+        DemandedLHS.clearAllBits();
+        DemandedRHS.clearAllBits();
+        break;
+      }
+      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
+      if (M < (int)NumElts)
+        DemandedLHS.setBit(M);
+      else
+        DemandedRHS.setBit(M - NumElts);
+    }
+
+    if (!!DemandedLHS || !!DemandedRHS) {
+      Known.Zero.setAllBits();
+      Known.One.setAllBits();
+      if (!!DemandedLHS) {
+        if (SimplifyDemandedBits(Op.getOperand(0), DemandedBits, DemandedLHS,
+                                 Known2, TLO, Depth + 1))
+          return true;
+        Known.One &= Known2.One;
+        Known.Zero &= Known2.Zero;
+      }
+      if (!!DemandedRHS) {
+        if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, DemandedRHS,
+                                 Known2, TLO, Depth + 1))
+          return true;
+        Known.One &= Known2.One;
+        Known.Zero &= Known2.Zero;
+      }
+    }
+    break;
+  }
   case ISD::AND: {
     SDValue Op0 = Op.getOperand(0);
     SDValue Op1 = Op.getOperand(1);
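The new VECTOR_SHUFFLE case maps demanded result lanes back onto the two shuffle operands: a demanded lane with mask entry M demands element M of the first operand when M < NumElts and element M - NumElts of the second otherwise, while a demanded undef lane clears both masks since nothing can be proven about it. A self-contained sketch of that split, assuming at most 64 elements so uint64_t can stand in for APInt:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Per-operand demanded-element masks; negative shuffle entries are undef.
struct DemandedOperands {
  uint64_t LHS = 0, RHS = 0;
};

DemandedOperands splitShuffleDemand(const std::vector<int> &ShuffleMask,
                                    uint64_t DemandedElts) {
  DemandedOperands D;
  unsigned NumElts = static_cast<unsigned>(ShuffleMask.size());
  for (unsigned i = 0; i != NumElts; ++i) {
    if (!(DemandedElts & (1ULL << i)))
      continue;
    int M = ShuffleMask[i];
    if (M < 0) // a demanded undef lane: nothing can be proven, demand nothing
      return {};
    if (M < (int)NumElts)
      D.LHS |= 1ULL << M;
    else
      D.RHS |= 1ULL << (M - NumElts);
  }
  return D;
}

int main() {
  // A <4 x i32> shuffle selecting lanes {0, 5, 2, 7}; lanes 0 and 1 demanded.
  DemandedOperands D = splitShuffleDemand({0, 5, 2, 7}, 0b0011);
  assert(D.LHS == 0b0001); // element 0 of the first operand
  assert(D.RHS == 0b0010); // element 1 (= 5 - 4) of the second operand
}
```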
@@ -594,9 +616,8 @@
     // simplify the LHS, here we're using information from the LHS to simplify
     // the RHS.
     if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
-      KnownBits LHSKnown;
       // Do not increment Depth here; that can cause an infinite loop.
-      TLO.DAG.computeKnownBits(Op0, LHSKnown, Depth);
+      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
       // If the LHS already has zeros where RHSC does, this 'and' is dead.
       if ((LHSKnown.Zero & DemandedBits) ==
           (~RHSC->getAPIntValue() & DemandedBits))
@@ -619,10 +640,10 @@
       }
     }
 
-    if (SimplifyDemandedBits(Op1, DemandedBits, Known, TLO, Depth + 1))
+    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO, Depth + 1))
       return true;
     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
-    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, Known2, TLO,
+    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts, Known2, TLO,
                              Depth + 1))
       return true;
     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
@@ -653,10 +674,11 @@
     SDValue Op0 = Op.getOperand(0);
     SDValue Op1 = Op.getOperand(1);
 
-    if (SimplifyDemandedBits(Op1, DemandedBits, Known, TLO, Depth + 1))
+    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO, Depth + 1))
       return true;
     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
-    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, Known2, TLO, Depth + 1))
+    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts, Known2, TLO,
+                             Depth + 1))
       return true;
     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
 
@@ -683,10 +705,10 @@
     SDValue Op0 = Op.getOperand(0);
     SDValue Op1 = Op.getOperand(1);
 
-    if (SimplifyDemandedBits(Op1, DemandedBits, Known, TLO, Depth + 1))
+    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO, Depth + 1))
       return true;
     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
-    if (SimplifyDemandedBits(Op0, DemandedBits, Known2, TLO, Depth + 1))
+    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO, Depth + 1))
       return true;
     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
 
@@ -840,7 +862,7 @@
         }
       }
 
-      if (SimplifyDemandedBits(Op0, DemandedBits.lshr(ShAmt), Known, TLO,
+      if (SimplifyDemandedBits(Op0, DemandedBits.lshr(ShAmt), DemandedElts, Known, TLO,
                                Depth + 1))
         return true;
 
@@ -935,7 +957,7 @@
       }
 
       // Compute the new bits that are at the top now.
-      if (SimplifyDemandedBits(Op0, InDemandedMask, Known, TLO, Depth + 1))
+      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, Depth + 1))
         return true;
       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
       Known.Zero.lshrInPlace(ShAmt);
@@ -974,7 +996,7 @@
       if (DemandedBits.countLeadingZeros() < ShAmt)
         InDemandedMask.setSignBit();
 
-      if (SimplifyDemandedBits(Op0, InDemandedMask, Known, TLO, Depth + 1))
+      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, Depth + 1))
         return true;
       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
       Known.Zero.lshrInPlace(ShAmt);
@@ -1221,18 +1243,26 @@
     break;
   }
   case ISD::EXTRACT_VECTOR_ELT: {
-    // Demand the bits from every vector element.
     SDValue Src = Op.getOperand(0);
+    SDValue Idx = Op.getOperand(1);
+    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
     unsigned EltBitWidth = Src.getScalarValueSizeInBits();
 
+    // Without a constant index, demand the bits from every vector element.
+    APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
+    if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
+      if (CIdx->getAPIntValue().ult(NumSrcElts))
+        DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());
+
     // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
     // anything about the extended bits.
     APInt DemandedSrcBits = DemandedBits;
     if (BitWidth > EltBitWidth)
       DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);
 
-    if (SimplifyDemandedBits(Src, DemandedSrcBits, Known2, TLO, Depth + 1))
-        return true;
+    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
+                             Depth + 1))
+      return true;
 
     Known = Known2;
     if (BitWidth > EltBitWidth)
@@ -1300,7 +1330,7 @@
     // If this is a bitcast, let computeKnownBits handle it.  Only do this on a
     // recursive call where Known may be useful to the caller.
     if (Depth > 0) {
-      TLO.DAG.computeKnownBits(Op, Known, Depth);
+      Known = TLO.DAG.computeKnownBits(Op, Depth);
       return false;
     }
     break;
@@ -1313,8 +1343,8 @@
     SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
     unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();
     APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
-    if (SimplifyDemandedBits(Op0, LoMask, Known2, TLO, Depth + 1) ||
-        SimplifyDemandedBits(Op1, LoMask, Known2, TLO, Depth + 1) ||
+    if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, Depth + 1) ||
+        SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, Depth + 1) ||
         // See if the operation should be performed at a smaller bit width.
         ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
       SDNodeFlags Flags = Op.getNode()->getFlags();
@@ -1354,14 +1384,14 @@
   }
   default:
     if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
-      if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, Known, TLO,
-                                            Depth))
+      if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
+                                            Known, TLO, Depth))
         return true;
       break;
     }
 
     // Just use computeKnownBits to compute output bits.
-    TLO.DAG.computeKnownBits(Op, Known, Depth);
+    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
     break;
   }
 
@@ -1767,8 +1797,17 @@
       return true;
     KnownZero = SrcZero.zextOrTrunc(NumElts);
     KnownUndef = SrcUndef.zextOrTrunc(NumElts);
+
+    if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
+      // zext(undef) upper bits are guaranteed to be zero.
+      if (DemandedElts.isSubsetOf(KnownUndef))
+        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
+      KnownUndef.clearAllBits();
+    }
     break;
   }
+  case ISD::OR:
+  case ISD::XOR:
   case ISD::ADD:
   case ISD::SUB:
   case ISD::FADD:
@@ -1809,12 +1848,26 @@
     if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                    KnownZero, TLO, Depth + 1))
       return true;
+
+    if (Op.getOpcode() == ISD::ZERO_EXTEND) {
+      // zext(undef) upper bits are guaranteed to be zero.
+      if (DemandedElts.isSubsetOf(KnownUndef))
+        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
+      KnownUndef.clearAllBits();
+    }
     break;
   default: {
-    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
+    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
       if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
                                                   KnownZero, TLO, Depth))
         return true;
+    } else {
+      KnownBits Known;
+      APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits);
+      if (SimplifyDemandedBits(Op, DemandedBits, DemandedEltMask, Known, TLO,
+                               Depth, AssumeSingleUse))
+        return true;
+    }
     break;
   }
   }
@@ -1885,18 +1938,14 @@
 }
 
 bool TargetLowering::SimplifyDemandedBitsForTargetNode(
-    SDValue Op, const APInt &DemandedBits, KnownBits &Known,
-    TargetLoweringOpt &TLO, unsigned Depth) const {
+    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
+    KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
          "Should use SimplifyDemandedBits if you don't know whether Op"
          " is a target node!");
-  EVT VT = Op.getValueType();
-  APInt DemandedElts = VT.isVector()
-                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
-                           : APInt(1, 1);
   computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
   return false;
 }
@@ -4666,6 +4715,27 @@
   return true;
 }
 
+bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
+                               SelectionDAG &DAG) const {
+  SDLoc dl(N);
+  EVT VT = N->getValueType(0);
+  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
+  SDValue Op = N->getOperand(0);
+
+  // Only expand vector types if we have the appropriate vector operations.
+  if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SRA, VT) ||
+                        !isOperationLegalOrCustom(ISD::ADD, VT) ||
+                        !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
+    return false;
+
+  SDValue Shift =
+      DAG.getNode(ISD::SRA, dl, VT, Op,
+                  DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT));
+  SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift);
+  Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift);
+  return true;
+}
+
 SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
                                             SelectionDAG &DAG) const {
   SDLoc SL(LD);
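expandABS above emits the classic branch-free absolute value: the arithmetic shift produces 0 for non-negative inputs and -1 for negative ones, so (x + s) ^ s either leaves x alone or computes (x - 1) ^ -1 == -x. The same arithmetic at a fixed 32-bit width (with the usual two's-complement caveat that INT32_MIN maps to itself under wrapping semantics):

```cpp
#include <cassert>
#include <cstdint>

// The same SRA/ADD/XOR sequence expandABS emits, at a fixed 32-bit width.
int32_t absExpanded(int32_t x) {
  int32_t s = x >> 31; // SRA by bitwidth - 1: 0 or -1
  return (x + s) ^ s;  // ADD, then XOR
}

int main() {
  assert(absExpanded(5) == 5);
  assert(absExpanded(-5) == 5);
  assert(absExpanded(0) == 0);
}
```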
@@ -5205,9 +5275,24 @@
   return SDValue();
 }
 
-SDValue TargetLowering::getExpandedSaturationAdditionSubtraction(
-    SDNode *Node, SelectionDAG &DAG) const {
+SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
   unsigned Opcode = Node->getOpcode();
+  SDValue LHS = Node->getOperand(0);
+  SDValue RHS = Node->getOperand(1);
+  EVT VT = LHS.getValueType();
+  SDLoc dl(Node);
+
+  // usub.sat(a, b) -> umax(a, b) - b
+  if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) {
+    SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS);
+    return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
+  }
+
+  if (VT.isVector()) {
+    // TODO: Consider not scalarizing here.
+    return SDValue();
+  }
+
   unsigned OverflowOp;
   switch (Opcode) {
   case ISD::SADDSAT:
@@ -5226,11 +5311,7 @@
     llvm_unreachable("Expected method to receive signed or unsigned saturation "
                      "addition or subtraction node.");
   }
-  assert(Node->getNumOperands() == 2 && "Expected node to have 2 operands.");
 
-  SDLoc dl(Node);
-  SDValue LHS = Node->getOperand(0);
-  SDValue RHS = Node->getOperand(1);
   assert(LHS.getValueType().isScalarInteger() &&
          "Expected operands to be integers. Vector of int arguments should "
          "already be unrolled.");
diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp
index dcf37ca..3b578c7 100644
--- a/lib/CodeGen/StackProtector.cpp
+++ b/lib/CodeGen/StackProtector.cpp
@@ -157,14 +157,6 @@
   return NeedsProtector;
 }
 
-static bool isLifetimeInst(const Instruction *I) {
-  if (const auto Intrinsic = dyn_cast<IntrinsicInst>(I)) {
-    const auto Id = Intrinsic->getIntrinsicID();
-    return Id == Intrinsic::lifetime_start || Id == Intrinsic::lifetime_end;
-  }
-  return false;
-}
-
 bool StackProtector::HasAddressTaken(const Instruction *AI) {
   for (const User *U : AI->users()) {
     if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
@@ -175,7 +167,7 @@
         return true;
     } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
       // Ignore intrinsics that are not calls. TODO: Use isLoweredToCall().
-      if (!isa<DbgInfoIntrinsic>(CI) && !isLifetimeInst(CI))
+      if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
         return true;
     } else if (isa<InvokeInst>(U)) {
       return true;
diff --git a/lib/CodeGen/TargetFrameLoweringImpl.cpp b/lib/CodeGen/TargetFrameLoweringImpl.cpp
index f0cfa2f..cf78fb5 100644
--- a/lib/CodeGen/TargetFrameLoweringImpl.cpp
+++ b/lib/CodeGen/TargetFrameLoweringImpl.cpp
@@ -30,12 +30,6 @@
 
 TargetFrameLowering::~TargetFrameLowering() = default;
 
-/// The default implementation just looks at attribute "no-frame-pointer-elim".
-bool TargetFrameLowering::noFramePointerElim(const MachineFunction &MF) const {
-  auto Attr = MF.getFunction().getFnAttribute("no-frame-pointer-elim");
-  return Attr.getValueAsString() == "true";
-}
-
 bool TargetFrameLowering::enableCalleeSaveSkip(const MachineFunction &MF) const {
   assert(MF.getFunction().hasFnAttribute(Attribute::NoReturn) &&
          MF.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
diff --git a/lib/CodeGen/TargetOptionsImpl.cpp b/lib/CodeGen/TargetOptionsImpl.cpp
index 853e71d..3c133fb 100644
--- a/lib/CodeGen/TargetOptionsImpl.cpp
+++ b/lib/CodeGen/TargetOptionsImpl.cpp
@@ -23,15 +23,34 @@
 /// DisableFramePointerElim - This returns true if frame pointer elimination
 /// optimization should be disabled for the given machine function.
 bool TargetOptions::DisableFramePointerElim(const MachineFunction &MF) const {
-  // Check to see if we should eliminate all frame pointers.
-  if (MF.getSubtarget().getFrameLowering()->noFramePointerElim(MF))
+  // Check to see if the target wants to forcibly keep the frame pointer.
+  if (MF.getSubtarget().getFrameLowering()->keepFramePointer(MF))
     return true;
 
-  // Check to see if we should eliminate non-leaf frame pointers.
-  if (MF.getFunction().hasFnAttribute("no-frame-pointer-elim-non-leaf"))
-    return MF.getFrameInfo().hasCalls();
+  const Function &F = MF.getFunction();
 
-  return false;
+  // TODO: Remove support for the old `fp elim` function attributes once the
+  //       migration to "frame-pointer" is complete.
+  if (!F.hasFnAttribute("frame-pointer")) {
+    // Check to see if we should eliminate all frame pointers.
+    if (F.getFnAttribute("no-frame-pointer-elim").getValueAsString() == "true")
+      return true;
+
+    // Check to see if we should eliminate non-leaf frame pointers.
+    if (F.hasFnAttribute("no-frame-pointer-elim-non-leaf"))
+      return MF.getFrameInfo().hasCalls();
+
+    return false;
+  }
+
+  StringRef FP = F.getFnAttribute("frame-pointer").getValueAsString();
+  if (FP == "all")
+    return true;
+  if (FP == "non-leaf")
+    return MF.getFrameInfo().hasCalls();
+  if (FP == "none")
+    return false;
+  llvm_unreachable("unknown frame pointer flag");
 }
 
 /// HonorSignDependentRoundingFPMath - Return true if the codegen must assume
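The rewritten DisableFramePointerElim gives the new "frame-pointer" attribute priority and consults the legacy attributes only when it is absent. A sketch of that precedence with the attribute queries replaced by plain parameters (std::optional stands in for the function-attribute lookup; the value strings match the ones checked above):

```cpp
#include <cassert>
#include <optional>
#include <string>

// FramePointer models the "frame-pointer" attribute ("all", "non-leaf",
// "none"); the booleans model the legacy no-frame-pointer-elim attributes.
bool disableFPElim(const std::optional<std::string> &FramePointer,
                   bool LegacyNoElim, bool LegacyNoElimNonLeaf, bool HasCalls) {
  if (!FramePointer) { // legacy attributes are only a fallback now
    if (LegacyNoElim)
      return true;
    if (LegacyNoElimNonLeaf)
      return HasCalls;
    return false;
  }
  if (*FramePointer == "all")
    return true;
  if (*FramePointer == "non-leaf")
    return HasCalls;
  return false; // "none"
}

int main() {
  assert(disableFPElim(std::string("all"), false, false, false));
  assert(disableFPElim(std::string("non-leaf"), false, false, /*HasCalls=*/true));
  assert(!disableFPElim(std::nullopt, false, true, /*HasCalls=*/false));
}
```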
diff --git a/lib/CodeGen/TargetPassConfig.cpp b/lib/CodeGen/TargetPassConfig.cpp
index defb165..28126fc 100644
--- a/lib/CodeGen/TargetPassConfig.cpp
+++ b/lib/CodeGen/TargetPassConfig.cpp
@@ -755,22 +755,33 @@
 bool TargetPassConfig::addCoreISelPasses() {
   // Enable FastISel with -fast-isel, but allow that to be overridden.
   TM->setO0WantsFastISel(EnableFastISelOption != cl::BOU_FALSE);
-  if (EnableFastISelOption == cl::BOU_TRUE ||
-      (TM->getOptLevel() == CodeGenOpt::None && TM->getO0WantsFastISel() &&
-       !TM->Options.EnableGlobalISel)) {
+
+  // Determine an instruction selector.
+  enum class SelectorType { SelectionDAG, FastISel, GlobalISel };
+  SelectorType Selector;
+
+  if (EnableFastISelOption == cl::BOU_TRUE)
+    Selector = SelectorType::FastISel;
+  else if (EnableGlobalISelOption == cl::BOU_TRUE ||
+           (TM->Options.EnableGlobalISel &&
+            EnableGlobalISelOption != cl::BOU_FALSE))
+    Selector = SelectorType::GlobalISel;
+  else if (TM->getOptLevel() == CodeGenOpt::None && TM->getO0WantsFastISel())
+    Selector = SelectorType::FastISel;
+  else
+    Selector = SelectorType::SelectionDAG;
+
+  // Set TM->Options.EnableFastISel and EnableGlobalISel consistently.
+  if (Selector == SelectorType::FastISel) {
     TM->setFastISel(true);
     TM->setGlobalISel(false);
+  } else if (Selector == SelectorType::GlobalISel) {
+    TM->setFastISel(false);
+    TM->setGlobalISel(true);
   }
 
-  // Ask the target for an instruction selector.
-  // Explicitly enabling fast-isel should override implicitly enabled
-  // global-isel.
-  if (EnableGlobalISelOption == cl::BOU_TRUE ||
-      (EnableGlobalISelOption == cl::BOU_UNSET &&
-       TM->Options.EnableGlobalISel && EnableFastISelOption != cl::BOU_TRUE)) {
-    TM->setGlobalISel(true);
-    TM->setFastISel(false);
-
+  // Add instruction selector passes.
+  if (Selector == SelectorType::GlobalISel) {
     SaveAndRestore<bool> SavedAddingMachinePasses(AddingMachinePasses, true);
     if (addIRTranslator())
       return true;
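addCoreISelPasses now decides the selector once, with a fixed precedence: an explicit -fast-isel flag wins, then explicitly or target-default enabled GlobalISel, then FastISel at -O0, otherwise SelectionDAG. A compilable sketch of just that decision (BOU mirrors cl::boolOrDefault; the parameter names are illustrative):

```cpp
#include <cassert>

// BOU mirrors cl::boolOrDefault; Selector mirrors the local SelectorType enum.
enum class BOU { Unset, True, False };
enum class Selector { SelectionDAG, FastISel, GlobalISel };

Selector pickSelector(BOU FastISelOpt, BOU GlobalISelOpt, bool TMGlobalISel,
                      bool OptNone, bool O0WantsFastISel) {
  if (FastISelOpt == BOU::True) // explicit -fast-isel always wins
    return Selector::FastISel;
  if (GlobalISelOpt == BOU::True ||
      (TMGlobalISel && GlobalISelOpt != BOU::False))
    return Selector::GlobalISel;
  if (OptNone && O0WantsFastISel) // implicit FastISel at -O0
    return Selector::FastISel;
  return Selector::SelectionDAG;
}

int main() {
  // -fast-isel overrides a target that enables GlobalISel by default.
  assert(pickSelector(BOU::True, BOU::Unset, true, false, false) ==
         Selector::FastISel);
  // With no explicit flags at -O0, FastISel is chosen.
  assert(pickSelector(BOU::Unset, BOU::Unset, false, true, true) ==
         Selector::FastISel);
}
```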
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index 2e2fe72..4b72f6a 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -929,9 +929,12 @@
   MachineBasicBlock::iterator Begin = MI;
   MachineBasicBlock::iterator AfterMI = std::next(Begin);
   MachineBasicBlock::iterator End = AfterMI;
-  while (End->isCopy() &&
-         regOverlapsSet(Defs, End->getOperand(1).getReg(), TRI)) {
-    Defs.push_back(End->getOperand(0).getReg());
+  while (End != MBB->end()) {
+    End = skipDebugInstructionsForward(End, MBB->end());
+    if (End->isCopy() && regOverlapsSet(Defs, End->getOperand(1).getReg(), TRI))
+      Defs.push_back(End->getOperand(0).getReg());
+    else
+      break;
     ++End;
   }
 
diff --git a/lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp b/lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp
index 79f3959..0174613 100644
--- a/lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp
+++ b/lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp
@@ -21,8 +21,7 @@
   return record;
 }
 
-uint32_t
-llvm::codeview::getScopeEndOffset(const llvm::codeview::CVSymbol &Sym) {
+uint32_t llvm::codeview::getScopeEndOffset(const CVSymbol &Sym) {
   assert(symbolOpensScope(Sym.kind()));
   switch (Sym.kind()) {
   case SymbolKind::S_GPROC32:
@@ -52,6 +51,37 @@
   }
 }
 
+uint32_t
+llvm::codeview::getScopeParentOffset(const llvm::codeview::CVSymbol &Sym) {
+  assert(symbolOpensScope(Sym.kind()));
+  switch (Sym.kind()) {
+  case SymbolKind::S_GPROC32:
+  case SymbolKind::S_LPROC32:
+  case SymbolKind::S_GPROC32_ID:
+  case SymbolKind::S_LPROC32_ID:
+  case SymbolKind::S_LPROC32_DPC:
+  case SymbolKind::S_LPROC32_DPC_ID: {
+    ProcSym Proc = createRecord<ProcSym>(Sym);
+    return Proc.Parent;
+  }
+  case SymbolKind::S_BLOCK32: {
+    BlockSym Block = createRecord<BlockSym>(Sym);
+    return Block.Parent;
+  }
+  case SymbolKind::S_THUNK32: {
+    Thunk32Sym Thunk = createRecord<Thunk32Sym>(Sym);
+    return Thunk.Parent;
+  }
+  case SymbolKind::S_INLINESITE: {
+    InlineSiteSym Site = createRecord<InlineSiteSym>(Sym);
+    return Site.Parent;
+  }
+  default:
+    assert(false && "Unknown record type");
+    return 0;
+  }
+}
+
 CVSymbolArray
 llvm::codeview::limitSymbolArrayToScope(const CVSymbolArray &Symbols,
                                         uint32_t ScopeBegin) {
diff --git a/lib/DebugInfo/CodeView/TypeStreamMerger.cpp b/lib/DebugInfo/CodeView/TypeStreamMerger.cpp
index 8038182..bae11ce 100644
--- a/lib/DebugInfo/CodeView/TypeStreamMerger.cpp
+++ b/lib/DebugInfo/CodeView/TypeStreamMerger.cpp
@@ -77,8 +77,7 @@
   // Local hashing entry points
   Error mergeTypesAndIds(MergingTypeTableBuilder &DestIds,
                          MergingTypeTableBuilder &DestTypes,
-                         const CVTypeArray &IdsAndTypes,
-                         Optional<EndPrecompRecord> &EP);
+                         const CVTypeArray &IdsAndTypes, Optional<uint32_t> &S);
   Error mergeIdRecords(MergingTypeTableBuilder &Dest,
                        ArrayRef<TypeIndex> TypeSourceToDest,
                        const CVTypeArray &Ids);
@@ -90,14 +89,14 @@
                          GlobalTypeTableBuilder &DestTypes,
                          const CVTypeArray &IdsAndTypes,
                          ArrayRef<GloballyHashedType> Hashes,
-                         Optional<EndPrecompRecord> &EP);
+                         Optional<uint32_t> &S);
   Error mergeIdRecords(GlobalTypeTableBuilder &Dest,
                        ArrayRef<TypeIndex> TypeSourceToDest,
                        const CVTypeArray &Ids,
                        ArrayRef<GloballyHashedType> Hashes);
   Error mergeTypeRecords(GlobalTypeTableBuilder &Dest, const CVTypeArray &Types,
                          ArrayRef<GloballyHashedType> Hashes,
-                         Optional<EndPrecompRecord> &EP);
+                         Optional<uint32_t> &S);
 
 private:
   Error doit(const CVTypeArray &Types);
@@ -197,7 +196,7 @@
   /// its type indices.
   SmallVector<uint8_t, 256> RemapStorage;
 
-  Optional<EndPrecompRecord> EndPrecomp; 
+  Optional<uint32_t> PCHSignature;
 };
 
 } // end anonymous namespace
@@ -275,12 +274,12 @@
 Error TypeStreamMerger::mergeTypesAndIds(MergingTypeTableBuilder &DestIds,
                                          MergingTypeTableBuilder &DestTypes,
                                          const CVTypeArray &IdsAndTypes,
-                                         Optional<EndPrecompRecord> &EP) {
+                                         Optional<uint32_t> &S) {
   DestIdStream = &DestIds;
   DestTypeStream = &DestTypes;
   UseGlobalHashes = false;
   auto Err = doit(IdsAndTypes);
-  EP = EndPrecomp;
+  S = PCHSignature;
   return Err;
 }
 
@@ -288,12 +287,12 @@
 Error TypeStreamMerger::mergeTypeRecords(GlobalTypeTableBuilder &Dest,
                                          const CVTypeArray &Types,
                                          ArrayRef<GloballyHashedType> Hashes,
-                                         Optional<EndPrecompRecord> &EP) {
+                                         Optional<uint32_t> &S) {
   DestGlobalTypeStream = &Dest;
   UseGlobalHashes = true;
   GlobalHashes = Hashes;
   auto Err = doit(Types);
-  EP = EndPrecomp;
+  S = PCHSignature;
   return Err;
 }
 
@@ -313,13 +312,13 @@
                                          GlobalTypeTableBuilder &DestTypes,
                                          const CVTypeArray &IdsAndTypes,
                                          ArrayRef<GloballyHashedType> Hashes,
-                                         Optional<EndPrecompRecord> &EP) {
+                                         Optional<uint32_t> &S) {
   DestGlobalIdStream = &DestIds;
   DestGlobalTypeStream = &DestTypes;
   UseGlobalHashes = true;
   GlobalHashes = Hashes;
   auto Err = doit(IdsAndTypes);
-  EP = EndPrecomp;
+  S = PCHSignature;
   return Err;
 }
 
@@ -445,28 +444,27 @@
 Error llvm::codeview::mergeTypeAndIdRecords(
     MergingTypeTableBuilder &DestIds, MergingTypeTableBuilder &DestTypes,
     SmallVectorImpl<TypeIndex> &SourceToDest, const CVTypeArray &IdsAndTypes,
-    Optional<EndPrecompRecord> &EndPrecomp) {
+    Optional<uint32_t> &PCHSignature) {
   TypeStreamMerger M(SourceToDest);
-  return M.mergeTypesAndIds(DestIds, DestTypes, IdsAndTypes, EndPrecomp);
+  return M.mergeTypesAndIds(DestIds, DestTypes, IdsAndTypes, PCHSignature);
 }
 
 Error llvm::codeview::mergeTypeAndIdRecords(
     GlobalTypeTableBuilder &DestIds, GlobalTypeTableBuilder &DestTypes,
     SmallVectorImpl<TypeIndex> &SourceToDest, const CVTypeArray &IdsAndTypes,
-    ArrayRef<GloballyHashedType> Hashes,
-    Optional<EndPrecompRecord> &EndPrecomp) {
+    ArrayRef<GloballyHashedType> Hashes, Optional<uint32_t> &PCHSignature) {
   TypeStreamMerger M(SourceToDest);
   return M.mergeTypesAndIds(DestIds, DestTypes, IdsAndTypes, Hashes,
-                            EndPrecomp);
+                            PCHSignature);
 }
 
 Error llvm::codeview::mergeTypeRecords(GlobalTypeTableBuilder &Dest,
                                        SmallVectorImpl<TypeIndex> &SourceToDest,
                                        const CVTypeArray &Types,
                                        ArrayRef<GloballyHashedType> Hashes,
-                                       Optional<EndPrecompRecord> &EndPrecomp) {
+                                       Optional<uint32_t> &PCHSignature) {
   TypeStreamMerger M(SourceToDest);
-  return M.mergeTypeRecords(Dest, Types, Hashes, EndPrecomp);
+  return M.mergeTypeRecords(Dest, Types, Hashes, PCHSignature);
 }
 
 Error llvm::codeview::mergeIdRecords(GlobalTypeTableBuilder &Dest,
@@ -483,11 +481,13 @@
   // signature, through EndPrecompRecord. This is done here for performance
   // reasons, to avoid re-parsing the Types stream.
   if (Type.kind() == LF_ENDPRECOMP) {
-    assert(!EndPrecomp);
-    EndPrecomp.emplace();
+    EndPrecompRecord EP;
     if (auto EC = TypeDeserializer::deserializeAs(const_cast<CVType &>(Type),
-                                                  EndPrecomp.getValue()))
+                                                  EP))
       return joinErrors(std::move(EC), errorCorruptRecord());
+    if (PCHSignature.hasValue())
+      return errorCorruptRecord();
+    PCHSignature.emplace(EP.getSignature());
     return false;
   }
   return true;
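TypeStreamMerger now carries only the 32-bit PCH signature and reports a second LF_ENDPRECOMP record as a corrupt record instead of asserting. The set-once shape of that logic, sketched with std::optional in place of llvm::Optional:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

// The first LF_ENDPRECOMP record stores its signature; a second one is
// reported as corruption instead of tripping an assert.
bool recordPCHSignature(std::optional<uint32_t> &Sig, uint32_t NewSig) {
  if (Sig.has_value())
    return false; // duplicate record: corrupt input
  Sig.emplace(NewSig);
  return true;
}

int main() {
  std::optional<uint32_t> Sig;
  assert(recordPCHSignature(Sig, 0x12345678));
  assert(!recordPCHSignature(Sig, 0x12345678)); // second record rejected
  assert(*Sig == 0x12345678);
}
```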
diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp
index e330ce2..e6620ee 100644
--- a/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -767,7 +767,7 @@
   // http://lists.dwarfstd.org/htdig.cgi/dwarf-discuss-dwarfstd.org/2011-December/001173.html
   DWARFDataExtractor debugFrameData(DObj->getDebugFrameSection(),
                                     isLittleEndian(), DObj->getAddressSize());
-  DebugFrame.reset(new DWARFDebugFrame(false /* IsEH */));
+  DebugFrame.reset(new DWARFDebugFrame(getArch(), false /* IsEH */));
   DebugFrame->parse(debugFrameData);
   return DebugFrame.get();
 }
@@ -778,7 +778,7 @@
 
   DWARFDataExtractor debugFrameData(DObj->getEHFrameSection(), isLittleEndian(),
                                     DObj->getAddressSize());
-  DebugFrame.reset(new DWARFDebugFrame(true /* IsEH */));
+  DebugFrame.reset(new DWARFDebugFrame(getArch(), true /* IsEH */));
   DebugFrame->parse(debugFrameData);
   return DebugFrame.get();
 }
diff --git a/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp b/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp
index c26db9a..e8c5dec 100644
--- a/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp
+++ b/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp
@@ -12,6 +12,7 @@
 #include "llvm/DebugInfo/DWARF/DWARFContext.h"
 #include "llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h"
 #include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/WithColor.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>
@@ -53,10 +54,12 @@
   for (const auto &CU : CTX->compile_units()) {
     uint32_t CUOffset = CU->getOffset();
     if (ParsedCUOffsets.insert(CUOffset).second) {
-      DWARFAddressRangesVector CURanges;
-      CU->collectAddressRanges(CURanges);
-      for (const auto &R : CURanges)
-        appendRange(CUOffset, R.LowPC, R.HighPC);
+      Expected<DWARFAddressRangesVector> CURanges = CU->collectAddressRanges();
+      if (!CURanges)
+        WithColor::error() << toString(CURanges.takeError()) << '\n';
+      else
+        for (const auto &R : *CURanges)
+          appendRange(CUOffset, R.LowPC, R.HighPC);
     }
   }
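With collectAddressRanges returning Expected<DWARFAddressRangesVector>, this caller has to test the result and consume the error rather than assume success. A dependency-free sketch of the same produce/check/consume flow, with std::variant standing in for llvm::Expected (which additionally aborts if an error is destroyed unchecked):

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <variant>
#include <vector>

// Minimal stand-in for llvm::Expected<T>: either a value or an error string.
template <typename T> using ExpectedT = std::variant<T, std::string>;

using Ranges = std::vector<std::pair<uint64_t, uint64_t>>;

// Producer: fails with a message instead of printing from library code.
ExpectedT<Ranges> collectRanges(bool HaveUnitDie) {
  if (!HaveUnitDie)
    return std::string("No unit DIE"); // createStringError analogue
  return Ranges{{0x1000, 0x2000}};
}

int main() {
  auto R = collectRanges(false);
  if (auto *Err = std::get_if<std::string>(&R))
    std::cerr << "error: " << *Err << '\n'; // WithColor::error() analogue
  else
    for (auto &[Lo, Hi] : std::get<Ranges>(R))
      std::cout << std::hex << Lo << '-' << Hi << '\n';
}
```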
 
diff --git a/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp b/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
index f9d35dd..ba55ffc 100644
--- a/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
+++ b/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
@@ -225,7 +225,7 @@
   switch (Type) {
   case OT_Unset: {
     OS << " Unsupported " << (OperandIdx ? "second" : "first") << " operand to";
-    auto OpcodeName = CallFrameString(Opcode);
+    auto OpcodeName = CallFrameString(Opcode, Arch);
     if (!OpcodeName.empty())
       OS << " " << OpcodeName;
     else
@@ -279,7 +279,7 @@
     if (Opcode & DWARF_CFI_PRIMARY_OPCODE_MASK)
       Opcode &= DWARF_CFI_PRIMARY_OPCODE_MASK;
     OS.indent(2 * IndentLevel);
-    OS << CallFrameString(Opcode) << ":";
+    OS << CallFrameString(Opcode, Arch) << ":";
     for (unsigned i = 0; i < Instr.Ops.size(); ++i)
       printOperand(OS, MRI, IsEH, Instr, i, Instr.Ops[i]);
     OS << '\n';
@@ -325,8 +325,9 @@
   OS << "\n";
 }
 
-DWARFDebugFrame::DWARFDebugFrame(bool IsEH, uint64_t EHFrameAddress)
-    : IsEH(IsEH), EHFrameAddress(EHFrameAddress) {}
+DWARFDebugFrame::DWARFDebugFrame(Triple::ArchType Arch, bool IsEH,
+                                 uint64_t EHFrameAddress)
+    : Arch(Arch), IsEH(IsEH), EHFrameAddress(EHFrameAddress) {}
 
 DWARFDebugFrame::~DWARFDebugFrame() = default;
 
@@ -445,6 +446,11 @@
               StartAugmentationOffset = Offset;
               EndAugmentationOffset = Offset +
                 static_cast<uint32_t>(*AugmentationLength);
+              break;
+            case 'B':
+              // B-Key is used for signing functions associated with this
+              // augmentation string
+              break;
           }
         }
 
@@ -461,7 +467,7 @@
           StartOffset, Length, Version, AugmentationString, AddressSize,
           SegmentDescriptorSize, CodeAlignmentFactor, DataAlignmentFactor,
           ReturnAddressRegister, AugmentationData, FDEPointerEncoding,
-          LSDAPointerEncoding, Personality, PersonalityEncoding);
+          LSDAPointerEncoding, Personality, PersonalityEncoding, Arch);
       CIEs[StartOffset] = Cie.get();
       Entries.emplace_back(std::move(Cie));
     } else {
@@ -513,7 +519,7 @@
 
       Entries.emplace_back(new FDE(StartOffset, Length, CIEPointer,
                                    InitialLocation, AddressRange,
-                                   Cie, LSDAAddress));
+                                   Cie, LSDAAddress, Arch));
     }
 
     if (Error E =
diff --git a/lib/DebugInfo/DWARF/DWARFDie.cpp b/lib/DebugInfo/DWARF/DWARFDie.cpp
index 551e292..81ef0c8 100644
--- a/lib/DebugInfo/DWARF/DWARFDie.cpp
+++ b/lib/DebugInfo/DWARF/DWARFDie.cpp
@@ -71,15 +71,7 @@
     OS.indent(Indent);
     R.dump(OS, AddressSize);
 
-    if (SectionNames.empty() || R.SectionIndex == -1ULL)
-      continue;
-
-    StringRef Name = SectionNames[R.SectionIndex].Name;
-    OS << " \"" << Name << '\"';
-
-    // Print section index if name is not unique.
-    if (!SectionNames[R.SectionIndex].IsNameUnique)
-      OS << format(" [%" PRIu64 "]", R.SectionIndex);
+    DWARFFormValue::dumpAddressSection(Obj, OS, DumpOpts, R.SectionIndex);
   }
 }
 
@@ -154,6 +146,52 @@
   OS << TagStr.substr(7, TagStr.size() - 12) << " ";
 }
 
+static void dumpArrayType(raw_ostream &OS, const DWARFDie &D) {
+  Optional<uint64_t> Bound;
+  for (const DWARFDie &C : D.children())
+    if (C.getTag() == DW_TAG_subrange_type) {
+      Optional<uint64_t> LB;
+      Optional<uint64_t> Count;
+      Optional<uint64_t> UB;
+      Optional<unsigned> DefaultLB;
+      if (Optional<DWARFFormValue> L = C.find(DW_AT_lower_bound))
+        LB = L->getAsUnsignedConstant();
+      if (Optional<DWARFFormValue> CountV = C.find(DW_AT_count))
+        Count = CountV->getAsUnsignedConstant();
+      if (Optional<DWARFFormValue> UpperV = C.find(DW_AT_upper_bound))
+        UB = UpperV->getAsUnsignedConstant();
+      if (Optional<DWARFFormValue> LV =
+              D.getDwarfUnit()->getUnitDIE().find(DW_AT_language))
+        if (Optional<uint64_t> LC = LV->getAsUnsignedConstant())
+          if ((DefaultLB =
+                   LanguageLowerBound(static_cast<dwarf::SourceLanguage>(*LC))))
+            if (LB && *LB == *DefaultLB)
+              LB = None;
+      if (!LB && !Count && !UB)
+        OS << "[]";
+      else if (!LB && (Count || UB) && DefaultLB)
+        OS << '[' << (Count ? *Count : *UB - *DefaultLB + 1) << ']';
+      else {
+        OS << "[[";
+        if (LB)
+          OS << *LB;
+        else
+          OS << '?';
+        OS << ", ";
+        if (Count)
+          if (LB)
+            OS << *LB + *Count;
+          else
+            OS << "? + " << *Count;
+        else if (UB)
+          OS << *UB + 1;
+        else
+          OS << '?';
+        OS << ")]";
+      }
+    }
+}
+
 /// Recursively dump the DIE type name when applicable.
 static void dumpTypeName(raw_ostream &OS, const DWARFDie &D) {
   if (!D.isValid())
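dumpArrayType renders subrange bounds in three shapes: [] when nothing is known, [N] when a count or an upper bound plus the language's default lower bound suffices, and a half-open [[lb, ub)] form with ? placeholders otherwise. A sketch of those rules with the DWARF attribute lookups replaced by optionals (DefaultLB models LanguageLowerBound, e.g. 0 for C++ and 1 for Fortran):

```cpp
#include <cassert>
#include <cstdint>
#include <optional>
#include <sstream>
#include <string>

// LB/Count/UB model DW_AT_lower_bound / DW_AT_count / DW_AT_upper_bound.
std::string renderBounds(std::optional<uint64_t> LB,
                         std::optional<uint64_t> Count,
                         std::optional<uint64_t> UB,
                         std::optional<unsigned> DefaultLB) {
  if (LB && DefaultLB && *LB == *DefaultLB)
    LB.reset(); // a lower bound matching the language default stays implicit
  std::ostringstream OS;
  if (!LB && !Count && !UB) {
    OS << "[]";
  } else if (!LB && (Count || UB) && DefaultLB) {
    OS << '[' << (Count ? *Count : *UB - *DefaultLB + 1) << ']';
  } else {
    OS << "[[";
    if (LB) OS << *LB; else OS << '?';
    OS << ", ";
    if (Count) {
      if (LB) OS << *LB + *Count; else OS << "? + " << *Count;
    } else if (UB) {
      OS << *UB + 1;
    } else {
      OS << '?';
    }
    OS << ")]";
  }
  return OS.str();
}

int main() {
  assert(renderBounds({}, {}, {}, 0u) == "[]");     // no bounds at all
  assert(renderBounds({}, {}, 9, 0u) == "[10]");    // C-style int[10]
  assert(renderBounds(2, {}, 5, 1u) == "[[2, 6)]"); // explicit bounds
}
```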
@@ -201,24 +239,7 @@
     break;
   }
   case DW_TAG_array_type: {
-    Optional<uint64_t> Bound;
-    for (const DWARFDie &C : D.children())
-      if (C.getTag() == DW_TAG_subrange_type) {
-        OS << '[';
-        uint64_t LowerBound = 0;
-        if (Optional<DWARFFormValue> L = C.find(DW_AT_lower_bound))
-          if (Optional<uint64_t> LB = L->getAsUnsignedConstant()) {
-            LowerBound = *LB;
-            OS << LowerBound << '-';
-          }
-        if (Optional<DWARFFormValue> CountV = C.find(DW_AT_count)) {
-          if (Optional<uint64_t> C = CountV->getAsUnsignedConstant())
-            OS << (*C + LowerBound);
-        } else if (Optional<DWARFFormValue> UpperV = C.find(DW_AT_upper_bound))
-          if (Optional<uint64_t> U = UpperV->getAsUnsignedConstant())
-            OS << *U;
-        OS << ']';
-      }
+    dumpArrayType(OS, D);
     break;
   }
   case DW_TAG_pointer_type:
@@ -324,10 +345,9 @@
     const DWARFObject &Obj = Die.getDwarfUnit()->getContext().getDWARFObj();
     // For DW_FORM_rnglistx we need to dump the offset separately, since
     // we have only dumped the index so far.
-    Optional<DWARFFormValue> Value = Die.find(DW_AT_ranges);
-    if (Value && Value->getForm() == DW_FORM_rnglistx)
+    if (formValue.getForm() == DW_FORM_rnglistx)
       if (auto RangeListOffset =
-              U->getRnglistOffset(*Value->getAsSectionOffset())) {
+              U->getRnglistOffset(*formValue.getAsSectionOffset())) {
         DWARFFormValue FV(dwarf::DW_FORM_sec_offset);
         FV.setUValue(*RangeListOffset);
         FV.dump(OS, DumpOpts);
@@ -446,13 +466,13 @@
 bool DWARFDie::getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC,
                                uint64_t &SectionIndex) const {
   auto F = find(DW_AT_low_pc);
-  auto LowPcAddr = toAddress(F);
+  auto LowPcAddr = toSectionedAddress(F);
   if (!LowPcAddr)
     return false;
-  if (auto HighPcAddr = getHighPC(*LowPcAddr)) {
-    LowPC = *LowPcAddr;
+  if (auto HighPcAddr = getHighPC(LowPcAddr->Address)) {
+    LowPC = LowPcAddr->Address;
     HighPC = *HighPcAddr;
-    SectionIndex = F->getSectionIndex();
+    SectionIndex = LowPcAddr->SectionIndex;
     return true;
   }
   return false;
diff --git a/lib/DebugInfo/DWARF/DWARFFormValue.cpp b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
index 9226dca..7719fea 100644
--- a/lib/DebugInfo/DWARF/DWARFFormValue.cpp
+++ b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
@@ -331,6 +331,29 @@
   return true;
 }
 
+void DWARFFormValue::dumpSectionedAddress(raw_ostream &OS,
+                                          DIDumpOptions DumpOpts,
+                                          SectionedAddress SA) const {
+  OS << format("0x%016" PRIx64, SA.Address);
+  dumpAddressSection(U->getContext().getDWARFObj(), OS, DumpOpts,
+                     SA.SectionIndex);
+}
+
+void DWARFFormValue::dumpAddressSection(const DWARFObject &Obj, raw_ostream &OS,
+                                        DIDumpOptions DumpOpts,
+                                        uint64_t SectionIndex) {
+  if (!DumpOpts.Verbose || SectionIndex == -1ULL)
+    return;
+  ArrayRef<SectionName> SectionNames = Obj.getSectionNames();
+  const auto &SecRef = SectionNames[SectionIndex];
+
+  OS << " \"" << SecRef.Name << '\"';
+
+  // Print section index if name is not unique.
+  if (!SecRef.IsNameUnique)
+    OS << format(" [%" PRIu64 "]", SectionIndex);
+}
+
 void DWARFFormValue::dump(raw_ostream &OS, DIDumpOptions DumpOpts) const {
   uint64_t UValue = Value.uval;
   bool CURelativeOffset = false;
@@ -339,7 +362,7 @@
                             : nulls();
   switch (Form) {
   case DW_FORM_addr:
-    AddrOS << format("0x%016" PRIx64, UValue);
+    dumpSectionedAddress(AddrOS, DumpOpts, {Value.uval, Value.SectionIndex});
     break;
   case DW_FORM_addrx:
   case DW_FORM_addrx1:
@@ -347,11 +370,13 @@
   case DW_FORM_addrx3:
   case DW_FORM_addrx4:
   case DW_FORM_GNU_addr_index: {
-    AddrOS << format(" indexed (%8.8x) address = ", (uint32_t)UValue);
+    Optional<SectionedAddress> A = U->getAddrOffsetSectionItem(UValue);
+    if (!A || DumpOpts.Verbose)
+      AddrOS << format("indexed (%8.8x) address = ", (uint32_t)UValue);
     if (U == nullptr)
       OS << "<invalid dwarf unit>";
-    else if (Optional<SectionedAddress> A = U->getAddrOffsetSectionItem(UValue))
-      AddrOS << format("0x%016" PRIx64, A->Address);
+    else if (A)
+      dumpSectionedAddress(AddrOS, DumpOpts, *A);
     else
       OS << "<no .debug_addr section>";
     break;
@@ -443,7 +468,7 @@
   case DW_FORM_strx4:
   case DW_FORM_GNU_str_index:
     if (DumpOpts.Verbose)
-      OS << format(" indexed (%8.8x) string = ", (uint32_t)UValue);
+      OS << format("indexed (%8.8x) string = ", (uint32_t)UValue);
     dumpString(OS);
     break;
   case DW_FORM_GNU_strp_alt:
diff --git a/lib/DebugInfo/DWARF/DWARFUnit.cpp b/lib/DebugInfo/DWARF/DWARFUnit.cpp
index 48900e4..8023466 100644
--- a/lib/DebugInfo/DWARF/DWARFUnit.cpp
+++ b/lib/DebugInfo/DWARF/DWARFUnit.cpp
@@ -566,42 +566,18 @@
                              "missing or invalid range list table");
 }
 
-void DWARFUnit::collectAddressRanges(DWARFAddressRangesVector &CURanges) {
+Expected<DWARFAddressRangesVector> DWARFUnit::collectAddressRanges() {
   DWARFDie UnitDie = getUnitDIE();
   if (!UnitDie)
-    return;
+    return createStringError(errc::invalid_argument, "No unit DIE");
+
   // First, check if unit DIE describes address ranges for the whole unit.
   auto CUDIERangesOrError = UnitDie.getAddressRanges();
-  if (CUDIERangesOrError) {
-    if (!CUDIERangesOrError.get().empty()) {
-      CURanges.insert(CURanges.end(), CUDIERangesOrError.get().begin(),
-                      CUDIERangesOrError.get().end());
-      return;
-    }
-  } else
-    WithColor::error() << "decoding address ranges: "
-                       << toString(CUDIERangesOrError.takeError()) << '\n';
-
-  // This function is usually called if there in no .debug_aranges section
-  // in order to produce a compile unit level set of address ranges that
-  // is accurate. If the DIEs weren't parsed, then we don't want all dies for
-  // all compile units to stay loaded when they weren't needed. So we can end
-  // up parsing the DWARF and then throwing them all away to keep memory usage
-  // down.
-  const bool ClearDIEs = extractDIEsIfNeeded(false) > 1;
-  getUnitDIE().collectChildrenAddressRanges(CURanges);
-
-  // Collect address ranges from DIEs in .dwo if necessary.
-  bool DWOCreated = parseDWO();
-  if (DWO)
-    DWO->collectAddressRanges(CURanges);
-  if (DWOCreated)
-    DWO.reset();
-
-  // Keep memory down by clearing DIEs if this generate function
-  // caused them to be parsed.
-  if (ClearDIEs)
-    clearDIEs(true);
+  if (!CUDIERangesOrError)
+    return createStringError(errc::invalid_argument,
+                             "decoding address ranges: %s",
+                             toString(CUDIERangesOrError.takeError()).c_str());
+  return *CUDIERangesOrError;
 }
 
 void DWARFUnit::updateAddressDieMap(DWARFDie Die) {
diff --git a/lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp b/lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp
index 5ff7c15..8c97f4a 100644
--- a/lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp
+++ b/lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp
@@ -11,7 +11,9 @@
 #include "llvm/ADT/iterator_range.h"
 #include "llvm/DebugInfo/CodeView/CodeView.h"
 #include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
+#include "llvm/DebugInfo/CodeView/SymbolDeserializer.h"
 #include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecordHelpers.h"
 #include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
 #include "llvm/DebugInfo/PDB/Native/RawError.h"
 #include "llvm/Support/BinaryStreamReader.h"
@@ -77,6 +79,11 @@
   return Error::success();
 }
 
+const codeview::CVSymbolArray
+ModuleDebugStreamRef::getSymbolArrayForScope(uint32_t ScopeBegin) const {
+  return limitSymbolArrayToScope(SymbolArray, ScopeBegin);
+}
+
 BinarySubstreamRef ModuleDebugStreamRef::getSymbolsSubstream() const {
   return SymbolsSubstream;
 }
diff --git a/lib/Demangle/MicrosoftDemangle.cpp b/lib/Demangle/MicrosoftDemangle.cpp
index cca7bf9..51ffa0b 100644
--- a/lib/Demangle/MicrosoftDemangle.cpp
+++ b/lib/Demangle/MicrosoftDemangle.cpp
@@ -1681,11 +1681,14 @@
   return Ty;
 }
 
-void Demangler::demangleThrowSpecification(StringView &MangledName) {
+bool Demangler::demangleThrowSpecification(StringView &MangledName) {
+  if (MangledName.consumeFront("_E"))
+    return true;
   if (MangledName.consumeFront('Z'))
-    return;
+    return false;
 
   Error = true;
+  return false;
 }
 
 FunctionSignatureNode *Demangler::demangleFunctionType(StringView &MangledName,
@@ -1709,7 +1712,7 @@
 
   FTy->Params = demangleFunctionParameterList(MangledName);
 
-  demangleThrowSpecification(MangledName);
+  FTy->IsNoexcept = demangleThrowSpecification(MangledName);
 
   return FTy;
 }
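demangleThrowSpecification now reports whether the function type is noexcept: _E marks noexcept, Z a plain potentially-throwing function, and anything else is a malformed mangling. A sketch of that consumeFront-style parse over std::string (the real code uses StringView):

```cpp
#include <cassert>
#include <string>

// Models the rewritten demangleThrowSpecification: returns IsNoexcept and
// sets Error on malformed input, consuming the matched prefix.
bool demangleThrowSpec(std::string &Mangled, bool &Error) {
  if (Mangled.rfind("_E", 0) == 0) { // consumeFront("_E")
    Mangled.erase(0, 2);
    return true; // noexcept
  }
  if (!Mangled.empty() && Mangled[0] == 'Z') { // consumeFront('Z')
    Mangled.erase(0, 1);
    return false;
  }
  Error = true;
  return false;
}

int main() {
  bool Error = false;
  std::string S = "_E";
  assert(demangleThrowSpec(S, Error) && !Error && S.empty());
  std::string T = "Z";
  assert(!demangleThrowSpec(T, Error) && !Error && T.empty());
  std::string U = "Q";
  demangleThrowSpec(U, Error);
  assert(Error);
}
```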
diff --git a/lib/Demangle/MicrosoftDemangleNodes.cpp b/lib/Demangle/MicrosoftDemangleNodes.cpp
index c57f0cf..622f8e7 100644
--- a/lib/Demangle/MicrosoftDemangleNodes.cpp
+++ b/lib/Demangle/MicrosoftDemangleNodes.cpp
@@ -15,6 +15,7 @@
 #include "llvm/Demangle/Compiler.h"
 #include "llvm/Demangle/Utility.h"
 #include <cctype>
+#include <string>
 
 using namespace llvm;
 using namespace ms_demangle;
@@ -113,6 +114,14 @@
   }
 }
 
+std::string Node::toString(OutputFlags Flags) const {
+  OutputStream OS;
+  initializeOutputStream(nullptr, nullptr, OS, 1024);
+  this->output(OS, Flags);
+  OS << '\0';
+  return {OS.getBuffer()};
+}
+
 void TypeNode::outputQuals(bool SpaceBefore, bool SpaceAfter) const {}
 
 void PrimitiveTypeNode::outputPre(OutputStream &OS, OutputFlags Flags) const {
@@ -414,6 +423,9 @@
   if (Quals & Q_Unaligned)
     OS << " __unaligned";
 
+  if (IsNoexcept)
+    OS << " noexcept";
+
   if (RefQualifier == FunctionRefQualifier::Reference)
     OS << " &";
   else if (RefQualifier == FunctionRefQualifier::RValueReference)
@@ -501,13 +513,15 @@
 }
 
 void TagTypeNode::outputPre(OutputStream &OS, OutputFlags Flags) const {
-  switch (Tag) {
-    OUTPUT_ENUM_CLASS_VALUE(TagKind, Class, "class");
-    OUTPUT_ENUM_CLASS_VALUE(TagKind, Struct, "struct");
-    OUTPUT_ENUM_CLASS_VALUE(TagKind, Union, "union");
-    OUTPUT_ENUM_CLASS_VALUE(TagKind, Enum, "enum");
+  if (!(Flags & OF_NoTagSpecifier)) {
+    switch (Tag) {
+      OUTPUT_ENUM_CLASS_VALUE(TagKind, Class, "class");
+      OUTPUT_ENUM_CLASS_VALUE(TagKind, Struct, "struct");
+      OUTPUT_ENUM_CLASS_VALUE(TagKind, Union, "union");
+      OUTPUT_ENUM_CLASS_VALUE(TagKind, Enum, "enum");
+    }
+    OS << " ";
   }
-  OS << " ";
   QualifiedName->output(OS, Flags);
   outputQualifiers(OS, Quals, true, false);
 }
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 39cf6d4..98dca11 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1778,17 +1778,14 @@
 
 void Interpreter::visitInsertElementInst(InsertElementInst &I) {
   ExecutionContext &SF = ECStack.back();
-  Type *Ty = I.getType();
-
-  if(!(Ty->isVectorTy()) )
-    llvm_unreachable("Unhandled dest type for insertelement instruction");
+  VectorType *Ty = cast<VectorType>(I.getType());
 
   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
   GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
   GenericValue Dest;
 
-  Type *TyContained = Ty->getContainedType(0);
+  Type *TyContained = Ty->getElementType();
 
   const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
   Dest.AggregateVal = Src1.AggregateVal;
@@ -1814,9 +1811,7 @@
 void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
   ExecutionContext &SF = ECStack.back();
 
-  Type *Ty = I.getType();
-  if(!(Ty->isVectorTy()))
-    llvm_unreachable("Unhandled dest type for shufflevector instruction");
+  VectorType *Ty = cast<VectorType>(I.getType());
 
   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
@@ -1827,7 +1822,7 @@
   // bytecode can't contain different types for src1 and src2 for a
   // shufflevector instruction.
 
-  Type *TyContained = Ty->getContainedType(0);
+  Type *TyContained = Ty->getElementType();
   unsigned src1Size = (unsigned)Src1.AggregateVal.size();
   unsigned src2Size = (unsigned)Src2.AggregateVal.size();
   unsigned src3Size = (unsigned)Src3.AggregateVal.size();
diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index 044d9b7..334fcac 100644
--- a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -103,8 +103,9 @@
   // composite function name should be.
   std::string ExtName = "lle_";
   FunctionType *FT = F->getFunctionType();
-  for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i)
-    ExtName += getTypeID(FT->getContainedType(i));
+  ExtName += getTypeID(FT->getReturnType());
+  for (Type *T : FT->params())
+    ExtName += getTypeID(T);
   ExtName += ("_" + F->getName()).str();
 
   sys::ScopedLock Writer(*FunctionsLock);
diff --git a/lib/ExecutionEngine/Orc/OrcABISupport.cpp b/lib/ExecutionEngine/Orc/OrcABISupport.cpp
index cf335df..aa40555 100644
--- a/lib/ExecutionEngine/Orc/OrcABISupport.cpp
+++ b/lib/ExecutionEngine/Orc/OrcABISupport.cpp
@@ -537,7 +537,8 @@
   return Error::success();
 }
 
-void OrcMips32_Base::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
+void OrcMips32_Base::writeResolverCode(uint8_t *ResolverMem,
+                                       JITReentryFn ReentryFn,
                                        void *CallbackMgr, bool isBigEndian) {
 
   const uint32_t ResolverCode[] = {
@@ -604,51 +605,42 @@
       0x8fb1001c,                    // 0xd4: lw $s1,28($sp)
       0x8fb00018,                    // 0xd8: lw $s0,24($sp)
       0x8fa70014,                    // 0xdc: lw $a3,20($sp)
-      0x8fa70014,                    // 0xe0: lw $a3,20($sp)
-      0x8fa60010,                    // 0xe4: lw $a2,16($sp)
-      0x8fa5000c,                    // 0xe8: lw $a1,12($sp)
-      0x8fa40008,                    // 0xec: lw $a0,8($sp)
-      0x27bd0068,                    // 0xf4: addiu $sp,$sp,104
-      0x0300f825,                    // 0xf8: move $ra, $t8
-      0x00000000                     // 0xfc: jr $v0/v1
+      0x8fa60010,                    // 0xe0: lw $a2,16($sp)
+      0x8fa5000c,                    // 0xe4: lw $a1,12($sp)
+      0x8fa40008,                    // 0xe8: lw $a0,8($sp)
+      0x27bd0068,                    // 0xec: addiu $sp,$sp,104
+      0x0300f825,                    // 0xf0: move $ra, $t8
+      0x03200008,                    // 0xf4: jr $t9
+      0x00000000,                    // 0xf8: move $t9, $v0/v1
   };
 
-
-  const unsigned ReentryFnAddrOffset = 0x7c;  // JIT re-entry fn addr lui
+  const unsigned ReentryFnAddrOffset = 0x7c;   // JIT re-entry fn addr lui
   const unsigned CallbackMgrAddrOffset = 0x6c; // Callback manager addr lui
-  const unsigned offsett = 0xfc;
+  const unsigned Offset = 0xf8;
 
   memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
 
-  //Depending on endian return value will be in v0 or v1.
-  uint32_t JumpV0 = 0x00400008;
-  uint32_t JumpV1 = 0x00600008;
-
-  if(isBigEndian == true)
-     memcpy(ResolverMem + offsett, &JumpV1,
-         sizeof(JumpV1));
-  else
-     memcpy(ResolverMem + offsett, &JumpV0,
-         sizeof(JumpV0));
+  // Depending on endianness, the return value will be in v0 or v1.
+  uint32_t MoveVxT9 = isBigEndian ? 0x0060c825 : 0x0040c825;
+  memcpy(ResolverMem + Offset, &MoveVxT9, sizeof(MoveVxT9));
 
   uint64_t CallMgrAddr = reinterpret_cast<uint64_t>(CallbackMgr);
   uint32_t CallMgrLUi = 0x3c040000 | (((CallMgrAddr + 0x8000) >> 16) & 0xFFFF);
   uint32_t CallMgrADDiu = 0x24840000 | ((CallMgrAddr) & 0xFFFF);
-  memcpy(ResolverMem + CallbackMgrAddrOffset, &CallMgrLUi,
-         sizeof(CallMgrLUi));
-  memcpy(ResolverMem + (CallbackMgrAddrOffset + 4), &CallMgrADDiu,
+  memcpy(ResolverMem + CallbackMgrAddrOffset, &CallMgrLUi, sizeof(CallMgrLUi));
+  memcpy(ResolverMem + CallbackMgrAddrOffset + 4, &CallMgrADDiu,
          sizeof(CallMgrADDiu));
 
   uint64_t ReentryAddr = reinterpret_cast<uint64_t>(ReentryFn);
   uint32_t ReentryLUi = 0x3c190000 | (((ReentryAddr + 0x8000) >> 16) & 0xFFFF);
   uint32_t ReentryADDiu = 0x27390000 | ((ReentryAddr) & 0xFFFF);
-  memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryLUi,
-         sizeof(ReentryLUi));
-  memcpy(ResolverMem + (ReentryFnAddrOffset + 4), &ReentryADDiu,
+  memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryLUi, sizeof(ReentryLUi));
+  memcpy(ResolverMem + ReentryFnAddrOffset + 4, &ReentryADDiu,
          sizeof(ReentryADDiu));
 }
 
-void OrcMips32_Base::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+void OrcMips32_Base::writeTrampolines(uint8_t *TrampolineMem,
+                                      void *ResolverAddr,
                                       unsigned NumTrampolines) {
 
   uint32_t *Trampolines = reinterpret_cast<uint32_t *>(TrampolineMem);
@@ -662,7 +654,6 @@
     Trampolines[5 * I + 3] = 0x0320f809;                           // jalr $t9
     Trampolines[5 * I + 4] = 0x00000000;                           // nop
   }
-
 }
 
 Error OrcMips32_Base::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
@@ -769,7 +760,7 @@
       0xffb700a8,                     // 0x58: sd s7,168(sp)
       0xffb800b0,                     // 0x5c: sd t8,176(sp)
       0xffb900b8,                     // 0x60: sd t9,184(sp)
-      0xffbe00c0,                     // 0x64: sd s8,192(sp)
+      0xffbe00c0,                     // 0x64: sd fp,192(sp)
       0xffbf00c8,                     // 0x68: sd ra,200(sp)
 
       // Callback manager addr.
@@ -793,7 +784,7 @@
       0x0320f809,                     // 0xa4: jalr $t9
       0x00000000,                     // 0xa8: nop
       0xdfbf00c8,                     // 0xac: ld ra, 200(sp)
-      0xdfbe00c0,                     // 0xb0: ld s8, 192(sp)
+      0xdfbe00c0,                     // 0xb0: ld fp, 192(sp)
       0xdfb900b8,                     // 0xb4: ld t9, 184(sp)
       0xdfb800b0,                     // 0xb8: ld t8, 176(sp)
       0xdfb700a8,                     // 0xbc: ld s7, 168(sp)
@@ -819,7 +810,8 @@
       0xdfa30008,                     // 0x10c: ld v1, 8(sp)
       0x67bd00d0,                     // 0x110: daddiu $sp,$sp,208
       0x0300f825,                     // 0x114: move $ra, $t8
-      0x00400008                      // 0x118: jr $v0
+      0x03200008,                     // 0x118: jr $t9
+      0x0040c825,                     // 0x11c: move $t9, $v0
   };
 
   const unsigned ReentryFnAddrOffset = 0x8c;   // JIT re-entry fn addr lui
@@ -827,21 +819,19 @@
 
   memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
 
-
   uint64_t CallMgrAddr = reinterpret_cast<uint64_t>(CallbackMgr);
 
   uint32_t CallMgrLUi =
       0x3c040000 | (((CallMgrAddr + 0x800080008000) >> 48) & 0xFFFF);
   uint32_t CallMgrDADDiu =
-      0x64840000 | (((CallMgrAddr + 0x80008000)  >> 32) & 0xFFFF);
+      0x64840000 | (((CallMgrAddr + 0x80008000) >> 32) & 0xFFFF);
   uint32_t CallMgrDSLL = 0x00042438;
   uint32_t CallMgrDADDiu2 =
       0x64840000 | ((((CallMgrAddr + 0x8000) >> 16) & 0xFFFF));
   uint32_t CallMgrDSLL2 = 0x00042438;
-  uint32_t CallMgrDADDiu3 = 0x64840000 | ((CallMgrAddr) & 0xFFFF);
+  uint32_t CallMgrDADDiu3 = 0x64840000 | ((CallMgrAddr)&0xFFFF);
 
-  memcpy(ResolverMem + CallbackMgrAddrOffset, &CallMgrLUi,
-         sizeof(CallMgrLUi));
+  memcpy(ResolverMem + CallbackMgrAddrOffset, &CallMgrLUi, sizeof(CallMgrLUi));
   memcpy(ResolverMem + (CallbackMgrAddrOffset + 4), &CallMgrDADDiu,
          sizeof(CallMgrDADDiu));
   memcpy(ResolverMem + (CallbackMgrAddrOffset + 8), &CallMgrDSLL,
@@ -868,10 +858,9 @@
 
   uint32_t ReentryDSLL2 = 0x0019cc38;
 
-  uint32_t ReentryDADDiu3 = 0x67390000 | ((ReentryAddr) & 0xFFFF);
+  uint32_t ReentryDADDiu3 = 0x67390000 | ((ReentryAddr)&0xFFFF);
 
-  memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryLUi,
-         sizeof(ReentryLUi));
+  memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryLUi, sizeof(ReentryLUi));
   memcpy(ResolverMem + (ReentryFnAddrOffset + 4), &ReentryDADDiu,
          sizeof(ReentryDADDiu));
   memcpy(ResolverMem + (ReentryFnAddrOffset + 8), &ReentryDSLL,
@@ -906,7 +895,6 @@
     Trampolines[10 * I + 8] = 0x00000000;                            // nop
     Trampolines[10 * I + 9] = 0x00000000;                            // nop
   }
-
 }
 
 Error OrcMips64::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
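
The bias constants in the hunk above (0x8000, 0x80008000, 0x800080008000) exist because daddiu sign-extends its 16-bit immediate, so each lower chunk pre-adds 0x8000 at the right position and the carry cancels the sign extension. A minimal standalone model of that immediate math (a sketch of the arithmetic only, not of the emitted instructions):

    #include <cassert>
    #include <cstdint>

    // Reassemble a 64-bit address from the four 16-bit immediates computed
    // the same way as CallMgrLUi/CallMgrDADDiu* above, honoring the
    // sign extension done by lui (32-bit result) and daddiu (16-bit imm).
    static uint64_t materialize(uint64_t Addr) {
      uint64_t R = (int32_t)(uint32_t)                              // lui
          ((((Addr + 0x800080008000ULL) >> 48) & 0xFFFF) << 16);
      R += (int16_t)(((Addr + 0x80008000ULL) >> 32) & 0xFFFF);      // daddiu
      R <<= 16;                                                     // dsll 16
      R += (int16_t)(((Addr + 0x8000ULL) >> 16) & 0xFFFF);          // daddiu
      R <<= 16;                                                     // dsll 16
      R += (int16_t)(Addr & 0xFFFF);                                // daddiu
      return R;
    }

    int main() {
      for (uint64_t A : {0x0ULL, 0x7fff8000ULL, 0x123456789abcdef0ULL, ~0ULL})
        assert(materialize(A) == A);
    }
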
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index 36f4f3a..a5dc623 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -36,7 +36,6 @@
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/Comdat.h"
 #include "llvm/IR/Constant.h"
@@ -998,9 +997,9 @@
 
       // We allow direct calls to any llvm.foo function here, because the
       // target may not be linked into the optimizer.
-      if (auto CS = ImmutableCallSite(&I)) {
+      if (const auto *Call = dyn_cast<CallBase>(&I)) {
         // Add all the call attributes to the table.
-        AttributeSet Attrs = CS.getAttributes().getFnAttributes();
+        AttributeSet Attrs = Call->getAttributes().getFnAttributes();
         if (Attrs.hasAttributes())
           CreateAttributeSetSlot(Attrs);
       }
@@ -2359,7 +2358,7 @@
 
   void writeOperand(const Value *Op, bool PrintType);
   void writeParamOperand(const Value *Operand, AttributeSet Attrs);
-  void writeOperandBundles(ImmutableCallSite CS);
+  void writeOperandBundles(const CallBase *Call);
   void writeSyncScope(const LLVMContext &Context,
                       SyncScope::ID SSID);
   void writeAtomic(const LLVMContext &Context,
@@ -2510,15 +2509,15 @@
   WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
 }
 
-void AssemblyWriter::writeOperandBundles(ImmutableCallSite CS) {
-  if (!CS.hasOperandBundles())
+void AssemblyWriter::writeOperandBundles(const CallBase *Call) {
+  if (!Call->hasOperandBundles())
     return;
 
   Out << " [ ";
 
   bool FirstBundle = true;
-  for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
-    OperandBundleUse BU = CS.getOperandBundleAt(i);
+  for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i) {
+    OperandBundleUse BU = Call->getOperandBundleAt(i);
 
     if (!FirstBundle)
       Out << ", ";
diff --git a/lib/IR/AutoUpgrade.cpp b/lib/IR/AutoUpgrade.cpp
index 39e29a2..b2eb8b0 100644
--- a/lib/IR/AutoUpgrade.cpp
+++ b/lib/IR/AutoUpgrade.cpp
@@ -77,10 +77,18 @@
       Name == "addcarry.u64" || // Added in 8.0
       Name == "subborrow.u32" || // Added in 8.0
       Name == "subborrow.u64" || // Added in 8.0
+      Name.startswith("sse2.padds.") || // Added in 8.0
+      Name.startswith("sse2.psubs.") || // Added in 8.0
       Name.startswith("sse2.paddus.") || // Added in 8.0
       Name.startswith("sse2.psubus.") || // Added in 8.0
+      Name.startswith("avx2.padds.") || // Added in 8.0
+      Name.startswith("avx2.psubs.") || // Added in 8.0
       Name.startswith("avx2.paddus.") || // Added in 8.0
       Name.startswith("avx2.psubus.") || // Added in 8.0
+      Name.startswith("avx512.padds.") || // Added in 8.0
+      Name.startswith("avx512.psubs.") || // Added in 8.0
+      Name.startswith("avx512.mask.padds.") || // Added in 8.0
+      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
       Name.startswith("avx512.mask.paddus.") || // Added in 8.0
       Name.startswith("avx512.mask.psubus.") || // Added in 8.0
       Name=="ssse3.pabs.b.128" || // Added in 6.0
@@ -277,6 +285,12 @@
       Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
       Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
       Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
+      Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
+      Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
+      Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
+      Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
+      Name.startswith("avx512.vpshld.") || // Added in 8.0
+      Name.startswith("avx512.vpshrd.") || // Added in 8.0
       Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
       Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
       Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
@@ -284,12 +298,8 @@
       Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
       Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
       Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
-      Name.startswith("avx512.mask.prorv.") || // Added in 7.0
-      Name.startswith("avx512.mask.pror.") || // Added in 7.0
-      Name.startswith("avx512.mask.prolv.") || // Added in 7.0
-      Name.startswith("avx512.mask.prol.") || // Added in 7.0
-      Name.startswith("avx512.mask.padds.") || // Added in 8.0
-      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
+      Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
+      Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
       Name == "sse.cvtsi2ss" || // Added in 7.0
       Name == "sse.cvtsi642ss" || // Added in 7.0
       Name == "sse2.cvtsi2sd" || // Added in 7.0
@@ -354,6 +364,13 @@
       Name.startswith("avx512.cvtmask2") || // Added in 5.0
       (Name.startswith("xop.vpcom") && // Added in 3.2
        F->arg_size() == 2) ||
+      Name.startswith("xop.vprot") || // Added in 8.0
+      Name.startswith("avx512.prol") || // Added in 8.0
+      Name.startswith("avx512.pror") || // Added in 8.0
+      Name.startswith("avx512.mask.prorv.") || // Added in 8.0
+      Name.startswith("avx512.mask.pror.") ||  // Added in 8.0
+      Name.startswith("avx512.mask.prolv.") || // Added in 8.0
+      Name.startswith("avx512.mask.prol.") ||  // Added in 8.0
       Name.startswith("avx512.ptestm") || //Added in 6.0
       Name.startswith("avx512.ptestnm") || //Added in 6.0
       Name.startswith("sse2.pavg") || // Added in 6.0
@@ -527,6 +544,10 @@
       NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
       return true;
     }
+    if (Name == "x86.seh.recoverfp") {
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
+      return true;
+    }
     break;
   }
 
@@ -924,28 +945,78 @@
   return EmitX86Select(Builder, Mask, Align, Passthru);
 }
 
+static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
+                                          bool ZeroMask, bool IndexForm) {
+  Type *Ty = CI.getType();
+  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
+  unsigned EltWidth = Ty->getScalarSizeInBits();
+  bool IsFloat = Ty->isFPOrFPVectorTy();
+  Intrinsic::ID IID;
+  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
+  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_d_128;
+  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
+  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_q_128;
+  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
+  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_d_256;
+  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
+  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_q_256;
+  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
+  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_d_512;
+  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
+  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
+    IID = Intrinsic::x86_avx512_vpermi2var_q_512;
+  else if (VecWidth == 128 && EltWidth == 16)
+    IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
+  else if (VecWidth == 256 && EltWidth == 16)
+    IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
+  else if (VecWidth == 512 && EltWidth == 16)
+    IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
+  else if (VecWidth == 128 && EltWidth == 8)
+    IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
+  else if (VecWidth == 256 && EltWidth == 8)
+    IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
+  else if (VecWidth == 512 && EltWidth == 8)
+    IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
+  else
+    llvm_unreachable("Unexpected intrinsic");
+
+  Value *Args[] = { CI.getArgOperand(0), CI.getArgOperand(1),
+                    CI.getArgOperand(2) };
+
+  // If this isn't the index form, we need to swap operands 0 and 1.
+  if (!IndexForm)
+    std::swap(Args[0], Args[1]);
+
+  Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
+                                Args);
+  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
+                             : Builder.CreateBitCast(CI.getArgOperand(1),
+                                                     Ty);
+  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
+}
+
 static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
-                                            bool IsAddition) {
+                                            bool IsSigned, bool IsAddition) {
+  Type *Ty = CI.getType();
   Value *Op0 = CI.getOperand(0);
   Value *Op1 = CI.getOperand(1);
 
-  // Collect vector elements and type data.
-  Type *ResultType = CI.getType();
-
-  Value *Res;
-  if (IsAddition) {
-    // ADDUS: a > (a+b) ? ~0 : (a+b)
-    // If Op0 > Add, overflow occured.
-    Value *Add = Builder.CreateAdd(Op0, Op1);
-    Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Op0, Add);
-    Value *Max = llvm::Constant::getAllOnesValue(ResultType);
-    Res = Builder.CreateSelect(ICmp, Max, Add);
-  } else {
-    // SUBUS: max(a, b) - b
-    Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Op0, Op1);
-    Value *Select = Builder.CreateSelect(ICmp, Op0, Op1);
-    Res = Builder.CreateSub(Select, Op1);
-  }
+  Intrinsic::ID IID =
+      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
+               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
+  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
 
   if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
     Value *VecSrc = CI.getOperand(2);
@@ -955,6 +1026,67 @@
   return Res;
 }
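
The deleted ADDUS/SUBUS expansion and the generic saturating intrinsics that replace it agree element-wise; a standalone scalar check of that equivalence (a sketch, using 8-bit lanes):

    #include <cassert>
    #include <cstdint>

    // ADDUS per the old comment: a > (a+b) ? ~0 : (a+b), i.e. unsigned add
    // clamped at the maximum -- what llvm.uadd.sat provides per element.
    static uint8_t uaddSat(uint8_t A, uint8_t B) {
      uint8_t Sum = A + B; // wraps on overflow
      return A > Sum ? 0xFF : Sum;
    }

    // SUBUS per the old comment: max(a, b) - b, i.e. subtract clamped at 0.
    static uint8_t usubSat(uint8_t A, uint8_t B) { return A > B ? A - B : 0; }

    int main() {
      assert(uaddSat(200, 100) == 255 && uaddSat(1, 2) == 3);
      assert(usubSat(100, 200) == 0 && usubSat(5, 2) == 3);
    }
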
 
+static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
+                               bool IsRotateRight) {
+  Type *Ty = CI.getType();
+  Value *Src = CI.getArgOperand(0);
+  Value *Amt = CI.getArgOperand(1);
+
+  // The amount may be a scalar immediate, in which case we create a splat
+  // vector. Funnel-shift amounts are treated as modulo, and the types are
+  // all powers of 2, so we only care about the lowest log2 bits anyway.
+  if (Amt->getType() != Ty) {
+    unsigned NumElts = Ty->getVectorNumElements();
+    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
+    Amt = Builder.CreateVectorSplat(NumElts, Amt);
+  }
+
+  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
+  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
+
+  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
+    Value *VecSrc = CI.getOperand(2);
+    Value *Mask = CI.getOperand(3);
+    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+  }
+  return Res;
+}
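
Rotates map onto funnel shifts by passing the same value as both inputs: fshl(x, x, n) is a rotate-left and fshr(x, x, n) a rotate-right. A standalone scalar model (a sketch):

    #include <cassert>
    #include <cstdint>

    // fshl on a 32-bit lane: concatenate Hi:Lo, shift left by Amt % 32, keep
    // the high half. With Hi == Lo this degenerates to a rotate-left.
    static uint32_t fshl32(uint32_t Hi, uint32_t Lo, uint32_t Amt) {
      uint64_t Cat = ((uint64_t)Hi << 32) | Lo;
      return (uint32_t)((Cat << (Amt % 32)) >> 32);
    }

    int main() {
      assert(fshl32(0x80000001u, 0x80000001u, 1) == 0x00000003u); // rotl by 1
      uint32_t X = 0x12345678u;
      assert(fshl32(X, X, 36) == fshl32(X, X, 4)); // amounts are modulo 32
    }
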
+
+static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
+                                    bool IsShiftRight, bool ZeroMask) {
+  Type *Ty = CI.getType();
+  Value *Op0 = CI.getArgOperand(0);
+  Value *Op1 = CI.getArgOperand(1);
+  Value *Amt = CI.getArgOperand(2);
+
+  if (IsShiftRight)
+    std::swap(Op0, Op1);
+
+  // The amount may be a scalar immediate, in which case we create a splat
+  // vector. Funnel-shift amounts are treated as modulo, and the types are
+  // all powers of 2, so we only care about the lowest log2 bits anyway.
+  if (Amt->getType() != Ty) {
+    unsigned NumElts = Ty->getVectorNumElements();
+    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
+    Amt = Builder.CreateVectorSplat(NumElts, Amt);
+  }
+
+  Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
+  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
+
+  unsigned NumArgs = CI.getNumArgOperands();
+  if (NumArgs >= 4) { // For masked intrinsics.
+    Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
+                    ZeroMask     ? ConstantAggregateZero::get(CI.getType()) :
+                                   CI.getArgOperand(0);
+    Value *Mask = CI.getOperand(NumArgs - 1);
+    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+  }
+  return Res;
+}
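
VPSHLD/VPSHRD concatenate two lanes and shift across the boundary, which is exactly a funnel shift; the operand swap above is what turns the fshr form into the right-shift variant. A scalar model on 16-bit lanes (a sketch):

    #include <cassert>
    #include <cstdint>

    // fshr on a 16-bit lane: concatenate Hi:Lo and keep the low half after
    // shifting right by Amt % 16. VPSHRDW is this with the two sources
    // swapped relative to VPSHLDW, hence the std::swap(Op0, Op1) above.
    static uint16_t fshr16(uint16_t Hi, uint16_t Lo, uint32_t Amt) {
      uint32_t Cat = ((uint32_t)Hi << 16) | Lo;
      return (uint16_t)(Cat >> (Amt % 16));
    }

    int main() {
      assert(fshr16(0xABCD, 0x1234, 4) == 0xD123); // 4 bits pulled in from Hi
      assert(fshr16(0xABCD, 0x1234, 0) == 0x1234); // zero shift is identity
    }
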
+
 static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                  Value *Ptr, Value *Data, Value *Mask,
                                  bool Aligned) {
@@ -1321,136 +1453,13 @@
       IID = Intrinsic::x86_avx512_dbpsadbw_512;
     else
       llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("vpshld.")) {
-    if (VecWidth == 128 && Name[7] == 'q')
-      IID = Intrinsic::x86_avx512_vpshld_q_128;
-    else if (VecWidth == 128 && Name[7] == 'd')
-      IID = Intrinsic::x86_avx512_vpshld_d_128;
-    else if (VecWidth == 128 && Name[7] == 'w')
-      IID = Intrinsic::x86_avx512_vpshld_w_128;
-    else if (VecWidth == 256 && Name[7] == 'q')
-      IID = Intrinsic::x86_avx512_vpshld_q_256;
-    else if (VecWidth == 256 && Name[7] == 'd')
-      IID = Intrinsic::x86_avx512_vpshld_d_256;
-    else if (VecWidth == 256 && Name[7] == 'w')
-      IID = Intrinsic::x86_avx512_vpshld_w_256;
-    else if (VecWidth == 512 && Name[7] == 'q')
-      IID = Intrinsic::x86_avx512_vpshld_q_512;
-    else if (VecWidth == 512 && Name[7] == 'd')
-      IID = Intrinsic::x86_avx512_vpshld_d_512;
-    else if (VecWidth == 512 && Name[7] == 'w')
-      IID = Intrinsic::x86_avx512_vpshld_w_512;
-    else
-      llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("vpshrd.")) {
-    if (VecWidth == 128 && Name[7] == 'q')
-      IID = Intrinsic::x86_avx512_vpshrd_q_128;
-    else if (VecWidth == 128 && Name[7] == 'd')
-      IID = Intrinsic::x86_avx512_vpshrd_d_128;
-    else if (VecWidth == 128 && Name[7] == 'w')
-      IID = Intrinsic::x86_avx512_vpshrd_w_128;
-    else if (VecWidth == 256 && Name[7] == 'q')
-      IID = Intrinsic::x86_avx512_vpshrd_q_256;
-    else if (VecWidth == 256 && Name[7] == 'd')
-      IID = Intrinsic::x86_avx512_vpshrd_d_256;
-    else if (VecWidth == 256 && Name[7] == 'w')
-      IID = Intrinsic::x86_avx512_vpshrd_w_256;
-    else if (VecWidth == 512 && Name[7] == 'q')
-      IID = Intrinsic::x86_avx512_vpshrd_q_512;
-    else if (VecWidth == 512 && Name[7] == 'd')
-      IID = Intrinsic::x86_avx512_vpshrd_d_512;
-    else if (VecWidth == 512 && Name[7] == 'w')
-      IID = Intrinsic::x86_avx512_vpshrd_w_512;
-    else
-      llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("prorv.")) {
-    if (VecWidth == 128 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prorv_d_128;
-    else if (VecWidth == 256 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prorv_d_256;
-    else if (VecWidth == 512 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prorv_d_512;
-    else if (VecWidth == 128 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prorv_q_128;
-    else if (VecWidth == 256 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prorv_q_256;
-    else if (VecWidth == 512 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prorv_q_512;
-    else
-      llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("prolv.")) {
-    if (VecWidth == 128 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prolv_d_128;
-    else if (VecWidth == 256 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prolv_d_256;
-    else if (VecWidth == 512 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prolv_d_512;
-    else if (VecWidth == 128 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prolv_q_128;
-    else if (VecWidth == 256 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prolv_q_256;
-    else if (VecWidth == 512 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prolv_q_512;
-    else
-      llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("pror.")) {
-    if (VecWidth == 128 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_pror_d_128;
-    else if (VecWidth == 256 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_pror_d_256;
-    else if (VecWidth == 512 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_pror_d_512;
-    else if (VecWidth == 128 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_pror_q_128;
-    else if (VecWidth == 256 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_pror_q_256;
-    else if (VecWidth == 512 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_pror_q_512;
-    else
-      llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("prol.")) {
-    if (VecWidth == 128 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prol_d_128;
-    else if (VecWidth == 256 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prol_d_256;
-    else if (VecWidth == 512 && EltWidth == 32)
-      IID = Intrinsic::x86_avx512_prol_d_512;
-    else if (VecWidth == 128 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prol_q_128;
-    else if (VecWidth == 256 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prol_q_256;
-    else if (VecWidth == 512 && EltWidth == 64)
-      IID = Intrinsic::x86_avx512_prol_q_512;
-    else
-      llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("padds.")) {
-    if (VecWidth == 128 && EltWidth == 8)
-      IID = Intrinsic::x86_sse2_padds_b;
-    else if (VecWidth == 256 && EltWidth == 8)
-      IID = Intrinsic::x86_avx2_padds_b;
-    else if (VecWidth == 512 && EltWidth == 8)
-      IID = Intrinsic::x86_avx512_padds_b_512;
-    else if (VecWidth == 128 && EltWidth == 16)
-      IID = Intrinsic::x86_sse2_padds_w;
-    else if (VecWidth == 256 && EltWidth == 16)
-      IID = Intrinsic::x86_avx2_padds_w;
-    else if (VecWidth == 512 && EltWidth == 16)
-      IID = Intrinsic::x86_avx512_padds_w_512;
-    else
-      llvm_unreachable("Unexpected intrinsic");
-  } else if (Name.startswith("psubs.")) {
-    if (VecWidth == 128 && EltWidth == 8)
-      IID = Intrinsic::x86_sse2_psubs_b;
-    else if (VecWidth == 256 && EltWidth == 8)
-      IID = Intrinsic::x86_avx2_psubs_b;
-    else if (VecWidth == 512 && EltWidth == 8)
-      IID = Intrinsic::x86_avx512_psubs_b_512;
-    else if (VecWidth == 128 && EltWidth == 16)
-      IID = Intrinsic::x86_sse2_psubs_w;
-    else if (VecWidth == 256 && EltWidth == 16)
-      IID = Intrinsic::x86_avx2_psubs_w;
-    else if (VecWidth == 512 && EltWidth == 16)
-      IID = Intrinsic::x86_avx512_psubs_w_512;
+  } else if (Name.startswith("pmultishift.qb.")) {
+    if (VecWidth == 128)
+      IID = Intrinsic::x86_avx512_pmultishift_qb_128;
+    else if (VecWidth == 256)
+      IID = Intrinsic::x86_avx512_pmultishift_qb_256;
+    else if (VecWidth == 512)
+      IID = Intrinsic::x86_avx512_pmultishift_qb_512;
     else
       llvm_unreachable("Unexpected intrinsic");
   } else
@@ -1764,6 +1773,20 @@
       // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
       bool CmpEq = Name[16] == 'e';
       Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
+    } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
+      Type *OpTy = CI->getArgOperand(0)->getType();
+      unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
+      Intrinsic::ID IID;
+      switch (VecWidth) {
+      default: llvm_unreachable("Unexpected intrinsic");
+      case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
+      case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
+      case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
+      }
+
+      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+                               { CI->getOperand(0), CI->getArgOperand(1) });
+      Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
     } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
       Type *OpTy = CI->getArgOperand(0)->getType();
       unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
@@ -2018,6 +2041,23 @@
       Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
       Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
       Rep = Builder.CreateOr(Sel0, Sel1);
+    } else if (IsX86 && (Name.startswith("xop.vprot") ||
+                         Name.startswith("avx512.prol") ||
+                         Name.startswith("avx512.mask.prol"))) {
+      Rep = upgradeX86Rotate(Builder, *CI, false);
+    } else if (IsX86 && (Name.startswith("avx512.pror") ||
+                         Name.startswith("avx512.mask.pror"))) {
+      Rep = upgradeX86Rotate(Builder, *CI, true);
+    } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
+                         Name.startswith("avx512.mask.vpshld") ||
+                         Name.startswith("avx512.maskz.vpshld"))) {
+      bool ZeroMask = Name[11] == 'z';
+      Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
+    } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
+                         Name.startswith("avx512.mask.vpshrd") ||
+                         Name.startswith("avx512.maskz.vpshrd"))) {
+      bool ZeroMask = Name[11] == 'z';
+      Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
     } else if (IsX86 && Name == "sse42.crc32.64.8") {
       Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
                                                Intrinsic::x86_sse42_crc32_32_8);
@@ -2129,6 +2169,16 @@
       if (CI->getNumArgOperands() == 3)
         Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                             CI->getArgOperand(1));
+    } else if (IsX86 && (Name.startswith("sse2.padds.") ||
+                         Name.startswith("sse2.psubs.") ||
+                         Name.startswith("avx2.padds.") ||
+                         Name.startswith("avx2.psubs.") ||
+                         Name.startswith("avx512.padds.") ||
+                         Name.startswith("avx512.psubs.") ||
+                         Name.startswith("avx512.mask.padds.") ||
+                         Name.startswith("avx512.mask.psubs."))) {
+      bool IsAdd = Name.contains(".padds");
+      Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd);
     } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
                          Name.startswith("sse2.psubus.") ||
                          Name.startswith("avx2.paddus.") ||
@@ -2136,7 +2186,7 @@
                          Name.startswith("avx512.mask.paddus.") ||
                          Name.startswith("avx512.mask.psubus."))) {
       bool IsAdd = Name.contains(".paddus");
-      Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, IsAdd);
+      Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd);
     } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
       Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
                                       CI->getArgOperand(1),
@@ -2454,24 +2504,8 @@
 
       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.pand.")) {
-      Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
-                          CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) {
-      Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)),
-                              CI->getArgOperand(1));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
-                          CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.por.")) {
-      Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
-                          CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) {
-      Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
-                          CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.and.")) {
+    } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
+                         Name.startswith("avx512.mask.pand."))) {
       VectorType *FTy = cast<VectorType>(CI->getType());
       VectorType *ITy = VectorType::getInteger(FTy);
       Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
@@ -2479,7 +2513,8 @@
       Rep = Builder.CreateBitCast(Rep, FTy);
       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.andn.")) {
+    } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
+                         Name.startswith("avx512.mask.pandn."))) {
       VectorType *FTy = cast<VectorType>(CI->getType());
       VectorType *ITy = VectorType::getInteger(FTy);
       Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
@@ -2488,7 +2523,8 @@
       Rep = Builder.CreateBitCast(Rep, FTy);
       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.or.")) {
+    } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
+                         Name.startswith("avx512.mask.por."))) {
       VectorType *FTy = cast<VectorType>(CI->getType());
       VectorType *ITy = VectorType::getInteger(FTy);
       Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
@@ -2496,7 +2532,8 @@
       Rep = Builder.CreateBitCast(Rep, FTy);
       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.xor.")) {
+    } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
+                         Name.startswith("avx512.mask.pxor."))) {
       VectorType *FTy = cast<VectorType>(CI->getType());
       VectorType *ITy = VectorType::getInteger(FTy);
       Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
@@ -2580,26 +2617,16 @@
       }
       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.max.p") &&
+    } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
+                         Name.startswith("avx512.mask.min.p")) &&
                Name.drop_front(18) == ".512") {
-      Intrinsic::ID IID;
-      if (Name[17] == 's')
-        IID = Intrinsic::x86_avx512_max_ps_512;
-      else
-        IID = Intrinsic::x86_avx512_max_pd_512;
-
-      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
-                               { CI->getArgOperand(0), CI->getArgOperand(1),
-                                 CI->getArgOperand(4) });
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
-                          CI->getArgOperand(2));
-    } else if (IsX86 && Name.startswith("avx512.mask.min.p") &&
-               Name.drop_front(18) == ".512") {
-      Intrinsic::ID IID;
-      if (Name[17] == 's')
-        IID = Intrinsic::x86_avx512_min_ps_512;
-      else
-        IID = Intrinsic::x86_avx512_min_pd_512;
+      bool IsDouble = Name[17] == 'd';
+      bool IsMin = Name[13] == 'i';
+      static const Intrinsic::ID MinMaxTbl[2][2] = {
+        { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
+        { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
+      };
+      Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
 
       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                { CI->getArgOperand(0), CI->getArgOperand(1),
@@ -3143,62 +3170,7 @@
                          Name.startswith("avx512.maskz.vpermt2var."))) {
       bool ZeroMask = Name[11] == 'z';
       bool IndexForm = Name[17] == 'i';
-      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
-      unsigned EltWidth = CI->getType()->getScalarSizeInBits();
-      bool IsFloat = CI->getType()->isFPOrFPVectorTy();
-      Intrinsic::ID IID;
-      if (VecWidth == 128 && EltWidth == 32 && IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
-      else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_d_128;
-      else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
-      else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_q_128;
-      else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
-      else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_d_256;
-      else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
-      else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_q_256;
-      else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
-      else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_d_512;
-      else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
-      else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
-        IID = Intrinsic::x86_avx512_vpermi2var_q_512;
-      else if (VecWidth == 128 && EltWidth == 16)
-        IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
-      else if (VecWidth == 256 && EltWidth == 16)
-        IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
-      else if (VecWidth == 512 && EltWidth == 16)
-        IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
-      else if (VecWidth == 128 && EltWidth == 8)
-        IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
-      else if (VecWidth == 256 && EltWidth == 8)
-        IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
-      else if (VecWidth == 512 && EltWidth == 8)
-        IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
-      else
-        llvm_unreachable("Unexpected intrinsic");
-
-      Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
-                        CI->getArgOperand(2) };
-
-      // If this isn't index form we need to swap operand 0 and 1.
-      if (!IndexForm)
-        std::swap(Args[0], Args[1]);
-
-      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
-                               Args);
-      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
-                                 : Builder.CreateBitCast(CI->getArgOperand(1),
-                                                         CI->getType());
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+      Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
     } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
                          Name.startswith("avx512.maskz.vpdpbusd.") ||
                          Name.startswith("avx512.mask.vpdpbusds.") ||
diff --git a/lib/IR/BasicBlock.cpp b/lib/IR/BasicBlock.cpp
index e9eb686..3759243 100644
--- a/lib/IR/BasicBlock.cpp
+++ b/lib/IR/BasicBlock.cpp
@@ -206,10 +206,8 @@
     if (isa<PHINode>(I) || isa<DbgInfoIntrinsic>(I))
       continue;
 
-    if (auto *II = dyn_cast<IntrinsicInst>(&I))
-      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
-          II->getIntrinsicID() == Intrinsic::lifetime_end)
-        continue;
+    if (I.isLifetimeStartOrEnd())
+      continue;
 
     return &I;
   }
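
isLifetimeStartOrEnd() is introduced later in this same patch (in lib/IR/Instruction.cpp); a hedged sketch of the resulting idiom for skipping bookkeeping instructions:

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/IntrinsicInst.h"

    using namespace llvm;

    // Hypothetical helper: first instruction that is neither debug info nor
    // a lifetime marker, mirroring the loop in BasicBlock.cpp above.
    static const Instruction *firstRealInstruction(const BasicBlock &BB) {
      for (const Instruction &I : BB)
        if (!isa<DbgInfoIntrinsic>(&I) && !I.isLifetimeStartOrEnd())
          return &I;
      return nullptr;
    }
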
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 826199e..57de6b0 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -27,7 +27,6 @@
 #include "llvm/IR/GlobalAlias.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/PatternMatch.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -1078,10 +1077,10 @@
             isa<GlobalValue>(CE1->getOperand(0))) {
           GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0));
 
-          unsigned GVAlign =
-              GV->getParent()
-                  ? GV->getPointerAlignment(GV->getParent()->getDataLayout())
-                  : 0;
+          // Functions are at least 4-byte aligned.
+          unsigned GVAlign = GV->getAlignment();
+          if (isa<Function>(GV))
+            GVAlign = std::max(GVAlign, 4U);
 
           if (GVAlign > 1) {
             unsigned DstWidth = CI2->getType()->getBitWidth();
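
The fold this feeds works because alignment pins the low bits of an address to zero: an Align-byte-aligned symbol has log2(Align) known-zero trailing bits, so small masks over ptrtoint(GV) can fold away. A standalone arithmetic sketch:

    #include <cassert>
    #include <cstdint>

    // If an address is Align-byte aligned (Align a power of two), its low
    // log2(Align) bits are zero, so (Addr & Mask) folds to 0 whenever Mask
    // fits inside those bits. E.g. with functions assumed >= 4-byte aligned
    // above, (FnAddr & 3) folds to 0.
    static bool maskFoldsToZero(uint64_t Align, uint64_t Mask) {
      return (Mask & (Align - 1)) == Mask; // Mask covers only the zero bits
    }

    int main() {
      assert(maskFoldsToZero(4, 3));  // and i64 %fnaddr, 3 -> 0
      assert(!maskFoldsToZero(4, 7)); // bit 2 may be set
    }
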
diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp
index df09d13..d36967f 100644
--- a/lib/IR/Constants.cpp
+++ b/lib/IR/Constants.cpp
@@ -1999,9 +1999,8 @@
   if (!Ty)
     Ty = cast<PointerType>(C->getType()->getScalarType())->getElementType();
   else
-    assert(
-        Ty ==
-        cast<PointerType>(C->getType()->getScalarType())->getContainedType(0u));
+    assert(Ty ==
+           cast<PointerType>(C->getType()->getScalarType())->getElementType());
 
   if (Constant *FC =
           ConstantFoldGetElementPtr(Ty, C, InBounds, InRangeIndex, Idxs))
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index a306573..815797f 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -15,7 +15,6 @@
 #include "llvm-c/Core.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -108,6 +107,14 @@
   unwrap(C)->setYieldCallback(YieldCallback, OpaqueHandle);
 }
 
+LLVMBool LLVMContextShouldDiscardValueNames(LLVMContextRef C) {
+  return unwrap(C)->shouldDiscardValueNames();
+}
+
+void LLVMContextSetDiscardValueNames(LLVMContextRef C, LLVMBool Discard) {
+  unwrap(C)->setDiscardValueNames(Discard);
+}
+
 void LLVMContextDispose(LLVMContextRef C) {
   delete unwrap(C);
 }
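
The two new entry points expose LLVMContext's discard-value-names switch through the C API; a minimal usage sketch (the function name is hypothetical):

    #include "llvm-c/Core.h"

    // Sketch: drop local value names to save memory in release-style
    // pipelines, leaving contexts that already discard names untouched.
    void configureContext(LLVMContextRef Ctx) {
      if (!LLVMContextShouldDiscardValueNames(Ctx))
        LLVMContextSetDiscardValueNames(Ctx, 1);
    }
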
@@ -1610,17 +1617,21 @@
                           LLVMValueRef *ConstantIndices, unsigned NumIndices) {
   ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
                                NumIndices);
-  return wrap(ConstantExpr::getGetElementPtr(
-      nullptr, unwrap<Constant>(ConstantVal), IdxList));
+  Constant *Val = unwrap<Constant>(ConstantVal);
+  Type *Ty =
+      cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+  return wrap(ConstantExpr::getGetElementPtr(Ty, Val, IdxList));
 }
 
 LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
                                   LLVMValueRef *ConstantIndices,
                                   unsigned NumIndices) {
-  Constant* Val = unwrap<Constant>(ConstantVal);
   ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
                                NumIndices);
-  return wrap(ConstantExpr::getInBoundsGetElementPtr(nullptr, Val, IdxList));
+  Constant *Val = unwrap<Constant>(ConstantVal);
+  Type *Ty =
+      cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+  return wrap(ConstantExpr::getInBoundsGetElementPtr(Ty, Val, IdxList));
 }
 
 LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
@@ -2525,6 +2536,11 @@
   return wrap(&*--I);
 }
 
+LLVMBasicBlockRef LLVMCreateBasicBlockInContext(LLVMContextRef C,
+                                                const char *Name) {
+  return wrap(llvm::BasicBlock::Create(*unwrap(C), Name));
+}
+
 LLVMBasicBlockRef LLVMAppendBasicBlockInContext(LLVMContextRef C,
                                                 LLVMValueRef FnRef,
                                                 const char *Name) {
@@ -2648,43 +2664,43 @@
   if (FuncletPadInst *FPI = dyn_cast<FuncletPadInst>(unwrap(Instr))) {
     return FPI->getNumArgOperands();
   }
-  return CallSite(unwrap<Instruction>(Instr)).getNumArgOperands();
+  return unwrap<CallBase>(Instr)->getNumArgOperands();
 }
 
 /*--.. Call and invoke instructions ........................................--*/
 
 unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr) {
-  return CallSite(unwrap<Instruction>(Instr)).getCallingConv();
+  return unwrap<CallBase>(Instr)->getCallingConv();
 }
 
 void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
-  return CallSite(unwrap<Instruction>(Instr))
-    .setCallingConv(static_cast<CallingConv::ID>(CC));
+  return unwrap<CallBase>(Instr)->setCallingConv(
+      static_cast<CallingConv::ID>(CC));
 }
 
 void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
                                 unsigned align) {
-  CallSite Call = CallSite(unwrap<Instruction>(Instr));
+  auto *Call = unwrap<CallBase>(Instr);
   Attribute AlignAttr = Attribute::getWithAlignment(Call->getContext(), align);
-  Call.addAttribute(index, AlignAttr);
+  Call->addAttribute(index, AlignAttr);
 }
 
 void LLVMAddCallSiteAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
                               LLVMAttributeRef A) {
-  CallSite(unwrap<Instruction>(C)).addAttribute(Idx, unwrap(A));
+  unwrap<CallBase>(C)->addAttribute(Idx, unwrap(A));
 }
 
 unsigned LLVMGetCallSiteAttributeCount(LLVMValueRef C,
                                        LLVMAttributeIndex Idx) {
-  auto CS = CallSite(unwrap<Instruction>(C));
-  auto AS = CS.getAttributes().getAttributes(Idx);
+  auto *Call = unwrap<CallBase>(C);
+  auto AS = Call->getAttributes().getAttributes(Idx);
   return AS.getNumAttributes();
 }
 
 void LLVMGetCallSiteAttributes(LLVMValueRef C, LLVMAttributeIndex Idx,
                                LLVMAttributeRef *Attrs) {
-  auto CS = CallSite(unwrap<Instruction>(C));
-  auto AS = CS.getAttributes().getAttributes(Idx);
+  auto *Call = unwrap<CallBase>(C);
+  auto AS = Call->getAttributes().getAttributes(Idx);
   for (auto A : AS)
     *Attrs++ = wrap(A);
 }
@@ -2692,30 +2708,32 @@
 LLVMAttributeRef LLVMGetCallSiteEnumAttribute(LLVMValueRef C,
                                               LLVMAttributeIndex Idx,
                                               unsigned KindID) {
-  return wrap(CallSite(unwrap<Instruction>(C))
-    .getAttribute(Idx, (Attribute::AttrKind)KindID));
+  return wrap(
+      unwrap<CallBase>(C)->getAttribute(Idx, (Attribute::AttrKind)KindID));
 }
 
 LLVMAttributeRef LLVMGetCallSiteStringAttribute(LLVMValueRef C,
                                                 LLVMAttributeIndex Idx,
                                                 const char *K, unsigned KLen) {
-  return wrap(CallSite(unwrap<Instruction>(C))
-    .getAttribute(Idx, StringRef(K, KLen)));
+  return wrap(unwrap<CallBase>(C)->getAttribute(Idx, StringRef(K, KLen)));
 }
 
 void LLVMRemoveCallSiteEnumAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
                                      unsigned KindID) {
-  CallSite(unwrap<Instruction>(C))
-    .removeAttribute(Idx, (Attribute::AttrKind)KindID);
+  unwrap<CallBase>(C)->removeAttribute(Idx, (Attribute::AttrKind)KindID);
 }
 
 void LLVMRemoveCallSiteStringAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
                                        const char *K, unsigned KLen) {
-  CallSite(unwrap<Instruction>(C)).removeAttribute(Idx, StringRef(K, KLen));
+  unwrap<CallBase>(C)->removeAttribute(Idx, StringRef(K, KLen));
 }
 
 LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr) {
-  return wrap(CallSite(unwrap<Instruction>(Instr)).getCalledValue());
+  return wrap(unwrap<CallBase>(Instr)->getCalledValue());
+}
+
+LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef Instr) {
+  return wrap(unwrap<CallBase>(Instr)->getFunctionType());
 }
 
 /*--.. Operations on call instructions (only) ..............................--*/
@@ -2961,9 +2979,22 @@
                              LLVMValueRef *Args, unsigned NumArgs,
                              LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
                              const char *Name) {
-  return wrap(unwrap(B)->CreateInvoke(unwrap(Fn), unwrap(Then), unwrap(Catch),
-                                      makeArrayRef(unwrap(Args), NumArgs),
-                                      Name));
+  Value *V = unwrap(Fn);
+  FunctionType *FnT =
+      cast<FunctionType>(cast<PointerType>(V->getType())->getElementType());
+
+  return wrap(
+      unwrap(B)->CreateInvoke(FnT, unwrap(Fn), unwrap(Then), unwrap(Catch),
+                              makeArrayRef(unwrap(Args), NumArgs), Name));
+}
+
+LLVMValueRef LLVMBuildInvoke2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+                              LLVMValueRef *Args, unsigned NumArgs,
+                              LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+                              const char *Name) {
+  return wrap(unwrap(B)->CreateInvoke(
+      unwrap<FunctionType>(Ty), unwrap(Fn), unwrap(Then), unwrap(Catch),
+      makeArrayRef(unwrap(Args), NumArgs), Name));
 }
 
 LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
@@ -3319,7 +3350,15 @@
 
 LLVMValueRef LLVMBuildLoad(LLVMBuilderRef B, LLVMValueRef PointerVal,
                            const char *Name) {
-  return wrap(unwrap(B)->CreateLoad(unwrap(PointerVal), Name));
+  Value *V = unwrap(PointerVal);
+  PointerType *Ty = cast<PointerType>(V->getType());
+
+  return wrap(unwrap(B)->CreateLoad(Ty->getElementType(), V, Name));
+}
+
+LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef B, LLVMTypeRef Ty,
+                            LLVMValueRef PointerVal, const char *Name) {
+  return wrap(unwrap(B)->CreateLoad(unwrap(Ty), unwrap(PointerVal), Name));
 }
 
 LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
@@ -3374,20 +3413,50 @@
                           LLVMValueRef *Indices, unsigned NumIndices,
                           const char *Name) {
   ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
-  return wrap(unwrap(B)->CreateGEP(nullptr, unwrap(Pointer), IdxList, Name));
+  Value *Val = unwrap(Pointer);
+  Type *Ty =
+      cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+  return wrap(unwrap(B)->CreateGEP(Ty, Val, IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+                           LLVMValueRef Pointer, LLVMValueRef *Indices,
+                           unsigned NumIndices, const char *Name) {
+  ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+  return wrap(unwrap(B)->CreateGEP(unwrap(Ty), unwrap(Pointer), IdxList, Name));
 }
 
 LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
                                   LLVMValueRef *Indices, unsigned NumIndices,
                                   const char *Name) {
   ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+  Value *Val = unwrap(Pointer);
+  Type *Ty =
+      cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+  return wrap(unwrap(B)->CreateInBoundsGEP(Ty, Val, IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildInBoundsGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+                                   LLVMValueRef Pointer, LLVMValueRef *Indices,
+                                   unsigned NumIndices, const char *Name) {
+  ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
   return wrap(
-      unwrap(B)->CreateInBoundsGEP(nullptr, unwrap(Pointer), IdxList, Name));
+      unwrap(B)->CreateInBoundsGEP(unwrap(Ty), unwrap(Pointer), IdxList, Name));
 }
 
 LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
                                 unsigned Idx, const char *Name) {
-  return wrap(unwrap(B)->CreateStructGEP(nullptr, unwrap(Pointer), Idx, Name));
+  Value *Val = unwrap(Pointer);
+  Type *Ty =
+      cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+  return wrap(unwrap(B)->CreateStructGEP(Ty, Val, Idx, Name));
+}
+
+LLVMValueRef LLVMBuildStructGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+                                 LLVMValueRef Pointer, unsigned Idx,
+                                 const char *Name) {
+  return wrap(
+      unwrap(B)->CreateStructGEP(unwrap(Ty), unwrap(Pointer), Idx, Name));
 }
 
 LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
@@ -3529,6 +3598,13 @@
   return wrap(unwrap(B)->CreatePointerCast(unwrap(Val), unwrap(DestTy), Name));
 }
 
+LLVMValueRef LLVMBuildIntCast2(LLVMBuilderRef B, LLVMValueRef Val,
+                               LLVMTypeRef DestTy, LLVMBool IsSigned,
+                               const char *Name) {
+  return wrap(
+      unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy), IsSigned, Name));
+}
+
 LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef B, LLVMValueRef Val,
                               LLVMTypeRef DestTy, const char *Name) {
   return wrap(unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy),
@@ -3565,9 +3641,20 @@
 LLVMValueRef LLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn,
                            LLVMValueRef *Args, unsigned NumArgs,
                            const char *Name) {
-  return wrap(unwrap(B)->CreateCall(unwrap(Fn),
-                                    makeArrayRef(unwrap(Args), NumArgs),
-                                    Name));
+  Value *V = unwrap(Fn);
+  FunctionType *FnT =
+      cast<FunctionType>(cast<PointerType>(V->getType())->getElementType());
+
+  return wrap(unwrap(B)->CreateCall(FnT, unwrap(Fn),
+                                    makeArrayRef(unwrap(Args), NumArgs), Name));
+}
+
+LLVMValueRef LLVMBuildCall2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+                            LLVMValueRef *Args, unsigned NumArgs,
+                            const char *Name) {
+  FunctionType *FTy = unwrap<FunctionType>(Ty);
+  return wrap(unwrap(B)->CreateCall(FTy, unwrap(Fn),
+                                    makeArrayRef(unwrap(Args), NumArgs), Name));
 }
 
 LLVMValueRef LLVMBuildSelect(LLVMBuilderRef B, LLVMValueRef If,
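
The same explicit-type pattern applies to calls: LLVMBuildCall2 threads the callee's function type through instead of digging it out of the pointer. Usage sketch (hypothetical helper):

    #include "llvm-c/Core.h"

    // Sketch: call a function whose LLVMTypeRef the caller already holds;
    // with opaque pointers the callee value alone no longer carries it.
    LLVMValueRef callNoArgs(LLVMBuilderRef B, LLVMTypeRef FnTy,
                            LLVMValueRef Fn) {
      return LLVMBuildCall2(B, FnTy, Fn, /*Args=*/nullptr, /*NumArgs=*/0,
                            "ret");
    }
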
diff --git a/lib/IR/DIBuilder.cpp b/lib/IR/DIBuilder.cpp
index d29759f..fb81634 100644
--- a/lib/IR/DIBuilder.cpp
+++ b/lib/IR/DIBuilder.cpp
@@ -504,11 +504,11 @@
 DICompositeType *DIBuilder::createEnumerationType(
     DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
     uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
-    DIType *UnderlyingType, StringRef UniqueIdentifier, bool IsFixed) {
+    DIType *UnderlyingType, StringRef UniqueIdentifier, bool IsScoped) {
   auto *CTy = DICompositeType::get(
       VMContext, dwarf::DW_TAG_enumeration_type, Name, File, LineNumber,
       getNonCompileUnitScope(Scope), UnderlyingType, SizeInBits, AlignInBits, 0,
-      IsFixed ? DINode::FlagFixedEnum : DINode::FlagZero, Elements, 0, nullptr,
+      IsScoped ? DINode::FlagEnumClass : DINode::FlagZero, Elements, 0, nullptr,
       nullptr, UniqueIdentifier);
   AllEnumTypes.push_back(CTy);
   trackIfUnresolved(CTy);
diff --git a/lib/IR/DataLayout.cpp b/lib/IR/DataLayout.cpp
index 18d502a5..63c24b5 100644
--- a/lib/IR/DataLayout.cpp
+++ b/lib/IR/DataLayout.cpp
@@ -635,6 +635,14 @@
   return I->TypeByteWidth;
 }
 
+unsigned DataLayout::getMaxPointerSize() const {
+  unsigned MaxPointerSize = 0;
+  for (auto &P : Pointers)
+    MaxPointerSize = std::max(MaxPointerSize, P.TypeByteWidth);
+
+  return MaxPointerSize;
+}
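
getMaxPointerSize() returns the widest pointer width, in bytes, across all address-space entries in the layout. A small self-contained check (a sketch; the layout string is illustrative):

    #include "llvm/IR/DataLayout.h"
    #include <cassert>

    using namespace llvm;

    int main() {
      // 64-bit default pointers, 128-bit pointers in address space 1.
      DataLayout DL("e-p:64:64-p1:128:128");
      assert(DL.getMaxPointerSize() == 16); // bytes: the AS1 pointers win
    }
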
+
 unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
   assert(Ty->isPtrOrPtrVectorTy() &&
          "This should only be called with a pointer or pointer vector type");
diff --git a/lib/IR/DebugInfo.cpp b/lib/IR/DebugInfo.cpp
index d1ff545..9fa3177 100644
--- a/lib/IR/DebugInfo.cpp
+++ b/lib/IR/DebugInfo.cpp
@@ -697,8 +697,9 @@
 
 static unsigned map_from_llvmDWARFsourcelanguage(LLVMDWARFSourceLanguage lang) {
   switch (lang) {
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \
-case LLVMDWARFSourceLanguage##NAME: return ID;
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
+  case LLVMDWARFSourceLanguage##NAME:                                          \
+    return ID;
 #include "llvm/BinaryFormat/Dwarf.def"
 #undef HANDLE_DW_LANG
   }
diff --git a/lib/IR/DebugInfoMetadata.cpp b/lib/IR/DebugInfoMetadata.cpp
index 3b702ce..92f3f21 100644
--- a/lib/IR/DebugInfoMetadata.cpp
+++ b/lib/IR/DebugInfoMetadata.cpp
@@ -20,6 +20,8 @@
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instructions.h"
 
+#include <numeric>
+
 using namespace llvm;
 
 DILocation::DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
@@ -113,6 +115,47 @@
   return DILocation::get(Result->getContext(), 0, 0, S, L);
 }
 
+Optional<unsigned> DILocation::encodeDiscriminator(unsigned BD, unsigned DF,
+                                                   unsigned CI) {
+  SmallVector<unsigned, 3> Components = {BD, DF, CI};
+  uint64_t RemainingWork = 0U;
+  // We use RemainingWork to figure out whether any components remain to be
+  // encoded. For example, if BD != 0 but DF == 0 && CI == 0, we don't need to
+  // encode anything for the latter two.
+  // Since each input component is at most 32 bits, their sum fits in 34 bits,
+  // and thus RemainingWork won't overflow.
+  RemainingWork =
+      std::accumulate(Components.begin(), Components.end(), RemainingWork);
+
+  int I = 0;
+  unsigned Ret = 0;
+  unsigned NextBitInsertionIndex = 0;
+  while (RemainingWork > 0) {
+    unsigned C = Components[I++];
+    RemainingWork -= C;
+    unsigned EC = encodeComponent(C);
+    Ret |= (EC << NextBitInsertionIndex);
+    NextBitInsertionIndex += encodingBits(C);
+  }
+
+  // Encoding may be unsuccessful because of overflow. We determine success by
+  // checking that the components are equivalent before and after encoding.
+  // Alternatively, we could detect overflow during encoding, but
+  // round-tripping through the decoder is simpler.
+  unsigned TBD = 0, TDF = 0, TCI = 0;
+  decodeDiscriminator(Ret, TBD, TDF, TCI);
+  if (TBD == BD && TDF == DF && TCI == CI)
+    return Ret;
+  return None;
+}
+
+void DILocation::decodeDiscriminator(unsigned D, unsigned &BD, unsigned &DF,
+                                     unsigned &CI) {
+  BD = getUnsignedFromPrefixEncoding(D);
+  DF = getUnsignedFromPrefixEncoding(getNextComponentInDiscriminator(D));
+  CI = getUnsignedFromPrefixEncoding(
+      getNextComponentInDiscriminator(getNextComponentInDiscriminator(D)));
+}
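
encodeDiscriminator signals overflow by returning None rather than a mangled value, so callers can pick a fallback; a hedged sketch of that calling pattern (the fallback policy is hypothetical):

    #include "llvm/ADT/Optional.h"
    #include "llvm/IR/DebugInfoMetadata.h"

    using namespace llvm;

    // Sketch: pack (BaseDiscriminator, DuplicationFactor, CopyIdentifier),
    // keeping only the base discriminator if the prefix encoding overflows.
    unsigned packOrFallback(unsigned BD, unsigned DF, unsigned CI) {
      if (Optional<unsigned> D = DILocation::encodeDiscriminator(BD, DF, CI))
        return *D;
      return BD; // overflow: drop the other two components
    }
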
+
 DINode::DIFlags DINode::getFlag(StringRef Flag) {
   return StringSwitch<DIFlags>(Flag)
 #define HANDLE_DI_FLAG(ID, NAME) .Case("DIFlag" #NAME, Flag##NAME)
diff --git a/lib/IR/DiagnosticInfo.cpp b/lib/IR/DiagnosticInfo.cpp
index 3c6665b..dc957ab 100644
--- a/lib/IR/DiagnosticInfo.cpp
+++ b/lib/IR/DiagnosticInfo.cpp
@@ -104,6 +104,11 @@
   DP << getMsg();
 }
 
+void DiagnosticInfo::anchor() {}
+void DiagnosticInfoStackSize::anchor() {}
+void DiagnosticInfoWithLocationBase::anchor() {}
+void DiagnosticInfoIROptimization::anchor() {}
+
 DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) {
   if (!DL)
     return;
@@ -366,6 +371,9 @@
   return OS.str();
 }
 
+void OptimizationRemarkAnalysisFPCommute::anchor() {}
+void OptimizationRemarkAnalysisAliasing::anchor() {}
+
 namespace llvm {
 namespace yaml {
 
diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp
index ec09481..a88478b 100644
--- a/lib/IR/Function.cpp
+++ b/lib/IR/Function.cpp
@@ -24,7 +24,6 @@
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -1257,13 +1256,13 @@
     const User *FU = U.getUser();
     if (isa<BlockAddress>(FU))
       continue;
-    if (!isa<CallInst>(FU) && !isa<InvokeInst>(FU)) {
+    const auto *Call = dyn_cast<CallBase>(FU);
+    if (!Call) {
       if (PutOffender)
         *PutOffender = FU;
       return true;
     }
-    ImmutableCallSite CS(cast<Instruction>(FU));
-    if (!CS.isCallee(&U)) {
+    if (!Call->isCallee(&U)) {
       if (PutOffender)
         *PutOffender = FU;
       return true;
@@ -1289,12 +1288,10 @@
 /// callsFunctionThatReturnsTwice - Return true if the function has a call to
 /// setjmp or other function that gcc recognizes as "returning twice".
 bool Function::callsFunctionThatReturnsTwice() const {
-  for (const_inst_iterator
-         I = inst_begin(this), E = inst_end(this); I != E; ++I) {
-    ImmutableCallSite CS(&*I);
-    if (CS && CS.hasFnAttr(Attribute::ReturnsTwice))
-      return true;
-  }
+  for (const Instruction &I : instructions(this))
+    if (const auto *Call = dyn_cast<CallBase>(&I))
+      if (Call->hasFnAttr(Attribute::ReturnsTwice))
+        return true;
 
   return false;
 }
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index f077957..d861b52 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -598,6 +598,14 @@
          !this->isTerminator();
 }
 
+bool Instruction::isLifetimeStartOrEnd() const {
+  auto II = dyn_cast<IntrinsicInst>(this);
+  if (!II)
+    return false;
+  Intrinsic::ID ID = II->getIntrinsicID();
+  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
+}
+
 const Instruction *Instruction::getNextNonDebugInstruction() const {
   for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
     if (!isa<DbgInfoIntrinsic>(I))
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index 8405bbd..06b4672 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -27,6 +27,7 @@
 #include "llvm/IR/Function.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Instruction.h"
+#include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Metadata.h"
 #include "llvm/IR/Module.h"
@@ -254,6 +255,36 @@
 //                        CallBase Implementation
 //===----------------------------------------------------------------------===//
 
+Function *CallBase::getCaller() { return getParent()->getParent(); }
+
+bool CallBase::isIndirectCall() const {
+  const Value *V = getCalledValue();
+  if (isa<Function>(V) || isa<Constant>(V))
+    return false;
+  if (const CallInst *CI = dyn_cast<CallInst>(this))
+    if (CI->isInlineAsm())
+      return false;
+  return true;
+}
+
+Intrinsic::ID CallBase::getIntrinsicID() const {
+  if (auto *F = getCalledFunction())
+    return F->getIntrinsicID();
+  return Intrinsic::not_intrinsic;
+}
+
+bool CallBase::isReturnNonNull() const {
+  if (hasRetAttr(Attribute::NonNull))
+    return true;
+
+  if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
+      !NullPointerIsDefined(getCaller(),
+                            getType()->getPointerAddressSpace()))
+    return true;
+
+  return false;
+}
+
 Value *CallBase::getReturnedArgOperand() const {
   unsigned Index;
 
@@ -357,9 +388,8 @@
   setName(NameStr);
 }
 
-void CallInst::init(Value *Func, const Twine &NameStr) {
-  FTy =
-      cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
+void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
+  this->FTy = FTy;
   assert(getNumOperands() == 1 && "NumOperands not set up?");
   setCalledOperand(Func);
 
@@ -368,22 +398,18 @@
   setName(NameStr);
 }
 
-CallInst::CallInst(Value *Func, const Twine &Name, Instruction *InsertBefore)
-    : CallBase(cast<FunctionType>(
-                   cast<PointerType>(Func->getType())->getElementType())
-                   ->getReturnType(),
-               Instruction::Call, OperandTraits<CallBase>::op_end(this) - 1, 1,
-               InsertBefore) {
-  init(Func, Name);
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+                   Instruction *InsertBefore)
+    : CallBase(Ty->getReturnType(), Instruction::Call,
+               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
+  init(Ty, Func, Name);
 }
 
-CallInst::CallInst(Value *Func, const Twine &Name, BasicBlock *InsertAtEnd)
-    : CallBase(cast<FunctionType>(
-                   cast<PointerType>(Func->getType())->getElementType())
-                   ->getReturnType(),
-               Instruction::Call, OperandTraits<CallBase>::op_end(this) - 1, 1,
-               InsertAtEnd) {
-  init(Func, Name);
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+                   BasicBlock *InsertAtEnd)
+    : CallBase(Ty->getReturnType(), Instruction::Call,
+               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
+  init(Ty, Func, Name);
 }
 
 CallInst::CallInst(const CallInst &CI)
@@ -1112,28 +1138,30 @@
          "Alignment required for atomic load");
 }
 
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
-    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
+                   Instruction *InsertBef)
+    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
 
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
-    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
+                   BasicBlock *InsertAE)
+    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    Instruction *InsertBef)
     : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}
 
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    BasicBlock *InsertAE)
-    : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
+    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    unsigned Align, Instruction *InsertBef)
     : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBef) {}
 
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    unsigned Align, BasicBlock *InsertAE)
-    : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAE) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
@@ -1148,12 +1176,11 @@
   setName(Name);
 }
 
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
-                   unsigned Align, AtomicOrdering Order,
-                   SyncScope::ID SSID,
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+                   unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
                    BasicBlock *InsertAE)
-  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
-                     Load, Ptr, InsertAE) {
+    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
+  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
   setVolatile(isVolatile);
   setAlignment(Align);
   setAtomic(Order, SSID);
@@ -1161,48 +1188,6 @@
   setName(Name);
 }
 
-LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
-  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
-                     Load, Ptr, InsertBef) {
-  setVolatile(false);
-  setAlignment(0);
-  setAtomic(AtomicOrdering::NotAtomic);
-  AssertOK();
-  if (Name && Name[0]) setName(Name);
-}
-
-LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
-  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
-                     Load, Ptr, InsertAE) {
-  setVolatile(false);
-  setAlignment(0);
-  setAtomic(AtomicOrdering::NotAtomic);
-  AssertOK();
-  if (Name && Name[0]) setName(Name);
-}
-
-LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
-                   Instruction *InsertBef)
-    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
-  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
-  setVolatile(isVolatile);
-  setAlignment(0);
-  setAtomic(AtomicOrdering::NotAtomic);
-  AssertOK();
-  if (Name && Name[0]) setName(Name);
-}
-
-LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
-                   BasicBlock *InsertAE)
-  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
-                     Load, Ptr, InsertAE) {
-  setVolatile(isVolatile);
-  setAlignment(0);
-  setAtomic(AtomicOrdering::NotAtomic);
-  AssertOK();
-  if (Name && Name[0]) setName(Name);
-}
-
 void LoadInst::setAlignment(unsigned Align) {
   assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
   assert(Align <= MaximumAlignment &&
@@ -3853,7 +3838,7 @@
 }
 
 LoadInst *LoadInst::cloneImpl() const {
-  return new LoadInst(getOperand(0), Twine(), isVolatile(),
+  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                       getAlignment(), getOrdering(), getSyncScopeID());
 }
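
The LoadInst changes above follow the same pattern as the CallInst ones: the loaded type is passed explicitly instead of being rederived from the pointer operand's pointee type, with an assert preserving the old invariant for now (groundwork for opaque pointer types). A usage sketch against the new constructor (helper name illustrative, not from this patch):

    // Emit a non-volatile i32 load in front of InsertPt. Ptr must still be
    // an i32*, as the assert in the constructor above enforces.
    static llvm::LoadInst *loadI32(llvm::Value *Ptr,
                                   llvm::Instruction *InsertPt) {
      llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ptr->getContext());
      return new llvm::LoadInst(Int32Ty, Ptr, "val", /*isVolatile=*/false,
                                InsertPt);
    }
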
 
diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp
index 62d9e38..944d826 100644
--- a/lib/IR/LLVMContext.cpp
+++ b/lib/IR/LLVMContext.cpp
@@ -61,6 +61,7 @@
     {MD_associated, "associated"},
     {MD_callees, "callees"},
     {MD_irr_loop, "irr_loop"},
+    {MD_access_group, "llvm.access.group"},
   };
 
   for (auto &MDKind : MDKinds) {
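
Registering the name here ties !llvm.access.group metadata to the fixed kind LLVMContext::MD_access_group, so lookups can use the enum constant rather than a string hash through getMDKindID. A sketch (helper name illustrative, not from this patch):

    // Fetch an instruction's access-group metadata via the fixed kind ID
    // registered above, instead of Ctx.getMDKindID("llvm.access.group").
    static llvm::MDNode *getAccessGroup(const llvm::Instruction &I) {
      return I.getMetadata(llvm::LLVMContext::MD_access_group);
    }
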
diff --git a/lib/IR/Module.cpp b/lib/IR/Module.cpp
index 70a16cb..93f2730 100644
--- a/lib/IR/Module.cpp
+++ b/lib/IR/Module.cpp
@@ -203,16 +203,14 @@
 ///      with a constantexpr cast to the right type.
 ///   3. Finally, if the existing global is the correct declaration, return the
 ///      existing global.
-Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
+Constant *Module::getOrInsertGlobal(
+    StringRef Name, Type *Ty,
+    function_ref<GlobalVariable *()> CreateGlobalCallback) {
   // See if we have a definition for the specified global already.
   GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name));
-  if (!GV) {
-    // Nope, add it
-    GlobalVariable *New =
-      new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage,
-                         nullptr, Name);
-     return New;                    // Return the new declaration.
-  }
+  if (!GV)
+    GV = CreateGlobalCallback();
+  assert(GV && "The CreateGlobalCallback is expected to create a global");
 
   // If the variable exists but has the wrong type, return a bitcast to the
   // right type.
@@ -225,6 +223,14 @@
   return GV;
 }
 
+// Overload to construct a global variable using its constructor's defaults.
+Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
+  return getOrInsertGlobal(Name, Ty, [&] {
+    return new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage,
+                              nullptr, Name);
+  });
+}
+
 //===----------------------------------------------------------------------===//
 // Methods for easy access to the global variables in the module.
 //
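
The callback overload lets callers pick linkage, initializer, and other properties of a freshly created global while keeping the existing find-or-bitcast behavior; the old signature becomes a thin wrapper around it. A usage sketch (the global's name and properties are illustrative):

    // Find @counter, or create it as an internal, zero-initialized i32.
    // A bitcast is returned if a differently-typed @counter already exists.
    static llvm::Constant *getOrCreateCounter(llvm::Module &M) {
      llvm::Type *Int32Ty = llvm::Type::getInt32Ty(M.getContext());
      return M.getOrInsertGlobal("counter", Int32Ty, [&] {
        return new llvm::GlobalVariable(
            M, Int32Ty, /*isConstant=*/false,
            llvm::GlobalValue::InternalLinkage,
            llvm::ConstantInt::get(Int32Ty, 0), "counter");
      });
    }
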
diff --git a/lib/IR/SafepointIRVerifier.cpp b/lib/IR/SafepointIRVerifier.cpp
index 3596b31..12ada13 100644
--- a/lib/IR/SafepointIRVerifier.cpp
+++ b/lib/IR/SafepointIRVerifier.cpp
@@ -257,7 +257,7 @@
   if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
     return containsGCPtrType(AT->getElementType());
   if (StructType *ST = dyn_cast<StructType>(Ty))
-    return llvm::any_of(ST->subtypes(), containsGCPtrType);
+    return llvm::any_of(ST->elements(), containsGCPtrType);
   return false;
 }
 
diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp
index dc8af6b..80b993c 100644
--- a/lib/IR/Value.cpp
+++ b/lib/IR/Value.cpp
@@ -16,7 +16,6 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SetVector.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
@@ -503,8 +502,8 @@
         return V;
       V = GA->getAliasee();
     } else {
-      if (auto CS = ImmutableCallSite(V)) {
-        if (const Value *RV = CS.getReturnedArgOperand()) {
+      if (const auto *Call = dyn_cast<CallBase>(V)) {
+        if (const Value *RV = Call->getReturnedArgOperand()) {
           V = RV;
           continue;
         }
@@ -512,9 +511,9 @@
         // but it can't be marked with returned attribute, that's why it needs
         // special case.
         if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
-            (CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
-             CS.getIntrinsicID() == Intrinsic::strip_invariant_group)) {
-          V = CS.getArgOperand(0);
+            (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
+             Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
+          V = Call->getArgOperand(0);
           continue;
         }
       }
@@ -573,8 +572,8 @@
     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
       V = GA->getAliasee();
     } else {
-      if (auto CS = ImmutableCallSite(V))
-        if (const Value *RV = CS.getReturnedArgOperand()) {
+      if (const auto *Call = dyn_cast<CallBase>(V))
+        if (const Value *RV = Call->getReturnedArgOperand()) {
           V = RV;
           continue;
         }
@@ -608,10 +607,11 @@
       DerefBytes = A->getDereferenceableOrNullBytes();
       CanBeNull = true;
     }
-  } else if (auto CS = ImmutableCallSite(this)) {
-    DerefBytes = CS.getDereferenceableBytes(AttributeList::ReturnIndex);
+  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
+    DerefBytes = Call->getDereferenceableBytes(AttributeList::ReturnIndex);
     if (DerefBytes == 0) {
-      DerefBytes = CS.getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
+      DerefBytes =
+          Call->getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
       CanBeNull = true;
     }
   } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
@@ -683,8 +683,8 @@
       if (AllocatedType->isSized())
         Align = DL.getPrefTypeAlignment(AllocatedType);
     }
-  } else if (auto CS = ImmutableCallSite(this))
-    Align = CS.getAttributes().getRetAlignment();
+  } else if (const auto *Call = dyn_cast<CallBase>(this))
+    Align = Call->getAttributes().getRetAlignment();
   else if (const LoadInst *LI = dyn_cast<LoadInst>(this))
     if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
       ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
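
With CallBase in place, dereferenceability and alignment of call results flow through one code path for calls and invokes alike. Client code normally reaches this logic through the public wrapper; a sketch, assuming the LLVM 8-era signature with a CanBeNull out-parameter:

    // How many bytes is V known dereferenceable for, given the DataLayout?
    static uint64_t knownDerefBytes(const llvm::Value *V,
                                    const llvm::DataLayout &DL) {
      bool CanBeNull = false;
      return V->getPointerDereferenceableBytes(DL, CanBeNull);
    }
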
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 7fd6df3..30e77b9 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -65,7 +65,6 @@
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/Comdat.h"
 #include "llvm/IR/Constant.h"
@@ -140,19 +139,18 @@
   }
 
   void Write(const Value *V) {
-    if (!V)
-      return;
-    if (isa<Instruction>(V)) {
-      V->print(*OS, MST);
-      *OS << '\n';
-    } else {
-      V->printAsOperand(*OS, true, MST);
-      *OS << '\n';
-    }
+    if (V)
+      Write(*V);
   }
 
-  void Write(ImmutableCallSite CS) {
-    Write(CS.getInstruction());
+  void Write(const Value &V) {
+    if (isa<Instruction>(V)) {
+      V.print(*OS, MST);
+      *OS << '\n';
+    } else {
+      V.printAsOperand(*OS, true, MST);
+      *OS << '\n';
+    }
   }
 
   void Write(const Metadata *MD) {
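
The pointer overload now just null-checks and forwards to a new reference overload, which lets the Assert machinery above accept a CallBase reference directly instead of requiring an instruction pointer. The shape of the idiom, reduced to essentials (Widget is a placeholder type, not from this patch):

    void dump(const Widget &W);                     // single real implementation
    void dump(const Widget *W) { if (W) dump(*W); } // null-tolerant adapter
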
@@ -448,6 +446,7 @@
   void visitBitCastInst(BitCastInst &I);
   void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
   void visitPHINode(PHINode &PN);
+  void visitCallBase(CallBase &Call);
   void visitUnaryOperator(UnaryOperator &U);
   void visitBinaryOperator(BinaryOperator &B);
   void visitICmpInst(ICmpInst &IC);
@@ -471,7 +470,7 @@
   void visitSelectInst(SelectInst &SI);
   void visitUserOp1(Instruction &I);
   void visitUserOp2(Instruction &I) { visitUserOp1(I); }
-  void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
+  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
   void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
   void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
   void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
@@ -491,8 +490,7 @@
   void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
   void visitCleanupReturnInst(CleanupReturnInst &CRI);
 
-  void verifyCallSite(CallSite CS);
-  void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
+  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
   void verifySwiftErrorValue(const Value *SwiftErrorVal);
   void verifyMustTailCall(CallInst &CI);
   bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
@@ -507,7 +505,7 @@
 
   void visitConstantExprsRecursively(const Constant *EntryC);
   void visitConstantExpr(const ConstantExpr *CE);
-  void verifyStatepoint(ImmutableCallSite CS);
+  void verifyStatepoint(const CallBase &Call);
   void verifyFrameRecoverIndices();
   void verifySiblingFuncletUnwinds();
 
@@ -1243,6 +1241,8 @@
   AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
            "local variable requires a valid scope", &N, N.getRawScope());
+  if (auto Ty = N.getType())
+    AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
 }
 
 void Verifier::visitDILabel(const DILabel &N) {
@@ -1893,134 +1893,136 @@
 }
 
 /// Verify that statepoint intrinsic is well formed.
-void Verifier::verifyStatepoint(ImmutableCallSite CS) {
-  assert(CS.getCalledFunction() &&
-         CS.getCalledFunction()->getIntrinsicID() ==
-           Intrinsic::experimental_gc_statepoint);
+void Verifier::verifyStatepoint(const CallBase &Call) {
+  assert(Call.getCalledFunction() &&
+         Call.getCalledFunction()->getIntrinsicID() ==
+             Intrinsic::experimental_gc_statepoint);
 
-  const Instruction &CI = *CS.getInstruction();
-
-  Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
-         !CS.onlyAccessesArgMemory(),
+  Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
+             !Call.onlyAccessesArgMemory(),
          "gc.statepoint must read and write all memory to preserve "
          "reordering restrictions required by safepoint semantics",
-         &CI);
+         Call);
 
-  const Value *IDV = CS.getArgument(0);
+  const Value *IDV = Call.getArgOperand(0);
   Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
-         &CI);
+         Call);
 
-  const Value *NumPatchBytesV = CS.getArgument(1);
+  const Value *NumPatchBytesV = Call.getArgOperand(1);
   Assert(isa<ConstantInt>(NumPatchBytesV),
          "gc.statepoint number of patchable bytes must be a constant integer",
-         &CI);
+         Call);
   const int64_t NumPatchBytes =
       cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
   assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
-  Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
-                             "positive",
-         &CI);
+  Assert(NumPatchBytes >= 0,
+         "gc.statepoint number of patchable bytes must be "
+         "positive",
+         Call);
 
-  const Value *Target = CS.getArgument(2);
+  const Value *Target = Call.getArgOperand(2);
   auto *PT = dyn_cast<PointerType>(Target->getType());
   Assert(PT && PT->getElementType()->isFunctionTy(),
-         "gc.statepoint callee must be of function pointer type", &CI, Target);
+         "gc.statepoint callee must be of function pointer type", Call, Target);
   FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
 
-  const Value *NumCallArgsV = CS.getArgument(3);
+  const Value *NumCallArgsV = Call.getArgOperand(3);
   Assert(isa<ConstantInt>(NumCallArgsV),
          "gc.statepoint number of arguments to underlying call "
          "must be constant integer",
-         &CI);
+         Call);
   const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
   Assert(NumCallArgs >= 0,
          "gc.statepoint number of arguments to underlying call "
          "must be positive",
-         &CI);
+         Call);
   const int NumParams = (int)TargetFuncType->getNumParams();
   if (TargetFuncType->isVarArg()) {
     Assert(NumCallArgs >= NumParams,
-           "gc.statepoint mismatch in number of vararg call args", &CI);
+           "gc.statepoint mismatch in number of vararg call args", Call);
 
     // TODO: Remove this limitation
     Assert(TargetFuncType->getReturnType()->isVoidTy(),
            "gc.statepoint doesn't support wrapping non-void "
            "vararg functions yet",
-           &CI);
+           Call);
   } else
     Assert(NumCallArgs == NumParams,
-           "gc.statepoint mismatch in number of call args", &CI);
+           "gc.statepoint mismatch in number of call args", Call);
 
-  const Value *FlagsV = CS.getArgument(4);
+  const Value *FlagsV = Call.getArgOperand(4);
   Assert(isa<ConstantInt>(FlagsV),
-         "gc.statepoint flags must be constant integer", &CI);
+         "gc.statepoint flags must be constant integer", Call);
   const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
   Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
-         "unknown flag used in gc.statepoint flags argument", &CI);
+         "unknown flag used in gc.statepoint flags argument", Call);
 
   // Verify that the types of the call parameter arguments match
   // the type of the wrapped callee.
-  AttributeList Attrs = CS.getAttributes();
+  AttributeList Attrs = Call.getAttributes();
   for (int i = 0; i < NumParams; i++) {
     Type *ParamType = TargetFuncType->getParamType(i);
-    Type *ArgType = CS.getArgument(5 + i)->getType();
+    Type *ArgType = Call.getArgOperand(5 + i)->getType();
     Assert(ArgType == ParamType,
            "gc.statepoint call argument does not match wrapped "
            "function type",
-           &CI);
+           Call);
 
     if (TargetFuncType->isVarArg()) {
       AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
       Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
-             "Attribute 'sret' cannot be used for vararg call arguments!", &CI);
+             "Attribute 'sret' cannot be used for vararg call arguments!",
+             Call);
     }
   }
 
   const int EndCallArgsInx = 4 + NumCallArgs;
 
-  const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
+  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
   Assert(isa<ConstantInt>(NumTransitionArgsV),
          "gc.statepoint number of transition arguments "
          "must be constant integer",
-         &CI);
+         Call);
   const int NumTransitionArgs =
       cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
   Assert(NumTransitionArgs >= 0,
-         "gc.statepoint number of transition arguments must be positive", &CI);
+         "gc.statepoint number of transition arguments must be positive", Call);
   const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
 
-  const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
+  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
   Assert(isa<ConstantInt>(NumDeoptArgsV),
          "gc.statepoint number of deoptimization arguments "
          "must be constant integer",
-         &CI);
+         Call);
   const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
-  Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
-                            "must be positive",
-         &CI);
+  Assert(NumDeoptArgs >= 0,
+         "gc.statepoint number of deoptimization arguments "
+         "must be positive",
+         Call);
 
   const int ExpectedNumArgs =
       7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
-  Assert(ExpectedNumArgs <= (int)CS.arg_size(),
-         "gc.statepoint too few arguments according to length fields", &CI);
+  Assert(ExpectedNumArgs <= (int)Call.arg_size(),
+         "gc.statepoint too few arguments according to length fields", Call);
 
   // Check that the only uses of this gc.statepoint are gc.result or
   // gc.relocate calls which are tied to this statepoint and thus part
   // of the same statepoint sequence
-  for (const User *U : CI.users()) {
-    const CallInst *Call = dyn_cast<const CallInst>(U);
-    Assert(Call, "illegal use of statepoint token", &CI, U);
-    if (!Call) continue;
-    Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
+  for (const User *U : Call.users()) {
+    const CallInst *UserCall = dyn_cast<const CallInst>(U);
+    Assert(UserCall, "illegal use of statepoint token", Call, U);
+    if (!UserCall)
+      continue;
+    Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
            "gc.result or gc.relocate are the only value uses "
            "of a gc.statepoint",
-           &CI, U);
-    if (isa<GCResultInst>(Call)) {
-      Assert(Call->getArgOperand(0) == &CI,
-             "gc.result connected to wrong gc.statepoint", &CI, Call);
+           Call, U);
+    if (isa<GCResultInst>(UserCall)) {
+      Assert(UserCall->getArgOperand(0) == &Call,
+             "gc.result connected to wrong gc.statepoint", Call, UserCall);
-    } else if (isa<GCRelocateInst>(Call)) {
+    } else if (isa<GCRelocateInst>(UserCall)) {
-      Assert(Call->getArgOperand(0) == &CI,
-             "gc.relocate connected to wrong gc.statepoint", &CI, Call);
+      Assert(UserCall->getArgOperand(0) == &Call,
+             "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
     }
   }
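
Taken together, the asserts above pin down the gc.statepoint operand layout: id, number of patchable bytes, callee, number of call arguments, flags, the call arguments themselves, then a count-prefixed transition-argument section, a count-prefixed deoptimization section, and finally the gc parameters. The same index arithmetic as a sketch (helper is illustrative; it mirrors the ExpectedNumArgs computation above):

    // First operand index of the 'gc parameters' section of a statepoint,
    // given the three counts read from operands 3, 5 + NumCallArgs, and
    // 6 + NumCallArgs + NumTransitionArgs respectively.
    static int gcParamsStart(int NumCallArgs, int NumTransitionArgs,
                             int NumDeoptArgs) {
      return 7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
    }
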
 
@@ -2745,77 +2747,79 @@
   visitInstruction(PN);
 }
 
-void Verifier::verifyCallSite(CallSite CS) {
-  Instruction *I = CS.getInstruction();
-
-  Assert(CS.getCalledValue()->getType()->isPointerTy(),
-         "Called function must be a pointer!", I);
-  PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
+void Verifier::visitCallBase(CallBase &Call) {
+  Assert(Call.getCalledValue()->getType()->isPointerTy(),
+         "Called function must be a pointer!", Call);
+  PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
 
   Assert(FPTy->getElementType()->isFunctionTy(),
-         "Called function is not pointer to function type!", I);
+         "Called function is not pointer to function type!", Call);
 
-  Assert(FPTy->getElementType() == CS.getFunctionType(),
-         "Called function is not the same type as the call!", I);
+  Assert(FPTy->getElementType() == Call.getFunctionType(),
+         "Called function is not the same type as the call!", Call);
 
-  FunctionType *FTy = CS.getFunctionType();
+  FunctionType *FTy = Call.getFunctionType();
 
   // Verify that the correct number of arguments are being passed
   if (FTy->isVarArg())
-    Assert(CS.arg_size() >= FTy->getNumParams(),
-           "Called function requires more parameters than were provided!", I);
+    Assert(Call.arg_size() >= FTy->getNumParams(),
+           "Called function requires more parameters than were provided!",
+           Call);
   else
-    Assert(CS.arg_size() == FTy->getNumParams(),
-           "Incorrect number of arguments passed to called function!", I);
+    Assert(Call.arg_size() == FTy->getNumParams(),
+           "Incorrect number of arguments passed to called function!", Call);
 
   // Verify that all arguments to the call match the function type.
   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
-    Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
+    Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
            "Call parameter type does not match function signature!",
-           CS.getArgument(i), FTy->getParamType(i), I);
+           Call.getArgOperand(i), FTy->getParamType(i), Call);
 
-  AttributeList Attrs = CS.getAttributes();
+  AttributeList Attrs = Call.getAttributes();
 
-  Assert(verifyAttributeCount(Attrs, CS.arg_size()),
-         "Attribute after last parameter!", I);
+  Assert(verifyAttributeCount(Attrs, Call.arg_size()),
+         "Attribute after last parameter!", Call);
 
   if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
     // Don't allow speculatable on call sites, unless the underlying function
     // declaration is also speculatable.
-    Function *Callee
-      = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+    Function *Callee =
+        dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
     Assert(Callee && Callee->isSpeculatable(),
-           "speculatable attribute may not apply to call sites", I);
+           "speculatable attribute may not apply to call sites", Call);
   }
 
   // Verify call attributes.
-  verifyFunctionAttrs(FTy, Attrs, I);
+  verifyFunctionAttrs(FTy, Attrs, &Call);
 
   // Conservatively check the inalloca argument.
   // We have a bug if we can find that there is an underlying alloca without
   // inalloca.
-  if (CS.hasInAllocaArgument()) {
-    Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
+  if (Call.hasInAllocaArgument()) {
+    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
     if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
       Assert(AI->isUsedWithInAlloca(),
-             "inalloca argument for call has mismatched alloca", AI, I);
+             "inalloca argument for call has mismatched alloca", AI, Call);
   }
 
   // For each argument of the callsite, if it has the swifterror argument,
   // make sure the underlying alloca/parameter it comes from has a swifterror as
   // well.
   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
-    if (CS.paramHasAttr(i, Attribute::SwiftError)) {
-      Value *SwiftErrorArg = CS.getArgument(i);
+    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
+      Value *SwiftErrorArg = Call.getArgOperand(i);
       if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
         Assert(AI->isSwiftError(),
-               "swifterror argument for call has mismatched alloca", AI, I);
+               "swifterror argument for call has mismatched alloca", AI, Call);
         continue;
       }
       auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
-      Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I);
+      Assert(ArgI,
+             "swifterror argument should come from an alloca or parameter",
+             SwiftErrorArg, Call);
       Assert(ArgI->hasSwiftErrorAttr(),
-             "swifterror argument for call has mismatched parameter", ArgI, I);
+             "swifterror argument for call has mismatched parameter", ArgI,
+             Call);
     }
 
   if (FTy->isVarArg()) {
@@ -2831,95 +2835,97 @@
     }
 
     // Check attributes on the varargs part.
-    for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
-      Type *Ty = CS.getArgument(Idx)->getType();
+    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
+      Type *Ty = Call.getArgOperand(Idx)->getType();
       AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
-      verifyParameterAttrs(ArgAttrs, Ty, I);
+      verifyParameterAttrs(ArgAttrs, Ty, &Call);
 
       if (ArgAttrs.hasAttribute(Attribute::Nest)) {
-        Assert(!SawNest, "More than one parameter has attribute nest!", I);
+        Assert(!SawNest, "More than one parameter has attribute nest!", Call);
         SawNest = true;
       }
 
       if (ArgAttrs.hasAttribute(Attribute::Returned)) {
         Assert(!SawReturned, "More than one parameter has attribute returned!",
-               I);
+               Call);
         Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
                "Incompatible argument and return types for 'returned' "
                "attribute",
-               I);
+               Call);
         SawReturned = true;
       }
 
       // Statepoint intrinsic is vararg but the wrapped function may be not.
       // Allow sret here and check the wrapped function in verifyStatepoint.
-      if (CS.getCalledFunction() == nullptr ||
-          CS.getCalledFunction()->getIntrinsicID() !=
-            Intrinsic::experimental_gc_statepoint)
+      if (!Call.getCalledFunction() ||
+          Call.getCalledFunction()->getIntrinsicID() !=
+              Intrinsic::experimental_gc_statepoint)
         Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
-               "Attribute 'sret' cannot be used for vararg call arguments!", I);
+               "Attribute 'sret' cannot be used for vararg call arguments!",
+               Call);
 
       if (ArgAttrs.hasAttribute(Attribute::InAlloca))
-        Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
-               I);
+        Assert(Idx == Call.arg_size() - 1,
+               "inalloca isn't on the last argument!", Call);
     }
   }
 
   // Verify that there's no metadata unless it's a direct call to an intrinsic.
-  if (CS.getCalledFunction() == nullptr ||
-      !CS.getCalledFunction()->getName().startswith("llvm.")) {
+  if (!Call.getCalledFunction() ||
+      !Call.getCalledFunction()->getName().startswith("llvm.")) {
     for (Type *ParamTy : FTy->params()) {
       Assert(!ParamTy->isMetadataTy(),
-             "Function has metadata parameter but isn't an intrinsic", I);
+             "Function has metadata parameter but isn't an intrinsic", Call);
       Assert(!ParamTy->isTokenTy(),
-             "Function has token parameter but isn't an intrinsic", I);
+             "Function has token parameter but isn't an intrinsic", Call);
     }
   }
 
   // Verify that indirect calls don't return tokens.
-  if (CS.getCalledFunction() == nullptr)
+  if (!Call.getCalledFunction())
     Assert(!FTy->getReturnType()->isTokenTy(),
            "Return type cannot be token for indirect call!");
 
-  if (Function *F = CS.getCalledFunction())
+  if (Function *F = Call.getCalledFunction())
     if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
-      visitIntrinsicCallSite(ID, CS);
+      visitIntrinsicCall(ID, Call);
 
   // Verify that a callsite has at most one "deopt", at most one "funclet" and
   // at most one "gc-transition" operand bundle.
   bool FoundDeoptBundle = false, FoundFuncletBundle = false,
        FoundGCTransitionBundle = false;
-  for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
-    OperandBundleUse BU = CS.getOperandBundleAt(i);
+  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
+    OperandBundleUse BU = Call.getOperandBundleAt(i);
     uint32_t Tag = BU.getTagID();
     if (Tag == LLVMContext::OB_deopt) {
-      Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
+      Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
       FoundDeoptBundle = true;
     } else if (Tag == LLVMContext::OB_gc_transition) {
       Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
-             I);
+             Call);
       FoundGCTransitionBundle = true;
     } else if (Tag == LLVMContext::OB_funclet) {
-      Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
+      Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
       FoundFuncletBundle = true;
       Assert(BU.Inputs.size() == 1,
-             "Expected exactly one funclet bundle operand", I);
+             "Expected exactly one funclet bundle operand", Call);
       Assert(isa<FuncletPadInst>(BU.Inputs.front()),
              "Funclet bundle operands should correspond to a FuncletPadInst",
-             I);
+             Call);
     }
   }
 
   // Verify that each inlinable callsite of a debug-info-bearing function in a
   // debug-info-bearing function has a debug location attached to it. Failure to
   // do so causes assertion failures when the inliner sets up inline scope info.
-  if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
-      CS.getCalledFunction()->getSubprogram())
-    AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
-                               "debug info must have a !dbg location",
-             I);
+  if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
+      Call.getCalledFunction()->getSubprogram())
+    AssertDI(Call.getDebugLoc(),
+             "inlinable function call in a function with "
+             "debug info must have a !dbg location",
+             Call);
 
-  visitInstruction(*I);
+  visitInstruction(Call);
 }
 
 /// Two types are "congruent" if they are identical, or if they are both pointer
@@ -3014,14 +3020,14 @@
 }
 
 void Verifier::visitCallInst(CallInst &CI) {
-  verifyCallSite(&CI);
+  visitCallBase(CI);
 
   if (CI.isMustTailCall())
     verifyMustTailCall(CI);
 }
 
 void Verifier::visitInvokeInst(InvokeInst &II) {
-  verifyCallSite(&II);
+  visitCallBase(II);
 
   // Verify that the first non-PHI instruction of the unwind destination is an
   // exception handling instruction.
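
visitCallInst and visitInvokeInst now both funnel into the shared visitCallBase, so every call-site rule is written exactly once. The same dispatch shape in miniature (names illustrative, not from this patch; uses llvm/IR/InstVisitor.h):

    struct MiniVerifier : llvm::InstVisitor<MiniVerifier> {
      void visitCallInst(llvm::CallInst &CI) { checkCall(CI); }
      void visitInvokeInst(llvm::InvokeInst &II) { checkCall(II); }
      void checkCall(llvm::CallBase &Call) { /* shared call-site checks */ }
    };
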
@@ -3330,16 +3336,15 @@
 }
 
 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
-void Verifier::verifySwiftErrorCallSite(CallSite CS,
-                                        const Value *SwiftErrorVal) {
+void Verifier::verifySwiftErrorCall(CallBase &Call,
+                                    const Value *SwiftErrorVal) {
   unsigned Idx = 0;
-  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
-       I != E; ++I, ++Idx) {
+  for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
     if (*I == SwiftErrorVal) {
-      Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
+      Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
              "swifterror value when used in a callsite should be marked "
              "with swifterror attribute",
-              SwiftErrorVal, CS);
+             SwiftErrorVal, Call);
     }
   }
 }
@@ -3358,10 +3363,8 @@
       Assert(StoreI->getOperand(1) == SwiftErrorVal,
              "swifterror value should be the second operand when used "
              "by stores", SwiftErrorVal, U);
-    if (auto CallI = dyn_cast<CallInst>(U))
-      verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
-    if (auto II = dyn_cast<InvokeInst>(U))
-      verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
+    if (auto *Call = dyn_cast<CallBase>(U))
+      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
   }
 }
 
@@ -4078,8 +4081,8 @@
 }
 
 /// Allow intrinsics to be verified in different ways.
-void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
-  Function *IF = CS.getCalledFunction();
+void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
+  Function *IF = Call.getCalledFunction();
   Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
          IF);
 
@@ -4125,15 +4128,15 @@
 
   // If the intrinsic takes MDNode arguments, verify that they are either global
   // or are local to *this* function.
-  for (Value *V : CS.args())
+  for (Value *V : Call.args())
     if (auto *MD = dyn_cast<MetadataAsValue>(V))
-      visitMetadataAsValue(*MD, CS.getCaller());
+      visitMetadataAsValue(*MD, Call.getCaller());
 
   switch (ID) {
   default:
     break;
   case Intrinsic::coro_id: {
-    auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts();
+    auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
     if (isa<ConstantPointerNull>(InfoArg))
       break;
     auto *GV = dyn_cast<GlobalVariable>(InfoArg);
@@ -4148,10 +4151,10 @@
   }
   case Intrinsic::ctlz:  // llvm.ctlz
   case Intrinsic::cttz:  // llvm.cttz
-    Assert(isa<ConstantInt>(CS.getArgOperand(1)),
+    Assert(isa<ConstantInt>(Call.getArgOperand(1)),
            "is_zero_undef argument of bit counting intrinsics must be a "
            "constant int",
-           CS);
+           Call);
     break;
   case Intrinsic::experimental_constrained_fadd:
   case Intrinsic::experimental_constrained_fsub:
@@ -4177,59 +4180,58 @@
   case Intrinsic::experimental_constrained_floor:
   case Intrinsic::experimental_constrained_round:
   case Intrinsic::experimental_constrained_trunc:
-    visitConstrainedFPIntrinsic(
-        cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
+    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
     break;
   case Intrinsic::dbg_declare: // llvm.dbg.declare
-    Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
-           "invalid llvm.dbg.declare intrinsic call 1", CS);
-    visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+    Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
+           "invalid llvm.dbg.declare intrinsic call 1", Call);
+    visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
     break;
   case Intrinsic::dbg_addr: // llvm.dbg.addr
-    visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+    visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
     break;
   case Intrinsic::dbg_value: // llvm.dbg.value
-    visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+    visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
     break;
   case Intrinsic::dbg_label: // llvm.dbg.label
-    visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(*CS.getInstruction()));
+    visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
     break;
   case Intrinsic::memcpy:
   case Intrinsic::memmove:
   case Intrinsic::memset: {
-    const auto *MI = cast<MemIntrinsic>(CS.getInstruction());
+    const auto *MI = cast<MemIntrinsic>(&Call);
     auto IsValidAlignment = [&](unsigned Alignment) -> bool {
       return Alignment == 0 || isPowerOf2_32(Alignment);
     };
     Assert(IsValidAlignment(MI->getDestAlignment()),
            "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
-           CS);
+           Call);
     if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
       Assert(IsValidAlignment(MTI->getSourceAlignment()),
              "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
-             CS);
+             Call);
     }
-    Assert(isa<ConstantInt>(CS.getArgOperand(3)),
+    Assert(isa<ConstantInt>(Call.getArgOperand(3)),
            "isvolatile argument of memory intrinsics must be a constant int",
-           CS);
+           Call);
     break;
   }
   case Intrinsic::memcpy_element_unordered_atomic:
   case Intrinsic::memmove_element_unordered_atomic:
   case Intrinsic::memset_element_unordered_atomic: {
-    const auto *AMI = cast<AtomicMemIntrinsic>(CS.getInstruction());
+    const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
 
     ConstantInt *ElementSizeCI =
         dyn_cast<ConstantInt>(AMI->getRawElementSizeInBytes());
     Assert(ElementSizeCI,
            "element size of the element-wise unordered atomic memory "
            "intrinsic must be a constant int",
-           CS);
+           Call);
     const APInt &ElementSizeVal = ElementSizeCI->getValue();
     Assert(ElementSizeVal.isPowerOf2(),
            "element size of the element-wise atomic memory intrinsic "
            "must be a power of 2",
-           CS);
+           Call);
 
     if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
       uint64_t Length = LengthCI->getZExtValue();
@@ -4237,7 +4239,7 @@
       Assert((Length % ElementSize) == 0,
              "constant length must be a multiple of the element size in the "
              "element-wise atomic memory intrinsic",
-             CS);
+             Call);
     }
 
     auto IsValidAlignment = [&](uint64_t Alignment) {
@@ -4245,11 +4247,11 @@
     };
     uint64_t DstAlignment = AMI->getDestAlignment();
     Assert(IsValidAlignment(DstAlignment),
-           "incorrect alignment of the destination argument", CS);
+           "incorrect alignment of the destination argument", Call);
     if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
       uint64_t SrcAlignment = AMT->getSourceAlignment();
       Assert(IsValidAlignment(SrcAlignment),
-             "incorrect alignment of the source argument", CS);
+             "incorrect alignment of the source argument", Call);
     }
     break;
   }
@@ -4258,76 +4260,76 @@
   case Intrinsic::gcread:
     if (ID == Intrinsic::gcroot) {
       AllocaInst *AI =
-        dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
-      Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
-      Assert(isa<Constant>(CS.getArgOperand(1)),
-             "llvm.gcroot parameter #2 must be a constant.", CS);
+          dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
+      Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
+      Assert(isa<Constant>(Call.getArgOperand(1)),
+             "llvm.gcroot parameter #2 must be a constant.", Call);
       if (!AI->getAllocatedType()->isPointerTy()) {
-        Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
+        Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
                "llvm.gcroot parameter #1 must either be a pointer alloca, "
                "or argument #2 must be a non-null constant.",
-               CS);
+               Call);
       }
     }
 
-    Assert(CS.getParent()->getParent()->hasGC(),
-           "Enclosing function does not use GC.", CS);
+    Assert(Call.getParent()->getParent()->hasGC(),
+           "Enclosing function does not use GC.", Call);
     break;
   case Intrinsic::init_trampoline:
-    Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
+    Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
            "llvm.init_trampoline parameter #2 must resolve to a function.",
-           CS);
+           Call);
     break;
   case Intrinsic::prefetch:
-    Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
-               isa<ConstantInt>(CS.getArgOperand(2)) &&
-               cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
-               cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
-           "invalid arguments to llvm.prefetch", CS);
+    Assert(isa<ConstantInt>(Call.getArgOperand(1)) &&
+               isa<ConstantInt>(Call.getArgOperand(2)) &&
+               cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
+               cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
+           "invalid arguments to llvm.prefetch", Call);
     break;
   case Intrinsic::stackprotector:
-    Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
-           "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
+    Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
+           "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
     break;
   case Intrinsic::lifetime_start:
   case Intrinsic::lifetime_end:
   case Intrinsic::invariant_start:
-    Assert(isa<ConstantInt>(CS.getArgOperand(0)),
+    Assert(isa<ConstantInt>(Call.getArgOperand(0)),
            "size argument of memory use markers must be a constant integer",
-           CS);
+           Call);
     break;
   case Intrinsic::invariant_end:
-    Assert(isa<ConstantInt>(CS.getArgOperand(1)),
-           "llvm.invariant.end parameter #2 must be a constant integer", CS);
+    Assert(isa<ConstantInt>(Call.getArgOperand(1)),
+           "llvm.invariant.end parameter #2 must be a constant integer", Call);
     break;
 
   case Intrinsic::localescape: {
-    BasicBlock *BB = CS.getParent();
+    BasicBlock *BB = Call.getParent();
     Assert(BB == &BB->getParent()->front(),
-           "llvm.localescape used outside of entry block", CS);
+           "llvm.localescape used outside of entry block", Call);
     Assert(!SawFrameEscape,
-           "multiple calls to llvm.localescape in one function", CS);
-    for (Value *Arg : CS.args()) {
+           "multiple calls to llvm.localescape in one function", Call);
+    for (Value *Arg : Call.args()) {
       if (isa<ConstantPointerNull>(Arg))
         continue; // Null values are allowed as placeholders.
       auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
       Assert(AI && AI->isStaticAlloca(),
-             "llvm.localescape only accepts static allocas", CS);
+             "llvm.localescape only accepts static allocas", Call);
     }
-    FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
+    FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
     SawFrameEscape = true;
     break;
   }
   case Intrinsic::localrecover: {
-    Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
+    Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
     Function *Fn = dyn_cast<Function>(FnArg);
     Assert(Fn && !Fn->isDeclaration(),
            "llvm.localrecover first "
            "argument must be function defined in this module",
-           CS);
-    auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+           Call);
+    auto *IdxArg = dyn_cast<ConstantInt>(Call.getArgOperand(2));
     Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
-           CS);
+           Call);
     auto &Entry = FrameEscapeInfo[Fn];
     Entry.second = unsigned(
         std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
@@ -4335,45 +4337,46 @@
   }
 
   case Intrinsic::experimental_gc_statepoint:
-    Assert(!CS.isInlineAsm(),
-           "gc.statepoint support for inline assembly unimplemented", CS);
-    Assert(CS.getParent()->getParent()->hasGC(),
-           "Enclosing function does not use GC.", CS);
+    if (auto *CI = dyn_cast<CallInst>(&Call))
+      Assert(!CI->isInlineAsm(),
+             "gc.statepoint support for inline assembly unimplemented", CI);
+    Assert(Call.getParent()->getParent()->hasGC(),
+           "Enclosing function does not use GC.", Call);
 
-    verifyStatepoint(CS);
+    verifyStatepoint(Call);
     break;
   case Intrinsic::experimental_gc_result: {
-    Assert(CS.getParent()->getParent()->hasGC(),
-           "Enclosing function does not use GC.", CS);
+    Assert(Call.getParent()->getParent()->hasGC(),
+           "Enclosing function does not use GC.", Call);
     // Are we tied to a statepoint properly?
-    CallSite StatepointCS(CS.getArgOperand(0));
+    const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
     const Function *StatepointFn =
-      StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
+        StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
     Assert(StatepointFn && StatepointFn->isDeclaration() &&
                StatepointFn->getIntrinsicID() ==
                    Intrinsic::experimental_gc_statepoint,
-           "gc.result operand #1 must be from a statepoint", CS,
-           CS.getArgOperand(0));
+           "gc.result operand #1 must be from a statepoint", Call,
+           Call.getArgOperand(0));
 
     // Assert that result type matches wrapped callee.
-    const Value *Target = StatepointCS.getArgument(2);
+    const Value *Target = StatepointCall->getArgOperand(2);
     auto *PT = cast<PointerType>(Target->getType());
     auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
-    Assert(CS.getType() == TargetFuncType->getReturnType(),
-           "gc.result result type does not match wrapped callee", CS);
+    Assert(Call.getType() == TargetFuncType->getReturnType(),
+           "gc.result result type does not match wrapped callee", Call);
     break;
   }
   case Intrinsic::experimental_gc_relocate: {
-    Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
+    Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
 
-    Assert(isa<PointerType>(CS.getType()->getScalarType()),
-           "gc.relocate must return a pointer or a vector of pointers", CS);
+    Assert(isa<PointerType>(Call.getType()->getScalarType()),
+           "gc.relocate must return a pointer or a vector of pointers", Call);
 
     // Check that this relocate is correctly tied to the statepoint
 
     // This is case for relocate on the unwinding path of an invoke statepoint
     if (LandingPadInst *LandingPad =
-          dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
+            dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
 
       const BasicBlock *InvokeBB =
           LandingPad->getParent()->getUniquePredecessor();
@@ -4386,161 +4389,160 @@
              InvokeBB);
       Assert(isStatepoint(InvokeBB->getTerminator()),
              "gc relocate should be linked to a statepoint", InvokeBB);
-    }
-    else {
+    } else {
       // In all other cases relocate should be tied to the statepoint directly.
       // This covers relocates on a normal return path of invoke statepoint and
       // relocates of a call statepoint.
-      auto Token = CS.getArgOperand(0);
+      auto Token = Call.getArgOperand(0);
       Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
-             "gc relocate is incorrectly tied to the statepoint", CS, Token);
+             "gc relocate is incorrectly tied to the statepoint", Call, Token);
     }
 
     // Verify rest of the relocate arguments.
-
-    ImmutableCallSite StatepointCS(
-        cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
+    const CallBase &StatepointCall =
+        *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
 
     // Both the base and derived must be piped through the safepoint.
-    Value* Base = CS.getArgOperand(1);
+    Value *Base = Call.getArgOperand(1);
     Assert(isa<ConstantInt>(Base),
-           "gc.relocate operand #2 must be integer offset", CS);
+           "gc.relocate operand #2 must be integer offset", Call);
 
-    Value* Derived = CS.getArgOperand(2);
+    Value *Derived = Call.getArgOperand(2);
     Assert(isa<ConstantInt>(Derived),
-           "gc.relocate operand #3 must be integer offset", CS);
+           "gc.relocate operand #3 must be integer offset", Call);
 
     const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
     const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
     // Check the bounds
-    Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
-           "gc.relocate: statepoint base index out of bounds", CS);
-    Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
-           "gc.relocate: statepoint derived index out of bounds", CS);
+    Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
+           "gc.relocate: statepoint base index out of bounds", Call);
+    Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
+           "gc.relocate: statepoint derived index out of bounds", Call);
 
     // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
     // section of the statepoint's argument.
-    Assert(StatepointCS.arg_size() > 0,
+    Assert(StatepointCall.arg_size() > 0,
            "gc.statepoint: insufficient arguments");
-    Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
-           "gc.statement: number of call arguments must be constant integer");
+    Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
+           "gc.statepoint: number of call arguments must be constant integer");
     const unsigned NumCallArgs =
-        cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
-    Assert(StatepointCS.arg_size() > NumCallArgs + 5,
+        cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
+    Assert(StatepointCall.arg_size() > NumCallArgs + 5,
            "gc.statepoint: mismatch in number of call arguments");
-    Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
+    Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
            "gc.statepoint: number of transition arguments must be "
            "a constant integer");
     const int NumTransitionArgs =
-        cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
+        cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
             ->getZExtValue();
     const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
-    Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
+    Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
            "gc.statepoint: number of deoptimization arguments must be "
            "a constant integer");
     const int NumDeoptArgs =
-        cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
+        cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
             ->getZExtValue();
     const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
-    const int GCParamArgsEnd = StatepointCS.arg_size();
+    const int GCParamArgsEnd = StatepointCall.arg_size();
     Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
            "gc.relocate: statepoint base index doesn't fall within the "
            "'gc parameters' section of the statepoint call",
-           CS);
+           Call);
     Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
            "gc.relocate: statepoint derived index doesn't fall within the "
            "'gc parameters' section of the statepoint call",
-           CS);
+           Call);
 
     // Relocated value must be either a pointer type or vector-of-pointer type,
     // but gc_relocate does not need to return the same pointer type as the
     // relocated pointer. It can be casted to the correct type later if it's
     // desired. However, they must have the same address space and 'vectorness'
-    GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
+    GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
     Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
-           "gc.relocate: relocated value must be a gc pointer", CS);
+           "gc.relocate: relocated value must be a gc pointer", Call);
 
-    auto ResultType = CS.getType();
+    auto ResultType = Call.getType();
     auto DerivedType = Relocate.getDerivedPtr()->getType();
     Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
            "gc.relocate: vector relocates to vector and pointer to pointer",
-           CS);
+           Call);
     Assert(
         ResultType->getPointerAddressSpace() ==
             DerivedType->getPointerAddressSpace(),
         "gc.relocate: relocating a pointer shouldn't change its address space",
-        CS);
+        Call);
     break;
   }
   case Intrinsic::eh_exceptioncode:
   case Intrinsic::eh_exceptionpointer: {
-    Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
-           "eh.exceptionpointer argument must be a catchpad", CS);
+    Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
+           "eh.exceptionpointer argument must be a catchpad", Call);
     break;
   }
   case Intrinsic::masked_load: {
-    Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
+    Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
+           Call);
 
-    Value *Ptr = CS.getArgOperand(0);
-    //Value *Alignment = CS.getArgOperand(1);
-    Value *Mask = CS.getArgOperand(2);
-    Value *PassThru = CS.getArgOperand(3);
-    Assert(Mask->getType()->isVectorTy(),
-           "masked_load: mask must be vector", CS);
+    Value *Ptr = Call.getArgOperand(0);
+    // Value *Alignment = Call.getArgOperand(1);
+    Value *Mask = Call.getArgOperand(2);
+    Value *PassThru = Call.getArgOperand(3);
+    Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
+           Call);
 
     // DataTy is the overloaded type
     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
-    Assert(DataTy == CS.getType(),
-           "masked_load: return must match pointer type", CS);
+    Assert(DataTy == Call.getType(),
+           "masked_load: return must match pointer type", Call);
     Assert(PassThru->getType() == DataTy,
-           "masked_load: pass through and data type must match", CS);
+           "masked_load: pass through and data type must match", Call);
     Assert(Mask->getType()->getVectorNumElements() ==
-           DataTy->getVectorNumElements(),
-           "masked_load: vector mask must be same length as data", CS);
+               DataTy->getVectorNumElements(),
+           "masked_load: vector mask must be same length as data", Call);
     break;
   }
   case Intrinsic::masked_store: {
-    Value *Val = CS.getArgOperand(0);
-    Value *Ptr = CS.getArgOperand(1);
-    //Value *Alignment = CS.getArgOperand(2);
-    Value *Mask = CS.getArgOperand(3);
-    Assert(Mask->getType()->isVectorTy(),
-           "masked_store: mask must be vector", CS);
+    Value *Val = Call.getArgOperand(0);
+    Value *Ptr = Call.getArgOperand(1);
+    // Value *Alignment = Call.getArgOperand(2);
+    Value *Mask = Call.getArgOperand(3);
+    Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
+           Call);
 
     // DataTy is the overloaded type
     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
     Assert(DataTy == Val->getType(),
-           "masked_store: storee must match pointer type", CS);
+           "masked_store: storee must match pointer type", Call);
     Assert(Mask->getType()->getVectorNumElements() ==
-           DataTy->getVectorNumElements(),
-           "masked_store: vector mask must be same length as data", CS);
+               DataTy->getVectorNumElements(),
+           "masked_store: vector mask must be same length as data", Call);
     break;
   }
 
   case Intrinsic::experimental_guard: {
-    Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
-    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+    Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
+    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
            "experimental_guard must have exactly one "
            "\"deopt\" operand bundle");
     break;
   }
 
   case Intrinsic::experimental_deoptimize: {
-    Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
-    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+    Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
+           Call);
+    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
            "experimental_deoptimize must have exactly one "
            "\"deopt\" operand bundle");
-    Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
+    Assert(Call.getType() == Call.getFunction()->getReturnType(),
            "experimental_deoptimize return type must match caller return type");
 
-    if (CS.isCall()) {
-      auto *DeoptCI = CS.getInstruction();
-      auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
+    if (isa<CallInst>(Call)) {
+      auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
       Assert(RI,
              "calls to experimental_deoptimize must be followed by a return");
 
-      if (!CS.getType()->isVoidTy() && RI)
-        Assert(RI->getReturnValue() == DeoptCI,
+      if (!Call.getType()->isVoidTy() && RI)
+        Assert(RI->getReturnValue() == &Call,
                "calls to experimental_deoptimize must be followed by a return "
                "of the value computed by experimental_deoptimize");
     }
@@ -4551,8 +4553,8 @@
   case Intrinsic::uadd_sat:
   case Intrinsic::ssub_sat:
   case Intrinsic::usub_sat: {
-    Value *Op1 = CS.getArgOperand(0);
-    Value *Op2 = CS.getArgOperand(1);
+    Value *Op1 = Call.getArgOperand(0);
+    Value *Op2 = Call.getArgOperand(1);
     Assert(Op1->getType()->isIntOrIntVectorTy(),
            "first operand of [us][add|sub]_sat must be an int type or vector "
            "of ints");
@@ -4562,8 +4564,8 @@
     break;
   }
   case Intrinsic::smul_fix: {
-    Value *Op1 = CS.getArgOperand(0);
-    Value *Op2 = CS.getArgOperand(1);
+    Value *Op1 = Call.getArgOperand(0);
+    Value *Op2 = Call.getArgOperand(1);
     Assert(Op1->getType()->isIntOrIntVectorTy(),
            "first operand of smul_fix must be an int type or vector "
            "of ints");
@@ -4571,7 +4573,7 @@
            "second operand of smul_fix must be an int type or vector "
            "of ints");
 
-    auto *Op3 = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+    auto *Op3 = dyn_cast<ConstantInt>(Call.getArgOperand(2));
     Assert(Op3, "third argument of smul_fix must be a constant integer");
     Assert(Op3->getType()->getBitWidth() <= 32,
            "third argument of smul_fix must fit within 32 bits");
diff --git a/lib/LLVMBuild.txt b/lib/LLVMBuild.txt
index 2f6628e..d87bf79 100644
--- a/lib/LLVMBuild.txt
+++ b/lib/LLVMBuild.txt
@@ -31,6 +31,7 @@
  IRReader
  LTO
  MC
+ MCA
  Object
  BinaryFormat
  ObjectYAML
diff --git a/lib/LTO/LTO.cpp b/lib/LTO/LTO.cpp
index 08924fb..3a95506 100644
--- a/lib/LTO/LTO.cpp
+++ b/lib/LTO/LTO.cpp
@@ -546,6 +546,15 @@
   if (!LTOInfo)
     return LTOInfo.takeError();
 
+  if (EnableSplitLTOUnit.hasValue()) {
+    // If only some modules were split, flag this in the index so that
+    // we can skip or error on optimizations that need consistently split
+    // modules (whole program devirt and lower type tests).
+    if (EnableSplitLTOUnit.getValue() != LTOInfo->EnableSplitLTOUnit)
+      ThinLTO.CombinedIndex.setPartiallySplitLTOUnits();
+  } else
+    EnableSplitLTOUnit = LTOInfo->EnableSplitLTOUnit;
+
   BitcodeModule BM = Input.Mods[ModI];
   auto ModSyms = Input.module_symbols(ModI);
   addModuleToGlobalRes(ModSyms, {ResI, ResE},
@@ -693,8 +702,12 @@
       }
 
       // Set the 'local' flag based on the linker resolution for this symbol.
-      if (Res.FinalDefinitionInLinkageUnit)
+      if (Res.FinalDefinitionInLinkageUnit) {
         GV->setDSOLocal(true);
+        if (GV->hasDLLImportStorageClass())
+          GV->setDLLStorageClass(GlobalValue::DLLStorageClassTypes::
+                                 DefaultStorageClass);
+      }
     }
     // Common resolution: collect the maximum size/alignment over all commons.
     // We also record if we see an instance of a common as prevailing, so that
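
The EnableSplitLTOUnit bookkeeping added above is a first-module-seeds, later-modules-compare pattern over an optional flag. A standalone sketch of the pattern, with illustrative names (not the LTO API):

    #include <optional>

    // The first module observed seeds the expected value; any later module
    // that disagrees marks the combined index as only partially split.
    struct SplitUnitTracker {
      std::optional<bool> EnableSplitLTOUnit;
      bool PartiallySplit = false;

      void observeModule(bool ModuleIsSplit) {
        if (EnableSplitLTOUnit.has_value()) {
          if (*EnableSplitLTOUnit != ModuleIsSplit)
            PartiallySplit = true; // whole-program devirt must skip or error
        } else {
          EnableSplitLTOUnit = ModuleIsSplit;
        }
      }
    };
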
diff --git a/lib/LTO/SummaryBasedOptimizations.cpp b/lib/LTO/SummaryBasedOptimizations.cpp
index 8b1abb7..bcdd984 100644
--- a/lib/LTO/SummaryBasedOptimizations.cpp
+++ b/lib/LTO/SummaryBasedOptimizations.cpp
@@ -60,21 +60,27 @@
       return UINT64_C(0);
     }
   };
-  auto AddToEntryCount = [](ValueInfo V, uint64_t New) {
+  auto AddToEntryCount = [](ValueInfo V, Scaled64 New) {
     if (!V.getSummaryList().size())
       return;
     for (auto &GVS : V.getSummaryList()) {
       auto S = GVS.get()->getBaseObject();
       auto *F = cast<FunctionSummary>(S);
-      F->setEntryCount(SaturatingAdd(F->entryCount(), New));
+      F->setEntryCount(
+          SaturatingAdd(F->entryCount(), New.template toInt<uint64_t>()));
     }
   };
 
+  auto GetProfileCount = [&](ValueInfo V, FunctionSummary::EdgeTy &Edge) {
+    auto RelFreq = GetCallSiteRelFreq(Edge);
+    Scaled64 EC(GetEntryCount(V), 0);
+    return RelFreq * EC;
+  };
   // After initializing the counts in initializeCounts above, the counts have to
   // be propagated across the combined callgraph.
   // SyntheticCountsUtils::propagate takes care of this propagation on any
   // callgraph that specialized GraphTraits.
-  SyntheticCountsUtils<ModuleSummaryIndex *>::propagate(
-      &Index, GetCallSiteRelFreq, GetEntryCount, AddToEntryCount);
+  SyntheticCountsUtils<ModuleSummaryIndex *>::propagate(&Index, GetProfileCount,
+                                                        AddToEntryCount);
   Index.setHasSyntheticEntryCounts();
 }
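
The new GetProfileCount callback folds the relative call-site frequency and the caller's entry count into one edge count before the saturating add. A rough model of that computation, with plain doubles standing in for the fixed-point Scaled64 type:

    #include <cstdint>
    #include <limits>

    // Add A and B, clamping at UINT64_MAX instead of wrapping.
    static uint64_t saturatingAdd(uint64_t A, uint64_t B) {
      uint64_t R = A + B;
      return R < A ? std::numeric_limits<uint64_t>::max() : R;
    }

    // Edge count = relative call-site frequency * caller entry count,
    // accumulated into the callee's synthetic entry count.
    static void propagateEdge(double RelFreq, uint64_t CallerEntryCount,
                              uint64_t &CalleeEntryCount) {
      uint64_t Edge = static_cast<uint64_t>(RelFreq * CallerEntryCount);
      CalleeEntryCount = saturatingAdd(CalleeEntryCount, Edge);
    }
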
diff --git a/lib/MC/MCAsmInfoCOFF.cpp b/lib/MC/MCAsmInfoCOFF.cpp
index d8fb875..15886eb 100644
--- a/lib/MC/MCAsmInfoCOFF.cpp
+++ b/lib/MC/MCAsmInfoCOFF.cpp
@@ -25,7 +25,7 @@
   COMMDirectiveAlignmentIsInBytes = false;
   LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment;
   HasDotTypeDotSizeDirective = false;
-  HasSingleParameterDotFile = false;
+  HasSingleParameterDotFile = true;
   WeakRefDirective = "\t.weak\t";
   HasLinkOnceDirective = true;
 
diff --git a/lib/MC/MCAsmStreamer.cpp b/lib/MC/MCAsmStreamer.cpp
index 0daec98..e017103 100644
--- a/lib/MC/MCAsmStreamer.cpp
+++ b/lib/MC/MCAsmStreamer.cpp
@@ -266,6 +266,7 @@
   void EmitCVFPOData(const MCSymbol *ProcSym, SMLoc L) override;
 
   void EmitIdent(StringRef IdentString) override;
+  void EmitCFIBKeyFrame() override;
   void EmitCFISections(bool EH, bool Debug) override;
   void EmitCFIDefCfa(int64_t Register, int64_t Offset) override;
   void EmitCFIDefCfaOffset(int64_t Offset) override;
@@ -285,6 +286,7 @@
   void EmitCFIUndefined(int64_t Register) override;
   void EmitCFIRegister(int64_t Register1, int64_t Register2) override;
   void EmitCFIWindowSave() override;
+  void EmitCFINegateRAState() override;
   void EmitCFIReturnColumn(int64_t Register) override;
 
   void EmitWinCFIStartProc(const MCSymbol *Symbol, SMLoc Loc) override;
@@ -539,11 +541,14 @@
 
 static const char *getPlatformName(MachO::PlatformType Type) {
   switch (Type) {
-  case MachO::PLATFORM_MACOS:    return "macos";
-  case MachO::PLATFORM_IOS:      return "ios";
-  case MachO::PLATFORM_TVOS:     return "tvos";
-  case MachO::PLATFORM_WATCHOS:  return "watchos";
-  case MachO::PLATFORM_BRIDGEOS: return "bridgeos";
+  case MachO::PLATFORM_MACOS:            return "macos";
+  case MachO::PLATFORM_IOS:              return "ios";
+  case MachO::PLATFORM_TVOS:             return "tvos";
+  case MachO::PLATFORM_WATCHOS:          return "watchos";
+  case MachO::PLATFORM_BRIDGEOS:         return "bridgeos";
+  case MachO::PLATFORM_IOSSIMULATOR:     return "iossimulator";
+  case MachO::PLATFORM_TVOSSIMULATOR:    return "tvossimulator";
+  case MachO::PLATFORM_WATCHOSSIMULATOR: return "watchossimulator";
   }
   llvm_unreachable("Invalid Mach-O platform type");
 }
@@ -1586,12 +1591,24 @@
   EmitEOL();
 }
 
+void MCAsmStreamer::EmitCFINegateRAState() {
+  MCStreamer::EmitCFINegateRAState();
+  OS << "\t.cfi_negate_ra_state";
+  EmitEOL();
+}
+
 void MCAsmStreamer::EmitCFIReturnColumn(int64_t Register) {
   MCStreamer::EmitCFIReturnColumn(Register);
   OS << "\t.cfi_return_column " << Register;
   EmitEOL();
 }
 
+void MCAsmStreamer::EmitCFIBKeyFrame() {
+  MCStreamer::EmitCFIBKeyFrame();
+  OS << "\t.cfi_b_key_frame";
+  EmitEOL();
+}
+
 void MCAsmStreamer::EmitWinCFIStartProc(const MCSymbol *Symbol, SMLoc Loc) {
   MCStreamer::EmitWinCFIStartProc(Symbol, Loc);
 
diff --git a/lib/MC/MCCodeView.cpp b/lib/MC/MCCodeView.cpp
index 234b43e..978ac78 100644
--- a/lib/MC/MCCodeView.cpp
+++ b/lib/MC/MCCodeView.cpp
@@ -432,13 +432,13 @@
                                   OS.getCurrentSectionOnly());
 }
 
-void CodeViewContext::emitDefRange(
+MCFragment *CodeViewContext::emitDefRange(
     MCObjectStreamer &OS,
     ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
     StringRef FixedSizePortion) {
   // Create and insert a fragment into the current section that will be encoded
   // later.
-  new MCCVDefRangeFragment(Ranges, FixedSizePortion,
-                           OS.getCurrentSectionOnly());
+  return new MCCVDefRangeFragment(Ranges, FixedSizePortion,
+                                  OS.getCurrentSectionOnly());
 }
 
diff --git a/lib/MC/MCDwarf.cpp b/lib/MC/MCDwarf.cpp
index 66dfe7b..38b0269 100644
--- a/lib/MC/MCDwarf.cpp
+++ b/lib/MC/MCDwarf.cpp
@@ -1332,6 +1332,10 @@
     Streamer.EmitIntValue(dwarf::DW_CFA_GNU_window_save, 1);
     return;
 
+  case MCCFIInstruction::OpNegateRAState:
+    Streamer.EmitIntValue(dwarf::DW_CFA_AARCH64_negate_ra_state, 1);
+    return;
+
   case MCCFIInstruction::OpUndefined: {
     unsigned Reg = Instr.getRegister();
     Streamer.EmitIntValue(dwarf::DW_CFA_undefined, 1);
@@ -1561,9 +1565,8 @@
   uint8_t CIEVersion = getCIEVersion(IsEH, context.getDwarfVersion());
   Streamer.EmitIntValue(CIEVersion, 1);
 
-  // Augmentation String
-  SmallString<8> Augmentation;
   if (IsEH) {
+    SmallString<8> Augmentation;
     Augmentation += "z";
     if (Frame.Personality)
       Augmentation += "P";
@@ -1572,6 +1575,8 @@
     Augmentation += "R";
     if (Frame.IsSignalFrame)
       Augmentation += "S";
+    if (Frame.IsBKeyFrame)
+      Augmentation += "B";
     Streamer.EmitBytes(Augmentation);
   }
   Streamer.EmitIntValue(0, 1);
@@ -1726,25 +1731,28 @@
 
 struct CIEKey {
   static const CIEKey getEmptyKey() {
-    return CIEKey(nullptr, 0, -1, false, false, static_cast<unsigned>(INT_MAX));
+    return CIEKey(nullptr, 0, -1, false, false, static_cast<unsigned>(INT_MAX),
+                  false);
   }
 
   static const CIEKey getTombstoneKey() {
-    return CIEKey(nullptr, -1, 0, false, false, static_cast<unsigned>(INT_MAX));
+    return CIEKey(nullptr, -1, 0, false, false, static_cast<unsigned>(INT_MAX),
+                  false);
   }
 
   CIEKey(const MCSymbol *Personality, unsigned PersonalityEncoding,
          unsigned LSDAEncoding, bool IsSignalFrame, bool IsSimple,
-         unsigned RAReg)
+         unsigned RAReg, bool IsBKeyFrame)
       : Personality(Personality), PersonalityEncoding(PersonalityEncoding),
         LsdaEncoding(LSDAEncoding), IsSignalFrame(IsSignalFrame),
-        IsSimple(IsSimple), RAReg(RAReg) {}
+        IsSimple(IsSimple), RAReg(RAReg), IsBKeyFrame(IsBKeyFrame) {}
 
   explicit CIEKey(const MCDwarfFrameInfo &Frame)
       : Personality(Frame.Personality),
         PersonalityEncoding(Frame.PersonalityEncoding),
         LsdaEncoding(Frame.LsdaEncoding), IsSignalFrame(Frame.IsSignalFrame),
-        IsSimple(Frame.IsSimple), RAReg(Frame.RAReg) {}
+        IsSimple(Frame.IsSimple), RAReg(Frame.RAReg),
+        IsBKeyFrame(Frame.IsBKeyFrame) {}
 
   const MCSymbol *Personality;
   unsigned PersonalityEncoding;
@@ -1752,6 +1760,7 @@
   bool IsSignalFrame;
   bool IsSimple;
   unsigned RAReg;
+  bool IsBKeyFrame;
 };
 
 } // end anonymous namespace
@@ -1763,9 +1772,9 @@
   static CIEKey getTombstoneKey() { return CIEKey::getTombstoneKey(); }
 
   static unsigned getHashValue(const CIEKey &Key) {
-    return static_cast<unsigned>(
-        hash_combine(Key.Personality, Key.PersonalityEncoding, Key.LsdaEncoding,
-                     Key.IsSignalFrame, Key.IsSimple, Key.RAReg));
+    return static_cast<unsigned>(hash_combine(
+        Key.Personality, Key.PersonalityEncoding, Key.LsdaEncoding,
+        Key.IsSignalFrame, Key.IsSimple, Key.RAReg, Key.IsBKeyFrame));
   }
 
   static bool isEqual(const CIEKey &LHS, const CIEKey &RHS) {
@@ -1773,8 +1782,8 @@
            LHS.PersonalityEncoding == RHS.PersonalityEncoding &&
            LHS.LsdaEncoding == RHS.LsdaEncoding &&
            LHS.IsSignalFrame == RHS.IsSignalFrame &&
-           LHS.IsSimple == RHS.IsSimple &&
-           LHS.RAReg == RHS.RAReg;
+           LHS.IsSimple == RHS.IsSimple && LHS.RAReg == RHS.RAReg &&
+           LHS.IsBKeyFrame == RHS.IsBKeyFrame;
   }
 };
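
Adding IsBKeyFrame to CIEKey only works because all three DenseMapInfo hooks are updated together; a field hashed but not compared (or vice versa) would silently merge or split CIEs. The invariant in miniature, over a hypothetical two-field key:

    // Every field participates in both the hash and the equality test, and
    // the empty/tombstone keys can never collide with a real key.
    struct Key { unsigned A; bool B; };
    struct KeyInfo {
      static Key getEmptyKey() { return {0xFFFFFFFFu, false}; }
      static Key getTombstoneKey() { return {0xFFFFFFFEu, false}; }
      static unsigned getHashValue(const Key &K) {
        return (K.A * 37u) ^ (K.B ? 0x9e3779b9u : 0u);
      }
      static bool isEqual(const Key &L, const Key &R) {
        return L.A == R.A && L.B == R.B;
      }
    };
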
 
diff --git a/lib/MC/MCObjectFileInfo.cpp b/lib/MC/MCObjectFileInfo.cpp
index be671af..9e35355 100644
--- a/lib/MC/MCObjectFileInfo.cpp
+++ b/lib/MC/MCObjectFileInfo.cpp
@@ -510,7 +510,7 @@
       ".rdata", COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | COFF::IMAGE_SCN_MEM_READ,
       SectionKind::getReadOnly());
 
-  if (T.getArch() == Triple::x86_64) {
+  if (T.getArch() == Triple::x86_64 || T.getArch() == Triple::aarch64) {
     // On Windows 64 with SEH, the LSDA is emitted into the .xdata section
     LSDASection = nullptr;
   } else {
diff --git a/lib/MC/MCObjectStreamer.cpp b/lib/MC/MCObjectStreamer.cpp
index 248c5a1..6ec705b 100644
--- a/lib/MC/MCObjectStreamer.cpp
+++ b/lib/MC/MCObjectStreamer.cpp
@@ -497,7 +497,11 @@
 void MCObjectStreamer::EmitCVDefRangeDirective(
     ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
     StringRef FixedSizePortion) {
-  getContext().getCVContext().emitDefRange(*this, Ranges, FixedSizePortion);
+  MCFragment *Frag =
+      getContext().getCVContext().emitDefRange(*this, Ranges, FixedSizePortion);
+  // Attach labels that were pending before we created the defrange fragment to
+  // the beginning of the new fragment.
+  flushPendingLabels(Frag, 0);
   this->MCStreamer::EmitCVDefRangeDirective(Ranges, FixedSizePortion);
 }
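
Flushing pending labels to the fragment returned by emitDefRange keeps labels emitted just before the directive attached to the right offset. The underlying pattern, sketched with illustrative types rather than the MC classes:

    #include <cstdint>
    #include <vector>

    struct Fragment {};
    struct Symbol {
      Fragment *Frag = nullptr;
      uint64_t Offset = 0;
    };

    struct StreamerSketch {
      std::vector<Symbol *> PendingLabels;
      // Bind every label emitted before this fragment existed to the start
      // of the newly created fragment.
      void flushPendingLabels(Fragment *F, uint64_t Offset) {
        for (Symbol *S : PendingLabels) {
          S->Frag = F;
          S->Offset = Offset;
        }
        PendingLabels.clear();
      }
    };
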
 
diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp
index aa07eee..cf42a6f 100644
--- a/lib/MC/MCParser/AsmParser.cpp
+++ b/lib/MC/MCParser/AsmParser.cpp
@@ -495,6 +495,7 @@
     DK_CFI_UNDEFINED,
     DK_CFI_REGISTER,
     DK_CFI_WINDOW_SAVE,
+    DK_CFI_B_KEY_FRAME,
     DK_MACROS_ON,
     DK_MACROS_OFF,
     DK_ALTMACRO,
@@ -898,6 +899,9 @@
       eatToEndOfStatement();
   }
 
+  getTargetParser().onEndOfFile();
+  printPendingErrors();
+
   // All errors should have been emitted.
   assert(!hasPendingError() && "unexpected error from parseStatement");
 
@@ -3359,9 +3363,12 @@
     }
   }
 
-  if (FileNumber == -1)
+  if (FileNumber == -1) {
+    if (!getContext().getAsmInfo()->hasSingleParameterDotFile())
+      return Error(DirectiveLoc,
+                   "target does not support '.file' without a number");
     getStreamer().EmitFileDirective(Filename);
-  else {
+  } else {
     // In case there is a -g option as well as debug info from directive .file,
     // we turn off the -g option, directly use the existing debug info instead.
     // Also reset any implicit ".file 0" for the assembler source.
@@ -5293,6 +5300,7 @@
   DirectiveKindMap[".cfi_undefined"] = DK_CFI_UNDEFINED;
   DirectiveKindMap[".cfi_register"] = DK_CFI_REGISTER;
   DirectiveKindMap[".cfi_window_save"] = DK_CFI_WINDOW_SAVE;
+  DirectiveKindMap[".cfi_b_key_frame"] = DK_CFI_B_KEY_FRAME;
   DirectiveKindMap[".macros_on"] = DK_MACROS_ON;
   DirectiveKindMap[".macros_off"] = DK_MACROS_OFF;
   DirectiveKindMap[".macro"] = DK_MACRO;
diff --git a/lib/MC/MCParser/DarwinAsmParser.cpp b/lib/MC/MCParser/DarwinAsmParser.cpp
index 07926d6c..cd99112 100644
--- a/lib/MC/MCParser/DarwinAsmParser.cpp
+++ b/lib/MC/MCParser/DarwinAsmParser.cpp
@@ -1148,7 +1148,10 @@
   case MachO::PLATFORM_IOS:     return Triple::IOS;
   case MachO::PLATFORM_TVOS:    return Triple::TvOS;
   case MachO::PLATFORM_WATCHOS: return Triple::WatchOS;
-  case MachO::PLATFORM_BRIDGEOS: /* silence warning */break;
+  case MachO::PLATFORM_BRIDGEOS:         /* silence warning */ break;
+  case MachO::PLATFORM_IOSSIMULATOR:     /* silence warning */ break;
+  case MachO::PLATFORM_TVOSSIMULATOR:    /* silence warning */ break;
+  case MachO::PLATFORM_WATCHOSSIMULATOR: /* silence warning */ break;
   }
   llvm_unreachable("Invalid mach-o platform type");
 }
diff --git a/lib/MC/MCStreamer.cpp b/lib/MC/MCStreamer.cpp
index 733dd40..6a8471b 100644
--- a/lib/MC/MCStreamer.cpp
+++ b/lib/MC/MCStreamer.cpp
@@ -221,6 +221,13 @@
                                       Source);
 }
 
+void MCStreamer::EmitCFIBKeyFrame() {
+  MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
+  if (!CurFrame)
+    return;
+  CurFrame->IsBKeyFrame = true;
+}
+
 void MCStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
                                        unsigned Column, unsigned Flags,
                                        unsigned Isa,
@@ -577,6 +584,15 @@
   CurFrame->Instructions.push_back(Instruction);
 }
 
+void MCStreamer::EmitCFINegateRAState() {
+  MCSymbol *Label = EmitCFILabel();
+  MCCFIInstruction Instruction = MCCFIInstruction::createNegateRAState(Label);
+  MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
+  if (!CurFrame)
+    return;
+  CurFrame->Instructions.push_back(Instruction);
+}
+
 void MCStreamer::EmitCFIReturnColumn(int64_t Register) {
   MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
   if (!CurFrame)
@@ -849,13 +865,11 @@
   CurFrame->PrologEnd = Label;
 }
 
-void MCStreamer::EmitCOFFSafeSEH(MCSymbol const *Symbol) {
-}
+void MCStreamer::EmitCOFFSafeSEH(MCSymbol const *Symbol) {}
 
 void MCStreamer::EmitCOFFSymbolIndex(MCSymbol const *Symbol) {}
 
-void MCStreamer::EmitCOFFSectionIndex(MCSymbol const *Symbol) {
-}
+void MCStreamer::EmitCOFFSectionIndex(MCSymbol const *Symbol) {}
 
 void MCStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol, uint64_t Offset) {}
 
@@ -865,9 +879,12 @@
 /// the specified string in the output .s file.  This capability is
 /// indicated by the hasRawTextSupport() predicate.
 void MCStreamer::EmitRawTextImpl(StringRef String) {
-  errs() << "EmitRawText called on an MCStreamer that doesn't support it, "
-  " something must not be fully mc'ized\n";
-  abort();
+  // This is not llvm_unreachable for the sake of out-of-tree backend
+  // developers who may not have an assembly streamer yet; it serves as a
+  // reminder not to call EmitRawText accidentally in that situation.
+  report_fatal_error("EmitRawText called on an MCStreamer that doesn't support "
+                     "it (target backend is likely missing an AsmStreamer "
+                     "implementation)");
 }
 
 void MCStreamer::EmitRawText(const Twine &T) {
@@ -901,8 +918,9 @@
     TS->emitAssignment(Symbol, Value);
 }
 
-void MCTargetStreamer::prettyPrintAsm(MCInstPrinter &InstPrinter, raw_ostream &OS,
-                              const MCInst &Inst, const MCSubtargetInfo &STI) {
+void MCTargetStreamer::prettyPrintAsm(MCInstPrinter &InstPrinter,
+                                      raw_ostream &OS, const MCInst &Inst,
+                                      const MCSubtargetInfo &STI) {
   InstPrinter.printInst(&Inst, OS, "", STI);
 }
 
diff --git a/lib/MC/MCWin64EH.cpp b/lib/MC/MCWin64EH.cpp
index 5268d27..0724b10 100644
--- a/lib/MC/MCWin64EH.cpp
+++ b/lib/MC/MCWin64EH.cpp
@@ -468,10 +468,10 @@
   info->Symbol = Label;
 
   uint32_t FuncLength = 0x0;
-  FuncLength = (uint32_t)GetAbsDifference(streamer, info->FuncletOrFuncEnd,
-                                          info->Begin);
-  if (FuncLength)
-    FuncLength /= 4;
+  if (info->FuncletOrFuncEnd)
+    FuncLength = (uint32_t)GetAbsDifference(streamer, info->FuncletOrFuncEnd,
+                                            info->Begin);
+  FuncLength /= 4;
   uint32_t PrologCodeBytes = ARM64CountOfUnwindCodes(info->Instructions);
   uint32_t TotalCodeBytes = PrologCodeBytes;
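
The rewritten guard computes a byte length only when an end symbol is known, then divides unconditionally, since the ARM64 .xdata FunctionLength field counts 4-byte instruction units. In outline (illustrative helper, not the MC API):

    #include <cstdint>

    // Convert a function's byte extent to the 4-byte instruction units used
    // by ARM64 unwind info; a missing end symbol yields a length of zero.
    static uint32_t funcLengthInWords(uint64_t Begin, uint64_t End,
                                      bool HaveEnd) {
      uint32_t Bytes = HaveEnd ? static_cast<uint32_t>(End - Begin) : 0;
      return Bytes / 4;
    }
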
 
diff --git a/lib/MC/WasmObjectWriter.cpp b/lib/MC/WasmObjectWriter.cpp
index 8a36920..0cca375 100644
--- a/lib/MC/WasmObjectWriter.cpp
+++ b/lib/MC/WasmObjectWriter.cpp
@@ -1176,7 +1176,7 @@
   TableImport.Module = TableSym->getModuleName();
   TableImport.Field = TableSym->getName();
   TableImport.Kind = wasm::WASM_EXTERNAL_TABLE;
-  TableImport.Table.ElemType = wasm::WASM_TYPE_ANYFUNC;
+  TableImport.Table.ElemType = wasm::WASM_TYPE_FUNCREF;
   Imports.push_back(TableImport);
 
   // Populate SignatureIndices, and Imports and WasmIndices for undefined
@@ -1256,7 +1256,7 @@
       Segment.Offset = DataSize;
       Segment.Section = &Section;
       addData(Segment.Data, Section);
-      Segment.Alignment = Section.getAlignment();
+      Segment.Alignment = Log2_32(Section.getAlignment());
       Segment.Flags = 0;
       DataSize += Segment.Data.size();
       Section.setSegmentIndex(SegmentIndex);
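
The Log2_32 fix above reflects that wasm data segments encode alignment as a log2 exponent, not a byte count. A small stand-in for that conversion:

    #include <cstdint>

    // Wasm stores the alignment exponent, so 16-byte alignment is encoded
    // as 4 (2^4 == 16), and 1-byte alignment as 0.
    static uint32_t encodeWasmAlignment(uint32_t AlignBytes) {
      uint32_t Exp = 0;
      while ((1u << Exp) < AlignBytes)
        ++Exp;
      return Exp;
    }
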
diff --git a/lib/MCA/CMakeLists.txt b/lib/MCA/CMakeLists.txt
new file mode 100644
index 0000000..bfd0782
--- /dev/null
+++ b/lib/MCA/CMakeLists.txt
@@ -0,0 +1,23 @@
+add_llvm_library(LLVMMCA
+  Context.cpp
+  HWEventListener.cpp
+  HardwareUnits/HardwareUnit.cpp
+  HardwareUnits/LSUnit.cpp
+  HardwareUnits/RegisterFile.cpp
+  HardwareUnits/ResourceManager.cpp
+  HardwareUnits/RetireControlUnit.cpp
+  HardwareUnits/Scheduler.cpp
+  InstrBuilder.cpp
+  Instruction.cpp
+  Pipeline.cpp
+  Stages/DispatchStage.cpp
+  Stages/EntryStage.cpp
+  Stages/ExecuteStage.cpp
+  Stages/InstructionTables.cpp
+  Stages/RetireStage.cpp
+  Stages/Stage.cpp
+  Support.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${LLVM_MAIN_INCLUDE_DIR}/llvm/MCA
+  )
diff --git a/lib/MCA/Context.cpp b/lib/MCA/Context.cpp
new file mode 100644
index 0000000..c1b197d
--- /dev/null
+++ b/lib/MCA/Context.cpp
@@ -0,0 +1,65 @@
+//===---------------------------- Context.cpp -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a class for holding ownership of various simulated
+/// hardware units.  A Context also provides a utility routine for constructing
+/// a default out-of-order pipeline with fetch, dispatch, execute, and retire
+/// stages.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Context.h"
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Stages/DispatchStage.h"
+#include "llvm/MCA/Stages/EntryStage.h"
+#include "llvm/MCA/Stages/ExecuteStage.h"
+#include "llvm/MCA/Stages/RetireStage.h"
+
+namespace llvm {
+namespace mca {
+
+std::unique_ptr<Pipeline>
+Context::createDefaultPipeline(const PipelineOptions &Opts, InstrBuilder &IB,
+                               SourceMgr &SrcMgr) {
+  const MCSchedModel &SM = STI.getSchedModel();
+
+  // Create the hardware units defining the backend.
+  auto RCU = llvm::make_unique<RetireControlUnit>(SM);
+  auto PRF = llvm::make_unique<RegisterFile>(SM, MRI, Opts.RegisterFileSize);
+  auto LSU = llvm::make_unique<LSUnit>(SM, Opts.LoadQueueSize,
+                                       Opts.StoreQueueSize, Opts.AssumeNoAlias);
+  auto HWS = llvm::make_unique<Scheduler>(SM, *LSU);
+
+  // Create the pipeline stages.
+  auto Fetch = llvm::make_unique<EntryStage>(SrcMgr);
+  auto Dispatch = llvm::make_unique<DispatchStage>(STI, MRI, Opts.DispatchWidth,
+                                                   *RCU, *PRF);
+  auto Execute = llvm::make_unique<ExecuteStage>(*HWS);
+  auto Retire = llvm::make_unique<RetireStage>(*RCU, *PRF);
+
+  // Pass the ownership of all the hardware units to this Context.
+  addHardwareUnit(std::move(RCU));
+  addHardwareUnit(std::move(PRF));
+  addHardwareUnit(std::move(LSU));
+  addHardwareUnit(std::move(HWS));
+
+  // Build the pipeline.
+  auto StagePipeline = llvm::make_unique<Pipeline>();
+  StagePipeline->appendStage(std::move(Fetch));
+  StagePipeline->appendStage(std::move(Dispatch));
+  StagePipeline->appendStage(std::move(Execute));
+  StagePipeline->appendStage(std::move(Retire));
+  return StagePipeline;
+}
+
+} // namespace mca
+} // namespace llvm
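
A caller drives this roughly as follows; this is a hedged usage sketch in which the PipelineOptions field order is an assumption and MRI, STI, IB, and SrcMgr are presumed already constructed (see llvm/MCA/Context.h for the authoritative API):

    mca::Context Ctx(MRI, STI);
    mca::PipelineOptions Opts(/*DispatchWidth=*/4, /*RegisterFileSize=*/0,
                              /*LoadQueueSize=*/0, /*StoreQueueSize=*/0,
                              /*AssumeNoAlias=*/false);
    std::unique_ptr<mca::Pipeline> P =
        Ctx.createDefaultPipeline(Opts, IB, SrcMgr);
    // Running the pipeline steps fetch -> dispatch -> execute -> retire
    // cycle by cycle until SrcMgr runs out of instructions.
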
diff --git a/lib/MCA/HWEventListener.cpp b/lib/MCA/HWEventListener.cpp
new file mode 100644
index 0000000..4a0e5b1
--- /dev/null
+++ b/lib/MCA/HWEventListener.cpp
@@ -0,0 +1,23 @@
+//===----------------------- HWEventListener.cpp ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a vtable anchor for class HWEventListener.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HWEventListener.h"
+
+namespace llvm {
+namespace mca {
+
+// Anchor the vtable here.
+void HWEventListener::anchor() {}
+} // namespace mca
+} // namespace llvm
diff --git a/lib/MCA/HardwareUnits/HardwareUnit.cpp b/lib/MCA/HardwareUnits/HardwareUnit.cpp
new file mode 100644
index 0000000..edd32b9
--- /dev/null
+++ b/lib/MCA/HardwareUnits/HardwareUnit.cpp
@@ -0,0 +1,25 @@
+//===------------------------- HardwareUnit.cpp -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the anchor for the base class that describes
+/// simulated hardware units.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+
+namespace llvm {
+namespace mca {
+
+// Pin the vtable with this method.
+HardwareUnit::~HardwareUnit() = default;
+
+} // namespace mca
+} // namespace llvm
diff --git a/lib/MCA/HardwareUnits/LSUnit.cpp b/lib/MCA/HardwareUnits/LSUnit.cpp
new file mode 100644
index 0000000..8895eb3
--- /dev/null
+++ b/lib/MCA/HardwareUnits/LSUnit.cpp
@@ -0,0 +1,190 @@
+//===----------------------- LSUnit.cpp -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A Load-Store Unit for the llvm-mca tool.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/LSUnit.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+LSUnit::LSUnit(const MCSchedModel &SM, unsigned LQ, unsigned SQ,
+               bool AssumeNoAlias)
+    : LQ_Size(LQ), SQ_Size(SQ), NoAlias(AssumeNoAlias) {
+  if (SM.hasExtraProcessorInfo()) {
+    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
+    if (!LQ_Size && EPI.LoadQueueID) {
+      const MCProcResourceDesc &LdQDesc = *SM.getProcResource(EPI.LoadQueueID);
+      LQ_Size = LdQDesc.BufferSize;
+    }
+
+    if (!SQ_Size && EPI.StoreQueueID) {
+      const MCProcResourceDesc &StQDesc = *SM.getProcResource(EPI.StoreQueueID);
+      SQ_Size = StQDesc.BufferSize;
+    }
+  }
+}
+
+#ifndef NDEBUG
+void LSUnit::dump() const {
+  dbgs() << "[LSUnit] LQ_Size = " << LQ_Size << '\n';
+  dbgs() << "[LSUnit] SQ_Size = " << SQ_Size << '\n';
+  dbgs() << "[LSUnit] NextLQSlotIdx = " << LoadQueue.size() << '\n';
+  dbgs() << "[LSUnit] NextSQSlotIdx = " << StoreQueue.size() << '\n';
+}
+#endif
+
+void LSUnit::assignLQSlot(unsigned Index) {
+  assert(!isLQFull());
+  assert(LoadQueue.count(Index) == 0);
+
+  LLVM_DEBUG(dbgs() << "[LSUnit] - AssignLQSlot <Idx=" << Index
+                    << ",slot=" << LoadQueue.size() << ">\n");
+  LoadQueue.insert(Index);
+}
+
+void LSUnit::assignSQSlot(unsigned Index) {
+  assert(!isSQFull());
+  assert(StoreQueue.count(Index) == 0);
+
+  LLVM_DEBUG(dbgs() << "[LSUnit] - AssignSQSlot <Idx=" << Index
+                    << ",slot=" << StoreQueue.size() << ">\n");
+  StoreQueue.insert(Index);
+}
+
+void LSUnit::dispatch(const InstRef &IR) {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  unsigned IsMemBarrier = Desc.HasSideEffects;
+  assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
+
+  const unsigned Index = IR.getSourceIndex();
+  if (Desc.MayLoad) {
+    if (IsMemBarrier)
+      LoadBarriers.insert(Index);
+    assignLQSlot(Index);
+  }
+
+  if (Desc.MayStore) {
+    if (IsMemBarrier)
+      StoreBarriers.insert(Index);
+    assignSQSlot(Index);
+  }
+}
+
+LSUnit::Status LSUnit::isAvailable(const InstRef &IR) const {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  if (Desc.MayLoad && isLQFull())
+    return LSUnit::LSU_LQUEUE_FULL;
+  if (Desc.MayStore && isSQFull())
+    return LSUnit::LSU_SQUEUE_FULL;
+  return LSUnit::LSU_AVAILABLE;
+}
+
+bool LSUnit::isReady(const InstRef &IR) const {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  const unsigned Index = IR.getSourceIndex();
+  bool IsALoad = Desc.MayLoad;
+  bool IsAStore = Desc.MayStore;
+  assert((IsALoad || IsAStore) && "Not a memory operation!");
+  assert((!IsALoad || LoadQueue.count(Index) == 1) && "Load not in queue!");
+  assert((!IsAStore || StoreQueue.count(Index) == 1) && "Store not in queue!");
+
+  if (IsALoad && !LoadBarriers.empty()) {
+    unsigned LoadBarrierIndex = *LoadBarriers.begin();
+    // A younger load cannot pass an older load barrier.
+    if (Index > LoadBarrierIndex)
+      return false;
+    // A load barrier cannot pass an older load.
+    if (Index == LoadBarrierIndex && Index != *LoadQueue.begin())
+      return false;
+  }
+
+  if (IsAStore && !StoreBarriers.empty()) {
+    unsigned StoreBarrierIndex = *StoreBarriers.begin();
+    // A younger store cannot pass an older store barrier.
+    if (Index > StoreBarrierIndex)
+      return false;
+    // A store barrier cannot pass an older store.
+    if (Index == StoreBarrierIndex && Index != *StoreQueue.begin())
+      return false;
+  }
+
+  // A load may not pass a previous store unless flag 'NoAlias' is set.
+  // A load may pass a previous load.
+  if (NoAlias && IsALoad)
+    return true;
+
+  if (StoreQueue.size()) {
+    // A load may not pass a previous store.
+    // A store may not pass a previous store.
+    if (Index > *StoreQueue.begin())
+      return false;
+  }
+
+  // Okay, we are older than the oldest store in the queue.
+  // If there are no pending loads, then we can say for sure that this
+  // instruction is ready.
+  if (isLQEmpty())
+    return true;
+
+  // Check if there are no older loads.
+  if (Index <= *LoadQueue.begin())
+    return true;
+
+  // There is at least one younger load.
+  //
+  // A store may not pass a previous load.
+  // A load may pass a previous load.
+  return !IsAStore;
+}
+
+void LSUnit::onInstructionExecuted(const InstRef &IR) {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  const unsigned Index = IR.getSourceIndex();
+  bool IsALoad = Desc.MayLoad;
+  bool IsAStore = Desc.MayStore;
+
+  if (IsALoad) {
+    if (LoadQueue.erase(Index)) {
+      LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
+                        << " has been removed from the load queue.\n");
+    }
+    if (!LoadBarriers.empty() && Index == *LoadBarriers.begin()) {
+      LLVM_DEBUG(
+          dbgs() << "[LSUnit]: Instruction idx=" << Index
+                 << " has been removed from the set of load barriers.\n");
+      LoadBarriers.erase(Index);
+    }
+  }
+
+  if (IsAStore) {
+    if (StoreQueue.erase(Index)) {
+      LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
+                        << " has been removed from the store queue.\n");
+    }
+
+    if (!StoreBarriers.empty() && Index == *StoreBarriers.begin()) {
+      LLVM_DEBUG(
+          dbgs() << "[LSUnit]: Instruction idx=" << Index
+                 << " has been removed from the set of store barriers.\n");
+      StoreBarriers.erase(Index);
+    }
+  }
+}
+
+} // namespace mca
+} // namespace llvm
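
The ordering rules in isReady reduce to a small scenario table. A worked example, purely illustrative:

    // Program order, all to potentially aliasing addresses:
    //   #1 store X   -- enters the store queue
    //   #2 load  Y   -- younger than store #1
    //   #3 store Z   -- younger than load #2
    // With AssumeNoAlias == false: load #2 waits for store #1 (a load may
    // not pass a previous store); store #3 waits for both #1 and #2 (a
    // store may pass neither an older store nor an older load).
    // With AssumeNoAlias == true: load #2 is ready immediately, while the
    // store-over-store and store-over-load constraints still hold.
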
diff --git a/lib/MCA/HardwareUnits/RegisterFile.cpp b/lib/MCA/HardwareUnits/RegisterFile.cpp
new file mode 100644
index 0000000..22977e5
--- /dev/null
+++ b/lib/MCA/HardwareUnits/RegisterFile.cpp
@@ -0,0 +1,491 @@
+//===--------------------- RegisterFile.cpp ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a register mapping file class.  This class is responsible
+/// for managing hardware register files and the tracking of data dependencies
+/// between registers.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+RegisterFile::RegisterFile(const MCSchedModel &SM, const MCRegisterInfo &mri,
+                           unsigned NumRegs)
+    : MRI(mri),
+      RegisterMappings(mri.getNumRegs(), {WriteRef(), RegisterRenamingInfo()}),
+      ZeroRegisters(mri.getNumRegs(), false) {
+  initialize(SM, NumRegs);
+}
+
+void RegisterFile::initialize(const MCSchedModel &SM, unsigned NumRegs) {
+  // Create a default register file that "sees" all the machine registers
+  // declared by the target. The number of physical registers in the default
+  // register file is set equal to `NumRegs`. A value of zero for `NumRegs`
+  // means: this register file has an unbounded number of physical registers.
+  RegisterFiles.emplace_back(NumRegs);
+  if (!SM.hasExtraProcessorInfo())
+    return;
+
+  // For each user defined register file, allocate a RegisterMappingTracker
+  // object. The size of every register file, as well as the mapping between
+  // register files and register classes is specified via tablegen.
+  const MCExtraProcessorInfo &Info = SM.getExtraProcessorInfo();
+
+  // Skip invalid register file at index 0.
+  for (unsigned I = 1, E = Info.NumRegisterFiles; I < E; ++I) {
+    const MCRegisterFileDesc &RF = Info.RegisterFiles[I];
+    assert(RF.NumPhysRegs && "Invalid PRF with zero physical registers!");
+
+    // The cost of a register definition is equivalent to the number of
+    // physical registers that are allocated at register renaming stage.
+    unsigned Length = RF.NumRegisterCostEntries;
+    const MCRegisterCostEntry *FirstElt =
+        &Info.RegisterCostTable[RF.RegisterCostEntryIdx];
+    addRegisterFile(RF, ArrayRef<MCRegisterCostEntry>(FirstElt, Length));
+  }
+}
+
+void RegisterFile::cycleStart() {
+  for (RegisterMappingTracker &RMT : RegisterFiles)
+    RMT.NumMoveEliminated = 0;
+}
+
+void RegisterFile::addRegisterFile(const MCRegisterFileDesc &RF,
+                                   ArrayRef<MCRegisterCostEntry> Entries) {
+  // A default register file is always allocated at index #0. That register file
+  // is mainly used to count the total number of mappings created by all
+  // register files at runtime. Users can limit the number of available physical
+  // registers in register file #0 through the command line flag
+  // `-register-file-size`.
+  unsigned RegisterFileIndex = RegisterFiles.size();
+  RegisterFiles.emplace_back(RF.NumPhysRegs, RF.MaxMovesEliminatedPerCycle,
+                             RF.AllowZeroMoveEliminationOnly);
+
+  // Special case where there is no register class identifier in the set.
+  // An empty set of register classes means: this register file contains all
+  // the physical registers specified by the target.
+  // We optimistically assume that a register can be renamed at the cost of a
+  // single physical register. The constructor of RegisterFile ensures that
+  // a RegisterMapping exists for each logical register defined by the Target.
+  if (Entries.empty())
+    return;
+
+  // Now update the cost of individual registers.
+  for (const MCRegisterCostEntry &RCE : Entries) {
+    const MCRegisterClass &RC = MRI.getRegClass(RCE.RegisterClassID);
+    for (const MCPhysReg Reg : RC) {
+      RegisterRenamingInfo &Entry = RegisterMappings[Reg].second;
+      IndexPlusCostPairTy &IPC = Entry.IndexPlusCost;
+      if (IPC.first && IPC.first != RegisterFileIndex) {
+        // The only register file that is allowed to overlap is the default
+        // register file at index #0. The analysis is inaccurate if register
+        // files overlap.
+        errs() << "warning: register " << MRI.getName(Reg)
+               << " defined in multiple register files.";
+      }
+      IPC = std::make_pair(RegisterFileIndex, RCE.Cost);
+      Entry.RenameAs = Reg;
+      Entry.AllowMoveElimination = RCE.AllowMoveElimination;
+
+      // Assume the same cost for each sub-register.
+      for (MCSubRegIterator I(Reg, &MRI); I.isValid(); ++I) {
+        RegisterRenamingInfo &OtherEntry = RegisterMappings[*I].second;
+        if (!OtherEntry.IndexPlusCost.first &&
+            (!OtherEntry.RenameAs ||
+             MRI.isSuperRegister(*I, OtherEntry.RenameAs))) {
+          OtherEntry.IndexPlusCost = IPC;
+          OtherEntry.RenameAs = Reg;
+        }
+      }
+    }
+  }
+}
+
+void RegisterFile::allocatePhysRegs(const RegisterRenamingInfo &Entry,
+                                    MutableArrayRef<unsigned> UsedPhysRegs) {
+  unsigned RegisterFileIndex = Entry.IndexPlusCost.first;
+  unsigned Cost = Entry.IndexPlusCost.second;
+  if (RegisterFileIndex) {
+    RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
+    RMT.NumUsedPhysRegs += Cost;
+    UsedPhysRegs[RegisterFileIndex] += Cost;
+  }
+
+  // Now update the default register mapping tracker.
+  RegisterFiles[0].NumUsedPhysRegs += Cost;
+  UsedPhysRegs[0] += Cost;
+}
+
+void RegisterFile::freePhysRegs(const RegisterRenamingInfo &Entry,
+                                MutableArrayRef<unsigned> FreedPhysRegs) {
+  unsigned RegisterFileIndex = Entry.IndexPlusCost.first;
+  unsigned Cost = Entry.IndexPlusCost.second;
+  if (RegisterFileIndex) {
+    RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
+    RMT.NumUsedPhysRegs -= Cost;
+    FreedPhysRegs[RegisterFileIndex] += Cost;
+  }
+
+  // Now update the default register mapping tracker.
+  RegisterFiles[0].NumUsedPhysRegs -= Cost;
+  FreedPhysRegs[0] += Cost;
+}
+
+void RegisterFile::addRegisterWrite(WriteRef Write,
+                                    MutableArrayRef<unsigned> UsedPhysRegs) {
+  WriteState &WS = *Write.getWriteState();
+  unsigned RegID = WS.getRegisterID();
+  assert(RegID && "Adding an invalid register definition?");
+
+  LLVM_DEBUG({
+    dbgs() << "RegisterFile: addRegisterWrite [ " << Write.getSourceIndex()
+           << ", " << MRI.getName(RegID) << "]\n";
+  });
+
+  // If RenameAs is equal to RegID, then RegID is subject to register renaming
+  // and false dependencies on RegID are all eliminated.
+
+  // If RenameAs references the invalid register, then we optimistically assume
+  // that it can be renamed. In the absence of tablegen descriptors for register
+  // files, RenameAs is always set to the invalid register ID.  In all other
+  // cases, RenameAs must be either equal to RegID, or it must reference a
+  // super-register of RegID.
+
+  // If RenameAs is a super-register of RegID, then a write to RegID has always
+  // a false dependency on RenameAs. The only exception is for when the write
+  // implicitly clears the upper portion of the underlying register.
+  // If a write clears its super-registers, then it is renamed as `RenameAs`.
+  bool IsWriteZero = WS.isWriteZero();
+  bool IsEliminated = WS.isEliminated();
+  bool ShouldAllocatePhysRegs = !IsWriteZero && !IsEliminated;
+  const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+  WS.setPRF(RRI.IndexPlusCost.first);
+
+  if (RRI.RenameAs && RRI.RenameAs != RegID) {
+    RegID = RRI.RenameAs;
+    WriteRef &OtherWrite = RegisterMappings[RegID].first;
+
+    if (!WS.clearsSuperRegisters()) {
+      // The processor keeps the definition of `RegID` together with register
+      // `RenameAs`. Since this partial write is not renamed, no physical
+      // register is allocated.
+      ShouldAllocatePhysRegs = false;
+
+      WriteState *OtherWS = OtherWrite.getWriteState();
+      if (OtherWS && (OtherWrite.getSourceIndex() != Write.getSourceIndex())) {
+        // This partial write has a false dependency on RenameAs.
+        assert(!IsEliminated && "Unexpected partial update!");
+        OtherWS->addUser(&WS);
+      }
+    }
+  }
+
+  // Update zero registers.
+  unsigned ZeroRegisterID =
+      WS.clearsSuperRegisters() ? RegID : WS.getRegisterID();
+  if (IsWriteZero) {
+    ZeroRegisters.setBit(ZeroRegisterID);
+    for (MCSubRegIterator I(ZeroRegisterID, &MRI); I.isValid(); ++I)
+      ZeroRegisters.setBit(*I);
+  } else {
+    ZeroRegisters.clearBit(ZeroRegisterID);
+    for (MCSubRegIterator I(ZeroRegisterID, &MRI); I.isValid(); ++I)
+      ZeroRegisters.clearBit(*I);
+  }
+
+  // If this move has been eliminated, then the call to tryEliminateMove
+  // should have already updated all the register mappings.
+  if (!IsEliminated) {
+    // Update the mapping for register RegID including its sub-registers.
+    RegisterMappings[RegID].first = Write;
+    RegisterMappings[RegID].second.AliasRegID = 0U;
+    for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+      RegisterMappings[*I].first = Write;
+      RegisterMappings[*I].second.AliasRegID = 0U;
+    }
+
+    // No physical registers are allocated for instructions that are optimized
+    // in hardware. For example, zero-latency data-dependency breaking
+    // instructions don't consume physical registers.
+    if (ShouldAllocatePhysRegs)
+      allocatePhysRegs(RegisterMappings[RegID].second, UsedPhysRegs);
+  }
+
+  if (!WS.clearsSuperRegisters())
+    return;
+
+  for (MCSuperRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+    if (!IsEliminated) {
+      RegisterMappings[*I].first = Write;
+      RegisterMappings[*I].second.AliasRegID = 0U;
+    }
+
+    if (IsWriteZero)
+      ZeroRegisters.setBit(*I);
+    else
+      ZeroRegisters.clearBit(*I);
+  }
+}
+
+void RegisterFile::removeRegisterWrite(
+    const WriteState &WS, MutableArrayRef<unsigned> FreedPhysRegs) {
+  // Early exit if this write was eliminated. A write eliminated at register
+  // renaming stage generates an alias, and it is not added to the PRF.
+  if (WS.isEliminated())
+    return;
+
+  unsigned RegID = WS.getRegisterID();
+
+  assert(RegID != 0 && "Invalidating an already invalid register?");
+  assert(WS.getCyclesLeft() != UNKNOWN_CYCLES &&
+         "Invalidating a write of unknown cycles!");
+  assert(WS.getCyclesLeft() <= 0 && "Invalid cycles left for this write!");
+
+  bool ShouldFreePhysRegs = !WS.isWriteZero();
+  unsigned RenameAs = RegisterMappings[RegID].second.RenameAs;
+  if (RenameAs && RenameAs != RegID) {
+    RegID = RenameAs;
+
+    if (!WS.clearsSuperRegisters()) {
+      // Keep the definition of `RegID` together with register `RenameAs`.
+      ShouldFreePhysRegs = false;
+    }
+  }
+
+  if (ShouldFreePhysRegs)
+    freePhysRegs(RegisterMappings[RegID].second, FreedPhysRegs);
+
+  WriteRef &WR = RegisterMappings[RegID].first;
+  if (WR.getWriteState() == &WS)
+    WR.invalidate();
+
+  for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+    WriteRef &OtherWR = RegisterMappings[*I].first;
+    if (OtherWR.getWriteState() == &WS)
+      OtherWR.invalidate();
+  }
+
+  if (!WS.clearsSuperRegisters())
+    return;
+
+  for (MCSuperRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+    WriteRef &OtherWR = RegisterMappings[*I].first;
+    if (OtherWR.getWriteState() == &WS)
+      OtherWR.invalidate();
+  }
+}
+
+bool RegisterFile::tryEliminateMove(WriteState &WS, ReadState &RS) {
+  const RegisterMapping &RMFrom = RegisterMappings[RS.getRegisterID()];
+  const RegisterMapping &RMTo = RegisterMappings[WS.getRegisterID()];
+
+  // From and To must be owned by the same PRF.
+  const RegisterRenamingInfo &RRIFrom = RMFrom.second;
+  const RegisterRenamingInfo &RRITo = RMTo.second;
+  unsigned RegisterFileIndex = RRIFrom.IndexPlusCost.first;
+  if (RegisterFileIndex != RRITo.IndexPlusCost.first)
+    return false;
+
+  // We only allow move elimination for writes that update a full physical
+  // register. On X86, move elimination is possible with 32-bit general purpose
+  // registers because writes to those registers are not partial writes.  If a
+  // register move is a partial write, then we conservatively assume that move
+  // elimination fails, since it would either trigger a partial update, or the
+  // issue of a merge opcode.
+  //
+  // Note that this constraint may be lifted in the future. For example, we
+  // could make this model more flexible, and let users customize the set of
+  // registers (i.e. register classes) that allow move elimination.
+  //
+  // For now, we assume that there is a strong correlation between registers
+  // that allow move elimination, and how those same registers are renamed in
+  // hardware.
+  if (RRITo.RenameAs && RRITo.RenameAs != WS.getRegisterID()) {
+    // Early exit if the PRF doesn't support move elimination for this register.
+    if (!RegisterMappings[RRITo.RenameAs].second.AllowMoveElimination)
+      return false;
+    if (!WS.clearsSuperRegisters())
+      return false;
+  }
+
+  RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
+  if (RMT.MaxMoveEliminatedPerCycle &&
+      RMT.NumMoveEliminated == RMT.MaxMoveEliminatedPerCycle)
+    return false;
+
+  bool IsZeroMove = ZeroRegisters[RS.getRegisterID()];
+  if (RMT.AllowZeroMoveEliminationOnly && !IsZeroMove)
+    return false;
+
+  MCPhysReg FromReg = RS.getRegisterID();
+  MCPhysReg ToReg = WS.getRegisterID();
+
+  // Construct an alias.
+  MCPhysReg AliasReg = FromReg;
+  if (RRIFrom.RenameAs)
+    AliasReg = RRIFrom.RenameAs;
+
+  const RegisterRenamingInfo &RMAlias = RegisterMappings[AliasReg].second;
+  if (RMAlias.AliasRegID)
+    AliasReg = RMAlias.AliasRegID;
+
+  if (AliasReg != ToReg) {
+    RegisterMappings[ToReg].second.AliasRegID = AliasReg;
+    for (MCSubRegIterator I(ToReg, &MRI); I.isValid(); ++I)
+      RegisterMappings[*I].second.AliasRegID = AliasReg;
+  }
+
+  RMT.NumMoveEliminated++;
+  if (IsZeroMove) {
+    WS.setWriteZero();
+    RS.setReadZero();
+  }
+  WS.setEliminated();
+
+  return true;
+}
+
+void RegisterFile::collectWrites(const ReadState &RS,
+                                 SmallVectorImpl<WriteRef> &Writes) const {
+  unsigned RegID = RS.getRegisterID();
+  assert(RegID && RegID < RegisterMappings.size());
+  LLVM_DEBUG(dbgs() << "RegisterFile: collecting writes for register "
+                    << MRI.getName(RegID) << '\n');
+
+  // Check if this is an alias.
+  const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+  if (RRI.AliasRegID)
+    RegID = RRI.AliasRegID;
+
+  const WriteRef &WR = RegisterMappings[RegID].first;
+  if (WR.isValid())
+    Writes.push_back(WR);
+
+  // Handle potential partial register updates.
+  for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+    const WriteRef &WR = RegisterMappings[*I].first;
+    if (WR.isValid())
+      Writes.push_back(WR);
+  }
+
+  // Remove duplicate entries and resize the input vector.
+  if (Writes.size() > 1) {
+    sort(Writes, [](const WriteRef &Lhs, const WriteRef &Rhs) {
+      return Lhs.getWriteState() < Rhs.getWriteState();
+    });
+    auto It = std::unique(Writes.begin(), Writes.end());
+    Writes.resize(std::distance(Writes.begin(), It));
+  }
+
+  LLVM_DEBUG({
+    for (const WriteRef &WR : Writes) {
+      const WriteState &WS = *WR.getWriteState();
+      dbgs() << "[PRF] Found a dependent use of Register "
+             << MRI.getName(WS.getRegisterID()) << " (defined by instruction #"
+             << WR.getSourceIndex() << ")\n";
+    }
+  });
+}
+
+void RegisterFile::addRegisterRead(ReadState &RS,
+                                   SmallVectorImpl<WriteRef> &Defs) const {
+  unsigned RegID = RS.getRegisterID();
+  const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+  RS.setPRF(RRI.IndexPlusCost.first);
+  if (RS.isIndependentFromDef())
+    return;
+
+  if (ZeroRegisters[RS.getRegisterID()])
+    RS.setReadZero();
+  collectWrites(RS, Defs);
+  RS.setDependentWrites(Defs.size());
+}
+
+unsigned RegisterFile::isAvailable(ArrayRef<unsigned> Regs) const {
+  SmallVector<unsigned, 4> NumPhysRegs(getNumRegisterFiles());
+
+  // Find how many new mappings must be created for each register file.
+  for (const unsigned RegID : Regs) {
+    const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+    const IndexPlusCostPairTy &Entry = RRI.IndexPlusCost;
+    if (Entry.first)
+      NumPhysRegs[Entry.first] += Entry.second;
+    NumPhysRegs[0] += Entry.second;
+  }
+
+  unsigned Response = 0;
+  for (unsigned I = 0, E = getNumRegisterFiles(); I < E; ++I) {
+    unsigned NumRegs = NumPhysRegs[I];
+    if (!NumRegs)
+      continue;
+
+    const RegisterMappingTracker &RMT = RegisterFiles[I];
+    if (!RMT.NumPhysRegs) {
+      // The register file has an unbounded number of microarchitectural
+      // registers.
+      continue;
+    }
+
+    if (RMT.NumPhysRegs < NumRegs) {
+      // The current register file is too small. This may occur if the number
+      // of microarchitectural registers in register file #0 was changed by
+      // the user via the -reg-file-size flag. Alternatively, the scheduling
+      // model may have specified too few registers for this register file.
+      LLVM_DEBUG(dbgs() << "Not enough registers in the register file.\n");
+
+      // FIXME: Normalize the instruction register count to match the
+      // NumPhysRegs value.  This is a highly unusual case, and is not expected
+      // to occur.  This normalization is hiding an inconsistency in either the
+      // scheduling model or in the value that the user might have specified
+      // for NumPhysRegs.
+      NumRegs = RMT.NumPhysRegs;
+    }
+
+    if (RMT.NumPhysRegs < (RMT.NumUsedPhysRegs + NumRegs))
+      Response |= (1U << I);
+  }
+
+  return Response;
+}
+
+#ifndef NDEBUG
+void RegisterFile::dump() const {
+  for (unsigned I = 0, E = MRI.getNumRegs(); I < E; ++I) {
+    const RegisterMapping &RM = RegisterMappings[I];
+    const RegisterRenamingInfo &RRI = RM.second;
+    if (ZeroRegisters[I]) {
+      dbgs() << MRI.getName(I) << ", " << I
+             << ", PRF=" << RRI.IndexPlusCost.first
+             << ", Cost=" << RRI.IndexPlusCost.second
+             << ", RenameAs=" << RRI.RenameAs << ", IsZero=" << ZeroRegisters[I]
+             << ",";
+      RM.first.dump();
+      dbgs() << '\n';
+    }
+  }
+
+  for (unsigned I = 0, E = getNumRegisterFiles(); I < E; ++I) {
+    dbgs() << "Register File #" << I;
+    const RegisterMappingTracker &RMT = RegisterFiles[I];
+    dbgs() << "\n  TotalMappings:        " << RMT.NumPhysRegs
+           << "\n  NumUsedMappings:      " << RMT.NumUsedPhysRegs << '\n';
+  }
+}
+#endif
+
+} // namespace mca
+} // namespace llvm
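
isAvailable reports shortages for all register files at once by packing them into a bitmask, one bit per register file index. A hedged reading of the result, where PRF and WrittenRegs (the register IDs an instruction defines) are assumed from context:

    unsigned Response = PRF.isAvailable(WrittenRegs);
    if (Response == 0) {
      // Every register file involved can absorb the new mappings.
    } else if (Response & (1u << 0)) {
      // The default register file (#0) is out of physical registers;
      // the dispatch stage must stall this instruction.
    }
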
diff --git a/lib/MCA/HardwareUnits/ResourceManager.cpp b/lib/MCA/HardwareUnits/ResourceManager.cpp
new file mode 100644
index 0000000..2039b58
--- /dev/null
+++ b/lib/MCA/HardwareUnits/ResourceManager.cpp
@@ -0,0 +1,331 @@
+//===--------------------- ResourceManager.cpp ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The classes here represent processor resource units and their management
+/// strategy.  These classes are managed by the Scheduler.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/ResourceManager.h"
+#include "llvm/MCA/Support.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+ResourceStrategy::~ResourceStrategy() = default;
+
+// Returns the index of the highest bit set. For resource masks, the position of
+// the highest bit set can be used to construct a resource mask identifier.
+static unsigned getResourceStateIndex(uint64_t Mask) {
+  return std::numeric_limits<uint64_t>::digits - countLeadingZeros(Mask);
+}
+
+static uint64_t selectImpl(uint64_t CandidateMask,
+                           uint64_t &NextInSequenceMask) {
+  // The upper bit set in CandidateMask identifies our next candidate resource.
+  CandidateMask = 1ULL << (getResourceStateIndex(CandidateMask) - 1);
+  NextInSequenceMask &= (CandidateMask | (CandidateMask - 1));
+  return CandidateMask;
+}
+
+uint64_t DefaultResourceStrategy::select(uint64_t ReadyMask) {
+  // This method assumes that ReadyMask cannot be zero.
+  uint64_t CandidateMask = ReadyMask & NextInSequenceMask;
+  if (CandidateMask)
+    return selectImpl(CandidateMask, NextInSequenceMask);
+
+  NextInSequenceMask = ResourceUnitMask ^ RemovedFromNextInSequence;
+  RemovedFromNextInSequence = 0;
+  CandidateMask = ReadyMask & NextInSequenceMask;
+  if (CandidateMask)
+    return selectImpl(CandidateMask, NextInSequenceMask);
+
+  NextInSequenceMask = ResourceUnitMask;
+  CandidateMask = ReadyMask & NextInSequenceMask;
+  return selectImpl(CandidateMask, NextInSequenceMask);
+}
+
+void DefaultResourceStrategy::used(uint64_t Mask) {
+  if (Mask > NextInSequenceMask) {
+    RemovedFromNextInSequence |= Mask;
+    return;
+  }
+
+  NextInSequenceMask &= (~Mask);
+  if (NextInSequenceMask)
+    return;
+
+  NextInSequenceMask = ResourceUnitMask ^ RemovedFromNextInSequence;
+  RemovedFromNextInSequence = 0;
+}
+
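+// For a group, ResourceSizeMask holds the member units (the group mask with
+// its own leading bit cleared); for a plain resource it holds one bit per
+// physical unit. Illustrative example (hypothetical encoding): a group whose
+// mask is 0b1011 (own bit 0b1000, member units 0b0001 and 0b0010) gets
+// ResourceSizeMask = 0b0011, while a unit resource with NumUnits = 2 gets
+// ResourceSizeMask = 0b11.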
+ResourceState::ResourceState(const MCProcResourceDesc &Desc, unsigned Index,
+                             uint64_t Mask)
+    : ProcResourceDescIndex(Index), ResourceMask(Mask),
+      BufferSize(Desc.BufferSize), IsAGroup(countPopulation(ResourceMask) > 1) {
+  if (IsAGroup) {
+    ResourceSizeMask =
+        ResourceMask ^ 1ULL << (getResourceStateIndex(ResourceMask) - 1);
+  } else {
+    ResourceSizeMask = (1ULL << Desc.NumUnits) - 1;
+  }
+  ReadyMask = ResourceSizeMask;
+  AvailableSlots = BufferSize == -1 ? 0U : static_cast<unsigned>(BufferSize);
+  Unavailable = false;
+}
+
+bool ResourceState::isReady(unsigned NumUnits) const {
+  return (!isReserved() || isADispatchHazard()) &&
+         countPopulation(ReadyMask) >= NumUnits;
+}
+
+ResourceStateEvent ResourceState::isBufferAvailable() const {
+  if (isADispatchHazard() && isReserved())
+    return RS_RESERVED;
+  if (!isBuffered() || AvailableSlots)
+    return RS_BUFFER_AVAILABLE;
+  return RS_BUFFER_UNAVAILABLE;
+}
+
+#ifndef NDEBUG
+void ResourceState::dump() const {
+  dbgs() << "MASK=" << format_hex(ResourceMask, 16)
+         << ", SZMASK=" << format_hex(ResourceSizeMask, 16)
+         << ", RDYMASK=" << format_hex(ReadyMask, 16)
+         << ", BufferSize=" << BufferSize
+         << ", AvailableSlots=" << AvailableSlots
+         << ", Reserved=" << Unavailable << '\n';
+}
+#endif
+
+static std::unique_ptr<ResourceStrategy>
+getStrategyFor(const ResourceState &RS) {
+  if (RS.isAResourceGroup() || RS.getNumUnits() > 1)
+    return llvm::make_unique<DefaultResourceStrategy>(RS.getReadyMask());
+  return std::unique_ptr<ResourceStrategy>(nullptr);
+}
+
+ResourceManager::ResourceManager(const MCSchedModel &SM)
+    : Resources(SM.getNumProcResourceKinds()),
+      Strategies(SM.getNumProcResourceKinds()),
+      Resource2Groups(SM.getNumProcResourceKinds(), 0),
+      ProcResID2Mask(SM.getNumProcResourceKinds()) {
+  computeProcResourceMasks(SM, ProcResID2Mask);
+
+  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+    uint64_t Mask = ProcResID2Mask[I];
+    unsigned Index = getResourceStateIndex(Mask);
+    Resources[Index] =
+        llvm::make_unique<ResourceState>(*SM.getProcResource(I), I, Mask);
+    Strategies[Index] = getStrategyFor(*Resources[Index]);
+  }
+
+  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+    uint64_t Mask = ProcResID2Mask[I];
+    unsigned Index = getResourceStateIndex(Mask);
+    const ResourceState &RS = *Resources[Index];
+    if (!RS.isAResourceGroup())
+      continue;
+
+    uint64_t GroupMaskIdx = 1ULL << (Index - 1);
+    Mask -= GroupMaskIdx;
+    while (Mask) {
+      // Extract lowest set isolated bit.
+      uint64_t Unit = Mask & (-Mask);
+      unsigned IndexUnit = getResourceStateIndex(Unit);
+      Resource2Groups[IndexUnit] |= GroupMaskIdx;
+      Mask ^= Unit;
+    }
+  }
+}
+
+void ResourceManager::setCustomStrategyImpl(std::unique_ptr<ResourceStrategy> S,
+                                            uint64_t ResourceMask) {
+  unsigned Index = getResourceStateIndex(ResourceMask);
+  assert(Index < Resources.size() && "Invalid processor resource index!");
+  assert(S && "Unexpected null strategy in input!");
+  Strategies[Index] = std::move(S);
+}
+
+unsigned ResourceManager::resolveResourceMask(uint64_t Mask) const {
+  return Resources[getResourceStateIndex(Mask)]->getProcResourceID();
+}
+
+unsigned ResourceManager::getNumUnits(uint64_t ResourceID) const {
+  return Resources[getResourceStateIndex(ResourceID)]->getNumUnits();
+}
+
+// Returns the actual resource consumed by this Use.
+// The first element is the primary resource ID; the second is the specific
+// sub-resource ID.
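+//
+// Illustrative example (hypothetical encoding): for a non-group resource with
+// mask 0x4 that declares two units, selecting the second unit yields the pair
+// (0x4, 0x2), where 0x2 identifies the unit within the resource's ready mask.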
+ResourceRef ResourceManager::selectPipe(uint64_t ResourceID) {
+  unsigned Index = getResourceStateIndex(ResourceID);
+  assert(Index < Resources.size() && "Invalid resource use!");
+  ResourceState &RS = *Resources[Index];
+  assert(RS.isReady() && "No available units to select!");
+
+  // Special case where RS is not a group, and it only declares a single
+  // resource unit.
+  if (!RS.isAResourceGroup() && RS.getNumUnits() == 1)
+    return std::make_pair(ResourceID, RS.getReadyMask());
+
+  uint64_t SubResourceID = Strategies[Index]->select(RS.getReadyMask());
+  if (RS.isAResourceGroup())
+    return selectPipe(SubResourceID);
+  return std::make_pair(ResourceID, SubResourceID);
+}
+
+void ResourceManager::use(const ResourceRef &RR) {
+  // Mark the sub-resource referenced by RR as used.
+  unsigned RSID = getResourceStateIndex(RR.first);
+  ResourceState &RS = *Resources[RSID];
+  RS.markSubResourceAsUsed(RR.second);
+  // Remember to update the resource strategy for non-group resources with
+  // multiple units.
+  if (RS.getNumUnits() > 1)
+    Strategies[RSID]->used(RR.second);
+
+  // If there are still available units in RR.first,
+  // then we are done.
+  if (RS.isReady())
+    return;
+
+  // Notify groups that RR.first is no longer available.
+  uint64_t Users = Resource2Groups[RSID];
+  while (Users) {
+    // Extract lowest set isolated bit.
+    unsigned GroupIndex = getResourceStateIndex(Users & (-Users));
+    ResourceState &CurrentUser = *Resources[GroupIndex];
+    CurrentUser.markSubResourceAsUsed(RR.first);
+    Strategies[GroupIndex]->used(RR.first);
+    // Reset lowest set bit.
+    Users &= Users - 1;
+  }
+}
+
+void ResourceManager::release(const ResourceRef &RR) {
+  ResourceState &RS = *Resources[getResourceStateIndex(RR.first)];
+  bool WasFullyUsed = !RS.isReady();
+  RS.releaseSubResource(RR.second);
+  if (!WasFullyUsed)
+    return;
+
+  for (std::unique_ptr<ResourceState> &Res : Resources) {
+    ResourceState &Current = *Res;
+    if (!Current.isAResourceGroup() || Current.getResourceMask() == RR.first)
+      continue;
+
+    if (Current.containsResource(RR.first))
+      Current.releaseSubResource(RR.first);
+  }
+}
+
+ResourceStateEvent
+ResourceManager::canBeDispatched(ArrayRef<uint64_t> Buffers) const {
+  ResourceStateEvent Result = ResourceStateEvent::RS_BUFFER_AVAILABLE;
+  for (uint64_t Buffer : Buffers) {
+    ResourceState &RS = *Resources[getResourceStateIndex(Buffer)];
+    Result = RS.isBufferAvailable();
+    if (Result != ResourceStateEvent::RS_BUFFER_AVAILABLE)
+      break;
+  }
+  return Result;
+}
+
+void ResourceManager::reserveBuffers(ArrayRef<uint64_t> Buffers) {
+  for (const uint64_t Buffer : Buffers) {
+    ResourceState &RS = *Resources[getResourceStateIndex(Buffer)];
+    assert(RS.isBufferAvailable() == ResourceStateEvent::RS_BUFFER_AVAILABLE);
+    RS.reserveBuffer();
+
+    if (RS.isADispatchHazard()) {
+      assert(!RS.isReserved());
+      RS.setReserved();
+    }
+  }
+}
+
+void ResourceManager::releaseBuffers(ArrayRef<uint64_t> Buffers) {
+  for (const uint64_t R : Buffers)
+    Resources[getResourceStateIndex(R)]->releaseBuffer();
+}
+
+bool ResourceManager::canBeIssued(const InstrDesc &Desc) const {
+  return all_of(
+      Desc.Resources, [&](const std::pair<uint64_t, const ResourceUsage> &E) {
+        unsigned NumUnits = E.second.isReserved() ? 0U : E.second.NumUnits;
+        unsigned Index = getResourceStateIndex(E.first);
+        return Resources[Index]->isReady(NumUnits);
+      });
+}
+
+void ResourceManager::issueInstruction(
+    const InstrDesc &Desc,
+    SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes) {
+  for (const std::pair<uint64_t, ResourceUsage> &R : Desc.Resources) {
+    const CycleSegment &CS = R.second.CS;
+    if (!CS.size()) {
+      releaseResource(R.first);
+      continue;
+    }
+
+    assert(CS.begin() == 0 && "Invalid {Start, End} cycles!");
+    if (!R.second.isReserved()) {
+      ResourceRef Pipe = selectPipe(R.first);
+      use(Pipe);
+      BusyResources[Pipe] += CS.size();
+      Pipes.emplace_back(std::pair<ResourceRef, ResourceCycles>(
+          Pipe, ResourceCycles(CS.size())));
+    } else {
+      assert((countPopulation(R.first) > 1) && "Expected a group!");
+      // Mark this group as reserved.
+      assert(R.second.isReserved());
+      reserveResource(R.first);
+      BusyResources[ResourceRef(R.first, R.first)] += CS.size();
+    }
+  }
+}
+
+void ResourceManager::cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed) {
+  for (std::pair<ResourceRef, unsigned> &BR : BusyResources) {
+    if (BR.second)
+      BR.second--;
+    if (!BR.second) {
+      // Release this resource.
+      const ResourceRef &RR = BR.first;
+
+      if (countPopulation(RR.first) == 1)
+        release(RR);
+
+      releaseResource(RR.first);
+      ResourcesFreed.push_back(RR);
+    }
+  }
+
+  for (const ResourceRef &RF : ResourcesFreed)
+    BusyResources.erase(RF);
+}
+
+void ResourceManager::reserveResource(uint64_t ResourceID) {
+  ResourceState &Resource = *Resources[getResourceStateIndex(ResourceID)];
+  assert(!Resource.isReserved());
+  Resource.setReserved();
+}
+
+void ResourceManager::releaseResource(uint64_t ResourceID) {
+  ResourceState &Resource = *Resources[getResourceStateIndex(ResourceID)];
+  Resource.clearReserved();
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/lib/MCA/HardwareUnits/RetireControlUnit.cpp b/lib/MCA/HardwareUnits/RetireControlUnit.cpp
new file mode 100644
index 0000000..de9f245
--- /dev/null
+++ b/lib/MCA/HardwareUnits/RetireControlUnit.cpp
@@ -0,0 +1,88 @@
+//===---------------------- RetireControlUnit.cpp ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file simulates the hardware responsible for retiring instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+RetireControlUnit::RetireControlUnit(const MCSchedModel &SM)
+    : NextAvailableSlotIdx(0), CurrentInstructionSlotIdx(0),
+      AvailableSlots(SM.MicroOpBufferSize), MaxRetirePerCycle(0) {
+  // Check if the scheduling model provides extra information about the machine
+  // processor. If so, then use that information to set the reorder buffer size
+  // and the maximum number of instructions retired per cycle.
+  if (SM.hasExtraProcessorInfo()) {
+    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
+    if (EPI.ReorderBufferSize)
+      AvailableSlots = EPI.ReorderBufferSize;
+    MaxRetirePerCycle = EPI.MaxRetirePerCycle;
+  }
+
+  assert(AvailableSlots && "Invalid reorder buffer size!");
+  Queue.resize(AvailableSlots);
+}
+
+// Reserves a number of slots, and returns a new token.
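+// Illustrative example (hypothetical sizes): with an 8-entry queue,
+// NextAvailableSlotIdx = 6 and NumMicroOps = 4, the returned token is 6,
+// NextAvailableSlotIdx wraps around to (6 + 4) % 8 = 2, and AvailableSlots
+// decreases by 4.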
+unsigned RetireControlUnit::reserveSlot(const InstRef &IR,
+                                        unsigned NumMicroOps) {
+  assert(isAvailable(NumMicroOps) && "Reorder Buffer unavailable!");
+  unsigned NormalizedQuantity =
+      std::min(NumMicroOps, static_cast<unsigned>(Queue.size()));
+  // Zero-latency instructions may have zero uOps. Artificially bump this
+  // value to 1. Although zero-latency instructions don't consume scheduler
+  // resources, they still consume one slot in the retire queue.
+  NormalizedQuantity = std::max(NormalizedQuantity, 1U);
+  unsigned TokenID = NextAvailableSlotIdx;
+  Queue[NextAvailableSlotIdx] = {IR, NormalizedQuantity, false};
+  NextAvailableSlotIdx += NormalizedQuantity;
+  NextAvailableSlotIdx %= Queue.size();
+  AvailableSlots -= NormalizedQuantity;
+  return TokenID;
+}
+
+const RetireControlUnit::RUToken &RetireControlUnit::peekCurrentToken() const {
+  return Queue[CurrentInstructionSlotIdx];
+}
+
+void RetireControlUnit::consumeCurrentToken() {
+  RetireControlUnit::RUToken &Current = Queue[CurrentInstructionSlotIdx];
+  assert(Current.NumSlots && "Reserved zero slots?");
+  assert(Current.IR && "Invalid RUToken in the RCU queue.");
+  Current.IR.getInstruction()->retire();
+
+  // Update the slot index to be the next item in the circular queue.
+  CurrentInstructionSlotIdx += Current.NumSlots;
+  CurrentInstructionSlotIdx %= Queue.size();
+  AvailableSlots += Current.NumSlots;
+}
+
+void RetireControlUnit::onInstructionExecuted(unsigned TokenID) {
+  assert(Queue.size() > TokenID);
+  assert(Queue[TokenID].Executed == false && Queue[TokenID].IR);
+  Queue[TokenID].Executed = true;
+}
+
+#ifndef NDEBUG
+void RetireControlUnit::dump() const {
+  dbgs() << "Retire Unit: { Total Slots=" << Queue.size()
+         << ", Available Slots=" << AvailableSlots << " }\n";
+}
+#endif
+
+} // namespace mca
+} // namespace llvm
diff --git a/lib/MCA/HardwareUnits/Scheduler.cpp b/lib/MCA/HardwareUnits/Scheduler.cpp
new file mode 100644
index 0000000..355ef79
--- /dev/null
+++ b/lib/MCA/HardwareUnits/Scheduler.cpp
@@ -0,0 +1,247 @@
+//===--------------------- Scheduler.cpp ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A scheduler for processor resource units and processor resource groups.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
+void Scheduler::initializeStrategy(std::unique_ptr<SchedulerStrategy> S) {
+  // Ensure we have a valid (non-null) strategy object.
+  Strategy = S ? std::move(S) : llvm::make_unique<DefaultSchedulerStrategy>();
+}
+
+// Anchor the vtable of SchedulerStrategy and DefaultSchedulerStrategy.
+SchedulerStrategy::~SchedulerStrategy() = default;
+DefaultSchedulerStrategy::~DefaultSchedulerStrategy() = default;
+
+#ifndef NDEBUG
+void Scheduler::dump() const {
+  dbgs() << "[SCHEDULER]: WaitSet size is: " << WaitSet.size() << '\n';
+  dbgs() << "[SCHEDULER]: ReadySet size is: " << ReadySet.size() << '\n';
+  dbgs() << "[SCHEDULER]: IssuedSet size is: " << IssuedSet.size() << '\n';
+  Resources->dump();
+}
+#endif
+
+Scheduler::Status Scheduler::isAvailable(const InstRef &IR) const {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+
+  switch (Resources->canBeDispatched(Desc.Buffers)) {
+  case ResourceStateEvent::RS_BUFFER_UNAVAILABLE:
+    return Scheduler::SC_BUFFERS_FULL;
+  case ResourceStateEvent::RS_RESERVED:
+    return Scheduler::SC_DISPATCH_GROUP_STALL;
+  case ResourceStateEvent::RS_BUFFER_AVAILABLE:
+    break;
+  }
+
+  // Give lower priority to LSUnit stall events.
+  switch (LSU.isAvailable(IR)) {
+  case LSUnit::LSU_LQUEUE_FULL:
+    return Scheduler::SC_LOAD_QUEUE_FULL;
+  case LSUnit::LSU_SQUEUE_FULL:
+    return Scheduler::SC_STORE_QUEUE_FULL;
+  case LSUnit::LSU_AVAILABLE:
+    return Scheduler::SC_AVAILABLE;
+  }
+
+  llvm_unreachable("Don't know how to process this LSU state result!");
+}
+
+void Scheduler::issueInstructionImpl(
+    InstRef &IR,
+    SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &UsedResources) {
+  Instruction *IS = IR.getInstruction();
+  const InstrDesc &D = IS->getDesc();
+
+  // Issue the instruction and collect all the consumed resources
+  // into a vector. That vector is then used to notify the listener.
+  Resources->issueInstruction(D, UsedResources);
+
+  // Notify the instruction that it started executing.
+  // This updates the internal state of each write.
+  IS->execute();
+
+  if (IS->isExecuting())
+    IssuedSet.emplace_back(IR);
+  else if (IS->isExecuted())
+    LSU.onInstructionExecuted(IR);
+}
+
+// Release the buffered resources and issue the instruction.
+void Scheduler::issueInstruction(
+    InstRef &IR,
+    SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &UsedResources,
+    SmallVectorImpl<InstRef> &ReadyInstructions) {
+  const Instruction &Inst = *IR.getInstruction();
+  bool HasDependentUsers = Inst.hasDependentUsers();
+
+  Resources->releaseBuffers(Inst.getDesc().Buffers);
+  issueInstructionImpl(IR, UsedResources);
+  // Instructions that have been issued during this cycle might have unblocked
+  // other dependent instructions. Dependent instructions may be issued during
+  // this same cycle if operands have ReadAdvance entries.  Promote those
+  // instructions to the ReadySet and notify the caller that those are ready.
+  if (HasDependentUsers)
+    promoteToReadySet(ReadyInstructions);
+}
+
+void Scheduler::promoteToReadySet(SmallVectorImpl<InstRef> &Ready) {
+  // Scan the set of waiting instructions and promote them to the
+  // ready queue if operands are all ready.
+  unsigned RemovedElements = 0;
+  for (auto I = WaitSet.begin(), E = WaitSet.end(); I != E;) {
+    InstRef &IR = *I;
+    if (!IR)
+      break;
+
+    // Check if this instruction has become ready. If not, force a state
+    // transition by invoking method 'update()'.
+    Instruction &IS = *IR.getInstruction();
+    if (!IS.isReady())
+      IS.update();
+
+    // Check if there are still unsolved data dependencies.
+    if (!isReady(IR)) {
+      ++I;
+      continue;
+    }
+
+    Ready.emplace_back(IR);
+    ReadySet.emplace_back(IR);
+
+    IR.invalidate();
+    ++RemovedElements;
+    std::iter_swap(I, E - RemovedElements);
+  }
+
+  WaitSet.resize(WaitSet.size() - RemovedElements);
+}
+
+InstRef Scheduler::select() {
+  unsigned QueueIndex = ReadySet.size();
+  for (unsigned I = 0, E = ReadySet.size(); I != E; ++I) {
+    const InstRef &IR = ReadySet[I];
+    if (QueueIndex == ReadySet.size() ||
+        Strategy->compare(IR, ReadySet[QueueIndex])) {
+      const InstrDesc &D = IR.getInstruction()->getDesc();
+      if (Resources->canBeIssued(D))
+        QueueIndex = I;
+    }
+  }
+
+  if (QueueIndex == ReadySet.size())
+    return InstRef();
+
+  // We found an instruction to issue.
+  InstRef IR = ReadySet[QueueIndex];
+  std::swap(ReadySet[QueueIndex], ReadySet[ReadySet.size() - 1]);
+  ReadySet.pop_back();
+  return IR;
+}
+
+void Scheduler::updateIssuedSet(SmallVectorImpl<InstRef> &Executed) {
+  unsigned RemovedElements = 0;
+  for (auto I = IssuedSet.begin(), E = IssuedSet.end(); I != E;) {
+    InstRef &IR = *I;
+    if (!IR)
+      break;
+    Instruction &IS = *IR.getInstruction();
+    if (!IS.isExecuted()) {
+      LLVM_DEBUG(dbgs() << "[SCHEDULER]: Instruction #" << IR
+                        << " is still executing.\n");
+      ++I;
+      continue;
+    }
+
+    // Instruction IR has completed execution.
+    LSU.onInstructionExecuted(IR);
+    Executed.emplace_back(IR);
+    ++RemovedElements;
+    IR.invalidate();
+    std::iter_swap(I, E - RemovedElements);
+  }
+
+  IssuedSet.resize(IssuedSet.size() - RemovedElements);
+}
+
+void Scheduler::cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
+                           SmallVectorImpl<InstRef> &Executed,
+                           SmallVectorImpl<InstRef> &Ready) {
+  // Release consumed resources.
+  Resources->cycleEvent(Freed);
+
+  // Propagate the cycle event to the 'Issued' and 'Wait' sets.
+  for (InstRef &IR : IssuedSet)
+    IR.getInstruction()->cycleEvent();
+
+  updateIssuedSet(Executed);
+
+  for (InstRef &IR : WaitSet)
+    IR.getInstruction()->cycleEvent();
+
+  promoteToReadySet(Ready);
+}
+
+bool Scheduler::mustIssueImmediately(const InstRef &IR) const {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  if (Desc.isZeroLatency())
+    return true;
+  // Instructions that use an in-order dispatch/issue processor resource must
+  // be issued immediately to the pipeline(s). Any other in-order buffered
+  // resource (i.e. BufferSize=1) is consumed.
+  return Desc.MustIssueImmediately;
+}
+
+void Scheduler::dispatch(const InstRef &IR) {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  Resources->reserveBuffers(Desc.Buffers);
+
+  // If necessary, reserve queue entries in the load-store unit (LSU).
+  bool IsMemOp = Desc.MayLoad || Desc.MayStore;
+  if (IsMemOp)
+    LSU.dispatch(IR);
+
+  if (!isReady(IR)) {
+    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR << " to the WaitSet\n");
+    WaitSet.push_back(IR);
+    return;
+  }
+
+  // Don't add a zero-latency instruction to the Ready queue.
+  // A zero-latency instruction doesn't consume any scheduler resources,
+  // because it doesn't need to be executed: it is often removed at the
+  // register renaming stage. For example, register-register moves are often
+  // optimized at that stage by simply updating register aliases. On some
+  // targets, zero-idiom instructions (for example: an xor that clears the
+  // value of a register) are treated specially too, and are often eliminated
+  // during renaming.
+  if (!mustIssueImmediately(IR)) {
+    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR << " to the ReadySet\n");
+    ReadySet.push_back(IR);
+  }
+}
+
+bool Scheduler::isReady(const InstRef &IR) const {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  bool IsMemOp = Desc.MayLoad || Desc.MayStore;
+  return IR.getInstruction()->isReady() && (!IsMemOp || LSU.isReady(IR));
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/lib/MCA/InstrBuilder.cpp b/lib/MCA/InstrBuilder.cpp
new file mode 100644
index 0000000..d2d65e5
--- /dev/null
+++ b/lib/MCA/InstrBuilder.cpp
@@ -0,0 +1,698 @@
+//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements the InstrBuilder interface.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/InstrBuilder.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
+                           const llvm::MCInstrInfo &mcii,
+                           const llvm::MCRegisterInfo &mri,
+                           const llvm::MCInstrAnalysis *mcia)
+    : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
+      FirstReturnInst(true) {
+  const MCSchedModel &SM = STI.getSchedModel();
+  ProcResourceMasks.resize(SM.getNumProcResourceKinds());
+  computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
+}
+
+static void initializeUsedResources(InstrDesc &ID,
+                                    const MCSchedClassDesc &SCDesc,
+                                    const MCSubtargetInfo &STI,
+                                    ArrayRef<uint64_t> ProcResourceMasks) {
+  const MCSchedModel &SM = STI.getSchedModel();
+
+  // Populate resources consumed.
+  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
+  std::vector<ResourcePlusCycles> Worklist;
+
+  // Track cycles contributed by resources that are in a "Super" relationship.
+  // This is required if we want to correctly match the behavior of method
+  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
+  // of "consumed" processor resources and resource cycles, the logic in
+  // ExpandProcResource() doesn't update the number of resource cycles
+  // contributed by a "Super" resource to a group.
+  // We need to take this into account when we find that a processor resource is
+  // part of a group, and it is also used as the "Super" of other resources.
+  // This map stores the number of cycles contributed by sub-resources that are
+  // part of a "Super" resource. The key value is the "Super" resource mask ID.
+  DenseMap<uint64_t, unsigned> SuperResources;
+
+  unsigned NumProcResources = SM.getNumProcResourceKinds();
+  APInt Buffers(NumProcResources, 0);
+
+  bool AllInOrderResources = true;
+  bool AnyDispatchHazards = false;
+  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
+    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
+    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
+    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
+    if (PR.BufferSize < 0) {
+      AllInOrderResources = false;
+    } else {
+      Buffers.setBit(PRE->ProcResourceIdx);
+      AnyDispatchHazards |= (PR.BufferSize == 0);
+      AllInOrderResources &= (PR.BufferSize <= 1);
+    }
+
+    CycleSegment RCy(0, PRE->Cycles, false);
+    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
+    if (PR.SuperIdx) {
+      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
+      SuperResources[Super] += PRE->Cycles;
+    }
+  }
+
+  ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;
+
+  // Sort elements by mask popcount, so that we prioritize resource units over
+  // resource groups, and smaller groups over larger groups.
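+  // Illustrative example (hypothetical masks): {0b0111, 0b0001, 0b0010} is
+  // sorted into {0b0001, 0b0010, 0b0111}, so both units are processed before
+  // the group that contains them.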
+  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
+    unsigned popcntA = countPopulation(A.first);
+    unsigned popcntB = countPopulation(B.first);
+    if (popcntA < popcntB)
+      return true;
+    if (popcntA > popcntB)
+      return false;
+    return A.first < B.first;
+  });
+
+  uint64_t UsedResourceUnits = 0;
+
+  // Remove cycles contributed by smaller resources.
+  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
+    ResourcePlusCycles &A = Worklist[I];
+    if (!A.second.size()) {
+      A.second.NumUnits = 0;
+      A.second.setReserved();
+      ID.Resources.emplace_back(A);
+      continue;
+    }
+
+    ID.Resources.emplace_back(A);
+    uint64_t NormalizedMask = A.first;
+    if (countPopulation(A.first) == 1) {
+      UsedResourceUnits |= A.first;
+    } else {
+      // Remove the leading 1 from the resource group mask.
+      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
+    }
+
+    for (unsigned J = I + 1; J < E; ++J) {
+      ResourcePlusCycles &B = Worklist[J];
+      if ((NormalizedMask & B.first) == NormalizedMask) {
+        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
+        if (countPopulation(B.first) > 1)
+          B.second.NumUnits++;
+      }
+    }
+  }
+
+  // A SchedWrite may specify a number of cycles in which a resource group
+  // is reserved. For example (on target x86; cpu Haswell):
+  //
+  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
+  //    let ResourceCycles = [2, 2, 3];
+  //  }
+  //
+  // This means:
+  // Resource units HWPort0 and HWPort1 are both used for 2cy.
+  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
+  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
+  // will not be usable for 2 entire cycles from instruction issue.
+  //
+  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
+  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
+  // extra delay on top of the 2-cycle latency.
+  // During those extra cycles, HWPort01 is not usable by other instructions.
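+  //
+  // For the example above: once the leading bit of HWPort01's group mask is
+  // removed, the remaining bits are exactly HWPort0 | HWPort1; both are in
+  // UsedResourceUnits, so HWPort01 is marked as reserved by the loop below.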
+  for (ResourcePlusCycles &RPC : ID.Resources) {
+    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
+      // Remove the leading 1 from the resource group mask.
+      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
+      if ((Mask & UsedResourceUnits) == Mask)
+        RPC.second.setReserved();
+    }
+  }
+
+  // Identify extra buffers that are consumed through super resources.
+  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
+    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
+      const MCProcResourceDesc &PR = *SM.getProcResource(I);
+      if (PR.BufferSize == -1)
+        continue;
+
+      uint64_t Mask = ProcResourceMasks[I];
+      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
+        Buffers.setBit(I);
+    }
+  }
+
+  // Now set the buffers.
+  if (unsigned NumBuffers = Buffers.countPopulation()) {
+    ID.Buffers.resize(NumBuffers);
+    for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
+      if (Buffers[I]) {
+        --NumBuffers;
+        ID.Buffers[NumBuffers] = ProcResourceMasks[I];
+      }
+    }
+  }
+
+  LLVM_DEBUG({
+    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
+      dbgs() << "\t\tMask=" << format_hex(R.first, 16) << ", "
+             << "cy=" << R.second.size() << '\n';
+    for (const uint64_t R : ID.Buffers)
+      dbgs() << "\t\tBuffer Mask=" << format_hex(R, 16) << '\n';
+  });
+}
+
+static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
+                              const MCSchedClassDesc &SCDesc,
+                              const MCSubtargetInfo &STI) {
+  if (MCDesc.isCall()) {
+    // We cannot estimate how long this call will take.
+    // Artificially set an arbitrarily high latency (100cy).
+    ID.MaxLatency = 100U;
+    return;
+  }
+
+  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
+  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
+  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
+}
+
+static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
+  // Count register definitions, and skip non-register operands in the process.
+  unsigned I, E;
+  unsigned NumExplicitDefs = MCDesc.getNumDefs();
+  for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
+    const MCOperand &Op = MCI.getOperand(I);
+    if (Op.isReg())
+      --NumExplicitDefs;
+  }
+
+  if (NumExplicitDefs) {
+    return make_error<InstructionError<MCInst>>(
+        "Expected more register operand definitions.", MCI);
+  }
+
+  if (MCDesc.hasOptionalDef()) {
+    // Always assume that the optional definition is the last operand.
+    const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
+    if (I == MCI.getNumOperands() || !Op.isReg()) {
+      std::string Message =
+          "expected a register operand for an optional definition. Instruction "
+          "has not been correctly analyzed.";
+      return make_error<InstructionError<MCInst>>(Message, MCI);
+    }
+  }
+
+  return ErrorSuccess();
+}
+
+void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
+                                  unsigned SchedClassID) {
+  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
+  const MCSchedModel &SM = STI.getSchedModel();
+  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
+
+  // Assumptions made by this algorithm:
+  //  1. The number of explicit and implicit register definitions in a MCInst
+  //     matches the number of explicit and implicit definitions according to
+  //     the opcode descriptor (MCInstrDesc).
+  //  2. Uses start at index #(MCDesc.getNumDefs()).
+  //  3. There can only be a single optional register definition, and it is
+  //     always the last operand of the sequence (excluding extra operands
+  //     contributed by variadic opcodes).
+  //
+  // These assumptions work quite well for most out-of-order in-tree targets
+  // like x86. This is mainly because the vast majority of instructions are
+  // expanded to MCInst using a straightforward lowering logic that preserves
+  // the ordering of the operands.
+  //
+  // About assumption 1.
+  // The algorithm allows non-register operands between register operand
+  // definitions. This helps to handle some special ARM instructions with
+  // implicit operand increment (-mtriple=armv7):
+  //
+  // vld1.32  {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
+  //                             @  <MCOperand Reg:59>
+  //                             @  <MCOperand Imm:0>     (!!)
+  //                             @  <MCOperand Reg:67>
+  //                             @  <MCOperand Imm:0>
+  //                             @  <MCOperand Imm:14>
+  //                             @  <MCOperand Reg:0>>
+  //
+  // MCDesc reports:
+  //  6 explicit operands.
+  //  1 optional definition
+  //  2 explicit definitions (!!)
+  //
+  // The presence of an 'Imm' operand between the two register definitions
+  // breaks the assumption that "register definitions are always at the
+  // beginning of the operand sequence".
+  //
+  // To work around this issue, this algorithm ignores (i.e. skips) any
+  // non-register operands between register definitions.  The optional
+  // definition is still at index #(NumOperands-1).
+  //
+  // According to assumption 2, register reads start at #(NumExplicitDefs-1).
+  // That means register R1 from the example is both read and written.
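+  //
+  // Illustrative layout (hypothetical counts): with 1 explicit definition,
+  // 1 implicit definition, an optional definition, and 2 variadic operands,
+  // ID.Writes is sized for 5 entries and is populated in the order
+  // [explicit, implicit, optional, variadic...] before the final resize.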
+  unsigned NumExplicitDefs = MCDesc.getNumDefs();
+  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
+  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
+  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
+  if (MCDesc.hasOptionalDef())
+    TotalDefs++;
+
+  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
+  ID.Writes.resize(TotalDefs + NumVariadicOps);
+  // Iterate over the operands list, and skip non-register operands.
+  // The first NumExplicitDefs register operands are expected to be register
+  // definitions.
+  unsigned CurrentDef = 0;
+  unsigned i = 0;
+  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
+    const MCOperand &Op = MCI.getOperand(i);
+    if (!Op.isReg())
+      continue;
+
+    WriteDescriptor &Write = ID.Writes[CurrentDef];
+    Write.OpIndex = i;
+    if (CurrentDef < NumWriteLatencyEntries) {
+      const MCWriteLatencyEntry &WLE =
+          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
+      // Conservatively default to MaxLatency.
+      Write.Latency =
+          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
+      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
+    } else {
+      // Assign a default latency for this write.
+      Write.Latency = ID.MaxLatency;
+      Write.SClassOrWriteResourceID = 0;
+    }
+    Write.IsOptionalDef = false;
+    LLVM_DEBUG({
+      dbgs() << "\t\t[Def]    OpIdx=" << Write.OpIndex
+             << ", Latency=" << Write.Latency
+             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+    });
+    CurrentDef++;
+  }
+
+  assert(CurrentDef == NumExplicitDefs &&
+         "Expected more register operand definitions.");
+  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
+    unsigned Index = NumExplicitDefs + CurrentDef;
+    WriteDescriptor &Write = ID.Writes[Index];
+    Write.OpIndex = ~CurrentDef;
+    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
+    if (Index < NumWriteLatencyEntries) {
+      const MCWriteLatencyEntry &WLE =
+          *STI.getWriteLatencyEntry(&SCDesc, Index);
+      // Conservatively default to MaxLatency.
+      Write.Latency =
+          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
+      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
+    } else {
+      // Assign a default latency for this write.
+      Write.Latency = ID.MaxLatency;
+      Write.SClassOrWriteResourceID = 0;
+    }
+
+    Write.IsOptionalDef = false;
+    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
+    LLVM_DEBUG({
+      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
+             << ", PhysReg=" << MRI.getName(Write.RegisterID)
+             << ", Latency=" << Write.Latency
+             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+    });
+  }
+
+  if (MCDesc.hasOptionalDef()) {
+    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
+    Write.OpIndex = MCDesc.getNumOperands() - 1;
+    // Assign a default latency for this write.
+    Write.Latency = ID.MaxLatency;
+    Write.SClassOrWriteResourceID = 0;
+    Write.IsOptionalDef = true;
+    LLVM_DEBUG({
+      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
+             << ", Latency=" << Write.Latency
+             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+    });
+  }
+
+  if (!NumVariadicOps)
+    return;
+
+  // FIXME: If an instruction opcode is flagged 'mayStore', and it has no
+  // 'unmodeledSideEffects', then this logic optimistically assumes that any
+  // extra register operands in the variadic sequence are not register
+  // definitions.
+  //
+  // Otherwise, we conservatively assume that any register operand from the
+  // variadic sequence is both a register read and a register write.
+  bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
+                        !MCDesc.hasUnmodeledSideEffects();
+  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
+  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
+       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
+    const MCOperand &Op = MCI.getOperand(OpIndex);
+    if (!Op.isReg())
+      continue;
+
+    WriteDescriptor &Write = ID.Writes[CurrentDef];
+    Write.OpIndex = OpIndex;
+    // Assign a default latency for this write.
+    Write.Latency = ID.MaxLatency;
+    Write.SClassOrWriteResourceID = 0;
+    Write.IsOptionalDef = false;
+    ++CurrentDef;
+    LLVM_DEBUG({
+      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
+             << ", Latency=" << Write.Latency
+             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+    });
+  }
+
+  ID.Writes.resize(CurrentDef);
+}
+
+void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
+                                 unsigned SchedClassID) {
+  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
+  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
+  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
+  // Remove the optional definition.
+  if (MCDesc.hasOptionalDef())
+    --NumExplicitUses;
+  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
+  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
+  ID.Reads.resize(TotalUses);
+  unsigned CurrentUse = 0;
+  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
+       ++I, ++OpIndex) {
+    const MCOperand &Op = MCI.getOperand(OpIndex);
+    if (!Op.isReg())
+      continue;
+
+    ReadDescriptor &Read = ID.Reads[CurrentUse];
+    Read.OpIndex = OpIndex;
+    Read.UseIndex = I;
+    Read.SchedClassID = SchedClassID;
+    ++CurrentUse;
+    LLVM_DEBUG(dbgs() << "\t\t[Use]    OpIdx=" << Read.OpIndex
+                      << ", UseIndex=" << Read.UseIndex << '\n');
+  }
+
+  // For the purpose of ReadAdvance, implicit uses come directly after explicit
+  // uses. The "UseIndex" must be updated according to that implicit layout.
+  for (unsigned I = 0; I < NumImplicitUses; ++I) {
+    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
+    Read.OpIndex = ~I;
+    Read.UseIndex = NumExplicitUses + I;
+    Read.RegisterID = MCDesc.getImplicitUses()[I];
+    Read.SchedClassID = SchedClassID;
+    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
+                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
+                      << MRI.getName(Read.RegisterID) << '\n');
+  }
+
+  CurrentUse += NumImplicitUses;
+
+  // FIXME: If an instruction opcode is marked as 'mayLoad', and it has no
+  // 'unmodeledSideEffects', then this logic optimistically assumes that any
+  // extra register operands in the variadic sequence are register
+  // definitions, and not register reads.
+
+  bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
+                        !MCDesc.hasUnmodeledSideEffects();
+  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
+       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
+    const MCOperand &Op = MCI.getOperand(OpIndex);
+    if (!Op.isReg())
+      continue;
+
+    ReadDescriptor &Read = ID.Reads[CurrentUse];
+    Read.OpIndex = OpIndex;
+    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
+    Read.SchedClassID = SchedClassID;
+    ++CurrentUse;
+    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
+                      << ", UseIndex=" << Read.UseIndex << '\n');
+  }
+
+  ID.Reads.resize(CurrentUse);
+}
+
+Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
+                                    const MCInst &MCI) const {
+  if (ID.NumMicroOps != 0)
+    return ErrorSuccess();
+
+  bool UsesMemory = ID.MayLoad || ID.MayStore;
+  bool UsesBuffers = !ID.Buffers.empty();
+  bool UsesResources = !ID.Resources.empty();
+  if (!UsesMemory && !UsesBuffers && !UsesResources)
+    return ErrorSuccess();
+
+  StringRef Message;
+  if (UsesMemory) {
+    Message = "found an inconsistent instruction that decodes "
+              "into zero opcodes and that consumes load/store "
+              "unit resources.";
+  } else {
+    Message = "found an inconsistent instruction that decodes "
+              "to zero opcodes and that consumes scheduler "
+              "resources.";
+  }
+
+  return make_error<InstructionError<MCInst>>(Message, MCI);
+}
+
+Expected<const InstrDesc &>
+InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
+  assert(STI.getSchedModel().hasInstrSchedModel() &&
+         "Itineraries are not yet supported!");
+
+  // Obtain the instruction descriptor from the opcode.
+  unsigned short Opcode = MCI.getOpcode();
+  const MCInstrDesc &MCDesc = MCII.get(Opcode);
+  const MCSchedModel &SM = STI.getSchedModel();
+
+  // Then obtain the scheduling class information from the instruction.
+  unsigned SchedClassID = MCDesc.getSchedClass();
+  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();
+
+  // Try to solve variant scheduling classes.
+  if (IsVariant) {
+    unsigned CPUID = SM.getProcessorID();
+    while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
+      SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);
+
+    if (!SchedClassID) {
+      return make_error<InstructionError<MCInst>>(
+          "unable to resolve scheduling class for write variant.", MCI);
+    }
+  }
+
+  // Check if this instruction is supported. Otherwise, report an error.
+  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
+  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
+    return make_error<InstructionError<MCInst>>(
+        "found an unsupported instruction in the input assembly sequence.",
+        MCI);
+  }
+
+  LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
+  LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');
+
+  // Create a new empty descriptor.
+  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
+  ID->NumMicroOps = SCDesc.NumMicroOps;
+
+  if (MCDesc.isCall() && FirstCallInst) {
+    // We don't correctly model calls.
+    WithColor::warning() << "found a call in the input assembly sequence.\n";
+    WithColor::note() << "call instructions are not correctly modeled. "
+                      << "Assume a latency of 100cy.\n";
+    FirstCallInst = false;
+  }
+
+  if (MCDesc.isReturn() && FirstReturnInst) {
+    WithColor::warning() << "found a return instruction in the input"
+                         << " assembly sequence.\n";
+    WithColor::note() << "program counter updates are ignored.\n";
+    FirstReturnInst = false;
+  }
+
+  ID->MayLoad = MCDesc.mayLoad();
+  ID->MayStore = MCDesc.mayStore();
+  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
+  ID->BeginGroup = SCDesc.BeginGroup;
+  ID->EndGroup = SCDesc.EndGroup;
+
+  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
+  computeMaxLatency(*ID, MCDesc, SCDesc, STI);
+
+  if (Error Err = verifyOperands(MCDesc, MCI))
+    return std::move(Err);
+
+  populateWrites(*ID, MCI, SchedClassID);
+  populateReads(*ID, MCI, SchedClassID);
+
+  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
+  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
+
+  // Sanity check on the instruction descriptor.
+  if (Error Err = verifyInstrDesc(*ID, MCI))
+    return std::move(Err);
+
+  // Now add the new descriptor.
+  SchedClassID = MCDesc.getSchedClass();
+  bool IsVariadic = MCDesc.isVariadic();
+  if (!IsVariadic && !IsVariant) {
+    Descriptors[MCI.getOpcode()] = std::move(ID);
+    return *Descriptors[MCI.getOpcode()];
+  }
+
+  VariantDescriptors[&MCI] = std::move(ID);
+  return *VariantDescriptors[&MCI];
+}
+
+Expected<const InstrDesc &>
+InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
+  if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
+    return *Descriptors[MCI.getOpcode()];
+
+  if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
+    return *VariantDescriptors[&MCI];
+
+  return createInstrDescImpl(MCI);
+}
+
+Expected<std::unique_ptr<Instruction>>
+InstrBuilder::createInstruction(const MCInst &MCI) {
+  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
+  if (!DescOrErr)
+    return DescOrErr.takeError();
+  const InstrDesc &D = *DescOrErr;
+  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);
+
+  // Check if this is a dependency breaking instruction.
+  APInt Mask;
+
+  bool IsZeroIdiom = false;
+  bool IsDepBreaking = false;
+  if (MCIA) {
+    unsigned ProcID = STI.getSchedModel().getProcessorID();
+    IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
+    IsDepBreaking =
+        IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
+    if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
+      NewIS->setOptimizableMove();
+  }
+
+  // Initialize Reads first.
+  for (const ReadDescriptor &RD : D.Reads) {
+    int RegID = -1;
+    if (!RD.isImplicitRead()) {
+      // explicit read.
+      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
+      // Skip non-register operands.
+      if (!Op.isReg())
+        continue;
+      RegID = Op.getReg();
+    } else {
+      // Implicit read.
+      RegID = RD.RegisterID;
+    }
+
+    // Skip invalid register operands.
+    if (!RegID)
+      continue;
+
+    // Okay, this is a register operand. Create a ReadState for it.
+    assert(RegID > 0 && "Invalid register ID found!");
+    NewIS->getUses().emplace_back(RD, RegID);
+    ReadState &RS = NewIS->getUses().back();
+
+    if (IsDepBreaking) {
+      // A mask of all zeroes means: explicit input operands are not
+      // independent.
+      if (Mask.isNullValue()) {
+        if (!RD.isImplicitRead())
+          RS.setIndependentFromDef();
+      } else {
+        // Check if this register operand is independent according to `Mask`.
+        // Note that Mask may not have enough bits to describe all explicit and
+        // implicit input operands. If this register operand doesn't have a
+        // corresponding bit in Mask, then conservatively assume that it is
+        // dependent.
+        if (Mask.getBitWidth() > RD.UseIndex) {
+          // Okay. This mask describes register use `RD.UseIndex`.
+          if (Mask[RD.UseIndex])
+            RS.setIndependentFromDef();
+        }
+      }
+    }
+  }
+
+  // Early exit if there are no writes.
+  if (D.Writes.empty())
+    return std::move(NewIS);
+
+  // Track register writes that implicitly clear the upper portion of the
+  // underlying super-registers using an APInt.
+  APInt WriteMask(D.Writes.size(), 0);
+
+  // Now query the MCInstrAnalysis object to obtain information about which
+  // register writes implicitly clear the upper portion of a super-register.
+  if (MCIA)
+    MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);
+
+  // Initialize writes.
+  unsigned WriteIndex = 0;
+  for (const WriteDescriptor &WD : D.Writes) {
+    unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
+                                          : MCI.getOperand(WD.OpIndex).getReg();
+    // Check if this is an optional definition that references NoReg.
+    if (WD.IsOptionalDef && !RegID) {
+      ++WriteIndex;
+      continue;
+    }
+
+    assert(RegID && "Expected a valid register ID!");
+    NewIS->getDefs().emplace_back(WD, RegID,
+                                  /* ClearsSuperRegs */ WriteMask[WriteIndex],
+                                  /* WritesZero */ IsZeroIdiom);
+    ++WriteIndex;
+  }
+
+  return std::move(NewIS);
+}
+} // namespace mca
+} // namespace llvm
diff --git a/lib/MCA/Instruction.cpp b/lib/MCA/Instruction.cpp
new file mode 100644
index 0000000..057e95c
--- /dev/null
+++ b/lib/MCA/Instruction.cpp
@@ -0,0 +1,205 @@
+//===--------------------- Instruction.cpp ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines abstractions used by the Pipeline to model register reads,
+// register writes and instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Instruction.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace mca {
+
+void ReadState::writeStartEvent(unsigned Cycles) {
+  assert(DependentWrites);
+  assert(CyclesLeft == UNKNOWN_CYCLES);
+
+  // This read may be dependent on more than one write. This typically occurs
+  // when a definition is the result of multiple writes where at least one
+  // write does a partial register update.
+  // The HW is forced to do some extra bookkeeping to keep track of all the
+  // dependent writes, and implement a merging scheme for the partial writes.
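+  //
+  // Illustrative example (hypothetical latencies): if this read depends on
+  // two writes that signal 3 and 5 cycles respectively, TotalCycles becomes
+  // max(3, 5) = 5, and CyclesLeft is only set (to 5) when the second event
+  // arrives.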
+  --DependentWrites;
+  TotalCycles = std::max(TotalCycles, Cycles);
+
+  if (!DependentWrites) {
+    CyclesLeft = TotalCycles;
+    IsReady = !CyclesLeft;
+  }
+}
+
+void WriteState::onInstructionIssued() {
+  assert(CyclesLeft == UNKNOWN_CYCLES);
+  // Update the number of cycles left based on the WriteDescriptor info.
+  CyclesLeft = getLatency();
+
+  // Now that the time left before write-back is known, notify
+  // all the users.
+  for (const std::pair<ReadState *, int> &User : Users) {
+    ReadState *RS = User.first;
+    unsigned ReadCycles = std::max(0, CyclesLeft - User.second);
+    RS->writeStartEvent(ReadCycles);
+  }
+
+  // Notify any writes that are in a false dependency with this write.
+  if (PartialWrite)
+    PartialWrite->writeStartEvent(CyclesLeft);
+}
+
+void WriteState::addUser(ReadState *User, int ReadAdvance) {
+  // If CyclesLeft is different from UNKNOWN_CYCLES (i.e. -1), then we don't
+  // need to update the list of users. We can just notify the user with the
+  // actual number of cycles left (which may be zero).
+  if (CyclesLeft != UNKNOWN_CYCLES) {
+    unsigned ReadCycles = std::max(0, CyclesLeft - ReadAdvance);
+    User->writeStartEvent(ReadCycles);
+    return;
+  }
+
+  if (llvm::find_if(Users, [&User](const std::pair<ReadState *, int> &Use) {
+        return Use.first == User;
+      }) == Users.end()) {
+    Users.emplace_back(User, ReadAdvance);
+  }
+}
+
+void WriteState::addUser(WriteState *User) {
+  if (CyclesLeft != UNKNOWN_CYCLES) {
+    User->writeStartEvent(std::max(0, CyclesLeft));
+    return;
+  }
+
+  assert(!PartialWrite && "PartialWrite already set!");
+  PartialWrite = User;
+  User->setDependentWrite(this);
+}
+
+void WriteState::cycleEvent() {
+  // Note: CyclesLeft can be a negative number. It is an error to
+  // make it an unsigned quantity because users of this write may
+  // specify a negative ReadAdvance.
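+  //
+  // Illustrative example (hypothetical values): with CyclesLeft = -1, a user
+  // whose ReadAdvance is -3 still observes max(0, -1 - (-3)) = 2 cycles in
+  // addUser(); clamping CyclesLeft at zero would lose that information.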
+  if (CyclesLeft != UNKNOWN_CYCLES)
+    CyclesLeft--;
+
+  if (DependentWriteCyclesLeft)
+    DependentWriteCyclesLeft--;
+}
+
+void ReadState::cycleEvent() {
+  // Update the total number of cycles.
+  if (DependentWrites && TotalCycles) {
+    --TotalCycles;
+    return;
+  }
+
+  // Bail out immediately if we don't know how many cycles are left.
+  if (CyclesLeft == UNKNOWN_CYCLES)
+    return;
+
+  if (CyclesLeft) {
+    --CyclesLeft;
+    IsReady = !CyclesLeft;
+  }
+}
+
+#ifndef NDEBUG
+void WriteState::dump() const {
+  dbgs() << "{ OpIdx=" << WD->OpIndex << ", Lat=" << getLatency() << ", RegID "
+         << getRegisterID() << ", Cycles Left=" << getCyclesLeft() << " }";
+}
+
+void WriteRef::dump() const {
+  dbgs() << "IID=" << getSourceIndex() << ' ';
+  if (isValid())
+    getWriteState()->dump();
+  else
+    dbgs() << "(null)";
+}
+#endif
+
+void Instruction::dispatch(unsigned RCUToken) {
+  assert(Stage == IS_INVALID);
+  Stage = IS_AVAILABLE;
+  RCUTokenID = RCUToken;
+
+  // Check if input operands are already available.
+  update();
+}
+
+void Instruction::execute() {
+  assert(Stage == IS_READY);
+  Stage = IS_EXECUTING;
+
+  // Set the cycles left before the write-back stage.
+  CyclesLeft = getLatency();
+
+  for (WriteState &WS : getDefs())
+    WS.onInstructionIssued();
+
+  // Transition to the "executed" stage if this is a zero-latency instruction.
+  if (!CyclesLeft)
+    Stage = IS_EXECUTED;
+}
+
+void Instruction::forceExecuted() {
+  assert(Stage == IS_READY && "Invalid internal state!");
+  CyclesLeft = 0;
+  Stage = IS_EXECUTED;
+}
+
+void Instruction::update() {
+  assert(isDispatched() && "Unexpected instruction stage found!");
+
+  if (!all_of(getUses(), [](const ReadState &Use) { return Use.isReady(); }))
+    return;
+
+  // A partial register write cannot complete before a dependent write.
+  auto IsDefReady = [&](const WriteState &Def) {
+    if (!Def.getDependentWrite()) {
+      unsigned CyclesLeft = Def.getDependentWriteCyclesLeft();
+      return !CyclesLeft || CyclesLeft < getLatency();
+    }
+    return false;
+  };
+
+  if (all_of(getDefs(), IsDefReady))
+    Stage = IS_READY;
+}
+
+void Instruction::cycleEvent() {
+  if (isReady())
+    return;
+
+  if (isDispatched()) {
+    for (ReadState &Use : getUses())
+      Use.cycleEvent();
+
+    for (WriteState &Def : getDefs())
+      Def.cycleEvent();
+
+    update();
+    return;
+  }
+
+  assert(isExecuting() && "Instruction not in-flight?");
+  assert(CyclesLeft && "Instruction already executed?");
+  for (WriteState &Def : getDefs())
+    Def.cycleEvent();
+  CyclesLeft--;
+  if (!CyclesLeft)
+    Stage = IS_EXECUTED;
+}
+
+const unsigned WriteRef::INVALID_IID = std::numeric_limits<unsigned>::max();
+
+} // namespace mca
+} // namespace llvm
diff --git a/tools/llvm-mca/lib/LLVMBuild.txt b/lib/MCA/LLVMBuild.txt
similarity index 100%
rename from tools/llvm-mca/lib/LLVMBuild.txt
rename to lib/MCA/LLVMBuild.txt
diff --git a/lib/MCA/Pipeline.cpp b/lib/MCA/Pipeline.cpp
new file mode 100644
index 0000000..4c0e37c
--- /dev/null
+++ b/lib/MCA/Pipeline.cpp
@@ -0,0 +1,97 @@
+//===--------------------- Pipeline.cpp -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements an ordered container of stages that simulate the
+/// pipeline of a hardware backend.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Pipeline.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
+void Pipeline::addEventListener(HWEventListener *Listener) {
+  if (Listener)
+    Listeners.insert(Listener);
+  for (auto &S : Stages)
+    S->addListener(Listener);
+}
+
+bool Pipeline::hasWorkToProcess() {
+  return any_of(Stages, [](const std::unique_ptr<Stage> &S) {
+    return S->hasWorkToComplete();
+  });
+}
+
+Expected<unsigned> Pipeline::run() {
+  assert(!Stages.empty() && "Unexpected empty pipeline found!");
+
+  do {
+    notifyCycleBegin();
+    if (Error Err = runCycle())
+      return std::move(Err);
+    notifyCycleEnd();
+    ++Cycles;
+  } while (hasWorkToProcess());
+
+  return Cycles;
+}
+
+Error Pipeline::runCycle() {
+  Error Err = ErrorSuccess();
+  // Update stages before we start processing new instructions.
+  for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I) {
+    const std::unique_ptr<Stage> &S = *I;
+    Err = S->cycleStart();
+  }
+
+  // Now fetch and execute new instructions.
+  InstRef IR;
+  Stage &FirstStage = *Stages[0];
+  while (!Err && FirstStage.isAvailable(IR))
+    Err = FirstStage.execute(IR);
+
+  // Update stages in preparation for a new cycle.
+  for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I) {
+    const std::unique_ptr<Stage> &S = *I;
+    Err = S->cycleEnd();
+  }
+
+  return Err;
+}
+
+void Pipeline::appendStage(std::unique_ptr<Stage> S) {
+  assert(S && "Invalid null stage in input!");
+  if (!Stages.empty()) {
+    Stage *Last = Stages.back().get();
+    Last->setNextInSequence(S.get());
+  }
+
+  Stages.push_back(std::move(S));
+}
+
+void Pipeline::notifyCycleBegin() {
+  LLVM_DEBUG(dbgs() << "\n[E] Cycle begin: " << Cycles << '\n');
+  for (HWEventListener *Listener : Listeners)
+    Listener->onCycleBegin();
+}
+
+void Pipeline::notifyCycleEnd() {
+  LLVM_DEBUG(dbgs() << "[E] Cycle end: " << Cycles << "\n");
+  for (HWEventListener *Listener : Listeners)
+    Listener->onCycleEnd();
+}
+} // namespace mca
+} // namespace llvm
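The run loop above is the whole simulation driver: every cycle it walks the stage list in reverse for cycleStart(), drains the first stage while it has instructions to offer, then walks in reverse again for cycleEnd(). A compilable toy version of that contract, with llvm::Error propagation elided and a hypothetical ToyStage interface standing in for mca::Stage:

#include <memory>
#include <vector>

struct ToyStage {
  virtual ~ToyStage() = default;
  virtual void cycleStart() {}
  virtual void cycleEnd() {}
  virtual bool hasWorkToComplete() const { return false; }
};

struct ToyPipeline {
  std::vector<std::unique_ptr<ToyStage>> Stages;
  unsigned Cycles = 0;

  bool hasWorkToProcess() const {
    for (const auto &S : Stages)
      if (S->hasWorkToComplete())
        return true;
    return false;
  }

  unsigned run() {
    do {
      // Update stages before processing new instructions (reverse order).
      for (auto I = Stages.rbegin(), E = Stages.rend(); I != E; ++I)
        (*I)->cycleStart();
      // ... fetch from Stages[0] and push instructions down the chain ...
      for (auto I = Stages.rbegin(), E = Stages.rend(); I != E; ++I)
        (*I)->cycleEnd();
      ++Cycles;
    } while (hasWorkToProcess());
    return Cycles;
  }
};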
diff --git a/lib/MCA/Stages/DispatchStage.cpp b/lib/MCA/Stages/DispatchStage.cpp
new file mode 100644
index 0000000..7fb4eb6
--- /dev/null
+++ b/lib/MCA/Stages/DispatchStage.cpp
@@ -0,0 +1,193 @@
+//===--------------------- DispatchStage.cpp --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file models the dispatch component of an instruction pipeline.
+///
+/// The DispatchStage is responsible for updating instruction dependencies
+/// and communicating to the simulated instruction scheduler that an instruction
+/// is ready to be scheduled for execution.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/DispatchStage.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
+                                                ArrayRef<unsigned> UsedRegs,
+                                                unsigned UOps) const {
+  LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
+  notifyEvent<HWInstructionEvent>(
+      HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
+}
+
+bool DispatchStage::checkPRF(const InstRef &IR) const {
+  SmallVector<unsigned, 4> RegDefs;
+  for (const WriteState &RegDef : IR.getInstruction()->getDefs())
+    RegDefs.emplace_back(RegDef.getRegisterID());
+
+  const unsigned RegisterMask = PRF.isAvailable(RegDefs);
+  // A mask of all zeroes means that the register files are available.
+  if (RegisterMask) {
+    notifyEvent<HWStallEvent>(
+        HWStallEvent(HWStallEvent::RegisterFileStall, IR));
+    return false;
+  }
+
+  return true;
+}
+
+bool DispatchStage::checkRCU(const InstRef &IR) const {
+  const unsigned NumMicroOps = IR.getInstruction()->getDesc().NumMicroOps;
+  if (RCU.isAvailable(NumMicroOps))
+    return true;
+  notifyEvent<HWStallEvent>(
+      HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
+  return false;
+}
+
+bool DispatchStage::canDispatch(const InstRef &IR) const {
+  return checkRCU(IR) && checkPRF(IR) && checkNextStage(IR);
+}
+
+void DispatchStage::updateRAWDependencies(ReadState &RS,
+                                          const MCSubtargetInfo &STI) {
+  SmallVector<WriteRef, 4> DependentWrites;
+
+  // Collect all the dependent writes, and update RS internal state.
+  PRF.addRegisterRead(RS, DependentWrites);
+
+  // We know that this read depends on all the writes in DependentWrites.
+  // For each write, check if we have ReadAdvance information, and use it
+  // to figure out in how many cycles this read becomes available.
+  const ReadDescriptor &RD = RS.getDescriptor();
+  const MCSchedModel &SM = STI.getSchedModel();
+  const MCSchedClassDesc *SC = SM.getSchedClassDesc(RD.SchedClassID);
+  for (WriteRef &WR : DependentWrites) {
+    WriteState &WS = *WR.getWriteState();
+    unsigned WriteResID = WS.getWriteResourceID();
+    int ReadAdvance = STI.getReadAdvanceCycles(SC, RD.UseIndex, WriteResID);
+    WS.addUser(&RS, ReadAdvance);
+  }
+}
+
+Error DispatchStage::dispatch(InstRef IR) {
+  assert(!CarryOver && "Cannot dispatch another instruction!");
+  Instruction &IS = *IR.getInstruction();
+  const InstrDesc &Desc = IS.getDesc();
+  const unsigned NumMicroOps = Desc.NumMicroOps;
+  if (NumMicroOps > DispatchWidth) {
+    assert(AvailableEntries == DispatchWidth);
+    AvailableEntries = 0;
+    CarryOver = NumMicroOps - DispatchWidth;
+    CarriedOver = IR;
+  } else {
+    assert(AvailableEntries >= NumMicroOps);
+    AvailableEntries -= NumMicroOps;
+  }
+
+  // Check if this instruction ends the dispatch group.
+  if (Desc.EndGroup)
+    AvailableEntries = 0;
+
+  // Check if this is an optimizable reg-reg move.
+  bool IsEliminated = false;
+  if (IS.isOptimizableMove()) {
+    assert(IS.getDefs().size() == 1 && "Expected a single output!");
+    assert(IS.getUses().size() == 1 && "Expected a single input!");
+    IsEliminated = PRF.tryEliminateMove(IS.getDefs()[0], IS.getUses()[0]);
+  }
+
+  // A dependency-breaking instruction doesn't have to wait on its register
+  // input operands, and it is often optimized out at the register renaming
+  // stage. Update RAW dependencies only if this instruction is not
+  // dependency-breaking. A dependency-breaking instruction is a zero-latency
+  // instruction that doesn't consume hardware resources.
+  // An example of a dependency-breaking instruction on X86 is a zero-idiom
+  // XOR.
+  //
+  // We also don't update data dependencies for instructions that have been
+  // eliminated at register renaming stage.
+  if (!IsEliminated) {
+    for (ReadState &RS : IS.getUses())
+      updateRAWDependencies(RS, STI);
+  }
+
+  // By default, a dependency-breaking zero-idiom is expected to be optimized
+  // out at the register renaming stage. That means no physical register is
+  // allocated to the instruction.
+  SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
+  for (WriteState &WS : IS.getDefs())
+    PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);
+
+  // Reserve slots in the RCU, and notify the instruction that it has been
+  // dispatched to the schedulers for execution.
+  IS.dispatch(RCU.reserveSlot(IR, NumMicroOps));
+
+  // Notify listeners of the "instruction dispatched" event,
+  // and move IR to the next stage.
+  notifyInstructionDispatched(IR, RegisterFiles,
+                              std::min(DispatchWidth, NumMicroOps));
+  return moveToTheNextStage(IR);
+}
+
+Error DispatchStage::cycleStart() {
+  PRF.cycleStart();
+
+  if (!CarryOver) {
+    AvailableEntries = DispatchWidth;
+    return ErrorSuccess();
+  }
+
+  AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
+  unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
+  CarryOver -= DispatchedOpcodes;
+  assert(CarriedOver && "Invalid dispatched instruction");
+
+  SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
+  notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
+  if (!CarryOver)
+    CarriedOver = InstRef();
+  return ErrorSuccess();
+}
+
+bool DispatchStage::isAvailable(const InstRef &IR) const {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  unsigned Required = std::min(Desc.NumMicroOps, DispatchWidth);
+  if (Required > AvailableEntries)
+    return false;
+
+  if (Desc.BeginGroup && AvailableEntries != DispatchWidth)
+    return false;
+
+  // The dispatch logic doesn't internally buffer instructions. It only accepts
+  // instructions that can be successfully moved to the next stage during this
+  // same cycle.
+  return canDispatch(IR);
+}
+
+Error DispatchStage::execute(InstRef &IR) {
+  assert(canDispatch(IR) && "Cannot dispatch another instruction!");
+  return dispatch(IR);
+}
+
+#ifndef NDEBUG
+void DispatchStage::dump() const {
+  PRF.dump();
+  RCU.dump();
+}
+#endif
+} // namespace mca
+} // namespace llvm
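One detail worth calling out in cycleStart() above is the carry-over arithmetic for instructions wider than the dispatch group. A standalone trace of those numbers, assuming DispatchWidth = 4 and a hypothetical 10-uop instruction (this mirrors the computation; it is not library code):

#include <cstdio>

int main() {
  const unsigned DispatchWidth = 4;
  const unsigned NumMicroOps = 10;
  // dispatch(): the instruction consumes the whole group and carries over.
  unsigned CarryOver = NumMicroOps - DispatchWidth; // 6
  for (unsigned Cycle = 1; CarryOver; ++Cycle) {
    // cycleStart(): slots left after serving the carried-over instruction.
    unsigned Available =
        CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
    unsigned Dispatched = DispatchWidth - Available;
    CarryOver -= Dispatched;
    std::printf("cycle %u: %u uops dispatched, %u slots free, carry=%u\n",
                Cycle, Dispatched, Available, CarryOver);
  }
  // Prints: cycle 1: 4 uops dispatched, 0 slots free, carry=2
  //         cycle 2: 2 uops dispatched, 2 slots free, carry=0
  return 0;
}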
diff --git a/lib/MCA/Stages/EntryStage.cpp b/lib/MCA/Stages/EntryStage.cpp
new file mode 100644
index 0000000..3325bb3
--- /dev/null
+++ b/lib/MCA/Stages/EntryStage.cpp
@@ -0,0 +1,76 @@
+//===---------------------- EntryStage.cpp ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the Fetch stage of an instruction pipeline.  Its sole
+/// purpose in life is to produce instructions for the rest of the pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/EntryStage.h"
+#include "llvm/MCA/Instruction.h"
+
+namespace llvm {
+namespace mca {
+
+bool EntryStage::hasWorkToComplete() const { return CurrentInstruction; }
+
+bool EntryStage::isAvailable(const InstRef & /* unused */) const {
+  if (CurrentInstruction)
+    return checkNextStage(CurrentInstruction);
+  return false;
+}
+
+void EntryStage::getNextInstruction() {
+  assert(!CurrentInstruction && "There is already an instruction to process!");
+  if (!SM.hasNext())
+    return;
+  SourceRef SR = SM.peekNext();
+  std::unique_ptr<Instruction> Inst = llvm::make_unique<Instruction>(SR.second);
+  CurrentInstruction = InstRef(SR.first, Inst.get());
+  Instructions.emplace_back(std::move(Inst));
+  SM.updateNext();
+}
+
+llvm::Error EntryStage::execute(InstRef & /* unused */) {
+  assert(CurrentInstruction && "There is no instruction to process!");
+  if (llvm::Error Val = moveToTheNextStage(CurrentInstruction))
+    return Val;
+
+  // Move the program counter.
+  CurrentInstruction.invalidate();
+  getNextInstruction();
+  return llvm::ErrorSuccess();
+}
+
+llvm::Error EntryStage::cycleStart() {
+  if (!CurrentInstruction)
+    getNextInstruction();
+  return llvm::ErrorSuccess();
+}
+
+llvm::Error EntryStage::cycleEnd() {
+  // Find the first instruction which hasn't been retired.
+  auto Range = make_range(&Instructions[NumRetired], Instructions.end());
+  auto It = find_if(Range, [](const std::unique_ptr<Instruction> &I) {
+    return !I->isRetired();
+  });
+
+  NumRetired = std::distance(Instructions.begin(), It);
+  // Erase instructions up to the first that hasn't been retired, but only
+  // once retired instructions account for at least half of the buffer.
+  if ((NumRetired * 2) >= Instructions.size()) {
+    Instructions.erase(Instructions.begin(), It);
+    NumRetired = 0;
+  }
+
+  return llvm::ErrorSuccess();
+}
+
+} // namespace mca
+} // namespace llvm
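cycleEnd() above deliberately delays the erase: dropping retired instructions from the front of a std::vector is linear, so the buffer is only compacted once retired entries account for at least half of it, keeping the cost amortized across many retirements. A standalone sketch of that policy (ToyBuffer and compact are illustrative names):

#include <memory>
#include <vector>

using ToyBuffer = std::vector<std::unique_ptr<int>>;

// Compact only when at least half of the buffer has retired.
void compact(ToyBuffer &Instructions, size_t &NumRetired) {
  if (NumRetired * 2 >= Instructions.size()) {
    Instructions.erase(Instructions.begin(),
                       Instructions.begin() + NumRetired);
    NumRetired = 0;
  }
}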
diff --git a/lib/MCA/Stages/ExecuteStage.cpp b/lib/MCA/Stages/ExecuteStage.cpp
new file mode 100644
index 0000000..e783277
--- /dev/null
+++ b/lib/MCA/Stages/ExecuteStage.cpp
@@ -0,0 +1,225 @@
+//===---------------------- ExecuteStage.cpp --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the execution stage of an instruction pipeline.
+///
+/// The ExecuteStage is responsible for managing the hardware scheduler
+/// and issuing notifications that an instruction has been executed.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/ExecuteStage.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+HWStallEvent::GenericEventType toHWStallEventType(Scheduler::Status Status) {
+  switch (Status) {
+  case Scheduler::SC_LOAD_QUEUE_FULL:
+    return HWStallEvent::LoadQueueFull;
+  case Scheduler::SC_STORE_QUEUE_FULL:
+    return HWStallEvent::StoreQueueFull;
+  case Scheduler::SC_BUFFERS_FULL:
+    return HWStallEvent::SchedulerQueueFull;
+  case Scheduler::SC_DISPATCH_GROUP_STALL:
+    return HWStallEvent::DispatchGroupStall;
+  case Scheduler::SC_AVAILABLE:
+    return HWStallEvent::Invalid;
+  }
+
+  llvm_unreachable("Don't know how to process this StallKind!");
+}
+
+bool ExecuteStage::isAvailable(const InstRef &IR) const {
+  if (Scheduler::Status S = HWS.isAvailable(IR)) {
+    HWStallEvent::GenericEventType ET = toHWStallEventType(S);
+    notifyEvent<HWStallEvent>(HWStallEvent(ET, IR));
+    return false;
+  }
+
+  return true;
+}
+
+Error ExecuteStage::issueInstruction(InstRef &IR) {
+  SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> Used;
+  SmallVector<InstRef, 4> Ready;
+  HWS.issueInstruction(IR, Used, Ready);
+
+  notifyReservedOrReleasedBuffers(IR, /* Reserved */ false);
+
+  notifyInstructionIssued(IR, Used);
+  if (IR.getInstruction()->isExecuted()) {
+    notifyInstructionExecuted(IR);
+    // FIXME: add a buffer of executed instructions.
+    if (Error S = moveToTheNextStage(IR))
+      return S;
+  }
+
+  for (const InstRef &I : Ready)
+    notifyInstructionReady(I);
+  return ErrorSuccess();
+}
+
+Error ExecuteStage::issueReadyInstructions() {
+  InstRef IR = HWS.select();
+  while (IR) {
+    if (Error Err = issueInstruction(IR))
+      return Err;
+
+    // Select the next instruction to issue.
+    IR = HWS.select();
+  }
+
+  return ErrorSuccess();
+}
+
+Error ExecuteStage::cycleStart() {
+  SmallVector<ResourceRef, 8> Freed;
+  SmallVector<InstRef, 4> Executed;
+  SmallVector<InstRef, 4> Ready;
+
+  HWS.cycleEvent(Freed, Executed, Ready);
+
+  for (const ResourceRef &RR : Freed)
+    notifyResourceAvailable(RR);
+
+  for (InstRef &IR : Executed) {
+    notifyInstructionExecuted(IR);
+    // FIXME: add a buffer of executed instructions.
+    if (Error S = moveToTheNextStage(IR))
+      return S;
+  }
+
+  for (const InstRef &IR : Ready)
+    notifyInstructionReady(IR);
+
+  return issueReadyInstructions();
+}
+
+#ifndef NDEBUG
+static void verifyInstructionEliminated(const InstRef &IR) {
+  const Instruction &Inst = *IR.getInstruction();
+  assert(Inst.isEliminated() && "Instruction was not eliminated!");
+  assert(Inst.isReady() && "Instruction in an inconsistent state!");
+
+  // Ensure that instructions eliminated at register renaming stage are in a
+  // consistent state.
+  const InstrDesc &Desc = Inst.getDesc();
+  assert(!Desc.MayLoad && !Desc.MayStore && "Cannot eliminate a memory op!");
+}
+#endif
+
+Error ExecuteStage::handleInstructionEliminated(InstRef &IR) {
+#ifndef NDEBUG
+  verifyInstructionEliminated(IR);
+#endif
+  notifyInstructionReady(IR);
+  notifyInstructionIssued(IR, {});
+  IR.getInstruction()->forceExecuted();
+  notifyInstructionExecuted(IR);
+  return moveToTheNextStage(IR);
+}
+
+// Schedule the instruction for execution on the hardware.
+Error ExecuteStage::execute(InstRef &IR) {
+  assert(isAvailable(IR) && "Scheduler is not available!");
+
+#ifndef NDEBUG
+  // Ensure that the HWS has not stored this instruction in its queues.
+  HWS.sanityCheck(IR);
+#endif
+
+  if (IR.getInstruction()->isEliminated())
+    return handleInstructionEliminated(IR);
+
+  // Reserve a slot in each buffered resource. Also, mark units with
+  // BufferSize=0 as reserved. Resources with a buffer size of zero will only
+  // be released after the instruction is issued, and all the ResourceCycles
+  // for those units have been consumed.
+  HWS.dispatch(IR);
+  notifyReservedOrReleasedBuffers(IR, /* Reserved */ true);
+  if (!HWS.isReady(IR))
+    return ErrorSuccess();
+
+  // If we did not return early, then the scheduler is ready for execution.
+  notifyInstructionReady(IR);
+
+  // If we cannot issue immediately, the HWS will add IR to its ready queue for
+  // execution later, so we must return early here.
+  if (!HWS.mustIssueImmediately(IR))
+    return ErrorSuccess();
+
+  // Issue IR to the underlying pipelines.
+  return issueInstruction(IR);
+}
+
+void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) const {
+  LLVM_DEBUG(dbgs() << "[E] Instruction Executed: #" << IR << '\n');
+  notifyEvent<HWInstructionEvent>(
+      HWInstructionEvent(HWInstructionEvent::Executed, IR));
+}
+
+void ExecuteStage::notifyInstructionReady(const InstRef &IR) const {
+  LLVM_DEBUG(dbgs() << "[E] Instruction Ready: #" << IR << '\n');
+  notifyEvent<HWInstructionEvent>(
+      HWInstructionEvent(HWInstructionEvent::Ready, IR));
+}
+
+void ExecuteStage::notifyResourceAvailable(const ResourceRef &RR) const {
+  LLVM_DEBUG(dbgs() << "[E] Resource Available: [" << RR.first << '.'
+                    << RR.second << "]\n");
+  for (HWEventListener *Listener : getListeners())
+    Listener->onResourceAvailable(RR);
+}
+
+void ExecuteStage::notifyInstructionIssued(
+    const InstRef &IR,
+    MutableArrayRef<std::pair<ResourceRef, ResourceCycles>> Used) const {
+  LLVM_DEBUG({
+    dbgs() << "[E] Instruction Issued: #" << IR << '\n';
+    for (const std::pair<ResourceRef, ResourceCycles> &Resource : Used) {
+      dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
+             << Resource.first.second << "], ";
+      dbgs() << "cycles: " << Resource.second << '\n';
+    }
+  });
+
+  // Replace resource masks with valid resource processor IDs.
+  for (std::pair<ResourceRef, ResourceCycles> &Use : Used)
+    Use.first.first = HWS.getResourceID(Use.first.first);
+
+  notifyEvent<HWInstructionEvent>(HWInstructionIssuedEvent(IR, Used));
+}
+
+void ExecuteStage::notifyReservedOrReleasedBuffers(const InstRef &IR,
+                                                   bool Reserved) const {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  if (Desc.Buffers.empty())
+    return;
+
+  SmallVector<unsigned, 4> BufferIDs(Desc.Buffers.begin(), Desc.Buffers.end());
+  std::transform(Desc.Buffers.begin(), Desc.Buffers.end(), BufferIDs.begin(),
+                 [&](uint64_t Op) { return HWS.getResourceID(Op); });
+  if (Reserved) {
+    for (HWEventListener *Listener : getListeners())
+      Listener->onReservedBuffers(IR, BufferIDs);
+    return;
+  }
+
+  for (HWEventListener *Listener : getListeners())
+    Listener->onReleasedBuffers(IR, BufferIDs);
+}
+
+} // namespace mca
+} // namespace llvm
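execute() above is easiest to read as a four-way decision ladder. A schematic restatement, assuming a scheduler-like object with the same three predicates used in the code (the template and the outcome strings are illustrative only):

// Outline of the decision ladder in ExecuteStage::execute().
template <typename SchedulerT, typename InstRefT>
const char *executeOutcome(SchedulerT &HWS, InstRefT &IR) {
  if (IR.isEliminated())
    return "eliminated at rename: notify listeners and forward";
  HWS.dispatch(IR); // Reserve buffered resources.
  if (!HWS.isReady(IR))
    return "buffered: still waiting on operands or resources";
  if (!HWS.mustIssueImmediately(IR))
    return "ready: queued, issued by a later select()";
  return "issued to the underlying pipelines this cycle";
}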
diff --git a/lib/MCA/Stages/InstructionTables.cpp b/lib/MCA/Stages/InstructionTables.cpp
new file mode 100644
index 0000000..f918c18
--- /dev/null
+++ b/lib/MCA/Stages/InstructionTables.cpp
@@ -0,0 +1,69 @@
+//===--------------------- InstructionTables.cpp ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements the method InstructionTables::execute().
+/// Method execute() prints a theoretical resource pressure distribution
+/// based on the information available in the scheduling model, without
+/// running the pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/InstructionTables.h"
+
+namespace llvm {
+namespace mca {
+
+Error InstructionTables::execute(InstRef &IR) {
+  const InstrDesc &Desc = IR.getInstruction()->getDesc();
+  UsedResources.clear();
+
+  // Identify the resources consumed by this instruction.
+  for (const std::pair<uint64_t, ResourceUsage> &Resource : Desc.Resources) {
+    // Skip zero-cycle resources (i.e., unused resources).
+    if (!Resource.second.size())
+      continue;
+    unsigned Cycles = Resource.second.size();
+    unsigned Index = std::distance(
+        Masks.begin(), std::find(Masks.begin(), Masks.end(), Resource.first));
+    const MCProcResourceDesc &ProcResource = *SM.getProcResource(Index);
+    unsigned NumUnits = ProcResource.NumUnits;
+    if (!ProcResource.SubUnitsIdxBegin) {
+      // The number of cycles consumed by each unit.
+      for (unsigned I = 0, E = NumUnits; I < E; ++I) {
+        ResourceRef ResourceUnit = std::make_pair(Index, 1U << I);
+        UsedResources.emplace_back(
+            std::make_pair(ResourceUnit, ResourceCycles(Cycles, NumUnits)));
+      }
+      continue;
+    }
+
+    // This is a group. Obtain the set of resources contained in this
+    // group. Some of these resources may implement multiple units.
+    // Uniformly distribute Cycles across all of the units.
+    for (unsigned I1 = 0; I1 < NumUnits; ++I1) {
+      unsigned SubUnitIdx = ProcResource.SubUnitsIdxBegin[I1];
+      const MCProcResourceDesc &SubUnit = *SM.getProcResource(SubUnitIdx);
+      // Compute the number of cycles consumed by each resource unit.
+      for (unsigned I2 = 0, E2 = SubUnit.NumUnits; I2 < E2; ++I2) {
+        ResourceRef ResourceUnit = std::make_pair(SubUnitIdx, 1U << I2);
+        UsedResources.emplace_back(std::make_pair(
+            ResourceUnit, ResourceCycles(Cycles, NumUnits * SubUnit.NumUnits)));
+      }
+    }
+  }
+
+  // Send a fake instruction issued event to all the views.
+  HWInstructionIssuedEvent Event(IR, UsedResources);
+  notifyEvent<HWInstructionIssuedEvent>(Event);
+  return ErrorSuccess();
+}
+
+} // namespace mca
+} // namespace llvm
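The ResourceCycles(Cycles, NumUnits) values built above are rational numbers: a resource consumed for Cycles cycles is charged to each of its NumUnits units as Cycles/NumUnits. A tiny worked example with hypothetical numbers:

#include <cstdio>

int main() {
  // A group with 2 units consumed for 3 cycles: each unit is charged 3/2.
  const unsigned Cycles = 3, NumUnits = 2;
  for (unsigned I = 0; I < NumUnits; ++I)
    std::printf("unit %u charged %u/%u cycles (%.2f)\n", I, Cycles, NumUnits,
                static_cast<double>(Cycles) / NumUnits);
  return 0;
}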
diff --git a/lib/MCA/Stages/RetireStage.cpp b/lib/MCA/Stages/RetireStage.cpp
new file mode 100644
index 0000000..d6bcc51
--- /dev/null
+++ b/lib/MCA/Stages/RetireStage.cpp
@@ -0,0 +1,62 @@
+//===---------------------- RetireStage.cpp ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the retire stage of an instruction pipeline.
+/// The RetireStage represents the process logic that interacts with the
+/// simulated RetireControlUnit hardware.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/RetireStage.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+llvm::Error RetireStage::cycleStart() {
+  if (RCU.isEmpty())
+    return llvm::ErrorSuccess();
+
+  const unsigned MaxRetirePerCycle = RCU.getMaxRetirePerCycle();
+  unsigned NumRetired = 0;
+  while (!RCU.isEmpty()) {
+    if (MaxRetirePerCycle != 0 && NumRetired == MaxRetirePerCycle)
+      break;
+    const RetireControlUnit::RUToken &Current = RCU.peekCurrentToken();
+    if (!Current.Executed)
+      break;
+    RCU.consumeCurrentToken();
+    notifyInstructionRetired(Current.IR);
+    NumRetired++;
+  }
+
+  return llvm::ErrorSuccess();
+}
+
+llvm::Error RetireStage::execute(InstRef &IR) {
+  RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
+  return llvm::ErrorSuccess();
+}
+
+void RetireStage::notifyInstructionRetired(const InstRef &IR) const {
+  LLVM_DEBUG(llvm::dbgs() << "[E] Instruction Retired: #" << IR << '\n');
+  llvm::SmallVector<unsigned, 4> FreedRegs(PRF.getNumRegisterFiles());
+  const Instruction &Inst = *IR.getInstruction();
+
+  for (const WriteState &WS : Inst.getDefs())
+    PRF.removeRegisterWrite(WS, FreedRegs);
+  notifyEvent<HWInstructionEvent>(HWInstructionRetiredEvent(IR, FreedRegs));
+}
+
+} // namespace mca
+} // namespace llvm
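The retire loop above enforces two properties: retirement is strictly in program order (only the queue head can leave), and it is throttled by MaxRetirePerCycle, where zero means unbounded. A standalone model of that loop, with the RCU reduced to a deque of executed flags (retireOneCycle is an illustrative name):

#include <deque>

// Returns how many instructions retire this cycle.
unsigned retireOneCycle(std::deque<bool> &ExecutedFlags,
                        unsigned MaxRetirePerCycle) {
  unsigned NumRetired = 0;
  while (!ExecutedFlags.empty()) {
    if (MaxRetirePerCycle != 0 && NumRetired == MaxRetirePerCycle)
      break;
    if (!ExecutedFlags.front()) // Head not executed yet: in-order stall.
      break;
    ExecutedFlags.pop_front();
    ++NumRetired;
  }
  return NumRetired;
}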
diff --git a/lib/MCA/Stages/Stage.cpp b/lib/MCA/Stages/Stage.cpp
new file mode 100644
index 0000000..3819164
--- /dev/null
+++ b/lib/MCA/Stages/Stage.cpp
@@ -0,0 +1,29 @@
+//===---------------------- Stage.cpp ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a stage.
+/// A chain of stages compose an instruction pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+// Pin the vtable here in the implementation file.
+Stage::~Stage() = default;
+
+void Stage::addListener(HWEventListener *Listener) {
+  Listeners.insert(Listener);
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/lib/MCA/Support.cpp b/lib/MCA/Support.cpp
new file mode 100644
index 0000000..335953e
--- /dev/null
+++ b/lib/MCA/Support.cpp
@@ -0,0 +1,94 @@
+//===--------------------- Support.cpp --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements a few helper functions used by various pipeline
+/// components.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Support.h"
+#include "llvm/MC/MCSchedule.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
+void computeProcResourceMasks(const MCSchedModel &SM,
+                              MutableArrayRef<uint64_t> Masks) {
+  unsigned ProcResourceID = 0;
+
+  assert(Masks.size() == SM.getNumProcResourceKinds() &&
+         "Invalid number of elements");
+  // Resource at index 0 is the 'InvalidUnit'. Set an invalid mask for it.
+  Masks[0] = 0;
+
+  // Create a unique bitmask for every processor resource unit.
+  for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+    const MCProcResourceDesc &Desc = *SM.getProcResource(I);
+    if (Desc.SubUnitsIdxBegin)
+      continue;
+    Masks[I] = 1ULL << ProcResourceID;
+    ProcResourceID++;
+  }
+
+  // Create a unique bitmask for every processor resource group.
+  for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+    const MCProcResourceDesc &Desc = *SM.getProcResource(I);
+    if (!Desc.SubUnitsIdxBegin)
+      continue;
+    Masks[I] = 1ULL << ProcResourceID;
+    for (unsigned U = 0; U < Desc.NumUnits; ++U) {
+      uint64_t OtherMask = Masks[Desc.SubUnitsIdxBegin[U]];
+      Masks[I] |= OtherMask;
+    }
+    ProcResourceID++;
+  }
+
+#ifndef NDEBUG
+  LLVM_DEBUG(dbgs() << "\nProcessor resource masks:\n");
+  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+    const MCProcResourceDesc &Desc = *SM.getProcResource(I);
+    LLVM_DEBUG(dbgs() << '[' << I << "] " << Desc.Name << " - " << Masks[I]
+                      << '\n');
+  }
+#endif
+}
+
+double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
+                               unsigned NumMicroOps,
+                               ArrayRef<unsigned> ProcResourceUsage) {
+  // The block throughput is bounded from above by the hardware dispatch
+  // throughput. That is because the DispatchWidth is an upper bound on the
+  // number of opcodes that can be part of a single dispatch group.
+  double Max = static_cast<double>(NumMicroOps) / DispatchWidth;
+
+  // The block throughput is also limited by the amount of hardware parallelism.
+  // The number of available resource units affects the resource pressure
+  // distribution, as well as how many blocks can be executed every cycle.
+  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+    unsigned ResourceCycles = ProcResourceUsage[I];
+    if (!ResourceCycles)
+      continue;
+
+    const MCProcResourceDesc &MCDesc = *SM.getProcResource(I);
+    double Throughput = static_cast<double>(ResourceCycles) / MCDesc.NumUnits;
+    Max = std::max(Max, Throughput);
+  }
+
+  // The block reciprocal throughput is computed as the MAX of:
+  //  - (NumMicroOps / DispatchWidth)
+  //  - (ResourceCycles / NumUnits)   for every consumed processor resource.
+  return Max;
+}
+
+} // namespace mca
+} // namespace llvm
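Plugging hypothetical numbers into computeBlockRThroughput() above makes the MAX formula concrete: 6 uops on a 4-wide dispatch bound the block at 1.5 cycles, and a resource with 3 cycles of pressure spread over 2 units also yields 1.5, so the reciprocal throughput is 1.5 cycles per block:

#include <algorithm>
#include <cstdio>

int main() {
  double Max = 6.0 / 4.0;         // NumMicroOps / DispatchWidth = 1.5
  Max = std::max(Max, 3.0 / 2.0); // ResourceCycles / NumUnits   = 1.5
  std::printf("Block RThroughput = %.2f cycles\n", Max); // 1.50
  return 0;
}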
diff --git a/lib/Object/ArchiveWriter.cpp b/lib/Object/ArchiveWriter.cpp
index 7672053..da93602 100644
--- a/lib/Object/ArchiveWriter.cpp
+++ b/lib/Object/ArchiveWriter.cpp
@@ -250,6 +250,7 @@
 
 static void printMemberHeader(raw_ostream &Out, uint64_t Pos,
                               raw_ostream &StringTable,
+                              StringMap<uint64_t> &MemberNames,
                               object::Archive::Kind Kind, bool Thin,
                               StringRef ArcName, const NewArchiveMember &M,
                               sys::TimePoint<std::chrono::seconds> ModTime,
@@ -262,8 +263,18 @@
     return printGNUSmallMemberHeader(Out, M.MemberName, ModTime, M.UID, M.GID,
                                      M.Perms, Size);
   Out << '/';
-  uint64_t NamePos = StringTable.tell();
-  addToStringTable(StringTable, ArcName, M, Thin);
+  uint64_t NamePos;
+  if (Thin) {
+    NamePos = StringTable.tell();
+    addToStringTable(StringTable, ArcName, M, Thin);
+  } else {
+    auto Insertion = MemberNames.insert({M.MemberName, uint64_t(0)});
+    if (Insertion.second) {
+      Insertion.first->second = StringTable.tell();
+      addToStringTable(StringTable, ArcName, M, Thin);
+    }
+    NamePos = Insertion.first->second;
+  }
   printWithSpacePadding(Out, NamePos, 15);
   printRestOfMemberHeader(Out, ModTime, M.UID, M.GID, M.Perms, Size);
 }
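The new MemberNames map above implements a classic intern-on-first-use pattern: insert() reports via .second whether the name was new, and only then is the string table grown. A standalone sketch with llvm::StringMap replaced by std::unordered_map (internName and the simplified record format are illustrative):

#include <cstdint>
#include <string>
#include <unordered_map>

uint64_t internName(std::unordered_map<std::string, uint64_t> &MemberNames,
                    std::string &StringTable, const std::string &Name) {
  auto Insertion = MemberNames.insert({Name, uint64_t(0)});
  if (Insertion.second) { // First occurrence: append and record its offset.
    Insertion.first->second = StringTable.size();
    StringTable += Name + "/\n"; // Simplified long-name record.
  }
  return Insertion.first->second; // Later members reuse the same offset.
}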
@@ -433,6 +444,11 @@
   std::vector<MemberData> Ret;
   bool HasObject = false;
 
+  // Deduplicate long member names in the string table and reuse earlier name
+  // offsets. This especially saves space for COFF Import libraries where all
+  // members have the same name.
+  StringMap<uint64_t> MemberNames;
+
   // UniqueTimestamps is a special case to improve debugging on Darwin:
   //
   // The Darwin linker does not link debug info into the final
@@ -505,8 +521,8 @@
       ModTime = sys::toTimePoint(FilenameCount[M.MemberName]++);
     else
       ModTime = M.ModTime;
-    printMemberHeader(Out, Pos, StringTable, Kind, Thin, ArcName, M, ModTime,
-                      Buf.getBufferSize() + MemberPadding);
+    printMemberHeader(Out, Pos, StringTable, MemberNames, Kind, Thin, ArcName,
+                      M, ModTime, Buf.getBufferSize() + MemberPadding);
     Out.flush();
 
     Expected<std::vector<unsigned>> Symbols =
diff --git a/lib/Object/Binary.cpp b/lib/Object/Binary.cpp
index d7c2592..fe41987 100644
--- a/lib/Object/Binary.cpp
+++ b/lib/Object/Binary.cpp
@@ -88,7 +88,8 @@
 
 Expected<OwningBinary<Binary>> object::createBinary(StringRef Path) {
   ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
-      MemoryBuffer::getFileOrSTDIN(Path);
+      MemoryBuffer::getFileOrSTDIN(Path, /*FileSize=*/-1,
+                                   /*RequiresNullTerminator=*/false);
   if (std::error_code EC = FileOrErr.getError())
     return errorCodeToError(EC);
   std::unique_ptr<MemoryBuffer> &Buffer = FileOrErr.get();
diff --git a/lib/Object/COFFObjectFile.cpp b/lib/Object/COFFObjectFile.cpp
index 2ac9c0e..fc1deeb 100644
--- a/lib/Object/COFFObjectFile.cpp
+++ b/lib/Object/COFFObjectFile.cpp
@@ -938,6 +938,18 @@
   return make_range(base_reloc_begin(), base_reloc_end());
 }
 
+std::error_code
+COFFObjectFile::getCOFFHeader(const coff_file_header *&Res) const {
+  Res = COFFHeader;
+  return std::error_code();
+}
+
+std::error_code
+COFFObjectFile::getCOFFBigObjHeader(const coff_bigobj_file_header *&Res) const {
+  Res = COFFBigObjHeader;
+  return std::error_code();
+}
+
 std::error_code COFFObjectFile::getPE32Header(const pe32_header *&Res) const {
   Res = PE32Header;
   return std::error_code();
@@ -1053,6 +1065,16 @@
   return makeArrayRef(Aux, Symbol.getNumberOfAuxSymbols() * SymbolSize);
 }
 
+uint32_t COFFObjectFile::getSymbolIndex(COFFSymbolRef Symbol) const {
+  uintptr_t Offset =
+      reinterpret_cast<uintptr_t>(Symbol.getRawPtr()) - getSymbolTable();
+  assert(Offset % getSymbolTableEntrySize() == 0 &&
+         "Symbol did not point to the beginning of a symbol");
+  size_t Index = Offset / getSymbolTableEntrySize();
+  assert(Index < getNumberOfSymbols());
+  return Index;
+}
+
 std::error_code COFFObjectFile::getSectionName(const coff_section *Sec,
                                                StringRef &Res) const {
   StringRef Name;
diff --git a/lib/Object/Error.cpp b/lib/Object/Error.cpp
index 7d43a84..6fa23e0 100644
--- a/lib/Object/Error.cpp
+++ b/lib/Object/Error.cpp
@@ -57,6 +57,7 @@
                    "defined.");
 }
 
+void BinaryError::anchor() {}
 char BinaryError::ID = 0;
 char GenericBinaryError::ID = 0;
 
diff --git a/lib/Object/MachOObjectFile.cpp b/lib/Object/MachOObjectFile.cpp
index 86f2936..ce4d1cf 100644
--- a/lib/Object/MachOObjectFile.cpp
+++ b/lib/Object/MachOObjectFile.cpp
@@ -2438,7 +2438,7 @@
   return basic_symbol_iterator(SymbolRef(DRI, this));
 }
 
-basic_symbol_iterator MachOObjectFile::getSymbolByIndex(unsigned Index) const {
+symbol_iterator MachOObjectFile::getSymbolByIndex(unsigned Index) const {
   MachO::symtab_command Symtab = getSymtabLoadCommand();
   if (!SymtabLoadCmd || Index >= Symtab.nsyms)
     report_fatal_error("Requested symbol index is out of range.");
diff --git a/lib/Object/WasmObjectFile.cpp b/lib/Object/WasmObjectFile.cpp
index 1a687d9..d84cb48 100644
--- a/lib/Object/WasmObjectFile.cpp
+++ b/lib/Object/WasmObjectFile.cpp
@@ -24,6 +24,7 @@
 #include "llvm/Support/Error.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/LEB128.h"
+#include "llvm/Support/ScopedPrinter.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>
@@ -175,7 +176,7 @@
   case wasm::WASM_OPCODE_F64_CONST:
     Expr.Value.Float64 = readFloat64(Ctx);
     break;
-  case wasm::WASM_OPCODE_GET_GLOBAL:
+  case wasm::WASM_OPCODE_GLOBAL_GET:
     Expr.Value.Global = readULEB128(Ctx);
     break;
   default:
@@ -207,8 +208,8 @@
   return Table;
 }
 
-static Error readSection(WasmSection &Section,
-                         WasmObjectFile::ReadContext &Ctx) {
+static Error readSection(WasmSection &Section, WasmObjectFile::ReadContext &Ctx,
+                         WasmSectionOrderChecker &Checker) {
   Section.Offset = Ctx.Ptr - Ctx.Start;
   Section.Type = readUint8(Ctx);
   LLVM_DEBUG(dbgs() << "readSection type=" << Section.Type << "\n");
@@ -231,6 +232,13 @@
     Ctx.Ptr += SectionNameSize;
     Size -= SectionNameSize;
   }
+
+  if (!Checker.isValidSectionOrder(Section.Type, Section.Name)) {
+    return make_error<StringError>("Out of order section type: " +
+                                       llvm::to_string(Section.Type),
+                                   object_error::parse_failed);
+  }
+
   Section.Content = ArrayRef<uint8_t>(Ctx.Ptr, Size);
   Ctx.Ptr += Size;
   return Error::success();
@@ -265,8 +273,9 @@
   }
 
   WasmSection Sec;
+  WasmSectionOrderChecker Checker;
   while (Ctx.Ptr < Ctx.End) {
-    if ((Err = readSection(Sec, Ctx)))
+    if ((Err = readSection(Sec, Ctx, Checker)))
       return;
     if ((Err = parseSection(Sec)))
       return;
@@ -810,7 +819,7 @@
       break;
     case wasm::WASM_EXTERNAL_TABLE:
       Im.Table = readTable(Ctx);
-      if (Im.Table.ElemType != wasm::WASM_TYPE_ANYFUNC)
+      if (Im.Table.ElemType != wasm::WASM_TYPE_FUNCREF)
         return make_error<GenericBinaryError>("Invalid table element type",
                                               object_error::parse_failed);
       break;
@@ -853,7 +862,7 @@
   Tables.reserve(Count);
   while (Count--) {
     Tables.push_back(readTable(Ctx));
-    if (Tables.back().ElemType != wasm::WASM_TYPE_ANYFUNC) {
+    if (Tables.back().ElemType != wasm::WASM_TYPE_FUNCREF) {
       return make_error<GenericBinaryError>("Invalid table element type",
                                             object_error::parse_failed);
     }
@@ -1433,3 +1442,58 @@
   assert(Ref.d.b < Sec.Relocations.size());
   return Sec.Relocations[Ref.d.b];
 }
+
+int WasmSectionOrderChecker::getSectionOrder(unsigned ID,
+                                             StringRef CustomSectionName) {
+  switch (ID) {
+  case wasm::WASM_SEC_CUSTOM:
+    return StringSwitch<unsigned>(CustomSectionName)
+        .Case("dylink", WASM_SEC_ORDER_DYLINK)
+        .Case("linking", WASM_SEC_ORDER_LINKING)
+        .StartsWith("reloc.", WASM_SEC_ORDER_RELOC)
+        .Case("name", WASM_SEC_ORDER_NAME)
+        .Case("producers", WASM_SEC_ORDER_PRODUCERS)
+        .Default(-1);
+  case wasm::WASM_SEC_TYPE:
+    return WASM_SEC_ORDER_TYPE;
+  case wasm::WASM_SEC_IMPORT:
+    return WASM_SEC_ORDER_IMPORT;
+  case wasm::WASM_SEC_FUNCTION:
+    return WASM_SEC_ORDER_FUNCTION;
+  case wasm::WASM_SEC_TABLE:
+    return WASM_SEC_ORDER_TABLE;
+  case wasm::WASM_SEC_MEMORY:
+    return WASM_SEC_ORDER_MEMORY;
+  case wasm::WASM_SEC_GLOBAL:
+    return WASM_SEC_ORDER_GLOBAL;
+  case wasm::WASM_SEC_EXPORT:
+    return WASM_SEC_ORDER_EXPORT;
+  case wasm::WASM_SEC_START:
+    return WASM_SEC_ORDER_START;
+  case wasm::WASM_SEC_ELEM:
+    return WASM_SEC_ORDER_ELEM;
+  case wasm::WASM_SEC_CODE:
+    return WASM_SEC_ORDER_CODE;
+  case wasm::WASM_SEC_DATA:
+    return WASM_SEC_ORDER_DATA;
+  case wasm::WASM_SEC_DATACOUNT:
+    return WASM_SEC_ORDER_DATACOUNT;
+  case wasm::WASM_SEC_EVENT:
+    return WASM_SEC_ORDER_EVENT;
+  default:
+    llvm_unreachable("invalid section");
+  }
+}
+
+bool WasmSectionOrderChecker::isValidSectionOrder(unsigned ID,
+                                                  StringRef CustomSectionName) {
+  int Order = getSectionOrder(ID, CustomSectionName);
+  if (Order == -1) // Skip unknown sections
+    return true;
+  // There can be multiple "reloc." sections. Otherwise there shouldn't be any
+  // duplicate section orders.
+  bool IsValid = (LastOrder == Order && Order == WASM_SEC_ORDER_RELOC) ||
+                 LastOrder < Order;
+  LastOrder = Order;
+  return IsValid;
+}
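The checker above accepts a strictly increasing sequence of section orders, with one exception: consecutive "reloc.*" custom sections may repeat the same slot. A self-contained model of that rule (ToyOrderChecker and the RELOC constant are illustrative):

#include <cassert>

struct ToyOrderChecker {
  static const int RELOC = 5; // Stand-in for WASM_SEC_ORDER_RELOC.
  int LastOrder = -1;

  bool isValid(int Order) {
    if (Order == -1) // Unknown sections are skipped.
      return true;
    bool IsValid = (LastOrder == Order && Order == RELOC) || LastOrder < Order;
    LastOrder = Order;
    return IsValid;
  }
};

int main() {
  ToyOrderChecker C;
  assert(C.isValid(1));  // A regular section.
  assert(C.isValid(5));  // First reloc section.
  assert(C.isValid(5));  // A second reloc section is still valid.
  assert(!C.isValid(3)); // Going backwards is rejected.
  return 0;
}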
diff --git a/lib/ObjectYAML/COFFYAML.cpp b/lib/ObjectYAML/COFFYAML.cpp
index 9351ef9..fdd94f4 100644
--- a/lib/ObjectYAML/COFFYAML.cpp
+++ b/lib/ObjectYAML/COFFYAML.cpp
@@ -407,7 +407,8 @@
 void MappingTraits<COFFYAML::Relocation>::mapping(IO &IO,
                                                   COFFYAML::Relocation &Rel) {
   IO.mapRequired("VirtualAddress", Rel.VirtualAddress);
-  IO.mapRequired("SymbolName", Rel.SymbolName);
+  IO.mapOptional("SymbolName", Rel.SymbolName, StringRef());
+  IO.mapOptional("SymbolTableIndex", Rel.SymbolTableIndex);
 
   COFF::header &H = *static_cast<COFF::header *>(IO.getContext());
   if (H.Machine == COFF::IMAGE_FILE_MACHINE_I386) {
diff --git a/lib/ObjectYAML/ELFYAML.cpp b/lib/ObjectYAML/ELFYAML.cpp
index b9f5243..215d6bd 100644
--- a/lib/ObjectYAML/ELFYAML.cpp
+++ b/lib/ObjectYAML/ELFYAML.cpp
@@ -753,6 +753,7 @@
   IO.mapRequired("Class", FileHdr.Class);
   IO.mapRequired("Data", FileHdr.Data);
   IO.mapOptional("OSABI", FileHdr.OSABI, ELFYAML::ELF_ELFOSABI(0));
+  IO.mapOptional("ABIVersion", FileHdr.ABIVersion, Hex8(0));
   IO.mapRequired("Type", FileHdr.Type);
   IO.mapRequired("Machine", FileHdr.Machine);
   IO.mapOptional("Flags", FileHdr.Flags, ELFYAML::ELF_EF(0));
diff --git a/lib/ObjectYAML/WasmYAML.cpp b/lib/ObjectYAML/WasmYAML.cpp
index b978033..47bf853 100644
--- a/lib/ObjectYAML/WasmYAML.cpp
+++ b/lib/ObjectYAML/WasmYAML.cpp
@@ -377,7 +377,7 @@
   case wasm::WASM_OPCODE_F64_CONST:
     IO.mapRequired("Value", Expr.Value.Float64);
     break;
-  case wasm::WASM_OPCODE_GET_GLOBAL:
+  case wasm::WASM_OPCODE_GLOBAL_GET:
     IO.mapRequired("Index", Expr.Value.Global);
     break;
   }
@@ -491,7 +491,7 @@
   ECase(F32);
   ECase(F64);
   ECase(V128);
-  ECase(ANYFUNC);
+  ECase(FUNCREF);
   ECase(FUNC);
   ECase(NORESULT);
 #undef ECase
@@ -516,14 +516,14 @@
   ECase(I64_CONST);
   ECase(F64_CONST);
   ECase(F32_CONST);
-  ECase(GET_GLOBAL);
+  ECase(GLOBAL_GET);
 #undef ECase
 }
 
 void ScalarEnumerationTraits<WasmYAML::TableType>::enumeration(
     IO &IO, WasmYAML::TableType &Type) {
 #define ECase(X) IO.enumCase(Type, #X, wasm::WASM_TYPE_##X);
-  ECase(ANYFUNC);
+  ECase(FUNCREF);
 #undef ECase
 }
 
diff --git a/lib/Passes/PassBuilder.cpp b/lib/Passes/PassBuilder.cpp
index 4da8f54..5ec94ea 100644
--- a/lib/Passes/PassBuilder.cpp
+++ b/lib/Passes/PassBuilder.cpp
@@ -88,11 +88,14 @@
 #include "llvm/Transforms/IPO/SyntheticCountsPropagation.h"
 #include "llvm/Transforms/IPO/WholeProgramDevirt.h"
 #include "llvm/Transforms/InstCombine/InstCombine.h"
+#include "llvm/Transforms/Instrumentation.h"
 #include "llvm/Transforms/Instrumentation/BoundsChecking.h"
 #include "llvm/Transforms/Instrumentation/CGProfile.h"
 #include "llvm/Transforms/Instrumentation/ControlHeightReduction.h"
 #include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
 #include "llvm/Transforms/Instrumentation/InstrProfiling.h"
+#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
 #include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
 #include "llvm/Transforms/Scalar/ADCE.h"
 #include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
@@ -151,6 +154,7 @@
 #include "llvm/Transforms/Scalar/WarnMissedTransforms.h"
 #include "llvm/Transforms/Utils/AddDiscriminators.h"
 #include "llvm/Transforms/Utils/BreakCriticalEdges.h"
+#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
 #include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
 #include "llvm/Transforms/Utils/LCSSA.h"
 #include "llvm/Transforms/Utils/LibCallsShrinkWrap.h"
@@ -1239,6 +1243,91 @@
   return Count;
 }
 
+static bool checkParametrizedPassName(StringRef Name, StringRef PassName) {
+  if (!Name.consume_front(PassName))
+    return false;
+  // normal pass name w/o parameters == default parameters
+  if (Name.empty())
+    return true;
+  return Name.startswith("<") && Name.endswith(">");
+}
+
+namespace {
+
+/// This performs customized parsing of a pass name with parameters.
+///
+/// We do not need parametrization of passes in textual pipelines very often,
+/// yet on rare occasions the ability to specify parameters right there can be
+/// useful.
+///
+/// \p Name - parameterized specification of a pass from a textual pipeline;
+/// it is a string of the form:
+///      PassName '<' parameter-list '>'
+///
+/// The parameter list is parsed by the callable argument \p Parser. It takes
+/// a StringRef of parameters and returns either a StringError or a parameter
+/// list in the form of a custom parameters type, wrapped into the Expected<>
+/// template class.
+///
+template <typename ParametersParseCallableT>
+auto parsePassParameters(ParametersParseCallableT &&Parser, StringRef Name,
+                         StringRef PassName) -> decltype(Parser(StringRef{})) {
+  using ParametersT = typename decltype(Parser(StringRef{}))::value_type;
+
+  StringRef Params = Name;
+  if (!Params.consume_front(PassName)) {
+    assert(false &&
+           "unable to strip pass name from parametrized pass specification");
+  }
+  if (Params.empty())
+    return ParametersT{};
+  if (!Params.consume_front("<") || !Params.consume_back(">")) {
+    assert(false && "invalid format for parametrized pass name");
+  }
+
+  Expected<ParametersT> Result = Parser(Params);
+  assert((Result || Result.template errorIsA<StringError>()) &&
+         "Pass parameter parser can only return StringErrors.");
+  return std::move(Result);
+}
+
+/// Parser of parameters for LoopUnroll pass.
+Expected<LoopUnrollOptions> parseLoopUnrollOptions(StringRef Params) {
+  LoopUnrollOptions UnrollOpts;
+  while (!Params.empty()) {
+    StringRef ParamName;
+    std::tie(ParamName, Params) = Params.split(';');
+    int OptLevel = StringSwitch<int>(ParamName)
+                       .Case("O0", 0)
+                       .Case("O1", 1)
+                       .Case("O2", 2)
+                       .Case("O3", 3)
+                       .Default(-1);
+    if (OptLevel >= 0) {
+      UnrollOpts.setOptLevel(OptLevel);
+      continue;
+    }
+
+    bool Enable = !ParamName.consume_front("no-");
+    if (ParamName == "partial") {
+      UnrollOpts.setPartial(Enable);
+    } else if (ParamName == "peeling") {
+      UnrollOpts.setPeeling(Enable);
+    } else if (ParamName == "runtime") {
+      UnrollOpts.setRuntime(Enable);
+    } else if (ParamName == "upperbound") {
+      UnrollOpts.setUpperBound(Enable);
+    } else {
+      return make_error<StringError>(
+          formatv("invalid LoopUnrollPass parameter '{0}' ", ParamName).str(),
+          inconvertibleErrorCode());
+    }
+  }
+  return UnrollOpts;
+}
+
+} // namespace
+
 /// Tests whether a pass name starts with a valid prefix for a default pipeline
 /// alias.
 static bool startsWithDefaultPipelineAliasPrefix(StringRef Name) {
@@ -1334,6 +1423,9 @@
 #define FUNCTION_PASS(NAME, CREATE_PASS)                                       \
   if (Name == NAME)                                                            \
     return true;
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)                   \
+  if (checkParametrizedPassName(Name, NAME))                                   \
+    return true;
 #define FUNCTION_ANALYSIS(NAME, CREATE_PASS)                                   \
   if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">")           \
     return true;
@@ -1671,6 +1763,14 @@
     FPM.addPass(CREATE_PASS);                                                  \
     return Error::success();                                                   \
   }
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)                   \
+  if (checkParametrizedPassName(Name, NAME)) {                                 \
+    auto Params = parsePassParameters(PARSER, Name, NAME);                     \
+    if (!Params)                                                               \
+      return Params.takeError();                                               \
+    FPM.addPass(CREATE_PASS(Params.get()));                                    \
+    return Error::success();                                                   \
+  }
 #define FUNCTION_ANALYSIS(NAME, CREATE_PASS)                                   \
   if (Name == "require<" NAME ">") {                                           \
     FPM.addPass(                                                               \
diff --git a/lib/Passes/PassRegistry.def b/lib/Passes/PassRegistry.def
index 97f0d57..771d2f5 100644
--- a/lib/Passes/PassRegistry.def
+++ b/lib/Passes/PassRegistry.def
@@ -42,6 +42,7 @@
 #endif
 MODULE_PASS("always-inline", AlwaysInlinerPass())
 MODULE_PASS("called-value-propagation", CalledValuePropagationPass())
+MODULE_PASS("canonicalize-aliases", CanonicalizeAliasesPass())
 MODULE_PASS("cg-profile", CGProfilePass())
 MODULE_PASS("constmerge", ConstantMergePass())
 MODULE_PASS("cross-dso-cfi", CrossDSOCFIPass())
@@ -200,6 +201,7 @@
 FUNCTION_PASS("print<assumptions>", AssumptionPrinterPass(dbgs()))
 FUNCTION_PASS("print<block-freq>", BlockFrequencyPrinterPass(dbgs()))
 FUNCTION_PASS("print<branch-prob>", BranchProbabilityPrinterPass(dbgs()))
+FUNCTION_PASS("print<da>", DependenceAnalysisPrinterPass(dbgs()))
 FUNCTION_PASS("print<domtree>", DominatorTreePrinterPass(dbgs()))
 FUNCTION_PASS("print<postdomtree>", PostDominatorTreePrinterPass(dbgs()))
 FUNCTION_PASS("print<demanded-bits>", DemandedBitsPrinterPass(dbgs()))
@@ -221,8 +223,6 @@
 FUNCTION_PASS("sroa", SROA())
 FUNCTION_PASS("tailcallelim", TailCallElimPass())
 FUNCTION_PASS("unreachableblockelim", UnreachableBlockElimPass())
-FUNCTION_PASS("unroll", LoopUnrollPass())
-FUNCTION_PASS("unroll<peeling;no-runtime>",LoopUnrollPass(LoopUnrollOptions().setPeeling(true).setRuntime(false)))
 FUNCTION_PASS("verify", VerifierPass())
 FUNCTION_PASS("verify<domtree>", DominatorTreeVerifierPass())
 FUNCTION_PASS("verify<loops>", LoopVerifierPass())
@@ -231,8 +231,18 @@
 FUNCTION_PASS("view-cfg", CFGViewerPass())
 FUNCTION_PASS("view-cfg-only", CFGOnlyViewerPass())
 FUNCTION_PASS("transform-warning", WarnMissedTransformationsPass())
+FUNCTION_PASS("msan", MemorySanitizerPass())
+FUNCTION_PASS("tsan", ThreadSanitizerPass())
 #undef FUNCTION_PASS
 
+#ifndef FUNCTION_PASS_WITH_PARAMS
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)
+#endif
+FUNCTION_PASS_WITH_PARAMS("unroll",
+			  [](LoopUnrollOptions Opts) { return LoopUnrollPass(Opts); },
+			  parseLoopUnrollOptions)
+#undef FUNCTION_PASS_WITH_PARAMS
+
 #ifndef LOOP_ANALYSIS
 #define LOOP_ANALYSIS(NAME, CREATE_PASS)
 #endif
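Taken together, the parser and the registry entry above let a textual pipeline spell unroll parameters inline instead of relying on the two hard-coded variants that were removed. For example (parameter names taken from parseLoopUnrollOptions above; the input file name is hypothetical):

$ opt -passes='unroll<O2;partial;no-runtime>' -S input.ll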
diff --git a/lib/Passes/StandardInstrumentations.cpp b/lib/Passes/StandardInstrumentations.cpp
index 765ffe6..a1dfc39 100644
--- a/lib/Passes/StandardInstrumentations.cpp
+++ b/lib/Passes/StandardInstrumentations.cpp
@@ -14,6 +14,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Passes/StandardInstrumentations.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/Analysis/CallGraphSCCPass.h"
 #include "llvm/Analysis/LazyCallGraph.h"
 #include "llvm/Analysis/LoopInfo.h"
@@ -28,112 +29,215 @@
 using namespace llvm;
 
 namespace {
-namespace PrintIR {
 
-//===----------------------------------------------------------------------===//
-// IR-printing instrumentation
-//===----------------------------------------------------------------------===//
+/// Extracts the Module out of the \p IR unit. Also fills in a textual
+/// description of \p IR for use in the header when printing.
+Optional<std::pair<const Module *, std::string>> unwrapModule(Any IR) {
+  if (any_isa<const Module *>(IR))
+    return std::make_pair(any_cast<const Module *>(IR), std::string());
 
-/// Generic IR-printing helper that unpacks a pointer to IRUnit wrapped into
-/// llvm::Any and does actual print job.
-void unwrapAndPrint(StringRef Banner, Any IR) {
-  SmallString<40> Extra{"\n"};
-  const Module *M = nullptr;
-  if (any_isa<const Module *>(IR)) {
-    M = any_cast<const Module *>(IR);
-  } else if (any_isa<const Function *>(IR)) {
+  if (any_isa<const Function *>(IR)) {
     const Function *F = any_cast<const Function *>(IR);
     if (!llvm::isFunctionInPrintList(F->getName()))
-      return;
-    if (!llvm::forcePrintModuleIR()) {
-      dbgs() << Banner << Extra << static_cast<const Value &>(*F);
-      return;
-    }
-    M = F->getParent();
-    Extra = formatv(" (function: {0})\n", F->getName());
-  } else if (any_isa<const LazyCallGraph::SCC *>(IR)) {
+      return None;
+    const Module *M = F->getParent();
+    return std::make_pair(M, formatv(" (function: {0})", F->getName()).str());
+  }
+
+  if (any_isa<const LazyCallGraph::SCC *>(IR)) {
     const LazyCallGraph::SCC *C = any_cast<const LazyCallGraph::SCC *>(IR);
-    if (!llvm::forcePrintModuleIR()) {
-      Extra = formatv(" (scc: {0})\n", C->getName());
-      bool BannerPrinted = false;
-      for (const LazyCallGraph::Node &N : *C) {
-        const Function &F = N.getFunction();
-        if (!F.isDeclaration() && isFunctionInPrintList(F.getName())) {
-          if (!BannerPrinted) {
-            dbgs() << Banner << Extra;
-            BannerPrinted = true;
-          }
-          F.print(dbgs());
-        }
-      }
-      return;
-    }
     for (const LazyCallGraph::Node &N : *C) {
       const Function &F = N.getFunction();
       if (!F.isDeclaration() && isFunctionInPrintList(F.getName())) {
-        M = F.getParent();
-        break;
+        const Module *M = F.getParent();
+        return std::make_pair(M, formatv(" (scc: {0})", C->getName()).str());
       }
     }
-    if (!M)
-      return;
-    Extra = formatv(" (for scc: {0})\n", C->getName());
-  } else if (any_isa<const Loop *>(IR)) {
+    return None;
+  }
+
+  if (any_isa<const Loop *>(IR)) {
     const Loop *L = any_cast<const Loop *>(IR);
     const Function *F = L->getHeader()->getParent();
     if (!isFunctionInPrintList(F->getName()))
-      return;
-    if (!llvm::forcePrintModuleIR()) {
-      llvm::printLoop(const_cast<Loop &>(*L), dbgs(), Banner);
-      return;
-    }
-    M = F->getParent();
-    {
-      std::string LoopName;
-      raw_string_ostream ss(LoopName);
-      L->getHeader()->printAsOperand(ss, false);
-      Extra = formatv(" (loop: {0})\n", ss.str());
-    }
+      return None;
+    const Module *M = F->getParent();
+    std::string LoopName;
+    raw_string_ostream ss(LoopName);
+    L->getHeader()->printAsOperand(ss, false);
+    return std::make_pair(M, formatv(" (loop: {0})", ss.str()).str());
   }
-  if (M) {
-    dbgs() << Banner << Extra;
-    M->print(dbgs(), nullptr, false);
-  } else {
-    llvm_unreachable("Unknown wrapped IR type");
-  }
+
+  llvm_unreachable("Unknown IR unit");
 }
 
-bool printBeforePass(StringRef PassID, Any IR) {
+void printIR(const Module *M, StringRef Banner, StringRef Extra = StringRef()) {
+  dbgs() << Banner << Extra << "\n";
+  M->print(dbgs(), nullptr, false);
+}
+void printIR(const Function *F, StringRef Banner,
+             StringRef Extra = StringRef()) {
+  if (!llvm::isFunctionInPrintList(F->getName()))
+    return;
+  dbgs() << Banner << Extra << "\n" << static_cast<const Value &>(*F);
+}
+void printIR(const LazyCallGraph::SCC *C, StringRef Banner,
+             StringRef Extra = StringRef()) {
+  bool BannerPrinted = false;
+  for (const LazyCallGraph::Node &N : *C) {
+    const Function &F = N.getFunction();
+    if (!F.isDeclaration() && llvm::isFunctionInPrintList(F.getName())) {
+      if (!BannerPrinted) {
+        dbgs() << Banner << Extra << "\n";
+        BannerPrinted = true;
+      }
+      F.print(dbgs());
+    }
+  }
+}
+void printIR(const Loop *L, StringRef Banner) {
+  const Function *F = L->getHeader()->getParent();
+  if (!llvm::isFunctionInPrintList(F->getName()))
+    return;
+  llvm::printLoop(const_cast<Loop &>(*L), dbgs(), Banner);
+}
+
+/// Generic IR-printing helper that unpacks a pointer to IRUnit wrapped into
+/// llvm::Any and does actual print job.
+void unwrapAndPrint(Any IR, StringRef Banner, bool ForceModule = false) {
+  if (ForceModule) {
+    if (auto UnwrappedModule = unwrapModule(IR))
+      printIR(UnwrappedModule->first, Banner, UnwrappedModule->second);
+    return;
+  }
+
+  if (any_isa<const Module *>(IR)) {
+    const Module *M = any_cast<const Module *>(IR);
+    assert(M && "module should be valid for printing");
+    printIR(M, Banner);
+    return;
+  }
+
+  if (any_isa<const Function *>(IR)) {
+    const Function *F = any_cast<const Function *>(IR);
+    assert(F && "function should be valid for printing");
+    printIR(F, Banner);
+    return;
+  }
+
+  if (any_isa<const LazyCallGraph::SCC *>(IR)) {
+    const LazyCallGraph::SCC *C = any_cast<const LazyCallGraph::SCC *>(IR);
+    assert(C && "scc should be valid for printing");
+    std::string Extra = formatv(" (scc: {0})", C->getName());
+    printIR(C, Banner, Extra);
+    return;
+  }
+
+  if (any_isa<const Loop *>(IR)) {
+    const Loop *L = any_cast<const Loop *>(IR);
+    assert(L && "Loop should be valid for printing");
+    printIR(L, Banner);
+    return;
+  }
+  llvm_unreachable("Unknown wrapped IR type");
+}
+
+} // namespace
+
+PrintIRInstrumentation::~PrintIRInstrumentation() {
+  assert(ModuleDescStack.empty() && "ModuleDescStack is not empty at exit");
+}
+
+void PrintIRInstrumentation::pushModuleDesc(StringRef PassID, Any IR) {
+  assert(StoreModuleDesc);
+  const Module *M = nullptr;
+  std::string Extra;
+  if (auto UnwrappedModule = unwrapModule(IR))
+    std::tie(M, Extra) = UnwrappedModule.getValue();
+  ModuleDescStack.emplace_back(M, Extra, PassID);
+}
+
+PrintIRInstrumentation::PrintModuleDesc
+PrintIRInstrumentation::popModuleDesc(StringRef PassID) {
+  assert(!ModuleDescStack.empty() && "empty ModuleDescStack");
+  PrintModuleDesc ModuleDesc = ModuleDescStack.pop_back_val();
+  assert(std::get<2>(ModuleDesc).equals(PassID) && "malformed ModuleDescStack");
+  return ModuleDesc;
+}
+
+bool PrintIRInstrumentation::printBeforePass(StringRef PassID, Any IR) {
+  if (PassID.startswith("PassManager<") || PassID.contains("PassAdaptor<"))
+    return true;
+
+  // Save the Module for AfterPassInvalidated operations.
+  // Note: here we rely on the fact that we do not change modules while
+  // traversing the pipeline, so the latest captured module is good
+  // for all print operations that have not happened yet.
+  if (StoreModuleDesc && llvm::shouldPrintAfterPass(PassID))
+    pushModuleDesc(PassID, IR);
+
   if (!llvm::shouldPrintBeforePass(PassID))
     return true;
 
-  if (PassID.startswith("PassManager<") || PassID.contains("PassAdaptor<"))
-    return true;
-
   SmallString<20> Banner = formatv("*** IR Dump Before {0} ***", PassID);
-  unwrapAndPrint(Banner, IR);
+  unwrapAndPrint(IR, Banner, llvm::forcePrintModuleIR());
   return true;
 }
 
-void printAfterPass(StringRef PassID, Any IR) {
+void PrintIRInstrumentation::printAfterPass(StringRef PassID, Any IR) {
+  if (PassID.startswith("PassManager<") || PassID.contains("PassAdaptor<"))
+    return;
+
   if (!llvm::shouldPrintAfterPass(PassID))
     return;
 
+  if (StoreModuleDesc)
+    popModuleDesc(PassID);
+
+  SmallString<20> Banner = formatv("*** IR Dump After {0} ***", PassID);
+  unwrapAndPrint(IR, Banner, llvm::forcePrintModuleIR());
+}
+
+void PrintIRInstrumentation::printAfterPassInvalidated(StringRef PassID) {
+  if (!StoreModuleDesc || !llvm::shouldPrintAfterPass(PassID))
+    return;
+
   if (PassID.startswith("PassManager<") || PassID.contains("PassAdaptor<"))
     return;
 
-  SmallString<20> Banner = formatv("*** IR Dump After {0} ***", PassID);
-  unwrapAndPrint(Banner, IR);
-  return;
+  const Module *M;
+  std::string Extra;
+  StringRef StoredPassID;
+  std::tie(M, Extra, StoredPassID) = popModuleDesc(PassID);
+  // Additional filtering (e.g. -filter-print-func) can lead to module
+  // printing being skipped.
+  if (!M)
+    return;
+
+  SmallString<20> Banner =
+      formatv("*** IR Dump After {0} *** invalidated: ", PassID);
+  printIR(M, Banner, Extra);
 }
-} // namespace PrintIR
-} // namespace
+
+void PrintIRInstrumentation::registerCallbacks(
+    PassInstrumentationCallbacks &PIC) {
+  // The BeforePass callback is not just for printing; it also saves a Module
+  // for later use in AfterPassInvalidated.
+  StoreModuleDesc = llvm::forcePrintModuleIR() && llvm::shouldPrintAfterPass();
+  if (llvm::shouldPrintBeforePass() || StoreModuleDesc)
+    PIC.registerBeforePassCallback(
+        [this](StringRef P, Any IR) { return this->printBeforePass(P, IR); });
+
+  if (llvm::shouldPrintAfterPass()) {
+    PIC.registerAfterPassCallback(
+        [this](StringRef P, Any IR) { this->printAfterPass(P, IR); });
+    PIC.registerAfterPassInvalidatedCallback(
+        [this](StringRef P) { this->printAfterPassInvalidated(P); });
+  }
+}
 
 void StandardInstrumentations::registerCallbacks(
     PassInstrumentationCallbacks &PIC) {
-  if (llvm::shouldPrintBeforePass())
-    PIC.registerBeforePassCallback(PrintIR::printBeforePass);
-  if (llvm::shouldPrintAfterPass())
-    PIC.registerAfterPassCallback(PrintIR::printAfterPass);
+  PrintIR.registerCallbacks(PIC);
   TimePasses.registerCallbacks(PIC);
 }
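The hunks above replace the free-function print callbacks with a stateful PrintIRInstrumentation owned by StandardInstrumentations, so -print-after-all can report even passes that invalidate their IR unit. An illustrative sketch of wiring this into a new-pass-manager pipeline follows; the PassBuilder constructor taking a PassInstrumentationCallbacks pointer is an assumption based on this revision's headers, and the driver scaffolding is hypothetical, not part of the patch:

    #include "llvm/IR/PassManager.h"
    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Passes/StandardInstrumentations.h"
    using namespace llvm;

    void runWithIRPrinting(Module &M) {
      PassInstrumentationCallbacks PIC;
      StandardInstrumentations SI;
      SI.registerCallbacks(PIC); // hooks up PrintIR and TimePasses

      PassBuilder PB(/*TM=*/nullptr, /*PGOOpt=*/None, &PIC);
      LoopAnalysisManager LAM;
      FunctionAnalysisManager FAM;
      CGSCCAnalysisManager CGAM;
      ModuleAnalysisManager MAM;
      PB.registerModuleAnalyses(MAM);
      PB.registerCGSCCAnalyses(CGAM);
      PB.registerFunctionAnalyses(FAM);
      PB.registerLoopAnalyses(LAM);
      PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

      // With -print-before-all/-print-after-all on the command line, each
      // pass in this pipeline now dumps IR through PrintIRInstrumentation.
      ModulePassManager MPM = PB.buildPerModuleDefaultPipeline(PassBuilder::O2);
      MPM.run(M, MAM);
    }
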
diff --git a/lib/ProfileData/InstrProf.cpp b/lib/ProfileData/InstrProf.cpp
index 544a77e..aaa8000 100644
--- a/lib/ProfileData/InstrProf.cpp
+++ b/lib/ProfileData/InstrProf.cpp
@@ -252,11 +252,12 @@
 // data, its original linkage must be non-internal.
 std::string getPGOFuncName(const Function &F, bool InLTO, uint64_t Version) {
   if (!InLTO) {
-    StringRef FileName = (StaticFuncFullModulePrefix
-                              ? F.getParent()->getName()
-                              : sys::path::filename(F.getParent()->getName()));
-    if (StaticFuncFullModulePrefix && StaticFuncStripDirNamePrefix != 0)
-      FileName = stripDirPrefix(FileName, StaticFuncStripDirNamePrefix);
+    StringRef FileName(F.getParent()->getSourceFileName());
+    uint32_t StripLevel = StaticFuncFullModulePrefix ? 0 : (uint32_t)-1;
+    if (StripLevel < StaticFuncStripDirNamePrefix)
+      StripLevel = StaticFuncStripDirNamePrefix;
+    if (StripLevel)
+      FileName = stripDirPrefix(FileName, StripLevel);
     return getPGOFuncName(F.getName(), F.getLinkage(), FileName, Version);
   }
 
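The rewrite above switches the prefix to the module's source file name and folds the two options into a single strip level: disabling StaticFuncFullModulePrefix forces UINT32_MAX (filename only), while StaticFuncStripDirNamePrefix can only raise the level. A hypothetical illustration, assuming stripDirPrefix(Name, N) drops N leading directory components:

    // StaticFuncFullModulePrefix = false            -> StripLevel = UINT32_MAX
    //   "src/lib/foo.c"  ->  "foo.c"          (filename only)
    // StaticFuncFullModulePrefix = true, prefix = 0 -> StripLevel = 0
    //   "src/lib/foo.c"  ->  "src/lib/foo.c"  (kept as-is)
    // StaticFuncFullModulePrefix = true, prefix = 1 -> StripLevel = 1
    //   "src/lib/foo.c"  ->  "lib/foo.c"
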
diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp
index cb2a2e5..f7290b5 100644
--- a/lib/Support/CommandLine.cpp
+++ b/lib/Support/CommandLine.cpp
@@ -426,12 +426,17 @@
     return I != Sub.OptionsMap.end() ? I->second : nullptr;
   }
 
-  // If the argument before the = is a valid option name, we match.  If not,
-  // return Arg unmolested.
+  // If the argument before the = is a valid option name and the option allows
+  // the non-prefix form (i.e. is not AlwaysPrefix), we match.  If not, signal
+  // match failure by returning nullptr.
   auto I = Sub.OptionsMap.find(Arg.substr(0, EqualPos));
   if (I == Sub.OptionsMap.end())
     return nullptr;
 
+  auto O = I->second;
+  if (O->getFormattingFlag() == cl::AlwaysPrefix)
+    return nullptr;
+
   Value = Arg.substr(EqualPos + 1);
   Arg = Arg.substr(0, EqualPos);
   return I->second;
@@ -539,7 +544,9 @@
   switch (Handler->getValueExpectedFlag()) {
   case ValueRequired:
     if (!Value.data()) { // No value specified?
-      if (i + 1 >= argc)
+      // If there is no next argument, or the option only supports the prefix
+      // form, we cannot look at the next argument.
+      if (i + 1 >= argc || Handler->getFormattingFlag() == cl::AlwaysPrefix)
         return Handler->error("requires a value!");
       // Steal the next argument, like for '-o filename'
       assert(argv && "null check");
@@ -597,7 +604,8 @@
   return O->getFormattingFlag() == cl::Grouping;
 }
 static inline bool isPrefixedOrGrouping(const Option *O) {
-  return isGrouping(O) || O->getFormattingFlag() == cl::Prefix;
+  return isGrouping(O) || O->getFormattingFlag() == cl::Prefix ||
+         O->getFormattingFlag() == cl::AlwaysPrefix;
 }
 
 // getOptionPred - Check to see if there are any options that satisfy the
@@ -647,7 +655,8 @@
   // If the option is a prefixed option, then the value is simply the
   // rest of the name...  so fall through to later processing, by
   // setting up the argument name flags and value fields.
-  if (PGOpt->getFormattingFlag() == cl::Prefix) {
+  if (PGOpt->getFormattingFlag() == cl::Prefix ||
+      PGOpt->getFormattingFlag() == cl::AlwaysPrefix) {
     Value = Arg.substr(Length);
     Arg = Arg.substr(0, Length);
     assert(OptionsMap.count(Arg) && OptionsMap.find(Arg)->second == PGOpt);
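Together these hunks define the new cl::AlwaysPrefix formatting mode: the value must be joined directly to the option name, with neither the '=' spelling nor a separate argument accepted. A minimal sketch with a hypothetical option, assuming the matching AlwaysPrefix enumerator this patch adds to CommandLine.h:

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    static cl::opt<std::string> Section("sec", cl::AlwaysPrefix,
                                        cl::desc("section name, joined only"));

    // After this patch:
    //   -secfoo   ->  Section == "foo"      (joined form, as with cl::Prefix)
    //   -sec=foo  ->  unknown argument      (the '=' lookup returns nullptr)
    //   -sec foo  ->  "requires a value!"   (next argument never consumed)
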
diff --git a/lib/Support/Error.cpp b/lib/Support/Error.cpp
index 0b0be8d..30bfc3e 100644
--- a/lib/Support/Error.cpp
+++ b/lib/Support/Error.cpp
@@ -54,6 +54,7 @@
 void ErrorInfoBase::anchor() {}
 char ErrorInfoBase::ID = 0;
 char ErrorList::ID = 0;
+void ECError::anchor() {}
 char ECError::ID = 0;
 char StringError::ID = 0;
 char FileError::ID = 0;
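ECError gains an out-of-line anchor() so its vtable is emitted in this file rather than in every translation unit that uses it. The same idiom, sketched for a hypothetical error type (everything below is illustrative, not part of the patch):

    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"

    // In a header: one out-of-line virtual member "homes" the vtable.
    class MyError : public llvm::ErrorInfo<MyError> {
      void anchor() override; // defined in exactly one .cpp file
    public:
      static char ID;
      void log(llvm::raw_ostream &OS) const override { OS << "MyError"; }
      std::error_code convertToErrorCode() const override {
        return llvm::inconvertibleErrorCode();
      }
    };

    // In that one .cpp file:
    void MyError::anchor() {}
    char MyError::ID = 0;
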
diff --git a/lib/Support/FileCheck.cpp b/lib/Support/FileCheck.cpp
index bad8ea2..37986c9 100644
--- a/lib/Support/FileCheck.cpp
+++ b/lib/Support/FileCheck.cpp
@@ -412,9 +412,28 @@
   }
 }
 
+static SMRange ProcessMatchResult(FileCheckDiag::MatchType MatchTy,
+                                  const SourceMgr &SM, SMLoc Loc,
+                                  Check::FileCheckType CheckTy,
+                                  StringRef Buffer, size_t Pos, size_t Len,
+                                  std::vector<FileCheckDiag> *Diags,
+                                  bool AdjustPrevDiag = false) {
+  SMLoc Start = SMLoc::getFromPointer(Buffer.data() + Pos);
+  SMLoc End = SMLoc::getFromPointer(Buffer.data() + Pos + Len);
+  SMRange Range(Start, End);
+  if (Diags) {
+    if (AdjustPrevDiag)
+      Diags->rbegin()->MatchTy = MatchTy;
+    else
+      Diags->emplace_back(SM, CheckTy, Loc, MatchTy, Range);
+  }
+  return Range;
+}
+
 void FileCheckPattern::PrintFuzzyMatch(
     const SourceMgr &SM, StringRef Buffer,
-    const StringMap<StringRef> &VariableTable) const {
+    const StringMap<StringRef> &VariableTable,
+    std::vector<FileCheckDiag> *Diags) const {
   // Attempt to find the closest/best fuzzy match.  Usually an error happens
   // because some string in the output didn't exactly match. In these cases, we
   // would like to show the user a best guess at what "should have" matched, to
@@ -448,8 +467,11 @@
   // reasonable and not equal to what we showed in the "scanning from here"
   // line.
   if (Best && Best != StringRef::npos && BestQuality < 50) {
-    SM.PrintMessage(SMLoc::getFromPointer(Buffer.data() + Best),
-                    SourceMgr::DK_Note, "possible intended match here");
+    SMRange MatchRange =
+        ProcessMatchResult(FileCheckDiag::MatchFuzzy, SM, getLoc(),
+                           getCheckTy(), Buffer, Best, 0, Diags);
+    SM.PrintMessage(MatchRange.Start, SourceMgr::DK_Note,
+                    "possible intended match here");
 
     // FIXME: If we wanted to be really friendly we would show why the match
     // failed, as it can be hard to spot simple one character differences.
@@ -531,6 +553,22 @@
   return StringRef(OutputBuffer.data(), OutputBuffer.size() - 1);
 }
 
+FileCheckDiag::FileCheckDiag(const SourceMgr &SM,
+                             const Check::FileCheckType &CheckTy,
+                             SMLoc CheckLoc, MatchType MatchTy,
+                             SMRange InputRange)
+    : CheckTy(CheckTy), MatchTy(MatchTy) {
+  auto Start = SM.getLineAndColumn(InputRange.Start);
+  auto End = SM.getLineAndColumn(InputRange.End);
+  InputStartLine = Start.first;
+  InputStartCol = Start.second;
+  InputEndLine = End.first;
+  InputEndCol = End.second;
+  Start = SM.getLineAndColumn(CheckLoc);
+  CheckLine = Start.first;
+  CheckCol = Start.second;
+}
+
 static bool IsPartOfWord(char c) {
   return (isalnum(c) || c == '-' || c == '_');
 }
@@ -861,16 +899,18 @@
                        StringRef Prefix, SMLoc Loc, const FileCheckPattern &Pat,
                        int MatchedCount, StringRef Buffer,
                        StringMap<StringRef> &VariableTable, size_t MatchPos,
-                       size_t MatchLen, const FileCheckRequest &Req) {
+                       size_t MatchLen, const FileCheckRequest &Req,
+                       std::vector<FileCheckDiag> *Diags) {
   if (ExpectedMatch) {
     if (!Req.Verbose)
       return;
     if (!Req.VerboseVerbose && Pat.getCheckTy() == Check::CheckEOF)
       return;
   }
-  SMLoc MatchStart = SMLoc::getFromPointer(Buffer.data() + MatchPos);
-  SMLoc MatchEnd = SMLoc::getFromPointer(Buffer.data() + MatchPos + MatchLen);
-  SMRange MatchRange(MatchStart, MatchEnd);
+  SMRange MatchRange = ProcessMatchResult(
+      ExpectedMatch ? FileCheckDiag::MatchFoundAndExpected
+                    : FileCheckDiag::MatchFoundButExcluded,
+      SM, Loc, Pat.getCheckTy(), Buffer, MatchPos, MatchLen, Diags);
   std::string Message = formatv("{0}: {1} string found in input",
                                 Pat.getCheckTy().getDescription(Prefix),
                                 (ExpectedMatch ? "expected" : "excluded"))
@@ -880,24 +920,27 @@
 
   SM.PrintMessage(
       Loc, ExpectedMatch ? SourceMgr::DK_Remark : SourceMgr::DK_Error, Message);
-  SM.PrintMessage(MatchStart, SourceMgr::DK_Note, "found here", {MatchRange});
+  SM.PrintMessage(MatchRange.Start, SourceMgr::DK_Note, "found here",
+                  {MatchRange});
   Pat.PrintVariableUses(SM, Buffer, VariableTable, MatchRange);
 }
 
 static void PrintMatch(bool ExpectedMatch, const SourceMgr &SM,
                        const FileCheckString &CheckStr, int MatchedCount,
                        StringRef Buffer, StringMap<StringRef> &VariableTable,
-                       size_t MatchPos, size_t MatchLen,
-                       FileCheckRequest &Req) {
+                       size_t MatchPos, size_t MatchLen, FileCheckRequest &Req,
+                       std::vector<FileCheckDiag> *Diags) {
   PrintMatch(ExpectedMatch, SM, CheckStr.Prefix, CheckStr.Loc, CheckStr.Pat,
-             MatchedCount, Buffer, VariableTable, MatchPos, MatchLen, Req);
+             MatchedCount, Buffer, VariableTable, MatchPos, MatchLen, Req,
+             Diags);
 }
 
 static void PrintNoMatch(bool ExpectedMatch, const SourceMgr &SM,
                          StringRef Prefix, SMLoc Loc,
                          const FileCheckPattern &Pat, int MatchedCount,
                          StringRef Buffer, StringMap<StringRef> &VariableTable,
-                         bool VerboseVerbose) {
+                         bool VerboseVerbose,
+                         std::vector<FileCheckDiag> *Diags) {
   if (!ExpectedMatch && !VerboseVerbose)
     return;
 
@@ -915,22 +958,26 @@
   // Print the "scanning from here" line.  If the current position is at the
   // end of a line, advance to the start of the next line.
   Buffer = Buffer.substr(Buffer.find_first_not_of(" \t\n\r"));
-
-  SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), SourceMgr::DK_Note,
-                  "scanning from here");
+  SMRange SearchRange = ProcessMatchResult(
+      ExpectedMatch ? FileCheckDiag::MatchNoneButExpected
+                    : FileCheckDiag::MatchNoneAndExcluded,
+      SM, Loc, Pat.getCheckTy(), Buffer, 0, Buffer.size(), Diags);
+  SM.PrintMessage(SearchRange.Start, SourceMgr::DK_Note, "scanning from here");
 
   // Allow the pattern to print additional information if desired.
   Pat.PrintVariableUses(SM, Buffer, VariableTable);
+
   if (ExpectedMatch)
-    Pat.PrintFuzzyMatch(SM, Buffer, VariableTable);
+    Pat.PrintFuzzyMatch(SM, Buffer, VariableTable, Diags);
 }
 
 static void PrintNoMatch(bool ExpectedMatch, const SourceMgr &SM,
                          const FileCheckString &CheckStr, int MatchedCount,
                          StringRef Buffer, StringMap<StringRef> &VariableTable,
-                         bool VerboseVerbose) {
+                         bool VerboseVerbose,
+                         std::vector<FileCheckDiag> *Diags) {
   PrintNoMatch(ExpectedMatch, SM, CheckStr.Prefix, CheckStr.Loc, CheckStr.Pat,
-               MatchedCount, Buffer, VariableTable, VerboseVerbose);
+               MatchedCount, Buffer, VariableTable, VerboseVerbose, Diags);
 }
 
 /// Count the number of newlines in the specified range.
@@ -958,9 +1005,10 @@
 
 /// Match check string and its "not strings" and/or "dag strings".
 size_t FileCheckString::Check(const SourceMgr &SM, StringRef Buffer,
-                          bool IsLabelScanMode, size_t &MatchLen,
-                          StringMap<StringRef> &VariableTable,
-                          FileCheckRequest &Req) const {
+                              bool IsLabelScanMode, size_t &MatchLen,
+                              StringMap<StringRef> &VariableTable,
+                              FileCheckRequest &Req,
+                              std::vector<FileCheckDiag> *Diags) const {
   size_t LastPos = 0;
   std::vector<const FileCheckPattern *> NotStrings;
 
@@ -970,7 +1018,7 @@
   // over the block again (including the last CHECK-LABEL) in normal mode.
   if (!IsLabelScanMode) {
     // Match "dag strings" (with mixed "not strings" if any).
-    LastPos = CheckDag(SM, Buffer, NotStrings, VariableTable, Req);
+    LastPos = CheckDag(SM, Buffer, NotStrings, VariableTable, Req, Diags);
     if (LastPos == StringRef::npos)
       return StringRef::npos;
   }
@@ -992,11 +1040,11 @@
     // report
     if (MatchPos == StringRef::npos) {
       PrintNoMatch(true, SM, *this, i, MatchBuffer, VariableTable,
-                   Req.VerboseVerbose);
+                   Req.VerboseVerbose, Diags);
       return StringRef::npos;
     }
     PrintMatch(true, SM, *this, i, MatchBuffer, VariableTable, MatchPos,
-               CurrentMatchLen, Req);
+               CurrentMatchLen, Req, Diags);
 
     // move start point after the match
     LastMatchEnd += MatchPos + CurrentMatchLen;
@@ -1007,21 +1055,31 @@
   // Similar to the above, in "label-scan mode" we can't yet handle CHECK-NEXT
   // or CHECK-NOT
   if (!IsLabelScanMode) {
-    StringRef SkippedRegion = Buffer.substr(LastPos, FirstMatchPos - LastPos);
+    size_t MatchPos = FirstMatchPos - LastPos;
+    StringRef MatchBuffer = Buffer.substr(LastPos);
+    StringRef SkippedRegion = Buffer.substr(LastPos, MatchPos);
 
     // If this check is a "CHECK-NEXT", verify that the previous match was on
     // the previous line (i.e. that there is one newline between them).
-    if (CheckNext(SM, SkippedRegion))
+    if (CheckNext(SM, SkippedRegion)) {
+      ProcessMatchResult(FileCheckDiag::MatchFoundButWrongLine, SM, Loc,
+                         Pat.getCheckTy(), MatchBuffer, MatchPos, MatchLen,
+                         Diags, Req.Verbose);
       return StringRef::npos;
+    }
 
     // If this check is a "CHECK-SAME", verify that the previous match was on
     // the same line (i.e. that there is no newline between them).
-    if (CheckSame(SM, SkippedRegion))
+    if (CheckSame(SM, SkippedRegion)) {
+      ProcessMatchResult(FileCheckDiag::MatchFoundButWrongLine, SM, Loc,
+                         Pat.getCheckTy(), MatchBuffer, MatchPos, MatchLen,
+                         Diags, Req.Verbose);
       return StringRef::npos;
+    }
 
     // If this match had "not strings", verify that they don't exist in the
     // skipped region.
-    if (CheckNot(SM, SkippedRegion, NotStrings, VariableTable, Req))
+    if (CheckNot(SM, SkippedRegion, NotStrings, VariableTable, Req, Diags))
       return StringRef::npos;
   }
 
@@ -1104,10 +1162,11 @@
 }
 
 /// Verify there's no "not strings" in the given buffer.
-bool FileCheckString::CheckNot(const SourceMgr &SM, StringRef Buffer,
-                           const std::vector<const FileCheckPattern *> &NotStrings,
-                           StringMap<StringRef> &VariableTable,
-                           const FileCheckRequest &Req) const {
+bool FileCheckString::CheckNot(
+    const SourceMgr &SM, StringRef Buffer,
+    const std::vector<const FileCheckPattern *> &NotStrings,
+    StringMap<StringRef> &VariableTable, const FileCheckRequest &Req,
+    std::vector<FileCheckDiag> *Diags) const {
   for (const FileCheckPattern *Pat : NotStrings) {
     assert((Pat->getCheckTy() == Check::CheckNot) && "Expect CHECK-NOT!");
 
@@ -1116,12 +1175,12 @@
 
     if (Pos == StringRef::npos) {
       PrintNoMatch(false, SM, Prefix, Pat->getLoc(), *Pat, 1, Buffer,
-                   VariableTable, Req.VerboseVerbose);
+                   VariableTable, Req.VerboseVerbose, Diags);
       continue;
     }
 
     PrintMatch(false, SM, Prefix, Pat->getLoc(), *Pat, 1, Buffer, VariableTable,
-               Pos, MatchLen, Req);
+               Pos, MatchLen, Req, Diags);
 
     return true;
   }
@@ -1130,10 +1189,12 @@
 }
 
 /// Match "dag strings" and their mixed "not strings".
-size_t FileCheckString::CheckDag(const SourceMgr &SM, StringRef Buffer,
-                             std::vector<const FileCheckPattern *> &NotStrings,
-                             StringMap<StringRef> &VariableTable,
-                             const FileCheckRequest &Req) const {
+size_t
+FileCheckString::CheckDag(const SourceMgr &SM, StringRef Buffer,
+                          std::vector<const FileCheckPattern *> &NotStrings,
+                          StringMap<StringRef> &VariableTable,
+                          const FileCheckRequest &Req,
+                          std::vector<FileCheckDiag> *Diags) const {
   if (DagNotStrings.empty())
     return 0;
 
@@ -1177,14 +1238,14 @@
       // that group of CHECK-DAGs fails immediately.
       if (MatchPosBuf == StringRef::npos) {
         PrintNoMatch(true, SM, Prefix, Pat.getLoc(), Pat, 1, MatchBuffer,
-                     VariableTable, Req.VerboseVerbose);
+                     VariableTable, Req.VerboseVerbose, Diags);
         return StringRef::npos;
       }
       // Re-calc it as the offset relative to the start of the original string.
       MatchPos += MatchPosBuf;
       if (Req.VerboseVerbose)
         PrintMatch(true, SM, Prefix, Pat.getLoc(), Pat, 1, Buffer,
-                   VariableTable, MatchPos, MatchLen, Req);
+                   VariableTable, MatchPos, MatchLen, Req, Diags);
       MatchRange M{MatchPos, MatchPos + MatchLen};
       if (Req.AllowDeprecatedDagOverlap) {
         // We don't need to track all matches in this mode, so we just maintain
@@ -1221,12 +1282,14 @@
         SM.PrintMessage(OldStart, SourceMgr::DK_Note,
                         "match discarded, overlaps earlier DAG match here",
                         {OldRange});
+        if (Diags)
+          Diags->rbegin()->MatchTy = FileCheckDiag::MatchFoundButDiscarded;
       }
       MatchPos = MI->End;
     }
     if (!Req.VerboseVerbose)
       PrintMatch(true, SM, Prefix, Pat.getLoc(), Pat, 1, Buffer, VariableTable,
-                 MatchPos, MatchLen, Req);
+                 MatchPos, MatchLen, Req, Diags);
 
     // Handle the end of a CHECK-DAG group.
     if (std::next(PatItr) == PatEnd ||
@@ -1237,7 +1300,7 @@
         // region.
         StringRef SkippedRegion =
             Buffer.slice(StartPos, MatchRanges.begin()->Pos);
-        if (CheckNot(SM, SkippedRegion, NotStrings, VariableTable, Req))
+        if (CheckNot(SM, SkippedRegion, NotStrings, VariableTable, Req, Diags))
           return StringRef::npos;
         // Clear "not strings".
         NotStrings.clear();
@@ -1318,7 +1381,8 @@
 ///
 /// Returns false if the input fails to satisfy the checks.
 bool llvm::FileCheck::CheckInput(SourceMgr &SM, StringRef Buffer,
-                ArrayRef<FileCheckString> CheckStrings) {
+                                 ArrayRef<FileCheckString> CheckStrings,
+                                 std::vector<FileCheckDiag> *Diags) {
   bool ChecksFailed = false;
 
   /// VariableTable - This holds all the current filecheck variables.
@@ -1341,9 +1405,8 @@
 
       // Scan to next CHECK-LABEL match, ignoring CHECK-NOT and CHECK-DAG
       size_t MatchLabelLen = 0;
-      size_t MatchLabelPos =
-          CheckLabelStr.Check(SM, Buffer, true, MatchLabelLen, VariableTable,
-                              Req);
+      size_t MatchLabelPos = CheckLabelStr.Check(
+          SM, Buffer, true, MatchLabelLen, VariableTable, Req, Diags);
       if (MatchLabelPos == StringRef::npos)
         // Immediately bail if CHECK-LABEL fails; nothing else we can do.
         return false;
@@ -1362,8 +1425,8 @@
       // Check each string within the scanned region, including a second check
       // of any final CHECK-LABEL (to verify CHECK-NOT and CHECK-DAG)
       size_t MatchLen = 0;
-      size_t MatchPos =
-          CheckStr.Check(SM, CheckRegion, false, MatchLen, VariableTable, Req);
+      size_t MatchPos = CheckStr.Check(SM, CheckRegion, false, MatchLen,
+                                       VariableTable, Req, Diags);
 
       if (MatchPos == StringRef::npos) {
         ChecksFailed = true;
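With Diags threaded through every Check/CheckDag/CheckNot path above, a caller can now recover a structured record of each match attempt instead of parsing stderr. A sketch of consuming it; the field names come from the FileCheckDiag constructor above, while the surrounding setup is assumed to be the usual FileCheck tool boilerplate:

    // SM, FC (llvm::FileCheck), InputFileText, and CheckStrings are assumed
    // to be prepared as in the FileCheck tool itself (elided here).
    std::vector<FileCheckDiag> Diags;
    bool Ok = FC.CheckInput(SM, InputFileText, CheckStrings, &Diags);
    for (const FileCheckDiag &Diag : Diags)
      llvm::errs() << "check at line " << Diag.CheckLine << ":" << Diag.CheckCol
                   << " -> input " << Diag.InputStartLine << ":"
                   << Diag.InputStartCol << "-" << Diag.InputEndLine << ":"
                   << Diag.InputEndCol << ", match type "
                   << static_cast<int>(Diag.MatchTy) << "\n";
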
diff --git a/lib/Support/FormatVariadic.cpp b/lib/Support/FormatVariadic.cpp
index 6dd133e..1f3505d 100644
--- a/lib/Support/FormatVariadic.cpp
+++ b/lib/Support/FormatVariadic.cpp
@@ -152,3 +152,5 @@
   }
   return Replacements;
 }
+
+void detail::format_adapter::anchor() { }
diff --git a/lib/Support/Path.cpp b/lib/Support/Path.cpp
index a3e6941..5ce2f50 100644
--- a/lib/Support/Path.cpp
+++ b/lib/Support/Path.cpp
@@ -849,9 +849,8 @@
   return createTemporaryFile(Prefix, Suffix, Dummy, ResultPath, FS_Name);
 }
 
-static std::error_code make_absolute(const Twine &current_directory,
-                                     SmallVectorImpl<char> &path,
-                                     bool use_current_directory) {
+void make_absolute(const Twine &current_directory,
+                   SmallVectorImpl<char> &path) {
   StringRef p(path.data(), path.size());
 
   bool rootDirectory = path::has_root_directory(p);
@@ -860,14 +859,11 @@
 
   // Already absolute.
   if (rootName && rootDirectory)
-    return std::error_code();
+    return;
 
   // All of the following conditions will need the current directory.
   SmallString<128> current_dir;
-  if (use_current_directory)
-    current_directory.toVector(current_dir);
-  else if (std::error_code ec = current_path(current_dir))
-    return ec;
+  current_directory.toVector(current_dir);
 
   // Relative path. Prepend the current directory.
   if (!rootName && !rootDirectory) {
@@ -875,7 +871,7 @@
     path::append(current_dir, p);
     // Set path to the result.
     path.swap(current_dir);
-    return std::error_code();
+    return;
   }
 
   if (!rootName && rootDirectory) {
@@ -884,7 +880,7 @@
     path::append(curDirRootName, p);
     // Set path to the result.
     path.swap(curDirRootName);
-    return std::error_code();
+    return;
   }
 
   if (rootName && !rootDirectory) {
@@ -896,20 +892,23 @@
     SmallString<128> res;
     path::append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath);
     path.swap(res);
-    return std::error_code();
+    return;
   }
 
   llvm_unreachable("All rootName and rootDirectory combinations should have "
                    "occurred above!");
 }
 
-std::error_code make_absolute(const Twine &current_directory,
-                              SmallVectorImpl<char> &path) {
-  return make_absolute(current_directory, path, true);
-}
-
 std::error_code make_absolute(SmallVectorImpl<char> &path) {
-  return make_absolute(Twine(), path, false);
+  if (path::is_absolute(path))
+    return {};
+
+  SmallString<128> current_dir;
+  if (std::error_code ec = current_path(current_dir))
+    return ec;
+
+  make_absolute(current_dir, path);
+  return {};
 }
 
 std::error_code create_directories(const Twine &Path, bool IgnoreExisting,
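make_absolute is thus split into an infallible two-argument form, where the caller supplies the base directory, and a fallible one-argument form that still has to query the process working directory. A short sketch, assuming a POSIX-style host path:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/FileSystem.h"

    llvm::SmallString<128> P("docs/guide.md");
    llvm::sys::fs::make_absolute("/work", P); // P == "/work/docs/guide.md"

    llvm::SmallString<128> Q("guide.md");
    if (std::error_code EC = llvm::sys::fs::make_absolute(Q)) {
      // only possible failure: the current directory could not be read
    }
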
diff --git a/lib/Support/Signals.cpp b/lib/Support/Signals.cpp
index 6534ff6..333f492 100644
--- a/lib/Support/Signals.cpp
+++ b/lib/Support/Signals.cpp
@@ -20,6 +20,8 @@
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/FileUtilities.h"
 #include "llvm/Support/Format.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/FormatAdapters.h"
 #include "llvm/Support/ManagedStatic.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Mutex.h"
@@ -155,7 +157,7 @@
   }
 
   Optional<StringRef> Redirects[] = {StringRef(InputFile),
-                                     StringRef(OutputFile), llvm::None};
+                                     StringRef(OutputFile), StringRef("")};
   StringRef Args[] = {"llvm-symbolizer", "--functions=linkage", "--inlining",
 #ifdef _WIN32
                       // Pass --relative-address on Windows so that we don't
@@ -180,8 +182,14 @@
   auto CurLine = Lines.begin();
   int frame_no = 0;
   for (int i = 0; i < Depth; i++) {
+    auto PrintLineHeader = [&]() {
+      OS << right_justify(formatv("#{0}", frame_no++).str(),
+                          std::log10(Depth) + 2)
+         << ' ' << format_ptr(StackTrace[i]) << ' ';
+    };
     if (!Modules[i]) {
-      OS << '#' << frame_no++ << ' ' << format_ptr(StackTrace[i]) << '\n';
+      PrintLineHeader();
+      OS << '\n';
       continue;
     }
     // Read pairs of lines (function name and file/line info) until we
@@ -192,7 +200,7 @@
       StringRef FunctionName = *CurLine++;
       if (FunctionName.empty())
         break;
-      OS << '#' << frame_no++ << ' ' << format_ptr(StackTrace[i]) << ' ';
+      PrintLineHeader();
       if (!FunctionName.startswith("??"))
         OS << FunctionName << ' ';
       if (CurLine == Lines.end())
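The right_justify width above is derived from the stack depth rather than hard-coded, so frame numbers stay aligned however deep the trace is. A worked example of the arithmetic:

    // For Depth = 120 frames:
    //   std::log10(120) + 2 = 4.079..., truncated to a field width of 4,
    //   so frame 0 prints as "  #0" and frame 119 as "#119",
    //   keeping the addresses aligned in one column.
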
diff --git a/lib/Support/Triple.cpp b/lib/Support/Triple.cpp
index 4471fd0..26d9327 100644
--- a/lib/Support/Triple.cpp
+++ b/lib/Support/Triple.cpp
@@ -35,7 +35,6 @@
   case mips64:         return "mips64";
   case mips64el:       return "mips64el";
   case msp430:         return "msp430";
-  case nios2:          return "nios2";
   case ppc64:          return "powerpc64";
   case ppc64le:        return "powerpc64le";
   case ppc:            return "powerpc";
@@ -102,8 +101,6 @@
   case mips64:
   case mips64el:    return "mips";
 
-  case nios2:       return "nios2";
-
   case hexagon:     return "hexagon";
 
   case amdgcn:      return "amdgcn";
@@ -211,6 +208,7 @@
   case AMDPAL: return "amdpal";
   case HermitCore: return "hermit";
   case Hurd: return "hurd";
+  case WASI: return "wasi";
   }
 
   llvm_unreachable("Invalid OSType");
@@ -273,7 +271,6 @@
     .Case("mips64", mips64)
     .Case("mips64el", mips64el)
     .Case("msp430", msp430)
-    .Case("nios2", nios2)
     .Case("ppc64", ppc64)
     .Case("ppc32", ppc)
     .Case("ppc", ppc)
@@ -408,7 +405,6 @@
            "mips64r6", "mipsn32r6", Triple::mips64)
     .Cases("mips64el", "mipsn32el", "mipsisa64r6el", "mips64r6el",
            "mipsn32r6el", Triple::mips64el)
-    .Case("nios2", Triple::nios2)
     .Case("r600", Triple::r600)
     .Case("amdgcn", Triple::amdgcn)
     .Case("riscv32", Triple::riscv32)
@@ -510,6 +506,7 @@
     .StartsWith("amdpal", Triple::AMDPAL)
     .StartsWith("hermit", Triple::HermitCore)
     .StartsWith("hurd", Triple::Hurd)
+    .StartsWith("wasi", Triple::WASI)
     .Default(Triple::UnknownOS);
 }
 
@@ -665,7 +662,6 @@
   case Triple::mips64el:
   case Triple::mipsel:
   case Triple::msp430:
-  case Triple::nios2:
   case Triple::nvptx:
   case Triple::nvptx64:
   case Triple::ppc64le:
@@ -1223,7 +1219,6 @@
   case llvm::Triple::le32:
   case llvm::Triple::mips:
   case llvm::Triple::mipsel:
-  case llvm::Triple::nios2:
   case llvm::Triple::nvptx:
   case llvm::Triple::ppc:
   case llvm::Triple::r600:
@@ -1308,7 +1303,6 @@
   case Triple::le32:
   case Triple::mips:
   case Triple::mipsel:
-  case Triple::nios2:
   case Triple::nvptx:
   case Triple::ppc:
   case Triple::r600:
@@ -1357,7 +1351,6 @@
   case Triple::kalimba:
   case Triple::lanai:
   case Triple::msp430:
-  case Triple::nios2:
   case Triple::r600:
   case Triple::tce:
   case Triple::tcele:
@@ -1429,7 +1422,6 @@
   case Triple::le32:
   case Triple::le64:
   case Triple::msp430:
-  case Triple::nios2:
   case Triple::nvptx64:
   case Triple::nvptx:
   case Triple::r600:
@@ -1516,7 +1508,6 @@
   case Triple::mips64el:
   case Triple::mipsel:
   case Triple::msp430:
-  case Triple::nios2:
   case Triple::nvptx64:
   case Triple::nvptx:
   case Triple::ppc64le:
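Besides dropping the nios2 target, this file teaches the triple parser about WASI. A minimal sketch, assuming the usual wasm32 architecture component:

    #include "llvm/ADT/Triple.h"

    llvm::Triple T("wasm32-unknown-wasi");
    bool IsWasi = T.getOS() == llvm::Triple::WASI; // true after this patch
    // A "nios2-unknown-linux" triple, by contrast, now yields
    // Triple::UnknownArch, since the nios2 cases were removed.
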
diff --git a/lib/Support/Unix/Path.inc b/lib/Support/Unix/Path.inc
index d130b33..d7cc0d6 100644
--- a/lib/Support/Unix/Path.inc
+++ b/lib/Support/Unix/Path.inc
@@ -56,7 +56,7 @@
 
 #include <sys/types.h>
 #if !defined(__APPLE__) && !defined(__OpenBSD__) && !defined(__FreeBSD__) &&   \
-    !defined(__linux__)
+    !defined(__linux__) && !defined(__FreeBSD_kernel__)
 #include <sys/statvfs.h>
 #define STATVFS statvfs
 #define FSTATVFS fstatvfs
diff --git a/lib/Support/VirtualFileSystem.cpp b/lib/Support/VirtualFileSystem.cpp
index 2458194..f2a8a1b 100644
--- a/lib/Support/VirtualFileSystem.cpp
+++ b/lib/Support/VirtualFileSystem.cpp
@@ -128,7 +128,8 @@
   if (!WorkingDir)
     return WorkingDir.getError();
 
-  return llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
+  llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
+  return {};
 }
 
 std::error_code FileSystem::getRealPath(const Twine &Path,
@@ -472,6 +473,8 @@
       std::make_shared<OverlayFSDirIterImpl>(Dir, *this, EC));
 }
 
+void ProxyFileSystem::anchor() {}
+
 namespace llvm {
 namespace vfs {
 
@@ -941,84 +944,12 @@
 // RedirectingFileSystem implementation
 //===-----------------------------------------------------------------------===/
 
-namespace {
-
-enum EntryKind { EK_Directory, EK_File };
-
-/// A single file or directory in the VFS.
-class Entry {
-  EntryKind Kind;
-  std::string Name;
-
-public:
-  Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
-  virtual ~Entry() = default;
-
-  StringRef getName() const { return Name; }
-  EntryKind getKind() const { return Kind; }
-};
-
-class RedirectingDirectoryEntry : public Entry {
-  std::vector<std::unique_ptr<Entry>> Contents;
-  Status S;
-
-public:
-  RedirectingDirectoryEntry(StringRef Name,
-                            std::vector<std::unique_ptr<Entry>> Contents,
-                            Status S)
-      : Entry(EK_Directory, Name), Contents(std::move(Contents)),
-        S(std::move(S)) {}
-  RedirectingDirectoryEntry(StringRef Name, Status S)
-      : Entry(EK_Directory, Name), S(std::move(S)) {}
-
-  Status getStatus() { return S; }
-
-  void addContent(std::unique_ptr<Entry> Content) {
-    Contents.push_back(std::move(Content));
-  }
-
-  Entry *getLastContent() const { return Contents.back().get(); }
-
-  using iterator = decltype(Contents)::iterator;
-
-  iterator contents_begin() { return Contents.begin(); }
-  iterator contents_end() { return Contents.end(); }
-
-  static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
-};
-
-class RedirectingFileEntry : public Entry {
-public:
-  enum NameKind { NK_NotSet, NK_External, NK_Virtual };
-
-private:
-  std::string ExternalContentsPath;
-  NameKind UseName;
-
-public:
-  RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
-                       NameKind UseName)
-      : Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
-        UseName(UseName) {}
-
-  StringRef getExternalContentsPath() const { return ExternalContentsPath; }
-
-  /// whether to use the external path as the name for this file.
-  bool useExternalName(bool GlobalUseExternalName) const {
-    return UseName == NK_NotSet ? GlobalUseExternalName
-                                : (UseName == NK_External);
-  }
-
-  NameKind getUseName() const { return UseName; }
-
-  static bool classof(const Entry *E) { return E->getKind() == EK_File; }
-};
-
 // FIXME: reuse implementation common with OverlayFSDirIterImpl as these
 // iterators are conceptually similar.
-class VFSFromYamlDirIterImpl : public llvm::vfs::detail::DirIterImpl {
+class llvm::vfs::VFSFromYamlDirIterImpl
+    : public llvm::vfs::detail::DirIterImpl {
   std::string Dir;
-  RedirectingDirectoryEntry::iterator Current, End;
+  RedirectingFileSystem::RedirectingDirectoryEntry::iterator Current, End;
 
   // To handle 'fallthrough' mode we need to iterate at first through
   // RedirectingDirectoryEntry and then through ExternalFS. These operations are
@@ -1048,216 +979,92 @@
   /// @}
 
 public:
-  VFSFromYamlDirIterImpl(const Twine &Path,
-                         RedirectingDirectoryEntry::iterator Begin,
-                         RedirectingDirectoryEntry::iterator End,
-                         bool IterateExternalFS, FileSystem &ExternalFS,
-                         std::error_code &EC);
+  VFSFromYamlDirIterImpl(
+      const Twine &Path,
+      RedirectingFileSystem::RedirectingDirectoryEntry::iterator Begin,
+      RedirectingFileSystem::RedirectingDirectoryEntry::iterator End,
+      bool IterateExternalFS, FileSystem &ExternalFS, std::error_code &EC);
 
   std::error_code increment() override;
 };
 
-/// A virtual file system parsed from a YAML file.
-///
-/// Currently, this class allows creating virtual directories and mapping
-/// virtual file paths to existing external files, available in \c ExternalFS.
-///
-/// The basic structure of the parsed file is:
-/// \verbatim
-/// {
-///   'version': <version number>,
-///   <optional configuration>
-///   'roots': [
-///              <directory entries>
-///            ]
-/// }
-/// \endverbatim
-///
-/// All configuration options are optional.
-///   'case-sensitive': <boolean, default=true>
-///   'use-external-names': <boolean, default=true>
-///   'overlay-relative': <boolean, default=false>
-///   'fallthrough': <boolean, default=true>
-///
-/// Virtual directories are represented as
-/// \verbatim
-/// {
-///   'type': 'directory',
-///   'name': <string>,
-///   'contents': [ <file or directory entries> ]
-/// }
-/// \endverbatim
-///
-/// The default attributes for virtual directories are:
-/// \verbatim
-/// MTime = now() when created
-/// Perms = 0777
-/// User = Group = 0
-/// Size = 0
-/// UniqueID = unspecified unique value
-/// \endverbatim
-///
-/// Re-mapped files are represented as
-/// \verbatim
-/// {
-///   'type': 'file',
-///   'name': <string>,
-///   'use-external-name': <boolean> # Optional
-///   'external-contents': <path to external file>
-/// }
-/// \endverbatim
-///
-/// and inherit their attributes from the external contents.
-///
-/// In both cases, the 'name' field may contain multiple path components (e.g.
-/// /path/to/file). However, any directory that contains more than one child
-/// must be uniquely represented by a directory entry.
-class RedirectingFileSystem : public vfs::FileSystem {
-  friend class RedirectingFileSystemParser;
+llvm::ErrorOr<std::string>
+RedirectingFileSystem::getCurrentWorkingDirectory() const {
+  return ExternalFS->getCurrentWorkingDirectory();
+}
 
-  /// The root(s) of the virtual file system.
-  std::vector<std::unique_ptr<Entry>> Roots;
+std::error_code
+RedirectingFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
+  return ExternalFS->setCurrentWorkingDirectory(Path);
+}
 
-  /// The file system to use for external references.
-  IntrusiveRefCntPtr<FileSystem> ExternalFS;
+std::error_code RedirectingFileSystem::isLocal(const Twine &Path,
+                                               bool &Result) {
+  return ExternalFS->isLocal(Path, Result);
+}
 
-  /// If IsRelativeOverlay is set, this represents the directory
-  /// path that should be prefixed to each 'external-contents' entry
-  /// when reading from YAML files.
-  std::string ExternalContentsPrefixDir;
-
-  /// @name Configuration
-  /// @{
-
-  /// Whether to perform case-sensitive comparisons.
-  ///
-  /// Currently, case-insensitive matching only works correctly with ASCII.
-  bool CaseSensitive = true;
-
-  /// IsRelativeOverlay marks whether a ExternalContentsPrefixDir path must
-  /// be prefixed in every 'external-contents' when reading from YAML files.
-  bool IsRelativeOverlay = false;
-
-  /// Whether to use to use the value of 'external-contents' for the
-  /// names of files.  This global value is overridable on a per-file basis.
-  bool UseExternalNames = true;
-
-  /// Whether to attempt a file lookup in external file system after it wasn't
-  /// found in VFS.
-  bool IsFallthrough = true;
-  /// @}
-
-  /// Virtual file paths and external files could be canonicalized without "..",
-  /// "." and "./" in their paths. FIXME: some unittests currently fail on
-  /// win32 when using remove_dots and remove_leading_dotslash on paths.
-  bool UseCanonicalizedPaths =
-#ifdef _WIN32
-      false;
-#else
-      true;
-#endif
-
-private:
-  RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
-      : ExternalFS(std::move(ExternalFS)) {}
-
-  /// Looks up the path <tt>[Start, End)</tt> in \p From, possibly
-  /// recursing into the contents of \p From if it is a directory.
-  ErrorOr<Entry *> lookupPath(sys::path::const_iterator Start,
-                              sys::path::const_iterator End, Entry *From) const;
-
-  /// Get the status of a given an \c Entry.
-  ErrorOr<Status> status(const Twine &Path, Entry *E);
-
-public:
-  /// Looks up \p Path in \c Roots.
-  ErrorOr<Entry *> lookupPath(const Twine &Path) const;
-
-  /// Parses \p Buffer, which is expected to be in YAML format and
-  /// returns a virtual file system representing its contents.
-  static RedirectingFileSystem *
-  create(std::unique_ptr<MemoryBuffer> Buffer,
-         SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
-         void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS);
-
-  ErrorOr<Status> status(const Twine &Path) override;
-  ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
-
-  std::error_code getRealPath(const Twine &Path,
-                              SmallVectorImpl<char> &Output) const override;
-
-  llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
-    return ExternalFS->getCurrentWorkingDirectory();
+directory_iterator RedirectingFileSystem::dir_begin(const Twine &Dir,
+                                                    std::error_code &EC) {
+  ErrorOr<RedirectingFileSystem::Entry *> E = lookupPath(Dir);
+  if (!E) {
+    EC = E.getError();
+    if (IsFallthrough && EC == errc::no_such_file_or_directory)
+      return ExternalFS->dir_begin(Dir, EC);
+    return {};
+  }
+  ErrorOr<Status> S = status(Dir, *E);
+  if (!S) {
+    EC = S.getError();
+    return {};
+  }
+  if (!S->isDirectory()) {
+    EC = std::error_code(static_cast<int>(errc::not_a_directory),
+                         std::system_category());
+    return {};
   }
 
-  std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
-    return ExternalFS->setCurrentWorkingDirectory(Path);
-  }
+  auto *D = cast<RedirectingFileSystem::RedirectingDirectoryEntry>(*E);
+  return directory_iterator(std::make_shared<VFSFromYamlDirIterImpl>(
+      Dir, D->contents_begin(), D->contents_end(),
+      /*IterateExternalFS=*/IsFallthrough, *ExternalFS, EC));
+}
 
-  std::error_code isLocal(const Twine &Path, bool &Result) override {
-    return ExternalFS->isLocal(Path, Result);
-  }
+void RedirectingFileSystem::setExternalContentsPrefixDir(StringRef PrefixDir) {
+  ExternalContentsPrefixDir = PrefixDir.str();
+}
 
-  directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override {
-    ErrorOr<Entry *> E = lookupPath(Dir);
-    if (!E) {
-      EC = E.getError();
-      if (IsFallthrough && EC == errc::no_such_file_or_directory)
-        return ExternalFS->dir_begin(Dir, EC);
-      return {};
-    }
-    ErrorOr<Status> S = status(Dir, *E);
-    if (!S) {
-      EC = S.getError();
-      return {};
-    }
-    if (!S->isDirectory()) {
-      EC = std::error_code(static_cast<int>(errc::not_a_directory),
-                           std::system_category());
-      return {};
-    }
-
-    auto *D = cast<RedirectingDirectoryEntry>(*E);
-    return directory_iterator(std::make_shared<VFSFromYamlDirIterImpl>(
-        Dir, D->contents_begin(), D->contents_end(),
-        /*IterateExternalFS=*/IsFallthrough, *ExternalFS, EC));
-  }
-
-  void setExternalContentsPrefixDir(StringRef PrefixDir) {
-    ExternalContentsPrefixDir = PrefixDir.str();
-  }
-
-  StringRef getExternalContentsPrefixDir() const {
-    return ExternalContentsPrefixDir;
-  }
+StringRef RedirectingFileSystem::getExternalContentsPrefixDir() const {
+  return ExternalContentsPrefixDir;
+}
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-  LLVM_DUMP_METHOD void dump() const {
-    for (const auto &Root : Roots)
-      dumpEntry(Root.get());
+LLVM_DUMP_METHOD void RedirectingFileSystem::dump() const {
+  for (const auto &Root : Roots)
+    dumpEntry(Root.get());
+}
+
+LLVM_DUMP_METHOD void
+RedirectingFileSystem::dumpEntry(RedirectingFileSystem::Entry *E,
+                                 int NumSpaces) const {
+  StringRef Name = E->getName();
+  for (int i = 0, e = NumSpaces; i < e; ++i)
+    dbgs() << " ";
+  dbgs() << "'" << Name.str().c_str() << "'"
+         << "\n";
+
+  if (E->getKind() == RedirectingFileSystem::EK_Directory) {
+    auto *DE = dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(E);
+    assert(DE && "Should be a directory");
+
+    for (std::unique_ptr<Entry> &SubEntry :
+         llvm::make_range(DE->contents_begin(), DE->contents_end()))
+      dumpEntry(SubEntry.get(), NumSpaces + 2);
   }
-
-  LLVM_DUMP_METHOD void dumpEntry(Entry *E, int NumSpaces = 0) const {
-    StringRef Name = E->getName();
-    for (int i = 0, e = NumSpaces; i < e; ++i)
-      dbgs() << " ";
-    dbgs() << "'" << Name.str().c_str() << "'"
-           << "\n";
-
-    if (E->getKind() == EK_Directory) {
-      auto *DE = dyn_cast<RedirectingDirectoryEntry>(E);
-      assert(DE && "Should be a directory");
-
-      for (std::unique_ptr<Entry> &SubEntry :
-           llvm::make_range(DE->contents_begin(), DE->contents_end()))
-        dumpEntry(SubEntry.get(), NumSpaces + 2);
-    }
-  }
+}
 #endif
-};
 
 /// A helper class to hold the common YAML parsing state.
-class RedirectingFileSystemParser {
+class llvm::vfs::RedirectingFileSystemParser {
   yaml::Stream &Stream;
 
   void error(yaml::Node *N, const Twine &Msg) { Stream.printError(N, Msg); }
@@ -1332,8 +1139,9 @@
     return true;
   }
 
-  Entry *lookupOrCreateEntry(RedirectingFileSystem *FS, StringRef Name,
-                             Entry *ParentEntry = nullptr) {
+  RedirectingFileSystem::Entry *
+  lookupOrCreateEntry(RedirectingFileSystem *FS, StringRef Name,
+                      RedirectingFileSystem::Entry *ParentEntry = nullptr) {
     if (!ParentEntry) { // Look for an existing root
       for (const auto &Root : FS->Roots) {
         if (Name.equals(Root->getName())) {
@@ -1342,20 +1150,24 @@
         }
       }
     } else { // Advance to the next component
-      auto *DE = dyn_cast<RedirectingDirectoryEntry>(ParentEntry);
-      for (std::unique_ptr<Entry> &Content :
+      auto *DE = dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(
+          ParentEntry);
+      for (std::unique_ptr<RedirectingFileSystem::Entry> &Content :
            llvm::make_range(DE->contents_begin(), DE->contents_end())) {
-        auto *DirContent = dyn_cast<RedirectingDirectoryEntry>(Content.get());
+        auto *DirContent =
+            dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(
+                Content.get());
         if (DirContent && Name.equals(Content->getName()))
           return DirContent;
       }
     }
 
     // ... or create a new one
-    std::unique_ptr<Entry> E = llvm::make_unique<RedirectingDirectoryEntry>(
-        Name,
-        Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
-               0, 0, 0, file_type::directory_file, sys::fs::all_all));
+    std::unique_ptr<RedirectingFileSystem::Entry> E =
+        llvm::make_unique<RedirectingFileSystem::RedirectingDirectoryEntry>(
+            Name, Status("", getNextVirtualUniqueID(),
+                         std::chrono::system_clock::now(), 0, 0, 0,
+                         file_type::directory_file, sys::fs::all_all));
 
     if (!ParentEntry) { // Add a new root to the overlay
       FS->Roots.push_back(std::move(E));
@@ -1363,42 +1175,47 @@
       return ParentEntry;
     }
 
-    auto *DE = dyn_cast<RedirectingDirectoryEntry>(ParentEntry);
+    auto *DE =
+        dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(ParentEntry);
     DE->addContent(std::move(E));
     return DE->getLastContent();
   }
 
-  void uniqueOverlayTree(RedirectingFileSystem *FS, Entry *SrcE,
-                         Entry *NewParentE = nullptr) {
+  void uniqueOverlayTree(RedirectingFileSystem *FS,
+                         RedirectingFileSystem::Entry *SrcE,
+                         RedirectingFileSystem::Entry *NewParentE = nullptr) {
     StringRef Name = SrcE->getName();
     switch (SrcE->getKind()) {
-    case EK_Directory: {
-      auto *DE = dyn_cast<RedirectingDirectoryEntry>(SrcE);
+    case RedirectingFileSystem::EK_Directory: {
+      auto *DE =
+          dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(SrcE);
       assert(DE && "Must be a directory");
       // Empty directories could be present in the YAML as a way to
       // describe a file for the current directory after some of its
       // subdirectories have been parsed. This only leads to redundant
       // walks; ignore it.
       if (!Name.empty())
         NewParentE = lookupOrCreateEntry(FS, Name, NewParentE);
-      for (std::unique_ptr<Entry> &SubEntry :
+      for (std::unique_ptr<RedirectingFileSystem::Entry> &SubEntry :
            llvm::make_range(DE->contents_begin(), DE->contents_end()))
         uniqueOverlayTree(FS, SubEntry.get(), NewParentE);
       break;
     }
-    case EK_File: {
-      auto *FE = dyn_cast<RedirectingFileEntry>(SrcE);
+    case RedirectingFileSystem::EK_File: {
+      auto *FE = dyn_cast<RedirectingFileSystem::RedirectingFileEntry>(SrcE);
       assert(FE && "Must be a file");
       assert(NewParentE && "Parent entry must exist");
-      auto *DE = dyn_cast<RedirectingDirectoryEntry>(NewParentE);
-      DE->addContent(llvm::make_unique<RedirectingFileEntry>(
-          Name, FE->getExternalContentsPath(), FE->getUseName()));
+      auto *DE = dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(
+          NewParentE);
+      DE->addContent(
+          llvm::make_unique<RedirectingFileSystem::RedirectingFileEntry>(
+              Name, FE->getExternalContentsPath(), FE->getUseName()));
       break;
     }
     }
   }
 
-  std::unique_ptr<Entry> parseEntry(yaml::Node *N, RedirectingFileSystem *FS,
-                                    bool IsRootEntry) {
+  std::unique_ptr<RedirectingFileSystem::Entry>
+  parseEntry(yaml::Node *N, RedirectingFileSystem *FS, bool IsRootEntry) {
     auto *M = dyn_cast<yaml::MappingNode>(N);
     if (!M) {
       error(N, "expected mapping node for file or directory entry");
@@ -1416,12 +1233,14 @@
     DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
 
     bool HasContents = false; // external or otherwise
-    std::vector<std::unique_ptr<Entry>> EntryArrayContents;
+    std::vector<std::unique_ptr<RedirectingFileSystem::Entry>>
+        EntryArrayContents;
     std::string ExternalContentsPath;
     std::string Name;
     yaml::Node *NameValueNode;
-    auto UseExternalName = RedirectingFileEntry::NK_NotSet;
-    EntryKind Kind;
+    auto UseExternalName =
+        RedirectingFileSystem::RedirectingFileEntry::NK_NotSet;
+    RedirectingFileSystem::EntryKind Kind;
 
     for (auto &I : *M) {
       StringRef Key;
@@ -1454,9 +1273,9 @@
         if (!parseScalarString(I.getValue(), Value, Buffer))
           return nullptr;
         if (Value == "file")
-          Kind = EK_File;
+          Kind = RedirectingFileSystem::EK_File;
         else if (Value == "directory")
-          Kind = EK_Directory;
+          Kind = RedirectingFileSystem::EK_Directory;
         else {
           error(I.getValue(), "unknown value for 'type'");
           return nullptr;
@@ -1476,7 +1295,7 @@
         }
 
         for (auto &I : *Contents) {
-          if (std::unique_ptr<Entry> E =
+          if (std::unique_ptr<RedirectingFileSystem::Entry> E =
                   parseEntry(&I, FS, /*IsRootEntry*/ false))
             EntryArrayContents.push_back(std::move(E));
           else
@@ -1513,8 +1332,9 @@
         bool Val;
         if (!parseScalarBool(I.getValue(), Val))
           return nullptr;
-        UseExternalName = Val ? RedirectingFileEntry::NK_External
-                              : RedirectingFileEntry::NK_Virtual;
+        UseExternalName =
+            Val ? RedirectingFileSystem::RedirectingFileEntry::NK_External
+                : RedirectingFileSystem::RedirectingFileEntry::NK_Virtual;
       } else {
         llvm_unreachable("key missing from Keys");
       }
@@ -1532,8 +1352,9 @@
       return nullptr;
 
     // check invalid configuration
-    if (Kind == EK_Directory &&
-        UseExternalName != RedirectingFileEntry::NK_NotSet) {
+    if (Kind == RedirectingFileSystem::EK_Directory &&
+        UseExternalName !=
+            RedirectingFileSystem::RedirectingFileEntry::NK_NotSet) {
       error(N, "'use-external-name' is not supported for directories");
       return nullptr;
     }
@@ -1554,17 +1375,19 @@
     // Get the last component
     StringRef LastComponent = sys::path::filename(Trimmed);
 
-    std::unique_ptr<Entry> Result;
+    std::unique_ptr<RedirectingFileSystem::Entry> Result;
     switch (Kind) {
-    case EK_File:
-      Result = llvm::make_unique<RedirectingFileEntry>(
+    case RedirectingFileSystem::EK_File:
+      Result = llvm::make_unique<RedirectingFileSystem::RedirectingFileEntry>(
           LastComponent, std::move(ExternalContentsPath), UseExternalName);
       break;
-    case EK_Directory:
-      Result = llvm::make_unique<RedirectingDirectoryEntry>(
-          LastComponent, std::move(EntryArrayContents),
-          Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
-                 0, 0, 0, file_type::directory_file, sys::fs::all_all));
+    case RedirectingFileSystem::EK_Directory:
+      Result =
+          llvm::make_unique<RedirectingFileSystem::RedirectingDirectoryEntry>(
+              LastComponent, std::move(EntryArrayContents),
+              Status("", getNextVirtualUniqueID(),
+                     std::chrono::system_clock::now(), 0, 0, 0,
+                     file_type::directory_file, sys::fs::all_all));
       break;
     }
 
@@ -1576,12 +1399,14 @@
     for (sys::path::reverse_iterator I = sys::path::rbegin(Parent),
                                      E = sys::path::rend(Parent);
          I != E; ++I) {
-      std::vector<std::unique_ptr<Entry>> Entries;
+      std::vector<std::unique_ptr<RedirectingFileSystem::Entry>> Entries;
       Entries.push_back(std::move(Result));
-      Result = llvm::make_unique<RedirectingDirectoryEntry>(
-          *I, std::move(Entries),
-          Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
-                 0, 0, 0, file_type::directory_file, sys::fs::all_all));
+      Result =
+          llvm::make_unique<RedirectingFileSystem::RedirectingDirectoryEntry>(
+              *I, std::move(Entries),
+              Status("", getNextVirtualUniqueID(),
+                     std::chrono::system_clock::now(), 0, 0, 0,
+                     file_type::directory_file, sys::fs::all_all));
     }
     return Result;
   }
@@ -1607,7 +1432,7 @@
     };
 
     DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
-    std::vector<std::unique_ptr<Entry>> RootEntries;
+    std::vector<std::unique_ptr<RedirectingFileSystem::Entry>> RootEntries;
 
     // Parse configuration and 'roots'
     for (auto &I : *Top) {
@@ -1627,7 +1452,7 @@
         }
 
         for (auto &I : *Roots) {
-          if (std::unique_ptr<Entry> E =
+          if (std::unique_ptr<RedirectingFileSystem::Entry> E =
                   parseEntry(&I, FS, /*IsRootEntry*/ true))
             RootEntries.push_back(std::move(E));
           else
@@ -1684,8 +1509,6 @@
   }
 };
 
-} // namespace
-
 RedirectingFileSystem *
 RedirectingFileSystem::create(std::unique_ptr<MemoryBuffer> Buffer,
                               SourceMgr::DiagHandlerTy DiagHandler,
@@ -1729,7 +1552,8 @@
   return FS.release();
 }
 
-ErrorOr<Entry *> RedirectingFileSystem::lookupPath(const Twine &Path_) const {
+ErrorOr<RedirectingFileSystem::Entry *>
+RedirectingFileSystem::lookupPath(const Twine &Path_) const {
   SmallString<256> Path;
   Path_.toVector(Path);
 
@@ -1751,17 +1575,18 @@
   sys::path::const_iterator Start = sys::path::begin(Path);
   sys::path::const_iterator End = sys::path::end(Path);
   for (const auto &Root : Roots) {
-    ErrorOr<Entry *> Result = lookupPath(Start, End, Root.get());
+    ErrorOr<RedirectingFileSystem::Entry *> Result =
+        lookupPath(Start, End, Root.get());
     if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
       return Result;
   }
   return make_error_code(llvm::errc::no_such_file_or_directory);
 }
 
-ErrorOr<Entry *>
+ErrorOr<RedirectingFileSystem::Entry *>
 RedirectingFileSystem::lookupPath(sys::path::const_iterator Start,
                                   sys::path::const_iterator End,
-                                  Entry *From) const {
+                                  RedirectingFileSystem::Entry *From) const {
 #ifndef _WIN32
   assert(!isTraversalComponent(*Start) &&
          !isTraversalComponent(From->getName()) &&
@@ -1790,13 +1615,14 @@
     }
   }
 
-  auto *DE = dyn_cast<RedirectingDirectoryEntry>(From);
+  auto *DE = dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(From);
   if (!DE)
     return make_error_code(llvm::errc::not_a_directory);
 
-  for (const std::unique_ptr<Entry> &DirEntry :
+  for (const std::unique_ptr<RedirectingFileSystem::Entry> &DirEntry :
        llvm::make_range(DE->contents_begin(), DE->contents_end())) {
-    ErrorOr<Entry *> Result = lookupPath(Start, End, DirEntry.get());
+    ErrorOr<RedirectingFileSystem::Entry *> Result =
+        lookupPath(Start, End, DirEntry.get());
     if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
       return Result;
   }
@@ -1812,9 +1638,10 @@
   return S;
 }
 
-ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path, Entry *E) {
+ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path,
+                                              RedirectingFileSystem::Entry *E) {
   assert(E != nullptr);
-  if (auto *F = dyn_cast<RedirectingFileEntry>(E)) {
+  if (auto *F = dyn_cast<RedirectingFileSystem::RedirectingFileEntry>(E)) {
     ErrorOr<Status> S = ExternalFS->status(F->getExternalContentsPath());
     assert(!S || S->getName() == F->getExternalContentsPath());
     if (S)
@@ -1822,13 +1649,13 @@
                                      *S);
     return S;
   } else { // directory
-    auto *DE = cast<RedirectingDirectoryEntry>(E);
+    auto *DE = cast<RedirectingFileSystem::RedirectingDirectoryEntry>(E);
     return Status::copyWithNewName(DE->getStatus(), Path.str());
   }
 }
 
 ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path) {
-  ErrorOr<Entry *> Result = lookupPath(Path);
+  ErrorOr<RedirectingFileSystem::Entry *> Result = lookupPath(Path);
   if (!Result) {
     if (IsFallthrough &&
         Result.getError() == llvm::errc::no_such_file_or_directory) {
@@ -1866,7 +1693,7 @@
 
 ErrorOr<std::unique_ptr<File>>
 RedirectingFileSystem::openFileForRead(const Twine &Path) {
-  ErrorOr<Entry *> E = lookupPath(Path);
+  ErrorOr<RedirectingFileSystem::Entry *> E = lookupPath(Path);
   if (!E) {
     if (IsFallthrough &&
         E.getError() == llvm::errc::no_such_file_or_directory) {
@@ -1875,7 +1702,7 @@
     return E.getError();
   }
 
-  auto *F = dyn_cast<RedirectingFileEntry>(*E);
+  auto *F = dyn_cast<RedirectingFileSystem::RedirectingFileEntry>(*E);
   if (!F) // FIXME: errc::not_a_file?
     return make_error_code(llvm::errc::invalid_argument);
 
@@ -1897,7 +1724,7 @@
 std::error_code
 RedirectingFileSystem::getRealPath(const Twine &Path,
                                    SmallVectorImpl<char> &Output) const {
-  ErrorOr<Entry *> Result = lookupPath(Path);
+  ErrorOr<RedirectingFileSystem::Entry *> Result = lookupPath(Path);
   if (!Result) {
     if (IsFallthrough &&
         Result.getError() == llvm::errc::no_such_file_or_directory) {
@@ -1906,7 +1733,8 @@
     return Result.getError();
   }
 
-  if (auto *F = dyn_cast<RedirectingFileEntry>(*Result)) {
+  if (auto *F =
+          dyn_cast<RedirectingFileSystem::RedirectingFileEntry>(*Result)) {
     return ExternalFS->getRealPath(F->getExternalContentsPath(), Output);
   }
   // Even if there is a directory entry, fall back to ExternalFS if allowed,
@@ -1925,13 +1753,14 @@
                                        std::move(ExternalFS));
 }
 
-static void getVFSEntries(Entry *SrcE, SmallVectorImpl<StringRef> &Path,
+static void getVFSEntries(RedirectingFileSystem::Entry *SrcE,
+                          SmallVectorImpl<StringRef> &Path,
                           SmallVectorImpl<YAMLVFSEntry> &Entries) {
   auto Kind = SrcE->getKind();
-  if (Kind == EK_Directory) {
-    auto *DE = dyn_cast<RedirectingDirectoryEntry>(SrcE);
+  if (Kind == RedirectingFileSystem::EK_Directory) {
+    auto *DE = dyn_cast<RedirectingFileSystem::RedirectingDirectoryEntry>(SrcE);
     assert(DE && "Must be a directory");
-    for (std::unique_ptr<Entry> &SubEntry :
+    for (std::unique_ptr<RedirectingFileSystem::Entry> &SubEntry :
          llvm::make_range(DE->contents_begin(), DE->contents_end())) {
       Path.push_back(SubEntry->getName());
       getVFSEntries(SubEntry.get(), Path, Entries);
@@ -1940,8 +1769,8 @@
     return;
   }
 
-  assert(Kind == EK_File && "Must be a EK_File");
-  auto *FE = dyn_cast<RedirectingFileEntry>(SrcE);
+  assert(Kind == RedirectingFileSystem::EK_File && "Must be a EK_File");
+  auto *FE = dyn_cast<RedirectingFileSystem::RedirectingFileEntry>(SrcE);
   assert(FE && "Must be a file");
   SmallString<128> VPath;
   for (auto &Comp : Path)
@@ -1958,7 +1787,7 @@
   RedirectingFileSystem *VFS = RedirectingFileSystem::create(
       std::move(Buffer), DiagHandler, YAMLFilePath, DiagContext,
       std::move(ExternalFS));
-  ErrorOr<Entry *> RootE = VFS->lookupPath("/");
+  ErrorOr<RedirectingFileSystem::Entry *> RootE = VFS->lookupPath("/");
   if (!RootE)
     return;
   SmallVector<StringRef, 8> Components;
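getVFSEntries pushes each child's name onto Path before recursing and pops it on the way out, so Path always holds exactly the components from the root down to the current entry; leaves then join those components into one virtual path. A small sketch of the same push/descend/pop pattern, with Node and flatten() as illustrative stand-ins for the LLVM types:

#include <string>
#include <vector>

struct Node {
  std::string Name;
  std::vector<Node> Children; // empty means this entry is a file
};

void flatten(const Node &N, std::vector<std::string> &Path,
             std::vector<std::string> &Out) {
  if (!N.Children.empty()) {
    for (const Node &Child : N.Children) {
      Path.push_back(Child.Name); // descend with the child's component
      flatten(Child, Path, Out);
      Path.pop_back();            // restore Path for the next sibling
    }
    return;
  }
  // Leaf: join the accumulated components into one virtual path.
  std::string VPath;
  for (const std::string &Comp : Path)
    VPath += "/" + Comp;
  Out.push_back(VPath);
}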
@@ -2134,9 +1963,10 @@
 }
 
 VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(
-    const Twine &_Path, RedirectingDirectoryEntry::iterator Begin,
-    RedirectingDirectoryEntry::iterator End, bool IterateExternalFS,
-    FileSystem &ExternalFS, std::error_code &EC)
+    const Twine &_Path,
+    RedirectingFileSystem::RedirectingDirectoryEntry::iterator Begin,
+    RedirectingFileSystem::RedirectingDirectoryEntry::iterator End,
+    bool IterateExternalFS, FileSystem &ExternalFS, std::error_code &EC)
     : Dir(_Path.str()), Current(Begin), End(End),
       IterateExternalFS(IterateExternalFS), ExternalFS(ExternalFS) {
   EC = incrementImpl(/*IsFirstTime=*/true);
@@ -2176,10 +2006,10 @@
     llvm::sys::path::append(PathStr, (*Current)->getName());
     sys::fs::file_type Type;
     switch ((*Current)->getKind()) {
-    case EK_Directory:
+    case RedirectingFileSystem::EK_Directory:
       Type = sys::fs::file_type::directory_file;
       break;
-    case EK_File:
+    case RedirectingFileSystem::EK_File:
       Type = sys::fs::file_type::regular_file;
       break;
     }
diff --git a/lib/Support/Windows/Threading.inc b/lib/Support/Windows/Threading.inc
index decb488..0bd92f6 100644
--- a/lib/Support/Windows/Threading.inc
+++ b/lib/Support/Windows/Threading.inc
@@ -14,7 +14,7 @@
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/Twine.h"
 
-#include "Windows/WindowsSupport.h"
+#include "WindowsSupport.h"
 #include <process.h>
 
 // Windows will at times define MemoryFence.
diff --git a/lib/Support/raw_ostream.cpp b/lib/Support/raw_ostream.cpp
index 81df38f..21dde7f 100644
--- a/lib/Support/raw_ostream.cpp
+++ b/lib/Support/raw_ostream.cpp
@@ -914,3 +914,5 @@
                                    uint64_t Offset) {}
 
 void raw_pwrite_stream::anchor() {}
+
+void buffer_ostream::anchor() {}
diff --git a/lib/TableGen/Main.cpp b/lib/TableGen/Main.cpp
index df4088a..0269841 100644
--- a/lib/TableGen/Main.cpp
+++ b/lib/TableGen/Main.cpp
@@ -100,23 +100,39 @@
   if (Parser.ParseFile())
     return 1;
 
-  std::error_code EC;
-  ToolOutputFile Out(OutputFilename, EC, sys::fs::F_Text);
-  if (EC)
-    return reportError(argv0, "error opening " + OutputFilename + ":" +
-                                  EC.message() + "\n");
+  // Write output to memory.
+  std::string OutString;
+  raw_string_ostream Out(OutString);
+  if (MainFn(Out, Records))
+    return 1;
+
+  // Always write the depfile, even if the main output hasn't changed: if it
+  // is missing, Ninja considers the output dirty.  If this step came after
+  // the early exit below and someone deleted the .inc.d file but not the
+  // .inc file, tablegen would never rewrite the depfile.
   if (!DependFilename.empty()) {
     if (int Ret = createDependencyFile(Parser, argv0))
       return Ret;
   }
 
-  if (MainFn(Out.os(), Records))
-    return 1;
+  // Only update the real output file if there are any differences.
+  // This prevents recompilation of everything that depends on it when
+  // nothing has changed.
+  if (auto ExistingOrErr = MemoryBuffer::getFile(OutputFilename))
+    if (std::move(ExistingOrErr.get())->getBuffer() == Out.str())
+      return 0;
+
+  std::error_code EC;
+  ToolOutputFile OutFile(OutputFilename, EC, sys::fs::F_Text);
+  if (EC)
+    return reportError(argv0, "error opening " + OutputFilename + ":" +
+                                  EC.message() + "\n");
+  OutFile.os() << Out.str();
 
   if (ErrorsPrinted > 0)
     return reportError(argv0, Twine(ErrorsPrinted) + " errors.\n");
 
   // Declare success.
-  Out.keep();
+  OutFile.keep();
   return 0;
 }
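The reworked flow renders the whole TableGen output into memory first, writes the depfile unconditionally, and touches the real output file only when its contents actually differ, so the file's timestamp, and everything that depends on it, is left alone on a no-op run. A minimal write-if-changed sketch using only the standard library (the real code uses llvm::MemoryBuffer and llvm::ToolOutputFile):

#include <fstream>
#include <sstream>
#include <string>

// Returns true on success. Rewrites Path only when its current contents
// differ from NewContents, preserving the old timestamp otherwise.
bool writeIfChanged(const std::string &Path, const std::string &NewContents) {
  std::ifstream In(Path, std::ios::binary);
  if (In) {
    std::ostringstream Existing;
    Existing << In.rdbuf();
    if (Existing.str() == NewContents)
      return true; // unchanged: skip the write, keep the old mtime
  }
  std::ofstream Out(Path, std::ios::binary | std::ios::trunc);
  if (!Out)
    return false;
  Out << NewContents;
  return static_cast<bool>(Out);
}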
diff --git a/lib/Target/AArch64/AArch64.h b/lib/Target/AArch64/AArch64.h
index 2f0d0bf..c36d935 100644
--- a/lib/Target/AArch64/AArch64.h
+++ b/lib/Target/AArch64/AArch64.h
@@ -39,6 +39,7 @@
                                  CodeGenOpt::Level OptLevel);
 FunctionPass *createAArch64StorePairSuppressPass();
 FunctionPass *createAArch64ExpandPseudoPass();
+FunctionPass *createAArch64SpeculationHardeningPass();
 FunctionPass *createAArch64LoadStoreOptimizationPass();
 FunctionPass *createAArch64SIMDInstrOptPass();
 ModulePass *createAArch64PromoteConstantPass();
@@ -68,6 +69,7 @@
 void initializeAArch64ConditionOptimizerPass(PassRegistry&);
 void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry&);
 void initializeAArch64ExpandPseudoPass(PassRegistry&);
+void initializeAArch64SpeculationHardeningPass(PassRegistry&);
 void initializeAArch64LoadStoreOptPass(PassRegistry&);
 void initializeAArch64SIMDInstrOptPass(PassRegistry&);
 void initializeAArch64PreLegalizerCombinerPass(PassRegistry&);
diff --git a/lib/Target/AArch64/AArch64.td b/lib/Target/AArch64/AArch64.td
index a13f6c8..8f79140 100644
--- a/lib/Target/AArch64/AArch64.td
+++ b/lib/Target/AArch64/AArch64.td
@@ -188,14 +188,18 @@
     "fuse-aes", "HasFuseAES", "true",
     "CPU fuses AES crypto operations">;
 
-def FeatureFuseCryptoEOR : SubtargetFeature<
-    "fuse-crypto-eor", "HasFuseCryptoEOR", "true",
-    "CPU fuses AES/PMULL and EOR operations">;
+def FeatureFuseArithmeticLogic : SubtargetFeature<
+    "fuse-arith-logic", "HasFuseArithmeticLogic", "true",
+    "CPU fuses arithmetic and logic operations">;
 
 def FeatureFuseCCSelect : SubtargetFeature<
     "fuse-csel", "HasFuseCCSelect", "true",
     "CPU fuses conditional select operations">;
 
+def FeatureFuseCryptoEOR : SubtargetFeature<
+    "fuse-crypto-eor", "HasFuseCryptoEOR", "true",
+    "CPU fuses AES/PMULL and EOR operations">;
+
 def FeatureFuseLiterals : SubtargetFeature<
     "fuse-literals", "HasFuseLiterals", "true",
     "CPU fuses literal generation operations">;
@@ -306,14 +310,14 @@
 def FeatureSpecRestrict : SubtargetFeature<"specrestrict", "HasSpecRestrict",
   "true", "Enable architectural speculation restriction" >;
 
+def FeatureSB : SubtargetFeature<"sb", "HasSB",
+  "true", "Enable v8.5 Speculation Barrier" >;
+
 def FeatureSSBS : SubtargetFeature<"ssbs", "HasSSBS",
   "true", "Enable Speculative Store Bypass Safe bit" >;
 
-def FeatureSpecCtrl : SubtargetFeature<"specctrl", "HasSpecCtrl", "true",
-  "Enable speculation control barrier" >;
-
-def FeaturePredCtrl : SubtargetFeature<"predctrl", "HasPredCtrl", "true",
-  "Enable execution and data prediction invalidation instructions" >;
+def FeaturePredRes : SubtargetFeature<"predres", "HasPredRes", "true",
+  "Enable v8.5a execution and data prediction invalidation instructions" >;
 
 def FeatureCacheDeepPersist : SubtargetFeature<"ccdp", "HasCCDP",
     "true", "Enable v8.5 Cache Clean to Point of Deep Persistence" >;
@@ -352,7 +356,7 @@
 def HasV8_5aOps : SubtargetFeature<
   "v8.5a", "HasV8_5aOps", "true", "Support ARM v8.5a instructions",
   [HasV8_4aOps, FeatureAltFPCmp, FeatureFRInt3264, FeatureSpecRestrict,
-   FeatureSSBS, FeatureSpecCtrl, FeaturePredCtrl, FeatureCacheDeepPersist,
+   FeatureSSBS, FeatureSB, FeaturePredRes, FeatureCacheDeepPersist,
    FeatureBranchTargetId]
 >;
 
@@ -391,6 +395,7 @@
 include "AArch64SchedKryo.td"
 include "AArch64SchedExynosM1.td"
 include "AArch64SchedExynosM3.td"
+include "AArch64SchedExynosM4.td"
 include "AArch64SchedThunderX.td"
 include "AArch64SchedThunderX2T99.td"
 
@@ -540,6 +545,26 @@
                                      FeaturePredictableSelectIsExpensive,
                                      FeatureZCZeroingFP]>;
 
+def ProcExynosM4 : SubtargetFeature<"exynosm4", "ARMProcFamily", "ExynosM3",
+                                    "Samsung Exynos-M4 processors",
+                                    [HasV8_2aOps,
+                                     FeatureArithmeticBccFusion,
+                                     FeatureArithmeticCbzFusion,
+                                     FeatureCrypto,
+                                     FeatureDotProd,
+                                     FeatureExynosCheapAsMoveHandling,
+                                     FeatureForce32BitJumpTables,
+                                     FeatureFP16FML,
+                                     FeatureFuseAddress,
+                                     FeatureFuseAES,
+                                     FeatureFuseArithmeticLogic,
+                                     FeatureFuseCCSelect,
+                                     FeatureFuseLiterals,
+                                     FeatureLSLFast,
+                                     FeaturePerfMon,
+                                     FeaturePostRAScheduler,
+                                     FeatureZCZeroing]>;
+
 def ProcKryo    : SubtargetFeature<"kryo", "ARMProcFamily", "Kryo",
                                    "Qualcomm Kryo processors", [
                                    FeatureCRC,
@@ -677,7 +702,7 @@
 def : ProcessorModel<"exynos-m1", ExynosM1Model, [ProcExynosM1]>;
 def : ProcessorModel<"exynos-m2", ExynosM1Model, [ProcExynosM2]>;
 def : ProcessorModel<"exynos-m3", ExynosM3Model, [ProcExynosM3]>;
-def : ProcessorModel<"exynos-m4", ExynosM3Model, [ProcExynosM3]>;
+def : ProcessorModel<"exynos-m4", ExynosM4Model, [ProcExynosM4]>;
 def : ProcessorModel<"falkor", FalkorModel, [ProcFalkor]>;
 def : ProcessorModel<"saphira", FalkorModel, [ProcSaphira]>;
 def : ProcessorModel<"kryo", KryoModel, [ProcKryo]>;
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.cpp b/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 828fbb2..0442076 100644
--- a/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -716,6 +716,19 @@
       OutStreamer->EmitRawText(StringRef(OS.str()));
     }
     return;
+
+  case AArch64::EMITBKEY: {
+      ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
+      if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
+          ExceptionHandlingType != ExceptionHandling::ARM)
+        return;
+
+      if (needsCFIMoves() == CFI_M_None)
+        return;
+
+      OutStreamer->EmitCFIBKeyFrame();
+      return;
+    }
   }
 
   // Tail calls use pseudo instructions so they have the proper code-gen
diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index dfc08a1..47550ca 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -2016,8 +2016,9 @@
       if (RetVT == MVT::i64 && VT <= MVT::i32) {
         if (WantZExt) {
           // Delete the last emitted instruction from emitLoad (SUBREG_TO_REG).
-          std::prev(FuncInfo.InsertPt)->eraseFromParent();
-          ResultReg = std::prev(FuncInfo.InsertPt)->getOperand(0).getReg();
+          MachineBasicBlock::iterator I(std::prev(FuncInfo.InsertPt));
+          ResultReg = std::prev(I)->getOperand(0).getReg();
+          removeDeadCode(I, std::next(I));
         } else
           ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg,
                                                  /*IsKill=*/true,
@@ -2038,7 +2039,8 @@
           break;
         }
       }
-      MI->eraseFromParent();
+      MachineBasicBlock::iterator I(MI);
+      removeDeadCode(I, std::next(I));
       MI = nullptr;
       if (Reg)
         MI = MRI.getUniqueVRegDef(Reg);
@@ -2256,6 +2258,13 @@
 
 /// Try to emit a combined compare-and-branch instruction.
 bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
+  // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
+  // will not be produced, as they are conditional branch instructions that do
+  // not set flags.
+  if (FuncInfo.MF->getFunction().hasFnAttribute(
+          Attribute::SpeculativeLoadHardening))
+    return false;
+
   assert(isa<CmpInst>(BI->getCondition()) && "Expected cmp instruction");
   const CmpInst *CI = cast<CmpInst>(BI->getCondition());
   CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
@@ -4508,7 +4517,8 @@
             MI->getOperand(1).getSubReg() == AArch64::sub_32) &&
            "Expected copy instruction");
     Reg = MI->getOperand(1).getReg();
-    MI->eraseFromParent();
+    MachineBasicBlock::iterator I(MI);
+    removeDeadCode(I, std::next(I));
   }
   updateValueMap(I, Reg);
   return true;
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 3f4bcd2e..538a8d7 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -816,10 +816,21 @@
   DebugLoc DL;
 
   if (ShouldSignReturnAddress(MF)) {
-    BuildMI(
-        MBB, MBBI, DL,
-        TII->get(ShouldSignWithAKey(MF) ? AArch64::PACIASP : AArch64::PACIBSP))
-        .setMIFlag(MachineInstr::FrameSetup);
+    if (ShouldSignWithAKey(MF))
+      BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACIASP))
+          .setMIFlag(MachineInstr::FrameSetup);
+    else {
+      BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITBKEY))
+          .setMIFlag(MachineInstr::FrameSetup);
+      BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACIBSP))
+          .setMIFlag(MachineInstr::FrameSetup);
+    }
+
+    unsigned CFIIndex =
+        MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex)
+        .setMIFlags(MachineInstr::FrameSetup);
   }
 
   // All calls are tail calls in GHC calling conv, and functions have no
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 80ad23b..fc9855f 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -2087,8 +2087,7 @@
   (void)BitWidth;
   assert(BitWidth == 32 || BitWidth == 64);
 
-  KnownBits Known;
-  CurDAG->computeKnownBits(Op, Known);
+  KnownBits Known = CurDAG->computeKnownBits(Op);
 
   // Non-zero in the sense that they're not provably zero, which is the key
   // point if we want to use this value.
@@ -2167,8 +2166,7 @@
 
   // Compute the Known Zero for the AND as this allows us to catch more general
   // cases than just looking for AND with imm.
-  KnownBits Known;
-  CurDAG->computeKnownBits(And, Known);
+  KnownBits Known = CurDAG->computeKnownBits(And);
 
   // Non-zero in the sense that they're not provably zero, which is the key
   // point if we want to use this value.
@@ -2309,8 +2307,7 @@
     // This allows to catch more general case than just looking for
     // AND with imm. Indeed, simplify-demanded-bits may have removed
     // the AND instruction because it proves it was useless.
-    KnownBits Known;
-    CurDAG->computeKnownBits(OrOpd1Val, Known);
+    KnownBits Known = CurDAG->computeKnownBits(OrOpd1Val);
 
     // Check if there is enough room for the second operand to appear
    // in the first one.
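These hunks track an API migration: computeKnownBits now returns its result instead of filling an out-parameter, which removes the dangling pre-declaration and lets the result be declared and initialized in one step. A toy illustration of the pattern, with ToyKnownBits standing in for llvm::KnownBits:

#include <cstdint>

struct ToyKnownBits {
  uint64_t Zero = 0; // bits proven zero
  uint64_t One = 0;  // bits proven one
};

// Old style: the caller must pre-declare the result object.
void computeKnownBitsOld(uint64_t V, ToyKnownBits &Known) {
  Known.One = V;   // toy analysis: a constant's bits are fully known
  Known.Zero = ~V;
}

// New style: the result is constructed and returned in one expression.
ToyKnownBits computeKnownBitsNew(uint64_t V) {
  ToyKnownBits Known;
  Known.One = V;
  Known.Zero = ~V;
  return Known;
}

uint64_t provablyNonZeroBits(uint64_t V) {
  ToyKnownBits Known = computeKnownBitsNew(V); // declared and set at once
  return ~Known.Zero; // "non-zero" = not provably zero, as the comment says
}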
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7b53941..e01ca14 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -333,36 +333,38 @@
     setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);
 
   setOperationAction(ISD::FREM,    MVT::f16,   Promote);
-  setOperationAction(ISD::FREM,    MVT::v4f16, Promote);
-  setOperationAction(ISD::FREM,    MVT::v8f16, Promote);
+  setOperationAction(ISD::FREM,    MVT::v4f16, Expand);
+  setOperationAction(ISD::FREM,    MVT::v8f16, Expand);
   setOperationAction(ISD::FPOW,    MVT::f16,   Promote);
-  setOperationAction(ISD::FPOW,    MVT::v4f16, Promote);
-  setOperationAction(ISD::FPOW,    MVT::v8f16, Promote);
+  setOperationAction(ISD::FPOW,    MVT::v4f16, Expand);
+  setOperationAction(ISD::FPOW,    MVT::v8f16, Expand);
   setOperationAction(ISD::FPOWI,   MVT::f16,   Promote);
+  setOperationAction(ISD::FPOWI,   MVT::v4f16, Expand);
+  setOperationAction(ISD::FPOWI,   MVT::v8f16, Expand);
   setOperationAction(ISD::FCOS,    MVT::f16,   Promote);
-  setOperationAction(ISD::FCOS,    MVT::v4f16, Promote);
-  setOperationAction(ISD::FCOS,    MVT::v8f16, Promote);
+  setOperationAction(ISD::FCOS,    MVT::v4f16, Expand);
+  setOperationAction(ISD::FCOS,    MVT::v8f16, Expand);
   setOperationAction(ISD::FSIN,    MVT::f16,   Promote);
-  setOperationAction(ISD::FSIN,    MVT::v4f16, Promote);
-  setOperationAction(ISD::FSIN,    MVT::v8f16, Promote);
+  setOperationAction(ISD::FSIN,    MVT::v4f16, Expand);
+  setOperationAction(ISD::FSIN,    MVT::v8f16, Expand);
   setOperationAction(ISD::FSINCOS, MVT::f16,   Promote);
-  setOperationAction(ISD::FSINCOS, MVT::v4f16, Promote);
-  setOperationAction(ISD::FSINCOS, MVT::v8f16, Promote);
+  setOperationAction(ISD::FSINCOS, MVT::v4f16, Expand);
+  setOperationAction(ISD::FSINCOS, MVT::v8f16, Expand);
   setOperationAction(ISD::FEXP,    MVT::f16,   Promote);
-  setOperationAction(ISD::FEXP,    MVT::v4f16, Promote);
-  setOperationAction(ISD::FEXP,    MVT::v8f16, Promote);
+  setOperationAction(ISD::FEXP,    MVT::v4f16, Expand);
+  setOperationAction(ISD::FEXP,    MVT::v8f16, Expand);
   setOperationAction(ISD::FEXP2,   MVT::f16,   Promote);
-  setOperationAction(ISD::FEXP2,   MVT::v4f16, Promote);
-  setOperationAction(ISD::FEXP2,   MVT::v8f16, Promote);
+  setOperationAction(ISD::FEXP2,   MVT::v4f16, Expand);
+  setOperationAction(ISD::FEXP2,   MVT::v8f16, Expand);
   setOperationAction(ISD::FLOG,    MVT::f16,   Promote);
-  setOperationAction(ISD::FLOG,    MVT::v4f16, Promote);
-  setOperationAction(ISD::FLOG,    MVT::v8f16, Promote);
+  setOperationAction(ISD::FLOG,    MVT::v4f16, Expand);
+  setOperationAction(ISD::FLOG,    MVT::v8f16, Expand);
   setOperationAction(ISD::FLOG2,   MVT::f16,   Promote);
-  setOperationAction(ISD::FLOG2,   MVT::v4f16, Promote);
-  setOperationAction(ISD::FLOG2,   MVT::v8f16, Promote);
+  setOperationAction(ISD::FLOG2,   MVT::v4f16, Expand);
+  setOperationAction(ISD::FLOG2,   MVT::v8f16, Expand);
   setOperationAction(ISD::FLOG10,  MVT::f16,   Promote);
-  setOperationAction(ISD::FLOG10,  MVT::v4f16, Promote);
-  setOperationAction(ISD::FLOG10,  MVT::v8f16, Promote);
+  setOperationAction(ISD::FLOG10,  MVT::v4f16, Expand);
+  setOperationAction(ISD::FLOG10,  MVT::v8f16, Expand);
 
   if (!Subtarget->hasFullFP16()) {
     setOperationAction(ISD::SELECT,      MVT::f16,  Promote);
@@ -993,8 +995,8 @@
     break;
   case AArch64ISD::CSEL: {
     KnownBits Known2;
-    DAG.computeKnownBits(Op->getOperand(0), Known, Depth + 1);
-    DAG.computeKnownBits(Op->getOperand(1), Known2, Depth + 1);
+    Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
+    Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
     Known.Zero &= Known2.Zero;
     Known.One &= Known2.One;
     break;
@@ -2716,9 +2718,19 @@
     EVT PtrVT = getPointerTy(DAG.getDataLayout());
     return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
   }
-  case Intrinsic::aarch64_neon_abs:
-    return DAG.getNode(ISD::ABS, dl, Op.getValueType(),
-                       Op.getOperand(1));
+  case Intrinsic::aarch64_neon_abs: {
+    EVT Ty = Op.getValueType();
+    if (Ty == MVT::i64) {
+      SDValue Result = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64,
+                                   Op.getOperand(1));
+      Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
+      return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
+    } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
+      return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
+    } else {
+      report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
+    }
+  }
   case Intrinsic::aarch64_neon_smax:
     return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2));
@@ -4343,6 +4355,13 @@
   SDValue Dest = Op.getOperand(4);
   SDLoc dl(Op);
 
+  MachineFunction &MF = DAG.getMachineFunction();
+  // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
+  // will not be produced, as they are conditional branch instructions that do
+  // not set flags.
+  bool ProduceNonFlagSettingCondBr =
+      !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
+
   // Handle f128 first, since lowering it will result in comparing the return
   // value of a libcall against zero, which is just what the rest of LowerBR_CC
   // is expecting to deal with.
@@ -4385,7 +4404,7 @@
     // If the RHS of the comparison is zero, we can potentially fold this
     // to a specialized branch.
     const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
-    if (RHSC && RHSC->getZExtValue() == 0) {
+    if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
       if (CC == ISD::SETEQ) {
         // See if we can use a TBZ to fold in an AND as well.
         // TBZ has a smaller branch displacement than CBZ.  If the offset is
@@ -4428,7 +4447,7 @@
       }
     }
     if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
-        LHS.getOpcode() != ISD::AND) {
+        LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) {
       // Don't combine AND since emitComparison converts the AND to an ANDS
       // (a.k.a. TST) and the test in the test bit and branch instruction
       // becomes redundant.  This would also increase register pressure.
@@ -10046,6 +10065,7 @@
 
 static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
                                SDValue SplatVal, unsigned NumVecElts) {
+  assert(!St.isTruncatingStore() && "cannot split truncating vector store");
   unsigned OrigAlignment = St.getAlignment();
   unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;
 
@@ -10120,6 +10140,11 @@
   if (!StVal.hasOneUse())
     return SDValue();
 
+  // If the store is truncating then it's going down to i16 or smaller, which
+  // means it can be implemented in a single store anyway.
+  if (St.isTruncatingStore())
+    return SDValue();
+
   // If the immediate offset of the address operand is too large for the stp
   // instruction, then bail out.
   if (DAG.isBaseWithConstantOffset(St.getBasePtr())) {
@@ -10170,6 +10195,11 @@
   if (NumVecElts != 4 && NumVecElts != 2)
     return SDValue();
 
+  // If the store is truncating then it's going down to i16 or smaller, which
+  // means it can be implemented in a single store anyway.
+  if (St.isTruncatingStore())
+    return SDValue();
+
   // Check that this is a splat.
   // Make sure that each of the relevant vector element locations are inserted
   // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32.
@@ -10807,6 +10837,13 @@
 static SDValue performBRCONDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     SelectionDAG &DAG) {
+  MachineFunction &MF = DAG.getMachineFunction();
+  // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
+  // will not be produced, as they are conditional branch instructions that do
+  // not set flags.
+  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
+    return SDValue();
+
   if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
     N = NV.getNode();
   SDValue Chain = N->getOperand(0);
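This file repeats one pattern in several places: any combine that would produce a TB(N)Z or CB(N)Z is skipped when the function carries the SpeculativeLoadHardening attribute, because those branches do not set flags and SLH reconstructs its taint from the flags at every conditional branch. A standalone sketch of attribute-gated selection, with a toy Function type rather than llvm::Function:

#include <set>
#include <string>

struct Function {
  std::set<std::string> Attributes;
  bool hasFnAttribute(const std::string &A) const {
    return Attributes.count(A) != 0;
  }
};

enum class BranchForm { TestBitAndBranch, CompareAndConditionalBranch };

// TB(N)Z/CB(N)Z do not set flags, so they are off limits under SLH: the
// hardening pass needs the flags live at every conditional branch.
BranchForm selectBranchForm(const Function &F) {
  if (F.hasFnAttribute("speculative_load_hardening"))
    return BranchForm::CompareAndConditionalBranch; // ANDS/SUBS + B.cc
  return BranchForm::TestBitAndBranch;              // compact TBZ/CBZ
}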
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index eddb349..ada0678 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -964,6 +964,18 @@
                                             const MachineFunction &MF) const {
   if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
     return true;
+  switch (MI.getOpcode()) {
+  case AArch64::HINT:
+    // CSDB hints are scheduling barriers.
+    if (MI.getOperand(0).getImm() == 0x14)
+      return true;
+    break;
+  case AArch64::DSB:
+  case AArch64::ISB:
+    // DSB and ISB also are scheduling barriers.
+    return true;
+  default:;
+  }
   return isSEHInstruction(MI);
 }
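The override above treats CSDB (encoded as HINT #0x14), DSB, and ISB as scheduling barriers, so the scheduler never moves instructions across them. A compact sketch of the same opcode dispatch, with illustrative Op values rather than the AArch64 enums:

#include <cstdint>

enum class Op { Hint, Dsb, Isb, Add };

struct Inst {
  Op Opcode;
  uint64_t Imm = 0; // first operand, used for HINT
};

bool isSchedulingBoundary(const Inst &MI) {
  switch (MI.Opcode) {
  case Op::Hint:
    // CSDB is encoded as HINT #0x14; only that hint is a barrier.
    return MI.Imm == 0x14;
  case Op::Dsb:
  case Op::Isb:
    return true; // full barriers: never reorder across these
  default:
    return false;
  }
}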
 
@@ -4739,7 +4751,8 @@
   static const std::pair<unsigned, const char *> TargetFlags[] = {
       {MO_COFFSTUB, "aarch64-coffstub"},
       {MO_GOT, "aarch64-got"},   {MO_NC, "aarch64-nc"},
-      {MO_TLS, "aarch64-tls"},   {MO_DLLIMPORT, "aarch64-dllimport"}};
+      {MO_S, "aarch64-s"},       {MO_TLS, "aarch64-tls"},
+      {MO_DLLIMPORT, "aarch64-dllimport"}};
   return makeArrayRef(TargetFlags);
 }
 
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index ee43b55..c24b8b3 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -114,10 +114,10 @@
                        AssemblerPredicate<"FeatureAltFPCmp", "altnzcv">;
 def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                        AssemblerPredicate<"FeatureFRInt3264", "frint3264">;
-def HasSpecCtrl      : Predicate<"Subtarget->hasSpecCtrl()">,
-                       AssemblerPredicate<"FeatureSpecCtrl", "specctrl">;
-def HasPredCtrl      : Predicate<"Subtarget->hasPredCtrl()">,
-                       AssemblerPredicate<"FeaturePredCtrl", "predctrl">;
+def HasSB            : Predicate<"Subtarget->hasSB()">,
+                       AssemblerPredicate<"FeatureSB", "sb">;
+def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
+                       AssemblerPredicate<"FeaturePredRes", "predres">;
 def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                        AssemblerPredicate<"FeatureCacheDeepPersist", "ccdp">;
 def HasBTI           : Predicate<"Subtarget->hasBTI()">,
@@ -520,6 +520,14 @@
                    [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
             Sched<[]>;
 
+let hasSideEffects = 1, isCodeGenOnly = 1 in {
+  def SpeculationSafeValueX
+      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
+  def SpeculationSafeValueW
+      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
+}
+
+
 //===----------------------------------------------------------------------===//
 // System instructions.
 //===----------------------------------------------------------------------===//
@@ -733,7 +741,7 @@
 def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
   let Inst{20-5} = 0b0001100110000111;
   let Unpredictable{11-8} = 0b1111;
-  let Predicates = [HasSpecCtrl];
+  let Predicates = [HasSB];
   let hasSideEffects = 1;
 }
 
@@ -1619,6 +1627,10 @@
   let AsmString = ".tlsdesccall $sym";
 }
 
+// Pseudo instruction to tell the streamer to emit a 'B' character into the
+// augmentation string.
+def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
+
 // FIXME: maybe the scratch register used shouldn't be fixed to X1?
 // FIXME: can "hasSideEffects be dropped?
 let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
diff --git a/lib/Target/AArch64/AArch64InstructionSelector.cpp b/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 90258cc..5eb589b 100644
--- a/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -73,6 +73,7 @@
                           MachineBasicBlock::iterator MBBI,
                           MachineRegisterInfo &MRI) const;
   bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
 
   ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
 
@@ -788,16 +789,36 @@
     const unsigned CondReg = I.getOperand(0).getReg();
     MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
 
-    if (selectCompareBranch(I, MF, MRI))
+    // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
+    // instructions will not be produced, as they are conditional branch
+    // instructions that do not set flags.
+    bool ProduceNonFlagSettingCondBr =
+        !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
+    if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI))
       return true;
 
-    auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
-                   .addUse(CondReg)
-                   .addImm(/*bit offset=*/0)
-                   .addMBB(DestMBB);
+    if (ProduceNonFlagSettingCondBr) {
+      auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
+                     .addUse(CondReg)
+                     .addImm(/*bit offset=*/0)
+                     .addMBB(DestMBB);
 
-    I.eraseFromParent();
-    return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
+      I.eraseFromParent();
+      return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
+    } else {
+      auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
+                     .addDef(AArch64::WZR)
+                     .addUse(CondReg)
+                     .addImm(1);
+      constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI);
+      auto Bcc =
+          BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc))
+              .addImm(AArch64CC::EQ)
+              .addMBB(DestMBB);
+
+      I.eraseFromParent();
+      return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI);
+    }
   }
 
   case TargetOpcode::G_BRINDIRECT: {
@@ -1533,6 +1554,8 @@
   }
   case TargetOpcode::G_BUILD_VECTOR:
     return selectBuildVector(I, MRI);
+  case TargetOpcode::G_MERGE_VALUES:
+    return selectMergeValues(I, MRI);
   }
 
   return false;
@@ -1570,6 +1593,52 @@
   }
 }
 
+bool AArch64InstructionSelector::selectMergeValues(
+    MachineInstr &I, MachineRegisterInfo &MRI) const {
+  assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
+  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
+  const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
+  assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
+
+  // At the moment we only support merging two s32s into an s64.
+  if (I.getNumOperands() != 3)
+    return false;
+  if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
+    return false;
+  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
+  if (RB.getID() != AArch64::GPRRegBankID)
+    return false;
+
+  auto *DstRC = &AArch64::GPR64RegClass;
+  unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
+  MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
+                                    TII.get(TargetOpcode::SUBREG_TO_REG))
+                                .addDef(SubToRegDef)
+                                .addImm(0)
+                                .addUse(I.getOperand(1).getReg())
+                                .addImm(AArch64::sub_32);
+  unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
+  // Need to anyext the second scalar before we can use bfm
+  MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
+                                    TII.get(TargetOpcode::SUBREG_TO_REG))
+                                .addDef(SubToRegDef2)
+                                .addImm(0)
+                                .addUse(I.getOperand(2).getReg())
+                                .addImm(AArch64::sub_32);
+  MachineInstr &BFM =
+      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
+           .addDef(I.getOperand(0).getReg())
+           .addUse(SubToRegDef)
+           .addUse(SubToRegDef2)
+           .addImm(32)
+           .addImm(31);
+  constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
+  constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
+  constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
+  I.eraseFromParent();
+  return true;
+}
+
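The emitted SUBREG_TO_REG pair zero-extends both 32-bit inputs into 64-bit registers, and the final BFMXri with immr=32, imms=31 inserts the second value into bits [63:32] of the first. In plain integer arithmetic the sequence computes the following (a sketch, not the selector itself):

#include <cassert>
#include <cstdint>

// Pack two 32-bit values into one 64-bit register, low word first, matching
// what the SUBREG_TO_REG + BFMXri sequence above produces.
uint64_t mergeValues(uint32_t Lo, uint32_t Hi) {
  return static_cast<uint64_t>(Lo) | (static_cast<uint64_t>(Hi) << 32);
}

int main() {
  assert(mergeValues(0xdeadbeefu, 0x01234567u) == 0x01234567deadbeefULL);
  return 0;
}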
 bool AArch64InstructionSelector::selectBuildVector(
     MachineInstr &I, MachineRegisterInfo &MRI) const {
   assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
diff --git a/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/lib/Target/AArch64/AArch64LegalizerInfo.cpp
index 5d63f0c..6f7fb7a 100644
--- a/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -48,9 +48,21 @@
   const LLT v2s64 = LLT::vector(2, 64);
 
   getActionDefinitionsBuilder(G_IMPLICIT_DEF)
-      .legalFor({p0, s1, s8, s16, s32, s64, v2s64})
-      .clampScalar(0, s1, s64)
-      .widenScalarToNextPow2(0, 8);
+    .legalFor({p0, s1, s8, s16, s32, s64, v2s64})
+    .clampScalar(0, s1, s64)
+    .widenScalarToNextPow2(0, 8)
+    .fewerElementsIf(
+      [=](const LegalityQuery &Query) {
+        return Query.Types[0].isVector() &&
+          (Query.Types[0].getElementType() != s64 ||
+           Query.Types[0].getNumElements() != 2);
+      },
+      [=](const LegalityQuery &Query) {
+        LLT EltTy = Query.Types[0].getElementType();
+        if (EltTy == s64)
+          return std::make_pair(0, LLT::vector(2, 64));
+        return std::make_pair(0, EltTy);
+      });
 
   getActionDefinitionsBuilder(G_PHI)
       .legalFor({p0, s16, s32, s64})
@@ -97,6 +109,16 @@
 
   getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});
 
+  getActionDefinitionsBuilder(G_FCEIL)
+      // If we don't have full FP16 support, then widen s16 to s32 if we
+      // encounter it.
+      .widenScalarIf(
+          [=, &ST](const LegalityQuery &Query) {
+            return Query.Types[0] == s16 && !ST.hasFullFP16();
+          },
+          [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
+      .legalFor({s16, s32, s64, v2s32, v4s32, v2s64});
+
   getActionDefinitionsBuilder(G_INSERT)
       .unsupportedIf([=](const LegalityQuery &Query) {
         return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
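The legalizer rules added here are predicate/mutation pairs: one lambda decides whether a rule fires for the queried types, the other says what to turn them into (scalarize non-v2s64 G_IMPLICIT_DEF vectors; widen s16 G_FCEIL to s32 without full FP16). A toy version of that rule-table shape, with TypeDesc and Rule standing in for the LegalityQuery machinery:

#include <functional>
#include <optional>
#include <vector>

struct TypeDesc {
  unsigned Bits;     // scalar width in bits
  unsigned Elements; // 0 means scalar
};

struct Rule {
  std::function<bool(const TypeDesc &)> Pred;       // when does this fire?
  std::function<TypeDesc(const TypeDesc &)> Mutate; // what to change it to
};

std::optional<TypeDesc> applyRules(const std::vector<Rule> &Rules,
                                   const TypeDesc &T) {
  for (const Rule &R : Rules)
    if (R.Pred(T))
      return R.Mutate(T); // first matching rule wins
  return std::nullopt;    // already legal: nothing to do
}

// Example rule: widen 16-bit scalars to 32 bits, mirroring the G_FCEIL rule
// for targets without full FP16.
static const std::vector<Rule> FCeilRules = {
    {[](const TypeDesc &T) { return T.Elements == 0 && T.Bits == 16; },
     [](const TypeDesc &) { return TypeDesc{32, 0}; }},
};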
diff --git a/lib/Target/AArch64/AArch64MCInstLower.cpp b/lib/Target/AArch64/AArch64MCInstLower.cpp
index 0729598..d713592 100644
--- a/lib/Target/AArch64/AArch64MCInstLower.cpp
+++ b/lib/Target/AArch64/AArch64MCInstLower.cpp
@@ -189,20 +189,51 @@
 
 MCOperand AArch64MCInstLower::lowerSymbolOperandCOFF(const MachineOperand &MO,
                                                      MCSymbol *Sym) const {
-  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_NONE;
+  uint32_t RefFlags = 0;
+
   if (MO.getTargetFlags() & AArch64II::MO_TLS) {
     if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGEOFF)
-      RefKind = AArch64MCExpr::VK_SECREL_LO12;
+      RefFlags |= AArch64MCExpr::VK_SECREL_LO12;
     else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
              AArch64II::MO_HI12)
-      RefKind = AArch64MCExpr::VK_SECREL_HI12;
+      RefFlags |= AArch64MCExpr::VK_SECREL_HI12;
+
+  } else if (MO.getTargetFlags() & AArch64II::MO_S) {
+    RefFlags |= AArch64MCExpr::VK_SABS;
+  } else {
+    RefFlags |= AArch64MCExpr::VK_ABS;
   }
+
+  if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G3)
+    RefFlags |= AArch64MCExpr::VK_G3;
+  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G2)
+    RefFlags |= AArch64MCExpr::VK_G2;
+  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G1)
+    RefFlags |= AArch64MCExpr::VK_G1;
+  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G0)
+    RefFlags |= AArch64MCExpr::VK_G0;
+
+  // FIXME: Currently we only set VK_NC for MO_G3/MO_G2/MO_G1/MO_G0. Setting
+  // VK_NC for the others would also require setting their respective
+  // RefFlags correctly.  We should do this in a separate patch.
+  if (MO.getTargetFlags() & AArch64II::MO_NC) {
+    auto MOFrag = (MO.getTargetFlags() & AArch64II::MO_FRAGMENT);
+    if (MOFrag == AArch64II::MO_G3 || MOFrag == AArch64II::MO_G2 ||
+        MOFrag == AArch64II::MO_G1 || MOFrag == AArch64II::MO_G0)
+      RefFlags |= AArch64MCExpr::VK_NC;
+  }
+
   const MCExpr *Expr =
       MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);
   if (!MO.isJTI() && MO.getOffset())
     Expr = MCBinaryExpr::createAdd(
         Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
+
+  auto RefKind = static_cast<AArch64MCExpr::VariantKind>(RefFlags);
+  assert(RefKind != AArch64MCExpr::VK_INVALID &&
+         "Invalid relocation requested");
   Expr = AArch64MCExpr::create(Expr, RefKind, Ctx);
+
   return MCOperand::createExpr(Expr);
 }
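lowerSymbolOperandCOFF now accumulates independent aspects of the relocation (base kind, granule fragment, no-check bit) as OR-able flags and converts to a VariantKind only at the end, instead of committing to one enum value up front. A sketch of that flag-composition style with made-up enumerator values, not the real AArch64MCExpr constants:

#include <cassert>
#include <cstdint>

enum RefFlag : uint32_t {
  VK_ABS  = 0x001, // absolute address (the default base kind)
  VK_SABS = 0x002, // signed absolute (MO_S)
  VK_G0   = 0x010, // 16-bit granule selector field...
  VK_G1   = 0x020,
  VK_G2   = 0x030,
  VK_G3   = 0x040,
  VK_NC   = 0x100, // no-check variant
};

uint32_t buildRefFlags(bool Signed, unsigned Granule, bool NoCheck) {
  uint32_t Flags = Signed ? VK_SABS : VK_ABS; // exactly one base kind
  static const uint32_t GranuleFlags[] = {VK_G0, VK_G1, VK_G2, VK_G3};
  assert(Granule < 4 && "invalid granule");
  Flags |= GranuleFlags[Granule];
  if (NoCheck)
    Flags |= VK_NC; // only meaningful together with a granule selector
  return Flags;     // the real code casts this to a VariantKind and asserts
}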
 
diff --git a/lib/Target/AArch64/AArch64MacroFusion.cpp b/lib/Target/AArch64/AArch64MacroFusion.cpp
index fb8a339..bc596dd 100644
--- a/lib/Target/AArch64/AArch64MacroFusion.cpp
+++ b/lib/Target/AArch64/AArch64MacroFusion.cpp
@@ -270,7 +270,107 @@
   return false;
 }
 
-/// Check if the instr pair, FirstMI and SecondMI, should be fused
+// Arithmetic and logic.
+static bool isArithmeticLogicPair(const MachineInstr *FirstMI,
+                                  const MachineInstr &SecondMI) {
+  if (AArch64InstrInfo::hasShiftedReg(SecondMI))
+    return false;
+
+  switch (SecondMI.getOpcode()) {
+  // Arithmetic
+  case AArch64::ADDWrr:
+  case AArch64::ADDXrr:
+  case AArch64::SUBWrr:
+  case AArch64::SUBXrr:
+  case AArch64::ADDWrs:
+  case AArch64::ADDXrs:
+  case AArch64::SUBWrs:
+  case AArch64::SUBXrs:
+  // Logic
+  case AArch64::ANDWrr:
+  case AArch64::ANDXrr:
+  case AArch64::BICWrr:
+  case AArch64::BICXrr:
+  case AArch64::EONWrr:
+  case AArch64::EONXrr:
+  case AArch64::EORWrr:
+  case AArch64::EORXrr:
+  case AArch64::ORNWrr:
+  case AArch64::ORNXrr:
+  case AArch64::ORRWrr:
+  case AArch64::ORRXrr:
+  case AArch64::ANDWrs:
+  case AArch64::ANDXrs:
+  case AArch64::BICWrs:
+  case AArch64::BICXrs:
+  case AArch64::EONWrs:
+  case AArch64::EONXrs:
+  case AArch64::EORWrs:
+  case AArch64::EORXrs:
+  case AArch64::ORNWrs:
+  case AArch64::ORNXrs:
+  case AArch64::ORRWrs:
+  case AArch64::ORRXrs:
+    // Assume the 1st instr to be a wildcard if it is unspecified.
+    if (FirstMI == nullptr)
+      return true;
+
+    // Arithmetic
+    switch (FirstMI->getOpcode()) {
+    case AArch64::ADDWrr:
+    case AArch64::ADDXrr:
+    case AArch64::ADDSWrr:
+    case AArch64::ADDSXrr:
+    case AArch64::SUBWrr:
+    case AArch64::SUBXrr:
+    case AArch64::SUBSWrr:
+    case AArch64::SUBSXrr:
+      return true;
+    case AArch64::ADDWrs:
+    case AArch64::ADDXrs:
+    case AArch64::ADDSWrs:
+    case AArch64::ADDSXrs:
+    case AArch64::SUBWrs:
+    case AArch64::SUBXrs:
+    case AArch64::SUBSWrs:
+    case AArch64::SUBSXrs:
+      return !AArch64InstrInfo::hasShiftedReg(*FirstMI);
+    }
+    break;
+
+  // Arithmetic, setting flags.
+  case AArch64::ADDSWrr:
+  case AArch64::ADDSXrr:
+  case AArch64::SUBSWrr:
+  case AArch64::SUBSXrr:
+  case AArch64::ADDSWrs:
+  case AArch64::ADDSXrs:
+  case AArch64::SUBSWrs:
+  case AArch64::SUBSXrs:
+    // Assume the 1st instr to be a wildcard if it is unspecified.
+    if (FirstMI == nullptr)
+      return true;
+
+    // Arithmetic, not setting flags.
+    switch (FirstMI->getOpcode()) {
+    case AArch64::ADDWrr:
+    case AArch64::ADDXrr:
+    case AArch64::SUBWrr:
+    case AArch64::SUBXrr:
+      return true;
+    case AArch64::ADDWrs:
+    case AArch64::ADDXrs:
+    case AArch64::SUBWrs:
+    case AArch64::SUBXrs:
+      return !AArch64InstrInfo::hasShiftedReg(*FirstMI);
+    }
+    break;
+  }
+
+  return false;
+}
+
+/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused
 /// together. When FirstMI is unspecified, check instead whether SecondMI
 /// may be part of any fused pair at all.
 static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
@@ -295,6 +395,8 @@
     return true;
   if (ST.hasFuseCCSelect() && isCCSelectPair(FirstMI, SecondMI))
     return true;
+  if (ST.hasFuseArithmeticLogic() && isArithmeticLogicPair(FirstMI, SecondMI))
+    return true;
 
   return false;
 }
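isArithmeticLogicPair, like the other fusion predicates here, accepts a null FirstMI as a wildcard so the scheduler can first ask whether SecondMI could ever be the tail of a fused pair before testing a concrete predecessor. A reduced sketch of that shape, with a toy Opcode enum in place of the AArch64 opcode lists:

#include <set>

enum class Opcode { Add, Sub, And, Orr, Mul, Load };

bool isFusableSecond(Opcode Op) {
  static const std::set<Opcode> Seconds = {Opcode::And, Opcode::Orr};
  return Seconds.count(Op) != 0;
}

bool isFusableFirst(Opcode Op) {
  static const std::set<Opcode> Firsts = {Opcode::Add, Opcode::Sub};
  return Firsts.count(Op) != 0;
}

// FirstMI == nullptr acts as a wildcard: "could SecondMI be the tail of some
// fused pair?" This lets the scheduler query fusability before pairing.
bool isArithmeticLogicPair(const Opcode *FirstMI, Opcode SecondMI) {
  if (!isFusableSecond(SecondMI))
    return false;
  if (FirstMI == nullptr)
    return true; // wildcard: any eligible predecessor would do
  return isFusableFirst(*FirstMI);
}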
diff --git a/lib/Target/AArch64/AArch64PreLegalizerCombiner.cpp b/lib/Target/AArch64/AArch64PreLegalizerCombiner.cpp
index 5022b62..3da9306 100644
--- a/lib/Target/AArch64/AArch64PreLegalizerCombiner.cpp
+++ b/lib/Target/AArch64/AArch64PreLegalizerCombiner.cpp
@@ -88,7 +88,7 @@
   auto *TPC = &getAnalysis<TargetPassConfig>();
   AArch64PreLegalizerCombinerInfo PCInfo;
   Combiner C(PCInfo, TPC);
-  return C.combineMachineInstrs(MF);
+  return C.combineMachineInstrs(MF, /*CSEInfo*/ nullptr);
 }
 
 char AArch64PreLegalizerCombiner::ID = 0;
diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index c497669..68c48a5 100644
--- a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -389,6 +389,7 @@
   case TargetOpcode::G_FCONSTANT:
   case TargetOpcode::G_FPEXT:
   case TargetOpcode::G_FPTRUNC:
+  case TargetOpcode::G_FCEIL:
     return true;
   }
   return false;
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 55631bc..96ae45a 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -203,6 +203,10 @@
   if (hasBasePointer(MF))
     markSuperRegs(Reserved, AArch64::W19);
 
+  // SLH uses register W16/X16 as the taint register.
+  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
+    markSuperRegs(Reserved, AArch64::W16);
+
   assert(checkAllSuperRegsMarked(Reserved));
   return Reserved;
 }
diff --git a/lib/Target/AArch64/AArch64SchedExynosM1.td b/lib/Target/AArch64/AArch64SchedExynosM1.td
index 62a4650..f757d53 100644
--- a/lib/Target/AArch64/AArch64SchedExynosM1.td
+++ b/lib/Target/AArch64/AArch64SchedExynosM1.td
@@ -78,9 +78,8 @@
 def M1WriteAD : SchedWriteRes<[M1UnitALU,
                                M1UnitC]>   { let Latency = 2;
                                              let NumMicroOps = 2; }
-def M1WriteAX : SchedWriteVariant<[SchedVar<ExynosExtPred, [M1WriteA1]>,
-                                   SchedVar<NoSchedPred,   [M1WriteAA]>]>;
-def M1WriteAY : SchedWriteVariant<[SchedVar<ExynosShiftPred, [M1WriteA1]>,
+def M1WriteAX : SchedWriteVariant<[SchedVar<ExynosArithPred, [M1WriteA1]>,
+                                   SchedVar<ExynosLogicPred, [M1WriteA1]>,
                                    SchedVar<NoSchedPred,     [M1WriteAA]>]>;
 def M1WriteC1 : SchedWriteRes<[M1UnitC]>   { let Latency = 1; }
 def M1WriteC2 : SchedWriteRes<[M1UnitC]>   { let Latency = 2; }
@@ -430,7 +429,7 @@
 
 // Arithmetic and logical integer instructions.
 def : InstRW<[M1WriteAX], (instregex ".+rx(64)?$")>;
-def : InstRW<[M1WriteAY], (instregex ".+rs$")>;
+def : InstRW<[M1WriteAX], (instregex ".+rs$")>;
 
 // Move instructions.
 def : InstRW<[M1WriteCOPY], (instrs COPY)>;
@@ -440,14 +439,14 @@
 // Miscellaneous instructions.
 
 // Load instructions.
-def : InstRW<[M1WriteLC,
-              ReadAdrBase], (instregex "^LDR(BB|HH|SBW|SBX|SHW|SWX|SW|W|X)roW")>;
-def : InstRW<[M1WriteL5,
-              ReadAdrBase], (instregex "^LDR(BB|HH|SBW|SBX|SHW|SWX|SW|W|X)roX")>;
 def : InstRW<[M1WriteLB,
               WriteLDHi,
               WriteAdr],    (instregex "^LDP(SW|W|X)(post|pre)")>;
 def : InstRW<[M1WriteLC,
+              ReadAdrBase], (instregex "^LDR(BB|SBW|SBX|HH|SHW|SHX|SW|W|X)roW")>;
+def : InstRW<[M1WriteL5,
+              ReadAdrBase], (instregex "^LDR(BB|SBW|SBX|HH|SHW|SHX|SW|W|X)roX")>;
+def : InstRW<[M1WriteLC,
               ReadAdrBase], (instrs PRFMroW)>;
 def : InstRW<[M1WriteL5,
               ReadAdrBase], (instrs PRFMroX)>;
diff --git a/lib/Target/AArch64/AArch64SchedExynosM3.td b/lib/Target/AArch64/AArch64SchedExynosM3.td
index 39f448e..1593508 100644
--- a/lib/Target/AArch64/AArch64SchedExynosM3.td
+++ b/lib/Target/AArch64/AArch64SchedExynosM3.td
@@ -108,6 +108,8 @@
 
 def M3WriteZ0 : SchedWriteRes<[]> { let Latency = 0;
                                     let NumMicroOps = 1; }
+def M3WriteZ1 : SchedWriteRes<[]> { let Latency = 1;
+                                    let NumMicroOps = 0; }
 
 def M3WriteA1 : SchedWriteRes<[M3UnitALU]> { let Latency = 1; }
 def M3WriteAA : SchedWriteRes<[M3UnitALU]> { let Latency = 2;
@@ -125,18 +127,18 @@
 def M3WriteC1 : SchedWriteRes<[M3UnitC]>   { let Latency = 1; }
 def M3WriteC2 : SchedWriteRes<[M3UnitC]>   { let Latency = 2; }
 def M3WriteAU : SchedWriteVariant<[SchedVar<IsCopyIdiomPred, [M3WriteZ0]>,
-                                   SchedVar<ExynosShiftPred, [M3WriteA1]>,
+                                   SchedVar<ExynosArithPred, [M3WriteA1]>,
+                                   SchedVar<ExynosLogicPred, [M3WriteA1]>,
                                    SchedVar<NoSchedPred,     [M3WriteAA]>]>;
 def M3WriteAV : SchedWriteVariant<[SchedVar<IsCopyIdiomPred, [M3WriteZ0]>,
                                    SchedVar<NoSchedPred,     [M3WriteAA]>]>;
 def M3WriteAW : SchedWriteVariant<[SchedVar<IsZeroIdiomPred, [M3WriteZ0]>,
                                    SchedVar<NoSchedPred,     [M3WriteAA]>]>;
-def M3WriteAX : SchedWriteVariant<[SchedVar<ExynosExtPred, [M3WriteA1]>,
-                                   SchedVar<NoSchedPred,   [M3WriteAA]>]>;
+def M3WriteAX : SchedWriteVariant<[SchedVar<ExynosArithPred, [M3WriteA1]>,
+                                   SchedVar<ExynosLogicPred, [M3WriteA1]>,
+                                   SchedVar<NoSchedPred,     [M3WriteAA]>]>;
 def M3WriteAY : SchedWriteVariant<[SchedVar<ExynosRotateRightImmPred, [M3WriteA1]>,
                                    SchedVar<NoSchedPred,              [M3WriteAA]>]>;
-def M3WriteAZ : SchedWriteVariant<[SchedVar<ExynosShiftPred, [M3WriteA1]>,
-                                   SchedVar<NoSchedPred,     [M3WriteAA]>]>;
 
 def M3WriteB1 : SchedWriteRes<[M3UnitB]> { let Latency = 1; }
 def M3WriteBX : SchedWriteVariant<[SchedVar<ExynosBranchLinkLRPred, [M3WriteAC]>,
@@ -162,8 +164,8 @@
                                            let NumMicroOps = 2; }
 def M3WriteLH : SchedWriteRes<[]>        { let Latency = 5;
                                            let NumMicroOps = 0; }
-def M3WriteLX : SchedWriteVariant<[SchedVar<ScaledIdxPred, [M3WriteLB]>,
-                                   SchedVar<NoSchedPred,   [M3WriteL5]>]>;
+def M3WriteLX : SchedWriteVariant<[SchedVar<ExynosScaledIdxPred, [M3WriteL5]>,
+                                   SchedVar<NoSchedPred,         [M3WriteL4]>]>;
 
 def M3WriteS1 : SchedWriteRes<[M3UnitS]>   { let Latency = 1; }
 def M3WriteSA : SchedWriteRes<[M3UnitA,
@@ -173,24 +175,22 @@
 def M3WriteSB : SchedWriteRes<[M3UnitA,
                                M3UnitS]>   { let Latency = 2;
                                              let NumMicroOps = 2; }
-def M3WriteSX : SchedWriteVariant<[SchedVar<ScaledIdxPred, [M3WriteSB]>,
-                                   SchedVar<NoSchedPred,   [M3WriteS1]>]>;
 
-def M3ReadAdrBase : SchedReadVariant<[SchedVar<ScaledIdxPred, [ReadDefault]>,
-                                      SchedVar<NoSchedPred,   [ReadDefault]>]>;
+def M3ReadAdrBase : SchedReadVariant<[SchedVar<ExynosScaledIdxPred, [ReadDefault]>,
+                                      SchedVar<NoSchedPred,         [ReadDefault]>]>;
 
 // Branch instructions.
 def : SchedAlias<WriteBr, M3WriteZ0>;
-def : WriteRes<WriteBrReg, [M3UnitC]> { let Latency = 1; }
+def : SchedAlias<WriteBrReg, M3WriteC1>;
 
 // Arithmetic and logical integer instructions.
-def : WriteRes<WriteI,     [M3UnitALU]> { let Latency = 1; }
-def : WriteRes<WriteISReg, [M3UnitALU]> { let Latency = 1; }
-def : WriteRes<WriteIEReg, [M3UnitALU]> { let Latency = 1; }
-def : WriteRes<WriteIS,    [M3UnitALU]> { let Latency = 1; }
+def : SchedAlias<WriteI,     M3WriteA1>;
+def : SchedAlias<WriteISReg, M3WriteA1>;
+def : SchedAlias<WriteIEReg, M3WriteA1>;
+def : SchedAlias<WriteIS,    M3WriteA1>;
 
 // Move instructions.
-def : WriteRes<WriteImm, [M3UnitALU]> { let Latency = 1; }
+def : SchedAlias<WriteImm, M3WriteA1>;
 
 // Divide and multiply instructions.
 def : WriteRes<WriteID32, [M3UnitC,
@@ -207,21 +207,20 @@
 def : SchedAlias<WriteExtr, M3WriteAY>;
 
 // Addressing modes.
-def : WriteRes<WriteAdr, []> { let Latency = 1;
-                               let NumMicroOps = 0; }
+def : SchedAlias<WriteAdr,    M3WriteZ1>;
 def : SchedAlias<ReadAdrBase, M3ReadAdrBase>;
 
 // Load instructions.
 def : SchedAlias<WriteLD, M3WriteL4>;
 def : WriteRes<WriteLDHi, []> { let Latency = 4;
                                 let NumMicroOps = 0; }
-def : SchedAlias<WriteLDIdx, M3WriteLX>;
+def : SchedAlias<WriteLDIdx, M3WriteLB>;
 
 // Store instructions.
 def : SchedAlias<WriteST,    M3WriteS1>;
 def : SchedAlias<WriteSTP,   M3WriteS1>;
 def : SchedAlias<WriteSTX,   M3WriteS1>;
-def : SchedAlias<WriteSTIdx, M3WriteSX>;
+def : SchedAlias<WriteSTIdx, M3WriteSB>;
 
 // FP data instructions.
 def : WriteRes<WriteF,    [M3UnitFADD]>  { let Latency = 2; }
@@ -231,7 +230,6 @@
 def : WriteRes<WriteFMul, [M3UnitFMAC]>  { let Latency = 4; }
 
 // FP miscellaneous instructions.
-// TODO: Conversion between register files is much different.
 def : WriteRes<WriteFCvt,  [M3UnitFCVT]> { let Latency = 3; }
 def : WriteRes<WriteFImm,  [M3UnitNALU]> { let Latency = 1; }
 def : WriteRes<WriteFCopy, [M3UnitNALU]> { let Latency = 1; }
@@ -486,10 +484,10 @@
 def : InstRW<[M3WriteAD], (instregex "^TBN?Z[WX]")>;
 
 // Arithmetic and logical integer instructions.
-def : InstRW<[M3WriteAZ], (instregex "^(ADD|AND|BIC|EON|EOR|ORN|SUB)[WX]rs$")>;
+def : InstRW<[M3WriteAX], (instregex "^(ADD|AND|BIC|EON|EOR|ORN|SUB)[WX]rs$")>;
 def : InstRW<[M3WriteAU], (instrs ORRWrs, ORRXrs)>;
 def : InstRW<[M3WriteAX], (instregex "^(ADD|SUB)S?[WX]rx(64)?$")>;
-def : InstRW<[M3WriteAZ], (instregex "^(ADD|AND|BIC|SUB)S[WX]rs$")>;
+def : InstRW<[M3WriteAX], (instregex "^(ADD|AND|BIC|SUB)S[WX]rs$")>;
 def : InstRW<[M3WriteAV], (instrs ADDWri, ADDXri)>;
 def : InstRW<[M3WriteAW], (instrs ORRWri, ORRXri)>;
 
@@ -503,16 +501,16 @@
 // Miscellaneous instructions.
 
 // Load instructions.
-def : InstRW<[M3WriteLB,
-              ReadAdrBase], (instregex "^LDR(BB|HH|SBW|SBX|SHW|SWX|SW|W|X)roW")>;
-def : InstRW<[M3WriteL5,
-              ReadAdrBase], (instregex "^LDR(BB|HH|SBW|SBX|SHW|SWX|SW|W|X)roX")>;
 def : InstRW<[M3WriteLD,
               WriteLDHi,
               WriteAdr],    (instregex "^LDP(SW|W|X)(post|pre)")>;
 def : InstRW<[M3WriteLB,
+              ReadAdrBase], (instregex "^LDR(BB|SBW|SBX|HH|SHW|SHX|SW|W|X)roW")>;
+def : InstRW<[M3WriteLX,
+              ReadAdrBase], (instregex "^LDR(BB|SBW|SBX|HH|SHW|SHX|SW|W|X)roX")>;
+def : InstRW<[M3WriteLB,
               ReadAdrBase], (instrs PRFMroW)>;
-def : InstRW<[M3WriteL5,
+def : InstRW<[M3WriteLX,
               ReadAdrBase], (instrs PRFMroX)>;
 
 // Store instructions.
@@ -672,108 +670,108 @@
 // ASIMD load instructions.
 def : InstRW<[M3WriteL5],   (instregex "LD1Onev(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteL5,
-              WriteAdr],    (instregex "LD1Onev(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD1Onev(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteL5],   (instregex "LD1Onev(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteL5,
-              WriteAdr],    (instregex "LD1Onev(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD1Onev(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDA], (instregex "LD1Twov(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteVLDA,
-              WriteAdr],    (instregex "LD1Twov(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD1Twov(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteVLDA], (instregex "LD1Twov(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDA,
-              WriteAdr],    (instregex "LD1Twov(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD1Twov(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDB], (instregex "LD1Threev(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteVLDB,
-              WriteAdr],    (instregex "LD1Threev(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD1Threev(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteVLDB], (instregex "LD1Threev(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDB,
-              WriteAdr],    (instregex "LD1Threev(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD1Threev(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDC], (instregex "LD1Fourv(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteVLDC,
-              WriteAdr],    (instregex "LD1Fourv(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD1Fourv(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteVLDC], (instregex "LD1Fourv(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDC,
-              WriteAdr],    (instregex "LD1Fourv(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD1Fourv(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDD], (instregex "LD1i(8|16|32)$")>;
 def : InstRW<[M3WriteVLDD,
-              WriteAdr],    (instregex "LD1i(8|16|32)_POST")>;
+              M3WriteA1],   (instregex "LD1i(8|16|32)_POST")>;
 def : InstRW<[M3WriteVLDE], (instregex "LD1i(64)$")>;
 def : InstRW<[M3WriteVLDE,
-              WriteAdr],    (instregex "LD1i(64)_POST")>;
+              M3WriteA1],   (instregex "LD1i(64)_POST")>;
 
 def : InstRW<[M3WriteL5],   (instregex "LD1Rv(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteL5,
-              WriteAdr],    (instregex "LD1Rv(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD1Rv(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteL5],   (instregex "LD1Rv(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteL5,
-              WriteAdr],    (instregex "LD1Rv(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD1Rv(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDF], (instregex "LD2Twov(8b|4h|2s)$")>;
 def : InstRW<[M3WriteVLDF,
-              WriteAdr],    (instregex "LD2Twov(8b|4h|2s)_POST")>;
+              M3WriteA1],   (instregex "LD2Twov(8b|4h|2s)_POST")>;
 def : InstRW<[M3WriteVLDF], (instregex "LD2Twov(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDF,
-              WriteAdr],    (instregex "LD2Twov(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD2Twov(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDG], (instregex "LD2i(8|16|32)$")>;
 def : InstRW<[M3WriteVLDG,
-              WriteAdr],    (instregex "LD2i(8|16|32)_POST")>;
+              M3WriteA1],   (instregex "LD2i(8|16|32)_POST")>;
 def : InstRW<[M3WriteVLDH], (instregex "LD2i(64)$")>;
 def : InstRW<[M3WriteVLDH,
-              WriteAdr],    (instregex "LD2i(64)_POST")>;
+              M3WriteA1],   (instregex "LD2i(64)_POST")>;
 
 def : InstRW<[M3WriteVLDA], (instregex "LD2Rv(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteVLDA,
-              WriteAdr],    (instregex "LD2Rv(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD2Rv(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteVLDA], (instregex "LD2Rv(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDA,
-              WriteAdr],    (instregex "LD2Rv(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD2Rv(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDI], (instregex "LD3Threev(8b|4h|2s)$")>;
 def : InstRW<[M3WriteVLDI,
-              WriteAdr],    (instregex "LD3Threev(8b|4h|2s)_POST")>;
+              M3WriteA1],   (instregex "LD3Threev(8b|4h|2s)_POST")>;
 def : InstRW<[M3WriteVLDI], (instregex "LD3Threev(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDI,
-              WriteAdr],    (instregex "LD3Threev(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD3Threev(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDJ], (instregex "LD3i(8|16|32)$")>;
 def : InstRW<[M3WriteVLDJ,
-              WriteAdr],    (instregex "LD3i(8|16|32)_POST")>;
+              M3WriteA1],   (instregex "LD3i(8|16|32)_POST")>;
 def : InstRW<[M3WriteVLDL], (instregex "LD3i(64)$")>;
 def : InstRW<[M3WriteVLDL,
-              WriteAdr],    (instregex "LD3i(64)_POST")>;
+              M3WriteA1],   (instregex "LD3i(64)_POST")>;
 
 def : InstRW<[M3WriteVLDB], (instregex "LD3Rv(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteVLDB,
-              WriteAdr],    (instregex "LD3Rv(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD3Rv(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteVLDB], (instregex "LD3Rv(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDB,
-              WriteAdr],    (instregex "LD3Rv(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD3Rv(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDN], (instregex "LD4Fourv(8b|4h|2s)$")>;
 def : InstRW<[M3WriteVLDN,
-              WriteAdr],    (instregex "LD4Fourv(8b|4h|2s)_POST")>;
+              M3WriteA1],   (instregex "LD4Fourv(8b|4h|2s)_POST")>;
 def : InstRW<[M3WriteVLDN], (instregex "LD4Fourv(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDN,
-              WriteAdr],    (instregex "LD4Fourv(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD4Fourv(16b|8h|4s|2d)_POST")>;
 
 def : InstRW<[M3WriteVLDK], (instregex "LD4i(8|16|32)$")>;
 def : InstRW<[M3WriteVLDK,
-              WriteAdr],    (instregex "LD4i(8|16|32)_POST")>;
+              M3WriteA1],   (instregex "LD4i(8|16|32)_POST")>;
 def : InstRW<[M3WriteVLDM], (instregex "LD4i(64)$")>;
 def : InstRW<[M3WriteVLDM,
-              WriteAdr],    (instregex "LD4i(64)_POST")>;
+              M3WriteA1],   (instregex "LD4i(64)_POST")>;
 
 def : InstRW<[M3WriteVLDC], (instregex "LD4Rv(8b|4h|2s|1d)$")>;
 def : InstRW<[M3WriteVLDC,
-              WriteAdr],    (instregex "LD4Rv(8b|4h|2s|1d)_POST")>;
+              M3WriteA1],   (instregex "LD4Rv(8b|4h|2s|1d)_POST")>;
 def : InstRW<[M3WriteVLDC], (instregex "LD4Rv(16b|8h|4s|2d)$")>;
 def : InstRW<[M3WriteVLDC,
-              WriteAdr],    (instregex "LD4Rv(16b|8h|4s|2d)_POST")>;
+              M3WriteA1],   (instregex "LD4Rv(16b|8h|4s|2d)_POST")>;
 
 // ASIMD store instructions.
 def : InstRW<[WriteVST],    (instregex "ST1Onev(8b|4h|2s|1d)$")>;
diff --git a/lib/Target/AArch64/AArch64SchedExynosM4.td b/lib/Target/AArch64/AArch64SchedExynosM4.td
new file mode 100644
index 0000000..4d89246
--- /dev/null
+++ b/lib/Target/AArch64/AArch64SchedExynosM4.td
@@ -0,0 +1,1004 @@
+//=- AArch64SchedExynosM4.td - Samsung Exynos M4 Sched Defs --*- tablegen -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the machine model for the Samsung Exynos M4 to support
+// instruction scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// The Exynos-M4 is an advanced superscalar microprocessor with a 6-wide,
+// in-order stage for decode and dispatch and a wider issue stage.
+// The execution units, as well as the loads and stores, are out of order.
+
+def ExynosM4Model : SchedMachineModel {
+  let IssueWidth            =   6; // Up to 6 uops per cycle.
+  let MicroOpBufferSize     = 228; // ROB size.
+  let LoopMicroOpBufferSize =  48; // Based on the instruction queue size.
+  let LoadLatency           =   4; // Optimistic load cases.
+  let MispredictPenalty     =  16; // Minimum branch misprediction penalty.
+  let CompleteModel         =   1; // All instructions are covered, with no fall back on defaults.
+
+  list<Predicate> UnsupportedFeatures = [HasSVE];
+}
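+
+// As an illustration only and not part of this patch, a core definition
+// elsewhere in the target, e.g. in AArch64.td, would be expected to pick this
+// model up along these lines, with the actual subtarget feature list elided:
+//
+//   def : ProcessorModel<"exynos-m4", ExynosM4Model, [/* features */]>;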
+
+//===----------------------------------------------------------------------===//
+// Define each kind of processor resource and number available on the Exynos-M4.
+
+let SchedModel = ExynosM4Model in {
+
+def M4UnitA  : ProcResource<2>; // Simple integer
+def M4UnitC  : ProcResource<2>; // Simple and complex integer
+let Super =  M4UnitC, BufferSize = 1 in
+def M4UnitD  : ProcResource<1>; // Integer division (inside C0, serialized)
+let Super =  M4UnitC in
+def M4UnitE  : ProcResource<1>; // CRC (inside C0)
+def M4UnitB  : ProcResource<2>; // Branch
+def M4UnitL0 : ProcResource<1>; // Load
+def M4UnitS0 : ProcResource<1>; // Store
+def M4PipeLS : ProcResource<1>; // Load/Store
+let Super = M4PipeLS in {
+  def M4UnitL1 : ProcResource<1>;
+  def M4UnitS1 : ProcResource<1>;
+}
+def M4PipeF0 : ProcResource<1>; // FP #0
+let Super = M4PipeF0 in {
+  def M4UnitFMAC0 : ProcResource<1>; // FP multiplication
+  def M4UnitFADD0 : ProcResource<1>; // Simple FP
+  def M4UnitFCVT0 : ProcResource<1>; // FP conversion
+  def M4UnitNALU0 : ProcResource<1>; // Simple vector
+  def M4UnitNHAD  : ProcResource<1>; // Horizontal vector
+  def M4UnitNMSC  : ProcResource<1>; // FP and vector miscellanea
+  def M4UnitNMUL0 : ProcResource<1>; // Vector multiplication
+  def M4UnitNSHT0 : ProcResource<1>; // Vector shifting
+  def M4UnitNSHF0 : ProcResource<1>; // Vector shuffling
+  def M4UnitNCRY0 : ProcResource<1>; // Cryptographic
+}
+def M4PipeF1 : ProcResource<1>; // FP #1
+let Super = M4PipeF1 in {
+  def M4UnitFMAC1 : ProcResource<1>; // FP multiplication
+  def M4UnitFADD1 : ProcResource<1>; // Simple FP
+  def M4UnitFDIV0 : ProcResource<2>; // FP division (serialized)
+  def M4UnitFSQR0 : ProcResource<2>; // FP square root (serialized)
+  def M4UnitFST0  : ProcResource<1>; // FP store
+  def M4UnitNALU1 : ProcResource<1>; // Simple vector
+  def M4UnitNSHT1 : ProcResource<1>; // Vector shifting
+  def M4UnitNSHF1 : ProcResource<1>; // Vector shuffling
+}
+def M4PipeF2 : ProcResource<1>; // FP #2
+let Super = M4PipeF2 in {
+  def M4UnitFMAC2 : ProcResource<1>; // FP multiplication
+  def M4UnitFADD2 : ProcResource<1>; // Simple FP
+  def M4UnitFCVT1 : ProcResource<1>; // FP conversion
+  def M4UnitFDIV1 : ProcResource<2>; // FP division (serialized)
+  def M4UnitFSQR1 : ProcResource<2>; // FP square root (serialized)
+  def M4UnitFST1  : ProcResource<1>; // FP store
+  def M4UnitNALU2 : ProcResource<1>; // Simple vector
+  def M4UnitNMUL1 : ProcResource<1>; // Vector multiplication
+  def M4UnitNSHT2 : ProcResource<1>; // Vector shifting
+  def M4UnitNCRY1 : ProcResource<1>; // Cryptographic
+}
+
+def M4UnitALU   : ProcResGroup<[M4UnitA,
+                                M4UnitC]>;
+def M4UnitL     : ProcResGroup<[M4UnitL0,
+                                M4UnitL1]>;
+def M4UnitS     : ProcResGroup<[M4UnitS0,
+                                M4UnitS1]>;
+def M4UnitFMAC  : ProcResGroup<[M4UnitFMAC0,
+                                M4UnitFMAC1,
+                                M4UnitFMAC2]>;
+def M4UnitFMACH : ProcResGroup<[M4UnitFMAC0,
+                                M4UnitFMAC1]>;
+def M4UnitFADD  : ProcResGroup<[M4UnitFADD0,
+                                M4UnitFADD1,
+                                M4UnitFADD2]>;
+def M4UnitFADDH : ProcResGroup<[M4UnitFADD0,
+                                M4UnitFADD1]>;
+def M4UnitFCVT  : ProcResGroup<[M4UnitFCVT0,
+                                M4UnitFCVT1]>;
+def M4UnitFCVTH : ProcResGroup<[M4UnitFCVT0]>;
+def M4UnitFDIV  : ProcResGroup<[M4UnitFDIV0,
+                                M4UnitFDIV1]>;
+def M4UnitFDIVH : ProcResGroup<[M4UnitFDIV0]>;
+def M4UnitFSQR  : ProcResGroup<[M4UnitFSQR0,
+                                M4UnitFSQR1]>;
+def M4UnitFSQRH : ProcResGroup<[M4UnitFSQR0]>;
+def M4UnitFST   : ProcResGroup<[M4UnitFST0,
+                                M4UnitFST1]>;
+def M4UnitNALU  : ProcResGroup<[M4UnitNALU0,
+                                M4UnitNALU1,
+                                M4UnitNALU2]>;
+def M4UnitNALUH : ProcResGroup<[M4UnitNALU0,
+                                M4UnitNALU1]>;
+def M4UnitNMUL  : ProcResGroup<[M4UnitNMUL0,
+                                M4UnitNMUL1]>;
+def M4UnitNSHT  : ProcResGroup<[M4UnitNSHT0,
+                                M4UnitNSHT1,
+                                M4UnitNSHT2]>;
+def M4UnitNSHF  : ProcResGroup<[M4UnitNSHF0,
+                                M4UnitNSHF1]>;
+def M4UnitNSHFH : ProcResGroup<[M4UnitNSHF0]>;
+def M4UnitNCRY  : ProcResGroup<[M4UnitNCRY0,
+                                M4UnitNCRY1]>;
+
+//===----------------------------------------------------------------------===//
+// Resource details.
+
+def M4WriteZ0 : SchedWriteRes<[]> { let Latency = 0; }
+def M4WriteZ1 : SchedWriteRes<[]> { let Latency = 1;
+                                    let NumMicroOps = 0; }
+def M4WriteZ4 : SchedWriteRes<[]> { let Latency = 4;
+                                    let NumMicroOps = 0; }
+
+def M4WriteA1 : SchedWriteRes<[M4UnitALU]> { let Latency = 1; }
+def M4WriteA2 : SchedWriteRes<[M4UnitALU]> { let Latency = 2; }
+def M4WriteAA : SchedWriteRes<[M4UnitALU]> { let Latency = 2;
+                                             let ResourceCycles = [2]; }
+def M4WriteAB : SchedWriteRes<[M4UnitALU,
+                               M4UnitC]>   { let Latency = 2;
+                                             let NumMicroOps = 2; }
+def M4WriteAC : SchedWriteRes<[M4UnitALU,
+                               M4UnitALU,
+                               M4UnitC]>   { let Latency = 3;
+                                             let NumMicroOps = 3; }
+def M4WriteAD : SchedWriteRes<[M4UnitALU,
+                               M4UnitC]>   { let Latency = 2;
+                                             let NumMicroOps = 2; }
+def M4WriteAF : SchedWriteRes<[M4UnitALU]> { let Latency = 2;
+                                             let NumMicroOps = 2; }
+def M4WriteAU : SchedWriteVariant<[SchedVar<IsCopyIdiomPred,   [M4WriteZ0]>,
+                                   SchedVar<ExynosArithPred,   [M4WriteA1]>,
+                                   SchedVar<ExynosLogicExPred, [M4WriteA1]>,
+                                   SchedVar<NoSchedPred,       [M4WriteAA]>]>;
+def M4WriteAV : SchedWriteVariant<[SchedVar<ExynosResetPred, [M4WriteZ0]>,
+                                   SchedVar<NoSchedPred,     [M4WriteAA]>]>;
+def M4WriteAX : SchedWriteVariant<[SchedVar<ExynosArithPred,   [M4WriteA1]>,
+                                   SchedVar<ExynosLogicExPred, [M4WriteA1]>,
+                                   SchedVar<NoSchedPred,       [M4WriteAA]>]>;
+def M4WriteAY : SchedWriteVariant<[SchedVar<ExynosRotateRightImmPred, [M4WriteA1]>,
+                                   SchedVar<NoSchedPred,              [M4WriteAF]>]>;
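+// The variants above model copy and reset idioms as zero latency, since they
+// are presumably resolved at register renaming, arithmetic and logic
+// instructions with fast shifts or extensions as a single cycle, and an EXTR
+// with equal source registers, i.e. a rotate, as a single uop.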
+
+def M4WriteB1 : SchedWriteRes<[M4UnitB]> { let Latency = 1; }
+def M4WriteBX : SchedWriteVariant<[SchedVar<ExynosBranchLinkLRPred, [M4WriteAC]>,
+                                   SchedVar<NoSchedPred,            [M4WriteAB]>]>;
+
+def M4WriteC1 : SchedWriteRes<[M4UnitC]> { let Latency = 1; }
+def M4WriteC3 : SchedWriteRes<[M4UnitC]> { let Latency = 3; }
+def M4WriteCA : SchedWriteRes<[M4UnitC]> { let Latency = 4;
+                                           let ResourceCycles = [2]; }
+
+def M4WriteD12 : SchedWriteRes<[M4UnitD]> { let Latency = 12; }
+def M4WriteD21 : SchedWriteRes<[M4UnitD]> { let Latency = 21; }
+
+def M4WriteE2 : SchedWriteRes<[M4UnitE]> { let Latency = 2; }
+
+def M4WriteL4 : SchedWriteRes<[M4UnitL]> { let Latency = 4; }
+def M4WriteL5 : SchedWriteRes<[M4UnitL]> { let Latency = 5; }
+def M4WriteLA : SchedWriteRes<[M4UnitL,
+                               M4UnitL]> { let Latency = 5;
+                                           let NumMicroOps = 1; }
+def M4WriteLB : SchedWriteRes<[M4UnitA,
+                               M4UnitL]> { let Latency = 5;
+                                           let NumMicroOps = 2; }
+def M4WriteLC : SchedWriteRes<[M4UnitA,
+                               M4UnitL,
+                               M4UnitL]> { let Latency = 5;
+                                           let NumMicroOps = 2; }
+def M4WriteLD : SchedWriteRes<[M4UnitA,
+                               M4UnitL]> { let Latency = 4;
+                                           let NumMicroOps = 2; }
+def M4WriteLE : SchedWriteRes<[M4UnitA,
+                               M4UnitL]> { let Latency = 6;
+                                           let NumMicroOps = 2; }
+def M4WriteLH : SchedWriteRes<[]>        { let Latency = 5;
+                                           let NumMicroOps = 0; }
+def M4WriteLX : SchedWriteVariant<[SchedVar<ScaledIdxPred, [M4WriteL5]>,
+                                   SchedVar<NoSchedPred,   [M4WriteL4]>]>;
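+// In other words, loads using a scaled register offset are modeled as taking
+// one cycle longer than loads using an unscaled or no register offset.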
+
+def M4WriteS1 : SchedWriteRes<[M4UnitS]>  { let Latency = 1; }
+def M4WriteSA : SchedWriteRes<[M4UnitS0]> { let Latency = 3; }
+def M4WriteSB : SchedWriteRes<[M4UnitA,
+                               M4UnitS]>  { let Latency = 2;
+                                            let NumMicroOps = 1; }
+def M4WriteSX : SchedWriteVariant<[SchedVar<ExynosScaledIdxPred, [M4WriteSB]>,
+                                   SchedVar<NoSchedPred,         [M4WriteS1]>]>;
+
+def M4ReadAdrBase : SchedReadVariant<[SchedVar<
+                                        MCSchedPredicate<
+                                          CheckAny<
+                                            [ScaledIdxFn,
+                                             ExynosScaledIdxFn]>>, [ReadDefault]>,
+                                      SchedVar<NoSchedPred,        [ReadDefault]>]>;
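+// Note that, as written, both variants above resolve to ReadDefault, so the
+// base register read is left unadjusted either way; the variant presumably
+// just keeps a hook in place for scaled addressing to be tuned separately.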
+
+def M4WriteNEONA   : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitFADD]>  { let Latency = 3;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEONB   : SchedWriteRes<[M4UnitNALU,
+                                    M4UnitS0]>    { let Latency = 5;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEOND   : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitFST]>   { let Latency = 6;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEONH   : SchedWriteRes<[M4UnitNALU,
+                                    M4UnitS0]>    { let Latency = 5;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEONI   : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitS0]>    { let Latency = 2;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEONJ   : SchedWriteRes<[M4UnitNMSC,
+                                    M4UnitS0]>    { let Latency = 4; }
+def M4WriteNEONK   : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitNMSC,
+                                    M4UnitS0]>    { let Latency = 5;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEONL   : SchedWriteRes<[M4UnitNMUL]>  { let Latency = 3; }
+def M4WriteNEONM   : SchedWriteRes<[M4UnitNMUL]>  { let Latency = 3; }
+def M4WriteNEONN   : SchedWriteRes<[M4UnitNMSC,
+                                    M4UnitNMSC]>  { let Latency = 5;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEONO   : SchedWriteRes<[M4UnitNMSC,
+                                    M4UnitNMSC,
+                                    M4UnitNMSC]>  { let Latency = 8;
+                                                    let NumMicroOps = 3; }
+def M4WriteNEONP   : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitNMSC]>  { let Latency = 4;
+                                                    let NumMicroOps = 2; }
+def M4WriteNEONQ   : SchedWriteRes<[M4UnitNMSC,
+                                    M4UnitC]>     { let Latency = 3;
+                                                    let NumMicroOps = 1; }
+def M4WriteNEONR   : SchedWriteRes<[M4UnitFCVT0,
+                                    M4UnitS0]>    { let Latency = 4;
+                                                    let NumMicroOps = 1; }
+def M4WriteNEONV   : SchedWriteRes<[M4UnitFDIV,
+                                    M4UnitFDIV]>  { let Latency = 7;
+                                                    let ResourceCycles = [6, 6]; }
+def M4WriteNEONVH  : SchedWriteRes<[M4UnitFDIVH,
+                                    M4UnitFDIVH]> { let Latency = 7;
+                                                    let ResourceCycles = [6, 6]; }
+def M4WriteNEONW   : SchedWriteRes<[M4UnitFDIV,
+                                    M4UnitFDIV]>  { let Latency = 12;
+                                                    let ResourceCycles = [9, 9]; }
+def M4WriteNEONX   : SchedWriteRes<[M4UnitFSQR,
+                                    M4UnitFSQR]>  { let Latency = 8;
+                                                    let ResourceCycles = [7, 7]; }
+def M4WriteNEONXH  : SchedWriteRes<[M4UnitFSQRH,
+                                    M4UnitFSQRH]> { let Latency = 7;
+                                                    let ResourceCycles = [6, 6]; }
+def M4WriteNEONY   : SchedWriteRes<[M4UnitFSQR,
+                                    M4UnitFSQR]>  { let Latency = 12;
+                                                    let ResourceCycles = [9, 9]; }
+def M4WriteNEONZ   : SchedWriteVariant<[SchedVar<ExynosQFormPred, [M4WriteNEONO]>,
+                                        SchedVar<NoSchedPred,     [M4WriteNEONN]>]>;
+
+def M4WriteFADD2   : SchedWriteRes<[M4UnitFADD]>  { let Latency = 2; }
+def M4WriteFADD2H  : SchedWriteRes<[M4UnitFADDH]> { let Latency = 2; }
+
+def M4WriteFCVT2   : SchedWriteRes<[M4UnitFCVT]>  { let Latency = 2; }
+def M4WriteFCVT2A  : SchedWriteRes<[M4UnitFCVT0]> { let Latency = 2; }
+def M4WriteFCVT2H  : SchedWriteRes<[M4UnitFCVTH]> { let Latency = 2; }
+def M4WriteFCVT3   : SchedWriteRes<[M4UnitFCVT]>  { let Latency = 3; }
+def M4WriteFCVT3A  : SchedWriteRes<[M4UnitFCVT0]> { let Latency = 3; }
+def M4WriteFCVT3H  : SchedWriteRes<[M4UnitFCVTH]> { let Latency = 3; }
+def M4WriteFCVT4   : SchedWriteRes<[M4UnitFCVT]>  { let Latency = 4; }
+def M4WriteFCVT4A  : SchedWriteRes<[M4UnitFCVT0]> { let Latency = 4; }
+def M4WriteFCVT6A  : SchedWriteRes<[M4UnitFCVT0]> { let Latency = 6; }
+
+def M4WriteFDIV7   : SchedWriteRes<[M4UnitFDIV]>  { let Latency = 7;
+                                                    let ResourceCycles = [6]; }
+def M4WriteFDIV7H  : SchedWriteRes<[M4UnitFDIVH]> { let Latency = 7;
+                                                    let ResourceCycles = [6]; }
+def M4WriteFDIV12  : SchedWriteRes<[M4UnitFDIV]>  { let Latency = 12;
+                                                    let ResourceCycles = [9]; }
+
+def M4WriteFMAC2H  : SchedWriteRes<[M4UnitFMACH]> { let Latency = 2; }
+def M4WriteFMAC3H  : SchedWriteRes<[M4UnitFMACH]> { let Latency = 3; }
+def M4WriteFMAC3   : SchedWriteRes<[M4UnitFMAC]>  { let Latency = 3; }
+def M4WriteFMAC4   : SchedWriteRes<[M4UnitFMAC]>  { let Latency = 4; }
+def M4WriteFMAC4H  : SchedWriteRes<[M4UnitFMACH]> { let Latency = 4; }
+def M4WriteFMAC5   : SchedWriteRes<[M4UnitFMAC]>  { let Latency = 5; }
+
+def M4WriteFSQR7H  : SchedWriteRes<[M4UnitFSQRH]> { let Latency = 7;
+                                                    let ResourceCycles = [6]; }
+def M4WriteFSQR8   : SchedWriteRes<[M4UnitFSQR]>  { let Latency = 8;
+                                                    let ResourceCycles = [7]; }
+def M4WriteFSQR12  : SchedWriteRes<[M4UnitFSQR]>  { let Latency = 12;
+                                                    let ResourceCycles = [9]; }
+
+def M4WriteNALU1   : SchedWriteRes<[M4UnitNALU]>  { let Latency = 1; }
+def M4WriteNALU1H  : SchedWriteRes<[M4UnitNALUH]> { let Latency = 1; }
+
+def M4WriteNCRY1   : SchedWriteRes<[M4UnitNCRY]>  { let Latency = 1; }
+def M4WriteNCRY1A  : SchedWriteRes<[M4UnitNCRY0]> { let Latency = 1; }
+def M4WriteNCRY3A  : SchedWriteRes<[M4UnitNCRY0]> { let Latency = 3; }
+def M4WriteNCRY5A  : SchedWriteRes<[M4UnitNCRY]>  { let Latency = 5; }
+
+def M4WriteNHAD1   : SchedWriteRes<[M4UnitNHAD]>  { let Latency = 1; }
+def M4WriteNHAD3   : SchedWriteRes<[M4UnitNHAD]>  { let Latency = 3; }
+
+def M4WriteNMSC1   : SchedWriteRes<[M4UnitNMSC]>  { let Latency = 1; }
+def M4WriteNMSC2   : SchedWriteRes<[M4UnitNMSC]>  { let Latency = 2; }
+def M4WriteNMSC3   : SchedWriteRes<[M4UnitNMSC]>  { let Latency = 3; }
+
+def M4WriteNMUL3   : SchedWriteRes<[M4UnitNMUL]>  { let Latency = 3; }
+
+def M4WriteNSHF1   : SchedWriteRes<[M4UnitNSHF]>  { let Latency = 1; }
+def M4WriteNSHF1H  : SchedWriteRes<[M4UnitNSHFH]> { let Latency = 1; }
+def M4WriteNSHF3   : SchedWriteRes<[M4UnitNSHF]>  { let Latency = 3; }
+def M4WriteNSHFA   : SchedWriteRes<[M4UnitNSHF]>  { let Latency = 1;
+                                                    let ResourceCycles = [2]; }
+def M4WriteNSHFB   : SchedWriteRes<[M4UnitNSHF]>  { let Latency = 2;
+                                                    let NumMicroOps = 2;
+                                                    let ResourceCycles = [2]; }
+def M4WriteNSHFC   : SchedWriteRes<[M4UnitNSHF]>  { let Latency = 3;
+                                                    let NumMicroOps = 3;
+                                                    let ResourceCycles = [4]; }
+def M4WriteNSHFD   : SchedWriteRes<[M4UnitNSHF]>  { let Latency = 4;
+                                                    let NumMicroOps = 4;
+                                                    let ResourceCycles = [4]; }
+
+def M4WriteNSHT1   : SchedWriteRes<[M4UnitNSHT]>  { let Latency = 1; }
+def M4WriteNSHT2   : SchedWriteRes<[M4UnitNSHT]>  { let Latency = 2; }
+def M4WriteNSHT3   : SchedWriteRes<[M4UnitNSHT]>  { let Latency = 3; }
+def M4WriteNSHT4A  : SchedWriteRes<[M4UnitNSHT1]> { let Latency = 4; }
+
+def M4WriteVLDA    : SchedWriteRes<[M4UnitL,
+                                    M4UnitL]>     { let Latency = 5;
+                                                    let NumMicroOps = 2; }
+def M4WriteVLDB    : SchedWriteRes<[M4UnitL,
+                                    M4UnitL,
+                                    M4UnitL]>     { let Latency = 6;
+                                                    let NumMicroOps = 3; }
+def M4WriteVLDC    : SchedWriteRes<[M4UnitL,
+                                    M4UnitL,
+                                    M4UnitL,
+                                    M4UnitL]>     { let Latency = 6;
+                                                    let NumMicroOps = 4; }
+def M4WriteVLDD    : SchedWriteRes<[M4UnitL,
+                                    M4UnitNSHF]>  { let Latency = 6;
+                                                    let NumMicroOps = 2;
+                                                    let ResourceCycles = [2, 1]; }
+def M4WriteVLDF    : SchedWriteRes<[M4UnitL,
+                                    M4UnitL]>     { let Latency = 10;
+                                                    let NumMicroOps = 2;
+                                                    let ResourceCycles = [3, 3]; }
+def M4WriteVLDG    : SchedWriteRes<[M4UnitL,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF]>  { let Latency = 6;
+                                                    let NumMicroOps = 3;
+                                                    let ResourceCycles = [2, 1, 1]; }
+def M4WriteVLDI    : SchedWriteRes<[M4UnitL,
+                                    M4UnitL,
+                                    M4UnitL]>     { let Latency = 12;
+                                                    let NumMicroOps = 3;
+                                                    let ResourceCycles = [3, 3, 3]; }
+def M4WriteVLDJ    : SchedWriteRes<[M4UnitL,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF]>  { let Latency = 7;
+                                                    let NumMicroOps = 4;
+                                                    let ResourceCycles = [3, 1, 1, 1]; }
+def M4WriteVLDK    : SchedWriteRes<[M4UnitL,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF]>  { let Latency = 7;
+                                                    let NumMicroOps = 5;
+                                                    let ResourceCycles = [3, 1, 1, 1, 1]; }
+def M4WriteVLDL    : SchedWriteRes<[M4UnitL,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitL,
+                                    M4UnitNSHF]>  { let Latency = 7;
+                                                    let NumMicroOps = 5;
+                                                    let ResourceCycles = [3, 1, 1, 6, 1]; }
+def M4WriteVLDM    : SchedWriteRes<[M4UnitL,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitL,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF]>  { let Latency = 7;
+                                                    let NumMicroOps = 6;
+                                                    let ResourceCycles = [3, 1, 1, 3, 1, 1]; }
+def M4WriteVLDN    : SchedWriteRes<[M4UnitL,
+                                    M4UnitL,
+                                    M4UnitL,
+                                    M4UnitL]>     { let Latency = 14;
+                                                    let NumMicroOps = 4;
+                                                    let ResourceCycles = [3, 3, 3, 3]; }
+
+def M4WriteVST1    : SchedWriteRes<[M4UnitS,
+                                    M4UnitFST]>  { let Latency = 1;
+                                                   let NumMicroOps = 1; }
+def M4WriteVSTA    : WriteSequence<[WriteVST], 2>;
+def M4WriteVSTB    : WriteSequence<[WriteVST], 3>;
+def M4WriteVSTC    : WriteSequence<[WriteVST], 4>;
+def M4WriteVSTD    : SchedWriteRes<[M4UnitS,
+                                    M4UnitFST]>   { let Latency = 2; }
+def M4WriteVSTE    : SchedWriteRes<[M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST]>   { let Latency = 2;
+                                                    let NumMicroOps = 2; }
+def M4WriteVSTF    : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST]>   { let Latency = 4;
+                                                    let NumMicroOps = 4;
+                                                    let ResourceCycles = [1, 2, 1, 2, 1]; }
+def M4WriteVSTG    : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST]>   { let Latency = 5;
+                                                    let NumMicroOps = 6;
+                                                    let ResourceCycles = [1, 1, 1, 2, 1, 2, 1, 2, 1]; }
+def M4WriteVSTI    : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST]>   { let Latency = 8;
+                                                    let NumMicroOps = 5;
+                                                    let ResourceCycles = [1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1]; }
+def M4WriteVSTJ    : SchedWriteRes<[M4UnitA,
+                                    M4UnitS,
+                                    M4UnitFST]>   { let Latency = 1;
+                                                    let NumMicroOps = 2; }
+def M4WriteVSTK    : SchedWriteRes<[M4UnitA,
+                                    M4UnitS,
+                                    M4UnitFST]>   { let Latency = 3;
+                                                    let NumMicroOps = 2; }
+def M4WriteVSTL    : SchedWriteRes<[M4UnitNSHF,
+                                    M4UnitNSHF,
+                                    M4UnitS,
+                                    M4UnitFST,
+                                    M4UnitS,
+                                    M4UnitFST]>   { let Latency = 4;
+                                                    let NumMicroOps = 4;
+                                                    let ResourceCycles = [1, 1, 2, 1, 2, 1]; }
+
+// Special cases.
+def M4WriteCOPY    : SchedWriteVariant<[SchedVar<ExynosFPPred, [M4WriteNALU1]>,
+                                        SchedVar<NoSchedPred,  [M4WriteZ0]>]>;
+def M4WriteMOVI    : SchedWriteVariant<[SchedVar<IsZeroFPIdiomPred, [M4WriteZ0]>,
+                                        SchedVar<NoSchedPred,       [M4WriteNALU1]>]>;
+def M4WriteMULL    : SchedWriteVariant<[SchedVar<ExynosLongVectorUpperPred, [M4WriteNEONM]>,
+                                        SchedVar<NoSchedPred,               [M4WriteNMUL3]>]>;
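+// A COPY between general purpose registers and a MOVI of a zero FP immediate
+// are presumably resolved at register renaming as well, hence their zero
+// latency variants above.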
+
+// Fast forwarding.
+def M4ReadAESM1    : SchedReadAdvance<+1, [M4WriteNCRY1]>;
+def M4ReadFMACM1   : SchedReadAdvance<+1, [M4WriteFMAC4,
+                                           M4WriteFMAC4H,
+                                           M4WriteFMAC5]>;
+def M4ReadNMULM1   : SchedReadAdvance<+1, [M4WriteNMUL3]>;
+def M4ReadMULLP2   : SchedReadAdvance<-2, [M4WriteNEONM]>;
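+// The cycle count in SchedReadAdvance is subtracted from the producer
+// latency, so a value of +1 reads a result one cycle early and a value of -2
+// reads it two cycles late.  For instance, a back to back chain of FMADDs
+// through M4ReadFMACM1 sees an effective latency of 4 - 1 = 3 cycles, while
+// a long multiply feeding the upper half of another long multiply through
+// M4ReadMULLP2 sees 3 + 2 = 5 cycles.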
+
+//===----------------------------------------------------------------------===//
+// Coarse scheduling model.
+
+// Branch instructions.
+def : SchedAlias<WriteBr,    M4WriteZ0>;
+def : SchedAlias<WriteBrReg, M4WriteC1>;
+
+// Arithmetic and logical integer instructions.
+def : SchedAlias<WriteI,     M4WriteA1>;
+def : SchedAlias<WriteIEReg, M4WriteAA>; // FIXME: M4WriteAX crashes TableGen.
+def : SchedAlias<WriteISReg, M4WriteAA>; // FIXME: M4WriteAX crashes TableGen.
+def : SchedAlias<WriteIS,    M4WriteA1>;
+
+// Move instructions.
+def : SchedAlias<WriteImm, M4WriteA1>;
+
+// Divide and multiply instructions.
+def : SchedAlias<WriteID32, M4WriteD12>;
+def : SchedAlias<WriteID64, M4WriteD21>;
+def : SchedAlias<WriteIM32, M4WriteC3>;
+def : SchedAlias<WriteIM64, M4WriteCA>;
+
+// Miscellaneous instructions.
+def : SchedAlias<WriteExtr, M4WriteAY>;
+
+// Addressing modes.
+def : SchedAlias<WriteAdr,    M4WriteZ1>;
+def : SchedAlias<ReadAdrBase, M4ReadAdrBase>;
+
+// Load instructions.
+def : SchedAlias<WriteLD,    M4WriteL4>;
+def : SchedAlias<WriteLDHi,  M4WriteZ4>;
+def : SchedAlias<WriteLDIdx, M4WriteLX>;
+
+// Store instructions.
+def : SchedAlias<WriteST,    M4WriteS1>;
+def : SchedAlias<WriteSTP,   M4WriteS1>;
+def : SchedAlias<WriteSTX,   M4WriteS1>;
+def : SchedAlias<WriteSTIdx, M4WriteSX>;
+
+// FP data instructions.
+def : SchedAlias<WriteF,    M4WriteFADD2>;
+def : SchedAlias<WriteFCmp, M4WriteNMSC2>;
+def : SchedAlias<WriteFDiv, M4WriteFDIV12>;
+def : SchedAlias<WriteFMul, M4WriteFMAC3>;
+
+// FP miscellaneous instructions.
+def : SchedAlias<WriteFCvt,  M4WriteFCVT2>;
+def : SchedAlias<WriteFImm,  M4WriteNALU1>;
+def : SchedAlias<WriteFCopy, M4WriteCOPY>;
+
+// FP load instructions.
+def : SchedAlias<WriteVLD, M4WriteL5>;
+
+// FP store instructions.
+def : SchedAlias<WriteVST, M4WriteVST1>;
+
+// ASIMD FP instructions.
+def : SchedAlias<WriteV, M4WriteNALU1>;
+
+// Other miscellaneous instructions.
+def : WriteRes<WriteAtomic,  []> { let Unsupported = 1; }
+def : WriteRes<WriteBarrier, []> { let Latency = 1; }
+def : WriteRes<WriteHint,    []> { let Latency = 1; }
+def : WriteRes<WriteSys,     []> { let Latency = 1; }
+
+//===----------------------------------------------------------------------===//
+// Generic fast forwarding.
+
+// TODO: Add FP register forwarding rules.
+
+def : ReadAdvance<ReadI,       0>;
+def : ReadAdvance<ReadISReg,   0>;
+def : ReadAdvance<ReadIEReg,   0>;
+def : ReadAdvance<ReadIM,      0>;
+// TODO: The forwarding for 32 bits actually saves 2 cycles.
+def : ReadAdvance<ReadIMA,     3, [WriteIM32, WriteIM64]>;
+def : ReadAdvance<ReadID,      0>;
+def : ReadAdvance<ReadExtrHi,  0>;
+def : ReadAdvance<ReadAdrBase, 0>;
+def : ReadAdvance<ReadVLD,     0>;
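+// Since WriteIM32 takes 3 cycles (M4WriteC3) and WriteIM64 takes 4 cycles
+// (M4WriteCA), the advance of 3 cycles for ReadIMA above yields effective
+// accumulator latencies of 0 and 1 cycles for 32 and 64 bit
+// multiply-accumulate chains, respectively, which, per the TODO above, is
+// presumably 1 cycle too optimistic in the 32 bit case.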
+
+//===----------------------------------------------------------------------===//
+// Finer scheduling model.
+
+// Branch instructions.
+def : InstRW<[M4WriteB1], (instrs Bcc)>;
+def : InstRW<[M4WriteAF], (instrs BL)>;
+def : InstRW<[M4WriteBX], (instrs BLR)>;
+def : InstRW<[M4WriteC1], (instregex "^CBN?Z[WX]")>;
+def : InstRW<[M4WriteAD], (instregex "^TBN?Z[WX]")>;
+
+// Arithmetic and logical integer instructions.
+def : InstRW<[M4WriteAX], (instregex "^(ADD|AND|BIC|EON|EOR|ORN|SUB)[WX]rs$")>;
+def : InstRW<[M4WriteAU], (instrs ORRWrs, ORRXrs)>;
+def : InstRW<[M4WriteAX], (instregex "^(ADD|AND|BIC|SUB)S[WX]rs$")>;
+def : InstRW<[M4WriteAX], (instregex "^(ADD|SUB)S?[WX]rx(64)?$")>;
+def : InstRW<[M4WriteAV], (instrs ADDWri, ADDXri, ORRWri, ORRXri)>;
+
+// Move instructions.
+def : InstRW<[M4WriteCOPY], (instrs COPY)>;
+def : InstRW<[M4WriteZ0],   (instrs ADR, ADRP)>;
+def : InstRW<[M4WriteZ0],   (instregex "^MOV[NZ][WX]i")>;
+
+// Divide and multiply instructions.
+
+// Miscellaneous instructions.
+
+// Load instructions.
+def : InstRW<[M4WriteLD,
+              WriteLDHi,
+              WriteAdr],    (instregex "^LDP(SW|W|X)(post|pre)")>;
+def : InstRW<[M4WriteL5,
+              ReadAdrBase], (instregex "^LDR(BB|SBW|SBX|HH|SHW|SHX|SW|W|X)roW")>;
+def : InstRW<[WriteLDIdx,
+              ReadAdrBase], (instregex "^LDR(BB|SBW|SBX|HH|SHW|SHX|SW|W|X)roX")>;
+def : InstRW<[M4WriteL5,
+              ReadAdrBase], (instrs PRFMroW)>;
+def : InstRW<[WriteLDIdx,
+              ReadAdrBase], (instrs PRFMroX)>;
+
+// Store instructions.
+def : InstRW<[M4WriteSB,
+              ReadAdrBase], (instregex "^STR(BB|HH|W|X)roW")>;
+def : InstRW<[WriteST,
+              ReadAdrBase], (instregex "^STR(BB|HH|W|X)roX")>;
+
+// FP data instructions.
+def : InstRW<[M4WriteNSHF1H], (instrs FABSHr)>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^FABS[SD]r")>;
+def : InstRW<[M4WriteFADD2H], (instregex "^F(ADD|SUB)Hrr")>;
+def : InstRW<[M4WriteFADD2],  (instregex "^F(ADD|SUB)[SD]rr")>;
+def : InstRW<[M4WriteFADD2H], (instregex "^FADDPv.i16")>;
+def : InstRW<[M4WriteFADD2],  (instregex "^FADDPv.i(32|64)")>;
+def : InstRW<[M4WriteNEONQ],  (instregex "^FCCMPE?[HSD]rr")>;
+def : InstRW<[M4WriteNMSC2],  (instregex "^FCMPE?[HSD]r[ir]")>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^F(AC|CM)(EQ|GE|GT|LE|LT)(16|32|64|v1)")>;
+def : InstRW<[M4WriteFDIV7H], (instrs FDIVHrr)>;
+def : InstRW<[M4WriteFDIV7],  (instrs FDIVSrr)>;
+def : InstRW<[M4WriteFDIV12], (instrs FDIVDrr)>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^F(MAX|MIN)(NM)?[HSD]rr")>;
+def : InstRW<[M4WriteFMAC3H], (instregex "^FN?MULHrr")>;
+def : InstRW<[M4WriteFMAC3],  (instregex "^FN?MUL[SD]rr")>;
+def : InstRW<[M4WriteFMAC3H], (instrs FMULX16)>;
+def : InstRW<[M4WriteFMAC3],  (instregex "^FMULX(32|64)")>;
+def : InstRW<[M4WriteFMAC4H,
+              M4ReadFMACM1],  (instregex "^FN?M(ADD|SUB)Hrrr")>;
+def : InstRW<[M4WriteFMAC4,
+              M4ReadFMACM1],  (instregex "^FN?M(ADD|SUB)[SD]rrr")>;
+def : InstRW<[M4WriteNALU1H], (instrs FNEGHr)>;
+def : InstRW<[M4WriteNALU1],  (instregex "^FNEG[SD]r")>;
+def : InstRW<[M4WriteFCVT3A], (instregex "^FRINT.+r")>;
+def : InstRW<[M4WriteNEONH],  (instregex "^FCSEL[HSD]rrr")>;
+def : InstRW<[M4WriteFSQR7H], (instrs FSQRTHr)>;
+def : InstRW<[M4WriteFSQR8],  (instrs FSQRTSr)>;
+def : InstRW<[M4WriteFSQR12], (instrs FSQRTDr)>;
+
+// FP miscellaneous instructions.
+def : InstRW<[M4WriteFCVT2H], (instregex "^FCVTH[SD]r")>;
+def : InstRW<[M4WriteFCVT2H], (instregex "^FCVT[SD]Hr")>;
+def : InstRW<[M4WriteFCVT2],  (instregex "^FCVT[SD][SD]r")>;
+def : InstRW<[M4WriteFCVT6A], (instregex "^[SU]CVTF[SU][XW][HSD]ri")>;
+def : InstRW<[M4WriteNEONR],  (instregex "^FCVT[AMNPZ][SU][SU][XW][HSD]r")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^FMOV[HSD][ir]")>;
+def : InstRW<[M4WriteSA],     (instregex "^FMOV[WX][HSD]r")>;
+def : InstRW<[M4WriteNEONJ],  (instregex "^FMOV[HSD][WX]r")>;
+def : InstRW<[M4WriteNEONI],  (instregex "^FMOVXDHighr")>;
+def : InstRW<[M4WriteNEONK],  (instregex "^FMOVDXHighr")>;
+def : InstRW<[M4WriteFCVT3H], (instregex "^F(RECP|RSQRT)Ev1f16")>;
+def : InstRW<[M4WriteFCVT3],  (instregex "^F(RECP|RSQRT)Ev1i(32|64)")>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^FRECPXv1")>;
+def : InstRW<[M4WriteFMAC4H,
+              M4ReadFMACM1],  (instregex "^F(RECP|RSQRT)S16")>;
+def : InstRW<[M4WriteFMAC4,
+              M4ReadFMACM1],  (instregex "^F(RECP|RSQRT)S(32|64)")>;
+
+// FP load instructions.
+def : InstRW<[WriteVLD],    (instregex "^LDR[SDQ]l")>;
+def : InstRW<[WriteVLD],    (instregex "^LDUR[BHSDQ]i")>;
+def : InstRW<[WriteVLD,
+              WriteAdr],    (instregex "^LDR[BHSDQ](post|pre)")>;
+def : InstRW<[WriteVLD],    (instregex "^LDR[BHSDQ]ui")>;
+def : InstRW<[M4WriteLE,
+              ReadAdrBase], (instregex "^LDR[BHSDQ]roW")>;
+def : InstRW<[WriteVLD,
+              ReadAdrBase], (instregex "^LDR[BHSD]roX")>;
+def : InstRW<[M4WriteLE,
+              ReadAdrBase], (instrs LDRQroX)>;
+def : InstRW<[WriteVLD,
+              M4WriteLH],   (instregex "^LDN?P[SD]i")>;
+def : InstRW<[M4WriteLA,
+              M4WriteLH],   (instregex "^LDN?PQi")>;
+def : InstRW<[M4WriteL5,
+              M4WriteLH,
+              WriteAdr],    (instregex "^LDP[SD]post")>;
+def : InstRW<[M4WriteLB,
+              M4WriteLH,
+              WriteAdr],    (instrs LDPQpost)>;
+def : InstRW<[M4WriteLB,
+              M4WriteLH,
+              WriteAdr],    (instregex "^LDP[SD]pre")>;
+def : InstRW<[M4WriteLC,
+              M4WriteLH,
+              WriteAdr],    (instrs LDPQpre)>;
+
+// FP store instructions.
+def : InstRW<[WriteVST],    (instregex "^STUR[BHSDQ]i")>;
+def : InstRW<[WriteVST,
+              WriteAdr],    (instregex "^STR[BHSDQ](post|pre)")>;
+def : InstRW<[WriteVST],    (instregex "^STR[BHSDQ]ui")>;
+def : InstRW<[M4WriteVSTJ,
+              ReadAdrBase], (instregex "^STR[BHSD]roW")>;
+def : InstRW<[M4WriteVSTK,
+              ReadAdrBase], (instrs STRQroW)>;
+def : InstRW<[WriteVST,
+              ReadAdrBase], (instregex "^STR[BHSD]roX")>;
+def : InstRW<[M4WriteVSTK,
+              ReadAdrBase], (instrs STRQroX)>;
+def : InstRW<[WriteVST],    (instregex "^STN?P[SD]i")>;
+def : InstRW<[M4WriteVSTA], (instregex "^STN?PQi")>;
+def : InstRW<[WriteVST,
+              WriteAdr],    (instregex "^STP[SD](post|pre)")>;
+def : InstRW<[M4WriteVSTJ,
+              WriteAdr],    (instregex "^STPQ(post|pre)")>;
+
+// ASIMD instructions.
+def : InstRW<[M4WriteNHAD1],  (instregex "^[SU]ABDL?v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU]ABAL?v")>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^ABSv")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^(ADD|NEG|SUB)v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU]?ADDL?Pv")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU]H(ADD|SUB)v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU](ADD|SUB)[LW]v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^R?(ADD|SUB)HN2?v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU]Q(ADD|SUB)v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^(SU|US)QADDv")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU]RHADDv")>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^SQ(ABS|NEG)v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU]?ADDL?Vv")>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^CM(EQ|GE|GT|HI|HS|LE|LT)v")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^CMTSTv")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^(AND|BIC|EOR|NOT|ORN|ORR)v")>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^[SU](MIN|MAX)v")>;
+def : InstRW<[M4WriteNMSC2],  (instregex "^[SU](MIN|MAX)Pv")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU](MIN|MAX)Vv")>;
+def : InstRW<[M4WriteNMUL3],  (instregex "^(SQR?D)?MULH?v")>;
+def : InstRW<[M4WriteNMUL3,
+              M4ReadNMULM1],  (instregex "^ML[AS]v")>;
+def : InstRW<[M4WriteNMUL3],  (instregex "^SQRDML[AS]H")>;
+def : InstRW<[M4WriteMULL,
+              M4ReadMULLP2],  (instregex "^(S|U|SQD)ML[AS]Lv")>;
+def : InstRW<[M4WriteMULL,
+              M4ReadMULLP2],  (instregex "^(S|U|SQD)MULLv")>;
+def : InstRW<[M4WriteNMUL3],  (instregex "^[SU]DOT(lane)?v")>;
+def : InstRW<[M4WriteNHAD3],  (instregex "^[SU]ADALPv")>;
+def : InstRW<[M4WriteNSHT4A], (instregex "^[SU]R?SRA[dv]")>;
+def : InstRW<[M4WriteNSHT1],  (instregex "^SHL[dv]")>;
+def : InstRW<[M4WriteNSHT1],  (instregex "^S[LR]I[dv]")>;
+def : InstRW<[M4WriteNSHT1],  (instregex "^[SU]SH[LR][dv]")>;
+def : InstRW<[M4WriteNSHT2],  (instregex "^[SU]?SHLLv")>;
+def : InstRW<[M4WriteNSHT4A], (instregex "^[SU]?Q?R?SHRU?N[bhsv]")>;
+def : InstRW<[M4WriteNSHT4A], (instregex "^[SU]RSH[LR][dv]")>;
+def : InstRW<[M4WriteNSHT4A], (instregex "^[SU]QR?SHLU?[bhsdv]")>;
+
+// ASIMD FP instructions.
+def : InstRW<[M4WriteNSHF1H], (instregex "^FABSv.f16")>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^FABSv.f(32|64)")>;
+def : InstRW<[M4WriteFADD2H], (instregex "^F(ABD|ADD|SUB)v.f16")>;
+def : InstRW<[M4WriteFADD2],  (instregex "^F(ABD|ADD|SUB)v.f(32|64)")>;
+def : InstRW<[M4WriteFADD2H], (instregex "^FADDPv.f16")>;
+def : InstRW<[M4WriteFADD2],  (instregex "^FADDPv.f(32|64)")>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^F(AC|CM)(EQ|GE|GT|LE|LT)v[^1]")>;
+def : InstRW<[M4WriteFCVT2],  (instregex "^FCVT(L|N|XN)v")>;
+def : InstRW<[M4WriteFCVT2A], (instregex "^FCVT[AMNPZ][SU]v")>;
+def : InstRW<[M4WriteFCVT2H], (instregex "^[SU]CVTFv.[fi]16")>;
+def : InstRW<[M4WriteFCVT2],  (instregex "^[SU]CVTFv.[fi](32|64)")>;
+def : InstRW<[M4WriteFDIV7H], (instrs FDIVv4f16)>;
+def : InstRW<[M4WriteNEONVH], (instrs FDIVv8f16)>;
+def : InstRW<[M4WriteFDIV7],  (instrs FDIVv2f32)>;
+def : InstRW<[M4WriteNEONV],  (instrs FDIVv4f32)>;
+def : InstRW<[M4WriteNEONW],  (instrs FDIVv2f64)>;
+def : InstRW<[M4WriteNMSC1],  (instregex "^F(MAX|MIN)(NM)?v")>;
+def : InstRW<[M4WriteNMSC2],  (instregex "^F(MAX|MIN)(NM)?Pv")>;
+def : InstRW<[M4WriteNEONZ],  (instregex "^F(MAX|MIN)(NM)?Vv")>;
+def : InstRW<[M4WriteFMAC2H], (instregex "^FMULX?v.[fi]16")>;
+def : InstRW<[M4WriteFMAC3],  (instregex "^FMULX?v.[fi](32|64)")>;
+def : InstRW<[M4WriteFMAC4H,
+              M4ReadFMACM1],  (instregex "^FML[AS]v.[fi]16")>;
+def : InstRW<[M4WriteFMAC4,
+              M4ReadFMACM1],  (instregex "^FML[AS]v.[fi](32|64)")>;
+def : InstRW<[M4WriteNALU1H], (instregex "^FNEGv.f16")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^FNEGv.f(32|64)")>;
+def : InstRW<[M4WriteFCVT3A], (instregex "^FRINT[AIMNPXZ]v")>;
+def : InstRW<[M4WriteFSQR7H], (instrs FSQRTv4f16)>;
+def : InstRW<[M4WriteNEONXH], (instrs FSQRTv8f16)>;
+def : InstRW<[M4WriteFSQR8],  (instrs FSQRTv2f32)>;
+def : InstRW<[M4WriteNEONX],  (instrs FSQRTv4f32)>;
+def : InstRW<[M4WriteNEONY],  (instrs FSQRTv2f64)>;
+
+// ASIMD miscellaneous instructions.
+def : InstRW<[M4WriteNALU1],  (instregex "^RBITv")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^(BIF|BIT|BSL)v")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^CL[STZ]v")>;
+def : InstRW<[M4WriteNEONB],  (instregex "^DUPv.+gpr")>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^CPY")>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^DUPv.+lane")>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^EXTv")>;
+def : InstRW<[M4WriteNSHT4A], (instregex "^XTNv")>;
+def : InstRW<[M4WriteNSHT4A], (instregex "^[SU]?QXTU?Nv")>;
+def : InstRW<[M4WriteNEONB],  (instregex "^INSv.+gpr")>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^INSv.+lane")>;
+def : InstRW<[M4WriteMOVI],   (instregex "^(MOV|MVN)I")>;
+def : InstRW<[M4WriteNALU1H], (instregex "^FMOVv.f16")>;
+def : InstRW<[M4WriteNALU1],  (instregex "^FMOVv.f(32|64)")>;
+def : InstRW<[M4WriteFCVT3H], (instregex "^F(RECP|RSQRT)Ev[248]f16")>;
+def : InstRW<[M4WriteFCVT3],  (instregex "^F(RECP|RSQRT)Ev[248]f(32|64)")>;
+def : InstRW<[M4WriteFCVT3],  (instregex "^U(RECP|RSQRT)Ev[24]i32")>;
+def : InstRW<[M4WriteFMAC4H,
+              M4ReadFMACM1],  (instregex "^F(RECP|RSQRT)Sv.f16")>;
+def : InstRW<[M4WriteFMAC4,
+              M4ReadFMACM1],  (instregex "^F(RECP|RSQRT)Sv.f(32|64)")>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^REV(16|32|64)v")>;
+def : InstRW<[M4WriteNSHFA],  (instregex "^TB[LX]v(8|16)i8One")>;
+def : InstRW<[M4WriteNSHFB],  (instregex "^TB[LX]v(8|16)i8Two")>;
+def : InstRW<[M4WriteNSHFC],  (instregex "^TB[LX]v(8|16)i8Three")>;
+def : InstRW<[M4WriteNSHFD],  (instregex "^TB[LX]v(8|16)i8Four")>;
+def : InstRW<[M4WriteNEONP],  (instregex "^[SU]MOVv")>;
+def : InstRW<[M4WriteNSHF1],  (instregex "^(TRN|UZP|ZIP)[12]v")>;
+
+// ASIMD load instructions.
+def : InstRW<[WriteVLD],    (instregex "LD1Onev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteVLD,
+              M4WriteA1],   (instregex "LD1Onev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[WriteVLD],    (instregex "LD1Onev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLD,
+              M4WriteA1],   (instregex "LD1Onev(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDA], (instregex "LD1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVLDA,
+              M4WriteA1],   (instregex "LD1Twov(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVLDA], (instregex "LD1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDA,
+              M4WriteA1],   (instregex "LD1Twov(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDB], (instregex "LD1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVLDB,
+              M4WriteA1],   (instregex "LD1Threev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVLDB], (instregex "LD1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDB,
+              M4WriteA1],   (instregex "LD1Threev(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDC], (instregex "LD1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVLDC,
+              M4WriteA1],   (instregex "LD1Fourv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVLDC], (instregex "LD1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDC,
+              M4WriteA1],   (instregex "LD1Fourv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDD], (instregex "LD1i(8|16|32|64)$")>;
+def : InstRW<[M4WriteVLDD,
+              M4WriteA1],   (instregex "LD1i(8|16|32|64)_POST$")>;
+
+def : InstRW<[WriteVLD],    (instregex "LD1Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteVLD,
+              M4WriteA1],   (instregex "LD1Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[WriteVLD],    (instregex "LD1Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLD,
+              M4WriteA1],   (instregex "LD1Rv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDF], (instregex "LD2Twov(8b|4h|2s)$")>;
+def : InstRW<[M4WriteVLDF,
+              M4WriteA1],   (instregex "LD2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[M4WriteVLDF], (instregex "LD2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDF,
+              M4WriteA1],   (instregex "LD2Twov(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDG], (instregex "LD2i(8|16|32|64)$")>;
+def : InstRW<[M4WriteVLDG,
+              M4WriteA1],   (instregex "LD2i(8|16|32|64)_POST$")>;
+
+def : InstRW<[M4WriteVLDA], (instregex "LD2Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVLDA,
+              M4WriteA1],   (instregex "LD2Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVLDA], (instregex "LD2Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDA,
+              M4WriteA1],   (instregex "LD2Rv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDI], (instregex "LD3Threev(8b|4h|2s)$")>;
+def : InstRW<[M4WriteVLDI,
+              M4WriteA1],   (instregex "LD3Threev(8b|4h|2s)_POST$")>;
+def : InstRW<[M4WriteVLDI], (instregex "LD3Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDI,
+              M4WriteA1],   (instregex "LD3Threev(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDJ], (instregex "LD3i(8|16|32)$")>;
+def : InstRW<[M4WriteVLDJ,
+              M4WriteA1],   (instregex "LD3i(8|16|32)_POST$")>;
+def : InstRW<[M4WriteVLDL], (instregex "LD3i64$")>;
+def : InstRW<[M4WriteVLDL,
+              M4WriteA1],   (instregex "LD3i64_POST$")>;
+
+def : InstRW<[M4WriteVLDB], (instregex "LD3Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVLDB,
+              M4WriteA1],   (instregex "LD3Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVLDB], (instregex "LD3Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDB,
+              M4WriteA1],   (instregex "LD3Rv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDN], (instregex "LD4Fourv(8b|4h|2s)$")>;
+def : InstRW<[M4WriteVLDN,
+              M4WriteA1],   (instregex "LD4Fourv(8b|4h|2s)_POST$")>;
+def : InstRW<[M4WriteVLDN], (instregex "LD4Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDN,
+              M4WriteA1],   (instregex "LD4Fourv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVLDK], (instregex "LD4i(8|16|32)$")>;
+def : InstRW<[M4WriteVLDK,
+              M4WriteA1],   (instregex "LD4i(8|16|32)_POST$")>;
+def : InstRW<[M4WriteVLDM], (instregex "LD4i64$")>;
+def : InstRW<[M4WriteVLDM,
+              M4WriteA1],   (instregex "LD4i64_POST$")>;
+
+def : InstRW<[M4WriteVLDC], (instregex "LD4Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVLDC,
+              M4WriteA1],   (instregex "LD4Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVLDC], (instregex "LD4Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVLDC,
+              M4WriteA1],   (instregex "LD4Rv(16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store instructions.
+def : InstRW<[WriteVST],    (instregex "ST1Onev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteVST,
+              M4WriteA1],   (instregex "ST1Onev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[WriteVST],    (instregex "ST1Onev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVST,
+              M4WriteA1],   (instregex "ST1Onev(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVSTA], (instregex "ST1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVSTA,
+              M4WriteA1],   (instregex "ST1Twov(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVSTA], (instregex "ST1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVSTA,
+              M4WriteA1],   (instregex "ST1Twov(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVSTB], (instregex "ST1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVSTB,
+              M4WriteA1],   (instregex "ST1Threev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVSTB], (instregex "ST1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVSTB,
+              M4WriteA1],   (instregex "ST1Threev(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVSTC], (instregex "ST1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[M4WriteVSTC,
+              M4WriteA1],   (instregex "ST1Fourv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[M4WriteVSTC], (instregex "ST1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVSTC,
+              M4WriteA1],   (instregex "ST1Fourv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[WriteVST],    (instregex "ST1i(8|16|32|64)$")>;
+def : InstRW<[WriteVST,
+              M4WriteA1],   (instregex "ST1i(8|16|32|64)_POST$")>;
+
+def : InstRW<[M4WriteVSTD], (instregex "ST2Twov(8b|4h|2s)$")>;
+def : InstRW<[M4WriteVSTD,
+              M4WriteA1],   (instregex "ST2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[M4WriteVSTE], (instregex "ST2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVSTE,
+              M4WriteA1],   (instregex "ST2Twov(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVSTD], (instregex "ST2i(8|16|32|64)$")>;
+def : InstRW<[M4WriteVSTD,
+              M4WriteA1],   (instregex "ST2i(8|16|32|64)_POST$")>;
+
+def : InstRW<[M4WriteVSTF], (instregex "ST3Threev(8b|4h|2s)$")>;
+def : InstRW<[M4WriteVSTF,
+              M4WriteA1],   (instregex "ST3Threev(8b|4h|2s)_POST$")>;
+def : InstRW<[M4WriteVSTG], (instregex "ST3Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVSTG,
+              M4WriteA1],   (instregex "ST3Threev(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVSTE], (instregex "ST3i(8|16|32|64)$")>;
+def : InstRW<[M4WriteVSTE,
+              M4WriteA1],   (instregex "ST3i(8|16|32|64)_POST$")>;
+
+def : InstRW<[M4WriteVSTL], (instregex "ST4Fourv(8b|4h|2s)$")>;
+def : InstRW<[M4WriteVSTL,
+              M4WriteA1],   (instregex "ST4Fourv(8b|4h|2s)_POST$")>;
+def : InstRW<[M4WriteVSTI], (instregex "ST4Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[M4WriteVSTI,
+              M4WriteA1],   (instregex "ST4Fourv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[M4WriteVSTE], (instregex "ST4i(8|16|32|64)$")>;
+def : InstRW<[M4WriteVSTE,
+              M4WriteA1],   (instregex "ST4i(8|16|32|64)_POST$")>;
+
+// Cryptography instructions.
+def : InstRW<[M4WriteNCRY1],  (instregex "^AES[DE]")>;
+def : InstRW<[M4WriteNCRY1,
+              M4ReadAESM1],   (instregex "^AESI?MC")>;
+def : InstRW<[M4WriteNCRY1A], (instregex "^PMULv")>;
+def : InstRW<[M4WriteNCRY1A], (instregex "^PMULLv(1|8)i")>;
+def : InstRW<[M4WriteNCRY3A], (instregex "^PMULLv(2|16)i")>;
+def : InstRW<[M4WriteNCRY1A], (instregex "^SHA1([CHMP]|SU[01])")>;
+def : InstRW<[M4WriteNCRY1A], (instrs SHA256SU0rr)>;
+def : InstRW<[M4WriteNCRY5A], (instrs SHA256SU1rrr)>;
+def : InstRW<[M4WriteNCRY5A], (instrs SHA256H2rrr)>;
+
+// CRC instructions.
+def : InstRW<[M4WriteE2], (instregex "^CRC32C?[BHWX]rr$")>;
+
+} // SchedModel = ExynosM4Model
diff --git a/lib/Target/AArch64/AArch64SchedPredExynos.td b/lib/Target/AArch64/AArch64SchedPredExynos.td
index f8533d1..48c5423 100644
--- a/lib/Target/AArch64/AArch64SchedPredExynos.td
+++ b/lib/Target/AArch64/AArch64SchedPredExynos.td
@@ -30,24 +30,61 @@
                                CheckAll<[CheckOpcode<[BLR]>,
                                          CheckRegOperand<0, LR>]>>;
 
-// Identify arithmetic and logic instructions without or with limited extension.
-def ExynosExtFn   : TIIPredicate<
-                      "isExynosExtFast",
-                      MCOpcodeSwitchStatement<
-                        [MCOpcodeSwitchCase<
-                           IsArithExtOp.ValidOpcodes,
-                           MCReturnStatement<
-                             CheckAny<[CheckExtBy0,
-                                       CheckAll<
-                                         [CheckAny<
-                                           [CheckExtUXTW,
-                                            CheckExtUXTX]>,
-                                          CheckAny<
-                                            [CheckExtBy1,
-                                             CheckExtBy2,
-                                             CheckExtBy3]>]>]>>>],
-                        MCReturnStatement<FalsePred>>>;
-def ExynosExtPred : MCSchedPredicate<ExynosExtFn>;
+// Identify arithmetic instructions with, at most, a limited extension or shift.
+def ExynosArithFn   : TIIPredicate<
+                        "isExynosArithFast",
+                        MCOpcodeSwitchStatement<
+                          [MCOpcodeSwitchCase<
+                             IsArithExtOp.ValidOpcodes,
+                             MCReturnStatement<
+                               CheckAny<[CheckExtBy0,
+                                         CheckAll<
+                                           [CheckAny<
+                                             [CheckExtUXTW,
+                                              CheckExtUXTX]>,
+                                            CheckAny<
+                                              [CheckExtBy1,
+                                               CheckExtBy2,
+                                               CheckExtBy3]>]>]>>>,
+                           MCOpcodeSwitchCase<
+                             IsArithShiftOp.ValidOpcodes,
+                             MCReturnStatement<ExynosCheckShift>>,
+                           MCOpcodeSwitchCase<
+                             IsArithUnshiftOp.ValidOpcodes,
+                             MCReturnStatement<TruePred>>],
+                          MCReturnStatement<FalsePred>>>;
+def ExynosArithPred : MCSchedPredicate<ExynosArithFn>;
+
+// Identify logic instructions with limited shift.
+def ExynosLogicFn   : TIIPredicate<
+                        "isExynosLogicFast",
+                        MCOpcodeSwitchStatement<
+                          [MCOpcodeSwitchCase<
+                             IsLogicShiftOp.ValidOpcodes,
+                             MCReturnStatement<ExynosCheckShift>>,
+                           MCOpcodeSwitchCase<
+                             IsLogicUnshiftOp.ValidOpcodes,
+                             MCReturnStatement<TruePred>>],
+                          MCReturnStatement<FalsePred>>>;
+def ExynosLogicPred : MCSchedPredicate<ExynosLogicFn>;
+
+// Identify more logic instructions with limited shift.
+def ExynosLogicExFn   : TIIPredicate<
+                          "isExynosLogicExFast",
+                          MCOpcodeSwitchStatement<
+                            [MCOpcodeSwitchCase<
+                               IsLogicShiftOp.ValidOpcodes,
+                               MCReturnStatement<
+                                 CheckAny<
+                                   [ExynosCheckShift,
+                                    CheckAll<
+                                     [CheckShiftLSL,
+                                      CheckShiftBy8]>]>>>,
+                             MCOpcodeSwitchCase<
+                               IsLogicUnshiftOp.ValidOpcodes,
+                               MCReturnStatement<TruePred>>],
+                            MCReturnStatement<FalsePred>>>;
+def ExynosLogicExPred : MCSchedPredicate<ExynosLogicExFn>;
 
 // Identify a load or store using the register offset addressing mode
 // with a scaled non-extended register.
@@ -88,12 +125,19 @@
                              [ADR, ADRP,
                               MOVNWi, MOVNXi,
                               MOVZWi, MOVZXi],
-                             MCReturnStatement<TruePred>>],
+                             MCReturnStatement<TruePred>>,
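+                           // ORR Rd, ZR, #imm is the alias for MOV (bitmask
+                           // immediate), i.e. a register reset idiom.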
+                           MCOpcodeSwitchCase<
+                             [ORRWri, ORRXri],
+                             MCReturnStatement<
+                               CheckAll<
+                                 [CheckIsRegOperand<1>,
+                                  CheckAny<
+                                    [CheckRegOperand<1, WZR>,
+                                     CheckRegOperand<1, XZR>]>]>>>],
                           MCReturnStatement<
                             CheckAny<
                               [IsCopyIdiomFn,
-                               IsZeroFPIdiomFn,
-                               IsZeroIdiomFn]>>>>;
+                               IsZeroFPIdiomFn]>>>>;
 def ExynosResetPred : MCSchedPredicate<ExynosResetFn>;
 
 // Identify EXTR as the alias for ROR (immediate).
@@ -101,33 +145,7 @@
                                  CheckAll<[CheckOpcode<[EXTRWrri, EXTRXrri]>,
                                            CheckSameRegOperand<1, 2>]>>;
 
-// Identify arithmetic and logic instructions with limited shift.
-def ExynosShiftFn   : TIIPredicate<
-                        "isExynosShiftFast",
-                        MCOpcodeSwitchStatement<
-                          [MCOpcodeSwitchCase<
-                             IsArithLogicShiftOp.ValidOpcodes,
-                             MCReturnStatement<ExynosCheckShift>>],
-                          MCReturnStatement<FalsePred>>>;
-def ExynosShiftPred : MCSchedPredicate<ExynosShiftFn>;
-
-// Identify more arithmetic and logic instructions with limited shift.
-def ExynosShiftExFn   : TIIPredicate<
-                          "isExynosShiftExFast",
-                          MCOpcodeSwitchStatement<
-                            [MCOpcodeSwitchCase<
-                               IsArithLogicShiftOp.ValidOpcodes,
-                               MCReturnStatement<
-                                 CheckAny<
-                                   [CheckAll<
-                                     [CheckShiftLSL,
-                                      CheckShiftBy8]>,
-                                    ExynosCheckShift]>>>],
-                            MCReturnStatement<FalsePred>>>;
-def ExynosShiftExPred : MCSchedPredicate<ExynosShiftExFn>;
-
-
-// Identify arithmetic and logic immediate instructions.
+// Identify cheap arithmetic and logic immediate instructions.
 def ExynosCheapFn : TIIPredicate<
                       "isExynosCheapAsMove",
                       MCOpcodeSwitchStatement<
@@ -136,4 +154,4 @@
                            MCReturnStatement<TruePred>>],
                         MCReturnStatement<
                           CheckAny<
-                            [ExynosExtFn, ExynosResetFn, ExynosShiftFn]>>>>;
+                            [ExynosArithFn, ExynosResetFn, ExynosLogicFn]>>>>;
diff --git a/lib/Target/AArch64/AArch64SchedPredicates.td b/lib/Target/AArch64/AArch64SchedPredicates.td
index a48f1dc..dbaf11f 100644
--- a/lib/Target/AArch64/AArch64SchedPredicates.td
+++ b/lib/Target/AArch64/AArch64SchedPredicates.td
@@ -182,6 +182,92 @@
 def IsArithLogicUnshiftOp  : CheckOpcode<!listconcat(IsArithUnshiftOp.ValidOpcodes,
                                                      IsLogicUnshiftOp.ValidOpcodes)>;
 
+// Identify whether an instruction is an ASIMD
+// load using the post index addressing mode.
+def IsLoadASIMDPostOp      : CheckOpcode<[LD1Onev8b_POST, LD1Onev4h_POST, LD1Onev2s_POST, LD1Onev1d_POST,
+                                          LD1Onev16b_POST, LD1Onev8h_POST, LD1Onev4s_POST, LD1Onev2d_POST,
+                                          LD1Twov8b_POST, LD1Twov4h_POST, LD1Twov2s_POST, LD1Twov1d_POST,
+                                          LD1Twov16b_POST, LD1Twov8h_POST, LD1Twov4s_POST, LD1Twov2d_POST,
+                                          LD1Threev8b_POST, LD1Threev4h_POST, LD1Threev2s_POST, LD1Threev1d_POST,
+                                          LD1Threev16b_POST, LD1Threev8h_POST, LD1Threev4s_POST, LD1Threev2d_POST,
+                                          LD1Fourv8b_POST, LD1Fourv4h_POST, LD1Fourv2s_POST, LD1Fourv1d_POST,
+                                          LD1Fourv16b_POST, LD1Fourv8h_POST, LD1Fourv4s_POST, LD1Fourv2d_POST,
+                                          LD1i8_POST, LD1i16_POST, LD1i32_POST, LD1i64_POST,
+                                          LD1Rv8b_POST, LD1Rv4h_POST, LD1Rv2s_POST, LD1Rv1d_POST,
+                                          LD1Rv16b_POST, LD1Rv8h_POST, LD1Rv4s_POST, LD1Rv2d_POST,
+                                          LD2Twov8b_POST, LD2Twov4h_POST, LD2Twov2s_POST,
+                                          LD2Twov16b_POST, LD2Twov8h_POST, LD2Twov4s_POST, LD2Twov2d_POST,
+                                          LD2i8_POST, LD2i16_POST, LD2i32_POST, LD2i64_POST,
+                                          LD2Rv8b_POST, LD2Rv4h_POST, LD2Rv2s_POST, LD2Rv1d_POST,
+                                          LD2Rv16b_POST, LD2Rv8h_POST, LD2Rv4s_POST, LD2Rv2d_POST,
+                                          LD3Threev8b_POST, LD3Threev4h_POST, LD3Threev2s_POST,
+                                          LD3Threev16b_POST, LD3Threev8h_POST, LD3Threev4s_POST, LD3Threev2d_POST,
+                                          LD3i8_POST, LD3i16_POST, LD3i32_POST, LD3i64_POST,
+                                          LD3Rv8b_POST, LD3Rv4h_POST, LD3Rv2s_POST, LD3Rv1d_POST,
+                                          LD3Rv16b_POST, LD3Rv8h_POST, LD3Rv4s_POST, LD3Rv2d_POST,
+                                          LD4Fourv8b_POST, LD4Fourv4h_POST, LD4Fourv2s_POST,
+                                          LD4Fourv16b_POST, LD4Fourv8h_POST, LD4Fourv4s_POST, LD4Fourv2d_POST,
+                                          LD4i8_POST, LD4i16_POST, LD4i32_POST, LD4i64_POST,
+                                          LD4Rv8b_POST, LD4Rv4h_POST, LD4Rv2s_POST, LD4Rv1d_POST,
+                                          LD4Rv16b_POST, LD4Rv8h_POST, LD4Rv4s_POST, LD4Rv2d_POST]>;
+
+// Identify whether an instruction is an ASIMD
+// store using the post index addressing mode.
+def IsStoreASIMDPostOp     : CheckOpcode<[ST1Onev8b_POST, ST1Onev4h_POST, ST1Onev2s_POST, ST1Onev1d_POST,
+                                          ST1Onev16b_POST, ST1Onev8h_POST, ST1Onev4s_POST, ST1Onev2d_POST,
+                                          ST1Twov8b_POST, ST1Twov4h_POST, ST1Twov2s_POST, ST1Twov1d_POST,
+                                          ST1Twov16b_POST, ST1Twov8h_POST, ST1Twov4s_POST, ST1Twov2d_POST,
+                                          ST1Threev8b_POST, ST1Threev4h_POST, ST1Threev2s_POST, ST1Threev1d_POST,
+                                          ST1Threev16b_POST, ST1Threev8h_POST, ST1Threev4s_POST, ST1Threev2d_POST,
+                                          ST1Fourv8b_POST, ST1Fourv4h_POST, ST1Fourv2s_POST, ST1Fourv1d_POST,
+                                          ST1Fourv16b_POST, ST1Fourv8h_POST, ST1Fourv4s_POST, ST1Fourv2d_POST,
+                                          ST1i8_POST, ST1i16_POST, ST1i32_POST, ST1i64_POST,
+                                          ST2Twov8b_POST, ST2Twov4h_POST, ST2Twov2s_POST,
+                                          ST2Twov16b_POST, ST2Twov8h_POST, ST2Twov4s_POST, ST2Twov2d_POST,
+                                          ST2i8_POST, ST2i16_POST, ST2i32_POST, ST2i64_POST,
+                                          ST3Threev8b_POST, ST3Threev4h_POST, ST3Threev2s_POST,
+                                          ST3Threev16b_POST, ST3Threev8h_POST, ST3Threev4s_POST, ST3Threev2d_POST,
+                                          ST3i8_POST, ST3i16_POST, ST3i32_POST, ST3i64_POST,
+                                          ST4Fourv8b_POST, ST4Fourv4h_POST, ST4Fourv2s_POST,
+                                          ST4Fourv16b_POST, ST4Fourv8h_POST, ST4Fourv4s_POST, ST4Fourv2d_POST,
+                                          ST4i8_POST, ST4i16_POST, ST4i32_POST, ST4i64_POST]>;
+
+// Identify whether an instruction is an ASIMD load
+// or store using the post index addressing mode.
+def IsLoadStoreASIMDPostOp : CheckOpcode<!listconcat(IsLoadASIMDPostOp.ValidOpcodes,
+                                                     IsStoreASIMDPostOp.ValidOpcodes)>;
+
+// Identify whether an instruction is a load
+// using the register offset addressing mode.
+def IsLoadRegOffsetOp      : CheckOpcode<[PRFMroW, PRFMroX,
+                                          LDRBBroW, LDRBBroX,
+                                          LDRSBWroW, LDRSBWroX, LDRSBXroW, LDRSBXroX,
+                                          LDRHHroW, LDRHHroX,
+                                          LDRSHWroW, LDRSHWroX, LDRSHXroW, LDRSHXroX,
+                                          LDRWroW, LDRWroX,
+                                          LDRSWroW, LDRSWroX,
+                                          LDRXroW, LDRXroX,
+                                          LDRBroW, LDRBroX,
+                                          LDRHroW, LDRHroX,
+                                          LDRSroW, LDRSroX,
+                                          LDRDroW, LDRDroX]>;
+
+// Identify whether an instruction is a store
+// using the register offset addressing mode.
+def IsStoreRegOffsetOp     : CheckOpcode<[STRBBroW, STRBBroX,
+                                          STRHHroW, STRHHroX,
+                                          STRWroW, STRWroX,
+                                          STRXroW, STRXroX,
+                                          STRBroW, STRBroX,
+                                          STRHroW, STRHroX,
+                                          STRSroW, STRSroX,
+                                          STRDroW, STRDroX]>;
+
+// Identify whether an instruction is a load or
+// store using the register offset addressing mode.
+def IsLoadStoreRegOffsetOp : CheckOpcode<!listconcat(IsLoadRegOffsetOp.ValidOpcodes,
+                                                     IsStoreRegOffsetOp.ValidOpcodes)>;
+
 // Identify whether an instruction whose result is a long vector
 // operates on the upper half of the input registers.
 def IsLongVectorUpperOp    : CheckOpcode<[FCVTLv8i16, FCVTLv4i32,
@@ -235,37 +321,6 @@
                                           USUBWv16i8_v8i16, USUBWv8i16_v4i32, USUBWv4i32_v2i64,
                                           XTNv16i8, XTNv8i16, XTNv4i32]>;
 
-// Identify whether an instruction is a load
-// using the register offset addressing mode.
-def IsLoadRegOffsetOp      : CheckOpcode<[PRFMroW, PRFMroX,
-                                          LDRBBroW, LDRBBroX,
-                                          LDRSBWroW, LDRSBWroX, LDRSBXroW, LDRSBXroX,
-                                          LDRHHroW, LDRHHroX,
-                                          LDRSHWroW, LDRSHWroX, LDRSHXroW, LDRSHXroX,
-                                          LDRWroW, LDRWroX,
-                                          LDRSWroW, LDRSWroX,
-                                          LDRXroW, LDRXroX,
-                                          LDRBroW, LDRBroX,
-                                          LDRHroW, LDRHroX,
-                                          LDRSroW, LDRSroX,
-                                          LDRDroW, LDRDroX]>;
-
-// Identify whether an instruction is a load
-// using the register offset addressing mode.
-def IsStoreRegOffsetOp     : CheckOpcode<[STRBBroW, STRBBroX,
-                                          STRHHroW, STRHHroX,
-                                          STRWroW, STRWroX,
-                                          STRXroW, STRXroX,
-                                          STRBroW, STRBroX,
-                                          STRHroW, STRHroX,
-                                          STRSroW, STRSroX,
-                                          STRDroW, STRDroX]>;
-
-// Identify whether an instruction is a load or
-// store using the register offset addressing mode.
-def IsLoadStoreRegOffsetOp : CheckOpcode<!listconcat(IsLoadRegOffsetOp.ValidOpcodes,
-                                                     IsStoreRegOffsetOp.ValidOpcodes)>;
-
 // Target predicates.
 
 // Identify an instruction that effectively transfers a register to another.
@@ -295,9 +350,7 @@
                                                CheckIsRegOperand<2>,
                                                CheckAny<
                                                  [CheckRegOperand<1, WZR>,
-                                                  CheckRegOperand<1, XZR>,
-                                                  CheckRegOperand<2, WZR>,
-                                                  CheckRegOperand<2, XZR>]>,
+                                                  CheckRegOperand<1, XZR>]>,
                                                CheckShiftBy0]>>>],
                                        MCReturnStatement<FalsePred>>>;
 def IsCopyIdiomPred   : MCSchedPredicate<IsCopyIdiomFn>;
diff --git a/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/lib/Target/AArch64/AArch64SpeculationHardening.cpp
new file mode 100644
index 0000000..e9699b0
--- /dev/null
+++ b/lib/Target/AArch64/AArch64SpeculationHardening.cpp
@@ -0,0 +1,641 @@
+//===- AArch64SpeculationHardening.cpp - Harden Against Misspeculation ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass to insert code to mitigate side channel
+// vulnerabilities that may happen under control flow misspeculation.
+//
+// The pass implements tracking of control flow misspeculation into a "taint"
+// register. That taint register can then be used to mask off registers with
+// sensitive data when executing under misspeculation, a.k.a. "transient
+// execution".
+// This pass is aimed at mitigating SpectreV1-style vulnerabilities.
+//
+// It also implements speculative load hardening, i.e. using the taint register
+// to automatically mask off loaded data.
+//
+// As a possible follow-on improvement, an intrinsics-based approach, as
+// explained at https://lwn.net/Articles/759423/, could be implemented on top
+// of the current design.
+//
+// For AArch64, the following implementation choices are made to implement the
+// tracking of control flow misspeculation into a taint register.
+// Some of these are different from the implementation choices made in
+// the similar pass implemented in X86SpeculativeLoadHardening.cpp, as
+// the instruction set characteristics result in different trade-offs:
+// - The speculation hardening is done after register allocation. With a
+//   relative abundance of registers, one register is reserved (X16) to be
+//   the taint register. X16 is expected not to clash with other register
+//   reservation mechanisms, with very high probability, because:
+//   . The AArch64 ABI doesn't guarantee X16 to be retained across any call.
+//   . The only way for a programmer to request that X16 be used is through
+//     inline assembly. In the rare case a function explicitly demands to
+//     use X16/W16, this pass falls back to hardening against speculation
+//     by inserting a DSB SYS/ISB barrier pair, which will prevent control
+//     flow speculation.
+// - It is easy to insert mask operations at this late stage as we have
+//   mask operations available that don't set flags.
+// - The taint variable contains all-ones when no misspeculation is detected,
+//   and contains all-zeros when misspeculation is detected. Therefore, when
+//   masking, an AND instruction (which only changes the register to be masked,
+//   no other side effects) can easily be inserted anywhere that's needed.
+// - The tracking of misspeculation is done by using a data-flow conditional
+//   select instruction (CSEL) to evaluate the flags that were also used to
+//   make conditional branch direction decisions. Speculation of the CSEL
+//   instruction can be limited with a CSDB instruction - so the combination of
+//   CSEL + a later CSDB gives the guarantee that the flags as used in the CSEL
+//   aren't speculated. When the conditional branch direction gets
+//   misspeculated, the semantics of the inserted CSEL instruction are such
+//   that the taint register will contain all zero bits.
+//   One key requirement for this to work is that the conditional branch is
+//   followed by an execution of the CSEL instruction, where the CSEL
+//   instruction needs to use the same flags status as the conditional branch.
+//   This means that the conditional branches must not be implemented as one
+//   of the AArch64 conditional branches that do not use the flags as input
+//   (CB(N)Z and TB(N)Z). This is implemented by ensuring that the instruction
+//   selectors do not produce these instructions when speculation hardening
+//   is enabled. This pass will assert if it does encounter such an instruction.
+// - On function call boundaries, the misspeculation state is transferred from
+//   the taint register X16 to be encoded in the SP register as value 0.
+//
+// For the aspect of automatically hardening loads using the taint register
+// (a.k.a. speculative load hardening, see
+//  https://llvm.org/docs/SpeculativeLoadHardening.html), the following
+// implementation choices are made for AArch64:
+//   - Many of the optimizations described at
+//     https://llvm.org/docs/SpeculativeLoadHardening.html to harden fewer
+//     loads haven't been implemented yet - but for some of them there are
+//     FIXMEs in the code.
+//   - Loads that load into general purpose (X or W) registers get hardened by
+//     masking the loaded data. For loads that load into other registers, the
+//     address loaded from gets hardened. It is expected that hardening the
+//     loaded data may be more efficient; but masking data in registers other
+//     than X or W is not easy and may end up slower than just hardening the
+//     X register holding the address loaded from.
+//   - On AArch64, CSDB instructions are inserted between the masking of the
+//     register and its first use, to ensure there's no non-control-flow
+//     speculation that might undermine the hardening mechanism.
+//
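+// As a minimal illustrative sketch of the combined scheme (not code emitted
+// verbatim by this pass; X16 is the taint register), a conditional load may
+// be hardened as:
+//   cmp   x1, x2
+//   b.hs  .Lother
+//   csel  x16, x16, xzr, lo  // All-zeros if this block was mispredicted.
+//   ldr   w0, [x3]
+//   and   w0, w0, w16        // Mask the loaded data with the taint.
+//   csdb                     // Prevent speculation past the masking.
+//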
+// Future extensions/improvements could be:
+// - Implement this functionality using full speculation barriers, akin to the
+//   x86-slh-lfence option. This may be more useful for the intrinsics-based
+//   approach than for the SLH approach to masking.
+//   Note that this pass already inserts the full speculation barriers if the
+//   function for some niche reason makes use of X16/W16.
+// - No indirect branch misprediction gets protected/instrumented yet; this
+//   could be done for some indirect branches, such as switch jump tables.
+//===----------------------------------------------------------------------===//
+
+#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
+#include "Utils/AArch64BaseInfo.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cassert>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-speculation-hardening"
+
+#define AARCH64_SPECULATION_HARDENING_NAME "AArch64 speculation hardening pass"
+
+cl::opt<bool> HardenLoads("aarch64-slh-loads", cl::Hidden,
+                          cl::desc("Sanitize loads from memory."),
+                          cl::init(true));
+
+namespace {
+
+class AArch64SpeculationHardening : public MachineFunctionPass {
+public:
+  const TargetInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+
+  static char ID;
+
+  AArch64SpeculationHardening() : MachineFunctionPass(ID) {
+    initializeAArch64SpeculationHardeningPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &Fn) override;
+
+  StringRef getPassName() const override {
+    return AARCH64_SPECULATION_HARDENING_NAME;
+  }
+
+private:
+  unsigned MisspeculatingTaintReg;
+  unsigned MisspeculatingTaintReg32Bit;
+  bool UseControlFlowSpeculationBarrier;
+  BitVector RegsNeedingCSDBBeforeUse;
+  BitVector RegsAlreadyMasked;
+
+  bool functionUsesHardeningRegister(MachineFunction &MF) const;
+  bool instrumentControlFlow(MachineBasicBlock &MBB);
+  bool endsWithCondControlFlow(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                               MachineBasicBlock *&FBB,
+                               AArch64CC::CondCode &CondCode) const;
+  void insertTrackingCode(MachineBasicBlock &SplitEdgeBB,
+                          AArch64CC::CondCode &CondCode, DebugLoc DL) const;
+  void insertSPToRegTaintPropagation(MachineBasicBlock *MBB,
+                                     MachineBasicBlock::iterator MBBI) const;
+  void insertRegToSPTaintPropagation(MachineBasicBlock *MBB,
+                                     MachineBasicBlock::iterator MBBI,
+                                     unsigned TmpReg) const;
+
+  bool slhLoads(MachineBasicBlock &MBB);
+  bool makeGPRSpeculationSafe(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MBBI,
+                              MachineInstr &MI, unsigned Reg);
+  bool lowerSpeculationSafeValuePseudos(MachineBasicBlock &MBB);
+  bool expandSpeculationSafeValue(MachineBasicBlock &MBB,
+                                  MachineBasicBlock::iterator MBBI);
+  bool insertCSDB(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                  DebugLoc DL);
+};
+
+} // end anonymous namespace
+
+char AArch64SpeculationHardening::ID = 0;
+
+INITIALIZE_PASS(AArch64SpeculationHardening, "aarch64-speculation-hardening",
+                AARCH64_SPECULATION_HARDENING_NAME, false, false)
+
+bool AArch64SpeculationHardening::endsWithCondControlFlow(
+    MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
+    AArch64CC::CondCode &CondCode) const {
+  SmallVector<MachineOperand, 1> analyzeBranchCondCode;
+  if (TII->analyzeBranch(MBB, TBB, FBB, analyzeBranchCondCode, false))
+    return false;
+
+  // Ignore if the BB ends in an unconditional branch/fall-through.
+  if (analyzeBranchCondCode.empty())
+    return false;
+
+  // If the BB ends with a single conditional branch, FBB will be set to
+  // nullptr (see API docs for TII->analyzeBranch). For the rest of the
+  // analysis we want the FBB block to be set always.
+  assert(TBB != nullptr);
+  if (FBB == nullptr)
+    FBB = MBB.getFallThrough();
+
+  // If both the true and the false condition jump to the same basic block,
+  // there is no need for any protection - whether the branch is speculated
+  // correctly or not, we end up executing the architecturally correct code.
+  if (TBB == FBB)
+    return false;
+
+  assert(MBB.succ_size() == 2);
+  // translate analyzeBranchCondCode to CondCode.
+  assert(analyzeBranchCondCode.size() == 1 && "unknown Cond array format");
+  CondCode = AArch64CC::CondCode(analyzeBranchCondCode[0].getImm());
+  return true;
+}
+
+void AArch64SpeculationHardening::insertTrackingCode(
+    MachineBasicBlock &SplitEdgeBB, AArch64CC::CondCode &CondCode,
+    DebugLoc DL) const {
+  if (UseControlFlowSpeculationBarrier) {
+    // insert full control flow speculation barrier (DSB SYS + ISB)
+    BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::ISB))
+        .addImm(0xf);
+    BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::DSB))
+        .addImm(0xf);
+  } else {
+    BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::CSELXr))
+        .addDef(MisspeculatingTaintReg)
+        .addUse(MisspeculatingTaintReg)
+        .addUse(AArch64::XZR)
+        .addImm(CondCode);
+    SplitEdgeBB.addLiveIn(AArch64::NZCV);
+  }
+}
+
+bool AArch64SpeculationHardening::instrumentControlFlow(
+    MachineBasicBlock &MBB) {
+  LLVM_DEBUG(dbgs() << "Instrument control flow tracking on MBB: " << MBB);
+
+  bool Modified = false;
+  MachineBasicBlock *TBB = nullptr;
+  MachineBasicBlock *FBB = nullptr;
+  AArch64CC::CondCode CondCode;
+
+  if (!endsWithCondControlFlow(MBB, TBB, FBB, CondCode)) {
+    LLVM_DEBUG(dbgs() << "... doesn't end with CondControlFlow\n");
+  } else {
+    // Now insert:
+    // "CSEL MisSpeculatingR, MisSpeculatingR, XZR, cond" on the True edge and
+    // "CSEL MisSpeculatingR, MisSpeculatingR, XZR, Invertcond" on the False
+    // edge.
+    AArch64CC::CondCode InvCondCode = AArch64CC::getInvertedCondCode(CondCode);
+
+    MachineBasicBlock *SplitEdgeTBB = MBB.SplitCriticalEdge(TBB, *this);
+    MachineBasicBlock *SplitEdgeFBB = MBB.SplitCriticalEdge(FBB, *this);
+
+    assert(SplitEdgeTBB != nullptr);
+    assert(SplitEdgeFBB != nullptr);
+
+    DebugLoc DL;
+    if (MBB.instr_end() != MBB.instr_begin())
+      DL = (--MBB.instr_end())->getDebugLoc();
+
+    insertTrackingCode(*SplitEdgeTBB, CondCode, DL);
+    insertTrackingCode(*SplitEdgeFBB, InvCondCode, DL);
+
+    LLVM_DEBUG(dbgs() << "SplitEdgeTBB: " << *SplitEdgeTBB << "\n");
+    LLVM_DEBUG(dbgs() << "SplitEdgeFBB: " << *SplitEdgeFBB << "\n");
+    Modified = true;
+  }
+
+  // Perform correct code generation around function calls and before returns.
+  {
+    SmallVector<MachineInstr *, 4> ReturnInstructions;
+    SmallVector<MachineInstr *, 4> CallInstructions;
+
+    for (MachineInstr &MI : MBB) {
+      if (MI.isReturn())
+        ReturnInstructions.push_back(&MI);
+      else if (MI.isCall())
+        CallInstructions.push_back(&MI);
+    }
+
+    Modified |=
+        (ReturnInstructions.size() > 0) || (CallInstructions.size() > 0);
+
+    for (MachineInstr *Return : ReturnInstructions)
+      insertRegToSPTaintPropagation(Return->getParent(), Return, AArch64::X17);
+    for (MachineInstr *Call : CallInstructions) {
+      // Just after the call:
+      MachineBasicBlock::iterator i = Call;
+      i++;
+      insertSPToRegTaintPropagation(Call->getParent(), i);
+      // Just before the call:
+      insertRegToSPTaintPropagation(Call->getParent(), Call, AArch64::X17);
+    }
+  }
+
+  return Modified;
+}
+
+void AArch64SpeculationHardening::insertSPToRegTaintPropagation(
+    MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI) const {
+  // If full control flow speculation barriers are used, emit a control flow
+  // barrier to block potential misspeculation in flight coming into this
+  // function.
+  if (UseControlFlowSpeculationBarrier) {
+    // insert full control flow speculation barrier (DSB SYS + ISB)
+    BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::DSB)).addImm(0xf);
+    BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ISB)).addImm(0xf);
+    return;
+  }
+
+  // CMP   SP, #0   === SUBS   xzr, SP, #0
+  BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::SUBSXri))
+      .addDef(AArch64::XZR)
+      .addUse(AArch64::SP)
+      .addImm(0)
+      .addImm(0); // no shift
+  // CSETM x16, NE  === CSINV  x16, xzr, xzr, EQ
+  BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::CSINVXr))
+      .addDef(MisspeculatingTaintReg)
+      .addUse(AArch64::XZR)
+      .addUse(AArch64::XZR)
+      .addImm(AArch64CC::EQ);
+}
+
+void AArch64SpeculationHardening::insertRegToSPTaintPropagation(
+    MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
+    unsigned TmpReg) const {
+  // If full control flow speculation barriers are used, there will not be
+  // misspeculation when returning from this function, and therefore also
+  // no need to encode potential misspeculation into the stack pointer.
+  if (UseControlFlowSpeculationBarrier)
+    return;
+
+  // mov   Xtmp, SP  === ADD  Xtmp, SP, #0
+  BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
+      .addDef(TmpReg)
+      .addUse(AArch64::SP)
+      .addImm(0)
+      .addImm(0); // no shift
+  // and   Xtmp, Xtmp, TaintReg === AND Xtmp, Xtmp, TaintReg, #0
+  BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ANDXrs))
+      .addDef(TmpReg, RegState::Renamable)
+      .addUse(TmpReg, RegState::Kill | RegState::Renamable)
+      .addUse(MisspeculatingTaintReg, RegState::Kill)
+      .addImm(0);
+  // mov   SP, Xtmp === ADD SP, Xtmp, #0
+  BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
+      .addDef(AArch64::SP)
+      .addUse(TmpReg, RegState::Kill)
+      .addImm(0)
+      .addImm(0); // no shift
+}
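+
+// Taken together, the two helpers above bracket every call roughly as follows
+// (an illustrative sketch; X17 is the temporary register passed in):
+//   mov   x17, sp         // insertRegToSPTaintPropagation: SP becomes 0
+//   and   x17, x17, x16   // when misspeculating, and is unchanged
+//   mov   sp, x17         // otherwise.
+//   bl    callee
+//   cmp   sp, #0          // insertSPToRegTaintPropagation: recover the taint
+//   csetm x16, ne         // from SP (all-ones iff SP is non-zero).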
+
+bool AArch64SpeculationHardening::functionUsesHardeningRegister(
+    MachineFunction &MF) const {
+  for (MachineBasicBlock &MBB : MF) {
+    for (MachineInstr &MI : MBB) {
+      // treat function calls specially, as the hardening register does not
+      // need to remain live across function calls.
+      if (MI.isCall())
+        continue;
+      if (MI.readsRegister(MisspeculatingTaintReg, TRI) ||
+          MI.modifiesRegister(MisspeculatingTaintReg, TRI))
+        return true;
+    }
+  }
+  return false;
+}
+
+// Make GPR register Reg speculation-safe by putting it through the
+// SpeculationSafeValue pseudo instruction, if we can't prove that
+// the value in the register has already been hardened.
+bool AArch64SpeculationHardening::makeGPRSpeculationSafe(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineInstr &MI,
+    unsigned Reg) {
+  assert(AArch64::GPR32allRegClass.contains(Reg) ||
+         AArch64::GPR64allRegClass.contains(Reg));
+
+  // Loads cannot directly load a value into the SP (nor WSP).
+  // Therefore, if Reg is SP or WSP, it is because the instruction loads from
+  // the stack through the stack pointer.
+  //
+  // Since the stack pointer is never dynamically controllable, don't harden it.
+  if (Reg == AArch64::SP || Reg == AArch64::WSP)
+    return false;
+
+  // Do not harden the register again if already hardened before.
+  if (RegsAlreadyMasked[Reg])
+    return false;
+
+  const bool Is64Bit = AArch64::GPR64allRegClass.contains(Reg);
+  LLVM_DEBUG(dbgs() << "About to harden register: " << Reg << "\n");
+  BuildMI(MBB, MBBI, MI.getDebugLoc(),
+          TII->get(Is64Bit ? AArch64::SpeculationSafeValueX
+                           : AArch64::SpeculationSafeValueW))
+      .addDef(Reg)
+      .addUse(Reg);
+  RegsAlreadyMasked.set(Reg);
+  return true;
+}
+
+bool AArch64SpeculationHardening::slhLoads(MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  LLVM_DEBUG(dbgs() << "slhLoads running on MBB: " << MBB);
+
+  RegsAlreadyMasked.reset();
+
+  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+  MachineBasicBlock::iterator NextMBBI;
+  for (; MBBI != E; MBBI = NextMBBI) {
+    MachineInstr &MI = *MBBI;
+    NextMBBI = std::next(MBBI);
+    // Only harden loaded values or addresses used in loads.
+    if (!MI.mayLoad())
+      continue;
+
+    LLVM_DEBUG(dbgs() << "About to harden: " << MI);
+
+    // For general purpose register loads, harden the registers loaded into.
+    // For other loads, harden the address loaded from.
+    // Masking the loaded value is expected to result in less performance
+    // overhead, as the load can still execute speculatively in comparison to
+    // when the address loaded from gets masked. However, masking is only
+    // easy to do efficiently on GPR registers, so for loads into non-GPR
+    // registers (e.g. floating point loads), mask the address loaded from.
+    bool AllDefsAreGPR = llvm::all_of(MI.defs(), [&](MachineOperand &Op) {
+      return Op.isReg() && (AArch64::GPR32allRegClass.contains(Op.getReg()) ||
+                            AArch64::GPR64allRegClass.contains(Op.getReg()));
+    });
+    // FIXME: it might be a worthwhile optimization to not mask loaded
+    // values if all the registers involved in address calculation are already
+    // hardened, leaving this load unable to execute on a misspeculated path.
+    bool HardenLoadedData = AllDefsAreGPR;
+    bool HardenAddressLoadedFrom = !HardenLoadedData;
+
+    // First remove registers from RegsAlreadyMasked if their value is
+    // updated by this instruction - it makes them contain a new value that is
+    // not guaranteed to already have been masked.
+    for (MachineOperand Op : MI.defs())
+      for (MCRegAliasIterator AI(Op.getReg(), TRI, true); AI.isValid(); ++AI)
+        RegsAlreadyMasked.reset(*AI);
+
+    // FIXME: loads from the stack with an immediate offset from the stack
+    // pointer probably shouldn't be hardened, which could result in a
+    // significant optimization. See section "Don’t check loads from
+    // compile-time constant stack offsets", in
+    // https://llvm.org/docs/SpeculativeLoadHardening.html
+
+    if (HardenLoadedData)
+      for (auto Def : MI.defs()) {
+        if (Def.isDead())
+          // Do not mask a register that is not used further.
+          continue;
+        // FIXME: For pre/post-increment addressing modes, the base register
+        // used in address calculation is also defined by this instruction.
+        // It might be a worthwhile optimization to not harden that
+        // base register increment/decrement when the increment/decrement is
+        // an immediate.
+        Modified |= makeGPRSpeculationSafe(MBB, NextMBBI, MI, Def.getReg());
+      }
+
+    if (HardenAddressLoadedFrom)
+      for (auto Use : MI.uses()) {
+        if (!Use.isReg())
+          continue;
+        unsigned Reg = Use.getReg();
+        // Some loads of floating point data have implicit defs/uses on a
+        // super register of that floating point data. Some examples:
+        // $s0 = LDRSui $sp, 22, implicit-def $q0
+        // $q0 = LD1i64 $q0, 1, renamable $x0
+        // We need to filter out these uses for non-GPR register which occur
+        // because the load partially fills a non-GPR register with the loaded
+        // data. Just skipping all non-GPR registers is safe (for now) as all
+        // AArch64 load instructions only use GPR registers to perform the
+        // address calculation. FIXME: However that might change once we can
+        // produce SVE gather instructions.
+        if (!(AArch64::GPR32allRegClass.contains(Reg) ||
+              AArch64::GPR64allRegClass.contains(Reg)))
+          continue;
+        Modified |= makeGPRSpeculationSafe(MBB, MBBI, MI, Reg);
+      }
+  }
+  return Modified;
+}
+
+/// \brief If MBBI references a pseudo instruction that should be expanded
+/// here, do the expansion and return true. Otherwise return false.
+bool AArch64SpeculationHardening::expandSpeculationSafeValue(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) {
+  MachineInstr &MI = *MBBI;
+  unsigned Opcode = MI.getOpcode();
+  bool Is64Bit = true;
+
+  switch (Opcode) {
+  default:
+    break;
+  case AArch64::SpeculationSafeValueW:
+    Is64Bit = false;
+    LLVM_FALLTHROUGH;
+  case AArch64::SpeculationSafeValueX:
+    // Just remove the SpeculationSafe pseudos if control flow
+    // misspeculation isn't happening, because we're already inserting
+    // barriers to guarantee that.
+    if (!UseControlFlowSpeculationBarrier) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      unsigned SrcReg = MI.getOperand(1).getReg();
+      // Mark this register and all its aliasing registers as needing to be
+      // value speculation hardened before its next use, by using a CSDB
+      // barrier instruction.
+      for (MachineOperand Op : MI.defs())
+        for (MCRegAliasIterator AI(Op.getReg(), TRI, true); AI.isValid(); ++AI)
+          RegsNeedingCSDBBeforeUse.set(*AI);
+
+      // Mask off with taint state.
+      BuildMI(MBB, MBBI, MI.getDebugLoc(),
+              Is64Bit ? TII->get(AArch64::ANDXrs) : TII->get(AArch64::ANDWrs))
+          .addDef(DstReg)
+          .addUse(SrcReg, RegState::Kill)
+          .addUse(Is64Bit ? MisspeculatingTaintReg
+                          : MisspeculatingTaintReg32Bit)
+          .addImm(0);
+    }
+    MI.eraseFromParent();
+    return true;
+  }
+  return false;
+}
+
+bool AArch64SpeculationHardening::insertCSDB(MachineBasicBlock &MBB,
+                                             MachineBasicBlock::iterator MBBI,
+                                             DebugLoc DL) {
+  assert(!UseControlFlowSpeculationBarrier && "No need to insert CSDBs when "
+                                              "control flow misspeculation "
+                                              "is already blocked");
+  // insert data value speculation barrier (CSDB)
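+  // CSDB is allocated in the hint space (HINT #0x14), so on cores that do not
+  // implement the barrier it executes as a NOP.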
+  BuildMI(MBB, MBBI, DL, TII->get(AArch64::HINT)).addImm(0x14);
+  RegsNeedingCSDBBeforeUse.reset();
+  return true;
+}
+
+bool AArch64SpeculationHardening::lowerSpeculationSafeValuePseudos(
+    MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  RegsNeedingCSDBBeforeUse.reset();
+
+  // The following loop iterates over all instructions in the basic block,
+  // and performs two operations:
+  // 1. Insert a CSDB at this location if needed.
+  // 2. Expand the SpeculationSafeValue pseudo if the current instruction is
+  //    one.
+  //
+  // The insertion of the CSDB is done as late as possible (i.e. just before
+  // the use of a masked register), in the hope that this will reduce the
+  // total number of CSDBs in a block when there are multiple masked registers
+  // in the block.
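+  //
+  // For example (an illustrative sketch), a block containing
+  //   %x1 = SpeculationSafeValueX %x1
+  //   <instructions not using x1>
+  //   <first use of x1>
+  // is lowered to
+  //   and x1, x1, x16   // Mask off with the taint register.
+  //   <instructions not using x1>
+  //   csdb              // Emitted as late as possible, before the first use.
+  //   <first use of x1>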
+  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+  DebugLoc DL;
+  while (MBBI != E) {
+    MachineInstr &MI = *MBBI;
+    DL = MI.getDebugLoc();
+    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+
+    // First check if a CSDB needs to be inserted due to earlier registers
+    // that were masked and that are used by the next instruction.
+    // Also emit the barrier on any potential control flow changes.
+    bool NeedToEmitBarrier = false;
+    if (RegsNeedingCSDBBeforeUse.any() && (MI.isCall() || MI.isTerminator()))
+      NeedToEmitBarrier = true;
+    if (!NeedToEmitBarrier)
+      for (MachineOperand Op : MI.uses())
+        if (Op.isReg() && RegsNeedingCSDBBeforeUse[Op.getReg()]) {
+          NeedToEmitBarrier = true;
+          break;
+        }
+
+    if (NeedToEmitBarrier)
+      Modified |= insertCSDB(MBB, MBBI, DL);
+
+    Modified |= expandSpeculationSafeValue(MBB, MBBI);
+
+    MBBI = NMBBI;
+  }
+
+  if (RegsNeedingCSDBBeforeUse.any())
+    Modified |= insertCSDB(MBB, MBBI, DL);
+
+  return Modified;
+}
+
+bool AArch64SpeculationHardening::runOnMachineFunction(MachineFunction &MF) {
+  if (!MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
+    return false;
+
+  MisspeculatingTaintReg = AArch64::X16;
+  MisspeculatingTaintReg32Bit = AArch64::W16;
+  TII = MF.getSubtarget().getInstrInfo();
+  TRI = MF.getSubtarget().getRegisterInfo();
+  RegsNeedingCSDBBeforeUse.resize(TRI->getNumRegs());
+  RegsAlreadyMasked.resize(TRI->getNumRegs());
+  UseControlFlowSpeculationBarrier = functionUsesHardeningRegister(MF);
+
+  bool Modified = false;
+
+  // Step 1: Enable automatic insertion of SpeculationSafeValue.
+  if (HardenLoads) {
+    LLVM_DEBUG(
+        dbgs() << "***** AArch64SpeculationHardening - automatic insertion of "
+                  "SpeculationSafeValue intrinsics *****\n");
+    for (auto &MBB : MF)
+      Modified |= slhLoads(MBB);
+  }
+
+  // 2.a Add instrumentation code to function entry and exits.
+  LLVM_DEBUG(
+      dbgs()
+      << "***** AArch64SpeculationHardening - track control flow *****\n");
+
+  SmallVector<MachineBasicBlock *, 2> EntryBlocks;
+  EntryBlocks.push_back(&MF.front());
+  for (const LandingPadInfo &LPI : MF.getLandingPads())
+    EntryBlocks.push_back(LPI.LandingPadBlock);
+  for (auto Entry : EntryBlocks)
+    insertSPToRegTaintPropagation(
+        Entry, Entry->SkipPHIsLabelsAndDebug(Entry->begin()));
+
+  // 2.b Add instrumentation code to every basic block.
+  for (auto &MBB : MF)
+    Modified |= instrumentControlFlow(MBB);
+
+  LLVM_DEBUG(dbgs() << "***** AArch64SpeculationHardening - Lowering "
+                       "SpeculationSafeValue Pseudos *****\n");
+  // Step 3: Lower SpeculationSafeValue pseudo instructions.
+  for (auto &MBB : MF)
+    Modified |= lowerSpeculationSafeValuePseudos(MBB);
+
+  return Modified;
+}
+
+/// \brief Returns an instance of the AArch64 speculation hardening pass.
+FunctionPass *llvm::createAArch64SpeculationHardeningPass() {
+  return new AArch64SpeculationHardening();
+}
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 9b27e0a..82f7bb7 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -126,9 +126,9 @@
   bool HasAlternativeNZCV = false;
   bool HasFRInt3264 = false;
   bool HasSpecRestrict = false;
-  bool HasSpecCtrl = false;
   bool HasSSBS = false;
-  bool HasPredCtrl = false;
+  bool HasSB = false;
+  bool HasPredRes = false;
   bool HasCCDP = false;
   bool HasBTI = false;
   bool HasRandGen = false;
@@ -166,8 +166,9 @@
   bool HasArithmeticCbzFusion = false;
   bool HasFuseAddress = false;
   bool HasFuseAES = false;
-  bool HasFuseCryptoEOR = false;
+  bool HasFuseArithmeticLogic = false;
   bool HasFuseCCSelect = false;
+  bool HasFuseCryptoEOR = false;
   bool HasFuseLiterals = false;
   bool DisableLatencySchedHeuristic = false;
   bool UseRSqrt = false;
@@ -311,14 +312,16 @@
   bool hasArithmeticCbzFusion() const { return HasArithmeticCbzFusion; }
   bool hasFuseAddress() const { return HasFuseAddress; }
   bool hasFuseAES() const { return HasFuseAES; }
-  bool hasFuseCryptoEOR() const { return HasFuseCryptoEOR; }
+  bool hasFuseArithmeticLogic() const { return HasFuseArithmeticLogic; }
   bool hasFuseCCSelect() const { return HasFuseCCSelect; }
+  bool hasFuseCryptoEOR() const { return HasFuseCryptoEOR; }
   bool hasFuseLiterals() const { return HasFuseLiterals; }
 
   /// Return true if the CPU supports any kind of instruction fusion.
   bool hasFusion() const {
     return hasArithmeticBccFusion() || hasArithmeticCbzFusion() ||
-           hasFuseAES() || hasFuseCCSelect() || hasFuseLiterals();
+           hasFuseAES() || hasFuseArithmeticLogic() ||
+           hasFuseCCSelect() || hasFuseLiterals();
   }
 
   bool useRSqrt() const { return UseRSqrt; }
@@ -355,9 +358,9 @@
   bool hasAlternativeNZCV() const { return HasAlternativeNZCV; }
   bool hasFRInt3264() const { return HasFRInt3264; }
   bool hasSpecRestrict() const { return HasSpecRestrict; }
-  bool hasSpecCtrl() const { return HasSpecCtrl; }
   bool hasSSBS() const { return HasSSBS; }
-  bool hasPredCtrl() const { return HasPredCtrl; }
+  bool hasSB() const { return HasSB; }
+  bool hasPredRes() const { return HasPredRes; }
   bool hasCCDP() const { return HasCCDP; }
   bool hasBTI() const { return HasBTI; }
   bool hasRandGen() const { return HasRandGen; }
diff --git a/lib/Target/AArch64/AArch64SystemOperands.td b/lib/Target/AArch64/AArch64SystemOperands.td
index 60d48e4..a804fb1 100644
--- a/lib/Target/AArch64/AArch64SystemOperands.td
+++ b/lib/Target/AArch64/AArch64SystemOperands.td
@@ -501,7 +501,7 @@
   code Requires = [{ {} }];
 }
 
-let Requires = [{ {AArch64::FeaturePredCtrl} }] in {
+let Requires = [{ {AArch64::FeaturePredRes} }] in {
 def : PRCTX<"RCTX", 0b0011>;
 }
 
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index d5e2470..4e01652 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -177,6 +177,7 @@
   initializeFalkorHWPFFixPass(*PR);
   initializeFalkorMarkStridedAccessesLegacyPass(*PR);
   initializeLDTLSCleanupPass(*PR);
+  initializeAArch64SpeculationHardeningPass(*PR);
 }
 
 //===----------------------------------------------------------------------===//
@@ -550,12 +551,28 @@
   if (TM->getOptLevel() != CodeGenOpt::None) {
     if (EnableLoadStoreOpt)
       addPass(createAArch64LoadStoreOptimizationPass());
+  }
+
+  // The AArch64SpeculationHardeningPass destroys the dominator tree and
+  // natural loop info, which are needed by the FalkorHWPFFixPass and also
+  // later on. Therefore, run the AArch64SpeculationHardeningPass before the
+  // FalkorHWPFFixPass to avoid recomputing the dominator tree and natural
+  // loop info.
+  addPass(createAArch64SpeculationHardeningPass());
+
+  if (TM->getOptLevel() != CodeGenOpt::None) {
     if (EnableFalkorHWPFFix)
       addPass(createFalkorHWPFFixPass());
   }
 }
 
 void AArch64PassConfig::addPreEmitPass() {
+  // Machine Block Placement might have created new opportunities when run
+  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
+  // Run the load/store optimizer once more.
+  if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
+    addPass(createAArch64LoadStoreOptimizationPass());
+
   if (EnableA53Fix835769)
     addPass(createAArch64A53Fix835769());
   // Relax conditional branch instructions if they're otherwise out of
diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 2c78818..6cc9b67 100644
--- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -165,6 +165,7 @@
                       OperandVector &Operands);
 
   bool parseDirectiveArch(SMLoc L);
+  bool parseDirectiveArchExtension(SMLoc L);
   bool parseDirectiveCPU(SMLoc L);
   bool parseDirectiveInst(SMLoc L);
 
@@ -175,6 +176,8 @@
 
   bool parseDirectiveReq(StringRef Name, SMLoc L);
   bool parseDirectiveUnreq(SMLoc L);
+  bool parseDirectiveCFINegateRAState();
+  bool parseDirectiveCFIBKeyFrame();
 
   bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                            SmallVectorImpl<SMLoc> &Loc);
@@ -2823,12 +2826,13 @@
     {"simd", {AArch64::FeatureNEON}},
     {"ras", {AArch64::FeatureRAS}},
     {"lse", {AArch64::FeatureLSE}},
-    {"predctrl", {AArch64::FeaturePredCtrl}},
+    {"predres", {AArch64::FeaturePredRes}},
     {"ccdp", {AArch64::FeatureCacheDeepPersist}},
     {"mte", {AArch64::FeatureMTE}},
     {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
     {"pan-rwv", {AArch64::FeaturePAN_RWV}},
     {"ccpp", {AArch64::FeatureCCPP}},
+    {"sve", {AArch64::FeatureSVE}},
     // FIXME: Unsupported extensions
     {"pan", {}},
     {"lor", {}},
@@ -5027,6 +5031,12 @@
     parseDirectiveUnreq(Loc);
   else if (IDVal == ".inst")
     parseDirectiveInst(Loc);
+  else if (IDVal == ".cfi_negate_ra_state")
+    parseDirectiveCFINegateRAState();
+  else if (IDVal == ".cfi_b_key_frame")
+    parseDirectiveCFIBKeyFrame();
+  else if (IDVal == ".arch_extension")
+    parseDirectiveArchExtension(Loc);
   else if (IsMachO) {
     if (IDVal == MCLOHDirectiveName())
       parseDirectiveLOH(IDVal, Loc);
@@ -5147,6 +5157,50 @@
   return false;
 }
 
+/// parseDirectiveArchExtension
+///   ::= .arch_extension [no]feature
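+///
+/// For example, ".arch_extension predres" enables the extension while
+/// ".arch_extension nopredres" disables it again.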
+bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
+  MCAsmParser &Parser = getParser();
+
+  if (getLexer().isNot(AsmToken::Identifier))
+    return Error(getLexer().getLoc(), "expected architecture extension name");
+
+  const AsmToken &Tok = Parser.getTok();
+  StringRef Name = Tok.getString();
+  SMLoc ExtLoc = Tok.getLoc();
+  Lex();
+
+  if (parseToken(AsmToken::EndOfStatement,
+                 "unexpected token in '.arch_extension' directive"))
+    return true;
+
+  bool EnableFeature = true;
+  if (Name.startswith_lower("no")) {
+    EnableFeature = false;
+    Name = Name.substr(2);
+  }
+
+  MCSubtargetInfo &STI = copySTI();
+  FeatureBitset Features = STI.getFeatureBits();
+  for (const auto &Extension : ExtensionMap) {
+    if (Extension.Name != Name)
+      continue;
+
+    if (Extension.Features.none())
+      return Error(ExtLoc, "unsupported architectural extension: " + Name);
+
+    FeatureBitset ToggleFeatures = EnableFeature
+                                       ? (~Features & Extension.Features)
+                                       : (Features & Extension.Features);
+    uint64_t AvailableFeatures =
+        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
+    setAvailableFeatures(AvailableFeatures);
+    return false;
+  }
+
+  return Error(ExtLoc, "unknown architectural extension: " + Name);
+}
+
 static SMLoc incrementLoc(SMLoc L, int Offset) {
   return SMLoc::getFromPointer(L.getPointer() + Offset);
 }
@@ -5400,6 +5454,23 @@
   return false;
 }
 
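+/// parseDirectiveCFINegateRAState
+/// ::= .cfi_negate_ra_state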
+bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
+  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
+    return true;
+  getStreamer().EmitCFINegateRAState();
+  return false;
+}
+
+/// parseDirectiveCFIBKeyFrame
+/// ::= .cfi_b_key_frame
+bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
+  if (parseToken(AsmToken::EndOfStatement,
+                 "unexpected token in '.cfi_b_key_frame'"))
+    return true;
+  getStreamer().EmitCFIBKeyFrame();
+  return false;
+}
+
 bool
 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                     AArch64MCExpr::VariantKind &ELFRefKind,
@@ -5424,10 +5495,16 @@
   // Check that it looks like a symbol + an addend
   MCValue Res;
   bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
-  if (!Relocatable || !Res.getSymA() || Res.getSymB())
+  if (!Relocatable || Res.getSymB())
     return false;
 
-  DarwinRefKind = Res.getSymA()->getKind();
+  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
+  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
+  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
+    return false;
+
+  if (Res.getSymA())
+    DarwinRefKind = Res.getSymA()->getKind();
   Addend = Res.getConstant();
 
   // It's some symbol reference + a constant addend, but really
diff --git a/lib/Target/AArch64/CMakeLists.txt b/lib/Target/AArch64/CMakeLists.txt
index 9c8c1d0..7778882 100644
--- a/lib/Target/AArch64/CMakeLists.txt
+++ b/lib/Target/AArch64/CMakeLists.txt
@@ -52,6 +52,7 @@
   AArch64RegisterBankInfo.cpp
   AArch64RegisterInfo.cpp
   AArch64SelectionDAGInfo.cpp
+  AArch64SpeculationHardening.cpp
   AArch64StorePairSuppress.cpp
   AArch64Subtarget.cpp
   AArch64TargetMachine.cpp
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index f21d8b9..ed89d99 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -110,11 +110,11 @@
   case FK_Data_1:
     return 1;
 
-  case AArch64::fixup_aarch64_movw:
   case FK_Data_2:
   case FK_SecRel_2:
     return 2;
 
+  case AArch64::fixup_aarch64_movw:
   case AArch64::fixup_aarch64_pcrel_branch14:
   case AArch64::fixup_aarch64_add_imm12:
   case AArch64::fixup_aarch64_ldst_imm12_scale1:
@@ -145,9 +145,9 @@
   return (hi19 << 5) | (lo2 << 29);
 }
 
-static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
-                                 MCContext &Ctx, const Triple &TheTriple,
-                                 bool IsResolved) {
+static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
+                                 uint64_t Value, MCContext &Ctx,
+                                 const Triple &TheTriple, bool IsResolved) {
   unsigned Kind = Fixup.getKind();
   int64_t SignedValue = static_cast<int64_t>(Value);
   switch (Kind) {
@@ -215,10 +215,79 @@
     if (Value & 0xf)
       Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
     return Value >> 4;
-  case AArch64::fixup_aarch64_movw:
-    Ctx.reportError(Fixup.getLoc(),
-                    "no resolvable MOVZ/MOVK fixups supported yet");
+  case AArch64::fixup_aarch64_movw: {
+    AArch64MCExpr::VariantKind RefKind =
+        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
+        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
+      // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
+      // ever be resolved in the assembler.
+      Ctx.reportError(Fixup.getLoc(),
+                      "relocation for a thread-local variable points to an "
+                      "absolute symbol");
+      return Value;
+    }
+
+    if (!IsResolved) {
+      // FIXME: Figure out when this can actually happen, and verify our
+      // behavior.
+      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
+                                      "implemented");
+      return Value;
+    }
+
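+    // Each G0/G1/G2/G3 fragment selects one 16-bit chunk of the resolved
+    // value; e.g. a fixup for ":abs_g1:sym" encodes bits [31:16], so the
+    // value is shifted right by 16 before the range check and encoding below.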
+    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
+      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
+      case AArch64MCExpr::VK_G0:
+        break;
+      case AArch64MCExpr::VK_G1:
+        SignedValue = SignedValue >> 16;
+        break;
+      case AArch64MCExpr::VK_G2:
+        SignedValue = SignedValue >> 32;
+        break;
+      case AArch64MCExpr::VK_G3:
+        SignedValue = SignedValue >> 48;
+        break;
+      default:
+        llvm_unreachable("Variant kind doesn't correspond to fixup");
+      }
+
+    } else {
+      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
+      case AArch64MCExpr::VK_G0:
+        break;
+      case AArch64MCExpr::VK_G1:
+        Value = Value >> 16;
+        break;
+      case AArch64MCExpr::VK_G2:
+        Value = Value >> 32;
+        break;
+      case AArch64MCExpr::VK_G3:
+        Value = Value >> 48;
+        break;
+      default:
+        llvm_unreachable("Variant kind doesn't correspond to fixup");
+      }
+    }
+
+    if (RefKind & AArch64MCExpr::VK_NC) {
+      Value &= 0xFFFF;
+    }
+    else if (RefKind & AArch64MCExpr::VK_SABS) {
+      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
+        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+
+      // Invert the negative immediate because it will feed into a MOVN.
+      if (SignedValue < 0)
+        SignedValue = ~SignedValue;
+      Value = static_cast<uint64_t>(SignedValue);
+    }
+    else if (Value > 0xFFFF) {
+      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+    }
     return Value;
+  }
   case AArch64::fixup_aarch64_pcrel_branch14:
     // Signed 16-bit immediate
     if (SignedValue > 32767 || SignedValue < -32768)
@@ -295,8 +364,9 @@
     return; // Doesn't change encoding.
   MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
   MCContext &Ctx = Asm.getContext();
+  int64_t SignedValue = static_cast<int64_t>(Value);
   // Apply any target-specific value adjustments.
-  Value = adjustFixupValue(Fixup, Value, Ctx, TheTriple, IsResolved);
+  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);
 
   // Shift the value into position.
   Value <<= Info.TargetOffset;
@@ -323,6 +393,19 @@
       Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
     }
   }
+
+  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
+  // handle this more cleanly. This may affect the output of -show-mc-encoding.
+  AArch64MCExpr::VariantKind RefKind =
+    static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+  if (RefKind & AArch64MCExpr::VK_SABS) {
+    // If the immediate is negative, generate MOVN else MOVZ.
+    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
+    if (SignedValue < 0)
+      Data[Offset + 3] &= ~(1 << 6);
+    else
+      Data[Offset + 3] |= (1 << 6);
+  }
 }
 
 bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst,
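The movw handling added above selects a 16-bit chunk of the symbol value by the G0-G3 address fragment, masks it for :no-check: variants, and inverts negative signed-absolute immediates so they can feed a MOVN (with instruction bit 30 flipped between MOVN and MOVZ in applyFixup, per the comment in the diff). A standalone sketch of that arithmetic, using illustrative stand-ins rather than the real AArch64MCExpr types:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the G0..G3 address fragments; the real
// encodings live in AArch64MCExpr and differ from these values.
enum Frag { G0, G1, G2, G3 };

// Mirrors the shift/mask logic above: pick the 16-bit chunk named by the
// :abs_gN: fragment, then (for the :no-check: variants) mask to 16 bits.
uint64_t movwChunk(uint64_t Value, Frag F, bool NoCheck) {
  switch (F) {
  case G0: break;
  case G1: Value >>= 16; break;
  case G2: Value >>= 32; break;
  case G3: Value >>= 48; break;
  }
  if (NoCheck)
    Value &= 0xFFFF;
  return Value;
}

// For signed-absolute fixups a negative immediate is inverted so it can feed
// a MOVN; the backend then flips bit 30 to select MOVN (0) vs MOVZ (1).
uint64_t sabsChunk(int64_t SignedValue, bool &UseMovn) {
  UseMovn = SignedValue < 0;
  if (UseMovn)
    SignedValue = ~SignedValue;
  return static_cast<uint64_t>(SignedValue);
}

int main() {
  // movz x0, #:abs_g1_nc:sym with sym = 0x12345678 encodes chunk 0x1234.
  printf("g1_nc chunk: 0x%llx\n",
         (unsigned long long)movwChunk(0x12345678, G1, true));
  bool UseMovn;
  // A signed value of -2 becomes ~(-2) == 1 and selects MOVN.
  printf("sabs chunk: 0x%llx movn=%d\n",
         (unsigned long long)sabsChunk(-2, UseMovn), UseMovn);
}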
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index 0e486b9..58e4a9c 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -132,4 +132,7 @@
 
   CommentString = "//";
   ExceptionsType = ExceptionHandling::DwarfCFI;
+  // The default is dwarf, but WinEH can be enabled optionally, which requires
+  // WinEHEncodingType to be set.
+  WinEHEncodingType = WinEH::EncodingType::Itanium;
 }
diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index b8d0977..44c6a6b 100644
--- a/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -581,6 +581,10 @@
     /// to the symbol is for an import stub.  This is used for DLL import
     /// storage class indication on Windows.
     MO_DLLIMPORT = 0x80,
+
+    /// MO_S - Indicates that the bits of the symbol operand represented by
+    /// MO_G0 etc are signed.
+    MO_S = 0x100,
   };
 } // end namespace AArch64II
 
diff --git a/lib/Target/AMDGPU/AMDGPU.h b/lib/Target/AMDGPU/AMDGPU.h
index d26397a..bb7801c 100644
--- a/lib/Target/AMDGPU/AMDGPU.h
+++ b/lib/Target/AMDGPU/AMDGPU.h
@@ -43,6 +43,7 @@
 FunctionPass *createSIPeepholeSDWAPass();
 FunctionPass *createSILowerI1CopiesPass();
 FunctionPass *createSIFixupVectorISelPass();
+FunctionPass *createSIAddIMGInitPass();
 FunctionPass *createSIShrinkInstructionsPass();
 FunctionPass *createSILoadStoreOptimizerPass();
 FunctionPass *createSIWholeQuadModePass();
@@ -158,6 +159,9 @@
 void initializeAMDGPUUseNativeCallsPass(PassRegistry &);
 extern char &AMDGPUUseNativeCallsID;
 
+void initializeSIAddIMGInitPass(PassRegistry &);
+extern char &SIAddIMGInitID;
+
 void initializeAMDGPUPerfHintAnalysisPass(PassRegistry &);
 extern char &AMDGPUPerfHintAnalysisID;
 
diff --git a/lib/Target/AMDGPU/AMDGPU.td b/lib/Target/AMDGPU/AMDGPU.td
index 0aacedf..6a4cfe0 100644
--- a/lib/Target/AMDGPU/AMDGPU.td
+++ b/lib/Target/AMDGPU/AMDGPU.td
@@ -267,7 +267,13 @@
 def FeatureDLInsts : SubtargetFeature<"dl-insts",
   "HasDLInsts",
   "true",
-  "Has deep learning instructions"
+  "Has v_fmac_f32 and v_xnor_b32 instructions"
+>;
+
+def FeatureDotInsts : SubtargetFeature<"dot-insts",
+  "HasDotInsts",
+  "true",
+  "Has v_dot* instructions"
 >;
 
 def FeatureSRAMECC : SubtargetFeature<"sram-ecc",
@@ -371,6 +377,16 @@
   "Use ds_{read|write}_b128"
 >;
 
+// Sparse texture support requires that all result registers are zeroed when
+// PRTStrictNull is set to true. This feature is turned on by default for all
+// architectures, but is exposed as a subtarget feature in case there are
+// situations where PRTStrictNull is disabled by the driver.
+def FeatureEnablePRTStrictNull : SubtargetFeature<"enable-prt-strict-null",
+  "EnablePRTStrictNull",
+  "true",
+  "Enable zeroing of result registers for sparse texture fetches"
+>;
+
 // Unless +-flat-for-global is specified, turn on FlatForGlobal for
 // all OS-es on VI and newer hardware to avoid assertion failures due
 // to missing ADDR64 variants of MUBUF instructions.
@@ -558,6 +574,7 @@
    FeatureFmaMixInsts,
    FeatureLDSBankCount32,
    FeatureDLInsts,
+   FeatureDotInsts,
    FeatureSRAMECC,
    FeatureCodeObjectV3]>;
 
@@ -756,6 +773,9 @@
 def HasDLInsts : Predicate<"Subtarget->hasDLInsts()">,
   AssemblerPredicate<"FeatureDLInsts">;
 
+def HasDotInsts : Predicate<"Subtarget->hasDotInsts()">,
+  AssemblerPredicate<"FeatureDotInsts">;
+
 
 def EnableLateCFGStructurize : Predicate<
   "EnableLateStructurizeCFG">;
diff --git a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
index 4e0cc73..f88e3b0 100644
--- a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -117,14 +117,8 @@
 }
 
 void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
-  if (I.isUnconditional())
-    return;
-
-  Value *Cond = I.getCondition();
-  if (!DA->isUniform(Cond))
-    return;
-
-  setUniformMetadata(I.getParent()->getTerminator());
+  if (DA->isUniform(&I))
+    setUniformMetadata(I.getParent()->getTerminator());
 }
 
 void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
diff --git a/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def b/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def
index 3a58c6c..6eab59a 100644
--- a/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def
+++ b/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def
@@ -16,34 +16,38 @@
 
 enum PartialMappingIdx {
   None = - 1,
-  PM_SGPR1  = 0,
-  PM_SGPR16 = 4,
-  PM_SGPR32 = 5,
-  PM_SGPR64 = 6,
-  PM_SGPR128 = 7,
-  PM_SGPR256 = 8,
-  PM_SGPR512 = 9,
-  PM_VGPR1  = 10,
-  PM_VGPR16 = 14,
-  PM_VGPR32 = 15,
-  PM_VGPR64 = 16,
-  PM_VGPR128 = 17,
-  PM_VGPR256 = 18,
-  PM_VGPR512 = 19,
-  PM_SGPR96 = 20,
-  PM_VGPR96 = 21
+  PM_SGPR1  = 2,
+  PM_SGPR16 = 6,
+  PM_SGPR32 = 7,
+  PM_SGPR64 = 8,
+  PM_SGPR128 = 9,
+  PM_SGPR256 = 10,
+  PM_SGPR512 = 11,
+  PM_VGPR1  = 12,
+  PM_VGPR16 = 16,
+  PM_VGPR32 = 17,
+  PM_VGPR64 = 18,
+  PM_VGPR128 = 19,
+  PM_VGPR256 = 20,
+  PM_VGPR512 = 21,
+  PM_SGPR96 = 22,
+  PM_VGPR96 = 23
 };
 
 const RegisterBankInfo::PartialMapping PartMappings[] {
   // StartIdx, Length, RegBank
   {0, 1,  SCCRegBank},
+  {0, 1,  VCCRegBank},
+
+  {0, 1,  SGPRRegBank}, // SGPR begin
   {0, 16, SGPRRegBank},
   {0, 32, SGPRRegBank},
   {0, 64, SGPRRegBank},
   {0, 128, SGPRRegBank},
   {0, 256, SGPRRegBank},
   {0, 512, SGPRRegBank},
-  {0, 1,  SGPRRegBank},
+
+  {0, 1,  VGPRRegBank}, // VGPR begin
   {0, 16, VGPRRegBank},
   {0, 32, VGPRRegBank},
   {0, 64, VGPRRegBank},
@@ -55,33 +59,43 @@
 };
 
 const RegisterBankInfo::ValueMapping ValMappings[] {
+  // SCC
   {&PartMappings[0], 1},
-  {nullptr, 0},
-  {nullptr, 0},
-  {nullptr, 0},
+
+  // VCC
   {&PartMappings[1], 1},
+
+  // SGPRs
   {&PartMappings[2], 1},
+  {nullptr, 0}, // Illegal power of 2 sizes
+  {nullptr, 0},
+  {nullptr, 0},
   {&PartMappings[3], 1},
   {&PartMappings[4], 1},
   {&PartMappings[5], 1},
   {&PartMappings[6], 1},
   {&PartMappings[7], 1},
-  {nullptr, 0},
-  {nullptr, 0},
-  {nullptr, 0},
   {&PartMappings[8], 1},
+
+  // VGPRs
   {&PartMappings[9], 1},
+  {nullptr, 0},
+  {nullptr, 0},
+  {nullptr, 0},
   {&PartMappings[10], 1},
   {&PartMappings[11], 1},
   {&PartMappings[12], 1},
   {&PartMappings[13], 1},
   {&PartMappings[14], 1},
-  {&PartMappings[15], 1}
+  {&PartMappings[15], 1},
+  {&PartMappings[16], 1},
+  {&PartMappings[17], 1}
 };
 
 enum ValueMappingIdx {
-  SGPRStartIdx = 0,
-  VGPRStartIdx = 10
+  SCCStartIdx = 0,
+  SGPRStartIdx = 2,
+  VGPRStartIdx = 12
 };
 
 const RegisterBankInfo::ValueMapping *getValueMapping(unsigned BankID,
@@ -89,16 +103,28 @@
   unsigned Idx;
   switch (Size) {
   case 1:
-    Idx = BankID == AMDGPU::SCCRegBankID ? PM_SGPR1 : PM_VGPR1;
+    if (BankID == AMDGPU::SCCRegBankID)
+      return &ValMappings[0];
+    if (BankID == AMDGPU::VCCRegBankID)
+      return &ValMappings[1];
+
+    // 1-bit values not from a compare etc.
+    Idx = BankID == AMDGPU::SGPRRegBankID ? PM_SGPR1 : PM_VGPR1;
     break;
   case 96:
+    assert(BankID != AMDGPU::VCCRegBankID);
     Idx = BankID == AMDGPU::SGPRRegBankID ? PM_SGPR96 : PM_VGPR96;
     break;
   default:
+    assert(BankID != AMDGPU::VCCRegBankID);
     Idx = BankID == AMDGPU::VGPRRegBankID ? VGPRStartIdx : SGPRStartIdx;
     Idx += Log2_32_Ceil(Size);
     break;
   }
+
+  assert(Log2_32_Ceil(Size) == Log2_32_Ceil(ValMappings[Idx].BreakDown->Length));
+  assert(BankID == ValMappings[Idx].BreakDown->RegBank->getID());
+
   return &ValMappings[Idx];
 }
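A worked model of the lookup that the new asserts verify: with the SCC and VCC entries occupying slots 0 and 1, each bank's power-of-two sizes start at a fixed base index and are offset by ceil(log2(Size)). The indices below are copied from the enums above; everything else is a hypothetical stand-in:

#include <cassert>

enum { SCCIdx = 0, VCCIdx = 1, SGPRStartIdx = 2, VGPRStartIdx = 12 };

unsigned log2Ceil(unsigned X) {
  unsigned L = 0;
  while ((1u << L) < X)
    ++L;
  return L;
}

// Mirrors the default case of getValueMapping() for power-of-two sizes.
unsigned mappingIndex(bool IsVGPR, unsigned Size) {
  return (IsVGPR ? VGPRStartIdx : SGPRStartIdx) + log2Ceil(Size);
}

int main() {
  assert(mappingIndex(false, 32) == 7);  // PM_SGPR32
  assert(mappingIndex(false, 64) == 8);  // PM_SGPR64
  assert(mappingIndex(true, 32) == 17);  // PM_VGPR32
}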
 
diff --git a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 025e2de..a0a045e 100644
--- a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1454,9 +1454,13 @@
     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
 
     // (add n0, c0)
-    Base = N0;
-    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
-    return true;
+    // Don't peel off the offset (c0) if doing so could possibly cause the
+    // base (n0) to be negative.
+    if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0)) {
+      Base = N0;
+      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
+      return true;
+    }
   }
 
   if (isa<ConstantSDNode>(Index))
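A minimal model of the guard introduced above, assuming the addressing mode misbehaves when the split-out base is negative. SignBitIsZero() is a known-bits query on the DAG; the stand-in below simply inspects a concrete value:

#include <cstdint>
#include <cstdio>

// Peel the constant only when it is non-positive or the base is provably
// non-negative (here modeled as the sign bit being clear).
bool canPeelOffset(int32_t Base, int32_t C) {
  bool SignBitIsZero = Base >= 0;
  return C <= 0 || SignBitIsZero;
}

int main() {
  // Base = -4, C = 8: the sum (4) is fine, but the split form would hand the
  // hardware a negative base, so the combine is rejected.
  printf("%d\n", canPeelOffset(-4, 8)); // 0
  printf("%d\n", canPeelOffset(16, 8)); // 1
}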
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index f5894c9..6951c91 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -128,10 +128,8 @@
 }
 
 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
-  KnownBits Known;
   EVT VT = Op.getValueType();
-  DAG.computeKnownBits(Op, Known);
-
+  KnownBits Known = DAG.computeKnownBits(Op);
   return VT.getSizeInBits() - Known.countMinLeadingZeros();
 }
 
@@ -2719,21 +2717,33 @@
     AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
 }
 
-static bool simplifyI24(SDNode *Node24, unsigned OpIdx,
-                        TargetLowering::DAGCombinerInfo &DCI) {
-
+static SDValue simplifyI24(SDNode *Node24,
+                           TargetLowering::DAGCombinerInfo &DCI) {
   SelectionDAG &DAG = DCI.DAG;
-  SDValue Op = Node24->getOperand(OpIdx);
+  SDValue LHS = Node24->getOperand(0);
+  SDValue RHS = Node24->getOperand(1);
+
+  APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
+
+  // First try to simplify using GetDemandedBits which allows the operands to
+  // have other uses, but will only perform simplifications that involve
+  // bypassing some nodes for this user.
+  SDValue DemandedLHS = DAG.GetDemandedBits(LHS, Demanded);
+  SDValue DemandedRHS = DAG.GetDemandedBits(RHS, Demanded);
+  if (DemandedLHS || DemandedRHS)
+    return DAG.getNode(Node24->getOpcode(), SDLoc(Node24), Node24->getVTList(),
+                       DemandedLHS ? DemandedLHS : LHS,
+                       DemandedRHS ? DemandedRHS : RHS);
+
+  // Now try SimplifyDemandedBits which can simplify the nodes used by our
+  // operands if this node is the only user.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  EVT VT = Op.getValueType();
+  if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
+    return SDValue(Node24, 0);
+  if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
+    return SDValue(Node24, 0);
 
-  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
-  APInt KnownZero, KnownOne;
-  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
-  if (TLI.SimplifyDemandedBits(Node24, OpIdx, Demanded, DCI, TLO))
-    return true;
-
-  return false;
+  return SDValue();
 }
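The rewritten simplifyI24 first tries GetDemandedBits, which may look through nodes for this single user even when the operands have other uses, and only then SimplifyDemandedBits, which can rewrite nodes it solely owns. A toy illustration of the 24-bit demanded mask, assuming an AND feeds the multiply:

#include <cstdint>
#include <cstdio>

// MUL_U24/MUL_I24 only read bits [23:0] of each operand, so the combine asks
// for just those bits. An (and x, Mask) feeding the multiply can be bypassed
// whenever Mask covers the whole demanded low-24-bit range.
const uint32_t Demanded = (1u << 24) - 1; // APInt::getLowBitsSet(32, 24)

bool andIsRedundant(uint32_t Mask) { return (Mask & Demanded) == Demanded; }

int main() {
  printf("%d\n", andIsRedundant(0x00FFFFFF)); // 1: AND can be looked through
  printf("%d\n", andIsRedundant(0xFFFFFFFF)); // 1
  printf("%d\n", andIsRedundant(0x0000FFFF)); // 0: clears demanded bits
}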
 
 template <typename IntTy>
@@ -2970,8 +2980,7 @@
     // shl (ext x) => zext (shl x), if shift does not overflow int
     if (VT != MVT::i64)
       break;
-    KnownBits Known;
-    DAG.computeKnownBits(X, Known);
+    KnownBits Known = DAG.computeKnownBits(X);
     unsigned LZ = Known.countMinLeadingZeros();
     if (LZ < RHSVal)
       break;
@@ -3130,8 +3139,7 @@
          Src.getOpcode() == ISD::SRA ||
          Src.getOpcode() == ISD::SHL)) {
       SDValue Amt = Src.getOperand(1);
-      KnownBits Known;
-      DAG.computeKnownBits(Amt, Known);
+      KnownBits Known = DAG.computeKnownBits(Amt);
       unsigned Size = VT.getScalarSizeInBits();
       if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
           (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
@@ -3283,8 +3291,8 @@
   SelectionDAG &DAG = DCI.DAG;
 
   // Simplify demanded bits before splitting into multiple users.
-  if (simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI))
-    return SDValue();
+  if (SDValue V = simplifyI24(N, DCI))
+    return V;
 
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -3870,9 +3878,8 @@
   case AMDGPUISD::MUL_U24:
   case AMDGPUISD::MULHI_I24:
   case AMDGPUISD::MULHI_U24: {
-    // If the first call to simplify is successfull, then N may end up being
-    // deleted, so we shouldn't call simplifyI24 again.
-    simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI);
+    if (SDValue V = simplifyI24(N, DCI))
+      return V;
     return SDValue();
   }
   case AMDGPUISD::MUL_LOHI_I24:
@@ -4294,33 +4301,42 @@
   }
   case AMDGPUISD::MUL_U24:
   case AMDGPUISD::MUL_I24: {
-    KnownBits LHSKnown, RHSKnown;
-    DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1);
-    DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1);
-
+    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
+    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
     unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
                       RHSKnown.countMinTrailingZeros();
     Known.Zero.setLowBits(std::min(TrailZ, 32u));
 
-    unsigned LHSValBits = 32 - std::max(LHSKnown.countMinSignBits(), 8u);
-    unsigned RHSValBits = 32 - std::max(RHSKnown.countMinSignBits(), 8u);
-    unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
-    if (MaxValBits >= 32)
-      break;
+    // Truncate to 24 bits.
+    LHSKnown = LHSKnown.trunc(24);
+    RHSKnown = RHSKnown.trunc(24);
+
     bool Negative = false;
     if (Opc == AMDGPUISD::MUL_I24) {
-      bool LHSNegative = !!(LHSKnown.One  & (1 << 23));
-      bool LHSPositive = !!(LHSKnown.Zero & (1 << 23));
-      bool RHSNegative = !!(RHSKnown.One  & (1 << 23));
-      bool RHSPositive = !!(RHSKnown.Zero & (1 << 23));
+      unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
+      unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
+      unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
+      if (MaxValBits >= 32)
+        break;
+      bool LHSNegative = LHSKnown.isNegative();
+      bool LHSPositive = LHSKnown.isNonNegative();
+      bool RHSNegative = RHSKnown.isNegative();
+      bool RHSPositive = RHSKnown.isNonNegative();
       if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive))
         break;
       Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative);
-    }
-    if (Negative)
-      Known.One.setHighBits(32 - MaxValBits);
-    else
+      if (Negative)
+        Known.One.setHighBits(32 - MaxValBits);
+      else
+        Known.Zero.setHighBits(32 - MaxValBits);
+    } else {
+      unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
+      unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
+      unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
+      if (MaxValBits >= 32)
+        break;
       Known.Zero.setHighBits(32 - MaxValBits);
+    }
     break;
   }
   case AMDGPUISD::PERM: {
@@ -4328,9 +4344,8 @@
     if (!CMask)
       return;
 
-    KnownBits LHSKnown, RHSKnown;
-    DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1);
-    DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1);
+    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
+    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
     unsigned Sel = CMask->getZExtValue();
 
     for (unsigned I = 0; I < 32; I += 8) {
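For the MUL_I24 sign-bit accounting above, a standalone check of the value-bit arithmetic, assuming both operands are already known to fit in their low 24 bits:

#include <algorithm>
#include <cstdio>

// With S known sign bits in 24, an operand carries (24 - S) value bits; if
// the value-bit total of the product stays under 32, the high bits of the
// 32-bit result are all known (copies of the sign).
unsigned knownHighBits(unsigned LHSSignBits, unsigned RHSSignBits) {
  unsigned LHSValBits = 24 - LHSSignBits;
  unsigned RHSValBits = 24 - RHSSignBits;
  unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
  return MaxValBits >= 32 ? 0 : 32 - MaxValBits;
}

int main() {
  // Operands with 14 known sign bits each: 10 + 10 value bits, so the top
  // 12 bits of the product are known.
  printf("%u\n", knownHighBits(14, 14)); // 12
}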
diff --git a/lib/Target/AMDGPU/AMDGPUInstructions.td b/lib/Target/AMDGPU/AMDGPUInstructions.td
index 282d1c1..eb8f200 100644
--- a/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -842,6 +842,7 @@
   [{ (void)N; return TM.Options.NoNaNsFPMath; }]
 >;
 
+let AddedComplexity = 2 in {
 class IMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
   (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
   !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
@@ -853,6 +854,7 @@
   !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                 (Inst $src0, $src1, $src2))
 >;
+} // AddedComplexity.
 
 class RcpPat<Instruction RcpInst, ValueType vt> : AMDGPUPat <
   (fdiv FP_ONE, vt:$src),
diff --git a/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 0e09593..ef85c10 100644
--- a/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -33,12 +33,44 @@
   };
 
   const LLT S1 = LLT::scalar(1);
-  const LLT V2S16 = LLT::vector(2, 16);
-
   const LLT S32 = LLT::scalar(32);
   const LLT S64 = LLT::scalar(64);
   const LLT S512 = LLT::scalar(512);
 
+  const LLT V2S16 = LLT::vector(2, 16);
+  const LLT V4S16 = LLT::vector(4, 16);
+  const LLT V8S16 = LLT::vector(8, 16);
+
+  const LLT V2S32 = LLT::vector(2, 32);
+  const LLT V3S32 = LLT::vector(3, 32);
+  const LLT V4S32 = LLT::vector(4, 32);
+  const LLT V5S32 = LLT::vector(5, 32);
+  const LLT V6S32 = LLT::vector(6, 32);
+  const LLT V7S32 = LLT::vector(7, 32);
+  const LLT V8S32 = LLT::vector(8, 32);
+  const LLT V9S32 = LLT::vector(9, 32);
+  const LLT V10S32 = LLT::vector(10, 32);
+  const LLT V11S32 = LLT::vector(11, 32);
+  const LLT V12S32 = LLT::vector(12, 32);
+  const LLT V13S32 = LLT::vector(13, 32);
+  const LLT V14S32 = LLT::vector(14, 32);
+  const LLT V15S32 = LLT::vector(15, 32);
+  const LLT V16S32 = LLT::vector(16, 32);
+
+  const LLT V2S64 = LLT::vector(2, 64);
+  const LLT V3S64 = LLT::vector(3, 64);
+  const LLT V4S64 = LLT::vector(4, 64);
+  const LLT V5S64 = LLT::vector(5, 64);
+  const LLT V6S64 = LLT::vector(6, 64);
+  const LLT V7S64 = LLT::vector(7, 64);
+  const LLT V8S64 = LLT::vector(8, 64);
+
+  std::initializer_list<LLT> AllS32Vectors =
+    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
+     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
+  std::initializer_list<LLT> AllS64Vectors =
+    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};
+
   const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
   const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
   const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
@@ -55,13 +87,20 @@
     PrivatePtr
   };
 
+  setAction({G_BRCOND, S1}, Legal);
+
   setAction({G_ADD, S32}, Legal);
   setAction({G_ASHR, S32}, Legal);
   setAction({G_SUB, S32}, Legal);
   setAction({G_MUL, S32}, Legal);
-  setAction({G_AND, S32}, Legal);
-  setAction({G_OR, S32}, Legal);
-  setAction({G_XOR, S32}, Legal);
+
+  // FIXME: 64-bit ones only legal for scalar
+  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
+    .legalFor({S32, S1, S64, V2S32});
+
+  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
+                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
+    .legalFor({{S32, S1}});
 
   setAction({G_BITCAST, V2S16}, Legal);
   setAction({G_BITCAST, 1, S32}, Legal);
@@ -90,10 +129,21 @@
   // between these two scenarios.
   setAction({G_CONSTANT, S1}, Legal);
 
+  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);
+
   getActionDefinitionsBuilder(
-    { G_FADD, G_FMUL })
+    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
     .legalFor({S32, S64});
 
+  getActionDefinitionsBuilder(G_FPTRUNC)
+    .legalFor({{S32, S64}});
+
+  // Use actual fsub instruction
+  setAction({G_FSUB, S32}, Legal);
+
+  // Must use fadd + fneg
+  setAction({G_FSUB, S64}, Lower);
+
   setAction({G_FCMP, S1}, Legal);
   setAction({G_FCMP, 1, S32}, Legal);
   setAction({G_FCMP, 1, S64}, Legal);
@@ -113,9 +163,19 @@
   setAction({G_SITOFP, S32}, Legal);
   setAction({G_SITOFP, 1, S32}, Legal);
 
+  setAction({G_UITOFP, S32}, Legal);
+  setAction({G_UITOFP, 1, S32}, Legal);
+
   setAction({G_FPTOUI, S32}, Legal);
   setAction({G_FPTOUI, 1, S32}, Legal);
 
+  setAction({G_FPOW, S32}, Legal);
+  setAction({G_FEXP2, S32}, Legal);
+  setAction({G_FLOG2, S32}, Legal);
+
+  getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND})
+    .legalFor({S32, S64});
+
   for (LLT PtrTy : AddrSpaces) {
     LLT IdxTy = LLT::scalar(PtrTy.getSizeInBits());
     setAction({G_GEP, PtrTy}, Legal);
@@ -169,6 +229,16 @@
       });
 
 
+  auto &Atomics = getActionDefinitionsBuilder(
+    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
+     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
+     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
+     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
+    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
+               {S64, GlobalPtr}, {S64, LocalPtr}});
+  if (ST.hasFlatAddressSpace()) {
+    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
+  }
 
   setAction({G_SELECT, S32}, Legal);
   setAction({G_SELECT, 1, S1}, Legal);
@@ -205,13 +275,22 @@
       });
 
   getActionDefinitionsBuilder(G_BUILD_VECTOR)
-      .legalIf([=](const LegalityQuery &Query) {
-        const LLT &VecTy = Query.Types[0];
-        const LLT &ScalarTy = Query.Types[1];
-        return VecTy.getSizeInBits() % 32 == 0 &&
-               ScalarTy.getSizeInBits() % 32 == 0 &&
-               VecTy.getSizeInBits() <= 512;
-      });
+    .legalForCartesianProduct(AllS32Vectors, {S32})
+    .legalForCartesianProduct(AllS64Vectors, {S64})
+    .clampNumElements(0, V16S32, V16S32)
+    .clampNumElements(0, V2S64, V8S64)
+    .minScalarSameAs(1, 0);
+
+  // TODO: Support any combination of v2s32
+  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
+    .legalFor({{V4S32, V2S32},
+               {V8S32, V2S32},
+               {V8S32, V4S32},
+               {V4S64, V2S64},
+               {V4S16, V2S16},
+               {V8S16, V2S16},
+               {V8S16, V4S16}});
+
   // Merge/Unmerge
   for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
     unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
diff --git a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 28bb522..7a760dc 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -74,13 +74,16 @@
                                           const RegisterBank &Src,
                                           unsigned Size) const {
   if (Dst.getID() == AMDGPU::SGPRRegBankID &&
-      Src.getID() == AMDGPU::VGPRRegBankID)
+      Src.getID() == AMDGPU::VGPRRegBankID) {
     return std::numeric_limits<unsigned>::max();
+  }
 
   // SGPRRegBank with size 1 is actually vcc or another 64-bit sgpr written by
   // the valu.
   if (Size == 1 && Dst.getID() == AMDGPU::SCCRegBankID &&
-      Src.getID() == AMDGPU::SGPRRegBankID)
+      (Src.getID() == AMDGPU::SGPRRegBankID ||
+       Src.getID() == AMDGPU::VGPRRegBankID ||
+       Src.getID() == AMDGPU::VCCRegBankID))
     return std::numeric_limits<unsigned>::max();
 
   return RegisterBankInfo::copyCost(Dst, Src, Size);
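A toy model of the copyCost change above; the bank IDs are hypothetical stand-ins for the tablegen-generated AMDGPU::*RegBankID values:

#include <cstdio>
#include <limits>

enum Bank { SGPR, VGPR, SCC, VCC };

// VGPR->SGPR copies, and any size-1 copy into SCC from SGPR/VGPR/VCC, get an
// "infinite" cost so the mapping chooser never selects them.
unsigned copyCost(Bank Dst, Bank Src, unsigned Size) {
  if (Dst == SGPR && Src == VGPR)
    return std::numeric_limits<unsigned>::max();
  if (Size == 1 && Dst == SCC && (Src == SGPR || Src == VGPR || Src == VCC))
    return std::numeric_limits<unsigned>::max();
  return 1; // Stands in for the RegisterBankInfo::copyCost() default.
}

int main() {
  printf("%u\n", copyCost(SGPR, SGPR, 32)); // 1
  printf("%d\n", copyCost(SGPR, VGPR, 32) ==
                     std::numeric_limits<unsigned>::max()); // 1
}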
@@ -145,7 +148,7 @@
     AltMappings.push_back(&SSMapping);
 
     const InstructionMapping &SVMapping = getInstructionMapping(2, 1,
-      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
+      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                           nullptr, // Predicate operand.
                           AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
                           AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size)}),
@@ -153,7 +156,7 @@
     AltMappings.push_back(&SVMapping);
 
     const InstructionMapping &VSMapping = getInstructionMapping(3, 1,
-      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
+      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                           nullptr, // Predicate operand.
                           AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                           AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size)}),
@@ -161,7 +164,7 @@
     AltMappings.push_back(&VSMapping);
 
     const InstructionMapping &VVMapping = getInstructionMapping(4, 1,
-      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
+      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                           nullptr, // Predicate operand.
                           AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                           AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size)}),
@@ -180,25 +183,9 @@
       4); // Num Operands
     AltMappings.push_back(&SSMapping);
 
-    const InstructionMapping &SVMapping = getInstructionMapping(2, 1,
-      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
-                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
-                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
-                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size)}),
-      4); // Num Operands
-    AltMappings.push_back(&SVMapping);
-
-    const InstructionMapping &VSMapping = getInstructionMapping(2, 1,
-      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
-                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
-                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
-                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size)}),
-      4); // Num Operands
-    AltMappings.push_back(&VSMapping);
-
     const InstructionMapping &VVMapping = getInstructionMapping(2, 1,
       getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
-                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
+                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                           AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                           AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size)}),
       4); // Num Operands
@@ -206,6 +193,47 @@
 
     return AltMappings;
   }
+  case TargetOpcode::G_UADDE:
+  case TargetOpcode::G_USUBE:
+  case TargetOpcode::G_SADDE:
+  case TargetOpcode::G_SSUBE: {
+    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
+    const InstructionMapping &SSMapping = getInstructionMapping(1, 1,
+      getOperandsMapping(
+        {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
+         AMDGPU::getValueMapping(AMDGPU::SCCRegBankID, 1),
+         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
+         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
+         AMDGPU::getValueMapping(AMDGPU::SCCRegBankID, 1)}),
+      5); // Num Operands
+    AltMappings.push_back(&SSMapping);
+
+    const InstructionMapping &VVMapping = getInstructionMapping(2, 1,
+      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
+                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1)}),
+      5); // Num Operands
+    AltMappings.push_back(&VVMapping);
+    return AltMappings;
+  }
+  case AMDGPU::G_BRCOND: {
+    assert(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 1);
+
+    const InstructionMapping &SMapping = getInstructionMapping(
+      1, 1, getOperandsMapping(
+        {AMDGPU::getValueMapping(AMDGPU::SCCRegBankID, 1), nullptr}),
+      2); // Num Operands
+    AltMappings.push_back(&SMapping);
+
+    const InstructionMapping &VMapping = getInstructionMapping(
+      1, 1, getOperandsMapping(
+        {AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1), nullptr }),
+      2); // Num Operands
+    AltMappings.push_back(&VMapping);
+    return AltMappings;
+  }
   default:
     break;
   }
@@ -232,9 +260,13 @@
     if (!MI.getOperand(i).isReg())
       continue;
     unsigned Reg = MI.getOperand(i).getReg();
-    const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI);
-    if (Bank && Bank->getID() != AMDGPU::SGPRRegBankID)
-      return false;
+    if (const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI)) {
+      if (Bank->getID() == AMDGPU::VGPRRegBankID)
+        return false;
+
+      assert(Bank->getID() == AMDGPU::SGPRRegBankID ||
+             Bank->getID() == AMDGPU::SCCRegBankID);
+    }
   }
   return true;
 }
@@ -247,7 +279,8 @@
 
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     unsigned Size = getSizeInBits(MI.getOperand(i).getReg(), MRI, *TRI);
-    OpdsMapping[i] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
+    unsigned BankID = Size == 1 ? AMDGPU::SCCRegBankID : AMDGPU::SGPRRegBankID;
+    OpdsMapping[i] = AMDGPU::getValueMapping(BankID, Size);
   }
   return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
                                MI.getNumOperands());
@@ -268,12 +301,32 @@
 
   unsigned Reg1 = MI.getOperand(OpdIdx).getReg();
   unsigned Size1 = getSizeInBits(Reg1, MRI, *TRI);
-  unsigned Bank1 = getRegBankID(Reg1, MRI, *TRI);
+
+  unsigned DefaultBankID = Size1 == 1 ?
+    AMDGPU::VCCRegBankID : AMDGPU::VGPRRegBankID;
+  unsigned Bank1 = getRegBankID(Reg1, MRI, *TRI, DefaultBankID);
+
   OpdsMapping[OpdIdx++] = AMDGPU::getValueMapping(Bank1, Size1);
 
   for (unsigned e = MI.getNumOperands(); OpdIdx != e; ++OpdIdx) {
     unsigned Size = getSizeInBits(MI.getOperand(OpdIdx).getReg(), MRI, *TRI);
-    OpdsMapping[OpdIdx] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
+    unsigned BankID = Size == 1 ? AMDGPU::VCCRegBankID : AMDGPU::VGPRRegBankID;
+    OpdsMapping[OpdIdx] = AMDGPU::getValueMapping(BankID, Size);
+  }
+
+  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
+                               MI.getNumOperands());
+}
+
+const RegisterBankInfo::InstructionMapping &
+AMDGPURegisterBankInfo::getDefaultMappingAllVGPR(const MachineInstr &MI) const {
+  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
+  SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
+
+  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+    unsigned Size = getSizeInBits(MI.getOperand(I).getReg(), MRI, *TRI);
+    OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
   }
 
   return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
@@ -342,21 +395,49 @@
   switch (MI.getOpcode()) {
   default:
     return getInvalidInstructionMapping();
+
+  case AMDGPU::G_AND:
+  case AMDGPU::G_OR:
+  case AMDGPU::G_XOR: {
+    unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+    if (Size == 1) {
+      OpdsMapping[0] = OpdsMapping[1] =
+        OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
+      break;
+    }
+
+    LLVM_FALLTHROUGH;
+  }
+
   case AMDGPU::G_ADD:
   case AMDGPU::G_SUB:
   case AMDGPU::G_MUL:
-  case AMDGPU::G_AND:
-  case AMDGPU::G_OR:
-  case AMDGPU::G_XOR:
   case AMDGPU::G_SHL:
+  case AMDGPU::G_UADDO:
+  case AMDGPU::G_SADDO:
+  case AMDGPU::G_USUBO:
+  case AMDGPU::G_SSUBO:
+  case AMDGPU::G_UADDE:
+  case AMDGPU::G_SADDE:
+  case AMDGPU::G_USUBE:
+  case AMDGPU::G_SSUBE:
     if (isSALUMapping(MI))
       return getDefaultMappingSOP(MI);
     LLVM_FALLTHROUGH;
 
   case AMDGPU::G_FADD:
+  case AMDGPU::G_FSUB:
   case AMDGPU::G_FPTOSI:
   case AMDGPU::G_FPTOUI:
   case AMDGPU::G_FMUL:
+  case AMDGPU::G_FMA:
+  case AMDGPU::G_SITOFP:
+  case AMDGPU::G_UITOFP:
+  case AMDGPU::G_FPTRUNC:
+  case AMDGPU::G_FEXP2:
+  case AMDGPU::G_FLOG2:
+  case AMDGPU::G_INTRINSIC_TRUNC:
+  case AMDGPU::G_INTRINSIC_ROUND:
     return getDefaultMappingVOP(MI);
   case AMDGPU::G_IMPLICIT_DEF: {
     unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
@@ -365,6 +446,7 @@
   }
   case AMDGPU::G_FCONSTANT:
   case AMDGPU::G_CONSTANT:
+  case AMDGPU::G_FRAME_INDEX:
   case AMDGPU::G_BLOCK_ADDR: {
     unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
     OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
@@ -411,7 +493,9 @@
   case AMDGPU::G_CTTZ:
   case AMDGPU::G_CTTZ_ZERO_UNDEF:
   case AMDGPU::G_CTPOP:
-  case AMDGPU::G_BSWAP: {
+  case AMDGPU::G_BSWAP:
+  case AMDGPU::G_FABS:
+  case AMDGPU::G_FNEG: {
     unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
     unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI, *TRI);
     OpdsMapping[0] = OpdsMapping[1] = AMDGPU::getValueMapping(BankID, Size);
@@ -452,7 +536,7 @@
   case AMDGPU::G_FCMP: {
     unsigned Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
     unsigned Op2Bank = getRegBankID(MI.getOperand(2).getReg(), MRI, *TRI);
-    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 1);
+    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
     OpdsMapping[1] = nullptr; // Predicate Operand.
     OpdsMapping[2] = AMDGPU::getValueMapping(Op2Bank, Size);
     OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
@@ -492,7 +576,7 @@
     unsigned Op3Bank = getRegBankID(MI.getOperand(3).getReg(), MRI, *TRI);
     unsigned Op0Bank = Op2Bank == AMDGPU::SGPRRegBankID &&
                        Op3Bank == AMDGPU::SGPRRegBankID ?
-                       AMDGPU::SCCRegBankID : AMDGPU::VGPRRegBankID;
+                       AMDGPU::SCCRegBankID : AMDGPU::VCCRegBankID;
     OpdsMapping[0] = AMDGPU::getValueMapping(Op0Bank, 1);
     OpdsMapping[1] = nullptr; // Predicate Operand.
     OpdsMapping[2] = AMDGPU::getValueMapping(Op2Bank, Size);
@@ -540,6 +624,18 @@
 
     break;
   }
+  case AMDGPU::G_UNMERGE_VALUES: {
+    unsigned Bank = isSALUMapping(MI) ? AMDGPU::SGPRRegBankID :
+      AMDGPU::VGPRRegBankID;
+
+    // Op1 and Dst should use the same register bank.
+    // FIXME: Shouldn't this be the default? Why do we need to handle this?
+    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+      unsigned Size = getSizeInBits(MI.getOperand(i).getReg(), MRI, *TRI);
+      OpdsMapping[i] = AMDGPU::getValueMapping(Bank, Size);
+    }
+    break;
+  }
   case AMDGPU::G_INTRINSIC: {
     switch (MI.getOperand(1).getIntrinsicID()) {
     default:
@@ -553,6 +649,12 @@
       OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
       break;
     }
+    case Intrinsic::amdgcn_wqm_vote: {
+      unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+      OpdsMapping[0] = OpdsMapping[2]
+        = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
+      break;
+    }
     }
     break;
   }
@@ -599,7 +701,7 @@
                     Op2Bank == AMDGPU::SGPRRegBankID &&
                     Op3Bank == AMDGPU::SGPRRegBankID;
     unsigned Bank = SGPRSrcs ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID;
-    Op1Bank = SGPRSrcs ? AMDGPU::SCCRegBankID : AMDGPU::SGPRRegBankID;
+    Op1Bank = SGPRSrcs ? AMDGPU::SCCRegBankID : AMDGPU::VCCRegBankID;
     OpdsMapping[0] = AMDGPU::getValueMapping(Bank, Size);
     OpdsMapping[1] = AMDGPU::getValueMapping(Op1Bank, 1);
     OpdsMapping[2] = AMDGPU::getValueMapping(Bank, Size);
@@ -609,6 +711,30 @@
 
   case AMDGPU::G_LOAD:
     return getInstrMappingForLoad(MI);
+
+  case AMDGPU::G_ATOMICRMW_XCHG:
+  case AMDGPU::G_ATOMICRMW_ADD:
+  case AMDGPU::G_ATOMICRMW_SUB:
+  case AMDGPU::G_ATOMICRMW_AND:
+  case AMDGPU::G_ATOMICRMW_OR:
+  case AMDGPU::G_ATOMICRMW_XOR:
+  case AMDGPU::G_ATOMICRMW_MAX:
+  case AMDGPU::G_ATOMICRMW_MIN:
+  case AMDGPU::G_ATOMICRMW_UMAX:
+  case AMDGPU::G_ATOMICRMW_UMIN:
+  case AMDGPU::G_ATOMIC_CMPXCHG: {
+    return getDefaultMappingAllVGPR(MI);
+  }
+  case AMDGPU::G_BRCOND: {
+    unsigned Bank = getRegBankID(MI.getOperand(0).getReg(), MRI, *TRI,
+                                 AMDGPU::SGPRRegBankID);
+    assert(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 1);
+    if (Bank != AMDGPU::SCCRegBankID)
+      Bank = AMDGPU::VCCRegBankID;
+
+    OpdsMapping[0] = AMDGPU::getValueMapping(Bank, 1);
+    break;
+  }
   }
 
   return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
diff --git a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index d48a665..d29f4bc 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -49,6 +49,8 @@
   bool isSALUMapping(const MachineInstr &MI) const;
   const InstructionMapping &getDefaultMappingSOP(const MachineInstr &MI) const;
   const InstructionMapping &getDefaultMappingVOP(const MachineInstr &MI) const;
+  const InstructionMapping &getDefaultMappingAllVGPR(
+    const MachineInstr &MI) const;
 public:
   AMDGPURegisterBankInfo(const TargetRegisterInfo &TRI);
 
diff --git a/lib/Target/AMDGPU/AMDGPURegisterBanks.td b/lib/Target/AMDGPU/AMDGPURegisterBanks.td
index 7f7f75f..570379a8 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterBanks.td
+++ b/lib/Target/AMDGPU/AMDGPURegisterBanks.td
@@ -15,4 +15,7 @@
   [VGPR_32, VReg_64, VReg_96, VReg_128, VReg_256, VReg_512]
 >;
 
-def SCCRegBank : RegisterBank <"SCC", [SCC_CLASS ]>;
+def SCCRegBank : RegisterBank <"SCC", [SCC_CLASS]>;
+
+// It is helpful to distinguish conditions from ordinary SGPRs.
+def VCCRegBank : RegisterBank <"VCC", [SReg_64]>;
diff --git a/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
index a861762..efe501c 100644
--- a/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -163,7 +163,7 @@
       // some casts between structs and non-structs, but we can't bitcast
       // directly between them.  directly bitcast between them.  Blender uses
       // some casts that look like { <3 x float> }* to <4 x float>*
-      if ((SrcEltTy->isStructTy() && (SrcEltTy->getNumContainedTypes() != 1)))
+      if ((SrcEltTy->isStructTy() && (SrcEltTy->getStructNumElements() != 1)))
         return false;
 
       // Clang emits OpenCL 3-vector type accesses with a bitcast to the
@@ -401,8 +401,8 @@
       if (Val->getType() != EltTy) {
         Type *EffectiveEltTy = EltTy;
         if (StructType *CT = dyn_cast<StructType>(EltTy)) {
-          assert(CT->getNumContainedTypes() == 1);
-          EffectiveEltTy = CT->getContainedType(0);
+          assert(CT->getNumElements() == 1);
+          EffectiveEltTy = CT->getElementType(0);
         }
 
         if (DL->getTypeSizeInBits(EffectiveEltTy) !=
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index f1acd72..ed0cc70 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -74,6 +74,9 @@
   // We want to be able to turn these off, but making this a subtarget feature
   // for SI has the unhelpful behavior that it unsets everything else if you
   // disable it.
+  //
+  // Similarly, we want enable-prt-strict-null to be on by default and not to
+  // unset everything else if it is disabled.
 
   SmallString<256> FullFS("+promote-alloca,+dx10-clamp,+load-store-opt,");
 
@@ -89,6 +92,8 @@
     FullFS += "-fp32-denormals,";
   }
 
+  FullFS += "+enable-prt-strict-null,"; // This is overridden by a disable in FS
+
   FullFS += FS;
 
   ParseSubtargetFeatures(GPU, FullFS);
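The override works because subtarget feature strings are applied left to right, so a user-supplied -enable-prt-strict-null appended after the default wins. A hypothetical parser sketch of just that ordering rule (the real parsing is done by ParseSubtargetFeatures):

#include <cstdio>
#include <sstream>
#include <string>

// Later tokens override earlier ones, matching feature-string semantics.
bool parsePRTStrictNull(const std::string &FS) {
  bool Enabled = false;
  std::stringstream SS(FS);
  std::string Tok;
  while (std::getline(SS, Tok, ',')) {
    if (Tok == "+enable-prt-strict-null")
      Enabled = true;
    else if (Tok == "-enable-prt-strict-null")
      Enabled = false;
  }
  return Enabled;
}

int main() {
  // Default on, but a user-supplied disable still wins.
  printf("%d\n", parsePRTStrictNull("+enable-prt-strict-null,")); // 1
  printf("%d\n", parsePRTStrictNull(
      "+enable-prt-strict-null,-enable-prt-strict-null")); // 0
}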
@@ -175,6 +180,7 @@
     EnableUnsafeDSOffsetFolding(false),
     EnableSIScheduler(false),
     EnableDS128(false),
+    EnablePRTStrictNull(false),
     DumpCode(false),
 
     FP64(false),
@@ -198,6 +204,7 @@
     HasDPP(false),
     HasR128A16(false),
     HasDLInsts(false),
+    HasDotInsts(false),
     EnableSRAMECC(false),
     FlatAddressSpace(false),
     FlatInstOffsets(false),
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 886aca4..5584759 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -326,6 +326,7 @@
   bool EnableUnsafeDSOffsetFolding;
   bool EnableSIScheduler;
   bool EnableDS128;
+  bool EnablePRTStrictNull;
   bool DumpCode;
 
   // Subtarget statically properties set by tablegen
@@ -353,6 +354,7 @@
   bool HasDPP;
   bool HasR128A16;
   bool HasDLInsts;
+  bool HasDotInsts;
   bool EnableSRAMECC;
   bool FlatAddressSpace;
   bool FlatInstOffsets;
@@ -576,6 +578,12 @@
     return getGeneration() < AMDGPUSubtarget::GFX9;
   }
 
+  /// \returns true if the target requires PRT strict NULL support (zero result
+  /// registers for sparse texture fetches).
+  bool usePRTStrictNull() const {
+    return EnablePRTStrictNull;
+  }
+
   bool hasAutoWaitcntBeforeBarrier() const {
     return AutoWaitcntBeforeBarrier;
   }
@@ -680,6 +688,10 @@
     return HasDLInsts;
   }
 
+  bool hasDotInsts() const {
+    return HasDotInsts;
+  }
+
   bool isSRAMECCEnabled() const {
     return EnableSRAMECC;
   }
@@ -817,6 +829,11 @@
     return getGeneration() != AMDGPUSubtarget::SOUTHERN_ISLANDS;
   }
 
+  /// \returns true if the subtarget supports DWORDX3 load/store instructions.
+  bool hasDwordx3LoadStores() const {
+    return CIInsts;
+  }
+
   bool hasSMovFedHazard() const {
     return getGeneration() >= AMDGPUSubtarget::GFX9;
   }
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 70d365f..e8cefdb 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -825,6 +825,7 @@
   addPass(&SIFixSGPRCopiesID);
   addPass(createSILowerI1CopiesPass());
   addPass(createSIFixupVectorISelPass());
+  addPass(createSIAddIMGInitPass());
   return false;
 }
 
diff --git a/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp b/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp
index 7a72071..c4e1efd 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp
@@ -34,7 +34,7 @@
     const GlobalObject *GO, SectionKind SK, const TargetMachine &TM) const {
   // Set metadata access for the explicit section
   StringRef SectionName = GO->getSection();
-  if (SectionName.startswith(".AMDGPU.metadata."))
+  if (SectionName.startswith(".AMDGPU.comment."))
     SK = SectionKind::getMetadata();
 
   return TargetLoweringObjectFileELF::getExplicitSectionGlobal(GO, SK, TM);
diff --git a/lib/Target/AMDGPU/CMakeLists.txt b/lib/Target/AMDGPU/CMakeLists.txt
index 7d12199..3933117 100644
--- a/lib/Target/AMDGPU/CMakeLists.txt
+++ b/lib/Target/AMDGPU/CMakeLists.txt
@@ -93,6 +93,7 @@
   R600OptimizeVectorRegisters.cpp
   R600Packetizer.cpp
   R600RegisterInfo.cpp
+  SIAddIMGInit.cpp
   SIAnnotateControlFlow.cpp
   SIDebuggerInsertNops.cpp
   SIFixSGPRCopies.cpp
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
index 07bef91..c85a1ea 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
@@ -46,11 +46,9 @@
   if (const auto *SymA = Target.getSymA()) {
     // SCRATCH_RSRC_DWORD[01] is a special global variable that represents
     // the scratch buffer.
-    if (SymA->getSymbol().getName() == "SCRATCH_RSRC_DWORD0")
+    if (SymA->getSymbol().getName() == "SCRATCH_RSRC_DWORD0" ||
+        SymA->getSymbol().getName() == "SCRATCH_RSRC_DWORD1")
       return ELF::R_AMDGPU_ABS32_LO;
-
-    if (SymA->getSymbol().getName() == "SCRATCH_RSRC_DWORD1")
-      return ELF::R_AMDGPU_ABS32_HI;
   }
 
   switch (Target.getAccessVariant()) {
diff --git a/lib/Target/AMDGPU/MIMGInstructions.td b/lib/Target/AMDGPU/MIMGInstructions.td
index 1462682..1c68dbd 100644
--- a/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/lib/Target/AMDGPU/MIMGInstructions.td
@@ -29,6 +29,7 @@
   bit Atomic = 0;
   bit AtomicX2 = 0; // (f)cmpswap
   bit Sampler = 0;
+  bit Gather4 = 0;
   bits<8> NumExtraArgs = 0;
   bit Gradients = 0;
   bit Coordinates = 1;
@@ -43,7 +44,7 @@
 def MIMGBaseOpcodesTable : GenericTable {
   let FilterClass = "MIMGBaseOpcode";
   let CppTypeName = "MIMGBaseOpcodeInfo";
-  let Fields = ["BaseOpcode", "Store", "Atomic", "AtomicX2", "Sampler",
+  let Fields = ["BaseOpcode", "Store", "Atomic", "AtomicX2", "Sampler", "Gather4",
                 "NumExtraArgs", "Gradients", "Coordinates", "LodOrClampOrMip",
                 "HasD16"];
   GenericEnum TypeOf_BaseOpcode = MIMGBaseOpcode;
@@ -179,6 +180,8 @@
     defm _V3 : MIMG_NoSampler_Src_Helper <op, asm, VReg_96, 0>;
     let VDataDwords = 4 in
     defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 0>;
+    let VDataDwords = 8 in
+    defm _V8 : MIMG_NoSampler_Src_Helper <op, asm, VReg_256, 0>;
   }
 }
 
@@ -411,6 +414,8 @@
     defm _V3 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_96>;
     let VDataDwords = 4 in
     defm _V4 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_128>;
+    let VDataDwords = 8 in
+    defm _V8 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_256>;
   }
 }
 
@@ -421,6 +426,7 @@
                         string asm = "image_gather4"#sample.LowerCaseMod> {
   def "" : MIMG_Sampler_BaseOpcode<sample> {
     let HasD16 = 1;
+    let Gather4 = 1;
   }
 
   let BaseOpcode = !cast<MIMGBaseOpcode>(NAME), WQM = wqm,
@@ -429,6 +435,8 @@
     defm _V2 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_64>; /* for packed D16 only */
     let VDataDwords = 4 in
     defm _V4 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_128, 1>;
+    let VDataDwords = 8 in
+    defm _V8 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_256>;
   }
 }
 
diff --git a/lib/Target/AMDGPU/SIAddIMGInit.cpp b/lib/Target/AMDGPU/SIAddIMGInit.cpp
new file mode 100644
index 0000000..69cafef
--- /dev/null
+++ b/lib/Target/AMDGPU/SIAddIMGInit.cpp
@@ -0,0 +1,181 @@
+//===-- SIAddIMGInit.cpp - Add any required IMG inits ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Any MIMG instructions that use tfe or lwe require an initialization of the
+/// result register that will be written in the case of a memory access failure.
+/// The required code is also added to tie this init code to the result of the
+/// img instruction.
+///
+//===----------------------------------------------------------------------===//
+//
+
+#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
+#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define DEBUG_TYPE "si-img-init"
+
+using namespace llvm;
+
+namespace {
+
+class SIAddIMGInit : public MachineFunctionPass {
+public:
+  static char ID;
+
+public:
+  SIAddIMGInit() : MachineFunctionPass(ID) {
+    initializeSIAddIMGInitPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+};
+
+} // End anonymous namespace.
+
+INITIALIZE_PASS(SIAddIMGInit, DEBUG_TYPE, "SI Add IMG Init", false, false)
+
+char SIAddIMGInit::ID = 0;
+
+char &llvm::SIAddIMGInitID = SIAddIMGInit::ID;
+
+FunctionPass *llvm::createSIAddIMGInitPass() { return new SIAddIMGInit(); }
+
+bool SIAddIMGInit::runOnMachineFunction(MachineFunction &MF) {
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const SIInstrInfo *TII = ST.getInstrInfo();
+  const SIRegisterInfo *RI = ST.getRegisterInfo();
+  bool Changed = false;
+
+  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
+       ++BI) {
+    MachineBasicBlock &MBB = *BI;
+    MachineBasicBlock::iterator I, Next;
+    for (I = MBB.begin(); I != MBB.end(); I = Next) {
+      Next = std::next(I);
+      MachineInstr &MI = *I;
+
+      auto Opcode = MI.getOpcode();
+      if (TII->isMIMG(Opcode) && !MI.mayStore()) {
+        MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe);
+        MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe);
+        MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16);
+
+        // Check for instructions that don't have tfe or lwe fields.
+        // There shouldn't be any at this point.
+        assert((TFE && LWE) && "Expected tfe and lwe operands in instruction");
+
+        unsigned TFEVal = TFE->getImm();
+        unsigned LWEVal = LWE->getImm();
+        unsigned D16Val = D16 ? D16->getImm() : 0;
+
+        if (TFEVal || LWEVal) {
+          // At least one of TFE or LWE is non-zero.
+          // We have to insert a suitable initialization of the result value and
+          // tie this to the dest of the image instruction.
+
+          const DebugLoc &DL = MI.getDebugLoc();
+
+          int DstIdx =
+              AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
+
+          // Calculate which dword we have to initialize to 0.
+          MachineOperand *MO_Dmask =
+              TII->getNamedOperand(MI, AMDGPU::OpName::dmask);
+
+          // Check that the dmask operand is found.
+          assert(MO_Dmask && "Expected dmask operand in instruction");
+
+          unsigned dmask = MO_Dmask->getImm();
+          // Determine the number of active lanes, taking into account the
+          // Gather4 special case.
+          unsigned ActiveLanes =
+              TII->isGather4(Opcode) ? 4 : countPopulation(dmask);
+
+          // Subreg indices are counted from 1.
+          // For D16, we want the next whole VGPR after the write data.
+          static_assert(AMDGPU::sub0 == 1 && AMDGPU::sub4 == 5, "Subreg indices different from expected");
+
+          bool Packed = !ST.hasUnpackedD16VMem();
+
+          unsigned InitIdx =
+              D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1;
+
+          // Abandon the attempt if the dst size isn't large enough; this is
+          // in fact an error, but it is picked up elsewhere and reported
+          // correctly.
+          uint32_t DstSize =
+              RI->getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
+          if (DstSize < InitIdx)
+            continue;
+
+          // Create a register for the initialization value.
+          unsigned PrevDst =
+              MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
+          unsigned NewDst = 0; // Final initialized value will be in here
+
+          // If the PRTStrictNull feature is enabled (the default), initialize
+          // all the result registers to 0, otherwise just the error indication
+          // register (VGPRn+1).
+          unsigned SizeLeft = ST.usePRTStrictNull() ? InitIdx : 1;
+          unsigned CurrIdx = ST.usePRTStrictNull() ? 1 : InitIdx;
+
+          if (DstSize == 1) {
+            // In this case we can just initialize the result directly
+            BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), PrevDst)
+                .addImm(0);
+            NewDst = PrevDst;
+          } else {
+            BuildMI(MBB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), PrevDst);
+            for (; SizeLeft; SizeLeft--, CurrIdx++) {
+              NewDst =
+                  MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
+              // Initialize dword
+              unsigned SubReg =
+                  MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+              BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg)
+                  .addImm(0);
+              // Insert into the super-reg
+              BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewDst)
+                  .addReg(PrevDst)
+                  .addReg(SubReg)
+                  .addImm(CurrIdx);
+
+              PrevDst = NewDst;
+            }
+          }
+
+          // Add as an implicit operand
+          MachineInstrBuilder(MF, MI).addReg(NewDst, RegState::Implicit);
+
+          // Tie the just added implicit operand to the dst
+          MI.tieOperands(DstIdx, MI.getNumOperands() - 1);
+
+          Changed = true;
+        }
+      }
+    }
+  }
+
+  return Changed;
+}
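A standalone check of the InitIdx arithmetic in the new pass, under the assumptions its comments state: subreg indices are 1-based, gather4 always writes four lanes, and packed D16 halves the data dwords:

#include <bitset>
#include <cassert>

// The error-indication VGPR lands in the dword just past the written data,
// so for packed D16 the data occupies (ActiveLanes + 1) / 2 dwords; the
// trailing "+ 1" accounts for subreg indices starting at 1.
unsigned initIdx(unsigned DMask, bool Gather4, bool D16, bool Packed) {
  unsigned ActiveLanes =
      Gather4 ? 4 : (unsigned)std::bitset<32>(DMask).count();
  return (D16 && Packed) ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1;
}

int main() {
  assert(initIdx(0xF, false, false, false) == 5); // 4 data dwords + TFE dword
  assert(initIdx(0xF, false, true, true) == 3);   // 4 halves pack into 2 dwords
  assert(initIdx(0x1, true, false, false) == 5);  // gather4 always writes 4
}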
diff --git a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index 90f430d..98e9ea6 100644
--- a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -155,7 +155,7 @@
 /// Is the branch condition uniform or did the StructurizeCFG pass
 /// consider it as such?
 bool SIAnnotateControlFlow::isUniform(BranchInst *T) {
-  return DA->isUniform(T->getCondition()) ||
+  return DA->isUniform(T) ||
          T->getMetadata("structurizecfg.uniform") != nullptr;
 }
 
diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp
index bd0bc73..f4e8669 100644
--- a/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -854,13 +854,17 @@
     }
   } else {
     // Folding register.
+    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
     for (MachineRegisterInfo::use_iterator
            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
          Use != E; ++Use) {
-      MachineInstr *UseMI = Use->getParent();
+      UsesToProcess.push_back(Use);
+    }
+    for (auto U : UsesToProcess) {
+      MachineInstr *UseMI = U->getParent();
 
-      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
-                  FoldList, CopiesToReplace);
+      foldOperand(OpToFold, UseMI, U.getOperandNo(), FoldList,
+                  CopiesToReplace);
     }
   }
 
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 12d0bc5..0ba9216 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -216,6 +216,7 @@
 
   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
+  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
 
   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
@@ -813,6 +814,47 @@
     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
 }
 
+static MVT memVTFromAggregate(Type *Ty) {
+  // Only limited forms of aggregate type currently expected.
+  assert(Ty->isStructTy() && "Expected struct type");
+
+
+  Type *ElementType = nullptr;
+  unsigned NumElts;
+  if (Ty->getContainedType(0)->isVectorTy()) {
+    VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
+    ElementType = VecComponent->getElementType();
+    NumElts = VecComponent->getNumElements();
+  } else {
+    ElementType = Ty->getContainedType(0);
+    NumElts = 1;
+  }
+
+  assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type");
+
+  // Calculate the size of the memVT type from the aggregate
+  unsigned Pow2Elts = 0;
+  unsigned ElementSize;
+  switch (ElementType->getTypeID()) {
+    default:
+      llvm_unreachable("Unknown type!");
+    case Type::IntegerTyID:
+      ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
+      break;
+    case Type::HalfTyID:
+      ElementSize = 16;
+      break;
+    case Type::FloatTyID:
+      ElementSize = 32;
+      break;
+  }
+  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
+  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);
+
+  return MVT::getVectorVT(MVT::getVT(ElementType, false),
+                          Pow2Elts);
+}
+
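A worked instance of memVTFromAggregate: for a return type of { <4 x float>, i32 } (four data elements plus the TFE/LWE status dword), NumElts = 4 and ElementSize = 32, so AdditionalElts = 1 and Pow2Elts = 1 << Log2_32_Ceil(4 + 1) = 8, giving MVT::v8f32. For a packed D16 result such as { <4 x half>, i32 }, ElementSize = 16 and AdditionalElts = 2 (the 32-bit status takes two half-sized slots), so Pow2Elts = 1 << Log2_32_Ceil(4 + 2) = 8 and the memVT is MVT::v8f16.
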
 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &CI,
                                           MachineFunction &MF,
@@ -840,7 +882,12 @@
     Info.flags = MachineMemOperand::MODereferenceable;
     if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
       Info.opc = ISD::INTRINSIC_W_CHAIN;
-      Info.memVT = MVT::getVT(CI.getType());
+      Info.memVT = MVT::getVT(CI.getType(), true);
+      if (Info.memVT == MVT::Other) {
+        // Some intrinsics return an aggregate type; special-case these to
+        // work out the correct memVT.
+        Info.memVT = memVTFromAggregate(CI.getType());
+      }
       Info.flags |= MachineMemOperand::MOLoad;
     } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
       Info.opc = ISD::INTRINSIC_VOID;
@@ -4613,6 +4660,109 @@
   return Value == 0;
 }
 
+// Reconstruct the required return value for an image load intrinsic.
+// This is more complicated due to the optional use of TexFailCtrl, which
+// means the required return type is an aggregate.
+static SDValue constructRetValue(SelectionDAG &DAG,
+                                 MachineSDNode *Result,
+                                 ArrayRef<EVT> ResultTypes,
+                                 bool IsTexFail, bool Unpacked, bool IsD16,
+                                 int DMaskPop, int NumVDataDwords,
+                                 const SDLoc &DL, LLVMContext &Context) {
+  // Determine the required return type. This is the same regardless of the
+  // IsTexFail flag.
+  EVT ReqRetVT = ResultTypes[0];
+  EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
+  int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
+  EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
+  EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
+                                           : AdjEltVT
+                       : ReqRetVT;
+
+  // Extract the data part of the result and bitcast it to the same type as
+  // the required return type.
+  int NumElts;
+  if (IsD16 && !Unpacked)
+    NumElts = NumVDataDwords << 1;
+  else
+    NumElts = NumVDataDwords;
+
+  EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
+                           : AdjEltVT;
+
+  // Special case for v8f16. Rather than add support for this, use v4i32 to
+  // extract the data elements.
+  bool V8F16Special = false;
+  if (CastVT == MVT::v8f16) {
+    CastVT = MVT::v4i32;
+    DMaskPop >>= 1;
+    ReqRetNumElts >>= 1;
+    V8F16Special = true;
+    AdjVT = MVT::v2i32;
+  }
+
+  SDValue N = SDValue(Result, 0);
+  SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
+
+  // Iterate over the result
+  SmallVector<SDValue, 4> BVElts;
+
+  if (CastVT.isVector()) {
+    DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
+  } else {
+    BVElts.push_back(CastRes);
+  }
+  int ExtraElts = ReqRetNumElts - DMaskPop;
+  while (ExtraElts--)
+    BVElts.push_back(DAG.getUNDEF(AdjEltVT));
+
+  SDValue PreTFCRes;
+  if (ReqRetNumElts > 1) {
+    SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
+    if (IsD16 && Unpacked)
+      PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
+    else
+      PreTFCRes = NewVec;
+  } else {
+    PreTFCRes = BVElts[0];
+  }
+
+  if (V8F16Special)
+    PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
+
+  if (!IsTexFail) {
+    if (Result->getNumValues() > 1)
+      return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
+    else
+      return PreTFCRes;
+  }
+
+  // Extract the TexFail result and insert it into the aggregate return.
+  SmallVector<SDValue, 1> TFCElt;
+  DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
+  SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
+  return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
+}
+
+static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
+                         SDValue *LWE, bool &IsTexFail) {
+  auto TexFailCtrlConst = dyn_cast<ConstantSDNode>(TexFailCtrl.getNode());
+  if (!TexFailCtrlConst)
+    return false;
+
+  uint64_t Value = TexFailCtrlConst->getZExtValue();
+  if (Value) {
+    IsTexFail = true;
+  }
+
+  SDLoc DL(TexFailCtrlConst);
+  *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
+  Value &= ~(uint64_t)0x1;
+  *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
+  Value &= ~(uint64_t)0x2;
+
+  return Value == 0;
+}
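
A few worked inputs show parseTexFail's contract: TexFailCtrl = 3 sets both TFE (bit 0) and LWE (bit 1), marks IsTexFail and returns true; TexFailCtrl = 0 leaves both flags clear and still returns true; TexFailCtrl = 4 strips bits 0 and 1, leaves a nonzero residue and returns false, so lowerImage gives up rather than encode unknown control bits. A non-constant TexFailCtrl likewise returns false.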
+
 SDValue SITargetLowering::lowerImage(SDValue Op,
                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
                                      SelectionDAG &DAG) const {
@@ -4626,13 +4776,17 @@
       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
   unsigned IntrOpcode = Intr->BaseOpcode;
 
-  SmallVector<EVT, 2> ResultTypes(Op->value_begin(), Op->value_end());
+  SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
+  SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
   bool IsD16 = false;
   bool IsA16 = false;
   SDValue VData;
   int NumVDataDwords;
+  bool AdjustRetType = false;
+
   unsigned AddrIdx; // Index of first address argument
   unsigned DMask;
+  unsigned DMaskLanes = 0;
 
   if (BaseOpcode->Atomic) {
     VData = Op.getOperand(2);
@@ -4655,7 +4809,12 @@
       AddrIdx = 3;
     }
   } else {
-    unsigned DMaskIdx;
+    unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
+    auto DMaskConst = dyn_cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
+    if (!DMaskConst)
+      return Op;
+    DMask = DMaskConst->getZExtValue();
+    DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
 
     if (BaseOpcode->Store) {
       VData = Op.getOperand(2);
@@ -4671,37 +4830,32 @@
       }
 
       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
-      DMaskIdx = 3;
     } else {
-      MVT LoadVT = Op.getSimpleValueType();
+      // Work out the number of dwords based on the dmask popcount, the
+      // underlying type, and whether packing is supported.
+      MVT LoadVT = ResultTypes[0].getSimpleVT();
       if (LoadVT.getScalarType() == MVT::f16) {
         if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
             !BaseOpcode->HasD16)
           return Op; // D16 is unsupported for this instruction
 
         IsD16 = true;
-        if (LoadVT.isVector() && Subtarget->hasUnpackedD16VMem())
-          ResultTypes[0] = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
       }
 
-      NumVDataDwords = (ResultTypes[0].getSizeInBits() + 31) / 32;
-      DMaskIdx = isa<MemSDNode>(Op) ? 2 : 1;
-    }
+      // Confirm that the return type is large enough for the dmask specified.
+      if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
+          (!LoadVT.isVector() && DMaskLanes > 1))
+        return Op;
 
-    auto DMaskConst = dyn_cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
-    if (!DMaskConst)
-      return Op;
+      if (IsD16 && !Subtarget->hasUnpackedD16VMem())
+        NumVDataDwords = (DMaskLanes + 1) / 2;
+      else
+        NumVDataDwords = DMaskLanes;
+
+      AdjustRetType = true;
+    }
 
     AddrIdx = DMaskIdx + 1;
-    DMask = DMaskConst->getZExtValue();
-    if (!DMask && !BaseOpcode->Store) {
-      // Eliminate no-op loads. Stores with dmask == 0 are *not* no-op: they
-      // store the channels' default values.
-      SDValue Undef = DAG.getUNDEF(Op.getValueType());
-      if (isa<MemSDNode>(Op))
-        return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
-      return Undef;
-    }
   }
 
   unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
@@ -4780,11 +4934,53 @@
     CtrlIdx = AddrIdx + NumVAddrs + 3;
   }
 
+  SDValue TFE;
+  SDValue LWE;
   SDValue TexFail = Op.getOperand(CtrlIdx);
-  auto TexFailConst = dyn_cast<ConstantSDNode>(TexFail.getNode());
-  if (!TexFailConst || TexFailConst->getZExtValue() != 0)
+  bool IsTexFail = false;
+  if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
     return Op;
 
+  if (IsTexFail) {
+    if (!DMaskLanes) {
+      // Expecting to get an error flag since TFC is on, and dmask is 0.
+      // Force dmask to be at least 1, otherwise the instruction will fail.
+      DMask = 0x1;
+      DMaskLanes = 1;
+      NumVDataDwords = 1;
+    }
+    NumVDataDwords += 1;
+    AdjustRetType = true;
+  }
+
+  // Something earlier has tagged that the return type needs adjusting.
+  // This happens if the instruction is a load or has set TexFailCtrl flags.
+  if (AdjustRetType) {
+    // NumVDataDwords reflects the true number of dwords required in the
+    // return type.
+    if (DMaskLanes == 0 && !BaseOpcode->Store) {
+      // This is a no-op load, so it can be eliminated.
+      SDValue Undef = DAG.getUNDEF(Op.getValueType());
+      if (isa<MemSDNode>(Op))
+        return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
+      return Undef;
+    }
+
+    // Have to use a power-of-2 number of dwords.
+    NumVDataDwords = 1 << Log2_32_Ceil(NumVDataDwords);
+
+    EVT NewVT = NumVDataDwords > 1 ?
+                  EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
+                : MVT::f32;
+
+    ResultTypes[0] = NewVT;
+    if (ResultTypes.size() == 3) {
+      // The original result was an aggregate type used for the TexFailCtrl
+      // results. The actual instruction returns as a vector type, which has
+      // now been created; remove the aggregate result.
+      ResultTypes.erase(&ResultTypes[1]);
+    }
+  }
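
For a concrete instance of this adjustment: a load with dmask 0b0111 has DMaskLanes = 3, and enabling TFE bumps NumVDataDwords to 4, already a power of two, so the node is built with a v4f32 result. With dmask 0b1111 plus TFE, 5 dwords round up to 1 << Log2_32_Ceil(5) = 8 and the result becomes v8f32; constructRetValue later pulls out only the lanes that were actually requested.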
+
   SDValue GLC;
   SDValue SLC;
   if (BaseOpcode->Atomic) {
@@ -4809,8 +5005,8 @@
   Ops.push_back(SLC);
   Ops.push_back(IsA16 &&  // a16 or r128
                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
-  Ops.push_back(False); // tfe
-  Ops.push_back(False); // lwe
+  Ops.push_back(TFE); // tfe
+  Ops.push_back(LWE); // lwe
   Ops.push_back(DimInfo->DA ? True : False);
   if (BaseOpcode->HasD16)
     Ops.push_back(IsD16 ? True : False);
@@ -4838,11 +5034,12 @@
     SmallVector<SDValue, 1> Elt;
     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
-  } else if (IsD16 && !BaseOpcode->Store) {
-    MVT LoadVT = Op.getSimpleValueType();
-    SDValue Adjusted = adjustLoadValueTypeImpl(
-        SDValue(NewNode, 0), LoadVT, DL, DAG, Subtarget->hasUnpackedD16VMem());
-    return DAG.getMergeValues({Adjusted, SDValue(NewNode, 1)}, DL);
+  } else if (!BaseOpcode->Store) {
+    return constructRetValue(DAG, NewNode,
+                             OrigResultTypes, IsTexFail,
+                             Subtarget->hasUnpackedD16VMem(), IsD16,
+                             DMaskLanes, NumVDataDwords, DL,
+                             *DAG.getContext());
   }
 
   return SDValue(NewNode, 0);
@@ -5158,6 +5355,11 @@
                        Denominator, Numerator);
   }
   case Intrinsic::amdgcn_icmp: {
+    // There is a Pat that handles this variant, so return it as-is.
+    if (Op.getOperand(1).getValueType() == MVT::i1 &&
+        Op.getConstantOperandVal(2) == 0 &&
+        Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
+      return Op;
     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
   }
   case Intrinsic::amdgcn_fcmp: {
@@ -6005,11 +6207,13 @@
   const unsigned MaxImm = 4095;
   SDValue N0 = Offset;
   ConstantSDNode *C1 = nullptr;
-  if (N0.getOpcode() == ISD::ADD) {
-    if ((C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))))
-      N0 = N0.getOperand(0);
-  } else if ((C1 = dyn_cast<ConstantSDNode>(N0)))
+
+  if ((C1 = dyn_cast<ConstantSDNode>(N0)))
     N0 = SDValue();
+  else if (DAG.isBaseWithConstantOffset(N0)) {
+    C1 = cast<ConstantSDNode>(N0.getOperand(1));
+    N0 = N0.getOperand(0);
+  }
 
   if (C1) {
     unsigned ImmOffset = C1->getZExtValue();
@@ -8383,7 +8587,7 @@
   EVT VT = N->getValueType(0);
   SDLoc SL(N);
 
-  if (!Subtarget->hasDLInsts() || VT != MVT::f32)
+  if (!Subtarget->hasDotInsts() || VT != MVT::f32)
     return SDValue();
 
   // FMA((F32)S0.x, (F32)S1. x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
@@ -8584,8 +8788,7 @@
   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                         !DCI.isBeforeLegalizeOps());
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
-      TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
+  if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
     DCI.CommitTargetLoweringOpt(TLO);
   }
 
@@ -8752,6 +8955,7 @@
   case AMDGPU::sub1: return 1;
   case AMDGPU::sub2: return 2;
   case AMDGPU::sub3: return 3;
+  case AMDGPU::sub4: return 4; // Possible with TFE/LWE
   }
 }
 
@@ -8765,11 +8969,16 @@
   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
     return Node; // not implemented for D16
 
-  SDNode *Users[4] = { nullptr };
+  SDNode *Users[5] = { nullptr };
   unsigned Lane = 0;
   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
   unsigned NewDmask = 0;
+  unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
+  unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
+  bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
+                 Node->getConstantOperandVal(LWEIdx);
+  unsigned TFCLane = 0;
   bool HasChain = Node->getNumValues() > 1;
 
   if (OldDmask == 0) {
@@ -8777,6 +8986,12 @@
     return Node;
   }
 
+  unsigned OldBitsSet = countPopulation(OldDmask);
+  // Work out which is the TFE/LWE lane if that is enabled.
+  if (UsesTFC) {
+    TFCLane = OldBitsSet;
+  }
+
   // Try to figure out the used register components
   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
        I != E; ++I) {
@@ -8796,28 +9011,49 @@
     // set, etc.
     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
 
-    // Set which texture component corresponds to the lane.
-    unsigned Comp;
-    for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
-      Comp = countTrailingZeros(Dmask);
-      Dmask &= ~(1 << Comp);
+    // Check if the use is for the TFE/LWE generated result at VGPRn+1.
+    if (UsesTFC && Lane == TFCLane) {
+      Users[Lane] = *I;
+    } else {
+      // Set which texture component corresponds to the lane.
+      unsigned Comp;
+      for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
+        Comp = countTrailingZeros(Dmask);
+        Dmask &= ~(1 << Comp);
+      }
+
+      // Abort if we have more than one user per component.
+      if (Users[Lane])
+        return Node;
+
+      Users[Lane] = *I;
+      NewDmask |= 1 << Comp;
     }
-
-    // Abort if we have more than one user per component
-    if (Users[Lane])
-      return Node;
-
-    Users[Lane] = *I;
-    NewDmask |= 1 << Comp;
   }
 
+  // Don't allow 0 dmask, as hardware assumes one channel enabled.
+  bool NoChannels = !NewDmask;
+  if (NoChannels) {
+    // If the original dmask has one channel, then there is nothing to do.
+    if (OldBitsSet == 1)
+      return Node;
+    // Use an arbitrary dmask, as one is required for the instruction to work.
+    NewDmask = 1;
+  }
   // Abort if there's no change
   if (NewDmask == OldDmask)
     return Node;
 
   unsigned BitsSet = countPopulation(NewDmask);
 
-  int NewOpcode = AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), BitsSet);
+  // Check for TFE or LWE - increase the number of channels by one to account
+  // for the extra return value.
+  // This will need adjustment for D16 if it is also handled in
+  // adjustWritemask (this function), but at present D16 is excluded.
+  unsigned NewChannels = BitsSet + UsesTFC;
+
+  int NewOpcode =
+      AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
   assert(NewOpcode != -1 &&
          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
          "failed to find equivalent MIMG op");
@@ -8830,8 +9066,9 @@
 
   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
 
-  MVT ResultVT = BitsSet == 1 ?
-    SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
+  MVT ResultVT = NewChannels == 1 ?
+    SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
+                           NewChannels == 5 ? 8 : NewChannels);
   SDVTList NewVTList = HasChain ?
     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
 
@@ -8845,7 +9082,7 @@
     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
   }
 
-  if (BitsSet == 1) {
+  if (NewChannels == 1) {
     assert(Node->hasNUsesOfValue(1, 0));
     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
                                       SDLoc(Node), Users[Lane]->getValueType(0),
@@ -8855,19 +9092,24 @@
   }
 
   // Update the users of the node with the new indices
-  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
+  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
     SDNode *User = Users[i];
-    if (!User)
-      continue;
-
-    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
-    DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
+    if (!User) {
+      // Handle the special case of NoChannels. We set NewDmask to 1 above, but
+      // Users[0] is still nullptr because channel 0 doesn't really have a use.
+      if (i || !NoChannels)
+        continue;
+    } else {
+      SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
+      DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
+    }
 
     switch (Idx) {
     default: break;
     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
+    case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
     }
   }
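
A worked example of the dmask rewrite when TFC is in use: with OldDmask = 0b1101 (three data channels), TFCLane = OldBitsSet = 3. If the only users extract sub0 (the first data channel) and sub3 (the TFE/LWE status slot), NewDmask becomes 0b0001, BitsSet = 1, and NewChannels = 1 + 1 = 2, so the node is rewritten to the two-channel MIMG variant with a two-element result: one dword of data followed by the status dword.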
 
diff --git a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 7967404..afc0b44 100644
--- a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1216,6 +1216,9 @@
       StrictDom = true;
   }
 
+  VgprUB = std::max(getMaxVGPR(), Other.getMaxVGPR());
+  SgprUB = std::max(getMaxSGPR(), Other.getMaxSGPR());
+
   return StrictDom;
 }
 
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index b7c4eed..2370d5f 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2972,6 +2972,42 @@
     }
   }
 
+  // Verify MIMG
+  if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
+    // Ensure that the return type used is large enough for all the options
+    // being used. TFE/LWE require an extra result register.
+    const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
+    if (DMask) {
+      uint64_t DMaskImm = DMask->getImm();
+      uint32_t RegCount =
+          isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
+      const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
+      const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
+      const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
+
+      // Adjust for packed 16-bit values.
+      if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
+        RegCount >>= 1;
+
+      // Adjust if using LWE or TFE
+      if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
+        RegCount += 1;
+
+      const uint32_t DstIdx =
+          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
+      const MachineOperand &Dst = MI.getOperand(DstIdx);
+      if (Dst.isReg()) {
+        const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
+        uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
+        if (RegCount > DstSize) {
+          ErrInfo = "MIMG instruction returns too many registers for dst "
+                    "register class";
+          return false;
+        }
+      }
+    }
+  }
+
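To make the check concrete: with dmask = 0xf the count starts at RegCount = 4; a packed D16 load halves it to 2, and an enabled TFE or LWE bit adds one back, giving 3. The vdata register class must then be at least 96 bits wide (DstSize >= 3), so pairing these options with a 64-bit destination class trips the "returns too many registers" error above.
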
   // Verify VOP*. Ignore multiple sgpr operands on writelane.
   if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
       && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
index 5f78a10..b6b00c2 100644
--- a/lib/Target/AMDGPU/SIInstructions.td
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -583,6 +583,11 @@
 
   // TODO: we could add more variants for other types of conditionals
 
+def : Pat <
+  (int_amdgcn_icmp i1:$src, (i1 0), (i32 33)),
+  (COPY $src) // Return the SGPRs representing i1 src
+>;
+
 //===----------------------------------------------------------------------===//
 // VOP1 Patterns
 //===----------------------------------------------------------------------===//
@@ -852,6 +857,8 @@
 def : BitConvert <v2f32, f64, VReg_64>;
 def : BitConvert <f64, v2i32, VReg_64>;
 def : BitConvert <v2i32, f64, VReg_64>;
+def : BitConvert <v4i16, v4f16, VReg_64>;
+def : BitConvert <v4f16, v4i16, VReg_64>;
 
 // FIXME: Make SGPR
 def : BitConvert <v2i32, v4f16, VReg_64>;
diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index d755f76..be291b1 100644
--- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -160,7 +160,7 @@
   bool OptimizeAgain;
 
   static bool offsetsCanBeCombined(CombineInfo &CI);
-  static bool widthsFit(const CombineInfo &CI);
+  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
   static unsigned getNewOpcode(const CombineInfo &CI);
   static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
   const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
@@ -367,11 +367,12 @@
   return false;
 }
 
-bool SILoadStoreOptimizer::widthsFit(const CombineInfo &CI) {
+bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
+                                     const CombineInfo &CI) {
   const unsigned Width = (CI.Width0 + CI.Width1);
   switch (CI.InstClass) {
   default:
-    return Width <= 4;
+    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
   case S_BUFFER_LOAD_IMM:
     switch (Width) {
     default:
@@ -645,7 +646,7 @@
       // We also need to go through the list of instructions that we plan to
       // move and make sure they are all safe to move down past the merged
       // instruction.
-      if (widthsFit(CI) && offsetsCanBeCombined(CI))
+      if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
         if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
           return true;
     }
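
The new subtarget parameter matters for the default case of widthsFit: merging a one-dword with a two-dword access gives Width == 3, which is only encodable as an x3 load/store where hasDwordx3LoadStores() holds; on subtargets without the x3 variants the merge is now rejected instead of producing an unencodable instruction.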
@@ -1112,6 +1113,7 @@
   BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
           TII->get(AMDGPU::S_MOV_B32), Reg)
     .addImm(Val);
+  (void)Mov;
   LLVM_DEBUG(dbgs() << "    "; Mov->dump());
   return MachineOperand::CreateReg(Reg, false);
 }
@@ -1146,6 +1148,7 @@
       .addReg(CarryReg, RegState::Define)
       .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
     .add(OffsetLo);
+  (void)LoHalf;
   LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););
 
   MachineInstr *HiHalf =
@@ -1154,6 +1157,7 @@
     .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
     .add(OffsetHi)
     .addReg(CarryReg, RegState::Kill);
+  (void)HiHalf;
   LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););
 
   unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
@@ -1163,6 +1167,7 @@
       .addImm(AMDGPU::sub0)
       .addReg(DestSub1)
       .addImm(AMDGPU::sub1);
+  (void)FullBase;
   LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);
 
   return FullDestReg;
@@ -1245,8 +1250,6 @@
   if (!Src1->isImm())
     return;
 
-  assert(isInt<32>(*Offset0P) && isInt<32>(Src1->getImm())
-         && "Expected 32bit immediate!!!");
   uint64_t Offset1 = Src1->getImm();
   BaseHi = *Src0;
 
@@ -1366,8 +1369,8 @@
     AM.HasBaseReg = true;
     AM.BaseOffs = Dist;
     if (TLI->isLegalGlobalAddressingMode(AM) &&
-        (uint32_t)abs(Dist) > MaxDist) {
-      MaxDist = abs(Dist);
+        (uint32_t)std::abs(Dist) > MaxDist) {
+      MaxDist = std::abs(Dist);
 
       AnchorAddr = MAddrNext;
       AnchorInst = &MINext;
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.td b/lib/Target/AMDGPU/SIRegisterInfo.td
index 80bc180..c625ecc 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -459,15 +459,15 @@
 // Requires 2 s_mov_b64 to copy
 let CopyCost = 2 in {
 
-def SGPR_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v16i8, v2i64], 32, (add SGPR_128Regs)> {
+def SGPR_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v2i64], 32, (add SGPR_128Regs)> {
   let AllocationPriority = 10;
 }
 
-def TTMP_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v16i8, v2i64], 32, (add TTMP_128Regs)> {
+def TTMP_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v2i64], 32, (add TTMP_128Regs)> {
   let isAllocatable = 0;
 }
 
-def SReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v16i8, v2i64, v2f64], 32,
+def SReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v2i64, v2f64], 32,
   (add SGPR_128, TTMP_128)> {
   let AllocationPriority = 10;
 }
diff --git a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 1ef7da3..20123ed 100644
--- a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -184,6 +184,7 @@
   bool Atomic;
   bool AtomicX2;
   bool Sampler;
+  bool Gather4;
 
   uint8_t NumExtraArgs;
   bool Gradients;
diff --git a/lib/Target/AMDGPU/VOP3PInstructions.td b/lib/Target/AMDGPU/VOP3PInstructions.td
index 0d25a86..91b4558 100644
--- a/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -250,7 +250,7 @@
   (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))
 >;
 
-let SubtargetPredicate = HasDLInsts in {
+let SubtargetPredicate = HasDotInsts in {
 
 def V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16", VOP3_Profile<VOP_F32_V2F16_V2F16_F32>>;
 def V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16", VOP3_Profile<VOP_I32_V2I16_V2I16_I32>>;
@@ -302,7 +302,7 @@
                       (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
     (!cast<VOP3PInst>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
 
-} // End SubtargetPredicate = HasDLInsts
+} // End SubtargetPredicate = HasDotInsts
 
 multiclass VOP3P_Real_vi<bits<10> op> {
   def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
@@ -352,7 +352,7 @@
 }
 
 
-let SubtargetPredicate = HasDLInsts in {
+let SubtargetPredicate = HasDotInsts in {
 
 defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x3a3>;
 defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x3a6>;
@@ -362,4 +362,4 @@
 defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x3aa>;
 defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x3ab>;
 
-} // End SubtargetPredicate = HasDLInsts
+} // End SubtargetPredicate = HasDotInsts
diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td
index b71a098..3db60f1 100644
--- a/lib/Target/ARM/ARM.td
+++ b/lib/Target/ARM/ARM.td
@@ -365,8 +365,8 @@
 
 // Armv8.5-A extensions
 
-def FeatureSpecCtrl : SubtargetFeature<"specctrl", "HasSpecCtrl", "true",
-  "Enable speculation control barrier" >;
+def FeatureSB       : SubtargetFeature<"sb", "HasSB", "true",
+  "Enable v8.5a Speculation Barrier" >;
 
 //===----------------------------------------------------------------------===//
 // ARM architecture class
@@ -459,7 +459,7 @@
 
 def HasV8_5aOps   : SubtargetFeature<"v8.5a", "HasV8_5aOps", "true",
                                    "Support ARM v8.5a instructions",
-                                   [HasV8_4aOps, FeatureSpecCtrl]>;
+                                   [HasV8_4aOps, FeatureSB]>;
 
 //===----------------------------------------------------------------------===//
 // ARM Processor subtarget features.
@@ -1078,7 +1078,9 @@
 def : ProcNoItin<"exynos-m1",                           [ARMv8a, ProcExynos]>;
 def : ProcNoItin<"exynos-m2",                           [ARMv8a, ProcExynos]>;
 def : ProcNoItin<"exynos-m3",                           [ARMv8a, ProcExynos]>;
-def : ProcNoItin<"exynos-m4",                           [ARMv8a, ProcExynos]>;
+def : ProcNoItin<"exynos-m4",                           [ARMv82a, ProcExynos,
+                                                         FeatureFullFP16,
+                                                         FeatureDotProd]>;
 
 def : ProcNoItin<"kryo",                                [ARMv8a, ProcKryo,
                                                          FeatureHWDivThumb,
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index fd3d10a..a50abfd 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -2951,7 +2951,8 @@
   unsigned ResultReg = MI->getOperand(0).getReg();
   if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
     return false;
-  MI->eraseFromParent();
+  MachineBasicBlock::iterator I(MI);
+  removeDeadCode(I, std::next(I));
   return true;
 }
 
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index 2417166..a9d87ce 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -79,12 +79,11 @@
     : TargetFrameLowering(StackGrowsDown, sti.getStackAlignment(), 0, 4),
       STI(sti) {}
 
-bool ARMFrameLowering::noFramePointerElim(const MachineFunction &MF) const {
+bool ARMFrameLowering::keepFramePointer(const MachineFunction &MF) const {
   // iOS always has a FP for backtracking, force other targets to keep their FP
   // when doing FastISel. The emitted code is currently superior, and in cases
   // like test-suite's lencod FastISel isn't quite correct when FP is eliminated.
-  return TargetFrameLowering::noFramePointerElim(MF) ||
-         MF.getSubtarget<ARMSubtarget>().useFastISel();
+  return MF.getSubtarget<ARMSubtarget>().useFastISel();
 }
 
 /// Returns true if the target can safely skip saving callee-saved registers
diff --git a/lib/Target/ARM/ARMFrameLowering.h b/lib/Target/ARM/ARMFrameLowering.h
index e994cab..2f7e238 100644
--- a/lib/Target/ARM/ARMFrameLowering.h
+++ b/lib/Target/ARM/ARMFrameLowering.h
@@ -42,7 +42,7 @@
                                   std::vector<CalleeSavedInfo> &CSI,
                                   const TargetRegisterInfo *TRI) const override;
 
-  bool noFramePointerElim(const MachineFunction &MF) const override;
+  bool keepFramePointer(const MachineFunction &MF) const override;
 
   bool enableCalleeSaveSkip(const MachineFunction &MF) const override;
 
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 8289e95..21de0f6 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -984,7 +984,8 @@
 
     // On v8, we have particularly efficient implementations of atomic fences
     // if they can be combined with nearby atomic loads and stores.
-    if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
+    if (!Subtarget->hasAcquireRelease() ||
+        getTargetMachine().getOptLevel() == 0) {
       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
       InsertFencesForAtomic = true;
     }
@@ -10431,6 +10432,18 @@
   return false;
 }
 
+bool
+ARMTargetLowering::shouldFoldShiftPairToMask(const SDNode *N,
+                                             CombineLevel Level) const {
+  if (!Subtarget->isThumb1Only())
+    return true;
+
+  if (Level == BeforeLegalizeTypes)
+    return true;
+
+  return false;
+}
+
 static SDValue PerformSHLSimplify(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *ST) {
@@ -10734,6 +10747,12 @@
   if (!C2 || C2 >= 32)
     return SDValue();
 
+  // Clear irrelevant bits in the mask.
+  if (LeftShift)
+    C1 &= (-1U << C2);
+  else
+    C1 &= (-1U >> C2);
+
   SelectionDAG &DAG = DCI.DAG;
   SDLoc DL(N);
 
@@ -10741,9 +10760,7 @@
   // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
   // transform to a pair of shifts, to save materializing c1.
 
-  // First pattern: right shift, and c1+1 is a power of two.
-  // FIXME: Also check reversed pattern (left shift, and ~c1+1 is a power
-  // of two).
+  // First pattern: right shift, then mask off leading bits.
   // FIXME: Use demanded bits?
   if (!LeftShift && isMask_32(C1)) {
     uint32_t C3 = countLeadingZeros(C1);
@@ -10755,13 +10772,23 @@
     }
   }
 
-  // Second pattern: left shift, and (c1>>c2)+1 is a power of two.
-  // FIXME: Also check reversed pattern (right shift, and ~(c1<<c2)+1
-  // is a power of two).
+  // First pattern, reversed: left shift, then mask off trailing bits.
+  if (LeftShift && isMask_32(~C1)) {
+    uint32_t C3 = countTrailingZeros(C1);
+    if (C2 < C3) {
+      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
+                                DAG.getConstant(C3 - C2, DL, MVT::i32));
+      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
+                         DAG.getConstant(C3, DL, MVT::i32));
+    }
+  }
+
+  // Second pattern: left shift, then mask off leading bits.
   // FIXME: Use demanded bits?
   if (LeftShift && isShiftedMask_32(C1)) {
+    uint32_t Trailing = countTrailingZeros(C1);
     uint32_t C3 = countLeadingZeros(C1);
-    if (C2 + C3 < 32 && C1 == ((-1U << (C2 + C3)) >> C3)) {
+    if (Trailing == C2 && C2 + C3 < 32) {
       SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                 DAG.getConstant(C2 + C3, DL, MVT::i32));
       return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
@@ -10769,6 +10796,19 @@
     }
   }
 
+  // Second pattern, reversed: right shift, then mask off trailing bits.
+  // FIXME: Handle other patterns of known/demanded bits.
+  if (!LeftShift && isShiftedMask_32(C1)) {
+    uint32_t Leading = countLeadingZeros(C1);
+    uint32_t C3 = countTrailingZeros(C1);
+    if (Leading == C2 && C2 + C3 < 32) {
+      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
+                                DAG.getConstant(C2 + C3, DL, MVT::i32));
+      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
+                         DAG.getConstant(C3, DL, MVT::i32));
+    }
+  }
+
   // FIXME: Transform "(and (shl x, c2) c1)" ->
   // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
   // c1.
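
The reversed second pattern encodes a checkable bit identity. Taking C1 = 0xff0 (a shifted mask with Leading = 20 and C3 = 4) and C2 = 20, (and (srl x, 20), 0xff0) is rewritten to (shl (srl x, 24), 4). A compile-time spot check of the identity for one arbitrary value (illustrative only, not part of the patch):

    static_assert(((0xaabbccddu >> 20) & 0xff0u) ==
                      ((0xaabbccddu >> 24) << 4),
                  "and-of-srl equals srl-then-shl for a shifted mask");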
@@ -12536,8 +12576,7 @@
 
   // Lastly, can we determine that the bits defined by OrCI
   // are zero in Y?
-  KnownBits Known;
-  DAG.computeKnownBits(Y, Known);
+  KnownBits Known = DAG.computeKnownBits(Y);
   if ((OrCI & Known.Zero) != OrCI)
     return SDValue();
 
@@ -12767,8 +12806,7 @@
   }
 
   if (Res.getNode()) {
-    KnownBits Known;
-    DAG.computeKnownBits(SDValue(N,0), Known);
+    KnownBits Known = DAG.computeKnownBits(SDValue(N,0));
     // Capture demanded bits information that would be otherwise lost.
     if (Known.Zero == 0xfffffffe)
       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
@@ -13559,12 +13597,11 @@
     break;
   case ARMISD::CMOV: {
     // Bits are known zero/one if known on the LHS and RHS.
-    DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
+    Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
     if (Known.isUnknown())
       return;
 
-    KnownBits KnownRHS;
-    DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
+    KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
     Known.Zero &= KnownRHS.Zero;
     Known.One  &= KnownRHS.One;
     return;
@@ -13586,7 +13623,7 @@
   case ARMISD::BFI: {
     // Conservatively, we can recurse down the first operand
     // and just mask out all affected bits.
-    DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);
+    Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
 
     // The operand to BFI is already a mask suitable for removing the bits it
     // sets.
@@ -13596,6 +13633,33 @@
     Known.One &= Mask;
     return;
   }
+  case ARMISD::VGETLANEs:
+  case ARMISD::VGETLANEu: {
+    const SDValue &SrcSV = Op.getOperand(0);
+    EVT VecVT = SrcSV.getValueType();
+    assert(VecVT.isVector() && "VGETLANE expected a vector type");
+    const unsigned NumSrcElts = VecVT.getVectorNumElements();
+    ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
+    assert(Pos->getAPIntValue().ult(NumSrcElts) &&
+           "VGETLANE index out of bounds");
+    unsigned Idx = Pos->getZExtValue();
+    APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
+    Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);
+
+    EVT VT = Op.getValueType();
+    const unsigned DstSz = VT.getScalarSizeInBits();
+    const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
+    assert(SrcSz == Known.getBitWidth());
+    assert(DstSz > SrcSz);
+    if (Op.getOpcode() == ARMISD::VGETLANEs)
+      Known = Known.sext(DstSz);
+    else {
+      Known = Known.zext(DstSz);
+      Known.Zero.setBitsFrom(SrcSz);
+    }
+    assert(DstSz == Known.getBitWidth());
+    break;
+  }
   }
 }
 
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index ccee7fa..7a9fc73 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -593,6 +593,8 @@
     bool isDesirableToCommuteWithShift(const SDNode *N,
                                        CombineLevel Level) const override;
 
+    bool shouldFoldShiftPairToMask(const SDNode *N,
+                                   CombineLevel Level) const override;
   protected:
     std::pair<const TargetRegisterClass *, uint8_t>
     findRepresentativeClass(const TargetRegisterInfo *TRI,
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 488af7b..13abdc9 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -395,8 +395,8 @@
 def GenExecuteOnly : Predicate<"Subtarget->genExecuteOnly()">;
 
 // Armv8.5-A extensions
-def HasSpecCtrl      : Predicate<"Subtarget->hasSpecCtrl()">,
-                       AssemblerPredicate<"FeatureSpecCtrl", "specctrl">;
+def HasSB            : Predicate<"Subtarget->hasSB()">,
+                       AssemblerPredicate<"FeatureSB", "sb">;
 
 //===----------------------------------------------------------------------===//
 // ARM Flag Definitions.
@@ -426,25 +426,23 @@
 
 // sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits.
 def sext_16_node : PatLeaf<(i32 GPR:$a), [{
-  if (CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17)
-    return true;
-
-  if (N->getOpcode() != ISD::SRA)
-    return false;
-  if (N->getOperand(0).getOpcode() != ISD::SHL)
-    return false;
-
-  auto *ShiftVal = dyn_cast<ConstantSDNode>(N->getOperand(1));
-  if (!ShiftVal || ShiftVal->getZExtValue() != 16)
-    return false;
-
-  ShiftVal = dyn_cast<ConstantSDNode>(N->getOperand(0)->getOperand(1));
-  if (!ShiftVal || ShiftVal->getZExtValue() != 16)
-    return false;
-
-  return true;
+  return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
 }]>;
 
+def sext_bottom_16 : PatFrag<(ops node:$a),
+                             (sext_inreg node:$a, i16)>;
+def sext_top_16 : PatFrag<(ops node:$a),
+                          (i32 (sra node:$a, (i32 16)))>;
+
+def bb_mul : PatFrag<(ops node:$a, node:$b),
+                     (mul (sext_bottom_16 node:$a), (sext_bottom_16 node:$b))>;
+def bt_mul : PatFrag<(ops node:$a, node:$b),
+                     (mul (sext_bottom_16 node:$a), (sra node:$b, (i32 16)))>;
+def tb_mul : PatFrag<(ops node:$a, node:$b),
+                     (mul (sra node:$a, (i32 16)), (sext_bottom_16 node:$b))>;
+def tt_mul : PatFrag<(ops node:$a, node:$b),
+                     (mul (sra node:$a, (i32 16)), (sra node:$b, (i32 16)))>;
+
 /// Split a 32-bit immediate into two 16 bit parts.
 def hi16 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, SDLoc(N),
@@ -724,7 +722,20 @@
   if (Subtarget->useMovt(*MF))
     return true;
   return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
-}]>;
+}]> {
+  // Ideally this would be an IntImmLeaf, but then we wouldn't have access to
+  // the MachineFunction.
+  let GISelPredicateCode = [{
+    const auto &MF = *MI.getParent()->getParent();
+    if (STI.useMovt(MF))
+      return true;
+
+    const auto &MO = MI.getOperand(1);
+    if (!MO.isCImm())
+      return false;
+    return ARM_AM::isSOImmTwoPartVal(MO.getCImm()->getZExtValue());
+  }];
+}
 
 /// imm0_1 predicate - Immediate in the range [0,1].
 def Imm0_1AsmOperand: ImmAsmOperand<0,1> { let Name = "Imm0_1"; }
@@ -4241,29 +4252,25 @@
 multiclass AI_smul<string opc> {
   def BB : AMulxyI<0b0001011, 0b00, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
               IIC_iMUL16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
-              [(set GPR:$Rd, (mul (sext_inreg GPR:$Rn, i16),
-                                      (sext_inreg GPR:$Rm, i16)))]>,
+              [(set GPR:$Rd, (bb_mul GPR:$Rn, GPR:$Rm))]>,
            Requires<[IsARM, HasV5TE]>,
            Sched<[WriteMUL16, ReadMUL, ReadMUL]>;
 
   def BT : AMulxyI<0b0001011, 0b10, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
               IIC_iMUL16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm",
-              [(set GPR:$Rd, (mul (sext_inreg GPR:$Rn, i16),
-                                      (sra GPR:$Rm, (i32 16))))]>,
+              [(set GPR:$Rd, (bt_mul GPR:$Rn, GPR:$Rm))]>,
            Requires<[IsARM, HasV5TE]>,
            Sched<[WriteMUL16, ReadMUL, ReadMUL]>;
 
   def TB : AMulxyI<0b0001011, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
               IIC_iMUL16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm",
-              [(set GPR:$Rd, (mul (sra GPR:$Rn, (i32 16)),
-                                      (sext_inreg GPR:$Rm, i16)))]>,
+              [(set GPR:$Rd, (tb_mul GPR:$Rn, GPR:$Rm))]>,
            Requires<[IsARM, HasV5TE]>,
            Sched<[WriteMUL16, ReadMUL, ReadMUL]>;
 
   def TT : AMulxyI<0b0001011, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
               IIC_iMUL16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm",
-              [(set GPR:$Rd, (mul (sra GPR:$Rn, (i32 16)),
-                                      (sra GPR:$Rm, (i32 16))))]>,
+              [(set GPR:$Rd, (tt_mul GPR:$Rn, GPR:$Rm))]>,
             Requires<[IsARM, HasV5TE]>,
            Sched<[WriteMUL16, ReadMUL, ReadMUL]>;
 
@@ -4287,35 +4294,31 @@
               (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
               IIC_iMAC16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
               [(set GPRnopc:$Rd, (add GPR:$Ra,
-                               (mul (sext_inreg GPRnopc:$Rn, i16),
-                                       (sext_inreg GPRnopc:$Rm, i16))))]>,
+                                      (bb_mul GPRnopc:$Rn, GPRnopc:$Rm)))]>,
            Requires<[IsARM, HasV5TE, UseMulOps]>,
            Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
 
   def BT : AMulxyIa<0b0001000, 0b10, (outs GPRnopc:$Rd),
               (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
               IIC_iMAC16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
-              [(set GPRnopc:$Rd,
-                    (add GPR:$Ra, (mul (sext_inreg GPRnopc:$Rn, i16),
-                                          (sra GPRnopc:$Rm, (i32 16)))))]>,
+              [(set GPRnopc:$Rd, (add GPR:$Ra,
+                                      (bt_mul GPRnopc:$Rn, GPRnopc:$Rm)))]>,
            Requires<[IsARM, HasV5TE, UseMulOps]>,
            Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
 
   def TB : AMulxyIa<0b0001000, 0b01, (outs GPRnopc:$Rd),
               (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
               IIC_iMAC16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
-              [(set GPRnopc:$Rd,
-                    (add GPR:$Ra, (mul (sra GPRnopc:$Rn, (i32 16)),
-                                          (sext_inreg GPRnopc:$Rm, i16))))]>,
+              [(set GPRnopc:$Rd, (add GPR:$Ra,
+                                      (tb_mul GPRnopc:$Rn, GPRnopc:$Rm)))]>,
            Requires<[IsARM, HasV5TE, UseMulOps]>,
            Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
 
   def TT : AMulxyIa<0b0001000, 0b11, (outs GPRnopc:$Rd),
               (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
               IIC_iMAC16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
-             [(set GPRnopc:$Rd,
-                   (add GPR:$Ra, (mul (sra GPRnopc:$Rn, (i32 16)),
-                                         (sra GPRnopc:$Rm, (i32 16)))))]>,
+             [(set GPRnopc:$Rd, (add GPR:$Ra,
+                                     (tt_mul GPRnopc:$Rn, GPRnopc:$Rm)))]>,
             Requires<[IsARM, HasV5TE, UseMulOps]>,
             Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
 
@@ -4895,7 +4898,7 @@
 
 // Armv8.5-A speculation barrier
 def SB : AInoP<(outs), (ins), MiscFrm, NoItinerary, "sb", "", []>,
-         Requires<[IsARM, HasSpecCtrl]>, Sched<[]> {
+         Requires<[IsARM, HasSB]>, Sched<[]> {
   let Inst{31-0} = 0xf57ff070;
   let Unpredictable = 0x000fff0f;
   let hasSideEffects = 1;
@@ -4908,7 +4911,7 @@
   def ABS : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$src), 8, NoItinerary, []>;
 }
 
-let usesCustomInserter = 1 in {
+let usesCustomInserter = 1, Defs = [CPSR] in {
     def COPY_STRUCT_BYVAL_I32 : PseudoInst<
       (outs), (ins GPR:$dst, GPR:$src, i32imm:$size, i32imm:$alignment),
       NoItinerary,
@@ -5816,26 +5819,21 @@
 
 // smul* and smla*
 def : ARMV5TEPat<(mul sext_16_node:$a, sext_16_node:$b),
-                 (SMULBB GPR:$a, GPR:$b)>,
-      Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
-def : ARMV5TEPat<(mul sext_16_node:$a, (sra GPR:$b, (i32 16))),
-                 (SMULBT GPR:$a, GPR:$b)>,
-      Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
-def : ARMV5TEPat<(mul (sra GPR:$a, (i32 16)), sext_16_node:$b),
-                (SMULTB GPR:$a, GPR:$b)>,
-      Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
-def : ARMV5MOPat<(add GPR:$acc,
-                      (mul sext_16_node:$a, sext_16_node:$b)),
-                 (SMLABB GPR:$a, GPR:$b, GPR:$acc)>,
-      Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
-def : ARMV5MOPat<(add GPR:$acc,
-                      (mul sext_16_node:$a, (sra GPR:$b, (i32 16)))),
-                 (SMLABT GPR:$a, GPR:$b, GPR:$acc)>,
-      Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
-def : ARMV5MOPat<(add GPR:$acc,
-                      (mul (sra GPR:$a, (i32 16)), sext_16_node:$b)),
-                 (SMLATB GPR:$a, GPR:$b, GPR:$acc)>,
-      Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
+                 (SMULBB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(mul sext_16_node:$a, (sext_bottom_16 GPR:$b)),
+                 (SMULBB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(mul sext_16_node:$a, (sext_top_16 GPR:$b)),
+                 (SMULBT GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(mul (sext_top_16 GPR:$a), sext_16_node:$b),
+                 (SMULTB GPR:$a, GPR:$b)>;
+def : ARMV5MOPat<(add GPR:$acc, (mul sext_16_node:$a, sext_16_node:$b)),
+                 (SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5MOPat<(add GPR:$acc, (mul sext_16_node:$a, (sext_bottom_16 GPR:$b))),
+                 (SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5MOPat<(add GPR:$acc, (mul sext_16_node:$a, (sext_top_16 GPR:$b))),
+                 (SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5MOPat<(add GPR:$acc, (mul (sext_top_16 GPR:$a), sext_16_node:$b)),
+                 (SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
 
 def : ARMV5TEPat<(int_arm_smulbb GPR:$a, GPR:$b),
                  (SMULBB GPR:$a, GPR:$b)>;
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index 2617d75..7a6673b 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -2732,28 +2732,25 @@
 }
 
 def t2SMULBB : T2ThreeRegSMUL<0b001, 0b00, "smulbb",
-             [(set rGPR:$Rd, (mul (sext_inreg rGPR:$Rn, i16),
-                                   (sext_inreg rGPR:$Rm, i16)))]>;
+             [(set rGPR:$Rd, (bb_mul rGPR:$Rn, rGPR:$Rm))]>;
 def t2SMULBT : T2ThreeRegSMUL<0b001, 0b01, "smulbt",
-             [(set rGPR:$Rd, (mul (sext_inreg rGPR:$Rn, i16),
-                                   (sra rGPR:$Rm, (i32 16))))]>;
+             [(set rGPR:$Rd, (bt_mul rGPR:$Rn, rGPR:$Rm))]>;
 def t2SMULTB : T2ThreeRegSMUL<0b001, 0b10, "smultb",
-             [(set rGPR:$Rd, (mul (sra rGPR:$Rn, (i32 16)),
-                                   (sext_inreg rGPR:$Rm, i16)))]>;
+             [(set rGPR:$Rd, (tb_mul rGPR:$Rn, rGPR:$Rm))]>;
 def t2SMULTT : T2ThreeRegSMUL<0b001, 0b11, "smultt",
-             [(set rGPR:$Rd, (mul (sra rGPR:$Rn, (i32 16)),
-                                   (sra rGPR:$Rm, (i32 16))))]>;
+             [(set rGPR:$Rd, (tt_mul rGPR:$Rn, rGPR:$Rm))]>;
 def t2SMULWB : T2ThreeRegSMUL<0b011, 0b00, "smulwb",
              [(set rGPR:$Rd, (ARMsmulwb rGPR:$Rn, rGPR:$Rm))]>;
 def t2SMULWT : T2ThreeRegSMUL<0b011, 0b01, "smulwt",
              [(set rGPR:$Rd, (ARMsmulwt rGPR:$Rn, rGPR:$Rm))]>;
 
-def : Thumb2DSPPat<(mul sext_16_node:$Rm, sext_16_node:$Rn),
-                   (t2SMULBB rGPR:$Rm, rGPR:$Rn)>;
-def : Thumb2DSPPat<(mul sext_16_node:$Rn, (sra rGPR:$Rm, (i32 16))),
+def : Thumb2DSPPat<(mul sext_16_node:$Rn, (sext_bottom_16 rGPR:$Rm)),
+                   (t2SMULBB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(mul sext_16_node:$Rn, (sext_top_16 rGPR:$Rm)),
                    (t2SMULBT rGPR:$Rn, rGPR:$Rm)>;
-def : Thumb2DSPPat<(mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm),
+def : Thumb2DSPPat<(mul (sext_top_16 rGPR:$Rn), sext_16_node:$Rm),
                    (t2SMULTB rGPR:$Rn, rGPR:$Rm)>;
+
 def : Thumb2DSPPat<(int_arm_smulbb rGPR:$Rn, rGPR:$Rm),
                    (t2SMULBB rGPR:$Rn, rGPR:$Rm)>;
 def : Thumb2DSPPat<(int_arm_smulbt rGPR:$Rn, rGPR:$Rm),
@@ -2781,18 +2778,13 @@
 }
 
 def t2SMLABB : T2FourRegSMLA<0b001, 0b00, "smlabb",
-             [(set rGPR:$Rd, (add rGPR:$Ra,
-                               (mul (sext_inreg rGPR:$Rn, i16),
-                                     (sext_inreg rGPR:$Rm, i16))))]>;
+             [(set rGPR:$Rd, (add rGPR:$Ra, (bb_mul rGPR:$Rn, rGPR:$Rm)))]>;
 def t2SMLABT : T2FourRegSMLA<0b001, 0b01, "smlabt",
-             [(set rGPR:$Rd, (add rGPR:$Ra, (mul (sext_inreg rGPR:$Rn, i16),
-                                                 (sra rGPR:$Rm, (i32 16)))))]>;
+             [(set rGPR:$Rd, (add rGPR:$Ra, (bt_mul rGPR:$Rn, rGPR:$Rm)))]>;
 def t2SMLATB : T2FourRegSMLA<0b001, 0b10, "smlatb",
-             [(set rGPR:$Rd, (add rGPR:$Ra, (mul (sra rGPR:$Rn, (i32 16)),
-                                                (sext_inreg rGPR:$Rm, i16))))]>;
+             [(set rGPR:$Rd, (add rGPR:$Ra, (tb_mul rGPR:$Rn, rGPR:$Rm)))]>;
 def t2SMLATT : T2FourRegSMLA<0b001, 0b11, "smlatt",
-             [(set rGPR:$Rd, (add rGPR:$Ra, (mul (sra rGPR:$Rn, (i32 16)),
-                                                 (sra rGPR:$Rm, (i32 16)))))]>;
+             [(set rGPR:$Rd, (add rGPR:$Ra, (tt_mul rGPR:$Rn, rGPR:$Rm)))]>;
 def t2SMLAWB : T2FourRegSMLA<0b011, 0b00, "smlawb",
              [(set rGPR:$Rd, (add rGPR:$Ra, (ARMsmulwb rGPR:$Rn, rGPR:$Rm)))]>;
 def t2SMLAWT : T2FourRegSMLA<0b011, 0b01, "smlawt",
@@ -2800,11 +2792,14 @@
 
 def : Thumb2DSPMulPat<(add rGPR:$Ra, (mul sext_16_node:$Rn, sext_16_node:$Rm)),
                       (t2SMLABB rGPR:$Rn, rGPR:$Rm, rGPR:$Ra)>;
-def : Thumb2DSPMulPat<(add rGPR:$Ra,
-                        (mul sext_16_node:$Rn, (sra rGPR:$Rm, (i32 16)))),
+def : Thumb2DSPMulPat<(add rGPR:$Ra, (mul sext_16_node:$Rn,
+                                          (sext_bottom_16 rGPR:$Rm))),
+                      (t2SMLABB rGPR:$Rn, rGPR:$Rm, rGPR:$Ra)>;
+def : Thumb2DSPMulPat<(add rGPR:$Ra, (mul sext_16_node:$Rn,
+                                          (sext_top_16 rGPR:$Rm))),
                       (t2SMLABT rGPR:$Rn, rGPR:$Rm, rGPR:$Ra)>;
-def : Thumb2DSPMulPat<(add rGPR:$Ra,
-                        (mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm)),
+def : Thumb2DSPMulPat<(add rGPR:$Ra, (mul (sext_top_16 rGPR:$Rn),
+                                          sext_16_node:$Rm)),
                       (t2SMLATB rGPR:$Rn, rGPR:$Rm, rGPR:$Ra)>;
 
 def : Thumb2DSPPat<(int_arm_smlabb GPR:$a, GPR:$b, GPR:$acc),
@@ -3239,7 +3234,7 @@
 
 // Armv8.5-A speculation barrier
 def t2SB : Thumb2XI<(outs), (ins), AddrModeNone, 4, NoItinerary, "sb", "", []>,
-           Requires<[IsThumb2, HasSpecCtrl]>, Sched<[]> {
+           Requires<[IsThumb2, HasSB]>, Sched<[]> {
   let Inst{31-0} = 0xf3bf8f70;
   let Unpredictable = 0x000f2f0f;
   let hasSideEffects = 1;
@@ -4451,13 +4446,13 @@
 def : T2Pat<(atomic_store_32 t2addrmode_so_reg:$addr, GPR:$val),
             (t2STRs     GPR:$val, t2addrmode_so_reg:$addr)>;
 
-let AddedComplexity = 8 in {
-  def : T2Pat<(atomic_load_acquire_8 addr_offset_none:$addr),  (t2LDAB addr_offset_none:$addr)>;
-  def : T2Pat<(atomic_load_acquire_16 addr_offset_none:$addr), (t2LDAH addr_offset_none:$addr)>;
-  def : T2Pat<(atomic_load_acquire_32 addr_offset_none:$addr), (t2LDA  addr_offset_none:$addr)>;
-  def : T2Pat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val),  (t2STLB GPR:$val, addr_offset_none:$addr)>;
-  def : T2Pat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (t2STLH GPR:$val, addr_offset_none:$addr)>;
-  def : T2Pat<(atomic_store_release_32 addr_offset_none:$addr, GPR:$val), (t2STL  GPR:$val, addr_offset_none:$addr)>;
+let AddedComplexity = 8, Predicates = [IsThumb, HasAcquireRelease, HasV7Clrex] in {
+  def : Pat<(atomic_load_acquire_8 addr_offset_none:$addr),  (t2LDAB addr_offset_none:$addr)>;
+  def : Pat<(atomic_load_acquire_16 addr_offset_none:$addr), (t2LDAH addr_offset_none:$addr)>;
+  def : Pat<(atomic_load_acquire_32 addr_offset_none:$addr), (t2LDA  addr_offset_none:$addr)>;
+  def : Pat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val),  (t2STLB GPR:$val, addr_offset_none:$addr)>;
+  def : Pat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (t2STLH GPR:$val, addr_offset_none:$addr)>;
+  def : Pat<(atomic_store_release_32 addr_offset_none:$addr, GPR:$val), (t2STL  GPR:$val, addr_offset_none:$addr)>;
 }
 
 
diff --git a/lib/Target/ARM/ARMLegalizerInfo.cpp b/lib/Target/ARM/ARMLegalizerInfo.cpp
index ec613dd..4a0c24d 100644
--- a/lib/Target/ARM/ARMLegalizerInfo.cpp
+++ b/lib/Target/ARM/ARMLegalizerInfo.cpp
@@ -92,6 +92,10 @@
   getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});
   getActionDefinitionsBuilder(G_PTRTOINT).legalFor({{s32, p0}});
 
+  getActionDefinitionsBuilder(G_CONSTANT)
+      .legalFor({s32, p0})
+      .clampScalar(0, s32, s32);
+
   // We're keeping these builders around because we'll want to add support for
   // floating point to them.
   auto &LoadStoreBuilder =
@@ -157,10 +161,6 @@
 
   getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});
 
-  getActionDefinitionsBuilder(G_CONSTANT)
-      .legalFor({s32, p0})
-      .clampScalar(0, s32, s32);
-
   getActionDefinitionsBuilder(G_ICMP)
       .legalForCartesianProduct({s1}, {s32, p0})
       .minScalar(1, s32);
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index b06c35f..11841b4 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -417,7 +417,7 @@
   bool UseSjLjEH = false;
 
   /// Has speculation barrier
-  bool HasSpecCtrl = false;
+  bool HasSB = false;
 
   /// Implicitly convert an instruction to a different one if its immediates
   /// cannot be encoded. For example, ADD r0, r1, #FFFFFFFF -> SUB r0, r1, #1.
@@ -628,7 +628,7 @@
   bool hasDSP() const { return HasDSP; }
   bool useNaClTrap() const { return UseNaClTrap; }
   bool useSjLjEH() const { return UseSjLjEH; }
-  bool hasSpecCtrl() const { return HasSpecCtrl; }
+  bool hasSB() const { return HasSB; }
   bool genLongCalls() const { return GenLongCalls; }
   bool genExecuteOnly() const { return GenExecuteOnly; }
 
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index 2f510c5..65889fc 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -122,6 +122,7 @@
   { ARM::t2SUBSrr,ARM::tSUBrr,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
   { ARM::t2SXTB,  ARM::tSXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
   { ARM::t2SXTH,  ARM::tSXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
+  { ARM::t2TEQrr, ARM::tEOR,    0,             0,   0,   1,   0,  2,0, 0,1,0 },
   { ARM::t2TSTrr, ARM::tTST,    0,             0,   0,   1,   0,  2,0, 0,0,0 },
   { ARM::t2UXTB,  ARM::tUXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
   { ARM::t2UXTH,  ARM::tUXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
@@ -717,6 +718,16 @@
       return true;
     return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
   }
+  case ARM::t2TEQrr: {
+    unsigned PredReg = 0;
+    // Can only convert to eors if we're not in an IT block.
+    if (getInstrPredicate(*MI, PredReg) != ARMCC::AL)
+      break;
+  // TODO: if Operand 0 is not killed but Operand 1 is, then we could write
+  // to Op1 instead.
+    if (MI->getOperand(0).isKill())
+      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
+  }
   }
   return false;
 }
@@ -903,9 +914,24 @@
   // Add the 16-bit instruction.
   DebugLoc dl = MI->getDebugLoc();
   MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
-  MIB.add(MI->getOperand(0));
-  if (NewMCID.hasOptionalDef())
-    MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
+
+  // TEQ is special in that it doesn't define a register, but we're converting
+  // it into an EOR, which does. So add the first operand as a def and then
+  // again as a use.
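+  // E.g. "teq r0, r1" becomes "eors r0, r1" with r0 marked as a dead def.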
+  if (MCID.getOpcode() == ARM::t2TEQrr) {
+    MIB.add(MI->getOperand(0));
+    MIB->getOperand(0).setIsKill(false);
+    MIB->getOperand(0).setIsDef(true);
+    MIB->getOperand(0).setIsDead(true);
+
+    if (NewMCID.hasOptionalDef())
+      MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
+    MIB.add(MI->getOperand(0));
+  } else {
+    MIB.add(MI->getOperand(0));
+    if (NewMCID.hasOptionalDef())
+      MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
+  }
 
   // Transfer the rest of operands.
   unsigned NumOps = MCID.getNumOperands();
diff --git a/lib/Target/BPF/BPFAsmPrinter.cpp b/lib/Target/BPF/BPFAsmPrinter.cpp
index 705211b..ada5eb9 100644
--- a/lib/Target/BPF/BPFAsmPrinter.cpp
+++ b/lib/Target/BPF/BPFAsmPrinter.cpp
@@ -16,6 +16,7 @@
 #include "BPFInstrInfo.h"
 #include "BPFMCInstLower.h"
 #include "BPFTargetMachine.h"
+#include "BTFDebug.h"
 #include "InstPrinter/BPFInstPrinter.h"
 #include "llvm/CodeGen/AsmPrinter.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
@@ -40,6 +41,7 @@
       : AsmPrinter(TM, std::move(Streamer)) {}
 
   StringRef getPassName() const override { return "BPF Assembly Printer"; }
+  bool doInitialization(Module &M) override;
   void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O);
   bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                        unsigned AsmVariant, const char *ExtraCode,
@@ -52,6 +54,18 @@
 };
 } // namespace
 
+bool BPFAsmPrinter::doInitialization(Module &M) {
+  AsmPrinter::doInitialization(M);
+
+  if (MAI->doesSupportDebugInformation()) {
+    Handlers.push_back(HandlerInfo(new BTFDebug(this), "emit",
+                                   "Debug Info Emission", "BTF",
+                                   "BTF Emission"));
+  }
+
+  return false;
+}
+
 void BPFAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
                                  raw_ostream &O) {
   const MachineOperand &MO = MI->getOperand(OpNum);
diff --git a/lib/Target/BPF/BTF.def b/lib/Target/BPF/BTF.def
new file mode 100644
index 0000000..54c5bc3
--- /dev/null
+++ b/lib/Target/BPF/BTF.def
@@ -0,0 +1,33 @@
+//===- BTF.def - BTF definitions --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for BTF.
+//
+//===----------------------------------------------------------------------===//
+
+#if !defined(HANDLE_BTF_KIND)
+#error "Missing macro definition of HANDLE_BTF_*"
+#endif
+
+HANDLE_BTF_KIND(0, UNKN)
+HANDLE_BTF_KIND(1, INT)
+HANDLE_BTF_KIND(2, PTR)
+HANDLE_BTF_KIND(3, ARRAY)
+HANDLE_BTF_KIND(4, STRUCT)
+HANDLE_BTF_KIND(5, UNION)
+HANDLE_BTF_KIND(6, ENUM)
+HANDLE_BTF_KIND(7, FWD)
+HANDLE_BTF_KIND(8, TYPEDEF)
+HANDLE_BTF_KIND(9, VOLATILE)
+HANDLE_BTF_KIND(10, CONST)
+HANDLE_BTF_KIND(11, RESTRICT)
+HANDLE_BTF_KIND(12, FUNC)
+HANDLE_BTF_KIND(13, FUNC_PROTO)
+
+#undef HANDLE_BTF_KIND
diff --git a/lib/Target/BPF/BTF.h b/lib/Target/BPF/BTF.h
new file mode 100644
index 0000000..1e1680f
--- /dev/null
+++ b/lib/Target/BPF/BTF.h
@@ -0,0 +1,209 @@
+//===-- BTF.h --------------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the layout of .BTF and .BTF.ext ELF sections.
+///
+/// The binary layout for the .BTF section:
+///   struct Header
+///   Type and Str subsections
+/// The Type subsection is a collection of types, with type ids starting at 1.
+/// The Str subsection is simply a collection of strings.
+///
+/// The binary layout for the .BTF.ext section:
+///   struct ExtHeader
+///   FuncInfo and LineInfo subsections
+/// The FuncInfo subsection is defined as follows:
+///   BPFFuncInfo Size
+///   struct SecFuncInfo for ELF section #1
+///   A number of struct BPFFuncInfo for ELF section #1
+///   struct SecFuncInfo for ELF section #2
+///   A number of struct BPFFuncInfo for ELF section #2
+///   ...
+/// The LineInfo subsection is defined as follows:
+///   BPFLineInfo Size
+///   struct SecLineInfo for ELF section #1
+///   A number of struct BPFLineInfo for ELF section #1
+///   struct SecLineInfo for ELF section #2
+///   A number of struct BPFLineInfo for ELF section #2
+///   ...
+///
+/// The section formats are also defined at
+///    https://github.com/torvalds/linux/blob/master/include/uapi/linux/btf.h
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BTF_H
+#define LLVM_LIB_TARGET_BPF_BTF_H
+
+namespace llvm {
+namespace BTF {
+
+enum : uint32_t { MAGIC = 0xeB9F, VERSION = 1 };
+
+/// Sizes in bytes of various things in the BTF format.
+enum {
+  HeaderSize = 24,
+  ExtHeaderSize = 24,
+  CommonTypeSize = 12,
+  BTFArraySize = 12,
+  BTFEnumSize = 8,
+  BTFMemberSize = 12,
+  BTFParamSize = 8,
+  SecFuncInfoSize = 8,
+  SecLineInfoSize = 8,
+  BPFFuncInfoSize = 8,
+  BPFLineInfoSize = 16
+};
+
+/// The .BTF section header definition.
+struct Header {
+  uint16_t Magic;  ///< Magic value
+  uint8_t Version; ///< Version number
+  uint8_t Flags;   ///< Extra flags
+  uint32_t HdrLen; ///< Length of this header
+
+  /// All offsets are in bytes relative to the end of this header.
+  uint32_t TypeOff; ///< Offset of type section
+  uint32_t TypeLen; ///< Length of type section
+  uint32_t StrOff;  ///< Offset of string section
+  uint32_t StrLen;  ///< Length of string section
+};
+
+enum : uint32_t {
+  MAX_VLEN = 0xffff         ///< Max # of struct/union/enum members or func args
+};
+
+enum TypeKinds : uint8_t {
+#define HANDLE_BTF_KIND(ID, NAME) BTF_KIND_##NAME = ID,
+#include "BTF.def"
+};
+
+/// The BTF common type definition. Different kinds may have
+/// additional information following this structure.
+struct CommonType {
+  /// Type name offset in the string table.
+  uint32_t NameOff;
+
+  /// "Info" bits arrangement:
+  /// Bits  0-15: vlen (e.g. # of struct's members)
+  /// Bits 16-23: unused
+  /// Bits 24-27: kind (e.g. int, ptr, array, etc.)
+  /// Bits 28-30: unused
+  /// Bit     31: kind_flag, currently used by
+  ///             struct, union and fwd
+  uint32_t Info;
+
+  /// "Size" is used by INT, ENUM, STRUCT and UNION.
+  /// "Size" tells the size of the type it is describing.
+  ///
+  /// "Type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+  /// FUNC and FUNC_PROTO.
+  /// "Type" is a type_id referring to another type.
+  union {
+    uint32_t Size;
+    uint32_t Type;
+  };
+};
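+// For example, Info = 0x04000003 decodes to kind 4 (BTF_KIND_STRUCT) in
+// bits 24-27 and vlen 3 (three members) in bits 0-15.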
+
+// For certain BTF_KINDs, "struct CommonType" is immediately
+// followed by extra data.
+
+// BTF_KIND_INT is followed by a u32, whose 32 bits
+// are arranged as follows:
+// BTF_INT_ENCODING(VAL) : (((VAL) & 0x0f000000) >> 24)
+// BTF_INT_OFFSET(VAL)   : (((VAL) & 0x00ff0000) >> 16)
+// BTF_INT_BITS(VAL)     : ((VAL) & 0x000000ff)
+
+/// Attributes stored in the INT_ENCODING.
+enum : uint8_t { INT_SIGNED = (1 << 0), INT_CHAR = (1 << 1), INT_BOOL = (1 << 2) };
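+// For example, a 32-bit "unsigned int" is described by the u32 value
+// 0x00000020 (encoding 0, offset 0, 32 bits), while "signed char" is
+// 0x01000008 (INT_SIGNED, offset 0, 8 bits).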
+
+/// BTF_KIND_ENUM is followed by multiple "struct BTFEnum".
+/// The exact number of BTFEnum entries is stored in the vlen (of the
+/// info in "struct CommonType").
+struct BTFEnum {
+  uint32_t NameOff; ///< Enum name offset in the string table
+  int32_t Val;      ///< Enum member value
+};
+
+/// BTF_KIND_ARRAY is followed by one "struct BTFArray".
+struct BTFArray {
+  uint32_t ElemType;  ///< Element type
+  uint32_t IndexType; ///< Index type
+  uint32_t Nelems;    ///< Number of elements for this array
+};
+
+/// BTF_KIND_STRUCT and BTF_KIND_UNION are followed
+/// by multiple "struct BTFMember".  The exact number
+/// of BTFMember is stored in the vlen (of the info in
+/// "struct CommonType").
+///
+/// If the struct/union contains any bitfield member,
+/// the Offset below represents BitOffset (bits 0 - 23)
+/// and BitFieldSize(bits 24 - 31) with BitFieldSize = 0
+/// for non-bitfield members. Otherwise, the Offset
+/// represents the BitOffset.
+struct BTFMember {
+  uint32_t NameOff; ///< Member name offset in the string table
+  uint32_t Type;    ///< Member type
+  uint32_t Offset;  ///< BitOffset or BitFieldSize+BitOffset
+};
+
+/// BTF_KIND_FUNC_PROTO is followed by multiple "struct BTFParam".
+/// The exact number of BTFParam entries is stored in the vlen (of the info
+/// in "struct CommonType").
+struct BTFParam {
+  uint32_t NameOff;
+  uint32_t Type;
+};
+
+/// The .BTF.ext section header definition.
+struct ExtHeader {
+  uint16_t Magic;
+  uint8_t Version;
+  uint8_t Flags;
+  uint32_t HdrLen;
+
+  uint32_t FuncInfoOff; ///< Offset of func info section
+  uint32_t FuncInfoLen; ///< Length of func info section
+  uint32_t LineInfoOff; ///< Offset of line info section
+  uint32_t LineInfoLen; ///< Length of line info section
+};
+
+/// Specifying one function info.
+struct BPFFuncInfo {
+  uint32_t InsnOffset; ///< Byte offset in the section
+  uint32_t TypeId;     ///< Type id referring to .BTF type section
+};
+
+/// Specifying function infos in one section.
+struct SecFuncInfo {
+  uint32_t SecNameOff;  ///< Section name index in the .BTF string table
+  uint32_t NumFuncInfo; ///< Number of func infos in this section
+};
+
+/// Specifying one line info.
+struct BPFLineInfo {
+  uint32_t InsnOffset;  ///< Byte offset in this section
+  uint32_t FileNameOff; ///< File name index in the .BTF string table
+  uint32_t LineOff;     ///< Line index in the .BTF string table
+  uint32_t LineCol;     ///< Line num: line_col >> 10,
+                        ///  col num: line_col & 0x3ff
+};
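+// For example, line 42, column 7 is packed as (42 << 10) | 7 = 0xA807.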
+
+/// Specifying line infos in one section.
+struct SecLineInfo {
+  uint32_t SecNameOff;  ///< Section name index in the .BTF string table
+  uint32_t NumLineInfo; ///< Number of line infos in this section
+};
+
+} // End namespace BTF.
+} // End namespace llvm.
+
+#endif
diff --git a/lib/Target/BPF/BTFDebug.cpp b/lib/Target/BPF/BTFDebug.cpp
new file mode 100644
index 0000000..96efea4
--- /dev/null
+++ b/lib/Target/BPF/BTFDebug.cpp
@@ -0,0 +1,759 @@
+//===- BTFDebug.cpp - BTF Generator ---------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing BTF debug info.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BTFDebug.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCStreamer.h"
+#include <fstream>
+#include <sstream>
+
+using namespace llvm;
+
+static const char *BTFKindStr[] = {
+#define HANDLE_BTF_KIND(ID, NAME) "BTF_KIND_" #NAME,
+#include "BTF.def"
+};
+
+/// Emit a BTF common type.
+void BTFTypeBase::emitType(MCStreamer &OS) {
+  OS.AddComment(std::string(BTFKindStr[Kind]) + "(id = " + std::to_string(Id) +
+                ")");
+  OS.EmitIntValue(BTFType.NameOff, 4);
+  OS.AddComment("0x" + Twine::utohexstr(BTFType.Info));
+  OS.EmitIntValue(BTFType.Info, 4);
+  OS.EmitIntValue(BTFType.Size, 4);
+}
+
+BTFTypeDerived::BTFTypeDerived(const DIDerivedType *DTy, unsigned Tag)
+    : DTy(DTy) {
+  switch (Tag) {
+  case dwarf::DW_TAG_pointer_type:
+    Kind = BTF::BTF_KIND_PTR;
+    break;
+  case dwarf::DW_TAG_const_type:
+    Kind = BTF::BTF_KIND_CONST;
+    break;
+  case dwarf::DW_TAG_volatile_type:
+    Kind = BTF::BTF_KIND_VOLATILE;
+    break;
+  case dwarf::DW_TAG_typedef:
+    Kind = BTF::BTF_KIND_TYPEDEF;
+    break;
+  case dwarf::DW_TAG_restrict_type:
+    Kind = BTF::BTF_KIND_RESTRICT;
+    break;
+  default:
+    llvm_unreachable("Unknown DIDerivedType Tag");
+  }
+  BTFType.Info = Kind << 24;
+}
+
+void BTFTypeDerived::completeType(BTFDebug &BDebug) {
+  BTFType.NameOff = BDebug.addString(DTy->getName());
+
+  // The base type for PTR/CONST/VOLATILE could be void.
+  const DIType *ResolvedType = DTy->getBaseType().resolve();
+  if (!ResolvedType) {
+    assert((Kind == BTF::BTF_KIND_PTR || Kind == BTF::BTF_KIND_CONST ||
+            Kind == BTF::BTF_KIND_VOLATILE) &&
+           "Invalid null basetype");
+    BTFType.Type = 0;
+  } else {
+    BTFType.Type = BDebug.getTypeId(ResolvedType);
+  }
+}
+
+void BTFTypeDerived::emitType(MCStreamer &OS) { BTFTypeBase::emitType(OS); }
+
+/// Represent a struct/union forward declaration.
+BTFTypeFwd::BTFTypeFwd(StringRef Name, bool IsUnion) : Name(Name) {
+  Kind = BTF::BTF_KIND_FWD;
+  BTFType.Info = IsUnion << 31 | Kind << 24;
+  BTFType.Type = 0;
+}
+
+void BTFTypeFwd::completeType(BTFDebug &BDebug) {
+  BTFType.NameOff = BDebug.addString(Name);
+}
+
+void BTFTypeFwd::emitType(MCStreamer &OS) { BTFTypeBase::emitType(OS); }
+
+BTFTypeInt::BTFTypeInt(uint32_t Encoding, uint32_t SizeInBits,
+                       uint32_t OffsetInBits, StringRef TypeName)
+    : Name(TypeName) {
+  // Translate IR int encoding to BTF int encoding.
+  uint8_t BTFEncoding;
+  switch (Encoding) {
+  case dwarf::DW_ATE_boolean:
+    BTFEncoding = BTF::INT_BOOL;
+    break;
+  case dwarf::DW_ATE_signed:
+  case dwarf::DW_ATE_signed_char:
+    BTFEncoding = BTF::INT_SIGNED;
+    break;
+  case dwarf::DW_ATE_unsigned:
+  case dwarf::DW_ATE_unsigned_char:
+    BTFEncoding = 0;
+    break;
+  default:
+    llvm_unreachable("Unknown BTFTypeInt Encoding");
+  }
+
+  Kind = BTF::BTF_KIND_INT;
+  BTFType.Info = Kind << 24;
+  BTFType.Size = roundupToBytes(SizeInBits);
+  IntVal = (BTFEncoding << 24) | OffsetInBits << 16 | SizeInBits;
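+  // E.g. a plain 32-bit signed "int" (offset 0) gives
+  // IntVal = (INT_SIGNED << 24) | 32 = 0x01000020.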
+}
+
+void BTFTypeInt::completeType(BTFDebug &BDebug) {
+  BTFType.NameOff = BDebug.addString(Name);
+}
+
+void BTFTypeInt::emitType(MCStreamer &OS) {
+  BTFTypeBase::emitType(OS);
+  OS.AddComment("0x" + Twine::utohexstr(IntVal));
+  OS.EmitIntValue(IntVal, 4);
+}
+
+BTFTypeEnum::BTFTypeEnum(const DICompositeType *ETy, uint32_t VLen) : ETy(ETy) {
+  Kind = BTF::BTF_KIND_ENUM;
+  BTFType.Info = Kind << 24 | VLen;
+  BTFType.Size = roundupToBytes(ETy->getSizeInBits());
+}
+
+void BTFTypeEnum::completeType(BTFDebug &BDebug) {
+  BTFType.NameOff = BDebug.addString(ETy->getName());
+
+  DINodeArray Elements = ETy->getElements();
+  for (const auto Element : Elements) {
+    const auto *Enum = cast<DIEnumerator>(Element);
+
+    struct BTF::BTFEnum BTFEnum;
+    BTFEnum.NameOff = BDebug.addString(Enum->getName());
+    // BTF enum values are 32-bit; enforce that here.
+    BTFEnum.Val = static_cast<uint32_t>(Enum->getValue());
+    EnumValues.push_back(BTFEnum);
+  }
+}
+
+void BTFTypeEnum::emitType(MCStreamer &OS) {
+  BTFTypeBase::emitType(OS);
+  for (const auto &Enum : EnumValues) {
+    OS.EmitIntValue(Enum.NameOff, 4);
+    OS.EmitIntValue(Enum.Val, 4);
+  }
+}
+
+BTFTypeArray::BTFTypeArray(const DICompositeType *ATy) : ATy(ATy) {
+  Kind = BTF::BTF_KIND_ARRAY;
+  BTFType.Info = Kind << 24;
+}
+
+/// Represent a BTF array. BTF does not record array dimensions,
+/// so conceptually a BTF array is a one-dimensional array.
+void BTFTypeArray::completeType(BTFDebug &BDebug) {
+  BTFType.NameOff = BDebug.addString(ATy->getName());
+  BTFType.Size = 0;
+
+  auto *BaseType = ATy->getBaseType().resolve();
+  ArrayInfo.ElemType = BDebug.getTypeId(BaseType);
+
+  // The IR does not really have a type for the index.
+  // A special type for array index should have been
+  // created during initial type traversal. Just
+  // retrieve that type id.
+  ArrayInfo.IndexType = BDebug.getArrayIndexTypeId();
+
+  // Get the number of array elements.
+  // If the array size is 0, set the number of elements to 0.
+  // Otherwise, recursively traverse the base types to
+  // find the element size. The number of elements is
+  // the total array size in bits divided by the
+  // element size in bits.
+  uint64_t ArraySizeInBits = ATy->getSizeInBits();
+  if (!ArraySizeInBits) {
+    ArrayInfo.Nelems = 0;
+  } else {
+    uint32_t BaseTypeSize = BaseType->getSizeInBits();
+    while (!BaseTypeSize) {
+      const auto *DDTy = cast<DIDerivedType>(BaseType);
+      BaseType = DDTy->getBaseType().resolve();
+      assert(BaseType);
+      BaseTypeSize = BaseType->getSizeInBits();
+    }
+    ArrayInfo.Nelems = ATy->getSizeInBits() / BaseTypeSize;
+  }
+}
+
+void BTFTypeArray::emitType(MCStreamer &OS) {
+  BTFTypeBase::emitType(OS);
+  OS.EmitIntValue(ArrayInfo.ElemType, 4);
+  OS.EmitIntValue(ArrayInfo.IndexType, 4);
+  OS.EmitIntValue(ArrayInfo.Nelems, 4);
+}
+
+/// Represent either a struct or a union.
+BTFTypeStruct::BTFTypeStruct(const DICompositeType *STy, bool IsStruct,
+                             bool HasBitField, uint32_t Vlen)
+    : STy(STy), HasBitField(HasBitField) {
+  Kind = IsStruct ? BTF::BTF_KIND_STRUCT : BTF::BTF_KIND_UNION;
+  BTFType.Size = roundupToBytes(STy->getSizeInBits());
+  BTFType.Info = (HasBitField << 31) | (Kind << 24) | Vlen;
+}
+
+void BTFTypeStruct::completeType(BTFDebug &BDebug) {
+  BTFType.NameOff = BDebug.addString(STy->getName());
+
+  // Add struct/union members.
+  const DINodeArray Elements = STy->getElements();
+  for (const auto *Element : Elements) {
+    struct BTF::BTFMember BTFMember;
+    const auto *DDTy = cast<DIDerivedType>(Element);
+
+    BTFMember.NameOff = BDebug.addString(DDTy->getName());
+    if (HasBitField) {
+      uint8_t BitFieldSize = DDTy->isBitField() ? DDTy->getSizeInBits() : 0;
+      BTFMember.Offset = BitFieldSize << 24 | DDTy->getOffsetInBits();
+    } else {
+      BTFMember.Offset = DDTy->getOffsetInBits();
+    }
+    BTFMember.Type = BDebug.getTypeId(DDTy->getBaseType().resolve());
+    Members.push_back(BTFMember);
+  }
+}
+
+void BTFTypeStruct::emitType(MCStreamer &OS) {
+  BTFTypeBase::emitType(OS);
+  for (const auto &Member : Members) {
+    OS.EmitIntValue(Member.NameOff, 4);
+    OS.EmitIntValue(Member.Type, 4);
+    OS.AddComment("0x" + Twine::utohexstr(Member.Offset));
+    OS.EmitIntValue(Member.Offset, 4);
+  }
+}
+
+/// The Func kind represents both a subprogram and the pointee of a
+/// function pointer. If the FuncName is empty, it represents the pointee
+/// of a function pointer; otherwise, it represents a subprogram. The func
+/// arg names are empty in the function-pointer case and are valid names
+/// for a subprogram.
+BTFTypeFuncProto::BTFTypeFuncProto(
+    const DISubroutineType *STy, uint32_t VLen,
+    const std::unordered_map<uint32_t, StringRef> &FuncArgNames)
+    : STy(STy), FuncArgNames(FuncArgNames) {
+  Kind = BTF::BTF_KIND_FUNC_PROTO;
+  BTFType.Info = (Kind << 24) | VLen;
+}
+
+void BTFTypeFuncProto::completeType(BTFDebug &BDebug) {
+  DITypeRefArray Elements = STy->getTypeArray();
+  auto RetType = Elements[0].resolve();
+  BTFType.Type = RetType ? BDebug.getTypeId(RetType) : 0;
+  BTFType.NameOff = 0;
+
+  // For a null parameter, which is typically the last one and represents
+  // the vararg, encode NameOff/Type as 0.
+  for (unsigned I = 1, N = Elements.size(); I < N; ++I) {
+    struct BTF::BTFParam Param;
+    auto Element = Elements[I].resolve();
+    if (Element) {
+      Param.NameOff = BDebug.addString(FuncArgNames[I]);
+      Param.Type = BDebug.getTypeId(Element);
+    } else {
+      Param.NameOff = 0;
+      Param.Type = 0;
+    }
+    Parameters.push_back(Param);
+  }
+}
+
+void BTFTypeFuncProto::emitType(MCStreamer &OS) {
+  BTFTypeBase::emitType(OS);
+  for (const auto &Param : Parameters) {
+    OS.EmitIntValue(Param.NameOff, 4);
+    OS.EmitIntValue(Param.Type, 4);
+  }
+}
+
+BTFTypeFunc::BTFTypeFunc(StringRef FuncName, uint32_t ProtoTypeId)
+    : Name(FuncName) {
+  Kind = BTF::BTF_KIND_FUNC;
+  BTFType.Info = Kind << 24;
+  BTFType.Type = ProtoTypeId;
+}
+
+void BTFTypeFunc::completeType(BTFDebug &BDebug) {
+  BTFType.NameOff = BDebug.addString(Name);
+}
+
+void BTFTypeFunc::emitType(MCStreamer &OS) { BTFTypeBase::emitType(OS); }
+
+uint32_t BTFStringTable::addString(StringRef S) {
+  // Check whether the string already exists.
+  for (auto &OffsetM : OffsetToIdMap) {
+    if (Table[OffsetM.second] == S)
+      return OffsetM.first;
+  }
+  // Not found, so add it to the string table.
+  uint32_t Offset = Size;
+  OffsetToIdMap[Offset] = Table.size();
+  Table.push_back(S);
+  Size += S.size() + 1;
+  return Offset;
+}
+
+BTFDebug::BTFDebug(AsmPrinter *AP)
+    : DebugHandlerBase(AP), OS(*Asm->OutStreamer), SkipInstruction(false),
+      LineInfoGenerated(false), SecNameOff(0), ArrayIndexTypeId(0) {
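+  // Offset 0 always holds the empty string, so a string offset of 0 can
+  // mean "no name"; subsequently added strings start at offset 1.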
+  addString("\0");
+}
+
+void BTFDebug::addType(std::unique_ptr<BTFTypeBase> TypeEntry,
+                       const DIType *Ty) {
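+  // Type ids are 1-based; type id 0 is reserved to mean "void".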
+  TypeEntry->setId(TypeEntries.size() + 1);
+  DIToIdMap[Ty] = TypeEntry->getId();
+  TypeEntries.push_back(std::move(TypeEntry));
+}
+
+uint32_t BTFDebug::addType(std::unique_ptr<BTFTypeBase> TypeEntry) {
+  TypeEntry->setId(TypeEntries.size() + 1);
+  uint32_t Id = TypeEntry->getId();
+  TypeEntries.push_back(std::move(TypeEntry));
+  return Id;
+}
+
+void BTFDebug::visitBasicType(const DIBasicType *BTy) {
+  // Only int types are supported in BTF.
+  uint32_t Encoding = BTy->getEncoding();
+  if (Encoding != dwarf::DW_ATE_boolean && Encoding != dwarf::DW_ATE_signed &&
+      Encoding != dwarf::DW_ATE_signed_char &&
+      Encoding != dwarf::DW_ATE_unsigned &&
+      Encoding != dwarf::DW_ATE_unsigned_char)
+    return;
+
+  // Create a BTF type instance for this DIBasicType and put it into
+  // DIToIdMap for cross-type reference check.
+  auto TypeEntry = llvm::make_unique<BTFTypeInt>(
+      Encoding, BTy->getSizeInBits(), BTy->getOffsetInBits(), BTy->getName());
+  addType(std::move(TypeEntry), BTy);
+}
+
+/// Handle subprogram or subroutine types.
+void BTFDebug::visitSubroutineType(
+    const DISubroutineType *STy, bool ForSubprog,
+    const std::unordered_map<uint32_t, StringRef> &FuncArgNames,
+    uint32_t &TypeId) {
+  DITypeRefArray Elements = STy->getTypeArray();
+  uint32_t VLen = Elements.size() - 1;
+  if (VLen > BTF::MAX_VLEN)
+    return;
+
+  // Subprogram has a valid non-zero-length name, and the pointee of
+  // a function pointer has an empty name. The subprogram type will
+  // not be added to DIToIdMap as it should not be referenced by
+  // any other types.
+  auto TypeEntry = llvm::make_unique<BTFTypeFuncProto>(STy, VLen, FuncArgNames);
+  if (ForSubprog)
+    TypeId = addType(std::move(TypeEntry)); // For subprogram
+  else
+    addType(std::move(TypeEntry), STy); // For func ptr
+
+  // Visit return type and func arg types.
+  for (const auto Element : Elements) {
+    visitTypeEntry(Element.resolve());
+  }
+}
+
+/// Handle structure/union types.
+void BTFDebug::visitStructType(const DICompositeType *CTy, bool IsStruct) {
+  const DINodeArray Elements = CTy->getElements();
+  uint32_t VLen = Elements.size();
+  if (VLen > BTF::MAX_VLEN)
+    return;
+
+  // Check whether we have any bitfield members.
+  bool HasBitField = false;
+  for (const auto *Element : Elements) {
+    auto E = cast<DIDerivedType>(Element);
+    if (E->isBitField()) {
+      HasBitField = true;
+      break;
+    }
+  }
+
+  auto TypeEntry =
+      llvm::make_unique<BTFTypeStruct>(CTy, IsStruct, HasBitField, VLen);
+  addType(std::move(TypeEntry), CTy);
+
+  // Visit all struct members.
+  for (const auto *Element : Elements)
+    visitTypeEntry(cast<DIDerivedType>(Element));
+}
+
+void BTFDebug::visitArrayType(const DICompositeType *CTy) {
+  auto TypeEntry = llvm::make_unique<BTFTypeArray>(CTy);
+  addType(std::move(TypeEntry), CTy);
+
+  // The IR does not have a type for the array index, but BTF requires one,
+  // so create an array index type if it does not exist yet.
+  if (!ArrayIndexTypeId) {
+    auto TypeEntry = llvm::make_unique<BTFTypeInt>(dwarf::DW_ATE_unsigned, 32,
+                                                   0, "__ARRAY_SIZE_TYPE__");
+    ArrayIndexTypeId = addType(std::move(TypeEntry));
+  }
+
+  // Visit array element type.
+  visitTypeEntry(CTy->getBaseType().resolve());
+}
+
+void BTFDebug::visitEnumType(const DICompositeType *CTy) {
+  DINodeArray Elements = CTy->getElements();
+  uint32_t VLen = Elements.size();
+  if (VLen > BTF::MAX_VLEN)
+    return;
+
+  auto TypeEntry = llvm::make_unique<BTFTypeEnum>(CTy, VLen);
+  addType(std::move(TypeEntry), CTy);
+  // No need to visit base type as BTF does not encode it.
+}
+
+/// Handle structure/union forward declarations.
+void BTFDebug::visitFwdDeclType(const DICompositeType *CTy, bool IsUnion) {
+  auto TypeEntry = llvm::make_unique<BTFTypeFwd>(CTy->getName(), IsUnion);
+  addType(std::move(TypeEntry), CTy);
+}
+
+/// Handle structure, union, array and enumeration types.
+void BTFDebug::visitCompositeType(const DICompositeType *CTy) {
+  auto Tag = CTy->getTag();
+  if (Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type) {
+    // Handle forward declaration differently as it does not have members.
+    if (CTy->isForwardDecl())
+      visitFwdDeclType(CTy, Tag == dwarf::DW_TAG_union_type);
+    else
+      visitStructType(CTy, Tag == dwarf::DW_TAG_structure_type);
+  } else if (Tag == dwarf::DW_TAG_array_type)
+    visitArrayType(CTy);
+  else if (Tag == dwarf::DW_TAG_enumeration_type)
+    visitEnumType(CTy);
+}
+
+/// Handle pointer, typedef, const, volatile, restrict and member types.
+void BTFDebug::visitDerivedType(const DIDerivedType *DTy) {
+  unsigned Tag = DTy->getTag();
+
+  if (Tag == dwarf::DW_TAG_pointer_type || Tag == dwarf::DW_TAG_typedef ||
+      Tag == dwarf::DW_TAG_const_type || Tag == dwarf::DW_TAG_volatile_type ||
+      Tag == dwarf::DW_TAG_restrict_type) {
+    auto TypeEntry = llvm::make_unique<BTFTypeDerived>(DTy, Tag);
+    addType(std::move(TypeEntry), DTy);
+  } else if (Tag != dwarf::DW_TAG_member) {
+    return;
+  }
+
+  // Visit base type of pointer, typedef, const, volatile, restrict or
+  // struct/union member.
+  visitTypeEntry(DTy->getBaseType().resolve());
+}
+
+void BTFDebug::visitTypeEntry(const DIType *Ty) {
+  if (!Ty || DIToIdMap.find(Ty) != DIToIdMap.end())
+    return;
+
+  uint32_t TypeId;
+  if (const auto *BTy = dyn_cast<DIBasicType>(Ty))
+    visitBasicType(BTy);
+  else if (const auto *STy = dyn_cast<DISubroutineType>(Ty))
+    visitSubroutineType(STy, false, std::unordered_map<uint32_t, StringRef>(),
+                        TypeId);
+  else if (const auto *CTy = dyn_cast<DICompositeType>(Ty))
+    visitCompositeType(CTy);
+  else if (const auto *DTy = dyn_cast<DIDerivedType>(Ty))
+    visitDerivedType(DTy);
+  else
+    llvm_unreachable("Unknown DIType");
+}
+
+/// Read file contents from the actual file or from the source.
+std::string BTFDebug::populateFileContent(const DISubprogram *SP) {
+  auto File = SP->getFile();
+  std::string FileName;
+
+  if (File->getDirectory().size())
+    FileName = File->getDirectory().str() + "/" + File->getFilename().str();
+  else
+    FileName = File->getFilename();
+
+  // No need to populate the contents if they have already been populated.
+  if (FileContent.find(FileName) != FileContent.end())
+    return FileName;
+
+  std::vector<std::string> Content;
+  std::string Line;
+  Content.push_back(Line); // Line 0 for empty string
+
+  auto Source = File->getSource();
+  if (Source) {
+    std::istringstream InputString(Source.getValue());
+    while (std::getline(InputString, Line))
+      Content.push_back(Line);
+  } else {
+    std::ifstream InputFile(FileName);
+    while (std::getline(InputFile, Line))
+      Content.push_back(Line);
+  }
+
+  FileContent[FileName] = Content;
+  return FileName;
+}
+
+void BTFDebug::constructLineInfo(const DISubprogram *SP, MCSymbol *Label,
+                                 uint32_t Line, uint32_t Column) {
+  std::string FileName = populateFileContent(SP);
+  BTFLineInfo LineInfo;
+
+  LineInfo.Label = Label;
+  LineInfo.FileNameOff = addString(FileName);
+  // If file content is not available, let LineOff = 0.
+  if (Line < FileContent[FileName].size())
+    LineInfo.LineOff = addString(FileContent[FileName][Line]);
+  else
+    LineInfo.LineOff = 0;
+  LineInfo.LineNum = Line;
+  LineInfo.ColumnNum = Column;
+  LineInfoTable[SecNameOff].push_back(LineInfo);
+}
+
+void BTFDebug::emitCommonHeader() {
+  OS.AddComment("0x" + Twine::utohexstr(BTF::MAGIC));
+  OS.EmitIntValue(BTF::MAGIC, 2);
+  OS.EmitIntValue(BTF::VERSION, 1);
+  OS.EmitIntValue(0, 1);
+}
+
+void BTFDebug::emitBTFSection() {
+  MCContext &Ctx = OS.getContext();
+  OS.SwitchSection(Ctx.getELFSection(".BTF", ELF::SHT_PROGBITS, 0));
+
+  // Emit header.
+  emitCommonHeader();
+  OS.EmitIntValue(BTF::HeaderSize, 4);
+
+  uint32_t TypeLen = 0, StrLen;
+  for (const auto &TypeEntry : TypeEntries)
+    TypeLen += TypeEntry->getSize();
+  StrLen = StringTable.getSize();
+
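+  // Header fields, in order: TypeOff (0), TypeLen, StrOff (equal to
+  // TypeLen, since the strings immediately follow the types), StrLen.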
+  OS.EmitIntValue(0, 4);
+  OS.EmitIntValue(TypeLen, 4);
+  OS.EmitIntValue(TypeLen, 4);
+  OS.EmitIntValue(StrLen, 4);
+
+  // Emit type table.
+  for (const auto &TypeEntry : TypeEntries)
+    TypeEntry->emitType(OS);
+
+  // Emit string table.
+  uint32_t StringOffset = 0;
+  for (const auto &S : StringTable.getTable()) {
+    OS.AddComment("string offset=" + std::to_string(StringOffset));
+    OS.EmitBytes(S);
+    OS.EmitBytes(StringRef("\0", 1));
+    StringOffset += S.size() + 1;
+  }
+}
+
+void BTFDebug::emitBTFExtSection() {
+  MCContext &Ctx = OS.getContext();
+  OS.SwitchSection(Ctx.getELFSection(".BTF.ext", ELF::SHT_PROGBITS, 0));
+
+  // Emit header.
+  emitCommonHeader();
+  OS.EmitIntValue(BTF::ExtHeaderSize, 4);
+
+  // Account for FuncInfo/LineInfo record size as well.
+  uint32_t FuncLen = 4, LineLen = 4;
+  for (const auto &FuncSec : FuncInfoTable) {
+    FuncLen += BTF::SecFuncInfoSize;
+    FuncLen += FuncSec.second.size() * BTF::BPFFuncInfoSize;
+  }
+  for (const auto &LineSec : LineInfoTable) {
+    LineLen += BTF::SecLineInfoSize;
+    LineLen += LineSec.second.size() * BTF::BPFLineInfoSize;
+  }
+
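+  // ExtHeader fields, in order: FuncInfoOff (0), FuncInfoLen, LineInfoOff
+  // (equal to FuncInfoLen, since line info immediately follows func info),
+  // LineInfoLen.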
+  OS.EmitIntValue(0, 4);
+  OS.EmitIntValue(FuncLen, 4);
+  OS.EmitIntValue(FuncLen, 4);
+  OS.EmitIntValue(LineLen, 4);
+
+  // Emit func_info table.
+  OS.AddComment("FuncInfo");
+  OS.EmitIntValue(BTF::BPFFuncInfoSize, 4);
+  for (const auto &FuncSec : FuncInfoTable) {
+    OS.AddComment("FuncInfo section string offset=" +
+                  std::to_string(FuncSec.first));
+    OS.EmitIntValue(FuncSec.first, 4);
+    OS.EmitIntValue(FuncSec.second.size(), 4);
+    for (const auto &FuncInfo : FuncSec.second) {
+      Asm->EmitLabelReference(FuncInfo.Label, 4);
+      OS.EmitIntValue(FuncInfo.TypeId, 4);
+    }
+  }
+
+  // Emit line_info table.
+  OS.AddComment("LineInfo");
+  OS.EmitIntValue(BTF::BPFLineInfoSize, 4);
+  for (const auto &LineSec : LineInfoTable) {
+    OS.AddComment("LineInfo section string offset=" +
+                  std::to_string(LineSec.first));
+    OS.EmitIntValue(LineSec.first, 4);
+    OS.EmitIntValue(LineSec.second.size(), 4);
+    for (const auto &LineInfo : LineSec.second) {
+      Asm->EmitLabelReference(LineInfo.Label, 4);
+      OS.EmitIntValue(LineInfo.FileNameOff, 4);
+      OS.EmitIntValue(LineInfo.LineOff, 4);
+      OS.AddComment("Line " + std::to_string(LineInfo.LineNum) + " Col " +
+                    std::to_string(LineInfo.ColumnNum));
+      OS.EmitIntValue(LineInfo.LineNum << 10 | LineInfo.ColumnNum, 4);
+    }
+  }
+}
+
+void BTFDebug::beginFunctionImpl(const MachineFunction *MF) {
+  auto *SP = MF->getFunction().getSubprogram();
+  auto *Unit = SP->getUnit();
+
+  if (Unit->getEmissionKind() == DICompileUnit::NoDebug) {
+    SkipInstruction = true;
+    return;
+  }
+  SkipInstruction = false;
+
+  // Collect all types locally referenced in this function.
+  // Use RetainedNodes so we can collect all argument names
+  // even if the argument is not used.
+  std::unordered_map<uint32_t, StringRef> FuncArgNames;
+  for (const DINode *DN : SP->getRetainedNodes()) {
+    if (const auto *DV = dyn_cast<DILocalVariable>(DN)) {
+      visitTypeEntry(DV->getType().resolve());
+
+      // Collect function arguments for subprogram func type.
+      uint32_t Arg = DV->getArg();
+      if (Arg)
+        FuncArgNames[Arg] = DV->getName();
+    }
+  }
+
+  // Construct subprogram func proto type.
+  uint32_t ProtoTypeId;
+  visitSubroutineType(SP->getType(), true, FuncArgNames, ProtoTypeId);
+
+  // Construct subprogram func type.
+  auto FuncTypeEntry =
+      llvm::make_unique<BTFTypeFunc>(SP->getName(), ProtoTypeId);
+  uint32_t FuncTypeId = addType(std::move(FuncTypeEntry));
+
+  // Construct funcinfo and the first lineinfo for the function.
+  MCSymbol *FuncLabel = Asm->getFunctionBegin();
+  BTFFuncInfo FuncInfo;
+  FuncInfo.Label = FuncLabel;
+  FuncInfo.TypeId = FuncTypeId;
+  if (FuncLabel->isInSection()) {
+    MCSection &Section = FuncLabel->getSection();
+    const MCSectionELF *SectionELF = dyn_cast<MCSectionELF>(&Section);
+    assert(SectionELF && "Null section for Function Label");
+    SecNameOff = addString(SectionELF->getSectionName());
+  } else {
+    SecNameOff = addString(".text");
+  }
+  FuncInfoTable[SecNameOff].push_back(FuncInfo);
+}
+
+void BTFDebug::endFunctionImpl(const MachineFunction *MF) {
+  SkipInstruction = false;
+  LineInfoGenerated = false;
+  SecNameOff = 0;
+}
+
+void BTFDebug::beginInstruction(const MachineInstr *MI) {
+  DebugHandlerBase::beginInstruction(MI);
+
+  if (SkipInstruction || MI->isMetaInstruction() ||
+      MI->getFlag(MachineInstr::FrameSetup))
+    return;
+
+  if (MI->isInlineAsm()) {
+    // Count the number of register definitions to find the asm string.
+    unsigned NumDefs = 0;
+    for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
+         ++NumDefs)
+      ;
+
+    // Skip this inline asm instruction if the asmstr is empty.
+    const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
+    if (AsmStr[0] == 0)
+      return;
+  }
+
+  // Skip this instruction if no DebugLoc or the DebugLoc
+  // is the same as the previous instruction.
+  const DebugLoc &DL = MI->getDebugLoc();
+  if (!DL || PrevInstLoc == DL) {
+    // This instruction will be skipped; if no LineInfo has been
+    // generated yet, construct one based on the function signature.
+    if (!LineInfoGenerated) {
+      auto *S = MI->getMF()->getFunction().getSubprogram();
+      MCSymbol *FuncLabel = Asm->getFunctionBegin();
+      constructLineInfo(S, FuncLabel, S->getLine(), 0);
+      LineInfoGenerated = true;
+    }
+
+    return;
+  }
+
+  // Create a temporary label to remember the insn for lineinfo.
+  MCSymbol *LineSym = OS.getContext().createTempSymbol();
+  OS.EmitLabel(LineSym);
+
+  // Construct the lineinfo.
+  auto SP = DL.get()->getScope()->getSubprogram();
+  constructLineInfo(SP, LineSym, DL.getLine(), DL.getCol());
+
+  LineInfoGenerated = true;
+  PrevInstLoc = DL;
+}
+
+void BTFDebug::endModule() {
+  // Collect all types referenced by globals.
+  const Module *M = MMI->getModule();
+  for (const DICompileUnit *CUNode : M->debug_compile_units()) {
+    for (const auto *GVE : CUNode->getGlobalVariables()) {
+      DIGlobalVariable *GV = GVE->getVariable();
+      visitTypeEntry(GV->getType().resolve());
+    }
+  }
+
+  // Complete BTF type cross references.
+  for (const auto &TypeEntry : TypeEntries)
+    TypeEntry->completeType(*this);
+
+  // Emit BTF sections.
+  emitBTFSection();
+  emitBTFExtSection();
+}
diff --git a/lib/Target/BPF/BTFDebug.h b/lib/Target/BPF/BTFDebug.h
new file mode 100644
index 0000000..afd4ed8
--- /dev/null
+++ b/lib/Target/BPF/BTFDebug.h
@@ -0,0 +1,285 @@
+//===- BTFDebug.h -----------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains support for writing BTF debug info.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BTFDEBUG_H
+#define LLVM_LIB_TARGET_BPF_BTFDEBUG_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/CodeGen/DebugHandlerBase.h"
+#include <unordered_map>
+#include "BTF.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class BTFDebug;
+class DIType;
+class MCStreamer;
+class MCSymbol;
+class MachineFunction;
+
+/// The base class for BTF type generation.
+class BTFTypeBase {
+protected:
+  uint8_t Kind;
+  uint32_t Id;
+  struct BTF::CommonType BTFType;
+
+public:
+  virtual ~BTFTypeBase() = default;
+  void setId(uint32_t Id) { this->Id = Id; }
+  uint32_t getId() { return Id; }
+  uint32_t roundupToBytes(uint32_t NumBits) { return (NumBits + 7) >> 3; }
+  /// Get the size of this BTF type entry.
+  virtual uint32_t getSize() { return BTF::CommonTypeSize; }
+  /// Complete BTF type generation after all related DebugInfo types
+  /// have been visited so that their BTF type ids are available
+  /// for cross reference.
+  virtual void completeType(BTFDebug &BDebug) {}
+  /// Emit types for this BTF type entry.
+  virtual void emitType(MCStreamer &OS);
+};
+
+/// Handle several derived types, including pointer, const,
+/// volatile, typedef and restrict.
+class BTFTypeDerived : public BTFTypeBase {
+  const DIDerivedType *DTy;
+
+public:
+  BTFTypeDerived(const DIDerivedType *Ty, unsigned Tag);
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// Handle struct or union forward declaration.
+class BTFTypeFwd : public BTFTypeBase {
+  StringRef Name;
+
+public:
+  BTFTypeFwd(StringRef Name, bool IsUnion);
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// Handle int type.
+class BTFTypeInt : public BTFTypeBase {
+  StringRef Name;
+  uint32_t IntVal; ///< Encoding, offset, bits
+
+public:
+  BTFTypeInt(uint32_t Encoding, uint32_t SizeInBits, uint32_t OffsetInBits,
+             StringRef TypeName);
+  uint32_t getSize() { return BTFTypeBase::getSize() + sizeof(uint32_t); }
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// Handle enumeration type.
+class BTFTypeEnum : public BTFTypeBase {
+  const DICompositeType *ETy;
+  std::vector<struct BTF::BTFEnum> EnumValues;
+
+public:
+  BTFTypeEnum(const DICompositeType *ETy, uint32_t NumValues);
+  uint32_t getSize() {
+    return BTFTypeBase::getSize() + EnumValues.size() * BTF::BTFEnumSize;
+  }
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// Handle array type.
+class BTFTypeArray : public BTFTypeBase {
+  const DICompositeType *ATy;
+  struct BTF::BTFArray ArrayInfo;
+
+public:
+  BTFTypeArray(const DICompositeType *ATy);
+  uint32_t getSize() { return BTFTypeBase::getSize() + BTF::BTFArraySize; }
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// Handle struct/union type.
+class BTFTypeStruct : public BTFTypeBase {
+  const DICompositeType *STy;
+  bool HasBitField;
+  std::vector<struct BTF::BTFMember> Members;
+
+public:
+  BTFTypeStruct(const DICompositeType *STy, bool IsStruct, bool HasBitField,
+                uint32_t NumMembers);
+  uint32_t getSize() {
+    return BTFTypeBase::getSize() + Members.size() * BTF::BTFMemberSize;
+  }
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// Handle function pointer.
+class BTFTypeFuncProto : public BTFTypeBase {
+  const DISubroutineType *STy;
+  std::unordered_map<uint32_t, StringRef> FuncArgNames;
+  std::vector<struct BTF::BTFParam> Parameters;
+
+public:
+  BTFTypeFuncProto(const DISubroutineType *STy, uint32_t NumParams,
+                   const std::unordered_map<uint32_t, StringRef> &FuncArgNames);
+  uint32_t getSize() {
+    return BTFTypeBase::getSize() + Parameters.size() * BTF::BTFParamSize;
+  }
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// Handle subprogram.
+class BTFTypeFunc : public BTFTypeBase {
+  StringRef Name;
+
+public:
+  BTFTypeFunc(StringRef FuncName, uint32_t ProtoTypeId);
+  uint32_t getSize() { return BTFTypeBase::getSize(); }
+  void completeType(BTFDebug &BDebug);
+  void emitType(MCStreamer &OS);
+};
+
+/// String table.
+class BTFStringTable {
+  /// String table size in bytes.
+  uint32_t Size;
+  /// A mapping from string table offset to the index
+  /// into Table. It is used to avoid putting
+  /// duplicate strings in the table.
+  std::unordered_map<uint32_t, uint32_t> OffsetToIdMap;
+  /// A vector of strings to represent the string table.
+  std::vector<std::string> Table;
+
+public:
+  BTFStringTable() : Size(0) {}
+  uint32_t getSize() { return Size; }
+  std::vector<std::string> &getTable() { return Table; }
+  /// Add a string to the string table and return its offset
+  /// in the table.
+  uint32_t addString(StringRef S);
+};
+
+/// Represent one func and its type id.
+struct BTFFuncInfo {
+  const MCSymbol *Label; ///< Func MCSymbol
+  uint32_t TypeId;       ///< Type id referring to .BTF type section
+};
+
+/// Represent one line info.
+struct BTFLineInfo {
+  MCSymbol *Label;      ///< MCSymbol identifying insn for the lineinfo
+  uint32_t FileNameOff; ///< file name offset in the .BTF string table
+  uint32_t LineOff;     ///< line offset in the .BTF string table
+  uint32_t LineNum;     ///< the line number
+  uint32_t ColumnNum;   ///< the column number
+};
+
+/// Collect and emit BTF information.
+class BTFDebug : public DebugHandlerBase {
+  MCStreamer &OS;
+  bool SkipInstruction;
+  bool LineInfoGenerated;
+  uint32_t SecNameOff;
+  uint32_t ArrayIndexTypeId;
+  BTFStringTable StringTable;
+  std::vector<std::unique_ptr<BTFTypeBase>> TypeEntries;
+  std::unordered_map<const DIType *, uint32_t> DIToIdMap;
+  std::unordered_map<uint32_t, std::vector<BTFFuncInfo>> FuncInfoTable;
+  std::unordered_map<uint32_t, std::vector<BTFLineInfo>> LineInfoTable;
+  StringMap<std::vector<std::string>> FileContent;
+
+  /// Add types to TypeEntries.
+  /// @{
+  /// Add types to TypeEntries and DIToIdMap.
+  void addType(std::unique_ptr<BTFTypeBase> TypeEntry, const DIType *Ty);
+  /// Add types to TypeEntries only and return type id.
+  uint32_t addType(std::unique_ptr<BTFTypeBase> TypeEntry);
+  /// @}
+
+  /// IR type visiting functions.
+  /// @{
+  void visitTypeEntry(const DIType *Ty);
+  void visitBasicType(const DIBasicType *BTy);
+  void visitSubroutineType(
+      const DISubroutineType *STy, bool ForSubprog,
+      const std::unordered_map<uint32_t, StringRef> &FuncArgNames,
+      uint32_t &TypeId);
+  void visitFwdDeclType(const DICompositeType *CTy, bool IsUnion);
+  void visitCompositeType(const DICompositeType *CTy);
+  void visitStructType(const DICompositeType *STy, bool IsStruct);
+  void visitArrayType(const DICompositeType *ATy);
+  void visitEnumType(const DICompositeType *ETy);
+  void visitDerivedType(const DIDerivedType *DTy);
+  /// @}
+
+  /// Get the file content for the subprogram. Certain lines of the file
+  /// may later be put into the string table and referenced by line info.
+  std::string populateFileContent(const DISubprogram *SP);
+
+  /// Construct a line info.
+  void constructLineInfo(const DISubprogram *SP, MCSymbol *Label, uint32_t Line,
+                         uint32_t Column);
+
+  /// Emit common header of .BTF and .BTF.ext sections.
+  void emitCommonHeader();
+
+  /// Emit the .BTF section.
+  void emitBTFSection();
+
+  /// Emit the .BTF.ext section.
+  void emitBTFExtSection();
+
+protected:
+  /// Gather pre-function debug information.
+  void beginFunctionImpl(const MachineFunction *MF) override;
+
+  /// Post process after all instructions in this function are processed.
+  void endFunctionImpl(const MachineFunction *MF) override;
+
+public:
+  BTFDebug(AsmPrinter *AP);
+
+  /// Get the special array index type id.
+  uint32_t getArrayIndexTypeId() {
+    assert(ArrayIndexTypeId);
+    return ArrayIndexTypeId;
+  }
+
+  /// Add string to the string table.
+  size_t addString(StringRef S) { return StringTable.addString(S); }
+
+  /// Get the type id for a particular DIType.
+  uint32_t getTypeId(const DIType *Ty) {
+    assert(Ty && "Invalid null Type");
+    assert(DIToIdMap.find(Ty) != DIToIdMap.end() &&
+           "DIType not added in the BDIToIdMap");
+    return DIToIdMap[Ty];
+  }
+
+  void setSymbolSize(const MCSymbol *Symbol, uint64_t Size) override {}
+
+  /// Process beginning of an instruction.
+  void beginInstruction(const MachineInstr *MI) override;
+
+  /// Complete all the types and emit the BTF sections.
+  void endModule() override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/BPF/CMakeLists.txt b/lib/Target/BPF/CMakeLists.txt
index 5e2ae53..c18adf8 100644
--- a/lib/Target/BPF/CMakeLists.txt
+++ b/lib/Target/BPF/CMakeLists.txt
@@ -25,6 +25,7 @@
   BPFTargetMachine.cpp
   BPFMIPeephole.cpp
   BPFMIChecking.cpp
+  BTFDebug.cpp
   )
 
 add_subdirectory(AsmParser)
diff --git a/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
index 134e890..32e79d0 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
@@ -12,6 +12,7 @@
 #include "llvm/MC/MCELFObjectWriter.h"
 #include "llvm/MC/MCFixup.h"
 #include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCValue.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <cstdint>
 
@@ -50,6 +51,23 @@
   case FK_Data_8:
     return ELF::R_BPF_64_64;
   case FK_Data_4:
+    // .BTF.ext generates FK_Data_4 relocations for
+    // insn offsets by creating temporary labels.
+    // The insn offset is within the code section and
+    // has already been resolved by applyFixup(). No
+    // further relocation is needed.
+    if (const MCSymbolRefExpr *A = Target.getSymA()) {
+      if (A->getSymbol().isTemporary()) {
+        MCSection &Section = A->getSymbol().getSection();
+        const MCSectionELF *SectionELF = dyn_cast<MCSectionELF>(&Section);
+        assert(SectionELF && "Null section for reloc symbol");
+
+        // The reloc symbol should be in a text section.
+        unsigned Flags = SectionELF->getFlags();
+        if ((Flags & ELF::SHF_ALLOC) && (Flags & ELF::SHF_EXECINSTR))
+          return ELF::R_BPF_NONE;
+      }
+    }
     return ELF::R_BPF_64_32;
   }
 }
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index b5be507..1edf3e4 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1359,6 +1359,11 @@
   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
   setOperationAction(ISD::BSWAP, MVT::i64, Legal);
 
+  setOperationAction(ISD::FSHL, MVT::i32, Legal);
+  setOperationAction(ISD::FSHL, MVT::i64, Legal);
+  setOperationAction(ISD::FSHR, MVT::i32, Legal);
+  setOperationAction(ISD::FSHR, MVT::i64, Legal);
+
   for (unsigned IntExpOp :
        {ISD::SDIV,      ISD::UDIV,      ISD::SREM,      ISD::UREM,
         ISD::SDIVREM,   ISD::UDIVREM,   ISD::ROTL,      ISD::ROTR,
@@ -1538,8 +1543,10 @@
   // Subtarget-specific operation actions.
   //
   if (Subtarget.hasV60Ops()) {
-    setOperationAction(ISD::ROTL, MVT::i32, Custom);
-    setOperationAction(ISD::ROTL, MVT::i64, Custom);
+    setOperationAction(ISD::ROTL, MVT::i32, Legal);
+    setOperationAction(ISD::ROTL, MVT::i64, Legal);
+    setOperationAction(ISD::ROTR, MVT::i32, Legal);
+    setOperationAction(ISD::ROTR, MVT::i64, Legal);
   }
   if (Subtarget.hasV66Ops()) {
     setOperationAction(ISD::FADD, MVT::f64, Legal);
@@ -1768,11 +1775,8 @@
     // The intrinsic function call is of the form { ElTy, i8* }
     // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
     // should be derived from ElTy.
-    PointerType *PtrTy = I.getCalledFunction()
-                             ->getReturnType()
-                             ->getContainedType(0)
-                             ->getPointerTo();
-    Info.memVT = MVT::getVT(PtrTy->getElementType());
+    Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
+    Info.memVT = MVT::getVT(ElTy);
     llvm::Value *BasePtrVal = I.getOperand(0);
     Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
     // The offset value comes through Modifier register. For now, assume the
diff --git a/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 702d68f..985f41f 100644
--- a/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -1975,7 +1975,8 @@
   // If the loop iterates a fixed number of times, we can refine the access
   // size to be exactly the size of the memset, which is (BECount+1)*StoreSize
   if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
-    AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
+    AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
+                                       StoreSize);
 
   // TODO: For this to be really effective, we have to dive into the pointer
   // operand in the store.  Store to &A[i] of 100 will always return may alias
diff --git a/lib/Target/Hexagon/HexagonPatterns.td b/lib/Target/Hexagon/HexagonPatterns.td
index 0a7f578..8917756 100644
--- a/lib/Target/Hexagon/HexagonPatterns.td
+++ b/lib/Target/Hexagon/HexagonPatterns.td
@@ -177,6 +177,11 @@
   return CurDAG->getTargetConstant(V-32, SDLoc(N), MVT::i32);
 }]>;
 
+class Subi<int From>: SDNodeXForm<imm,
+  "int32_t V = " # From # " - N->getSExtValue();" #
+  "return CurDAG->getTargetConstant(V, SDLoc(N), MVT::i32);"
+>;
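+// For example, (Subi<32> 5) produces 27, so a rotate-right by 5 can be
+// rewritten as a rotate-left by 27.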
+
 def Log2_32: SDNodeXForm<imm, [{
   uint32_t V = N->getZExtValue();
   return CurDAG->getTargetConstant(Log2_32(V), SDLoc(N), MVT::i32);
@@ -995,10 +1000,90 @@
 def: OpR_RR_pat<S2_lsr_r_p, Srl, i64, I64, I32>;
 def: OpR_RR_pat<S2_asl_r_p, Shl, i64, I64, I32>;
 
-let Predicates = [HasV60] in {
+// Funnel shifts.
+def IsMul8_U3: PatLeaf<(i32 imm), [{
+  uint64_t V = N->getZExtValue();
+  return V % 8 == 0 && isUInt<3>(V / 8);
+}]>;
+
+def Divu8: SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getZExtValue() / 8, SDLoc(N), MVT::i32);
+}]>;
+
+// Funnel shift-left.
+def FShl32i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
+  (HiReg (S2_asl_i_p (Combinew $Rs, $Rt), $S))>;
+def FShl32r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
+  (HiReg (S2_asl_r_p (Combinew $Rs, $Rt), $Ru))>;
+
+def FShl64i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
+  (S2_lsr_i_p_or (S2_asl_i_p $Rt, $S),  $Rs, (Subi<64> $S))>;
+def FShl64r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
+  (S2_lsr_r_p_or (S2_asl_r_p $Rt, $Ru), $Rs, (A2_subri 64, $Ru))>;
+
+// Combined SDNodeXForm: (Divu8 (Subi<64> $S))
+def Divu64_8: SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant((64 - N->getSExtValue()) / 8,
+                                   SDLoc(N), MVT::i32);
+}]>;
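+// For example, $S = 16 yields a valign byte amount of (64 - 16) / 8 = 6.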
+
+// Special cases:
+let AddedComplexity = 100 in {
+  def: Pat<(fshl I32:$Rs, I32:$Rt, (i32 16)),
+           (A2_combine_hl I32:$Rs, I32:$Rt)>;
+  def: Pat<(fshl I64:$Rs, I64:$Rt, IsMul8_U3:$S),
+           (S2_valignib I64:$Rs, I64:$Rt, (Divu64_8 $S))>;
+}
+
+let Predicates = [HasV60], AddedComplexity = 50 in {
   def: OpR_RI_pat<S6_rol_i_r, Rol, i32, I32, u5_0ImmPred>;
   def: OpR_RI_pat<S6_rol_i_p, Rol, i64, I64, u6_0ImmPred>;
 }
+let AddedComplexity = 30 in {
+  def: Pat<(rotl I32:$Rs, u5_0ImmPred:$S),          (FShl32i $Rs, $Rs, imm:$S)>;
+  def: Pat<(rotl I64:$Rs, u6_0ImmPred:$S),          (FShl64i $Rs, $Rs, imm:$S)>;
+  def: Pat<(fshl I32:$Rs, I32:$Rt, u5_0ImmPred:$S), (FShl32i $Rs, $Rt, imm:$S)>;
+  def: Pat<(fshl I64:$Rs, I64:$Rt, u6_0ImmPred:$S), (FShl64i $Rs, $Rt, imm:$S)>;
+}
+def: Pat<(rotl I32:$Rs, I32:$Rt),           (FShl32r $Rs, $Rs, $Rt)>;
+def: Pat<(rotl I64:$Rs, I32:$Rt),           (FShl64r $Rs, $Rs, $Rt)>;
+def: Pat<(fshl I32:$Rs, I32:$Rt, I32:$Ru),  (FShl32r $Rs, $Rt, $Ru)>;
+def: Pat<(fshl I64:$Rs, I64:$Rt, I32:$Ru),  (FShl64r $Rs, $Rt, $Ru)>;
+
+// Funnel shift-right.
+def FShr32i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
+  (LoReg (S2_lsr_i_p (Combinew $Rs, $Rt), $S))>;
+def FShr32r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
+  (LoReg (S2_lsr_r_p (Combinew $Rs, $Rt), $Ru))>;
+
+def FShr64i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
+  (S2_asl_i_p_or (S2_lsr_i_p $Rt, $S),  $Rs, (Subi<64> $S))>;
+def FShr64r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
+  (S2_asl_r_p_or (S2_lsr_r_p $Rt, $Ru), $Rs, (A2_subri 64, $Ru))>;
+
+// Special cases:
+let AddedComplexity = 100 in {
+  def: Pat<(fshr I32:$Rs, I32:$Rt, (i32 16)),
+           (A2_combine_hl I32:$Rs, I32:$Rt)>;
+  def: Pat<(fshr I64:$Rs, I64:$Rt, IsMul8_U3:$S),
+           (S2_valignib I64:$Rs, I64:$Rt, (Divu8 $S))>;
+}
+
+let Predicates = [HasV60], AddedComplexity = 50 in {
+  def: Pat<(rotr I32:$Rs, u5_0ImmPred:$S), (S6_rol_i_r I32:$Rs, (Subi<32> $S))>;
+  def: Pat<(rotr I64:$Rs, u6_0ImmPred:$S), (S6_rol_i_p I64:$Rs, (Subi<64> $S))>;
+}
+let AddedComplexity = 30 in {
+  def: Pat<(rotr I32:$Rs, u5_0ImmPred:$S),          (FShr32i $Rs, $Rs, imm:$S)>;
+  def: Pat<(rotr I64:$Rs, u6_0ImmPred:$S),          (FShr64i $Rs, $Rs, imm:$S)>;
+  def: Pat<(fshr I32:$Rs, I32:$Rt, u5_0ImmPred:$S), (FShr32i $Rs, $Rt, imm:$S)>;
+  def: Pat<(fshr I64:$Rs, I64:$Rt, u6_0ImmPred:$S), (FShr64i $Rs, $Rt, imm:$S)>;
+}
+def: Pat<(rotr I32:$Rs, I32:$Rt),           (FShr32r $Rs, $Rs, $Rt)>;
+def: Pat<(rotr I64:$Rs, I32:$Rt),           (FShr64r $Rs, $Rs, $Rt)>;
+def: Pat<(fshr I32:$Rs, I32:$Rt, I32:$Ru),  (FShr32r $Rs, $Rt, $Ru)>;
+def: Pat<(fshr I64:$Rs, I64:$Rt, I32:$Ru),  (FShr64r $Rs, $Rt, $Ru)>;
+
 
 def: Pat<(sra (add (sra I32:$Rs, u5_0ImmPred:$u5), 1), (i32 1)),
          (S2_asr_i_r_rnd I32:$Rs, imm:$u5)>;
@@ -1170,6 +1255,19 @@
 def: Pat<(shl V4I16:$b, (v4i16 (HexagonVSPLAT u4_0ImmPred:$c))),
          (S2_asl_i_vh V4I16:$b, imm:$c)>;
 
+def: Pat<(HexagonVASR V2I16:$Rs, u4_0ImmPred:$S),
+         (LoReg (S2_asr_i_vh (ToAext64 $Rs), imm:$S))>;
+def: Pat<(HexagonVASL V2I16:$Rs, u4_0ImmPred:$S),
+         (LoReg (S2_asl_i_vh (ToAext64 $Rs), imm:$S))>;
+def: Pat<(HexagonVLSR V2I16:$Rs, u4_0ImmPred:$S),
+         (LoReg (S2_lsr_i_vh (ToAext64 $Rs), imm:$S))>;
+def: Pat<(HexagonVASR V2I16:$Rs, I32:$Rt),
+         (LoReg (S2_asr_i_vh (ToAext64 $Rs), I32:$Rt))>;
+def: Pat<(HexagonVASL V2I16:$Rs, I32:$Rt),
+         (LoReg (S2_asl_i_vh (ToAext64 $Rs), I32:$Rt))>;
+def: Pat<(HexagonVLSR V2I16:$Rs, I32:$Rt),
+         (LoReg (S2_lsr_i_vh (ToAext64 $Rs), I32:$Rt))>;
+
 
 // --(9) Arithmetic/bitwise ----------------------------------------------
 //
diff --git a/lib/Target/LLVMBuild.txt b/lib/Target/LLVMBuild.txt
index 0d899a9..0ed7e9f 100644
--- a/lib/Target/LLVMBuild.txt
+++ b/lib/Target/LLVMBuild.txt
@@ -30,7 +30,6 @@
  MSP430
  NVPTX
  Mips
- Nios2
  PowerPC
  RISCV
  Sparc
diff --git a/lib/Target/Lanai/LanaiISelLowering.cpp b/lib/Target/Lanai/LanaiISelLowering.cpp
index 045a897..0411704 100644
--- a/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -1498,8 +1498,8 @@
     break;
   case LanaiISD::SELECT_CC:
     KnownBits Known2;
-    DAG.computeKnownBits(Op->getOperand(0), Known, Depth + 1);
-    DAG.computeKnownBits(Op->getOperand(1), Known2, Depth + 1);
+    Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
+    Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
     Known.Zero &= Known2.Zero;
     Known.One &= Known2.One;
     break;
diff --git a/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp b/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
index 3cc6da2..1ad70ac 100644
--- a/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
+++ b/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
@@ -497,7 +497,11 @@
         getLexer().Lex(); // Eat '+'
         return false;
       }
-      Operands.push_back(MSP430Operand::CreateIndReg(RegNo, StartLoc, EndLoc));
+      if (Operands.size() > 1) // Emulate @rd in destination position as 0(rd)
+        Operands.push_back(MSP430Operand::CreateMem(RegNo,
+            MCConstantExpr::create(0, getContext()), StartLoc, EndLoc));
+      else
+        Operands.push_back(MSP430Operand::CreateIndReg(RegNo, StartLoc, EndLoc));
       return false;
     }
     case AsmToken::Hash:
diff --git a/lib/Target/MSP430/Disassembler/MSP430Disassembler.cpp b/lib/Target/MSP430/Disassembler/MSP430Disassembler.cpp
index 2a66b4e..e5da130 100644
--- a/lib/Target/MSP430/Disassembler/MSP430Disassembler.cpp
+++ b/lib/Target/MSP430/Disassembler/MSP430Disassembler.cpp
@@ -249,6 +249,10 @@
   case amSymbolic:
   case amImmediate:
   case amAbsolute:
+    if (Bytes.size() < (Words + 1) * 2) {
+      Size = 2;
+      return DecodeStatus::Fail;
+    }
     Insn |= (uint64_t)support::endian::read16le(Bytes.data() + 2) << 16;
     ++Words;
     break;
@@ -259,6 +263,10 @@
   case amIndexed:
   case amSymbolic:
   case amAbsolute:
+    if (Bytes.size() < (Words + 1) * 2) {
+      Size = 2;
+      return DecodeStatus::Fail;
+    }
     Insn |= (uint64_t)support::endian::read16le(Bytes.data() + Words * 2)
         << (Words * 16);
     ++Words;
@@ -296,6 +304,10 @@
   case amSymbolic:
   case amImmediate:
   case amAbsolute:
+    if (Bytes.size() < (Words + 1) * 2) {
+      Size = 2;
+      return DecodeStatus::Fail;
+    }
     Insn |= (uint64_t)support::endian::read16le(Bytes.data() + 2) << 16;
     ++Words;
     break;
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
index 82e6731..36e9a9c 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
@@ -20,6 +20,7 @@
   CodePointerSize = CalleeSaveStackSlotSize = 2;
 
   CommentString = ";";
+  SeparatorString = "{";
 
   AlignmentIsInBytes = false;
   UsesELFSectionDirectiveForBSS = true;
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCCodeEmitter.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCCodeEmitter.cpp
index adf2384..06f9f30 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCCodeEmitter.cpp
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCCodeEmitter.cpp
@@ -128,7 +128,7 @@
   const MCOperand &MO2 = MI.getOperand(Op + 1);
   if (MO2.isImm()) {
     Offset += 2;
-    return (MO2.getImm() << 4) | Reg;
+    return ((unsigned)MO2.getImm() << 4) | Reg;
   }
 
   assert(MO2.isExpr() && "Expr operand expected");
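
The cast added in this hunk is a correctness fix, not cosmetics: the indexed-mode offset can be negative, and left-shifting a negative signed value is undefined behavior in C++. A hedged sketch of the fixed encoding step (simplified signature):

    #include <cstdint>

    // For an operand such as "-2(r1)", Imm is negative; "Imm << 4" would be
    // UB on a signed type. Converting to unsigned first gives well-defined
    // modular wraparound and the same low encoding bits.
    unsigned encodeIndexed(int64_t Imm, unsigned Reg) {
      return ((unsigned)Imm << 4) | Reg;
    }
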
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 73c8793..3e70613 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -95,6 +95,8 @@
   setOperationAction(ISD::SIGN_EXTEND,      MVT::i16,   Custom);
   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);
+  setOperationAction(ISD::STACKSAVE,        MVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE,     MVT::Other, Expand);
 
   setOperationAction(ISD::CTTZ,             MVT::i8,    Expand);
   setOperationAction(ISD::CTTZ,             MVT::i16,   Expand);
@@ -952,15 +954,26 @@
   // Expand the stuff into sequence of shifts.
   SDValue Victim = N->getOperand(0);
 
-  if ((Opc == ISD::SRA || Opc == ISD::SRL) && ShiftAmount >= 8) {
-    // foo >> (8 + N) => sxt(swpb(foo)) >> N
+  if (ShiftAmount >= 8) {
     assert(VT == MVT::i16 && "Cannot shift i8 by 8 or more");
-    Victim = DAG.getNode(ISD::BSWAP, dl, VT, Victim);
-    if (Opc == ISD::SRA)
-      Victim = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Victim,
-                           DAG.getValueType(MVT::i8));
-    else
+    switch (Opc) {
+    default:
+      llvm_unreachable("Unknown shift");
+    case ISD::SHL:
+      // foo << (8 + N) => swpb(zext(foo)) << N
       Victim = DAG.getZeroExtendInReg(Victim, dl, MVT::i8);
+      Victim = DAG.getNode(ISD::BSWAP, dl, VT, Victim);
+      break;
+    case ISD::SRA:
+    case ISD::SRL:
+      // foo >> (8 + N) => sxt(swpb(foo)) >> N
+      Victim = DAG.getNode(ISD::BSWAP, dl, VT, Victim);
+      Victim = (Opc == ISD::SRA)
+                   ? DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Victim,
+                                 DAG.getValueType(MVT::i8))
+                   : DAG.getZeroExtendInReg(Victim, dl, MVT::i8);
+      break;
+    }
     ShiftAmount -= 8;
   }
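
The rewritten lowering now covers SHL as well as SRA/SRL once the shift amount reaches 8, using an SWPB byte swap plus an extension to peel off the first 8 bits. The identities it relies on are easy to check in plain C++ (a sketch of the algebra, not the ISel code):

    #include <cassert>
    #include <cstdint>

    static uint16_t swpb(uint16_t V) {          // MSP430 SWPB: swap bytes
      return (uint16_t)((V << 8) | (V >> 8));
    }

    int main() {
      uint16_t Foo = 0xABCD;
      // foo << (8 + N)  ==  swpb(zext8(foo)) << N
      assert((uint16_t)(swpb(Foo & 0x00FF) << 3) == (uint16_t)(Foo << 11));
      // foo >> (8 + N)  ==  zext8(swpb(foo)) >> N        (SRL)
      assert((uint16_t)((swpb(Foo) & 0x00FF) >> 3) == (uint16_t)(Foo >> 11));
      // foo >> (8 + N)  ==  sxt(swpb(foo)) >> N          (SRA)
      assert((int16_t)((int16_t)(int8_t)(swpb(Foo) & 0x00FF) >> 3) ==
             (int16_t)((int16_t)Foo >> 11));
      return 0;
    }
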
 
diff --git a/lib/Target/MSP430/MSP430InstrInfo.td b/lib/Target/MSP430/MSP430InstrInfo.td
index 3ed1737..25c81d9 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/lib/Target/MSP430/MSP430InstrInfo.td
@@ -226,7 +226,6 @@
 //  Control Flow Instructions...
 //
 
-// FIXME: Provide proper encoding!
 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
   def RET  : IForm16<0b0100, DstReg, SrcPostInc, 2,
                      (outs), (ins), "ret",  [(MSP430retflag)]> {
@@ -292,6 +291,8 @@
   def CALLm     : II16m<0b101,
                         (outs), (ins memsrc:$src),
                         "call\t$src", [(MSP430call (load addr:$src))]>;
+  def CALLn     : II16n<0b101, (outs), (ins indreg:$rs), "call\t$rs", []>;
+  def CALLp     : II16p<0b101, (outs), (ins postreg:$rs), "call\t$rs", []>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -315,7 +316,6 @@
 //===----------------------------------------------------------------------===//
 // Move Instructions
 
-// FIXME: Provide proper encoding!
 let hasSideEffects = 0 in {
 def MOV8rr  : I8rr<0b0100,
                    (outs GR8:$rd), (ins GR8:$rs),
@@ -327,7 +327,6 @@
                     []>;
 }
 
-// FIXME: Provide proper encoding!
 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
 def MOV8rc : I8rc<0b0100,
                    (outs GR8:$rd), (ins cg8imm:$imm),
@@ -437,6 +436,11 @@
                     "mov\t{$src, $dst}",
                     [(store (i16 (load addr:$src)), addr:$dst)]>;
 
+def MOV8mn  : I8mn<0b0100, (outs), (ins memdst:$dst, indreg:$rs),
+                   "mov.b\t{$rs, $dst}", []>;
+def MOV16mn : I16mn<0b0100, (outs), (ins memdst:$dst, indreg:$rs),
+                    "mov\t{$rs, $dst}", []>;
+
 //===----------------------------------------------------------------------===//
 // Arithmetic Instructions
 
@@ -619,11 +623,11 @@
 def : Pat<(MSP430rla GR8:$dst),  (ADD8rr  $dst, $dst)>;
 def : Pat<(MSP430rla GR16:$dst), (ADD16rr $dst, $dst)>;
 
+// Format-II (Single Operand) Instruction
+// Register mode
 let Constraints = "$rs = $rd" in {
 
 let Defs = [SR] in {
-
-// FIXME: memory variant!
 def RRA8r :   II8r<0b010,
                    (outs GR8:$rd), (ins GR8:$rs),
                    "rra.b\t$rd",
@@ -646,9 +650,8 @@
                    "rrc\t$rd",
                    [(set GR16:$rd, (MSP430rrc GR16:$rs)),
                     (implicit SR)]>;
-}
+} // Uses = [SR]
 
-// FIXME: Memory sext's ?
 def SEXT16r : II16r<0b011,
                     (outs GR16:$rd), (ins GR16:$rs),
                     "sxt\t$rd",
@@ -663,7 +666,6 @@
                    "mov.b\t{$rs, $rd}",
                    [(set GR16:$rd, (zext (trunc GR16:$rs)))]>;
 
-// FIXME: Memory bitswaps?
 def SWPB16r : II16r<0b001,
                     (outs GR16:$rd), (ins GR16:$rs),
                     "swpb\t$rd",
@@ -671,6 +673,61 @@
 
 } // Constraints = "$rs = $rd"
 
+// Indexed, indirect register and indirect autoincrement modes
+let Defs = [SR] in {
+def RRA8m  : II8m<0b010,
+                   (outs), (ins memsrc:$src),
+                   "rra.b\t$src",
+                   [(store (MSP430rra (i8 (load addr:$src))), addr:$src),
+                    (implicit SR)]>;
+def RRA16m : II16m<0b010,
+                   (outs), (ins memsrc:$src),
+                   "rra\t$src",
+                   [(store (MSP430rra (i16 (load addr:$src))), addr:$src),
+                    (implicit SR)]>;
+
+def RRA8n  : II8n<0b010, (outs), (ins indreg:$rs), "rra.b\t$rs", []>;
+def RRA16n : II16n<0b010, (outs), (ins indreg:$rs), "rra\t$rs", []>;
+def RRA8p  : II8p<0b010, (outs), (ins postreg:$rs), "rra.b\t$rs", []>;
+def RRA16p : II16p<0b010, (outs), (ins postreg:$rs), "rra\t$rs", []>;
+
+let Uses = [SR] in {
+def RRC8m  : II8m<0b000,
+                   (outs), (ins memsrc:$src),
+                   "rrc.b\t$src",
+                   [(store (MSP430rrc (i8 (load addr:$src))), addr:$src),
+                    (implicit SR)]>;
+def RRC16m : II16m<0b000,
+                   (outs), (ins memsrc:$src),
+                   "rrc\t$src",
+                   [(store (MSP430rrc (i16 (load addr:$src))), addr:$src),
+                    (implicit SR)]>;
+
+def RRC8n  : II8n<0b000, (outs), (ins indreg:$rs), "rrc.b\t$rs", []>;
+def RRC16n : II16n<0b000, (outs), (ins indreg:$rs), "rrc\t$rs", []>;
+def RRC8p  : II8p<0b000, (outs), (ins postreg:$rs), "rrc.b\t$rs", []>;
+def RRC16p : II16p<0b000, (outs), (ins postreg:$rs), "rrc\t$rs", []>;
+
+} // Uses = [SR]
+
+def SEXT16m : II16m<0b011,
+                    (outs), (ins memsrc:$src),
+                    "sxt\t$src",
+                    [(store (sext_inreg (extloadi16i8 addr:$src), i8),
+                             addr:$src),
+                     (implicit SR)]>;
+def SEXT16n : II16n<0b011, (outs), (ins indreg:$rs), "sxt\t$rs", []>;
+def SEXT16p : II16p<0b011, (outs), (ins postreg:$rs), "sxt\t$rs", []>;
+
+} // Defs = [SR]
+
+def SWPB16m : II16m<0b001,
+                   (outs), (ins memsrc:$src),
+                   "swpb\t$src",
+                   [(store (bswap (i16 (load addr:$src))), addr:$src)]>;
+def SWPB16n : II16n<0b001, (outs), (ins indreg:$rs), "swpb\t$rs", []>;
+def SWPB16p : II16p<0b001, (outs), (ins postreg:$rs), "swpb\t$rs", []>;
+
 // Integer comparisons
 let Defs = [SR] in {
 def CMP8rr  : I8rr<0b1001,
@@ -733,6 +790,16 @@
                     [(MSP430cmp GR16:$rd, (load addr:$src)),
                      (implicit SR)]>;
 
+def CMP8rn  : I8rn<0b1001,
+                   (outs), (ins GR8:$rd, indreg:$rs), "cmp.b\t$rs, $rd", []>;
+def CMP16rn : I16rn<0b1001,
+                    (outs), (ins GR16:$rd, indreg:$rs), "cmp\t$rs, $rd", []>;
+
+def CMP8rp  : I8rp<0b1001,
+                   (outs), (ins GR8:$rd, postreg:$rs), "cmp.b\t$rs, $rd", []>;
+def CMP16rp : I16rp<0b1001,
+                    (outs), (ins GR16:$rd, postreg:$rs), "cmp\t$rs, $rd", []>;
+
 def CMP8mr  : I8mr<0b1001,
                    (outs), (ins memsrc:$dst, GR8:$rs),
                    "cmp.b\t$rs, $dst",
@@ -743,6 +810,25 @@
                     "cmp\t$rs, $dst",
                     [(MSP430cmp (load addr:$dst), GR16:$rs), 
                      (implicit SR)]>;
+def CMP8mm  : I8mm<0b1001,
+                   (outs), (ins memdst:$dst, memsrc:$src),
+                   "cmp.b\t$src, $dst",
+                   [(MSP430cmp (load addr:$dst), (i8 (load addr:$src))),
+                    (implicit SR)]>;
+def CMP16mm : I16mm<0b1001, (outs), (ins memdst:$dst, memsrc:$src),
+                    "cmp\t$src, $dst",
+                    [(MSP430cmp (load addr:$dst), (i16 (load addr:$src))),
+                     (implicit SR)]>;
+
+def CMP8mn  : I8mn<0b1001, (outs), (ins memsrc:$dst, indreg:$rs),
+                   "cmp.b\t$rs, $dst", []>;
+def CMP16mn : I16mn<0b1001, (outs), (ins memsrc:$dst, indreg:$rs),
+                    "cmp\t$rs, $dst", []>;
+
+def CMP8mp  : I8mp<0b1001, (outs), (ins memsrc:$dst, postreg:$rs),
+                   "cmp.b\t$rs, $dst", []>;
+def CMP16mp : I16mp<0b1001, (outs), (ins memsrc:$dst, postreg:$rs),
+                    "cmp\t$rs, $dst", []>;
 
 // BIT TESTS, just sets condition codes
 // Note that the C condition is set differently than when using CMP.
@@ -791,6 +877,16 @@
                     [(MSP430cmp (and_su GR16:$rd,  (load addr:$src)), 0),
                      (implicit SR)]>;
 
+def BIT8rn  : I8rn<0b1011, (outs), (ins GR8:$rd, indreg:$rs),
+                   "bit.b\t$rs, $rd", []>;
+def BIT16rn : I16rn<0b1011, (outs), (ins GR16:$rd, indreg:$rs),
+                    "bit\t$rs, $rd", []>;
+
+def BIT8rp  : I8rp<0b1011, (outs), (ins GR8:$rd, postreg:$rs),
+                   "bit.b\t$rs, $rd", []>;
+def BIT16rp : I16rp<0b1011, (outs), (ins GR16:$rd, postreg:$rs),
+                    "bit\t$rs, $rd", []>;
+
 def BIT8mr  : I8mr<0b1011,
                   (outs), (ins memsrc:$dst, GR8:$rs),
                   "bit.b\t$rs, $dst",
@@ -808,7 +904,7 @@
                    [(MSP430cmp (and_su (load addr:$dst), (i8 cg8imm:$imm)), 0),
                     (implicit SR)]>;
 def BIT16mc : I16mc<0b1011,
-                    (outs), (ins memsrc:$dst, i16imm:$imm),
+                    (outs), (ins memdst:$dst, cg16imm:$imm),
                     "bit\t$imm, $dst",
                     [(MSP430cmp (and_su (load addr:$dst), (i16 cg16imm:$imm)), 0),
                      (implicit SR)]>;
@@ -838,6 +934,16 @@
                                         (load addr:$src)),
                                  0),
                      (implicit SR)]>;
+def BIT8mn  : I8mn<0b1011, (outs), (ins memsrc:$dst, indreg:$rs),
+                   "bit.b\t$rs, $dst", []>;
+def BIT16mn : I16mn<0b1011, (outs), (ins memsrc:$dst, indreg:$rs),
+                    "bit\t$rs, $dst", []>;
+
+def BIT8mp  : I8mp<0b1011, (outs), (ins memsrc:$dst, postreg:$rs),
+                   "bit.b\t$rs, $dst", []>;
+def BIT16mp : I16mp<0b1011, (outs), (ins memsrc:$dst, postreg:$rs),
+                    "bit\t$rs, $dst", []>;
+
 } // Defs = [SR]
 
 def TST8r   : InstAlias<"tst.b\t$dst",  (CMP8rc    GR8:$dst,     0)>;
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 78dfed6..d2fed68 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -1711,14 +1711,23 @@
   return MipsInsts[Opcode];
 }
 
-static bool hasShortDelaySlot(unsigned Opcode) {
-  switch (Opcode) {
+static bool hasShortDelaySlot(MCInst &Inst) {
+  switch (Inst.getOpcode()) {
+    case Mips::BEQ_MM:
+    case Mips::BNE_MM:
+    case Mips::BLTZ_MM:
+    case Mips::BGEZ_MM:
+    case Mips::BLEZ_MM:
+    case Mips::BGTZ_MM:
+    case Mips::JRC16_MM:
     case Mips::JALS_MM:
     case Mips::JALRS_MM:
     case Mips::JALRS16_MM:
     case Mips::BGEZALS_MM:
     case Mips::BLTZALS_MM:
       return true;
+    case Mips::J_MM:
+      return !Inst.getOperand(0).isReg();
     default:
       return false;
   }
@@ -2302,7 +2311,7 @@
   // If this instruction has a delay slot and .set reorder is active,
   // emit a NOP after it.
   if (FillDelaySlot) {
-    TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst.getOpcode()), IDLoc, STI);
+    TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst), IDLoc, STI);
     TOut.emitDirectiveSetReorder();
   }
 
@@ -2314,7 +2323,7 @@
       // If .set reorder has been used, we've already emitted a NOP.
       // If .set noreorder has been used, we need to emit a NOP at this point.
       if (!AssemblerOptions.back()->isReorder())
-        TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst.getOpcode()), IDLoc,
+        TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst), IDLoc,
                                 STI);
 
       // Load the $gp from the stack.
@@ -2601,7 +2610,7 @@
   // emit a NOP after it.
   const MCInstrDesc &MCID = getInstDesc(JalrInst.getOpcode());
   if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder())
-    TOut.emitEmptyDelaySlot(hasShortDelaySlot(JalrInst.getOpcode()), IDLoc,
+    TOut.emitEmptyDelaySlot(hasShortDelaySlot(JalrInst), IDLoc,
                             STI);
 
   return false;
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index 2cacc0a0..b67fb46 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -44,6 +44,7 @@
   MipsModuleISelDAGToDAG.cpp
   MipsOptimizePICCall.cpp
   MipsOs16.cpp
+  MipsPreLegalizerCombiner.cpp
   MipsRegisterBankInfo.cpp
   MipsRegisterInfo.cpp
   MipsSEFrameLowering.cpp
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 0e8de05..8ace289 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -11,6 +11,7 @@
 #include "MCTargetDesc/MipsMCTargetDesc.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCELFObjectWriter.h"
 #include "llvm/MC/MCFixup.h"
 #include "llvm/MC/MCObjectWriter.h"
@@ -225,7 +226,9 @@
   case Mips::fixup_Mips_NONE:
     return ELF::R_MIPS_NONE;
   case FK_Data_1:
-    report_fatal_error("MIPS does not support one byte relocations");
+    Ctx.reportError(Fixup.getLoc(),
+                    "MIPS does not support one byte relocations");
+    return ELF::R_MIPS_NONE;
   case Mips::fixup_Mips_16:
   case FK_Data_2:
     return IsPCRel ? ELF::R_MIPS_PC16 : ELF::R_MIPS_16;
@@ -236,6 +239,10 @@
 
   if (IsPCRel) {
     switch (Kind) {
+    case FK_Data_8:
+      Ctx.reportError(Fixup.getLoc(),
+                      "MIPS does not support 64-bit PC-relative relocations");
+      return ELF::R_MIPS_NONE;
     case Mips::fixup_Mips_Branch_PCRel:
     case Mips::fixup_Mips_PC16:
       return ELF::R_MIPS_PC16;
diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index 1eb21b6..58f9717 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -248,7 +248,11 @@
 }
 
 void MipsTargetStreamer::emitNop(SMLoc IDLoc, const MCSubtargetInfo *STI) {
-  emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
+  const FeatureBitset &Features = STI->getFeatureBits();
+  if (Features[Mips::FeatureMicroMips])
+    emitRR(Mips::MOVE16_MM, Mips::ZERO, Mips::ZERO, IDLoc, STI);
+  else
+    emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
 }
 
 /// Emit the $gp restore operation for .cprestore.
diff --git a/lib/Target/Mips/Mips.h b/lib/Target/Mips/Mips.h
index ef3a807..6bb7aec 100644
--- a/lib/Target/Mips/Mips.h
+++ b/lib/Target/Mips/Mips.h
@@ -38,6 +38,7 @@
   FunctionPass *createMipsConstantIslandPass();
   FunctionPass *createMicroMipsSizeReducePass();
   FunctionPass *createMipsExpandPseudoPass();
+  FunctionPass *createMipsPreLegalizeCombiner();
 
   InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &,
                                                      MipsSubtarget &,
@@ -46,6 +47,7 @@
   void initializeMipsDelaySlotFillerPass(PassRegistry &);
   void initializeMipsBranchExpansionPass(PassRegistry &);
   void initializeMicroMipsSizeReducePass(PassRegistry &);
+  void initializeMipsPreLegalizerCombinerPass(PassRegistry &);
 } // end namespace llvm;
 
 #endif
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
index c310d94..f237bb6 100644
--- a/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -74,16 +74,18 @@
     return FRet;
   case Type::DoubleTyID:
     return DRet;
-  case Type::StructTyID:
-    if (T->getStructNumElements() != 2)
+  case Type::StructTyID: {
+    StructType *ST = cast<StructType>(T);
+    if (ST->getNumElements() != 2)
       break;
-    if ((T->getContainedType(0)->isFloatTy()) &&
-        (T->getContainedType(1)->isFloatTy()))
+    if ((ST->getElementType(0)->isFloatTy()) &&
+        (ST->getElementType(1)->isFloatTy()))
       return CFRet;
-    if ((T->getContainedType(0)->isDoubleTy()) &&
-        (T->getContainedType(1)->isDoubleTy()))
+    if ((ST->getElementType(0)->isDoubleTy()) &&
+        (ST->getElementType(1)->isDoubleTy()))
       return CDRet;
     break;
+  }
   default:
     break;
   }
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index 8ce47e3..79df622 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -386,27 +386,22 @@
   }
   else if (RetTy ->isDoubleTy()) {
     result = dfMips16Helper[stubNum];
-  }
-  else if (RetTy->isStructTy()) {
+  } else if (StructType *SRetTy = dyn_cast<StructType>(RetTy)) {
     // check if it's complex
-    if (RetTy->getNumContainedTypes() == 2) {
-      if ((RetTy->getContainedType(0)->isFloatTy()) &&
-          (RetTy->getContainedType(1)->isFloatTy())) {
+    if (SRetTy->getNumElements() == 2) {
+      if ((SRetTy->getElementType(0)->isFloatTy()) &&
+          (SRetTy->getElementType(1)->isFloatTy())) {
         result = scMips16Helper[stubNum];
-      }
-      else if ((RetTy->getContainedType(0)->isDoubleTy()) &&
-               (RetTy->getContainedType(1)->isDoubleTy())) {
+      } else if ((SRetTy->getElementType(0)->isDoubleTy()) &&
+                 (SRetTy->getElementType(1)->isDoubleTy())) {
         result = dcMips16Helper[stubNum];
-      }
-      else {
+      } else {
         llvm_unreachable("Uncovered condition");
       }
-    }
-    else {
+    } else {
       llvm_unreachable("Uncovered condition");
     }
-  }
-  else {
+  } else {
     if (stubNum == 0) {
       needHelper = false;
       return "";
diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td
index 39dc265..0d7e3e2 100644
--- a/lib/Target/Mips/MipsCondMov.td
+++ b/lib/Target/Mips/MipsCondMov.td
@@ -296,3 +296,13 @@
 def PseudoSELECTFP_F_S : SelectFP_Pseudo_F<FGR32Opnd>;
 def PseudoSELECTFP_F_D32 : SelectFP_Pseudo_F<AFGR64Opnd>, FGR_32;
 def PseudoSELECTFP_F_D64 : SelectFP_Pseudo_F<FGR64Opnd>, FGR_64;
+
+let usesCustomInserter = 1 in {
+class D_SELECT_CLASS<RegisterOperand RC> :
+  PseudoSE<(outs RC:$dst1, RC:$dst2),
+           (ins GPR32Opnd:$cond, RC:$a1, RC:$a2, RC:$b1, RC:$b2), []>,
+  ISA_MIPS1_NOT_4_32;
+}
+
+def PseudoD_SELECT_I   : D_SELECT_CLASS<GPR32Opnd>;
+def PseudoD_SELECT_I64 : D_SELECT_CLASS<GPR64Opnd>;
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 104fa40..8c2a364 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -1396,6 +1396,9 @@
   case Mips::PseudoSELECTFP_T_D32:
   case Mips::PseudoSELECTFP_T_D64:
     return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
+  case Mips::PseudoD_SELECT_I:
+  case Mips::PseudoD_SELECT_I64:
+    return emitPseudoD_SELECT(MI, BB);
   }
 }
 
@@ -2427,6 +2430,16 @@
                              DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
   SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
                             DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));
+
+  if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
+    SDVTList VTList = DAG.getVTList(VT, VT);
+    return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64
+                                             : Mips::PseudoD_SELECT_I,
+                       DL, VTList, Cond, ShiftRightHi,
+                       IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
+                       ShiftRightHi);
+  }
+
   Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
   Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
                    IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);
@@ -4345,6 +4358,81 @@
   return BB;
 }
 
+MachineBasicBlock *MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
+                                                          MachineBasicBlock *BB) const {
+  assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
+         "Subtarget already supports SELECT nodes with the use of"
+         "conditional-move instructions.");
+
+  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+  DebugLoc DL = MI.getDebugLoc();
+
+  // D_SELECT substitutes two SELECT nodes that go one after another and
+  // have the same condition operand. On machines without conditional-move
+  // instructions, it avoids the redundant branch instructions that would
+  // result from expanding each of the two SELECT pseudo instructions into
+  // its own diamond pattern.
+  const BasicBlock *LLVM_BB = BB->getBasicBlock();
+  MachineFunction::iterator It = ++BB->getIterator();
+
+  //  thisMBB:
+  //  ...
+  //   TrueVal = ...
+  //   setcc r1, r2, r3
+  //   bNE   r1, r0, copy1MBB
+  //   fallthrough --> copy0MBB
+  MachineBasicBlock *thisMBB = BB;
+  MachineFunction *F = BB->getParent();
+  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
+  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+  F->insert(It, copy0MBB);
+  F->insert(It, sinkMBB);
+
+  // Transfer the remainder of BB and its successor edges to sinkMBB.
+  sinkMBB->splice(sinkMBB->begin(), BB,
+                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
+  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+  // Next, add the true and fallthrough blocks as its successors.
+  BB->addSuccessor(copy0MBB);
+  BB->addSuccessor(sinkMBB);
+
+  // bne rs, $0, sinkMBB
+  BuildMI(BB, DL, TII->get(Mips::BNE))
+      .addReg(MI.getOperand(2).getReg())
+      .addReg(Mips::ZERO)
+      .addMBB(sinkMBB);
+
+  //  copy0MBB:
+  //   %FalseValue = ...
+  //   # fallthrough to sinkMBB
+  BB = copy0MBB;
+
+  // Update machine-CFG edges
+  BB->addSuccessor(sinkMBB);
+
+  //  sinkMBB:
+  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
+  //  ...
+  BB = sinkMBB;
+
+  // Use two PHI nodes to select the two results
+  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
+      .addReg(MI.getOperand(3).getReg())
+      .addMBB(thisMBB)
+      .addReg(MI.getOperand(5).getReg())
+      .addMBB(copy0MBB);
+  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
+      .addReg(MI.getOperand(4).getReg())
+      .addMBB(thisMBB)
+      .addReg(MI.getOperand(6).getReg())
+      .addMBB(copy0MBB);
+
+  MI.eraseFromParent(); // The pseudo instruction is gone now.
+
+  return BB;
+}
+
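
The effect of the pseudo is easiest to see in source form: two selects on the same condition would normally each expand into their own compare-and-branch diamond, while D_SELECT shares a single branch and sinks both results through two PHIs. An illustrative sketch (plain C++, not the emitted MIR):

    #include <cstdint>

    // Each ternary is one SELECT; without conditional moves each would get
    // its own diamond. emitPseudoD_SELECT emits the second, merged form:
    // one bne plus a shared copy0MBB/sinkMBB pair with two PHIs.
    void selectPair(bool Cond, uint32_t THi, uint32_t TLo, uint32_t FHi,
                    uint32_t FLo, uint32_t &Hi, uint32_t &Lo) {
      if (Cond) { Hi = THi; Lo = TLo; }   // thisMBB branches to sinkMBB
      else      { Hi = FHi; Lo = FLo; }   // copy0MBB falls through
    }
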
 // FIXME? Maybe this could be a TableGen attribute on some registers and
 // this table could be generated automatically from RegInfo.
 unsigned MipsTargetLowering::getRegisterByName(const char* RegName, EVT VT,
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 5a0de45..e043f13 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -699,6 +699,8 @@
     MachineBasicBlock *emitSEL_D(MachineInstr &MI, MachineBasicBlock *BB) const;
     MachineBasicBlock *emitPseudoSELECT(MachineInstr &MI, MachineBasicBlock *BB,
                                         bool isFPCmp, unsigned Opc) const;
+    MachineBasicBlock *emitPseudoD_SELECT(MachineInstr &MI,
+                                          MachineBasicBlock *BB) const;
   };
 
   /// Create MipsTargetLowering objects.
diff --git a/lib/Target/Mips/MipsInstructionSelector.cpp b/lib/Target/Mips/MipsInstructionSelector.cpp
index 4ac9477..b041590 100644
--- a/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -145,6 +145,42 @@
              .addMemOperand(*I.memoperands_begin());
     break;
   }
+  case G_UDIV:
+  case G_UREM:
+  case G_SDIV:
+  case G_SREM: {
+    unsigned HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
+    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
+    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;
+
+    MachineInstr *PseudoDIV, *PseudoMove;
+    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
+                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
+                    .addDef(HILOReg)
+                    .add(I.getOperand(1))
+                    .add(I.getOperand(2));
+    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
+      return false;
+
+    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
+                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
+                     .addDef(I.getOperand(0).getReg())
+                     .addUse(HILOReg);
+    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
+      return false;
+
+    I.eraseFromParent();
+    return true;
+  }
+  case G_SELECT: {
+    // Handle operands with pointer type.
+    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
+             .add(I.getOperand(0))
+             .add(I.getOperand(2))
+             .add(I.getOperand(1))
+             .add(I.getOperand(3));
+    break;
+  }
   case G_CONSTANT: {
     int Imm = I.getOperand(1).getCImm()->getValue().getLimitedValue();
     unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
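
This selection leans on the MIPS division contract: a single divide instruction writes both results into the HI/LO accumulator pair, so division and remainder differ only in which half is read back. A small model of the mapping (hypothetical helper, not the pseudo-instruction API):

    #include <cstdint>
    #include <utility>

    // PseudoSDIV fills the ACC64 pair; PseudoMFLO reads the quotient (LO),
    // PseudoMFHI reads the remainder (HI).
    std::pair<int32_t, int32_t> sdivPair(int32_t Num, int32_t Den) {
      return {Num / Den /* LO: G_SDIV */, Num % Den /* HI: G_SREM */};
    }
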
diff --git a/lib/Target/Mips/MipsLegalizerInfo.cpp b/lib/Target/Mips/MipsLegalizerInfo.cpp
index 02e787f..c629f02 100644
--- a/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -20,21 +20,38 @@
 MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
   using namespace TargetOpcode;
 
+  const LLT s1 = LLT::scalar(1);
   const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
   const LLT p0 = LLT::pointer(0, 32);
 
   getActionDefinitionsBuilder(G_ADD)
       .legalFor({s32})
-      .minScalar(0, s32)
-      .customFor({s64});
+      .clampScalar(0, s32, s32);
+
+  getActionDefinitionsBuilder(G_UADDE)
+      .lowerFor({{s32, s1}});
 
   getActionDefinitionsBuilder({G_LOAD, G_STORE})
       .legalForCartesianProduct({p0, s32}, {p0});
 
-  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR})
+  getActionDefinitionsBuilder(G_SELECT)
+      .legalForCartesianProduct({p0, s32}, {s32})
+      .minScalar(0, s32)
+      .minScalar(1, s32);
+
+  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
+      .legalFor({s32})
+      .clampScalar(0, s32, s32);
+
+  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
       .legalFor({s32});
 
+  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UREM, G_UDIV})
+      .legalFor({s32})
+      .minScalar(0, s32)
+      .libcallFor({s64});
+
   getActionDefinitionsBuilder(G_ICMP)
       .legalFor({{s32, s32}})
       .minScalar(0, s32);
@@ -65,37 +82,5 @@
 
   MIRBuilder.setInstr(MI);
 
-  switch (MI.getOpcode()) {
-  case G_ADD: {
-    unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
-
-    const LLT sHalf = LLT::scalar(Size / 2);
-
-    unsigned RHSLow = MRI.createGenericVirtualRegister(sHalf);
-    unsigned RHSHigh = MRI.createGenericVirtualRegister(sHalf);
-    unsigned LHSLow = MRI.createGenericVirtualRegister(sHalf);
-    unsigned LHSHigh = MRI.createGenericVirtualRegister(sHalf);
-    unsigned ResLow = MRI.createGenericVirtualRegister(sHalf);
-    unsigned ResHigh = MRI.createGenericVirtualRegister(sHalf);
-    unsigned Carry = MRI.createGenericVirtualRegister(sHalf);
-    unsigned TmpResHigh = MRI.createGenericVirtualRegister(sHalf);
-
-    MIRBuilder.buildUnmerge({RHSLow, RHSHigh}, MI.getOperand(2).getReg());
-    MIRBuilder.buildUnmerge({LHSLow, LHSHigh}, MI.getOperand(1).getReg());
-
-    MIRBuilder.buildAdd(TmpResHigh, LHSHigh, RHSHigh);
-    MIRBuilder.buildAdd(ResLow, LHSLow, RHSLow);
-    MIRBuilder.buildICmp(CmpInst::ICMP_ULT, Carry, ResLow, LHSLow);
-    MIRBuilder.buildAdd(ResHigh, TmpResHigh, Carry);
-
-    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {ResLow, ResHigh});
-
-    MI.eraseFromParent();
-    break;
-  }
-  default:
-    return false;
-  }
-
-  return true;
+  return false;
 }
diff --git a/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
new file mode 100644
index 0000000..1cff1c8
--- /dev/null
+++ b/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -0,0 +1,92 @@
+//===- lib/Target/Mips/MipsPreLegalizerCombiner.cpp ----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass does combining of machine instructions at the generic MI level,
+// before the legalizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsTargetMachine.h"
+#include "llvm/CodeGen/GlobalISel/Combiner.h"
+#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+
+#define DEBUG_TYPE "mips-prelegalizer-combiner"
+
+using namespace llvm;
+
+namespace {
+class MipsPreLegalizerCombinerInfo : public CombinerInfo {
+public:
+  MipsPreLegalizerCombinerInfo()
+      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
+                     /*LegalizerInfo*/ nullptr) {}
+  bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+               MachineIRBuilder &B) const override;
+};
+
+bool MipsPreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
+                                           MachineInstr &MI,
+                                           MachineIRBuilder &B) const {
+  return false;
+}
+
+// Pass boilerplate
+// ================
+
+class MipsPreLegalizerCombiner : public MachineFunctionPass {
+public:
+  static char ID;
+
+  MipsPreLegalizerCombiner();
+
+  StringRef getPassName() const override { return "MipsPreLegalizerCombiner"; }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+} // end anonymous namespace
+
+void MipsPreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addRequired<TargetPassConfig>();
+  AU.setPreservesCFG();
+  getSelectionDAGFallbackAnalysisUsage(AU);
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+MipsPreLegalizerCombiner::MipsPreLegalizerCombiner() : MachineFunctionPass(ID) {
+  initializeMipsPreLegalizerCombinerPass(*PassRegistry::getPassRegistry());
+}
+
+bool MipsPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
+  if (MF.getProperties().hasProperty(
+          MachineFunctionProperties::Property::FailedISel))
+    return false;
+  auto *TPC = &getAnalysis<TargetPassConfig>();
+  MipsPreLegalizerCombinerInfo PCInfo;
+  Combiner C(PCInfo, TPC);
+  return C.combineMachineInstrs(MF, nullptr);
+}
+
+char MipsPreLegalizerCombiner::ID = 0;
+INITIALIZE_PASS_BEGIN(MipsPreLegalizerCombiner, DEBUG_TYPE,
+                      "Combine Mips machine instrs before legalization", false,
+                      false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(MipsPreLegalizerCombiner, DEBUG_TYPE,
+                    "Combine Mips machine instrs before legalization", false,
+                    false)
+
+namespace llvm {
+FunctionPass *createMipsPreLegalizeCombiner() {
+  return new MipsPreLegalizerCombiner();
+}
+} // end namespace llvm
diff --git a/lib/Target/Mips/MipsRegisterBankInfo.cpp b/lib/Target/Mips/MipsRegisterBankInfo.cpp
index a0c8004..6af1f10 100644
--- a/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -93,6 +93,10 @@
   case G_SHL:
   case G_ASHR:
   case G_LSHR:
+  case G_SDIV:
+  case G_UDIV:
+  case G_SREM:
+  case G_UREM:
     OperandsMapping = &Mips::ValueMappings[Mips::GPRIdx];
     break;
   case G_CONSTANT:
@@ -107,6 +111,13 @@
                             &Mips::ValueMappings[Mips::GPRIdx],
                             &Mips::ValueMappings[Mips::GPRIdx]});
     break;
+  case G_SELECT:
+    OperandsMapping =
+        getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx],
+                            &Mips::ValueMappings[Mips::GPRIdx],
+                            &Mips::ValueMappings[Mips::GPRIdx],
+                            &Mips::ValueMappings[Mips::GPRIdx]});
+    break;
   default:
     return getInvalidInstructionMapping();
   }
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index f030f83..cf196b5 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -795,6 +795,24 @@
   switch(Opcode) {
   default: break;
 
+  case Mips::PseudoD_SELECT_I:
+  case Mips::PseudoD_SELECT_I64: {
+    MVT VT = Subtarget->isGP64bit() ? MVT::i64 : MVT::i32;
+    SDValue cond = Node->getOperand(0);
+    SDValue Hi1 = Node->getOperand(1);
+    SDValue Lo1 = Node->getOperand(2);
+    SDValue Hi2 = Node->getOperand(3);
+    SDValue Lo2 = Node->getOperand(4);
+
+    SDValue ops[] = {cond, Hi1, Lo1, Hi2, Lo2};
+    EVT NodeTys[] = {VT, VT};
+    ReplaceNode(Node, CurDAG->getMachineNode(Subtarget->isGP64bit()
+                                                 ? Mips::PseudoD_SELECT_I64
+                                                 : Mips::PseudoD_SELECT_I,
+                                             DL, NodeTys, ops));
+    return true;
+  }
+
   case ISD::ADDE: {
     selectAddE(Node, DL);
     return true;
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index 9cc91d3..8466298 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -56,6 +56,7 @@
   initializeMipsDelaySlotFillerPass(*PR);
   initializeMipsBranchExpansionPass(*PR);
   initializeMicroMipsSizeReducePass(*PR);
+  initializeMipsPreLegalizerCombinerPass(*PR);
 }
 
 static std::string computeDataLayout(const Triple &TT, StringRef CPU,
@@ -235,6 +236,7 @@
   void addPreEmitPass() override;
   void addPreRegAlloc() override;
   bool addIRTranslator() override;
+  void addPreLegalizeMachineIR() override;
   bool addLegalizeMachineIR() override;
   bool addRegBankSelect() override;
   bool addGlobalInstructionSelect() override;
@@ -312,6 +314,10 @@
   return false;
 }
 
+void MipsPassConfig::addPreLegalizeMachineIR() {
+  addPass(createMipsPreLegalizeCombiner());
+}
+
 bool MipsPassConfig::addLegalizeMachineIR() {
   addPass(new Legalizer());
   return false;
diff --git a/lib/Target/NVPTX/CMakeLists.txt b/lib/Target/NVPTX/CMakeLists.txt
index 4a64fe0..d094620 100644
--- a/lib/Target/NVPTX/CMakeLists.txt
+++ b/lib/Target/NVPTX/CMakeLists.txt
@@ -32,6 +32,7 @@
   NVPTXUtilities.cpp
   NVVMIntrRange.cpp
   NVVMReflect.cpp
+  NVPTXProxyRegErasure.cpp
   )
 
 add_llvm_target(NVPTXCodeGen ${NVPTXCodeGen_sources})
diff --git a/lib/Target/NVPTX/NVPTX.h b/lib/Target/NVPTX/NVPTX.h
index 02b8d8f..07bfc58 100644
--- a/lib/Target/NVPTX/NVPTX.h
+++ b/lib/Target/NVPTX/NVPTX.h
@@ -53,6 +53,7 @@
 FunctionPass *createNVPTXLowerArgsPass(const NVPTXTargetMachine *TM);
 BasicBlockPass *createNVPTXLowerAllocaPass();
 MachineFunctionPass *createNVPTXPeephole();
+MachineFunctionPass *createNVPTXProxyRegErasurePass();
 
 Target &getTheNVPTXTarget32();
 Target &getTheNVPTXTarget64();
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 63e227a..6284ad8 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -730,6 +730,11 @@
   for (Module::const_iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) {
     const Function *F = &*FI;
 
+    if (F->getAttributes().hasFnAttribute("nvptx-libcall-callee")) {
+      emitDeclaration(F, O);
+      continue;
+    }
+
     if (F->isDeclaration()) {
       if (F->use_empty())
         continue;
@@ -788,11 +793,8 @@
   // Construct a default subtarget off of the TargetMachine defaults. The
   // rest of NVPTX isn't friendly to change subtargets per function and
   // so the default TargetMachine will have all of the options.
-  const Triple &TT = TM.getTargetTriple();
-  StringRef CPU = TM.getTargetCPU();
-  StringRef FS = TM.getTargetFeatureString();
   const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
-  const NVPTXSubtarget STI(TT, CPU, FS, NTM);
+  const auto* STI = static_cast<const NVPTXSubtarget*>(NTM.getSubtargetImpl());
 
   if (M.alias_size()) {
     report_fatal_error("Module has aliases, which NVPTX does not support.");
@@ -816,7 +818,7 @@
   bool Result = AsmPrinter::doInitialization(M);
 
   // Emit header before any dwarf directives are emitted below.
-  emitHeader(M, OS1, STI);
+  emitHeader(M, OS1, *STI);
   OutStreamer->EmitRawText(OS1.str());
 
   // Emit module-level inline asm if it exists.
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index eceb4fc..bec8ece 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -663,6 +663,8 @@
     return "NVPTXISD::CallSeqEnd";
   case NVPTXISD::CallPrototype:
     return "NVPTXISD::CallPrototype";
+  case NVPTXISD::ProxyReg:
+    return "NVPTXISD::ProxyReg";
   case NVPTXISD::LoadV2:
     return "NVPTXISD::LoadV2";
   case NVPTXISD::LoadV4:
@@ -1661,7 +1663,24 @@
     }
   }
 
-  if (!Func) {
+  // Both indirect calls and libcalls have a nullptr Func. To distinguish
+  // between them, rely on the call site value: it is valid for indirect
+  // calls but always null for libcalls.
+  bool isIndirectCall = !Func && CS;
+
+  if (isa<ExternalSymbolSDNode>(Callee)) {
+    Function *CalleeFunc = nullptr;
+
+    // Try to find the callee in the current module.
+    Callee = DAG.getSymbolFunctionGlobalAddress(Callee, &CalleeFunc);
+    assert(CalleeFunc != nullptr && "Libcall callee must be set.");
+
+    // Set the "libcall callee" attribute to indicate that the function
+    // must always have a declaration.
+    CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
+  }
+
+  if (isIndirectCall) {
     // This is indirect function call case : PTX requires a prototype of the
     // form
     // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
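
The three call shapes handled here reduce to a small truth table over Func and the call-site value (both names mirror the surrounding LowerCall code; the sketch restates the comment above):

    // Func set            -> direct call to a known Function
    // Func null, CS set   -> indirect call through a pointer
    // Func null, CS null  -> libcall created during legalization
    bool classifyIndirect(const void *Func, const void *CS) {
      return !Func && CS;
    }
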
@@ -1685,7 +1704,7 @@
     Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
   };
   // We model convergent calls as separate opcodes.
-  unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
+  unsigned Opcode = isIndirectCall ? NVPTXISD::PrintCall : NVPTXISD::PrintCallUni;
   if (CLI.IsConvergent)
     Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
                                               : NVPTXISD::PrintConvergentCall;
@@ -1719,12 +1738,12 @@
   }
   SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
   SDValue CallArgEndOps[] = { Chain,
-                              DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
+                              DAG.getConstant(isIndirectCall ? 0 : 1, dl, MVT::i32),
                               InFlag };
   Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
   InFlag = Chain.getValue(1);
 
-  if (!Func) {
+  if (isIndirectCall) {
     SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
     SDValue PrototypeOps[] = { Chain,
                                DAG.getConstant(uniqueCallSite, dl, MVT::i32),
@@ -1733,6 +1752,9 @@
     InFlag = Chain.getValue(1);
   }
 
+  SmallVector<SDValue, 16> ProxyRegOps;
+  SmallVector<Optional<MVT>, 16> ProxyRegTruncates;
+
   // Generate loads from param memory/moves from registers for result
   if (Ins.size() > 0) {
     SmallVector<EVT, 16> VTs;
@@ -1803,11 +1825,14 @@
             MachineMemOperand::MOLoad);
 
         for (unsigned j = 0; j < NumElts; ++j) {
-          SDValue Ret = RetVal.getValue(j);
+          ProxyRegOps.push_back(RetVal.getValue(j));
+
           if (needTruncate)
-            Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
-          InVals.push_back(Ret);
+            ProxyRegTruncates.push_back(Optional<MVT>(Ins[VecIdx + j].VT));
+          else
+            ProxyRegTruncates.push_back(Optional<MVT>());
         }
+
         Chain = RetVal.getValue(NumElts);
         InFlag = RetVal.getValue(NumElts + 1);
 
@@ -1823,8 +1848,29 @@
                              DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
                                                    true),
                              InFlag, dl);
+  InFlag = Chain.getValue(1);
   uniqueCallSite++;
 
+  // Append ProxyReg instructions to the chain to make sure that `callseq_end`
+  // will not get lost. Otherwise, during libcall expansion, these nodes can
+  // become dangling.
+  for (unsigned i = 0; i < ProxyRegOps.size(); ++i) {
+    SDValue Ret = DAG.getNode(
+      NVPTXISD::ProxyReg, dl,
+      DAG.getVTList(ProxyRegOps[i].getSimpleValueType(), MVT::Other, MVT::Glue),
+      { Chain, ProxyRegOps[i], InFlag }
+    );
+
+    Chain = Ret.getValue(1);
+    InFlag = Ret.getValue(2);
+
+    if (ProxyRegTruncates[i].hasValue()) {
+      Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].getValue(), Ret);
+    }
+
+    InVals.push_back(Ret);
+  }
+
   // set isTailCall to false for now, until we figure out how to express
   // tail call optimization in PTX
   isTailCall = false;
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h
index 3e109f7..66fab2b 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -51,6 +51,7 @@
   CallSeqBegin,
   CallSeqEnd,
   CallPrototype,
+  ProxyReg,
   FUN_SHFL_CLAMP,
   FUN_SHFR_CLAMP,
   MUL_WIDE_SIGNED,
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.td b/lib/Target/NVPTX/NVPTXInstrInfo.td
index 48db941..02a40b9 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -1885,6 +1885,7 @@
 def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
 def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
 def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
+def SDTProxyRegProfile : SDTypeProfile<1, 1, []>;
 
 def DeclareParam :
   SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
@@ -1972,6 +1973,9 @@
 def RETURNNode :
   SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
          [SDNPHasChain, SDNPSideEffect]>;
+def ProxyReg :
+  SDNode<"NVPTXISD::ProxyReg", SDTProxyRegProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
 
 let mayLoad = 1 in {
   class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
@@ -2249,6 +2253,21 @@
 def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
 def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
 
+class ProxyRegInst<string SzStr, NVPTXRegClass regclass> :
+  NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
+            !strconcat("mov.", SzStr, " \t$dst, $src;"),
+            [(set regclass:$dst, (ProxyReg regclass:$src))]>;
+
+let isCodeGenOnly=1, isPseudo=1 in {
+  def ProxyRegI1    : ProxyRegInst<"pred", Int1Regs>;
+  def ProxyRegI16   : ProxyRegInst<"b16",  Int16Regs>;
+  def ProxyRegI32   : ProxyRegInst<"b32",  Int32Regs>;
+  def ProxyRegI64   : ProxyRegInst<"b64",  Int64Regs>;
+  def ProxyRegF16   : ProxyRegInst<"b16",  Float16Regs>;
+  def ProxyRegF32   : ProxyRegInst<"f32",  Float32Regs>;
+  def ProxyRegF64   : ProxyRegInst<"f64",  Float64Regs>;
+  def ProxyRegF16x2 : ProxyRegInst<"b32",  Float16x2Regs>;
+}
 
 //
 // Load / Store Handling
@@ -2541,7 +2560,7 @@
 class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
   NVPTXRegClass regclassOut> :
            NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
-           !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
+           !strconcat("mov.b", SzStr, " \t$d, $a;"),
      [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
 
 def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
diff --git a/lib/Target/NVPTX/NVPTXProxyRegErasure.cpp b/lib/Target/NVPTX/NVPTXProxyRegErasure.cpp
new file mode 100644
index 0000000..f60d841
--- /dev/null
+++ b/lib/Target/NVPTX/NVPTXProxyRegErasure.cpp
@@ -0,0 +1,122 @@
+//===- NVPTXProxyRegErasure.cpp - NVPTX Proxy Register Instruction Erasure -==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass removes ProxyReg instructions and restores the registers they
+// forwarded. The instructions are needed at the instruction selection stage
+// to make sure that callseq_end nodes are not removed as "dead nodes". That
+// can happen when we expand instructions into libcalls and the call site
+// does not care about the libcall chain: the call site cares about data
+// flow only, and the last data flow node happens to sit before callseq_end.
+// The node therefore becomes dangling and "dead". The ProxyReg acts as an
+// additional data flow node *after* callseq_end in the chain and ensures
+// that everything is preserved.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+namespace llvm {
+void initializeNVPTXProxyRegErasurePass(PassRegistry &);
+}
+
+namespace {
+
+struct NVPTXProxyRegErasure : public MachineFunctionPass {
+public:
+  static char ID;
+  NVPTXProxyRegErasure() : MachineFunctionPass(ID) {
+    initializeNVPTXProxyRegErasurePass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  StringRef getPassName() const override {
+    return "NVPTX Proxy Register Instruction Erasure";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+private:
+  void replaceMachineInstructionUsage(MachineFunction &MF, MachineInstr &MI);
+
+  void replaceRegisterUsage(MachineInstr &Instr, MachineOperand &From,
+                            MachineOperand &To);
+};
+
+} // namespace
+
+char NVPTXProxyRegErasure::ID = 0;
+
+INITIALIZE_PASS(NVPTXProxyRegErasure, "nvptx-proxyreg-erasure", "NVPTX ProxyReg Erasure", false, false)
+
+bool NVPTXProxyRegErasure::runOnMachineFunction(MachineFunction &MF) {
+  SmallVector<MachineInstr *, 16> RemoveList;
+
+  for (auto &BB : MF) {
+    for (auto &MI : BB) {
+      switch (MI.getOpcode()) {
+      case NVPTX::ProxyRegI1:
+      case NVPTX::ProxyRegI16:
+      case NVPTX::ProxyRegI32:
+      case NVPTX::ProxyRegI64:
+      case NVPTX::ProxyRegF16:
+      case NVPTX::ProxyRegF16x2:
+      case NVPTX::ProxyRegF32:
+      case NVPTX::ProxyRegF64:
+        replaceMachineInstructionUsage(MF, MI);
+        RemoveList.push_back(&MI);
+        break;
+      }
+    }
+  }
+
+  for (auto *MI : RemoveList) {
+    MI->eraseFromParent();
+  }
+
+  return !RemoveList.empty();
+}
+
+void NVPTXProxyRegErasure::replaceMachineInstructionUsage(MachineFunction &MF,
+                                                          MachineInstr &MI) {
+  auto &InOp = *MI.uses().begin();
+  auto &OutOp = *MI.defs().begin();
+
+  assert(InOp.isReg() && "ProxyReg input operand should be a register.");
+  assert(OutOp.isReg() && "ProxyReg output operand should be a register.");
+
+  for (auto &BB : MF) {
+    for (auto &I : BB) {
+      replaceRegisterUsage(I, OutOp, InOp);
+    }
+  }
+}
+
+void NVPTXProxyRegErasure::replaceRegisterUsage(MachineInstr &Instr,
+                                                MachineOperand &From,
+                                                MachineOperand &To) {
+  for (auto &Op : Instr.uses()) {
+    if (Op.isReg() && Op.getReg() == From.getReg()) {
+      Op.setReg(To.getReg());
+    }
+  }
+}
+
+MachineFunctionPass *llvm::createNVPTXProxyRegErasurePass() {
+  return new NVPTXProxyRegErasure();
+}
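
The pass body is a two-step substitution: record each ProxyReg's output register, rewrite every use of it back to the input register, then erase the pseudo. A freestanding sketch of the rewrite under assumed, simplified types:

    #include <vector>

    struct Operand { bool IsDef; unsigned Reg; };   // hypothetical shape
    using Instr = std::vector<Operand>;

    // Mirror of replaceRegisterUsage above: point every later use of the
    // ProxyReg def (From) back at its source register (To), after which the
    // pseudo itself can be erased.
    void replaceUses(std::vector<Instr> &Block, unsigned From, unsigned To) {
      for (Instr &I : Block)
        for (Operand &O : I)
          if (!O.IsDef && O.Reg == From)
            O.Reg = To;
    }
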
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 8c009ae..8ec0ddb 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -68,6 +68,7 @@
 void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
 void initializeNVPTXLowerArgsPass(PassRegistry &);
 void initializeNVPTXLowerAllocaPass(PassRegistry &);
+void initializeNVPTXProxyRegErasurePass(PassRegistry &);
 
 } // end namespace llvm
 
@@ -87,6 +88,7 @@
   initializeNVPTXLowerArgsPass(PR);
   initializeNVPTXLowerAllocaPass(PR);
   initializeNVPTXLowerAggrCopiesPass(PR);
+  initializeNVPTXProxyRegErasurePass(PR);
 }
 
 static std::string computeDataLayout(bool is64Bit, bool UseShortPointers) {
@@ -160,6 +162,7 @@
 
   void addIRPasses() override;
   bool addInstSelector() override;
+  void addPreRegAlloc() override;
   void addPostRegAlloc() override;
   void addMachineSSAOptimization() override;
 
@@ -301,6 +304,11 @@
   return false;
 }
 
+void NVPTXPassConfig::addPreRegAlloc() {
+  // Remove Proxy Register pseudo instructions used to keep `callseq_end` alive.
+  addPass(createNVPTXProxyRegErasurePass());
+}
+
 void NVPTXPassConfig::addPostRegAlloc() {
   addPass(createNVPTXPrologEpilogPass(), false);
   if (getOptLevel() != CodeGenOpt::None) {
diff --git a/lib/Target/Nios2/CMakeLists.txt b/lib/Target/Nios2/CMakeLists.txt
deleted file mode 100644
index 6393cc5..0000000
--- a/lib/Target/Nios2/CMakeLists.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-set(LLVM_TARGET_DEFINITIONS Nios2.td)
-
-tablegen(LLVM Nios2GenAsmWriter.inc -gen-asm-writer)
-tablegen(LLVM Nios2GenCallingConv.inc -gen-callingconv)
-tablegen(LLVM Nios2GenDAGISel.inc -gen-dag-isel)
-tablegen(LLVM Nios2GenInstrInfo.inc -gen-instr-info)
-tablegen(LLVM Nios2GenRegisterInfo.inc -gen-register-info)
-tablegen(LLVM Nios2GenSubtargetInfo.inc -gen-subtarget)
-
-add_public_tablegen_target(Nios2CommonTableGen)
-
-#Nios2CodeGen should match with LLVMBuild.txt Nios2CodeGen
-add_llvm_target(Nios2CodeGen
- Nios2AsmPrinter.cpp
- Nios2FrameLowering.cpp
- Nios2InstrInfo.cpp
- Nios2ISelDAGToDAG.cpp
- Nios2ISelLowering.cpp
- Nios2MachineFunction.cpp
- Nios2MCInstLower.cpp
- Nios2RegisterInfo.cpp
- Nios2Subtarget.cpp
- Nios2TargetMachine.cpp
- Nios2TargetObjectFile.cpp
- )
-
-#Should match with "subdirectories =  InstPrinter MCTargetDesc TargetInfo" in LLVMBuild.txt
-add_subdirectory(InstPrinter)
-add_subdirectory(MCTargetDesc)
-add_subdirectory(TargetInfo)
diff --git a/lib/Target/Nios2/InstPrinter/CMakeLists.txt b/lib/Target/Nios2/InstPrinter/CMakeLists.txt
deleted file mode 100644
index dc50be7..0000000
--- a/lib/Target/Nios2/InstPrinter/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_llvm_library(LLVMNios2AsmPrinter Nios2InstPrinter.cpp)
diff --git a/lib/Target/Nios2/InstPrinter/LLVMBuild.txt b/lib/Target/Nios2/InstPrinter/LLVMBuild.txt
deleted file mode 100644
index bc7882d..0000000
--- a/lib/Target/Nios2/InstPrinter/LLVMBuild.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-;===- ./lib/Target/Nios2/InstPrinter/LLVMBuild.txt -------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = Nios2AsmPrinter
-parent = Nios2
-required_libraries = MC Support
-add_to_library_groups = Nios2
diff --git a/lib/Target/Nios2/InstPrinter/Nios2InstPrinter.cpp b/lib/Target/Nios2/InstPrinter/Nios2InstPrinter.cpp
deleted file mode 100644
index de0a5f9..0000000
--- a/lib/Target/Nios2/InstPrinter/Nios2InstPrinter.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-//===-- Nios2InstPrinter.cpp - Convert Nios2 MCInst to assembly syntax-----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an Nios2 MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2InstPrinter.h"
-
-#include "Nios2InstrInfo.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "asm-printer"
-
-#define PRINT_ALIAS_INSTR
-#include "Nios2GenAsmWriter.inc"
-
-void Nios2InstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
-  OS << getRegisterName(RegNo);
-}
-
-void Nios2InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
-                                 StringRef Annot, const MCSubtargetInfo &STI) {
-  // Try to print any aliases first.
-  if (!printAliasInstr(MI, STI, O))
-    printInstruction(MI, STI, O);
-  printAnnotation(O, Annot);
-}
-
-void Nios2InstPrinter::printOperand(const MCInst *MI, int OpNo,
-                                    const MCSubtargetInfo &STI,
-                                    raw_ostream &O) {
-  const MCOperand &Op = MI->getOperand(OpNo);
-  if (Op.isReg()) {
-    printRegName(O, Op.getReg());
-    return;
-  }
-
-  if (Op.isImm()) {
-    O << Op.getImm();
-    return;
-  }
-
-  assert(Op.isExpr() && "unknown operand kind in printOperand");
-  Op.getExpr()->print(O, &MAI, true);
-}
-
-void Nios2InstPrinter::printMemOperand(const MCInst *MI, int opNum,
-                                       const MCSubtargetInfo &STI,
-                                       raw_ostream &O, const char *Modifier) {
-  // Load/Store memory operands -- imm($reg)
-  printOperand(MI, opNum + 1, STI, O);
-  O << "(";
-  printOperand(MI, opNum, STI, O);
-  O << ")";
-}
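
For reference, a minimal standalone sketch (not LLVM code; the helper name is
illustrative) of the imm($reg) memory-operand syntax that printMemOperand
emits: the offset operand at opNum + 1 prints first, then the base register
in parentheses.

    #include <iostream>

    // Mimics Nios2InstPrinter::printMemOperand: offset first, base in parens.
    static void printMem(std::ostream &OS, const char *BaseReg, int Offset) {
      OS << Offset << "(" << BaseReg << ")";
    }

    int main() {
      std::cout << "ldw r2, ";
      printMem(std::cout, "r4", 8); // prints "8(r4)" -> "ldw r2, 8(r4)"
      std::cout << "\n";
    }
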
diff --git a/lib/Target/Nios2/InstPrinter/Nios2InstPrinter.h b/lib/Target/Nios2/InstPrinter/Nios2InstPrinter.h
deleted file mode 100644
index 43a1295..0000000
--- a/lib/Target/Nios2/InstPrinter/Nios2InstPrinter.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//= Nios2InstPrinter.h - Convert Nios2 MCInst to assembly syntax -*- C++ -*-==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints a Nios2 MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_INSTPRINTER_NIOS2INSTPRINTER_H
-#define LLVM_LIB_TARGET_NIOS2_INSTPRINTER_NIOS2INSTPRINTER_H
-
-#include "llvm/MC/MCInstPrinter.h"
-
-namespace llvm {
-
-class Nios2InstPrinter : public MCInstPrinter {
-public:
-  Nios2InstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
-                   const MCRegisterInfo &MRI)
-      : MCInstPrinter(MAI, MII, MRI) {}
-
-  void printRegName(raw_ostream &OS, unsigned RegNo) const override;
-  void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
-                 const MCSubtargetInfo &STI) override;
-
-  // Autogenerated by tblgen.
-  void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI,
-                        raw_ostream &O);
-  static const char *getRegisterName(unsigned RegNo);
-
-  bool printAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI,
-                       raw_ostream &O);
-
-  void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
-                               unsigned PrintMethodIdx,
-                               const MCSubtargetInfo &STI, raw_ostream &O);
-  void printOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
-                    raw_ostream &OS);
-  void printMemOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
-                       raw_ostream &OS, const char *Modifier = nullptr);
-};
-} // end namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/LLVMBuild.txt b/lib/Target/Nios2/LLVMBuild.txt
deleted file mode 100644
index 46dd933..0000000
--- a/lib/Target/Nios2/LLVMBuild.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-;===- ./lib/Target/Nios2/LLVMBuild.txt -------------------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-#The following comments are extracted from http://llvm.org/docs/LLVMBuild.html
-
-[common]
-subdirectories =
-    InstPrinter 
-    MCTargetDesc
-    TargetInfo
-
-[component_0]
-#TargetGroup components are an extension of LibraryGroups, specifically for
-#defining LLVM targets (which are handled specially in a few places).
-type = TargetGroup
-#The name of the component should always be the name of the target (it
-#should match "def Nios2 : Target" in Nios2.td).
-name = Nios2
-#The Nios2 component is located in the Target/ directory.
-parent = Target
-#Whether this target defines an assembly parser, assembly printer, or
-#disassembler, and supports JIT compilation. These are optional.
-has_asmprinter = 1
-
-[component_1]
-#component_1 is a Library named Nios2CodeGen. After the build it will be
-#lib/libLLVMNios2CodeGen.a in your build directory.
-type = Library
-name = Nios2CodeGen
-#The Nios2CodeGen component (a Library) is located in the Nios2/ directory.
-parent = Nios2
-#If given, a list of the names of Library or LibraryGroup components which
-#must also be linked in whenever this library is used. That is, the link-time
-#dependencies for this component. When tools are built, the build system will
-#include the transitive closure of all required_libraries for the components
-#the tool needs.
-required_libraries = AsmPrinter
-                     CodeGen
-                     Core
-                     GlobalISel
-                     MC
-                     Nios2AsmPrinter
-                     Nios2Desc
-                     Nios2Info
-                     SelectionDAG
-                     Support
-                     Target
-#end of required_libraries
-
-#All LLVMBuild.txt files under Target/Nios2 and its subdirectories use
-#'add_to_library_groups = Nios2'.
-add_to_library_groups = Nios2
diff --git a/lib/Target/Nios2/MCTargetDesc/CMakeLists.txt b/lib/Target/Nios2/MCTargetDesc/CMakeLists.txt
deleted file mode 100644
index 138832d..0000000
--- a/lib/Target/Nios2/MCTargetDesc/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-#MCTargetDesc/CMakeLists.txt
-add_llvm_library(LLVMNios2Desc
-                 Nios2AsmBackend.cpp
-                 Nios2ELFObjectWriter.cpp
-                 Nios2MCAsmInfo.cpp
-                 Nios2MCExpr.cpp
-                 Nios2MCTargetDesc.cpp
-                 Nios2TargetStreamer.cpp)
-
diff --git a/lib/Target/Nios2/MCTargetDesc/LLVMBuild.txt b/lib/Target/Nios2/MCTargetDesc/LLVMBuild.txt
deleted file mode 100644
index 3794c83..0000000
--- a/lib/Target/Nios2/MCTargetDesc/LLVMBuild.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-;===- ./lib/Target/Nios2/MCTargetDesc/LLVMBuild.txt ------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = Nios2Desc
-parent = Nios2
-required_libraries = MC
-                     Nios2AsmPrinter
-                     Nios2Info
-                     Support
-add_to_library_groups = Nios2
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2AsmBackend.cpp b/lib/Target/Nios2/MCTargetDesc/Nios2AsmBackend.cpp
deleted file mode 100644
index 8ac08c6..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2AsmBackend.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-//===-- Nios2AsmBackend.cpp - Nios2 Asm Backend  --------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the Nios2AsmBackend class.
-//
-//===----------------------------------------------------------------------===//
-//
-
-#include "MCTargetDesc/Nios2AsmBackend.h"
-#include "MCTargetDesc/Nios2FixupKinds.h"
-#include "MCTargetDesc/Nios2MCTargetDesc.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCFixupKindInfo.h"
-#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-
-using namespace llvm;
-
-// Prepare the fixup value for the target's encoding.
-static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value) {
-
-  unsigned Kind = Fixup.getKind();
-
-  // Add/subtract and shift
-  switch (Kind) {
-  default:
-    return 0;
-  case Nios2::fixup_Nios2_LO16:
-    break;
-  case Nios2::fixup_Nios2_HI16:
-    // Get the higher 16-bits. Also add 1 if bit 15 is 1.
-    Value = ((Value + 0x8000) >> 16) & 0xffff;
-    break;
-  }
-
-  return Value;
-}
-
-// Calculate index for Nios2 specific little endian byte order
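-// Maps i = 0,1,2,3 to byte offsets 2,3,0,1: the two 16-bit halves of the
-// word are swapped, with bytes inside each half kept little-endian.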
-static unsigned calculateLEIndex(unsigned i) {
-  assert(i <= 3 && "Index out of range!");
-
-  return (1 - i / 2) * 2 + i % 2;
-}
-
-/// applyFixup - Apply the \p Value for the given \p Fixup into the provided
-/// data fragment, at the offset specified by the fixup and following the
-/// fixup kind as appropriate.
-void Nios2AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
-                                 const MCValue &Target,
-                                 MutableArrayRef<char> Data, uint64_t Value,
-                                 bool IsResolved) const {
-  MCFixupKind Kind = Fixup.getKind();
-  Value = adjustFixupValue(Fixup, Value);
-
-  if (!Value)
-    return; // Doesn't change encoding.
-
-  // Where do we start in the object
-  unsigned Offset = Fixup.getOffset();
-  // Number of bytes we need to fixup
-  unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
-  // Grab current value, if any, from bits.
-  uint64_t CurVal = 0;
-
-  for (unsigned i = 0; i != NumBytes; ++i) {
-    unsigned Idx = calculateLEIndex(i);
-    CurVal |= (uint64_t)((uint8_t)Data[Offset + Idx]) << (i * 8);
-  }
-
-  uint64_t Mask = ((uint64_t)(-1) >> (64 - getFixupKindInfo(Kind).TargetSize));
-  CurVal |= Value & Mask;
-
-  // Write out the fixed up bytes back to the code/data bits.
-  for (unsigned i = 0; i != NumBytes; ++i) {
-    unsigned Idx = calculateLEIndex(i);
-    Data[Offset + Idx] = (uint8_t)((CurVal >> (i * 8)) & 0xff);
-  }
-}
-
-Optional<MCFixupKind> Nios2AsmBackend::getFixupKind(StringRef Name) const {
-  return StringSwitch<Optional<MCFixupKind>>(Name)
-      .Case("R_NIOS2_NONE", (MCFixupKind)Nios2::fixup_Nios2_32)
-      .Case("R_NIOS2_32", FK_Data_4)
-      .Default(MCAsmBackend::getFixupKind(Name));
-}
-
-//@getFixupKindInfo {
-const MCFixupKindInfo &
-Nios2AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
-  const static MCFixupKindInfo Infos[Nios2::NumTargetFixupKinds] = {
-      // This table *must* be in the same order as the fixup_* kinds in
-      // Nios2FixupKinds.h.
-      //
-      // name                        offset  bits  flags
-      {"fixup_Nios2_32", 0, 32, 0},
-      {"fixup_Nios2_HI16", 0, 16, 0},
-      {"fixup_Nios2_LO16", 0, 16, 0}};
-
-  if (Kind < FirstTargetFixupKind)
-    return MCAsmBackend::getFixupKindInfo(Kind);
-
-  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
-         "Invalid kind!");
-  return Infos[Kind - FirstTargetFixupKind];
-}
-
-std::unique_ptr<MCObjectTargetWriter>
-Nios2AsmBackend::createObjectTargetWriter() const {
-  return createNios2ELFObjectWriter(MCELFObjectTargetWriter::getOSABI(OSType));
-}
-
-bool Nios2AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
-  return true;
-}
-
-// MCAsmBackend
-MCAsmBackend *llvm::createNios2AsmBackend(const Target &T,
-                                          const MCSubtargetInfo &STI,
-                                          const MCRegisterInfo &MRI,
-                                          const MCTargetOptions &Options) {
-  return new Nios2AsmBackend(T, STI.getTargetTriple().getOS());
-}
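
A worked example of the HI16 adjustment above, as a standalone sketch (not
part of this tree): adding 0x8000 before taking the high half rounds it up
whenever bit 15 of the value is set, compensating for the sign extension the
consumer applies to the low 16 bits.

    #include <cassert>
    #include <cstdint>

    // Same computation as adjustFixupValue for fixup_Nios2_HI16.
    static uint32_t hiadj(uint32_t V) { return ((V + 0x8000) >> 16) & 0xffff; }

    int main() {
      uint32_t Addr = 0x00018000;                // bit 15 set
      int32_t SExtLo = (int16_t)(Addr & 0xffff); // low half sign-extends to -32768
      assert((hiadj(Addr) << 16) + SExtLo == Addr); // (2 << 16) - 32768 == 0x18000
      return 0;
    }
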
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2AsmBackend.h b/lib/Target/Nios2/MCTargetDesc/Nios2AsmBackend.h
deleted file mode 100644
index 1f114bd..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2AsmBackend.h
+++ /dev/null
@@ -1,81 +0,0 @@
-//===-- Nios2AsmBackend.h - Nios2 Asm Backend  ----------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the Nios2AsmBackend class.
-//
-//===----------------------------------------------------------------------===//
-//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2ASMBACKEND_H
-#define LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2ASMBACKEND_H
-
-#include "MCTargetDesc/Nios2FixupKinds.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCAsmBackend.h"
-
-namespace llvm {
-
-class MCAssembler;
-struct MCFixupKindInfo;
-class Target;
-class MCObjectWriter;
-
-class Nios2AsmBackend : public MCAsmBackend {
-  Triple::OSType OSType;
-
-public:
-  Nios2AsmBackend(const Target &T, Triple::OSType OSType)
-      : MCAsmBackend(support::little), OSType(OSType) {}
-
-  std::unique_ptr<MCObjectTargetWriter>
-  createObjectTargetWriter() const override;
-
-  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
-
-  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
-                  const MCValue &Target, MutableArrayRef<char> Data,
-                  uint64_t Value, bool IsResolved) const override;
-
-  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
-  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
-
-  unsigned getNumFixupKinds() const override {
-    return Nios2::NumTargetFixupKinds;
-  }
-
-  /// MayNeedRelaxation - Check whether the given instruction may need
-  /// relaxation.
-  ///
-  /// \param Inst - The instruction to test.
-  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
-
-  /// fixupNeedsRelaxation - Target specific predicate for whether a given
-  /// fixup requires the associated instruction to be relaxed.
-  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
-                            const MCRelaxableFragment *DF,
-                            const MCAsmLayout &Layout) const override {
-    // FIXME.
-    llvm_unreachable("RelaxInstruction() unimplemented");
-    return false;
-  }
-
-  /// RelaxInstruction - Relax the instruction in the given fragment
-  /// to the next wider instruction.
-  ///
-  /// \param Inst - The instruction to relax, which may be the same
-  /// as the output.
-  /// \param [out] Res On return, the relaxed instruction.
-  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
-                        MCInst &Res) const override {}
-
-}; // class Nios2AsmBackend
-
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2BaseInfo.h b/lib/Target/Nios2/MCTargetDesc/Nios2BaseInfo.h
deleted file mode 100644
index 225671e..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2BaseInfo.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- Nios2BaseInfo.h - Top level definitions for NIOS2 MC ----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains small standalone helper functions and enum definitions for
-// the Nios2 target useful for the compiler back-end and the MC libraries.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2BASEINFO_H
-#define LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2BASEINFO_H
-
-namespace llvm {
-
-/// Nios2FG - This namespace holds all of the target specific flags that
-/// instruction info tracks.
-namespace Nios2FG {
-/// Target Operand Flag enum.
-enum TOF {
-  //===------------------------------------------------------------------===//
-  // Nios2 Specific MachineOperand flags.
-
-  MO_NO_FLAG,
-
-  /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
-  /// address.
-  MO_ABS_HI,
-  MO_ABS_LO,
-
-};
-} // namespace Nios2FG
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2ELFObjectWriter.cpp b/lib/Target/Nios2/MCTargetDesc/Nios2ELFObjectWriter.cpp
deleted file mode 100644
index db432d1..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2ELFObjectWriter.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- Nios2ELFObjectWriter.cpp - Nios2 ELF Writer -----------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/Nios2FixupKinds.h"
-#include "MCTargetDesc/Nios2MCExpr.h"
-#include "MCTargetDesc/Nios2MCTargetDesc.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCObjectWriter.h"
-
-using namespace llvm;
-
-namespace {
-class Nios2ELFObjectWriter : public MCELFObjectTargetWriter {
-public:
-  Nios2ELFObjectWriter(uint8_t OSABI)
-      : MCELFObjectTargetWriter(false, OSABI, ELF::EM_ALTERA_NIOS2, false) {}
-
-  ~Nios2ELFObjectWriter() override;
-
-  unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
-                        const MCFixup &Fixup, bool IsPCRel) const override;
-};
-} // namespace
-
-Nios2ELFObjectWriter::~Nios2ELFObjectWriter() {}
-
-unsigned Nios2ELFObjectWriter::getRelocType(MCContext &Ctx,
-                                            const MCValue &Target,
-                                            const MCFixup &Fixup,
-                                            bool IsPCRel) const {
-  return 0;
-}
-
-std::unique_ptr<MCObjectTargetWriter>
-llvm::createNios2ELFObjectWriter(uint8_t OSABI) {
-  return llvm::make_unique<Nios2ELFObjectWriter>(OSABI);
-}
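
Note that this object writer was still a stub: getRelocType unconditionally
returns 0, so every fixup would have been emitted with relocation type 0
regardless of its kind.
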
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2FixupKinds.h b/lib/Target/Nios2/MCTargetDesc/Nios2FixupKinds.h
deleted file mode 100644
index c169a1b..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2FixupKinds.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//===-- Nios2FixupKinds.h - Nios2 Specific Fixup Entries --------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2FIXUPKINDS_H
-#define LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2FIXUPKINDS_H
-
-#include "llvm/MC/MCFixup.h"
-
-namespace llvm {
-namespace Nios2 {
-// Although most of the current fixup types reflect a unique relocation,
-// one can have multiple fixup types for a given relocation, and thus they
-// need to be uniquely named.
-//
-// This table *must* be in the same order as
-// MCFixupKindInfo Infos[Nios2::NumTargetFixupKinds]
-// in Nios2AsmBackend.cpp.
-enum Fixups {
-  // Pure 32-bit fixup resulting in R_NIOS2_32.
-  fixup_Nios2_32 = FirstTargetFixupKind,
-
-  // Upper 16-bit fixup resulting in R_NIOS2_HI16.
-  fixup_Nios2_HI16,
-
-  // Lower 16-bit fixup resulting in R_NIOS2_LO16.
-  fixup_Nios2_LO16,
-
-  // Marker
-  LastTargetFixupKind,
-  NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
-};
-} // namespace Nios2
-} // namespace llvm
-
-#endif // LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2FIXUPKINDS_H
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2MCAsmInfo.cpp b/lib/Target/Nios2/MCTargetDesc/Nios2MCAsmInfo.cpp
deleted file mode 100644
index e3c66e6..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2MCAsmInfo.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//===-- Nios2MCAsmInfo.cpp - Nios2 Asm Properties -------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the Nios2MCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2MCAsmInfo.h"
-
-#include "llvm/ADT/Triple.h"
-
-using namespace llvm;
-
-void Nios2MCAsmInfo::anchor() {}
-
-Nios2MCAsmInfo::Nios2MCAsmInfo(const Triple &TheTriple) {
-  if (TheTriple.getArch() == Triple::nios2)
-    IsLittleEndian = true; // IsLittleEndian already defaults to true
-
-  AlignmentIsInBytes = false;
-  Data16bitsDirective = "\t.2byte\t";
-  Data32bitsDirective = "\t.4byte\t";
-  Data64bitsDirective = "\t.8byte\t";
-  PrivateLabelPrefix = ".LC";
-  CommentString = "#";
-  ZeroDirective = "\t.space\t";
-  GPRel32Directive = "\t.gpword\t";
-  GPRel64Directive = "\t.gpdword\t";
-  WeakRefDirective = "\t.weak\t";
-  GlobalDirective = "\t.global\t";
-  AscizDirective = "\t.string\t";
-  UseAssignmentForEHBegin = true;
-
-  SupportsDebugInformation = true;
-  ExceptionsType = ExceptionHandling::DwarfCFI;
-  DwarfRegNumForCFI = true;
-  UsesELFSectionDirectiveForBSS = true;
-}
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2MCAsmInfo.h b/lib/Target/Nios2/MCTargetDesc/Nios2MCAsmInfo.h
deleted file mode 100644
index 0c81276..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2MCAsmInfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===-- Nios2MCAsmInfo.h - Nios2 Asm Info ----------------------*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the Nios2MCAsmInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2MCASMINFO_H
-#define LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2MCASMINFO_H
-
-#include "llvm/MC/MCAsmInfoELF.h"
-
-namespace llvm {
-class Triple;
-
-class Nios2MCAsmInfo : public MCAsmInfoELF {
-  void anchor() override;
-
-public:
-  explicit Nios2MCAsmInfo(const Triple &TheTriple);
-};
-
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2MCExpr.cpp b/lib/Target/Nios2/MCTargetDesc/Nios2MCExpr.cpp
deleted file mode 100644
index 0f12c9e..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2MCExpr.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-//===-- Nios2MCExpr.cpp - Nios2 specific MC expression classes ------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2.h"
-
-#include "Nios2MCExpr.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCObjectStreamer.h"
-#include "llvm/MC/MCSymbolELF.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "nios2mcexpr"
-
-const Nios2MCExpr *Nios2MCExpr::create(Nios2MCExpr::Nios2ExprKind Kind,
-                                       const MCExpr *Expr, MCContext &Ctx) {
-  return new (Ctx) Nios2MCExpr(Kind, Expr);
-}
-
-const Nios2MCExpr *Nios2MCExpr::create(const MCSymbol *Symbol,
-                                       Nios2MCExpr::Nios2ExprKind Kind,
-                                       MCContext &Ctx) {
-  const MCSymbolRefExpr *MCSym =
-      MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None, Ctx);
-  return new (Ctx) Nios2MCExpr(Kind, MCSym);
-}
-
-void Nios2MCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
-
-  switch (Kind) {
-  case CEK_None:
-  case CEK_Special:
-    llvm_unreachable("CEK_None and CEK_Special are invalid");
-    break;
-  case CEK_ABS_HI:
-    OS << "%hiadj";
-    break;
-  case CEK_ABS_LO:
-    OS << "%lo";
-    break;
-  }
-
-  OS << '(';
-  Expr->print(OS, MAI, true);
-  OS << ')';
-}
-
-bool Nios2MCExpr::evaluateAsRelocatableImpl(MCValue &Res,
-                                            const MCAsmLayout *Layout,
-                                            const MCFixup *Fixup) const {
-  return getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup);
-}
-
-void Nios2MCExpr::visitUsedExpr(MCStreamer &Streamer) const {
-  Streamer.visitUsedExpr(*getSubExpr());
-}
-
-void Nios2MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
-  switch (getKind()) {
-  case CEK_None:
-  case CEK_Special:
-    llvm_unreachable("CEK_None and CEK_Special are invalid");
-    break;
-  case CEK_ABS_HI:
-  case CEK_ABS_LO:
-    break;
-  }
-}
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2MCExpr.h b/lib/Target/Nios2/MCTargetDesc/Nios2MCExpr.h
deleted file mode 100644
index 5b49005..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2MCExpr.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- Nios2MCExpr.h - Nios2 specific MC expression classes ----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2MCEXPR_H
-#define LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2MCEXPR_H
-
-#include "llvm/MC/MCAsmLayout.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCValue.h"
-
-namespace llvm {
-
-class Nios2MCExpr : public MCTargetExpr {
-public:
-  enum Nios2ExprKind {
-    CEK_None,
-    CEK_ABS_HI,
-    CEK_ABS_LO,
-    CEK_Special,
-  };
-
-private:
-  const Nios2ExprKind Kind;
-  const MCExpr *Expr;
-
-  explicit Nios2MCExpr(Nios2ExprKind Kind, const MCExpr *Expr)
-      : Kind(Kind), Expr(Expr) {}
-
-public:
-  static const Nios2MCExpr *create(Nios2ExprKind Kind, const MCExpr *Expr,
-                                   MCContext &Ctx);
-  static const Nios2MCExpr *create(const MCSymbol *Symbol,
-                                   Nios2MCExpr::Nios2ExprKind Kind,
-                                   MCContext &Ctx);
-
-  /// Get the kind of this expression.
-  Nios2ExprKind getKind() const { return Kind; }
-
-  /// Get the child of this expression.
-  const MCExpr *getSubExpr() const { return Expr; }
-
-  void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
-  bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
-                                 const MCFixup *Fixup) const override;
-  void visitUsedExpr(MCStreamer &Streamer) const override;
-  MCFragment *findAssociatedFragment() const override {
-    return getSubExpr()->findAssociatedFragment();
-  }
-
-  void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
-};
-} // end namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2MCTargetDesc.cpp b/lib/Target/Nios2/MCTargetDesc/Nios2MCTargetDesc.cpp
deleted file mode 100644
index e57b44d..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2MCTargetDesc.cpp
+++ /dev/null
@@ -1,102 +0,0 @@
-//===-- Nios2MCTargetDesc.cpp - Nios2 Target Descriptions -----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides Nios2 specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2MCTargetDesc.h"
-#include "InstPrinter/Nios2InstPrinter.h"
-#include "Nios2MCAsmInfo.h"
-#include "Nios2TargetStreamer.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-#define GET_INSTRINFO_MC_DESC
-#include "Nios2GenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "Nios2GenSubtargetInfo.inc"
-
-#define GET_REGINFO_MC_DESC
-#include "Nios2GenRegisterInfo.inc"
-
-static MCInstrInfo *createNios2MCInstrInfo() {
-  MCInstrInfo *X = new MCInstrInfo();
-  InitNios2MCInstrInfo(X); // defined in Nios2GenInstrInfo.inc
-  return X;
-}
-
-static MCRegisterInfo *createNios2MCRegisterInfo(const Triple &TT) {
-  MCRegisterInfo *X = new MCRegisterInfo();
-  InitNios2MCRegisterInfo(X, Nios2::R15); // defined in Nios2GenRegisterInfo.inc
-  return X;
-}
-
-static MCSubtargetInfo *
-createNios2MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
-  if (CPU.empty() || CPU == "generic")
-    CPU = "nios2r1";
-  return createNios2MCSubtargetInfoImpl(TT, CPU, FS);
-  // createNios2MCSubtargetInfoImpl defined in Nios2GenSubtargetInfo.inc
-}
-
-static MCAsmInfo *createNios2MCAsmInfo(const MCRegisterInfo &MRI,
-                                       const Triple &TT) {
-  MCAsmInfo *MAI = new Nios2MCAsmInfo(TT);
-
-  unsigned SP = MRI.getDwarfRegNum(Nios2::SP, true);
-  MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, SP, 0);
-  MAI->addInitialFrameState(Inst);
-
-  return MAI;
-}
-
-static MCInstPrinter *createNios2MCInstPrinter(const Triple &T,
-                                               unsigned SyntaxVariant,
-                                               const MCAsmInfo &MAI,
-                                               const MCInstrInfo &MII,
-                                               const MCRegisterInfo &MRI) {
-  return new Nios2InstPrinter(MAI, MII, MRI);
-}
-
-static MCTargetStreamer *createNios2AsmTargetStreamer(MCStreamer &S,
-                                                      formatted_raw_ostream &OS,
-                                                      MCInstPrinter *InstPrint,
-                                                      bool isVerboseAsm) {
-  return new Nios2TargetAsmStreamer(S, OS);
-}
-
-extern "C" void LLVMInitializeNios2TargetMC() {
-  Target *T = &getTheNios2Target();
-
-  // Register the MC asm info.
-  RegisterMCAsmInfoFn X(*T, createNios2MCAsmInfo);
-
-  // Register the MC instruction info.
-  TargetRegistry::RegisterMCInstrInfo(*T, createNios2MCInstrInfo);
-
-  // Register the MC register info.
-  TargetRegistry::RegisterMCRegInfo(*T, createNios2MCRegisterInfo);
-
-  // Register the asm target streamer.
-  TargetRegistry::RegisterAsmTargetStreamer(*T, createNios2AsmTargetStreamer);
-
-  // Register the MC subtarget info.
-  TargetRegistry::RegisterMCSubtargetInfo(*T, createNios2MCSubtargetInfo);
-  // Register the MCInstPrinter.
-  TargetRegistry::RegisterMCInstPrinter(*T, createNios2MCInstPrinter);
-
-  // Register the asm backend.
-  TargetRegistry::RegisterMCAsmBackend(*T, createNios2AsmBackend);
-}
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2MCTargetDesc.h b/lib/Target/Nios2/MCTargetDesc/Nios2MCTargetDesc.h
deleted file mode 100644
index a7c4b16..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2MCTargetDesc.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===-- Nios2MCTargetDesc.h - Nios2 Target Descriptions ---------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides Nios2 specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2MCTARGETDESC_H
-#define LLVM_LIB_TARGET_NIOS2_MCTARGETDESC_NIOS2MCTARGETDESC_H
-
-#include <memory>
-
-namespace llvm {
-class MCAsmBackend;
-class MCObjectTargetWriter;
-class MCRegisterInfo;
-class MCSubtargetInfo;
-class MCTargetOptions;
-class Target;
-class Triple;
-class StringRef;
-class raw_pwrite_stream;
-
-Target &getTheNios2Target();
-
-MCAsmBackend *createNios2AsmBackend(const Target &T, const MCSubtargetInfo &STI,
-                                    const MCRegisterInfo &MRI,
-                                    const MCTargetOptions &Options);
-
-std::unique_ptr<MCObjectTargetWriter> createNios2ELFObjectWriter(uint8_t OSABI);
-
-} // namespace llvm
-
-// Defines symbolic names for Nios2 registers.  This defines a mapping from
-// register name to register number.
-#define GET_REGINFO_ENUM
-#include "Nios2GenRegisterInfo.inc"
-
-// Defines symbolic names for the Nios2 instructions.
-#define GET_INSTRINFO_ENUM
-#include "Nios2GenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_ENUM
-#include "Nios2GenSubtargetInfo.inc"
-
-#endif
diff --git a/lib/Target/Nios2/MCTargetDesc/Nios2TargetStreamer.cpp b/lib/Target/Nios2/MCTargetDesc/Nios2TargetStreamer.cpp
deleted file mode 100644
index 795fd00..0000000
--- a/lib/Target/Nios2/MCTargetDesc/Nios2TargetStreamer.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//===-- Nios2TargetStreamer.cpp - Nios2 Target Streamer Methods -----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides Nios2 specific target streamer methods.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2TargetStreamer.h"
-
-using namespace llvm;
-
-Nios2TargetStreamer::Nios2TargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
-
-Nios2TargetAsmStreamer::Nios2TargetAsmStreamer(MCStreamer &S,
-                                               formatted_raw_ostream &OS)
-    : Nios2TargetStreamer(S) {}
diff --git a/lib/Target/Nios2/Nios2.h b/lib/Target/Nios2/Nios2.h
deleted file mode 100644
index d6c5c1e..0000000
--- a/lib/Target/Nios2/Nios2.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//===-- Nios2.h - Top-level interface for Nios2 representation --*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in
-// the LLVM Nios2 back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2_H
-
-#include "MCTargetDesc/Nios2MCTargetDesc.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-class FunctionPass;
-class formatted_raw_ostream;
-class Nios2TargetMachine;
-class AsmPrinter;
-class MachineInstr;
-class MCInst;
-
-FunctionPass *createNios2ISelDag(Nios2TargetMachine &TM,
-                                 CodeGenOpt::Level OptLevel);
-void LowerNios2MachineInstToMCInst(const MachineInstr *MI, MCInst &OutMI,
-                                   AsmPrinter &AP);
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/Nios2.td b/lib/Target/Nios2/Nios2.td
deleted file mode 100644
index 1acf4c7..0000000
--- a/lib/Target/Nios2/Nios2.td
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- Nios2.td - Describe the Nios2 Target Machine -------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Calling Conv, Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-include "Nios2RegisterInfo.td"
-include "Nios2Schedule.td"
-include "Nios2InstrInfo.td"
-include "Nios2CallingConv.td"
-
-//===----------------------------------------------------------------------===//
-// Nios2 Subtarget features
-//===----------------------------------------------------------------------===//
-def FeatureNios2r1 : SubtargetFeature<"nios2r1", "Nios2ArchVersion",
-                                      "Nios2r1", "Nios2 R1 ISA Support">;
-def FeatureNios2r2 : SubtargetFeature<"nios2r2", "Nios2ArchVersion",
-                                      "Nios2r2", "Nios2 R2 ISA Support">;
-
-//===----------------------------------------------------------------------===//
-// Nios2 processors supported.
-//===----------------------------------------------------------------------===//
-
-class Proc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, Nios2GenericItineraries, Features>;
-
-def : Proc<"nios2r1", [FeatureNios2r1]>;
-def : Proc<"nios2r2", [FeatureNios2r2]>;
-
-def Nios2InstrInfo : InstrInfo;
-
-def Nios2AsmParser : AsmParser {
-  let ShouldEmitMatchRegisterName = 0;
-}
-
-//===----------------------------------------------------------------------===//
-// Declare the target which we are implementing
-//===----------------------------------------------------------------------===//
-
-def Nios2AsmWriter : AsmWriter {
-  string AsmWriterClassName  = "InstPrinter";
-  int PassSubtarget = 1;
-  int Variant = 0;
-}
-
-def Nios2 : Target {
-// def Nios2InstrInfo : InstrInfo as before.
-  let InstructionSet = Nios2InstrInfo;
-  let AssemblyParsers = [Nios2AsmParser];
-  let AssemblyWriters = [Nios2AsmWriter];
-}
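
The two Proc records above made "nios2r1" and "nios2r2" the valid CPU names
for this target; createNios2MCSubtargetInfo in Nios2MCTargetDesc.cpp (earlier
in this diff) fell back to "nios2r1" whenever the requested CPU was empty or
"generic".
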
diff --git a/lib/Target/Nios2/Nios2AsmPrinter.cpp b/lib/Target/Nios2/Nios2AsmPrinter.cpp
deleted file mode 100644
index 1abf195..0000000
--- a/lib/Target/Nios2/Nios2AsmPrinter.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-//===-- Nios2AsmPrinter.cpp - Nios2 LLVM Assembly Printer -----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to GAS-format NIOS2 assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstPrinter/Nios2InstPrinter.h"
-#include "MCTargetDesc/Nios2BaseInfo.h"
-#include "Nios2.h"
-#include "Nios2TargetMachine.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "nios2-asm-printer"
-
-namespace {
-
-class Nios2AsmPrinter : public AsmPrinter {
-
-public:
-  explicit Nios2AsmPrinter(TargetMachine &TM,
-                           std::unique_ptr<MCStreamer> Streamer)
-      : AsmPrinter(TM, std::move(Streamer)) {}
-
-  StringRef getPassName() const override { return "Nios2 Assembly Printer"; }
-
-  //- EmitInstruction() must exist, or there will be a run-time error.
-  void EmitInstruction(const MachineInstr *MI) override;
-  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
-                       unsigned AsmVariant, const char *ExtraCode,
-                       raw_ostream &O) override;
-  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
-                             unsigned AsmVariant, const char *ExtraCode,
-                             raw_ostream &O) override;
-  void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
-  void EmitFunctionEntryLabel() override;
-};
-} // namespace
-
-//- EmitInstruction() must exist, or there will be a run-time error.
-void Nios2AsmPrinter::EmitInstruction(const MachineInstr *MI) {
-
-  //  Print out both ordinary and bundled instructions.
-  MachineBasicBlock::const_instr_iterator I = MI->getIterator();
-  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
-
-  do {
-
-    if (I->isPseudo()) {
-      llvm_unreachable("Pseudo opcode found in EmitInstruction()");
-    }
-
-    MCInst TmpInst0;
-    LowerNios2MachineInstToMCInst(&*I, TmpInst0, *this);
-    EmitToStreamer(*OutStreamer, TmpInst0);
-  } while ((++I != E) && I->isInsideBundle()); // Delay slot check
-}
-
-//		.type	main,@function
-//->		.ent	main                    # @main
-//	main:
-void Nios2AsmPrinter::EmitFunctionEntryLabel() {
-  OutStreamer->EmitLabel(CurrentFnSym);
-}
-
-// Print out an operand for an inline asm expression.
-bool Nios2AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
-                                      unsigned AsmVariant,
-                                      const char *ExtraCode, raw_ostream &O) {
-  printOperand(MI, OpNum, O);
-  return false;
-}
-
-bool Nios2AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
-                                            unsigned OpNum, unsigned AsmVariant,
-                                            const char *ExtraCode,
-                                            raw_ostream &O) {
-  if (ExtraCode && ExtraCode[0])
-    return true; // Unknown modifier
-
-  const MachineOperand &MO = MI->getOperand(OpNum);
-  assert(MO.isReg() && "unexpected inline asm memory operand");
-  O << "($" << Nios2InstPrinter::getRegisterName(MO.getReg()) << ")";
-
-  return false;
-}
-
-void Nios2AsmPrinter::printOperand(const MachineInstr *MI, int opNum,
-                                   raw_ostream &O) {
-  const MachineOperand &MO = MI->getOperand(opNum);
-  bool closeP = false;
-
-  if (MO.getTargetFlags())
-    closeP = true;
-
-  switch (MO.getTargetFlags()) {
-  case Nios2FG::MO_ABS_HI:
-    O << "%hiadj(";
-    break;
-  case Nios2FG::MO_ABS_LO:
-    O << "%lo(";
-    break;
-  }
-
-  switch (MO.getType()) {
-  case MachineOperand::MO_Register:
-    O << '$'
-      << StringRef(Nios2InstPrinter::getRegisterName(MO.getReg())).lower();
-    break;
-
-  case MachineOperand::MO_Immediate:
-    O << MO.getImm();
-    break;
-
-  case MachineOperand::MO_MachineBasicBlock:
-    MO.getMBB()->getSymbol()->print(O, MAI);
-    return;
-
-  case MachineOperand::MO_GlobalAddress:
-    getSymbol(MO.getGlobal())->print(O, MAI);
-    break;
-
-  case MachineOperand::MO_BlockAddress:
-    O << GetBlockAddressSymbol(MO.getBlockAddress())->getName();
-    break;
-
-  case MachineOperand::MO_ExternalSymbol:
-    O << MO.getSymbolName();
-    break;
-
-  default:
-    llvm_unreachable("<unknown operand type>");
-  }
-
-  if (closeP)
-    O << ")";
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeNios2AsmPrinter() {
-  RegisterAsmPrinter<Nios2AsmPrinter> X(getTheNios2Target());
-}
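
A standalone illustration (hypothetical names, not LLVM code) of the operand
wrapping printOperand performs: the MO_ABS_HI and MO_ABS_LO target flags open
"%hiadj(" and "%lo(" respectively, and closeP emits the matching ")".

    #include <iostream>
    #include <string>

    enum Flag { NoFlag, AbsHi, AbsLo }; // stands in for Nios2FG::TOF

    static std::string wrapOperand(Flag F, const std::string &Sym) {
      switch (F) {
      case AbsHi: return "%hiadj(" + Sym + ")";
      case AbsLo: return "%lo(" + Sym + ")";
      default:    return Sym;
      }
    }

    int main() {
      std::cout << wrapOperand(AbsHi, "foo") << "\n"; // %hiadj(foo)
      std::cout << wrapOperand(AbsLo, "foo") << "\n"; // %lo(foo)
    }
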
diff --git a/lib/Target/Nios2/Nios2CallingConv.td b/lib/Target/Nios2/Nios2CallingConv.td
deleted file mode 100644
index f0b172f..0000000
--- a/lib/Target/Nios2/Nios2CallingConv.td
+++ /dev/null
@@ -1,34 +0,0 @@
-//===- Nios2CallingConv.td - Calling Conventions for Nios2 -*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This describes the calling conventions for Nios2 architecture.
-//===----------------------------------------------------------------------===//
-
-/// CCIfSubtarget - Match if the current subtarget has a feature F.
-class CCIfSubtarget<string F, CCAction A>:
-  CCIf<!strconcat("State.getTarget().getSubtarget<Nios2Subtarget>().", F), A>;
-
-def CC_Nios2 : CallingConv<[
-  // i32 and f32 arguments get passed in integer registers if there is space.
-  CCIfType<[i32, f32], CCAssignToReg<[R4, R5, R6, R7]>>,
-
-  // Alternatively, they are assigned to the stack in 4-byte aligned units.
-  CCAssignToStack<4, 4>
-]>;
-
-def RetCC_Nios2EABI : CallingConv<[
-  // i32 are returned in registers R2, R3
-  CCIfType<[i32], CCAssignToReg<[R2, R3]>>,
-  // Floating-point values (FPH2 instructions) use the same register set.
-  CCIfType<[f32], CCAssignToReg<[R2, R3]>>,
-  CCIfByVal<CCPassByVal<4, 4>>,
-  // Stack parameter slots for i32 are 32-bit words, 4-byte aligned.
-  CCIfType<[i32], CCAssignToStack<4, 4>>
-]>;
-
-def CSR : CalleeSavedRegs<(add RA, FP, (sequence "R%u", 16, 23))>;
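
As a worked example of CC_Nios2 (assuming no byval arguments): for a call to
int f(int a, int b, int c, int d, int e), arguments a through d are assigned
R4 through R7 and e gets a 4-byte, 4-byte-aligned stack slot at offset 0; per
RetCC_Nios2EABI, the 32-bit result comes back in R2, with R3 available for a
second i32 part.
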
diff --git a/lib/Target/Nios2/Nios2FrameLowering.cpp b/lib/Target/Nios2/Nios2FrameLowering.cpp
deleted file mode 100644
index 6fb28a6..0000000
--- a/lib/Target/Nios2/Nios2FrameLowering.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-//===-- Nios2FrameLowering.cpp - Nios2 Frame Information ------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Nios2 implementation of TargetFrameLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2FrameLowering.h"
-
-#include "Nios2Subtarget.h"
-#include "llvm/CodeGen/MachineFunction.h"
-
-using namespace llvm;
-
-bool Nios2FrameLowering::hasFP(const MachineFunction &MF) const { return true; }
-
-void Nios2FrameLowering::emitPrologue(MachineFunction &MF,
-                                      MachineBasicBlock &MBB) const {}
-
-void Nios2FrameLowering::emitEpilogue(MachineFunction &MF,
-                                      MachineBasicBlock &MBB) const {}
diff --git a/lib/Target/Nios2/Nios2FrameLowering.h b/lib/Target/Nios2/Nios2FrameLowering.h
deleted file mode 100644
index 4ffb01d..0000000
--- a/lib/Target/Nios2/Nios2FrameLowering.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===-- Nios2FrameLowering.h - Define frame lowering for Nios2 --*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2FRAMELOWERING_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2FRAMELOWERING_H
-
-#include "Nios2.h"
-#include "llvm/CodeGen/TargetFrameLowering.h"
-
-namespace llvm {
-class Nios2Subtarget;
-
-class Nios2FrameLowering : public TargetFrameLowering {
-protected:
-  const Nios2Subtarget &STI;
-
-public:
-  explicit Nios2FrameLowering(const Nios2Subtarget &sti)
-      : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0, 4),
-        STI(sti) {}
-
-  bool hasFP(const MachineFunction &MF) const override;
-  /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
-  /// the function.
-  void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
-  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
-};
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/Nios2ISelDAGToDAG.cpp b/lib/Target/Nios2/Nios2ISelDAGToDAG.cpp
deleted file mode 100644
index 5f96794..0000000
--- a/lib/Target/Nios2/Nios2ISelDAGToDAG.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-//===-- Nios2ISelDAGToDAG.cpp - A Dag to Dag Inst Selector for Nios2 ------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the NIOS2 target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2.h"
-#include "Nios2TargetMachine.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/Debug.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "nios2-isel"
-
-//===----------------------------------------------------------------------===//
-// Instruction Selector Implementation
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Nios2DAGToDAGISel - NIOS2 specific code to select NIOS2 machine
-// instructions for SelectionDAG operations.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class Nios2DAGToDAGISel : public SelectionDAGISel {
-  /// Subtarget - Keep a pointer to the Nios2 Subtarget around so that we can
-  /// make the right decision when generating code for different targets.
-  const Nios2Subtarget *Subtarget;
-
-public:
-  explicit Nios2DAGToDAGISel(Nios2TargetMachine &TM, CodeGenOpt::Level OL)
-      : SelectionDAGISel(TM, OL) {}
-
-  bool runOnMachineFunction(MachineFunction &MF) override {
-    Subtarget = &MF.getSubtarget<Nios2Subtarget>();
-    return SelectionDAGISel::runOnMachineFunction(MF);
-  }
-
-  void Select(SDNode *N) override;
-
-  // Pass Name
-  StringRef getPassName() const override {
-    return "NIOS2 DAG->DAG Pattern Instruction Selection";
-  }
-
-#include "Nios2GenDAGISel.inc"
-};
-} // namespace
-
-// Select instructions that are not custom-selected. Used for expanded,
-// promoted and normal instructions.
-void Nios2DAGToDAGISel::Select(SDNode *Node) {
-
-  // If this is already a machine node, it has been selected; nothing to do.
-  if (Node->isMachineOpcode()) {
-    LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
-    Node->setNodeId(-1);
-    return;
-  }
-
-  // Select the default instruction
-  SelectCode(Node);
-}
-
-FunctionPass *llvm::createNios2ISelDag(Nios2TargetMachine &TM,
-                                       CodeGenOpt::Level OptLevel) {
-  return new Nios2DAGToDAGISel(TM, OptLevel);
-}
diff --git a/lib/Target/Nios2/Nios2ISelLowering.cpp b/lib/Target/Nios2/Nios2ISelLowering.cpp
deleted file mode 100644
index 008ce15..0000000
--- a/lib/Target/Nios2/Nios2ISelLowering.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-//===-- Nios2ISelLowering.cpp - Nios2 DAG Lowering Implementation ---------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the interfaces that Nios2 uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2ISelLowering.h"
-#include "Nios2MachineFunction.h"
-#include "Nios2TargetMachine.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-#include "Nios2GenCallingConv.inc"
-
-SDValue
-Nios2TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
-                                 bool IsVarArg,
-                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
-                                 const SmallVectorImpl<SDValue> &OutVals,
-                                 const SDLoc &DL, SelectionDAG &DAG) const {
-  // CCValAssign - represent the assignment of
-  // the return value to a location
-  SmallVector<CCValAssign, 16> RVLocs;
-  MachineFunction &MF = DAG.getMachineFunction();
-
-  // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
-  // Analyze return values.
-  CCInfo.CheckReturn(Outs, RetCC_Nios2EABI);
-
-  SDValue Flag;
-  SmallVector<SDValue, 4> RetOps(1, Chain);
-
-  // Copy the result values into the output registers.
-  for (unsigned i = 0; i != RVLocs.size(); ++i) {
-    SDValue Val = OutVals[i];
-    CCValAssign &VA = RVLocs[i];
-    assert(VA.isRegLoc() && "Can only return in registers!");
-
-    if (RVLocs[i].getValVT() != RVLocs[i].getLocVT())
-      Val = DAG.getNode(ISD::BITCAST, DL, RVLocs[i].getLocVT(), Val);
-
-    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
-
-    // Guarantee that all emitted copies are stuck together with flags.
-    Flag = Chain.getValue(1);
-    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
-  }
-
-  if (Flag.getNode())
-    RetOps.push_back(Flag);
-
-  return DAG.getNode(Nios2ISD::Ret, DL, MVT::Other, RetOps);
-}
-
-// addLiveIn - This helper function adds the specified physical register to the
-// MachineFunction as a live in value.  It also creates a corresponding
-// virtual register for it.
-static unsigned addLiveIn(MachineFunction &MF, unsigned PReg,
-                          const TargetRegisterClass *RC) {
-  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
-  MF.getRegInfo().addLiveIn(PReg, VReg);
-  return VReg;
-}
-
-//===----------------------------------------------------------------------===//
-//            Formal Arguments Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-// LowerFormalArguments - transform physical registers into virtual registers
-// and generate load operations for arguments placed on the stack.
-SDValue Nios2TargetLowering::LowerFormalArguments(
-    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
-    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
-    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  MachineFrameInfo &MFI = MF.getFrameInfo();
-
-  // Assign locations to all of the incoming arguments.
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
-                 *DAG.getContext());
-
-  CCInfo.AnalyzeFormalArguments(Ins, CC_Nios2);
-
-  // Used with varargs to accumulate store chains.
-  std::vector<SDValue> OutChains;
-
-  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
-    CCValAssign &VA = ArgLocs[i];
-
-    EVT ValVT = VA.getValVT();
-
-    // Arguments stored on registers
-    if (VA.isRegLoc()) {
-      MVT RegVT = VA.getLocVT();
-      unsigned ArgReg = VA.getLocReg();
-      const TargetRegisterClass *RC = getRegClassFor(RegVT);
-
-      // Transform the arguments stored on
-      // physical registers into virtual ones
-      unsigned Reg = addLiveIn(MF, ArgReg, RC);
-      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
-
-      // If this is an 8 or 16-bit value, it has been passed promoted
-      // to 32 bits.  Insert an assert[sz]ext to capture this, then
-      // truncate to the right size.
-      if (VA.getLocInfo() != CCValAssign::Full) {
-        unsigned Opcode = 0;
-        if (VA.getLocInfo() == CCValAssign::SExt)
-          Opcode = ISD::AssertSext;
-        else if (VA.getLocInfo() == CCValAssign::ZExt)
-          Opcode = ISD::AssertZext;
-        if (Opcode)
-          ArgValue =
-              DAG.getNode(Opcode, DL, RegVT, ArgValue, DAG.getValueType(ValVT));
-        ArgValue = DAG.getNode(ISD::TRUNCATE, DL, ValVT, ArgValue);
-      }
-
-      // Handle floating point arguments passed in integer registers.
-      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
-          (RegVT == MVT::i64 && ValVT == MVT::f64))
-        ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
-      InVals.push_back(ArgValue);
-    } else { // VA.isRegLoc()
-      MVT LocVT = VA.getLocVT();
-
-      // sanity check
-      assert(VA.isMemLoc());
-
-      // The stack pointer offset is relative to the caller stack frame.
-      int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
-                                     VA.getLocMemOffset(), true);
-
-      // Create load nodes to retrieve arguments from the stack
-      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
-      SDValue Load = DAG.getLoad(
-          LocVT, DL, Chain, FIN,
-          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
-      InVals.push_back(Load);
-      OutChains.push_back(Load.getValue(1));
-    }
-  }
-  if (!OutChains.empty()) {
-    OutChains.push_back(Chain);
-    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
-  }
-
-  return Chain;
-}
-
-//===----------------------------------------------------------------------===//
-// TargetLowering Implementation
-//===----------------------------------------------------------------------===//
-
-Nios2TargetLowering::Nios2TargetLowering(const TargetMachine &TM,
-                                         const Nios2Subtarget &STI)
-    : TargetLowering(TM), Subtarget(&STI) {
-
-  addRegisterClass(MVT::i32, &Nios2::CPURegsRegClass);
-  computeRegisterProperties(Subtarget->getRegisterInfo());
-}
-
-const char *Nios2TargetLowering::getTargetNodeName(unsigned Opcode) const {
-  switch (Opcode) {
-  case Nios2ISD::Hi:
-    return "Nios2ISD::Hi";
-  case Nios2ISD::Lo:
-    return "Nios2ISD::Lo";
-  case Nios2ISD::Ret:
-    return "Nios2ISD::Ret";
-  }
-  return nullptr;
-}
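
The LowerFormalArguments deleted above follows the standard SelectionDAG recipe: register-passed arguments are surfaced via CopyFromReg on fresh virtual registers, sub-word values arrive promoted to i32 and are narrowed with an AssertSext/AssertZext followed by a truncate, and stack-passed arguments become loads from fixed frame objects. A minimal standalone C++ sketch of the narrowing step (function and variable names are ours, not LLVM's):

#include <cassert>
#include <cstdint>

// Model the AssertSext + TRUNCATE pair for a 16-bit argument that the
// calling convention promoted to a full 32-bit register.
int16_t narrowSignExtendedArg(uint32_t regVal) {
  // AssertSext: the producer guarantees bits 31..16 replicate bit 15.
  assert(static_cast<int32_t>(regVal) == static_cast<int16_t>(regVal) &&
         "argument was not sign-extended as the ABI promised");
  // ISD::TRUNCATE: keep only the low 16 bits.
  return static_cast<int16_t>(regVal);
}
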
diff --git a/lib/Target/Nios2/Nios2ISelLowering.h b/lib/Target/Nios2/Nios2ISelLowering.h
deleted file mode 100644
index c3c8179..0000000
--- a/lib/Target/Nios2/Nios2ISelLowering.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//===-- Nios2ISelLowering.h - Nios2 DAG Lowering Interface ------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that Nios2 uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2ISELLOWERING_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2ISELLOWERING_H
-
-#include "Nios2.h"
-#include "llvm/CodeGen/TargetLowering.h"
-
-namespace llvm {
-class Nios2Subtarget;
-
-namespace Nios2ISD {
-enum NodeType {
-  // Start the numbering from where ISD NodeType finishes.
-  FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
-  // Get the Higher 16 bits from a 32-bit immediate
-  // No relation with Nios2 Hi register
-  Hi,
-  // Get the Lower 16 bits from a 32-bit immediate
-  // No relation with Nios2 Lo register
-  Lo,
-  // Return
-  Ret
-};
-}
-
-class Nios2TargetLowering : public TargetLowering {
-  const Nios2Subtarget *Subtarget;
-
-public:
-  Nios2TargetLowering(const TargetMachine &TM, const Nios2Subtarget &STI);
-
-  /// getTargetNodeName - This method returns the name of a target-specific
-  /// DAG node.
-  const char *getTargetNodeName(unsigned Opcode) const override;
-
-  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
-                               bool IsVarArg,
-                               const SmallVectorImpl<ISD::InputArg> &Ins,
-                               const SDLoc &dl, SelectionDAG &DAG,
-                               SmallVectorImpl<SDValue> &InVals) const override;
-
-  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
-                      const SmallVectorImpl<ISD::OutputArg> &Outs,
-                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
-                      SelectionDAG &DAG) const override;
-};
-} // end namespace llvm
-
-#endif // LLVM_LIB_TARGET_NIOS2_NIOS2ISELLOWERING_H
diff --git a/lib/Target/Nios2/Nios2InstrFormats.td b/lib/Target/Nios2/Nios2InstrFormats.td
deleted file mode 100644
index f57bf03..0000000
--- a/lib/Target/Nios2/Nios2InstrFormats.td
+++ /dev/null
@@ -1,235 +0,0 @@
-//===-- Nios2InstrFormats.td - Nios2 Instruction Formats ---*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-//  Describes the Nios2 instruction formats.
-//
-//
-//===----------------------------------------------------------------------===//
-
-// Format specifies the encoding used by the instruction.  This is part of the
-// ad-hoc solution used to emit machine instruction encodings by our machine
-// code emitter.
-class Format<bits<6> val> {
-  bits<6> Value = val;
-}
-
-def Pseudo     : Format<0>;
-// Nios2 R1 instr formats:
-def FrmI       : Format<1>;
-def FrmR       : Format<2>;
-def FrmJ       : Format<3>;
-def FrmOther   : Format<4>;  // Instruction w/ a custom format
-// Nios2 R2 instr 32-bit formats:
-def FrmL26     : Format<5>;  // corresponds to J format in R1
-def FrmF2I16   : Format<6>;  // corresponds to I format in R1
-def FrmF2X4I12 : Format<7>;
-def FrmF1X4I12 : Format<8>;
-def FrmF1X4L17 : Format<9>;
-def FrmF3X6L5  : Format<10>; // corresponds to R format in R1
-def FrmF2X6L10 : Format<11>;
-def FrmF3X6    : Format<12>; // corresponds to R format in R1
-def FrmF3X8    : Format<13>; // corresponds to custom format in R1
-// Nios2 R2 instr 16-bit formats:
-def FrmI10     : Format<14>;
-def FrmT1I7    : Format<15>; 
-def FrmT2I4    : Format<16>;
-def FrmT1X1I6  : Format<17>;
-def FrmX1I7    : Format<18>;
-def FrmL5I4X1  : Format<19>;
-def FrmT2X1L3  : Format<20>;
-def FrmT2X1I3  : Format<21>;
-def FrmT3X1    : Format<22>;
-def FrmT2X3    : Format<23>;
-def FrmF1X1    : Format<24>;
-def FrmX2L5    : Format<25>;
-def FrmF1I5    : Format<26>;
-def FrmF2      : Format<27>;
-
-//===----------------------------------------------------------------------===//
-// Instruction Predicates:
-//===----------------------------------------------------------------------===//
-
-def isNios2r1 : Predicate<"Subtarget->isNios2r1()">;
-def isNios2r2 : Predicate<"Subtarget->isNios2r2()">;
-
-class PredicateControl {
-  // Predicates related to specific target CPU features
-  list<Predicate> FeaturePredicates = [];
-  // Predicates for the instruction group membership in given ISA
-  list<Predicate> InstrPredicates = [];
-  
-  list<Predicate> Predicates = !listconcat(FeaturePredicates, InstrPredicates);
-}
-
-//===----------------------------------------------------------------------===//
-// Base classes for 32-bit, 16-bit and pseudo instructions
-//===----------------------------------------------------------------------===//
-
-class Nios2Inst32<dag outs, dag ins, string asmstr, list<dag> pattern,
-                  InstrItinClass itin, Format f>: Instruction,
-		                                  PredicateControl {
-  field bits<32> Inst;
-  Format Form = f;
-
-  let Namespace = "Nios2";
-  let Size = 4;
-
-  bits<6> Opcode = 0;
-
-  // Bottom 6 bits are the 'opcode' field
-  let Inst{5-0} = Opcode;
-
-  let OutOperandList = outs;
-  let InOperandList  = ins;
-
-  let AsmString = asmstr;
-  let Pattern   = pattern;
-  let Itinerary = itin;
-
-  // Attributes specific to Nios2 instructions:
-
-  // TSFlags layout should be kept in sync with Nios2InstrInfo.h.
-  let TSFlags{5-0} = Form.Value;
-  let DecoderNamespace = "Nios2";
-  field bits<32> SoftFail = 0;
-}
-
-class Nios2Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern,
-      InstrItinClass Itin = IIPseudo>:
-  Nios2Inst32<outs, ins, asmstr, pattern, Itin, Pseudo> {
-
-  let isCodeGenOnly = 1;
-  let isPseudo = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Base classes for R1 and R2 instructions
-//===----------------------------------------------------------------------===//
-
-class Nios2R1Inst32<dag outs, dag ins, string asmstr, list<dag> pattern,
-                    InstrItinClass itin, Format f>: 
-      Nios2Inst32<outs, ins, asmstr, pattern, itin, f> {
-  let DecoderNamespace = "Nios2";
-  let InstrPredicates = [isNios2r1];
-}
-
-class Nios2R2Inst32<dag outs, dag ins, string asmstr, list<dag> pattern,
-                    InstrItinClass itin, Format f>: 
-      Nios2Inst32<outs, ins, asmstr, pattern, itin, f> {
-  let DecoderNamespace = "Nios2r2";
-  let InstrPredicates = [isNios2r2];
-}
-
-//===----------------------------------------------------------------------===//
-// Format I instruction class in Nios2 : <|A|B|immediate|opcode|>
-//===----------------------------------------------------------------------===//
-
-class FI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
-         InstrItinClass itin>: Nios2R1Inst32<outs, ins, asmstr,
-	                                     pattern, itin, FrmI> {
-
-  bits<5>  rA;
-  bits<5>  rB;
-  bits<16> imm;
-
-  let Opcode = op;
-
-  let Inst{31-27} = rA;
-  let Inst{26-22} = rB;
-  let Inst{21-6} = imm;
-}
-
-
-//===----------------------------------------------------------------------===//
-// Format R instruction : <|A|B|C|opx|imm|opcode|>
-//===----------------------------------------------------------------------===//
-
-class FR<bits<6> opx, dag outs, dag ins, string asmstr, list<dag> pattern,
-         InstrItinClass itin>: Nios2R1Inst32<outs, ins, asmstr,
-	                                     pattern, itin, FrmR> {
-  bits<5> rA;
-  bits<5> rB;
-  bits<5> rC;
-  bits<5> imm = 0;
-
-  let Opcode = 0x3a; /* opcode is always 0x3a for R instr. */
-
-  let Inst{31-27} = rA;
-  let Inst{26-22} = rB;
-  let Inst{21-17} = rC;
-  let Inst{16-11} = opx; /* opx stands for opcode extension */
-  let Inst{10-6}  = imm; /* optional 5-bit immediate value */
-}
-
-//===----------------------------------------------------------------------===//
-// Format J instruction class in Nios2 : <|address|opcode|>
-//===----------------------------------------------------------------------===//
-
-class FJ<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
-         InstrItinClass itin>:
-      Nios2R1Inst32<outs, ins, asmstr, pattern, itin, FrmJ> {
-  bits<26> addr;
-  let Opcode = op;
-  let Inst{31-6} = addr;
-}
-
-//===----------------------------------------------------------------------===//
-// Format F3X6 (R2) instruction : <|opx|RSV|C|B|A|opcode|>
-//===----------------------------------------------------------------------===//
-
-class F3X6<bits<6> opx, dag outs, dag ins, string asmstr, list<dag> pattern,
-           InstrItinClass itin>:
-      Nios2R2Inst32<outs, ins, asmstr, pattern, itin, FrmF3X6> {
-  bits<5> rC;
-  bits<5> rB;
-  bits<5> rA;
-  bits<5> rsv = 0;
-
-  let Opcode = 0x20; /* opcode is always 0x20 (OPX group) for F3X6 instr. */
-
-  let Inst{31-26} = opx; /* opx stands for opcode extension */
-  let Inst{25-21} = rsv;
-  let Inst{20-16} = rC;
-  let Inst{15-11} = rB;
-  let Inst{10-6}  = rA;
-}
-
-//===----------------------------------------------------------------------===//
-// Multiclasses for common instructions of both R1 and R2:
-//===----------------------------------------------------------------------===//
-
-// Multiclass for instructions that have R format in R1 and F3X6 format in R2
-// and their opx values differ between R1 and R2
-multiclass CommonInstr_R_F3X6_opx<bits<6> opxR1, bits<6> opxR2, dag outs,
-                                  dag ins, string asmstr, list<dag> pattern,
-                                  InstrItinClass itin> {
-  def NAME#_R1 : FR<opxR1, outs, ins, asmstr, pattern, itin>;
-  def NAME#_R2 : F3X6<opxR2, outs, ins, asmstr, pattern, itin>;
-}
-
-// Multiclass for instructions that have R format in R1 and F3X6 format in R2
-// and their opx values are the same in R1 and R2
-multiclass CommonInstr_R_F3X6<bits<6> opx, dag outs, dag ins, string asmstr,
-                              list<dag> pattern, InstrItinClass itin> :
-  CommonInstr_R_F3X6_opx<opx, opx, outs, ins, asmstr, pattern, itin>;
-
-// Multiclass for instructions that have I format in R1 and F2I16 format in R2
-// and their opcode values differ between R1 and R2
-multiclass CommonInstr_I_F2I16_op<bits<6> opR1, bits<6> opR2, dag outs, dag ins,
-                                  string asmstr, list<dag> pattern,
-                                  InstrItinClass itin> {
-  def NAME#_R1 : FI<opR1, outs, ins, asmstr, pattern, itin>;
-}
-
-// Multiclass for instructions that have I format in R1 and F2I16 format in R2
-// and their opcode values are the same in R1 and R2
-multiclass CommonInstr_I_F2I16<bits<6> op, dag outs, dag ins, string asmstr,
-                               list<dag> pattern, InstrItinClass itin> :
-  CommonInstr_I_F2I16_op<op, op, outs, ins, asmstr, pattern, itin>;
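
The six-bit Format tag defined at the top of this file is stored in TSFlags{5-0} of every Nios2Inst32, which lets later passes recover an instruction's encoding family from its descriptor alone. A standalone sketch of that packing, with enum values mirroring the Format defs above (everything else is ours):

#include <cstdint>

// Values mirror the Format<n> defs above (Pseudo = 0, FrmI = 1, ...).
enum class Nios2Format : uint64_t { Pseudo = 0, FrmI = 1, FrmR = 2, FrmJ = 3 };

constexpr uint64_t FormMask = 0x3f; // TSFlags{5-0}, cf. Nios2Inst32

constexpr Nios2Format getFormat(uint64_t tsFlags) {
  return static_cast<Nios2Format>(tsFlags & FormMask);
}

static_assert(getFormat(0x42) == Nios2Format::FrmR, "0x42 & 0x3f == 2 == FrmR");

Note also that CommonInstr_I_F2I16_op above, despite its name, only instantiates the R1 FI variant; no F2I16 class is ever defined or used in this file.
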
diff --git a/lib/Target/Nios2/Nios2InstrInfo.cpp b/lib/Target/Nios2/Nios2InstrInfo.cpp
deleted file mode 100644
index 9700cba..0000000
--- a/lib/Target/Nios2/Nios2InstrInfo.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-//===-- Nios2InstrInfo.cpp - Nios2 Instruction Information ----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Nios2 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2InstrInfo.h"
-#include "Nios2TargetMachine.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-
-using namespace llvm;
-
-#define GET_INSTRINFO_CTOR_DTOR
-#include "Nios2GenInstrInfo.inc"
-
-// Pin the vtable to this file.
-void Nios2InstrInfo::anchor() {}
-
-Nios2InstrInfo::Nios2InstrInfo(Nios2Subtarget &ST)
-    : Nios2GenInstrInfo(), RI(ST), Subtarget(ST) {}
-
-/// Expand Pseudo instructions into real backend instructions
-bool Nios2InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
-  MachineBasicBlock &MBB = *MI.getParent();
-
-  switch (MI.getDesc().getOpcode()) {
-  default:
-    return false;
-  case Nios2::RetRA:
-    BuildMI(MBB, MI, MI.getDebugLoc(), get(Nios2::RET_R1)).addReg(Nios2::RA);
-    break;
-  }
-
-  MBB.erase(MI);
-  return true;
-}
-
-void Nios2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
-                                 MachineBasicBlock::iterator I,
-                                 const DebugLoc &DL, unsigned DestReg,
-                                 unsigned SrcReg, bool KillSrc) const {
-  unsigned opc = Subtarget.hasNios2r2() ? Nios2::ADD_R2 : Nios2::ADD_R1;
-  BuildMI(MBB, I, DL, get(opc))
-    .addReg(DestReg, RegState::Define)
-    .addReg(Nios2::ZERO)
-    .addReg(SrcReg, getKillRegState(KillSrc));
-}
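
copyPhysReg above uses the classic RISC move idiom: lacking a dedicated mov, a register copy is emitted as add rD, zero, rS (choosing the R1 or R2 flavor of add by subtarget). Combining that with the FR encoding class from Nios2InstrFormats.td (opcode fixed at 0x3a, opx selecting the operation, 0x31 for add per Nios2InstrInfo.td), the full 32-bit word for such a move can be computed directly. A standalone sketch under those assumptions (helper names are ours):

#include <cstdint>

// R-format word per class FR: |rA(5)|rB(5)|rC(5)|opx(6)|imm(5)|opcode(6)|.
constexpr uint32_t encodeRFormat(uint32_t rA, uint32_t rB, uint32_t rC,
                                 uint32_t opx, uint32_t imm5 = 0) {
  return (rA << 27) | (rB << 22) | (rC << 17) | (opx << 11) | (imm5 << 6) |
         0x3a; // R-format opcode is always 0x3a
}

// "mov rD, rS" as materialized by copyPhysReg: add rD, zero, rS.
constexpr uint32_t encodeMove(uint32_t rD, uint32_t rS) {
  return encodeRFormat(/*rA=*/0, /*rB=*/rS, /*rC=*/rD, /*opx=*/0x31);
}
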
diff --git a/lib/Target/Nios2/Nios2InstrInfo.h b/lib/Target/Nios2/Nios2InstrInfo.h
deleted file mode 100644
index 52f6e7e..0000000
--- a/lib/Target/Nios2/Nios2InstrInfo.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===-- Nios2InstrInfo.h - Nios2 Instruction Information --------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Nios2 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2INSTRINFO_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2INSTRINFO_H
-
-#include "Nios2RegisterInfo.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
-
-#define GET_INSTRINFO_HEADER
-#include "Nios2GenInstrInfo.inc"
-
-namespace llvm {
-
-class Nios2Subtarget;
-
-class Nios2InstrInfo : public Nios2GenInstrInfo {
-  const Nios2RegisterInfo RI;
-  const Nios2Subtarget &Subtarget;
-  virtual void anchor();
-
-public:
-  explicit Nios2InstrInfo(Nios2Subtarget &ST);
-
-  /// getRegisterInfo - TargetInstrInfo is a superset of TargetRegisterInfo. As
-  /// such, whenever a client has an instance of instruction info, it should
-  /// always be able to get register info as well (through this method).
-  ///
-  const Nios2RegisterInfo &getRegisterInfo() const { return RI; }
-
-  bool expandPostRAPseudo(MachineInstr &MI) const override;
-
-  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                   const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
-                   bool KillSrc) const override;
-};
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/Nios2InstrInfo.td b/lib/Target/Nios2/Nios2InstrInfo.td
deleted file mode 100644
index dee84f7..0000000
--- a/lib/Target/Nios2/Nios2InstrInfo.td
+++ /dev/null
@@ -1,109 +0,0 @@
-//===- Nios2InstrInfo.td - Target Description for Nios2 -----*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Nios2 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Instruction format superclass
-//===----------------------------------------------------------------------===//
-
-include "Nios2InstrFormats.td"
-
-
-//===----------------------------------------------------------------------===//
-// Nios2 Operand, Complex Patterns and Transformations Definitions.
-//===----------------------------------------------------------------------===//
-
-def simm16     : Operand<i32> {
-  let DecoderMethod= "DecodeSimm16";
-}
-
-// An immediate that fits in 16 bits when sign-extended on the target,
-// e.g. for addi, andi.
-def immSExt16  : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
-
-// Custom return SDNode
-def Nios2Ret : SDNode<"Nios2ISD::Ret", SDTNone,
-    [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-
-//===----------------------------------------------------------------------===//
-// Instructions specific format
-//===----------------------------------------------------------------------===//
-
-// Arithmetic and logical instructions with 2 registers and 16-bit immediate
-// value.
-multiclass ArithLogicRegImm16<bits<6> op, string mnemonic, SDNode opNode,
-                              Operand immOp, PatLeaf immType>:
-           CommonInstr_I_F2I16<op, (outs CPURegs:$rB),
-	                       (ins CPURegs:$rA, immOp:$imm),
-                               !strconcat(mnemonic, "\t$rB, $rA, $imm"),
-                               [(set CPURegs:$rB,
-			         (opNode CPURegs:$rA, immType:$imm))],
-                               IIAlu>;
-
-// Arithmetic and logical instructions with 3 register operands.
-// Defines the R1 and R2 instructions at the same time.
-multiclass ArithLogicReg<bits<6> opx, string mnemonic,
-                         SDNode opNode>:
-  CommonInstr_R_F3X6<opx, (outs CPURegs:$rC),
-                     (ins CPURegs:$rA, CPURegs:$rB),
-                     !strconcat(mnemonic, "\t$rC, $rA, $rB"),
-                     [(set CPURegs:$rC, (opNode CPURegs:$rA, CPURegs:$rB))],
-                     IIAlu>;
-
-multiclass Return<bits<6> opx, dag outs, dag ins, string mnemonic> {
-  let rB = 0, rC = 0,
-      isReturn = 1,
-      isCodeGenOnly = 1,
-      hasCtrlDep = 1,
-      hasExtraSrcRegAllocReq = 1 in {
-    defm NAME# : CommonInstr_R_F3X6<opx, outs, ins, mnemonic, [], IIBranch>;
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Nios2 Instructions
-//===----------------------------------------------------------------------===//
-
-/// Arithmetic instructions operating on registers.
-let isCommutable = 1 ,
-    isReMaterializable = 1 in {
-  defm ADD    : ArithLogicReg<0x31, "add",    add>;
-  defm AND    : ArithLogicReg<0x0e, "and",    and>;
-  defm OR     : ArithLogicReg<0x16, "or",     or>;
-  defm XOR    : ArithLogicReg<0x1e, "xor",    xor>;
-  defm MUL    : ArithLogicReg<0x27, "mul",    mul>;
-}
-
-let isReMaterializable = 1 in {
-  defm SUB    : ArithLogicReg<0x39, "sub",    sub>;
-}
-
-defm DIVU : ArithLogicReg<0x24, "divu",   udiv>;
-defm DIV  : ArithLogicReg<0x25, "div",    sdiv>;
-
-defm SLL : ArithLogicReg<0x13, "sll",  shl>;
-defm SRL : ArithLogicReg<0x1b, "srl",  srl>;
-defm SRA : ArithLogicReg<0x3b, "sra",  sra>;
-
-/// Arithmetic Instructions (ALU Immediate)
-defm ADDI  : ArithLogicRegImm16<0x04, "addi",  add, simm16, immSExt16>;
-
-// Returns:
-defm RET  : Return<0x05, (outs), (ins CPURegs:$rA), "ret">;
-
-//===----------------------------------------------------------------------===//
-// Pseudo instructions
-//===----------------------------------------------------------------------===//
-
-// Return RA.
-let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, hasCtrlDep=1 in
-def RetRA : Nios2Pseudo<(outs), (ins), "", [(Nios2Ret)]>;
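
ArithLogicRegImm16 above ties the FI encoding class to a selection pattern gated by immSExt16, so only immediates that survive 16-bit sign extension match. Per the FI class in Nios2InstrFormats.td the I-format word is |rA|rB|imm16|opcode|, and the defm gives ADDI opcode 0x04. A standalone encoder mirroring that layout (function name is ours):

#include <cassert>
#include <cstdint>

// I-format word per class FI: |rA(5)|rB(5)|imm(16)|opcode(6)|.
uint32_t encodeADDI(uint32_t rA, uint32_t rB, int32_t imm) {
  // immSExt16: the pattern only matches immediates fitting 16 signed bits.
  assert(imm >= -32768 && imm <= 32767 && "addi immediate out of range");
  return (rA << 27) | (rB << 22) |
         ((static_cast<uint32_t>(imm) & 0xffff) << 6) | 0x04;
}
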
diff --git a/lib/Target/Nios2/Nios2MCInstLower.cpp b/lib/Target/Nios2/Nios2MCInstLower.cpp
deleted file mode 100644
index c43af87..0000000
--- a/lib/Target/Nios2/Nios2MCInstLower.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-//===-- Nios2MCInstLower.cpp - Convert Nios2 MachineInstr to MCInst -------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower Nios2 MachineInstrs to their corresponding
-// MCInst records.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/Nios2BaseInfo.h"
-#include "MCTargetDesc/Nios2MCExpr.h"
-#include "Nios2.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineOperand.h"
-
-using namespace llvm;
-
-static MCOperand LowerSymbolOperand(const MachineOperand &MO, AsmPrinter &AP) {
-  MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
-  Nios2MCExpr::Nios2ExprKind TargetKind = Nios2MCExpr::CEK_None;
-  const MCSymbol *Symbol;
-
-  switch (MO.getTargetFlags()) {
-  default:
-    llvm_unreachable("Invalid target flag!");
-  case Nios2FG::MO_NO_FLAG:
-    break;
-  case Nios2FG::MO_ABS_HI:
-    TargetKind = Nios2MCExpr::CEK_ABS_HI;
-    break;
-  case Nios2FG::MO_ABS_LO:
-    TargetKind = Nios2MCExpr::CEK_ABS_LO;
-    break;
-  }
-
-  switch (MO.getType()) {
-  case MachineOperand::MO_GlobalAddress:
-    Symbol = AP.getSymbol(MO.getGlobal());
-    break;
-
-  case MachineOperand::MO_MachineBasicBlock:
-    Symbol = MO.getMBB()->getSymbol();
-    break;
-
-  case MachineOperand::MO_BlockAddress:
-    Symbol = AP.GetBlockAddressSymbol(MO.getBlockAddress());
-    break;
-
-  case MachineOperand::MO_ExternalSymbol:
-    Symbol = AP.GetExternalSymbolSymbol(MO.getSymbolName());
-    break;
-
-  case MachineOperand::MO_JumpTableIndex:
-    Symbol = AP.GetJTISymbol(MO.getIndex());
-    break;
-
-  case MachineOperand::MO_ConstantPoolIndex:
-    Symbol = AP.GetCPISymbol(MO.getIndex());
-    break;
-
-  default:
-    llvm_unreachable("<unknown operand type>");
-  }
-
-  const MCExpr *Expr = MCSymbolRefExpr::create(Symbol, Kind, AP.OutContext);
-
-  if (TargetKind != Nios2MCExpr::CEK_None)
-    Expr = Nios2MCExpr::create(TargetKind, Expr, AP.OutContext);
-
-  return MCOperand::createExpr(Expr);
-}
-
-static MCOperand LowerOperand(const MachineOperand &MO, AsmPrinter &AP) {
-
-  switch (MO.getType()) {
-  default:
-    llvm_unreachable("unknown operand type");
-  case MachineOperand::MO_Register:
-    // Ignore all implicit register operands.
-    if (MO.isImplicit())
-      break;
-    return MCOperand::createReg(MO.getReg());
-  case MachineOperand::MO_Immediate:
-    return MCOperand::createImm(MO.getImm());
-  case MachineOperand::MO_MachineBasicBlock:
-  case MachineOperand::MO_ExternalSymbol:
-  case MachineOperand::MO_JumpTableIndex:
-  case MachineOperand::MO_BlockAddress:
-  case MachineOperand::MO_GlobalAddress:
-  case MachineOperand::MO_ConstantPoolIndex:
-    return LowerSymbolOperand(MO, AP);
-  case MachineOperand::MO_RegisterMask:
-    break;
-  }
-
-  return MCOperand();
-}
-
-void llvm::LowerNios2MachineInstToMCInst(const MachineInstr *MI, MCInst &OutMI,
-                                         AsmPrinter &AP) {
-
-  OutMI.setOpcode(MI->getOpcode());
-
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
-    MCOperand MCOp = LowerOperand(MO, AP);
-
-    if (MCOp.isValid())
-      OutMI.addOperand(MCOp);
-  }
-}
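
The lowering loop deleted above has the shape every backend's MCInstLower shares: walk the MachineInstr operands, translate each to an MCOperand, and drop operands (implicit registers, register masks) that have no MC-level representation. A reduced standalone model of that dispatch, with stand-in types of our own:

#include <cstdint>
#include <optional>
#include <vector>

struct MachineOp {
  enum Kind { Reg, Imm, RegMask } kind;
  int64_t value;
  bool isImplicit = false;
};

struct MCOp {
  enum Kind { Reg, Imm } kind;
  int64_t value;
};

std::optional<MCOp> lowerOperand(const MachineOp &op) {
  switch (op.kind) {
  case MachineOp::Reg:
    if (op.isImplicit)
      return std::nullopt; // implicit register operands are not emitted
    return MCOp{MCOp::Reg, op.value};
  case MachineOp::Imm:
    return MCOp{MCOp::Imm, op.value};
  case MachineOp::RegMask:
    return std::nullopt; // no MC-level representation
  }
  return std::nullopt;
}

std::vector<MCOp> lowerInst(const std::vector<MachineOp> &ops) {
  std::vector<MCOp> out;
  for (const auto &op : ops)
    if (auto mc = lowerOperand(op))
      out.push_back(*mc);
  return out;
}
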
diff --git a/lib/Target/Nios2/Nios2MachineFunction.cpp b/lib/Target/Nios2/Nios2MachineFunction.cpp
deleted file mode 100644
index be5b882..0000000
--- a/lib/Target/Nios2/Nios2MachineFunction.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-//===-- Nios2MachineFunction.cpp - Private data used for Nios2 ------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2MachineFunction.h"
-
-using namespace llvm;
-
-void Nios2FunctionInfo::anchor() {}
diff --git a/lib/Target/Nios2/Nios2MachineFunction.h b/lib/Target/Nios2/Nios2MachineFunction.h
deleted file mode 100644
index 73baf96..0000000
--- a/lib/Target/Nios2/Nios2MachineFunction.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//===-- Nios2MachineFunction.h - Private data used for Nios2 ----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the Nios2 specific subclass of MachineFunctionInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2MACHINEFUNCTION_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2MACHINEFUNCTION_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-
-namespace llvm {
-
-/// Nios2FunctionInfo - This class is derived from MachineFunctionInfo and
-/// contains private Nios2 target-specific information for each MachineFunction.
-class Nios2FunctionInfo : public MachineFunctionInfo {
-  virtual void anchor();
-
-private:
-  unsigned GlobalBaseReg;
-
-  /// VarArgsFrameOffset - Frame offset to start of varargs area.
-  int VarArgsFrameOffset;
-
-  /// SRetReturnReg - Holds the virtual register into which the sret
-  /// argument is passed.
-  unsigned SRetReturnReg;
-
-  /// IsLeafProc - True if the function is a leaf procedure.
-  bool IsLeafProc;
-
-public:
-  Nios2FunctionInfo()
-      : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0),
-        IsLeafProc(false) {}
-  explicit Nios2FunctionInfo(MachineFunction &MF)
-      : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0),
-        IsLeafProc(false) {}
-
-  unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
-  void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
-
-  int getVarArgsFrameOffset() const { return VarArgsFrameOffset; }
-  void setVarArgsFrameOffset(int Offset) { VarArgsFrameOffset = Offset; }
-
-  unsigned getSRetReturnReg() const { return SRetReturnReg; }
-  void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
-
-  void setLeafProc(bool rhs) { IsLeafProc = rhs; }
-  bool isLeafProc() const { return IsLeafProc; }
-};
-
-} // end of namespace llvm
-
-#endif // LLVM_LIB_TARGET_NIOS2_NIOS2MACHINEFUNCTION_H
diff --git a/lib/Target/Nios2/Nios2RegisterInfo.cpp b/lib/Target/Nios2/Nios2RegisterInfo.cpp
deleted file mode 100644
index 9b892f9..0000000
--- a/lib/Target/Nios2/Nios2RegisterInfo.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-//===-- Nios2RegisterInfo.cpp - Nios2 Register Information ----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Nios2 implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "nios2-reg-info"
-
-#include "Nios2RegisterInfo.h"
-
-#include "Nios2.h"
-#include "Nios2Subtarget.h"
-
-#define GET_REGINFO_TARGET_DESC
-#include "Nios2GenRegisterInfo.inc"
-
-using namespace llvm;
-
-Nios2RegisterInfo::Nios2RegisterInfo(const Nios2Subtarget &ST)
-    : Nios2GenRegisterInfo(Nios2::RA), Subtarget(ST) {}
-
-const TargetRegisterClass *Nios2RegisterInfo::intRegClass(unsigned Size) const {
-  return &Nios2::CPURegsRegClass;
-}
-
-const MCPhysReg *
-Nios2RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
-  return CSR_SaveList;
-}
-
-BitVector Nios2RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
-  static const MCPhysReg ReservedCPURegs[] = {Nios2::ZERO, Nios2::AT, Nios2::SP,
-                                             Nios2::RA,   Nios2::PC, Nios2::GP};
-  BitVector Reserved(getNumRegs());
-
-  for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I)
-    Reserved.set(ReservedCPURegs[I]);
-
-  return Reserved;
-}
-
-void Nios2RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
-                                            int SPAdj, unsigned FIOperandNum,
-                                            RegScavenger *RS) const {}
-
-unsigned Nios2RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
-  return Nios2::SP;
-}
diff --git a/lib/Target/Nios2/Nios2RegisterInfo.h b/lib/Target/Nios2/Nios2RegisterInfo.h
deleted file mode 100644
index 3658343..0000000
--- a/lib/Target/Nios2/Nios2RegisterInfo.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===-- Nios2RegisterInfo.h - Nios2 Register Information Impl ---*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Nios2 implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2REGISTERINFO_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2REGISTERINFO_H
-
-#include "Nios2.h"
-#include "llvm/CodeGen/TargetRegisterInfo.h"
-
-#define GET_REGINFO_HEADER
-#include "Nios2GenRegisterInfo.inc"
-
-namespace llvm {
-class Nios2Subtarget;
-class TargetInstrInfo;
-class Type;
-
-class Nios2RegisterInfo : public Nios2GenRegisterInfo {
-protected:
-  const Nios2Subtarget &Subtarget;
-
-public:
-  Nios2RegisterInfo(const Nios2Subtarget &Subtarget);
-
-  const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
-
-  BitVector getReservedRegs(const MachineFunction &MF) const override;
-
-  /// Stack Frame Processing Methods
-  void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
-                           unsigned FIOperandNum,
-                           RegScavenger *RS = nullptr) const override;
-
-  /// Debug information queries.
-  unsigned getFrameRegister(const MachineFunction &MF) const override;
-
-  /// Return GPR register class.
-  const TargetRegisterClass *intRegClass(unsigned Size) const;
-};
-
-} // end namespace llvm
-#endif
diff --git a/lib/Target/Nios2/Nios2RegisterInfo.td b/lib/Target/Nios2/Nios2RegisterInfo.td
deleted file mode 100644
index 1808815..0000000
--- a/lib/Target/Nios2/Nios2RegisterInfo.td
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- Nios2RegisterInfo.td - Nios2 Register defs ---------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-// We have a bank of 32 registers.
-class Nios2Reg<string n> : Register<n> {
-  field bits<5> Num;
-  let Namespace = "Nios2";
-}
-
-// Nios2 CPU Registers
-class Nios2GPRReg<bits<5> num, string n> : Nios2Reg<n> {
-  let Num = num;
-}
-
-//===----------------------------------------------------------------------===//
-//  Registers
-//===----------------------------------------------------------------------===//
-
-let Namespace = "Nios2" in {
-  // General Purpose Registers
-  def ZERO : Nios2GPRReg<0, "zero">, DwarfRegNum<[ 0 ]>;
-  def AT : Nios2GPRReg<1, "at">, DwarfRegNum<[ 1 ]>;
-  foreach RegNum = 2 - 23 in {
-    def R #RegNum : Nios2GPRReg<RegNum, "r" #RegNum>, DwarfRegNum<[ RegNum ]>;
-  }
-  def ET : Nios2GPRReg<24, "et">, DwarfRegNum<[ 24 ]>;
-  def BT : Nios2GPRReg<25, "bt">, DwarfRegNum<[ 25 ]>;
-  def GP : Nios2GPRReg<26, "gp">, DwarfRegNum<[ 26 ]>;
-  def SP : Nios2GPRReg<27, "sp">, DwarfRegNum<[ 27 ]>;
-  def FP : Nios2GPRReg<28, "fp">, DwarfRegNum<[ 28 ]>;
-  def EA : Nios2GPRReg<29, "ea">, DwarfRegNum<[ 29 ]>;
-  def BA : Nios2GPRReg<30, "ba">, DwarfRegNum<[ 30 ]>;
-  def RA : Nios2GPRReg<31, "ra">, DwarfRegNum<[ 31 ]>;
-  def PC : Nios2Reg<"pc">, DwarfRegNum<[ 32 ]>;
-}
-
-//===----------------------------------------------------------------------===//
-// Register Classes
-//===----------------------------------------------------------------------===//
-
-def CPURegs : RegisterClass<"Nios2", [ i32 ], 32,
-                            (add
-                            // Reserved
-                            ZERO,
-                            AT,
-                            // Return Values and Arguments
-                            (sequence "R%u", 2, 7),
-                            // Not preserved across procedure calls
-                            // Caller saved
-                            (sequence "R%u", 8, 15),
-                            // Callee saved
-                            (sequence "R%u", 16, 23),
-                            // Reserved
-                            ET, BT, GP, SP, FP, EA, BA, RA, PC)>;
diff --git a/lib/Target/Nios2/Nios2Schedule.td b/lib/Target/Nios2/Nios2Schedule.td
deleted file mode 100644
index 2d1d9d5..0000000
--- a/lib/Target/Nios2/Nios2Schedule.td
+++ /dev/null
@@ -1,39 +0,0 @@
-//===-- Nios2Schedule.td - Nios2 Scheduling Definitions ----*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Functional units across Nios2 chip sets. Based on GCC/Nios2 backend files.
-//===----------------------------------------------------------------------===//
-def ALU     : FuncUnit;
-def IMULDIV : FuncUnit;
-
-//===----------------------------------------------------------------------===//
-// Instruction Itinerary classes used for Nios2
-//===----------------------------------------------------------------------===//
-def IIAlu              : InstrItinClass;
-def IILoad             : InstrItinClass;
-def IIStore            : InstrItinClass;
-def IIFlush            : InstrItinClass;
-def IIIdiv             : InstrItinClass;
-def IIBranch           : InstrItinClass;
-
-def IIPseudo           : InstrItinClass;
-
-//===----------------------------------------------------------------------===//
-// Nios2 Generic instruction itineraries.
-//===----------------------------------------------------------------------===//
-// See http://llvm.org/docs/doxygen/html/structllvm_1_1InstrStage.html
-def Nios2GenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
-  InstrItinData<IIAlu              , [InstrStage<1,  [ALU]>]>,
-  InstrItinData<IILoad             , [InstrStage<3,  [ALU]>]>,
-  InstrItinData<IIStore            , [InstrStage<1,  [ALU]>]>,
-  InstrItinData<IIFlush            , [InstrStage<1,  [ALU]>]>,
-  InstrItinData<IIIdiv             , [InstrStage<38, [IMULDIV]>]>,
-  InstrItinData<IIBranch           , [InstrStage<1,  [ALU]>]>
-]>;
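
The itineraries above encode a deliberately simple latency model: everything executes in one cycle on the ALU except loads (three cycles) and integer divides (38 cycles on the separate IMULDIV unit). The same numbers as a standalone lookup (enum names mirror the InstrItinClass defs above):

#include <cstdint>

enum class ItinClass { IIAlu, IILoad, IIStore, IIFlush, IIIdiv, IIBranch };

constexpr unsigned latencyCycles(ItinClass c) {
  switch (c) {
  case ItinClass::IILoad: return 3;  // InstrStage<3,  [ALU]>
  case ItinClass::IIIdiv: return 38; // InstrStage<38, [IMULDIV]>
  default:                return 1;  // all other classes are single-cycle
  }
}

static_assert(latencyCycles(ItinClass::IIIdiv) == 38, "divide stalls 38 cycles");
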
diff --git a/lib/Target/Nios2/Nios2Subtarget.cpp b/lib/Target/Nios2/Nios2Subtarget.cpp
deleted file mode 100644
index 196bed2..0000000
--- a/lib/Target/Nios2/Nios2Subtarget.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- Nios2Subtarget.cpp - Nios2 Subtarget Information ------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the Nios2 specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2Subtarget.h"
-#include "Nios2.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "nios2-subtarget"
-
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "Nios2GenSubtargetInfo.inc"
-
-void Nios2Subtarget::anchor() {}
-
-Nios2Subtarget::Nios2Subtarget(const Triple &TT, const std::string &CPU,
-                               const std::string &FS, const TargetMachine &TM)
-    :
-
-      // Nios2GenSubtargetInfo lets llc -march=nios2 -mcpu=help display the
-      // supported features.
-      Nios2GenSubtargetInfo(TT, CPU, FS), TargetTriple(TT),
-      InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
-      FrameLowering(*this) {}
-
-Nios2Subtarget &Nios2Subtarget::initializeSubtargetDependencies(StringRef CPU,
-                                                                StringRef FS) {
-  if (TargetTriple.getArch() == Triple::nios2) {
-    if (CPU != "nios2r2") {
-      CPU = "nios2r1";
-      Nios2ArchVersion = Nios2r1;
-    } else {
-      Nios2ArchVersion = Nios2r2;
-    }
-  } else {
-    errs() << "!!!Error, TargetTriple.getArch() = " << TargetTriple.getArch()
-           << ", CPU = " << CPU << "\n";
-    exit(1);
-  }
-
-  // Parse features string.
-  ParseSubtargetFeatures(CPU, FS);
-
-  return *this;
-}
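
The CPU selection in initializeSubtargetDependencies above is deliberately coarse: any CPU string other than "nios2r2" (including the empty default) is rewritten to "nios2r1" before the feature string is parsed. The same rule as a standalone helper (names are ours):

#include <string>

enum class ArchVersion { Nios2r1, Nios2r2 };

// Mirrors initializeSubtargetDependencies: "nios2r2" selects the R2 ISA,
// everything else (including an empty -mcpu) falls back to R1.
ArchVersion resolveNios2CPU(std::string &cpu) {
  if (cpu != "nios2r2") {
    cpu = "nios2r1";
    return ArchVersion::Nios2r1;
  }
  return ArchVersion::Nios2r2;
}
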
diff --git a/lib/Target/Nios2/Nios2Subtarget.h b/lib/Target/Nios2/Nios2Subtarget.h
deleted file mode 100644
index a822dff..0000000
--- a/lib/Target/Nios2/Nios2Subtarget.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- Nios2Subtarget.h - Define Subtarget for the Nios2 -------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the Nios2 specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2SUBTARGET_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2SUBTARGET_H
-
-#include "Nios2FrameLowering.h"
-#include "Nios2ISelLowering.h"
-#include "Nios2InstrInfo.h"
-#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
-#include "llvm/CodeGen/TargetFrameLowering.h"
-#include "llvm/CodeGen/TargetSubtargetInfo.h"
-
-#define GET_SUBTARGETINFO_HEADER
-#include "Nios2GenSubtargetInfo.inc"
-
-namespace llvm {
-class StringRef;
-
-class Nios2TargetMachine;
-
-class Nios2Subtarget : public Nios2GenSubtargetInfo {
-  virtual void anchor();
-
-public:
-  // Nios2 R2 features
-  // Bit manipulation instructions extension
-  bool HasBMX;
-  // Code Density instructions extension
-  bool HasCDX;
-  // Multi-Processor instructions extension
-  bool HasMPX;
-  // New mandatory instructions
-  bool HasR2Mandatory;
-
-protected:
-  enum Nios2ArchEnum {
-    // Nios2 R1 ISA
-    Nios2r1,
-    // Nios2 R2 ISA
-    Nios2r2
-  };
-
-  // Nios2 architecture version
-  Nios2ArchEnum Nios2ArchVersion;
-
-  Triple TargetTriple;
-
-  Nios2InstrInfo InstrInfo;
-  Nios2TargetLowering TLInfo;
-  SelectionDAGTargetInfo TSInfo;
-  Nios2FrameLowering FrameLowering;
-
-public:
-  /// This constructor initializes the data members to match that
-  /// of the specified triple.
-  Nios2Subtarget(const Triple &TT, const std::string &CPU,
-                 const std::string &FS, const TargetMachine &TM);
-
-  /// ParseSubtargetFeatures - Parses the feature string, setting the specified
-  /// subtarget options. The definition of this function is auto-generated by
-  /// tblgen.
-
-  bool hasNios2r1() const { return Nios2ArchVersion >= Nios2r1; }
-  bool isNios2r1() const { return Nios2ArchVersion == Nios2r1; }
-  bool hasNios2r2() const { return Nios2ArchVersion >= Nios2r2; }
-  bool isNios2r2() const { return Nios2ArchVersion == Nios2r2; }
-
-  Nios2Subtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
-
-  const Nios2InstrInfo *getInstrInfo() const override { return &InstrInfo; }
-  const TargetFrameLowering *getFrameLowering() const override {
-    return &FrameLowering;
-  }
-  const Nios2RegisterInfo *getRegisterInfo() const override {
-    return &InstrInfo.getRegisterInfo();
-  }
-  const Nios2TargetLowering *getTargetLowering() const override {
-    return &TLInfo;
-  }
-  const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
-    return &TSInfo;
-  }
-};
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/Nios2TargetMachine.cpp b/lib/Target/Nios2/Nios2TargetMachine.cpp
deleted file mode 100644
index 4f90db9..0000000
--- a/lib/Target/Nios2/Nios2TargetMachine.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-//===-- Nios2TargetMachine.cpp - Define TargetMachine for Nios2 -----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Implements the info about Nios2 target spec.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2TargetMachine.h"
-#include "Nios2.h"
-#include "Nios2TargetObjectFile.h"
-
-#include "llvm/CodeGen/TargetPassConfig.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "nios2"
-
-extern "C" void LLVMInitializeNios2Target() {
-  // Register the target.
-  RegisterTargetMachine<Nios2TargetMachine> X(getTheNios2Target());
-}
-
-static std::string computeDataLayout() {
-  return "e-p:32:32:32-i8:8:32-i16:16:32-n32";
-}
-
-static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
-  if (!RM.hasValue())
-    return Reloc::Static;
-  return *RM;
-}
-
-Nios2TargetMachine::Nios2TargetMachine(const Target &T, const Triple &TT,
-                                       StringRef CPU, StringRef FS,
-                                       const TargetOptions &Options,
-                                       Optional<Reloc::Model> RM,
-                                       Optional<CodeModel::Model> CM,
-                                       CodeGenOpt::Level OL, bool JIT)
-    : LLVMTargetMachine(T, computeDataLayout(), TT, CPU, FS, Options,
-                        getEffectiveRelocModel(RM),
-                        getEffectiveCodeModel(CM, CodeModel::Small), OL),
-      TLOF(make_unique<Nios2TargetObjectFile>()),
-      Subtarget(TT, CPU, FS, *this) {
-  initAsmInfo();
-}
-
-Nios2TargetMachine::~Nios2TargetMachine() {}
-
-const Nios2Subtarget *
-Nios2TargetMachine::getSubtargetImpl(const Function &F) const {
-  Attribute CPUAttr = F.getFnAttribute("target-cpu");
-  Attribute FSAttr = F.getFnAttribute("target-features");
-
-  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
-                        ? CPUAttr.getValueAsString().str()
-                        : TargetCPU;
-  std::string FS = !FSAttr.hasAttribute(Attribute::None)
-                       ? FSAttr.getValueAsString().str()
-                       : TargetFS;
-
-  auto &I = SubtargetMap[CPU + FS];
-  if (!I) {
-    // This needs to be done before we create a new subtarget since any
-    // creation will depend on the TM and the code generation flags on the
-    // function that reside in TargetOptions.
-    resetTargetOptions(F);
-    I = llvm::make_unique<Nios2Subtarget>(TargetTriple, CPU, FS, *this);
-  }
-  return I.get();
-}
-
-namespace {
-/// Nios2 Code Generator Pass Configuration Options.
-class Nios2PassConfig : public TargetPassConfig {
-public:
-  Nios2PassConfig(Nios2TargetMachine &TM, PassManagerBase *PM)
-      : TargetPassConfig(TM, *PM) {}
-
-  Nios2TargetMachine &getNios2TargetMachine() const {
-    return getTM<Nios2TargetMachine>();
-  }
-
-  void addCodeGenPrepare() override;
-  bool addInstSelector() override;
-  void addIRPasses() override;
-};
-} // namespace
-
-TargetPassConfig *Nios2TargetMachine::createPassConfig(PassManagerBase &PM) {
-  return new Nios2PassConfig(*this, &PM);
-}
-
-void Nios2PassConfig::addCodeGenPrepare() {
-  TargetPassConfig::addCodeGenPrepare();
-}
-
-void Nios2PassConfig::addIRPasses() { TargetPassConfig::addIRPasses(); }
-
-// Install an instruction selector pass using
-// the ISelDag to gen Nios2 code.
-bool Nios2PassConfig::addInstSelector() {
-  addPass(createNios2ISelDag(getNios2TargetMachine(), getOptLevel()));
-  return false;
-}
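
getSubtargetImpl above is the per-function subtarget pattern most LLVM targets use: subtargets are memoized in a map keyed by the concatenation of the CPU and feature strings, so functions with identical target-cpu/target-features attributes share a single object. A reduced standalone version of the cache (types are ours):

#include <map>
#include <memory>
#include <string>

struct Subtarget {
  Subtarget(const std::string & /*cpu*/, const std::string & /*fs*/) {}
};

Subtarget *getOrCreateSubtarget(
    std::map<std::string, std::unique_ptr<Subtarget>> &cache,
    const std::string &cpu, const std::string &fs) {
  auto &slot = cache[cpu + fs]; // one entry per distinct (cpu, features) pair
  if (!slot)
    slot = std::make_unique<Subtarget>(cpu, fs);
  return slot.get();
}

One design quirk carried over here: plain concatenation can in principle collide ("ab" + "c" and "a" + "bc" produce the same key), so a separator between the two strings would make the cache key unambiguous.
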
diff --git a/lib/Target/Nios2/Nios2TargetMachine.h b/lib/Target/Nios2/Nios2TargetMachine.h
deleted file mode 100644
index 1ebfb39..0000000
--- a/lib/Target/Nios2/Nios2TargetMachine.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//===-- Nios2TargetMachine.h - Define TargetMachine for Nios2 ---*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the Nios2 specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2TARGETMACHINE_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2TARGETMACHINE_H
-
-#include "Nios2Subtarget.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-class Nios2TargetMachine : public LLVMTargetMachine {
-  mutable StringMap<std::unique_ptr<Nios2Subtarget>> SubtargetMap;
-  std::unique_ptr<TargetLoweringObjectFile> TLOF;
-  Nios2Subtarget Subtarget;
-
-public:
-  Nios2TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
-                     StringRef FS, const TargetOptions &Options,
-                     Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
-                     CodeGenOpt::Level OL, bool JIT);
-  ~Nios2TargetMachine() override;
-
-  const Nios2Subtarget *getSubtargetImpl() const { return &Subtarget; }
-  const Nios2Subtarget *getSubtargetImpl(const Function &F) const override;
-
-  TargetLoweringObjectFile *getObjFileLowering() const override {
-    return TLOF.get();
-  }
-
-  // Pass Pipeline Configuration
-  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
-};
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/Nios2TargetObjectFile.cpp b/lib/Target/Nios2/Nios2TargetObjectFile.cpp
deleted file mode 100644
index 5fc85ef..0000000
--- a/lib/Target/Nios2/Nios2TargetObjectFile.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//===-- Nios2TargetObjectFile.cpp - Nios2 Object Files --------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2TargetObjectFile.h"
-
-using namespace llvm;
-
-void Nios2TargetObjectFile::Initialize(MCContext &Ctx,
-                                       const TargetMachine &TM) {
-  TargetLoweringObjectFileELF::Initialize(Ctx, TM);
-  InitializeELF(TM.Options.UseInitArray);
-}
diff --git a/lib/Target/Nios2/Nios2TargetObjectFile.h b/lib/Target/Nios2/Nios2TargetObjectFile.h
deleted file mode 100644
index e9ed6e3..0000000
--- a/lib/Target/Nios2/Nios2TargetObjectFile.h
+++ /dev/null
@@ -1,26 +0,0 @@
-//===-- llvm/Target/Nios2TargetObjectFile.h - Nios2 Object Info -*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2TARGETOBJECTFILE_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2TARGETOBJECTFILE_H
-
-#include "Nios2TargetMachine.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-
-namespace llvm {
-
-class Nios2TargetObjectFile : public TargetLoweringObjectFileELF {
-public:
-  Nios2TargetObjectFile() : TargetLoweringObjectFileELF() {}
-
-  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
-};
-} // end namespace llvm
-
-#endif
diff --git a/lib/Target/Nios2/Nios2TargetStreamer.h b/lib/Target/Nios2/Nios2TargetStreamer.h
deleted file mode 100644
index 1520ac2..0000000
--- a/lib/Target/Nios2/Nios2TargetStreamer.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//===-- Nios2TargetStreamer.h - Nios2 Target Streamer ----------*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_NIOS2_NIOS2TARGETSTREAMER_H
-#define LLVM_LIB_TARGET_NIOS2_NIOS2TARGETSTREAMER_H
-
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
-
-namespace llvm {
-
-class Nios2TargetStreamer : public MCTargetStreamer {
-public:
-  Nios2TargetStreamer(MCStreamer &S);
-};
-
-// This part is for ASCII assembly output.
-class Nios2TargetAsmStreamer : public Nios2TargetStreamer {
-public:
-  Nios2TargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
-};
-
-} // namespace llvm
-#endif
diff --git a/lib/Target/Nios2/TargetInfo/CMakeLists.txt b/lib/Target/Nios2/TargetInfo/CMakeLists.txt
deleted file mode 100644
index 394d2c2..0000000
--- a/lib/Target/Nios2/TargetInfo/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_llvm_library(LLVMNios2Info Nios2TargetInfo.cpp)
diff --git a/lib/Target/Nios2/TargetInfo/LLVMBuild.txt b/lib/Target/Nios2/TargetInfo/LLVMBuild.txt
deleted file mode 100644
index 558f750..0000000
--- a/lib/Target/Nios2/TargetInfo/LLVMBuild.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-;===- ./lib/Target/Nios2/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = Nios2Info
-parent = Nios2
-required_libraries = Support
-add_to_library_groups = Nios2
diff --git a/lib/Target/Nios2/TargetInfo/Nios2TargetInfo.cpp b/lib/Target/Nios2/TargetInfo/Nios2TargetInfo.cpp
deleted file mode 100644
index d808a96..0000000
--- a/lib/Target/Nios2/TargetInfo/Nios2TargetInfo.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- Nios2TargetInfo.cpp - Nios2 Target Implementation -----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-Target &llvm::getTheNios2Target() {
-  static Target TheNios2Target;
-  return TheNios2Target;
-}
-
-extern "C" void LLVMInitializeNios2TargetInfo() {
-  RegisterTarget<Triple::nios2,
-                 /*HasJIT=*/true>
-      X(getTheNios2Target(), "nios2", "Nios2", "Nios2");
-}
diff --git a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index 56307a8..8b3480f 100644
--- a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -21,7 +21,6 @@
 #include "llvm/MC/MCParser/MCAsmParser.h"
 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
-#include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSubtargetInfo.h"
 #include "llvm/MC/MCSymbolELF.h"
@@ -31,169 +30,7 @@
 
 using namespace llvm;
 
-static const MCPhysReg RRegs[32] = {
-  PPC::R0,  PPC::R1,  PPC::R2,  PPC::R3,
-  PPC::R4,  PPC::R5,  PPC::R6,  PPC::R7,
-  PPC::R8,  PPC::R9,  PPC::R10, PPC::R11,
-  PPC::R12, PPC::R13, PPC::R14, PPC::R15,
-  PPC::R16, PPC::R17, PPC::R18, PPC::R19,
-  PPC::R20, PPC::R21, PPC::R22, PPC::R23,
-  PPC::R24, PPC::R25, PPC::R26, PPC::R27,
-  PPC::R28, PPC::R29, PPC::R30, PPC::R31
-};
-static const MCPhysReg RRegsNoR0[32] = {
-  PPC::ZERO,
-            PPC::R1,  PPC::R2,  PPC::R3,
-  PPC::R4,  PPC::R5,  PPC::R6,  PPC::R7,
-  PPC::R8,  PPC::R9,  PPC::R10, PPC::R11,
-  PPC::R12, PPC::R13, PPC::R14, PPC::R15,
-  PPC::R16, PPC::R17, PPC::R18, PPC::R19,
-  PPC::R20, PPC::R21, PPC::R22, PPC::R23,
-  PPC::R24, PPC::R25, PPC::R26, PPC::R27,
-  PPC::R28, PPC::R29, PPC::R30, PPC::R31
-};
-static const MCPhysReg XRegs[32] = {
-  PPC::X0,  PPC::X1,  PPC::X2,  PPC::X3,
-  PPC::X4,  PPC::X5,  PPC::X6,  PPC::X7,
-  PPC::X8,  PPC::X9,  PPC::X10, PPC::X11,
-  PPC::X12, PPC::X13, PPC::X14, PPC::X15,
-  PPC::X16, PPC::X17, PPC::X18, PPC::X19,
-  PPC::X20, PPC::X21, PPC::X22, PPC::X23,
-  PPC::X24, PPC::X25, PPC::X26, PPC::X27,
-  PPC::X28, PPC::X29, PPC::X30, PPC::X31
-};
-static const MCPhysReg XRegsNoX0[32] = {
-  PPC::ZERO8,
-            PPC::X1,  PPC::X2,  PPC::X3,
-  PPC::X4,  PPC::X5,  PPC::X6,  PPC::X7,
-  PPC::X8,  PPC::X9,  PPC::X10, PPC::X11,
-  PPC::X12, PPC::X13, PPC::X14, PPC::X15,
-  PPC::X16, PPC::X17, PPC::X18, PPC::X19,
-  PPC::X20, PPC::X21, PPC::X22, PPC::X23,
-  PPC::X24, PPC::X25, PPC::X26, PPC::X27,
-  PPC::X28, PPC::X29, PPC::X30, PPC::X31
-};
-static const MCPhysReg FRegs[32] = {
-  PPC::F0,  PPC::F1,  PPC::F2,  PPC::F3,
-  PPC::F4,  PPC::F5,  PPC::F6,  PPC::F7,
-  PPC::F8,  PPC::F9,  PPC::F10, PPC::F11,
-  PPC::F12, PPC::F13, PPC::F14, PPC::F15,
-  PPC::F16, PPC::F17, PPC::F18, PPC::F19,
-  PPC::F20, PPC::F21, PPC::F22, PPC::F23,
-  PPC::F24, PPC::F25, PPC::F26, PPC::F27,
-  PPC::F28, PPC::F29, PPC::F30, PPC::F31
-};
-static const MCPhysReg SPERegs[32] = {
-  PPC::S0,  PPC::S1,  PPC::S2,  PPC::S3,
-  PPC::S4,  PPC::S5,  PPC::S6,  PPC::S7,
-  PPC::S8,  PPC::S9,  PPC::S10, PPC::S11,
-  PPC::S12, PPC::S13, PPC::S14, PPC::S15,
-  PPC::S16, PPC::S17, PPC::S18, PPC::S19,
-  PPC::S20, PPC::S21, PPC::S22, PPC::S23,
-  PPC::S24, PPC::S25, PPC::S26, PPC::S27,
-  PPC::S28, PPC::S29, PPC::S30, PPC::S31
-};
-static const MCPhysReg VFRegs[32] = {
-  PPC::VF0,  PPC::VF1,  PPC::VF2,  PPC::VF3,
-  PPC::VF4,  PPC::VF5,  PPC::VF6,  PPC::VF7,
-  PPC::VF8,  PPC::VF9,  PPC::VF10, PPC::VF11,
-  PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
-  PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
-  PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
-  PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
-  PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
-};
-static const MCPhysReg VRegs[32] = {
-  PPC::V0,  PPC::V1,  PPC::V2,  PPC::V3,
-  PPC::V4,  PPC::V5,  PPC::V6,  PPC::V7,
-  PPC::V8,  PPC::V9,  PPC::V10, PPC::V11,
-  PPC::V12, PPC::V13, PPC::V14, PPC::V15,
-  PPC::V16, PPC::V17, PPC::V18, PPC::V19,
-  PPC::V20, PPC::V21, PPC::V22, PPC::V23,
-  PPC::V24, PPC::V25, PPC::V26, PPC::V27,
-  PPC::V28, PPC::V29, PPC::V30, PPC::V31
-};
-static const MCPhysReg VSRegs[64] = {
-  PPC::VSL0,  PPC::VSL1,  PPC::VSL2,  PPC::VSL3,
-  PPC::VSL4,  PPC::VSL5,  PPC::VSL6,  PPC::VSL7,
-  PPC::VSL8,  PPC::VSL9,  PPC::VSL10, PPC::VSL11,
-  PPC::VSL12, PPC::VSL13, PPC::VSL14, PPC::VSL15,
-  PPC::VSL16, PPC::VSL17, PPC::VSL18, PPC::VSL19,
-  PPC::VSL20, PPC::VSL21, PPC::VSL22, PPC::VSL23,
-  PPC::VSL24, PPC::VSL25, PPC::VSL26, PPC::VSL27,
-  PPC::VSL28, PPC::VSL29, PPC::VSL30, PPC::VSL31,
-
-  PPC::V0,  PPC::V1,  PPC::V2,  PPC::V3,
-  PPC::V4,  PPC::V5,  PPC::V6,  PPC::V7,
-  PPC::V8,  PPC::V9,  PPC::V10, PPC::V11,
-  PPC::V12, PPC::V13, PPC::V14, PPC::V15,
-  PPC::V16, PPC::V17, PPC::V18, PPC::V19,
-  PPC::V20, PPC::V21, PPC::V22, PPC::V23,
-  PPC::V24, PPC::V25, PPC::V26, PPC::V27,
-  PPC::V28, PPC::V29, PPC::V30, PPC::V31
-};
-static const MCPhysReg VSFRegs[64] = {
-  PPC::F0,  PPC::F1,  PPC::F2,  PPC::F3,
-  PPC::F4,  PPC::F5,  PPC::F6,  PPC::F7,
-  PPC::F8,  PPC::F9,  PPC::F10, PPC::F11,
-  PPC::F12, PPC::F13, PPC::F14, PPC::F15,
-  PPC::F16, PPC::F17, PPC::F18, PPC::F19,
-  PPC::F20, PPC::F21, PPC::F22, PPC::F23,
-  PPC::F24, PPC::F25, PPC::F26, PPC::F27,
-  PPC::F28, PPC::F29, PPC::F30, PPC::F31,
-
-  PPC::VF0,  PPC::VF1,  PPC::VF2,  PPC::VF3,
-  PPC::VF4,  PPC::VF5,  PPC::VF6,  PPC::VF7,
-  PPC::VF8,  PPC::VF9,  PPC::VF10, PPC::VF11,
-  PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
-  PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
-  PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
-  PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
-  PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
-};
-static const MCPhysReg VSSRegs[64] = {
-  PPC::F0,  PPC::F1,  PPC::F2,  PPC::F3,
-  PPC::F4,  PPC::F5,  PPC::F6,  PPC::F7,
-  PPC::F8,  PPC::F9,  PPC::F10, PPC::F11,
-  PPC::F12, PPC::F13, PPC::F14, PPC::F15,
-  PPC::F16, PPC::F17, PPC::F18, PPC::F19,
-  PPC::F20, PPC::F21, PPC::F22, PPC::F23,
-  PPC::F24, PPC::F25, PPC::F26, PPC::F27,
-  PPC::F28, PPC::F29, PPC::F30, PPC::F31,
-
-  PPC::VF0,  PPC::VF1,  PPC::VF2,  PPC::VF3,
-  PPC::VF4,  PPC::VF5,  PPC::VF6,  PPC::VF7,
-  PPC::VF8,  PPC::VF9,  PPC::VF10, PPC::VF11,
-  PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
-  PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
-  PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
-  PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
-  PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
-};
-static unsigned QFRegs[32] = {
-  PPC::QF0,  PPC::QF1,  PPC::QF2,  PPC::QF3,
-  PPC::QF4,  PPC::QF5,  PPC::QF6,  PPC::QF7,
-  PPC::QF8,  PPC::QF9,  PPC::QF10, PPC::QF11,
-  PPC::QF12, PPC::QF13, PPC::QF14, PPC::QF15,
-  PPC::QF16, PPC::QF17, PPC::QF18, PPC::QF19,
-  PPC::QF20, PPC::QF21, PPC::QF22, PPC::QF23,
-  PPC::QF24, PPC::QF25, PPC::QF26, PPC::QF27,
-  PPC::QF28, PPC::QF29, PPC::QF30, PPC::QF31
-};
-static const MCPhysReg CRBITRegs[32] = {
-  PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN,
-  PPC::CR1LT, PPC::CR1GT, PPC::CR1EQ, PPC::CR1UN,
-  PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
-  PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
-  PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
-  PPC::CR5LT, PPC::CR5GT, PPC::CR5EQ, PPC::CR5UN,
-  PPC::CR6LT, PPC::CR6GT, PPC::CR6EQ, PPC::CR6UN,
-  PPC::CR7LT, PPC::CR7GT, PPC::CR7EQ, PPC::CR7UN
-};
-static const MCPhysReg CRRegs[8] = {
-  PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
-  PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7
-};
+DEFINE_PPC_REGCLASSES;
 
 // Evaluate an expression containing condition register
 // or condition register field symbols.  Returns positive
diff --git a/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp b/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
index af0fbae..26869f2 100644
--- a/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
+++ b/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
@@ -17,6 +17,8 @@
 
 using namespace llvm;
 
+DEFINE_PPC_REGCLASSES;
+
 #define DEBUG_TYPE "ppc-disassembler"
 
 typedef MCDisassembler::DecodeStatus DecodeStatus;
@@ -62,184 +64,9 @@
 // FIXME: These can be generated by TableGen from the existing register
 // encoding values!
 
-static const unsigned CRRegs[] = {
-  PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
-  PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7
-};
-
-static const unsigned CRBITRegs[] = {
-  PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN,
-  PPC::CR1LT, PPC::CR1GT, PPC::CR1EQ, PPC::CR1UN,
-  PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
-  PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
-  PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
-  PPC::CR5LT, PPC::CR5GT, PPC::CR5EQ, PPC::CR5UN,
-  PPC::CR6LT, PPC::CR6GT, PPC::CR6EQ, PPC::CR6UN,
-  PPC::CR7LT, PPC::CR7GT, PPC::CR7EQ, PPC::CR7UN
-};
-
-static const unsigned FRegs[] = {
-  PPC::F0, PPC::F1, PPC::F2, PPC::F3,
-  PPC::F4, PPC::F5, PPC::F6, PPC::F7,
-  PPC::F8, PPC::F9, PPC::F10, PPC::F11,
-  PPC::F12, PPC::F13, PPC::F14, PPC::F15,
-  PPC::F16, PPC::F17, PPC::F18, PPC::F19,
-  PPC::F20, PPC::F21, PPC::F22, PPC::F23,
-  PPC::F24, PPC::F25, PPC::F26, PPC::F27,
-  PPC::F28, PPC::F29, PPC::F30, PPC::F31
-};
-
-static const unsigned VFRegs[] = {
-  PPC::VF0, PPC::VF1, PPC::VF2, PPC::VF3,
-  PPC::VF4, PPC::VF5, PPC::VF6, PPC::VF7,
-  PPC::VF8, PPC::VF9, PPC::VF10, PPC::VF11,
-  PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
-  PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
-  PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
-  PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
-  PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
-};
-
-static const unsigned VRegs[] = {
-  PPC::V0, PPC::V1, PPC::V2, PPC::V3,
-  PPC::V4, PPC::V5, PPC::V6, PPC::V7,
-  PPC::V8, PPC::V9, PPC::V10, PPC::V11,
-  PPC::V12, PPC::V13, PPC::V14, PPC::V15,
-  PPC::V16, PPC::V17, PPC::V18, PPC::V19,
-  PPC::V20, PPC::V21, PPC::V22, PPC::V23,
-  PPC::V24, PPC::V25, PPC::V26, PPC::V27,
-  PPC::V28, PPC::V29, PPC::V30, PPC::V31
-};
-
-static const unsigned VSRegs[] = {
-  PPC::VSL0, PPC::VSL1, PPC::VSL2, PPC::VSL3,
-  PPC::VSL4, PPC::VSL5, PPC::VSL6, PPC::VSL7,
-  PPC::VSL8, PPC::VSL9, PPC::VSL10, PPC::VSL11,
-  PPC::VSL12, PPC::VSL13, PPC::VSL14, PPC::VSL15,
-  PPC::VSL16, PPC::VSL17, PPC::VSL18, PPC::VSL19,
-  PPC::VSL20, PPC::VSL21, PPC::VSL22, PPC::VSL23,
-  PPC::VSL24, PPC::VSL25, PPC::VSL26, PPC::VSL27,
-  PPC::VSL28, PPC::VSL29, PPC::VSL30, PPC::VSL31,
-
-  PPC::V0, PPC::V1, PPC::V2, PPC::V3,
-  PPC::V4, PPC::V5, PPC::V6, PPC::V7,
-  PPC::V8, PPC::V9, PPC::V10, PPC::V11,
-  PPC::V12, PPC::V13, PPC::V14, PPC::V15,
-  PPC::V16, PPC::V17, PPC::V18, PPC::V19,
-  PPC::V20, PPC::V21, PPC::V22, PPC::V23,
-  PPC::V24, PPC::V25, PPC::V26, PPC::V27,
-  PPC::V28, PPC::V29, PPC::V30, PPC::V31
-};
-
-static const unsigned VSFRegs[] = {
-  PPC::F0, PPC::F1, PPC::F2, PPC::F3,
-  PPC::F4, PPC::F5, PPC::F6, PPC::F7,
-  PPC::F8, PPC::F9, PPC::F10, PPC::F11,
-  PPC::F12, PPC::F13, PPC::F14, PPC::F15,
-  PPC::F16, PPC::F17, PPC::F18, PPC::F19,
-  PPC::F20, PPC::F21, PPC::F22, PPC::F23,
-  PPC::F24, PPC::F25, PPC::F26, PPC::F27,
-  PPC::F28, PPC::F29, PPC::F30, PPC::F31,
-
-  PPC::VF0, PPC::VF1, PPC::VF2, PPC::VF3,
-  PPC::VF4, PPC::VF5, PPC::VF6, PPC::VF7,
-  PPC::VF8, PPC::VF9, PPC::VF10, PPC::VF11,
-  PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
-  PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
-  PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
-  PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
-  PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
-};
-
-static const unsigned VSSRegs[] = {
-  PPC::F0, PPC::F1, PPC::F2, PPC::F3,
-  PPC::F4, PPC::F5, PPC::F6, PPC::F7,
-  PPC::F8, PPC::F9, PPC::F10, PPC::F11,
-  PPC::F12, PPC::F13, PPC::F14, PPC::F15,
-  PPC::F16, PPC::F17, PPC::F18, PPC::F19,
-  PPC::F20, PPC::F21, PPC::F22, PPC::F23,
-  PPC::F24, PPC::F25, PPC::F26, PPC::F27,
-  PPC::F28, PPC::F29, PPC::F30, PPC::F31,
-
-  PPC::VF0, PPC::VF1, PPC::VF2, PPC::VF3,
-  PPC::VF4, PPC::VF5, PPC::VF6, PPC::VF7,
-  PPC::VF8, PPC::VF9, PPC::VF10, PPC::VF11,
-  PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
-  PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
-  PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
-  PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
-  PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
-};
-
-static const unsigned GPRegs[] = {
-  PPC::R0, PPC::R1, PPC::R2, PPC::R3,
-  PPC::R4, PPC::R5, PPC::R6, PPC::R7,
-  PPC::R8, PPC::R9, PPC::R10, PPC::R11,
-  PPC::R12, PPC::R13, PPC::R14, PPC::R15,
-  PPC::R16, PPC::R17, PPC::R18, PPC::R19,
-  PPC::R20, PPC::R21, PPC::R22, PPC::R23,
-  PPC::R24, PPC::R25, PPC::R26, PPC::R27,
-  PPC::R28, PPC::R29, PPC::R30, PPC::R31
-};
-
-static const unsigned GP0Regs[] = {
-  PPC::ZERO, PPC::R1, PPC::R2, PPC::R3,
-  PPC::R4, PPC::R5, PPC::R6, PPC::R7,
-  PPC::R8, PPC::R9, PPC::R10, PPC::R11,
-  PPC::R12, PPC::R13, PPC::R14, PPC::R15,
-  PPC::R16, PPC::R17, PPC::R18, PPC::R19,
-  PPC::R20, PPC::R21, PPC::R22, PPC::R23,
-  PPC::R24, PPC::R25, PPC::R26, PPC::R27,
-  PPC::R28, PPC::R29, PPC::R30, PPC::R31
-};
-
-static const unsigned G8Regs[] = {
-  PPC::X0, PPC::X1, PPC::X2, PPC::X3,
-  PPC::X4, PPC::X5, PPC::X6, PPC::X7,
-  PPC::X8, PPC::X9, PPC::X10, PPC::X11,
-  PPC::X12, PPC::X13, PPC::X14, PPC::X15,
-  PPC::X16, PPC::X17, PPC::X18, PPC::X19,
-  PPC::X20, PPC::X21, PPC::X22, PPC::X23,
-  PPC::X24, PPC::X25, PPC::X26, PPC::X27,
-  PPC::X28, PPC::X29, PPC::X30, PPC::X31
-};
-
-static const unsigned G80Regs[] = {
-  PPC::ZERO8, PPC::X1, PPC::X2, PPC::X3,
-  PPC::X4, PPC::X5, PPC::X6, PPC::X7,
-  PPC::X8, PPC::X9, PPC::X10, PPC::X11,
-  PPC::X12, PPC::X13, PPC::X14, PPC::X15,
-  PPC::X16, PPC::X17, PPC::X18, PPC::X19,
-  PPC::X20, PPC::X21, PPC::X22, PPC::X23,
-  PPC::X24, PPC::X25, PPC::X26, PPC::X27,
-  PPC::X28, PPC::X29, PPC::X30, PPC::X31
-};
-
-static const unsigned QFRegs[] = {
-  PPC::QF0, PPC::QF1, PPC::QF2, PPC::QF3,
-  PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
-  PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11,
-  PPC::QF12, PPC::QF13, PPC::QF14, PPC::QF15,
-  PPC::QF16, PPC::QF17, PPC::QF18, PPC::QF19,
-  PPC::QF20, PPC::QF21, PPC::QF22, PPC::QF23,
-  PPC::QF24, PPC::QF25, PPC::QF26, PPC::QF27,
-  PPC::QF28, PPC::QF29, PPC::QF30, PPC::QF31
-};
-
-static const unsigned SPERegs[] = {
-  PPC::S0, PPC::S1, PPC::S2, PPC::S3,
-  PPC::S4, PPC::S5, PPC::S6, PPC::S7,
-  PPC::S8, PPC::S9, PPC::S10, PPC::S11,
-  PPC::S12, PPC::S13, PPC::S14, PPC::S15,
-  PPC::S16, PPC::S17, PPC::S18, PPC::S19,
-  PPC::S20, PPC::S21, PPC::S22, PPC::S23,
-  PPC::S24, PPC::S25, PPC::S26, PPC::S27,
-  PPC::S28, PPC::S29, PPC::S30, PPC::S31
-};
-
 template <std::size_t N>
 static DecodeStatus decodeRegisterClass(MCInst &Inst, uint64_t RegNo,
-                                        const unsigned (&Regs)[N]) {
+                                        const MCPhysReg (&Regs)[N]) {
   assert(RegNo < N && "Invalid register number");
   Inst.addOperand(MCOperand::createReg(Regs[RegNo]));
   return MCDisassembler::Success;
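
The helper above leans on C++ template argument deduction: the array bound N is deduced from the reference parameter, so every register table carries its size into the bounds assert. A self-contained sketch of the idiom (names here are illustrative, not from the LLVM sources):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using PhysReg = uint16_t; // stand-in for llvm::MCPhysReg

    // N is deduced from the bound of the array passed by reference, so the
    // lookup can be checked against the real table size.
    template <std::size_t N>
    PhysReg lookupReg(uint64_t RegNo, const PhysReg (&Regs)[N]) {
      assert(RegNo < N && "Invalid register number");
      return Regs[RegNo];
    }

    static const PhysReg Demo[4] = {10, 11, 12, 13};

    int main() {
      return lookupReg(2, Demo) == 12 ? 0 : 1; // N deduced as 4
    }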
@@ -308,25 +135,25 @@
 static DecodeStatus DecodeGPRCRegisterClass(MCInst &Inst, uint64_t RegNo,
                                             uint64_t Address,
                                             const void *Decoder) {
-  return decodeRegisterClass(Inst, RegNo, GPRegs);
+  return decodeRegisterClass(Inst, RegNo, RRegs);
 }
 
 static DecodeStatus DecodeGPRC_NOR0RegisterClass(MCInst &Inst, uint64_t RegNo,
                                             uint64_t Address,
                                             const void *Decoder) {
-  return decodeRegisterClass(Inst, RegNo, GP0Regs);
+  return decodeRegisterClass(Inst, RegNo, RRegsNoR0);
 }
 
 static DecodeStatus DecodeG8RCRegisterClass(MCInst &Inst, uint64_t RegNo,
                                             uint64_t Address,
                                             const void *Decoder) {
-  return decodeRegisterClass(Inst, RegNo, G8Regs);
+  return decodeRegisterClass(Inst, RegNo, XRegs);
 }
 
 static DecodeStatus DecodeG8RC_NOX0RegisterClass(MCInst &Inst, uint64_t RegNo,
                                             uint64_t Address,
                                             const void *Decoder) {
-  return decodeRegisterClass(Inst, RegNo, G80Regs);
+  return decodeRegisterClass(Inst, RegNo, XRegsNoX0);
 }
 
 #define DecodePointerLikeRegClass0 DecodeGPRCRegisterClass
@@ -341,7 +168,7 @@
 static DecodeStatus DecodeSPE4RCRegisterClass(MCInst &Inst, uint64_t RegNo,
                                             uint64_t Address,
                                             const void *Decoder) {
-  return decodeRegisterClass(Inst, RegNo, GPRegs);
+  return decodeRegisterClass(Inst, RegNo, RRegs);
 }
 
 static DecodeStatus DecodeSPERCRegisterClass(MCInst &Inst, uint64_t RegNo,
@@ -388,19 +215,19 @@
   case PPC::LFSU:
   case PPC::LFDU:
     // Add the tied output operand.
-    Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+    Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
     break;
   case PPC::STBU:
   case PPC::STHU:
   case PPC::STWU:
   case PPC::STFSU:
   case PPC::STFDU:
-    Inst.insert(Inst.begin(), MCOperand::createReg(GP0Regs[Base]));
+    Inst.insert(Inst.begin(), MCOperand::createReg(RRegsNoR0[Base]));
     break;
   }
 
   Inst.addOperand(MCOperand::createImm(SignExtend64<16>(Disp)));
-  Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+  Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
   return MCDisassembler::Success;
 }
 
@@ -416,12 +243,12 @@
 
   if (Inst.getOpcode() == PPC::LDU)
     // Add the tied output operand.
-    Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+    Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
   else if (Inst.getOpcode() == PPC::STDU)
-    Inst.insert(Inst.begin(), MCOperand::createReg(GP0Regs[Base]));
+    Inst.insert(Inst.begin(), MCOperand::createReg(RRegsNoR0[Base]));
 
   Inst.addOperand(MCOperand::createImm(SignExtend64<16>(Disp << 2)));
-  Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+  Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
   return MCDisassembler::Success;
 }
 
@@ -436,7 +263,7 @@
   assert(Base < 32 && "Invalid base register");
 
   Inst.addOperand(MCOperand::createImm(SignExtend64<16>(Disp << 4)));
-  Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+  Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
   return MCDisassembler::Success;
 }
 
@@ -451,7 +278,7 @@
   assert(Base < 32 && "Invalid base register");
 
   Inst.addOperand(MCOperand::createImm(Disp << 3));
-  Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+  Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
   return MCDisassembler::Success;
 }
 
@@ -466,7 +293,7 @@
   assert(Base < 32 && "Invalid base register");
 
   Inst.addOperand(MCOperand::createImm(Disp << 2));
-  Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+  Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
   return MCDisassembler::Success;
 }
 
@@ -481,7 +308,7 @@
   assert(Base < 32 && "Invalid base register");
 
   Inst.addOperand(MCOperand::createImm(Disp << 1));
-  Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+  Inst.addOperand(MCOperand::createReg(RRegsNoR0[Base]));
   return MCDisassembler::Success;
 }
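
These memory-operand decoders all scale the raw displacement field before sign-extending it as a 16-bit quantity. A minimal sketch of that arithmetic, with a local reimplementation standing in for llvm::SignExtend64 (illustrative only, and relying on arithmetic right shift of signed values):

    #include <cstdint>

    // Sign-extend the low B bits of x, mirroring SignExtend64<B>(x).
    template <unsigned B>
    int64_t signExtend64(uint64_t x) {
      return int64_t(x << (64 - B)) >> (64 - B);
    }

    int main() {
      // A DS-form word offset of 0x3FFF (14 bits, all ones) is shifted left
      // by 2 and sign-extended as 16 bits, yielding a displacement of -4.
      uint64_t Disp = 0x3FFF;
      return signExtend64<16>(Disp << 2) == -4 ? 0 : 1;
    }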
 
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
index 316fd2c..d6e450c 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
@@ -17,6 +17,7 @@
 // GCC #defines PPC on Linux but we use it as our namespace name
 #undef PPC
 
+#include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/Support/MathExtras.h"
 #include <cstdint>
 #include <memory>
@@ -104,4 +105,63 @@
 #define GET_SUBTARGETINFO_ENUM
 #include "PPCGenSubtargetInfo.inc"
 
+#define PPC_REGS0_31(X)                                                        \
+  {                                                                            \
+    X##0, X##1, X##2, X##3, X##4, X##5, X##6, X##7, X##8, X##9, X##10, X##11,  \
+        X##12, X##13, X##14, X##15, X##16, X##17, X##18, X##19, X##20, X##21,  \
+        X##22, X##23, X##24, X##25, X##26, X##27, X##28, X##29, X##30, X##31   \
+  }
+
+#define PPC_REGS_NO0_31(Z, X)                                                  \
+  {                                                                            \
+    Z, X##1, X##2, X##3, X##4, X##5, X##6, X##7, X##8, X##9, X##10, X##11,     \
+        X##12, X##13, X##14, X##15, X##16, X##17, X##18, X##19, X##20, X##21,  \
+        X##22, X##23, X##24, X##25, X##26, X##27, X##28, X##29, X##30, X##31   \
+  }
+
+#define PPC_REGS_LO_HI(LO, HI)                                                 \
+  {                                                                            \
+    LO##0, LO##1, LO##2, LO##3, LO##4, LO##5, LO##6, LO##7, LO##8, LO##9,      \
+        LO##10, LO##11, LO##12, LO##13, LO##14, LO##15, LO##16, LO##17,        \
+        LO##18, LO##19, LO##20, LO##21, LO##22, LO##23, LO##24, LO##25,        \
+        LO##26, LO##27, LO##28, LO##29, LO##30, LO##31, HI##0, HI##1, HI##2,   \
+        HI##3, HI##4, HI##5, HI##6, HI##7, HI##8, HI##9, HI##10, HI##11,       \
+        HI##12, HI##13, HI##14, HI##15, HI##16, HI##17, HI##18, HI##19,        \
+        HI##20, HI##21, HI##22, HI##23, HI##24, HI##25, HI##26, HI##27,        \
+        HI##28, HI##29, HI##30, HI##31                                         \
+  }
+
+using llvm::MCPhysReg;
+
+#define DEFINE_PPC_REGCLASSES \
+  static const MCPhysReg RRegs[32] = PPC_REGS0_31(PPC::R); \
+  static const MCPhysReg XRegs[32] = PPC_REGS0_31(PPC::X); \
+  static const MCPhysReg FRegs[32] = PPC_REGS0_31(PPC::F); \
+  static const MCPhysReg SPERegs[32] = PPC_REGS0_31(PPC::S); \
+  static const MCPhysReg VFRegs[32] = PPC_REGS0_31(PPC::VF); \
+  static const MCPhysReg VRegs[32] = PPC_REGS0_31(PPC::V); \
+  static const MCPhysReg QFRegs[32] = PPC_REGS0_31(PPC::QF); \
+  static const MCPhysReg RRegsNoR0[32] = \
+    PPC_REGS_NO0_31(PPC::ZERO, PPC::R); \
+  static const MCPhysReg XRegsNoX0[32] = \
+    PPC_REGS_NO0_31(PPC::ZERO8, PPC::X); \
+  static const MCPhysReg VSRegs[64] = \
+    PPC_REGS_LO_HI(PPC::VSL, PPC::V); \
+  static const MCPhysReg VSFRegs[64] = \
+    PPC_REGS_LO_HI(PPC::F, PPC::VF); \
+  static const MCPhysReg VSSRegs[64] = \
+    PPC_REGS_LO_HI(PPC::F, PPC::VF); \
+  static const MCPhysReg CRBITRegs[32] = { \
+    PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN, \
+    PPC::CR1LT, PPC::CR1GT, PPC::CR1EQ, PPC::CR1UN, \
+    PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN, \
+    PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN, \
+    PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN, \
+    PPC::CR5LT, PPC::CR5GT, PPC::CR5EQ, PPC::CR5UN, \
+    PPC::CR6LT, PPC::CR6GT, PPC::CR6EQ, PPC::CR6UN, \
+    PPC::CR7LT, PPC::CR7GT, PPC::CR7EQ, PPC::CR7UN}; \
+  static const MCPhysReg CRRegs[8] = { \
+    PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3, \
+    PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7}
+
 #endif // LLVM_LIB_TARGET_POWERPC_MCTARGETDESC_PPCMCTARGETDESC_H
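
PPC_REGS0_31 and friends use preprocessor token pasting (##) to stamp out the 32- and 64-entry tables that were previously written by hand. A toy reduction of the pattern, with made-up enumerators, shows the expansion:

    // REGS0_3(R) expands to { R0, R1, R2, R3 }, the same shape as
    // PPC_REGS0_31(PPC::R) at full scale.
    #define REGS0_3(X) { X##0, X##1, X##2, X##3 }

    enum Reg { R0, R1, R2, R3 };

    static const Reg RTable[4] = REGS0_3(R);

    int main() { return RTable[3] == R3 ? 0 : 1; }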
diff --git a/lib/Target/PowerPC/P9InstrResources.td b/lib/Target/PowerPC/P9InstrResources.td
index c6cbb90..17c3796 100644
--- a/lib/Target/PowerPC/P9InstrResources.td
+++ b/lib/Target/PowerPC/P9InstrResources.td
@@ -111,11 +111,11 @@
     (instregex "CNT(L|T)Z(D|W)(8)?(o)?$"),
     (instregex "POPCNT(D|W)$"),
     (instregex "CMPB(8)?$"),
+    (instregex "SETB(8)?$"),
     XSTDIVDP,
     XSTSQRTDP,
     XSXSIGDP,
     XSCVSPDPN,
-    SETB,
     BPERMD
 )>;
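
The instregex change folds SETB and SETB8 into one pattern: "SETB(8)?$" is an ordinary regular expression with an optional trailing 8. A rough std::regex model (TableGen anchors these patterns at the start of the opcode name, which std::regex_search alone does not, so this is only an approximation):

    #include <regex>
    #include <string>

    int main() {
      std::regex Pat("SETB(8)?$");
      bool MatchesSetb  = std::regex_search(std::string("SETB"), Pat);
      bool MatchesSetb8 = std::regex_search(std::string("SETB8"), Pat);
      return (MatchesSetb && MatchesSetb8) ? 0 : 1;
    }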
 
diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp
index 6681698..3b2d92d 100644
--- a/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/lib/Target/PowerPC/PPCFastISel.cpp
@@ -861,8 +861,20 @@
     }
   }
 
+  unsigned SrcReg1 = getRegForValue(SrcValue1);
+  if (SrcReg1 == 0)
+    return false;
+
+  unsigned SrcReg2 = 0;
+  if (!UseImm) {
+    SrcReg2 = getRegForValue(SrcValue2);
+    if (SrcReg2 == 0)
+      return false;
+  }
+
   unsigned CmpOpc;
   bool NeedsExt = false;
+  auto RC = MRI.getRegClass(SrcReg1);
   switch (SrcVT.SimpleTy) {
     default: return false;
     case MVT::f32:
@@ -879,8 +891,15 @@
             CmpOpc = PPC::EFSCMPGT;
             break;
         }
-      } else
+      } else {
         CmpOpc = PPC::FCMPUS;
+        if (isVSSRCRegClass(RC)) {
+          unsigned TmpReg = createResultReg(&PPC::F4RCRegClass);
+          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+                  TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg1);
+          SrcReg1 = TmpReg;
+        }
+      }
       break;
     case MVT::f64:
       if (HasSPE) {
@@ -896,8 +915,11 @@
             CmpOpc = PPC::EFDCMPGT;
             break;
         }
-      } else
+      } else if (isVSFRCRegClass(RC)) {
+        CmpOpc = PPC::XSCMPUDP;
+      } else {
         CmpOpc = PPC::FCMPUD;
+      }
       break;
     case MVT::i1:
     case MVT::i8:
@@ -918,17 +940,6 @@
       break;
   }
 
-  unsigned SrcReg1 = getRegForValue(SrcValue1);
-  if (SrcReg1 == 0)
-    return false;
-
-  unsigned SrcReg2 = 0;
-  if (!UseImm) {
-    SrcReg2 = getRegForValue(SrcValue2);
-    if (SrcReg2 == 0)
-      return false;
-  }
-
   if (NeedsExt) {
     unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
     if (!PPCEmitIntExt(SrcVT, SrcReg1, MVT::i32, ExtReg, IsZExt))
@@ -2354,7 +2365,8 @@
         PPCSubTarget->hasSPE() ? PPC::EVLDD : PPC::LFD))
     return false;
 
-  MI->eraseFromParent();
+  MachineBasicBlock::iterator I(MI);
+  removeDeadCode(I, std::next(I));
   return true;
 }
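
Hoisting the getRegForValue calls above the opcode switch matters because the comparison opcode now depends on the register class of the first source operand. A stripped-down model of the f64 decision (stand-in enums, not the FastISel API):

    // Stand-ins for the register classes and opcodes involved.
    enum RegClass { F8RC, VSFRC };
    enum Opcode { FCMPUD, XSCMPUDP };

    // f64 compares: plain FP registers use fcmpu, while values already
    // living in VSX registers can be compared directly with xscmpudp.
    Opcode selectF64Cmp(RegClass RC) {
      return RC == VSFRC ? XSCMPUDP : FCMPUD;
    }

    int main() {
      return (selectF64Cmp(VSFRC) == XSCMPUDP && selectF64Cmp(F8RC) == FCMPUD)
                 ? 0 : 1;
    }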
 
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 7d9ea93..31acd0f 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -81,6 +81,8 @@
           "Number of logical ops on i1 values calculated in GPR.");
 STATISTIC(OmittedForNonExtendUses,
           "Number of compares not eliminated as they have non-extending uses.");
+STATISTIC(NumP9Setb,
+          "Number of compares lowered to setb.");
 
 // FIXME: Remove this once the bug has been fixed!
 cl::opt<bool> ANDIGlueBug("expose-ppc-andi-glue-bug",
@@ -327,7 +329,6 @@
 
     bool isOffsetMultipleOf(SDNode *N, unsigned Val) const;
     void transferMemOperands(SDNode *N, SDNode *Result);
-    MachineSDNode *flipSignBit(const SDValue &N, SDNode **SignBit = nullptr);
   };
 
 } // end anonymous namespace
@@ -687,9 +688,8 @@
   SDValue Op1 = N->getOperand(1);
   SDLoc dl(N);
 
-  KnownBits LKnown, RKnown;
-  CurDAG->computeKnownBits(Op0, LKnown);
-  CurDAG->computeKnownBits(Op1, RKnown);
+  KnownBits LKnown = CurDAG->computeKnownBits(Op0);
+  KnownBits RKnown = CurDAG->computeKnownBits(Op1);
 
   unsigned TargetMask = LKnown.Zero.getZExtValue();
   unsigned InsertMask = RKnown.Zero.getZExtValue();
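
These call sites move to the computeKnownBits overload that returns its result by value instead of filling an out-parameter. A toy stand-in (not llvm::KnownBits) showing the by-value style together with the disjointness check used below:

    #include <cstdint>

    // Minimal stand-in for llvm::KnownBits, just to show the by-value style.
    struct KnownBits {
      uint64_t Zero = 0, One = 0;
    };

    KnownBits computeKnownBits(uint64_t Mask) {
      KnownBits K;
      K.Zero = ~Mask; // bits known to be zero under the mask
      return K;       // returned by value: no out-parameter to pre-declare
    }

    int main() {
      KnownBits LHS = computeKnownBits(0x0F);
      KnownBits RHS = computeKnownBits(0xF0);
      // Disjoint (the OR can be treated as an add) if every bit is known
      // zero on at least one side.
      return (~(LHS.Zero | RHS.Zero) == 0) ? 0 : 1;
    }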
@@ -733,8 +733,7 @@
        // The AND mask might not be a constant, and we need to make sure that
        // if we're going to fold the masking with the insert, all bits not
        // known to be zero in the mask are known to be one.
-        KnownBits MKnown;
-        CurDAG->computeKnownBits(Op1.getOperand(1), MKnown);
+        KnownBits MKnown = CurDAG->computeKnownBits(Op1.getOperand(1));
         bool CanFoldMask = InsertMask == MKnown.One.getZExtValue();
 
         unsigned SHOpc = Op1.getOperand(0).getOpcode();
@@ -1332,6 +1331,34 @@
 
       return std::make_pair(Interesting, &Bits);
     }
+    case ISD::TRUNCATE: {
+      EVT FromType = V.getOperand(0).getValueType();
+      EVT ToType = V.getValueType();
+      // We only support truncation from i64 to i32.
+      if (FromType != MVT::i64 || ToType != MVT::i32)
+        break;
+      const unsigned NumAllBits = FromType.getSizeInBits();
+      SmallVector<ValueBit, 64> *InBits;
+      std::tie(Interesting, InBits) = getValueBits(V.getOperand(0),
+                                                    NumAllBits);
+      const unsigned NumValidBits = ToType.getSizeInBits();
+
+      // A 32-bit instruction cannot touch the upper 32 bits of a 64-bit
+      // value, so we cannot include this truncate.
+      bool UseUpper32bit = false;
+      for (unsigned i = 0; i < NumValidBits; ++i)
+        if ((*InBits)[i].hasValue() && (*InBits)[i].getValueBitIndex() >= 32) {
+          UseUpper32bit = true;
+          break;
+        }
+      if (UseUpper32bit)
+        break;
+
+      for (unsigned i = 0; i < NumValidBits; ++i)
+        Bits[i] = (*InBits)[i];
+
+      return std::make_pair(Interesting, &Bits);
+    }
     case ISD::AssertZext: {
       // For AssertZext, we look through the operand and
       // mark the bits known to be zero.
@@ -1677,6 +1704,17 @@
     return ExtVal;
   }
 
+  SDValue TruncateToInt32(SDValue V, const SDLoc &dl) {
+    if (V.getValueSizeInBits() == 32)
+      return V;
+
+    assert(V.getValueSizeInBits() == 64);
+    SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32);
+    SDValue SubVal = SDValue(CurDAG->getMachineNode(PPC::EXTRACT_SUBREG, dl,
+                                                    MVT::i32, V, SubRegIdx), 0);
+    return SubVal;
+  }
+
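
TruncateToInt32 models an i64-to-i32 truncate as an EXTRACT_SUBREG of the 32-bit subregister; arithmetically that is just keeping the low 32 bits of the 64-bit GPR, as this sketch shows (plain C++ standing in for the subregister extraction):

    #include <cstdint>

    // Keeping the low 32 bits of a 64-bit value: the arithmetic effect of
    // extracting the sub_32 subregister from a 64-bit GPR.
    uint32_t truncToInt32(uint64_t V) { return static_cast<uint32_t>(V); }

    int main() {
      // The upper half is discarded; only bits 0..31 survive.
      return truncToInt32(0xDEADBEEF00C0FFEEULL) == 0x00C0FFEEu ? 0 : 1;
    }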
   // Depending on the number of groups for a particular value, it might be
   // better to rotate, mask explicitly (using andi/andis), and then or the
   // result. Select this part of the result first.
@@ -1735,12 +1773,12 @@
       SDValue VRot;
       if (VRI.RLAmt) {
         SDValue Ops[] =
-          { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl),
-            getI32Imm(31, dl) };
+          { TruncateToInt32(VRI.V, dl), getI32Imm(VRI.RLAmt, dl),
+            getI32Imm(0, dl), getI32Imm(31, dl) };
         VRot = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32,
                                               Ops), 0);
       } else {
-        VRot = VRI.V;
+        VRot = TruncateToInt32(VRI.V, dl);
       }
 
       SDValue ANDIVal, ANDISVal;
@@ -1792,12 +1830,12 @@
       if (VRI.RLAmt) {
         if (InstCnt) *InstCnt += 1;
         SDValue Ops[] =
-          { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl),
-            getI32Imm(31, dl) };
+          { TruncateToInt32(VRI.V, dl), getI32Imm(VRI.RLAmt, dl),
+            getI32Imm(0, dl), getI32Imm(31, dl) };
         Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops),
                       0);
       } else {
-        Res = VRI.V;
+        Res = TruncateToInt32(VRI.V, dl);
       }
 
       // Now, remove all groups with this underlying value and rotation factor.
@@ -1812,13 +1850,13 @@
     for (auto &BG : BitGroups) {
       if (!Res) {
         SDValue Ops[] =
-          { BG.V, getI32Imm(BG.RLAmt, dl),
+          { TruncateToInt32(BG.V, dl), getI32Imm(BG.RLAmt, dl),
             getI32Imm(Bits.size() - BG.EndIdx - 1, dl),
             getI32Imm(Bits.size() - BG.StartIdx - 1, dl) };
         Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
       } else {
         SDValue Ops[] =
-          { Res, BG.V, getI32Imm(BG.RLAmt, dl),
+          { Res, TruncateToInt32(BG.V, dl), getI32Imm(BG.RLAmt, dl),
               getI32Imm(Bits.size() - BG.EndIdx - 1, dl),
             getI32Imm(Bits.size() - BG.StartIdx - 1, dl) };
         Res = SDValue(CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops), 0);
@@ -4138,49 +4176,144 @@
   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});
 }
 
-/// This method returns a node after flipping the MSB of each element
-/// of vector integer type. Additionally, if SignBitVec is non-null,
-/// this method sets a node with one at MSB of all elements
-/// and zero at other bits in SignBitVec.
-MachineSDNode *
-PPCDAGToDAGISel::flipSignBit(const SDValue &N, SDNode **SignBitVec) {
-  SDLoc dl(N);
-  EVT VecVT = N.getValueType();
-  if (VecVT == MVT::v4i32) {
-    if (SignBitVec) {
-      SDNode *ZV = CurDAG->getMachineNode(PPC::V_SET0, dl, MVT::v4i32);
-      *SignBitVec = CurDAG->getMachineNode(PPC::XVNEGSP, dl, VecVT,
-                                        SDValue(ZV, 0));
-    }
-    return CurDAG->getMachineNode(PPC::XVNEGSP, dl, VecVT, N);
+static bool mayUseP9Setb(SDNode *N, const ISD::CondCode &CC, SelectionDAG *DAG,
+                         bool &NeedSwapOps, bool &IsUnCmp) {
+
+  assert(N->getOpcode() == ISD::SELECT_CC && "Expecting a SELECT_CC here.");
+
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  SDValue TrueRes = N->getOperand(2);
+  SDValue FalseRes = N->getOperand(3);
+  ConstantSDNode *TrueConst = dyn_cast<ConstantSDNode>(TrueRes);
+  if (!TrueConst)
+    return false;
+
+  assert((N->getSimpleValueType(0) == MVT::i64 ||
+          N->getSimpleValueType(0) == MVT::i32) &&
+         "Expecting either i64 or i32 here.");
+
+  // We are looking for any of:
+  // (select_cc lhs, rhs,  1, (sext (setcc [lr]hs, [lr]hs, cc2)), cc1)
+  // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, cc2)), cc1)
+  // (select_cc lhs, rhs,  0, (select_cc [lr]hs, [lr]hs,  1, -1, cc2), seteq)
+  // (select_cc lhs, rhs,  0, (select_cc [lr]hs, [lr]hs, -1,  1, cc2), seteq)
+  int64_t TrueResVal = TrueConst->getSExtValue();
+  if ((TrueResVal < -1 || TrueResVal > 1) ||
+      (TrueResVal == -1 && FalseRes.getOpcode() != ISD::ZERO_EXTEND) ||
+      (TrueResVal == 1 && FalseRes.getOpcode() != ISD::SIGN_EXTEND) ||
+      (TrueResVal == 0 &&
+       (FalseRes.getOpcode() != ISD::SELECT_CC || CC != ISD::SETEQ)))
+    return false;
+
+  bool InnerIsSel = FalseRes.getOpcode() == ISD::SELECT_CC;
+  SDValue SetOrSelCC = InnerIsSel ? FalseRes : FalseRes.getOperand(0);
+  if (SetOrSelCC.getOpcode() != ISD::SETCC &&
+      SetOrSelCC.getOpcode() != ISD::SELECT_CC)
+    return false;
+
+  // Without this setb optimization, the outer SELECT_CC will be manually
+  // selected to a SELECT_CC_I4/SELECT_CC_I8 pseudo, which the
+  // expand-isel-pseudos pass then transforms into an isel instruction. When
+  // the result has more than one use (e.g. a zext/sext), this optimization
+  // merely replaces the isel with a setb without any significant gain. Since
+  // setb has a longer latency than the original isel, we should avoid that.
+  // Another point is that setb requires the comparison to be kept, which can
+  // break a future opportunity to eliminate the comparison entirely.
+  if (!SetOrSelCC.hasOneUse() || (!InnerIsSel && !FalseRes.hasOneUse()))
+    return false;
+
+  SDValue InnerLHS = SetOrSelCC.getOperand(0);
+  SDValue InnerRHS = SetOrSelCC.getOperand(1);
+  ISD::CondCode InnerCC =
+      cast<CondCodeSDNode>(SetOrSelCC.getOperand(InnerIsSel ? 4 : 2))->get();
+  // If the inner comparison is a select_cc, make sure the true/false values are
+  // 1/-1 and canonicalize it if needed.
+  if (InnerIsSel) {
+    ConstantSDNode *SelCCTrueConst =
+        dyn_cast<ConstantSDNode>(SetOrSelCC.getOperand(2));
+    ConstantSDNode *SelCCFalseConst =
+        dyn_cast<ConstantSDNode>(SetOrSelCC.getOperand(3));
+    if (!SelCCTrueConst || !SelCCFalseConst)
+      return false;
+    int64_t SelCCTVal = SelCCTrueConst->getSExtValue();
+    int64_t SelCCFVal = SelCCFalseConst->getSExtValue();
+    // The values must be -1/1 (requiring a swap) or 1/-1.
+    if (SelCCTVal == -1 && SelCCFVal == 1) {
+      std::swap(InnerLHS, InnerRHS);
+    } else if (SelCCTVal != 1 || SelCCFVal != -1)
+      return false;
   }
-  else if (VecVT == MVT::v8i16) {
-    SDNode *Hi = CurDAG->getMachineNode(PPC::LIS, dl, MVT::i32,
-                                     getI32Imm(0x8000, dl));
-    SDNode *ScaImm = CurDAG->getMachineNode(PPC::ORI, dl, MVT::i32,
-                                         SDValue(Hi, 0),
-                                         getI32Imm(0x8000, dl));
-    SDNode *VecImm = CurDAG->getMachineNode(PPC::MTVSRWS, dl, VecVT,
-                                         SDValue(ScaImm, 0));
-    /*
-    Alternatively, we can do this as follow to use VRF instead of GPR.
-      vspltish 5, 1
-      vspltish 6, 15
-      vslh 5, 6, 5
-    */
-    if (SignBitVec) *SignBitVec = VecImm;
-    return CurDAG->getMachineNode(PPC::VADDUHM, dl, VecVT, N,
-                                  SDValue(VecImm, 0));
+
+  // Canonicalize unsigned case
+  if (InnerCC == ISD::SETULT || InnerCC == ISD::SETUGT) {
+    IsUnCmp = true;
+    InnerCC = (InnerCC == ISD::SETULT) ? ISD::SETLT : ISD::SETGT;
   }
-  else if (VecVT == MVT::v16i8) {
-    SDNode *VecImm = CurDAG->getMachineNode(PPC::XXSPLTIB, dl, MVT::i32,
-                                         getI32Imm(0x80, dl));
-    if (SignBitVec) *SignBitVec = VecImm;
-    return CurDAG->getMachineNode(PPC::VADDUBM, dl, VecVT, N,
-                                  SDValue(VecImm, 0));
+
+  bool InnerSwapped = false;
+  if (LHS == InnerRHS && RHS == InnerLHS)
+    InnerSwapped = true;
+  else if (LHS != InnerLHS || RHS != InnerRHS)
+    return false;
+
+  switch (CC) {
+  // (select_cc lhs, rhs,  0, \
+  //     (select_cc [lr]hs, [lr]hs, 1, -1, setlt/setgt), seteq)
+  case ISD::SETEQ:
+    if (!InnerIsSel)
+      return false;
+    if (InnerCC != ISD::SETLT && InnerCC != ISD::SETGT)
+      return false;
+    NeedSwapOps = (InnerCC == ISD::SETGT) ? InnerSwapped : !InnerSwapped;
+    break;
+
+  // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, setne)), setu?lt)
+  // (select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setgt)), setu?lt)
+  // (select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setlt)), setu?lt)
+  // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, setne)), setu?lt)
+  // (select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setgt)), setu?lt)
+  // (select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setlt)), setu?lt)
+  case ISD::SETULT:
+    if (!IsUnCmp && InnerCC != ISD::SETNE)
+      return false;
+    IsUnCmp = true;
+    LLVM_FALLTHROUGH;
+  case ISD::SETLT:
+    if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETGT && !InnerSwapped) ||
+        (InnerCC == ISD::SETLT && InnerSwapped))
+      NeedSwapOps = (TrueResVal == 1);
+    else
+      return false;
+    break;
+
+  // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, setne)), setu?gt)
+  // (select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setlt)), setu?gt)
+  // (select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setgt)), setu?gt)
+  // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, setne)), setu?gt)
+  // (select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setlt)), setu?gt)
+  // (select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setgt)), setu?gt)
+  case ISD::SETUGT:
+    if (!IsUnCmp && InnerCC != ISD::SETNE)
+      return false;
+    IsUnCmp = true;
+    LLVM_FALLTHROUGH;
+  case ISD::SETGT:
+    if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETLT && !InnerSwapped) ||
+        (InnerCC == ISD::SETGT && InnerSwapped))
+      NeedSwapOps = (TrueResVal == -1);
+    else
+      return false;
+    break;
+
+  default:
+    return false;
   }
-  else
-    llvm_unreachable("Unsupported vector data type for flipSignBit");
+
+  LLVM_DEBUG(dbgs() << "Found a node that can be lowered to a SETB: ");
+  LLVM_DEBUG(N->dump());
+
+  return true;
 }
 
 // Select - Convert the specified operand from a target-independent to a
@@ -4517,8 +4650,7 @@
     int16_t Imm;
     if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
         isIntS16Immediate(N->getOperand(1), Imm)) {
-      KnownBits LHSKnown;
-      CurDAG->computeKnownBits(N->getOperand(0), LHSKnown);
+      KnownBits LHSKnown = CurDAG->computeKnownBits(N->getOperand(0));
 
       // If this is equivalent to an add, then we can fold it with the
       // FrameIndex calculation.
@@ -4645,6 +4777,31 @@
         N->getOperand(0).getValueType() == MVT::i1)
       break;
 
+    if (PPCSubTarget->isISA3_0() && PPCSubTarget->isPPC64()) {
+      bool NeedSwapOps = false;
+      bool IsUnCmp = false;
+      if (mayUseP9Setb(N, CC, CurDAG, NeedSwapOps, IsUnCmp)) {
+        SDValue LHS = N->getOperand(0);
+        SDValue RHS = N->getOperand(1);
+        if (NeedSwapOps)
+          std::swap(LHS, RHS);
+
+        // Make use of SelectCC to generate the comparison to set the CR bits.
+        // For equality comparisons with one literal operand, SelectCC may not
+        // materialize the whole literal and instead check it with an xoris
+        // first, in which case the comparison result cannot exactly represent
+        // the GT/LT relationship. To avoid this we specify SETGT/SETUGT here
+        // instead of SETEQ.
+        SDValue GenCC =
+            SelectCC(LHS, RHS, IsUnCmp ? ISD::SETUGT : ISD::SETGT, dl);
+        CurDAG->SelectNodeTo(
+            N, N->getSimpleValueType(0) == MVT::i64 ? PPC::SETB8 : PPC::SETB,
+            N->getValueType(0), GenCC);
+        NumP9Setb++;
+        return;
+      }
+    }
+
     // Handle the setcc cases here.  select_cc lhs, 0, 1, 0, cc
     if (!isPPC64)
       if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
@@ -4993,55 +5150,6 @@
       return;
     }
   }
-  case ISD::ABS: {
-    assert(PPCSubTarget->hasP9Vector() && "ABS is supported with P9 Vector");
-
-    // For vector absolute difference, we use VABSDUW instruction of POWER9.
-    // Since VABSDU instructions are for unsigned integers, we need adjustment
-    // for signed integers.
-    // For abs(sub(a, b)), we generate VABSDUW(a+0x80000000, b+0x80000000).
-    // Otherwise, abs(sub(-1, 0)) returns 0xFFFFFFFF(=-1) instead of 1.
-    // For abs(a), we generate VABSDUW(a+0x80000000, 0x80000000).
-    EVT VecVT = N->getOperand(0).getValueType();
-    SDNode *AbsOp = nullptr;
-    unsigned AbsOpcode;
-
-    if (VecVT == MVT::v4i32)
-      AbsOpcode = PPC::VABSDUW;
-    else if (VecVT == MVT::v8i16)
-      AbsOpcode = PPC::VABSDUH;
-    else if (VecVT == MVT::v16i8)
-      AbsOpcode = PPC::VABSDUB;
-    else
-      llvm_unreachable("Unsupported vector data type for ISD::ABS");
-
-    // Even for signed integers, we can skip adjustment if all values are
-    // known to be positive (as signed integer) due to zero-extended inputs.
-    if (N->getOperand(0).getOpcode() == ISD::SUB &&
-        N->getOperand(0)->getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
-        N->getOperand(0)->getOperand(1).getOpcode() == ISD::ZERO_EXTEND) {
-      AbsOp = CurDAG->getMachineNode(AbsOpcode, dl, VecVT,
-                                     SDValue(N->getOperand(0)->getOperand(0)),
-                                     SDValue(N->getOperand(0)->getOperand(1)));
-      ReplaceNode(N, AbsOp);
-      return;
-    }
-    if (N->getOperand(0).getOpcode() == ISD::SUB) {
-      SDValue SubVal = N->getOperand(0);
-      SDNode *Op0 = flipSignBit(SubVal->getOperand(0));
-      SDNode *Op1 = flipSignBit(SubVal->getOperand(1));
-      AbsOp = CurDAG->getMachineNode(AbsOpcode, dl, VecVT,
-                                     SDValue(Op0, 0), SDValue(Op1, 0));
-    }
-    else {
-      SDNode *Op1 = nullptr;
-      SDNode *Op0 = flipSignBit(N->getOperand(0), &Op1);
-      AbsOp = CurDAG->getMachineNode(AbsOpcode, dl, VecVT, SDValue(Op0, 0),
-                                     SDValue(Op1, 0));
-    }
-    ReplaceNode(N, AbsOp);
-    return;
-  }
   }
 
   SelectCode(N);
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index dd99b71..39608cb 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -251,12 +251,6 @@
     setOperationAction(ISD::UREM, MVT::i64, Expand);
   }
 
-  if (Subtarget.hasP9Vector()) {
-    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
-    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
-    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
-  }
-
   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
@@ -556,6 +550,7 @@
       // add/sub are legal for all supported vector VT's.
       setOperationAction(ISD::ADD, VT, Legal);
       setOperationAction(ISD::SUB, VT, Legal);
+      setOperationAction(ISD::ABS, VT, Custom);
 
       // Vector instructions introduced in P8
       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
@@ -661,6 +656,11 @@
     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
 
+    // Without hasP8Altivec set, v2i64 SMAX isn't available.
+    // But ABS custom lowering requires SMAX support.
+    if (!Subtarget.hasP8Altivec())
+      setOperationAction(ISD::ABS, MVT::v2i64, Expand);
+
     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
@@ -788,8 +788,17 @@
       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
 
+      // Custom handling for partial vectors of integers converted to
+      // floating point. We already have optimal handling for v2i32 through
+      // the DAG combine, so those aren't necessary.
+      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
+      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
       setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
+      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
+      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
+      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
       setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
+      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
 
       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
@@ -1083,6 +1092,11 @@
     setTargetDAGCombine(ISD::FSQRT);
   }
 
+  if (Subtarget.hasP9Altivec()) {
+    setTargetDAGCombine(ISD::ABS);
+    setTargetDAGCombine(ISD::VSELECT);
+  }
+
   // Darwin long double math library functions have $LDBL128 appended.
   if (Subtarget.isDarwin()) {
     setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
@@ -1343,6 +1357,7 @@
   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
+  case PPCISD::VABSD:           return "PPCISD::VABSD";
   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
@@ -2210,11 +2225,10 @@
     // If this is an or of disjoint bitfields, we can codegen this as an add
     // (for better address arithmetic) if the LHS and RHS of the OR are provably
     // disjoint.
-    KnownBits LHSKnown, RHSKnown;
-    DAG.computeKnownBits(N.getOperand(0), LHSKnown);
+    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
 
     if (LHSKnown.Zero.getBoolValue()) {
-      DAG.computeKnownBits(N.getOperand(1), RHSKnown);
+      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
       // If all of the bits are known zero on the LHS or RHS, the add won't
       // carry.
       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
@@ -2313,8 +2327,7 @@
       // If this is an or of disjoint bitfields, we can codegen this as an add
       // (for better address arithmetic) if the LHS and RHS of the OR are
       // provably disjoint.
-      KnownBits LHSKnown;
-      DAG.computeKnownBits(N.getOperand(0), LHSKnown);
+      KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
 
       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
         // If all of the bits are known zero on the LHS or RHS, the add won't
@@ -5084,9 +5097,15 @@
 
   // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
   // into the call.
-  if (isSVR4ABI && isPPC64 && !isPatchPoint) {
+  // We do need to reserve X2 to appease the verifier for the PATCHPOINT.
+  if (isSVR4ABI && isPPC64) {
     setUsesTOCBasePtr(DAG);
-    Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
+
+    // We cannot add X2 as an operand here for PATCHPOINT, because there is no
+    // way to mark dependencies as implicit here. We will add the X2 dependency
+    // in EmitInstrWithCustomInserter.
+    if (!isPatchPoint)
+      Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
   }
 
   return CallOpc;
@@ -7284,43 +7303,49 @@
   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
 }
 
-SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op,
-                                                SelectionDAG &DAG,
+SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                                 const SDLoc &dl) const {
 
   unsigned Opc = Op.getOpcode();
   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
          "Unexpected conversion type");
-  assert(Op.getValueType() == MVT::v2f64 && "Supports v2f64 only.");
+  assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
+         "Supports conversions to v2f64/v4f32 only.");
 
-  // CPU's prior to P9 don't have a way to sign-extend in vectors.
   bool SignedConv = Opc == ISD::SINT_TO_FP;
-  if (SignedConv && !Subtarget.hasP9Altivec())
-    return SDValue();
+  bool FourEltRes = Op.getValueType() == MVT::v4f32;
 
   SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
   EVT WideVT = Wide.getValueType();
   unsigned WideNumElts = WideVT.getVectorNumElements();
+  MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
 
   SmallVector<int, 16> ShuffV;
   for (unsigned i = 0; i < WideNumElts; ++i)
     ShuffV.push_back(i + WideNumElts);
 
-  if (Subtarget.isLittleEndian()) {
-    ShuffV[0] = 0;
-    ShuffV[WideNumElts / 2] = 1;
-  }
-  else {
-    ShuffV[WideNumElts / 2 - 1] = 0;
-    ShuffV[WideNumElts - 1] = 1;
-  }
+  int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
+  int SaveElts = FourEltRes ? 4 : 2;
+  if (Subtarget.isLittleEndian())
+    for (int i = 0; i < SaveElts; i++)
+      ShuffV[i * Stride] = i;
+  else
+    for (int i = 1; i <= SaveElts; i++)
+      ShuffV[i * Stride - 1] = i - 1;
 
-  SDValue ShuffleSrc2 = SignedConv ? DAG.getUNDEF(WideVT) :
-                                     DAG.getConstant(0, dl, WideVT);
+  SDValue ShuffleSrc2 =
+      SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
-  unsigned ExtendOp = SignedConv ? (unsigned) PPCISD::SExtVElems :
-                                   (unsigned) ISD::BITCAST;
-  SDValue Extend = DAG.getNode(ExtendOp, dl, MVT::v2i64, Arrange);
+  unsigned ExtendOp =
+      SignedConv ? (unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST;
+
+  SDValue Extend;
+  if (!Subtarget.hasP9Altivec() && SignedConv) {
+    Arrange = DAG.getBitcast(IntermediateVT, Arrange);
+    Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
+                         DAG.getValueType(Op.getOperand(0).getValueType()));
+  } else
+    Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange);
 
   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
 }
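
The stride computation places the interesting source elements at the lane positions the later bitcast or sign-extend expects. A worked example reproducing the index math for a v4i16-to-v4f32 conversion on little-endian (eight wide elements, stride two), assuming the same scheme as above:

    #include <vector>

    int main() {
      // WideNumElts == 8 (v8i16), four-element result, little-endian:
      // lanes 0,2,4,6 take source elements 0..3; the rest pull zeros
      // (or undef) from the second shuffle operand.
      const unsigned WideNumElts = 8;
      const int SaveElts = 4;
      const int Stride = WideNumElts / SaveElts; // == 2
      std::vector<int> ShuffV;
      for (unsigned i = 0; i < WideNumElts; ++i)
        ShuffV.push_back(i + WideNumElts);       // default: second operand
      for (int i = 0; i < SaveElts; i++)
        ShuffV[i * Stride] = i;                  // little-endian placement
      const int Expected[8] = {0, 9, 1, 11, 2, 13, 3, 15};
      for (int i = 0; i < 8; ++i)
        if (ShuffV[i] != Expected[i]) return 1;
      return 0;
    }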
@@ -7329,8 +7354,10 @@
                                           SelectionDAG &DAG) const {
   SDLoc dl(Op);
 
-  if (Op.getValueType() == MVT::v2f64 &&
-      Op.getOperand(0).getValueType() == MVT::v2i16)
+  EVT InVT = Op.getOperand(0).getValueType();
+  EVT OutVT = Op.getValueType();
+  if (OutVT.isVector() && OutVT.isFloatingPoint() &&
+      isOperationCustom(Op.getOpcode(), InVT))
     return LowerINT_TO_FPVector(Op, DAG, dl);
 
   // Conversions to f128 are legal.
@@ -9003,35 +9030,6 @@
     return DAG.getRegister(PPC::R2, MVT::i32);
   }
 
-  // We are looking for absolute values here.
-  // The idea is to try to fit one of two patterns:
-  //  max (a, (0-a))  OR  max ((0-a), a)
-  if (Subtarget.hasP9Vector() &&
-      (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw ||
-       IntrinsicID == Intrinsic::ppc_altivec_vmaxsh ||
-       IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) {
-    SDValue V1 = Op.getOperand(1);
-    SDValue V2 = Op.getOperand(2);
-    if (V1.getSimpleValueType() == V2.getSimpleValueType() &&
-        (V1.getSimpleValueType() == MVT::v4i32 ||
-         V1.getSimpleValueType() == MVT::v8i16 ||
-         V1.getSimpleValueType() == MVT::v16i8)) {
-      if ( V1.getOpcode() == ISD::SUB &&
-           ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
-           V1.getOperand(1) == V2 ) {
-        // Generate the abs instruction with the operands
-        return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2);
-      }
-
-      if ( V2.getOpcode() == ISD::SUB &&
-           ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
-           V2.getOperand(1) == V1 ) {
-        // Generate the abs instruction with the operands
-        return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1);
-      }
-    }
-  }
-
   // If this is a lowered altivec predicate compare, CompareOpc is set to the
   // opcode number of the comparison.
   int CompareOpc;
@@ -9572,6 +9570,44 @@
   }
 }
 
+SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
+
+  assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
+
+  EVT VT = Op.getValueType();
+  assert(VT.isVector() &&
+         "Only set vector abs as custom, scalar abs shouldn't reach here!");
+  assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
+          VT == MVT::v16i8) &&
+         "Unexpected vector element type!");
+  assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
+         "Current subtarget doesn't support smax v2i64!");
+
+  // Vector abs can be lowered as:
+  // abs x
+  // ==>
+  // y = -x
+  // smax(x, y)
+
+  SDLoc dl(Op);
+  SDValue X = Op.getOperand(0);
+  SDValue Zero = DAG.getConstant(0, dl, VT);
+  SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
+
+  // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet, so
+  // use the intrinsics here for now.
+  // TODO: Use SMAX directly once the SMAX patch lands.
+  Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
+  if (VT == MVT::v2i64)
+    BifID = Intrinsic::ppc_altivec_vmaxsd;
+  else if (VT == MVT::v8i16)
+    BifID = Intrinsic::ppc_altivec_vmaxsh;
+  else if (VT == MVT::v16i8)
+    BifID = Intrinsic::ppc_altivec_vmaxsb;
+
+  return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
+}
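
The lowering rests on the identity abs(x) == smax(x, 0 - x). A scalar analogue checking the identity (a sketch of the math, not of the vector codegen, steering clear of INT_MIN where scalar negation would overflow):

    #include <algorithm>

    // abs(x) == smax(x, -x), with the negation written as 0 - x just like
    // the emitted ISD::SUB.
    int absViaSmax(int X) { return std::max(X, 0 - X); }

    int main() {
      return (absViaSmax(-7) == 7 && absViaSmax(0) == 0 &&
              absViaSmax(42) == 42) ? 0 : 1;
    }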
+
 /// LowerOperation - Provide custom lowering hooks for some operations.
 ///
 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
@@ -9624,6 +9660,7 @@
   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
   case ISD::MUL:                return LowerMUL(Op, DAG);
+  case ISD::ABS:                return LowerABS(Op, DAG);
 
   // For counter-based loop handling.
   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
@@ -9866,17 +9903,14 @@
   return BB;
 }
 
-MachineBasicBlock *
-PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
-                                            MachineBasicBlock *BB,
-                                            bool is8bit, // operation
-                                            unsigned BinOpcode,
-                                            unsigned CmpOpcode,
-                                            unsigned CmpPred) const {
+MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
+    MachineInstr &MI, MachineBasicBlock *BB,
+    bool is8bit, // operation
+    unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
   // If we support part-word atomic mnemonics, just use them
   if (Subtarget.hasPartwordAtomics())
-    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
-                            CmpOpcode, CmpPred);
+    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
+                            CmpPred);
 
   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
@@ -9900,7 +9934,7 @@
 
   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
   MachineBasicBlock *loop2MBB =
-    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
+      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
   F->insert(It, loopMBB);
   if (CmpOpcode)
@@ -9911,22 +9945,25 @@
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
   MachineRegisterInfo &RegInfo = F->getRegInfo();
-  const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
-                                          : &PPC::GPRCRegClass;
+  const TargetRegisterClass *RC =
+      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
+  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
+
   unsigned PtrReg = RegInfo.createVirtualRegister(RC);
-  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
+  unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
   unsigned ShiftReg =
-    isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
-  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
-  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
-  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
-  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
-  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
-  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
-  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
-  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
+      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
+  unsigned Incr2Reg = RegInfo.createVirtualRegister(GPRC);
+  unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
+  unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
+  unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
+  unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
+  unsigned Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
+  unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
+  unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
   unsigned Ptr1Reg;
-  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
+  unsigned TmpReg =
+      (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
 
   //  thisMBB:
   //   ...
@@ -9955,82 +9992,107 @@
   if (ptrA != ZeroReg) {
     Ptr1Reg = RegInfo.createVirtualRegister(RC);
     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
-      .addReg(ptrA).addReg(ptrB);
+        .addReg(ptrA)
+        .addReg(ptrB);
   } else {
     Ptr1Reg = ptrB;
   }
-  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
-      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
+  // Use the 32-bit subregister here to avoid a register class mismatch in
+  // 64-bit mode.
+  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
+      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
+      .addImm(3)
+      .addImm(27)
+      .addImm(is8bit ? 28 : 27);
   if (!isLittleEndian)
-    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
-        .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
+    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
+        .addReg(Shift1Reg)
+        .addImm(is8bit ? 24 : 16);
   if (is64bit)
     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
-      .addReg(Ptr1Reg).addImm(0).addImm(61);
+        .addReg(Ptr1Reg)
+        .addImm(0)
+        .addImm(61);
   else
     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
-      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
-  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
-      .addReg(incr).addReg(ShiftReg);
+        .addReg(Ptr1Reg)
+        .addImm(0)
+        .addImm(0)
+        .addImm(29);
+  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
   if (is8bit)
     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
   else {
     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
-    BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535);
+    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
+        .addReg(Mask3Reg)
+        .addImm(65535);
   }
   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
-      .addReg(Mask2Reg).addReg(ShiftReg);
+      .addReg(Mask2Reg)
+      .addReg(ShiftReg);
 
   BB = loopMBB;
   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
-    .addReg(ZeroReg).addReg(PtrReg);
+      .addReg(ZeroReg)
+      .addReg(PtrReg);
   if (BinOpcode)
     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
-      .addReg(Incr2Reg).addReg(TmpDestReg);
-  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
-    .addReg(TmpDestReg).addReg(MaskReg);
-  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
-    .addReg(TmpReg).addReg(MaskReg);
+        .addReg(Incr2Reg)
+        .addReg(TmpDestReg);
+  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
+      .addReg(TmpDestReg)
+      .addReg(MaskReg);
+  BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
   if (CmpOpcode) {
     // For unsigned comparisons, we can directly compare the shifted values.
     // For signed comparisons we shift and sign extend.
-    unsigned SReg = RegInfo.createVirtualRegister(RC);
-    BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
-      .addReg(TmpDestReg).addReg(MaskReg);
+    unsigned SReg = RegInfo.createVirtualRegister(GPRC);
+    BuildMI(BB, dl, TII->get(PPC::AND), SReg)
+        .addReg(TmpDestReg)
+        .addReg(MaskReg);
     unsigned ValueReg = SReg;
     unsigned CmpReg = Incr2Reg;
     if (CmpOpcode == PPC::CMPW) {
-      ValueReg = RegInfo.createVirtualRegister(RC);
+      ValueReg = RegInfo.createVirtualRegister(GPRC);
       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
-        .addReg(SReg).addReg(ShiftReg);
-      unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
+          .addReg(SReg)
+          .addReg(ShiftReg);
+      unsigned ValueSReg = RegInfo.createVirtualRegister(GPRC);
       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
-        .addReg(ValueReg);
+          .addReg(ValueReg);
       ValueReg = ValueSReg;
       CmpReg = incr;
     }
     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
-      .addReg(CmpReg).addReg(ValueReg);
+        .addReg(CmpReg)
+        .addReg(ValueReg);
     BuildMI(BB, dl, TII->get(PPC::BCC))
-      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
+        .addImm(CmpPred)
+        .addReg(PPC::CR0)
+        .addMBB(exitMBB);
     BB->addSuccessor(loop2MBB);
     BB->addSuccessor(exitMBB);
     BB = loop2MBB;
   }
-  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
-    .addReg(Tmp3Reg).addReg(Tmp2Reg);
+  BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
   BuildMI(BB, dl, TII->get(PPC::STWCX))
-    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
+      .addReg(Tmp4Reg)
+      .addReg(ZeroReg)
+      .addReg(PtrReg);
   BuildMI(BB, dl, TII->get(PPC::BCC))
-    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
+      .addImm(PPC::PRED_NE)
+      .addReg(PPC::CR0)
+      .addMBB(loopMBB);
   BB->addSuccessor(loopMBB);
   BB->addSuccessor(exitMBB);
 
   //  exitMBB:
   //   ...
   BB = exitMBB;
-  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
-    .addReg(ShiftReg);
+  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
+      .addReg(TmpDestReg)
+      .addReg(ShiftReg);
   return BB;
 }
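For context, the masked loop emitted above implements a partword atomic read-modify-write: the byte/halfword is shifted into position within its aligned word, the lwarx/stwcx. pair operates on the whole word under the mask, and the final SRW recovers the old value. A minimal source-level sketch of what reaches this expansion (function name and types are illustrative, not from the patch):

    #include <atomic>
    #include <cstdint>

    // On PPC there is no byte-sized atomic add instruction, so an 8-bit
    // fetch_add is lowered to the word-sized lwarx/stwcx. loop built above.
    uint8_t fetch_add_u8(std::atomic<uint8_t> &A, uint8_t V) {
      return A.fetch_add(V, std::memory_order_relaxed);
    }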
 
@@ -10290,7 +10352,6 @@
       // way to mark the dependence as implicit there, and so the stackmap code
       // will confuse it with a regular operand. Instead, add the dependence
       // here.
-      setUsesTOCBasePtr(*BB->getParent());
       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
     }
 
@@ -10315,8 +10376,8 @@
   MachineFunction *F = BB->getParent();
 
   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
-       MI.getOpcode() == PPC::SELECT_CC_I8 ||
-       MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) {
+      MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
+      MI.getOpcode() == PPC::SELECT_I8) {
     SmallVector<MachineOperand, 2> Cond;
     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
         MI.getOpcode() == PPC::SELECT_CC_I8)
@@ -10461,9 +10522,12 @@
     unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
 
     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
-      .addReg(HiReg).addReg(ReadAgainReg);
+        .addReg(HiReg)
+        .addReg(ReadAgainReg);
     BuildMI(BB, dl, TII->get(PPC::BCC))
-      .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);
+        .addImm(PPC::PRED_NE)
+        .addReg(CmpReg)
+        .addMBB(readMBB);
 
     BB->addSuccessor(readMBB);
     BB->addSuccessor(sinkMBB);
@@ -10633,27 +10697,35 @@
     //   st[bhwd]cx. dest, ptr
     // exitBB:
     BB = loop1MBB;
-    BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
-      .addReg(ptrA).addReg(ptrB);
+    BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
-      .addReg(oldval).addReg(dest);
+        .addReg(oldval)
+        .addReg(dest);
     BuildMI(BB, dl, TII->get(PPC::BCC))
-      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
+        .addImm(PPC::PRED_NE)
+        .addReg(PPC::CR0)
+        .addMBB(midMBB);
     BB->addSuccessor(loop2MBB);
     BB->addSuccessor(midMBB);
 
     BB = loop2MBB;
     BuildMI(BB, dl, TII->get(StoreMnemonic))
-      .addReg(newval).addReg(ptrA).addReg(ptrB);
+        .addReg(newval)
+        .addReg(ptrA)
+        .addReg(ptrB);
     BuildMI(BB, dl, TII->get(PPC::BCC))
-      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
+        .addImm(PPC::PRED_NE)
+        .addReg(PPC::CR0)
+        .addMBB(loop1MBB);
     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
     BB->addSuccessor(loop1MBB);
     BB->addSuccessor(exitMBB);
 
     BB = midMBB;
     BuildMI(BB, dl, TII->get(StoreMnemonic))
-      .addReg(dest).addReg(ptrA).addReg(ptrB);
+        .addReg(dest)
+        .addReg(ptrA)
+        .addReg(ptrB);
     BB->addSuccessor(exitMBB);
 
     //  exitMBB:
@@ -10688,24 +10760,26 @@
     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
     MachineRegisterInfo &RegInfo = F->getRegInfo();
-    const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
-                                            : &PPC::GPRCRegClass;
+    const TargetRegisterClass *RC =
+        is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
+    const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
+
     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
-    unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
+    unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
     unsigned ShiftReg =
-      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
-    unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
-    unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
-    unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
-    unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
-    unsigned MaskReg = RegInfo.createVirtualRegister(RC);
-    unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
-    unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
-    unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
-    unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
-    unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
+        isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
+    unsigned NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
+    unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
+    unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
     unsigned Ptr1Reg;
-    unsigned TmpReg = RegInfo.createVirtualRegister(RC);
+    unsigned TmpReg = RegInfo.createVirtualRegister(GPRC);
     unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
     //  thisMBB:
     //   ...
@@ -10742,74 +10816,107 @@
     if (ptrA != ZeroReg) {
       Ptr1Reg = RegInfo.createVirtualRegister(RC);
       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
-        .addReg(ptrA).addReg(ptrB);
+          .addReg(ptrA)
+          .addReg(ptrB);
     } else {
       Ptr1Reg = ptrB;
     }
-    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
-        .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
+
+    // We need to use a 32-bit subregister here to avoid a register class
+    // mismatch in 64-bit mode.
+    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
+        .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
+        .addImm(3)
+        .addImm(27)
+        .addImm(is8bit ? 28 : 27);
     if (!isLittleEndian)
-      BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
-          .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
+      BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
+          .addReg(Shift1Reg)
+          .addImm(is8bit ? 24 : 16);
     if (is64bit)
       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
-        .addReg(Ptr1Reg).addImm(0).addImm(61);
+          .addReg(Ptr1Reg)
+          .addImm(0)
+          .addImm(61);
     else
       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
-        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
+          .addReg(Ptr1Reg)
+          .addImm(0)
+          .addImm(0)
+          .addImm(29);
     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
-        .addReg(newval).addReg(ShiftReg);
+        .addReg(newval)
+        .addReg(ShiftReg);
     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
-        .addReg(oldval).addReg(ShiftReg);
+        .addReg(oldval)
+        .addReg(ShiftReg);
     if (is8bit)
       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
     else {
       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
-        .addReg(Mask3Reg).addImm(65535);
+          .addReg(Mask3Reg)
+          .addImm(65535);
     }
     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
-        .addReg(Mask2Reg).addReg(ShiftReg);
+        .addReg(Mask2Reg)
+        .addReg(ShiftReg);
     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
-        .addReg(NewVal2Reg).addReg(MaskReg);
+        .addReg(NewVal2Reg)
+        .addReg(MaskReg);
     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
-        .addReg(OldVal2Reg).addReg(MaskReg);
+        .addReg(OldVal2Reg)
+        .addReg(MaskReg);
 
     BB = loop1MBB;
     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
-        .addReg(ZeroReg).addReg(PtrReg);
-    BuildMI(BB, dl, TII->get(PPC::AND),TmpReg)
-        .addReg(TmpDestReg).addReg(MaskReg);
+        .addReg(ZeroReg)
+        .addReg(PtrReg);
+    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
+        .addReg(TmpDestReg)
+        .addReg(MaskReg);
     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
-        .addReg(TmpReg).addReg(OldVal3Reg);
+        .addReg(TmpReg)
+        .addReg(OldVal3Reg);
     BuildMI(BB, dl, TII->get(PPC::BCC))
-        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
+        .addImm(PPC::PRED_NE)
+        .addReg(PPC::CR0)
+        .addMBB(midMBB);
     BB->addSuccessor(loop2MBB);
     BB->addSuccessor(midMBB);
 
     BB = loop2MBB;
-    BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg)
-        .addReg(TmpDestReg).addReg(MaskReg);
-    BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg)
-        .addReg(Tmp2Reg).addReg(NewVal3Reg);
-    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
-        .addReg(ZeroReg).addReg(PtrReg);
+    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
+        .addReg(TmpDestReg)
+        .addReg(MaskReg);
+    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
+        .addReg(Tmp2Reg)
+        .addReg(NewVal3Reg);
+    BuildMI(BB, dl, TII->get(PPC::STWCX))
+        .addReg(Tmp4Reg)
+        .addReg(ZeroReg)
+        .addReg(PtrReg);
     BuildMI(BB, dl, TII->get(PPC::BCC))
-      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
+        .addImm(PPC::PRED_NE)
+        .addReg(PPC::CR0)
+        .addMBB(loop1MBB);
     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
     BB->addSuccessor(loop1MBB);
     BB->addSuccessor(exitMBB);
 
     BB = midMBB;
-    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
-      .addReg(ZeroReg).addReg(PtrReg);
+    BuildMI(BB, dl, TII->get(PPC::STWCX))
+        .addReg(TmpDestReg)
+        .addReg(ZeroReg)
+        .addReg(PtrReg);
     BB->addSuccessor(exitMBB);
 
     //  exitMBB:
     //   ...
     BB = exitMBB;
-    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg)
-      .addReg(ShiftReg);
+    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
+        .addReg(TmpReg)
+        .addReg(ShiftReg);
   } else if (MI.getOpcode() == PPC::FADDrtz) {
     // This pseudo performs an FADD with rounding mode temporarily forced
     // to round-to-zero.  We emit this via custom inserter since the FPSCR
@@ -10846,9 +10953,8 @@
                  MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
 
     MachineRegisterInfo &RegInfo = F->getRegInfo();
-    unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
-                                                  &PPC::GPRCRegClass :
-                                                  &PPC::G8RCRegClass);
+    unsigned Dest = RegInfo.createVirtualRegister(
+        Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
 
     DebugLoc dl = MI.getDebugLoc();
     BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
@@ -11300,9 +11406,8 @@
     } else {
       // This is neither a signed nor an unsigned comparison, just make sure
       // that the high bits are equal.
-      KnownBits Op1Known, Op2Known;
-      DAG.computeKnownBits(N->getOperand(0), Op1Known);
-      DAG.computeKnownBits(N->getOperand(1), Op2Known);
+      KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
+      KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
 
       // We don't really care about what is known about the first bit (if
       // anything), so clear it in all masks prior to comparing them.
@@ -12985,6 +13090,39 @@
           }
         }
       }
+
+      // Combine vmaxsw/h/b(a, a's negation) to abs(a), which exposes the
+      // vabsduw/h/b opportunity downstream.
+      if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
+          (IID == Intrinsic::ppc_altivec_vmaxsw ||
+           IID == Intrinsic::ppc_altivec_vmaxsh ||
+           IID == Intrinsic::ppc_altivec_vmaxsb)) {
+        SDValue V1 = N->getOperand(1);
+        SDValue V2 = N->getOperand(2);
+        if ((V1.getSimpleValueType() == MVT::v4i32 ||
+             V1.getSimpleValueType() == MVT::v8i16 ||
+             V1.getSimpleValueType() == MVT::v16i8) &&
+            V1.getSimpleValueType() == V2.getSimpleValueType()) {
+          // (0-a, a)
+          if (V1.getOpcode() == ISD::SUB &&
+              ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
+              V1.getOperand(1) == V2) {
+            return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
+          }
+          // (a, 0-a)
+          if (V2.getOpcode() == ISD::SUB &&
+              ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
+              V2.getOperand(1) == V1) {
+            return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
+          }
+          // (x-y, y-x)
+          if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
+              V1.getOperand(0) == V2.getOperand(1) &&
+              V1.getOperand(1) == V2.getOperand(0)) {
+            return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
+          }
+        }
+      }
     }
 
     break;
@@ -13217,6 +13355,10 @@
   }
   case ISD::BUILD_VECTOR:
     return DAGCombineBuildVector(N, DCI);
+  case ISD::ABS:
+    return combineABS(N, DCI);
+  case ISD::VSELECT:
+    return combineVSelect(N, DCI);
   }
 
   return SDValue();
@@ -14339,7 +14481,7 @@
     return SDValue();
 
   SDLoc DL(N);
-  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i64);
+  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
   SDValue Cmp = RHS.getOperand(0);
   SDValue Z = Cmp.getOperand(0);
   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
@@ -14503,3 +14645,109 @@
   // For non-constant masks, we can always use the record-form and.
   return true;
 }
+
+// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
+// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
+// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
+// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
+// Transform (abs (sub a, b)) to (vabsd a b 1) if a & b are of type v4i32
+SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
+  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
+  assert(Subtarget.hasP9Altivec() &&
+         "Only combine this when P9 altivec supported!");
+  EVT VT = N->getValueType(0);
+  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc dl(N);
+  if (N->getOperand(0).getOpcode() == ISD::SUB) {
+    // Even for signed integers, VABSD is usable here: both inputs are
+    // zero-extended, so they are known to be non-negative as signed values.
+    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
+    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
+    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
+         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
+        (SubOpcd1 == ISD::ZERO_EXTEND ||
+         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
+      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
+                         N->getOperand(0)->getOperand(0),
+                         N->getOperand(0)->getOperand(1),
+                         DAG.getTargetConstant(0, dl, MVT::i32));
+    }
+
+    // For type v4i32, it can be optimized with xvnegsp + vabsduw
+    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
+        N->getOperand(0).hasOneUse()) {
+      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
+                         N->getOperand(0)->getOperand(0),
+                         N->getOperand(0)->getOperand(1),
+                         DAG.getTargetConstant(1, dl, MVT::i32));
+    }
+  }
+
+  return SDValue();
+}
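A hedged source-level illustration of the first transform above: when both subtraction operands are zero-extended, the absolute difference is expressible directly as an unsigned VABSD. The function name is an assumption, not from the patch; on a P9 Altivec target the vectorizer is expected to produce the (abs (sub (zext a), (zext b))) shape this combine matches.

    #include <cstdint>
    #include <cstdlib>

    // Each |a[i] - b[i]| over zero-extended bytes maps to the
    // (abs (sub (zext a), (zext b))) DAG shape handled by combineABS.
    void absdiff_u8(const uint8_t *a, const uint8_t *b, uint8_t *c, int n) {
      for (int i = 0; i < n; ++i)
        c[i] = (uint8_t)std::abs((int32_t)a[i] - (int32_t)b[i]);
    }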
+
+// For type v4i32/v8i16/v16i8, transform
+// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
+// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
+// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
+// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
+SDValue PPCTargetLowering::combineVSelect(SDNode *N,
+                                          DAGCombinerInfo &DCI) const {
+  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
+  assert(Subtarget.hasP9Altivec() &&
+         "Only combine this when P9 altivec supported!");
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc dl(N);
+  SDValue Cond = N->getOperand(0);
+  SDValue TrueOpnd = N->getOperand(1);
+  SDValue FalseOpnd = N->getOperand(2);
+  EVT VT = N->getOperand(1).getValueType();
+
+  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
+      FalseOpnd.getOpcode() != ISD::SUB)
+    return SDValue();
+
+  // VABSD is only available for type v4i32/v8i16/v16i8.
+  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
+    return SDValue();
+
+  // Require at least one operand to have a single use, so the combine saves
+  // at least one dependent computation.
+  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
+    return SDValue();
+
+  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
+
+  // Only unsigned comparisons can be handled here.
+  switch (CC) {
+  default:
+    return SDValue();
+  case ISD::SETUGT:
+  case ISD::SETUGE:
+    break;
+  case ISD::SETULT:
+  case ISD::SETULE:
+    std::swap(TrueOpnd, FalseOpnd);
+    break;
+  }
+
+  SDValue CmpOpnd1 = Cond.getOperand(0);
+  SDValue CmpOpnd2 = Cond.getOperand(1);
+
+  // SETCC CmpOpnd1 CmpOpnd2 cond
+  // TrueOpnd = CmpOpnd1 - CmpOpnd2
+  // FalseOpnd = CmpOpnd2 - CmpOpnd1
+  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
+      TrueOpnd.getOperand(1) == CmpOpnd2 &&
+      FalseOpnd.getOperand(0) == CmpOpnd2 &&
+      FalseOpnd.getOperand(1) == CmpOpnd1) {
+    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
+                       CmpOpnd1, CmpOpnd2,
+                       DAG.getTargetConstant(0, dl, MVT::i32));
+  }
+
+  return SDValue();
+}
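A sketch of the source pattern combineVSelect targets (illustrative only): an unsigned compare selecting between the two subtraction orders is exactly the (vselect (setcc ...), (sub a, b), (sub b, a)) form rewritten to VABSD.

    #include <cstdint>

    // a[i] > b[i] ? a[i] - b[i] : b[i] - a[i] vectorizes into the
    // vselect/setcc/sub shape that this combine folds into vabsduw.
    void absdiff_u32(const uint32_t *a, const uint32_t *b, uint32_t *c,
                     int n) {
      for (int i = 0; i < n; ++i)
        c[i] = a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
    }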
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 7a6c22f..30acd60 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -373,6 +373,21 @@
       /// An SDNode for swaps that are not associated with any loads/stores
       /// and thereby have no chain.
       SWAP_NO_CHAIN,
+
+      /// An SDNode for Power9 vector absolute value difference.
+      /// operand #0: vector
+      /// operand #1: vector
+      /// operand #2: constant i32 0 or 1, indicating whether the sign bits
+      /// need to be flipped first (for signed i32 elements)
+      ///
+      /// The Power9 VABSD* instructions are designed for unsigned integer
+      /// vectors (byte/halfword/word). To use them for signed integer
+      /// vectors, we have to flip the sign bits first. Flipping the sign
+      /// bits of a byte/halfword vector is inefficient, but for a word
+      /// vector we can leverage XVNEGSP to do it efficiently, e.g.:
+      /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
+      ///               => VABSDUW((XVNEGSP a), (XVNEGSP b))
+      VABSD,
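The sign-flip identity relied on above can be checked directly: adding (or XOR-ing) 0x80000000 maps signed order onto unsigned order, so the unsigned absolute difference of the biased values equals |a - b|. A small self-contained check, illustrative and not part of the patch:

    #include <cstdint>
    #include <cstdlib>

    static uint32_t uabsdiff(uint32_t x, uint32_t y) {
      return x > y ? x - y : y - x;
    }

    // Holds for all int32_t a, b; this is why VABSDUW over XVNEGSP'd
    // inputs computes the signed absolute difference.
    bool biasIdentityHolds(int32_t a, int32_t b) {
      uint32_t signedAbsDiff = (uint32_t)std::llabs((int64_t)a - (int64_t)b);
      return signedAbsDiff ==
             uabsdiff((uint32_t)a ^ 0x80000000u, (uint32_t)b ^ 0x80000000u);
    }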
 
       /// QVFPERM = This corresponds to the QPX qvfperm instruction.
       QVFPERM,
@@ -561,6 +576,11 @@
     /// DAG node.
     const char *getTargetNodeName(unsigned Opcode) const override;
 
+    bool isSelectSupported(SelectSupportKind Kind) const override {
+      // PowerPC does not support scalar condition selects on vectors.
+      return (Kind != SelectSupportKind::ScalarCondVectorVal);
+    }
+
     /// getPreferredVectorAction - The code we generate when vector types are
     /// legalized by promoting the integer element type is often much worse
     /// than code we generate if we widen the type for applicable vector types.
@@ -998,6 +1018,7 @@
     SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
 
     SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
@@ -1101,6 +1122,8 @@
     SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
     SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
     SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
+    SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
+    SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;
 
     /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
     /// SETCC with integer subtraction when (1) there is a legal way of doing it
diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td
index f8bfc40..2ce6ad3 100644
--- a/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -777,8 +777,12 @@
                        "maddhdu $RT, $RA, $RB, $RC", IIC_IntMulHD, []>, isPPC64;
 def MADDLD : VAForm_1a<51, (outs g8rc :$RT), (ins g8rc:$RA, g8rc:$RB, g8rc:$RC),
                        "maddld $RT, $RA, $RB, $RC", IIC_IntMulHD, []>, isPPC64;
-def SETB : XForm_44<31, 128, (outs g8rc:$RT), (ins crrc:$BFA),
-                     "setb $RT, $BFA", IIC_IntGeneral>, isPPC64;
+def SETB : XForm_44<31, 128, (outs gprc:$RT), (ins crrc:$BFA),
+                       "setb $RT, $BFA", IIC_IntGeneral>, isPPC64;
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
+  def SETB8 : XForm_44<31, 128, (outs g8rc:$RT), (ins crrc:$BFA),
+                       "setb $RT, $BFA", IIC_IntGeneral>, isPPC64;
+}
 def DARN : XForm_45<31, 755, (outs g8rc:$RT), (ins i32imm:$L),
                      "darn $RT, $L", IIC_LdStLD>, isPPC64;
 def ADDPCIS : DXForm<19, 2, (outs g8rc:$RT), (ins i32imm:$D),
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 0694af6..d754ce2 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2248,6 +2248,35 @@
   return PPC::NoRegister;
 }
 
+void PPCInstrInfo::replaceInstrOperandWithImm(MachineInstr &MI,
+                                              unsigned OpNo,
+                                              int64_t Imm) const {
+  assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");
+  // Replace the REG with the Immediate.
+  unsigned InUseReg = MI.getOperand(OpNo).getReg();
+  MI.getOperand(OpNo).ChangeToImmediate(Imm);
+
+  if (empty(MI.implicit_operands()))
+    return;
+
+  // Make sure that the MI does not retain any implicit use of this REG.
+  const TargetRegisterInfo *TRI = &getRegisterInfo();
+  int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, false, TRI);
+  if (UseOpIdx >= 0) {
+    MachineOperand &MO = MI.getOperand(UseOpIdx);
+    if (MO.isImplicit())
+      // The operands must always be in the following order:
+      // - explicit reg defs,
+      // - other explicit operands (reg uses, immediates, etc.),
+      // - implicit reg defs
+      // - implicit reg uses
+      // Therefore, removing the implicit operand won't change the explicit
+      // operands layout.
+      MI.RemoveOperand(UseOpIdx);
+  }
+}
+
 // Replace an instruction with one that materializes a constant (and sets
 // CR0 if the original instruction was a record-form instruction).
 void PPCInstrInfo::replaceInstrWithLI(MachineInstr &MI,
@@ -2481,7 +2510,7 @@
       // Can't use PPC::COPY to copy PPC::ZERO[8]. Convert it to LI[8] 0.
       if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
         CompareUseMI.setDesc(get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
-        CompareUseMI.getOperand(1).ChangeToImmediate(0);
+        replaceInstrOperandWithImm(CompareUseMI, 1, 0);
         CompareUseMI.RemoveOperand(3);
         CompareUseMI.RemoveOperand(2);
         continue;
@@ -3292,7 +3321,7 @@
   if (ImmMO->isImm()) {
     // If the ImmMO is Imm, change the operand that has ZERO to that Imm
     // directly.
-    MI.getOperand(III.ZeroIsSpecialOrig).ChangeToImmediate(Imm);
+    replaceInstrOperandWithImm(MI, III.ZeroIsSpecialOrig, Imm);
   }
   else {
     // Otherwise, it is Constant Pool Index(CPI) or Global,
@@ -3411,24 +3440,24 @@
           uint64_t SH = RightShift ? 32 - ShAmt : ShAmt;
           uint64_t MB = RightShift ? ShAmt : 0;
           uint64_t ME = RightShift ? 31 : 31 - ShAmt;
-          MI.getOperand(III.OpNoForForwarding).ChangeToImmediate(SH);
+          replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
           MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(MB)
             .addImm(ME);
         } else {
           // Left shifts use (N, 63-N), right shifts use (64-N, N).
           uint64_t SH = RightShift ? 64 - ShAmt : ShAmt;
           uint64_t ME = RightShift ? ShAmt : 63 - ShAmt;
-          MI.getOperand(III.OpNoForForwarding).ChangeToImmediate(SH);
+          replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
           MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(ME);
         }
       }
     } else
-      MI.getOperand(ConstantOpNo).ChangeToImmediate(Imm);
+      replaceInstrOperandWithImm(MI, ConstantOpNo, Imm);
   }
  // Convert commutative instructions (switch the operands and convert the
  // desired one to an immediate).
   else if (III.IsCommutative) {
-    MI.getOperand(ConstantOpNo).ChangeToImmediate(Imm);
+    replaceInstrOperandWithImm(MI, ConstantOpNo, Imm);
     swapMIOperands(MI, ConstantOpNo, III.OpNoForForwarding);
   } else
     llvm_unreachable("Should have exited early!");
@@ -3438,15 +3467,20 @@
   if (III.OpNoForForwarding != III.ImmOpNo)
     swapMIOperands(MI, III.OpNoForForwarding, III.ImmOpNo);
 
-  // If the R0/X0 register is special for the original instruction and not for
-  // the new instruction (or vice versa), we need to fix up the register class.
+  // If the special R0/X0 register indices differ between the original and the
+  // new instruction, we need to fix up the register class in the new
+  // instruction.
   if (!PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) {
-    if (!III.ZeroIsSpecialOrig) {
+    if (III.ZeroIsSpecialNew) {
+      // If the operand at III.ZeroIsSpecialNew is a physical register (e.g.
+      // ZERO/ZERO8), there is no need to fix up the register class.
       unsigned RegToModify = MI.getOperand(III.ZeroIsSpecialNew).getReg();
-      const TargetRegisterClass *NewRC =
-        MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
-        &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
-      MRI.setRegClass(RegToModify, NewRC);
+      if (TargetRegisterInfo::isVirtualRegister(RegToModify)) {
+        const TargetRegisterClass *NewRC =
+          MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
+          &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
+        MRI.setRegClass(RegToModify, NewRC);
+      }
     }
   }
   return true;
@@ -3488,6 +3522,7 @@
       Opcode == PPC::EXTSH  || Opcode == PPC::EXTSHo  ||
       Opcode == PPC::EXTSB8 || Opcode == PPC::EXTSH8  ||
       Opcode == PPC::EXTSW  || Opcode == PPC::EXTSWo  ||
+      Opcode == PPC::SETB   || Opcode == PPC::SETB8   ||
       Opcode == PPC::EXTSH8_32_64 || Opcode == PPC::EXTSW_32_64 ||
       Opcode == PPC::EXTSB8_32_64)
     return true;
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index 9c556e3..7ed558b 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -413,6 +413,8 @@
   bool convertToImmediateForm(MachineInstr &MI,
                               MachineInstr **KilledDef = nullptr) const;
   void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const;
+  void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo,
+                                  int64_t Imm) const;
 
   bool instrHasImmForm(const MachineInstr &MI, ImmInstrInfo &III,
                        bool PostRA) const;
diff --git a/lib/Target/PowerPC/PPCInstrVSX.td b/lib/Target/PowerPC/PPCInstrVSX.td
index bae25ad..0f07338 100644
--- a/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/lib/Target/PowerPC/PPCInstrVSX.td
@@ -67,6 +67,10 @@
 def SDTVecConv : SDTypeProfile<1, 2, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>
 ]>;
+def SDTVabsd : SDTypeProfile<1, 3, [
+  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<3, i32>
+]>;
+
 
 def PPClxvd2x  : SDNode<"PPCISD::LXVD2X", SDT_PPClxvd2x,
                         [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
@@ -79,6 +83,7 @@
 def PPCsvec2fp : SDNode<"PPCISD::SINT_VEC_TO_FP", SDTVecConv, []>;
 def PPCuvec2fp: SDNode<"PPCISD::UINT_VEC_TO_FP", SDTVecConv, []>;
 def PPCswapNoChain : SDNode<"PPCISD::SWAP_NO_CHAIN", SDT_PPCxxswapd>;
+def PPCvabsd : SDNode<"PPCISD::VABSD", SDTVabsd, []>;
 
 multiclass XX3Form_Rcr<bits<6> opcode, bits<7> xo, string asmbase,
                     string asmstr, InstrItinClass itin, Intrinsic Int,
@@ -1212,6 +1217,27 @@
   dag Li32 = (i32 (load xoaddr:$src));
 }
 
+def DWToSPExtractConv {
+  dag El0US1 = (f32 (PPCfcfidus
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 0))))));
+  dag El1US1 = (f32 (PPCfcfidus
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 1))))));
+  dag El0US2 = (f32 (PPCfcfidus
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 0))))));
+  dag El1US2 = (f32 (PPCfcfidus
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 1))))));
+  dag El0SS1 = (f32 (PPCfcfids
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 0))))));
+  dag El1SS1 = (f32 (PPCfcfids
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 1))))));
+  dag El0SS2 = (f32 (PPCfcfids
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 0))))));
+  dag El1SS2 = (f32 (PPCfcfids
+                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 1))))));
+  dag BVU = (v4f32 (build_vector El0US1, El1US1, El0US2, El1US2));
+  dag BVS = (v4f32 (build_vector El0SS1, El1SS1, El0SS2, El1SS2));
+}
+
 // The following VSX instructions were introduced in Power ISA 2.07
 /* FIXME: if the operands are v2i64, these patterns will not match.
    we should define new patterns or otherwise match the same patterns
@@ -1447,35 +1473,27 @@
   } // UseVSXReg = 1
 
   let Predicates = [IsLittleEndian] in {
-  def : Pat<(f32 (PPCfcfids
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 0)))))),
-            (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
-  def : Pat<(f32 (PPCfcfids
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 1)))))),
+  def : Pat<DWToSPExtractConv.El0SS1,
+            (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
+  def : Pat<DWToSPExtractConv.El1SS1,
             (f32 (XSCVSXDSP (COPY_TO_REGCLASS
-                              (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
-  def : Pat<(f32 (PPCfcfidus
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 0)))))),
-            (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
-  def : Pat<(f32 (PPCfcfidus
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 1)))))),
+                              (f64 (COPY_TO_REGCLASS $S1, VSRC)), VSFRC)))>;
+  def : Pat<DWToSPExtractConv.El0US1,
+            (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
+  def : Pat<DWToSPExtractConv.El1US1,
             (f32 (XSCVUXDSP (COPY_TO_REGCLASS
-                              (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
+                              (f64 (COPY_TO_REGCLASS $S1, VSRC)), VSFRC)))>;
   }
 
   let Predicates = [IsBigEndian] in {
-  def : Pat<(f32 (PPCfcfids
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 0)))))),
-            (f32 (XSCVSXDSP (COPY_TO_REGCLASS $S, VSFRC)))>;
-  def : Pat<(f32 (PPCfcfids
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 1)))))),
-            (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
-  def : Pat<(f32 (PPCfcfidus
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 0)))))),
-            (f32 (XSCVUXDSP (COPY_TO_REGCLASS $S, VSFRC)))>;
-  def : Pat<(f32 (PPCfcfidus
-                   (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 1)))))),
-            (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+  def : Pat<DWToSPExtractConv.El0SS1,
+            (f32 (XSCVSXDSP (COPY_TO_REGCLASS $S1, VSFRC)))>;
+  def : Pat<DWToSPExtractConv.El1SS1,
+            (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
+  def : Pat<DWToSPExtractConv.El0US1,
+            (f32 (XSCVUXDSP (COPY_TO_REGCLASS $S1, VSFRC)))>;
+  def : Pat<DWToSPExtractConv.El1US1,
+            (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
   }
 
   // Instructions for converting float to i64 feeding a store.
@@ -3809,6 +3827,15 @@
                                               (XFLOADf32 xoaddr:$A), VSFRC)), 0))>;
   }
 
+  let Predicates = [IsBigEndian, HasP8Vector] in {
+    def : Pat<DWToSPExtractConv.BVU,
+              (v4f32 (VPKUDUM (XXSLDWI (XVCVUXDSP $S1), (XVCVUXDSP $S1), 3),
+                              (XXSLDWI (XVCVUXDSP $S2), (XVCVUXDSP $S2), 3)))>;
+    def : Pat<DWToSPExtractConv.BVS,
+              (v4f32 (VPKUDUM (XXSLDWI (XVCVSXDSP $S1), (XVCVSXDSP $S1), 3),
+                              (XXSLDWI (XVCVSXDSP $S2), (XVCVSXDSP $S2), 3)))>;
+  }
+
   // Big endian, available on all targets with VSX
   let Predicates = [IsBigEndian, HasVSX] in {
     def : Pat<(v2f64 (build_vector f64:$A, f64:$B)),
@@ -3837,6 +3864,15 @@
               (v4i32 (VMRGEW MrgWords.CVA0B0U, MrgWords.CVA1B1U))>;
   }
 
+  let Predicates = [IsLittleEndian, HasP8Vector] in {
+    def : Pat<DWToSPExtractConv.BVU,
+              (v4f32 (VPKUDUM (XXSLDWI (XVCVUXDSP $S2), (XVCVUXDSP $S2), 3),
+                              (XXSLDWI (XVCVUXDSP $S1), (XVCVUXDSP $S1), 3)))>;
+    def : Pat<DWToSPExtractConv.BVS,
+              (v4f32 (VPKUDUM (XXSLDWI (XVCVSXDSP $S2), (XVCVSXDSP $S2), 3),
+                              (XXSLDWI (XVCVSXDSP $S1), (XVCVSXDSP $S1), 3)))>;
+  }
+
   let Predicates = [IsLittleEndian, HasVSX] in {
   // Little endian, available on all targets with VSX
     def : Pat<(v2f64 (build_vector f64:$A, f64:$B)),
@@ -4017,3 +4053,21 @@
   }
 }
 
+// Put this P9Altivec-related definition here since it may be selected to the
+// VSX instruction xvnegsp, avoiding a possible undef.
+let Predicates = [HasP9Altivec] in {
+
+  def : Pat<(v4i32 (PPCvabsd v4i32:$A, v4i32:$B, (i32 0))),
+            (v4i32 (VABSDUW $A, $B))>;
+
+  def : Pat<(v8i16 (PPCvabsd v8i16:$A, v8i16:$B, (i32 0))),
+            (v8i16 (VABSDUH $A, $B))>;
+
+  def : Pat<(v16i8 (PPCvabsd v16i8:$A, v16i8:$B, (i32 0))),
+            (v16i8 (VABSDUB $A, $B))>;
+
+  // As the PPCISD::VABSD description says, the last operand indicates whether
+  // to do the sign bit flip.
+  def : Pat<(v4i32 (PPCvabsd v4i32:$A, v4i32:$B, (i32 1))),
+            (v4i32 (VABSDUW (XVNEGSP $A), (XVNEGSP $B)))>;
+}
diff --git a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
index 3078a66..4458b92 100644
--- a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
+++ b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
@@ -140,7 +140,7 @@
           // This conditional branch is always taken. So, remove all branches
           // and insert an unconditional branch to the destination of this.
           MachineBasicBlock::iterator It = Br, Er = MBB.end();
-          for (; It != Er && !SeenUse; It++) {
+          for (; It != Er; It++) {
             if (It->isDebugInstr()) continue;
             assert(It->isTerminator() && "Non-terminator after a terminator");
             InstrsToErase.push_back(&*It);
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 96923a9..3d067aa 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -673,12 +673,15 @@
   unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
   unsigned SrcReg = MI.getOperand(0).getReg();
 
-  BuildMI(MBB, II, dl, TII.get(TargetOpcode::KILL),
-          getCRFromCRBit(SrcReg))
-          .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
-
+  // We need to move the CR field that contains the CR bit we are spilling.
+  // The super register may not be explicitly defined (i.e. it can be defined
+  // by a CR-logical that only defines the subreg) so we state that the CR
+  // field is undef. Also, in order to preserve the kill flag on the CR bit,
+  // we add it as an implicit use.
   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
-      .addReg(getCRFromCRBit(SrcReg));
+      .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
+      .addReg(SrcReg,
+              RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));
 
   // If the saved register wasn't CR0LT, shift the bits left so that the bit to
   // store is the first one. Mask all but that bit.
diff --git a/lib/Target/PowerPC/PPCScheduleP9.td b/lib/Target/PowerPC/PPCScheduleP9.td
index e1a4801..a1e625c 100644
--- a/lib/Target/PowerPC/PPCScheduleP9.td
+++ b/lib/Target/PowerPC/PPCScheduleP9.td
@@ -33,6 +33,12 @@
   // A dispatch group is 6 instructions.
   let LoopMicroOpBufferSize = 60;
 
+  // As iops are dispatched to a slice, they are held in an independent slice
+  // issue queue until all register sources and other dependencies have been
+  // resolved and they can be issued. Each of four execution slices has an
+  // 11-entry iop issue queue.
+  let MicroOpBufferSize = 44;
+
   let CompleteModel = 1;
 
  // Do not support QPX (Quad Processing eXtension) or SPE (Signal Processing
diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt
index 7f5beca..b4bf635 100644
--- a/lib/Target/PowerPC/README.txt
+++ b/lib/Target/PowerPC/README.txt
@@ -663,4 +663,3 @@
 More general handling of any_extend and zero_extend:
 
 See https://reviews.llvm.org/D24924#555306
-
diff --git a/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 4e70ea4..1d1112c 100644
--- a/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -310,12 +310,14 @@
     return RISCVFPRndMode::stringToRoundingMode(Str) != RISCVFPRndMode::Invalid;
   }
 
-  bool isImmXLen() const {
+  bool isImmXLenLI() const {
     int64_t Imm;
     RISCVMCExpr::VariantKind VK;
     if (!isImm())
       return false;
     bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+    if (VK == RISCVMCExpr::VK_RISCV_LO || VK == RISCVMCExpr::VK_RISCV_PCREL_LO)
+      return true;
     // Given only Imm, ensuring that the actually specified constant is either
     // a signed or unsigned 64-bit number is unfortunately impossible.
     bool IsInRange = isRV64() ? true : isInt<32>(Imm) || isUInt<32>(Imm);
@@ -782,7 +784,7 @@
   switch(Result) {
   default:
     break;
-  case Match_InvalidImmXLen:
+  case Match_InvalidImmXLenLI:
     if (isRV64()) {
       SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
       return Error(ErrorLoc, "operand must be a constant 64-bit integer");
@@ -1031,17 +1033,10 @@
   case AsmToken::Plus:
   case AsmToken::Integer:
   case AsmToken::String:
+  case AsmToken::Identifier:
     if (getParser().parseExpression(Res))
       return MatchOperand_ParseFail;
     break;
-  case AsmToken::Identifier: {
-    StringRef Identifier;
-    if (getParser().parseIdentifier(Identifier))
-      return MatchOperand_ParseFail;
-    MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
-    Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
-    break;
-  }
   case AsmToken::Percent:
     return parseOperandWithModifier(Operands);
   }
@@ -1449,7 +1444,17 @@
   Inst.setLoc(IDLoc);
 
   if (Inst.getOpcode() == RISCV::PseudoLI) {
-    auto Reg = Inst.getOperand(0).getReg();
+    unsigned Reg = Inst.getOperand(0).getReg();
+    const MCOperand &Op1 = Inst.getOperand(1);
+    if (Op1.isExpr()) {
+      // We must have li reg, %lo(sym) or li reg, %pcrel_lo(sym) or similar.
+      // Just convert to an addi. This allows compatibility with gas.
+      emitToStreamer(Out, MCInstBuilder(RISCV::ADDI)
+                              .addReg(Reg)
+                              .addReg(RISCV::X0)
+                              .addExpr(Op1.getExpr()));
+      return false;
+    }
     int64_t Imm = Inst.getOperand(1).getImm();
     // On RV32 the immediate here can either be a signed or an unsigned
     // 32-bit number. Sign extension has to be performed to ensure that Imm
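As an aside on the PseudoLI change above: with the new ImmXLenLI operand class and the expression-operand expansion, an input such as "li a0, %lo(foo)" is now accepted by the parser and emitted as "addi a0, x0, %lo(foo)" (symbol name illustrative), matching the behavior of gas; previously only plain integer immediates were allowed for li.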
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 49239ac..7672fea 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -8,6 +8,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVAsmBackend.h"
+#include "RISCVMCExpr.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/MC/MCAssembler.h"
 #include "llvm/MC/MCContext.h"
@@ -21,6 +22,44 @@
 
 using namespace llvm;
 
+// If linker relaxation is enabled, or the relax option had previously been
+// enabled, always emit relocations even if the fixup can be resolved. This is
+// necessary for correctness as offsets may change during relaxation.
+bool RISCVAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+                                            const MCFixup &Fixup,
+                                            const MCValue &Target) {
+  bool ShouldForce = false;
+
+  switch ((unsigned)Fixup.getKind()) {
+  default:
+    break;
+  case RISCV::fixup_riscv_pcrel_lo12_i:
+  case RISCV::fixup_riscv_pcrel_lo12_s:
+    // For pcrel_lo12, force a relocation if the target of the corresponding
+    // pcrel_hi20 is not in the same fragment.
+    const MCFixup *T = cast<RISCVMCExpr>(Fixup.getValue())->getPCRelHiFixup();
+    if (!T) {
+      Asm.getContext().reportError(Fixup.getLoc(),
+                                   "could not find corresponding %pcrel_hi");
+      return false;
+    }
+
+    switch ((unsigned)T->getKind()) {
+    default:
+      llvm_unreachable("Unexpected fixup kind for pcrel_lo12");
+      break;
+    case RISCV::fixup_riscv_pcrel_hi20:
+      ShouldForce = T->getValue()->findAssociatedFragment() !=
+                    Fixup.getValue()->findAssociatedFragment();
+      break;
+    }
+    break;
+  }
+
+  return ShouldForce || STI.getFeatureBits()[RISCV::FeatureRelax] ||
+         ForceRelocs;
+}
+
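Concretely (label name illustrative): a pc-relative access is emitted as an "auipc a0, %pcrel_hi(sym)" carrying the pcrel_hi20 fixup, followed by an "addi a0, a0, %pcrel_lo(.Lpcrel_hi0)" whose pcrel_lo12 fixup points back at the auipc. If the two end up in different fragments, the distance between them is not final at layout time, so the relocation must be forced even when the fixup value is otherwise resolvable.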
 bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
                                                    bool Resolved,
                                                    uint64_t Value,
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
index 5601f07..b98e45f 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
@@ -49,13 +49,8 @@
   std::unique_ptr<MCObjectTargetWriter>
   createObjectTargetWriter() const override;
 
-  // If linker relaxation is enabled, or the relax option had previously been
-  // enabled, always emit relocations even if the fixup can be resolved. This is
-  // necessary for correctness as offsets may change during relaxation.
   bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                             const MCValue &Target) override {
-    return STI.getFeatureBits()[RISCV::FeatureRelax] || ForceRelocs;
-  }
+                             const MCValue &Target) override;
 
   bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                             const MCRelaxableFragment *DF,
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
index ad8357f..53648a5 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
@@ -14,6 +14,7 @@
 
 #include "RISCV.h"
 #include "RISCVMCExpr.h"
+#include "RISCVFixupKinds.h"
 #include "llvm/MC/MCAssembler.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCStreamer.h"
@@ -40,9 +41,90 @@
     OS << ')';
 }
 
+const MCFixup *RISCVMCExpr::getPCRelHiFixup() const {
+  MCValue AUIPCLoc;
+  if (!getSubExpr()->evaluateAsRelocatable(AUIPCLoc, nullptr, nullptr))
+    return nullptr;
+
+  const MCSymbolRefExpr *AUIPCSRE = AUIPCLoc.getSymA();
+  if (!AUIPCSRE)
+    return nullptr;
+
+  const auto *DF =
+      dyn_cast_or_null<MCDataFragment>(AUIPCSRE->findAssociatedFragment());
+  if (!DF)
+    return nullptr;
+
+  const MCSymbol *AUIPCSymbol = &AUIPCSRE->getSymbol();
+  for (const MCFixup &F : DF->getFixups()) {
+    if (F.getOffset() != AUIPCSymbol->getOffset())
+      continue;
+
+    switch ((unsigned)F.getKind()) {
+    default:
+      continue;
+    case RISCV::fixup_riscv_pcrel_hi20:
+      return &F;
+    }
+  }
+
+  return nullptr;
+}
+
+bool RISCVMCExpr::evaluatePCRelLo(MCValue &Res, const MCAsmLayout *Layout,
+                                  const MCFixup *Fixup) const {
+  // VK_RISCV_PCREL_LO has to be handled specially. The MCExpr inside is
+  // actually the location of an auipc instruction with a VK_RISCV_PCREL_HI
+  // fixup pointing to the real target. We need to generate an MCValue in the
+  // form of (<real target> + <offset from this fixup to the auipc fixup>):
+  // the fixup is resolved pc-relative to the VK_RISCV_PCREL_LO location, so
+  // we must add its offset from the VK_RISCV_PCREL_HI fixup to correct it.
+  MCValue AUIPCLoc;
+  if (!getSubExpr()->evaluateAsValue(AUIPCLoc, *Layout))
+    return false;
+
+  const MCSymbolRefExpr *AUIPCSRE = AUIPCLoc.getSymA();
+  // Don't try to evaluate %pcrel_hi/%pcrel_lo pairs that cross fragment
+  // boundaries.
+  if (!AUIPCSRE ||
+      findAssociatedFragment() != AUIPCSRE->findAssociatedFragment())
+    return false;
+
+  const MCSymbol *AUIPCSymbol = &AUIPCSRE->getSymbol();
+  if (!AUIPCSymbol)
+    return false;
+
+  const MCFixup *TargetFixup = getPCRelHiFixup();
+  if (!TargetFixup)
+    return false;
+
+  if ((unsigned)TargetFixup->getKind() != RISCV::fixup_riscv_pcrel_hi20)
+    return false;
+
+  MCValue Target;
+  if (!TargetFixup->getValue()->evaluateAsValue(Target, *Layout))
+    return false;
+
+  if (!Target.getSymA() || !Target.getSymA()->getSymbol().isInSection())
+    return false;
+
+  if (&Target.getSymA()->getSymbol().getSection() !=
+      findAssociatedFragment()->getParent())
+    return false;
+
+  uint64_t AUIPCOffset = AUIPCSymbol->getOffset();
+
+  Res = MCValue::get(Target.getSymA(), nullptr,
+                     Target.getConstant() + (Fixup->getOffset() - AUIPCOffset));
+  return true;
+}
+
 bool RISCVMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
                                             const MCAsmLayout *Layout,
                                             const MCFixup *Fixup) const {
+  if (Kind == VK_RISCV_PCREL_LO && evaluatePCRelLo(Res, Layout, Fixup))
+    return true;
+
   if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
     return false;
 
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
index d2e0f6b..4eafcc0 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
@@ -39,6 +39,9 @@
 
   int64_t evaluateAsInt64(int64_t Value) const;
 
+  bool evaluatePCRelLo(MCValue &Res, const MCAsmLayout *Layout,
+                       const MCFixup *Fixup) const;
+
   explicit RISCVMCExpr(const MCExpr *Expr, VariantKind Kind)
       : Expr(Expr), Kind(Kind) {}
 
@@ -50,6 +53,13 @@
 
   const MCExpr *getSubExpr() const { return Expr; }
 
+  /// Get the MCExpr of the VK_RISCV_PCREL_HI Fixup that the
+  /// VK_RISCV_PCREL_LO points to.
+  ///
+  /// \returns nullptr if this isn't a VK_RISCV_PCREL_LO pointing to a
+  /// VK_RISCV_PCREL_HI.
+  const MCFixup *getPCRelHiFixup() const;
+
   void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
   bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
                                  const MCFixup *Fixup) const override;
diff --git a/lib/Target/RISCV/RISCVISelLowering.cpp b/lib/Target/RISCV/RISCVISelLowering.cpp
index e78085e..508dcbd 100644
--- a/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -80,6 +80,13 @@
   for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
     setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
 
+  if (Subtarget.is64Bit()) {
+    setTargetDAGCombine(ISD::SHL);
+    setTargetDAGCombine(ISD::SRL);
+    setTargetDAGCombine(ISD::SRA);
+    setTargetDAGCombine(ISD::ANY_EXTEND);
+  }
+
   if (!Subtarget.hasStdExtM()) {
     setOperationAction(ISD::MUL, XLenVT, Expand);
     setOperationAction(ISD::MULHS, XLenVT, Expand);
@@ -506,11 +513,71 @@
   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
 }
 
+// Return true if the given node is a shift with a non-constant shift amount.
+static bool isVariableShift(SDValue Val) {
+  switch (Val.getOpcode()) {
+  default:
+    return false;
+  case ISD::SHL:
+  case ISD::SRA:
+  case ISD::SRL:
+    return Val.getOperand(1).getOpcode() != ISD::Constant;
+  }
+}
+
+// Returns true if the given node is an sdiv, udiv, or urem with non-constant
+// operands.
+static bool isVariableSDivUDivURem(SDValue Val) {
+  switch (Val.getOpcode()) {
+  default:
+    return false;
+  case ISD::SDIV:
+  case ISD::UDIV:
+  case ISD::UREM:
+    return Val.getOperand(0).getOpcode() != ISD::Constant &&
+           Val.getOperand(1).getOpcode() != ISD::Constant;
+  }
+}
+
 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+
   switch (N->getOpcode()) {
   default:
     break;
+  case ISD::SHL:
+  case ISD::SRL:
+  case ISD::SRA: {
+    assert(Subtarget.getXLen() == 64 && "Combine should be 64-bit only");
+    if (!DCI.isBeforeLegalize())
+      break;
+    SDValue RHS = N->getOperand(1);
+    if (N->getValueType(0) != MVT::i32 || RHS->getOpcode() == ISD::Constant ||
+        (RHS->getOpcode() == ISD::AssertZext &&
+         cast<VTSDNode>(RHS->getOperand(1))->getVT().getSizeInBits() <= 5))
+      break;
+    SDValue LHS = N->getOperand(0);
+    SDLoc DL(N);
+    SDValue NewRHS =
+        DAG.getNode(ISD::AssertZext, DL, RHS.getValueType(), RHS,
+                    DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 5)));
+    return DCI.CombineTo(
+        N, DAG.getNode(N->getOpcode(), DL, LHS.getValueType(), LHS, NewRHS));
+  }
+  case ISD::ANY_EXTEND: {
+    // If any-extending an i32 variable-amount shift or sdiv/udiv/urem to i64,
+    // then instead sign-extend in order to increase the chance of being able
+    // to select the sllw/srlw/sraw/divw/divuw/remuw instructions.
+    SDValue Src = N->getOperand(0);
+    if (N->getValueType(0) != MVT::i64 || Src.getValueType() != MVT::i32)
+      break;
+    if (!isVariableShift(Src) &&
+        !(Subtarget.hasStdExtM() && isVariableSDivUDivURem(Src)))
+      break;
+    SDLoc DL(N);
+    return DCI.CombineTo(N, DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src));
+  }
   case RISCVISD::SplitF64: {
     // If the input to SplitF64 is just BuildPairF64 then the operation is
     // redundant. Instead, use BuildPairF64's operands directly.
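A hedged sketch of the source shape the new SHL/SRL/SRA and ANY_EXTEND combines above improve on RV64 (function name is an assumption): a 32-bit shift by a non-constant amount. Annotating the amount with AssertZext(i5) and preferring sign-extension over any-extension makes the W-form instructions (sllw/srlw/sraw, and divw/remw with the M extension) selectable.

    #include <cstdint>

    // With the combines above, the masked variable shift can be selected
    // as a single sllw on RV64 instead of a shift-plus-extension sequence.
    uint32_t shl32(uint32_t x, uint32_t amt) {
      return x << (amt % 32); // non-constant amount, known < 32
    }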
diff --git a/lib/Target/RISCV/RISCVInstrInfo.td b/lib/Target/RISCV/RISCVInstrInfo.td
index 60c0f0b..d7cc13d 100644
--- a/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/lib/Target/RISCV/RISCVInstrInfo.td
@@ -198,8 +198,10 @@
 }
 
 // A parameterized register class alternative to i32imm/i64imm from Target.td.
-def ixlenimm : Operand<XLenVT> {
-  let ParserMatchClass = ImmXLenAsmOperand<"">;
+def ixlenimm : Operand<XLenVT>;
+
+def ixlenimm_li : Operand<XLenVT> {
+  let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
 }
 
 // Standalone (codegen-only) immleaf patterns.
@@ -497,7 +499,7 @@
 // expanded to real instructions immediately.
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
     isCodeGenOnly = 0, isAsmParserOnly = 1 in
-def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm:$imm), [],
+def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
                       "li", "$rd, $imm">;
 
 def : InstAlias<"mv $rd, $rs",   (ADDI GPR:$rd, GPR:$rs,       0)>;
@@ -652,6 +654,30 @@
 def IsOrAdd: PatFrag<(ops node:$A, node:$B), (or node:$A, node:$B), [{
   return isOrEquivalentToAdd(N);
 }]>;
+def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{
+  return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
+}]>;
+def sexti32 : PatFrags<(ops node:$src),
+                       [(sext_inreg node:$src, i32),
+                        (assertsexti32 node:$src)]>;
+def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+  return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
+}]>;
+def assertzexti5 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+  return cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits() <= 5;
+}]>;
+def zexti32 : PatFrags<(ops node:$src),
+                       [(and node:$src, 0xffffffff),
+                        (assertzexti32 node:$src)]>;
+// Defines a legal mask for (assertzexti5 (and src, mask)) to be combinable
+// with a shiftw operation. The mask mustn't modify the lower 5 bits or the
+// upper 32 bits.
+def shiftwamt_mask : ImmLeaf<XLenVT, [{
+  return countTrailingOnes<uint64_t>(Imm) >= 5 && isUInt<32>(Imm);
+}]>;
+def shiftwamt : PatFrags<(ops node:$src),
+                         [(assertzexti5 (and node:$src, shiftwamt_mask)),
+                          (assertzexti5 node:$src)]>;
 
 /// Immediates
 
@@ -909,7 +935,28 @@
 def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
           (SRAIW GPR:$rs1, uimm5:$shamt)>;
 
-// TODO: patterns for SLLW/SRLW/SRAW.
+// For variable-length shifts, we rely on assertzexti5 being inserted during
+// lowering (see RISCVTargetLowering::PerformDAGCombine). This enables us to
+// guarantee that selecting a 32-bit variable shift is legal (as the variable
+// shift is known to be <= 32). We must also be careful not to create
+// semantically incorrect patterns. For instance, selecting SRLW for
+// (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2)),
+// is not guaranteed to be safe, as we don't know whether the upper 32 bits of
+// the result are used or not (in the case where rs2=0, this is a
+// sign-extension operation).
+
+def : Pat<(sext_inreg (shl GPR:$rs1, (shiftwamt GPR:$rs2)), i32),
+          (SLLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(zexti32 (shl GPR:$rs1, (shiftwamt GPR:$rs2))),
+          (SRLI (SLLI (SLLW GPR:$rs1, GPR:$rs2), 32), 32)>;
+
+def : Pat<(sext_inreg (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2)), i32),
+          (SRLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(zexti32 (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2))),
+          (SRLI (SLLI (SRLW GPR:$rs1, GPR:$rs2), 32), 32)>;
+
+def : Pat<(sra (sexti32 GPR:$rs1), (shiftwamt GPR:$rs2)),
+          (SRAW GPR:$rs1, GPR:$rs2)>;
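The safety concern described in the comment above can be made concrete with a runnable check (illustrative, not from the patch): srlw always sign-extends its 32-bit result, so for a zero shift amount it disagrees with (srl (zexti32 x), 0) whenever bit 31 of x is set.

    #include <cstdint>

    // Demonstrates why bare SRLW cannot implement
    // (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2)) when the upper
    // 32 bits of the result may be observed.
    bool srlwWouldMismatch() {
      uint64_t x = 0x80000000u;
      uint64_t dagResult = (x & 0xffffffffu) >> 0;   // 0x0000000080000000
      int64_t srlwResult =
          (int64_t)(int32_t)((uint32_t)x >> 0);      // 0xffffffff80000000
      return dagResult != (uint64_t)srlwResult;      // true: they differ
    }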
 
 /// Loads
 
diff --git a/lib/Target/RISCV/RISCVInstrInfoM.td b/lib/Target/RISCV/RISCVInstrInfoM.td
index 2dd10ad..05dd331 100644
--- a/lib/Target/RISCV/RISCVInstrInfoM.td
+++ b/lib/Target/RISCV/RISCVInstrInfoM.td
@@ -49,3 +49,34 @@
 def : PatGprGpr<srem, REM>;
 def : PatGprGpr<urem, REMU>;
 } // Predicates = [HasStdExtM]
+
+let Predicates = [HasStdExtM, IsRV64] in {
+def : Pat<(sext_inreg (mul GPR:$rs1, GPR:$rs2), i32),
+          (MULW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (sdiv (sexti32 GPR:$rs1),
+                            (sexti32 GPR:$rs2)), i32),
+          (DIVW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(zexti32 (sdiv (sexti32 GPR:$rs1),
+                         (sexti32 GPR:$rs2))),
+          (SRLI (SLLI (DIVW GPR:$rs1, GPR:$rs2), 32), 32)>;
+def : Pat<(sext_inreg (udiv (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)), i32),
+          (DIVUW GPR:$rs1, GPR:$rs2)>;
+// It's cheaper to perform a divuw and zero-extend the result than to
+// zero-extend both inputs to a udiv.
+def : Pat<(udiv (and GPR:$rs1, 0xffffffff), (and GPR:$rs2, 0xffffffff)),
+          (SRLI (SLLI (DIVUW GPR:$rs1, GPR:$rs2), 32), 32)>;
+// Although the sexti32 operands may not have originated from an i32 srem,
+// this pattern is safe, as it is impossible for two sign-extended inputs to
+// produce a result where res[63:32]=0 and res[31]=1.
+def : Pat<(srem (sexti32 GPR:$rs1), (sexti32 GPR:$rs2)),
+          (REMW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (srem (sexti32 GPR:$rs1),
+                            (sexti32 GPR:$rs2)), i32),
+          (REMW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (urem (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)), i32),
+          (REMUW GPR:$rs1, GPR:$rs2)>;
+// It's cheaper to perform a remuw and zero-extend the result than to
+// zero-extend both inputs to a urem.
+def : Pat<(urem (and GPR:$rs1, 0xffffffff), (and GPR:$rs2, 0xffffffff)),
+          (SRLI (SLLI (REMUW GPR:$rs1, GPR:$rs2), 32), 32)>;
+} // Predicates = [HasStdExtM, IsRV64]
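
The srem comment above admits a quick sanity check: the remainder's magnitude is bounded by the divisor's, so an i64 srem of two sign-extended i32 values is itself a sign-extended i32 value, and res[63:32] = 0 together with res[31] = 1 can never occur. A standalone check, not part of the patch:

    #include <cassert>
    #include <cstdint>

    // Returns true if the i64 srem of two sign-extended i32 inputs is itself
    // a sign-extended i32 value (bits 63:32 replicate bit 31).
    bool remIsSextI32(int32_t a, int32_t b) {
      if (b == 0)
        return true; // Division by zero is UB; nothing to check.
      int64_t res = static_cast<int64_t>(a) % static_cast<int64_t>(b);
      return res == static_cast<int64_t>(static_cast<int32_t>(res));
    }

    int main() {
      assert(remIsSextI32(INT32_MIN, -1)); // No overflow at i64 width; res = 0.
      assert(remIsSextI32(-7, 3));         // res = -1.
      assert(remIsSextI32(7, -3));         // res = 1.
    }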
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 7d908bb..ae22576 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1866,8 +1866,8 @@
   case SPISD::SELECT_ICC:
   case SPISD::SELECT_XCC:
   case SPISD::SELECT_FCC:
-    DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
-    DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);
+    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
+    Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
 
     // Only known if known in both the LHS and RHS.
     Known.One &= Known2.One;
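
The Sparc change here (and the SystemZ ones below) are mechanical updates for the API change that made SelectionDAG::computeKnownBits return its result by value rather than fill an out-parameter. A toy analogue of the refactoring, using hypothetical names (LLVM's real KnownBits lives in llvm/Support/KnownBits.h):

    #include <cstdint>

    struct ToyKnownBits { uint64_t Zero = 0, One = 0; };

    // Old style: the caller default-constructs and passes an out-parameter.
    void computeToyKnownBitsOld(uint64_t V, ToyKnownBits &Known) {
      Known.Zero = ~V; // Toy analysis: a constant is fully known.
      Known.One = V;
    }

    // New style: return by value, so there is no dead default construction
    // and no way to read an uninitialized result.
    ToyKnownBits computeToyKnownBitsNew(uint64_t V) {
      return {~V, V};
    }

    int main() {
      ToyKnownBits A;
      computeToyKnownBitsOld(5, A);               // two statements
      ToyKnownBits B = computeToyKnownBitsNew(5); // one expression
      return (A.Zero == B.Zero && A.One == B.One) ? 0 : 1;
    }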
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 0d2c238..5bc2ab0 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -728,8 +728,7 @@
   // The inner check covers all cases but is more expensive.
   uint64_t Used = allOnes(Op.getValueSizeInBits());
   if (Used != (AndMask | InsertMask)) {
-    KnownBits Known;
-    CurDAG->computeKnownBits(Op.getOperand(0), Known);
+    KnownBits Known = CurDAG->computeKnownBits(Op.getOperand(0));
     if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
       return false;
   }
@@ -787,8 +786,7 @@
       // If some bits of Input are already known zeros, those bits will have
       // been removed from the mask.  See if adding them back in makes the
       // mask suitable.
-      KnownBits Known;
-      CurDAG->computeKnownBits(Input, Known);
+      KnownBits Known = CurDAG->computeKnownBits(Input);
       Mask |= Known.Zero.getZExtValue();
       if (!refineRxSBGMask(RxSBG, Mask))
         return false;
@@ -811,8 +809,7 @@
       // If some bits of Input are already known ones, those bits will have
       // been removed from the mask.  See if adding them back in makes the
       // mask suitable.
-      KnownBits Known;
-      CurDAG->computeKnownBits(Input, Known);
+      KnownBits Known = CurDAG->computeKnownBits(Input);
       Mask &= ~Known.One.getZExtValue();
       if (!refineRxSBGMask(RxSBG, Mask))
         return false;
@@ -1147,7 +1144,7 @@
     return false;
 
   auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
-  if (!Load || !Load->hasOneUse())
+  if (!Load || !Load->hasNUsesOfValue(1, 0))
     return false;
   if (Load->getMemoryVT().getSizeInBits() !=
       Load->getValueType(0).getSizeInBits())
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index d7951ca..2a825c1 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -527,6 +527,7 @@
   setTargetDAGCombine(ISD::STORE);
   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
   setTargetDAGCombine(ISD::FP_ROUND);
+  setTargetDAGCombine(ISD::FP_EXTEND);
   setTargetDAGCombine(ISD::BSWAP);
   setTargetDAGCombine(ISD::SDIV);
   setTargetDAGCombine(ISD::UDIV);
@@ -2218,8 +2219,7 @@
   auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
   if (!Mask)
     return;
-  KnownBits Known;
-  DAG.computeKnownBits(C.Op0.getOperand(0), Known);
+  KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
   if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
     return;
 
@@ -3165,10 +3165,9 @@
   assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
 
   // Get the known-zero masks for each operand.
-  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
-  KnownBits Known[2];
-  DAG.computeKnownBits(Ops[0], Known[0]);
-  DAG.computeKnownBits(Ops[1], Known[1]);
+  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
+  KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
+                        DAG.computeKnownBits(Ops[1])};
 
   // See if the upper 32 bits of one operand and the lower 32 bits of the
   // other are known zero.  They are the low and high operands respectively.
@@ -3351,8 +3350,7 @@
   }
 
   // Get the known-zero mask for the operand.
-  KnownBits Known;
-  DAG.computeKnownBits(Op, Known);
+  KnownBits Known = DAG.computeKnownBits(Op);
   unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
   if (NumSignificantBits == 0)
     return DAG.getConstant(0, DL, VT);
@@ -5485,7 +5483,7 @@
   // (fpround (extract_vector_elt X 0))
   // (fpround (extract_vector_elt X 1)) ->
   // (extract_vector_elt (VROUND X) 0)
-  // (extract_vector_elt (VROUND X) 1)
+  // (extract_vector_elt (VROUND X) 2)
   //
   // This is a special case since the target doesn't really support v2f32s.
   SelectionDAG &DAG = DCI.DAG;
@@ -5527,6 +5525,53 @@
   return SDValue();
 }
 
+SDValue SystemZTargetLowering::combineFP_EXTEND(
+    SDNode *N, DAGCombinerInfo &DCI) const {
+  // (fpextend (extract_vector_elt X 0))
+  // (fpextend (extract_vector_elt X 2)) ->
+  // (extract_vector_elt (VEXTEND X) 0)
+  // (extract_vector_elt (VEXTEND X) 1)
+  //
+  // This is a special case since the target doesn't really support v2f32s.
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue Op0 = N->getOperand(0);
+  if (N->getValueType(0) == MVT::f64 &&
+      Op0.hasOneUse() &&
+      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+      Op0.getOperand(0).getValueType() == MVT::v4f32 &&
+      Op0.getOperand(1).getOpcode() == ISD::Constant &&
+      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
+    SDValue Vec = Op0.getOperand(0);
+    for (auto *U : Vec->uses()) {
+      if (U != Op0.getNode() &&
+          U->hasOneUse() &&
+          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+          U->getOperand(0) == Vec &&
+          U->getOperand(1).getOpcode() == ISD::Constant &&
+          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
+        SDValue OtherExtend = SDValue(*U->use_begin(), 0);
+        if (OtherExtend.getOpcode() == ISD::FP_EXTEND &&
+            OtherExtend.getOperand(0) == SDValue(U, 0) &&
+            OtherExtend.getValueType() == MVT::f64) {
+          SDValue VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
+                                        MVT::v2f64, Vec);
+          DCI.AddToWorklist(VExtend.getNode());
+          SDValue Extract1 =
+            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
+                        VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
+          DCI.AddToWorklist(Extract1.getNode());
+          DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
+          SDValue Extract0 =
+            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
+                        VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
+          return Extract0;
+        }
+      }
+    }
+  }
+  return SDValue();
+}
+
 SDValue SystemZTargetLowering::combineBSWAP(
     SDNode *N, DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -5745,6 +5790,7 @@
   case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
   case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
   case ISD::FP_ROUND:           return combineFP_ROUND(N, DCI);
+  case ISD::FP_EXTEND:          return combineFP_EXTEND(N, DCI);
   case ISD::BSWAP:              return combineBSWAP(N, DCI);
   case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
   case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
@@ -5863,10 +5909,10 @@
                                   unsigned OpNo) {
   APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
   APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
-  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
-  KnownBits LHSKnown(SrcBitWidth), RHSKnown(SrcBitWidth);
-  DAG.computeKnownBits(Op.getOperand(OpNo), LHSKnown, Src0DemE, Depth + 1);
-  DAG.computeKnownBits(Op.getOperand(OpNo + 1), RHSKnown, Src1DemE, Depth + 1);
+  KnownBits LHSKnown =
+      DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
+  KnownBits RHSKnown =
+      DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
   Known.Zero = LHSKnown.Zero & RHSKnown.Zero;
   Known.One = LHSKnown.One & RHSKnown.One;
 }
@@ -5932,9 +5978,8 @@
     case Intrinsic::s390_vuplf: {
       SDValue SrcOp = Op.getOperand(1);
       unsigned SrcBitWidth = SrcOp.getScalarValueSizeInBits();
-      Known = KnownBits(SrcBitWidth);
       APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
-      DAG.computeKnownBits(SrcOp, Known, SrcDemE, Depth + 1);
+      Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
       if (IsLogical) {
         Known = Known.zext(BitWidth);
         Known.Zero.setBitsFrom(SrcBitWidth);
@@ -5953,7 +5998,7 @@
       break;
     case SystemZISD::REPLICATE: {
       SDValue SrcOp = Op.getOperand(0);
-      DAG.computeKnownBits(SrcOp, Known, Depth + 1);
+      Known = DAG.computeKnownBits(SrcOp, Depth + 1);
       if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
         Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
       break;
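
The new combine mirrors combineFP_ROUND just above it: on SystemZ, VEXTEND widens the even-numbered f32 lanes (0 and 2) of a v4f32 into a v2f64, so a pair of scalar fpextends of those two lanes folds into one vector extend and two extracts. A scalar model of the assumed lane mapping (standalone sketch; vldeb itself is the hardware instruction):

    #include <array>
    #include <cassert>

    // Lane mapping assumed by combineFP_EXTEND: v4f32 lanes 0 and 2 become
    // v2f64 lanes 0 and 1.
    std::array<double, 2> vextendModel(const std::array<float, 4> &V) {
      return {static_cast<double>(V[0]), static_cast<double>(V[2])};
    }

    int main() {
      std::array<float, 4> X = {1.5f, 2.5f, 3.5f, 4.5f};
      auto R = vextendModel(X);
      // (fpextend (extract X 0)) == (extract (VEXTEND X) 0)
      assert(R[0] == static_cast<double>(X[0]));
      // (fpextend (extract X 2)) == (extract (VEXTEND X) 1)
      assert(R[1] == static_cast<double>(X[2]));
    }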
diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h
index 172dbee..622da32 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/lib/Target/SystemZ/SystemZISelLowering.h
@@ -592,6 +592,7 @@
   SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td
index e3f9a96..1e904a8 100644
--- a/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -2900,7 +2900,7 @@
 }
 
 multiclass UnaryExtraVRRaSPairGeneric<string mnemonic, bits<16> opcode> {
-  let M4 = 0 in
+  let M4 = 0, Defs = [CC] in
     def "" : InstVRRa<opcode, (outs VR128:$V1),
                      (ins VR128:$V2, imm32zx4:$M3, imm32zx4:$M5),
                      mnemonic#"\t$V1, $V2, $M3, $M5", []>;
@@ -3472,7 +3472,9 @@
 class BinaryVRRbSPairGeneric<string mnemonic, bits<16> opcode>
   : InstVRRb<opcode, (outs VR128:$V1),
              (ins VR128:$V2, VR128:$V3, imm32zx4:$M4, imm32zx4:$M5),
-             mnemonic#"\t$V1, $V2, $V3, $M4, $M5", []>;
+             mnemonic#"\t$V1, $V2, $V3, $M4, $M5", []> {
+  let Defs = [CC];
+}
 
 // Declare a pair of instructions, one which sets CC and one which doesn't.
 // The CC-setting form ends with "S" and sets the low bit of M5.
@@ -3496,9 +3498,10 @@
 }
 
 multiclass BinaryExtraVRRbSPairGeneric<string mnemonic, bits<16> opcode> {
-  def "" : InstVRRb<opcode, (outs VR128:$V1),
-                   (ins VR128:$V2, VR128:$V3, imm32zx4:$M4, imm32zx4:$M5),
-                   mnemonic#"\t$V1, $V2, $V3, $M4, $M5", []>;
+  let Defs = [CC] in
+    def "" : InstVRRb<opcode, (outs VR128:$V1),
+                     (ins VR128:$V2, VR128:$V3, imm32zx4:$M4, imm32zx4:$M5),
+                     mnemonic#"\t$V1, $V2, $V3, $M4, $M5", []>;
   def : InstAlias<mnemonic#"\t$V1, $V2, $V3, $M4",
                   (!cast<Instruction>(NAME) VR128:$V1, VR128:$V2, VR128:$V3,
                                             imm32zx4:$M4, 0)>;
@@ -4185,9 +4188,10 @@
 }
 
 multiclass TernaryOptVRRbSPairGeneric<string mnemonic, bits<16> opcode> {
-  def "" : InstVRRb<opcode, (outs VR128:$V1),
-                   (ins VR128:$V2, VR128:$V3, imm32zx4:$M4, imm32zx4:$M5),
-                   mnemonic#"\t$V1, $V2, $V3, $M4, $M5", []>;
+  let Defs = [CC] in
+    def "" : InstVRRb<opcode, (outs VR128:$V1),
+                     (ins VR128:$V2, VR128:$V3, imm32zx4:$M4, imm32zx4:$M5),
+                     mnemonic#"\t$V1, $V2, $V3, $M4, $M5", []>;
   def : InstAlias<mnemonic#"\t$V1, $V2, $V3, $M4",
                   (!cast<Instruction>(NAME) VR128:$V1, VR128:$V2, VR128:$V3,
                                             imm32zx4:$M4, 0)>;
@@ -4385,7 +4389,8 @@
 }
 
 multiclass QuaternaryOptVRRdSPairGeneric<string mnemonic, bits<16> opcode> {
-  def "" : QuaternaryVRRdGeneric<mnemonic, opcode>;
+  let Defs = [CC] in
+    def "" : QuaternaryVRRdGeneric<mnemonic, opcode>;
   def : InstAlias<mnemonic#"\t$V1, $V2, $V3, $V4, $M5",
                   (!cast<Instruction>(NAME) VR128:$V1, VR128:$V2, VR128:$V3,
                                             VR128:$V4, imm32zx4:$M5, 0)>;
diff --git a/lib/Target/SystemZ/SystemZInstrVector.td b/lib/Target/SystemZ/SystemZInstrVector.td
index 8523af7..6c97b85 100644
--- a/lib/Target/SystemZ/SystemZInstrVector.td
+++ b/lib/Target/SystemZ/SystemZInstrVector.td
@@ -151,13 +151,13 @@
   def VLLEZH : UnaryVRX<"vllezh", 0xE704, z_vllezi16, v128h, 2, 1>;
   def VLLEZF : UnaryVRX<"vllezf", 0xE704, z_vllezi32, v128f, 4, 2>;
   def VLLEZG : UnaryVRX<"vllezg", 0xE704, z_vllezi64, v128g, 8, 3>;
-  def : Pat<(v4f32 (z_vllezf32 bdxaddr12only:$addr)),
+  def : Pat<(z_vllezf32 bdxaddr12only:$addr),
             (VLLEZF bdxaddr12only:$addr)>;
-  def : Pat<(v2f64 (z_vllezf64 bdxaddr12only:$addr)),
+  def : Pat<(z_vllezf64 bdxaddr12only:$addr),
             (VLLEZG bdxaddr12only:$addr)>;
   let Predicates = [FeatureVectorEnhancements1] in {
     def VLLEZLF : UnaryVRX<"vllezlf", 0xE704, z_vllezli32, v128f, 4, 6>;
-    def : Pat<(v4f32 (z_vllezlf32 bdxaddr12only:$addr)),
+    def : Pat<(z_vllezlf32 bdxaddr12only:$addr),
               (VLLEZLF bdxaddr12only:$addr)>;
   }
 
diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td
index c55a627..626675b 100644
--- a/lib/Target/SystemZ/SystemZOperators.td
+++ b/lib/Target/SystemZ/SystemZOperators.td
@@ -745,37 +745,37 @@
 def z_vllezi8  : z_vllez<i32, anyextloadi8, 7>;
 def z_vllezi16 : z_vllez<i32, anyextloadi16, 3>;
 def z_vllezi32 : z_vllez<i32, load, 1>;
-def z_vllezi64 : PatFrag<(ops node:$addr),
-                         (z_join_dwords (i64 (load node:$addr)), (i64 0))>;
+def z_vllezi64 : PatFrags<(ops node:$addr),
+                          [(z_vector_insert (z_vzero),
+                                            (i64 (load node:$addr)), (i32 0)),
+                           (z_join_dwords (i64 (load node:$addr)), (i64 0))]>;
 // We use high merges to form a v4f32 from four f32s.  Propagating zero
 // into all elements but index 1 gives this expression.
 def z_vllezf32 : PatFrag<(ops node:$addr),
-                         (bitconvert
-                          (z_merge_high
-                           (v2i64
-                            (z_unpackl_high
-                             (v4i32
-                              (bitconvert
-                               (v4f32 (scalar_to_vector
-                                       (f32 (load node:$addr)))))))),
-                           (v2i64 (z_vzero))))>;
+                         (z_merge_high
+                          (v2i64
+                           (z_unpackl_high
+                            (v4i32
+                             (bitconvert
+                              (v4f32 (scalar_to_vector
+                                      (f32 (load node:$addr)))))))),
+                          (v2i64 (z_vzero)))>;
 def z_vllezf64 : PatFrag<(ops node:$addr),
                          (z_merge_high
-                          (scalar_to_vector (f64 (load node:$addr))),
+                          (v2f64 (scalar_to_vector (f64 (load node:$addr)))),
                           (z_vzero))>;
 
 // Similarly for the high element of a zeroed vector.
 def z_vllezli32 : z_vllez<i32, load, 0>;
 def z_vllezlf32 : PatFrag<(ops node:$addr),
-                          (bitconvert
-                           (z_merge_high
-                            (v2i64
-                             (bitconvert
-                              (z_merge_high
-                               (v4f32 (scalar_to_vector
-                                       (f32 (load node:$addr)))),
-                               (v4f32 (z_vzero))))),
-                            (v2i64 (z_vzero))))>;
+                          (z_merge_high
+                           (v2i64
+                            (bitconvert
+                             (z_merge_high
+                              (v4f32 (scalar_to_vector
+                                      (f32 (load node:$addr)))),
+                              (v4f32 (z_vzero))))),
+                           (v2i64 (z_vzero)))>;
 
 // Store one element of a vector.
 class z_vste<ValueType scalartype, SDPatternOperator store>
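
The rewritten z_vllez fragments above keep the same semantics as before, only without the outer bitconvert: VLLEZ loads a single scalar and zeroes every other lane, with the f32 form placing the element at lane 1 (VLLEZF) and the vector-enhancements form at lane 0 (VLLEZLF). A scalar model of those two placements (sketch only, not the TableGen patterns):

    #include <array>

    // vllezf: load one f32 into lane 1 of an otherwise zero v4f32.
    std::array<float, 4> vllezf32Model(const float *Addr) {
      return {0.0f, *Addr, 0.0f, 0.0f};
    }

    // vllezlf (FeatureVectorEnhancements1): the element lands in lane 0.
    std::array<float, 4> vllezlf32Model(const float *Addr) {
      return {*Addr, 0.0f, 0.0f, 0.0f};
    }

    int main() {
      float F = 2.0f;
      return (vllezf32Model(&F)[1] == 2.0f && vllezlf32Model(&F)[0] == 2.0f)
                 ? 0
                 : 1;
    }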
diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp
index 37d398d..bae45ae 100644
--- a/lib/Target/TargetMachineC.cpp
+++ b/lib/Target/TargetMachineC.cpp
@@ -115,6 +115,15 @@
     case LLVMRelocDynamicNoPic:
       RM = Reloc::DynamicNoPIC;
       break;
+    case LLVMRelocROPI:
+      RM = Reloc::ROPI;
+      break;
+    case LLVMRelocRWPI:
+      RM = Reloc::RWPI;
+      break;
+    case LLVMRelocROPI_RWPI:
+      RM = Reloc::ROPI_RWPI;
+      break;
     default:
       break;
   }
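
With these cases handled, the C API no longer silently maps ROPI/RWPI requests to the default relocation model. A hedged usage sketch; it assumes the matching LLVMRelocROPI/LLVMRelocRWPI/LLVMRelocROPI_RWPI enumerators are added to llvm-c/TargetMachine.h by the corresponding header change, which is outside this hunk:

    #include "llvm-c/TargetMachine.h"

    // Create a target machine that uses read-only position independence.
    // Target and Triple are assumed to come from LLVMGetTargetFromTriple and
    // LLVMGetDefaultTargetTriple, as usual for the C API.
    LLVMTargetMachineRef createROPIMachine(LLVMTargetRef Target,
                                           const char *Triple) {
      return LLVMCreateTargetMachine(Target, Triple, /*CPU=*/"",
                                     /*Features=*/"", LLVMCodeGenLevelDefault,
                                     LLVMRelocROPI, LLVMCodeModelDefault);
    }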
diff --git a/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
index 9688090..0a5908f 100644
--- a/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
+++ b/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -38,7 +38,7 @@
 /// WebAssemblyOperand - Instances of this class represent the operands in a
 /// parsed WASM machine instruction.
 struct WebAssemblyOperand : public MCParsedAsmOperand {
-  enum KindTy { Token, Integer, Float, Symbol } Kind;
+  enum KindTy { Token, Integer, Float, Symbol, BrList } Kind;
 
   SMLoc StartLoc, EndLoc;
 
@@ -58,11 +58,16 @@
     const MCExpr *Exp;
   };
 
+  struct BrLOp {
+    std::vector<unsigned> List;
+  };
+
   union {
     struct TokOp Tok;
     struct IntOp Int;
     struct FltOp Flt;
     struct SymOp Sym;
+    struct BrLOp BrL;
   };
 
   WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, TokOp T)
@@ -73,6 +78,13 @@
       : Kind(K), StartLoc(Start), EndLoc(End), Flt(F) {}
   WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, SymOp S)
       : Kind(K), StartLoc(Start), EndLoc(End), Sym(S) {}
+  WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End)
+      : Kind(K), StartLoc(Start), EndLoc(End), BrL() {}
+
+  ~WebAssemblyOperand() {
+    if (isBrList())
+      BrL.~BrLOp();
+  }
 
   bool isToken() const override { return Kind == Token; }
   bool isImm() const override {
@@ -80,6 +92,7 @@
   }
   bool isMem() const override { return false; }
   bool isReg() const override { return false; }
+  bool isBrList() const { return Kind == BrList; }
 
   unsigned getReg() const override {
     llvm_unreachable("Assembly inspects a register operand");
@@ -111,6 +124,12 @@
       llvm_unreachable("Should be immediate or symbol!");
   }
 
+  void addBrListOperands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && isBrList() && "Invalid BrList!");
+    for (auto Br : BrL.List)
+      Inst.addOperand(MCOperand::createImm(Br));
+  }
+
   void print(raw_ostream &OS) const override {
     switch (Kind) {
     case Token:
@@ -125,6 +144,9 @@
     case Symbol:
       OS << "Sym:" << Sym.Exp;
       break;
+    case BrList:
+      OS << "BrList:" << BrL.List.size();
+      break;
     }
   }
 };
@@ -150,6 +172,18 @@
     Instructions,
   } CurrentState = FileStart;
 
+  // For ensuring blocks are properly nested.
+  enum NestingType {
+    Function,
+    Block,
+    Loop,
+    Try,
+    If,
+    Else,
+    Undefined,
+  };
+  std::vector<NestingType> NestingStack;
+
   // We track this to see if a .functype following a label is the same,
   // as this is how we recognize the start of a function.
   MCSymbol *LastLabel = nullptr;
@@ -162,10 +196,6 @@
     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
   }
 
-  void addSignature(std::unique_ptr<wasm::WasmSignature> &&Sig) {
-    Signatures.push_back(std::move(Sig));
-  }
-
 #define GET_ASSEMBLER_HEADER
 #include "WebAssemblyGenAsmMatcher.inc"
 
@@ -175,10 +205,60 @@
     llvm_unreachable("ParseRegister is not implemented.");
   }
 
-  bool error(const StringRef &Msg, const AsmToken &Tok) {
+  bool error(const Twine &Msg, const AsmToken &Tok) {
     return Parser.Error(Tok.getLoc(), Msg + Tok.getString());
   }
 
+  bool error(const Twine &Msg) {
+    return Parser.Error(Lexer.getTok().getLoc(), Msg);
+  }
+
+  void addSignature(std::unique_ptr<wasm::WasmSignature> &&Sig) {
+    Signatures.push_back(std::move(Sig));
+  }
+
+  std::pair<StringRef, StringRef> nestingString(NestingType NT) {
+    switch (NT) {
+    case Function:
+      return {"function", "end_function"};
+    case Block:
+      return {"block", "end_block"};
+    case Loop:
+      return {"loop", "end_loop"};
+    case Try:
+      return {"try", "end_try"};
+    case If:
+      return {"if", "end_if"};
+    case Else:
+      return {"else", "end_if"};
+    default:
+      llvm_unreachable("unknown NestingType");
+    }
+  }
+
+  void push(NestingType NT) { NestingStack.push_back(NT); }
+
+  bool pop(StringRef Ins, NestingType NT1, NestingType NT2 = Undefined) {
+    if (NestingStack.empty())
+      return error(Twine("End of block construct with no start: ") + Ins);
+    auto Top = NestingStack.back();
+    if (Top != NT1 && Top != NT2)
+      return error(Twine("Block construct type mismatch, expected: ") +
+                   nestingString(Top).second + ", instead got: " + Ins);
+    NestingStack.pop_back();
+    return false;
+  }
+
+  bool ensureEmptyNestingStack() {
+    auto err = !NestingStack.empty();
+    while (!NestingStack.empty()) {
+      error(Twine("Unmatched block construct(s) at function end: ") +
+            nestingString(NestingStack.back()).first);
+      NestingStack.pop_back();
+    }
+    return err;
+  }
+
   bool isNext(AsmToken::TokenKind Kind) {
     auto Ok = Lexer.is(Kind);
     if (Ok)
@@ -221,6 +301,18 @@
     return Optional<wasm::ValType>();
   }
 
+  WebAssembly::ExprType parseBlockType(StringRef ID) {
+    return StringSwitch<WebAssembly::ExprType>(ID)
+        .Case("i32", WebAssembly::ExprType::I32)
+        .Case("i64", WebAssembly::ExprType::I64)
+        .Case("f32", WebAssembly::ExprType::F32)
+        .Case("f64", WebAssembly::ExprType::F64)
+        .Case("v128", WebAssembly::ExprType::V128)
+        .Case("except_ref", WebAssembly::ExprType::ExceptRef)
+        .Case("void", WebAssembly::ExprType::Void)
+        .Default(WebAssembly::ExprType::Invalid);
+  }
+
   bool parseRegTypeList(SmallVectorImpl<wasm::ValType> &Types) {
     while (Lexer.is(AsmToken::Identifier)) {
       auto Type = parseType(Lexer.getTok().getString());
@@ -271,6 +363,13 @@
     return false;
   }
 
+  void addBlockTypeOperand(OperandVector &Operands, SMLoc NameLoc,
+                           WebAssembly::ExprType BT) {
+    Operands.push_back(make_unique<WebAssemblyOperand>(
+        WebAssemblyOperand::Integer, NameLoc, NameLoc,
+        WebAssemblyOperand::IntOp{static_cast<int64_t>(BT)}));
+  }
+
   bool ParseInstruction(ParseInstructionInfo & /*Info*/, StringRef Name,
                         SMLoc NameLoc, OperandVector &Operands) override {
     // Note: Name does NOT point into the source code, but to a local, so
@@ -305,18 +404,72 @@
     // If no '.', there is no type prefix.
     auto BaseName = NamePair.second.empty() ? NamePair.first : NamePair.second;
 
+    // If this instruction is part of a control flow structure, ensure
+    // proper nesting.
+    bool ExpectBlockType = false;
+    if (BaseName == "block") {
+      push(Block);
+      ExpectBlockType = true;
+    } else if (BaseName == "loop") {
+      push(Loop);
+      ExpectBlockType = true;
+    } else if (BaseName == "try") {
+      push(Try);
+      ExpectBlockType = true;
+    } else if (BaseName == "if") {
+      push(If);
+      ExpectBlockType = true;
+    } else if (BaseName == "else") {
+      if (pop(BaseName, If))
+        return true;
+      push(Else);
+    } else if (BaseName == "catch") {
+      if (pop(BaseName, Try))
+        return true;
+      push(Try);
+    } else if (BaseName == "catch_all") {
+      if (pop(BaseName, Try))
+        return true;
+      push(Try);
+    } else if (BaseName == "end_if") {
+      if (pop(BaseName, If, Else))
+        return true;
+    } else if (BaseName == "end_try") {
+      if (pop(BaseName, Try))
+        return true;
+    } else if (BaseName == "end_loop") {
+      if (pop(BaseName, Loop))
+        return true;
+    } else if (BaseName == "end_block") {
+      if (pop(BaseName, Block))
+        return true;
+    } else if (BaseName == "end_function") {
+      if (pop(BaseName, Function) || ensureEmptyNestingStack())
+        return true;
+    }
+
     while (Lexer.isNot(AsmToken::EndOfStatement)) {
       auto &Tok = Lexer.getTok();
       switch (Tok.getKind()) {
       case AsmToken::Identifier: {
         auto &Id = Lexer.getTok();
-        const MCExpr *Val;
-        SMLoc End;
-        if (Parser.parsePrimaryExpr(Val, End))
-          return error("Cannot parse symbol: ", Lexer.getTok());
-        Operands.push_back(make_unique<WebAssemblyOperand>(
-            WebAssemblyOperand::Symbol, Id.getLoc(), Id.getEndLoc(),
-            WebAssemblyOperand::SymOp{Val}));
+        if (ExpectBlockType) {
+          // Assume this identifier is a block_type.
+          auto BT = parseBlockType(Id.getString());
+          if (BT == WebAssembly::ExprType::Invalid)
+            return error("Unknown block type: ", Id);
+          addBlockTypeOperand(Operands, NameLoc, BT);
+          Parser.Lex();
+        } else {
+          // Assume this identifier is a label.
+          const MCExpr *Val;
+          SMLoc End;
+          if (Parser.parsePrimaryExpr(Val, End))
+            return error("Cannot parse symbol: ", Lexer.getTok());
+          Operands.push_back(make_unique<WebAssemblyOperand>(
+              WebAssemblyOperand::Symbol, Id.getLoc(), Id.getEndLoc(),
+              WebAssemblyOperand::SymOp{Val}));
+        }
         break;
       }
       case AsmToken::Minus:
@@ -340,6 +493,21 @@
         Parser.Lex();
         break;
       }
+      case AsmToken::LCurly: {
+        Parser.Lex();
+        auto Op = make_unique<WebAssemblyOperand>(
+            WebAssemblyOperand::BrList, Tok.getLoc(), Tok.getEndLoc());
+        if (!Lexer.is(AsmToken::RCurly))
+          for (;;) {
+            Op->BrL.List.push_back(Lexer.getTok().getIntVal());
+            expect(AsmToken::Integer, "integer");
+            if (!isNext(AsmToken::Comma))
+              break;
+          }
+        expect(AsmToken::RCurly, "}");
+        Operands.push_back(std::move(Op));
+        break;
+      }
       default:
         return error("Unexpected token in operand: ", Tok);
       }
@@ -348,17 +516,11 @@
           return true;
       }
     }
-    Parser.Lex();
-
-    // Block instructions require a signature index, but these are missing in
-    // assembly, so we add a dummy one explicitly (since we have no control
-    // over signature tables here, we assume these will be regenerated when
-    // the wasm module is generated).
-    if (BaseName == "block" || BaseName == "loop" || BaseName == "try") {
-      Operands.push_back(make_unique<WebAssemblyOperand>(
-          WebAssemblyOperand::Integer, NameLoc, NameLoc,
-          WebAssemblyOperand::IntOp{-1}));
+    if (ExpectBlockType && Operands.size() == 1) {
+      // Blocks with no explicit block type operand default to void.
+      addBlockTypeOperand(Operands, NameLoc, WebAssembly::ExprType::Void);
     }
+    Parser.Lex();
     return false;
   }
 
@@ -439,7 +601,10 @@
           TOut.getStreamer().getContext().getOrCreateSymbol(SymName));
       if (CurrentState == Label && WasmSym == LastLabel) {
         // This .functype indicates a start of a function.
+        if (ensureEmptyNestingStack())
+          return true;
         CurrentState = FunctionStart;
+        push(Function);
       }
       auto Signature = make_unique<wasm::WasmSignature>();
       if (parseSignature(Signature.get()))
@@ -528,6 +693,8 @@
     }
     llvm_unreachable("Implement any new match types added!");
   }
+
+  void onEndOfFile() override { ensureEmptyNestingStack(); }
 };
 } // end anonymous namespace
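
The nesting machinery above rejects input like block ... end_loop and flags constructs left open at end_function or end of file. A miniature of the same stack discipline, standalone and independent of the MC layer (it deliberately elides the else/catch special cases the parser handles):

    #include <string>
    #include <vector>

    // Push on block starts, pop on matching ends; anything left over at the
    // end is an unmatched construct.
    bool checkNesting(const std::vector<std::string> &Instrs) {
      std::vector<std::string> Stack;
      for (const std::string &I : Instrs) {
        if (I == "block" || I == "loop" || I == "try" || I == "if") {
          Stack.push_back(I);
        } else if (I.compare(0, 4, "end_") == 0) {
          if (Stack.empty() || Stack.back() != I.substr(4))
            return false; // Mismatched or unopened end marker.
          Stack.pop_back();
        }
      }
      return Stack.empty();
    }

    int main() {
      bool Ok = checkNesting({"block", "loop", "end_loop", "end_block"});
      bool Bad = checkNesting({"loop", "end_block"}); // end_loop expected
      return (Ok && !Bad) ? 0 : 1;
    }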
 
diff --git a/lib/Target/WebAssembly/CMakeLists.txt b/lib/Target/WebAssembly/CMakeLists.txt
index 549229a..1f3b7d9 100644
--- a/lib/Target/WebAssembly/CMakeLists.txt
+++ b/lib/Target/WebAssembly/CMakeLists.txt
@@ -19,6 +19,7 @@
   WebAssemblyCallIndirectFixup.cpp
   WebAssemblyCFGStackify.cpp
   WebAssemblyCFGSort.cpp
+  WebAssemblyDebugValueManager.cpp
   WebAssemblyLateEHPrepare.cpp
   WebAssemblyEHRestoreStackPointer.cpp
   WebAssemblyExceptionInfo.cpp
@@ -47,7 +48,7 @@
   WebAssemblyRuntimeLibcallSignatures.cpp
   WebAssemblySelectionDAGInfo.cpp
   WebAssemblySetP2AlignOperands.cpp
-  WebAssemblyStoreResults.cpp
+  WebAssemblyMemIntrinsicResults.cpp
   WebAssemblySubtarget.cpp
   WebAssemblyTargetMachine.cpp
   WebAssemblyTargetObjectFile.cpp
diff --git a/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp b/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
index c068d6b..6acc9b2 100644
--- a/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
+++ b/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
@@ -151,7 +151,8 @@
   MI.setOpcode(WasmInst->Opcode);
   // Parse any operands.
   for (uint8_t OPI = 0; OPI < WasmInst->NumOperands; OPI++) {
-    switch (OperandTable[WasmInst->OperandStart + OPI]) {
+    auto OT = OperandTable[WasmInst->OperandStart + OPI];
+    switch (OT) {
     // ULEB operands:
     case WebAssembly::OPERAND_BASIC_BLOCK:
     case WebAssembly::OPERAND_LOCAL:
@@ -167,12 +168,17 @@
     }
     // SLEB operands:
     case WebAssembly::OPERAND_I32IMM:
-    case WebAssembly::OPERAND_I64IMM:
-    case WebAssembly::OPERAND_SIGNATURE: {
+    case WebAssembly::OPERAND_I64IMM: {
       if (!parseLEBImmediate(MI, Size, Bytes, true))
         return MCDisassembler::Fail;
       break;
     }
+    // block_type operands (uint8_t).
+    case WebAssembly::OPERAND_SIGNATURE: {
+      if (!parseImmediate<uint8_t>(MI, Size, Bytes))
+        return MCDisassembler::Fail;
+      break;
+    }
     // FP operands.
     case WebAssembly::OPERAND_F32IMM: {
       if (!parseImmediate<float>(MI, Size, Bytes))
@@ -205,6 +211,19 @@
         return MCDisassembler::Fail;
       break;
     }
+    case WebAssembly::OPERAND_BRLIST: {
+      int64_t TargetTableLen;
+      if (!nextLEB(TargetTableLen, Bytes, Size, false))
+        return MCDisassembler::Fail;
+      for (int64_t I = 0; I < TargetTableLen; I++) {
+        if (!parseLEBImmediate(MI, Size, Bytes, false))
+          return MCDisassembler::Fail;
+      }
+      // The default branch target follows the table entries.
+      if (!parseLEBImmediate(MI, Size, Bytes, false))
+        return MCDisassembler::Fail;
+      break;
+    }
     case MCOI::OPERAND_REGISTER:
       // The tablegen header currently does not have any register operands since
       // we use only the stack (_S) instructions.
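
The OPERAND_BRLIST case above reads a ULEB128 entry count, that many ULEB128 branch depths, and finally the default target, which is how br_table is encoded in the binary format. A minimal standalone decoder of the same shape (the in-tree nextLEB/parseLEBImmediate additionally handle truncated input and signed LEBs):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Unsigned LEB128: 7 payload bits per byte, MSB set means "continue".
    uint64_t readULEB128(const uint8_t *&P) {
      uint64_t Val = 0;
      unsigned Shift = 0;
      uint8_t Byte;
      do {
        Byte = *P++;
        Val |= uint64_t(Byte & 0x7f) << Shift;
        Shift += 7;
      } while (Byte & 0x80);
      return Val;
    }

    int main() {
      // A br_table operand: count = 2, targets {1, 300}, default 0.
      const uint8_t Enc[] = {0x02, 0x01, 0xac, 0x02, 0x00};
      const uint8_t *P = Enc;
      uint64_t Count = readULEB128(P);
      std::vector<uint64_t> Targets;
      for (uint64_t I = 0; I < Count; ++I)
        Targets.push_back(readULEB128(P));
      uint64_t Default = readULEB128(P);
      assert(Count == 2 && Targets[0] == 1 && Targets[1] == 300 &&
             Default == 0);
    }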
diff --git a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
index e94faa1..15532d7 100644
--- a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
+++ b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
@@ -40,7 +40,7 @@
 void WebAssemblyInstPrinter::printRegName(raw_ostream &OS,
                                           unsigned RegNo) const {
   assert(RegNo != WebAssemblyFunctionInfo::UnusedReg);
-  // Note that there's an implicit get_local/set_local here!
+  // Note that there's an implicit local.get/local.set here!
   OS << "$" << RegNo;
 }
 
@@ -95,23 +95,32 @@
 
     case WebAssembly::END_LOOP:
     case WebAssembly::END_LOOP_S:
-      assert(!ControlFlowStack.empty() && "End marker mismatch!");
-      ControlFlowStack.pop_back();
+      if (ControlFlowStack.empty()) {
+        printAnnotation(OS, "End marker mismatch!");
+      } else {
+        ControlFlowStack.pop_back();
+      }
       break;
 
     case WebAssembly::END_BLOCK:
     case WebAssembly::END_BLOCK_S:
-      assert(!ControlFlowStack.empty() && "End marker mismatch!");
-      printAnnotation(
-          OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':');
+      if (ControlFlowStack.empty()) {
+        printAnnotation(OS, "End marker mismatch!");
+      } else {
+        printAnnotation(
+            OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':');
+      }
       break;
 
     case WebAssembly::END_TRY:
     case WebAssembly::END_TRY_S:
-      assert(!ControlFlowStack.empty() && "End marker mismatch!");
-      printAnnotation(
-          OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':');
-      LastSeenEHInst = END_TRY;
+      if (ControlFlowStack.empty()) {
+        printAnnotation(OS, "End marker mismatch!");
+      } else {
+        printAnnotation(
+            OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':');
+        LastSeenEHInst = END_TRY;
+      }
       break;
 
     case WebAssembly::CATCH_I32:
@@ -123,8 +132,12 @@
       // There can be multiple catch instructions for one try instruction, so we
       // print a label only for the first 'catch' label.
       if (LastSeenEHInst != CATCH) {
-        assert(!EHPadStack.empty() && "try-catch mismatch!");
-        printAnnotation(OS, "catch" + utostr(EHPadStack.pop_back_val()) + ':');
+        if (EHPadStack.empty()) {
+          printAnnotation(OS, "try-catch mismatch!");
+        } else {
+          printAnnotation(OS,
+                          "catch" + utostr(EHPadStack.pop_back_val()) + ':');
+        }
       }
       LastSeenEHInst = CATCH;
       break;
@@ -134,18 +147,27 @@
     unsigned NumFixedOperands = Desc.NumOperands;
     SmallSet<uint64_t, 8> Printed;
     for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
-      if (!(i < NumFixedOperands
-                ? (Desc.OpInfo[i].OperandType ==
-                   WebAssembly::OPERAND_BASIC_BLOCK)
-                : (Desc.TSFlags & WebAssemblyII::VariableOpImmediateIsLabel)))
-        continue;
+      // See if this operand denotes a basic block target.
+      if (i < NumFixedOperands) {
+        // A non-variable_ops operand, check its type.
+        if (Desc.OpInfo[i].OperandType != WebAssembly::OPERAND_BASIC_BLOCK)
+          continue;
+      } else {
+        // A variable_ops operand, which currently can be immediates (used in
+        // br_table) which are basic block targets, or for call instructions
+        // when using -wasm-keep-registers (in which case they are registers,
+        // and should not be processed).
+        if (!MI->getOperand(i).isImm())
+          continue;
+      }
       uint64_t Depth = MI->getOperand(i).getImm();
       if (!Printed.insert(Depth).second)
         continue;
 
       if (Opc == WebAssembly::RETHROW || Opc == WebAssembly::RETHROW_S) {
-        assert(Depth <= EHPadStack.size() && "Invalid depth argument!");
-        if (Depth == EHPadStack.size()) {
+        if (Depth > EHPadStack.size()) {
+          printAnnotation(OS, "Invalid depth argument!");
+        } else if (Depth == EHPadStack.size()) {
           // This can happen when a rethrow instruction breaks out of all
           // and throws up to the current function's caller.
           printAnnotation(OS, utostr(Depth) + ": " + "to caller");
@@ -156,11 +178,14 @@
         }
 
       } else {
-        assert(Depth < ControlFlowStack.size() && "Invalid depth argument!");
-        const auto &Pair = ControlFlowStack.rbegin()[Depth];
-        printAnnotation(OS, utostr(Depth) + ": " +
-                                (Pair.second ? "up" : "down") + " to label" +
-                                utostr(Pair.first));
+        if (Depth >= ControlFlowStack.size()) {
+          printAnnotation(OS, "Invalid depth argument!");
+        } else {
+          const auto &Pair = ControlFlowStack.rbegin()[Depth];
+          printAnnotation(OS, utostr(Depth) + ": " +
+                                  (Pair.second ? "up" : "down") + " to label" +
+                                  utostr(Pair.first));
+        }
       }
     }
   }
@@ -194,9 +219,6 @@
                                           raw_ostream &O) {
   const MCOperand &Op = MI->getOperand(OpNo);
   if (Op.isReg()) {
-    assert((OpNo < MII.get(MI->getOpcode()).getNumOperands() ||
-            MII.get(MI->getOpcode()).TSFlags == 0) &&
-           "WebAssembly variable_ops register ops don't use TSFlags");
     unsigned WAReg = Op.getReg();
     if (int(WAReg) >= 0)
       printRegName(O, WAReg);
@@ -210,23 +232,9 @@
     if (OpNo < MII.get(MI->getOpcode()).getNumDefs())
       O << '=';
   } else if (Op.isImm()) {
-    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
-    assert((OpNo < Desc.getNumOperands() ||
-            (Desc.TSFlags & WebAssemblyII::VariableOpIsImmediate)) &&
-           "WebAssemblyII::VariableOpIsImmediate should be set for "
-           "variable_ops immediate ops");
-    (void)Desc;
-    // TODO: (MII.get(MI->getOpcode()).TSFlags &
-    //        WebAssemblyII::VariableOpImmediateIsLabel)
-    // can tell us whether this is an immediate referencing a label in the
-    // control flow stack, and it may be nice to pretty-print.
     O << Op.getImm();
   } else if (Op.isFPImm()) {
     const MCInstrDesc &Desc = MII.get(MI->getOpcode());
-    assert(OpNo < Desc.getNumOperands() &&
-           "Unexpected floating-point immediate as a non-fixed operand");
-    assert(Desc.TSFlags == 0 &&
-           "WebAssembly variable_ops floating point ops don't use TSFlags");
     const MCOperandInfo &Info = Desc.OpInfo[OpNo];
     if (Info.OperandType == WebAssembly::OPERAND_F32IMM) {
       // TODO: MC converts all floating point immediate operands to double.
@@ -237,16 +245,22 @@
       O << ::toString(APFloat(Op.getFPImm()));
     }
   } else {
-    assert((OpNo < MII.get(MI->getOpcode()).getNumOperands() ||
-            (MII.get(MI->getOpcode()).TSFlags &
-             WebAssemblyII::VariableOpIsImmediate)) &&
-           "WebAssemblyII::VariableOpIsImmediate should be set for "
-           "variable_ops expr ops");
     assert(Op.isExpr() && "unknown operand kind in printOperand");
     Op.getExpr()->print(O, &MAI);
   }
 }
 
+void WebAssemblyInstPrinter::printBrList(const MCInst *MI, unsigned OpNo,
+                                         raw_ostream &O) {
+  O << "{";
+  for (unsigned I = OpNo, E = MI->getNumOperands(); I != E; ++I) {
+    if (I != OpNo)
+      O << ", ";
+    O << MI->getOperand(I).getImm();
+  }
+  O << "}";
+}
+
 void WebAssemblyInstPrinter::printWebAssemblyP2AlignOperand(const MCInst *MI,
                                                             unsigned OpNo,
                                                             raw_ostream &O) {
@@ -259,45 +273,38 @@
 void WebAssemblyInstPrinter::printWebAssemblySignatureOperand(const MCInst *MI,
                                                               unsigned OpNo,
                                                               raw_ostream &O) {
-  int64_t Imm = MI->getOperand(OpNo).getImm();
-  switch (WebAssembly::ExprType(Imm)) {
-  case WebAssembly::ExprType::Void:
-    break;
-  case WebAssembly::ExprType::I32:
-    O << "i32";
-    break;
-  case WebAssembly::ExprType::I64:
-    O << "i64";
-    break;
-  case WebAssembly::ExprType::F32:
-    O << "f32";
-    break;
-  case WebAssembly::ExprType::F64:
-    O << "f64";
-    break;
-  case WebAssembly::ExprType::V128:
-    O << "v128";
-    break;
-  case WebAssembly::ExprType::ExceptRef:
-    O << "except_ref";
-    break;
+  auto Imm = static_cast<unsigned>(MI->getOperand(OpNo).getImm());
+  if (Imm != wasm::WASM_TYPE_NORESULT)
+    O << WebAssembly::anyTypeToString(Imm);
+}
+
+// We have various enums representing a subset of these types, use this
+// function to convert any of them to text.
+const char *llvm::WebAssembly::anyTypeToString(unsigned Ty) {
+  switch (Ty) {
+  case wasm::WASM_TYPE_I32:
+    return "i32";
+  case wasm::WASM_TYPE_I64:
+    return "i64";
+  case wasm::WASM_TYPE_F32:
+    return "f32";
+  case wasm::WASM_TYPE_F64:
+    return "f64";
+  case wasm::WASM_TYPE_V128:
+    return "v128";
+  case wasm::WASM_TYPE_FUNCREF:
+    return "funcref";
+  case wasm::WASM_TYPE_FUNC:
+    return "func";
+  case wasm::WASM_TYPE_EXCEPT_REF:
+    return "except_ref";
+  case wasm::WASM_TYPE_NORESULT:
+    return "void";
+  default:
+    return "invalid_type";
   }
 }
 
-const char *llvm::WebAssembly::TypeToString(wasm::ValType Ty) {
-  switch (Ty) {
-  case wasm::ValType::I32:
-    return "i32";
-  case wasm::ValType::I64:
-    return "i64";
-  case wasm::ValType::F32:
-    return "f32";
-  case wasm::ValType::F64:
-    return "f64";
-  case wasm::ValType::V128:
-    return "v128";
-  case wasm::ValType::EXCEPT_REF:
-    return "except_ref";
-  }
-  llvm_unreachable("Unknown wasm::ValType");
+const char *llvm::WebAssembly::typeToString(wasm::ValType Ty) {
+  return anyTypeToString(static_cast<unsigned>(Ty));
 }
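
Collapsing the two printers into anyTypeToString means one helper now covers wasm::ValType values and raw binary type encodings alike, with unknown encodings degrading to a placeholder rather than hitting llvm_unreachable. Expected behavior, sketched as a small self-check against the declarations in WebAssemblyInstPrinter.h (the include path and link setup are assumptions):

    #include "InstPrinter/WebAssemblyInstPrinter.h"
    #include "llvm/BinaryFormat/Wasm.h"
    #include <cassert>
    #include <cstring>

    int main() {
      using namespace llvm;
      // wasm::ValType uses the same binary encodings as WASM_TYPE_*.
      assert(!strcmp(WebAssembly::typeToString(wasm::ValType::I32), "i32"));
      assert(!strcmp(WebAssembly::anyTypeToString(wasm::WASM_TYPE_F64),
                     "f64"));
      // An unknown encoding prints a placeholder instead of asserting.
      assert(!strcmp(WebAssembly::anyTypeToString(0x12), "invalid_type"));
    }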
diff --git a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
index ded64f9..5ad45c7 100644
--- a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
+++ b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
@@ -43,6 +43,7 @@
 
   // Used by tablegen code.
   void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+  void printBrList(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printWebAssemblyP2AlignOperand(const MCInst *MI, unsigned OpNo,
                                       raw_ostream &O);
   void printWebAssemblySignatureOperand(const MCInst *MI, unsigned OpNo,
@@ -55,7 +56,8 @@
 
 namespace WebAssembly {
 
-const char *TypeToString(wasm::ValType Ty);
+const char *typeToString(wasm::ValType Ty);
+const char *anyTypeToString(unsigned Ty);
 
 } // end namespace WebAssembly
 
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
index 490c00a..065a4dc 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -89,8 +89,6 @@
 
     } else if (MO.isImm()) {
       if (i < Desc.getNumOperands()) {
-        assert(Desc.TSFlags == 0 &&
-               "WebAssembly non-variable_ops don't use TSFlags");
         const MCOperandInfo &Info = Desc.OpInfo[i];
         LLVM_DEBUG(dbgs() << "Encoding immediate: type="
                           << int(Info.OperandType) << "\n");
@@ -125,16 +123,10 @@
           encodeULEB128(uint64_t(MO.getImm()), OS);
         }
       } else {
-        assert(Desc.TSFlags == (WebAssemblyII::VariableOpIsImmediate |
-                                WebAssemblyII::VariableOpImmediateIsLabel));
         encodeULEB128(uint64_t(MO.getImm()), OS);
       }
 
     } else if (MO.isFPImm()) {
-      assert(i < Desc.getNumOperands() &&
-             "Unexpected floating-point immediate as a non-fixed operand");
-      assert(Desc.TSFlags == 0 &&
-             "WebAssembly variable_ops floating point ops don't use TSFlags");
       const MCOperandInfo &Info = Desc.OpInfo[i];
       if (Info.OperandType == WebAssembly::OPERAND_F32IMM) {
         // TODO: MC converts all floating point immediate operands to double.
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
index 69d4db9..a01517f 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -79,18 +79,12 @@
   OPERAND_TYPEINDEX,
   /// Event index.
   OPERAND_EVENT,
+  /// A list of branch targets for br_list.
+  OPERAND_BRLIST,
 };
 } // end namespace WebAssembly
 
 namespace WebAssemblyII {
-enum {
-  // For variadic instructions, this flag indicates whether an operand
-  // in the variable_ops range is an immediate value.
-  VariableOpIsImmediate = (1 << 0),
-  // For immediate values in the variable_ops range, this flag indicates
-  // whether the value represents a control-flow label.
-  VariableOpImmediateIsLabel = (1 << 1)
-};
 
 /// Target Operand Flag enum.
 enum TOF {
@@ -354,7 +348,8 @@
   F32 = 0x7D,
   F64 = 0x7C,
   V128 = 0x7B,
-  ExceptRef = 0x68
+  ExceptRef = 0x68,
+  Invalid = 0x00
 };
 
 /// Instruction opcodes emitted via means other than CodeGen.
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
index 70ac502..50143fb 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
@@ -47,7 +47,7 @@
       First = false;
     else
       OS << ", ";
-    OS << WebAssembly::TypeToString(Type);
+    OS << WebAssembly::typeToString(Type);
   }
   OS << '\n';
 }
@@ -76,7 +76,7 @@
   for (auto &Ty : Params) {
     if (&Ty != &Params[0])
       OS << ", ";
-    OS << WebAssembly::TypeToString(Ty);
+    OS << WebAssembly::typeToString(Ty);
   }
 }
 
@@ -86,7 +86,7 @@
   for (auto &Ty : Returns) {
     if (&Ty != &Returns[0])
       OS << ", ";
-    OS << WebAssembly::TypeToString(Ty);
+    OS << WebAssembly::typeToString(Ty);
   }
 }
 
@@ -99,10 +99,10 @@
 
 void WebAssemblyTargetAsmStreamer::emitGlobalType(const MCSymbolWasm *Sym) {
   assert(Sym->isGlobal());
-  OS << "\t.globaltype\t" << Sym->getName() << ", " <<
-        WebAssembly::TypeToString(
-          static_cast<wasm::ValType>(Sym->getGlobalType().Type)) <<
-        '\n';
+  OS << "\t.globaltype\t" << Sym->getName() << ", "
+     << WebAssembly::typeToString(
+            static_cast<wasm::ValType>(Sym->getGlobalType().Type))
+     << '\n';
 }
 
 void WebAssemblyTargetAsmStreamer::emitEventType(const MCSymbolWasm *Sym) {
diff --git a/lib/Target/WebAssembly/README.txt b/lib/Target/WebAssembly/README.txt
index ef0099f..a154b4b 100644
--- a/lib/Target/WebAssembly/README.txt
+++ b/lib/Target/WebAssembly/README.txt
@@ -94,10 +94,10 @@
 //===---------------------------------------------------------------------===//
 
 Instead of the OptimizeReturned pass, consider preserving the
-"returned" attribute through to MachineInstrs and extending the StoreResults
-pass to do this optimization on calls too. That would also let the
-WebAssemblyPeephole pass clean up dead defs for such calls, as it does for
-stores.
+"returned" attribute through to MachineInstrs and extending the
+MemIntrinsicResults pass to do this optimization on calls too. That would also
+let the WebAssemblyPeephole pass clean up dead defs for such calls, as it does
+for stores.
 
 //===---------------------------------------------------------------------===//
 
@@ -120,8 +120,8 @@
 It could be done with a smaller encoding like this:
 
     i32.const   $push5=, 0
-    tee_local   $push6=, $4=, $pop5
-    copy_local  $3=, $pop6
+    local.tee   $push6=, $4=, $pop5
+    local.copy  $3=, $pop6
 
 //===---------------------------------------------------------------------===//
 
@@ -180,11 +180,11 @@
 //===---------------------------------------------------------------------===//
 
 The function @dynamic_alloca_redzone in test/CodeGen/WebAssembly/userstack.ll
-ends up with a tee_local in its prolog which has an unused result, requiring
+ends up with a local.tee in its prolog which has an unused result, requiring
 an extra drop:
 
-    get_global  $push8=, 0
-    tee_local   $push9=, 1, $pop8
+    global.get  $push8=, 0
+    local.tee   $push9=, 1, $pop8
     drop        $pop9
     [...]
 
diff --git a/lib/Target/WebAssembly/WebAssembly.h b/lib/Target/WebAssembly/WebAssembly.h
index 87975ca..45145c0 100644
--- a/lib/Target/WebAssembly/WebAssembly.h
+++ b/lib/Target/WebAssembly/WebAssembly.h
@@ -43,7 +43,7 @@
 FunctionPass *createWebAssemblyReplacePhysRegs();
 FunctionPass *createWebAssemblyPrepareForLiveIntervals();
 FunctionPass *createWebAssemblyOptimizeLiveIntervals();
-FunctionPass *createWebAssemblyStoreResults();
+FunctionPass *createWebAssemblyMemIntrinsicResults();
 FunctionPass *createWebAssemblyRegStackify();
 FunctionPass *createWebAssemblyRegColoring();
 FunctionPass *createWebAssemblyExplicitLocals();
@@ -68,7 +68,7 @@
 void initializeWebAssemblyReplacePhysRegsPass(PassRegistry &);
 void initializeWebAssemblyPrepareForLiveIntervalsPass(PassRegistry &);
 void initializeWebAssemblyOptimizeLiveIntervalsPass(PassRegistry &);
-void initializeWebAssemblyStoreResultsPass(PassRegistry &);
+void initializeWebAssemblyMemIntrinsicResultsPass(PassRegistry &);
 void initializeWebAssemblyRegStackifyPass(PassRegistry &);
 void initializeWebAssemblyRegColoringPass(PassRegistry &);
 void initializeWebAssemblyExplicitLocalsPass(PassRegistry &);
diff --git a/lib/Target/WebAssembly/WebAssembly.td b/lib/Target/WebAssembly/WebAssembly.td
index ec9dbff..6b218f8 100644
--- a/lib/Target/WebAssembly/WebAssembly.td
+++ b/lib/Target/WebAssembly/WebAssembly.td
@@ -23,8 +23,15 @@
 // WebAssembly Subtarget features.
 //===----------------------------------------------------------------------===//
 
-def FeatureSIMD128 : SubtargetFeature<"simd128", "HasSIMD128", "true",
+def FeatureSIMD128 : SubtargetFeature<"simd128", "SIMDLevel", "SIMD128",
                                       "Enable 128-bit SIMD">;
+
+def FeatureUnimplementedSIMD128 :
+      SubtargetFeature<"unimplemented-simd128",
+                       "SIMDLevel", "UnimplementedSIMD128",
+                       "Enable 128-bit SIMD not yet implemented in engines",
+                       [FeatureSIMD128]>;
+
 def FeatureAtomics : SubtargetFeature<"atomics", "HasAtomics", "true",
                                       "Enable Atomics">;
 def FeatureNontrappingFPToInt :
diff --git a/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp b/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp
index 3f17160..e49e2b6 100644
--- a/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp
@@ -60,7 +60,7 @@
 }
 
 bool WebAssemblyAddMissingPrototypes::runOnModule(Module &M) {
-  LLVM_DEBUG(dbgs() << "runnning AddMissingPrototypes\n");
+  LLVM_DEBUG(dbgs() << "********** Add Missing Prototypes **********\n");
 
   std::vector<std::pair<Function *, Function *>> Replacements;
 
diff --git a/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp b/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
index 326e838..aaa6d28 100644
--- a/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
@@ -98,7 +98,7 @@
 
 bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) {
   LLVM_DEBUG(dbgs() << "********** Fixing up CALL_INDIRECTs **********\n"
-                    << MF.getName() << '\n');
+                    << "********** Function: " << MF.getName() << '\n');
 
   bool Changed = false;
   const WebAssemblyInstrInfo *TII =
diff --git a/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp b/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp
new file mode 100644
index 0000000..8ecc159
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp
@@ -0,0 +1,46 @@
+//===-- WebAssemblyDebugValueManager.cpp - WebAssembly DebugValue Manager -===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the manager for MachineInstr DebugValues.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyDebugValueManager.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+
+using namespace llvm;
+
+WebAssemblyDebugValueManager::WebAssemblyDebugValueManager(
+    MachineInstr *Instr) {
+  Instr->collectDebugValues(DbgValues);
+}
+
+void WebAssemblyDebugValueManager::move(MachineInstr *Insert) {
+  MachineBasicBlock *MBB = Insert->getParent();
+  for (MachineInstr *DBI : reverse(DbgValues))
+    MBB->splice(Insert, DBI->getParent(), DBI);
+}
+
+void WebAssemblyDebugValueManager::updateReg(unsigned Reg) {
+  for (auto *DBI : DbgValues)
+    DBI->getOperand(0).setReg(Reg);
+}
+
+void WebAssemblyDebugValueManager::clone(MachineInstr *Insert,
+                                         unsigned NewReg) {
+  MachineBasicBlock *MBB = Insert->getParent();
+  MachineFunction *MF = MBB->getParent();
+  for (MachineInstr *DBI : reverse(DbgValues)) {
+    MachineInstr *Clone = MF->CloneMachineInstr(DBI);
+    Clone->getOperand(0).setReg(NewReg);
+    MBB->insert(Insert, Clone);
+  }
+}
diff --git a/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h b/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h
new file mode 100644
index 0000000..73f3172
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h
@@ -0,0 +1,38 @@
+// WebAssemblyDebugValueManager.h - WebAssembly DebugValue Manager -*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the WebAssembly-specific
+/// manager for DebugValues associated with the specific MachineInstr.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYDEBUGVALUEMANAGER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYDEBUGVALUEMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class MachineInstr;
+
+class WebAssemblyDebugValueManager {
+  SmallVector<MachineInstr *, 2> DbgValues;
+
+public:
+  WebAssemblyDebugValueManager(MachineInstr *Instr);
+
+  void move(MachineInstr *Insert);
+  void updateReg(unsigned Reg);
+  void clone(MachineInstr *Insert, unsigned NewReg);
+};
+
+} // end namespace llvm
+
+#endif
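
The manager's intended use is in passes that move or duplicate a defining instruction (register stackification being the natural client): collect the DBG_VALUEs attached to the def up front, then keep them adjacent and correctly targeted afterwards. A hedged usage sketch, with the surrounding pass boilerplate elided:

    #include "WebAssemblyDebugValueManager.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"

    using namespace llvm;

    // Sink Def to just before Insert (same function), dragging along any
    // DBG_VALUE instructions that refer to it.
    static void sinkWithDebugValues(MachineInstr *Def, MachineInstr *Insert) {
      WebAssemblyDebugValueManager DVM(Def); // collect DBG_VALUEs first
      Insert->getParent()->splice(Insert, Def->getParent(), Def);
      DVM.move(Insert);                      // debug values follow the def
    }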
diff --git a/lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp b/lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp
index bd1ab47..c86260b 100644
--- a/lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp
@@ -53,6 +53,10 @@
 
 bool WebAssemblyEHRestoreStackPointer::runOnMachineFunction(
     MachineFunction &MF) {
+  LLVM_DEBUG(dbgs() << "********** EH Restore Stack Pointer **********\n"
+                       "********** Function: "
+                    << MF.getName() << '\n');
+
   const auto *FrameLowering = static_cast<const WebAssemblyFrameLowering *>(
       MF.getSubtarget().getFrameLowering());
   if (!FrameLowering->needsPrologForEH(MF))
diff --git a/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp b/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp
index dca7902..6b3a3e7 100644
--- a/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp
@@ -32,7 +32,10 @@
 INITIALIZE_PASS_END(WebAssemblyExceptionInfo, DEBUG_TYPE,
                     "WebAssembly Exception Information", true, true)
 
-bool WebAssemblyExceptionInfo::runOnMachineFunction(MachineFunction &F) {
+bool WebAssemblyExceptionInfo::runOnMachineFunction(MachineFunction &MF) {
+  LLVM_DEBUG(dbgs() << "********** Exception Info Calculation **********\n"
+                       "********** Function: "
+                    << MF.getName() << '\n');
   releaseMemory();
   auto &MDT = getAnalysis<MachineDominatorTree>();
   auto &MDF = getAnalysis<MachineDominanceFrontier>();
diff --git a/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index 384c0d6..27aabe6 100644
--- a/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -11,7 +11,7 @@
 /// This file converts any remaining registers into WebAssembly locals.
 ///
 /// After register stackification and register coloring, convert non-stackified
-/// registers into locals, inserting explicit get_local and set_local
+/// registers into locals, inserting explicit local.get and local.set
 /// instructions.
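+///
+/// For example (illustrative): a def that cannot stay on the wasm value stack
+/// becomes a "local.set N" following the defining instruction, and each
+/// non-stackified use becomes a "local.get N" feeding the user, where N is
+/// the local index assigned to that virtual register.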
 ///
 //===----------------------------------------------------------------------===//
@@ -96,54 +96,54 @@
   llvm_unreachable("Unexpected register class");
 }
 
-/// Get the appropriate get_local opcode for the given register class.
+/// Get the appropriate local.get opcode for the given register class.
 static unsigned getGetLocalOpcode(const TargetRegisterClass *RC) {
   if (RC == &WebAssembly::I32RegClass)
-    return WebAssembly::GET_LOCAL_I32;
+    return WebAssembly::LOCAL_GET_I32;
   if (RC == &WebAssembly::I64RegClass)
-    return WebAssembly::GET_LOCAL_I64;
+    return WebAssembly::LOCAL_GET_I64;
   if (RC == &WebAssembly::F32RegClass)
-    return WebAssembly::GET_LOCAL_F32;
+    return WebAssembly::LOCAL_GET_F32;
   if (RC == &WebAssembly::F64RegClass)
-    return WebAssembly::GET_LOCAL_F64;
+    return WebAssembly::LOCAL_GET_F64;
   if (RC == &WebAssembly::V128RegClass)
-    return WebAssembly::GET_LOCAL_V128;
+    return WebAssembly::LOCAL_GET_V128;
   if (RC == &WebAssembly::EXCEPT_REFRegClass)
-    return WebAssembly::GET_LOCAL_EXCEPT_REF;
+    return WebAssembly::LOCAL_GET_EXCEPT_REF;
   llvm_unreachable("Unexpected register class");
 }
 
-/// Get the appropriate set_local opcode for the given register class.
+/// Get the appropriate local.set opcode for the given register class.
 static unsigned getSetLocalOpcode(const TargetRegisterClass *RC) {
   if (RC == &WebAssembly::I32RegClass)
-    return WebAssembly::SET_LOCAL_I32;
+    return WebAssembly::LOCAL_SET_I32;
   if (RC == &WebAssembly::I64RegClass)
-    return WebAssembly::SET_LOCAL_I64;
+    return WebAssembly::LOCAL_SET_I64;
   if (RC == &WebAssembly::F32RegClass)
-    return WebAssembly::SET_LOCAL_F32;
+    return WebAssembly::LOCAL_SET_F32;
   if (RC == &WebAssembly::F64RegClass)
-    return WebAssembly::SET_LOCAL_F64;
+    return WebAssembly::LOCAL_SET_F64;
   if (RC == &WebAssembly::V128RegClass)
-    return WebAssembly::SET_LOCAL_V128;
+    return WebAssembly::LOCAL_SET_V128;
   if (RC == &WebAssembly::EXCEPT_REFRegClass)
-    return WebAssembly::SET_LOCAL_EXCEPT_REF;
+    return WebAssembly::LOCAL_SET_EXCEPT_REF;
   llvm_unreachable("Unexpected register class");
 }
 
-/// Get the appropriate tee_local opcode for the given register class.
+/// Get the appropriate local.tee opcode for the given register class.
 static unsigned getTeeLocalOpcode(const TargetRegisterClass *RC) {
   if (RC == &WebAssembly::I32RegClass)
-    return WebAssembly::TEE_LOCAL_I32;
+    return WebAssembly::LOCAL_TEE_I32;
   if (RC == &WebAssembly::I64RegClass)
-    return WebAssembly::TEE_LOCAL_I64;
+    return WebAssembly::LOCAL_TEE_I64;
   if (RC == &WebAssembly::F32RegClass)
-    return WebAssembly::TEE_LOCAL_F32;
+    return WebAssembly::LOCAL_TEE_F32;
   if (RC == &WebAssembly::F64RegClass)
-    return WebAssembly::TEE_LOCAL_F64;
+    return WebAssembly::LOCAL_TEE_F64;
   if (RC == &WebAssembly::V128RegClass)
-    return WebAssembly::TEE_LOCAL_V128;
+    return WebAssembly::LOCAL_TEE_V128;
   if (RC == &WebAssembly::EXCEPT_REFRegClass)
-    return WebAssembly::TEE_LOCAL_EXCEPT_REF;
+    return WebAssembly::LOCAL_TEE_EXCEPT_REF;
   llvm_unreachable("Unexpected register class");
 }
 
@@ -233,8 +233,8 @@
       if (MI.isDebugInstr() || MI.isLabel())
         continue;
 
-      // Replace tee instructions with tee_local. The difference is that tee
-      // instructins have two defs, while tee_local instructions have one def
+      // Replace tee instructions with local.tee. The difference is that tee
+      // instructions have two defs, while local.tee instructions have one def
       // and an index of a local to write to.
       if (WebAssembly::isTee(MI)) {
         assert(MFI.isVRegStackified(MI.getOperand(0).getReg()));
@@ -253,7 +253,7 @@
           MFI.stackifyVReg(NewReg);
         }
 
-        // Replace the TEE with a TEE_LOCAL.
+        // Replace the TEE with a LOCAL_TEE.
         unsigned LocalId =
             getLocalId(Reg2Local, CurLocal, MI.getOperand(1).getReg());
         unsigned Opc = getTeeLocalOpcode(RC);
@@ -267,7 +267,7 @@
         continue;
       }
 
-      // Insert set_locals for any defs that aren't stackified yet. Currently
+      // Insert local.sets for any defs that aren't stackified yet. Currently
       // we handle at most one def.
       assert(MI.getDesc().getNumDefs() <= 1);
       if (MI.getDesc().getNumDefs() == 1) {
@@ -296,15 +296,16 @@
                 .addReg(NewReg);
           }
           MI.getOperand(0).setReg(NewReg);
-          // This register operand is now being used by the inserted drop
-          // instruction, so make it undead.
+          // This register operand of the original instruction is now used by
+          // the inserted drop or local.set instruction, so clear its dead
+          // flag.
           MI.getOperand(0).setIsDead(false);
           MFI.stackifyVReg(NewReg);
           Changed = true;
         }
       }
 
-      // Insert get_locals for any uses that aren't stackified yet.
+      // Insert local.gets for any uses that aren't stackified yet.
       MachineInstr *InsertPt = &MI;
       for (MachineOperand &MO : reverse(MI.explicit_uses())) {
         if (!MO.isReg())
@@ -326,7 +327,7 @@
         }
 
         // If we see a stackified register, prepare to insert subsequent
-        // get_locals before the start of its tree.
+        // local.gets before the start of its tree.
         if (MFI.isVRegStackified(OldReg)) {
           InsertPt = findStartOfTree(MO, MRI, MFI);
           continue;
@@ -342,7 +343,7 @@
           continue;
         }
 
-        // Insert a get_local.
+        // Insert a local.get.
         unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg);
         const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
         unsigned NewReg = MRI.createVirtualRegister(RC);
diff --git a/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 00e37a4..3856700 100644
--- a/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -137,12 +137,15 @@
     case MVT::v16i8:
     case MVT::v8i16:
     case MVT::v4i32:
-    case MVT::v2i64:
     case MVT::v4f32:
-    case MVT::v2f64:
       if (Subtarget->hasSIMD128())
         return VT;
       break;
+    case MVT::v2i64:
+    case MVT::v2f64:
+      if (Subtarget->hasUnimplementedSIMD128())
+        return VT;
+      break;
     default:
       break;
     }
@@ -440,13 +443,11 @@
 
   switch (From) {
   case MVT::i1:
-    // If the value is naturally an i1, we don't need to mask it.
-    // TODO: Recursively examine selects, phis, and, or, xor, constants.
-    if (From == MVT::i1 && V != nullptr) {
-      if (isa<CmpInst>(V) ||
-          (isa<Argument>(V) && cast<Argument>(V)->hasZExtAttr()))
-        return copyValue(Reg);
-    }
+    // If the value is naturally an i1, we don't need to mask it. We can only
+    // know a value is naturally an i1 when it is definitely lowered by
+    // FastISel, not by a DAG ISel fallback.
+    if (V != nullptr && isa<Argument>(V) && cast<Argument>(V)->hasZExtAttr())
+      return copyValue(Reg);
     break;
   case MVT::i8:
   case MVT::i16:
diff --git a/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
index 0644f12..1a41652 100644
--- a/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
@@ -228,6 +228,8 @@
 }
 
 bool FixFunctionBitcasts::runOnModule(Module &M) {
+  LLVM_DEBUG(dbgs() << "********** Fix Function Bitcasts **********\n");
+
   Function *Main = nullptr;
   CallInst *CallMain = nullptr;
   SmallVector<std::pair<Use *, Function *>, 0> Uses;
diff --git a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
index bea027b..108f287 100644
--- a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
@@ -8,8 +8,8 @@
 //===----------------------------------------------------------------------===//
 ///
 /// \file
-/// This file implements a pass that transforms irreducible control flow
-/// into reducible control flow. Irreducible control flow means multiple-entry
+/// This file implements a pass that transforms irreducible control flow into
+/// reducible control flow. Irreducible control flow means multiple-entry
 /// loops; they appear as CFG cycles that are not recorded in MachineLoopInfo
 /// due to being unnatural.
 ///
@@ -17,12 +17,36 @@
 /// it linearizes control flow, turning diamonds into two triangles, which is
 /// both unnecessary and undesirable for WebAssembly.
 ///
-/// TODO: The transformation implemented here handles all irreducible control
-/// flow, without exponential code-size expansion, though it does so by creating
-/// inefficient code in many cases. Ideally, we should add other
-/// transformations, including code-duplicating cases, which can be more
-/// efficient in common cases, and they can fall back to this conservative
-/// implementation as needed.
+/// The big picture: Ignoring natural loops (seeing them monolithically), we
+/// find all the blocks which can return to themselves ("loopers"). Loopers
+/// reachable from the non-loopers are loop entries: if there are 2 or more,
+/// then we have irreducible control flow. We fix that as follows: a new block
+/// is created that can dispatch to each of the loop entries, based on the
+/// value of a label "helper" variable, and we replace direct branches to the
+/// entries with assignments to the label variable and a branch to the dispatch
+/// block. Then the dispatch block is the single entry in a new natural loop.
+///
+/// This is similar to what the Relooper [1] does, both identify looping code
+/// that requires multiple entries, and resolve it in a similar way. In
+/// Relooper terminology, we implement a Multiple shape in a Loop shape. Note
+/// also that like the Relooper, we implement a "minimal" intervention: we only
+/// use the "label" helper for the blocks we absolutely must and no others. We
+/// also prioritize code size and do not perform node splitting (i.e. we don't
+/// duplicate code in order to resolve irreducibility).
+///
+/// The difference between this code and the Relooper is that the Relooper also
+/// generates ifs and loops and works in a recursive manner, knowing at each
+/// point what the entries are, and recursively breaks down the problem. Here
+/// we just want to resolve irreducible control flow, and we also want to use
+/// as much LLVM infrastructure as possible. So we use the MachineLoopInfo to
+/// identify natural loops, etc., and we start with the whole CFG and must
+/// identify both the looping code and its entries.
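+///
+/// A minimal sketch of the rewrite (block names are illustrative): given two
+/// entries A and B of an irreducible cycle, a direct branch
+///
+///   X:  br A
+///
+/// becomes
+///
+///   X:        label = indexOf(A) ; br Dispatch
+///   Dispatch: br_table label, [A, B]
+///
+/// after which Dispatch is the single entry of a new natural loop.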
+///
+/// [1] Alon Zakai. 2011. Emscripten: an LLVM-to-JavaScript compiler. In
+/// Proceedings of the ACM international conference companion on Object oriented
+/// programming systems languages and applications companion (SPLASH '11). ACM,
+/// New York, NY, USA, 301-312. DOI=10.1145/2048147.2048224
+/// http://doi.acm.org/10.1145/2048147.2048224
 ///
 //===----------------------------------------------------------------------===//
 
@@ -46,141 +70,203 @@
 #define DEBUG_TYPE "wasm-fix-irreducible-control-flow"
 
 namespace {
-class WebAssemblyFixIrreducibleControlFlow final : public MachineFunctionPass {
-  StringRef getPassName() const override {
-    return "WebAssembly Fix Irreducible Control Flow";
-  }
 
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<MachineDominatorTree>();
-    AU.addPreserved<MachineDominatorTree>();
-    AU.addRequired<MachineLoopInfo>();
-    AU.addPreserved<MachineLoopInfo>();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  bool VisitLoop(MachineFunction &MF, MachineLoopInfo &MLI, MachineLoop *Loop);
-
+class LoopFixer {
 public:
-  static char ID; // Pass identification, replacement for typeid
-  WebAssemblyFixIrreducibleControlFlow() : MachineFunctionPass(ID) {}
-};
-} // end anonymous namespace
+  LoopFixer(MachineFunction &MF, MachineLoopInfo &MLI, MachineLoop *Loop)
+      : MF(MF), MLI(MLI), Loop(Loop) {}
 
-char WebAssemblyFixIrreducibleControlFlow::ID = 0;
-INITIALIZE_PASS(WebAssemblyFixIrreducibleControlFlow, DEBUG_TYPE,
-                "Removes irreducible control flow", false, false)
+  // Run the fixer on the given inputs. Returns whether changes were made.
+  bool run();
 
-FunctionPass *llvm::createWebAssemblyFixIrreducibleControlFlow() {
-  return new WebAssemblyFixIrreducibleControlFlow();
-}
+private:
+  MachineFunction &MF;
+  MachineLoopInfo &MLI;
+  MachineLoop *Loop;
 
-namespace {
+  MachineBasicBlock *Header;
+  SmallPtrSet<MachineBasicBlock *, 4> LoopBlocks;
 
-/// A utility for walking the blocks of a loop, handling a nested inner
-/// loop as a monolithic conceptual block.
-class MetaBlock {
-  MachineBasicBlock *Block;
-  SmallVector<MachineBasicBlock *, 2> Preds;
-  SmallVector<MachineBasicBlock *, 2> Succs;
+  using BlockSet = SmallPtrSet<MachineBasicBlock *, 4>;
+  DenseMap<MachineBasicBlock *, BlockSet> Reachable;
 
-public:
-  explicit MetaBlock(MachineBasicBlock *MBB)
-      : Block(MBB), Preds(MBB->pred_begin(), MBB->pred_end()),
-        Succs(MBB->succ_begin(), MBB->succ_end()) {}
+  // The worklist contains pairs of recent additions, (a, b), where we just
+  // added a link a => b.
+  using BlockPair = std::pair<MachineBasicBlock *, MachineBasicBlock *>;
+  SmallVector<BlockPair, 4> WorkList;
 
-  explicit MetaBlock(MachineLoop *Loop) : Block(Loop->getHeader()) {
-    Loop->getExitBlocks(Succs);
-    for (MachineBasicBlock *Pred : Block->predecessors())
-      if (!Loop->contains(Pred))
-        Preds.push_back(Pred);
-  }
-
-  MachineBasicBlock *getBlock() const { return Block; }
-
-  const SmallVectorImpl<MachineBasicBlock *> &predecessors() const {
-    return Preds;
-  }
-  const SmallVectorImpl<MachineBasicBlock *> &successors() const {
-    return Succs;
-  }
-
-  bool operator==(const MetaBlock &MBB) { return Block == MBB.Block; }
-  bool operator!=(const MetaBlock &MBB) { return Block != MBB.Block; }
-};
-
-class SuccessorList final : public MetaBlock {
-  size_t Index;
-  size_t Num;
-
-public:
-  explicit SuccessorList(MachineBasicBlock *MBB)
-      : MetaBlock(MBB), Index(0), Num(successors().size()) {}
-
-  explicit SuccessorList(MachineLoop *Loop)
-      : MetaBlock(Loop), Index(0), Num(successors().size()) {}
-
-  bool HasNext() const { return Index != Num; }
-
-  MachineBasicBlock *Next() {
-    assert(HasNext());
-    return successors()[Index++];
-  }
-};
-
-} // end anonymous namespace
-
-bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF,
-                                                     MachineLoopInfo &MLI,
-                                                     MachineLoop *Loop) {
-  MachineBasicBlock *Header = Loop ? Loop->getHeader() : &*MF.begin();
-  SetVector<MachineBasicBlock *> RewriteSuccs;
-
-  // DFS through Loop's body, looking for irreducible control flow. Loop is
-  // natural, and we stay in its body, and we treat any nested loops
-  // monolithically, so any cycles we encounter indicate irreducibility.
-  SmallPtrSet<MachineBasicBlock *, 8> OnStack;
-  SmallPtrSet<MachineBasicBlock *, 8> Visited;
-  SmallVector<SuccessorList, 4> LoopWorklist;
-  LoopWorklist.push_back(SuccessorList(Header));
-  OnStack.insert(Header);
-  Visited.insert(Header);
-  while (!LoopWorklist.empty()) {
-    SuccessorList &Top = LoopWorklist.back();
-    if (Top.HasNext()) {
-      MachineBasicBlock *Next = Top.Next();
-      if (Next == Header || (Loop && !Loop->contains(Next)))
-        continue;
-      if (LLVM_LIKELY(OnStack.insert(Next).second)) {
-        if (!Visited.insert(Next).second) {
-          OnStack.erase(Next);
-          continue;
-        }
-        MachineLoop *InnerLoop = MLI.getLoopFor(Next);
-        if (InnerLoop != Loop)
-          LoopWorklist.push_back(SuccessorList(InnerLoop));
-        else
-          LoopWorklist.push_back(SuccessorList(Next));
-      } else {
-        RewriteSuccs.insert(Top.getBlock());
+  // Get a canonical block to represent a block or a loop: the block itself,
+  // or if it is in an inner loop, that loop's header; if it is in an outer
+  // loop scope, we can ignore it. We need to call this on all blocks we work
+  // on.
+  MachineBasicBlock *canonicalize(MachineBasicBlock *MBB) {
+    MachineLoop *InnerLoop = MLI.getLoopFor(MBB);
+    if (InnerLoop == Loop) {
+      return MBB;
+    } else {
+      // This is either in an outer or an inner loop, and not in ours.
+      if (!LoopBlocks.count(MBB)) {
+        // It's in outer code, ignore it.
+        return nullptr;
       }
-      continue;
+      assert(InnerLoop);
+      // It's in an inner loop, canonicalize it to the header of that loop.
+      return InnerLoop->getHeader();
     }
-    OnStack.erase(Top.getBlock());
-    LoopWorklist.pop_back();
   }
 
-  // Most likely, we didn't find any irreducible control flow.
-  if (LLVM_LIKELY(RewriteSuccs.empty()))
+  // For a successor we can additionally ignore it if it's a branch back to a
+  // natural loop top, as when we are in the scope of a loop, we just care
+  // about internal irreducibility, and can ignore the loop we are in. We need
+  // to call this on all blocks in a context where they are a successor.
+  MachineBasicBlock *canonicalizeSuccessor(MachineBasicBlock *MBB) {
+    if (Loop && MBB == Loop->getHeader()) {
+      // Ignore branches going to the loop's natural header.
+      return nullptr;
+    }
+    return canonicalize(MBB);
+  }
+
+  // Potentially insert a new reachable edge, and if so, note it as further
+  // work.
+  void maybeInsert(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
+    assert(MBB == canonicalize(MBB));
+    assert(Succ);
+    // Succ may not be interesting as a successor.
+    Succ = canonicalizeSuccessor(Succ);
+    if (!Succ)
+      return;
+    if (Reachable[MBB].insert(Succ).second) {
+      // Further work can only arise if we have
+      //   X => MBB => Succ
+      // for some other X, in which case X => Succ would be a new edge for us
+      // to discover later. However, if we don't care about MBB as a
+      // successor, there is nothing to propagate.
+      if (canonicalizeSuccessor(MBB)) {
+        WorkList.emplace_back(MBB, Succ);
+      }
+    }
+  }
+};
+
+bool LoopFixer::run() {
+  Header = Loop ? Loop->getHeader() : &*MF.begin();
+
+  // Identify all the blocks in this loop scope.
+  if (Loop) {
+    for (auto *MBB : Loop->getBlocks()) {
+      LoopBlocks.insert(MBB);
+    }
+  } else {
+    for (auto &MBB : MF) {
+      LoopBlocks.insert(&MBB);
+    }
+  }
+
+  // Compute which (canonicalized) blocks each block can reach.
+
+  // Add all the initial work.
+  for (auto *MBB : LoopBlocks) {
+    MachineLoop *InnerLoop = MLI.getLoopFor(MBB);
+
+    if (InnerLoop == Loop) {
+      for (auto *Succ : MBB->successors()) {
+        maybeInsert(MBB, Succ);
+      }
+    } else {
+      // It can't be in an outer loop - we iterate over LoopBlocks - and so it
+      // must be in an inner loop.
+      assert(InnerLoop);
+      // Check if we are the canonical block for this loop.
+      if (canonicalize(MBB) != MBB) {
+        continue;
+      }
+      // The successors are those of the loop.
+      SmallVector<MachineBasicBlock *, 2> ExitBlocks;
+      InnerLoop->getExitBlocks(ExitBlocks);
+      for (auto *Succ : ExitBlocks) {
+        maybeInsert(MBB, Succ);
+      }
+    }
+  }
+
+  // Do work until we are all done.
+  while (!WorkList.empty()) {
+    MachineBasicBlock *MBB;
+    MachineBasicBlock *Succ;
+    std::tie(MBB, Succ) = WorkList.pop_back_val();
+    // The worklist item is an edge we just added, so it must have valid blocks
+    // (and not something canonicalized to nullptr).
+    assert(MBB);
+    assert(Succ);
+    // MBB must itself be valid as a successor; maybeInsert only queues pairs
+    // whose source canonicalizes to one.
+    assert(MBB == canonicalizeSuccessor(MBB));
+    // We recently added MBB => Succ, and that means we may have enabled
+    // Pred => MBB => Succ. Check all the predecessors. Note that our loop here
+    // is correct for both a block and a block representing a loop, as the loop
+    // is natural and so the predecessors are all predecessors of the loop
+    // header, which is the block we have here.
+    for (auto *Pred : MBB->predecessors()) {
+      // Canonicalize, make sure it's relevant, and check it's not the same
+      // block (an update to the block itself doesn't help compute that same
+      // block).
+      Pred = canonicalize(Pred);
+      if (Pred && Pred != MBB) {
+        maybeInsert(Pred, Succ);
+      }
+    }
+  }
+
+  // It's now trivial to identify the loopers.
+  SmallPtrSet<MachineBasicBlock *, 4> Loopers;
+  for (auto *MBB : LoopBlocks) {
+    if (Reachable[MBB].count(MBB)) {
+      Loopers.insert(MBB);
+    }
+  }
+  // The header cannot be a looper. At the top level, LLVM does not allow the
+  // entry to be in a loop, and in a natural loop we should ignore the header.
+  assert(Loopers.count(Header) == 0);
+
+  // Find the entries, loopers reachable from non-loopers.
+  SmallPtrSet<MachineBasicBlock *, 4> Entries;
+  SmallVector<MachineBasicBlock *, 4> SortedEntries;
+  for (auto *Looper : Loopers) {
+    for (auto *Pred : Looper->predecessors()) {
+      Pred = canonicalize(Pred);
+      if (Pred && !Loopers.count(Pred)) {
+        Entries.insert(Looper);
+        SortedEntries.push_back(Looper);
+        break;
+      }
+    }
+  }
+
+  // Check if we found irreducible control flow.
+  if (LLVM_LIKELY(Entries.size() <= 1))
     return false;
 
-  LLVM_DEBUG(dbgs() << "Irreducible control flow detected!\n");
+  // Sort the entries to ensure a deterministic build.
+  llvm::sort(SortedEntries,
+             [&](const MachineBasicBlock *A, const MachineBasicBlock *B) {
+               auto ANum = A->getNumber();
+               auto BNum = B->getNumber();
+               return ANum < BNum;
+             });
 
-  // Ok. We have irreducible control flow! Create a dispatch block which will
-  // contains a jump table to any block in the problematic set of blocks.
+#ifndef NDEBUG
+  for (auto *Block : SortedEntries)
+    assert(Block->getNumber() != -1);
+  if (SortedEntries.size() > 1) {
+    for (auto I = SortedEntries.begin(), E = SortedEntries.end() - 1;
+         I != E; ++I) {
+      auto ANum = (*I)->getNumber();
+      auto BNum = (*(std::next(I)))->getNumber();
+      assert(ANum != BNum);
+    }
+  }
+#endif
+
+  // Create a dispatch block which will contain a jump table to the entries.
   MachineBasicBlock *Dispatch = MF.CreateMachineBasicBlock();
   MF.insert(MF.end(), Dispatch);
   MLI.changeLoopFor(Dispatch, Loop);
@@ -196,43 +282,43 @@
   unsigned Reg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
   MIB.addReg(Reg);
 
-  // Collect all the blocks which need to have their successors rewritten,
-  // add the successors to the jump table, and remember their index.
+  // Compute the indices in the dispatch block's jump table, one for each
+  // entry ("bad") block, and add the entries as its successors.
   DenseMap<MachineBasicBlock *, unsigned> Indices;
-  SmallVector<MachineBasicBlock *, 4> SuccWorklist(RewriteSuccs.begin(),
-                                                   RewriteSuccs.end());
-  while (!SuccWorklist.empty()) {
-    MachineBasicBlock *MBB = SuccWorklist.pop_back_val();
+  for (auto *MBB : SortedEntries) {
     auto Pair = Indices.insert(std::make_pair(MBB, 0));
-    if (!Pair.second)
+    if (!Pair.second) {
       continue;
+    }
 
     unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1;
-    LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index
-                      << "\n");
-
     Pair.first->second = Index;
-    for (auto Pred : MBB->predecessors())
-      RewriteSuccs.insert(Pred);
 
     MIB.addMBB(MBB);
     Dispatch->addSuccessor(MBB);
-
-    MetaBlock Meta(MBB);
-    for (auto *Succ : Meta.successors())
-      if (Succ != Header && (!Loop || Loop->contains(Succ)))
-        SuccWorklist.push_back(Succ);
   }
 
-  // Rewrite the problematic successors for every block in RewriteSuccs.
-  // For simplicity, we just introduce a new block for every edge we need to
-  // rewrite. Fancier things are possible.
-  for (MachineBasicBlock *MBB : RewriteSuccs) {
+  // Rewrite the problematic successors for every block that wants to reach the
+  // bad blocks. For simplicity, we just introduce a new block for every edge
+  // we need to rewrite. (Fancier things are possible.)
+
+  SmallVector<MachineBasicBlock *, 4> AllPreds;
+  for (auto *MBB : SortedEntries) {
+    for (auto *Pred : MBB->predecessors()) {
+      if (Pred != Dispatch) {
+        AllPreds.push_back(Pred);
+      }
+    }
+  }
+
+  for (MachineBasicBlock *MBB : AllPreds) {
     DenseMap<MachineBasicBlock *, MachineBasicBlock *> Map;
     for (auto *Succ : MBB->successors()) {
-      if (!Indices.count(Succ))
+      if (!Entries.count(Succ)) {
         continue;
+      }
 
+      // This is a successor we need to rewrite.
       MachineBasicBlock *Split = MF.CreateMachineBasicBlock();
       MF.insert(MBB->isLayoutSuccessor(Succ) ? MachineFunction::iterator(Succ)
                                              : MF.end(),
@@ -266,6 +352,55 @@
   return true;
 }
 
+class WebAssemblyFixIrreducibleControlFlow final : public MachineFunctionPass {
+  StringRef getPassName() const override {
+    return "WebAssembly Fix Irreducible Control Flow";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addRequired<MachineDominatorTree>();
+    AU.addPreserved<MachineDominatorTree>();
+    AU.addRequired<MachineLoopInfo>();
+    AU.addPreserved<MachineLoopInfo>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  bool runIteration(MachineFunction &MF, MachineLoopInfo &MLI) {
+    // Visit the function body, which is identified as a null loop.
+    if (LoopFixer(MF, MLI, nullptr).run()) {
+      return true;
+    }
+
+    // Visit all the loops.
+    SmallVector<MachineLoop *, 8> Worklist(MLI.begin(), MLI.end());
+    while (!Worklist.empty()) {
+      MachineLoop *Loop = Worklist.pop_back_val();
+      Worklist.append(Loop->begin(), Loop->end());
+      if (LoopFixer(MF, MLI, Loop).run()) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+  WebAssemblyFixIrreducibleControlFlow() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyFixIrreducibleControlFlow::ID = 0;
+INITIALIZE_PASS(WebAssemblyFixIrreducibleControlFlow, DEBUG_TYPE,
+                "Removes irreducible control flow", false, false)
+
+FunctionPass *llvm::createWebAssemblyFixIrreducibleControlFlow() {
+  return new WebAssemblyFixIrreducibleControlFlow();
+}
+
 bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(
     MachineFunction &MF) {
   LLVM_DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n"
@@ -275,24 +410,19 @@
   bool Changed = false;
   auto &MLI = getAnalysis<MachineLoopInfo>();
 
-  // Visit the function body, which is identified as a null loop.
-  Changed |= VisitLoop(MF, MLI, nullptr);
-
-  // Visit all the loops.
-  SmallVector<MachineLoop *, 8> Worklist(MLI.begin(), MLI.end());
-  while (!Worklist.empty()) {
-    MachineLoop *CurLoop = Worklist.pop_back_val();
-    Worklist.append(CurLoop->begin(), CurLoop->end());
-    Changed |= VisitLoop(MF, MLI, CurLoop);
-  }
-
-  // If we made any changes, completely recompute everything.
-  if (LLVM_UNLIKELY(Changed)) {
-    LLVM_DEBUG(dbgs() << "Recomputing dominators and loops.\n");
+  // When we modify something, bail out and recompute MLI, then start again, as
+  // we create a new natural loop when we resolve irreducible control flow, and
+  // other loops may become nested in it, etc. In practice this is not a
+  // problem: irreducible control flow is rare, so only very few iterations
+  // are needed here.
+  while (LLVM_UNLIKELY(runIteration(MF, MLI))) {
+    // We rewrote part of the function; recompute MLI and start again.
+    LLVM_DEBUG(dbgs() << "Recomputing loops.\n");
     MF.getRegInfo().invalidateLiveness();
     MF.RenumberBlocks();
     getAnalysis<MachineDominatorTree>().runOnMachineFunction(MF);
     MLI.runOnMachineFunction(MF);
+    Changed = true;
   }
 
   return Changed;
diff --git a/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
index 9db3430..2d5aff2 100644
--- a/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
@@ -130,7 +130,7 @@
 
   const char *ES = "__stack_pointer";
   auto *SPSymbol = MF.createExternalSymbolName(ES);
-  BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::SET_GLOBAL_I32))
+  BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::GLOBAL_SET_I32))
       .addExternalSymbol(SPSymbol, WebAssemblyII::MO_SYMBOL_GLOBAL)
       .addReg(SrcReg);
 }
@@ -177,7 +177,7 @@
 
   const char *ES = "__stack_pointer";
   auto *SPSymbol = MF.createExternalSymbolName(ES);
-  BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GET_GLOBAL_I32), SPReg)
+  BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GLOBAL_GET_I32), SPReg)
       .addExternalSymbol(SPSymbol, WebAssemblyII::MO_SYMBOL_GLOBAL);
 
   bool HasBP = hasBP(MF);
diff --git a/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
index fdf3a30..0a7464c 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
@@ -48,6 +48,10 @@
   }
 
   bool runOnMachineFunction(MachineFunction &MF) override {
+    LLVM_DEBUG(dbgs() << "********** ISelDAGToDAG **********\n"
+                         "********** Function: "
+                      << MF.getName() << '\n');
+
     ForCodeSize = MF.getFunction().hasFnAttribute(Attribute::OptimizeForSize) ||
                   MF.getFunction().hasFnAttribute(Attribute::MinSize);
     Subtarget = &MF.getSubtarget<WebAssemblySubtarget>();
diff --git a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index a038215..003848e 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -37,12 +37,6 @@
 
 #define DEBUG_TYPE "wasm-lower"
 
-// Emit proposed instructions that may not have been implemented in engines
-cl::opt<bool> EnableUnimplementedWasmSIMDInstrs(
-    "wasm-enable-unimplemented-simd",
-    cl::desc("Emit potentially-unimplemented WebAssembly SIMD instructions"),
-    cl::init(false));
-
 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
     const TargetMachine &TM, const WebAssemblySubtarget &STI)
     : TargetLowering(TM), Subtarget(&STI) {
@@ -70,7 +64,7 @@
     addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
     addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
     addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
-    if (EnableUnimplementedWasmSIMDInstrs) {
+    if (Subtarget->hasUnimplementedSIMD128()) {
       addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
       addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
     }
@@ -135,7 +129,7 @@
       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) {
         setOperationAction(Op, T, Expand);
       }
-      if (EnableUnimplementedWasmSIMDInstrs) {
+      if (Subtarget->hasUnimplementedSIMD128()) {
         setOperationAction(Op, MVT::v2i64, Expand);
       }
     }
@@ -149,7 +143,7 @@
     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) {
       setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
     }
-    if (EnableUnimplementedWasmSIMDInstrs) {
+    if (Subtarget->hasUnimplementedSIMD128()) {
       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
     }
@@ -160,7 +154,7 @@
     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
       for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
         setOperationAction(Op, T, Custom);
-    if (EnableUnimplementedWasmSIMDInstrs)
+    if (Subtarget->hasUnimplementedSIMD128())
       for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
         setOperationAction(Op, MVT::v2i64, Custom);
   }
@@ -170,7 +164,7 @@
     for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
         setOperationAction(Op, T, Expand);
-      if (EnableUnimplementedWasmSIMDInstrs)
+      if (Subtarget->hasUnimplementedSIMD128())
         for (auto T : {MVT::v2i64, MVT::v2f64})
           setOperationAction(Op, T, Expand);
     }
@@ -179,8 +173,10 @@
   // sign-extend from.
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
   if (!Subtarget->hasSignExt()) {
+    // Sign extension is legal only when extending a vector extract.
+    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
     for (auto T : {MVT::i8, MVT::i16, MVT::i32})
-      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
+      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
   }
   for (auto T : MVT::integer_vector_valuetypes())
     setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
@@ -224,13 +220,19 @@
     }
   }
 
+  // Expand additional SIMD ops that V8 hasn't implemented yet
+  if (Subtarget->hasSIMD128() && !Subtarget->hasUnimplementedSIMD128()) {
+    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
+    setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
+  }
+
   // Custom lower lane accesses to expand out variable indices
   if (Subtarget->hasSIMD128()) {
     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) {
       setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom);
       setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom);
     }
-    if (EnableUnimplementedWasmSIMDInstrs) {
+    if (Subtarget->hasUnimplementedSIMD128()) {
       for (auto T : {MVT::v2i64, MVT::v2f64}) {
         setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom);
         setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom);
@@ -894,6 +896,8 @@
     return LowerAccessVectorElement(Op, DAG);
   case ISD::INTRINSIC_VOID:
     return LowerINTRINSIC_VOID(Op, DAG);
+  case ISD::SIGN_EXTEND_INREG:
+    return LowerSIGN_EXTEND_INREG(Op, DAG);
   case ISD::VECTOR_SHUFFLE:
     return LowerVECTOR_SHUFFLE(Op, DAG);
   case ISD::SHL:
@@ -911,7 +915,7 @@
     // the FI to some LEA-like instruction, but since we don't have that, we
     // need to insert some kind of instruction that can take an FI operand and
     // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
-    // copy_local between Op and its FI operand.
+    // local.copy between Op and its FI operand.
     SDValue Chain = Op.getOperand(0);
     SDLoc DL(Op);
     unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
@@ -1096,6 +1100,22 @@
 }
 
 SDValue
+WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  // If sign extension operations are disabled, allow sext_inreg only if the
+  // operand is a vector extract. SIMD does not depend on sign extension
+  // operations, but allowing sext_inreg in this context lets us have simple
+  // patterns to select extract_lane_s instructions. Expanding sext_inreg
+  // everywhere would be simpler in this file, but would necessitate large and
+  // brittle patterns to undo the expansion and select extract_lane_s
+  // instructions.
+  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
+  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT)
+    return Op;
+  // Otherwise expand
+  return SDValue();
+}
+
+SDValue
 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                                SelectionDAG &DAG) const {
   SDLoc DL(Op);
@@ -1135,6 +1155,31 @@
     return SDValue();
 }
 
+static SDValue UnrollVectorShift(SDValue Op, SelectionDAG &DAG) {
+  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
+  // 32-bit and 64-bit unrolled shifts will have proper semantics
+  if (LaneT.bitsGE(MVT::i32))
+    return DAG.UnrollVectorOp(Op.getNode());
+  // Otherwise mask the shift value to get proper semantics from 32-bit shift
+  SDLoc DL(Op);
+  SDValue ShiftVal = Op.getOperand(1);
+  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
+  SDValue MaskedShiftVal = DAG.getNode(
+      ISD::AND,                    // mask opcode
+      DL, ShiftVal.getValueType(), // masked value type
+      ShiftVal,                    // original shift value operand
+      DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
+  );
+
+  return DAG.UnrollVectorOp(
+      DAG.getNode(Op.getOpcode(),        // original shift opcode
+                  DL, Op.getValueType(), // original return type
+                  Op.getOperand(0),      // original vector operand,
+                  MaskedShiftVal         // new masked shift value operand
+                  )
+          .getNode());
+}
+
 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                               SelectionDAG &DAG) const {
   SDLoc DL(Op);
@@ -1142,12 +1187,17 @@
   // Only manually lower vector shifts
   assert(Op.getSimpleValueType().isVector());
 
+  // Expand all vector shifts until V8 fixes its implementation
+  // TODO: remove this once V8 is fixed
+  if (!Subtarget->hasUnimplementedSIMD128())
+    return UnrollVectorShift(Op, DAG);
+
   // Unroll non-splat vector shifts
   BuildVectorSDNode *ShiftVec;
   SDValue SplatVal;
   if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
       !(SplatVal = ShiftVec->getSplatValue()))
-    return DAG.UnrollVectorOp(Op.getNode());
+    return UnrollVectorShift(Op, DAG);
 
   // All splats except i64x2 const splats are handled by patterns
   ConstantSDNode *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
diff --git a/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/lib/Target/WebAssembly/WebAssemblyISelLowering.h
index 8007681..59f4230 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -99,6 +99,7 @@
   SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerAccessVectorElement(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
index f9d092e..5fb8ef9 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
@@ -114,7 +114,7 @@
 def : LoadPatNoOffset<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>;
 def : LoadPatNoOffset<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>;
 def : LoadPatNoOffset<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>;
-// 32->64 sext load gets selected as i32.atomic.load, i64.extend_s/i32
+// 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s
 
 // Zero-extending loads with constant offset
 def : LoadPatImmOff<i32, zext_aload_8_32, regPlusImm, ATOMIC_LOAD8_U_I32>;
@@ -344,82 +344,82 @@
 defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.add", 0xfe1e>;
 defm ATOMIC_RMW_ADD_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.add", 0xfe1f>;
 defm ATOMIC_RMW8_U_ADD_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.add", 0xfe20>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.add_u", 0xfe20>;
 defm ATOMIC_RMW16_U_ADD_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.add", 0xfe21>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.add_u", 0xfe21>;
 defm ATOMIC_RMW8_U_ADD_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.add", 0xfe22>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.add_u", 0xfe22>;
 defm ATOMIC_RMW16_U_ADD_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.add", 0xfe23>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.add_u", 0xfe23>;
 defm ATOMIC_RMW32_U_ADD_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.add", 0xfe24>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.add_u", 0xfe24>;
 
 defm ATOMIC_RMW_SUB_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.sub", 0xfe25>;
 defm ATOMIC_RMW_SUB_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.sub", 0xfe26>;
 defm ATOMIC_RMW8_U_SUB_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.sub", 0xfe27>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.sub_u", 0xfe27>;
 defm ATOMIC_RMW16_U_SUB_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.sub", 0xfe28>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.sub_u", 0xfe28>;
 defm ATOMIC_RMW8_U_SUB_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.sub", 0xfe29>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.sub_u", 0xfe29>;
 defm ATOMIC_RMW16_U_SUB_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.sub", 0xfe2a>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.sub_u", 0xfe2a>;
 defm ATOMIC_RMW32_U_SUB_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.sub", 0xfe2b>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.sub_u", 0xfe2b>;
 
 defm ATOMIC_RMW_AND_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.and", 0xfe2c>;
 defm ATOMIC_RMW_AND_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.and", 0xfe2d>;
 defm ATOMIC_RMW8_U_AND_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.and", 0xfe2e>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.and_u", 0xfe2e>;
 defm ATOMIC_RMW16_U_AND_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.and", 0xfe2f>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.and_u", 0xfe2f>;
 defm ATOMIC_RMW8_U_AND_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.and", 0xfe30>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.and_u", 0xfe30>;
 defm ATOMIC_RMW16_U_AND_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.and", 0xfe31>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.and_u", 0xfe31>;
 defm ATOMIC_RMW32_U_AND_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.and", 0xfe32>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.and_u", 0xfe32>;
 
 defm ATOMIC_RMW_OR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.or", 0xfe33>;
 defm ATOMIC_RMW_OR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.or", 0xfe34>;
 defm ATOMIC_RMW8_U_OR_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.or", 0xfe35>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.or_u", 0xfe35>;
 defm ATOMIC_RMW16_U_OR_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.or", 0xfe36>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.or_u", 0xfe36>;
 defm ATOMIC_RMW8_U_OR_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.or", 0xfe37>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.or_u", 0xfe37>;
 defm ATOMIC_RMW16_U_OR_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.or", 0xfe38>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.or_u", 0xfe38>;
 defm ATOMIC_RMW32_U_OR_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.or", 0xfe39>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.or_u", 0xfe39>;
 
 defm ATOMIC_RMW_XOR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.xor", 0xfe3a>;
 defm ATOMIC_RMW_XOR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.xor", 0xfe3b>;
 defm ATOMIC_RMW8_U_XOR_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.xor", 0xfe3c>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xor_u", 0xfe3c>;
 defm ATOMIC_RMW16_U_XOR_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.xor", 0xfe3d>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xor_u", 0xfe3d>;
 defm ATOMIC_RMW8_U_XOR_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.xor", 0xfe3e>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xor_u", 0xfe3e>;
 defm ATOMIC_RMW16_U_XOR_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.xor", 0xfe3f>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xor_u", 0xfe3f>;
 defm ATOMIC_RMW32_U_XOR_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.xor", 0xfe40>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xor_u", 0xfe40>;
 
 defm ATOMIC_RMW_XCHG_I32 :
   WebAssemblyBinRMW<I32, "i32.atomic.rmw.xchg", 0xfe41>;
 defm ATOMIC_RMW_XCHG_I64 :
   WebAssemblyBinRMW<I64, "i64.atomic.rmw.xchg", 0xfe42>;
 defm ATOMIC_RMW8_U_XCHG_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.xchg", 0xfe43>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xchg_u", 0xfe43>;
 defm ATOMIC_RMW16_U_XCHG_I32 :
-  WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.xchg", 0xfe44>;
+  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xchg_u", 0xfe44>;
 defm ATOMIC_RMW8_U_XCHG_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.xchg", 0xfe45>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xchg_u", 0xfe45>;
 defm ATOMIC_RMW16_U_XCHG_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.xchg", 0xfe46>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xchg_u", 0xfe46>;
 defm ATOMIC_RMW32_U_XCHG_I64 :
-  WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.xchg", 0xfe47>;
+  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xchg_u", 0xfe47>;
 
 // Select binary RMWs with no constant offset.
 class BinRMWPatNoOffset<ValueType ty, PatFrag kind, NI inst> :
@@ -530,7 +530,7 @@
   PatFrag<(ops node:$addr, node:$val),
           (anyext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>;
 class sext_bin_rmw_16_64<PatFrag kind> : sext_bin_rmw_8_64<kind>;
-// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_s/i32
+// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
 
 // Patterns for various addressing modes for truncating-extending binary RMWs.
 multiclass BinRMWTruncExtPattern<
@@ -677,15 +677,15 @@
 defm ATOMIC_RMW_CMPXCHG_I64 :
   WebAssemblyTerRMW<I64, "i64.atomic.rmw.cmpxchg", 0xfe49>;
 defm ATOMIC_RMW8_U_CMPXCHG_I32 :
-  WebAssemblyTerRMW<I32, "i32.atomic.rmw8_u.cmpxchg", 0xfe4a>;
+  WebAssemblyTerRMW<I32, "i32.atomic.rmw8.cmpxchg_u", 0xfe4a>;
 defm ATOMIC_RMW16_U_CMPXCHG_I32 :
-  WebAssemblyTerRMW<I32, "i32.atomic.rmw16_u.cmpxchg", 0xfe4b>;
+  WebAssemblyTerRMW<I32, "i32.atomic.rmw16.cmpxchg_u", 0xfe4b>;
 defm ATOMIC_RMW8_U_CMPXCHG_I64 :
-  WebAssemblyTerRMW<I64, "i64.atomic.rmw8_u.cmpxchg", 0xfe4c>;
+  WebAssemblyTerRMW<I64, "i64.atomic.rmw8.cmpxchg_u", 0xfe4c>;
 defm ATOMIC_RMW16_U_CMPXCHG_I64 :
-  WebAssemblyTerRMW<I64, "i64.atomic.rmw16_u.cmpxchg", 0xfe4d>;
+  WebAssemblyTerRMW<I64, "i64.atomic.rmw16.cmpxchg_u", 0xfe4d>;
 defm ATOMIC_RMW32_U_CMPXCHG_I64 :
-  WebAssemblyTerRMW<I64, "i64.atomic.rmw32_u.cmpxchg", 0xfe4e>;
+  WebAssemblyTerRMW<I64, "i64.atomic.rmw32.cmpxchg_u", 0xfe4e>;
 
 // Select ternary RMWs with no constant offset.
 class TerRMWPatNoOffset<ValueType ty, PatFrag kind, NI inst> :
@@ -790,7 +790,7 @@
                   (i32 (trunc (i64 node:$exp))),
                   (i32 (trunc (i64 node:$new))))))))>;
 class sext_ter_rmw_16_64<PatFrag kind> : sext_ter_rmw_8_64<kind>;
-// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_s/i32
+// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
 
 // Patterns for various addressing modes for truncating-extending ternary RMWs.
 multiclass TerRMWTruncExtPattern<
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrControl.td b/lib/Target/WebAssembly/WebAssemblyInstrControl.td
index 718bbfa..7eb6cbf 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrControl.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrControl.td
@@ -33,42 +33,32 @@
 def : Pat<(brcond (i32 (seteq I32:$cond, 0)), bb:$dst),
           (BR_UNLESS bb_op:$dst, I32:$cond)>;
 
+// A list of branch targets enclosed in {} and separated by commas.
+// Used only by br_table.
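+// For example, a three-target jump table might print as (illustrative
+// labels): "br_table {bb0, bb1, bb2}".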
+def BrListAsmOperand : AsmOperandClass { let Name = "BrList"; }
+let OperandNamespace = "WebAssembly" in {
+let OperandType = "OPERAND_BRLIST" in {
+def brlist : Operand<i32> {
+  let ParserMatchClass = BrListAsmOperand;
+  let PrintMethod = "printBrList";
+}
+} // OPERAND_BRLIST
+} // OperandNamespace = "WebAssembly"
+
 // TODO: SelectionDAG's lowering insists on using a pointer as the index for
 // jump tables, so in practice we don't ever use BR_TABLE_I64 in wasm32 mode
 // currently.
-// Set TSFlags{0} to 1 to indicate that the variable_ops are immediates.
-// Set TSFlags{1} to 1 to indicate that the immediates represent labels.
-// FIXME: this can't inherit from I<> since there is no way to inherit from a
-// multiclass and still have the let statements.
 let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
-let isCodeGenOnly = 1 in
-def BR_TABLE_I32 : NI<(outs), (ins I32:$index, variable_ops),
-                      [(WebAssemblybr_table I32:$index)], "false",
-                      "br_table \t$index", 0x0e> {
-  let TSFlags{0} = 1;
-  let TSFlags{1} = 1;
-}
-let BaseName = "BR_TABLE_I32" in
-def BR_TABLE_I32_S : NI<(outs), (ins variable_ops),
-                        [], "true",
-                        "br_table \t", 0x0e> {
-  let TSFlags{0} = 1;
-  let TSFlags{1} = 1;
-}
-let isCodeGenOnly = 1 in
-def BR_TABLE_I64 : NI<(outs), (ins I64:$index, variable_ops),
-                      [(WebAssemblybr_table I64:$index)], "false",
-                      "br_table \t$index"> {
-  let TSFlags{0} = 1;
-  let TSFlags{1} = 1;
-}
-let BaseName = "BR_TABLE_I64" in
-def BR_TABLE_I64_S : NI<(outs), (ins variable_ops),
-                        [], "true",
-                        "br_table \t"> {
-  let TSFlags{0} = 1;
-  let TSFlags{1} = 1;
-}
+defm BR_TABLE_I32 : I<(outs), (ins I32:$index, variable_ops),
+                      (outs), (ins brlist:$brl),
+                      [(WebAssemblybr_table I32:$index)],
+                      "br_table \t$index", "br_table \t$brl",
+                      0x0e>;
+defm BR_TABLE_I64 : I<(outs), (ins I64:$index, variable_ops),
+                      (outs), (ins brlist:$brl),
+                      [(WebAssemblybr_table I64:$index)],
+                      "br_table \t$index", "br_table \t$brl",
+                      0x0e>;
 } // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1
 
 // This is technically a control-flow instruction, since all it affects is the
@@ -79,13 +69,19 @@
 // These use/clobber VALUE_STACK to prevent them from being moved into the
 // middle of an expression tree.
 let Uses = [VALUE_STACK], Defs = [VALUE_STACK] in {
-defm BLOCK     : NRI<(outs), (ins Signature:$sig), [], "block   \t$sig", 0x02>;
-defm LOOP      : NRI<(outs), (ins Signature:$sig), [], "loop    \t$sig", 0x03>;
+defm BLOCK : NRI<(outs), (ins Signature:$sig), [], "block   \t$sig", 0x02>;
+defm LOOP  : NRI<(outs), (ins Signature:$sig), [], "loop    \t$sig", 0x03>;
 
-// END_BLOCK, END_LOOP, and END_FUNCTION are represented with the same opcode in
-// wasm.
+defm IF : I<(outs), (ins Signature:$sig, I32:$cond),
+            (outs), (ins Signature:$sig),
+            [], "if    \t$sig, $cond", "if    \t$sig", 0x04>;
+defm ELSE : NRI<(outs), (ins), [], "else", 0x05>;
+
+// END_BLOCK, END_LOOP, END_IF and END_FUNCTION are represented with the same
+// opcode in wasm.
 defm END_BLOCK : NRI<(outs), (ins), [], "end_block", 0x0b>;
 defm END_LOOP  : NRI<(outs), (ins), [], "end_loop", 0x0b>;
+defm END_IF    : NRI<(outs), (ins), [], "end_if", 0x0b>;
 let isTerminator = 1, isBarrier = 1 in
 defm END_FUNCTION : NRI<(outs), (ins), [], "end_function", 0x0b>;
 } // Uses = [VALUE_STACK], Defs = [VALUE_STACK]
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrConv.td b/lib/Target/WebAssembly/WebAssemblyInstrConv.td
index 0d772c7..e128656 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrConv.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrConv.td
@@ -15,15 +15,15 @@
 
 defm I32_WRAP_I64 : I<(outs I32:$dst), (ins I64:$src), (outs), (ins),
                       [(set I32:$dst, (trunc I64:$src))],
-                      "i32.wrap/i64\t$dst, $src", "i32.wrap/i64", 0xa7>;
+                      "i32.wrap_i64\t$dst, $src", "i32.wrap_i64", 0xa7>;
 
 defm I64_EXTEND_S_I32 : I<(outs I64:$dst), (ins I32:$src), (outs), (ins),
                           [(set I64:$dst, (sext I32:$src))],
-                          "i64.extend_s/i32\t$dst, $src", "i64.extend_s/i32",
+                          "i64.extend_i32_s\t$dst, $src", "i64.extend_i32_s",
                           0xac>;
 defm I64_EXTEND_U_I32 : I<(outs I64:$dst), (ins I32:$src), (outs), (ins),
                           [(set I64:$dst, (zext I32:$src))],
-                          "i64.extend_u/i32\t$dst, $src", "i64.extend_u/i32",
+                          "i64.extend_i32_u\t$dst, $src", "i64.extend_i32_u",
                           0xad>;
 
 let Predicates = [HasSignExt] in {
@@ -58,43 +58,43 @@
 // overflow or invalid.
 defm I32_TRUNC_S_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
                              [(set I32:$dst, (fp_to_sint F32:$src))],
-                             "i32.trunc_s:sat/f32\t$dst, $src",
-                             "i32.trunc_s:sat/f32", 0xfc00>,
+                             "i32.trunc_sat_f32_s\t$dst, $src",
+                             "i32.trunc_sat_f32_s", 0xfc00>,
                              Requires<[HasNontrappingFPToInt]>;
 defm I32_TRUNC_U_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
                              [(set I32:$dst, (fp_to_uint F32:$src))],
-                             "i32.trunc_u:sat/f32\t$dst, $src",
-                             "i32.trunc_u:sat/f32", 0xfc01>,
+                             "i32.trunc_sat_f32_u\t$dst, $src",
+                             "i32.trunc_sat_f32_u", 0xfc01>,
                              Requires<[HasNontrappingFPToInt]>;
 defm I64_TRUNC_S_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
                              [(set I64:$dst, (fp_to_sint F32:$src))],
-                             "i64.trunc_s:sat/f32\t$dst, $src",
-                             "i64.trunc_s:sat/f32", 0xfc04>,
+                             "i64.trunc_sat_f32_s\t$dst, $src",
+                             "i64.trunc_sat_f32_s", 0xfc04>,
                              Requires<[HasNontrappingFPToInt]>;
 defm I64_TRUNC_U_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
                              [(set I64:$dst, (fp_to_uint F32:$src))],
-                             "i64.trunc_u:sat/f32\t$dst, $src",
-                             "i64.trunc_u:sat/f32", 0xfc05>,
+                             "i64.trunc_sat_f32_u\t$dst, $src",
+                             "i64.trunc_sat_f32_u", 0xfc05>,
                              Requires<[HasNontrappingFPToInt]>;
 defm I32_TRUNC_S_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
                              [(set I32:$dst, (fp_to_sint F64:$src))],
-                             "i32.trunc_s:sat/f64\t$dst, $src",
-                             "i32.trunc_s:sat/f64", 0xfc02>,
+                             "i32.trunc_sat_f64_s\t$dst, $src",
+                             "i32.trunc_sat_f64_s", 0xfc02>,
                              Requires<[HasNontrappingFPToInt]>;
 defm I32_TRUNC_U_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
                              [(set I32:$dst, (fp_to_uint F64:$src))],
-                             "i32.trunc_u:sat/f64\t$dst, $src",
-                             "i32.trunc_u:sat/f64", 0xfc03>,
+                             "i32.trunc_sat_f64_u\t$dst, $src",
+                             "i32.trunc_sat_f64_u", 0xfc03>,
                              Requires<[HasNontrappingFPToInt]>;
 defm I64_TRUNC_S_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
                              [(set I64:$dst, (fp_to_sint F64:$src))],
-                             "i64.trunc_s:sat/f64\t$dst, $src",
-                             "i64.trunc_s:sat/f64", 0xfc06>,
+                             "i64.trunc_sat_f64_s\t$dst, $src",
+                             "i64.trunc_sat_f64_s", 0xfc06>,
                              Requires<[HasNontrappingFPToInt]>;
 defm I64_TRUNC_U_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
                              [(set I64:$dst, (fp_to_uint F64:$src))],
-                             "i64.trunc_u:sat/f64\t$dst, $src",
-                             "i64.trunc_u:sat/f64", 0xfc07>,
+                             "i64.trunc_sat_f64_u\t$dst, $src",
+                             "i64.trunc_sat_f64_u", 0xfc07>,
                              Requires<[HasNontrappingFPToInt]>;
 
 // Lower llvm.wasm.trunc.saturate.* to saturating instructions
@@ -147,86 +147,86 @@
 // Conversion from floating point to integer traps on overflow and invalid.
 let hasSideEffects = 1 in {
 defm I32_TRUNC_S_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
-                         [], "i32.trunc_s/f32\t$dst, $src", "i32.trunc_s/f32",
+                         [], "i32.trunc_f32_s\t$dst, $src", "i32.trunc_f32_s",
                          0xa8>;
 defm I32_TRUNC_U_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
-                         [], "i32.trunc_u/f32\t$dst, $src", "i32.trunc_u/f32",
+                         [], "i32.trunc_f32_u\t$dst, $src", "i32.trunc_f32_u",
                          0xa9>;
 defm I64_TRUNC_S_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
-                         [], "i64.trunc_s/f32\t$dst, $src", "i64.trunc_s/f32",
+                         [], "i64.trunc_f32_s\t$dst, $src", "i64.trunc_f32_s",
                          0xae>;
 defm I64_TRUNC_U_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
-                         [], "i64.trunc_u/f32\t$dst, $src", "i64.trunc_u/f32",
+                         [], "i64.trunc_f32_u\t$dst, $src", "i64.trunc_f32_u",
                          0xaf>;
 defm I32_TRUNC_S_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
-                         [], "i32.trunc_s/f64\t$dst, $src", "i32.trunc_s/f64",
+                         [], "i32.trunc_f64_s\t$dst, $src", "i32.trunc_f64_s",
                          0xaa>;
 defm I32_TRUNC_U_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
-                         [], "i32.trunc_u/f64\t$dst, $src", "i32.trunc_u/f64",
+                         [], "i32.trunc_f64_u\t$dst, $src", "i32.trunc_f64_u",
                          0xab>;
 defm I64_TRUNC_S_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
-                         [], "i64.trunc_s/f64\t$dst, $src", "i64.trunc_s/f64",
+                         [], "i64.trunc_f64_s\t$dst, $src", "i64.trunc_f64_s",
                          0xb0>;
 defm I64_TRUNC_U_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
-                         [], "i64.trunc_u/f64\t$dst, $src", "i64.trunc_u/f64",
+                         [], "i64.trunc_f64_u\t$dst, $src", "i64.trunc_f64_u",
                          0xb1>;
 } // hasSideEffects = 1
 
 defm F32_CONVERT_S_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins),
                            [(set F32:$dst, (sint_to_fp I32:$src))],
-                           "f32.convert_s/i32\t$dst, $src", "f32.convert_s/i32",
+                           "f32.convert_i32_s\t$dst, $src", "f32.convert_i32_s",
                            0xb2>;
 defm F32_CONVERT_U_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins),
                            [(set F32:$dst, (uint_to_fp I32:$src))],
-                           "f32.convert_u/i32\t$dst, $src", "f32.convert_u/i32",
+                           "f32.convert_i32_u\t$dst, $src", "f32.convert_i32_u",
                            0xb3>;
 defm F64_CONVERT_S_I32 : I<(outs F64:$dst), (ins I32:$src), (outs), (ins),
                            [(set F64:$dst, (sint_to_fp I32:$src))],
-                           "f64.convert_s/i32\t$dst, $src", "f64.convert_s/i32",
+                           "f64.convert_i32_s\t$dst, $src", "f64.convert_i32_s",
                            0xb7>;
 defm F64_CONVERT_U_I32 : I<(outs F64:$dst), (ins I32:$src), (outs), (ins),
                            [(set F64:$dst, (uint_to_fp I32:$src))],
-                           "f64.convert_u/i32\t$dst, $src", "f64.convert_u/i32",
+                           "f64.convert_i32_u\t$dst, $src", "f64.convert_i32_u",
                            0xb8>;
 defm F32_CONVERT_S_I64 : I<(outs F32:$dst), (ins I64:$src), (outs), (ins),
                            [(set F32:$dst, (sint_to_fp I64:$src))],
-                           "f32.convert_s/i64\t$dst, $src", "f32.convert_s/i64",
+                           "f32.convert_i64_s\t$dst, $src", "f32.convert_i64_s",
                            0xb4>;
 defm F32_CONVERT_U_I64 : I<(outs F32:$dst), (ins I64:$src), (outs), (ins),
                            [(set F32:$dst, (uint_to_fp I64:$src))],
-                           "f32.convert_u/i64\t$dst, $src", "f32.convert_u/i64",
+                           "f32.convert_i64_u\t$dst, $src", "f32.convert_i64_u",
                            0xb5>;
 defm F64_CONVERT_S_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins),
                            [(set F64:$dst, (sint_to_fp I64:$src))],
-                           "f64.convert_s/i64\t$dst, $src", "f64.convert_s/i64",
+                           "f64.convert_i64_s\t$dst, $src", "f64.convert_i64_s",
                            0xb9>;
 defm F64_CONVERT_U_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins),
                            [(set F64:$dst, (uint_to_fp I64:$src))],
-                           "f64.convert_u/i64\t$dst, $src", "f64.convert_u/i64",
+                           "f64.convert_i64_u\t$dst, $src", "f64.convert_i64_u",
                            0xba>;
 
 defm F64_PROMOTE_F32 : I<(outs F64:$dst), (ins F32:$src), (outs), (ins),
                          [(set F64:$dst, (fpextend F32:$src))],
-                         "f64.promote/f32\t$dst, $src", "f64.promote/f32",
+                         "f64.promote_f32\t$dst, $src", "f64.promote_f32",
                          0xbb>;
 defm F32_DEMOTE_F64 : I<(outs F32:$dst), (ins F64:$src), (outs), (ins),
                         [(set F32:$dst, (fpround F64:$src))],
-                        "f32.demote/f64\t$dst, $src", "f32.demote/f64",
+                        "f32.demote_f64\t$dst, $src", "f32.demote_f64",
                         0xb6>;
 
 defm I32_REINTERPRET_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
                              [(set I32:$dst, (bitconvert F32:$src))],
-                             "i32.reinterpret/f32\t$dst, $src",
-                             "i32.reinterpret/f32", 0xbc>;
+                             "i32.reinterpret_f32\t$dst, $src",
+                             "i32.reinterpret_f32", 0xbc>;
 defm F32_REINTERPRET_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins),
                              [(set F32:$dst, (bitconvert I32:$src))],
-                             "f32.reinterpret/i32\t$dst, $src",
-                             "f32.reinterpret/i32", 0xbe>;
+                             "f32.reinterpret_i32\t$dst, $src",
+                             "f32.reinterpret_i32", 0xbe>;
 defm I64_REINTERPRET_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
                              [(set I64:$dst, (bitconvert F64:$src))],
-                             "i64.reinterpret/f64\t$dst, $src",
-                             "i64.reinterpret/f64", 0xbd>;
+                             "i64.reinterpret_f64\t$dst, $src",
+                             "i64.reinterpret_f64", 0xbd>;
 defm F64_REINTERPRET_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins),
                              [(set F64:$dst, (bitconvert I64:$src))],
-                             "f64.reinterpret/i64\t$dst, $src",
-                             "f64.reinterpret/i64", 0xbf>;
+                             "f64.reinterpret_i64\t$dst, $src",
+                             "f64.reinterpret_i64", 0xbf>;
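The renames above follow the newer wasm spec convention: the operand type moves into the mnemonic body and the signedness becomes a trailing suffix (e.g. i32.trunc_s/f32 becomes i32.trunc_f32_s). As a rough sketch, a hypothetical migration helper for hand-written assembly might map the old spellings to the new ones; the table below is drawn from the hunks above, but the helper itself is not part of this patch.

#include <map>
#include <string>

// Hypothetical migration helper (not part of this patch): maps a few of the
// old slash-style mnemonics, taken from the hunks above, to their new
// underscore-style spellings.
std::string renameConvOp(const std::string &Old) {
  static const std::map<std::string, std::string> Renames = {
      {"i32.wrap/i64", "i32.wrap_i64"},
      {"i64.extend_s/i32", "i64.extend_i32_s"},
      {"i32.trunc_s:sat/f32", "i32.trunc_sat_f32_s"},
      {"f64.promote/f32", "f64.promote_f32"},
      {"i32.reinterpret/f32", "i32.reinterpret_f32"},
  };
  auto It = Renames.find(Old);
  return It == Renames.end() ? Old : It->second;
}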
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrFormats.td b/lib/Target/WebAssembly/WebAssemblyInstrFormats.td
index 97583ea..15a9714 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrFormats.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrFormats.td
@@ -40,10 +40,10 @@
 // based version of this instruction, as well as the corresponding asmstr.
 // The register versions have virtual-register operands which correspond to wasm
 // locals or stack locations. Each use and def of the register corresponds to an
-// implicit get_local / set_local or access of stack operands in wasm. These
+// implicit local.get / local.set or access of stack operands in wasm. These
 // instructions are used for ISel and all MI passes. The stack versions of the
 // instructions do not have register operands (they implicitly operate on the
-// stack), and get_locals and set_locals are explicit. The register instructions
+// stack), and local.gets and local.sets are explicit. The register instructions
 // are converted to their corresponding stack instructions before lowering to
 // MC.
 // Every instruction should be based on this multi-class to guarantee
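The register/stack duality described above can be pictured with a toy stack machine: the stack form of an operation pops its operands and pushes its result, which is exactly what the implicit local.get/local.set traffic around the register form expands to. A minimal illustrative sketch (not LLVM code):

#include <cstdint>
#include <vector>

// Toy stack machine (illustrative only). The stack form of i32.add pops two
// operands and pushes the result; the register form used during ISel is the
// same operation with explicit virtual-register operands, whose uses and
// defs become the local.get/local.set traffic modeled here.
struct WasmStack {
  std::vector<int32_t> Stack;
  void localGet(int32_t V) { Stack.push_back(V); } // push a local's value
  void i32Add() {                                  // stack form of i32.add
    int32_t RHS = Stack.back(); Stack.pop_back();
    int32_t LHS = Stack.back(); Stack.pop_back();
    Stack.push_back(LHS + RHS);
  }
  int32_t localSet() { // pop the result into a local
    int32_t V = Stack.back(); Stack.pop_back();
    return V;
  }
};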
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
index 085c5a7..e3d795f 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
@@ -20,6 +20,9 @@
 def HasAddr64 : Predicate<"Subtarget->hasAddr64()">;
 def HasSIMD128 : Predicate<"Subtarget->hasSIMD128()">,
                            AssemblerPredicate<"FeatureSIMD128", "simd128">;
+def HasUnimplementedSIMD128 :
+    Predicate<"Subtarget->hasUnimplementedSIMD128()">,
+    AssemblerPredicate<"FeatureUnimplementedSIMD128", "unimplemented-simd128">;
 def HasAtomics : Predicate<"Subtarget->hasAtomics()">,
                            AssemblerPredicate<"FeatureAtomics", "atomics">;
 def HasNontrappingFPToInt :
@@ -196,49 +199,49 @@
 defm "": ARGUMENT<F64, f64>;
 defm "": ARGUMENT<EXCEPT_REF, ExceptRef>;
 
-// get_local and set_local are not generated by instruction selection; they
+// local.get and local.set are not generated by instruction selection; they
 // are implied by virtual register uses and defs.
 multiclass LOCAL<WebAssemblyRegClass vt> {
 let hasSideEffects = 0 in {
-  // COPY is not an actual instruction in wasm, but since we allow get_local and
-  // set_local to be implicit during most of codegen, we can have a COPY which
-  // is actually a no-op because all the work is done in the implied get_local
-  // and set_local. COPYs are eliminated (and replaced with
-  // get_local/set_local) in the ExplicitLocals pass.
+  // COPY is not an actual instruction in wasm, but since we allow local.get and
+  // local.set to be implicit during most of codegen, we can have a COPY which
+  // is actually a no-op because all the work is done in the implied local.get
+  // and local.set. COPYs are eliminated (and replaced with
+  // local.get/local.set) in the ExplicitLocals pass.
   let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in
   defm COPY_#vt : I<(outs vt:$res), (ins vt:$src), (outs), (ins), [],
-                    "copy_local\t$res, $src", "copy_local">;
+                    "local.copy\t$res, $src", "local.copy">;
 
   // TEE is similar to COPY, but writes two copies of its result. Typically
   // this would be used to stackify one result and write the other result to a
   // local.
   let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in
   defm TEE_#vt : I<(outs vt:$res, vt:$also), (ins vt:$src), (outs), (ins), [],
-                   "tee_local\t$res, $also, $src", "tee_local">;
+                   "local.tee\t$res, $also, $src", "local.tee">;
 
-  // This is the actual get_local instruction in wasm. These are made explicit
+  // This is the actual local.get instruction in wasm. These are made explicit
   // by the ExplicitLocals pass. It has mayLoad because it reads from a wasm
   // local, which is a side effect not otherwise modeled in LLVM.
   let mayLoad = 1, isAsCheapAsAMove = 1 in
-  defm GET_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local),
+  defm LOCAL_GET_#vt : I<(outs vt:$res), (ins local_op:$local),
                          (outs), (ins local_op:$local), [],
-                         "get_local\t$res, $local", "get_local\t$local", 0x20>;
+                         "local.get\t$res, $local", "local.get\t$local", 0x20>;
 
-  // This is the actual set_local instruction in wasm. These are made explicit
+  // This is the actual local.set instruction in wasm. These are made explicit
   // by the ExplicitLocals pass. It has mayStore because it writes to a wasm
   // local, which is a side effect not otherwise modeled in LLVM.
   let mayStore = 1, isAsCheapAsAMove = 1 in
-  defm SET_LOCAL_#vt : I<(outs), (ins local_op:$local, vt:$src),
+  defm LOCAL_SET_#vt : I<(outs), (ins local_op:$local, vt:$src),
                          (outs), (ins local_op:$local), [],
-                         "set_local\t$local, $src", "set_local\t$local", 0x21>;
+                         "local.set\t$local, $src", "local.set\t$local", 0x21>;
 
-  // This is the actual tee_local instruction in wasm. TEEs are turned into
-  // TEE_LOCALs by the ExplicitLocals pass. It has mayStore for the same reason
-  // as SET_LOCAL.
+  // This is the actual local.tee instruction in wasm. TEEs are turned into
+  // LOCAL_TEEs by the ExplicitLocals pass. It has mayStore for the same reason
+  // as LOCAL_SET.
   let mayStore = 1, isAsCheapAsAMove = 1 in
-  defm TEE_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local, vt:$src),
+  defm LOCAL_TEE_#vt : I<(outs vt:$res), (ins local_op:$local, vt:$src),
                          (outs), (ins local_op:$local), [],
-                         "tee_local\t$res, $local, $src", "tee_local\t$local",
+                         "local.tee\t$res, $local, $src", "local.tee\t$local",
                          0x22>;
 
   // Unused values must be dropped in some contexts.
@@ -246,15 +249,15 @@
                     "drop\t$src", "drop", 0x1a>;
 
   let mayLoad = 1 in
-  defm GET_GLOBAL_#vt : I<(outs vt:$res), (ins global_op:$local),
+  defm GLOBAL_GET_#vt : I<(outs vt:$res), (ins global_op:$local),
                           (outs), (ins global_op:$local), [],
-                          "get_global\t$res, $local", "get_global\t$local",
+                          "global.get\t$res, $local", "global.get\t$local",
                           0x23>;
 
   let mayStore = 1 in
-  defm SET_GLOBAL_#vt : I<(outs), (ins global_op:$local, vt:$src),
+  defm GLOBAL_SET_#vt : I<(outs), (ins global_op:$local, vt:$src),
                           (outs), (ins global_op:$local), [],
-                          "set_global\t$local, $src", "set_global\t$local",
+                          "global.set\t$local, $src", "global.set\t$local",
                           0x24>;
 
 } // hasSideEffects = 0
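local.tee, as defined above, writes its operand to a local while also leaving the value available to the enclosing expression; a loose C++ analogy is assignment-as-expression (illustrative only, not part of this patch):

// Loose C++ analogy (illustrative only): local.tee writes a local and also
// yields the value, much like assignment-as-expression.
int consume(int V) { return V + 1; }
int teeLike(int X) {
  int Local = 0;
  return consume(Local = X); // "tee": store into Local, keep X's value live
}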
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrMemory.td b/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
index ccc331d..518f81c 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
@@ -33,10 +33,8 @@
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
     return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
 
-  KnownBits Known0;
-  CurDAG->computeKnownBits(N->getOperand(0), Known0, 0);
-  KnownBits Known1;
-  CurDAG->computeKnownBits(N->getOperand(1), Known1, 0);
+  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
+  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
   return (~Known0.Zero & ~Known1.Zero) == 0;
 }]>;
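The predicate above accepts an `or` as an add only when no bit position can be set in both operands; with disjoint possibly-one bits there are no carries, so the two operations agree. A small self-contained check of that property (not LLVM code):

#include <cassert>

// If no bit position can be 1 in both operands there are no carries, so
// (a | b) == (a + b); the PatFrag above checks exactly this with KnownBits
// via (~Known0.Zero & ~Known1.Zero) == 0.
int main() {
  unsigned Base = 0xFFF0u;   // low 4 bits known zero
  unsigned Offset = 0x000Cu; // only low 4 bits possibly set
  assert((Base | Offset) == (Base + Offset));
  return 0;
}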
 
@@ -438,17 +436,6 @@
                          "memory.size\t$dst, $flags", "memory.size\t$flags",
                          0x3f>,
                        Requires<[HasAddr32]>;
-defm MEM_SIZE_I32 : I<(outs I32:$dst), (ins i32imm:$flags),
-                      (outs), (ins i32imm:$flags),
-                      [(set I32:$dst, (int_wasm_mem_size (i32 imm:$flags)))],
-                      "mem.size\t$dst, $flags", "mem.size\t$flags", 0x3f>,
-                    Requires<[HasAddr32]>;
-defm CURRENT_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags),
-                            (outs), (ins i32imm:$flags),
-                            [],
-                            "current_memory\t$dst",
-                            "current_memory\t$flags", 0x3f>,
-                          Requires<[HasAddr32]>;
 
 // Grow memory.
 defm MEMORY_GROW_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta),
@@ -459,21 +446,3 @@
                          "memory.grow\t$dst, $flags, $delta",
                          "memory.grow\t$flags", 0x40>,
                        Requires<[HasAddr32]>;
-defm MEM_GROW_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta),
-                      (outs), (ins i32imm:$flags),
-                      [(set I32:$dst,
-                            (int_wasm_mem_grow (i32 imm:$flags), I32:$delta))],
-                      "mem.grow\t$dst, $flags, $delta", "mem.grow\t$flags",
-                      0x40>,
-                    Requires<[HasAddr32]>;
-defm GROW_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta),
-                         (outs), (ins i32imm:$flags),
-                         [],
-                         "grow_memory\t$dst, $delta", "grow_memory\t$flags",
-                         0x40>,
-                       Requires<[HasAddr32]>;
-
-def : Pat<(int_wasm_current_memory),
-          (CURRENT_MEMORY_I32 0)>;
-def : Pat<(int_wasm_grow_memory I32:$delta),
-          (GROW_MEMORY_I32 0, $delta)>;
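With the legacy mem.size/mem.grow and current_memory/grow_memory spellings deleted above, memory.size and memory.grow remain the only definitions for opcodes 0x3f and 0x40. For reference, memory.grow's behavior per the wasm spec is roughly the following sketch (page accounting assumed for illustration, not taken from this patch):

#include <cstdint>

// Sketch of memory.grow semantics per the wasm spec (assumed here, not taken
// from this patch): grow the memory by Delta 64 KiB pages, returning the old
// size in pages on success or -1 on failure.
std::int32_t memoryGrow(std::int32_t &SizeInPages, std::int32_t MaxPages,
                        std::int32_t Delta) {
  if (Delta < 0 || SizeInPages + Delta > MaxPages)
    return -1;
  std::int32_t Old = SizeInPages;
  SizeInPages += Delta;
  return Old;
}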
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
index 7ac2d15..587515c 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -94,7 +94,8 @@
 
 // Constant: v128.const
 multiclass ConstVec<ValueType vec_t, dag ops, dag pat, string args> {
-  let isMoveImm = 1, isReMaterializable = 1 in
+  let isMoveImm = 1, isReMaterializable = 1,
+    Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
   defm CONST_V128_#vec_t : SIMD_I<(outs V128:$dst), ops, (outs), ops,
                                   [(set V128:$dst, (vec_t pat))],
                                   "v128.const\t$dst, "#args,
@@ -276,17 +277,19 @@
 }
 
 defm "" : ExtractLaneExtended<"_s", 5>;
+let Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
 defm "" : ExtractLaneExtended<"_u", 6>;
 defm "" : ExtractLane<v4i32, "i32x4", LaneIdx4, I32, 13>;
 defm "" : ExtractLane<v2i64, "i64x2", LaneIdx2, I64, 16>;
 defm "" : ExtractLane<v4f32, "f32x4", LaneIdx4, F32, 19>;
 defm "" : ExtractLane<v2f64, "f64x2", LaneIdx2, F64, 22>;
 
-// Follow convention of making implicit expansions unsigned
+// It would be more conventional to use unsigned extracts, but V8
+// doesn't implement them yet.
 def : Pat<(i32 (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx))),
-          (EXTRACT_LANE_v16i8_u V128:$vec, (i32 LaneIdx16:$idx))>;
+          (EXTRACT_LANE_v16i8_s V128:$vec, (i32 LaneIdx16:$idx))>;
 def : Pat<(i32 (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx))),
-          (EXTRACT_LANE_v8i16_u V128:$vec, (i32 LaneIdx8:$idx))>;
+          (EXTRACT_LANE_v8i16_s V128:$vec, (i32 LaneIdx8:$idx))>;
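Using the signed extract for the implicit expansion is safe because masking the sign-extended result with 0xff (or 0xffff) recovers the zero-extended value, which is why consumers that need the unsigned value can still get it. In scalar C++ terms (illustrative only):

#include <cstdint>

// Scalar picture (illustrative only): sign- and zero-extending an i8 lane
// differ only in the high bits, and masking the signed result with 0xff
// recovers the unsigned value.
std::int32_t extractLaneS(std::int8_t Lane) {
  return static_cast<std::int32_t>(Lane);        // extract_lane_s
}
std::int32_t extractLaneU(std::int8_t Lane) {
  return static_cast<std::int32_t>(Lane) & 0xff; // extract_lane_u
}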
 
 // Lower undef lane indices to zero
 def : Pat<(and (i32 (vector_extract (v16i8 V128:$vec), undef)), (i32 0xff)),
@@ -576,7 +579,7 @@
     SIMD_I<(outs V128:$dst), (ins V128:$v1, V128:$v2, V128:$c), (outs), (ins),
            [(set (vec_t V128:$dst),
              (vec_t (int_wasm_bitselect
-               (vec_t V128:$c), (vec_t V128:$v1), (vec_t V128:$v2)
+               (vec_t V128:$v1), (vec_t V128:$v2), (vec_t V128:$c)
              ))
            )],
            "v128.bitselect\t$dst, $v1, $v2, $c", "v128.bitselect", 80>;
@@ -725,6 +728,7 @@
 defm NEG : SIMDUnaryFP<fneg, "neg", 150>;
 
 // Square root: sqrt
+let Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
 defm SQRT : SIMDUnaryFP<fsqrt, "sqrt", 151>;
 
 //===----------------------------------------------------------------------===//
@@ -748,6 +752,7 @@
 defm MUL : SIMDBinaryFP<fmul, "mul", 156>;
 
 // Division: div
+let Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
 defm DIV : SIMDBinaryFP<fdiv, "div", 157>;
 
 // NaN-propagating minimum: min
@@ -768,17 +773,17 @@
            name#"\t$dst, $vec", name, simdop>;
 }
 
-// Integer to floating point: convert_s / convert_u
-defm "" : SIMDConvert<v4f32, v4i32, sint_to_fp, "f32x4.convert_s/i32x4", 175>;
-defm "" : SIMDConvert<v4f32, v4i32, uint_to_fp, "f32x4.convert_u/i32x4", 176>;
-defm "" : SIMDConvert<v2f64, v2i64, sint_to_fp, "f64x2.convert_s/i64x2", 177>;
-defm "" : SIMDConvert<v2f64, v2i64, uint_to_fp, "f64x2.convert_u/i64x2", 178>;
+// Integer to floating point: convert
+defm "" : SIMDConvert<v4f32, v4i32, sint_to_fp, "f32x4.convert_i32x4_s", 175>;
+defm "" : SIMDConvert<v4f32, v4i32, uint_to_fp, "f32x4.convert_i32x4_u", 176>;
+defm "" : SIMDConvert<v2f64, v2i64, sint_to_fp, "f64x2.convert_i64x2_s", 177>;
+defm "" : SIMDConvert<v2f64, v2i64, uint_to_fp, "f64x2.convert_i64x2_u", 178>;
 
-// Floating point to integer with saturation: trunc_sat_s / trunc_sat_u
-defm "" : SIMDConvert<v4i32, v4f32, fp_to_sint, "i32x4.trunc_sat_s/f32x4", 171>;
-defm "" : SIMDConvert<v4i32, v4f32, fp_to_uint, "i32x4.trunc_sat_u/f32x4", 172>;
-defm "" : SIMDConvert<v2i64, v2f64, fp_to_sint, "i64x2.trunc_sat_s/f64x2", 173>;
-defm "" : SIMDConvert<v2i64, v2f64, fp_to_uint, "i64x2.trunc_sat_u/f64x2", 174>;
+// Floating point to integer with saturation: trunc_sat
+defm "" : SIMDConvert<v4i32, v4f32, fp_to_sint, "i32x4.trunc_sat_f32x4_s", 171>;
+defm "" : SIMDConvert<v4i32, v4f32, fp_to_uint, "i32x4.trunc_sat_f32x4_u", 172>;
+defm "" : SIMDConvert<v2i64, v2f64, fp_to_sint, "i64x2.trunc_sat_f64x2_s", 173>;
+defm "" : SIMDConvert<v2i64, v2f64, fp_to_uint, "i64x2.trunc_sat_f64x2_u", 174>;
 
 // Lower llvm.wasm.trunc.saturate.* to saturating instructions
 def : Pat<(v4i32 (int_wasm_trunc_saturate_signed (v4f32 V128:$src))),
diff --git a/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp b/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp
index 871e920..ad838df 100644
--- a/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp
@@ -103,6 +103,10 @@
 }
 
 bool WebAssemblyLateEHPrepare::runOnMachineFunction(MachineFunction &MF) {
+  LLVM_DEBUG(dbgs() << "********** Late EH Prepare **********\n"
+                       "********** Function: "
+                    << MF.getName() << '\n');
+
   if (MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() !=
       ExceptionHandling::Wasm)
     return false;
@@ -287,7 +291,7 @@
 //   %exn = catch 0
 //   call @__clang_call_terminate(%exn)
 //   unreachable
-// (There can be set_local and get_locals before the call if we didn't run
+// (There can be local.sets and local.gets before the call if we didn't run
 // RegStackify)
 // But code transformations can change or add more control flow, so the call to
 // __clang_call_terminate() function may not be in the original EH pad anymore.
@@ -326,7 +330,7 @@
     // This runs after hoistCatches(), so catch instruction should be at the top
     assert(WebAssembly::isCatch(*Catch));
     // Takes the result register of the catch instruction as argument. There may
-    // have been some other set_local/get_locals in between, but at this point
+    // have been some other local.sets/local.gets in between, but at this point
     // we don't care.
     Call->getOperand(1).setReg(Catch->getOperand(0).getReg());
     auto InsertPos = std::next(MachineBasicBlock::iterator(Catch));
diff --git a/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index 8755198..0491f71 100644
--- a/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -623,6 +623,8 @@
 }
 
 bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
+  LLVM_DEBUG(dbgs() << "********** Lower Emscripten EH & SjLj **********\n");
+
   LLVMContext &C = M.getContext();
   IRBuilder<> IRB(C);
 
diff --git a/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp b/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
index 3988189..84c877c 100644
--- a/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
@@ -59,6 +59,8 @@
 }
 
 bool LowerGlobalDtors::runOnModule(Module &M) {
+  LLVM_DEBUG(dbgs() << "********** Lower Global Destructors **********\n");
+
   GlobalVariable *GV = M.getGlobalVariable("llvm.global_dtors");
   if (!GV)
     return false;
diff --git a/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp b/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
new file mode 100644
index 0000000..c4b5e96
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
@@ -0,0 +1,212 @@
+//== WebAssemblyMemIntrinsicResults.cpp - Optimize memory intrinsic results ==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements an optimization pass using memory intrinsic results.
+///
+/// Calls to memory intrinsics (memcpy, memmove, memset) return the destination
+/// address. They are in the form of
+///   %dst_new = call @memcpy %dst, %src, %len
+/// where %dst and %dst_new registers contain the same value.
+///
+/// This is to enable an optimization wherein uses of the %dst register used in
+/// the parameter can be replaced by uses of the %dst_new register used in the
+/// result, making the %dst register more likely to be single-use, thus more
+/// likely to be useful for register stackifying, and potentially also
+/// exposing the call instruction itself to register stackifying. Both of
+/// these can reduce local.get/local.set traffic.
+///
+/// The LLVM intrinsics for these return void so they can't use the returned
+/// attribute and consequently aren't handled by the OptimizeReturned pass.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-mem-intrinsic-results"
+
+namespace {
+class WebAssemblyMemIntrinsicResults final : public MachineFunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+  WebAssemblyMemIntrinsicResults() : MachineFunctionPass(ID) {}
+
+  StringRef getPassName() const override {
+    return "WebAssembly Memory Intrinsic Results";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addRequired<MachineBlockFrequencyInfo>();
+    AU.addPreserved<MachineBlockFrequencyInfo>();
+    AU.addRequired<MachineDominatorTree>();
+    AU.addPreserved<MachineDominatorTree>();
+    AU.addRequired<LiveIntervals>();
+    AU.addPreserved<SlotIndexes>();
+    AU.addPreserved<LiveIntervals>();
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+};
+} // end anonymous namespace
+
+char WebAssemblyMemIntrinsicResults::ID = 0;
+INITIALIZE_PASS(WebAssemblyMemIntrinsicResults, DEBUG_TYPE,
+                "Optimize memory intrinsic result values for WebAssembly",
+                false, false)
+
+FunctionPass *llvm::createWebAssemblyMemIntrinsicResults() {
+  return new WebAssemblyMemIntrinsicResults();
+}
+
+// Replace uses of FromReg with ToReg if they are dominated by MI.
+static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
+                                 unsigned FromReg, unsigned ToReg,
+                                 const MachineRegisterInfo &MRI,
+                                 MachineDominatorTree &MDT,
+                                 LiveIntervals &LIS) {
+  bool Changed = false;
+
+  LiveInterval *FromLI = &LIS.getInterval(FromReg);
+  LiveInterval *ToLI = &LIS.getInterval(ToReg);
+
+  SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot();
+  VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx);
+
+  SmallVector<SlotIndex, 4> Indices;
+
+  for (auto I = MRI.use_nodbg_begin(FromReg), E = MRI.use_nodbg_end();
+       I != E;) {
+    MachineOperand &O = *I++;
+    MachineInstr *Where = O.getParent();
+
+    // Check that MI dominates the instruction in the normal way.
+    if (&MI == Where || !MDT.dominates(&MI, Where))
+      continue;
+
+    // If this use gets a different value, skip it.
+    SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
+    VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
+    if (WhereVNI && WhereVNI != FromVNI)
+      continue;
+
+    // Make sure ToReg isn't clobbered before it gets there.
+    VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx);
+    if (ToVNI && ToVNI != FromVNI)
+      continue;
+
+    Changed = true;
+    LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
+                      << MI << "\n");
+    O.setReg(ToReg);
+
+    // If the store's def was previously dead, it is no longer.
+    if (!O.isUndef()) {
+      MI.getOperand(0).setIsDead(false);
+
+      Indices.push_back(WhereIdx.getRegSlot());
+    }
+  }
+
+  if (Changed) {
+    // Extend ToReg's liveness.
+    LIS.extendToIndices(*ToLI, Indices);
+
+    // Shrink FromReg's liveness.
+    LIS.shrinkToUses(FromLI);
+
+    // If we replaced all dominated uses, FromReg is now killed at MI.
+    if (!FromLI->liveAt(FromIdx.getDeadSlot()))
+      MI.addRegisterKilled(FromReg, MBB.getParent()
+                                        ->getSubtarget<WebAssemblySubtarget>()
+                                        .getRegisterInfo());
+  }
+
+  return Changed;
+}
+
+static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
+                         const MachineRegisterInfo &MRI,
+                         MachineDominatorTree &MDT, LiveIntervals &LIS,
+                         const WebAssemblyTargetLowering &TLI,
+                         const TargetLibraryInfo &LibInfo) {
+  MachineOperand &Op1 = MI.getOperand(1);
+  if (!Op1.isSymbol())
+    return false;
+
+  StringRef Name(Op1.getSymbolName());
+  bool callReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
+                          Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
+                          Name == TLI.getLibcallName(RTLIB::MEMSET);
+  if (!callReturnsInput)
+    return false;
+
+  LibFunc Func;
+  if (!LibInfo.getLibFunc(Name, Func))
+    return false;
+
+  unsigned FromReg = MI.getOperand(2).getReg();
+  unsigned ToReg = MI.getOperand(0).getReg();
+  if (MRI.getRegClass(FromReg) != MRI.getRegClass(ToReg))
+    report_fatal_error("Memory Intrinsic results: call to builtin function "
+                       "with wrong signature, from/to mismatch");
+  return ReplaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
+}
+
+bool WebAssemblyMemIntrinsicResults::runOnMachineFunction(MachineFunction &MF) {
+  LLVM_DEBUG({
+    dbgs() << "********** Memory Intrinsic Results **********\n"
+           << "********** Function: " << MF.getName() << '\n';
+  });
+
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+  const WebAssemblyTargetLowering &TLI =
+      *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+  const auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+  LiveIntervals &LIS = getAnalysis<LiveIntervals>();
+  bool Changed = false;
+
+  // We don't preserve SSA form.
+  MRI.leaveSSA();
+
+  assert(MRI.tracksLiveness() &&
+         "MemIntrinsicResults expects liveness tracking");
+
+  for (auto &MBB : MF) {
+    LLVM_DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n');
+    for (auto &MI : MBB)
+      switch (MI.getOpcode()) {
+      default:
+        break;
+      case WebAssembly::CALL_I32:
+      case WebAssembly::CALL_I64:
+        Changed |= optimizeCall(MBB, MI, MRI, MDT, LIS, TLI, LibInfo);
+        break;
+      }
+  }
+
+  return Changed;
+}
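The pass leans on the C library guarantee that memcpy/memmove/memset return their destination argument, so the call's result register can stand in for the first operand. A minimal sketch of the source-level property (not part of this patch):

#include <cstring>

// The C library guarantees memcpy returns its destination; using that result
// instead of the original pointer keeps the original single-use (minimal
// source-level sketch, not part of this patch).
void *copyAndUse(void *Dst, const void *Src, std::size_t Len) {
  void *DstNew = std::memcpy(Dst, Src, Len); // DstNew == Dst by definition
  return DstNew; // later uses read the result, leaving Dst single-use
}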
diff --git a/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
index 113ee25..2c018d0 100644
--- a/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
@@ -74,6 +74,10 @@
 }
 
 bool OptimizeReturned::runOnFunction(Function &F) {
+  LLVM_DEBUG(dbgs() << "********** Optimize returned Attributes **********\n"
+                       "********** Function: "
+                    << F.getName() << '\n');
+
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   visit(F);
   return true;
diff --git a/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
index dc2aab8..1eb32ed 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -22,6 +22,7 @@
 
 #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" // for WebAssembly::ARGUMENT_*
 #include "WebAssembly.h"
+#include "WebAssemblyDebugValueManager.h"
 #include "WebAssemblyMachineFunctionInfo.h"
 #include "WebAssemblySubtarget.h"
 #include "WebAssemblyUtilities.h"
@@ -98,7 +99,8 @@
 static void ConvertImplicitDefToConstZero(MachineInstr *MI,
                                           MachineRegisterInfo &MRI,
                                           const TargetInstrInfo *TII,
-                                          MachineFunction &MF) {
+                                          MachineFunction &MF,
+                                          LiveIntervals &LIS) {
   assert(MI->getOpcode() == TargetOpcode::IMPLICIT_DEF);
 
   const auto *RegClass = MRI.getRegClass(MI->getOperand(0).getReg());
@@ -119,10 +121,13 @@
         Type::getDoubleTy(MF.getFunction().getContext())));
     MI->addOperand(MachineOperand::CreateFPImm(Val));
   } else if (RegClass == &WebAssembly::V128RegClass) {
-    // TODO: make splat instead of constant
-    MI->setDesc(TII->get(WebAssembly::CONST_V128_v16i8));
-    for (int I = 0; I < 16; ++I)
-      MI->addOperand(MachineOperand::CreateImm(0));
+    unsigned TempReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+    MI->setDesc(TII->get(WebAssembly::SPLAT_v4i32));
+    MI->addOperand(MachineOperand::CreateReg(TempReg, false));
+    MachineInstr *Const = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+                                  TII->get(WebAssembly::CONST_I32), TempReg)
+                              .addImm(0);
+    LIS.InsertMachineInstrInMaps(*Const);
   } else {
     llvm_unreachable("Unexpected reg class");
   }
@@ -177,19 +182,6 @@
   // Check for stores.
   if (MI.mayStore()) {
     Write = true;
-
-    // Check for stores to __stack_pointer.
-    for (auto MMO : MI.memoperands()) {
-      const MachinePointerInfo &MPI = MMO->getPointerInfo();
-      if (MPI.V.is<const PseudoSourceValue *>()) {
-        auto PSV = MPI.V.get<const PseudoSourceValue *>();
-        if (const ExternalSymbolPseudoSourceValue *EPSV =
-                dyn_cast<ExternalSymbolPseudoSourceValue>(PSV))
-          if (StringRef(EPSV->getSymbol()) == "__stack_pointer") {
-            StackPointer = true;
-          }
-      }
-    }
   } else if (MI.hasOrderedMemoryRef()) {
     switch (MI.getOpcode()) {
     case WebAssembly::DIV_S_I32:
@@ -254,6 +246,11 @@
     }
   }
 
+  // Check for writes to __stack_pointer global.
+  if (MI.getOpcode() == WebAssembly::GLOBAL_SET_I32 &&
+      strcmp(MI.getOperand(0).getSymbolName(), "__stack_pointer") == 0)
+    StackPointer = true;
+
   // Analyze calls.
   if (MI.isCall()) {
     unsigned CalleeOpNo = WebAssembly::getCalleeOpNo(MI);
@@ -410,7 +407,6 @@
     if (UseVNI != OneUseVNI)
       continue;
 
-    const MachineInstr *OneUseInst = OneUse.getParent();
     if (UseInst == OneUseInst) {
       // Another use in the same instruction. We need to ensure that the one
       // selected use happens "before" it.
@@ -422,8 +418,8 @@
         // Actually, dominating is over-conservative. Test that the use would
         // happen after the one selected use in the stack evaluation order.
         //
-        // This is needed as a consequence of using implicit get_locals for
-        // uses and implicit set_locals for defs.
+        // This is needed as a consequence of using implicit local.gets for
+        // uses and implicit local.sets for defs.
         if (UseInst->getDesc().getNumDefs() == 0)
           return false;
         const MachineOperand &MO = UseInst->getOperand(0);
@@ -471,27 +467,6 @@
   }
 }
 
-static void MoveDebugValues(unsigned Reg, MachineInstr *Insert,
-                            MachineBasicBlock &MBB, MachineRegisterInfo &MRI) {
-  for (auto &Op : MRI.reg_operands(Reg)) {
-    MachineInstr *MI = Op.getParent();
-    assert(MI != nullptr);
-    if (MI->isDebugValue() && MI->getParent() == &MBB)
-      MBB.splice(Insert, &MBB, MI);
-  }
-}
-
-static void UpdateDebugValuesReg(unsigned Reg, unsigned NewReg,
-                                 MachineBasicBlock &MBB,
-                                 MachineRegisterInfo &MRI) {
-  for (auto &Op : MRI.reg_operands(Reg)) {
-    MachineInstr *MI = Op.getParent();
-    assert(MI != nullptr);
-    if (MI->isDebugValue() && MI->getParent() == &MBB)
-      Op.setReg(NewReg);
-  }
-}
-
 /// A single-use def in the same block with no intervening memory or register
 /// dependencies; move the def down and nest it with the current instruction.
 static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand &Op,
@@ -501,8 +476,9 @@
                                       MachineRegisterInfo &MRI) {
   LLVM_DEBUG(dbgs() << "Move for single use: "; Def->dump());
 
+  WebAssemblyDebugValueManager DefDIs(Def);
   MBB.splice(Insert, &MBB, Def);
-  MoveDebugValues(Reg, Insert, MBB, MRI);
+  DefDIs.move(Insert);
   LIS.handleMove(*Def);
 
   if (MRI.hasOneDef(Reg) && MRI.hasOneUse(Reg)) {
@@ -527,7 +503,7 @@
 
     MFI.stackifyVReg(NewReg);
 
-    UpdateDebugValuesReg(Reg, NewReg, MBB, MRI);
+    DefDIs.updateReg(NewReg);
 
     LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
   }
@@ -536,29 +512,6 @@
   return Def;
 }
 
-static void CloneDebugValues(unsigned Reg, MachineInstr *Insert,
-                             unsigned TargetReg, MachineBasicBlock &MBB,
-                             MachineRegisterInfo &MRI,
-                             const WebAssemblyInstrInfo *TII) {
-  SmallPtrSet<MachineInstr *, 4> Instrs;
-  for (auto &Op : MRI.reg_operands(Reg)) {
-    MachineInstr *MI = Op.getParent();
-    assert(MI != nullptr);
-    if (MI->isDebugValue() && MI->getParent() == &MBB &&
-        Instrs.find(MI) == Instrs.end())
-      Instrs.insert(MI);
-  }
-  for (const auto &MI : Instrs) {
-    MachineInstr &Clone = TII->duplicate(MBB, Insert, *MI);
-    for (unsigned i = 0, e = Clone.getNumOperands(); i != e; ++i) {
-      MachineOperand &MO = Clone.getOperand(i);
-      if (MO.isReg() && MO.getReg() == Reg)
-        MO.setReg(TargetReg);
-    }
-    LLVM_DEBUG(dbgs() << " - - Cloned DBG_VALUE: "; Clone.dump());
-  }
-}
-
 /// A trivially cloneable instruction; clone it and nest the new copy with the
 /// current instruction.
 static MachineInstr *RematerializeCheapDef(
@@ -569,6 +522,8 @@
   LLVM_DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
   LLVM_DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
 
+  WebAssemblyDebugValueManager DefDIs(&Def);
+
   unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
   TII->reMaterialize(MBB, Insert, NewReg, 0, Def, *TRI);
   Op.setReg(NewReg);
@@ -598,10 +553,10 @@
     LIS.RemoveMachineInstrFromMaps(Def);
     Def.eraseFromParent();
 
-    MoveDebugValues(Reg, &*Insert, MBB, MRI);
-    UpdateDebugValuesReg(Reg, NewReg, MBB, MRI);
+    DefDIs.move(&*Insert);
+    DefDIs.updateReg(NewReg);
   } else {
-    CloneDebugValues(Reg, &*Insert, NewReg, MBB, MRI, TII);
+    DefDIs.clone(&*Insert, NewReg);
   }
 
   return Clone;
@@ -625,7 +580,7 @@
 ///    INST ..., Reg, ...
 ///    INST ..., Reg, ...
 ///
-/// with DefReg and TeeReg stackified. This eliminates a get_local from the
+/// with DefReg and TeeReg stackified. This eliminates a local.get from the
 /// resulting code.
 static MachineInstr *MoveAndTeeForMultiUse(
     unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB,
@@ -633,6 +588,8 @@
     MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) {
   LLVM_DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump());
 
+  WebAssemblyDebugValueManager DefDIs(Def);
+
   // Move Def into place.
   MBB.splice(Insert, &MBB, Def);
   LIS.handleMove(*Def);
@@ -651,7 +608,7 @@
   SlotIndex TeeIdx = LIS.InsertMachineInstrInMaps(*Tee).getRegSlot();
   SlotIndex DefIdx = LIS.getInstructionIndex(*Def).getRegSlot();
 
-  MoveDebugValues(Reg, Insert, MBB, MRI);
+  DefDIs.move(Insert);
 
   // Tell LiveIntervals we moved the original vreg def from Def to Tee.
   LiveInterval &LI = LIS.getInterval(Reg);
@@ -669,8 +626,8 @@
   ImposeStackOrdering(Def);
   ImposeStackOrdering(Tee);
 
-  CloneDebugValues(Reg, Tee, DefReg, MBB, MRI, TII);
-  CloneDebugValues(Reg, Insert, TeeReg, MBB, MRI, TII);
+  DefDIs.clone(Tee, DefReg);
+  DefDIs.clone(Insert, TeeReg);
 
   LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
   LLVM_DEBUG(dbgs() << " - Tee instruction: "; Tee->dump());
@@ -736,8 +693,8 @@
   /// operand in the tree that we haven't visited yet. Moving a definition of
   /// Reg to a point in the tree after that would change its value.
   ///
-  /// This is needed as a consequence of using implicit get_locals for
-  /// uses and implicit set_locals for defs.
+  /// This is needed as a consequence of using implicit local.gets for
+  /// uses and implicit local.sets for defs.
   bool IsOnStack(unsigned Reg) const {
     for (const RangeTy &Range : Worklist)
       for (const MachineOperand &MO : Range)
@@ -751,9 +708,9 @@
 /// tried for the current instruction and didn't work.
 class CommutingState {
   /// There are effectively three states: the initial state where we haven't
-  /// started commuting anything and we don't know anything yet, the tenative
+  /// started commuting anything and we don't know anything yet, the tentative
   /// state where we've commuted the operands of the current instruction and are
-  /// revisting it, and the declined state where we've reverted the operands
+  /// revisiting it, and the declined state where we've reverted the operands
   /// back to their original order and will no longer commute it further.
   bool TentativelyCommuting;
   bool Declined;
@@ -895,7 +852,7 @@
         // to a constant 0 so that the def is explicit, and the push/pop
         // correspondence is maintained.
         if (Insert->getOpcode() == TargetOpcode::IMPLICIT_DEF)
-          ConvertImplicitDefToConstZero(Insert, MRI, TII, MF);
+          ConvertImplicitDefToConstZero(Insert, MRI, TII, MF, LIS);
 
         // We stackified an operand. Add the defining instruction's operands to
         // the worklist stack now to continue to build an ever deeper tree.
diff --git a/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp b/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
deleted file mode 100644
index de104d4..0000000
--- a/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
+++ /dev/null
@@ -1,205 +0,0 @@
-//===-- WebAssemblyStoreResults.cpp - Optimize using store result values --===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This file implements an optimization pass using store result values.
-///
-/// WebAssembly's store instructions return the stored value. This is to enable
-/// an optimization wherein uses of the stored value can be replaced by uses of
-/// the store's result value, making the stored value register more likely to
-/// be single-use, thus more likely to be useful to register stackifying, and
-/// potentially also exposing the store to register stackifying. These both can
-/// reduce get_local/set_local traffic.
-///
-/// This pass also performs this optimization for memcpy, memmove, and memset
-/// calls, since the LLVM intrinsics for these return void so they can't use the
-/// returned attribute and consequently aren't handled by the OptimizeReturned
-/// pass.
-///
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
-#include "WebAssembly.h"
-#include "WebAssemblyMachineFunctionInfo.h"
-#include "WebAssemblySubtarget.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/CodeGen/LiveIntervals.h"
-#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "wasm-store-results"
-
-namespace {
-class WebAssemblyStoreResults final : public MachineFunctionPass {
-public:
-  static char ID; // Pass identification, replacement for typeid
-  WebAssemblyStoreResults() : MachineFunctionPass(ID) {}
-
-  StringRef getPassName() const override { return "WebAssembly Store Results"; }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<MachineBlockFrequencyInfo>();
-    AU.addPreserved<MachineBlockFrequencyInfo>();
-    AU.addRequired<MachineDominatorTree>();
-    AU.addPreserved<MachineDominatorTree>();
-    AU.addRequired<LiveIntervals>();
-    AU.addPreserved<SlotIndexes>();
-    AU.addPreserved<LiveIntervals>();
-    AU.addRequired<TargetLibraryInfoWrapperPass>();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-private:
-};
-} // end anonymous namespace
-
-char WebAssemblyStoreResults::ID = 0;
-INITIALIZE_PASS(WebAssemblyStoreResults, DEBUG_TYPE,
-                "Optimize store result values for WebAssembly", false, false)
-
-FunctionPass *llvm::createWebAssemblyStoreResults() {
-  return new WebAssemblyStoreResults();
-}
-
-// Replace uses of FromReg with ToReg if they are dominated by MI.
-static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
-                                 unsigned FromReg, unsigned ToReg,
-                                 const MachineRegisterInfo &MRI,
-                                 MachineDominatorTree &MDT,
-                                 LiveIntervals &LIS) {
-  bool Changed = false;
-
-  LiveInterval *FromLI = &LIS.getInterval(FromReg);
-  LiveInterval *ToLI = &LIS.getInterval(ToReg);
-
-  SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot();
-  VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx);
-
-  SmallVector<SlotIndex, 4> Indices;
-
-  for (auto I = MRI.use_nodbg_begin(FromReg), E = MRI.use_nodbg_end();
-       I != E;) {
-    MachineOperand &O = *I++;
-    MachineInstr *Where = O.getParent();
-
-    // Check that MI dominates the instruction in the normal way.
-    if (&MI == Where || !MDT.dominates(&MI, Where))
-      continue;
-
-    // If this use gets a different value, skip it.
-    SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
-    VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
-    if (WhereVNI && WhereVNI != FromVNI)
-      continue;
-
-    // Make sure ToReg isn't clobbered before it gets there.
-    VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx);
-    if (ToVNI && ToVNI != FromVNI)
-      continue;
-
-    Changed = true;
-    LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
-                      << MI << "\n");
-    O.setReg(ToReg);
-
-    // If the store's def was previously dead, it is no longer.
-    if (!O.isUndef()) {
-      MI.getOperand(0).setIsDead(false);
-
-      Indices.push_back(WhereIdx.getRegSlot());
-    }
-  }
-
-  if (Changed) {
-    // Extend ToReg's liveness.
-    LIS.extendToIndices(*ToLI, Indices);
-
-    // Shrink FromReg's liveness.
-    LIS.shrinkToUses(FromLI);
-
-    // If we replaced all dominated uses, FromReg is now killed at MI.
-    if (!FromLI->liveAt(FromIdx.getDeadSlot()))
-      MI.addRegisterKilled(FromReg, MBB.getParent()
-                                        ->getSubtarget<WebAssemblySubtarget>()
-                                        .getRegisterInfo());
-  }
-
-  return Changed;
-}
-
-static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
-                         const MachineRegisterInfo &MRI,
-                         MachineDominatorTree &MDT, LiveIntervals &LIS,
-                         const WebAssemblyTargetLowering &TLI,
-                         const TargetLibraryInfo &LibInfo) {
-  MachineOperand &Op1 = MI.getOperand(1);
-  if (!Op1.isSymbol())
-    return false;
-
-  StringRef Name(Op1.getSymbolName());
-  bool callReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
-                          Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
-                          Name == TLI.getLibcallName(RTLIB::MEMSET);
-  if (!callReturnsInput)
-    return false;
-
-  LibFunc Func;
-  if (!LibInfo.getLibFunc(Name, Func))
-    return false;
-
-  unsigned FromReg = MI.getOperand(2).getReg();
-  unsigned ToReg = MI.getOperand(0).getReg();
-  if (MRI.getRegClass(FromReg) != MRI.getRegClass(ToReg))
-    report_fatal_error("Store results: call to builtin function with wrong "
-                       "signature, from/to mismatch");
-  return ReplaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
-}
-
-bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) {
-  LLVM_DEBUG({
-    dbgs() << "********** Store Results **********\n"
-           << "********** Function: " << MF.getName() << '\n';
-  });
-
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
-  const WebAssemblyTargetLowering &TLI =
-      *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
-  const auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
-  LiveIntervals &LIS = getAnalysis<LiveIntervals>();
-  bool Changed = false;
-
-  // We don't preserve SSA form.
-  MRI.leaveSSA();
-
-  assert(MRI.tracksLiveness() && "StoreResults expects liveness tracking");
-
-  for (auto &MBB : MF) {
-    LLVM_DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n');
-    for (auto &MI : MBB)
-      switch (MI.getOpcode()) {
-      default:
-        break;
-      case WebAssembly::CALL_I32:
-      case WebAssembly::CALL_I64:
-        Changed |= optimizeCall(MBB, MI, MRI, MDT, LIS, TLI, LibInfo);
-        break;
-      }
-  }
-
-  return Changed;
-}
diff --git a/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
index d6af0fb..98133e2 100644
--- a/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
+++ b/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -40,10 +40,9 @@
                                            const std::string &CPU,
                                            const std::string &FS,
                                            const TargetMachine &TM)
-    : WebAssemblyGenSubtargetInfo(TT, CPU, FS), HasSIMD128(false),
-      HasAtomics(false), HasNontrappingFPToInt(false), HasSignExt(false),
-      HasExceptionHandling(false), CPUString(CPU), TargetTriple(TT),
-      FrameLowering(), InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(),
+    : WebAssemblyGenSubtargetInfo(TT, CPU, FS), CPUString(CPU),
+      TargetTriple(TT), FrameLowering(),
+      InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(),
       TLInfo(TM, *this) {}
 
 bool WebAssemblySubtarget::enableMachineScheduler() const {
diff --git a/lib/Target/WebAssembly/WebAssemblySubtarget.h b/lib/Target/WebAssembly/WebAssemblySubtarget.h
index b170dbf..0a0c046 100644
--- a/lib/Target/WebAssembly/WebAssemblySubtarget.h
+++ b/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -29,11 +29,16 @@
 namespace llvm {
 
 class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
-  bool HasSIMD128;
-  bool HasAtomics;
-  bool HasNontrappingFPToInt;
-  bool HasSignExt;
-  bool HasExceptionHandling;
+  enum SIMDEnum {
+    NoSIMD,
+    SIMD128,
+    UnimplementedSIMD128,
+  } SIMDLevel = NoSIMD;
+
+  bool HasAtomics = false;
+  bool HasNontrappingFPToInt = false;
+  bool HasSignExt = false;
+  bool HasExceptionHandling = false;
 
   /// String name of used CPU.
   std::string CPUString;
@@ -77,7 +82,10 @@
 
   // Predicates used by WebAssemblyInstrInfo.td.
   bool hasAddr64() const { return TargetTriple.isArch64Bit(); }
-  bool hasSIMD128() const { return HasSIMD128; }
+  bool hasSIMD128() const { return SIMDLevel >= SIMD128; }
+  bool hasUnimplementedSIMD128() const {
+    return SIMDLevel >= UnimplementedSIMD128;
+  }
   bool hasAtomics() const { return HasAtomics; }
   bool hasNontrappingFPToInt() const { return HasNontrappingFPToInt; }
   bool hasSignExt() const { return HasSignExt; }
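Encoding the SIMD tiers as an ordered enum lets each query reduce to a single comparison, since declaration order encodes implication (unimplemented-simd128 implies simd128). The idiom in isolation (illustrative only):

// The ordered-enum idiom in isolation (illustrative only): declaration order
// encodes implication, so "at least this level" is one comparison.
enum FeatureLevel { None, Base, Extended };
bool hasAtLeast(FeatureLevel Have, FeatureLevel Want) { return Have >= Want; }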
diff --git a/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index a6fb782..3bf8dd4 100644
--- a/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -62,7 +62,7 @@
   initializeWebAssemblyReplacePhysRegsPass(PR);
   initializeWebAssemblyPrepareForLiveIntervalsPass(PR);
   initializeWebAssemblyOptimizeLiveIntervalsPass(PR);
-  initializeWebAssemblyStoreResultsPass(PR);
+  initializeWebAssemblyMemIntrinsicResultsPass(PR);
   initializeWebAssemblyRegStackifyPass(PR);
   initializeWebAssemblyRegColoringPass(PR);
   initializeWebAssemblyExplicitLocalsPass(PR);
@@ -298,6 +298,12 @@
   // order of the arguments.
   addPass(createWebAssemblyCallIndirectFixup());
 
+  // Eliminate multiple-entry loops.
+  addPass(createWebAssemblyFixIrreducibleControlFlow());
+
+  // Do various transformations for exception handling.
+  addPass(createWebAssemblyLateEHPrepare());
+
   if (getOptLevel() != CodeGenOpt::None) {
     // LiveIntervals isn't commonly run this late. Re-establish preconditions.
     addPass(createWebAssemblyPrepareForLiveIntervals());
@@ -305,13 +311,14 @@
     // Depend on LiveIntervals and perform some optimizations on it.
     addPass(createWebAssemblyOptimizeLiveIntervals());
 
-    // Prepare store instructions for register stackifying.
-    addPass(createWebAssemblyStoreResults());
+    // Prepare memory intrinsic calls for register stackifying.
+    addPass(createWebAssemblyMemIntrinsicResults());
 
     // Mark registers as representing wasm's value stack. This is a key
     // code-compression technique in WebAssembly. We run this pass (and
-    // StoreResults above) very late, so that it sees as much code as possible,
-    // including code emitted by PEI and expanded by late tail duplication.
+    // MemIntrinsicResults above) very late, so that it sees as much code as
+    // possible, including code emitted by PEI and expanded by late tail
+    // duplication.
     addPass(createWebAssemblyRegStackify());
 
     // Run the register coloring pass to reduce the total number of registers.
@@ -320,17 +327,9 @@
     addPass(createWebAssemblyRegColoring());
   }
 
-  // Eliminate multiple-entry loops. Do this before inserting explicit get_local
-  // and set_local operators because we create a new variable that we want
-  // converted into a local.
-  addPass(createWebAssemblyFixIrreducibleControlFlow());
-
-  // Insert explicit get_local and set_local operators.
+  // Insert explicit local.get and local.set operators.
   addPass(createWebAssemblyExplicitLocals());
 
-  // Do various transformations for exception handling
-  addPass(createWebAssemblyLateEHPrepare());
-
   // Sort the blocks of the CFG into topological order, a prerequisite for
   // BLOCK and LOOP markers.
   addPass(createWebAssemblyCFGSort());
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 4801078..899b50d 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -804,8 +804,8 @@
     return Parser.Error(L, Msg, Range);
   }
 
-  std::nullptr_t ErrorOperand(SMLoc Loc, StringRef Msg) {
-    Error(Loc, Msg);
+  std::nullptr_t ErrorOperand(SMLoc Loc, StringRef Msg, SMRange R = SMRange()) {
+    Error(Loc, Msg, R);
     return nullptr;
   }
 
@@ -835,7 +835,10 @@
                                      InlineAsmIdentifierInfo &Info,
                                      bool IsUnevaluatedOperand, SMLoc &End);
 
-  std::unique_ptr<X86Operand> ParseMemOperand(unsigned SegReg, SMLoc MemStart);
+  std::unique_ptr<X86Operand> ParseMemOperand(unsigned SegReg,
+                                              const MCExpr *&Disp,
+                                              const SMLoc &StartLoc,
+                                              SMLoc &EndLoc);
 
   bool ParseIntelMemoryOperandSize(unsigned &Size);
   std::unique_ptr<X86Operand>
@@ -1011,8 +1014,7 @@
   // and then only in non-64-bit modes.
   if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
       (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
-                       BaseReg != X86::SI && BaseReg != X86::DI)) &&
-      BaseReg != X86::DX) {
+                       BaseReg != X86::SI && BaseReg != X86::DI))) {
     ErrMsg = "invalid 16-bit base register";
     return true;
   }
@@ -1103,10 +1105,13 @@
     if (RegNo == X86::RIZ || RegNo == X86::RIP ||
         X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
         X86II::isX86_64NonExtLowByteReg(RegNo) ||
-        X86II::isX86_64ExtendedReg(RegNo))
-      return Error(StartLoc, "register %"
-                   + Tok.getString() + " is only available in 64-bit mode",
+        X86II::isX86_64ExtendedReg(RegNo)) {
+      StringRef RegName = Tok.getString();
+      Parser.Lex(); // Eat register name.
+      return Error(StartLoc,
+                   "register %" + RegName + " is only available in 64-bit mode",
                    SMRange(StartLoc, EndLoc));
+    }
   }
 
   // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
@@ -1936,49 +1941,61 @@
 std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() {
   MCAsmParser &Parser = getParser();
   switch (getLexer().getKind()) {
-  default:
-    // Parse a memory operand with no segment register.
-    return ParseMemOperand(0, Parser.getTok().getLoc());
-  case AsmToken::Percent: {
-    // Read the register.
-    unsigned RegNo;
-    SMLoc Start, End;
-    if (ParseRegister(RegNo, Start, End)) return nullptr;
-    if (RegNo == X86::EIZ || RegNo == X86::RIZ) {
-      Error(Start, "%eiz and %riz can only be used as index registers",
-            SMRange(Start, End));
-      return nullptr;
-    }
-    if (RegNo == X86::RIP) {
-      Error(Start, "%rip can only be used as a base register",
-            SMRange(Start, End));
-      return nullptr;
-    }
-
-    // If this is a segment register followed by a ':', then this is the start
-    // of a memory reference, otherwise this is a normal register reference.
-    if (getLexer().isNot(AsmToken::Colon))
-      return X86Operand::CreateReg(RegNo, Start, End);
-
-    if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
-      return ErrorOperand(Start, "invalid segment register");
-
-    getParser().Lex(); // Eat the colon.
-    return ParseMemOperand(RegNo, Start);
-  }
   case AsmToken::Dollar: {
-    // $42 -> immediate.
+    // $42 or $ID -> immediate.
     SMLoc Start = Parser.getTok().getLoc(), End;
     Parser.Lex();
     const MCExpr *Val;
-    if (getParser().parseExpression(Val, End))
+    // This is an immediate, so we should not parse a register. Do a precheck
+    // for '%' to supersede intra-register parse errors.
+    SMLoc L = Parser.getTok().getLoc();
+    if (check(getLexer().is(AsmToken::Percent), L,
+              "expected immediate expression") ||
+        getParser().parseExpression(Val, End) ||
+        check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
       return nullptr;
     return X86Operand::CreateImm(Val, Start, End);
   }
-  case AsmToken::LCurly:{
+  case AsmToken::LCurly: {
     SMLoc Start = Parser.getTok().getLoc();
     return ParseRoundingModeOp(Start);
   }
+  default: {
+    // This is a memory operand or a register. We have some parsing complications
+    // as a '(' may be part of an immediate expression or the addressing mode
+    // block. This is complicated by the fact that an assembler-level variable
+    // may refer either to a register or an immediate expression.
+
+    SMLoc Loc = Parser.getTok().getLoc(), EndLoc;
+    const MCExpr *Expr = nullptr;
+    unsigned Reg = 0;
+    if (getLexer().isNot(AsmToken::LParen)) {
+      // No '(' so this is either a displacement expression or a register.
+      if (Parser.parseExpression(Expr, EndLoc))
+        return nullptr;
+      if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
+        // Segment Register. Reset Expr and copy value to register.
+        Expr = nullptr;
+        Reg = RE->getRegNo();
+
+        // Sanity check register.
+        if (Reg == X86::EIZ || Reg == X86::RIZ)
+          return ErrorOperand(
+              Loc, "%eiz and %riz can only be used as index registers",
+              SMRange(Loc, EndLoc));
+        if (Reg == X86::RIP)
+          return ErrorOperand(Loc, "%rip can only be used as a base register",
+                              SMRange(Loc, EndLoc));
+        // Return registers that are not segment prefixes immediately.
+        if (!Parser.parseOptionalToken(AsmToken::Colon))
+          return X86Operand::CreateReg(Reg, Loc, EndLoc);
+        if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
+          return ErrorOperand(Loc, "invalid segment register");
+      }
+    }
+    // This is a Memory operand.
+    return ParseMemOperand(Reg, Expr, Loc, EndLoc);
+  }
   }
 }
 
@@ -2087,199 +2104,201 @@
   return false;
 }
 
-/// ParseMemOperand: segment: disp(basereg, indexreg, scale).  The '%ds:' prefix
-/// has already been parsed if present.
+/// ParseMemOperand: 'seg : disp(basereg, indexreg, scale)'.  The '%ds:' prefix
+/// has already been parsed if present. disp may be provided as well.
 std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
-                                                          SMLoc MemStart) {
-
+                                                          const MCExpr *&Disp,
+                                                          const SMLoc &StartLoc,
+                                                          SMLoc &EndLoc) {
   MCAsmParser &Parser = getParser();
-  // We have to disambiguate a parenthesized expression "(4+5)" from the start
-  // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)".  The
-  // only way to do this without lookahead is to eat the '(' and see what is
-  // after it.
-  const MCExpr *Disp = MCConstantExpr::create(0, getParser().getContext());
-  if (getLexer().isNot(AsmToken::LParen)) {
-    SMLoc ExprEnd;
-    if (getParser().parseExpression(Disp, ExprEnd)) return nullptr;
-    // Disp may be a variable, handle register values.
-    if (auto *RE = dyn_cast<X86MCExpr>(Disp))
-      return X86Operand::CreateReg(RE->getRegNo(), MemStart, ExprEnd);
+  SMLoc Loc;
+  // Based on the values passed in, we are in one of these cases (the current
+  // position is marked with (*)):
 
-    // After parsing the base expression we could either have a parenthesized
-    // memory address or not.  If not, return now.  If so, eat the (.
-    if (getLexer().isNot(AsmToken::LParen)) {
-      // Unless we have a segment register, treat this as an immediate.
-      if (SegReg == 0)
-        return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, ExprEnd);
-      return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
-                                   MemStart, ExprEnd);
+  //   1. seg : * disp  (base-index-scale-expr)
+  //   2. seg : *(disp) (base-index-scale-expr)
+  //   3. seg :       *(base-index-scale-expr)
+  //   4.        disp  *(base-index-scale-expr)
+  //   5.      *(disp)  (base-index-scale-expr)
+  //   6.             *(base-index-scale-expr)
+  //   7.  disp *
+  //   8. *(disp)
+
+  // If we do not have a displacement yet, determine whether we're in case 4
+  // or 6 by checking whether the first object after the parenthesis is a
+  // register (or an identifier referring to a register), then parse the
+  // displacement or default it to 0 as appropriate.
+  auto isAtMemOperand = [this]() {
+    if (this->getLexer().isNot(AsmToken::LParen))
+      return false;
+    AsmToken Buf[2];
+    StringRef Id;
+    auto TokCount = this->getLexer().peekTokens(Buf, true);
+    if (TokCount == 0)
+      return false;
+    switch (Buf[0].getKind()) {
+    case AsmToken::Percent:
+    case AsmToken::Comma:
+      return true;
+    // The cases below effectively peek at an identifier.
+    case AsmToken::At:
+    case AsmToken::Dollar:
+      if ((TokCount > 1) &&
+          (Buf[1].is(AsmToken::Identifier) || Buf[1].is(AsmToken::String)) &&
+          (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
+        Id = StringRef(Buf[0].getLoc().getPointer(),
+                       Buf[1].getIdentifier().size() + 1);
+      break;
+    case AsmToken::Identifier:
+    case AsmToken::String:
+      Id = Buf[0].getIdentifier();
+      break;
+    default:
+      return false;
     }
-
-    // Eat the '('.
-    Parser.Lex();
-  } else {
-    // Okay, we have a '('.  We don't know if this is an expression or not, but
-    // so we have to eat the ( to see beyond it.
-    SMLoc LParenLoc = Parser.getTok().getLoc();
-    Parser.Lex(); // Eat the '('.
-
-    if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) {
-      // Nothing to do here, fall into the code below with the '(' part of the
-      // memory operand consumed.
-    } else {
-      SMLoc ExprEnd;
-      getLexer().UnLex(AsmToken(AsmToken::LParen, "("));
-
-      // It must be either an parenthesized expression, or an expression that
-      // begins from a parenthesized expression, parse it now. Example: (1+2) or
-      // (1+2)+3
-      if (getParser().parseExpression(Disp, ExprEnd))
-        return nullptr;
-
-      // After parsing the base expression we could either have a parenthesized
-      // memory address or not.  If not, return now.  If so, eat the (.
-      if (getLexer().isNot(AsmToken::LParen)) {
-        // Unless we have a segment register, treat this as an immediate.
-        if (SegReg == 0)
-          return X86Operand::CreateMem(getPointerWidth(), Disp, LParenLoc,
-                                       ExprEnd);
-        return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
-                                     MemStart, ExprEnd);
+    // We have an ID. Check if it is bound to a register.
+    if (!Id.empty()) {
+      MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
+      if (Sym->isVariable()) {
+        auto V = Sym->getVariableValue(/*SetUsed*/ false);
+        return isa<X86MCExpr>(V);
       }
+    }
+    return false;
+  };
 
-      // Eat the '('.
-      Parser.Lex();
+  if (!Disp) {
+    // Parse the displacement expression if we're not at a mem operand yet.
+    if (!isAtMemOperand()) {
+      if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Disp, EndLoc))
+        return nullptr;
+      assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
+    } else {
+      // Disp is implicitly zero if we haven't parsed it yet.
+      Disp = MCConstantExpr::create(0, Parser.getContext());
     }
   }
 
-  // If we reached here, then we just ate the ( of the memory operand.  Process
+  // We are now either at the end of the operand or at the '(' at the start of a
+  // base-index-scale-expr.
+
+  if (!parseOptionalToken(AsmToken::LParen)) {
+    if (SegReg == 0)
+      return X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc);
+    return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
+                                 StartLoc, EndLoc);
+  }
+
+  // If we reached here, we just ate the '('. Process
   // the rest of the memory operand.
   unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
-  SMLoc IndexLoc, BaseLoc;
+  SMLoc BaseLoc = getLexer().getLoc();
+  const MCExpr *E;
+  StringRef ErrMsg;
 
-  if (getLexer().is(AsmToken::Percent)) {
-    SMLoc StartLoc, EndLoc;
-    BaseLoc = Parser.getTok().getLoc();
-    if (ParseRegister(BaseReg, StartLoc, EndLoc)) return nullptr;
-    if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
-      Error(StartLoc, "eiz and riz can only be used as index registers",
-            SMRange(StartLoc, EndLoc));
+  // Parse BaseReg if one is provided.
+  if (getLexer().isNot(AsmToken::Comma) && getLexer().isNot(AsmToken::RParen)) {
+    if (Parser.parseExpression(E, EndLoc) ||
+        check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
       return nullptr;
-    }
+
+    // Sanity check register.
+    BaseReg = cast<X86MCExpr>(E)->getRegNo();
+    if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
+      return ErrorOperand(BaseLoc,
+                          "eiz and riz can only be used as index registers",
+                          SMRange(BaseLoc, EndLoc));
   }
 
-  if (getLexer().is(AsmToken::Comma)) {
-    Parser.Lex(); // Eat the comma.
-    IndexLoc = Parser.getTok().getLoc();
-
+  if (parseOptionalToken(AsmToken::Comma)) {
     // Following the comma we should have either an index register, or a scale
     // value. We don't support the latter form, but we want to parse it
     // correctly.
     //
-    // Not that even though it would be completely consistent to support syntax
-    // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
-    if (getLexer().is(AsmToken::Percent)) {
-      SMLoc L;
-      if (ParseRegister(IndexReg, L, L))
+    // Even though it would be completely consistent to support syntax like
+    // "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
+    if (getLexer().isNot(AsmToken::RParen)) {
+      if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(E, EndLoc))
         return nullptr;
-      if (BaseReg == X86::RIP) {
-        Error(IndexLoc, "%rip as base register can not have an index register");
-        return nullptr;
-      }
-      if (IndexReg == X86::RIP) {
-        Error(IndexLoc, "%rip is not allowed as an index register");
-        return nullptr;
-      }
 
-      if (getLexer().isNot(AsmToken::RParen)) {
-        // Parse the scale amount:
-        //  ::= ',' [scale-expression]
-        if (parseToken(AsmToken::Comma, "expected comma in scale expression"))
-          return nullptr;
+      if (!isa<X86MCExpr>(E)) {
+        // We've parsed an unexpected scale value instead of an index
+        // register. Interpret it as an absolute expression.
+        int64_t ScaleVal;
+        if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
+          return ErrorOperand(Loc, "expected absolute expression");
+        if (ScaleVal != 1)
+          Warning(Loc, "scale factor without index register is ignored");
+        Scale = 1;
+      } else { // IndexReg Found.
+        IndexReg = cast<X86MCExpr>(E)->getRegNo();
 
-        if (getLexer().isNot(AsmToken::RParen)) {
-          SMLoc Loc = Parser.getTok().getLoc();
+        if (BaseReg == X86::RIP)
+          return ErrorOperand(
+              Loc, "%rip as base register can not have an index register");
+        if (IndexReg == X86::RIP)
+          return ErrorOperand(Loc, "%rip is not allowed as an index register");
 
-          int64_t ScaleVal;
-          if (getParser().parseAbsoluteExpression(ScaleVal)){
-            Error(Loc, "expected scale expression");
-            return nullptr;
+        if (parseOptionalToken(AsmToken::Comma)) {
+          // Parse the scale amount:
+          //  ::= ',' [scale-expression]
+
+          // A scale amount without an index is ignored.
+          if (getLexer().isNot(AsmToken::RParen)) {
+            int64_t ScaleVal;
+            if (Parser.parseTokenLoc(Loc) ||
+                Parser.parseAbsoluteExpression(ScaleVal))
+              return ErrorOperand(Loc, "expected scale expression");
+            Scale = (unsigned)ScaleVal;
+            // Validate the scale amount.
+            if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
+                Scale != 1)
+              return ErrorOperand(Loc,
+                                  "scale factor in 16-bit address must be 1");
+            if (checkScale(Scale, ErrMsg))
+              return ErrorOperand(Loc, ErrMsg);
           }
-
-          // Validate the scale amount.
-          if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
-              ScaleVal != 1) {
-            Error(Loc, "scale factor in 16-bit address must be 1");
-            return nullptr;
-          }
-          if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 &&
-              ScaleVal != 8) {
-            Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
-            return nullptr;
-          }
-          Scale = (unsigned)ScaleVal;
         }
       }
-    } else if (getLexer().isNot(AsmToken::RParen)) {
-      // A scale amount without an index is ignored.
-      // index.
-      SMLoc Loc = Parser.getTok().getLoc();
-
-      int64_t Value;
-      if (getParser().parseAbsoluteExpression(Value))
-        return nullptr;
-
-      if (Value != 1)
-        Warning(Loc, "scale factor without index register is ignored");
-      Scale = 1;
     }
   }
 
   // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
-  SMLoc MemEnd = Parser.getTok().getEndLoc();
   if (parseToken(AsmToken::RParen, "unexpected token in memory operand"))
     return nullptr;
 
-  // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
-  // "outb %al, %dx".  Out doesn't take a memory form, but this is a widely
-  // documented form in various unofficial manuals, so a lot of code uses it.
-  if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 &&
-      SegReg == 0 && isa<MCConstantExpr>(Disp) &&
-      cast<MCConstantExpr>(Disp)->getValue() == 0)
+  // This supports the otherwise-illegal operand (%dx), which appears in
+  // examples in various unofficial manuals (e.g. "out[s]?[bwl]? %al, (%dx)")
+  // and so must still be accepted. Mark such DX variants separately; fix them
+  // up only in these special cases.
+  if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
+      isa<MCConstantExpr>(Disp) && cast<MCConstantExpr>(Disp)->getValue() == 0)
     return X86Operand::CreateDXReg(BaseLoc, BaseLoc);
 
-  StringRef ErrMsg;
   if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
-                                      ErrMsg)) {
-    Error(BaseLoc, ErrMsg);
-    return nullptr;
-  }
+                                      ErrMsg))
+    return ErrorOperand(BaseLoc, ErrMsg);
 
   if (SegReg || BaseReg || IndexReg)
     return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
-                                 IndexReg, Scale, MemStart, MemEnd);
-  return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, MemEnd);
+                                 IndexReg, Scale, StartLoc, EndLoc);
+  return X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc);
 }
 
 // Parse either a standard primary expression or a register.
 bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
   MCAsmParser &Parser = getParser();
-  if (Parser.parsePrimaryExpr(Res, EndLoc)) {
+  // See if this is a register first.
+  if (getTok().is(AsmToken::Percent) ||
+      (isParsingIntelSyntax() && getTok().is(AsmToken::Identifier) &&
+       MatchRegisterName(Parser.getTok().getString()))) {
     SMLoc StartLoc = Parser.getTok().getLoc();
-    // Normal Expression parse fails, check if it could be a register.
     unsigned RegNo;
-    bool TryRegParse =
-        getTok().is(AsmToken::Percent) ||
-        (isParsingIntelSyntax() && getTok().is(AsmToken::Identifier));
-    if (!TryRegParse || ParseRegister(RegNo, StartLoc, EndLoc))
+    if (ParseRegister(RegNo, StartLoc, EndLoc))
       return true;
-    // Clear previous parse error and return correct expression.
-    Parser.clearPendingErrors();
     Res = X86MCExpr::create(RegNo, Parser.getContext());
     return false;
   }
-
-  return false;
+  return Parser.parsePrimaryExpr(Res, EndLoc);
 }
 
 bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
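The rewritten AT&T operand path hinges on isAtMemOperand(): a '(' opens a base-index-scale block only when the token after it is '%', ',' or an identifier bound to a register; otherwise the parenthesis belongs to a displacement expression such as (4+5). A rough character-level sketch of that rule, ignoring the identifier case (plain C++, not the MC lexer):

    #include <cassert>
    #include <string>

    // Classify whether the '(' at position LParen starts the addressing-mode
    // block, per the peek rule above. The real code peeks tokens rather than
    // chars and also resolves identifiers bound to registers; this skips that.
    bool startsBaseIndexScale(const std::string &S, size_t LParen) {
      if (LParen + 1 >= S.size() || S[LParen] != '(')
        return false;
      char Next = S[LParen + 1];
      return Next == '%' || Next == ',';
    }

    int main() {
      assert(startsBaseIndexScale("4(%eax)", 1));      // case 4: disp (base...)
      assert(startsBaseIndexScale("(,%eax,2)", 0));    // omitted base register
      assert(!startsBaseIndexScale("(4+5)(%ebx)", 0)); // '(' opens a disp expr
    }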
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index 524b4ae..645ca49 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -7,13 +7,13 @@
 tablegen(LLVM X86GenDAGISel.inc -gen-dag-isel)
 tablegen(LLVM X86GenDisassemblerTables.inc -gen-disassembler)
 tablegen(LLVM X86GenEVEX2VEXTables.inc -gen-x86-EVEX2VEX-tables)
+tablegen(LLVM X86GenExegesis.inc -gen-exegesis)
 tablegen(LLVM X86GenFastISel.inc -gen-fast-isel)
 tablegen(LLVM X86GenGlobalISel.inc -gen-global-isel)
 tablegen(LLVM X86GenInstrInfo.inc -gen-instr-info)
 tablegen(LLVM X86GenRegisterBank.inc -gen-register-bank)
 tablegen(LLVM X86GenRegisterInfo.inc -gen-register-info)
 tablegen(LLVM X86GenSubtargetInfo.inc -gen-subtarget)
-tablegen(LLVM X86GenExegesis.inc -gen-exegesis)
 
 if (X86_GEN_FOLD_TABLES)
   tablegen(LLVM X86GenFoldTables.inc -gen-x86-fold-tables)
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 0e4c439..64e6fb9 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -540,7 +540,6 @@
     unsigned InstrOffset = 0;
     unsigned StackAdjust = 0;
     unsigned StackSize = 0;
-    unsigned PrevStackSize = 0;
     unsigned NumDefCFAOffsets = 0;
 
     for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
@@ -588,7 +587,6 @@
         //  L0:
         //     .cfi_def_cfa_offset 80
         //
-        PrevStackSize = StackSize;
         StackSize = std::abs(Inst.getOffset()) / StackDivide;
         ++NumDefCFAOffsets;
         break;
@@ -635,16 +633,6 @@
       CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
       CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
     } else {
-      // If the amount of the stack allocation is the size of a register, then
-      // we "push" the RAX/EAX register onto the stack instead of adjusting the
-      // stack pointer with a SUB instruction. We don't support the push of the
-      // RAX/EAX register with compact unwind. So we check for that situation
-      // here.
-      if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
-           StackSize - PrevStackSize == 1) ||
-          (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
-        return CU::UNWIND_MODE_DWARF;
-
       SubtractInstrIdx += InstrOffset;
       ++StackAdjust;
 
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index b4be9ed..36cef98 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -572,9 +572,9 @@
 
       // Emitting an Elf_Prop for the CET properties.
       OutStreamer->EmitIntValue(ELF::GNU_PROPERTY_X86_FEATURE_1_AND, 4);
-      OutStreamer->EmitIntValue(WordSize, 4);               // data size
-      OutStreamer->EmitIntValue(FeatureFlagsAnd, WordSize); // data
-      EmitAlignment(WordSize == 4 ? 2 : 3);                 // padding
+      OutStreamer->EmitIntValue(4, 4);               // data size
+      OutStreamer->EmitIntValue(FeatureFlagsAnd, 4); // data
+      EmitAlignment(WordSize == 4 ? 2 : 3);          // padding
 
       OutStreamer->endSection(Nt);
       OutStreamer->SwitchSection(Cur);
diff --git a/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index 2850baf..627a6cb 100644
--- a/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -641,21 +641,22 @@
   if (BlockingStoresDispSizeMap.size() <= 1)
     return;
 
-  int64_t PrevDisp = BlockingStoresDispSizeMap.begin()->first;
-  unsigned PrevSize = BlockingStoresDispSizeMap.begin()->second;
-  SmallVector<int64_t, 2> ForRemoval;
-  for (auto DispSizePair = std::next(BlockingStoresDispSizeMap.begin());
-       DispSizePair != BlockingStoresDispSizeMap.end(); ++DispSizePair) {
-    int64_t CurrDisp = DispSizePair->first;
-    unsigned CurrSize = DispSizePair->second;
-    if (CurrDisp + CurrSize <= PrevDisp + PrevSize) {
-      ForRemoval.push_back(PrevDisp);
+  SmallVector<std::pair<int64_t, unsigned>, 0> DispSizeStack;
+  for (auto DispSizePair : BlockingStoresDispSizeMap) {
+    int64_t CurrDisp = DispSizePair.first;
+    unsigned CurrSize = DispSizePair.second;
+    while (DispSizeStack.size()) {
+      int64_t PrevDisp = DispSizeStack.back().first;
+      unsigned PrevSize = DispSizeStack.back().second;
+      if (CurrDisp + CurrSize > PrevDisp + PrevSize)
+        break;
+      DispSizeStack.pop_back();
     }
-    PrevDisp = CurrDisp;
-    PrevSize = CurrSize;
+    DispSizeStack.push_back(DispSizePair);
   }
-  for (auto Disp : ForRemoval)
-    BlockingStoresDispSizeMap.erase(Disp);
+  BlockingStoresDispSizeMap.clear();
+  for (auto Disp : DispSizeStack)
+    BlockingStoresDispSizeMap.insert(Disp);
 }
 
 bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
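The rewritten loop replaces the single-predecessor comparison with a stack, so an entry is dropped whenever a later store (at a higher displacement) fails to extend past its end, even across several entries at once. A minimal STL rendering of that filtering, assuming entries arrive ordered by displacement as they do from the map:

    #include <cstdint>
    #include <map>
    #include <utility>
    #include <vector>

    std::map<int64_t, unsigned>
    filterBlockingStores(const std::map<int64_t, unsigned> &In) {
      std::vector<std::pair<int64_t, unsigned>> Stack;
      for (const auto &DS : In) {
        // Pop every earlier store whose end is not exceeded by this one,
        // mirroring: CurrDisp + CurrSize <= PrevDisp + PrevSize.
        while (!Stack.empty() && DS.first + DS.second <=
                                     Stack.back().first + Stack.back().second)
          Stack.pop_back();
        Stack.push_back(DS);
      }
      return {Stack.begin(), Stack.end()};
    }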
diff --git a/lib/Target/X86/X86DiscriminateMemOps.cpp b/lib/Target/X86/X86DiscriminateMemOps.cpp
index 5653bab..3654bf0 100644
--- a/lib/Target/X86/X86DiscriminateMemOps.cpp
+++ b/lib/Target/X86/X86DiscriminateMemOps.cpp
@@ -21,6 +21,7 @@
 #include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/ProfileData/SampleProf.h"
 #include "llvm/ProfileData/SampleProfReader.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Transforms/IPO/SampleProfile.h"
 using namespace llvm;
 
@@ -107,27 +108,37 @@
       if (!DI) {
         DI = ReferenceDI;
       }
-      DenseSet<unsigned> &Set = Seen[diToLocation(DI)];
+      Location L = diToLocation(DI);
+      DenseSet<unsigned> &Set = Seen[L];
       const std::pair<DenseSet<unsigned>::iterator, bool> TryInsert =
           Set.insert(DI->getBaseDiscriminator());
       if (!TryInsert.second) {
-        DI = DI->setBaseDiscriminator(++MemOpDiscriminators[diToLocation(DI)]);
-        updateDebugInfo(&MI, DI);
-        Changed = true;
-        const std::pair<DenseSet<unsigned>::iterator, bool> MustInsert =
-            Set.insert(DI->getBaseDiscriminator());
-        // FIXME (mtrofin): check if the to-be inserted base discriminator can
-        // be added. This requires a new API on DILocation.
-        // The assumption is that this scenario is infrequent/OK not to support.
-        // If evidence points otherwise, we can explore synthesize unique DIs by
-        // adding fake line numbers.
-        if (!MustInsert.second) {
-          LLVM_DEBUG(dbgs()
-                     << "Unable to create a unique discriminator in "
+        unsigned BF, DF, CI = 0;
+        DILocation::decodeDiscriminator(DI->getDiscriminator(), BF, DF, CI);
+        Optional<unsigned> EncodedDiscriminator = DILocation::encodeDiscriminator(
+            MemOpDiscriminators[L] + 1, DF, CI);
+
+        if (!EncodedDiscriminator) {
+          // FIXME(mtrofin): The assumption is that this scenario is
+          // infrequent/OK not to support. If evidence points otherwise, we can
+          // explore synthesizing unique DIs by adding fake line numbers, or by
+          // constructing 64-bit discriminators.
+          LLVM_DEBUG(dbgs() << "Unable to create a unique discriminator "
+                     "for instruction with memory operand in: "
                      << DI->getFilename() << " Line: " << DI->getLine()
                      << " Column: " << DI->getColumn()
-                     << ". This is likely due to a large macro expansion.\n");
+                     << ". This is likely due to a large macro expansion. \n");
+          continue;
         }
+        // Since we were able to encode, bump the MemOpDiscriminators.
+        ++MemOpDiscriminators[L];
+        DI = DI->cloneWithDiscriminator(EncodedDiscriminator.getValue());
+        updateDebugInfo(&MI, DI);
+        Changed = true;
+        std::pair<DenseSet<unsigned>::iterator, bool> MustInsert =
+            Set.insert(DI->getBaseDiscriminator());
+        (void)MustInsert; // Silence warning in release build.
+        assert(MustInsert.second && "New discriminator shouldn't be present in set");
       }
 
       // Bump the reference DI to avoid cramming discriminators on line 0.
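The key change in this hunk is ordering: the new discriminator is first encoded, and the per-location counter is bumped only if encoding succeeds, so a failed encode leaves no stale counter bump behind. A small model of that flow; the field widths here are made up, since DILocation's real packing is variable-length:

    #include <optional>

    // Stand-in encoder: fails when the base discriminator overflows its
    // (hypothetical) 12-bit field. DF = duplication factor, CI = copy id.
    std::optional<unsigned> encodeDisc(unsigned Base, unsigned DF, unsigned CI) {
      if (Base >= (1u << 12))
        return std::nullopt;
      return Base | (DF << 12) | (CI << 20);
    }

    // Encode first, bump the counter only on success; on failure the caller
    // keeps the old debug location and moves on (the "continue" above).
    bool tryBumpDiscriminator(unsigned &Counter, unsigned DF, unsigned CI,
                              unsigned &EncodedOut) {
      if (std::optional<unsigned> Enc = encodeDisc(Counter + 1, DF, CI)) {
        ++Counter;
        EncodedOut = *Enc;
        return true;
      }
      return false;
    }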
diff --git a/lib/Target/X86/X86DomainReassignment.cpp b/lib/Target/X86/X86DomainReassignment.cpp
index 7e1f1e7..d9ebbb5 100644
--- a/lib/Target/X86/X86DomainReassignment.cpp
+++ b/lib/Target/X86/X86DomainReassignment.cpp
@@ -732,7 +732,10 @@
   STI = &MF.getSubtarget<X86Subtarget>();
   // GPR->K is the only transformation currently supported, bail out early if no
   // AVX512.
-  if (!STI->hasAVX512())
+  // TODO: We're also bailing out if AVX512BW isn't supported, since we use
+  // VK32 and VK64 for GR32/GR64, but those aren't legal classes on KNL. If
+  // the register coalescer doesn't clean it up and we generate a spill, we
+  // will crash.
+  if (!STI->hasAVX512() || !STI->hasBWI())
     return false;
 
   MRI = &MF.getRegInfo();
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index a49ad8b..9dd3f26 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -2900,23 +2900,15 @@
         isCommutativeIntrinsic(II))
       std::swap(LHS, RHS);
 
-    bool UseIncDec = false;
-    if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())
-      UseIncDec = true;
-
     unsigned BaseOpc, CondOpc;
     switch (II->getIntrinsicID()) {
     default: llvm_unreachable("Unexpected intrinsic!");
     case Intrinsic::sadd_with_overflow:
-      BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD);
-      CondOpc = X86::SETOr;
-      break;
+      BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break;
     case Intrinsic::uadd_with_overflow:
       BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
     case Intrinsic::ssub_with_overflow:
-      BaseOpc = UseIncDec ? unsigned(X86ISD::DEC) : unsigned(ISD::SUB);
-      CondOpc = X86::SETOr;
-      break;
+      BaseOpc = ISD::SUB; CondOpc = X86::SETOr; break;
     case Intrinsic::usub_with_overflow:
       BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
     case Intrinsic::smul_with_overflow:
@@ -2938,9 +2930,11 @@
         { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
       };
 
-      if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {
+      if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
+          CondOpc == X86::SETOr) {
+        // We can use INC/DEC.
         ResultReg = createResultReg(TLI.getRegClassFor(VT));
-        bool IsDec = BaseOpc == X86ISD::DEC;
+        bool IsDec = BaseOpc == ISD::SUB;
         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                 TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
           .addReg(LHSReg, getKillRegState(LHSIsKill));
@@ -3998,7 +3992,8 @@
   }
 
   Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
-  MI->eraseFromParent();
+  MachineBasicBlock::iterator I(MI);
+  removeDeadCode(I, std::next(I));
   return true;
 }
 
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index ad42cb8..a346085 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -333,13 +333,12 @@
 static inline bool isLEASimpleIncOrDec(MachineInstr &LEA) {
   unsigned SrcReg = LEA.getOperand(1 + X86::AddrBaseReg).getReg();
   unsigned DstReg = LEA.getOperand(0).getReg();
-  unsigned AddrDispOp = 1 + X86::AddrDisp;
+  const MachineOperand &AddrDisp = LEA.getOperand(1 + X86::AddrDisp);
   return SrcReg == DstReg &&
          LEA.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
          LEA.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
-         LEA.getOperand(AddrDispOp).isImm() &&
-         (LEA.getOperand(AddrDispOp).getImm() == 1 ||
-          LEA.getOperand(AddrDispOp).getImm() == -1);
+         AddrDisp.isImm() &&
+         (AddrDisp.getImm() == 1 || AddrDisp.getImm() == -1);
 }
 
 bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
@@ -351,7 +350,7 @@
 
   if (isLEASimpleIncOrDec(MI) && TII->isSafeToClobberEFLAGS(*MFI, I)) {
     int NewOpcode;
-    bool isINC = MI.getOperand(4).getImm() == 1;
+    bool isINC = MI.getOperand(1 + X86::AddrDisp).getImm() == 1;
     switch (Opcode) {
     case X86::LEA16r:
       NewOpcode = isINC ? X86::INC16r : X86::DEC16r;
@@ -368,7 +367,7 @@
     MachineInstr *NewMI =
         BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode))
             .add(MI.getOperand(0))
-            .add(MI.getOperand(1));
+            .add(MI.getOperand(1 + X86::AddrBaseReg));
     MFI->erase(I);
     I = static_cast<MachineBasicBlock::iterator>(NewMI);
     return true;
@@ -420,15 +419,23 @@
   const int Opcode = MI.getOpcode();
   if (!isLEA(Opcode))
     return;
-  if (MI.getOperand(5).getReg() != 0 || !MI.getOperand(4).isImm() ||
+
+  const MachineOperand &Dst =     MI.getOperand(0);
+  const MachineOperand &Base =    MI.getOperand(1 + X86::AddrBaseReg);
+  const MachineOperand &Scale =   MI.getOperand(1 + X86::AddrScaleAmt);
+  const MachineOperand &Index =   MI.getOperand(1 + X86::AddrIndexReg);
+  const MachineOperand &Offset =  MI.getOperand(1 + X86::AddrDisp);
+  const MachineOperand &Segment = MI.getOperand(1 + X86::AddrSegmentReg);
+
+  if (Segment.getReg() != 0 || !Offset.isImm() ||
       !TII->isSafeToClobberEFLAGS(*MFI, I))
     return;
-  const unsigned DstR = MI.getOperand(0).getReg();
-  const unsigned SrcR1 = MI.getOperand(1).getReg();
-  const unsigned SrcR2 = MI.getOperand(3).getReg();
+  const unsigned DstR = Dst.getReg();
+  const unsigned SrcR1 = Base.getReg();
+  const unsigned SrcR2 = Index.getReg();
   if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
     return;
-  if (MI.getOperand(2).getImm() > 1)
+  if (Scale.getImm() > 1)
     return;
   LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
   LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);
@@ -436,19 +443,19 @@
   // Make ADD instruction for two registers writing to LEA's destination
   if (SrcR1 != 0 && SrcR2 != 0) {
     const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(Opcode));
-    const MachineOperand &Src = MI.getOperand(SrcR1 == DstR ? 3 : 1);
+    const MachineOperand &Src = SrcR1 == DstR ? Index : Base;
     NewMI =
         BuildMI(*MFI, I, MI.getDebugLoc(), ADDrr, DstR).addReg(DstR).add(Src);
     LLVM_DEBUG(NewMI->dump(););
   }
   // Make ADD instruction for immediate
-  if (MI.getOperand(4).getImm() != 0) {
+  if (Offset.getImm() != 0) {
     const MCInstrDesc &ADDri =
-        TII->get(getADDriFromLEA(Opcode, MI.getOperand(4)));
-    const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3);
+        TII->get(getADDriFromLEA(Opcode, Offset));
+    const MachineOperand &SrcR = SrcR1 == DstR ? Base : Index;
     NewMI = BuildMI(*MFI, I, MI.getDebugLoc(), ADDri, DstR)
                 .add(SrcR)
-                .addImm(MI.getOperand(4).getImm());
+                .addImm(Offset.getImm());
     LLVM_DEBUG(NewMI->dump(););
   }
   if (NewMI) {
@@ -465,12 +472,12 @@
   if (!isLEA(LEAOpcode))
     return nullptr;
 
-  const MachineOperand &Dst = MI.getOperand(0);
-  const MachineOperand &Base = MI.getOperand(1);
-  const MachineOperand &Scale = MI.getOperand(2);
-  const MachineOperand &Index = MI.getOperand(3);
-  const MachineOperand &Offset = MI.getOperand(4);
-  const MachineOperand &Segment = MI.getOperand(5);
+  const MachineOperand &Dst =     MI.getOperand(0);
+  const MachineOperand &Base =    MI.getOperand(1 + X86::AddrBaseReg);
+  const MachineOperand &Scale =   MI.getOperand(1 + X86::AddrScaleAmt);
+  const MachineOperand &Index =   MI.getOperand(1 + X86::AddrIndexReg);
+  const MachineOperand &Offset =  MI.getOperand(1 + X86::AddrDisp);
+  const MachineOperand &Segment = MI.getOperand(1 + X86::AddrSegmentReg);
 
   if (!(TII->isThreeOperandsLEA(MI) ||
         hasInefficientLEABaseReg(Base, Index)) ||
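Throughout this file the magic operand indices 1..5 are replaced with 1 + X86::Addr*, which documents the five-part x86 memory operand that follows the LEA destination. A toy model of that indexing, with the Addr* values assumed to be 0..4 in the usual base/scale/index/disp/segment order:

    #include <array>
    #include <cassert>
    #include <cstdint>

    enum AddrOp { // assumed to mirror X86::AddrBaseReg .. X86::AddrSegmentReg
      AddrBaseReg, AddrScaleAmt, AddrIndexReg, AddrDisp, AddrSegmentReg
    };

    // Slot 0 is the destination; slots 1..5 are the memory operand parts.
    struct LEASketch {
      std::array<int64_t, 6> Ops;
      bool isSimpleIncOrDec() const {
        return Ops[0] == Ops[1 + AddrBaseReg] &&    // dst == base
               Ops[1 + AddrIndexReg] == 0 &&        // no index
               Ops[1 + AddrSegmentReg] == 0 &&      // no segment
               (Ops[1 + AddrDisp] == 1 || Ops[1 + AddrDisp] == -1);
      }
    };

    int main() {
      LEASketch L{{/*dst*/ 7, /*base*/ 7, /*scale*/ 1, /*index*/ 0,
                   /*disp*/ 1, /*segment*/ 0}};
      assert(L.isSimpleIncOrDec()); // lea 1(%r), %r is really an INC
    }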
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index d722c75..984db12 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -185,7 +185,8 @@
     }
 
     for (auto CS : AvailableRegs)
-      if (!Uses.count(CS) && CS != X86::RIP)
+      if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP &&
+          CS != X86::ESP)
         return CS;
   }
   }
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index ba8a8a8..5ac1532 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -472,6 +472,10 @@
                                 SDValue &InFlag);
 
     bool tryOptimizeRem8Extend(SDNode *N);
+
+    bool onlyUsesZeroFlag(SDValue Flags) const;
+    bool hasNoSignFlagUses(SDValue Flags) const;
+    bool hasNoCarryFlagUses(SDValue Flags) const;
   };
 }
 
@@ -898,9 +902,89 @@
       continue;
     }
 
-    // Attempt to remove vectors moves that were inserted to zero upper bits.
+    // Look for a TESTrr+ANDrr pattern where both operands of the test are
+    // the same. Rewrite to remove the AND.
+    unsigned Opc = N->getMachineOpcode();
+    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
+         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
+        N->getOperand(0) == N->getOperand(1) &&
+        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
+        N->getOperand(0).isMachineOpcode()) {
+      SDValue And = N->getOperand(0);
+      unsigned N0Opc = And.getMachineOpcode();
+      if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
+          N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
+        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
+                                                     MVT::i32,
+                                                     And.getOperand(0),
+                                                     And.getOperand(1));
+        ReplaceUses(N, Test);
+        MadeChange = true;
+        continue;
+      }
+      if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
+          N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
+        unsigned NewOpc;
+        switch (N0Opc) {
+        case X86::AND8rm:  NewOpc = X86::TEST8mr; break;
+        case X86::AND16rm: NewOpc = X86::TEST16mr; break;
+        case X86::AND32rm: NewOpc = X86::TEST32mr; break;
+        case X86::AND64rm: NewOpc = X86::TEST64mr; break;
+        }
 
-    if (N->getMachineOpcode() != TargetOpcode::SUBREG_TO_REG)
+        // Need to swap the memory and register operands.
+        SDValue Ops[] = { And.getOperand(1),
+                          And.getOperand(2),
+                          And.getOperand(3),
+                          And.getOperand(4),
+                          And.getOperand(5),
+                          And.getOperand(0),
+                          And.getOperand(6)  /* Chain */ };
+        MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
+                                                     MVT::i32, MVT::Other, Ops);
+        ReplaceUses(N, Test);
+        MadeChange = true;
+        continue;
+      }
+    }
+
+    // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
+    // used. We're doing this late so we can prefer to fold the AND into masked
+    // comparisons. Doing that can be better for the live range of the mask
+    // register.
+    if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
+         Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
+        N->getOperand(0) == N->getOperand(1) &&
+        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
+        N->getOperand(0).isMachineOpcode() &&
+        onlyUsesZeroFlag(SDValue(N, 0))) {
+      SDValue And = N->getOperand(0);
+      unsigned N0Opc = And.getMachineOpcode();
+      // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
+      // KAND instructions and KTEST use the same ISA feature.
+      if (N0Opc == X86::KANDBrr ||
+          (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
+          N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
+        unsigned NewOpc;
+        switch (Opc) {
+        default: llvm_unreachable("Unexpected opcode!");
+        case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
+        case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
+        case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
+        case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
+        }
+        MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
+                                                      MVT::i32,
+                                                      And.getOperand(0),
+                                                      And.getOperand(1));
+        ReplaceUses(N, KTest);
+        MadeChange = true;
+        continue;
+      }
+    }
+
+    // Attempt to remove vectors moves that were inserted to zero upper bits.
+    if (Opc != TargetOpcode::SUBREG_TO_REG)
       continue;
 
     unsigned SubRegIdx = N->getConstantOperandVal(2);
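The soundness of the TESTrr-of-ANDrr rewrite above comes from TEST and AND computing identical flags: both set SF/ZF/PF from the bitwise AND of their operands and clear CF/OF, so "test (a&b),(a&b)" and "test a,b" agree on every flag. A quick check of the ZF case:

    #include <cassert>
    #include <cstdint>

    // ZF as TEST would compute it: set when the AND of the operands is zero.
    bool testZF(uint32_t A, uint32_t B) { return (A & B) == 0; }

    int main() {
      const uint32_t Vals[] = {0u, 1u, 0x0Fu, 0xF0u, ~0u};
      for (uint32_t A : Vals)
        for (uint32_t B : Vals) {
          uint32_t AndResult = A & B;
          // test %r,%r on the AND result == test a,b on the original inputs.
          assert(testZF(AndResult, AndResult) == testZF(A, B));
        }
    }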
@@ -1356,8 +1440,7 @@
   }
   APInt MaskedHighBits =
     APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
-  KnownBits Known;
-  DAG.computeKnownBits(X, Known);
+  KnownBits Known = DAG.computeKnownBits(X);
   if (MaskedHighBits != Known.Zero) return true;
 
   // We've identified a pattern that can be transformed into a single shift
@@ -2191,18 +2274,30 @@
          CR->getSignedMax().slt(1ull << Width);
 }
 
-/// Test whether the given X86ISD::CMP node has any uses which require the SF
-/// or OF bits to be accurate.
-static bool hasNoSignedComparisonUses(SDNode *N) {
+static X86::CondCode getCondFromOpc(unsigned Opc) {
+  X86::CondCode CC = X86::COND_INVALID;
+  if (CC == X86::COND_INVALID)
+    CC = X86::getCondFromBranchOpc(Opc);
+  if (CC == X86::COND_INVALID)
+    CC = X86::getCondFromSETOpc(Opc);
+  if (CC == X86::COND_INVALID)
+    CC = X86::getCondFromCMovOpc(Opc);
+
+  return CC;
+}
+
+/// Test whether every user of the given X86ISD::CMP node reads only the ZF
+/// flag; any use that requires another flag makes this return false.
+bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
   // Examine each user of the node.
-  for (SDNode::use_iterator UI = N->use_begin(),
-         UE = N->use_end(); UI != UE; ++UI) {
-    // Only examine CopyToReg uses.
-    if (UI->getOpcode() != ISD::CopyToReg)
-      return false;
+  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
+         UI != UE; ++UI) {
+    // Only check things that use the flags.
+    if (UI.getUse().getResNo() != Flags.getResNo())
+      continue;
     // Only examine CopyToReg uses that copy to EFLAGS.
-    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
-          X86::EFLAGS)
+    if (UI->getOpcode() != ISD::CopyToReg ||
+        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
       return false;
     // Examine each user of the CopyToReg use.
     for (SDNode::use_iterator FlagUI = UI->use_begin(),
@@ -2211,105 +2306,12 @@
       if (FlagUI.getUse().getResNo() != 1) continue;
       // Anything unusual: assume conservatively.
       if (!FlagUI->isMachineOpcode()) return false;
-      // Examine the opcode of the user.
-      switch (FlagUI->getMachineOpcode()) {
-      // These comparisons don't treat the most significant bit specially.
-      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
-      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
-      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
-      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
-      case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1:
-      case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1:
-      case X86::CMOVA16rr: case X86::CMOVA16rm:
-      case X86::CMOVA32rr: case X86::CMOVA32rm:
-      case X86::CMOVA64rr: case X86::CMOVA64rm:
-      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
-      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
-      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
-      case X86::CMOVB16rr: case X86::CMOVB16rm:
-      case X86::CMOVB32rr: case X86::CMOVB32rm:
-      case X86::CMOVB64rr: case X86::CMOVB64rm:
-      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
-      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
-      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
-      case X86::CMOVE16rr: case X86::CMOVE16rm:
-      case X86::CMOVE32rr: case X86::CMOVE32rm:
-      case X86::CMOVE64rr: case X86::CMOVE64rm:
-      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
-      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
-      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
-      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
-      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
-      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
-      case X86::CMOVP16rr: case X86::CMOVP16rm:
-      case X86::CMOVP32rr: case X86::CMOVP32rm:
-      case X86::CMOVP64rr: case X86::CMOVP64rm:
-        continue;
-      // Anything else: assume conservatively.
-      default: return false;
-      }
-    }
-  }
-  return true;
-}
+      // Examine the condition code of the user.
+      X86::CondCode CC = getCondFromOpc(FlagUI->getMachineOpcode());
 
-/// Test whether the given node which sets flags has any uses which require the
-/// CF flag to be accurate.
-static bool hasNoCarryFlagUses(SDNode *N) {
-  // Examine each user of the node.
-  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
-       ++UI) {
-    // Only check things that use the flags.
-    if (UI.getUse().getResNo() != 1)
-      continue;
-    // Only examine CopyToReg uses.
-    if (UI->getOpcode() != ISD::CopyToReg)
-      return false;
-    // Only examine CopyToReg uses that copy to EFLAGS.
-    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
-      return false;
-    // Examine each user of the CopyToReg use.
-    for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
-         FlagUI != FlagUE; ++FlagUI) {
-      // Only examine the Flag result.
-      if (FlagUI.getUse().getResNo() != 1)
-        continue;
-      // Anything unusual: assume conservatively.
-      if (!FlagUI->isMachineOpcode())
-        return false;
-      // Examine the opcode of the user.
-      switch (FlagUI->getMachineOpcode()) {
-      // Comparisons which don't examine the CF flag.
-      case X86::SETOr: case X86::SETNOr: case X86::SETEr: case X86::SETNEr:
-      case X86::SETSr: case X86::SETNSr: case X86::SETPr: case X86::SETNPr:
-      case X86::SETLr: case X86::SETGEr: case X86::SETLEr: case X86::SETGr:
-      case X86::JO_1: case X86::JNO_1: case X86::JE_1: case X86::JNE_1:
-      case X86::JS_1: case X86::JNS_1: case X86::JP_1: case X86::JNP_1:
-      case X86::JL_1: case X86::JGE_1: case X86::JLE_1: case X86::JG_1:
-      case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr:
-      case X86::CMOVO16rm: case X86::CMOVO32rm: case X86::CMOVO64rm:
-      case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr:
-      case X86::CMOVNO16rm: case X86::CMOVNO32rm: case X86::CMOVNO64rm:
-      case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr:
-      case X86::CMOVE16rm: case X86::CMOVE32rm: case X86::CMOVE64rm:
-      case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
-      case X86::CMOVNE16rm: case X86::CMOVNE32rm: case X86::CMOVNE64rm:
-      case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr:
-      case X86::CMOVS16rm: case X86::CMOVS32rm: case X86::CMOVS64rm:
-      case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
-      case X86::CMOVNS16rm: case X86::CMOVNS32rm: case X86::CMOVNS64rm:
-      case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr:
-      case X86::CMOVP16rm: case X86::CMOVP32rm: case X86::CMOVP64rm:
-      case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr:
-      case X86::CMOVNP16rm: case X86::CMOVNP32rm: case X86::CMOVNP64rm:
-      case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr:
-      case X86::CMOVL16rm: case X86::CMOVL32rm: case X86::CMOVL64rm:
-      case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
-      case X86::CMOVGE16rm: case X86::CMOVGE32rm: case X86::CMOVGE64rm:
-      case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
-      case X86::CMOVLE16rm: case X86::CMOVLE32rm: case X86::CMOVLE64rm:
-      case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr:
-      case X86::CMOVG16rm: case X86::CMOVG32rm: case X86::CMOVG64rm:
+      switch (CC) {
+      // Comparisons which only use the zero flag.
+      case X86::COND_E: case X86::COND_NE:
         continue;
       // Anything else: assume conservatively.
       default:
@@ -2320,6 +2322,118 @@
   return true;
 }
 
+/// Test whether the given X86ISD::CMP node has any uses which require the SF
+/// flag to be accurate.
+bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
+  // Examine each user of the node.
+  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
+         UI != UE; ++UI) {
+    // Only check things that use the flags.
+    if (UI.getUse().getResNo() != Flags.getResNo())
+      continue;
+    // Only examine CopyToReg uses that copy to EFLAGS.
+    if (UI->getOpcode() != ISD::CopyToReg ||
+        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
+      return false;
+    // Examine each user of the CopyToReg use.
+    for (SDNode::use_iterator FlagUI = UI->use_begin(),
+           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
+      // Only examine the Flag result.
+      if (FlagUI.getUse().getResNo() != 1) continue;
+      // Anything unusual: assume conservatively.
+      if (!FlagUI->isMachineOpcode()) return false;
+      // Examine the condition code of the user.
+      X86::CondCode CC = getCondFromOpc(FlagUI->getMachineOpcode());
+
+      switch (CC) {
+      // Comparisons which don't examine the SF flag.
+      case X86::COND_A: case X86::COND_AE:
+      case X86::COND_B: case X86::COND_BE:
+      case X86::COND_E: case X86::COND_NE:
+      case X86::COND_O: case X86::COND_NO:
+      case X86::COND_P: case X86::COND_NP:
+        continue;
+      // Anything else: assume conservatively.
+      default:
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+static bool mayUseCarryFlag(X86::CondCode CC) {
+  switch (CC) {
+  // Comparisons which don't examine the CF flag.
+  case X86::COND_O: case X86::COND_NO:
+  case X86::COND_E: case X86::COND_NE:
+  case X86::COND_S: case X86::COND_NS:
+  case X86::COND_P: case X86::COND_NP:
+  case X86::COND_L: case X86::COND_GE:
+  case X86::COND_G: case X86::COND_LE:
+    return false;
+  // Anything else: assume conservatively.
+  default:
+    return true;
+  }
+}
+
+/// Test whether the given node which sets flags has any uses which require the
+/// CF flag to be accurate.
+bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
+  // Examine each user of the node.
+  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
+         UI != UE; ++UI) {
+    // Only check things that use the flags.
+    if (UI.getUse().getResNo() != Flags.getResNo())
+      continue;
+
+    unsigned UIOpc = UI->getOpcode();
+
+    if (UIOpc == ISD::CopyToReg) {
+      // Only examine CopyToReg uses that copy to EFLAGS.
+      if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
+        return false;
+      // Examine each user of the CopyToReg use.
+      for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
+           FlagUI != FlagUE; ++FlagUI) {
+        // Only examine the Flag result.
+        if (FlagUI.getUse().getResNo() != 1)
+          continue;
+        // Anything unusual: assume conservatively.
+        if (!FlagUI->isMachineOpcode())
+          return false;
+        // Examine the condition code of the user.
+        X86::CondCode CC = getCondFromOpc(FlagUI->getMachineOpcode());
+
+        if (mayUseCarryFlag(CC))
+          return false;
+      }
+
+      // This CopyToReg is ok. Move on to the next user.
+      continue;
+    }
+
+    // This might be an unselected node. So look for the pre-isel opcodes that
+    // use flags.
+    unsigned CCOpNo;
+    switch (UIOpc) {
+    default:
+      // Something unusual. Be conservative.
+      return false;
+    case X86ISD::SETCC:       CCOpNo = 0; break;
+    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
+    case X86ISD::CMOV:        CCOpNo = 2; break;
+    case X86ISD::BRCOND:      CCOpNo = 2; break;
+    }
+
+    X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
+    if (mayUseCarryFlag(CC))
+      return false;
+  }
+  return true;
+}
+
 /// Check whether or not the chain ending in StoreNode is suitable for doing
 /// the {load; op; store} to modify transformation.
 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
@@ -2471,8 +2585,6 @@
   switch (Opc) {
   default:
     return false;
-  case X86ISD::INC:
-  case X86ISD::DEC:
   case X86ISD::SUB:
   case X86ISD::SBB:
     break;
@@ -2523,20 +2635,27 @@
 
   MachineSDNode *Result;
   switch (Opc) {
-  case X86ISD::INC:
-  case X86ISD::DEC: {
-    unsigned NewOpc =
-        Opc == X86ISD::INC
-            ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
-            : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
-    const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
-    Result =
-        CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other, Ops);
-    break;
-  }
   case X86ISD::ADD:
-  case X86ISD::ADC:
   case X86ISD::SUB:
+    // Try to match inc/dec.
+    if (!Subtarget->slowIncDec() ||
+        CurDAG->getMachineFunction().getFunction().optForSize()) {
+      bool IsOne = isOneConstant(StoredVal.getOperand(1));
+      bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
+      // An ADD/SUB with 1/-1 whose carry flag is unused can use INC/DEC.
+      if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
+        unsigned NewOpc =
+          ((Opc == X86ISD::ADD) == IsOne)
+              ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
+              : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
+        const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
+        Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
+                                        MVT::Other, Ops);
+        break;
+      }
+    }
+    LLVM_FALLTHROUGH;
+  case X86ISD::ADC:
   case X86ISD::SBB:
   case X86ISD::AND:
   case X86ISD::OR:
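The hasNoCarryFlagUses() gate in the INC/DEC match above exists because the fold changes exactly one flag: ADD/SUB write CF, while INC/DEC leave it untouched, and the remaining flags (ZF/SF/OF/PF) match. A tiny 8-bit model of that difference:

    #include <cassert>
    #include <cstdint>

    struct Flags { bool CF = false; };

    uint8_t add8(uint8_t A, uint8_t B, Flags &F) {
      unsigned R = unsigned(A) + B;
      F.CF = R > 0xFF; // ADD always rewrites CF
      return uint8_t(R);
    }

    uint8_t inc8(uint8_t A, Flags &F) {
      (void)F; // INC leaves CF exactly as it was
      return uint8_t(A + 1);
    }

    int main() {
      Flags F;
      assert(add8(0xFF, 1, F) == 0 && F.CF);  // add $1 sets CF on wrap
      F.CF = false;
      assert(inc8(0xFF, F) == 0 && !F.CF);    // inc wraps but CF is untouched
    }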
@@ -2631,7 +2750,7 @@
             (-OperandV).getMinSignedBits() <= 8) ||
            (MemVT == MVT::i64 && OperandV.getMinSignedBits() > 32 &&
             (-OperandV).getMinSignedBits() <= 32)) &&
-          hasNoCarryFlagUses(StoredVal.getNode())) {
+          hasNoCarryFlagUses(StoredVal.getValue(1))) {
         OperandV = -OperandV;
         Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
       }
@@ -2827,25 +2946,37 @@
 
   SDLoc DL(Node);
 
+  // If we do *NOT* have BMI2, check whether the 'X' is *logically* shifted
+  // (potentially with a one-use trunc in between), and if so look past the
+  // one-use truncation.
+  MVT XVT = NVT;
+  if (!Subtarget->hasBMI2() && X.getOpcode() == ISD::TRUNCATE &&
+      X.hasOneUse() && X.getOperand(0).getOpcode() == ISD::SRL) {
+    assert(NVT == MVT::i32 && "Expected target valuetype to be i32");
+    X = X.getOperand(0);
+    XVT = X.getSimpleValueType();
+    assert(XVT == MVT::i64 && "Expected truncation from i64");
+  }
+
   SDValue OrigNBits = NBits;
-  if (NBits.getValueType() != NVT) {
+  if (NBits.getValueType() != XVT) {
     // Truncate the shift amount.
     NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
     insertDAGNode(*CurDAG, OrigNBits, NBits);
 
-    // Insert 8-bit NBits into lowest 8 bits of NVT-sized (32 or 64-bit)
+    // Insert 8-bit NBits into lowest 8 bits of XVT-sized (32 or 64-bit)
     // register. All the other bits are undefined, we do not care about them.
     SDValue ImplDef =
-        SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, NVT), 0);
+        SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, XVT), 0);
     insertDAGNode(*CurDAG, OrigNBits, ImplDef);
     NBits =
-        CurDAG->getTargetInsertSubreg(X86::sub_8bit, DL, NVT, ImplDef, NBits);
+        CurDAG->getTargetInsertSubreg(X86::sub_8bit, DL, XVT, ImplDef, NBits);
     insertDAGNode(*CurDAG, OrigNBits, NBits);
   }
 
   if (Subtarget->hasBMI2()) {
   // Great, just emit the BZHI.
-    SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
+    SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, XVT, X, NBits);
     ReplaceNode(Node, Extract.getNode());
     SelectCode(Extract.getNode());
     return true;
@@ -2860,7 +2991,7 @@
   // Shift NBits left by 8 bits, thus producing 'control'.
   // This makes the low 8 bits to be zero.
   SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
-  SDValue Control = CurDAG->getNode(ISD::SHL, DL, NVT, NBits, C8);
+  SDValue Control = CurDAG->getNode(ISD::SHL, DL, XVT, NBits, C8);
   insertDAGNode(*CurDAG, OrigNBits, Control);
 
   // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
@@ -2873,16 +3004,23 @@
 
     // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
     SDValue OrigShiftAmt = ShiftAmt;
-    ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, NVT, ShiftAmt);
+    ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, XVT, ShiftAmt);
     insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
 
     // And now 'or' these low 8 bits of shift amount into the 'control'.
-    Control = CurDAG->getNode(ISD::OR, DL, NVT, Control, ShiftAmt);
+    Control = CurDAG->getNode(ISD::OR, DL, XVT, Control, ShiftAmt);
     insertDAGNode(*CurDAG, OrigNBits, Control);
   }
 
   // And finally, form the BEXTR itself.
-  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, NVT, X, Control);
+  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
+
+  // The 'X' was originally truncated. Do that now.
+  if (XVT != NVT) {
+    insertDAGNode(*CurDAG, OrigNBits, Extract);
+    Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
+  }
+
   ReplaceNode(Node, Extract.getNode());
   SelectCode(Extract.getNode());
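
A sketch of the widening added above, assuming BMI but not BMI2 (so BEXTR is used instead of BZHI): a value that was logically shifted in i64, truncated, and then masked can now be extracted with one 64-bit BEXTR, truncating afterwards. The function name and bounds are illustrative, and whether this exact form reaches the matcher depends on earlier combines:

    // bextr_widen.cpp -- illustrative only.
    #include <cstdint>

    // Assumes shift < 64 and 0 < nbits < 32 so the expressions are defined.
    uint32_t extract_field(uint64_t x, unsigned shift, unsigned nbits) {
      uint32_t lo = static_cast<uint32_t>(x >> shift);  // trunc(srl i64 X)
      return lo & ((1u << nbits) - 1);                  // keep low nbits
    }
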
 
@@ -3243,9 +3381,8 @@
     }
     break;
 
-  case X86ISD::SELECT:
-  case X86ISD::SHRUNKBLEND: {
-    // SHRUNKBLEND selects like a regular VSELECT. Same with X86ISD::SELECT.
+  case X86ISD::BLENDV: {
+    // BLENDV selects like a regular VSELECT.
     SDValue VSelect = CurDAG->getNode(
         ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
         Node->getOperand(1), Node->getOperand(2));
@@ -3361,45 +3498,85 @@
                            getI8Imm(ShlVal, dl));
     return;
   }
-  case X86ISD::UMUL8:
-  case X86ISD::SMUL8: {
-    SDValue N0 = Node->getOperand(0);
-    SDValue N1 = Node->getOperand(1);
-
-    unsigned Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r);
-
-    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL,
-                                          N0, SDValue()).getValue(1);
-
-    SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32);
-    SDValue Ops[] = {N1, InFlag};
-    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
-
-    ReplaceNode(Node, CNode);
-    return;
-  }
-
+  case X86ISD::SMUL:
+    // i16/i32/i64 are handled with isel patterns.
+    if (NVT != MVT::i8)
+      break;
+    LLVM_FALLTHROUGH;
   case X86ISD::UMUL: {
     SDValue N0 = Node->getOperand(0);
     SDValue N1 = Node->getOperand(1);
 
-    unsigned LoReg, Opc;
+    unsigned LoReg, ROpc, MOpc;
     switch (NVT.SimpleTy) {
     default: llvm_unreachable("Unsupported VT!");
-    // MVT::i8 is handled by X86ISD::UMUL8.
-    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
-    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
-    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
+    case MVT::i8:
+      LoReg = X86::AL;
+      ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
+      MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
+      break;
+    case MVT::i16:
+      LoReg = X86::AX;
+      ROpc = X86::MUL16r;
+      MOpc = X86::MUL16m;
+      break;
+    case MVT::i32:
+      LoReg = X86::EAX;
+      ROpc = X86::MUL32r;
+      MOpc = X86::MUL32m;
+      break;
+    case MVT::i64:
+      LoReg = X86::RAX;
+      ROpc = X86::MUL64r;
+      MOpc = X86::MUL64m;
+      break;
+    }
+
+    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
+    bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
+    // Multiplication is commutative.
+    if (!FoldedLoad) {
+      FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
+      if (FoldedLoad)
+        std::swap(N0, N1);
     }
 
     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                           N0, SDValue()).getValue(1);
 
-    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
-    SDValue Ops[] = {N1, InFlag};
-    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
+    MachineSDNode *CNode;
+    if (FoldedLoad) {
+      // i16/i32/i64 use an instruction that produces a low and high result even
+      // though only the low result is used.
+      SDVTList VTs;
+      if (NVT == MVT::i8)
+        VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
+      else
+        VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
 
-    ReplaceNode(Node, CNode);
+      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
+                        InFlag };
+      CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
+
+      // Update the chain.
+      ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
+      // Record the mem-refs
+      CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
+    } else {
+      // i16/i32/i64 use an instruction that produces a low and high result even
+      // though only the low result is used.
+      SDVTList VTs;
+      if (NVT == MVT::i8)
+        VTs = CurDAG->getVTList(NVT, MVT::i32);
+      else
+        VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
+
+      CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag});
+    }
+
+    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
+    ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
+    CurDAG->RemoveDeadNode(Node);
     return;
   }
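
The rewritten multiply selection above keeps the flags result (used for overflow checks) and can now fold a load of one operand into the memory form (MUL8m/IMUL8m and the wider MUL*m variants). A hedged sketch of a source pattern exercising both, via the GCC/Clang overflow builtin:

    // mul_overflow_i8.cpp -- illustrative only.
    #include <cstdint>

    bool mul_would_overflow(uint8_t a, const uint8_t *b, uint8_t *out) {
      // Roughly: movb %dil,%al ; mulb (%rsi) ; seto ... -- subject to
      // register allocation and to the load actually being foldable.
      return __builtin_mul_overflow(a, *b, out);
    }
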
 
@@ -3672,6 +3849,10 @@
     SDValue N0 = Node->getOperand(0);
     SDValue N1 = Node->getOperand(1);
 
+    // The optimizations below only apply to TEST-style compares (CMP against zero).
+    if (!isNullConstant(N1))
+      break;
+
     // Save the original VT of the compare.
     MVT CmpVT = N0.getSimpleValueType();
 
@@ -3679,7 +3860,7 @@
     // by a test instruction. The test should be removed later by
     // analyzeCompare if we are using only the zero flag.
     // TODO: Should we check the users and use the BEXTR flags directly?
-    if (isNullConstant(N1) && N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
+    if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
       if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
         unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
                                              : X86::TEST32rr;
@@ -3700,12 +3881,40 @@
     // Look past the truncate if CMP is the only use of it.
     if (N0.getOpcode() == ISD::AND &&
         N0.getNode()->hasOneUse() &&
-        N0.getValueType() != MVT::i8 &&
-        isNullConstant(N1)) {
+        N0.getValueType() != MVT::i8) {
       ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
       if (!C) break;
       uint64_t Mask = C->getZExtValue();
 
+      // Check if we can replace AND+IMM64 with a shift. This is possible for
+      // masks like 0xFF000000 or 0x00FFFFFF and if we care only about the zero
+      // flag.
+      if (CmpVT == MVT::i64 && !isInt<32>(Mask) &&
+          onlyUsesZeroFlag(SDValue(Node, 0))) {
+        if (isMask_64(~Mask)) {
+          unsigned TrailingZeros = countTrailingZeros(Mask);
+          SDValue Imm = CurDAG->getTargetConstant(TrailingZeros, dl, MVT::i64);
+          SDValue Shift =
+            SDValue(CurDAG->getMachineNode(X86::SHR64ri, dl, MVT::i64,
+                                           N0.getOperand(0), Imm), 0);
+          MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
+                                                       MVT::i32, Shift, Shift);
+          ReplaceNode(Node, Test);
+          return;
+        }
+        if (isMask_64(Mask)) {
+          unsigned LeadingZeros = countLeadingZeros(Mask);
+          SDValue Imm = CurDAG->getTargetConstant(LeadingZeros, dl, MVT::i64);
+          SDValue Shift =
+            SDValue(CurDAG->getMachineNode(X86::SHL64ri, dl, MVT::i64,
+                                           N0.getOperand(0), Imm), 0);
+          MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
+                                                       MVT::i32, Shift, Shift);
+          ReplaceNode(Node, Test);
+          return;
+        }
+      }
+
       MVT VT;
       int SubRegOp;
       unsigned ROpc, MOpc;
@@ -3717,7 +3926,7 @@
 
       if (isUInt<8>(Mask) &&
           (!(Mask & 0x80) || CmpVT == MVT::i8 ||
-           hasNoSignedComparisonUses(Node))) {
+           hasNoSignFlagUses(SDValue(Node, 0)))) {
         // For example, convert "testl %eax, $8" to "testb %al, $8"
         VT = MVT::i8;
         SubRegOp = X86::sub_8bit;
@@ -3725,7 +3934,7 @@
         MOpc = X86::TEST8mi;
       } else if (OptForMinSize && isUInt<16>(Mask) &&
                  (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
-                  hasNoSignedComparisonUses(Node))) {
+                  hasNoSignFlagUses(SDValue(Node, 0)))) {
         // For example, "testl %eax, $32776" to "testw %ax, $32776".
         // NOTE: We only want to form TESTW instructions if optimizing for
         // min size. Otherwise we only save one byte and possibly get a length
@@ -3739,7 +3948,8 @@
                    // Without minsize 16-bit Cmps can get here so we need to
                    // be sure we calculate the correct sign flag if needed.
                    (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
-                  CmpVT == MVT::i32 || hasNoSignedComparisonUses(Node))) {
+                  CmpVT == MVT::i32 ||
+                  hasNoSignFlagUses(SDValue(Node, 0)))) {
         // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
         // NOTE: We only want to run that transform if N0 is 32 or 64 bits.
        // Otherwise, we find ourselves in a position where we have to do
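
Sketches of the TEST rewrites in this hunk, assuming only the named flags are consumed; the function names are illustrative and final encodings depend on register allocation:

    // test_shrink.cpp -- illustrative only.
    #include <cstdint>

    bool low_bit_set(uint32_t x) {
      return (x & 8) != 0;   // testl $8,%edi can shrink to testb $8,%dil
    }

    bool high_half_zero(uint64_t x) {
      // 0xFFFFFFFF00000000 does not fit in an imm32, so instead of
      // movabs+test the AND can become: shrq $32,%rdi (ZF has the answer).
      return (x & 0xFFFFFFFF00000000ULL) == 0;
    }

    bool low_half_zero(uint64_t x) {
      // The trailing-ones mask takes the SHL form: shlq $32,%rdi.
      return (x & 0x00000000FFFFFFFFULL) == 0;
    }
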
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 3c1e52d..b6a692e 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -540,6 +540,10 @@
       // Use ANDPD and ORPD to simulate FCOPYSIGN.
       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
 
+      // These might be better off as horizontal vector ops.
+      setOperationAction(ISD::FADD, VT, Custom);
+      setOperationAction(ISD::FSUB, VT, Custom);
+
       // We don't support sin/cos/fmod
       setOperationAction(ISD::FSIN   , VT, Expand);
       setOperationAction(ISD::FCOS   , VT, Expand);
@@ -829,6 +833,26 @@
       setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
     }
 
+    setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
+    setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
+    setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
+    setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
+    setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
+    setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
+    setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
+    setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
+
+    if (!ExperimentalVectorWideningLegalization) {
+      // Use widening instead of promotion.
+      for (auto VT : { MVT::v8i8, MVT::v4i8, MVT::v2i8,
+                       MVT::v4i16, MVT::v2i16 }) {
+        setOperationAction(ISD::UADDSAT, VT, Custom);
+        setOperationAction(ISD::SADDSAT, VT, Custom);
+        setOperationAction(ISD::USUBSAT, VT, Custom);
+        setOperationAction(ISD::SSUBSAT, VT, Custom);
+      }
+    }
+
     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
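
For reference, the semantics of the saturating nodes made Legal above, plus the v16i8 instruction they correspond to. Whether a given frontend reaches ISD::UADDSAT from either form depends on its own lowering, so treat this as a sketch:

    // sat_add.cpp -- illustrative only; compile with -msse2.
    #include <cstdint>
    #include <emmintrin.h>

    uint8_t uaddsat_scalar(uint8_t a, uint8_t b) {
      unsigned s = unsigned(a) + unsigned(b);
      return uint8_t(s > 255 ? 255 : s);   // clamp instead of wrapping
    }

    __m128i uaddsat_v16i8(__m128i a, __m128i b) {
      return _mm_adds_epu8(a, b);          // PADDUSB: the Legal v16i8 case
    }
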
@@ -843,6 +867,7 @@
     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
       setOperationAction(ISD::SETCC,              VT, Custom);
       setOperationAction(ISD::CTPOP,              VT, Custom);
+      setOperationAction(ISD::ABS,                VT, Custom);
 
       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
       // setcc all the way to isel and prefer SETGT in some isel patterns.
@@ -863,12 +888,6 @@
     // scalars) and extend in-register to a legal 128-bit vector type. For sext
     // loads these must work with a single scalar load.
     for (MVT VT : MVT::integer_vector_valuetypes()) {
-      if (!ExperimentalVectorWideningLegalization) {
-        // We don't want narrow result types here when widening.
-        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
-        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
-        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
-      }
       setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
       setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
       setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
@@ -877,14 +896,6 @@
       setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
     }
 
-    if (ExperimentalVectorWideningLegalization &&
-        !Subtarget.hasSSE41() && Subtarget.is64Bit()) {
-      // This lets DAG combine create sextloads that get split and scalarized.
-      // TODO: Does this make sense? What about v2i8->v2i64?
-      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8,  Custom);
-      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i8,  Custom);
-    }
-
     for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
@@ -973,6 +984,8 @@
       setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
       setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
       setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);
+    } else {
+      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
     }
 
     // In the customized shift lowering, the legal v4i32/v2i64 cases
@@ -986,8 +999,8 @@
     setOperationAction(ISD::ROTL,               MVT::v4i32, Custom);
     setOperationAction(ISD::ROTL,               MVT::v8i16, Custom);
 
-    // With BWI, expanding (and promoting the shifts) is the better.
-    if (!Subtarget.hasBWI())
+    // With AVX512, expanding (and promoting the shifts) is better.
+    if (!Subtarget.hasAVX512())
       setOperationAction(ISD::ROTL,             MVT::v16i8, Custom);
   }
 
@@ -1000,6 +1013,12 @@
     setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
     setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
     setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);
+
+    // These might be better off as horizontal vector ops.
+    setOperationAction(ISD::ADD,                MVT::i16, Custom);
+    setOperationAction(ISD::ADD,                MVT::i32, Custom);
+    setOperationAction(ISD::SUB,                MVT::i16, Custom);
+    setOperationAction(ISD::SUB,                MVT::i32, Custom);
   }
 
   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
@@ -1195,11 +1214,21 @@
     setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
     setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);
 
+    setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
     setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
     setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
     setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
     setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);
 
+    setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
+
     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
       setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
       setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
@@ -1317,6 +1346,10 @@
       setOperationAction(ISD::SETCC,            VT, Custom);
       setOperationAction(ISD::SELECT,           VT, Custom);
       setOperationAction(ISD::TRUNCATE,         VT, Custom);
+      setOperationAction(ISD::UADDSAT,          VT, Custom);
+      setOperationAction(ISD::SADDSAT,          VT, Custom);
+      setOperationAction(ISD::USUBSAT,          VT, Custom);
+      setOperationAction(ISD::SSUBSAT,          VT, Custom);
 
       setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
@@ -1505,6 +1538,13 @@
       setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
       setOperationAction(ISD::BITCAST, MVT::v64i8,  Custom);
     }
+
+    if (Subtarget.hasVBMI2()) {
+      for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
+        setOperationAction(ISD::FSHL, VT, Custom);
+        setOperationAction(ISD::FSHR, VT, Custom);
+      }
+    }
   }// has  AVX-512
 
   // This block controls legalization for operations that don't have
@@ -1577,6 +1617,10 @@
       setOperationAction(ISD::SUB,                VT, Custom);
       setOperationAction(ISD::MUL,                VT, Custom);
       setOperationAction(ISD::VSELECT,            VT, Expand);
+      setOperationAction(ISD::UADDSAT,            VT, Custom);
+      setOperationAction(ISD::SADDSAT,            VT, Custom);
+      setOperationAction(ISD::USUBSAT,            VT, Custom);
+      setOperationAction(ISD::SSUBSAT,            VT, Custom);
 
       setOperationAction(ISD::TRUNCATE,           VT, Custom);
       setOperationAction(ISD::SETCC,              VT, Custom);
@@ -1657,6 +1701,10 @@
       setOperationAction(ISD::SMIN,         VT, Legal);
       setOperationAction(ISD::UMIN,         VT, Legal);
       setOperationAction(ISD::SETCC,        VT, Custom);
+      setOperationAction(ISD::UADDSAT,      VT, Legal);
+      setOperationAction(ISD::SADDSAT,      VT, Legal);
+      setOperationAction(ISD::USUBSAT,      VT, Legal);
+      setOperationAction(ISD::SSUBSAT,      VT, Legal);
 
       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
       // setcc all the way to isel and prefer SETGT in some isel patterns.
@@ -1672,6 +1720,11 @@
       for (auto VT : { MVT::v64i8, MVT::v32i16 })
         setOperationAction(ISD::CTPOP, VT, Legal);
     }
+
+    if (Subtarget.hasVBMI2()) {
+      setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
+      setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
+    }
   }
 
   if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
@@ -1718,6 +1771,15 @@
       setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
       setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
     }
+
+    if (Subtarget.hasVBMI2()) {
+      // TODO: Make these legal even without VLX?
+      for (auto VT : { MVT::v8i16,  MVT::v4i32, MVT::v2i64,
+                       MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
+        setOperationAction(ISD::FSHL, VT, Custom);
+        setOperationAction(ISD::FSHR, VT, Custom);
+      }
+    }
   }
 
   // We want to custom lower some of our intrinsics.
@@ -4846,6 +4908,18 @@
   return (Index % ResVT.getVectorNumElements()) == 0;
 }
 
+bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
+  // If the vector op is not supported, try to convert to scalar.
+  EVT VecVT = VecOp.getValueType();
+  if (!isOperationLegalOrCustomOrPromote(VecOp.getOpcode(), VecVT))
+    return true;
+
+  // If the vector op is supported, but the scalar op is not, the transform may
+  // not be worthwhile.
+  EVT ScalarVT = VecVT.getScalarType();
+  return isOperationLegalOrCustomOrPromote(VecOp.getOpcode(), ScalarVT);
+}
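
A sketch of what the new shouldScalarizeBinop() hook enables, assuming both the vector and scalar ops are legal (e.g. FADD): when only one lane of a vector binop is extracted, DAGCombine may scalarize it:

    // scalarize_binop.cpp -- illustrative only; compile for x86-64.
    #include <xmmintrin.h>

    float first_lane_sum(__m128 x, __m128 y) {
      __m128 s = _mm_add_ps(x, y);   // only lane 0 is used: ADDSS suffices
      return _mm_cvtss_f32(s);
    }
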
+
 bool X86TargetLowering::isCheapToSpeculateCttz() const {
   // Speculate cttz only if we can directly use TZCNT.
   return Subtarget.hasBMI();
@@ -4904,17 +4978,14 @@
   if (VT != MVT::i32 && VT != MVT::i64)
     return false;
 
-  // A mask and compare against constant is ok for an 'andn' too
-  // even though the BMI instruction doesn't have an immediate form.
-
-  return true;
+  return !isa<ConstantSDNode>(Y);
 }
 
 bool X86TargetLowering::hasAndNot(SDValue Y) const {
   EVT VT = Y.getValueType();
 
-  if (!VT.isVector()) // x86 can't form 'andn' with an immediate.
-    return !isa<ConstantSDNode>(Y) && hasAndNotCompare(Y);
+  if (!VT.isVector())
+    return hasAndNotCompare(Y);
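
A sketch of the scalar case this now gates on, assuming -mbmi: ANDN has no immediate form, so only a non-constant mask should claim it:

    // andn.cpp -- illustrative only; compile with -mbmi.
    #include <cstdint>

    bool masked_is_zero(uint32_t x, uint32_t m) {
      return (x & ~m) == 0;   // candidate for ANDN (which also sets ZF)
    }
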
 
   // Vector.
 
@@ -6910,6 +6981,26 @@
 /// Custom lower build_vector of v4i32 or v4f32.
 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
+  // If this is a splat of a pair of elements, use MOVDDUP (unless the target
+  // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
+  // Because we're creating a less complicated build vector here, we may enable
+  // further folding of the MOVDDUP via shuffle transforms.
+  if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
+      Op.getOperand(0) == Op.getOperand(2) &&
+      Op.getOperand(1) == Op.getOperand(3) &&
+      Op.getOperand(0) != Op.getOperand(1)) {
+    SDLoc DL(Op);
+    MVT VT = Op.getSimpleValueType();
+    MVT EltVT = VT.getVectorElementType();
+    // Create a new build vector with the first 2 elements followed by undef
+    // padding, bitcast to v2f64, duplicate, and bitcast back.
+    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
+                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
+    SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
+    SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
+    return DAG.getBitcast(VT, Dup);
+  }
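
A sketch of a build_vector the new path lowers via MOVDDUP, assuming SSE3 and no XOP; note _mm_set_ps takes its arguments high-to-low, so this produces {a, b, a, b} in element order:

    // movddup_bv.cpp -- illustrative only; compile with -msse3.
    #include <xmmintrin.h>

    __m128 splat_pair(float a, float b) {
      return _mm_set_ps(b, a, b, a);   // elements {a, b, a, b}
    }
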
+
   // Find all zeroable elements.
   std::bitset<4> Zeroable;
   for (int i=0; i < 4; ++i) {
@@ -7768,13 +7859,14 @@
   return DstVec;
 }
 
-/// Return true if \p N implements a horizontal binop and return the
-/// operands for the horizontal binop into V0 and V1.
-///
 /// This is a helper function of LowerToHorizontalOp().
 /// This function checks that the build_vector \p N in input implements a
-/// horizontal operation. Parameter \p Opcode defines the kind of horizontal
-/// operation to match.
+/// 128-bit partial horizontal operation on a 256-bit vector, but that operation
+/// may not match the layout of an x86 256-bit horizontal instruction.
+/// In other words, if this returns true, then some extraction/insertion will
+/// be required to produce a valid horizontal instruction.
+///
+/// Parameter \p Opcode defines the kind of horizontal operation to match.
 /// For example, if \p Opcode is equal to ISD::ADD, then this function
 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
 /// is equal to ISD::SUB, then this function checks if this is a horizontal
@@ -7782,12 +7874,17 @@
 ///
 /// This function only analyzes elements of \p N whose indices are
 /// in range [BaseIdx, LastIdx).
-static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
-                              SelectionDAG &DAG,
-                              unsigned BaseIdx, unsigned LastIdx,
-                              SDValue &V0, SDValue &V1) {
+///
+/// TODO: This function was originally used to match both real and fake partial
+/// horizontal operations, but the index-matching logic is incorrect for that.
+/// See the corrected implementation in isHopBuildVector(). Can we reduce this
+/// code because it is only used for partial h-op matching now?
+static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
+                                  SelectionDAG &DAG,
+                                  unsigned BaseIdx, unsigned LastIdx,
+                                  SDValue &V0, SDValue &V1) {
   EVT VT = N->getValueType(0);
-
+  assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
   assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
   assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
          "Invalid Vector in input!");
@@ -8128,17 +8225,158 @@
   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
 }
 
+static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
+                             unsigned &HOpcode, SDValue &V0, SDValue &V1) {
+  // Initialize outputs to known values.
+  MVT VT = BV->getSimpleValueType(0);
+  HOpcode = ISD::DELETED_NODE;
+  V0 = DAG.getUNDEF(VT);
+  V1 = DAG.getUNDEF(VT);
+
+  // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
+  // half of the result is calculated independently from the 128-bit halves of
+  // the inputs, which makes the index-checking logic below more complicated.
+  unsigned NumElts = VT.getVectorNumElements();
+  unsigned GenericOpcode = ISD::DELETED_NODE;
+  unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
+  unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
+  unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
+  for (unsigned i = 0; i != Num128BitChunks; ++i) {
+    for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
+      // Ignore undef elements.
+      SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
+      if (Op.isUndef())
+        continue;
+
+      // If there's an opcode mismatch, we're done.
+      if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
+        return false;
+
+      // Initialize horizontal opcode.
+      if (HOpcode == ISD::DELETED_NODE) {
+        GenericOpcode = Op.getOpcode();
+        switch (GenericOpcode) {
+        case ISD::ADD: HOpcode = X86ISD::HADD; break;
+        case ISD::SUB: HOpcode = X86ISD::HSUB; break;
+        case ISD::FADD: HOpcode = X86ISD::FHADD; break;
+        case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
+        default: return false;
+        }
+      }
+
+      SDValue Op0 = Op.getOperand(0);
+      SDValue Op1 = Op.getOperand(1);
+      if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+          Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+          Op0.getOperand(0) != Op1.getOperand(0) ||
+          !isa<ConstantSDNode>(Op0.getOperand(1)) ||
+          !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
+        return false;
+
+      // The source vector is chosen based on which 64-bit half of the
+      // destination vector is being calculated.
+      if (j < NumEltsIn64Bits) {
+        if (V0.isUndef())
+          V0 = Op0.getOperand(0);
+      } else {
+        if (V1.isUndef())
+          V1 = Op0.getOperand(0);
+      }
+
+      SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
+      if (SourceVec != Op0.getOperand(0))
+        return false;
+
+      // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
+      unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
+      unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
+      unsigned ExpectedIndex = i * NumEltsIn128Bits +
+                               (j % NumEltsIn64Bits) * 2;
+      if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
+        continue;
+
+      // If this is not a commutative op, this does not match.
+      if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
+        return false;
+
+      // Addition is commutative, so try swapping the extract indexes.
+      // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
+      if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
+        continue;
+
+      // Extract indexes do not match horizontal requirement.
+      return false;
+    }
+  }
+  // We matched. Opcode and operands are returned by reference as arguments.
+  return true;
+}
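
A v4f32 example of the layout isHopBuildVector() accepts (vector element subscripting and vector_size are Clang/GCC extensions; the names are illustrative): each result element adds an adjacent pair from one source, which is exactly HADDPS's layout:

    // hop_build_vector.cpp -- illustrative only; compile with clang -msse3.
    typedef float v4f32 __attribute__((vector_size(16)));

    v4f32 hadd(v4f32 x, v4f32 y) {
      v4f32 r = {x[0] + x[1], x[2] + x[3], y[0] + y[1], y[2] + y[3]};
      return r;
    }
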
+
+static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
+                                    SelectionDAG &DAG, unsigned HOpcode,
+                                    SDValue V0, SDValue V1) {
+  // If either input vector is not the same size as the build vector,
+  // extract/insert the low bits to the correct size.
+  // This is free (examples: zmm --> xmm, xmm --> ymm).
+  MVT VT = BV->getSimpleValueType(0);
+  unsigned Width = VT.getSizeInBits();
+  if (V0.getValueSizeInBits() > Width)
+    V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
+  else if (V0.getValueSizeInBits() < Width)
+    V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
+
+  if (V1.getValueSizeInBits() > Width)
+    V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
+  else if (V1.getValueSizeInBits() < Width)
+    V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
+
+  return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
+}
+
 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
+  // We need at least 2 non-undef elements to make this worthwhile by default.
+  unsigned NumNonUndefs = 0;
+  for (const SDValue &V : BV->op_values())
+    if (!V.isUndef())
+      ++NumNonUndefs;
+
+  if (NumNonUndefs < 2)
+    return SDValue();
+
+  // There are 4 sets of horizontal math operations distinguished by type:
+  // int/FP at 128-bit/256-bit. Each type was introduced with a different
+  // subtarget feature. Try to match those "native" patterns first.
   MVT VT = BV->getSimpleValueType(0);
-  unsigned NumElts = VT.getVectorNumElements();
-  unsigned NumUndefsLO = 0;
-  unsigned NumUndefsHI = 0;
-  unsigned Half = NumElts/2;
+  unsigned HOpcode;
+  SDValue V0, V1;
+  if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3())
+    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
+      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
+
+  if ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3())
+    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
+      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
+
+  if ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX())
+    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
+      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
+
+  if ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())
+    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
+      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
+
+  // Try harder to match 256-bit ops by using extract/concat.
+  if (!Subtarget.hasAVX() || !VT.is256BitVector())
+    return SDValue();
 
   // Count the number of UNDEF operands in the build_vector in input.
+  unsigned NumElts = VT.getVectorNumElements();
+  unsigned Half = NumElts / 2;
+  unsigned NumUndefsLO = 0;
+  unsigned NumUndefsHI = 0;
   for (unsigned i = 0, e = Half; i != e; ++i)
     if (BV->getOperand(i)->isUndef())
       NumUndefsLO++;
@@ -8147,96 +8385,61 @@
     if (BV->getOperand(i)->isUndef())
       NumUndefsHI++;
 
-  // Early exit if this is either a build_vector of all UNDEFs or all the
-  // operands but one are UNDEF.
-  if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
-    return SDValue();
-
   SDLoc DL(BV);
   SDValue InVec0, InVec1;
-  if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) {
-    // Try to match an SSE3 float HADD/HSUB.
-    if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
-      return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
-
-    if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
-      return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
-  } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) {
-    // Try to match an SSSE3 integer HADD/HSUB.
-    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
-      return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
-
-    if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
-      return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
-  }
-
-  if (!Subtarget.hasAVX())
-    return SDValue();
-
-  if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
-    // Try to match an AVX horizontal add/sub of packed single/double
-    // precision floating point values from 256-bit vectors.
-    SDValue InVec2, InVec3;
-    if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
-        isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
-        ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
-        ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
-      return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
-
-    if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
-        isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
-        ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
-        ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
-      return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
-  } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
-    // Try to match an AVX2 horizontal add/sub of signed integers.
+  if (VT == MVT::v8i32 || VT == MVT::v16i16) {
     SDValue InVec2, InVec3;
     unsigned X86Opcode;
     bool CanFold = true;
 
-    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
-        isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
+    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
+        isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
+                              InVec3) &&
         ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
         ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
       X86Opcode = X86ISD::HADD;
-    else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
-        isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
-        ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
-        ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
+    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
+                                   InVec1) &&
+             isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
+                                   InVec3) &&
+             ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+             ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
       X86Opcode = X86ISD::HSUB;
     else
       CanFold = false;
 
     if (CanFold) {
-      // Fold this build_vector into a single horizontal add/sub.
-      // Do this only if the target has AVX2.
-      if (Subtarget.hasAVX2())
-        return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
-
       // Do not try to expand this build_vector into a pair of horizontal
       // add/sub if we can emit a pair of scalar add/sub.
       if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
         return SDValue();
 
-      // Convert this build_vector into a pair of horizontal binop followed by
-      // a concat vector.
+      // Convert this build_vector into a pair of horizontal binops followed by
+      // a concat vector. We must adjust the outputs from the partial horizontal
+      // matching calls above to account for undefined vector halves.
+      SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
+      SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
+      assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
       bool isUndefLO = NumUndefsLO == Half;
       bool isUndefHI = NumUndefsHI == Half;
-      return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
-                                   isUndefLO, isUndefHI);
+      return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
+                                   isUndefHI);
     }
   }
 
-  if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
-       VT == MVT::v16i16) && Subtarget.hasAVX()) {
+  if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
+      VT == MVT::v16i16) {
     unsigned X86Opcode;
-    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
+    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
       X86Opcode = X86ISD::HADD;
-    else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
+    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
+                                   InVec1))
       X86Opcode = X86ISD::HSUB;
-    else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
+    else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
+                                   InVec1))
       X86Opcode = X86ISD::FHADD;
-    else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
+    else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
+                                   InVec1))
       X86Opcode = X86ISD::FHSUB;
     else
       return SDValue();
@@ -11761,10 +11964,9 @@
 /// because for floating point vectors we have a generalized SHUFPS lowering
 /// strategy that handles everything that doesn't *exactly* match an unpack,
 /// making this clever lowering unnecessary.
-static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
-                                                    SDValue V1, SDValue V2,
-                                                    ArrayRef<int> Mask,
-                                                    SelectionDAG &DAG) {
+static SDValue lowerVectorShuffleAsPermuteAndUnpack(
+    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
   assert(!VT.isFloatingPoint() &&
          "This routine only supports integer vectors.");
   assert(VT.is128BitVector() &&
@@ -11833,6 +12035,12 @@
     if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
       return Unpack;
 
+  // If we're shuffling with a zero vector then we're better off not doing
+  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
+  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
+      ISD::isBuildVectorAllZeros(V2.getNode()))
+    return SDValue();
+
   // If none of the unpack-rooted lowerings worked (or were profitable) try an
   // initial unpack.
   if (NumLoInputs == 0 || NumHiInputs == 0) {
@@ -12346,7 +12554,7 @@
 
     // Try to lower by permuting the inputs into an unpack instruction.
     if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
-            DL, MVT::v4i32, V1, V2, Mask, DAG))
+            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
       return Unpack;
   }
 
@@ -13042,8 +13250,8 @@
     return BitBlend;
 
   // Try to lower by permuting the inputs into an unpack instruction.
-  if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1,
-                                                            V2, Mask, DAG))
+  if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
+          DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
     return Unpack;
 
   // If we can't directly blend but can use PSHUFB, that will be better as it
@@ -13331,7 +13539,7 @@
       // shuffles will both be pshufb, in which case we shouldn't bother with
       // this.
       if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
-              DL, MVT::v16i8, V1, V2, Mask, DAG))
+              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
         return Unpack;
 
       // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
@@ -16111,34 +16319,25 @@
   }
 
   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+  if (IdxVal == 0) // the operation is legal
+    return Op;
 
-  // If the kshift instructions of the correct width aren't natively supported
-  // then we need to promote the vector to the native size to get the correct
-  // zeroing behavior.
-  if (VecVT.getVectorNumElements() < 16) {
-    VecVT = MVT::v16i1;
-    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
-                      DAG.getUNDEF(VecVT), Vec,
+  // Extend to natively supported kshift.
+  unsigned NumElems = VecVT.getVectorNumElements();
+  MVT WideVecVT = VecVT;
+  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
+    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
+                      DAG.getUNDEF(WideVecVT), Vec,
                       DAG.getIntPtrConstant(0, dl));
   }
 
-  // Extracts from element 0 are always allowed.
-  if (IdxVal != 0) {
-    // Use kshiftr instruction to move to the lower element.
-    Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
-                      DAG.getConstant(IdxVal, dl, MVT::i8));
-  }
+  // Use kshiftr instruction to move to the lower element.
+  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
+                    DAG.getConstant(IdxVal, dl, MVT::i8));
 
-  // Shrink to v16i1 since that's always legal.
-  if (VecVT.getVectorNumElements() > 16) {
-    VecVT = MVT::v16i1;
-    Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VecVT, Vec,
-                      DAG.getIntPtrConstant(0, dl));
-  }
-
-  // Convert to a bitcast+aext/trunc.
-  MVT CastVT = MVT::getIntegerVT(VecVT.getVectorNumElements());
-  return DAG.getAnyExtOrTrunc(DAG.getBitcast(CastVT, Vec), dl, EltVT);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
+                     DAG.getIntPtrConstant(0, dl));
 }
 
 SDValue
@@ -17039,20 +17238,39 @@
   MVT VT = Op.getSimpleValueType();
   assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
          "Unexpected funnel shift opcode!");
-  assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
-         "Unexpected funnel shift type!");
 
   SDLoc DL(Op);
   SDValue Op0 = Op.getOperand(0);
   SDValue Op1 = Op.getOperand(1);
   SDValue Amt = Op.getOperand(2);
 
-  // Expand slow SHLD/SHRD cases.
-  // TODO - can we be more selective here: OptSize/RMW etc.?
-  if (Subtarget.isSHLDSlow())
+  bool IsFSHR = Op.getOpcode() == ISD::FSHR;
+
+  if (VT.isVector()) {
+    assert(Subtarget.hasVBMI2() && "Expected VBMI2");
+
+    if (IsFSHR)
+      std::swap(Op0, Op1);
+
+    APInt APIntShiftAmt;
+    if (isConstantSplat(Amt, APIntShiftAmt)) {
+      uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
+      return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
+                         Op0, Op1, DAG.getConstant(ShiftAmt, DL, MVT::i8));
+    }
+
+    return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
+                       Op0, Op1, Amt);
+  }
+
+  assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
+         "Unexpected funnel shift type!");
+
+  // Expand slow SHLD/SHRD cases if we are not optimizing for size.
+  bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+  if (!OptForSize && Subtarget.isSHLDSlow())
     return SDValue();
 
-  bool IsFSHR = Op.getOpcode() == ISD::FSHR;
   if (IsFSHR)
     std::swap(Op0, Op1);
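
A scalar reference for the funnel-shift semantics being lowered here; under VBMI2 the vector forms map to VSHLDV/VSHRDV, or to the immediate VSHLD/VSHRD when the amount is a constant splat. This helper is only a sketch of what FSHL computes:

    // fshl.cpp -- illustrative only.
    #include <cstdint>

    uint64_t fshl64(uint64_t hi, uint64_t lo, unsigned amt) {
      amt &= 63;                        // fshl is defined modulo bit width
      if (amt == 0) return hi;          // avoid an undefined 64-bit shift
      return (hi << amt) | (lo >> (64 - amt));
    }
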
 
@@ -17887,9 +18105,10 @@
                                       const X86Subtarget &Subtarget) {
   assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
          "Unexpected PACK opcode");
+  assert(DstVT.isVector() && "VT not a vector?");
 
   // Requires SSE2 but AVX512 has fast vector truncate.
-  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512() || !DstVT.isVector())
+  if (!Subtarget.hasSSE2())
     return SDValue();
 
   EVT SrcVT = In.getValueType();
@@ -18093,8 +18312,7 @@
   // Truncate with PACKUS if we are truncating a vector with leading zero bits
   // that extend all the way to the packed/truncated value.
   // Pre-SSE41 we can only use PACKUSWB.
-  KnownBits Known;
-  DAG.computeKnownBits(In, Known);
+  KnownBits Known = DAG.computeKnownBits(In);
   if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
     if (SDValue V =
             truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
@@ -18268,6 +18486,98 @@
                                  In, DAG.getUNDEF(SVT)));
 }
 
+/// Horizontal vector math instructions may be slower than normal math with
+/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
+/// implementation, and likely shuffle complexity of the alternate sequence.
+static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
+                                  const X86Subtarget &Subtarget) {
+  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
+  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
+  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
+}
+
+/// Depending on uarch and/or optimizing for size, we might prefer to use a
+/// vector operation in place of the typical scalar operation.
+static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
+                                         const X86Subtarget &Subtarget) {
+  // If both operands have other uses, this is probably not profitable.
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  if (!LHS.hasOneUse() && !RHS.hasOneUse())
+    return Op;
+
+  // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
+  bool IsFP = Op.getSimpleValueType().isFloatingPoint();
+  if (IsFP && !Subtarget.hasSSE3())
+    return Op;
+  if (!IsFP && !Subtarget.hasSSSE3())
+    return Op;
+
+  // Defer forming the minimal horizontal op if the vector source has more than
+  // the 2 extract element uses that we're matching here. In that case, we might
+  // form a horizontal op that includes more than 1 add/sub op.
+  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      LHS.getOperand(0) != RHS.getOperand(0) ||
+      !LHS.getOperand(0)->hasNUsesOfValue(2, 0))
+    return Op;
+
+  if (!isa<ConstantSDNode>(LHS.getOperand(1)) ||
+      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
+      !shouldUseHorizontalOp(true, DAG, Subtarget))
+    return Op;
+
+  // Allow commuted 'hadd' ops.
+  // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
+  unsigned HOpcode;
+  switch (Op.getOpcode()) {
+    case ISD::ADD: HOpcode = X86ISD::HADD; break;
+    case ISD::SUB: HOpcode = X86ISD::HSUB; break;
+    case ISD::FADD: HOpcode = X86ISD::FHADD; break;
+    case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
+    default:
+      llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
+  }
+  unsigned LExtIndex = LHS.getConstantOperandVal(1);
+  unsigned RExtIndex = RHS.getConstantOperandVal(1);
+  if (LExtIndex == 1 && RExtIndex == 0 &&
+      (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
+    std::swap(LExtIndex, RExtIndex);
+
+  // TODO: This can be extended to handle other adjacent extract pairs.
+  if (LExtIndex != 0 || RExtIndex != 1)
+    return Op;
+
+  SDValue X = LHS.getOperand(0);
+  EVT VecVT = X.getValueType();
+  unsigned BitWidth = VecVT.getSizeInBits();
+  assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
+         "Not expecting illegal vector widths here");
+
+  // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
+  // equivalent, so extract the 256/512-bit source op to 128-bit.
+  // This is free: ymm/zmm -> xmm.
+  SDLoc DL(Op);
+  if (BitWidth == 256 || BitWidth == 512)
+    X = extract128BitVector(X, 0, DAG, DL);
+
+  // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
+  // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
+  // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
+  SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
+                     DAG.getIntPtrConstant(0, DL));
+}
+
+/// Depending on uarch and/or optimizing for size, we might prefer to use a
+/// vector operation in place of the typical scalar operation.
+static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
+                             const X86Subtarget &Subtarget) {
+  assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
+         "Only expecting float/double");
+  return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
+}
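
A sketch of the scalar-to-horizontal transform above; whether HADDPS is actually chosen depends on shouldUseHorizontalOp() (optimizing for size, or a subtarget with fast horizontal ops), and the two extracts must be the only uses of the source vector:

    // hadd_scalar.cpp -- illustrative only; compile with clang -msse3.
    typedef float v4f32 __attribute__((vector_size(16)));

    float sum_low_pair(v4f32 x) {
      // add (extractelt x, 0), (extractelt x, 1)
      //   --> extractelt (haddps x, x), 0
      return x[0] + x[1];
    }
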
+
 /// The only differences between FABS and FNEG are the mask and the logic op.
 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
@@ -18425,7 +18735,8 @@
 // Check whether an OR'd tree is PTEST-able.
 static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
                                       const X86Subtarget &Subtarget,
-                                      SelectionDAG &DAG) {
+                                      SelectionDAG &DAG,
+                                      SDValue &X86CC) {
   assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
 
   if (!Subtarget.hasSSE41())
@@ -18511,9 +18822,10 @@
     VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
   }
 
-  SDValue Res = DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
-                            VecIns.back(), VecIns.back());
-  return getSETCC(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE, Res, DL, DAG);
+  X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
+                          DL, MVT::i8);
+  return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
+                     VecIns.back(), VecIns.back());
 }
 
 /// return true if \c Op has a use that doesn't just read flags.
@@ -18581,163 +18893,26 @@
   unsigned Opcode = 0;
   unsigned NumOperands = 0;
 
-  // Truncate operations may prevent the merge of the SETCC instruction
-  // and the arithmetic instruction before it. Attempt to truncate the operands
-  // of the arithmetic instruction and use a reduced bit-width instruction.
-  bool NeedTruncation = false;
   SDValue ArithOp = Op;
-  if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
-    SDValue Arith = Op->getOperand(0);
-    // Both the trunc and the arithmetic op need to have one user each.
-    if (Arith->hasOneUse())
-      switch (Arith.getOpcode()) {
-        default: break;
-        case ISD::ADD:
-        case ISD::SUB:
-        case ISD::AND:
-        case ISD::OR:
-        case ISD::XOR: {
-          NeedTruncation = true;
-          ArithOp = Arith;
-        }
-      }
-  }
-
-  // Sometimes flags can be set either with an AND or with an SRL/SHL
-  // instruction. SRL/SHL variant should be preferred for masks longer than this
-  // number of bits.
-  const int ShiftToAndMaxMaskWidth = 32;
-  const bool ZeroCheck = (X86CC == X86::COND_E || X86CC == X86::COND_NE);
 
   // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
   // which may be the result of a CAST.  We use the variable 'Op', which is the
   // non-casted variable when we check for possible users.
   switch (ArithOp.getOpcode()) {
-  case ISD::ADD:
-    // We only want to rewrite this as a target-specific node with attached
-    // flags if there is a reasonable chance of either using that to do custom
-    // instructions selection that can fold some of the memory operands, or if
-    // only the flags are used. If there are other uses, leave the node alone
-    // and emit a test instruction.
-    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
-         UE = Op.getNode()->use_end(); UI != UE; ++UI)
-      if (UI->getOpcode() != ISD::CopyToReg &&
-          UI->getOpcode() != ISD::SETCC &&
-          UI->getOpcode() != ISD::STORE)
-        goto default_case;
-
-    if (auto *C = dyn_cast<ConstantSDNode>(ArithOp.getOperand(1))) {
-      // An add of one will be selected as an INC.
-      if (C->isOne() &&
-          (!Subtarget.slowIncDec() ||
-           DAG.getMachineFunction().getFunction().optForSize())) {
-        Opcode = X86ISD::INC;
-        NumOperands = 1;
-        break;
-      }
-
-      // An add of negative one (subtract of one) will be selected as a DEC.
-      if (C->isAllOnesValue() &&
-          (!Subtarget.slowIncDec() ||
-           DAG.getMachineFunction().getFunction().optForSize())) {
-        Opcode = X86ISD::DEC;
-        NumOperands = 1;
-        break;
-      }
-    }
-
-    // Otherwise use a regular EFLAGS-setting add.
-    Opcode = X86ISD::ADD;
-    NumOperands = 2;
-    break;
-  case ISD::SHL:
-  case ISD::SRL:
-    // If we have a constant logical shift that's only used in a comparison
-    // against zero turn it into an equivalent AND. This allows turning it into
-    // a TEST instruction later.
-    if (ZeroCheck && Op->hasOneUse() &&
-        isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
-      EVT VT = Op.getValueType();
-      unsigned BitWidth = VT.getSizeInBits();
-      unsigned ShAmt = Op->getConstantOperandVal(1);
-      if (ShAmt >= BitWidth) // Avoid undefined shifts.
-        break;
-      APInt Mask = ArithOp.getOpcode() == ISD::SRL
-                       ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
-                       : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
-      if (!Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
-        break;
-      Op = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
-                       DAG.getConstant(Mask, dl, VT));
-    }
-    break;
-
   case ISD::AND:
     // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
-    // because a TEST instruction will be better. However, AND should be
-    // preferred if the instruction can be combined into ANDN.
-    if (!hasNonFlagsUse(Op)) {
-      SDValue Op0 = ArithOp->getOperand(0);
-      SDValue Op1 = ArithOp->getOperand(1);
-      EVT VT = ArithOp.getValueType();
-      bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
-      bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
-      bool isProperAndn = isAndn && isLegalAndnType && Subtarget.hasBMI();
+    // because a TEST instruction will be better.
+    if (!hasNonFlagsUse(Op))
+      break;
 
-      // If we cannot select an ANDN instruction, check if we can replace
-      // AND+IMM64 with a shift before giving up. This is possible for masks
-      // like 0xFF000000 or 0x00FFFFFF and if we care only about the zero flag.
-      if (!isProperAndn) {
-        if (!ZeroCheck)
-          break;
-
-        // And with cosntant should be canonicalized unless we're dealing
-        // with opaque constants.
-        assert((!isa<ConstantSDNode>(Op0) ||
-                (isa<ConstantSDNode>(Op1) &&
-                 (cast<ConstantSDNode>(Op0)->isOpaque() ||
-                  cast<ConstantSDNode>(Op1)->isOpaque()))) &&
-               "AND node isn't canonicalized");
-        auto *CN = dyn_cast<ConstantSDNode>(Op1);
-        if (!CN)
-          break;
-
-        const APInt &Mask = CN->getAPIntValue();
-        if (Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
-          break; // Prefer TEST instruction.
-
-        unsigned BitWidth = Mask.getBitWidth();
-        unsigned LeadingOnes = Mask.countLeadingOnes();
-        unsigned TrailingZeros = Mask.countTrailingZeros();
-
-        if (LeadingOnes + TrailingZeros == BitWidth) {
-          assert(TrailingZeros < VT.getSizeInBits() &&
-                 "Shift amount should be less than the type width");
-          SDValue ShAmt = DAG.getConstant(TrailingZeros, dl, MVT::i8);
-          Op = DAG.getNode(ISD::SRL, dl, VT, Op0, ShAmt);
-          break;
-        }
-
-        unsigned LeadingZeros = Mask.countLeadingZeros();
-        unsigned TrailingOnes = Mask.countTrailingOnes();
-
-        if (LeadingZeros + TrailingOnes == BitWidth) {
-          assert(LeadingZeros < VT.getSizeInBits() &&
-                 "Shift amount should be less than the type width");
-          SDValue ShAmt = DAG.getConstant(LeadingZeros, dl, MVT::i8);
-          Op = DAG.getNode(ISD::SHL, dl, VT, Op0, ShAmt);
-          break;
-        }
-
-        break;
-      }
-    }
     LLVM_FALLTHROUGH;
+  case ISD::ADD:
   case ISD::SUB:
   case ISD::OR:
   case ISD::XOR:
-    // Similar to ISD::ADD above, check if the uses will preclude useful
-    // lowering of the target-specific node.
+    // Transform to an x86-specific ALU node with flags if there is a chance of
+    // using an RMW op or only the flags are used. Otherwise, leave
+    // the node alone and emit a 'test' instruction.
     for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
            UE = Op.getNode()->use_end(); UI != UE; ++UI)
       if (UI->getOpcode() != ISD::CopyToReg &&
@@ -18748,6 +18923,7 @@
     // Otherwise use a regular EFLAGS-setting instruction.
     switch (ArithOp.getOpcode()) {
     default: llvm_unreachable("unexpected operator!");
+    case ISD::ADD: Opcode = X86ISD::ADD; break;
     case ISD::SUB: Opcode = X86ISD::SUB; break;
     case ISD::XOR: Opcode = X86ISD::XOR; break;
     case ISD::AND: Opcode = X86ISD::AND; break;
@@ -18758,8 +18934,6 @@
     break;
   case X86ISD::ADD:
   case X86ISD::SUB:
-  case X86ISD::INC:
-  case X86ISD::DEC:
   case X86ISD::OR:
   case X86ISD::XOR:
   case X86ISD::AND:
@@ -18769,36 +18943,6 @@
     break;
   }
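// Illustration (standalone C++, hypothetical names, not LLVM API): why a
// separate TEST is redundant in the flags-only case above -- an x86 ALU op
// such as AND or SUB already sets ZF from its result, so a later
// compare-with-zero of that same result adds no information.
#include <cassert>
#include <cstdint>
struct AluResult { uint32_t Value; bool ZF; };
static AluResult andWithFlags(uint32_t A, uint32_t B) {
  uint32_t R = A & B;
  return {R, R == 0}; // ZF exactly as the hardware would set it
}
int main() {
  AluResult R = andWithFlags(0xF0, 0x0F);
  assert(R.ZF == (R.Value == 0)); // no extra 'test' needed
}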
 
-  // If we found that truncation is beneficial, perform the truncation and
-  // update 'Op'.
-  if (NeedTruncation) {
-    EVT VT = Op.getValueType();
-    SDValue WideVal = Op->getOperand(0);
-    EVT WideVT = WideVal.getValueType();
-    unsigned ConvertedOp = 0;
-    // Use a target machine opcode to prevent further DAGCombine
-    // optimizations that may separate the arithmetic operations
-    // from the setcc node.
-    switch (WideVal.getOpcode()) {
-      default: break;
-      case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
-      case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
-      case ISD::AND: ConvertedOp = X86ISD::AND; break;
-      case ISD::OR:  ConvertedOp = X86ISD::OR;  break;
-      case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
-    }
-
-    if (ConvertedOp) {
-      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-      if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
-        SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
-        SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
-        SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
-        Op = DAG.getNode(ConvertedOp, dl, VTs, V0, V1);
-      }
-    }
-  }
-
   if (Opcode == 0) {
     // Emit a CMP with 0, which is the TEST pattern.
     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
@@ -18961,10 +19105,61 @@
   return 2;
 }
 
-/// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition
-/// according to equal/not-equal condition code \p CC.
-static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
-                                   const SDLoc &dl, SelectionDAG &DAG) {
+/// Result of 'and' is compared against zero. Change to a BT node if possible.
+/// Returns the BT node and the condition code needed to use it.
+static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
+                            const SDLoc &dl, SelectionDAG &DAG,
+                            SDValue &X86CC) {
+  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
+  SDValue Op0 = And.getOperand(0);
+  SDValue Op1 = And.getOperand(1);
+  if (Op0.getOpcode() == ISD::TRUNCATE)
+    Op0 = Op0.getOperand(0);
+  if (Op1.getOpcode() == ISD::TRUNCATE)
+    Op1 = Op1.getOperand(0);
+
+  SDValue Src, BitNo;
+  if (Op1.getOpcode() == ISD::SHL)
+    std::swap(Op0, Op1);
+  if (Op0.getOpcode() == ISD::SHL) {
+    if (isOneConstant(Op0.getOperand(0))) {
+      // If we looked past a truncate, check that it's only truncating away
+      // known zeros.
+      unsigned BitWidth = Op0.getValueSizeInBits();
+      unsigned AndBitWidth = And.getValueSizeInBits();
+      if (BitWidth > AndBitWidth) {
+        KnownBits Known = DAG.computeKnownBits(Op0);
+        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
+          return SDValue();
+      }
+      Src = Op1;
+      BitNo = Op0.getOperand(1);
+    }
+  } else if (Op1.getOpcode() == ISD::Constant) {
+    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
+    uint64_t AndRHSVal = AndRHS->getZExtValue();
+    SDValue AndLHS = Op0;
+
+    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
+      Src = AndLHS.getOperand(0);
+      BitNo = AndLHS.getOperand(1);
+    } else {
+      // Use BT if the immediate can't be encoded in a TEST instruction or if
+      // we are optimizing for size and the immediate won't fit in a byte.
+      bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
+          isPowerOf2_64(AndRHSVal)) {
+        Src = AndLHS;
+        BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
+                                Src.getValueType());
+      }
+    }
+  }
+
+  // No patterns found, give up.
+  if (!Src.getNode())
+    return SDValue();
+
   // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
   // instruction.  Since the shift amount is in-range-or-undefined, we know
   // that doing a bittest on the i32 value is ok.  We extend to i32 because
@@ -18986,64 +19181,9 @@
   if (Src.getValueType() != BitNo.getValueType())
     BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
 
-  SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
-  X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
-  return getSETCC(Cond, BT, dl , DAG);
-}
-
-/// Result of 'and' is compared against zero. Change to a BT node if possible.
-static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
-                            const SDLoc &dl, SelectionDAG &DAG) {
-  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
-  SDValue Op0 = And.getOperand(0);
-  SDValue Op1 = And.getOperand(1);
-  if (Op0.getOpcode() == ISD::TRUNCATE)
-    Op0 = Op0.getOperand(0);
-  if (Op1.getOpcode() == ISD::TRUNCATE)
-    Op1 = Op1.getOperand(0);
-
-  SDValue LHS, RHS;
-  if (Op1.getOpcode() == ISD::SHL)
-    std::swap(Op0, Op1);
-  if (Op0.getOpcode() == ISD::SHL) {
-    if (isOneConstant(Op0.getOperand(0))) {
-      // If we looked past a truncate, check that it's only truncating away
-      // known zeros.
-      unsigned BitWidth = Op0.getValueSizeInBits();
-      unsigned AndBitWidth = And.getValueSizeInBits();
-      if (BitWidth > AndBitWidth) {
-        KnownBits Known;
-        DAG.computeKnownBits(Op0, Known);
-        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
-          return SDValue();
-      }
-      LHS = Op1;
-      RHS = Op0.getOperand(1);
-    }
-  } else if (Op1.getOpcode() == ISD::Constant) {
-    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
-    uint64_t AndRHSVal = AndRHS->getZExtValue();
-    SDValue AndLHS = Op0;
-
-    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
-      LHS = AndLHS.getOperand(0);
-      RHS = AndLHS.getOperand(1);
-    } else {
-      // Use BT if the immediate can't be encoded in a TEST instruction or we
-      // are optimizing for size and the immediate won't fit in a byte.
-      bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
-      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
-          isPowerOf2_64(AndRHSVal)) {
-        LHS = AndLHS;
-        RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType());
-      }
-    }
-  }
-
-  if (LHS.getNode())
-    return getBitTestCondition(LHS, RHS, CC, dl, DAG);
-
-  return SDValue();
+  X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
+                          dl, MVT::i8);
+  return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
 }
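// Standalone illustration of the patterns LowerAndToBT matches (plain C++,
// hypothetical helper, not LLVM API): both source forms reduce to "test bit
// N of X", which is exactly what X86ISD::BT computes into the carry flag.
#include <cassert>
#include <cstdint>
static bool testBit(uint64_t X, unsigned N) { return (X >> N) & 1; }
int main() {
  uint64_t X = 0b1010;
  unsigned N = 3;
  assert(((X & (1ull << N)) != 0) == testBit(X, N)); // (X & (1 << N)) form
  assert((((X >> N) & 1) != 0) == testBit(X, N));    // ((X >> N) & 1) form
}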
 
 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
@@ -19218,7 +19358,7 @@
     break;
   }
 
-  SDValue Result = DAG.getNode(X86ISD::SUBUS, dl, VT, Op0, Op1);
+  SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
   return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                      DAG.getConstant(0, dl, VT));
 }
@@ -19383,13 +19523,26 @@
   bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
                    !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
 
-  // Special case: Use min/max operations for unsigned compares. We only want
-  // to do this for unsigned compares if we need to flip signs or if it allows
-  // use to avoid an invert.
+  // Special case: Use min/max operations for unsigned compares.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   if (ISD::isUnsignedIntSetCC(Cond) &&
       (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
       TLI.isOperationLegal(ISD::UMIN, VT)) {
+    // If we have a constant operand, increment/decrement it and change the
+    // condition to avoid an invert.
+    // TODO: This could be extended to handle a non-splat constant by checking
+    // that each element of the constant is not the max/null value.
+    APInt C;
+    if (Cond == ISD::SETUGT && isConstantSplat(Op1, C) && !C.isMaxValue()) {
+      // X > C --> X >= (C+1) --> X == umax(X, C+1)
+      Op1 = DAG.getConstant(C + 1, dl, VT);
+      Cond = ISD::SETUGE;
+    }
+    if (Cond == ISD::SETULT && isConstantSplat(Op1, C) && !C.isNullValue()) {
+      // X < C --> X <= (C-1) --> X == umin(X, C-1)
+      Op1 = DAG.getConstant(C - 1, dl, VT);
+      Cond = ISD::SETULE;
+    }
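// A standalone check of the constant tweak above (plain C++ sketch, not the
// lowering itself): for unsigned x and a non-boundary constant C,
// x > C <=> x == max(x, C+1), and x < C <=> x == min(x, C-1).
#include <algorithm>
#include <cassert>
#include <cstdint>
int main() {
  const uint32_t C = 42; // any constant other than 0 / UINT32_MAX
  for (uint32_t x : {0u, 41u, 42u, 43u, 0xFFFFFFFFu}) {
    assert((x > C) == (x == std::max(x, C + 1)));
    assert((x < C) == (x == std::min(x, C - 1)));
  }
}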
     bool Invert = false;
     unsigned Opc;
     switch (Cond) {
@@ -19515,7 +19668,8 @@
 // Try to select this as a KORTEST+SETCC if possible.
 static SDValue EmitKORTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
                            const SDLoc &dl, SelectionDAG &DAG,
-                           const X86Subtarget &Subtarget) {
+                           const X86Subtarget &Subtarget,
+                           SDValue &X86CC) {
   // Only support equality comparisons.
   if (CC != ISD::SETEQ && CC != ISD::SETNE)
     return SDValue();
@@ -19531,12 +19685,12 @@
       !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
     return SDValue();
 
-  X86::CondCode X86CC;
+  X86::CondCode X86Cond;
   if (isNullConstant(Op1)) {
-    X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
+    X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
   } else if (isAllOnesConstant(Op1)) {
     // C flag is set for all ones.
-    X86CC = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
+    X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
   } else
     return SDValue();
 
@@ -19548,8 +19702,67 @@
     RHS = Op0.getOperand(1);
   }
 
-  SDValue KORTEST = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
-  return getSETCC(X86CC, KORTEST, dl, DAG);
+  X86CC = DAG.getConstant(X86Cond, dl, MVT::i8);
+  return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
+}
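// Rough model of the KORTEST semantics EmitKORTEST relies on (standalone
// sketch, not LLVM API): KORTEST ors two mask registers, setting ZF when the
// result is all zeros and CF when it is all ones, so both k == 0 and k == ~0
// checks fold into a single instruction.
#include <cassert>
#include <cstdint>
struct Flags { bool ZF, CF; };
static Flags kortest16(uint16_t A, uint16_t B) {
  uint16_t R = A | B;
  return {R == 0, R == 0xFFFF};
}
int main() {
  assert(kortest16(0x0000, 0x0000).ZF); // all-zeros mask -> ZF
  assert(kortest16(0xFF00, 0x00FF).CF); // all-ones mask  -> CF
}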
+
+/// Emit flags for the given setcc condition and operands. Also returns the
+/// corresponding X86 condition code constant in X86CC.
+SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
+                                             ISD::CondCode CC, const SDLoc &dl,
+                                             SelectionDAG &DAG,
+                                             SDValue &X86CC) const {
+  // Optimize to BT if possible.
+  // Lower (X & (1 << N)) == 0 to BT(X, N).
+  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
+  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
+  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
+      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+    if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
+      return BT;
+  }
+
+  // Try to use PTEST for a tree of ORs equality-compared with 0.
+  // TODO: We could handle an AND tree with all 1s as well by using the C flag.
+  if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
+      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+    if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
+      return PTEST;
+  }
+
+  // Try to lower using KORTEST.
+  if (SDValue KORTEST = EmitKORTEST(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
+    return KORTEST;
+
+  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
+  // these.
+  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
+      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+    // If the input is a setcc, then reuse the input setcc or use a new one with
+    // the inverted condition.
+    if (Op0.getOpcode() == X86ISD::SETCC) {
+      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
+
+      X86CC = Op0.getOperand(0);
+      if (Invert) {
+        X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
+        CCode = X86::GetOppositeBranchCondition(CCode);
+        X86CC = DAG.getConstant(CCode, dl, MVT::i8);
+      }
+
+      return Op0.getOperand(1);
+    }
+  }
+
+  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
+  X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
+  if (CondCode == X86::COND_INVALID)
+    return SDValue();
+
+  SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
+  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
+  X86CC = DAG.getConstant(CondCode, dl, MVT::i8);
+  return EFLAGS;
 }
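// Scalar illustration of the setcc-reuse case above (standalone sketch):
// comparing an existing boolean against 0 or 1 never needs new flags, only
// (possibly) the opposite condition code.
#include <cassert>
int main() {
  for (bool b : {false, true}) {
    assert((b == 1) == b);  // reuse the setcc unchanged
    assert((b == 0) == !b); // reuse it with the inverted condition
    assert((b != 0) == b);
    assert((b != 1) == !b);
  }
}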
 
 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
@@ -19564,54 +19777,12 @@
   SDLoc dl(Op);
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
 
-  // Optimize to BT if possible.
-  // Lower (X & (1 << N)) == 0 to BT(X, N).
-  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
-  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
-  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
-      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-    if (SDValue NewSetCC = LowerAndToBT(Op0, CC, dl, DAG))
-      return NewSetCC;
-  }
-
-  // Try to use PTEST for a tree of ORs equality-compared with 0.
-  // TODO: We could handle an AND tree with all 1s as well by using the C flag.
-  if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
-      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-    if (SDValue NewSetCC = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG))
-      return NewSetCC;
-  }
-
-  // Try to lower using KORTEST.
-  if (SDValue NewSetCC = EmitKORTEST(Op0, Op1, CC, dl, DAG, Subtarget))
-    return NewSetCC;
-
-  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
-  // these.
-  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
-      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-
-    // If the input is a setcc, then reuse the input setcc or use a new one with
-    // the inverted condition.
-    if (Op0.getOpcode() == X86ISD::SETCC) {
-      X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
-      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
-      if (!Invert)
-        return Op0;
-
-      CCode = X86::GetOppositeBranchCondition(CCode);
-      return getSETCC(CCode, Op0.getOperand(1), dl, DAG);
-    }
-  }
-
-  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
-  X86::CondCode X86CC = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
-  if (X86CC == X86::COND_INVALID)
+  SDValue X86CC;
+  SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
+  if (!EFLAGS)
     return SDValue();
 
-  SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
-  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
-  return getSETCC(X86CC, EFLAGS, dl, DAG);
+  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
 }
 
 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
@@ -19635,6 +19806,70 @@
   return getSETCC(CC, Cmp.getValue(1), DL, DAG);
 }
 
+// This function returns three things: the arithmetic computation itself
+// (Value), an EFLAGS result (Overflow), and a condition code (Cond, via the
+// reference parameter). Together, the flag and the condition code define the
+// case in which the arithmetic computation overflows.
+static std::pair<SDValue, SDValue>
+getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
+  assert(Op.getResNo() == 0 && "Unexpected result number!");
+  SDValue Value, Overflow;
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  unsigned BaseOp = 0;
+  SDLoc DL(Op);
+  switch (Op.getOpcode()) {
+  default: llvm_unreachable("Unknown ovf instruction!");
+  case ISD::SADDO:
+    BaseOp = X86ISD::ADD;
+    Cond = X86::COND_O;
+    break;
+  case ISD::UADDO:
+    BaseOp = X86ISD::ADD;
+    Cond = X86::COND_B;
+    break;
+  case ISD::SSUBO:
+    BaseOp = X86ISD::SUB;
+    Cond = X86::COND_O;
+    break;
+  case ISD::USUBO:
+    BaseOp = X86ISD::SUB;
+    Cond = X86::COND_B;
+    break;
+  case ISD::SMULO:
+    BaseOp = X86ISD::SMUL;
+    Cond = X86::COND_O;
+    break;
+  case ISD::UMULO:
+    BaseOp = X86ISD::UMUL;
+    Cond = X86::COND_O;
+    break;
+  }
+
+  if (BaseOp) {
+    // Also sets EFLAGS.
+    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+    Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
+    Overflow = Value.getValue(1);
+  }
+
+  return std::make_pair(Value, Overflow);
+}
+
+static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
+  // Lower the "add/sub/mul with overflow" instruction into a regular
+  // instruction plus a "setcc" instruction that checks the overflow flag.
+  // The "brcond" lowering looks for this combo and may remove the "setcc"
+  // instruction if the "setcc" has only one use.
+  SDLoc DL(Op);
+  X86::CondCode Cond;
+  SDValue Value, Overflow;
+  std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
+
+  SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
+  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
+}
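// Standalone model of the value/overflow pair produced above (plain C++, not
// LLVM API): one operation yields both results; X86::COND_B corresponds to
// the unsigned carry, X86::COND_O to signed overflow.
#include <cassert>
#include <cstdint>
#include <utility>
static std::pair<uint32_t, bool> uaddo32(uint32_t A, uint32_t B) {
  uint32_t Sum = A + B;
  return {Sum, Sum < A}; // carry: unsigned wraparound happened
}
int main() {
  assert(!uaddo32(1, 2).second);
  assert(uaddo32(0xFFFFFFFFu, 1).second);
}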
+
 /// Return true if opcode is a X86 logical comparison.
 static bool isX86LogicalCmp(SDValue Op) {
   unsigned Opc = Op.getOpcode();
@@ -19643,12 +19878,8 @@
     return true;
   if (Op.getResNo() == 1 &&
       (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
-       Opc == X86ISD::SBB || Opc == X86ISD::SMUL ||
-       Opc == X86ISD::INC || Opc == X86ISD::DEC || Opc == X86ISD::OR ||
-       Opc == X86ISD::XOR || Opc == X86ISD::AND))
-    return true;
-
-  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
+       Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
+       Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
     return true;
 
   return false;
@@ -19895,34 +20126,10 @@
     }
   } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
              CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
-             ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
-              Cond.getOperand(0).getValueType() != MVT::i8)) {
-    SDValue LHS = Cond.getOperand(0);
-    SDValue RHS = Cond.getOperand(1);
-    unsigned X86Opcode;
-    unsigned X86Cond;
-    SDVTList VTs;
-    switch (CondOpcode) {
-    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
-    case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
-    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
-    case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
-    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
-    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
-    default: llvm_unreachable("unexpected overflowing operator");
-    }
-    if (CondOpcode == ISD::UMULO)
-      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
-                          MVT::i32);
-    else
-      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
-
-    SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
-
-    if (CondOpcode == ISD::UMULO)
-      Cond = X86Op.getValue(2);
-    else
-      Cond = X86Op.getValue(1);
+             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
+    SDValue Value;
+    X86::CondCode X86Cond;
+    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
 
     CC = DAG.getConstant(X86Cond, DL, MVT::i8);
     AddTest = false;
@@ -19936,9 +20143,10 @@
     // We know the result of AND is compared against zero. Try to match
     // it to BT.
     if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
-      if (SDValue NewSetCC = LowerAndToBT(Cond, ISD::SETNE, DL, DAG)) {
-        CC = NewSetCC.getOperand(0);
-        Cond = NewSetCC.getOperand(1);
+      SDValue BTCC;
+      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
+        CC = BTCC;
+        Cond = BT;
         AddTest = false;
       }
     }
@@ -20260,10 +20468,11 @@
     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
            "Expected AVX512F without AVX512DQI");
 
-    StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
-                            DAG.getUNDEF(MVT::v8i1), StoredVal,
+    StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
+                            DAG.getUNDEF(MVT::v16i1), StoredVal,
                             DAG.getIntPtrConstant(0, dl));
-    StoredVal = DAG.getBitcast(MVT::i8, StoredVal);
+    StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
+    StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
 
     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                         St->getPointerInfo(), St->getAlignment(),
@@ -20329,10 +20538,11 @@
     // Replace chain users with the new chain.
     assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
 
-    SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
-                                  DAG.getBitcast(MVT::v8i1, NewLd),
-                                  DAG.getIntPtrConstant(0, dl));
-    return DAG.getMergeValues({Extract, NewLd.getValue(1)}, dl);
+    SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
+    Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
+                      DAG.getBitcast(MVT::v16i1, Val),
+                      DAG.getIntPtrConstant(0, dl));
+    return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
   }
 
   // Nothing useful we can do without SSE2 shuffles.
@@ -20601,49 +20811,13 @@
   CondOpcode = Cond.getOpcode();
   if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
       CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
-      ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
-       Cond.getOperand(0).getValueType() != MVT::i8)) {
-    SDValue LHS = Cond.getOperand(0);
-    SDValue RHS = Cond.getOperand(1);
-    unsigned X86Opcode;
-    unsigned X86Cond;
-    SDVTList VTs;
-    // Keep this in sync with LowerXALUO, otherwise we might create redundant
-    // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
-    // X86ISD::INC).
-    switch (CondOpcode) {
-    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
-    case ISD::SADDO:
-      if (isOneConstant(RHS)) {
-          X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
-          break;
-        }
-      X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
-    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
-    case ISD::SSUBO:
-      if (isOneConstant(RHS)) {
-          X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
-          break;
-        }
-      X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
-    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
-    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
-    default: llvm_unreachable("unexpected overflowing operator");
-    }
+      CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
+    SDValue Value;
+    X86::CondCode X86Cond;
+    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
+
     if (Inverted)
-      X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
-    if (CondOpcode == ISD::UMULO)
-      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
-                          MVT::i32);
-    else
-      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
-
-    SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
-
-    if (CondOpcode == ISD::UMULO)
-      Cond = X86Op.getValue(2);
-    else
-      Cond = X86Op.getValue(1);
+      X86Cond = X86::GetOppositeBranchCondition(X86Cond);
 
     CC = DAG.getConstant(X86Cond, dl, MVT::i8);
     addTest = false;
@@ -20744,34 +20918,17 @@
     } else if (Cond.getOpcode() == ISD::SETCC &&
                cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
       // For FCMP_UNE, we can emit
-      // two branches instead of an explicit AND instruction with a
-      // separate test. However, we only do this if this block doesn't
-      // have a fall-through edge, because this requires an explicit
-      // jmp when the condition is false.
-      if (Op.getNode()->hasOneUse()) {
-        SDNode *User = *Op.getNode()->use_begin();
-        // Look for an unconditional branch following this conditional branch.
-        // We need this because we need to reverse the successors in order
-        // to implement FCMP_UNE.
-        if (User->getOpcode() == ISD::BR) {
-          SDValue FalseBB = User->getOperand(1);
-          SDNode *NewBR =
-            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
-          assert(NewBR == User);
-          (void)NewBR;
-
-          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
-                                    Cond.getOperand(0), Cond.getOperand(1));
-          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
-          CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
-          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
-                              Chain, Dest, CC, Cmp);
-          CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8);
-          Cond = Cmp;
-          addTest = false;
-          Dest = FalseBB;
-        }
-      }
+      // two branches instead of an explicit OR instruction with a
+      // separate test.
+      SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
+                                Cond.getOperand(0), Cond.getOperand(1));
+      Cmp = ConvertCmpIfNecessary(Cmp, DAG);
+      CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
+      Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
+                          Chain, Dest, CC, Cmp);
+      CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
+      Cond = Cmp;
+      addTest = false;
     }
   }
 
@@ -20783,9 +20940,10 @@
     // We know the result of AND is compared against zero. Try to match
     // it to BT.
     if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
-      if (SDValue NewSetCC = LowerAndToBT(Cond, ISD::SETNE, dl, DAG)) {
-        CC = NewSetCC.getOperand(0);
-        Cond = NewSetCC.getOperand(1);
+      SDValue BTCC;
+      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
+        CC = BTCC;
+        Cond = BT;
         addTest = false;
       }
     }
@@ -21212,11 +21370,7 @@
   if (X86::isZeroNode(Mask))
     return DAG.getConstant(0, dl, MaskVT);
 
-  if (MaskVT.bitsGT(Mask.getSimpleValueType())) {
-    // Mask should be extended
-    Mask = DAG.getNode(ISD::ANY_EXTEND, dl,
-                       MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask);
-  }
+  assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
 
   if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
     assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
@@ -21260,24 +21414,6 @@
 
   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
 
-  switch (Op.getOpcode()) {
-  default: break;
-  case X86ISD::CMPM:
-  case X86ISD::CMPM_RND:
-  case X86ISD::VPSHUFBITQMB:
-  case X86ISD::VFPCLASS:
-    return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
-  case ISD::TRUNCATE:
-  case X86ISD::VTRUNC:
-  case X86ISD::VTRUNCS:
-  case X86ISD::VTRUNCUS:
-  case X86ISD::CVTPS2PH:
-    // We can't use ISD::VSELECT here because it is not always "Legal"
-    // for the destination type. For example, vpmovqb requires only AVX512
-    // and a vselect that can operate on byte element types requires BWI.
-    OpcodeSelect = X86ISD::SELECT;
-    break;
-  }
   if (PreservedSrc.isUndef())
     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
   return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
@@ -21408,13 +21544,9 @@
       }
       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
     }
-    case INTR_TYPE_2OP:
-    case INTR_TYPE_2OP_IMM8: {
+    case INTR_TYPE_2OP: {
       SDValue Src2 = Op.getOperand(2);
 
-      if (IntrData->Type == INTR_TYPE_2OP_IMM8)
-        Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2);
-
       // We specify 2 possible opcodes for intrinsics with rounding modes.
       // First, we check if the intrinsic may have non-default rounding mode,
       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
@@ -21646,38 +21778,6 @@
       // Swap Src1 and Src2 in the node creation
       return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
     }
-    case FMA_OP_MASKZ:
-    case FMA_OP_MASK: {
-      SDValue Src1 = Op.getOperand(1);
-      SDValue Src2 = Op.getOperand(2);
-      SDValue Src3 = Op.getOperand(3);
-      SDValue Mask = Op.getOperand(4);
-      MVT VT = Op.getSimpleValueType();
-      SDValue PassThru = SDValue();
-
-      // set PassThru element
-      if (IntrData->Type == FMA_OP_MASKZ)
-        PassThru = getZeroVector(VT, Subtarget, DAG, dl);
-      else
-        PassThru = Src1;
-
-      // We specify 2 possible opcodes for intrinsics with rounding modes.
-      // First, we check if the intrinsic may have non-default rounding mode,
-      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
-      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
-      if (IntrWithRoundingModeOpcode != 0) {
-        SDValue Rnd = Op.getOperand(5);
-        if (!isRoundModeCurDirection(Rnd))
-          return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
-                                                  dl, Op.getValueType(),
-                                                  Src1, Src2, Src3, Rnd),
-                                      Mask, PassThru, Subtarget, DAG);
-      }
-      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
-                                              dl, Op.getValueType(),
-                                              Src1, Src2, Src3),
-                                  Mask, PassThru, Subtarget, DAG);
-    }
     case IFMA_OP:
       // NOTE: We need to swizzle the operands to pass the multiply operands
       // first.
@@ -21688,7 +21788,7 @@
       // does not change the value. Set it to 0 since it can change.
       return DAG.getNode(IntrData->Opc0, dl, VT, Op.getOperand(1),
                          DAG.getIntPtrConstant(0, dl));
-    case CVTPD2PS_MASK: {
+    case CVTPD2PS_RND_MASK: {
       SDValue Src = Op.getOperand(1);
       SDValue PassThru = Op.getOperand(2);
       SDValue Mask = Op.getOperand(3);
@@ -21712,13 +21812,6 @@
                                               DAG.getIntPtrConstant(0, dl)),
                                   Mask, PassThru, Subtarget, DAG);
     }
-    case FPCLASS: {
-      // FPclass intrinsics
-      SDValue Src1 = Op.getOperand(1);
-      MVT MaskVT = Op.getSimpleValueType();
-      SDValue Imm = Op.getOperand(2);
-      return DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Imm);
-    }
     case FPCLASSS: {
       SDValue Src1 = Op.getOperand(1);
       SDValue Imm = Op.getOperand(2);
@@ -21733,32 +21826,6 @@
                                 FPclassMask, DAG.getIntPtrConstant(0, dl));
       return DAG.getBitcast(MVT::i8, Ins);
     }
-    case CMP_MASK: {
-      // Comparison intrinsics with masks.
-      // Example of transformation:
-      // (i8 (int_x86_avx512_mask_pcmpeq_q_128
-      //             (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
-      // (i8 (bitcast
-      //   (v8i1 (insert_subvector zero,
-      //           (v2i1 (and (PCMPEQM %a, %b),
-      //                      (extract_subvector
-      //                         (v8i1 (bitcast %mask)), 0))), 0))))
-      MVT VT = Op.getOperand(1).getSimpleValueType();
-      MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
-      SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
-      MVT BitcastVT = MVT::getVectorVT(MVT::i1,
-                                       Mask.getSimpleValueType().getSizeInBits());
-      SDValue Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
-                                Op.getOperand(2));
-      SDValue CmpMask = getVectorMaskingNode(Cmp, Mask, SDValue(),
-                                             Subtarget, DAG);
-      // Need to fill with zeros to ensure the bitcast will produce zeroes
-      // for the upper bits in the v2i1/v4i1 case.
-      SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
-                                DAG.getConstant(0, dl, BitcastVT),
-                                CmpMask, DAG.getIntPtrConstant(0, dl));
-      return DAG.getBitcast(Op.getValueType(), Res);
-    }
 
     case CMP_MASK_CC: {
       MVT MaskVT = Op.getSimpleValueType();
@@ -21950,6 +22017,38 @@
       SDValue Results[] = { SetCC, Res };
       return DAG.getMergeValues(Results, dl);
     }
+    case CVTPD2PS_MASK:
+    case CVTPD2I_MASK:
+    case TRUNCATE_TO_REG: {
+      SDValue Src = Op.getOperand(1);
+      SDValue PassThru = Op.getOperand(2);
+      SDValue Mask = Op.getOperand(3);
+
+      if (isAllOnesConstant(Mask))
+        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
+
+      MVT SrcVT = Src.getSimpleValueType();
+      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
+      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
+                         Mask);
+    }
+    case CVTPS2PH_MASK: {
+      SDValue Src = Op.getOperand(1);
+      SDValue Rnd = Op.getOperand(2);
+      SDValue PassThru = Op.getOperand(3);
+      SDValue Mask = Op.getOperand(4);
+
+      if (isAllOnesConstant(Mask))
+        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
+
+      MVT SrcVT = Src.getSimpleValueType();
+      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
+      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
+                         PassThru, Mask);
+    }
     default:
       break;
     }
@@ -22161,14 +22260,14 @@
     return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
   }
 
-  case Intrinsic::x86_seh_recoverfp: {
+  case Intrinsic::eh_recoverfp: {
     SDValue FnOp = Op.getOperand(1);
     SDValue IncomingFPOp = Op.getOperand(2);
     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
     if (!Fn)
       report_fatal_error(
-          "llvm.x86.seh.recoverfp must take a function as the first argument");
+          "llvm.eh.recoverfp must take a function as the first argument");
     return recoverFramePointer(DAG, Fn, IncomingFPOp);
   }
 
@@ -22216,25 +22315,31 @@
                               SDValue Src, SDValue Mask, SDValue Base,
                               SDValue Index, SDValue ScaleOp, SDValue Chain,
                               const X86Subtarget &Subtarget) {
+  MVT VT = Op.getSimpleValueType();
   SDLoc dl(Op);
   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
   // Scale must be constant.
   if (!C)
     return SDValue();
   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
-  MVT MaskVT = MVT::getVectorVT(MVT::i1,
-                             Index.getSimpleValueType().getVectorNumElements());
+  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
+                              VT.getVectorNumElements());
+  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
 
-  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+  // We support two versions of the gather intrinsics: one with a scalar mask
+  // and one with a vXi1 mask. Convert a scalar mask to vXi1 if necessary.
+  if (Mask.getValueType() != MaskVT)
+    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+
   SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
   SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
   SDValue Segment = DAG.getRegister(0, MVT::i32);
   // If source is undef or we know it won't be used, use a zero vector
   // to break register dependency.
   // TODO: use undef instead and let BreakFalseDeps deal with it?
-  if (Src.isUndef() || ISD::isBuildVectorAllOnes(VMask.getNode()))
+  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
-  SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain};
+  SDValue Ops[] = {Src, Mask, Base, Scale, Index, Disp, Segment, Chain};
   SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
   SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
   return DAG.getMergeValues(RetOps, dl);
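// Sketch of the two mask encodings mentioned above (standalone, hypothetical
// helper): a scalar i8 mask is the v8i1 mask packed into bits, so converting
// between the intrinsic forms is a per-bit unpack.
#include <array>
#include <cassert>
#include <cstdint>
static std::array<bool, 8> scalarToV8i1(uint8_t M) {
  std::array<bool, 8> V{};
  for (unsigned I = 0; I != 8; ++I)
    V[I] = (M >> I) & 1;
  return V;
}
int main() {
  auto V = scalarToV8i1(0b00000101);
  assert(V[0] && !V[1] && V[2] && !V[7]);
}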
@@ -22252,12 +22357,17 @@
   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
   SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
   SDValue Segment = DAG.getRegister(0, MVT::i32);
-  MVT MaskVT = MVT::getVectorVT(MVT::i1,
-                             Index.getSimpleValueType().getVectorNumElements());
+  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
+                              Src.getSimpleValueType().getVectorNumElements());
+  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
 
-  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+  // We support two versions of the scatter intrinsics: one with a scalar mask
+  // and one with a vXi1 mask. Convert a scalar mask to vXi1 if necessary.
+  if (Mask.getValueType() != MaskVT)
+    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+
   SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
-  SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain};
+  SDValue Ops[] = {Base, Scale, Index, Disp, Segment, Mask, Src, Chain};
   SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
   return SDValue(Res, 1);
 }
@@ -23413,18 +23523,46 @@
                      DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
 }
 
-static SDValue LowerADD_SUB(SDValue Op, SelectionDAG &DAG) {
+static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
+                           const X86Subtarget &Subtarget) {
   MVT VT = Op.getSimpleValueType();
+  if (VT == MVT::i16 || VT == MVT::i32)
+    return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
+
   if (VT.getScalarType() == MVT::i1)
     return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
                        Op.getOperand(0), Op.getOperand(1));
+
   assert(Op.getSimpleValueType().is256BitVector() &&
          Op.getSimpleValueType().isInteger() &&
          "Only handle AVX 256-bit vector integer operation");
   return split256IntArith(Op, DAG);
 }
 
-static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG) {
+  MVT VT = Op.getSimpleValueType();
+  if (VT.getScalarType() == MVT::i1) {
+    SDLoc dl(Op);
+    switch (Op.getOpcode()) {
+    default: llvm_unreachable("Expected saturated arithmetic opcode");
+    case ISD::UADDSAT:
+    case ISD::SADDSAT:
+      return DAG.getNode(ISD::OR, dl, VT, Op.getOperand(0), Op.getOperand(1));
+    case ISD::USUBSAT:
+    case ISD::SSUBSAT:
+      return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
+                         DAG.getNOT(dl, Op.getOperand(1), VT));
+    }
+  }
+
+  assert(Op.getSimpleValueType().is256BitVector() &&
+         Op.getSimpleValueType().isInteger() &&
+         "Only handle AVX 256-bit vector integer operation");
+  return split256IntArith(Op, DAG);
+}
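// One-bit sanity check of the i1 mappings above (standalone sketch): with
// only the values 0 and 1, saturating add is OR (1+1 clamps to 1) and
// saturating unsigned subtract is AND-NOT (0-1 clamps to 0).
#include <algorithm>
#include <cassert>
int main() {
  for (int A : {0, 1})
    for (int B : {0, 1}) {
      assert(std::min(A + B, 1) == (A | B));      // uaddsat.i1
      assert(std::max(A - B, 0) == (A & ~B & 1)); // usubsat.i1
    }
}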
+
+static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
+                        SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
   if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
     // Since X86 does not have CMOV for 8-bit integer, we don't convert
@@ -23438,10 +23576,23 @@
     return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
   }
 
-  assert(Op.getSimpleValueType().is256BitVector() &&
-         Op.getSimpleValueType().isInteger() &&
-         "Only handle AVX 256-bit vector integer operation");
-  return Lower256IntUnary(Op, DAG);
+  // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
+  if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
+    SDLoc DL(Op);
+    SDValue Src = Op.getOperand(0);
+    SDValue Sub =
+        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
+    return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
+  }
+
+  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+    assert(VT.isInteger() &&
+           "Only handle AVX 256-bit vector integer operation");
+    return Lower256IntUnary(Op, DAG);
+  }
+
+  // Default to expand.
+  return SDValue();
 }
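// Scalar model of the vXi64 path above (standalone, not LLVM API): BLENDV
// selects its second operand wherever the selector's sign bit is set, so
// blending X with 0-X on X's own sign bit yields |X|.
#include <cassert>
#include <cstdint>
static int64_t absViaBlend(int64_t X) {
  int64_t Neg = 0 - X;      // the "0 - X" vector above
  return (X < 0) ? Neg : X; // blendv keyed on X's sign bit
}
int main() {
  assert(absViaBlend(-7) == 7);
  assert(absViaBlend(7) == 7);
}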
 
 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
@@ -23592,9 +23743,8 @@
   //
   //  Hi = psllqi(AloBhi + AhiBlo, 32);
   //  return AloBlo + Hi;
-  KnownBits AKnown, BKnown;
-  DAG.computeKnownBits(A, AKnown);
-  DAG.computeKnownBits(B, BKnown);
+  KnownBits AKnown = DAG.computeKnownBits(A);
+  KnownBits BKnown = DAG.computeKnownBits(B);
 
   APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
   bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
@@ -24764,21 +24914,31 @@
   SDValue Amt = Op.getOperand(1);
   unsigned Opcode = Op.getOpcode();
   unsigned EltSizeInBits = VT.getScalarSizeInBits();
+  int NumElts = VT.getVectorNumElements();
+
+  // Check for constant splat rotation amount.
+  APInt UndefElts;
+  SmallVector<APInt, 32> EltBits;
+  int CstSplatIndex = -1;
+  if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
+    for (int i = 0; i != NumElts; ++i)
+      if (!UndefElts[i]) {
+        if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
+          CstSplatIndex = i;
+          continue;
+        }
+        CstSplatIndex = -1;
+        break;
+      }
 
   // AVX512 implicitly uses modulo rotation amounts.
   if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
     // Attempt to rotate by immediate.
-    APInt UndefElts;
-    SmallVector<APInt, 16> EltBits;
-    if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits)) {
-      if (!UndefElts && llvm::all_of(EltBits, [EltBits](APInt &V) {
-            return EltBits[0] == V;
-          })) {
-        unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
-        uint64_t RotateAmt = EltBits[0].urem(EltSizeInBits);
-        return DAG.getNode(Op, DL, VT, R,
-                           DAG.getConstant(RotateAmt, DL, MVT::i8));
-      }
+    if (0 <= CstSplatIndex) {
+      unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
+      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
+      return DAG.getNode(Op, DL, VT, R,
+                         DAG.getConstant(RotateAmt, DL, MVT::i8));
     }
 
     // Else, fall-back on VPROLV/VPRORV.
@@ -24796,12 +24956,10 @@
     assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
 
     // Attempt to rotate by immediate.
-    if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
-      if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
-        uint64_t RotateAmt = RotateConst->getAPIntValue().urem(EltSizeInBits);
-        return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
-                           DAG.getConstant(RotateAmt, DL, MVT::i8));
-      }
+    if (0 <= CstSplatIndex) {
+      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
+      return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
+                         DAG.getConstant(RotateAmt, DL, MVT::i8));
     }
 
     // Use general rotate by variable (per-element).
@@ -24818,15 +24976,19 @@
          "Only vXi32/vXi16/vXi8 vector rotates supported");
 
   // Rotate by a uniform constant - expand back to shifts.
-  if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt))
-    if (BVAmt->getConstantSplatNode())
-      return SDValue();
+  if (0 <= CstSplatIndex)
+    return SDValue();
+
+  bool IsSplatAmt = DAG.isSplatValue(Amt);
 
   // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
   // the amount bit.
-  if (EltSizeInBits == 8) {
+  if (EltSizeInBits == 8 && !IsSplatAmt) {
+    if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
+      return SDValue();
+
     // We don't need ModuloAmt here as we just peek at individual bits.
-    MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
+    MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
 
     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
       if (Subtarget.hasSSE41()) {
@@ -24891,8 +25053,7 @@
 
   // Fallback for splats + all supported variable shifts.
   // Fallback for non-constants AVX2 vXi16 as well.
-  if (LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt) ||
-      DAG.isSplatValue(Amt)) {
+  if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
     SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
     AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
     SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
@@ -24933,78 +25094,6 @@
                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
 }
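// Standalone sketch of the shift-based fallback used above: a rotate left is
// the OR of a left shift by amt and a right shift by width-amt (the amount is
// reduced modulo the element width so both scalar shifts stay defined).
#include <cassert>
#include <cstdint>
static uint32_t rotl32(uint32_t X, uint32_t Amt) {
  Amt &= 31;
  if (Amt == 0)
    return X;
  return (X << Amt) | (X >> (32 - Amt));
}
int main() {
  assert(rotl32(0x80000001u, 1) == 0x00000003u);
}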
 
-static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
-  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
-  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
-  // looks for this combo and may remove the "setcc" instruction if the "setcc"
-  // has only one use.
-  SDNode *N = Op.getNode();
-  SDValue LHS = N->getOperand(0);
-  SDValue RHS = N->getOperand(1);
-  unsigned BaseOp = 0;
-  X86::CondCode Cond;
-  SDLoc DL(Op);
-  switch (Op.getOpcode()) {
-  default: llvm_unreachable("Unknown ovf instruction!");
-  case ISD::SADDO:
-    // An add of one will be selected as an INC. Note that INC doesn't
-    // set CF, so we can't do this for UADDO.
-    if (isOneConstant(RHS)) {
-      BaseOp = X86ISD::INC;
-      Cond = X86::COND_O;
-      break;
-    }
-    BaseOp = X86ISD::ADD;
-    Cond = X86::COND_O;
-    break;
-  case ISD::UADDO:
-    BaseOp = X86ISD::ADD;
-    Cond = X86::COND_B;
-    break;
-  case ISD::SSUBO:
-    // A subtract of one will be selected as a DEC. Note that DEC doesn't
-    // set CF, so we can't do this for USUBO.
-    if (isOneConstant(RHS)) {
-      BaseOp = X86ISD::DEC;
-      Cond = X86::COND_O;
-      break;
-    }
-    BaseOp = X86ISD::SUB;
-    Cond = X86::COND_O;
-    break;
-  case ISD::USUBO:
-    BaseOp = X86ISD::SUB;
-    Cond = X86::COND_B;
-    break;
-  case ISD::SMULO:
-    BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
-    Cond = X86::COND_O;
-    break;
-  case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
-    if (N->getValueType(0) == MVT::i8) {
-      BaseOp = X86ISD::UMUL8;
-      Cond = X86::COND_O;
-      break;
-    }
-    SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
-                                 MVT::i32);
-    SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
-
-    SDValue SetCC = getSETCC(X86::COND_O, SDValue(Sum.getNode(), 2), DL, DAG);
-
-    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
-  }
-  }
-
-  // Also sets EFLAGS.
-  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
-  SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
-
-  SDValue SetCC = getSETCC(Cond, SDValue(Sum.getNode(), 1), DL, DAG);
-
-  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
-}
-
 /// Returns true if the operand type is exactly twice the native width, and
 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
@@ -25147,7 +25236,7 @@
       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
 
     SDValue Chain = Op.getOperand(0);
-    SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
+    SDValue Zero = DAG.getTargetConstant(0, dl, MVT::i32);
     SDValue Ops[] = {
       DAG.getRegister(X86::ESP, MVT::i32),     // Base
       DAG.getTargetConstant(1, dl, MVT::i8),   // Scale
@@ -25157,7 +25246,7 @@
       Zero,
       Chain
     };
-    SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
+    SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, dl, MVT::Other, Ops);
     return SDValue(Res, 0);
   }
 
@@ -25581,8 +25670,7 @@
 }
 
 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
-                                        const X86Subtarget &Subtarget,
-                                        bool AllowIncDec = true) {
+                                        const X86Subtarget &Subtarget) {
   unsigned NewOpc = 0;
   switch (N->getOpcode()) {
   case ISD::ATOMIC_LOAD_ADD:
@@ -25606,25 +25694,6 @@
 
   MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
 
-  if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
-    // Convert to inc/dec if they aren't slow or we are optimizing for size.
-    if (AllowIncDec && (!Subtarget.slowIncDec() ||
-                        DAG.getMachineFunction().getFunction().optForSize())) {
-      if ((NewOpc == X86ISD::LADD && C->isOne()) ||
-          (NewOpc == X86ISD::LSUB && C->isAllOnesValue()))
-        return DAG.getMemIntrinsicNode(X86ISD::LINC, SDLoc(N),
-                                       DAG.getVTList(MVT::i32, MVT::Other),
-                                       {N->getOperand(0), N->getOperand(1)},
-                                       /*MemVT=*/N->getSimpleValueType(0), MMO);
-      if ((NewOpc == X86ISD::LSUB && C->isOne()) ||
-          (NewOpc == X86ISD::LADD && C->isAllOnesValue()))
-        return DAG.getMemIntrinsicNode(X86ISD::LDEC, SDLoc(N),
-                                       DAG.getVTList(MVT::i32, MVT::Other),
-                                       {N->getOperand(0), N->getOperand(1)},
-                                       /*MemVT=*/N->getSimpleValueType(0), MMO);
-    }
-  }
-
   return DAG.getMemIntrinsicNode(
       NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
       {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
@@ -26145,6 +26214,8 @@
   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
+  case ISD::FADD:
+  case ISD::FSUB:               return lowerFaddFsub(Op, DAG, Subtarget);
   case ISD::FABS:
   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
@@ -26197,12 +26268,16 @@
   case ISD::ADDCARRY:
   case ISD::SUBCARRY:           return LowerADDSUBCARRY(Op, DAG);
   case ISD::ADD:
-  case ISD::SUB:                return LowerADD_SUB(Op, DAG);
+  case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
+  case ISD::UADDSAT:
+  case ISD::SADDSAT:
+  case ISD::USUBSAT:
+  case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG);
   case ISD::SMAX:
   case ISD::SMIN:
   case ISD::UMAX:
   case ISD::UMIN:               return LowerMINMAX(Op, DAG);
-  case ISD::ABS:                return LowerABS(Op, DAG);
+  case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
   case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
   case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
   case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
@@ -26242,7 +26317,6 @@
                                            SmallVectorImpl<SDValue>&Results,
                                            SelectionDAG &DAG) const {
   SDLoc dl(N);
-  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   switch (N->getOpcode()) {
   default:
     llvm_unreachable("Do not know how to custom type legalize this operation!");
@@ -26254,13 +26328,9 @@
       // Promote to a pattern that will be turned into PMULUDQ.
       SDValue N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
                                N->getOperand(0));
-      N0 = DAG.getNode(ISD::AND, dl, MVT::v2i64, N0,
-                       DAG.getConstant(0xffffffff, dl, MVT::v2i64));
       SDValue N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
                                N->getOperand(1));
-      N1 = DAG.getNode(ISD::AND, dl, MVT::v2i64, N1,
-                       DAG.getConstant(0xffffffff, dl, MVT::v2i64));
-      SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v2i64, N0, N1);
+      SDValue Mul = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, N0, N1);
       Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, VT, Mul));
     } else if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
                VT.getVectorElementType() == MVT::i8) {
@@ -26279,11 +26349,14 @@
     }
     return;
   }
+  case ISD::UADDSAT:
+  case ISD::SADDSAT:
+  case ISD::USUBSAT:
+  case ISD::SSUBSAT:
   case X86ISD::VPMADDWD:
-  case X86ISD::ADDUS:
-  case X86ISD::SUBUS:
   case X86ISD::AVG: {
-    // Legalize types for X86ISD::AVG/ADDUS/SUBUS/VPMADDWD by widening.
+    // Legalize types for ISD::UADDSAT/SADDSAT/USUBSAT/SSUBSAT and
+    // X86ISD::AVG/VPMADDWD by widening.
     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
 
     EVT VT = N->getValueType(0);
@@ -26361,7 +26434,7 @@
         unsigned NumConcats = 128 / VT.getSizeInBits();
         SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
         Ops0[0] = N->getOperand(0);
-        EVT ResVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+        EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
         SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
         SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
         SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
@@ -26440,6 +26513,48 @@
     }
     return;
   }
+  case ISD::SIGN_EXTEND_VECTOR_INREG: {
+    if (ExperimentalVectorWideningLegalization)
+      return;
+
+    EVT VT = N->getValueType(0);
+    SDValue In = N->getOperand(0);
+    EVT InVT = In.getValueType();
+    if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
+        (InVT == MVT::v16i16 || InVT == MVT::v32i8)) {
+      // Custom split this so we can extend i8/i16->i32 invec. This is better
+      // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
+      // sra, followed by an extend from i32 to i64 using pcmpgt. By custom
+      // splitting we allow the sra from the extend to i32 to be shared by the
+      // split.
+      EVT ExtractVT = EVT::getVectorVT(*DAG.getContext(),
+                                       InVT.getVectorElementType(),
+                                       InVT.getVectorNumElements() / 2);
+      MVT ExtendVT = MVT::getVectorVT(MVT::i32,
+                                      VT.getVectorNumElements());
+      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ExtractVT,
+                       In, DAG.getIntPtrConstant(0, dl));
+      In = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, MVT::v4i32, In);
+
+      // Fill a vector with sign bits for each element.
+      SDValue Zero = DAG.getConstant(0, dl, ExtendVT);
+      SDValue SignBits = DAG.getSetCC(dl, ExtendVT, Zero, In, ISD::SETGT);
+
+      EVT LoVT, HiVT;
+      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+
+      // Create an unpackl and unpackh to interleave the sign bits then bitcast
+      // to vXi64.
+      SDValue Lo = getUnpackl(DAG, dl, ExtendVT, In, SignBits);
+      Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
+      SDValue Hi = getUnpackh(DAG, dl, ExtendVT, In, SignBits);
+      Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
+
+      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
+      Results.push_back(Res);
+      return;
+    }
+    return;
+  }
   case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND: {
     if (!ExperimentalVectorWideningLegalization)
@@ -26451,8 +26566,9 @@
     if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
         (InVT == MVT::v4i16 || InVT == MVT::v4i8)) {
       // Custom split this so we can extend i8/i16->i32 invec. This is better
-      // since sign_extend_inreg i8/i16->i64 requires two sra operations. So
-      // this allows the first to be shared.
+      // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
+      // sra, followed by an extend from i32 to i64 using pcmpgt. By custom
+      // splitting we allow the sra from the extend to i32 to be shared by the
+      // split.
       In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
 
       // Fill a vector with sign bits for each element.
@@ -26665,7 +26781,7 @@
     return;
   }
   case ISD::FP_ROUND: {
-    if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
+    if (!isTypeLegal(N->getOperand(0).getValueType()))
         return;
     SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
     Results.push_back(V);
@@ -27016,9 +27132,7 @@
   case X86ISD::PSHUFB:             return "X86ISD::PSHUFB";
   case X86ISD::ANDNP:              return "X86ISD::ANDNP";
   case X86ISD::BLENDI:             return "X86ISD::BLENDI";
-  case X86ISD::SHRUNKBLEND:        return "X86ISD::SHRUNKBLEND";
-  case X86ISD::ADDUS:              return "X86ISD::ADDUS";
-  case X86ISD::SUBUS:              return "X86ISD::SUBUS";
+  case X86ISD::BLENDV:             return "X86ISD::BLENDV";
   case X86ISD::HADD:               return "X86ISD::HADD";
   case X86ISD::HSUB:               return "X86ISD::HSUB";
   case X86ISD::FHADD:              return "X86ISD::FHADD";
@@ -27061,13 +27175,14 @@
   case X86ISD::LOR:                return "X86ISD::LOR";
   case X86ISD::LXOR:               return "X86ISD::LXOR";
   case X86ISD::LAND:               return "X86ISD::LAND";
-  case X86ISD::LINC:               return "X86ISD::LINC";
-  case X86ISD::LDEC:               return "X86ISD::LDEC";
   case X86ISD::VZEXT_MOVL:         return "X86ISD::VZEXT_MOVL";
   case X86ISD::VZEXT_LOAD:         return "X86ISD::VZEXT_LOAD";
   case X86ISD::VTRUNC:             return "X86ISD::VTRUNC";
   case X86ISD::VTRUNCS:            return "X86ISD::VTRUNCS";
   case X86ISD::VTRUNCUS:           return "X86ISD::VTRUNCUS";
+  case X86ISD::VMTRUNC:            return "X86ISD::VMTRUNC";
+  case X86ISD::VMTRUNCS:           return "X86ISD::VMTRUNCS";
+  case X86ISD::VMTRUNCUS:          return "X86ISD::VMTRUNCUS";
   case X86ISD::VTRUNCSTORES:       return "X86ISD::VTRUNCSTORES";
   case X86ISD::VTRUNCSTOREUS:      return "X86ISD::VTRUNCSTOREUS";
   case X86ISD::VMTRUNCSTORES:      return "X86ISD::VMTRUNCSTORES";
@@ -27076,6 +27191,7 @@
   case X86ISD::VFPEXT_RND:         return "X86ISD::VFPEXT_RND";
   case X86ISD::VFPEXTS_RND:        return "X86ISD::VFPEXTS_RND";
   case X86ISD::VFPROUND:           return "X86ISD::VFPROUND";
+  case X86ISD::VMFPROUND:          return "X86ISD::VMFPROUND";
   case X86ISD::VFPROUND_RND:       return "X86ISD::VFPROUND_RND";
   case X86ISD::VFPROUNDS_RND:      return "X86ISD::VFPROUNDS_RND";
   case X86ISD::VSHLDQ:             return "X86ISD::VSHLDQ";
@@ -27100,10 +27216,6 @@
   case X86ISD::SBB:                return "X86ISD::SBB";
   case X86ISD::SMUL:               return "X86ISD::SMUL";
   case X86ISD::UMUL:               return "X86ISD::UMUL";
-  case X86ISD::SMUL8:              return "X86ISD::SMUL8";
-  case X86ISD::UMUL8:              return "X86ISD::UMUL8";
-  case X86ISD::INC:                return "X86ISD::INC";
-  case X86ISD::DEC:                return "X86ISD::DEC";
   case X86ISD::OR:                 return "X86ISD::OR";
   case X86ISD::XOR:                return "X86ISD::XOR";
   case X86ISD::AND:                return "X86ISD::AND";
@@ -27206,7 +27318,6 @@
   case X86ISD::XTEST:              return "X86ISD::XTEST";
   case X86ISD::COMPRESS:           return "X86ISD::COMPRESS";
   case X86ISD::EXPAND:             return "X86ISD::EXPAND";
-  case X86ISD::SELECT:             return "X86ISD::SELECT";
   case X86ISD::SELECTS:            return "X86ISD::SELECTS";
   case X86ISD::ADDSUB:             return "X86ISD::ADDSUB";
   case X86ISD::RCP14:              return "X86ISD::RCP14";
@@ -27232,14 +27343,14 @@
   case X86ISD::FGETEXPS_RND:       return "X86ISD::FGETEXPS_RND";
   case X86ISD::SCALEF:             return "X86ISD::SCALEF";
   case X86ISD::SCALEFS:            return "X86ISD::SCALEFS";
-  case X86ISD::ADDS:               return "X86ISD::ADDS";
-  case X86ISD::SUBS:               return "X86ISD::SUBS";
   case X86ISD::AVG:                return "X86ISD::AVG";
   case X86ISD::MULHRS:             return "X86ISD::MULHRS";
   case X86ISD::SINT_TO_FP_RND:     return "X86ISD::SINT_TO_FP_RND";
   case X86ISD::UINT_TO_FP_RND:     return "X86ISD::UINT_TO_FP_RND";
   case X86ISD::CVTTP2SI:           return "X86ISD::CVTTP2SI";
   case X86ISD::CVTTP2UI:           return "X86ISD::CVTTP2UI";
+  case X86ISD::MCVTTP2SI:          return "X86ISD::MCVTTP2SI";
+  case X86ISD::MCVTTP2UI:          return "X86ISD::MCVTTP2UI";
   case X86ISD::CVTTP2SI_RND:       return "X86ISD::CVTTP2SI_RND";
   case X86ISD::CVTTP2UI_RND:       return "X86ISD::CVTTP2UI_RND";
   case X86ISD::CVTTS2SI:           return "X86ISD::CVTTS2SI";
@@ -27254,10 +27365,13 @@
   case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
   case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
   case X86ISD::CVTPS2PH:           return "X86ISD::CVTPS2PH";
+  case X86ISD::MCVTPS2PH:          return "X86ISD::MCVTPS2PH";
   case X86ISD::CVTPH2PS:           return "X86ISD::CVTPH2PS";
   case X86ISD::CVTPH2PS_RND:       return "X86ISD::CVTPH2PS_RND";
   case X86ISD::CVTP2SI:            return "X86ISD::CVTP2SI";
   case X86ISD::CVTP2UI:            return "X86ISD::CVTP2UI";
+  case X86ISD::MCVTP2SI:           return "X86ISD::MCVTP2SI";
+  case X86ISD::MCVTP2UI:           return "X86ISD::MCVTP2UI";
   case X86ISD::CVTP2SI_RND:        return "X86ISD::CVTP2SI_RND";
   case X86ISD::CVTP2UI_RND:        return "X86ISD::CVTP2UI_RND";
   case X86ISD::CVTS2SI:            return "X86ISD::CVTS2SI";
@@ -29978,11 +30092,12 @@
     EVT SrcVT = Src.getValueType();
     APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
                                             Op.getConstantOperandVal(1));
-    DAG.computeKnownBits(Src, Known, DemandedElt, Depth + 1);
+    Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
     Known = Known.zextOrTrunc(BitWidth);
     Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
     break;
   }
+  case X86ISD::VSRAI:
   case X86ISD::VSHLI:
   case X86ISD::VSRLI: {
     if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
@@ -29991,18 +30106,21 @@
         break;
       }
 
-      DAG.computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
+      Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
       unsigned ShAmt = ShiftImm->getZExtValue();
       if (Opc == X86ISD::VSHLI) {
         Known.Zero <<= ShAmt;
         Known.One <<= ShAmt;
         // Low bits are known zero.
         Known.Zero.setLowBits(ShAmt);
-      } else {
+      } else if (Opc == X86ISD::VSRLI) {
         Known.Zero.lshrInPlace(ShAmt);
         Known.One.lshrInPlace(ShAmt);
         // High bits are known zero.
         Known.Zero.setHighBits(ShAmt);
+      } else {
+        Known.Zero.ashrInPlace(ShAmt);
+        Known.One.ashrInPlace(ShAmt);
       }
     }
     break;
@@ -30017,12 +30135,12 @@
 
     KnownBits Known2;
     if (!!DemandedLHS) {
-      DAG.computeKnownBits(Op.getOperand(0), Known2, DemandedLHS, Depth + 1);
+      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
       Known.One &= Known2.One;
       Known.Zero &= Known2.Zero;
     }
     if (!!DemandedRHS) {
-      DAG.computeKnownBits(Op.getOperand(1), Known2, DemandedRHS, Depth + 1);
+      Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
       Known.One &= Known2.One;
       Known.Zero &= Known2.Zero;
     }
@@ -30033,12 +30151,11 @@
     break;
   }
   case X86ISD::CMOV: {
-    DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
+    Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
     // If we don't know any bits, early out.
     if (Known.isUnknown())
       break;
-    KnownBits Known2;
-    DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);
+    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
 
     // Only known if known in both the LHS and RHS.
     Known.One &= Known2.One;
@@ -30089,8 +30206,8 @@
         for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
           if (!DemandedOps[i])
             continue;
-          KnownBits Known2;
-          DAG.computeKnownBits(Ops[i], Known2, DemandedOps[i], Depth + 1);
+          KnownBits Known2 =
+              DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
           Known.One &= Known2.One;
           Known.Zero &= Known2.Zero;
         }
@@ -30485,9 +30602,10 @@
     }
   }
 
-  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
-  // TODO add support for 256/512-bit types.
-  if ((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) {
+  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
+  if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
+      ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
+      ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
     if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
                                    Subtarget)) {
       DstVT = MaskVT;
@@ -32260,6 +32378,52 @@
       return SDValue(N, 0);
   }
 
+  // Look for a truncating shuffle to v2i32 of a PMULUDQ where one of the
+  // operands is an extend from v2i32 to v2i64. Turn it into a pmulld.
+  // FIXME: This can probably go away once we default to widening legalization.
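+  // That is, roughly:
+  //   (v4i32 shuffle<0,2,u,u>
+  //     (bitcast (pmuludq (bitcast (v4i32 shuffle<0,u,1,u> X)), Y)))
+  //   --> (v4i32 mul X, (v4i32 shuffle<0,2,u,u> (bitcast Y)))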
+  if (Subtarget.hasSSE41() && VT == MVT::v4i32 &&
+      N->getOpcode() == ISD::VECTOR_SHUFFLE &&
+      N->getOperand(0).getOpcode() == ISD::BITCAST &&
+      N->getOperand(0).getOperand(0).getOpcode() == X86ISD::PMULUDQ) {
+    SDValue BC = N->getOperand(0);
+    SDValue MULUDQ = BC.getOperand(0);
+    ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+    ArrayRef<int> Mask = SVOp->getMask();
+    if (BC.hasOneUse() && MULUDQ.hasOneUse() &&
+        Mask[0] == 0 && Mask[1] == 2 && Mask[2] == -1 && Mask[3] == -1) {
+      SDValue Op0 = MULUDQ.getOperand(0);
+      SDValue Op1 = MULUDQ.getOperand(1);
+      if (Op0.getOpcode() == ISD::BITCAST &&
+          Op0.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
+          Op0.getOperand(0).getValueType() == MVT::v4i32) {
+        ShuffleVectorSDNode *SVOp0 =
+          cast<ShuffleVectorSDNode>(Op0.getOperand(0));
+        ArrayRef<int> Mask2 = SVOp0->getMask();
+        if (Mask2[0] == 0 && Mask2[1] == -1 &&
+            Mask2[2] == 1 && Mask2[3] == -1) {
+          Op0 = SVOp0->getOperand(0);
+          Op1 = DAG.getBitcast(MVT::v4i32, Op1);
+          Op1 = DAG.getVectorShuffle(MVT::v4i32, dl, Op1, Op1, Mask);
+          return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
+        }
+      }
+      if (Op1.getOpcode() == ISD::BITCAST &&
+          Op1.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
+          Op1.getOperand(0).getValueType() == MVT::v4i32) {
+        ShuffleVectorSDNode *SVOp1 =
+          cast<ShuffleVectorSDNode>(Op1.getOperand(0));
+        ArrayRef<int> Mask2 = SVOp1->getMask();
+        if (Mask2[0] == 0 && Mask2[1] == -1 &&
+            Mask2[2] == 1 && Mask2[3] == -1) {
+          Op0 = DAG.getBitcast(MVT::v4i32, Op0);
+          Op0 = DAG.getVectorShuffle(MVT::v4i32, dl, Op0, Op0, Mask);
+          Op1 = SVOp1->getOperand(0);
+          return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
+        }
+      }
+    }
+  }
+
   return SDValue();
 }
 
@@ -32427,8 +32591,10 @@
 }
 
 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
-    SDValue Op, const APInt &OriginalDemandedBits, KnownBits &Known,
-    TargetLoweringOpt &TLO, unsigned Depth) const {
+    SDValue Op, const APInt &OriginalDemandedBits,
+    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
+    unsigned Depth) const {
+  EVT VT = Op.getValueType();
   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
   unsigned Opc = Op.getOpcode();
   switch(Opc) {
@@ -32447,44 +32613,122 @@
     break;
   }
   case X86ISD::VSHLI: {
-    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+    SDValue Op0 = Op.getOperand(0);
+    SDValue Op1 = Op.getOperand(1);
+
+    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
       if (ShiftImm->getAPIntValue().uge(BitWidth))
         break;
 
-      KnownBits KnownOp;
       unsigned ShAmt = ShiftImm->getZExtValue();
       APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
-      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownOp, TLO,
-                               Depth + 1))
+
+      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
+      // single shift.  We can do this if the bottom bits (which are shifted
+      // out) are never demanded.
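+      // That is, when the low ShAmt bits of the result are not demanded:
+      //   C1 == ShAmt --> X
+      //   C1 <  ShAmt --> (X << (ShAmt - C1))
+      //   C1 >  ShAmt --> (X >>u (C1 - ShAmt))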
+      if (Op0.getOpcode() == X86ISD::VSRLI &&
+          OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
+        if (auto *Shift2Imm = dyn_cast<ConstantSDNode>(Op0.getOperand(1))) {
+          if (Shift2Imm->getAPIntValue().ult(BitWidth)) {
+            int Diff = ShAmt - Shift2Imm->getZExtValue();
+            if (Diff == 0)
+              return TLO.CombineTo(Op, Op0.getOperand(0));
+
+            unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
+            SDValue NewShift = TLO.DAG.getNode(
+                NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
+                TLO.DAG.getConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
+            return TLO.CombineTo(Op, NewShift);
+          }
+        }
+      }
+
+      if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
+                               TLO, Depth + 1))
         return true;
+
+      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
+      Known.Zero <<= ShAmt;
+      Known.One <<= ShAmt;
+
+      // Low bits known zero.
+      Known.Zero.setLowBits(ShAmt);
     }
     break;
   }
-  case X86ISD::VSRAI:
   case X86ISD::VSRLI: {
     if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       if (ShiftImm->getAPIntValue().uge(BitWidth))
         break;
 
-      KnownBits KnownOp;
       unsigned ShAmt = ShiftImm->getZExtValue();
       APInt DemandedMask = OriginalDemandedBits << ShAmt;
 
+      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
+                               OriginalDemandedElts, Known, TLO, Depth + 1))
+        return true;
+
+      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
+      Known.Zero.lshrInPlace(ShAmt);
+      Known.One.lshrInPlace(ShAmt);
+
+      // High bits known zero.
+      Known.Zero.setHighBits(ShAmt);
+    }
+    break;
+  }
+  case X86ISD::VSRAI: {
+    SDValue Op0 = Op.getOperand(0);
+    SDValue Op1 = Op.getOperand(1);
+
+    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
+      if (ShiftImm->getAPIntValue().uge(BitWidth))
+        break;
+
+      unsigned ShAmt = ShiftImm->getZExtValue();
+      APInt DemandedMask = OriginalDemandedBits << ShAmt;
+
+      // If we just want the sign bit then we don't need to shift it.
+      if (OriginalDemandedBits.isSignMask())
+        return TLO.CombineTo(Op, Op0);
+
+      // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
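+      // (The shl/sra pair acts as a sign_extend_inreg, which is a no-op when
+      // X already has more than C1 sign bits.)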
+      if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) {
+        SDValue Op00 = Op0.getOperand(0);
+        unsigned NumSignBits =
+            TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
+        if (ShAmt < NumSignBits)
+          return TLO.CombineTo(Op, Op00);
+      }
+
       // If any of the demanded bits are produced by the sign extension, we also
       // demand the input sign bit.
-      if (Opc == X86ISD::VSRAI &&
-          OriginalDemandedBits.countLeadingZeros() < ShAmt)
+      if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
         DemandedMask.setSignBit();
 
-      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownOp, TLO,
-                               Depth + 1))
+      if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
+                               TLO, Depth + 1))
         return true;
+
+      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
+      Known.Zero.lshrInPlace(ShAmt);
+      Known.One.lshrInPlace(ShAmt);
+
+      // If the input sign bit is known to be zero, or if none of the top bits
+      // are demanded, turn this into an unsigned shift right.
+      if (Known.Zero[BitWidth - ShAmt - 1] ||
+          OriginalDemandedBits.countLeadingZeros() >= ShAmt)
+        return TLO.CombineTo(
+            Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
+
+      // High bits are known one.
+      if (Known.One[BitWidth - ShAmt - 1])
+        Known.One.setHighBits(ShAmt);
     }
     break;
   }
   case X86ISD::MOVMSK: {
     SDValue Src = Op.getOperand(0);
-    MVT VT = Op.getSimpleValueType();
     MVT SrcVT = Src.getSimpleValueType();
     unsigned SrcBits = SrcVT.getScalarSizeInBits();
     unsigned NumElts = SrcVT.getVectorNumElements();
@@ -32505,8 +32749,8 @@
 
     // MOVMSK only uses the MSB from each vector element.
     KnownBits KnownSrc;
-    if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), KnownSrc, TLO,
-                             Depth + 1))
+    if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
+                             KnownSrc, TLO, Depth + 1))
       return true;
 
     if (KnownSrc.One[SrcBits - 1])
@@ -32514,11 +32758,11 @@
     else if (KnownSrc.Zero[SrcBits - 1])
       Known.Zero.setLowBits(NumElts);
     return false;
-   }
+  }
   }
 
   return TargetLowering::SimplifyDemandedBitsForTargetNode(
-      Op, OriginalDemandedBits, Known, TLO, Depth);
+      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
 }
 
 /// Check if a vector extract from a target-specific shuffle of a load can be
@@ -32639,9 +32883,18 @@
   if (!VT.isScalarInteger() || !VecVT.isSimple())
     return SDValue();
 
+  // If the input is a truncate from v16i8, v32i8 or v64i8, go ahead and use a
+  // movmskb even with avx512. This will be better than truncating to vXi1 and
+  // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
+  // vpcmpeqb/vpcmpgtb.
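+  // For example, (i16 bitcast (v16i1 trunc (v16i8 vpcmpeqb X, Y))) becomes a
+  // single pmovmskb instead of a vpmovb2m plus kmovw.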
+  bool IsTruncated = N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
+                     (N0.getOperand(0).getValueType() == MVT::v16i8 ||
+                      N0.getOperand(0).getValueType() == MVT::v32i8 ||
+                      N0.getOperand(0).getValueType() == MVT::v64i8);
+
   // With AVX512 vxi1 types are legal and we prefer using k-regs.
   // MOVMSK is supported in SSE2 or later.
-  if (Subtarget.hasAVX512() || !Subtarget.hasSSE2())
+  if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
     return SDValue();
 
   // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
@@ -32693,12 +32946,30 @@
   case MVT::v32i1:
     SExtVT = MVT::v32i8;
     break;
+  case MVT::v64i1:
+    // If we have AVX512F but not AVX512BW, and the input was truncated from
+    // v64i8 (checked earlier), split the input and emit two pmovmskbs.
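+    // The low pmovmskb provides bits 0-31 of the i64 result; the high one,
+    // shifted left by 32, provides bits 32-63.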
+    if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
+      SExtVT = MVT::v64i8;
+      break;
+    }
+    return SDValue();
   };
 
   SDLoc DL(BitCast);
   SDValue V = DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, N0);
 
-  if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) {
+  if (SExtVT == MVT::v64i8) {
+    SDValue Lo, Hi;
+    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
+    Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
+    Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
+    Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
+    Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
+    Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
+                     DAG.getConstant(32, DL, MVT::i8));
+    V = DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
+  } else if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) {
     V = getPMOVMSKB(DL, V, DAG, Subtarget);
   } else {
     if (SExtVT == MVT::v8i16)
@@ -33399,6 +33670,15 @@
       scaleShuffleMask<int>(Scale, Mask, ScaledMask);
       Mask = std::move(ScaledMask);
     } else if ((Mask.size() % NumSrcElts) == 0) {
+      // Simplify Mask based on the demanded element.
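+      // For example, with Mask.size() == 16, NumSrcElts == 4 and
+      // ExtractIdx == 1, only mask elements [4,8) can affect the result; the
+      // rest become undef, which helps the widening loop below.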
+      int ExtractIdx = (int)N->getConstantOperandVal(1);
+      int Scale = Mask.size() / NumSrcElts;
+      int Lo = Scale * ExtractIdx;
+      int Hi = Scale * (ExtractIdx + 1);
+      for (int i = 0, e = (int)Mask.size(); i != e; ++i)
+        if (i < Lo || Hi <= i)
+          Mask[i] = SM_SentinelUndef;
+
       SmallVector<int, 16> WidenedMask;
       while (Mask.size() > NumSrcElts &&
              canWidenShuffleElements(Mask, WidenedMask))
@@ -33693,11 +33973,14 @@
 /// If this is a *dynamic* select (non-constant condition) and we can match
 /// this node with one of the variable blend instructions, restructure the
 /// condition so that blends can use the high (sign) bit of each element.
-static SDValue combineVSelectToShrunkBlend(SDNode *N, SelectionDAG &DAG,
+/// This function will also call SimplifyDemandedBits on already-created
+/// BLENDV nodes to perform additional simplifications.
+static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
                                            TargetLowering::DAGCombinerInfo &DCI,
                                            const X86Subtarget &Subtarget) {
   SDValue Cond = N->getOperand(0);
-  if (N->getOpcode() != ISD::VSELECT ||
+  if ((N->getOpcode() != ISD::VSELECT &&
+       N->getOpcode() != X86ISD::BLENDV) ||
       ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
     return SDValue();
 
@@ -33739,7 +34022,9 @@
   // TODO: Add other opcodes eventually lowered into BLEND.
   for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
        UI != UE; ++UI)
-    if (UI->getOpcode() != ISD::VSELECT || UI.getOperandNo() != 0)
+    if ((UI->getOpcode() != ISD::VSELECT &&
+         UI->getOpcode() != X86ISD::BLENDV) ||
+        UI.getOperandNo() != 0)
       return SDValue();
 
   APInt DemandedMask(APInt::getSignMask(BitWidth));
@@ -33755,9 +34040,13 @@
   // optimizations as we messed with the actual expectation for the vector
   // boolean values.
   for (SDNode *U : Cond->uses()) {
-    SDValue SB = DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(U), U->getValueType(0),
+    if (U->getOpcode() == X86ISD::BLENDV)
+      continue;
+
+    SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
                              Cond, U->getOperand(1), U->getOperand(2));
     DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
+    DCI.AddToWorklist(U);
   }
   DCI.CommitTargetLoweringOpt(TLO);
   return SDValue(N, 0);
@@ -33773,7 +34062,7 @@
   SDValue RHS = N->getOperand(2);
 
   // Try simplification again because we use this function to optimize
-  // SHRUNKBLEND nodes that are not handled by the generic combiner.
+  // BLENDV nodes that are not handled by the generic combiner.
   if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
     return V;
 
@@ -34037,33 +34326,27 @@
       SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
       SDValue CondRHS = Cond->getOperand(1);
 
-      auto SUBUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
-                             ArrayRef<SDValue> Ops) {
-        return DAG.getNode(X86ISD::SUBUS, DL, Ops[0].getValueType(), Ops);
-      };
-
       // Look for a general sub with unsigned saturation first.
       // x >= y ? x-y : 0 --> subus x, y
       // x >  y ? x-y : 0 --> subus x, y
       if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
           Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
-        return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                                SUBUSBuilder);
+        return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
 
       if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
         if (isa<BuildVectorSDNode>(CondRHS)) {
           // If the RHS is a constant we have to reverse the const
           // canonicalization.
           // x > C-1 ? x+-C : 0 --> subus x, C
-          auto MatchSUBUS = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
+          // TODO: Handle build_vectors with undef elements.
+          auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
             return Cond->getAPIntValue() == (-Op->getAPIntValue() - 1);
           };
           if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
-              ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchSUBUS)) {
+              ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT)) {
             OpRHS = DAG.getNode(ISD::SUB, DL, VT,
                                 DAG.getConstant(0, DL, VT), OpRHS);
-            return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                                    SUBUSBuilder);
+            return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
           }
 
           // Another special case: If C was a sign bit, the sub has been
@@ -34075,11 +34358,10 @@
             if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
                 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
                 OpRHSConst->getAPIntValue().isSignMask()) {
-              OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
               // Note that we have to rebuild the RHS constant here to ensure we
               // don't rely on particular values of undef lanes.
-              return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                                      SUBUSBuilder);
+              OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
+              return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
             }
           }
         }
@@ -34112,11 +34394,6 @@
     if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
       SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
 
-      auto ADDUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
-                             ArrayRef<SDValue> Ops) {
-        return DAG.getNode(X86ISD::ADDUS, DL, Ops[0].getValueType(), Ops);
-      };
-
       // Canonicalize condition operands.
       if (CC == ISD::SETUGE) {
         std::swap(CondLHS, CondRHS);
@@ -34128,21 +34405,19 @@
       // x+y >= x ? x+y : ~0 --> addus x, y
       if (CC == ISD::SETULE && Other == CondRHS &&
           (OpLHS == CondLHS || OpRHS == CondLHS))
-        return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                                ADDUSBuilder);
+        return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
 
       if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
           CondLHS == OpLHS) {
         // If the RHS is a constant we have to reverse the const
         // canonicalization.
         // x > ~C ? x+C : ~0 --> addus x, C
-        auto MatchADDUS = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
+        auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
           return Cond->getAPIntValue() == ~Op->getAPIntValue();
         };
         if (CC == ISD::SETULE &&
-            ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchADDUS))
-          return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                                  ADDUSBuilder);
+            ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
+          return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
       }
     }
   }
@@ -34154,7 +34429,7 @@
   if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
     return V;
 
-  if (SDValue V = combineVSelectToShrunkBlend(N, DAG, DCI, Subtarget))
+  if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
     return V;
 
   // Custom action for SELECT MMX
@@ -34236,16 +34511,7 @@
         /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
         /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
         AN->getMemOperand());
-    // If the comparision uses the CF flag we can't use INC/DEC instructions.
-    bool NeedCF = false;
-    switch (CC) {
-    default: break;
-    case X86::COND_A: case X86::COND_AE:
-    case X86::COND_B: case X86::COND_BE:
-      NeedCF = true;
-      break;
-    }
-    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget, !NeedCF);
+    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
                                   DAG.getUNDEF(CmpLHS.getValueType()));
     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
@@ -34807,86 +35073,85 @@
     // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
     // lower part is needed.
     SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
-    if (Mode == MULU8 || Mode == MULS8) {
+    if (Mode == MULU8 || Mode == MULS8)
       return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
                          DL, VT, MulLo);
-    } else {
-      MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
-      // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
-      // the higher part is also needed.
-      SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
-                                  ReducedVT, NewN0, NewN1);
 
-      // Repack the lower part and higher part result of mul into a wider
-      // result.
-      // Generate shuffle functioning as punpcklwd.
-      SmallVector<int, 16> ShuffleMask(NumElts);
-      for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
-        ShuffleMask[2 * i] = i;
-        ShuffleMask[2 * i + 1] = i + NumElts;
-      }
-      SDValue ResLo =
-          DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
-      ResLo = DAG.getBitcast(ResVT, ResLo);
-      // Generate shuffle functioning as punpckhwd.
-      for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
-        ShuffleMask[2 * i] = i + NumElts / 2;
-        ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
-      }
-      SDValue ResHi =
-          DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
-      ResHi = DAG.getBitcast(ResVT, ResHi);
-      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
+    MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
+    // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
+    // the higher part is also needed.
+    SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
+                                ReducedVT, NewN0, NewN1);
+
+    // Repack the lower part and higher part result of mul into a wider
+    // result.
+    // Generate shuffle functioning as punpcklwd.
+    SmallVector<int, 16> ShuffleMask(NumElts);
+    for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
+      ShuffleMask[2 * i] = i;
+      ShuffleMask[2 * i + 1] = i + NumElts;
     }
-  } else {
-    // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
-    // to legalize the mul explicitly because implicit legalization for type
-    // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
-    // instructions which will not exist when we explicitly legalize it by
-    // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
-    // <4 x i16> undef).
-    //
-    // Legalize the operands of mul.
-    // FIXME: We may be able to handle non-concatenated vectors by insertion.
-    unsigned ReducedSizeInBits = ReducedVT.getSizeInBits();
-    if ((RegSize % ReducedSizeInBits) != 0)
-      return SDValue();
-
-    SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits,
-                                 DAG.getUNDEF(ReducedVT));
-    Ops[0] = NewN0;
-    NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
-    Ops[0] = NewN1;
-    NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
-
-    if (Mode == MULU8 || Mode == MULS8) {
-      // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
-      // part is needed.
-      SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
-
-      // convert the type of mul result to VT.
-      MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
-      SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
-                                              : ISD::SIGN_EXTEND_VECTOR_INREG,
-                                DL, ResVT, Mul);
-      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
-                         DAG.getIntPtrConstant(0, DL));
-    } else {
-      // Generate the lower and higher part of mul: pmulhw/pmulhuw. For
-      // MULU16/MULS16, both parts are needed.
-      SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
-      SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
-                                  OpsVT, NewN0, NewN1);
-
-      // Repack the lower part and higher part result of mul into a wider
-      // result. Make sure the type of mul result is VT.
-      MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
-      SDValue Res = getUnpackl(DAG, DL, OpsVT, MulLo, MulHi);
-      Res = DAG.getBitcast(ResVT, Res);
-      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
-                         DAG.getIntPtrConstant(0, DL));
+    SDValue ResLo =
+        DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
+    ResLo = DAG.getBitcast(ResVT, ResLo);
+    // Generate shuffle functioning as punpckhwd.
+    for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
+      ShuffleMask[2 * i] = i + NumElts / 2;
+      ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
     }
+    SDValue ResHi =
+        DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
+    ResHi = DAG.getBitcast(ResVT, ResHi);
+    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
   }
+
+  // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
+  // to legalize the mul explicitly because implicit legalization for type
+  // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
+  // instructions which will not exist when we explicitly legalize it by
+  // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
+  // <4 x i16> undef).
+  //
+  // Legalize the operands of mul.
+  // FIXME: We may be able to handle non-concatenated vectors by insertion.
+  unsigned ReducedSizeInBits = ReducedVT.getSizeInBits();
+  if ((RegSize % ReducedSizeInBits) != 0)
+    return SDValue();
+
+  SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits,
+                               DAG.getUNDEF(ReducedVT));
+  Ops[0] = NewN0;
+  NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
+  Ops[0] = NewN1;
+  NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
+
+  if (Mode == MULU8 || Mode == MULS8) {
+    // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
+    // part is needed.
+    SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
+
+    // Convert the type of the mul result to VT.
+    MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+    SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
+                                            : ISD::SIGN_EXTEND_VECTOR_INREG,
+                              DL, ResVT, Mul);
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+                       DAG.getIntPtrConstant(0, DL));
+  }
+
+  // Generate the lower and higher part of mul: pmulhw/pmulhuw. For
+  // MULU16/MULS16, both parts are needed.
+  SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
+  SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
+                              OpsVT, NewN0, NewN1);
+
+  // Repack the lower part and higher part result of mul into a wider
+  // result. Make sure the type of mul result is VT.
+  MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+  SDValue Res = getUnpackl(DAG, DL, OpsVT, MulLo, MulHi);
+  Res = DAG.getBitcast(ResVT, Res);
+  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+                     DAG.getIntPtrConstant(0, DL));
 }
 
 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
@@ -35035,7 +35300,8 @@
 
   // Only support vXi64 vectors.
   if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
-      !DAG.getTargetLoweringInfo().isTypeLegal(VT))
+      VT.getVectorNumElements() < 2 ||
+      !isPowerOf2_32(VT.getVectorNumElements()))
     return SDValue();
 
   SDValue N0 = N->getOperand(0);
@@ -35074,26 +35340,6 @@
                           const X86Subtarget &Subtarget) {
   EVT VT = N->getValueType(0);
 
-  // Look for multiply of 2 identical shuffles with a zero vector. Shuffle the
-  // result and insert the zero there instead. This can occur due to
-  // type legalization of v2i32 multiply to a PMULUDQ pattern.
-  SDValue LHS = N->getOperand(0);
-  SDValue RHS = N->getOperand(1);
-  if (!DCI.isBeforeLegalize() && isa<ShuffleVectorSDNode>(LHS) &&
-      isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
-      LHS.getOperand(1) == RHS.getOperand(1) &&
-      ISD::isBuildVectorAllZeros(LHS.getOperand(1).getNode())) {
-    ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
-    ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);
-    if (SVN0->getMask().equals(SVN1->getMask())) {
-      SDLoc dl(N);
-      SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, LHS.getOperand(0),
-                                RHS.getOperand(0));
-      return DAG.getVectorShuffle(VT, dl, Mul, DAG.getConstant(0, dl, VT),
-                                  SVN0->getMask());
-    }
-  }
-
   if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
     return V;
 
@@ -35569,22 +35815,6 @@
   if (ISD::isBuildVectorAllZeros(N0.getNode()))
     return DAG.getConstant(0, SDLoc(N), VT);
 
-  // fold (VSRLI (VSRAI X, Y), 31) -> (VSRLI X, 31).
-  // This VSRLI only looks at the sign bit, which is unmodified by VSRAI.
-  // TODO - support other sra opcodes as needed.
-  if (Opcode == X86ISD::VSRLI && (ShiftVal + 1) == NumBitsPerElt &&
-      N0.getOpcode() == X86ISD::VSRAI)
-    return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, N0.getOperand(0), N1);
-
-  // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
-  if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSHLI &&
-      N1 == N0.getOperand(1)) {
-    SDValue N00 = N0.getOperand(0);
-    unsigned NumSignBits = DAG.ComputeNumSignBits(N00);
-    if (ShiftVal < NumSignBits)
-      return N00;
-  }
-
   // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
   // clamped to (NumBitsPerElt - 1).
   if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
@@ -36489,6 +36719,7 @@
 
   // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
   bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+  unsigned Bits = VT.getScalarSizeInBits();
 
   // SHLD/SHRD instructions have lower register pressure, but on some
   // platforms they have higher latency than the equivalent
@@ -36511,6 +36742,23 @@
   SDValue ShAmt1 = N1.getOperand(1);
   if (ShAmt1.getValueType() != MVT::i8)
     return SDValue();
+
+  // Peek through any modulo shift masks.
+  SDValue ShMsk0;
+  if (ShAmt0.getOpcode() == ISD::AND &&
+      isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
+      ShAmt0.getConstantOperandVal(1) == (Bits - 1)) {
+    ShMsk0 = ShAmt0;
+    ShAmt0 = ShAmt0.getOperand(0);
+  }
+  SDValue ShMsk1;
+  if (ShAmt1.getOpcode() == ISD::AND &&
+      isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
+      ShAmt1.getConstantOperandVal(1) == (Bits - 1)) {
+    ShMsk1 = ShAmt1;
+    ShAmt1 = ShAmt1.getOperand(0);
+  }
+
   if (ShAmt0.getOpcode() == ISD::TRUNCATE)
     ShAmt0 = ShAmt0.getOperand(0);
   if (ShAmt1.getOpcode() == ISD::TRUNCATE)
@@ -36525,27 +36773,29 @@
     Opc = X86ISD::SHRD;
     std::swap(Op0, Op1);
     std::swap(ShAmt0, ShAmt1);
+    std::swap(ShMsk0, ShMsk1);
   }
 
   // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> SHLD( X, Y, C )
   // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> SHRD( X, Y, C )
   // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> SHLD( X, Y, C )
   // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> SHRD( X, Y, C )
-  unsigned Bits = VT.getSizeInBits();
+  // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> SHLD( X, Y, C )
+  // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> SHRD( X, Y, C )
   if (ShAmt1.getOpcode() == ISD::SUB) {
     SDValue Sum = ShAmt1.getOperand(0);
-    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
+    if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
       SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
       if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
         ShAmt1Op1 = ShAmt1Op1.getOperand(0);
-      if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
-        return DAG.getNode(Opc, DL, VT,
-                           Op0, Op1,
-                           DAG.getNode(ISD::TRUNCATE, DL,
-                                       MVT::i8, ShAmt0));
+      if ((SumC->getAPIntValue() == Bits ||
+           (SumC->getAPIntValue() == 0 && ShMsk1)) &&
+          ShAmt1Op1 == ShAmt0)
+        return DAG.getNode(Opc, DL, VT, Op0, Op1,
+                           DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
     }
-  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
-    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
+  } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
+    auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
     if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
       return DAG.getNode(Opc, DL, VT,
                          N0.getOperand(0), N1.getOperand(0),
@@ -36553,12 +36803,13 @@
                                        MVT::i8, ShAmt0));
   } else if (ShAmt1.getOpcode() == ISD::XOR) {
     SDValue Mask = ShAmt1.getOperand(1);
-    if (ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
+    if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
       unsigned InnerShift = (X86ISD::SHLD == Opc ? ISD::SRL : ISD::SHL);
       SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
       if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
         ShAmt1Op0 = ShAmt1Op0.getOperand(0);
-      if (MaskC->getSExtValue() == (Bits - 1) && ShAmt1Op0 == ShAmt0) {
+      if (MaskC->getSExtValue() == (Bits - 1) &&
+          (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
         if (Op1.getOpcode() == InnerShift &&
             isa<ConstantSDNode>(Op1.getOperand(1)) &&
             Op1.getConstantOperandVal(1) == 1) {
@@ -36569,7 +36820,7 @@
         if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
             Op1.getOperand(0) == Op1.getOperand(1)) {
           return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0),
-                     DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
+                             DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
         }
       }
     }
@@ -36840,6 +37091,7 @@
       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
   }
   if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) &&
+      !Subtarget.hasAVX512() &&
       (SVT == MVT::i8 || SVT == MVT::i16) &&
       (InSVT == MVT::i16 || InSVT == MVT::i32)) {
     if (auto USatVal = detectSSatPattern(In, VT, true)) {
@@ -37823,16 +38075,6 @@
   return true;
 }
 
-/// Horizontal vector math instructions may be slower than normal math with
-/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
-/// implementation, and likely shuffle complexity of the alternate sequence.
-static bool shouldCombineToHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
-                                        const X86Subtarget &Subtarget) {
-  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
-  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
-  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
-}
-
 /// Do target-specific dag combines on floating-point adds/subs.
 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
@@ -37840,16 +38082,16 @@
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
   bool IsFadd = N->getOpcode() == ISD::FADD;
+  auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
   assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
 
   // Try to synthesize horizontal add/sub from adds/subs of shuffles.
   if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
        (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
       isHorizontalBinOp(LHS, RHS, IsFadd) &&
-      shouldCombineToHorizontalOp(LHS == RHS, DAG, Subtarget)) {
-    auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
-    return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
-  }
+      shouldUseHorizontalOp(LHS == RHS, DAG, Subtarget))
+    return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
+
   return SDValue();
 }
 
@@ -38076,8 +38318,7 @@
 
   // Use PACKUS if the input has zero-bits that extend all the way to the
   // packed/truncated value. e.g. masks, zext_in_reg, etc.
-  KnownBits Known;
-  DAG.computeKnownBits(In, Known);
+  KnownBits Known = DAG.computeKnownBits(In);
   unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
   if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
     return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
@@ -39943,6 +40184,159 @@
   return SDValue();
 }
 
+static bool needCarryOrOverflowFlag(SDValue Flags) {
+  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
+
+  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
+         UI != UE; ++UI) {
+    SDNode *User = *UI;
+
+    X86::CondCode CC;
+    switch (User->getOpcode()) {
+    default:
+      // Be conservative.
+      return true;
+    case X86ISD::SETCC:
+    case X86ISD::SETCC_CARRY:
+      CC = (X86::CondCode)User->getConstantOperandVal(0);
+      break;
+    case X86ISD::BRCOND:
+      CC = (X86::CondCode)User->getConstantOperandVal(2);
+      break;
+    case X86ISD::CMOV:
+      CC = (X86::CondCode)User->getConstantOperandVal(2);
+      break;
+    }
+
+    switch (CC) {
+    default: break;
+    case X86::COND_A: case X86::COND_AE:
+    case X86::COND_B: case X86::COND_BE:
+    case X86::COND_O: case X86::COND_NO:
+    case X86::COND_G: case X86::COND_GE:
+    case X86::COND_L: case X86::COND_LE:
+      return true;
+    }
+  }
+
+  return false;
+}
+
+static bool onlyZeroFlagUsed(SDValue Flags) {
+  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
+
+  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
+         UI != UE; ++UI) {
+    SDNode *User = *UI;
+
+    unsigned CCOpNo;
+    switch (User->getOpcode()) {
+    default:
+      // Be conservative.
+      return false;
+    case X86ISD::SETCC:       CCOpNo = 0; break;
+    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
+    case X86ISD::BRCOND:      CCOpNo = 2; break;
+    case X86ISD::CMOV:        CCOpNo = 2; break;
+    }
+
+    X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
+    if (CC != X86::COND_E && CC != X86::COND_NE)
+      return false;
+  }
+
+  return true;
+}
+
+static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
+  // Only handle test patterns.
+  if (!isNullConstant(N->getOperand(1)))
+    return SDValue();
+
+  // If we have a CMP of a truncated binop, see if we can make a smaller binop
+  // and use its flags directly.
+  // TODO: Maybe we should try promoting compares that only use the zero flag
+  // first if we can prove the upper bits with computeKnownBits?
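+  // For example, (cmp (i8 trunc (i32 add X, Y)), 0) sets ZF the same way as
+  // a narrow (i8 add (trunc X), (trunc Y)), so if only flags computed from
+  // the low byte are needed we can use the narrow op's flags directly.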
+  SDLoc dl(N);
+  SDValue Op = N->getOperand(0);
+  EVT VT = Op.getValueType();
+
+  // If we have a constant logical shift that's only used in a comparison
+  // against zero turn it into an equivalent AND. This allows turning it into
+  // a TEST instruction later.
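+  // For example, for i32:
+  //   (cmp (srl X, 5), 0) --> (cmp (and X, 0xffffffe0), 0)
+  //   (cmp (shl X, 5), 0) --> (cmp (and X, 0x07ffffff), 0)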
+  if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
+      Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
+      onlyZeroFlagUsed(SDValue(N, 0))) {
+    unsigned BitWidth = VT.getSizeInBits();
+    unsigned ShAmt = Op.getConstantOperandVal(1);
+    if (ShAmt < BitWidth) { // Avoid undefined shifts.
+      APInt Mask = Op.getOpcode() == ISD::SRL
+                       ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
+                       : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
+      if (Mask.isSignedIntN(32)) {
+        Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
+                         DAG.getConstant(Mask, dl, VT));
+        return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+                           DAG.getConstant(0, dl, VT));
+      }
+    }
+  }
+
+  // Look for a truncate with a single use.
+  if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
+    return SDValue();
+
+  Op = Op.getOperand(0);
+
+  // Arithmetic op can only have one use.
+  if (!Op.hasOneUse())
+    return SDValue();
+
+  unsigned NewOpc;
+  switch (Op.getOpcode()) {
+  default: return SDValue();
+  case ISD::AND:
+    // Skip and with constant. We have special handling for and with immediate
+    // during isel to generate test instructions.
+    if (isa<ConstantSDNode>(Op.getOperand(1)))
+      return SDValue();
+    NewOpc = X86ISD::AND;
+    break;
+  case ISD::OR:  NewOpc = X86ISD::OR;  break;
+  case ISD::XOR: NewOpc = X86ISD::XOR; break;
+  case ISD::ADD:
+    // If the carry or overflow flag is used, we can't truncate.
+    if (needCarryOrOverflowFlag(SDValue(N, 0)))
+      return SDValue();
+    NewOpc = X86ISD::ADD;
+    break;
+  case ISD::SUB:
+    // If the carry or overflow flag is used, we can't truncate.
+    if (needCarryOrOverflowFlag(SDValue(N, 0)))
+      return SDValue();
+    NewOpc = X86ISD::SUB;
+    break;
+  }
+
+  // We found an op we can narrow. Truncate its inputs.
+  SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
+  SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
+
+  // Use a X86 specific opcode to avoid DAG combine messing with it.
+  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
+  Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
+
+  // For AND, keep a CMP so that we can match the test pattern.
+  if (NewOpc == X86ISD::AND)
+    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+                       DAG.getConstant(0, dl, VT));
+
+  // Return the flags.
+  return Op.getValue(1);
+}
+
 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
   if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
     MVT VT = N->getSimpleValueType(0);
@@ -40421,6 +40815,39 @@
                           PMADDBuilder);
 }
 
+// Try to turn (add (umax X, C), -C) into (psubus X, C)
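+// This is valid because (umax X, C) - C is X - C when X >= C and 0 otherwise,
+// which is exactly an unsigned saturating subtract of C.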
+static SDValue combineAddToSUBUS(SDNode *N, SelectionDAG &DAG,
+                                 const X86Subtarget &Subtarget) {
+  if (!Subtarget.hasSSE2())
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+
+  // psubus is available in SSE2 for i8 and i16 vectors.
+  if (!VT.isVector() || VT.getVectorNumElements() < 2 ||
+      !isPowerOf2_32(VT.getVectorNumElements()) ||
+      !(VT.getVectorElementType() == MVT::i8 ||
+        VT.getVectorElementType() == MVT::i16))
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  if (Op0.getOpcode() != ISD::UMAX)
+    return SDValue();
+
+  // The add should have a constant that is the negative of the max.
+  // TODO: Handle build_vectors with undef elements.
+  auto MatchUSUBSAT = [](ConstantSDNode *Max, ConstantSDNode *Op) {
+    return Max->getAPIntValue() == (-Op->getAPIntValue());
+  };
+  if (!ISD::matchBinaryPredicate(Op0.getOperand(1), Op1, MatchUSUBSAT))
+    return SDValue();
+
+  SDLoc DL(N);
+  return DAG.getNode(ISD::USUBSAT, DL, VT, Op0.getOperand(0),
+                     Op0.getOperand(1));
+}
+
 // Attempt to turn this pattern into PMADDWD.
 // (mul (add (zext (build_vector)), (zext (build_vector))),
 //      (add (zext (build_vector)), (zext (build_vector)))
@@ -40564,7 +40991,7 @@
   if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
        VT == MVT::v8i32) &&
       Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, true) &&
-      shouldCombineToHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
+      shouldUseHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
     auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
       return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
@@ -40576,6 +41003,9 @@
   if (SDValue V = combineIncDecVector(N, DAG))
     return V;
 
+  if (SDValue V = combineAddToSUBUS(N, DAG, Subtarget))
+    return V;
+
   return combineAddOrSubToADCOrSBB(N, DAG);
 }
 
@@ -40621,23 +41051,22 @@
   } else
     return SDValue();
 
-  auto SUBUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
-                         ArrayRef<SDValue> Ops) {
-    return DAG.getNode(X86ISD::SUBUS, DL, Ops[0].getValueType(), Ops);
+  auto USUBSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+                           ArrayRef<SDValue> Ops) {
+    return DAG.getNode(ISD::USUBSAT, DL, Ops[0].getValueType(), Ops);
   };
 
   // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
   // special preprocessing in some cases.
   if (VT != MVT::v8i32 && VT != MVT::v16i32 && VT != MVT::v8i64)
     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
-                            { SubusLHS, SubusRHS }, SUBUSBuilder);
+                            { SubusLHS, SubusRHS }, USUBSATBuilder);
 
   // Special preprocessing case can be only applied
   // if the value was zero extended from 16 bit,
   // so we require first 16 bits to be zeros for 32 bit
   // values, or first 48 bits for 64 bit values.
-  KnownBits Known;
-  DAG.computeKnownBits(SubusLHS, Known);
+  KnownBits Known = DAG.computeKnownBits(SubusLHS);
   unsigned NumZeros = Known.countMinLeadingZeros();
   if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
     return SDValue();
@@ -40662,7 +41091,7 @@
   SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
   SDValue Psubus =
       SplitOpsAndApply(DAG, Subtarget, SDLoc(N), ShrinkedType,
-                       { NewSubusLHS, NewSubusRHS }, SUBUSBuilder);
+                       { NewSubusLHS, NewSubusRHS }, USUBSATBuilder);
   // Zero extend the result, it may be used somewhere as 32 bit,
   // if not zext and following trunc will shrink.
   return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
@@ -40696,7 +41125,7 @@
   if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
        VT == MVT::v8i32) &&
       Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, false) &&
-      shouldCombineToHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
+      shouldUseHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
     auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
       return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
@@ -41010,6 +41439,15 @@
         return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), MVT::v1i1,
                            Src.getOperand(0));
 
+  // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
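+  // For example:
+  //   (v1i1 scalar_to_vector (extract_vector_elt (v8i1 X), 0))
+  //   --> (v1i1 extract_subvector (v8i1 X), 0)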
+  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+      Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
+      Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
+    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
+      if (C->isNullValue())
+        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
+                           Src.getOperand(0), Src.getOperand(1));
+
   return SDValue();
 }
 
@@ -41028,6 +41466,15 @@
   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
     return RHS;
 
+  // Aggressively peek through ops to get at the demanded low bits.
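+  // PMULDQ/PMULUDQ only read the low 32 bits of each i64 element, so an op
+  // that only affects the upper bits (e.g. an and that merely clears them)
+  // can be peeked through on either operand.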
+  APInt DemandedMask = APInt::getLowBitsSet(64, 32);
+  SDValue DemandedLHS = DAG.GetDemandedBits(LHS, DemandedMask);
+  SDValue DemandedRHS = DAG.GetDemandedBits(RHS, DemandedMask);
+  if (DemandedLHS || DemandedRHS)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
+                       DemandedLHS ? DemandedLHS : LHS,
+                       DemandedRHS ? DemandedRHS : RHS);
+
   // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
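// A minimal plain-C++ sketch (illustrative only) of the fact the
// demanded-bits peek above relies on: PMULDQ/PMULUDQ read only the low
// 32 bits of each 64-bit lane, so the high halves of the inputs are
// don't-cares.
#include <cassert>
#include <cstdint>

static uint64_t pmuludq_lane(uint64_t a, uint64_t b) {
  return (uint64_t)(uint32_t)a * (uint64_t)(uint32_t)b; // 32x32->64 multiply
}

int main() {
  uint64_t a = 0xDEADBEEF00001111ULL, b = 0xFFFFFFFF00002222ULL;
  // Masking with the getLowBitsSet(64, 32) equivalent changes nothing.
  assert(pmuludq_lane(a, b) ==
         pmuludq_lane(a & 0xFFFFFFFFULL, b & 0xFFFFFFFFULL));
  return 0;
}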
@@ -41053,9 +41500,10 @@
     return combineExtractSubvector(N, DAG, DCI, Subtarget);
   case ISD::VSELECT:
   case ISD::SELECT:
-  case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
+  case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
   case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
   case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
+  case X86ISD::CMP:         return combineCMP(N, DAG);
   case ISD::ADD:            return combineAdd(N, DAG, Subtarget);
   case ISD::SUB:            return combineSub(N, DAG, Subtarget);
   case X86ISD::SBB:         return combineSBB(N, DAG);
@@ -42037,14 +42485,17 @@
   if (!Res.second) {
     // Map st(0) -> st(7) -> ST0
     if (Constraint.size() == 7 && Constraint[0] == '{' &&
-        tolower(Constraint[1]) == 's' &&
-        tolower(Constraint[2]) == 't' &&
+        tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
         Constraint[3] == '(' &&
         (Constraint[4] >= '0' && Constraint[4] <= '7') &&
-        Constraint[5] == ')' &&
-        Constraint[6] == '}')
+        Constraint[5] == ')' && Constraint[6] == '}') {
+      // st(7) is not allocatable and thus not a member of RFP80. Return a
+      // singleton register class in cases where we have a reference to it.
+      if (Constraint[4] == '7')
+        return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
       return std::make_pair(X86::FP0 + Constraint[4] - '0',
                             &X86::RFP80RegClass);
+    }
 
     // GCC allows "st(0)" to be called just plain "st".
     if (StringRef("{st}").equals_lower(Constraint))
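// A small plain-C++ sketch of the "{st(N)}" parsing logic above (illustrative
// only; parseSTConstraint is a made-up name). The digit at index 4 picks
// FP0..FP7, and st(7) now resolves to its own singleton register class.
#include <cassert>
#include <cctype>
#include <string>

static int parseSTConstraint(const std::string &C) {
  if (C.size() == 7 && C[0] == '{' && tolower(C[1]) == 's' &&
      tolower(C[2]) == 't' && C[3] == '(' && C[4] >= '0' && C[4] <= '7' &&
      C[5] == ')' && C[6] == '}')
    return C[4] - '0';
  return -1; // plain "{st}" is handled by the equals_lower check above
}

int main() {
  assert(parseSTConstraint("{st(3)}") == 3);
  assert(parseSTConstraint("{ST(7)}") == 7); // 's'/'t' match either case
  assert(parseSTConstraint("{st}") == -1);
  return 0;
}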
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 3a7078a..910acd8 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -98,7 +98,7 @@
       SETCC,
 
       /// X86 Select
-      SELECT, SELECTS,
+      SELECTS,
 
       // Same as SETCC except it's materialized with a sbb and the value is all
       // one's or all zero's.
@@ -203,8 +203,9 @@
 
       /// Dynamic (non-constant condition) vector blend where only the sign bits
       /// of the condition elements are used. This is used to enforce that the
-      /// condition mask is not valid for generic VSELECT optimizations.
-      SHRUNKBLEND,
+      /// condition mask is not valid for generic VSELECT optimizations. This
+      /// can also be used to implement the blendv intrinsics.
+      BLENDV,
 
       /// Combined add and sub on an FP vector.
       ADDSUB,
@@ -226,14 +227,6 @@
       SCALEF,
       SCALEFS,
 
-      // Integer add/sub with unsigned saturation.
-      ADDUS,
-      SUBUS,
-
-      // Integer add/sub with signed saturation.
-      ADDS,
-      SUBS,
-
       // Unsigned Integer average.
       AVG,
 
@@ -300,12 +293,22 @@
       // Vector integer truncate with unsigned/signed saturation.
       VTRUNCUS, VTRUNCS,
 
+      // Masked version of the above. Used when less than a 128-bit result is
+      // produced since the mask only applies to the lower elements and can't
+      // be represented by a select.
+      // SRC, PASSTHRU, MASK
+      VMTRUNC, VMTRUNCUS, VMTRUNCS,
+
       // Vector FP extend.
       VFPEXT, VFPEXT_RND, VFPEXTS_RND,
 
       // Vector FP round.
       VFPROUND, VFPROUND_RND, VFPROUNDS_RND,
 
+      // Masked version of above. Used for v2f64->v4f32.
+      // SRC, PASSTHRU, MASK
+      VMFPROUND,
+
       // 128-bit vector logical left / right shift
       VSHLDQ, VSRLDQ,
 
@@ -344,8 +347,8 @@
       CMPM_RND,
 
       // Arithmetic operations with FLAGS results.
-      ADD, SUB, ADC, SBB, SMUL,
-      INC, DEC, OR, XOR, AND,
+      ADD, SUB, ADC, SBB, SMUL, UMUL,
+      OR, XOR, AND,
 
       // Bit field extract.
       BEXTR,
@@ -353,12 +356,6 @@
       // Zero High Bits Starting with Specified Bit Position.
       BZHI,
 
-      // LOW, HI, FLAGS = umul LHS, RHS.
-      UMUL,
-
-      // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS.
-      SMUL8, UMUL8,
-
       // X86-specific multiply by immediate.
       MUL_IMM,
 
@@ -517,6 +514,10 @@
       // Vector signed/unsigned integer to float/double.
       CVTSI2P, CVTUI2P,
 
+      // Masked versions of above. Used for v2f64->v4i32.
+      // SRC, PASSTHRU, MASK
+      MCVTP2SI, MCVTP2UI, MCVTTP2SI, MCVTTP2UI,
+
       // Save xmm argument registers to the stack, according to %al. An operator
       // is needed so that this can be expanded with control flow.
       VASTART_SAVE_XMM_REGS,
@@ -564,6 +565,10 @@
       // Conversions between float and half-float.
       CVTPS2PH, CVTPH2PS, CVTPH2PS_RND,
 
+      // Masked version of above.
+      // SRC, RND, PASSTHRU, MASK
+      MCVTPS2PH,
+
       // Galois Field Arithmetic Instructions
       GF2P8AFFINEINVQB, GF2P8AFFINEQB, GF2P8MULB,
 
@@ -582,7 +587,7 @@
 
       /// LOCK-prefixed arithmetic read-modify-write instructions.
       /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
-      LADD, LSUB, LOR, LXOR, LAND, LINC, LDEC,
+      LADD, LSUB, LOR, LXOR, LAND,
 
       // Load, scalar_to_vector, and zero extend.
       VZEXT_LOAD,
@@ -871,6 +876,7 @@
 
     bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                            const APInt &DemandedBits,
+                                           const APInt &DemandedElts,
                                            KnownBits &Known,
                                            TargetLoweringOpt &TLO,
                                            unsigned Depth) const override;
@@ -1052,6 +1058,11 @@
     bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                  unsigned Index) const override;
 
+    /// Scalar ops always have equal or better analysis/performance/power than
+    /// the vector equivalent, so this always makes sense if the scalar op is
+    /// supported.
+    bool shouldScalarizeBinop(SDValue) const override;
+
     bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem,
                                       unsigned AddrSpace) const override {
       // If we can replace more than 2 scalar stores, there will be a reduction
@@ -1370,6 +1381,13 @@
     /// Convert a comparison if required by the subtarget.
     SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
 
+    /// Emit flags for the given setcc condition and operands. Also returns the
+    /// corresponding X86 condition code constant in X86CC.
+    SDValue emitFlagsForSetcc(SDValue Op0, SDValue Op1,
+                              ISD::CondCode CC, const SDLoc &dl,
+                              SelectionDAG &DAG,
+                              SDValue &X86CC) const;
+
     /// Check if replacement of SQRT with RSQRT should be disabled.
     bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override;
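// A hedged usage sketch for emitFlagsForSetcc (the call site is assumed, not
// shown in this hunk): it returns the EFLAGS-producing node and fills X86CC
// with the matching condition-code constant for an X86ISD::SETCC consumer.
SDValue X86CC;
if (SDValue Flags = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC))
  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, Flags);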
 
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 0ab4ed4..7423cb8 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -2887,8 +2887,15 @@
 let Predicates = [HasAVX512] in {
   def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
             (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK8)>;
+  def : Pat<(v16i1 (bitconvert (loadi16 addr:$src))),
+            (KMOVWkm addr:$src)>;
 }
 
+def X86kextract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
+                         SDTypeProfile<1, 2, [SDTCisVT<0, i8>,
+                                              SDTCVecEltisVT<1, i1>,
+                                              SDTCisPtrTy<2>]>>;
+
 let Predicates = [HasAVX512] in {
   multiclass operation_gpr_mask_copy_lowering<RegisterClass maskRC, ValueType maskVT> {
     def : Pat<(maskVT (scalar_to_vector GR32:$src)),
@@ -2896,6 +2903,12 @@
 
     def : Pat<(maskVT (scalar_to_vector GR8:$src)),
               (COPY_TO_REGCLASS (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, sub_8bit), maskRC)>;
+
+    def : Pat<(i8 (X86kextract maskRC:$src, (iPTR 0))),
+              (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS maskRC:$src, GR32)), sub_8bit)>;
+
+    def : Pat<(i32 (anyext (i8 (X86kextract maskRC:$src, (iPTR 0))))),
+              (i32 (COPY_TO_REGCLASS maskRC:$src, GR32))>;
   }
 
   defm : operation_gpr_mask_copy_lowering<VK1,  v1i1>;
@@ -4830,13 +4843,13 @@
                                     SchedWriteVecALU, 1>;
 defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
                                     SchedWriteVecALU, 0>;
-defm VPADDS : avx512_binop_rm_vl_bw<0xEC, 0xED, "vpadds", X86adds,
+defm VPADDS : avx512_binop_rm_vl_bw<0xEC, 0xED, "vpadds", saddsat,
                                     SchedWriteVecALU, HasBWI, 1>;
-defm VPSUBS : avx512_binop_rm_vl_bw<0xE8, 0xE9, "vpsubs", X86subs,
+defm VPSUBS : avx512_binop_rm_vl_bw<0xE8, 0xE9, "vpsubs", ssubsat,
                                     SchedWriteVecALU, HasBWI, 0>;
-defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", X86addus,
+defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", uaddsat,
                                      SchedWriteVecALU, HasBWI, 1>;
-defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", X86subus,
+defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", usubsat,
                                      SchedWriteVecALU, HasBWI, 0>;
 defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmulld", mul,
                                     SchedWritePMULLD, HasAVX512, 1>, T8PD;
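// A hedged sketch: after the change above, these AVX-512 instructions select
// from the generic saturating ISD opcodes (the removed X86adds/X86subus-style
// nodes are gone). Building one such node, with illustrative operands:
SDValue Sat = DAG.getNode(ISD::USUBSAT, DL, MVT::v32i16, LHS, RHS);
// On BWI targets this can now match VPSUBUSW directly via the pattern above.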
@@ -7956,26 +7969,53 @@
                           X86VectorVTInfo _Src, SDNode OpNode,
                           X86FoldableSchedWrite sched,
                           string Broadcast = _.BroadcastStr,
-                          string Alias = "", X86MemOperand MemOp = _Src.MemOp> {
+                          string Alias = "", X86MemOperand MemOp = _Src.MemOp,
+                          RegisterClass MaskRC = _.KRCWM> {
 
-  defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
-                         (ins _Src.RC:$src), OpcodeStr, "$src", "$src",
-                         (_.VT (OpNode (_Src.VT _Src.RC:$src)))>,
+  defm rr : AVX512_maskable_common<opc, MRMSrcReg, _, (outs _.RC:$dst),
+                         (ins _Src.RC:$src),
+                         (ins _.RC:$src0, MaskRC:$mask, _Src.RC:$src),
+                         (ins MaskRC:$mask, _Src.RC:$src),
+                          OpcodeStr, "$src", "$src",
+                         (_.VT (OpNode (_Src.VT _Src.RC:$src))),
+                         (vselect MaskRC:$mask,
+                                  (_.VT (OpNode (_Src.VT _Src.RC:$src))),
+                                  _.RC:$src0),
+                         vselect, "$src0 = $dst">,
                          EVEX, Sched<[sched]>;
 
-  defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
-                         (ins MemOp:$src), OpcodeStr#Alias, "$src", "$src",
+  defm rm : AVX512_maskable_common<opc, MRMSrcMem, _, (outs _.RC:$dst),
+                         (ins MemOp:$src),
+                         (ins _.RC:$src0, MaskRC:$mask, MemOp:$src),
+                         (ins MaskRC:$mask, MemOp:$src),
+                         OpcodeStr#Alias, "$src", "$src",
                          (_.VT (OpNode (_Src.VT
-                             (_Src.LdFrag addr:$src))))>,
+                             (_Src.LdFrag addr:$src)))),
+                         (vselect MaskRC:$mask,
+                                  (_.VT (OpNode (_Src.VT
+                                                 (_Src.LdFrag addr:$src)))),
+                                  _.RC:$src0),
+                         vselect, "$src0 = $dst">,
                          EVEX, Sched<[sched.Folded]>;
 
-  defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
-                         (ins _Src.ScalarMemOp:$src), OpcodeStr,
+  defm rmb : AVX512_maskable_common<opc, MRMSrcMem, _, (outs _.RC:$dst),
+                         (ins _Src.ScalarMemOp:$src),
+                         (ins _.RC:$src0, MaskRC:$mask, _Src.ScalarMemOp:$src),
+                         (ins MaskRC:$mask, _Src.ScalarMemOp:$src),
+                         OpcodeStr,
                          "${src}"##Broadcast, "${src}"##Broadcast,
                          (_.VT (OpNode (_Src.VT
                                   (X86VBroadcast (_Src.ScalarLdFrag addr:$src)))
-                            ))>, EVEX, EVEX_B,
-                         Sched<[sched.Folded]>;
+                            )),
+                         (vselect MaskRC:$mask,
+                                  (_.VT
+                                   (OpNode
+                                    (_Src.VT
+                                     (X86VBroadcast
+                                      (_Src.ScalarLdFrag addr:$src))))),
+                                  _.RC:$src0),
+                         vselect, "$src0 = $dst">,
+                         EVEX, EVEX_B, Sched<[sched.Folded]>;
 }
 // Conversion with SAE - suppress all exceptions
 multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
@@ -8026,7 +8066,8 @@
   }
   let Predicates = [HasVLX] in {
     defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2f64x_info,
-                               X86vfpround, sched.XMM, "{1to2}", "{x}">, EVEX_V128;
+                               null_frag, sched.XMM, "{1to2}", "{x}", f128mem, VK2WM>,
+                               EVEX_V128;
     defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, fpround,
                                sched.YMM, "{1to4}", "{y}">, EVEX_V256;
 
@@ -8060,6 +8101,35 @@
               (VCVTPS2PDZ128rm addr:$src)>;
   def : Pat<(v4f64 (extloadv4f32 addr:$src)),
               (VCVTPS2PDZ256rm addr:$src)>;
+
+  // Special patterns to allow use of X86vmfpround for masking. Instruction
+  // patterns have been disabled with null_frag.
+  def : Pat<(X86vfpround (v2f64 VR128X:$src)),
+            (VCVTPD2PSZ128rr VR128X:$src)>;
+  def : Pat<(X86vmfpround (v2f64 VR128X:$src), (v4f32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTPD2PSZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>;
+  def : Pat<(X86vmfpround (v2f64 VR128X:$src), v4f32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTPD2PSZ128rrkz VK2WM:$mask, VR128X:$src)>;
+
+  def : Pat<(X86vfpround (loadv2f64 addr:$src)),
+            (VCVTPD2PSZ128rm addr:$src)>;
+  def : Pat<(X86vmfpround (loadv2f64 addr:$src), (v4f32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTPD2PSZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86vmfpround (loadv2f64 addr:$src), v4f32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTPD2PSZ128rmkz VK2WM:$mask, addr:$src)>;
+
+  def : Pat<(X86vfpround (v2f64 (X86VBroadcast (loadf64 addr:$src)))),
+            (VCVTPD2PSZ128rmb addr:$src)>;
+  def : Pat<(X86vmfpround (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          (v4f32 VR128X:$src0), VK2WM:$mask),
+            (VCVTPD2PSZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86vmfpround (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          v4f32x_info.ImmAllZerosV, VK2WM:$mask),
+            (VCVTPD2PSZ128rmbkz VK2WM:$mask, addr:$src)>;
 }
 
 // Convert Signed/Unsigned Doubleword to Double
@@ -8144,7 +8214,8 @@
     // dest type - 'v4i32x_info'. We also specify the broadcast string explicitly
     // due to the same reason.
     defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info,
-                               OpNode, sched.XMM, "{1to2}", "{x}">, EVEX_V128;
+                               null_frag, sched.XMM, "{1to2}", "{x}", f128mem,
+                               VK2WM>, EVEX_V128;
     defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNode,
                                sched.YMM, "{1to4}", "{y}">, EVEX_V256;
 
@@ -8173,8 +8244,9 @@
     // memory forms of these instructions in Asm Parser. They have the same
     // dest type - 'v4i32x_info'. We also specify the broadcast string explicitly
     // due to the same reason.
-    defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info, OpNode,
-                               sched.XMM, "{1to2}", "{x}">, EVEX_V128;
+    defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info,
+                               null_frag, sched.XMM, "{1to2}", "{x}", f128mem,
+                               VK2WM>, EVEX_V128;
     defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNode,
                                sched.YMM, "{1to4}", "{y}">, EVEX_V256;
 
@@ -8457,6 +8529,122 @@
             (VCVTTPD2UDQZ256rr VR256X:$src)>;
   def : Pat<(v4i32 (fp_to_uint (loadv4f64 addr:$src))),
             (VCVTTPD2UDQZ256rm addr:$src)>;
+
+  // Special patterns to allow use of X86mcvtp2Int for masking. Instruction
+  // patterns have been disabled with null_frag.
+  def : Pat<(v4i32 (X86cvtp2Int (v2f64 VR128X:$src))),
+            (VCVTPD2DQZ128rr VR128X:$src)>;
+  def : Pat<(X86mcvtp2Int (v2f64 VR128X:$src), (v4i32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTPD2DQZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>;
+  def : Pat<(X86mcvtp2Int (v2f64 VR128X:$src), v4i32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTPD2DQZ128rrkz VK2WM:$mask, VR128X:$src)>;
+
+  def : Pat<(v4i32 (X86cvtp2Int (loadv2f64 addr:$src))),
+            (VCVTPD2DQZ128rm addr:$src)>;
+  def : Pat<(X86mcvtp2Int (loadv2f64 addr:$src), (v4i32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTPD2DQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvtp2Int (loadv2f64 addr:$src), v4i32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTPD2DQZ128rmkz VK2WM:$mask, addr:$src)>;
+
+  def : Pat<(v4i32 (X86cvtp2Int (v2f64 (X86VBroadcast (loadf64 addr:$src))))),
+            (VCVTPD2DQZ128rmb addr:$src)>;
+  def : Pat<(X86mcvtp2Int (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          (v4i32 VR128X:$src0), VK2WM:$mask),
+            (VCVTPD2DQZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvtp2Int (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          v4i32x_info.ImmAllZerosV, VK2WM:$mask),
+            (VCVTPD2DQZ128rmbkz VK2WM:$mask, addr:$src)>;
+
+  // Special patterns to allow use of X86mcvttp2si for masking. Instruction
+  // patterns have been disabled with null_frag.
+  def : Pat<(v4i32 (X86cvttp2si (v2f64 VR128X:$src))),
+            (VCVTTPD2DQZ128rr VR128X:$src)>;
+  def : Pat<(X86mcvttp2si (v2f64 VR128X:$src), (v4i32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTTPD2DQZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>;
+  def : Pat<(X86mcvttp2si (v2f64 VR128X:$src), v4i32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTTPD2DQZ128rrkz VK2WM:$mask, VR128X:$src)>;
+
+  def : Pat<(v4i32 (X86cvttp2si (loadv2f64 addr:$src))),
+            (VCVTTPD2DQZ128rm addr:$src)>;
+  def : Pat<(X86mcvttp2si (loadv2f64 addr:$src), (v4i32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTTPD2DQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvttp2si (loadv2f64 addr:$src), v4i32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTTPD2DQZ128rmkz VK2WM:$mask, addr:$src)>;
+
+  def : Pat<(v4i32 (X86cvttp2si (v2f64 (X86VBroadcast (loadf64 addr:$src))))),
+            (VCVTTPD2DQZ128rmb addr:$src)>;
+  def : Pat<(X86mcvttp2si (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          (v4i32 VR128X:$src0), VK2WM:$mask),
+            (VCVTTPD2DQZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvttp2si (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          v4i32x_info.ImmAllZerosV, VK2WM:$mask),
+            (VCVTTPD2DQZ128rmbkz VK2WM:$mask, addr:$src)>;
+
+  // Special patterns to allow use of X86mcvtp2UInt for masking. Instruction
+  // patterns have been disabled with null_frag.
+  def : Pat<(v4i32 (X86cvtp2UInt (v2f64 VR128X:$src))),
+            (VCVTPD2UDQZ128rr VR128X:$src)>;
+  def : Pat<(X86mcvtp2UInt (v2f64 VR128X:$src), (v4i32 VR128X:$src0),
+                           VK2WM:$mask),
+            (VCVTPD2UDQZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>;
+  def : Pat<(X86mcvtp2UInt (v2f64 VR128X:$src), v4i32x_info.ImmAllZerosV,
+                           VK2WM:$mask),
+            (VCVTPD2UDQZ128rrkz VK2WM:$mask, VR128X:$src)>;
+
+  def : Pat<(v4i32 (X86cvtp2UInt (loadv2f64 addr:$src))),
+            (VCVTPD2UDQZ128rm addr:$src)>;
+  def : Pat<(X86mcvtp2UInt (loadv2f64 addr:$src), (v4i32 VR128X:$src0),
+                           VK2WM:$mask),
+            (VCVTPD2UDQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvtp2UInt (loadv2f64 addr:$src), v4i32x_info.ImmAllZerosV,
+                           VK2WM:$mask),
+            (VCVTPD2UDQZ128rmkz VK2WM:$mask, addr:$src)>;
+
+  def : Pat<(v4i32 (X86cvtp2UInt (v2f64 (X86VBroadcast (loadf64 addr:$src))))),
+            (VCVTPD2UDQZ128rmb addr:$src)>;
+  def : Pat<(X86mcvtp2UInt (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                           (v4i32 VR128X:$src0), VK2WM:$mask),
+            (VCVTPD2UDQZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvtp2UInt (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                           v4i32x_info.ImmAllZerosV, VK2WM:$mask),
+            (VCVTPD2UDQZ128rmbkz VK2WM:$mask, addr:$src)>;
+
+  // Special patterns to allow use of X86mcvttp2ui for masking. Instruction
+  // patterns have been disabled with null_frag.
+  def : Pat<(v4i32 (X86cvttp2ui (v2f64 VR128X:$src))),
+            (VCVTTPD2UDQZ128rr VR128X:$src)>;
+  def : Pat<(X86mcvttp2ui (v2f64 VR128X:$src), (v4i32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTTPD2UDQZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>;
+  def : Pat<(X86mcvttp2ui (v2f64 VR128X:$src), v4i32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTTPD2UDQZ128rrkz VK2WM:$mask, VR128X:$src)>;
+
+  def : Pat<(v4i32 (X86cvttp2ui (loadv2f64 addr:$src))),
+            (VCVTTPD2UDQZ128rm addr:$src)>;
+  def : Pat<(X86mcvttp2ui (loadv2f64 addr:$src), (v4i32 VR128X:$src0),
+                          VK2WM:$mask),
+            (VCVTTPD2UDQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvttp2ui (loadv2f64 addr:$src), v4i32x_info.ImmAllZerosV,
+                          VK2WM:$mask),
+            (VCVTTPD2UDQZ128rmkz VK2WM:$mask, addr:$src)>;
+
+  def : Pat<(v4i32 (X86cvttp2ui (v2f64 (X86VBroadcast (loadf64 addr:$src))))),
+            (VCVTTPD2UDQZ128rmb addr:$src)>;
+  def : Pat<(X86mcvttp2ui (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          (v4i32 VR128X:$src0), VK2WM:$mask),
+            (VCVTTPD2UDQZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+  def : Pat<(X86mcvttp2ui (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+                          v4i32x_info.ImmAllZerosV, VK2WM:$mask),
+            (VCVTTPD2UDQZ128rmbkz VK2WM:$mask, addr:$src)>;
 }
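// A plain-C++ reference model (illustrative only) of the merge-masking that
// the X86mcvtp2Int/X86mcvttp2si/... patterns above encode: lanes with a clear
// mask bit keep the passthru value, and the {z} (rrkz/rmkz) forms use an
// all-zero passthru. The C cast truncates, matching the VCVTT* forms; the
// non-truncating forms round per MXCSR instead.
#include <cstdint>

static void cvttpd2dq_mask(const double src[2], const int32_t passthru[4],
                           uint8_t mask, int32_t dst[4]) {
  for (int i = 0; i < 2; ++i)
    dst[i] = (mask & (1u << i)) ? (int32_t)src[i] : passthru[i];
  dst[2] = dst[3] = 0; // the 128-bit form zeroes the upper half of the result
}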
 
 let Predicates = [HasDQI] in {
@@ -8713,12 +8901,28 @@
 
 multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src,
                            X86MemOperand x86memop, SchedWrite RR, SchedWrite MR> {
-  defm rr : AVX512_maskable<0x1D, MRMDestReg, _dest ,(outs _dest.RC:$dst),
-                   (ins _src.RC:$src1, i32u8imm:$src2),
-                   "vcvtps2ph", "$src2, $src1", "$src1, $src2",
-                   (X86cvtps2ph (_src.VT _src.RC:$src1),
-                                (i32 imm:$src2)), 0, 0>,
-                   AVX512AIi8Base, Sched<[RR]>;
+let ExeDomain = GenericDomain in {
+  def rr : AVX512AIi8<0x1D, MRMDestReg, (outs _dest.RC:$dst),
+             (ins _src.RC:$src1, i32u8imm:$src2),
+             "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+             [(set _dest.RC:$dst,
+                   (X86cvtps2ph (_src.VT _src.RC:$src1), (i32 imm:$src2)))]>,
+             Sched<[RR]>;
+  let Constraints = "$src0 = $dst" in
+  def rrk : AVX512AIi8<0x1D, MRMDestReg, (outs _dest.RC:$dst),
+             (ins _dest.RC:$src0, _src.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
+             "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
+             [(set _dest.RC:$dst,
+                   (X86mcvtps2ph (_src.VT _src.RC:$src1), (i32 imm:$src2),
+                                 _dest.RC:$src0, _src.KRCWM:$mask))]>,
+             Sched<[RR]>, EVEX_K;
+  def rrkz : AVX512AIi8<0x1D, MRMDestReg, (outs _dest.RC:$dst),
+             (ins _src.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
+             "vcvtps2ph\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}",
+             [(set _dest.RC:$dst,
+                   (X86mcvtps2ph (_src.VT _src.RC:$src1), (i32 imm:$src2),
+                                 _dest.ImmAllZerosV, _src.KRCWM:$mask))]>,
+             Sched<[RR]>, EVEX_KZ;
   let hasSideEffects = 0, mayStore = 1 in {
     def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
                (ins x86memop:$dst, _src.RC:$src1, i32u8imm:$src2),
@@ -8730,6 +8934,7 @@
                 EVEX_K, Sched<[MR]>, NotMemoryFoldable;
   }
 }
+}
 
 multiclass avx512_cvtps2ph_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src,
                                SchedWrite Sched> {
@@ -9307,14 +9512,47 @@
 // Integer truncate and extend operations
 //-------------------------------------------------
 
+// PatFrags that contain a select and a truncate op. They take operands in the
+// same order as X86vmtrunc, X86vmtruncs, X86vmtruncus. This allows us to pass
+// either to the multiclasses.
+def select_trunc : PatFrag<(ops node:$src, node:$src0, node:$mask),
+                           (vselect node:$mask,
+                                    (trunc node:$src), node:$src0)>;
+def select_truncs : PatFrag<(ops node:$src, node:$src0, node:$mask),
+                            (vselect node:$mask,
+                                     (X86vtruncs node:$src), node:$src0)>;
+def select_truncus : PatFrag<(ops node:$src, node:$src0, node:$mask),
+                             (vselect node:$mask,
+                                      (X86vtruncus node:$src), node:$src0)>;
+
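// A hedged note on the PatFrags above: they only normalize operand order, so
// a masked truncate written as a select can feed the same multiclass slots
// as the dedicated masked nodes, whose operand order is (src, passthru, mask).
// Illustrative DAG shapes (types assumed):
//   vselect Mask, (trunc Src), Src0   <=>   X86vmtrunc Src, Src0, Mask
SDValue SelForm = DAG.getNode(ISD::VSELECT, DL, VT, Mask,
                              DAG.getNode(ISD::TRUNCATE, DL, VT, Src), Src0);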
 multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                              SDPatternOperator MaskNode,
                               X86FoldableSchedWrite sched, X86VectorVTInfo SrcInfo,
                               X86VectorVTInfo DestInfo, X86MemOperand x86memop> {
-  let ExeDomain = DestInfo.ExeDomain in
-  defm rr  : AVX512_maskable<opc, MRMDestReg, DestInfo, (outs DestInfo.RC:$dst),
-                      (ins SrcInfo.RC:$src1), OpcodeStr ,"$src1", "$src1",
-                      (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1)))>,
-                      EVEX, T8XS, Sched<[sched]>;
+  let ExeDomain = DestInfo.ExeDomain in {
+  def rr : AVX512XS8I<opc, MRMDestReg, (outs DestInfo.RC:$dst),
+             (ins SrcInfo.RC:$src),
+             OpcodeStr # "\t{$src, $dst|$dst, $src}",
+             [(set DestInfo.RC:$dst,
+                   (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src))))]>,
+             EVEX, Sched<[sched]>;
+  let Constraints = "$src0 = $dst" in
+  def rrk : AVX512XS8I<opc, MRMDestReg, (outs DestInfo.RC:$dst),
+             (ins DestInfo.RC:$src0, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src),
+             OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
+             [(set DestInfo.RC:$dst,
+                   (MaskNode (SrcInfo.VT SrcInfo.RC:$src),
+                             (DestInfo.VT DestInfo.RC:$src0),
+                             SrcInfo.KRCWM:$mask))]>,
+             EVEX, EVEX_K, Sched<[sched]>;
+  def rrkz : AVX512XS8I<opc, MRMDestReg, (outs DestInfo.RC:$dst),
+             (ins SrcInfo.KRCWM:$mask, SrcInfo.RC:$src),
+             OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
+             [(set DestInfo.RC:$dst,
+                   (DestInfo.VT (MaskNode (SrcInfo.VT SrcInfo.RC:$src),
+                             DestInfo.ImmAllZerosV, SrcInfo.KRCWM:$mask)))]>,
+             EVEX, EVEX_KZ, Sched<[sched]>;
+  }
 
   let mayStore = 1, hasSideEffects = 0, ExeDomain = DestInfo.ExeDomain in {
     def mr : AVX512XS8I<opc, MRMDestMem, (outs),
@@ -9345,7 +9583,11 @@
 }
 
 multiclass avx512_trunc<bits<8> opc, string OpcodeStr, SDNode OpNode128,
-                        SDNode OpNode256, SDNode OpNode512, X86FoldableSchedWrite sched,
+                        SDNode OpNode256, SDNode OpNode512,
+                        SDPatternOperator MaskNode128,
+                        SDPatternOperator MaskNode256,
+                        SDPatternOperator MaskNode512,
+                        X86FoldableSchedWrite sched,
                         AVX512VLVectorVTInfo VTSrcInfo,
                         X86VectorVTInfo DestInfoZ128,
                         X86VectorVTInfo DestInfoZ256, X86VectorVTInfo DestInfoZ,
@@ -9354,118 +9596,167 @@
                         PatFrag mtruncFrag, Predicate prd = HasAVX512>{
 
   let Predicates = [HasVLX, prd] in {
-    defm Z128:  avx512_trunc_common<opc, OpcodeStr, OpNode128, sched,
+    defm Z128:  avx512_trunc_common<opc, OpcodeStr, OpNode128, MaskNode128, sched,
                              VTSrcInfo.info128, DestInfoZ128, x86memopZ128>,
                 avx512_trunc_mr_lowering<VTSrcInfo.info128, DestInfoZ128,
                              truncFrag, mtruncFrag, NAME>, EVEX_V128;
 
-    defm Z256:  avx512_trunc_common<opc, OpcodeStr, OpNode256, sched,
+    defm Z256:  avx512_trunc_common<opc, OpcodeStr, OpNode256, MaskNode256, sched,
                              VTSrcInfo.info256, DestInfoZ256, x86memopZ256>,
                 avx512_trunc_mr_lowering<VTSrcInfo.info256, DestInfoZ256,
                              truncFrag, mtruncFrag, NAME>, EVEX_V256;
   }
   let Predicates = [prd] in
-    defm Z:     avx512_trunc_common<opc, OpcodeStr, OpNode512, sched,
+    defm Z:     avx512_trunc_common<opc, OpcodeStr, OpNode512, MaskNode512, sched,
                              VTSrcInfo.info512, DestInfoZ, x86memopZ>,
                 avx512_trunc_mr_lowering<VTSrcInfo.info512, DestInfoZ,
                              truncFrag, mtruncFrag, NAME>, EVEX_V512;
 }
 
 multiclass avx512_trunc_qb<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                           SDPatternOperator MaskNode,
                            X86FoldableSchedWrite sched, PatFrag StoreNode,
-                           PatFrag MaskedStoreNode, SDNode InVecNode = OpNode> {
-  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, InVecNode, InVecNode, sched,
+                           PatFrag MaskedStoreNode, SDNode InVecNode,
+                           SDPatternOperator InVecMaskNode> {
+  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, InVecNode, InVecNode,
+                          InVecMaskNode, InVecMaskNode, InVecMaskNode, sched,
                           avx512vl_i64_info, v16i8x_info, v16i8x_info,
                           v16i8x_info, i16mem, i32mem, i64mem, StoreNode,
                           MaskedStoreNode>, EVEX_CD8<8, CD8VO>;
 }
 
 multiclass avx512_trunc_qw<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                           SDPatternOperator MaskNode,
                            X86FoldableSchedWrite sched, PatFrag StoreNode,
-                           PatFrag MaskedStoreNode, SDNode InVecNode = OpNode> {
-  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, InVecNode, OpNode, sched,
+                           PatFrag MaskedStoreNode, SDNode InVecNode,
+                           SDPatternOperator InVecMaskNode> {
+  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, InVecNode, OpNode,
+                          InVecMaskNode, InVecMaskNode, MaskNode, sched,
                           avx512vl_i64_info, v8i16x_info, v8i16x_info,
                           v8i16x_info, i32mem, i64mem, i128mem, StoreNode,
                           MaskedStoreNode>, EVEX_CD8<16, CD8VQ>;
 }
 
 multiclass avx512_trunc_qd<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                           SDPatternOperator MaskNode,
                            X86FoldableSchedWrite sched, PatFrag StoreNode,
-                           PatFrag MaskedStoreNode, SDNode InVecNode = OpNode> {
-  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, OpNode, OpNode, sched,
+                           PatFrag MaskedStoreNode, SDNode InVecNode,
+                           SDPatternOperator InVecMaskNode> {
+  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, OpNode, OpNode,
+                          InVecMaskNode, MaskNode, MaskNode, sched,
                           avx512vl_i64_info, v4i32x_info, v4i32x_info,
                           v8i32x_info, i64mem, i128mem, i256mem, StoreNode,
                           MaskedStoreNode>, EVEX_CD8<32, CD8VH>;
 }
 
 multiclass avx512_trunc_db<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                           SDPatternOperator MaskNode,
                            X86FoldableSchedWrite sched, PatFrag StoreNode,
-                           PatFrag MaskedStoreNode, SDNode InVecNode = OpNode> {
-  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, InVecNode, OpNode, sched,
+                           PatFrag MaskedStoreNode, SDNode InVecNode,
+                           SDPatternOperator InVecMaskNode> {
+  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, InVecNode, OpNode,
+                          InVecMaskNode, InVecMaskNode, MaskNode, sched,
                           avx512vl_i32_info, v16i8x_info, v16i8x_info,
                           v16i8x_info, i32mem, i64mem, i128mem, StoreNode,
                           MaskedStoreNode>, EVEX_CD8<8, CD8VQ>;
 }
 
 multiclass avx512_trunc_dw<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                           SDPatternOperator MaskNode,
                            X86FoldableSchedWrite sched, PatFrag StoreNode,
-                           PatFrag MaskedStoreNode, SDNode InVecNode = OpNode> {
-  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, OpNode, OpNode, sched,
+                           PatFrag MaskedStoreNode, SDNode InVecNode,
+                           SDPatternOperator InVecMaskNode> {
+  defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, OpNode, OpNode,
+                          InVecMaskNode, MaskNode, MaskNode, sched,
                           avx512vl_i32_info, v8i16x_info, v8i16x_info,
                           v16i16x_info, i64mem, i128mem, i256mem, StoreNode,
                           MaskedStoreNode>, EVEX_CD8<16, CD8VH>;
 }
 
 multiclass avx512_trunc_wb<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                           SDPatternOperator MaskNode,
                            X86FoldableSchedWrite sched, PatFrag StoreNode,
-                           PatFrag MaskedStoreNode, SDNode InVecNode = OpNode> {
+                           PatFrag MaskedStoreNode, SDNode InVecNode,
+                           SDPatternOperator InVecMaskNode> {
   defm NAME: avx512_trunc<opc, OpcodeStr, InVecNode, OpNode, OpNode,
-                          sched, avx512vl_i16_info, v16i8x_info, v16i8x_info,
+                          InVecMaskNode, MaskNode, MaskNode, sched,
+                          avx512vl_i16_info, v16i8x_info, v16i8x_info,
                           v32i8x_info, i64mem, i128mem, i256mem, StoreNode,
                           MaskedStoreNode, HasBWI>, EVEX_CD8<16, CD8VH>;
 }
 
-defm VPMOVQB    : avx512_trunc_qb<0x32, "vpmovqb",   trunc, WriteShuffle256,
-                                  truncstorevi8, masked_truncstorevi8, X86vtrunc>;
-defm VPMOVSQB   : avx512_trunc_qb<0x22, "vpmovsqb",  X86vtruncs, WriteShuffle256,
-                                  truncstore_s_vi8, masked_truncstore_s_vi8>;
-defm VPMOVUSQB  : avx512_trunc_qb<0x12, "vpmovusqb", X86vtruncus, WriteShuffle256,
-                                  truncstore_us_vi8, masked_truncstore_us_vi8>;
+defm VPMOVQB    : avx512_trunc_qb<0x32, "vpmovqb",   trunc, select_trunc,
+                                  WriteShuffle256, truncstorevi8,
+                                  masked_truncstorevi8, X86vtrunc, X86vmtrunc>;
+defm VPMOVSQB   : avx512_trunc_qb<0x22, "vpmovsqb",  X86vtruncs, select_truncs,
+                                  WriteShuffle256, truncstore_s_vi8,
+                                  masked_truncstore_s_vi8, X86vtruncs,
+                                  X86vmtruncs>;
+defm VPMOVUSQB  : avx512_trunc_qb<0x12, "vpmovusqb", X86vtruncus,
+                                  select_truncus, WriteShuffle256,
+                                  truncstore_us_vi8, masked_truncstore_us_vi8,
+                                  X86vtruncus, X86vmtruncus>;
 
-defm VPMOVQW    : avx512_trunc_qw<0x34, "vpmovqw",   trunc, WriteShuffle256,
-                                  truncstorevi16, masked_truncstorevi16, X86vtrunc>;
-defm VPMOVSQW   : avx512_trunc_qw<0x24, "vpmovsqw",  X86vtruncs, WriteShuffle256,
-                                  truncstore_s_vi16, masked_truncstore_s_vi16>;
-defm VPMOVUSQW  : avx512_trunc_qw<0x14, "vpmovusqw", X86vtruncus, WriteShuffle256,
-                                  truncstore_us_vi16, masked_truncstore_us_vi16>;
+defm VPMOVQW    : avx512_trunc_qw<0x34, "vpmovqw", trunc, select_trunc,
+                                  WriteShuffle256, truncstorevi16,
+                                  masked_truncstorevi16, X86vtrunc, X86vmtrunc>;
+defm VPMOVSQW   : avx512_trunc_qw<0x24, "vpmovsqw",  X86vtruncs, select_truncs,
+                                  WriteShuffle256, truncstore_s_vi16,
+                                  masked_truncstore_s_vi16, X86vtruncs,
+                                  X86vmtruncs>;
+defm VPMOVUSQW  : avx512_trunc_qw<0x14, "vpmovusqw", X86vtruncus,
+                                  select_truncus, WriteShuffle256,
+                                  truncstore_us_vi16, masked_truncstore_us_vi16,
+                                  X86vtruncus, X86vmtruncus>;
 
-defm VPMOVQD    : avx512_trunc_qd<0x35, "vpmovqd",   trunc, WriteShuffle256,
-                                  truncstorevi32, masked_truncstorevi32, X86vtrunc>;
-defm VPMOVSQD   : avx512_trunc_qd<0x25, "vpmovsqd",  X86vtruncs, WriteShuffle256,
-                                  truncstore_s_vi32, masked_truncstore_s_vi32>;
-defm VPMOVUSQD  : avx512_trunc_qd<0x15, "vpmovusqd", X86vtruncus, WriteShuffle256,
-                                  truncstore_us_vi32, masked_truncstore_us_vi32>;
+defm VPMOVQD    : avx512_trunc_qd<0x35, "vpmovqd", trunc, select_trunc,
+                                  WriteShuffle256, truncstorevi32,
+                                  masked_truncstorevi32, X86vtrunc, X86vmtrunc>;
+defm VPMOVSQD   : avx512_trunc_qd<0x25, "vpmovsqd",  X86vtruncs, select_truncs,
+                                  WriteShuffle256, truncstore_s_vi32,
+                                  masked_truncstore_s_vi32, X86vtruncs,
+                                  X86vmtruncs>;
+defm VPMOVUSQD  : avx512_trunc_qd<0x15, "vpmovusqd", X86vtruncus,
+                                  select_truncus, WriteShuffle256,
+                                  truncstore_us_vi32, masked_truncstore_us_vi32,
+                                  X86vtruncus, X86vmtruncus>;
 
-defm VPMOVDB    : avx512_trunc_db<0x31, "vpmovdb", trunc, WriteShuffle256,
-                                  truncstorevi8, masked_truncstorevi8, X86vtrunc>;
-defm VPMOVSDB   : avx512_trunc_db<0x21, "vpmovsdb",   X86vtruncs, WriteShuffle256,
-                                  truncstore_s_vi8, masked_truncstore_s_vi8>;
-defm VPMOVUSDB  : avx512_trunc_db<0x11, "vpmovusdb",  X86vtruncus, WriteShuffle256,
-                                  truncstore_us_vi8, masked_truncstore_us_vi8>;
+defm VPMOVDB    : avx512_trunc_db<0x31, "vpmovdb", trunc, select_trunc,
+                                  WriteShuffle256, truncstorevi8,
+                                  masked_truncstorevi8, X86vtrunc, X86vmtrunc>;
+defm VPMOVSDB   : avx512_trunc_db<0x21, "vpmovsdb", X86vtruncs, select_truncs,
+                                  WriteShuffle256, truncstore_s_vi8,
+                                  masked_truncstore_s_vi8, X86vtruncs,
+                                  X86vmtruncs>;
+defm VPMOVUSDB  : avx512_trunc_db<0x11, "vpmovusdb",  X86vtruncus,
+                                  select_truncus, WriteShuffle256,
+                                  truncstore_us_vi8, masked_truncstore_us_vi8,
+                                  X86vtruncus, X86vmtruncus>;
 
-defm VPMOVDW    : avx512_trunc_dw<0x33, "vpmovdw", trunc, WriteShuffle256,
-                                  truncstorevi16, masked_truncstorevi16, X86vtrunc>;
-defm VPMOVSDW   : avx512_trunc_dw<0x23, "vpmovsdw",   X86vtruncs, WriteShuffle256,
-                                  truncstore_s_vi16, masked_truncstore_s_vi16>;
-defm VPMOVUSDW  : avx512_trunc_dw<0x13, "vpmovusdw",  X86vtruncus, WriteShuffle256,
-                                  truncstore_us_vi16, masked_truncstore_us_vi16>;
+defm VPMOVDW    : avx512_trunc_dw<0x33, "vpmovdw", trunc, select_trunc,
+                                  WriteShuffle256, truncstorevi16,
+                                  masked_truncstorevi16, X86vtrunc, X86vmtrunc>;
+defm VPMOVSDW   : avx512_trunc_dw<0x23, "vpmovsdw", X86vtruncs, select_truncs,
+                                  WriteShuffle256, truncstore_s_vi16,
+                                  masked_truncstore_s_vi16, X86vtruncs,
+                                  X86vmtruncs>;
+defm VPMOVUSDW  : avx512_trunc_dw<0x13, "vpmovusdw", X86vtruncus,
+                                  select_truncus, WriteShuffle256,
+                                  truncstore_us_vi16, masked_truncstore_us_vi16,
+                                  X86vtruncus, X86vmtruncus>;
 
-defm VPMOVWB    : avx512_trunc_wb<0x30, "vpmovwb", trunc, WriteShuffle256,
-                                  truncstorevi8, masked_truncstorevi8, X86vtrunc>;
-defm VPMOVSWB   : avx512_trunc_wb<0x20, "vpmovswb",   X86vtruncs, WriteShuffle256,
-                                  truncstore_s_vi8, masked_truncstore_s_vi8>;
-defm VPMOVUSWB  : avx512_trunc_wb<0x10, "vpmovuswb",  X86vtruncus, WriteShuffle256,
-                                  truncstore_us_vi8, masked_truncstore_us_vi8>;
+defm VPMOVWB    : avx512_trunc_wb<0x30, "vpmovwb", trunc, select_trunc,
+                                  WriteShuffle256, truncstorevi8,
+                                  masked_truncstorevi8, X86vtrunc,
+                                  X86vmtrunc>;
+defm VPMOVSWB   : avx512_trunc_wb<0x20, "vpmovswb", X86vtruncs, select_truncs,
+                                  WriteShuffle256, truncstore_s_vi8,
+                                  masked_truncstore_s_vi8, X86vtruncs,
+                                  X86vmtruncs>;
+defm VPMOVUSWB  : avx512_trunc_wb<0x10, "vpmovuswb", X86vtruncus,
+                                  select_truncus, WriteShuffle256,
+                                  truncstore_us_vi8, masked_truncstore_us_vi8,
+                                  X86vtruncus, X86vmtruncus>;
 
 let Predicates = [HasAVX512, NoVLX] in {
 def: Pat<(v8i16 (trunc (v8i32 VR256X:$src))),
@@ -9484,6 +9775,44 @@
                                             VR256X:$src, sub_ymm))), sub_xmm))>;
 }
 
+// Without BWI we can't use vXi16/vXi8 vselect, so we have to use vmtrunc nodes.
+multiclass mtrunc_lowering<string InstrName, SDNode OpNode,
+                           X86VectorVTInfo DestInfo,
+                           X86VectorVTInfo SrcInfo> {
+  def : Pat<(DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src),
+                                 DestInfo.RC:$src0,
+                                 SrcInfo.KRCWM:$mask)),
+            (!cast<Instruction>(InstrName#"rrk") DestInfo.RC:$src0,
+                                                 SrcInfo.KRCWM:$mask,
+                                                 SrcInfo.RC:$src)>;
+
+  def : Pat<(DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src),
+                                 DestInfo.ImmAllZerosV,
+                                 SrcInfo.KRCWM:$mask)),
+            (!cast<Instruction>(InstrName#"rrkz") SrcInfo.KRCWM:$mask,
+                                                  SrcInfo.RC:$src)>;
+}
+
+let Predicates = [HasVLX] in {
+defm : mtrunc_lowering<"VPMOVDWZ256", X86vmtrunc, v8i16x_info, v8i32x_info>;
+defm : mtrunc_lowering<"VPMOVSDWZ256", X86vmtruncs, v8i16x_info, v8i32x_info>;
+defm : mtrunc_lowering<"VPMOVUSDWZ256", X86vmtruncus, v8i16x_info, v8i32x_info>;
+}
+
+let Predicates = [HasAVX512] in {
+defm : mtrunc_lowering<"VPMOVDWZ", X86vmtrunc, v16i16x_info, v16i32_info>;
+defm : mtrunc_lowering<"VPMOVSDWZ", X86vmtruncs, v16i16x_info, v16i32_info>;
+defm : mtrunc_lowering<"VPMOVUSDWZ", X86vmtruncus, v16i16x_info, v16i32_info>;
+
+defm : mtrunc_lowering<"VPMOVDBZ", X86vmtrunc, v16i8x_info, v16i32_info>;
+defm : mtrunc_lowering<"VPMOVSDBZ", X86vmtruncs, v16i8x_info, v16i32_info>;
+defm : mtrunc_lowering<"VPMOVUSDBZ", X86vmtruncus, v16i8x_info, v16i32_info>;
+
+defm : mtrunc_lowering<"VPMOVQWZ", X86vmtrunc, v8i16x_info, v8i64_info>;
+defm : mtrunc_lowering<"VPMOVSQWZ", X86vmtruncs, v8i16x_info, v8i64_info>;
+defm : mtrunc_lowering<"VPMOVUSQWZ", X86vmtruncus, v8i16x_info, v8i64_info>;
+}
+
 multiclass WriteShuffle256_common<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched,
               X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo,
               X86MemOperand x86memop, PatFrag LdFrag, SDNode OpNode>{
@@ -11217,6 +11546,8 @@
           (VMOVDDUPZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>;
 def : Pat<(v2f64 (X86VBroadcast (loadv2f64 addr:$src))),
           (VMOVDDUPZ128rm addr:$src)>;
+def : Pat<(v2f64 (X86VBroadcast (v2f64 (X86vzload addr:$src)))),
+          (VMOVDDUPZ128rm addr:$src)>;
 
 def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)),
                    (v2f64 VR128X:$src0)),
diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td
index 3288fe2..cb5a4e5 100644
--- a/lib/Target/X86/X86InstrArithmetic.td
+++ b/lib/Target/X86/X86InstrArithmetic.td
@@ -422,22 +422,35 @@
 } // SchedRW
 } // CodeSize
 
+def X86add_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
+                               (X86add_flag node:$lhs, node:$rhs), [{
+  return hasNoCarryFlagUses(SDValue(N, 1));
+}]>;
+
+def X86sub_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
+                               (X86sub_flag node:$lhs, node:$rhs), [{
+  // Only use DEC if the result is used.
+  return !SDValue(N, 0).use_empty() && hasNoCarryFlagUses(SDValue(N, 1));
+}]>;
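// A plain-C++ sketch (illustrative only) of why the nocf predicates above
// matter: INC/DEC leave CF untouched, so "add x, 1" may only become INC when
// no flag user reads the carry. hasNoCarryFlagUses is assumed to walk the
// users of the flag result and reject CF-reading condition codes (JB/JAE,
// ADC/SBB consumers).
#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xFFFFFFFFu;
  bool carry = (x + 1u) < x; // the carry-out ADD sets but INC would not touch
  assert(carry);
  return 0;
}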
+
 // TODO: inc/dec is slow for P4, but fast for Pentium-M.
 let Defs = [EFLAGS] in {
 let Constraints = "$src1 = $dst", SchedRW = [WriteALU] in {
 let CodeSize = 2 in
 def INC8r  : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
                "inc{b}\t$dst",
-               [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))]>;
+               [(set GR8:$dst, EFLAGS, (X86add_flag_nocf GR8:$src1, 1))]>;
 let isConvertibleToThreeAddress = 1, CodeSize = 2 in { // Can xform into LEA.
 def INC16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
                "inc{w}\t$dst",
-               [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>, OpSize16;
+               [(set GR16:$dst, EFLAGS, (X86add_flag_nocf GR16:$src1, 1))]>,
+               OpSize16;
 def INC32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
                "inc{l}\t$dst",
-               [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>, OpSize32;
+               [(set GR32:$dst, EFLAGS, (X86add_flag_nocf GR32:$src1, 1))]>,
+               OpSize32;
 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src1), "inc{q}\t$dst",
-                [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src1))]>;
+                [(set GR64:$dst, EFLAGS, (X86add_flag_nocf GR64:$src1, 1))]>;
 } // isConvertibleToThreeAddress = 1, CodeSize = 2
 
 // Short forms only valid in 32-bit mode. Selected during MCInst lowering.
@@ -474,16 +487,18 @@
 let CodeSize = 2 in
 def DEC8r  : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
                "dec{b}\t$dst",
-               [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))]>;
+               [(set GR8:$dst, EFLAGS, (X86sub_flag_nocf GR8:$src1, 1))]>;
 let isConvertibleToThreeAddress = 1, CodeSize = 2 in { // Can xform into LEA.
 def DEC16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
                "dec{w}\t$dst",
-               [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>, OpSize16;
+               [(set GR16:$dst, EFLAGS, (X86sub_flag_nocf GR16:$src1, 1))]>,
+               OpSize16;
 def DEC32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
                "dec{l}\t$dst",
-               [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>, OpSize32;
+               [(set GR32:$dst, EFLAGS, (X86sub_flag_nocf GR32:$src1, 1))]>,
+               OpSize32;
 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src1), "dec{q}\t$dst",
-                [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src1))]>;
+                [(set GR64:$dst, EFLAGS, (X86sub_flag_nocf GR64:$src1, 1))]>;
 } // isConvertibleToThreeAddress = 1, CodeSize = 2
 
 // Short forms only valid in 32-bit mode. Selected during MCInst lowering.
@@ -1212,16 +1227,21 @@
 let isCompare = 1 in {
   let Defs = [EFLAGS] in {
     let isCommutable = 1 in {
-      def TEST8rr  : BinOpRR_F<0x84, "test", Xi8 , X86testpat>;
-      def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat>;
-      def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat>;
-      def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat>;
+      // Avoid selecting these and instead use a test+and. Post-processing
+      // will combine them. This gives a bunch of other patterns that start
+      // with an 'and' a chance to match.
+      def TEST8rr  : BinOpRR_F<0x84, "test", Xi8 , null_frag>;
+      def TEST16rr : BinOpRR_F<0x84, "test", Xi16, null_frag>;
+      def TEST32rr : BinOpRR_F<0x84, "test", Xi32, null_frag>;
+      def TEST64rr : BinOpRR_F<0x84, "test", Xi64, null_frag>;
     } // isCommutable
 
-    def TEST8mr    : BinOpMR_F<0x84, "test", Xi8 , X86testpat>;
-    def TEST16mr   : BinOpMR_F<0x84, "test", Xi16, X86testpat>;
-    def TEST32mr   : BinOpMR_F<0x84, "test", Xi32, X86testpat>;
-    def TEST64mr   : BinOpMR_F<0x84, "test", Xi64, X86testpat>;
+    let hasSideEffects = 0, mayLoad = 1 in {
+    def TEST8mr    : BinOpMR_F<0x84, "test", Xi8 , null_frag>;
+    def TEST16mr   : BinOpMR_F<0x84, "test", Xi16, null_frag>;
+    def TEST32mr   : BinOpMR_F<0x84, "test", Xi32, null_frag>;
+    def TEST64mr   : BinOpMR_F<0x84, "test", Xi64, null_frag>;
+    }
 
     def TEST8ri    : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>;
     def TEST16ri   : BinOpRI_F<0xF6, "test", Xi16, X86testpat, MRM0r>;
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index 529d054..394dca8 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -662,12 +662,11 @@
 
 // Memory barriers
 
-// TODO: Get this to fold the constant into the instruction.
 let isCodeGenOnly = 1, Defs = [EFLAGS] in
-def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
-                      "or{l}\t{$zero, $dst|$dst, $zero}", []>,
-                      Requires<[Not64BitMode]>, OpSize32, LOCK,
-                      Sched<[WriteALURMW]>;
+def OR32mi8Locked  : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
+                         "or{l}\t{$zero, $dst|$dst, $zero}", []>,
+                         Requires<[Not64BitMode]>, OpSize32, LOCK,
+                         Sched<[WriteALURMW]>;
 
 let hasSideEffects = 1 in
 def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
@@ -777,54 +776,65 @@
 defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
 defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
 
-multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
-                          string frag, string mnemonic> {
-let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
-    SchedRW = [WriteALURMW] in {
-def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
-                 !strconcat(mnemonic, "{b}\t$dst"),
-                 [(set EFLAGS, (!cast<PatFrag>(frag # "_8") addr:$dst))]>,
-                 LOCK;
-def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
-                 !strconcat(mnemonic, "{w}\t$dst"),
-                 [(set EFLAGS, (!cast<PatFrag>(frag # "_16") addr:$dst))]>,
-                 OpSize16, LOCK;
-def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
-                 !strconcat(mnemonic, "{l}\t$dst"),
-                 [(set EFLAGS, (!cast<PatFrag>(frag # "_32") addr:$dst))]>,
-                 OpSize32, LOCK;
-def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
-                  !strconcat(mnemonic, "{q}\t$dst"),
-                  [(set EFLAGS, (!cast<PatFrag>(frag # "_64") addr:$dst))]>,
-                  LOCK;
-}
-}
+def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
+                               (X86lock_add node:$lhs, node:$rhs), [{
+  return hasNoCarryFlagUses(SDValue(N, 0));
+}]>;
 
-multiclass unary_atomic_intrin<SDNode atomic_op> {
-  def _8 : PatFrag<(ops node:$ptr),
-                   (atomic_op  node:$ptr), [{
-    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
-  }]>;
-  def _16 : PatFrag<(ops node:$ptr),
-                    (atomic_op node:$ptr), [{
-    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
-  }]>;
-  def _32 : PatFrag<(ops node:$ptr),
-                    (atomic_op node:$ptr), [{
-    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
-  }]>;
-  def _64 : PatFrag<(ops node:$ptr),
-                    (atomic_op node:$ptr), [{
-    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
-  }]>;
+def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
+                               (X86lock_sub node:$lhs, node:$rhs), [{
+  return hasNoCarryFlagUses(SDValue(N, 0));
+}]>;
+
+let Predicates = [UseIncDec] in {
+  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
+      SchedRW = [WriteALURMW]  in {
+    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
+                        "inc{b}\t$dst",
+                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
+                        LOCK;
+    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
+                        "inc{w}\t$dst",
+                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
+                        OpSize16, LOCK;
+    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
+                        "inc{l}\t$dst",
+                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
+                        OpSize32, LOCK;
+    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
+                         "inc{q}\t$dst",
+                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
+                         LOCK;
+
+    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
+                        "dec{b}\t$dst",
+                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
+                        LOCK;
+    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
+                        "dec{w}\t$dst",
+                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
+                        OpSize16, LOCK;
+    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
+                        "dec{l}\t$dst",
+                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
+                        OpSize32, LOCK;
+    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
+                         "dec{q}\t$dst",
+                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
+                         LOCK;
+  }
+
+  // Additional patterns for -1 constant.
+  def : Pat<(X86lock_add addr:$dst, (i8  -1)), (LOCK_DEC8m  addr:$dst)>;
+  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
+  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
+  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
+  def : Pat<(X86lock_sub addr:$dst, (i8  -1)), (LOCK_INC8m  addr:$dst)>;
+  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
+  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
+  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
 }
 
-defm X86lock_inc : unary_atomic_intrin<X86lock_inc>;
-defm X86lock_dec : unary_atomic_intrin<X86lock_dec>;
-
-defm LOCK_INC    : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "X86lock_inc", "inc">;
-defm LOCK_DEC    : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "X86lock_dec", "dec">;
-
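
Why the _nocf guards above: INC and DEC update SF, ZF, OF, AF and PF but leave CF untouched, so a LOCK INC/DEC may only stand in for a LOCK ADD/SUB of +/-1 when no consumer of EFLAGS reads the carry flag, which is what hasNoCarryFlagUses checks on the selector side. A toy classifier of which condition codes read CF, as a sketch (the enum is illustrative, not LLVM's):

    #include <cstdio>

    // Unsigned comparisons (B/AE/A/BE) test CF; E/NE/S/NS only read ZF
    // or SF, so they stay correct when ADD/SUB +/-1 becomes INC/DEC.
    enum CondCode { COND_E, COND_NE, COND_S, COND_NS,
                    COND_B, COND_AE, COND_A, COND_BE };

    static bool readsCarryFlag(CondCode CC) {
      switch (CC) {
      case COND_B: case COND_AE: case COND_A: case COND_BE:
        return true;   // A and BE also read ZF, but CF is still involved
      default:
        return false;
      }
    }

    int main() {
      std::printf("jb reads CF: %d, je reads CF: %d\n",
                  readsCarryFlag(COND_B), readsCarryFlag(COND_E));
    }
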
 // Atomic compare and swap.
 multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                          SDPatternOperator frag, X86MemOperand x86memop> {
@@ -1266,7 +1276,8 @@
 // An i1 is stored in one byte in zero-extended form.
 // Upper-bit cleanup should be executed before the store.
 def : Pat<(zextloadi8i1  addr:$src), (MOV8rm addr:$src)>;
-def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(zextloadi16i1 addr:$src),
+          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
 def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
 def : Pat<(zextloadi64i1 addr:$src),
           (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
@@ -1277,9 +1288,11 @@
 // defined, avoiding partial-register updates.
 
 def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
-def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
+def : Pat<(extloadi16i1 addr:$src),
+          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
 def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
-def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
+def : Pat<(extloadi16i8 addr:$src),
+          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
 def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
 def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
 
@@ -1354,10 +1367,8 @@
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
     return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
 
-  KnownBits Known0;
-  CurDAG->computeKnownBits(N->getOperand(0), Known0, 0);
-  KnownBits Known1;
-  CurDAG->computeKnownBits(N->getOperand(1), Known1, 0);
+  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
+  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
   return (~Known0.Zero & ~Known1.Zero) == 0;
 }]>;
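
Two independent changes meet in this fragment: computeKnownBits now returns its KnownBits result instead of filling an out-parameter, and the final test asks whether the possibly-one bits of the two operands are disjoint. When they are, no bit position can carry, so a | b == a + b and instruction selection is free to use add-based forms such as LEA for an IR-level OR. A self-contained check of that identity, as a sketch (plain C++, not LLVM API):

    #include <cassert>
    #include <cstdint>

    // Disjoint possibly-one bits mean no carries, hence OR equals ADD.
    int main() {
      for (uint32_t hi = 0; hi < 16; ++hi)
        for (uint32_t lo = 0; lo < 16; ++lo) {
          uint32_t a = hi << 4, b = lo;  // bits 4-7 vs. bits 0-3
          assert((a | b) == (a + b));
        }
    }
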
 
@@ -1424,8 +1435,7 @@
 
 def sub_is_xor : PatFrag<(ops node:$lhs, node:$rhs), (sub node:$lhs, node:$rhs),[{
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
-    KnownBits Known;
-    CurDAG->computeKnownBits(N->getOperand(1), Known);
+    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(1));
 
     // If all possible ones in the RHS are set in the LHS then there can't be
     // a borrow and we can use xor.
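
Concretely: with a constant LHS of 0xFF, every possibly-one bit of an 8-bit RHS is already set in the LHS, no borrow can occur, and 0xFF - x == 0xFF ^ x for every x. A quick exhaustive check, as a sketch:

    #include <cassert>

    // sub_is_xor: LHS covers all possibly-one RHS bits, so SUB == XOR.
    int main() {
      const unsigned LHS = 0xFF;
      for (unsigned x = 0; x <= 0xFF; ++x)
        assert((LHS - x) == (LHS ^ x));
    }
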
@@ -2022,6 +2032,15 @@
   def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
   def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
   def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
+
+  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
+  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
+  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
+  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
+  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
+  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
+  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
+  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
 }
 
 // or reg/reg.
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 9ee5575..11a27ba 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -75,7 +75,7 @@
                   SDTypeProfile<1, 3, [SDTCVecEltisVT<0, i16>,
                                        SDTCVecEltisVT<1, i8>,
                                        SDTCisSameSizeAs<0,1>,
-                                       SDTCisSameAs<1,2>, SDTCisInt<3>]>>;
+                                       SDTCisSameAs<1,2>, SDTCisVT<3, i8>]>>;
 def X86andnp   : SDNode<"X86ISD::ANDNP",
                  SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                       SDTCisSameAs<0,2>]>>;
@@ -104,12 +104,21 @@
                         [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
 
 def SDTVtrunc    : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
-                                       SDTCisInt<0>, SDTCisInt<1>,
-                                       SDTCisOpSmallerThanOp<0, 1>]>;
+                                        SDTCisInt<0>, SDTCisInt<1>,
+                                        SDTCisOpSmallerThanOp<0, 1>]>;
+def SDTVmtrunc   : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisVec<1>,
+                                        SDTCisInt<0>, SDTCisInt<1>,
+                                        SDTCisOpSmallerThanOp<0, 1>,
+                                        SDTCisSameAs<0, 2>,
+                                        SDTCVecEltisVT<3, i1>,
+                                        SDTCisSameNumEltsAs<1, 3>]>;
 
 def X86vtrunc    : SDNode<"X86ISD::VTRUNC",   SDTVtrunc>;
 def X86vtruncs   : SDNode<"X86ISD::VTRUNCS",  SDTVtrunc>;
 def X86vtruncus  : SDNode<"X86ISD::VTRUNCUS", SDTVtrunc>;
+def X86vmtrunc   : SDNode<"X86ISD::VMTRUNC",   SDTVmtrunc>;
+def X86vmtruncs  : SDNode<"X86ISD::VMTRUNCS",  SDTVmtrunc>;
+def X86vmtruncus : SDNode<"X86ISD::VMTRUNCUS", SDTVmtrunc>;
 
 def X86vfpext  : SDNode<"X86ISD::VFPEXT",
                         SDTypeProfile<1, 1, [SDTCVecEltisVT<0, f64>,
@@ -134,6 +143,14 @@
                                              SDTCisSameSizeAs<0, 2>,
                                              SDTCisVT<3, i32>]>>;
 
+def X86vmfpround: SDNode<"X86ISD::VMFPROUND",
+                         SDTypeProfile<1, 3, [SDTCVecEltisVT<0, f32>,
+                                              SDTCVecEltisVT<1, f64>,
+                                              SDTCisSameSizeAs<0, 1>,
+                                              SDTCisSameAs<0, 2>,
+                                              SDTCVecEltisVT<3, i1>,
+                                              SDTCisSameNumEltsAs<1, 3>]>>;
+
 def X86vshiftimm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                         SDTCisVT<2, i8>, SDTCisInt<0>]>;
 
@@ -172,7 +189,7 @@
 
 def X86vshiftuniform : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisVec<2>, SDTCisInt<0>,
-                                            SDTCisInt<1>]>;
+                                            SDTCisInt<2>]>;
 
 def X86vshl    : SDNode<"X86ISD::VSHL", X86vshiftuniform>;
 def X86vsrl    : SDNode<"X86ISD::VSRL", X86vshiftuniform>;
@@ -227,10 +244,6 @@
                                           SDTCisVec<1>,
                                           SDTCisSameAs<2, 1>]>;
 
-def X86addus   : SDNode<"X86ISD::ADDUS", SDTIntBinOp, [SDNPCommutative]>;
-def X86subus   : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
-def X86adds    : SDNode<"X86ISD::ADDS", SDTIntBinOp, [SDNPCommutative]>;
-def X86subs    : SDNode<"X86ISD::SUBS", SDTIntBinOp>;
 def X86mulhrs  : SDNode<"X86ISD::MULHRS", SDTIntBinOp, [SDNPCommutative]>;
 def X86avg     : SDNode<"X86ISD::AVG" , SDTIntBinOp, [SDNPCommutative]>;
 def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
@@ -577,6 +590,19 @@
 def X86cvtp2UInt     : SDNode<"X86ISD::CVTP2UI",  SDTFloatToInt>;
 
 
+def SDTMFloatToInt: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisVec<1>,
+                                         SDTCisInt<0>, SDTCisFP<1>,
+                                         SDTCisSameSizeAs<0, 1>,
+                                         SDTCisSameAs<0, 2>,
+                                         SDTCVecEltisVT<3, i1>,
+                                         SDTCisSameNumEltsAs<1, 3>]>;
+
+def X86mcvtp2Int     : SDNode<"X86ISD::MCVTP2SI",  SDTMFloatToInt>;
+def X86mcvtp2UInt    : SDNode<"X86ISD::MCVTP2UI",  SDTMFloatToInt>;
+def X86mcvttp2si     : SDNode<"X86ISD::MCVTTP2SI", SDTMFloatToInt>;
+def X86mcvttp2ui     : SDNode<"X86ISD::MCVTTP2UI", SDTMFloatToInt>;
+
+
 def X86cvtph2ps     : SDNode<"X86ISD::CVTPH2PS",
                               SDTypeProfile<1, 1, [SDTCVecEltisVT<0, f32>,
                                                    SDTCVecEltisVT<1, i16>]> >;
@@ -590,6 +616,13 @@
                         SDTypeProfile<1, 2, [SDTCVecEltisVT<0, i16>,
                                              SDTCVecEltisVT<1, f32>,
                                              SDTCisVT<2, i32>]> >;
+def X86mcvtps2ph   : SDNode<"X86ISD::MCVTPS2PH",
+                        SDTypeProfile<1, 4, [SDTCVecEltisVT<0, i16>,
+                                             SDTCVecEltisVT<1, f32>,
+                                             SDTCisVT<2, i32>,
+                                             SDTCisSameAs<0, 3>,
+                                             SDTCVecEltisVT<4, i1>,
+                                             SDTCisSameNumEltsAs<1, 4>]> >;
 def X86vfpextRnd  : SDNode<"X86ISD::VFPEXT_RND",
                         SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f64>,
                                              SDTCVecEltisVT<1, f32>,
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index a0cd1a0..ab14ee7 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -718,7 +718,7 @@
 }
 
 /// Return the truncated shift count for a machine operand.
-inline static unsigned getTruncatedShiftCount(MachineInstr &MI,
+inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
                                               unsigned ShiftAmtOperandIdx) {
   // The shift count is six bits with the REX.W prefix and five bits without.
   unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
@@ -3421,9 +3421,10 @@
 /// This function can be extended later on.
 /// SrcReg, SrcReg2: register operands for FlagI.
 /// ImmValue: immediate for FlagI if it takes an immediate.
-inline static bool isRedundantFlagInstr(MachineInstr &FlagI, unsigned SrcReg,
-                                        unsigned SrcReg2, int ImmMask,
-                                        int ImmValue, MachineInstr &OI) {
+inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
+                                        unsigned SrcReg, unsigned SrcReg2,
+                                        int ImmMask, int ImmValue,
+                                        const MachineInstr &OI) {
   if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
        (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
        (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
@@ -3454,7 +3455,9 @@
 
 /// Check whether the definition can be converted
 /// to remove a comparison against zero.
-inline static bool isDefConvertible(MachineInstr &MI) {
+inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag) {
+  NoSignFlag = false;
+
   switch (MI.getOpcode()) {
   default: return false;
 
@@ -3519,8 +3522,6 @@
   case X86::SHL8r1:    case X86::SHL16r1:  case X86::SHL32r1:case X86::SHL64r1:
   case X86::ANDN32rr:  case X86::ANDN32rm:
   case X86::ANDN64rr:  case X86::ANDN64rm:
-  case X86::BEXTR32rr: case X86::BEXTR64rr:
-  case X86::BEXTR32rm: case X86::BEXTR64rm:
   case X86::BLSI32rr:  case X86::BLSI32rm:
   case X86::BLSI64rr:  case X86::BLSI64rm:
   case X86::BLSMSK32rr:case X86::BLSMSK32rm:
@@ -3538,8 +3539,6 @@
   case X86::TZCNT16rr: case X86::TZCNT16rm:
   case X86::TZCNT32rr: case X86::TZCNT32rm:
   case X86::TZCNT64rr: case X86::TZCNT64rm:
-  case X86::BEXTRI32ri:  case X86::BEXTRI32mi:
-  case X86::BEXTRI64ri:  case X86::BEXTRI64mi:
   case X86::BLCFILL32rr: case X86::BLCFILL32rm:
   case X86::BLCFILL64rr: case X86::BLCFILL64rm:
   case X86::BLCI32rr:    case X86::BLCI32rm:
@@ -3554,12 +3553,23 @@
   case X86::BLSFILL64rr: case X86::BLSFILL64rm:
   case X86::BLSIC32rr:   case X86::BLSIC32rm:
   case X86::BLSIC64rr:   case X86::BLSIC64rm:
+  case X86::T1MSKC32rr:  case X86::T1MSKC32rm:
+  case X86::T1MSKC64rr:  case X86::T1MSKC64rm:
+  case X86::TZMSK32rr:   case X86::TZMSK32rm:
+  case X86::TZMSK64rr:   case X86::TZMSK64rm:
+    return true;
+  case X86::BEXTR32rr:   case X86::BEXTR64rr:
+  case X86::BEXTR32rm:   case X86::BEXTR64rm:
+  case X86::BEXTRI32ri:  case X86::BEXTRI32mi:
+  case X86::BEXTRI64ri:  case X86::BEXTRI64mi:
+    // BEXTR doesn't update the sign flag, so we can't use it.
+    NoSignFlag = true;
     return true;
   }
 }
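
The new NoSignFlag out-parameter exists because BEXTR and BEXTRI define ZF but, per the ISA manuals, leave SF undefined (CF and OF are cleared). The compare-with-zero elimination must therefore refuse when a user tests SF, which the COND_S/COND_NS case added later in this patch enforces. A toy model of that guard, as a sketch with illustrative names:

    #include <cassert>

    enum Cond { COND_E, COND_NE, COND_S, COND_NS };

    // Defs like BEXTR leave SF undefined, so only SF-reading users block
    // the removal of a following compare against zero.
    static bool canDropCompare(Cond UserCC, bool DefLeavesSFUndefined) {
      if ((UserCC == COND_S || UserCC == COND_NS) && DefLeavesSFUndefined)
        return false;
      return true;
    }

    int main() {
      assert(canDropCompare(COND_E, true));    // ZF user: still fine
      assert(!canDropCompare(COND_S, true));   // SF user: must keep cmp
    }
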
 
 /// Check whether the use can be converted to remove a comparison against zero.
-static X86::CondCode isUseDefConvertible(MachineInstr &MI) {
+static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
   switch (MI.getOpcode()) {
   default: return X86::COND_INVALID;
   case X86::LZCNT16rr: case X86::LZCNT16rm:
@@ -3574,12 +3584,12 @@
   case X86::TZCNT32rr: case X86::TZCNT32rm:
   case X86::TZCNT64rr: case X86::TZCNT64rm:
     return X86::COND_B;
-  case X86::BSF16rr:
-  case X86::BSF16rm:
-  case X86::BSF32rr:
-  case X86::BSF32rm:
-  case X86::BSF64rr:
-  case X86::BSF64rm:
+  case X86::BSF16rr: case X86::BSF16rm:
+  case X86::BSF32rr: case X86::BSF32rm:
+  case X86::BSF64rr: case X86::BSF64rm:
+  case X86::BSR16rr: case X86::BSR16rm:
+  case X86::BSR32rr: case X86::BSR32rm:
+  case X86::BSR64rr: case X86::BSR64rm:
     return X86::COND_E;
   }
 }
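
BSR joins BSF here because both set ZF exactly when the source is zero, so a following compare against zero can reuse ZF via COND_E; LZCNT and TZCNT instead report an all-zero source through CF, hence COND_B above. A toy mapping, as a sketch:

    #include <cassert>

    enum Cond { COND_B, COND_E, COND_INVALID };
    enum Kind { LZCNT, TZCNT, BSF, BSR, OTHER };

    // Which condition code observes "the source was zero" for each def.
    static Cond zeroSourceCond(Kind K) {
      switch (K) {
      case LZCNT: case TZCNT: return COND_B;  // CF set on zero input
      case BSF:   case BSR:   return COND_E;  // ZF set on zero input
      default:                return COND_INVALID;
      }
    }

    int main() { assert(zeroSourceCond(BSR) == COND_E); }
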
@@ -3657,8 +3667,9 @@
   // instruction we can eliminate the compare iff the use sets EFLAGS in the
   // right way.
   bool ShouldUpdateCC = false;
+  bool NoSignFlag = false;
   X86::CondCode NewCC = X86::COND_INVALID;
-  if (IsCmpZero && !isDefConvertible(*MI)) {
+  if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag)) {
     // Scan forward from the use until we hit the use we're looking for or the
     // compare instruction.
     for (MachineBasicBlock::iterator J = MI;; ++J) {
@@ -3777,6 +3788,12 @@
       case X86::COND_O: case X86::COND_NO:
         // CF and OF are used, so we can't perform this optimization.
         return false;
+      case X86::COND_S: case X86::COND_NS:
+        // If SF is used, but the instruction doesn't update SF, then we
+        // can't do the optimization.
+        if (NoSignFlag)
+          return false;
+        break;
       }
 
       // If we're updating the condition code check if we have to reverse the
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index fe9b530..e53f83b 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -253,8 +253,6 @@
 def X86adc_flag  : SDNode<"X86ISD::ADC",  SDTBinaryArithWithFlagsInOut>;
 def X86sbb_flag  : SDNode<"X86ISD::SBB",  SDTBinaryArithWithFlagsInOut>;
 
-def X86inc_flag  : SDNode<"X86ISD::INC",  SDTUnaryArithWithFlags>;
-def X86dec_flag  : SDNode<"X86ISD::DEC",  SDTUnaryArithWithFlags>;
 def X86or_flag   : SDNode<"X86ISD::OR",   SDTBinaryArithWithFlags,
                           [SDNPCommutative]>;
 def X86xor_flag  : SDNode<"X86ISD::XOR",  SDTBinaryArithWithFlags,
@@ -278,13 +276,6 @@
                           [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                            SDNPMemOperand]>;
 
-def X86lock_inc  : SDNode<"X86ISD::LINC",  SDTLockUnaryArithWithFlags,
-                          [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
-                           SDNPMemOperand]>;
-def X86lock_dec  : SDNode<"X86ISD::LDEC",  SDTLockUnaryArithWithFlags,
-                          [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
-                           SDNPMemOperand]>;
-
 def X86bextr  : SDNode<"X86ISD::BEXTR",  SDTIntBinOp>;
 
 def X86bzhi   : SDNode<"X86ISD::BZHI",   SDTIntBinOp>;
@@ -2387,6 +2378,16 @@
 // Pattern fragments to auto generate BMI instructions.
 //===----------------------------------------------------------------------===//
 
+def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
+                           (X86or_flag node:$lhs, node:$rhs), [{
+  return hasNoCarryFlagUses(SDValue(N, 1));
+}]>;
+
+def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
+                            (X86xor_flag node:$lhs, node:$rhs), [{
+  return hasNoCarryFlagUses(SDValue(N, 1));
+}]>;
+
 let Predicates = [HasBMI] in {
   // FIXME: patterns for the load versions are not implemented
   def : Pat<(and GR32:$src, (add GR32:$src, -1)),
@@ -2403,6 +2404,14 @@
             (BLSI32rr GR32:$src)>;
   def : Pat<(and GR64:$src, (ineg GR64:$src)),
             (BLSI64rr GR64:$src)>;
+
+  // Versions to match flag-producing ops.
+  // X86and_flag nodes are rarely created. Those should use CMP+AND. We do
+  // TESTrr matching in PostProcessISelDAG to allow BLSR/BLSI to be formed.
+  def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, -1)),
+            (BLSMSK32rr GR32:$src)>;
+  def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, -1)),
+            (BLSMSK64rr GR64:$src)>;
 }
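
Background for the BLSMSK patterns just above: BLSMSK computes x ^ (x - 1), a mask of the low bits up to and including the lowest set bit, and the patterns match the flag-producing XOR form of that expression. The nocf gate is needed because XOR always clears CF while BLSMSK defines CF differently (set when the source is zero, per the ISA manuals). A check of the bit identity, as a sketch:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // x ^ (x - 1) == 2 * lowest_set_bit(x) - 1 for nonzero x.
    int main() {
      for (uint32_t x : {1u, 8u, 0x50u, 0xFFFF0000u}) {
        uint32_t lowbit = x & (~x + 1u);   // isolate the lowest set bit
        assert((x ^ (x - 1u)) == 2u * lowbit - 1u);
      }
    }
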
 
 multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
@@ -2801,6 +2810,45 @@
             (TZMSK32rr GR32:$src)>;
   def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
             (TZMSK64rr GR64:$src)>;
+
+  // Patterns to match flag-producing ops.
+  // X86and_flag nodes are rarely created. Those should use CMP+AND. We do
+  // TESTrr matching in PostProcessISelDAG to allow BLSR/BLSI to be formed.
+  def : Pat<(or_flag_nocf GR32:$src, (not (add GR32:$src, 1))),
+            (BLCI32rr GR32:$src)>;
+  def : Pat<(or_flag_nocf GR64:$src, (not (add GR64:$src, 1))),
+            (BLCI64rr GR64:$src)>;
+
+  // Extra patterns because opt can optimize the above patterns to this.
+  def : Pat<(or_flag_nocf GR32:$src, (sub -2, GR32:$src)),
+            (BLCI32rr GR32:$src)>;
+  def : Pat<(or_flag_nocf GR64:$src, (sub -2, GR64:$src)),
+            (BLCI64rr GR64:$src)>;
+
+  def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, 1)),
+            (BLCMSK32rr GR32:$src)>;
+  def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, 1)),
+            (BLCMSK64rr GR64:$src)>;
+
+  def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, 1)),
+            (BLCS32rr GR32:$src)>;
+  def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, 1)),
+            (BLCS64rr GR64:$src)>;
+
+  def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, -1)),
+            (BLSFILL32rr GR32:$src)>;
+  def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, -1)),
+            (BLSFILL64rr GR64:$src)>;
+
+  def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
+            (BLSIC32rr GR32:$src)>;
+  def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
+            (BLSIC64rr GR64:$src)>;
+
+  def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
+            (T1MSKC32rr GR32:$src)>;
+  def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
+            (T1MSKC64rr GR64:$src)>;
 } // HasTBM
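
On the "Extra patterns" comment above: in two's complement ~(x + 1) == -2 - x, so the middle-end can canonicalize the BLCI form x | ~(x + 1) into x | (-2 - x), and both shapes have to be matched. A quick overflow-free check in unsigned arithmetic, as a sketch:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // The identity behind the (sub -2, x) BLCI patterns.
    int main() {
      for (uint32_t x : {0u, 1u, 42u, 0x7FFFFFFFu, 0xFFFFFFFFu})
        assert(uint32_t(~(x + 1u)) == uint32_t(-2) - x);
    }
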
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 472ee71..e2bcd18 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -3623,13 +3623,13 @@
                              SchedWriteVecALU, 1, NoVLX>;
 defm PADDQ   : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
                              SchedWriteVecALU, 1, NoVLX>;
-defm PADDSB  : PDI_binop_all<0xEC, "paddsb", X86adds, v16i8, v32i8,
+defm PADDSB  : PDI_binop_all<0xEC, "paddsb", saddsat, v16i8, v32i8,
                              SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
-defm PADDSW  : PDI_binop_all<0xED, "paddsw", X86adds, v8i16, v16i16,
+defm PADDSW  : PDI_binop_all<0xED, "paddsw", saddsat, v8i16, v16i16,
                              SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
-defm PADDUSB : PDI_binop_all<0xDC, "paddusb", X86addus, v16i8, v32i8,
+defm PADDUSB : PDI_binop_all<0xDC, "paddusb", uaddsat, v16i8, v32i8,
                              SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
-defm PADDUSW : PDI_binop_all<0xDD, "paddusw", X86addus, v8i16, v16i16,
+defm PADDUSW : PDI_binop_all<0xDD, "paddusw", uaddsat, v8i16, v16i16,
                              SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
 defm PMULLW  : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
                              SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>;
@@ -3645,13 +3645,13 @@
                              SchedWriteVecALU, 0, NoVLX>;
 defm PSUBQ   : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
                              SchedWriteVecALU, 0, NoVLX>;
-defm PSUBSB  : PDI_binop_all<0xE8, "psubsb", X86subs, v16i8, v32i8,
+defm PSUBSB  : PDI_binop_all<0xE8, "psubsb", ssubsat, v16i8, v32i8,
                              SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
-defm PSUBSW  : PDI_binop_all<0xE9, "psubsw", X86subs, v8i16, v16i16,
+defm PSUBSW  : PDI_binop_all<0xE9, "psubsw", ssubsat, v8i16, v16i16,
                              SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
-defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
+defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", usubsat, v16i8, v32i8,
                              SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
-defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
+defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", usubsat, v8i16, v16i16,
                              SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
 defm PMINUB  : PDI_binop_all<0xDA, "pminub", umin, v16i8, v32i8,
                              SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
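
The PADDS/PADDUS/PSUBS/PSUBUS changes above retire the target-specific X86adds/X86addus/X86subs/X86subus nodes in favor of the generic saturating nodes saddsat, uaddsat, ssubsat and usubsat. Their semantics, modeled at i8 as a sketch (plain C++, not LLVM code):

    #include <algorithm>
    #include <cassert>

    // Saturating adds clamp to the element range instead of wrapping.
    static int saddsat_i8(int a, int b) {
      return std::max(-128, std::min(127, a + b));
    }
    static int uaddsat_u8(int a, int b) {
      return std::min(255, a + b);
    }

    int main() {
      assert(saddsat_i8(100, 100) == 127);   // would wrap to -56 in i8
      assert(saddsat_i8(-100, -100) == -128);
      assert(uaddsat_u8(200, 100) == 255);   // would wrap to 44 in u8
    }
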
@@ -4669,12 +4669,16 @@
 let Predicates = [HasAVX, NoVLX] in {
   def : Pat<(X86Movddup (loadv2f64 addr:$src)),
             (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+  def : Pat<(X86Movddup (v2f64 (X86vzload addr:$src))),
+            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
 }
 
 let Predicates = [UseSSE3] in {
   // No need for aligned memory as this only loads 64 bits.
   def : Pat<(X86Movddup (loadv2f64 addr:$src)),
             (MOVDDUPrm addr:$src)>;
+  def : Pat<(X86Movddup (v2f64 (X86vzload addr:$src))),
+            (MOVDDUPrm addr:$src)>;
 }
 
 //===---------------------------------------------------------------------===//
@@ -8034,6 +8038,8 @@
             (VMOVDDUPrr VR128:$src)>;
   def : Pat<(v2f64 (X86VBroadcast (loadv2f64 addr:$src))),
             (VMOVDDUPrm addr:$src)>;
+  def : Pat<(v2f64 (X86VBroadcast (v2f64 (X86vzload addr:$src)))),
+            (VMOVDDUPrm addr:$src)>;
 }
 
 let Predicates = [HasAVX1Only] in {
diff --git a/lib/Target/X86/X86IntrinsicsInfo.h b/lib/Target/X86/X86IntrinsicsInfo.h
index f1ed8ca..151e1b9 100644
--- a/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/lib/Target/X86/X86IntrinsicsInfo.h
@@ -20,18 +20,18 @@
 namespace llvm {
 
 enum IntrinsicType : uint16_t {
-  GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, XGETBV, ADX, FPCLASS, FPCLASSS,
+  GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, XGETBV, ADX, FPCLASSS,
   INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_3OP, INTR_TYPE_4OP,
-  INTR_TYPE_2OP_IMM8, INTR_TYPE_3OP_IMM8,
-  CMP_MASK, CMP_MASK_CC,CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM,
-  CVTPD2PS, CVTPD2PS_MASK,
+  INTR_TYPE_3OP_IMM8,
+  CMP_MASK_CC, CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM,
+  CVTPD2PS, CVTPD2PS_MASK, CVTPD2PS_RND_MASK,
   INTR_TYPE_1OP_MASK, INTR_TYPE_1OP_MASK_RM,
   INTR_TYPE_2OP_MASK, INTR_TYPE_2OP_MASK_RM,
   INTR_TYPE_3OP_MASK,
-  FMA_OP_MASK, FMA_OP_MASKZ, FMA_OP_SCALAR,
   IFMA_OP, VPERM_2OP, INTR_TYPE_SCALAR_MASK,
   INTR_TYPE_SCALAR_MASK_RM, INTR_TYPE_3OP_SCALAR_MASK,
   COMPRESS_EXPAND_IN_REG,
+  TRUNCATE_TO_REG, CVTPS2PH_MASK, CVTPD2I_MASK,
   TRUNCATE_TO_MEM_VI8, TRUNCATE_TO_MEM_VI16, TRUNCATE_TO_MEM_VI32,
   FIXUPIMM, FIXUPIMM_MASKZ, FIXUPIMMS,
   FIXUPIMMS_MASKZ, GATHER_AVX2,
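
The new TRUNCATE_TO_REG entries added further down pair an unmasked opcode with a masked one (VTRUNC with VMTRUNC and so on), so a masked truncating move is represented by a single node instead of a truncate followed by a vector select. A scalar model of the masked semantics, as a sketch:

    #include <array>
    #include <cassert>
    #include <cstdint>

    // Masked truncate: active lanes take the truncated source, inactive
    // lanes take the passthru operand, in one operation.
    int main() {
      std::array<int32_t, 4> src{70000, -1, 300, 5};
      std::array<int16_t, 4> passthru{9, 9, 9, 9}, dst{};
      uint8_t mask = 0x5;                    // lanes 0 and 2 active
      for (int i = 0; i < 4; ++i)
        dst[i] = ((mask >> i) & 1) ? int16_t(src[i]) : passthru[i];
      assert(dst[0] == int16_t(70000) && dst[1] == 9 && dst[2] == 300);
    }
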
@@ -115,6 +115,31 @@
   X86_INTRINSIC_DATA(avx512_gatherpf_qps_512, PREFETCH,
                      X86::VGATHERPF0QPSm, X86::VGATHERPF1QPSm),
 
+  X86_INTRINSIC_DATA(avx512_mask_gather_dpd_512, GATHER, X86::VGATHERDPDZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather_dpi_512, GATHER, X86::VPGATHERDDZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather_dpq_512, GATHER, X86::VPGATHERDQZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather_dps_512, GATHER, X86::VGATHERDPSZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather_qpd_512, GATHER, X86::VGATHERQPDZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather_qpi_512, GATHER, X86::VPGATHERQDZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather_qpq_512, GATHER, X86::VPGATHERQQZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather_qps_512, GATHER, X86::VGATHERQPSZrm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div2_df, GATHER, X86::VGATHERQPDZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div2_di, GATHER, X86::VPGATHERQQZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div4_df, GATHER, X86::VGATHERQPDZ256rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div4_di, GATHER, X86::VPGATHERQQZ256rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div4_sf, GATHER, X86::VGATHERQPSZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div4_si, GATHER, X86::VPGATHERQDZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div8_sf, GATHER, X86::VGATHERQPSZ256rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3div8_si, GATHER, X86::VPGATHERQDZ256rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv2_df, GATHER, X86::VGATHERDPDZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv2_di, GATHER, X86::VPGATHERDQZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv4_df, GATHER, X86::VGATHERDPDZ256rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv4_di, GATHER, X86::VPGATHERDQZ256rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv4_sf, GATHER, X86::VGATHERDPSZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv4_si, GATHER, X86::VPGATHERDDZ128rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv8_sf, GATHER, X86::VGATHERDPSZ256rm, 0),
+  X86_INTRINSIC_DATA(avx512_mask_gather3siv8_si, GATHER, X86::VPGATHERDDZ256rm, 0),
+
   X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_128, TRUNCATE_TO_MEM_VI8,
                      X86ISD::VTRUNC, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_256, TRUNCATE_TO_MEM_VI8,
@@ -224,6 +249,31 @@
   X86_INTRINSIC_DATA(avx512_mask_pmovus_wb_mem_512, TRUNCATE_TO_MEM_VI8,
                      X86ISD::VTRUNCUS, 0),
 
+  X86_INTRINSIC_DATA(avx512_mask_scatter_dpd_512, SCATTER, X86::VSCATTERDPDZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatter_dpi_512, SCATTER, X86::VPSCATTERDDZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatter_dpq_512, SCATTER, X86::VPSCATTERDQZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatter_dps_512, SCATTER, X86::VSCATTERDPSZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatter_qpd_512, SCATTER, X86::VSCATTERQPDZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatter_qpi_512, SCATTER, X86::VPSCATTERQDZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatter_qpq_512, SCATTER, X86::VPSCATTERQQZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatter_qps_512, SCATTER, X86::VSCATTERQPSZmr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv2_df, SCATTER, X86::VSCATTERQPDZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv2_di, SCATTER, X86::VPSCATTERQQZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv4_df, SCATTER, X86::VSCATTERQPDZ256mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv4_di, SCATTER, X86::VPSCATTERQQZ256mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv4_sf, SCATTER, X86::VSCATTERQPSZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv4_si, SCATTER, X86::VPSCATTERQDZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv8_sf, SCATTER, X86::VSCATTERQPSZ256mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scatterdiv8_si, SCATTER, X86::VPSCATTERQDZ256mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv2_df, SCATTER, X86::VSCATTERDPDZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv2_di, SCATTER, X86::VPSCATTERDQZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv4_df, SCATTER, X86::VSCATTERDPDZ256mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv4_di, SCATTER, X86::VPSCATTERDQZ256mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv4_sf, SCATTER, X86::VSCATTERDPSZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv4_si, SCATTER, X86::VPSCATTERDDZ128mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv8_sf, SCATTER, X86::VSCATTERDPSZ256mr, 0),
+  X86_INTRINSIC_DATA(avx512_mask_scattersiv8_si, SCATTER, X86::VPSCATTERDDZ256mr, 0),
+
   X86_INTRINSIC_DATA(avx512_scatter_dpd_512, SCATTER, X86::VSCATTERDPDZmr, 0),
   X86_INTRINSIC_DATA(avx512_scatter_dpi_512, SCATTER, X86::VPSCATTERDDZmr, 0),
   X86_INTRINSIC_DATA(avx512_scatter_dpq_512, SCATTER, X86::VPSCATTERDQZmr, 0),
@@ -319,8 +369,6 @@
   X86_INTRINSIC_DATA(avx2_packsswb, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(avx2_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
   X86_INTRINSIC_DATA(avx2_packuswb, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
-  X86_INTRINSIC_DATA(avx2_padds_b, INTR_TYPE_2OP, X86ISD::ADDS, 0),
-  X86_INTRINSIC_DATA(avx2_padds_w, INTR_TYPE_2OP, X86ISD::ADDS, 0),
   X86_INTRINSIC_DATA(avx2_permd, VPERM_2OP, X86ISD::VPERMV, 0),
   X86_INTRINSIC_DATA(avx2_permps, VPERM_2OP, X86ISD::VPERMV, 0),
   X86_INTRINSIC_DATA(avx2_phadd_d, INTR_TYPE_2OP, X86ISD::HADD, 0),
@@ -361,8 +409,6 @@
   X86_INTRINSIC_DATA(avx2_psrlv_d_256, INTR_TYPE_2OP, ISD::SRL, 0),
   X86_INTRINSIC_DATA(avx2_psrlv_q, INTR_TYPE_2OP, ISD::SRL, 0),
   X86_INTRINSIC_DATA(avx2_psrlv_q_256, INTR_TYPE_2OP, ISD::SRL, 0),
-  X86_INTRINSIC_DATA(avx2_psubs_b, INTR_TYPE_2OP, X86ISD::SUBS, 0),
-  X86_INTRINSIC_DATA(avx2_psubs_w, INTR_TYPE_2OP, X86ISD::SUBS, 0),
   X86_INTRINSIC_DATA(avx512_add_pd_512, INTR_TYPE_2OP, ISD::FADD, X86ISD::FADD_RND),
   X86_INTRINSIC_DATA(avx512_add_ps_512, INTR_TYPE_2OP, ISD::FADD, X86ISD::FADD_RND),
   X86_INTRINSIC_DATA(avx512_cmp_pd_128, CMP_MASK_CC, X86ISD::CMPM, 0),
@@ -392,12 +438,12 @@
   X86_INTRINSIC_DATA(avx512_div_ps_512, INTR_TYPE_2OP, ISD::FDIV, X86ISD::FDIV_RND),
   X86_INTRINSIC_DATA(avx512_exp2_pd, INTR_TYPE_1OP_MASK_RM, X86ISD::EXP2, 0),
   X86_INTRINSIC_DATA(avx512_exp2_ps, INTR_TYPE_1OP_MASK_RM, X86ISD::EXP2, 0),
-  X86_INTRINSIC_DATA(avx512_fpclass_pd_128, FPCLASS, X86ISD::VFPCLASS, 0),
-  X86_INTRINSIC_DATA(avx512_fpclass_pd_256, FPCLASS, X86ISD::VFPCLASS, 0),
-  X86_INTRINSIC_DATA(avx512_fpclass_pd_512, FPCLASS, X86ISD::VFPCLASS, 0),
-  X86_INTRINSIC_DATA(avx512_fpclass_ps_128, FPCLASS, X86ISD::VFPCLASS, 0),
-  X86_INTRINSIC_DATA(avx512_fpclass_ps_256, FPCLASS, X86ISD::VFPCLASS, 0),
-  X86_INTRINSIC_DATA(avx512_fpclass_ps_512, FPCLASS, X86ISD::VFPCLASS, 0),
+  X86_INTRINSIC_DATA(avx512_fpclass_pd_128, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
+  X86_INTRINSIC_DATA(avx512_fpclass_pd_256, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
+  X86_INTRINSIC_DATA(avx512_fpclass_pd_512, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
+  X86_INTRINSIC_DATA(avx512_fpclass_ps_128, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
+  X86_INTRINSIC_DATA(avx512_fpclass_ps_256, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
+  X86_INTRINSIC_DATA(avx512_fpclass_ps_512, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
   X86_INTRINSIC_DATA(avx512_kadd_b, INTR_TYPE_2OP, X86ISD::KADD, 0),
   X86_INTRINSIC_DATA(avx512_kadd_d, INTR_TYPE_2OP, X86ISD::KADD, 0),
   X86_INTRINSIC_DATA(avx512_kadd_q, INTR_TYPE_2OP, X86ISD::KADD, 0),
@@ -461,13 +507,13 @@
                      X86ISD::CONFLICT, 0),
   X86_INTRINSIC_DATA(avx512_mask_cvtdq2ps_512, INTR_TYPE_1OP_MASK,
                      ISD::SINT_TO_FP, X86ISD::SINT_TO_FP_RND), //er
-  X86_INTRINSIC_DATA(avx512_mask_cvtpd2dq_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::CVTP2SI, 0),
+  X86_INTRINSIC_DATA(avx512_mask_cvtpd2dq_128, CVTPD2I_MASK,
+                     X86ISD::CVTP2SI, X86ISD::MCVTP2SI),
   X86_INTRINSIC_DATA(avx512_mask_cvtpd2dq_512, INTR_TYPE_1OP_MASK,
-                    X86ISD::CVTP2SI, X86ISD::CVTP2SI_RND),
-  X86_INTRINSIC_DATA(avx512_mask_cvtpd2ps,     INTR_TYPE_1OP_MASK,
-                    X86ISD::VFPROUND, 0),
-  X86_INTRINSIC_DATA(avx512_mask_cvtpd2ps_512, CVTPD2PS_MASK,
+                     X86ISD::CVTP2SI, X86ISD::CVTP2SI_RND),
+  X86_INTRINSIC_DATA(avx512_mask_cvtpd2ps,     CVTPD2PS_MASK,
+                     X86ISD::VFPROUND, X86ISD::VMFPROUND),
+  X86_INTRINSIC_DATA(avx512_mask_cvtpd2ps_512, CVTPD2PS_RND_MASK,
                      ISD::FP_ROUND, X86ISD::VFPROUND_RND),
   X86_INTRINSIC_DATA(avx512_mask_cvtpd2qq_128, INTR_TYPE_1OP_MASK,
                      X86ISD::CVTP2SI, 0),
@@ -475,8 +521,8 @@
                      X86ISD::CVTP2SI, 0),
   X86_INTRINSIC_DATA(avx512_mask_cvtpd2qq_512, INTR_TYPE_1OP_MASK,
                      X86ISD::CVTP2SI, X86ISD::CVTP2SI_RND),
-  X86_INTRINSIC_DATA(avx512_mask_cvtpd2udq_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::CVTP2UI, 0),
+  X86_INTRINSIC_DATA(avx512_mask_cvtpd2udq_128, CVTPD2I_MASK,
+                     X86ISD::CVTP2UI, X86ISD::MCVTP2UI),
   X86_INTRINSIC_DATA(avx512_mask_cvtpd2udq_256, INTR_TYPE_1OP_MASK,
                      X86ISD::CVTP2UI, 0),
   X86_INTRINSIC_DATA(avx512_mask_cvtpd2udq_512, INTR_TYPE_1OP_MASK,
@@ -525,8 +571,8 @@
                      X86ISD::VFPROUNDS_RND, 0),
   X86_INTRINSIC_DATA(avx512_mask_cvtss2sd_round, INTR_TYPE_SCALAR_MASK_RM,
                      X86ISD::VFPEXTS_RND, 0),
-  X86_INTRINSIC_DATA(avx512_mask_cvttpd2dq_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::CVTTP2SI, 0),
+  X86_INTRINSIC_DATA(avx512_mask_cvttpd2dq_128, CVTPD2I_MASK,
+                     X86ISD::CVTTP2SI, X86ISD::MCVTTP2SI),
   X86_INTRINSIC_DATA(avx512_mask_cvttpd2dq_512, INTR_TYPE_1OP_MASK,
                      X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_RND),
   X86_INTRINSIC_DATA(avx512_mask_cvttpd2qq_128, INTR_TYPE_1OP_MASK,
@@ -535,8 +581,8 @@
                      X86ISD::CVTTP2SI, 0),
   X86_INTRINSIC_DATA(avx512_mask_cvttpd2qq_512, INTR_TYPE_1OP_MASK,
                      X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_RND),
-  X86_INTRINSIC_DATA(avx512_mask_cvttpd2udq_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::CVTTP2UI, 0),
+  X86_INTRINSIC_DATA(avx512_mask_cvttpd2udq_128, CVTPD2I_MASK,
+                     X86ISD::CVTTP2UI, X86ISD::MCVTTP2UI),
   X86_INTRINSIC_DATA(avx512_mask_cvttpd2udq_256, INTR_TYPE_1OP_MASK,
                      X86ISD::CVTTP2UI, 0),
   X86_INTRINSIC_DATA(avx512_mask_cvttpd2udq_512, INTR_TYPE_1OP_MASK,
@@ -671,120 +717,114 @@
                      X86ISD::FMULS_RND, 0),
   X86_INTRINSIC_DATA(avx512_mask_mul_ss_round, INTR_TYPE_SCALAR_MASK_RM,
                      X86ISD::FMULS_RND, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_db_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_db_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_db_512, INTR_TYPE_1OP_MASK,
-                     ISD::TRUNCATE, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_dw_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_dw_256, INTR_TYPE_1OP_MASK,
-                     ISD::TRUNCATE, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_dw_512, INTR_TYPE_1OP_MASK,
-                     ISD::TRUNCATE, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_qb_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_qb_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_qb_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_qd_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_db_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_db_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_db_512, TRUNCATE_TO_REG,
+                     ISD::TRUNCATE, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_dw_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_dw_256, TRUNCATE_TO_REG,
+                     ISD::TRUNCATE, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_dw_512, TRUNCATE_TO_REG,
+                     ISD::TRUNCATE, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_qb_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_qb_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_qb_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_qd_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
   X86_INTRINSIC_DATA(avx512_mask_pmov_qd_256, INTR_TYPE_1OP_MASK,
                      ISD::TRUNCATE, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmov_qd_512, INTR_TYPE_1OP_MASK,
                      ISD::TRUNCATE, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_qw_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_qw_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_qw_512, INTR_TYPE_1OP_MASK,
-                     ISD::TRUNCATE, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmov_wb_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNC, 0),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_qw_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_qw_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_qw_512, TRUNCATE_TO_REG,
+                     ISD::TRUNCATE, X86ISD::VMTRUNC),
+  X86_INTRINSIC_DATA(avx512_mask_pmov_wb_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNC, X86ISD::VMTRUNC),
   X86_INTRINSIC_DATA(avx512_mask_pmov_wb_256, INTR_TYPE_1OP_MASK,
                      ISD::TRUNCATE, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmov_wb_512, INTR_TYPE_1OP_MASK,
                      ISD::TRUNCATE, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_db_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_db_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_db_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_dw_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_dw_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_dw_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_qb_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_qb_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_qb_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_db_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_db_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_db_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_dw_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_dw_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_dw_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_qb_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_qb_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_qb_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
   X86_INTRINSIC_DATA(avx512_mask_pmovs_qd_128, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmovs_qd_256, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmovs_qd_512, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_qw_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_qw_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_qw_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovs_wb_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCS, 0),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_qw_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_qw_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_qw_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovs_wb_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCS, X86ISD::VMTRUNCS),
   X86_INTRINSIC_DATA(avx512_mask_pmovs_wb_256, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmovs_wb_512, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_db_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_db_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_db_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_dw_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_dw_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_dw_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_qb_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_qb_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_qb_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_qd_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_db_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_db_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_db_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_dw_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_dw_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_dw_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_qb_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_qb_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_qb_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_qd_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
   X86_INTRINSIC_DATA(avx512_mask_pmovus_qd_256, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCUS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmovus_qd_512, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_qw_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_qw_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_qw_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmovus_wb_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::VTRUNCUS, 0),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_qw_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_qw_256, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_qw_512, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
+  X86_INTRINSIC_DATA(avx512_mask_pmovus_wb_128, TRUNCATE_TO_REG,
+                     X86ISD::VTRUNCUS, X86ISD::VMTRUNCUS),
   X86_INTRINSIC_DATA(avx512_mask_pmovus_wb_256, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCUS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmovus_wb_512, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_128, INTR_TYPE_2OP_MASK,
-                     X86ISD::MULTISHIFT, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_256, INTR_TYPE_2OP_MASK,
-                     X86ISD::MULTISHIFT, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_512, INTR_TYPE_2OP_MASK,
-                     X86ISD::MULTISHIFT, 0),
   X86_INTRINSIC_DATA(avx512_mask_range_pd_128, INTR_TYPE_3OP_MASK, X86ISD::VRANGE, 0),
   X86_INTRINSIC_DATA(avx512_mask_range_pd_256, INTR_TYPE_3OP_MASK, X86ISD::VRANGE, 0),
   X86_INTRINSIC_DATA(avx512_mask_range_pd_512, INTR_TYPE_3OP_MASK, X86ISD::VRANGE, X86ISD::VRANGE_RND),
@@ -841,38 +881,12 @@
                      X86ISD::CVTPH2PS, 0),
   X86_INTRINSIC_DATA(avx512_mask_vcvtph2ps_512, INTR_TYPE_1OP_MASK,
                      X86ISD::CVTPH2PS, X86ISD::CVTPH2PS_RND),
-  X86_INTRINSIC_DATA(avx512_mask_vcvtps2ph_128, INTR_TYPE_2OP_MASK,
-                     X86ISD::CVTPS2PH, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vcvtps2ph_256, INTR_TYPE_2OP_MASK,
-                     X86ISD::CVTPS2PH, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vcvtps2ph_512, INTR_TYPE_2OP_MASK,
-                     X86ISD::CVTPS2PH, 0),
-
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_d_128, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_d_256, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_d_512, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_q_128, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_q_256, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_q_512, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_w_128, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_w_256, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshldv_w_512, FMA_OP_MASK, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_d_128, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_d_256, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_d_512, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_q_128, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_q_256, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_q_512, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_w_128, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_w_256, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshrdv_w_512, FMA_OP_MASK, X86ISD::VSHRDV, 0),
-
-  X86_INTRINSIC_DATA(avx512_mask_vpshufbitqmb_128, CMP_MASK,
-                     X86ISD::VPSHUFBITQMB, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshufbitqmb_256, CMP_MASK,
-                     X86ISD::VPSHUFBITQMB, 0),
-  X86_INTRINSIC_DATA(avx512_mask_vpshufbitqmb_512, CMP_MASK,
-                     X86ISD::VPSHUFBITQMB, 0),
+  X86_INTRINSIC_DATA(avx512_mask_vcvtps2ph_128, CVTPS2PH_MASK,
+                     X86ISD::CVTPS2PH, X86ISD::MCVTPS2PH),
+  X86_INTRINSIC_DATA(avx512_mask_vcvtps2ph_256, CVTPS2PH_MASK,
+                     X86ISD::CVTPS2PH, X86ISD::MCVTPS2PH),
+  X86_INTRINSIC_DATA(avx512_mask_vcvtps2ph_512, CVTPS2PH_MASK,
+                     X86ISD::CVTPS2PH, X86ISD::MCVTPS2PH),
 
   X86_INTRINSIC_DATA(avx512_maskz_fixupimm_pd_128, FIXUPIMM_MASKZ,
                      X86ISD::VFIXUPIMM, 0),
@@ -891,25 +905,6 @@
   X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ss, FIXUPIMMS_MASKZ,
                      X86ISD::VFIXUPIMMS, 0),
 
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_d_128, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_d_256, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_d_512, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_q_128, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_q_256, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_q_512, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_w_128, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_w_256, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshldv_w_512, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_d_128, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_d_256, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_d_512, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_q_128, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_q_256, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_q_512, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_w_128, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_w_256, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-  X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_w_512, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
-
   X86_INTRINSIC_DATA(avx512_max_pd_512, INTR_TYPE_2OP, X86ISD::FMAX, X86ISD::FMAX_RND),
   X86_INTRINSIC_DATA(avx512_max_ps_512, INTR_TYPE_2OP, X86ISD::FMAX, X86ISD::FMAX_RND),
   X86_INTRINSIC_DATA(avx512_min_pd_512, INTR_TYPE_2OP, X86ISD::FMIN, X86ISD::FMIN_RND),
@@ -920,8 +915,6 @@
   X86_INTRINSIC_DATA(avx512_packsswb_512, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(avx512_packusdw_512, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
   X86_INTRINSIC_DATA(avx512_packuswb_512, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
-  X86_INTRINSIC_DATA(avx512_padds_b_512, INTR_TYPE_2OP, X86ISD::ADDS, 0),
-  X86_INTRINSIC_DATA(avx512_padds_w_512, INTR_TYPE_2OP, X86ISD::ADDS, 0),
   X86_INTRINSIC_DATA(avx512_permvar_df_256, VPERM_2OP, X86ISD::VPERMV, 0),
   X86_INTRINSIC_DATA(avx512_permvar_df_512, VPERM_2OP, X86ISD::VPERMV, 0),
   X86_INTRINSIC_DATA(avx512_permvar_di_256, VPERM_2OP, X86ISD::VPERMV, 0),
@@ -939,30 +932,9 @@
   X86_INTRINSIC_DATA(avx512_pmul_hr_sw_512, INTR_TYPE_2OP, X86ISD::MULHRS, 0),
   X86_INTRINSIC_DATA(avx512_pmulh_w_512, INTR_TYPE_2OP, ISD::MULHS, 0),
   X86_INTRINSIC_DATA(avx512_pmulhu_w_512, INTR_TYPE_2OP, ISD::MULHU, 0),
-  X86_INTRINSIC_DATA(avx512_prol_d_128,  INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(avx512_prol_d_256,  INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(avx512_prol_d_512,  INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(avx512_prol_q_128,  INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(avx512_prol_q_256,  INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(avx512_prol_q_512,  INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(avx512_prolv_d_128, INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(avx512_prolv_d_256, INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(avx512_prolv_d_512, INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(avx512_prolv_q_128, INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(avx512_prolv_q_256, INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(avx512_prolv_q_512, INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(avx512_pror_d_128,  INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
-  X86_INTRINSIC_DATA(avx512_pror_d_256,  INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
-  X86_INTRINSIC_DATA(avx512_pror_d_512,  INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
-  X86_INTRINSIC_DATA(avx512_pror_q_128,  INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
-  X86_INTRINSIC_DATA(avx512_pror_q_256,  INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
-  X86_INTRINSIC_DATA(avx512_pror_q_512,  INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
-  X86_INTRINSIC_DATA(avx512_prorv_d_128, INTR_TYPE_2OP, ISD::ROTR, 0),
-  X86_INTRINSIC_DATA(avx512_prorv_d_256, INTR_TYPE_2OP, ISD::ROTR, 0),
-  X86_INTRINSIC_DATA(avx512_prorv_d_512, INTR_TYPE_2OP, ISD::ROTR, 0),
-  X86_INTRINSIC_DATA(avx512_prorv_q_128, INTR_TYPE_2OP, ISD::ROTR, 0),
-  X86_INTRINSIC_DATA(avx512_prorv_q_256, INTR_TYPE_2OP, ISD::ROTR, 0),
-  X86_INTRINSIC_DATA(avx512_prorv_q_512, INTR_TYPE_2OP, ISD::ROTR, 0),
+  X86_INTRINSIC_DATA(avx512_pmultishift_qb_128, INTR_TYPE_2OP, X86ISD::MULTISHIFT, 0),
+  X86_INTRINSIC_DATA(avx512_pmultishift_qb_256, INTR_TYPE_2OP, X86ISD::MULTISHIFT, 0),
+  X86_INTRINSIC_DATA(avx512_pmultishift_qb_512, INTR_TYPE_2OP, X86ISD::MULTISHIFT, 0),
   X86_INTRINSIC_DATA(avx512_psad_bw_512, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
   X86_INTRINSIC_DATA(avx512_pshuf_b_512, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
   X86_INTRINSIC_DATA(avx512_psll_d_512, INTR_TYPE_2OP, X86ISD::VSHL, 0),
@@ -1004,8 +976,6 @@
   X86_INTRINSIC_DATA(avx512_psrlv_w_128, INTR_TYPE_2OP, ISD::SRL, 0),
   X86_INTRINSIC_DATA(avx512_psrlv_w_256, INTR_TYPE_2OP, ISD::SRL, 0),
   X86_INTRINSIC_DATA(avx512_psrlv_w_512, INTR_TYPE_2OP, ISD::SRL, 0),
-  X86_INTRINSIC_DATA(avx512_psubs_b_512, INTR_TYPE_2OP, X86ISD::SUBS, 0),
-  X86_INTRINSIC_DATA(avx512_psubs_w_512, INTR_TYPE_2OP, X86ISD::SUBS, 0),
   X86_INTRINSIC_DATA(avx512_pternlog_d_128, INTR_TYPE_4OP, X86ISD::VPTERNLOG, 0),
   X86_INTRINSIC_DATA(avx512_pternlog_d_256, INTR_TYPE_4OP, X86ISD::VPTERNLOG, 0),
   X86_INTRINSIC_DATA(avx512_pternlog_d_512, INTR_TYPE_4OP, X86ISD::VPTERNLOG, 0),
@@ -1098,24 +1068,9 @@
   X86_INTRINSIC_DATA(avx512_vpmadd52l_uq_128 , IFMA_OP, X86ISD::VPMADD52L, 0),
   X86_INTRINSIC_DATA(avx512_vpmadd52l_uq_256 , IFMA_OP, X86ISD::VPMADD52L, 0),
   X86_INTRINSIC_DATA(avx512_vpmadd52l_uq_512 , IFMA_OP, X86ISD::VPMADD52L, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_d_128, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_d_256, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_d_512, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_q_128, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_q_256, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_q_512, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_w_128, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_w_256, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshld_w_512, INTR_TYPE_3OP_IMM8, X86ISD::VSHLD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_d_128, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_d_256, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_d_512, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_q_128, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_q_256, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_q_512, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_w_128, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_w_256, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
-  X86_INTRINSIC_DATA(avx512_vpshrd_w_512, INTR_TYPE_3OP_IMM8, X86ISD::VSHRD, 0),
+  X86_INTRINSIC_DATA(avx512_vpshufbitqmb_128, INTR_TYPE_2OP, X86ISD::VPSHUFBITQMB, 0),
+  X86_INTRINSIC_DATA(avx512_vpshufbitqmb_256, INTR_TYPE_2OP, X86ISD::VPSHUFBITQMB, 0),
+  X86_INTRINSIC_DATA(avx512_vpshufbitqmb_512, INTR_TYPE_2OP, X86ISD::VPSHUFBITQMB, 0),
   X86_INTRINSIC_DATA(bmi_bextr_32,         INTR_TYPE_2OP, X86ISD::BEXTR, 0),
   X86_INTRINSIC_DATA(bmi_bextr_64,         INTR_TYPE_2OP, X86ISD::BEXTR, 0),
   X86_INTRINSIC_DATA(bmi_bzhi_32,          INTR_TYPE_2OP, X86ISD::BZHI, 0),
@@ -1168,8 +1123,6 @@
   X86_INTRINSIC_DATA(sse2_packssdw_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(sse2_packsswb_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(sse2_packuswb_128, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
-  X86_INTRINSIC_DATA(sse2_padds_b,      INTR_TYPE_2OP, X86ISD::ADDS, 0),
-  X86_INTRINSIC_DATA(sse2_padds_w,      INTR_TYPE_2OP, X86ISD::ADDS, 0),
   X86_INTRINSIC_DATA(sse2_pmadd_wd,     INTR_TYPE_2OP, X86ISD::VPMADDWD, 0),
   X86_INTRINSIC_DATA(sse2_pmovmskb_128, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(sse2_pmulh_w,      INTR_TYPE_2OP, ISD::MULHS, 0),
@@ -1191,8 +1144,6 @@
   X86_INTRINSIC_DATA(sse2_psrli_d,      VSHIFT, X86ISD::VSRLI, 0),
   X86_INTRINSIC_DATA(sse2_psrli_q,      VSHIFT, X86ISD::VSRLI, 0),
   X86_INTRINSIC_DATA(sse2_psrli_w,      VSHIFT, X86ISD::VSRLI, 0),
-  X86_INTRINSIC_DATA(sse2_psubs_b,      INTR_TYPE_2OP, X86ISD::SUBS, 0),
-  X86_INTRINSIC_DATA(sse2_psubs_w,      INTR_TYPE_2OP, X86ISD::SUBS, 0),
   X86_INTRINSIC_DATA(sse2_ucomieq_sd,   COMI, X86ISD::UCOMI, ISD::SETEQ),
   X86_INTRINSIC_DATA(sse2_ucomige_sd,   COMI, X86ISD::UCOMI, ISD::SETGE),
   X86_INTRINSIC_DATA(sse2_ucomigt_sd,   COMI, X86ISD::UCOMI, ISD::SETGT),
@@ -1262,14 +1213,6 @@
   X86_INTRINSIC_DATA(xop_vpermil2ps,     INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
   X86_INTRINSIC_DATA(xop_vpermil2ps_256, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
   X86_INTRINSIC_DATA(xop_vpperm,        INTR_TYPE_3OP, X86ISD::VPPERM, 0),
-  X86_INTRINSIC_DATA(xop_vprotb,        INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(xop_vprotbi,       INTR_TYPE_2OP, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(xop_vprotd,        INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(xop_vprotdi,       INTR_TYPE_2OP, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(xop_vprotq,        INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(xop_vprotqi,       INTR_TYPE_2OP, X86ISD::VROTLI, 0),
-  X86_INTRINSIC_DATA(xop_vprotw,        INTR_TYPE_2OP, ISD::ROTL, 0),
-  X86_INTRINSIC_DATA(xop_vprotwi,       INTR_TYPE_2OP, X86ISD::VROTLI, 0),
   X86_INTRINSIC_DATA(xop_vpshab,        INTR_TYPE_2OP, X86ISD::VPSHA, 0),
   X86_INTRINSIC_DATA(xop_vpshad,        INTR_TYPE_2OP, X86ISD::VPSHA, 0),
   X86_INTRINSIC_DATA(xop_vpshaq,        INTR_TYPE_2OP, X86ISD::VPSHA, 0),
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index 0c1b05f..aa20273 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -522,10 +522,16 @@
 // faster on common hardware.  In reality, this should be controlled by a
 // command line option or something.
 
+
 def RFP32 : RegisterClass<"X86",[f32], 32, (sequence "FP%u", 0, 6)>;
 def RFP64 : RegisterClass<"X86",[f64], 32, (add RFP32)>;
 def RFP80 : RegisterClass<"X86",[f80], 32, (add RFP32)>;
 
+// st(7) is not allocatable.
+def RFP80_7 : RegisterClass<"X86",[f80], 32, (add FP7)> {
+  let isAllocatable = 0;
+}
+
 // Floating point stack registers (these are not allocatable by the
 // register allocator - the floating point stackifier is responsible
 // for transforming FPn allocations to STn registers)
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index 7889322..36929a4 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -832,6 +832,12 @@
     { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
     { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
     { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/
+
+    { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
+    { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
+
+    { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
+    { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
   };
 
   if (ST->hasSSE2())
@@ -841,6 +847,20 @@
   static const CostTblEntry SSE1CostTable[] = {
     { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
     { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
+
+    { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
+    { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
+
+    { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
+    { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
+
+    { ISD::ADD, MVT::i8,      1 }, // Pentium III from http://www.agner.org/
+    { ISD::ADD, MVT::i16,     1 }, // Pentium III from http://www.agner.org/
+    { ISD::ADD, MVT::i32,     1 }, // Pentium III from http://www.agner.org/
+
+    { ISD::SUB, MVT::i8,      1 }, // Pentium III from http://www.agner.org/
+    { ISD::SUB, MVT::i16,     1 }, // Pentium III from http://www.agner.org/
+    { ISD::SUB, MVT::i32,     1 }, // Pentium III from http://www.agner.org/
   };
 
   if (ST->hasSSE1())
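A minimal sketch of how these per-ISA tables are reached from client code, assuming a `TargetTransformInfo` instance is in scope; with the SSE1 entry added above, a scalar FADD should report a cost of 1:

```cpp
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

// Hedged sketch: query the cost model for a scalar float add. On an
// SSE1-only target the new table entry above should make this return 1.
int queryFAddCost(const llvm::TargetTransformInfo &TTI,
                  llvm::LLVMContext &Ctx) {
  return TTI.getArithmeticInstrCost(llvm::Instruction::FAdd,
                                    llvm::Type::getFloatTy(Ctx));
}
```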
@@ -1742,6 +1762,14 @@
     { ISD::CTTZ,       MVT::v16i32, 14 },
     { ISD::CTTZ,       MVT::v32i16, 12 },
     { ISD::CTTZ,       MVT::v64i8,   9 },
+    { ISD::SADDSAT,    MVT::v32i16,  1 },
+    { ISD::SADDSAT,    MVT::v64i8,   1 },
+    { ISD::SSUBSAT,    MVT::v32i16,  1 },
+    { ISD::SSUBSAT,    MVT::v64i8,   1 },
+    { ISD::UADDSAT,    MVT::v32i16,  1 },
+    { ISD::UADDSAT,    MVT::v64i8,   1 },
+    { ISD::USUBSAT,    MVT::v32i16,  1 },
+    { ISD::USUBSAT,    MVT::v64i8,   1 },
   };
   static const CostTblEntry AVX512CostTbl[] = {
     { ISD::BITREVERSE, MVT::v8i64,  36 },
@@ -1752,6 +1780,10 @@
     { ISD::CTPOP,      MVT::v16i32, 24 },
     { ISD::CTTZ,       MVT::v8i64,  20 },
     { ISD::CTTZ,       MVT::v16i32, 28 },
+    { ISD::USUBSAT,    MVT::v16i32,  2 }, // pmaxud + psubd
+    { ISD::USUBSAT,    MVT::v2i64,   2 }, // pmaxuq + psubq
+    { ISD::USUBSAT,    MVT::v4i64,   2 }, // pmaxuq + psubq
+    { ISD::USUBSAT,    MVT::v8i64,   2 }, // pmaxuq + psubq
   };
   static const CostTblEntry XOPCostTbl[] = {
     { ISD::BITREVERSE, MVT::v4i64,   4 },
@@ -1787,6 +1819,15 @@
     { ISD::CTTZ,       MVT::v8i32,  14 },
     { ISD::CTTZ,       MVT::v16i16, 12 },
     { ISD::CTTZ,       MVT::v32i8,   9 },
+    { ISD::SADDSAT,    MVT::v16i16,  1 },
+    { ISD::SADDSAT,    MVT::v32i8,   1 },
+    { ISD::SSUBSAT,    MVT::v16i16,  1 },
+    { ISD::SSUBSAT,    MVT::v32i8,   1 },
+    { ISD::UADDSAT,    MVT::v16i16,  1 },
+    { ISD::UADDSAT,    MVT::v32i8,   1 },
+    { ISD::USUBSAT,    MVT::v16i16,  1 },
+    { ISD::USUBSAT,    MVT::v32i8,   1 },
+    { ISD::USUBSAT,    MVT::v8i32,   2 }, // pmaxud + psubd
     { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
     { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
     { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
@@ -1814,6 +1855,15 @@
     { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
     { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
     { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
+    { ISD::SADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::SADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::SSUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::SSUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::UADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::UADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::USUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::USUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
+    { ISD::USUBSAT,    MVT::v8i32,   6 }, // 2 x 128-bit Op + extract/insert
     { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
     { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
     { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
@@ -1834,6 +1884,7 @@
     { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
   };
   static const CostTblEntry SSE42CostTbl[] = {
+    { ISD::USUBSAT,    MVT::v4i32,   2 }, // pmaxud + psubd
     { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
     { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
   };
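The "pmaxud + psubd" comments rest on a standard identity for unsigned saturating subtraction; a scalar sketch of the two-instruction lowering being costed:

```cpp
#include <cstdint>

// usub.sat(a, b) == max(a, b) - b for unsigned values: when a <= b the max
// is b and the result is 0; otherwise it is the plain difference. Each lane
// of the vector form is one pmaxud plus one psubd, hence cost 2.
uint32_t usub_sat(uint32_t a, uint32_t b) {
  uint32_t m = a > b ? a : b; // pmaxud
  return m - b;               // psubd
}
```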
@@ -1878,6 +1929,14 @@
     { ISD::CTTZ,       MVT::v4i32,  18 },
     { ISD::CTTZ,       MVT::v8i16,  16 },
     { ISD::CTTZ,       MVT::v16i8,  13 },
+    { ISD::SADDSAT,    MVT::v8i16,   1 },
+    { ISD::SADDSAT,    MVT::v16i8,   1 },
+    { ISD::SSUBSAT,    MVT::v8i16,   1 },
+    { ISD::SSUBSAT,    MVT::v16i8,   1 },
+    { ISD::UADDSAT,    MVT::v8i16,   1 },
+    { ISD::UADDSAT,    MVT::v16i8,   1 },
+    { ISD::USUBSAT,    MVT::v8i16,   1 },
+    { ISD::USUBSAT,    MVT::v16i8,   1 },
     { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
     { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
   };
@@ -1886,7 +1945,7 @@
     { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
   };
   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
-    { ISD::BITREVERSE, MVT::i64,    14 } 
+    { ISD::BITREVERSE, MVT::i64,    14 }
   };
   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
     { ISD::BITREVERSE, MVT::i32,    14 },
@@ -1913,6 +1972,18 @@
   case Intrinsic::cttz:
     ISD = ISD::CTTZ;
     break;
+  case Intrinsic::sadd_sat:
+    ISD = ISD::SADDSAT;
+    break;
+  case Intrinsic::ssub_sat:
+    ISD = ISD::SSUBSAT;
+    break;
+  case Intrinsic::uadd_sat:
+    ISD = ISD::UADDSAT;
+    break;
+  case Intrinsic::usub_sat:
+    ISD = ISD::USUBSAT;
+    break;
   case Intrinsic::sqrt:
     ISD = ISD::FSQRT;
     break;
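For reference, the scalar semantics of one of the saturating intrinsics being wired up here, sketched for `llvm.uadd.sat.i8` (illustration only; the vector forms map to single PADDUSB/PADDUSW-style instructions, which is why the table costs above are 1):

```cpp
#include <cstdint>

// Saturating unsigned add: the sum clamps at the type maximum instead of
// wrapping. This is what the new UADDSAT cost entries model per lane.
uint8_t uadd_sat_i8(uint8_t a, uint8_t b) {
  unsigned s = unsigned(a) + unsigned(b);
  return s > 0xFF ? 0xFF : uint8_t(s);
}
```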
@@ -2899,6 +2970,9 @@
     Options.LoadSizes.push_back(4);
     Options.LoadSizes.push_back(2);
     Options.LoadSizes.push_back(1);
+    // All GPR and vector loads can be unaligned. SIMD compare requires integer
+    // vectors (SSE2/AVX2).
+    Options.AllowOverlappingLoads = true;
     return Options;
   }();
   return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions;
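What `AllowOverlappingLoads` buys, sketched for a hypothetical 7-byte equality compare: two 4-byte loads whose ranges overlap by one byte cover the whole buffer, avoiding a 2-byte plus 1-byte tail:

```cpp
#include <cstdint>
#include <cstring>

// Illustrative expansion of memcmp(p, q, 7) == 0 with overlapping loads.
// memcpy stands in for the unaligned loads the backend would emit.
bool equal7(const uint8_t *p, const uint8_t *q) {
  uint32_t a0, b0, a1, b1;
  std::memcpy(&a0, p, 4);     // bytes 0..3
  std::memcpy(&b0, q, 4);
  std::memcpy(&a1, p + 3, 4); // bytes 3..6, overlapping byte 3
  std::memcpy(&b1, q + 3, 4);
  return ((a0 ^ b0) | (a1 ^ b1)) == 0;
}
```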
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 99e7614..75d7ae7 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -403,8 +403,7 @@
 
 static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
 {
-  KnownBits Known;
-  DAG.computeKnownBits(Value, Known);
+  KnownBits Known = DAG.computeKnownBits(Value);
   return Known.countMinTrailingZeros() >= 2;
 }
 
@@ -1649,10 +1648,9 @@
     // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
     // low bit set
     if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
-      KnownBits Known;
       APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                          VT.getSizeInBits() - 1);
-      DAG.computeKnownBits(N2, Known);
+      KnownBits Known = DAG.computeKnownBits(N2);
       if ((Known.Zero & Mask) == Mask) {
         SDValue Carry = DAG.getConstant(0, dl, VT);
         SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
@@ -1672,10 +1670,9 @@
 
     // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
     if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
-      KnownBits Known;
       APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                          VT.getSizeInBits() - 1);
-      DAG.computeKnownBits(N2, Known);
+      KnownBits Known = DAG.computeKnownBits(N2);
       if ((Known.Zero & Mask) == Mask) {
         SDValue Borrow = N2;
         SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
@@ -1688,10 +1685,9 @@
     // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
     // low bit set
     if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
-      KnownBits Known;
       APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                          VT.getSizeInBits() - 1);
-      DAG.computeKnownBits(N2, Known);
+      KnownBits Known = DAG.computeKnownBits(N2);
       if ((Known.Zero & Mask) == Mask) {
         SDValue Borrow = DAG.getConstant(0, dl, VT);
         SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
diff --git a/lib/TextAPI/ELF/TBEHandler.cpp b/lib/TextAPI/ELF/TBEHandler.cpp
index dde9802..b621829 100644
--- a/lib/TextAPI/ELF/TBEHandler.cpp
+++ b/lib/TextAPI/ELF/TBEHandler.cpp
@@ -105,6 +105,7 @@
       IO.mapRequired("Size", Symbol.Size);
     }
     IO.mapOptional("Undefined", Symbol.Undefined, false);
+    IO.mapOptional("Weak", Symbol.Weak, false);
     IO.mapOptional("Warning", Symbol.Warning);
   }
 
diff --git a/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 7560060..c795866 100644
--- a/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -59,6 +59,99 @@
 };
 } // namespace
 
+/// Match a pattern for a bitwise rotate operation that partially guards
+/// against undefined behavior by branching around the rotation when the shift
+/// amount is 0.
+static bool foldGuardedRotateToFunnelShift(Instruction &I) {
+  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
+    return false;
+
+  // As with the one-use checks below, this is not strictly necessary, but we
+  // are being cautious to avoid potential perf regressions on targets that
+  // do not actually have a rotate instruction (where the funnel shift would be
+  // expanded back into math/shift/logic ops).
+  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
+    return false;
+
+  // Match V to funnel shift left/right and capture the source operand and
+  // shift amount in X and Y.
+  auto matchRotate = [](Value *V, Value *&X, Value *&Y) {
+    Value *L0, *L1, *R0, *R1;
+    unsigned Width = V->getType()->getScalarSizeInBits();
+    auto Sub = m_Sub(m_SpecificInt(Width), m_Value(R1));
+
+    // rotate_left(X, Y) == (X << Y) | (X >> (Width - Y))
+    auto RotL = m_OneUse(
+        m_c_Or(m_Shl(m_Value(L0), m_Value(L1)), m_LShr(m_Value(R0), Sub)));
+    if (RotL.match(V) && L0 == R0 && L1 == R1) {
+      X = L0;
+      Y = L1;
+      return Intrinsic::fshl;
+    }
+
+    // rotate_right(X, Y) == (X >> Y) | (X << (Width - Y))
+    auto RotR = m_OneUse(
+        m_c_Or(m_LShr(m_Value(L0), m_Value(L1)), m_Shl(m_Value(R0), Sub)));
+    if (RotR.match(V) && L0 == R0 && L1 == R1) {
+      X = L0;
+      Y = L1;
+      return Intrinsic::fshr;
+    }
+
+    return Intrinsic::not_intrinsic;
+  };
+
+  // One phi operand must be a rotate operation, and the other phi operand must
+  // be the source value of that rotate operation:
+  // phi [ rotate(RotSrc, RotAmt), RotBB ], [ RotSrc, GuardBB ]
+  PHINode &Phi = cast<PHINode>(I);
+  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
+  Value *RotSrc, *RotAmt;
+  Intrinsic::ID IID = matchRotate(P0, RotSrc, RotAmt);
+  if (IID == Intrinsic::not_intrinsic || RotSrc != P1) {
+    IID = matchRotate(P1, RotSrc, RotAmt);
+    if (IID == Intrinsic::not_intrinsic || RotSrc != P0)
+      return false;
+    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
+           "Pattern must match funnel shift left or right");
+  }
+
+  // The incoming block with our source operand must be the "guard" block.
+  // That must contain a cmp+branch to avoid the rotate when the shift amount
+  // is equal to 0. The other incoming block is the block with the rotate.
+  BasicBlock *GuardBB = Phi.getIncomingBlock(RotSrc == P1);
+  BasicBlock *RotBB = Phi.getIncomingBlock(RotSrc != P1);
+  Instruction *TermI = GuardBB->getTerminator();
+  BasicBlock *TrueBB, *FalseBB;
+  ICmpInst::Predicate Pred;
+  if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(RotAmt), m_ZeroInt()), TrueBB,
+                         FalseBB)))
+    return false;
+
+  BasicBlock *PhiBB = Phi.getParent();
+  if (Pred != CmpInst::ICMP_EQ || TrueBB != PhiBB || FalseBB != RotBB)
+    return false;
+
+  // We matched a variation of this IR pattern:
+  // GuardBB:
+  //   %cmp = icmp eq i32 %RotAmt, 0
+  //   br i1 %cmp, label %PhiBB, label %RotBB
+  // RotBB:
+  //   %sub = sub i32 32, %RotAmt
+  //   %shr = lshr i32 %X, %sub
+  //   %shl = shl i32 %X, %RotAmt
+  //   %rot = or i32 %shr, %shl
+  //   br label %PhiBB
+  // PhiBB:
+  //   %cond = phi i32 [ %rot, %RotBB ], [ %X, %GuardBB ]
+  // -->
+  // llvm.fshl.i32(i32 %X, i32 %X, i32 %RotAmt)
+  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
+  Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
+  Phi.replaceAllUsesWith(Builder.CreateCall(F, {RotSrc, RotSrc, RotAmt}));
+  return true;
+}
+
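For context, the source-level idiom this fold targets, written as a plain C++ function (illustrative; not taken from this patch):

```cpp
#include <cstdint>

// A rotate guarded against the UB of shifting by the full width: the branch
// skips the shifts when n == 0. After this fold, the compare, branch, shifts,
// or, and phi collapse into a single llvm.fshl.i32 call.
uint32_t rotl32(uint32_t x, uint32_t n) {
  if (n == 0)                        // GuardBB: icmp eq + br
    return x;                        // phi takes the unrotated source
  return (x << n) | (x >> (32 - n)); // RotBB: shl, lshr, or
}
```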
 /// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
 /// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
 /// of 'and' ops, then we also need to capture the fact that we saw an
@@ -69,9 +162,9 @@
   bool MatchAndChain;
   bool FoundAnd1;
 
-  MaskOps(unsigned BitWidth, bool MatchAnds) :
-      Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
-      MatchAndChain(MatchAnds), FoundAnd1(false) {}
+  MaskOps(unsigned BitWidth, bool MatchAnds)
+      : Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
+        MatchAndChain(MatchAnds), FoundAnd1(false) {}
 };
 
 /// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
@@ -152,8 +245,8 @@
   IRBuilder<> Builder(&I);
   Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
   Value *And = Builder.CreateAnd(MOps.Root, Mask);
-  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask) :
-                                 Builder.CreateIsNotNull(And);
+  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
+                               : Builder.CreateIsNotNull(And);
   Value *Zext = Builder.CreateZExt(Cmp, I.getType());
   I.replaceAllUsesWith(Zext);
   return true;
@@ -174,8 +267,10 @@
     // Also, we want to avoid matching partial patterns.
     // TODO: It would be more efficient if we removed dead instructions
     // iteratively in this loop rather than waiting until the end.
-    for (Instruction &I : make_range(BB.rbegin(), BB.rend()))
+    for (Instruction &I : make_range(BB.rbegin(), BB.rend())) {
       MadeChange |= foldAnyOrAllBitsSet(I);
+      MadeChange |= foldGuardedRotateToFunnelShift(I);
+    }
   }
 
   // We're done with transforms, so remove dead instructions.
diff --git a/lib/Transforms/Hello/CMakeLists.txt b/lib/Transforms/Hello/CMakeLists.txt
index 4a55dd9..c4f1024 100644
--- a/lib/Transforms/Hello/CMakeLists.txt
+++ b/lib/Transforms/Hello/CMakeLists.txt
@@ -10,7 +10,7 @@
   set(LLVM_LINK_COMPONENTS Core Support)
 endif()
 
-add_llvm_loadable_module( LLVMHello
+add_llvm_library( LLVMHello MODULE BUILDTREE_ONLY
   Hello.cpp
 
   DEPENDS
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index f2c2b55..4663de0 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -49,6 +49,7 @@
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
@@ -213,7 +214,8 @@
   FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());
 
   // Create the new function body and insert it into the module.
-  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
+  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getAddressSpace(),
+                                  F->getName());
   NF->copyAttributesFrom(F);
 
   // Patch the pointer to LLVM function in debug info descriptor.
@@ -808,6 +810,21 @@
   return false;
 }
 
+static bool areFunctionArgsABICompatible(
+    const Function &F, const TargetTransformInfo &TTI,
+    SmallPtrSetImpl<Argument *> &ArgsToPromote,
+    SmallPtrSetImpl<Argument *> &ByValArgsToTransform) {
+  for (const Use &U : F.uses()) {
+    CallSite CS(U.getUser());
+    const Function *Caller = CS.getCaller();
+    const Function *Callee = CS.getCalledFunction();
+    if (!TTI.areFunctionArgsABICompatible(Caller, Callee, ArgsToPromote) ||
+        !TTI.areFunctionArgsABICompatible(Caller, Callee, ByValArgsToTransform))
+      return false;
+  }
+  return true;
+}
+
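A hypothetical shape of the incompatibility this check guards against (names, types, and attributes invented for illustration): if promotion turned the pointer parameter into a by-value vector, caller and callee would have to agree on how that vector is passed, which they cannot when only one of them is compiled with the wider vector registers enabled:

```cpp
// Illustration only: callee is built with AVX, caller without. Promoting P
// to a by-value 256-bit vector would change the calling convention for one
// side only, so areFunctionArgsABICompatible() must reject the promotion.
typedef float v8f32 __attribute__((vector_size(32)));

__attribute__((target("avx")))
float callee(const v8f32 *P) { return (*P)[0]; }

float caller(const v8f32 *P) { return callee(P); }
```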
 /// PromoteArguments - This method checks the specified function to see if there
 /// are any promotable arguments and if it is safe to promote the function (for
 /// example, all callers are direct).  If safe to promote some arguments, it
@@ -816,7 +833,8 @@
 promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
                  unsigned MaxElements,
                  Optional<function_ref<void(CallSite OldCS, CallSite NewCS)>>
-                     ReplaceCallSite) {
+                     ReplaceCallSite,
+                 const TargetTransformInfo &TTI) {
   // Don't perform argument promotion for naked functions; otherwise we can end
   // up removing parameters that are seemingly 'not used' as they are referred
   // to in the assembly.
@@ -845,7 +863,7 @@
 
   // Second check: make sure that all callers are direct callers.  We can't
   // transform functions that have indirect callers.  Also see if the function
-  // is self-recursive.
+  // is self-recursive and check that target features are compatible.
   bool isSelfRecursive = false;
   for (Use &U : F->uses()) {
     CallSite CS(U.getUser());
@@ -954,6 +972,10 @@
   if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
     return nullptr;
 
+  if (!areFunctionArgsABICompatible(*F, TTI, ArgsToPromote,
+                                    ByValArgsToTransform))
+    return nullptr;
+
   return doPromotion(F, ArgsToPromote, ByValArgsToTransform, ReplaceCallSite);
 }
 
@@ -979,7 +1001,9 @@
         return FAM.getResult<AAManager>(F);
       };
 
-      Function *NewF = promoteArguments(&OldF, AARGetter, MaxElements, None);
+      const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(OldF);
+      Function *NewF =
+          promoteArguments(&OldF, AARGetter, MaxElements, None, TTI);
       if (!NewF)
         continue;
       LocalChange = true;
@@ -1017,6 +1041,7 @@
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<AssumptionCacheTracker>();
     AU.addRequired<TargetLibraryInfoWrapperPass>();
+    AU.addRequired<TargetTransformInfoWrapperPass>();
     getAAResultsAnalysisUsage(AU);
     CallGraphSCCPass::getAnalysisUsage(AU);
   }
@@ -1042,6 +1067,7 @@
 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
 INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                     "Promote 'by reference' arguments to scalars", false, false)
 
@@ -1078,8 +1104,10 @@
         CallerNode->replaceCallEdge(OldCS, NewCS, NewCalleeNode);
       };
 
+      const TargetTransformInfo &TTI =
+          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*OldF);
       if (Function *NewF = promoteArguments(OldF, AARGetter, MaxElements,
-                                            {ReplaceCallSite})) {
+                                            {ReplaceCallSite}, TTI)) {
         LocalChange = true;
 
         // Update the call graph for the newly promoted function.
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index cffb332..cb30e8f 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -164,7 +164,7 @@
   unsigned NumArgs = Params.size();
 
   // Create the new function body and insert it into the module...
-  Function *NF = Function::Create(NFTy, Fn.getLinkage());
+  Function *NF = Function::Create(NFTy, Fn.getLinkage(), Fn.getAddressSpace());
   NF->copyAttributesFrom(&Fn);
   NF->setComdat(Fn.getComdat());
   Fn.getParent()->getFunctionList().insert(Fn.getIterator(), NF);
@@ -863,7 +863,7 @@
     return false;
 
   // Create the new function body and insert it into the module...
-  Function *NF = Function::Create(NFTy, F->getLinkage());
+  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getAddressSpace());
   NF->copyAttributesFrom(F);
   NF->setComdat(F->getComdat());
   NF->setAttributes(NewPAL);
diff --git a/lib/Transforms/IPO/ExtractGV.cpp b/lib/Transforms/IPO/ExtractGV.cpp
index d45a883..a744d7f 100644
--- a/lib/Transforms/IPO/ExtractGV.cpp
+++ b/lib/Transforms/IPO/ExtractGV.cpp
@@ -135,6 +135,7 @@
           llvm::Value *Declaration;
           if (FunctionType *FTy = dyn_cast<FunctionType>(Ty)) {
             Declaration = Function::Create(FTy, GlobalValue::ExternalLinkage,
+                                           CurI->getAddressSpace(),
                                            CurI->getName(), &M);
 
           } else {
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index 3a04f7a..4e2a82b 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -130,16 +130,15 @@
 
     // Some instructions can be ignored even if they read or write memory.
     // Detect these now, skipping to the next instruction if one is found.
-    CallSite CS(cast<Value>(I));
-    if (CS) {
+    if (auto *Call = dyn_cast<CallBase>(I)) {
       // Ignore calls to functions in the same SCC, as long as the call sites
       // don't have operand bundles.  Calls with operand bundles are allowed to
       // have memory effects not described by the memory effects of the call
       // target.
-      if (!CS.hasOperandBundles() && CS.getCalledFunction() &&
-          SCCNodes.count(CS.getCalledFunction()))
+      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
+          SCCNodes.count(Call->getCalledFunction()))
         continue;
-      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);
+      FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call);
       ModRefInfo MRI = createModRefInfo(MRB);
 
       // If the call doesn't access memory, we're done.
@@ -158,7 +157,7 @@
 
       // Check whether all pointer arguments point to local memory, and
       // ignore calls that only access local memory.
-      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+      for (CallSite::arg_iterator CI = Call->arg_begin(), CE = Call->arg_end();
            CI != CE; ++CI) {
         Value *Arg = *CI;
         if (!Arg->getType()->isPtrOrPtrVectorTy())
diff --git a/lib/Transforms/IPO/FunctionImport.cpp b/lib/Transforms/IPO/FunctionImport.cpp
index c5490f1..1223a23 100644
--- a/lib/Transforms/IPO/FunctionImport.cpp
+++ b/lib/Transforms/IPO/FunctionImport.cpp
@@ -777,9 +777,14 @@
     VI = updateValueInfoForIndirectCalls(Index, VI);
     if (!VI)
       return;
-    for (auto &S : VI.getSummaryList())
-      if (S->isLive())
-        return;
+
+    // We need to make sure all variants of the symbol are scanned; an alias
+    // can make one (but not all) of them live.
+    if (llvm::all_of(VI.getSummaryList(),
+                     [](const std::unique_ptr<llvm::GlobalValueSummary> &S) {
+                       return S->isLive();
+                     }))
+      return;
 
     // We only keep live symbols that are known to be non-prevailing if any are
     // available_externally, linkonceodr, weakodr. Those symbols are discarded
@@ -911,7 +916,8 @@
     if (GV.getValueType()->isFunctionTy())
       NewGV =
           Function::Create(cast<FunctionType>(GV.getValueType()),
-                           GlobalValue::ExternalLinkage, "", GV.getParent());
+                           GlobalValue::ExternalLinkage, GV.getAddressSpace(),
+                           "", GV.getParent());
     else
       NewGV =
           new GlobalVariable(*GV.getParent(), GV.getValueType(),
diff --git a/lib/Transforms/IPO/HotColdSplitting.cpp b/lib/Transforms/IPO/HotColdSplitting.cpp
index 5d989a4..924a7d5 100644
--- a/lib/Transforms/IPO/HotColdSplitting.cpp
+++ b/lib/Transforms/IPO/HotColdSplitting.cpp
@@ -295,6 +295,7 @@
 /// A pair of (basic block, score).
 using BlockTy = std::pair<BasicBlock *, unsigned>;
 
+namespace {
 /// A maximal outlining region. This contains all blocks post-dominated by a
 /// sink block, the sink block itself, and all blocks dominated by the sink.
 class OutliningRegion {
@@ -458,6 +459,7 @@
     return SubRegion;
   }
 };
+} // namespace
 
 bool HotColdSplitting::outlineColdRegions(Function &F, ProfileSummaryInfo &PSI,
                                           BlockFrequencyInfo *BFI,
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index 44001b5..66a6f80 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -311,6 +311,11 @@
   // For now we only handle local or inline functions.
   if (!Caller->hasLocalLinkage() && !Caller->hasLinkOnceODRLinkage())
     return false;
+  // If the cost of inlining CS is non-positive, it is not going to prevent the
+  // caller from being inlined into its callers and hence we don't need to
+  // defer.
+  if (IC.getCost() <= 0)
+    return false;
   // Try to detect the case where the current inlining candidate caller (call
   // it B) is a static or linkonce-ODR function and is an inlining candidate
   // elsewhere, and the current candidate callee (call it C) is large enough
@@ -330,25 +335,31 @@
   TotalSecondaryCost = 0;
   // The candidate cost to be imposed upon the current function.
   int CandidateCost = IC.getCost() - 1;
-  // This bool tracks what happens if we do NOT inline C into B.
-  bool callerWillBeRemoved = Caller->hasLocalLinkage();
+  // If the caller has local linkage and can be inlined into all its callers,
+  // we can apply a huge negative bonus to TotalSecondaryCost.
+  bool ApplyLastCallBonus = Caller->hasLocalLinkage() && !Caller->hasOneUse();
   // This bool tracks what happens if we DO inline C into B.
   bool inliningPreventsSomeOuterInline = false;
   for (User *U : Caller->users()) {
+    // If the caller will not be removed (either because it does not have
+    // local linkage or because the LastCallToStaticBonus has already been
+    // applied), then we can exit the loop early.
+    if (!ApplyLastCallBonus && TotalSecondaryCost >= IC.getCost())
+      return false;
     CallSite CS2(U);
 
     // If this isn't a call to Caller (it could be some other sort
     // of reference) skip it.  Such references will prevent the caller
     // from being removed.
     if (!CS2 || CS2.getCalledFunction() != Caller) {
-      callerWillBeRemoved = false;
+      ApplyLastCallBonus = false;
       continue;
     }
 
     InlineCost IC2 = GetInlineCost(CS2);
     ++NumCallerCallersAnalyzed;
     if (!IC2) {
-      callerWillBeRemoved = false;
+      ApplyLastCallBonus = false;
       continue;
     }
     if (IC2.isAlways())
@@ -366,7 +377,7 @@
   // one is set very low by getInlineCost, in anticipation that Caller will
   // be removed entirely.  We did not account for this above unless there
   // is only one caller of Caller.
-  if (callerWillBeRemoved && !Caller->hasOneUse())
+  if (ApplyLastCallBonus)
     TotalSecondaryCost -= InlineConstants::LastCallToStaticBonus;
 
   if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost())
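A minimal numeric sketch of that final test, with invented costs:

```cpp
#include <cassert>

// Invented numbers: inlining the callee here costs 100, while inlining this
// caller into its own callers first would cost a combined 60, so deferring
// is the cheaper order and the deferral test fires.
int main() {
  int InlineCostNow = 100;     // IC.getCost()
  int TotalSecondaryCost = 60; // summed over the caller's call sites
  assert(TotalSecondaryCost < InlineCostNow); // defer
  return 0;
}
```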
diff --git a/lib/Transforms/IPO/LowerTypeTests.cpp b/lib/Transforms/IPO/LowerTypeTests.cpp
index 2d29e93..87c65db 100644
--- a/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -989,6 +989,7 @@
     if (F->isDSOLocal()) {
       Function *RealF = Function::Create(F->getFunctionType(),
                                          GlobalValue::ExternalLinkage,
+                                         F->getAddressSpace(),
                                          Name + ".cfi", &M);
       RealF->setVisibility(GlobalVariable::HiddenVisibility);
       replaceDirectCalls(F, RealF);
@@ -1000,13 +1001,13 @@
   if (F->isDeclarationForLinker() && !isDefinition) {
     // Declaration of an external function.
     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
-                             Name + ".cfi_jt", &M);
+                             F->getAddressSpace(), Name + ".cfi_jt", &M);
     FDecl->setVisibility(GlobalValue::HiddenVisibility);
   } else if (isDefinition) {
     F->setName(Name + ".cfi");
     F->setLinkage(GlobalValue::ExternalLinkage);
     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
-                             Name, &M);
+                             F->getAddressSpace(), Name, &M);
     FDecl->setVisibility(Visibility);
     Visibility = GlobalValue::HiddenVisibility;
 
@@ -1016,7 +1017,8 @@
     for (auto &U : F->uses()) {
       if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
         Function *AliasDecl = Function::Create(
-            F->getFunctionType(), GlobalValue::ExternalLinkage, "", &M);
+            F->getFunctionType(), GlobalValue::ExternalLinkage,
+            F->getAddressSpace(), "", &M);
         AliasDecl->takeName(A);
         A->replaceAllUsesWith(AliasDecl);
         ToErase.push_back(A);
@@ -1191,7 +1193,9 @@
     WeakInitializerFn = Function::Create(
         FunctionType::get(Type::getVoidTy(M.getContext()),
                           /* IsVarArg */ false),
-        GlobalValue::InternalLinkage, "__cfi_global_var_init", &M);
+        GlobalValue::InternalLinkage,
+        M.getDataLayout().getProgramAddressSpace(),
+        "__cfi_global_var_init", &M);
     BasicBlock *BB =
         BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
     ReturnInst::Create(M.getContext(), BB);
@@ -1234,7 +1238,8 @@
   // placeholder first.
   Function *PlaceholderFn =
       Function::Create(cast<FunctionType>(F->getValueType()),
-                       GlobalValue::ExternalWeakLinkage, "", &M);
+                       GlobalValue::ExternalWeakLinkage,
+                       F->getAddressSpace(), "", &M);
   replaceCfiUses(F, PlaceholderFn, IsDefinition);
 
   Constant *Target = ConstantExpr::getSelect(
@@ -1424,7 +1429,9 @@
   Function *JumpTableFn =
       Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
                                          /* IsVarArg */ false),
-                       GlobalValue::PrivateLinkage, ".cfi.jumptable", &M);
+                       GlobalValue::PrivateLinkage,
+                       M.getDataLayout().getProgramAddressSpace(),
+                       ".cfi.jumptable", &M);
   ArrayType *JumpTableType =
       ArrayType::get(getJumpTableEntryType(), Functions.size());
   auto JumpTable =
@@ -1695,6 +1702,13 @@
       !ExportSummary && !ImportSummary)
     return false;
 
+  // If only some of the modules were split, we cannot correctly handle
+  // code that contains type tests.
+  if (TypeTestFunc && !TypeTestFunc->use_empty() &&
+      ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
+       (ImportSummary && ImportSummary->partiallySplitLTOUnits())))
+    report_fatal_error("inconsistent LTO Unit splitting with llvm.type.test");
+
   if (ImportSummary) {
     if (TypeTestFunc) {
       for (auto UI = TypeTestFunc->use_begin(), UE = TypeTestFunc->use_end();
@@ -1813,7 +1827,8 @@
         if (!F)
           F = Function::Create(
               FunctionType::get(Type::getVoidTy(M.getContext()), false),
-              GlobalVariable::ExternalLinkage, FunctionName, &M);
+              GlobalVariable::ExternalLinkage,
+              M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
 
         // If the function is available_externally, remove its definition so
         // that it is handled the same way as a declaration. Later we will try
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index e84de09..11efe95 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -474,7 +474,7 @@
                                           NewPAL.getRetAttributes(),
                                           NewArgAttrs));
 
-      remove(CS.getInstruction()->getParent()->getParent());
+      remove(CS.getInstruction()->getFunction());
       U->set(BitcastNew);
     }
   }
@@ -693,8 +693,8 @@
     GEntryBlock->getTerminator()->eraseFromParent();
     BB = GEntryBlock;
   } else {
-    NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
-                            G->getParent());
+    NewG = Function::Create(G->getFunctionType(), G->getLinkage(),
+                            G->getAddressSpace(), "", G->getParent());
     BB = BasicBlock::Create(F->getContext(), "", NewG);
   }
 
@@ -807,8 +807,8 @@
     }
 
     // Make them both thunks to the same internal function.
-    Function *NewF = Function::Create(F->getFunctionType(), F->getLinkage(), "",
-                                      F->getParent());
+    Function *NewF = Function::Create(F->getFunctionType(), F->getLinkage(),
+                                      F->getAddressSpace(), "", F->getParent());
     NewF->copyAttributesFrom(F);
     NewF->takeName(F);
     removeUsers(F);
@@ -845,7 +845,7 @@
     // If G was internal then we may have replaced all uses of G with F. If so,
     // stop here and delete G. There's no need for a thunk. (See note on
     // MergeFunctionsPDI above).
-    if (G->hasLocalLinkage() && G->use_empty() && !MergeFunctionsPDI) {
+    if (G->isDiscardableIfUnused() && G->use_empty() && !MergeFunctionsPDI) {
       G->eraseFromParent();
       ++NumFunctionsMerged;
       return;
@@ -954,7 +954,7 @@
 
     for (User *U : V->users()) {
       if (Instruction *I = dyn_cast<Instruction>(U)) {
-        remove(I->getParent()->getParent());
+        remove(I->getFunction());
       } else if (isa<GlobalValue>(U)) {
         // do nothing
       } else if (Constant *C = dyn_cast<Constant>(U)) {
diff --git a/lib/Transforms/IPO/PartialInlining.cpp b/lib/Transforms/IPO/PartialInlining.cpp
index 917582a..da214a1 100644
--- a/lib/Transforms/IPO/PartialInlining.cpp
+++ b/lib/Transforms/IPO/PartialInlining.cpp
@@ -851,12 +851,8 @@
       break;
     }
 
-    IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&I);
-    if (IntrInst) {
-      if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
-          IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
-        continue;
-    }
+    if (I.isLifetimeStartOrEnd())
+      continue;
 
     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
       InlineCost += getCallsiteCost(CallSite(CI), DL);
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index e625433..9764944 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -378,8 +378,8 @@
   if (EnableLoopInterchange)
     MPM.add(createLoopInterchangePass()); // Interchange loops
 
-  if (!DisableUnrollLoops)
-    MPM.add(createSimpleLoopUnrollPass(OptLevel));    // Unroll small loops
+  MPM.add(createSimpleLoopUnrollPass(OptLevel,
+                                     DisableUnrollLoops)); // Unroll small loops
   addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
   // This ends the loop pass pipelines.
 
@@ -462,12 +462,14 @@
 
     addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
 
-    // Rename anon globals to be able to export them in the summary.
-    // This has to be done after we add the extensions to the pass manager
-    // as there could be passes (e.g. Adddress sanitizer) which introduce
-    // new unnamed globals.
-    if (PrepareForLTO || PrepareForThinLTO)
+    if (PrepareForLTO || PrepareForThinLTO) {
+      MPM.add(createCanonicalizeAliasesPass());
+      // Rename anon globals to be able to export them in the summary.
+      // This has to be done after we add the extensions to the pass manager
+      // as there could be passes (e.g. Address sanitizer) which introduce
+      // new unnamed globals.
       MPM.add(createNameAnonGlobalPass());
+    }
     return;
   }
 
@@ -585,6 +587,7 @@
     // Ensure we perform any last passes, but do so before renaming anonymous
     // globals in case the passes add any.
     addExtensionsToPM(EP_OptimizerLast, MPM);
+    MPM.add(createCanonicalizeAliasesPass());
     // Rename anon globals to be able to export them in the summary.
     MPM.add(createNameAnonGlobalPass());
     return;
@@ -637,7 +640,7 @@
   // llvm.loop.distribute=true or when -enable-loop-distribute is specified.
   MPM.add(createLoopDistributePass());
 
-  MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize));
+  MPM.add(createLoopVectorizePass(DisableUnrollLoops, !LoopVectorize));
 
   // Eliminate loads by forwarding stores from the previous iteration to loads
   // of the current iteration.
@@ -682,16 +685,17 @@
   addExtensionsToPM(EP_Peephole, MPM);
   addInstructionCombiningPass(MPM);
 
+  if (EnableUnrollAndJam && !DisableUnrollLoops) {
+    // Unroll and Jam. We do this before unroll but need to be in a separate
+    // loop pass manager in order for the outer loop to be processed by
+    // unroll and jam before the inner loop is unrolled.
+    MPM.add(createLoopUnrollAndJamPass(OptLevel));
+  }
+
+  MPM.add(createLoopUnrollPass(OptLevel,
+                               DisableUnrollLoops)); // Unroll small loops
+
   if (!DisableUnrollLoops) {
-    if (EnableUnrollAndJam) {
-      // Unroll and Jam. We do this before unroll but need to be in a separate
-      // loop pass manager in order for the outer loop to be processed by
-      // unroll and jam before the inner loop is unrolled.
-      MPM.add(createLoopUnrollAndJamPass(OptLevel));
-    }
-
-    MPM.add(createLoopUnrollPass(OptLevel));    // Unroll small loops
-
    // LoopUnroll may generate some redundancy to clean up.
     addInstructionCombiningPass(MPM);
 
@@ -700,7 +704,7 @@
     // outer loop. LICM pass can help to promote the runtime check out if the
     // checked value is loop invariant.
     MPM.add(createLICMPass());
- }
+  }
 
   MPM.add(createWarnMissedTransformationsPass());
 
@@ -743,9 +747,11 @@
 
   addExtensionsToPM(EP_OptimizerLast, MPM);
 
-  // Rename anon globals to be able to handle them in the summary
-  if (PrepareForLTO)
+  if (PrepareForLTO) {
+    MPM.add(createCanonicalizeAliasesPass());
+    // Rename anon globals to be able to handle them in the summary
     MPM.add(createNameAnonGlobalPass());
+  }
 }
 
 void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
@@ -872,12 +878,11 @@
   if (EnableLoopInterchange)
     PM.add(createLoopInterchangePass());
 
-  if (!DisableUnrollLoops)
-    PM.add(createSimpleLoopUnrollPass(OptLevel));   // Unroll small loops
-  PM.add(createLoopVectorizePass(true, LoopVectorize));
+  PM.add(createSimpleLoopUnrollPass(OptLevel,
+                                    DisableUnrollLoops)); // Unroll small loops
+  PM.add(createLoopVectorizePass(true, !LoopVectorize));
   // The vectorizer may have significantly shortened a loop body; unroll again.
-  if (!DisableUnrollLoops)
-    PM.add(createLoopUnrollPass(OptLevel));
+  PM.add(createLoopUnrollPass(OptLevel, DisableUnrollLoops));
 
   PM.add(createWarnMissedTransformationsPass());
 
diff --git a/lib/Transforms/IPO/SampleProfile.cpp b/lib/Transforms/IPO/SampleProfile.cpp
index 06a1ce8..9f123c2 100644
--- a/lib/Transforms/IPO/SampleProfile.cpp
+++ b/lib/Transforms/IPO/SampleProfile.cpp
@@ -218,6 +218,7 @@
   const FunctionSamples *findCalleeFunctionSamples(const Instruction &I) const;
   std::vector<const FunctionSamples *>
   findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
+  mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap;
   const FunctionSamples *findFunctionSamples(const Instruction &I) const;
   bool inlineCallInstruction(Instruction *I);
   bool inlineHotFunctions(Function &F,
@@ -544,10 +545,10 @@
   if (!FS)
     return std::error_code();
 
-  // Ignore all intrinsics and branch instructions.
-  // Branch instruction usually contains debug info from sources outside of
+  // Ignore all intrinsics, phi nodes, and branch instructions.
+  // Branches and phi nodes usually contain debug info from sources outside of
   // the residing basic block, thus we ignore them during annotation.
-  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst))
+  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
     return std::error_code();
 
   // If a direct call/invoke instruction is inlined in profile
@@ -719,12 +720,14 @@
 /// \returns the FunctionSamples pointer to the inlined instance.
 const FunctionSamples *
 SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
-  SmallVector<std::pair<LineLocation, StringRef>, 10> S;
   const DILocation *DIL = Inst.getDebugLoc();
   if (!DIL)
     return Samples;
 
-  return Samples->findFunctionSamples(DIL);
+  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
+  if (it.second)
+    it.first->second = Samples->findFunctionSamples(DIL);
+  return it.first->second;
 }
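The caching above is the usual `DenseMap::try_emplace` memoization idiom; a self-contained sketch (`compute` is a placeholder for any expensive lookup):

```cpp
#include "llvm/ADT/DenseMap.h"

// try_emplace returns {iterator, inserted}; `inserted` is true only the
// first time the key is seen, so the expensive computation runs once per
// key and every later call is a single hash lookup.
template <typename K, typename V, typename Fn>
V *memoize(llvm::DenseMap<K, V *> &Cache, K Key, Fn compute) {
  auto It = Cache.try_emplace(Key, nullptr);
  if (It.second)
    It.first->second = compute(Key);
  return It.first->second;
}
```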
 
 bool SampleProfileLoader::inlineCallInstruction(Instruction *I) {
@@ -1610,6 +1613,8 @@
 }
 
 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
+
+  DILocation2SampleMap.clear();
   // By default the entry count is initialized to -1, which will be treated
   // conservatively by getEntryCount as the same as unknown (None). This is
   // to avoid newly added code to be treated as cold. If we have samples
diff --git a/lib/Transforms/IPO/SyntheticCountsPropagation.cpp b/lib/Transforms/IPO/SyntheticCountsPropagation.cpp
index 64837d4..ba4efb3 100644
--- a/lib/Transforms/IPO/SyntheticCountsPropagation.cpp
+++ b/lib/Transforms/IPO/SyntheticCountsPropagation.cpp
@@ -30,6 +30,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/ProfileSummaryInfo.h"
 #include "llvm/Analysis/SyntheticCountsUtils.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/Function.h"
@@ -98,13 +99,15 @@
                                                   ModuleAnalysisManager &MAM) {
   FunctionAnalysisManager &FAM =
       MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
-  DenseMap<Function *, uint64_t> Counts;
+  DenseMap<Function *, Scaled64> Counts;
   // Set initial entry counts.
-  initializeCounts(M, [&](Function *F, uint64_t Count) { Counts[F] = Count; });
+  initializeCounts(
+      M, [&](Function *F, uint64_t Count) { Counts[F] = Scaled64(Count, 0); });
 
-  // Compute the relative block frequency for a call edge. Use scaled numbers
-  // and not integers since the relative block frequency could be less than 1.
-  auto GetCallSiteRelFreq = [&](const CallGraphNode::CallRecord &Edge) {
+  // Edge includes information about the source. Hence ignore the first
+  // parameter.
+  auto GetCallSiteProfCount = [&](const CallGraphNode *,
+                                  const CallGraphNode::CallRecord &Edge) {
     Optional<Scaled64> Res = None;
     if (!Edge.first)
       return Res;
@@ -112,29 +115,33 @@
     CallSite CS(cast<Instruction>(Edge.first));
     Function *Caller = CS.getCaller();
     auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(*Caller);
+
+    // Now compute the callsite count from relative frequency and
+    // entry count:
     BasicBlock *CSBB = CS.getInstruction()->getParent();
     Scaled64 EntryFreq(BFI.getEntryFreq(), 0);
-    Scaled64 BBFreq(BFI.getBlockFreq(CSBB).getFrequency(), 0);
-    BBFreq /= EntryFreq;
-    return Optional<Scaled64>(BBFreq);
+    Scaled64 BBCount(BFI.getBlockFreq(CSBB).getFrequency(), 0);
+    BBCount /= EntryFreq;
+    BBCount *= Counts[Caller];
+    return Optional<Scaled64>(BBCount);
   };
 
   CallGraph CG(M);
  // Propagate the entry counts on the call graph.
   SyntheticCountsUtils<const CallGraph *>::propagate(
-      &CG, GetCallSiteRelFreq,
-      [&](const CallGraphNode *N) { return Counts[N->getFunction()]; },
-      [&](const CallGraphNode *N, uint64_t New) {
+      &CG, GetCallSiteProfCount, [&](const CallGraphNode *N, Scaled64 New) {
         auto F = N->getFunction();
         if (!F || F->isDeclaration())
           return;
+
         Counts[F] += New;
       });
 
   // Set the counts as metadata.
-  for (auto Entry : Counts)
-    Entry.first->setEntryCount(
-        ProfileCount(Entry.second, Function::PCT_Synthetic));
+  for (auto Entry : Counts) {
+    Entry.first->setEntryCount(ProfileCount(
+        Entry.second.template toInt<uint64_t>(), Function::PCT_Synthetic));
+  }
 
   return PreservedAnalyses::all();
 }
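The count arithmetic inside `GetCallSiteProfCount`, replayed with invented numbers to show why scaled numbers are used (the relative frequency can be fractional):

```cpp
#include "llvm/Support/ScaledNumber.h"

using Scaled64 = llvm::ScaledNumber<uint64_t>;

// Invented numbers: a caller with synthetic entry count 100, entry frequency
// 16, and a call site in a block of frequency 8 contributes 100 * 8/16 = 50
// to the callee's count.
Scaled64 exampleCallSiteCount() {
  Scaled64 EntryFreq(16, 0), BBFreq(8, 0), CallerCount(100, 0);
  Scaled64 Count = BBFreq;
  Count /= EntryFreq;   // 0.5 -- fractional, hence Scaled64 over uint64_t
  Count *= CallerCount; // 50
  return Count;
}
```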
diff --git a/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
index bfab96a..510ecb5 100644
--- a/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
+++ b/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
@@ -154,7 +154,8 @@
       continue;
 
     Function *NewF =
-        Function::Create(EmptyFT, GlobalValue::ExternalLinkage, "", &M);
+        Function::Create(EmptyFT, GlobalValue::ExternalLinkage,
+                         F.getAddressSpace(), "", &M);
     NewF->setVisibility(F.getVisibility());
     NewF->takeName(&F);
     F.replaceAllUsesWith(ConstantExpr::getBitCast(NewF, F.getType()));
@@ -417,8 +418,18 @@
   }
 }
 
-// Returns whether this module needs to be split because it uses type metadata.
+// Returns whether this module needs to be split because splitting is
+// enabled and it uses type metadata.
 bool requiresSplit(Module &M) {
+  // First, check whether LTO unit splitting has been enabled.
+  bool EnableSplitLTOUnit = false;
+  if (auto *MD = mdconst::extract_or_null<ConstantInt>(
+          M.getModuleFlag("EnableSplitLTOUnit")))
+    EnableSplitLTOUnit = MD->getZExtValue();
+  if (!EnableSplitLTOUnit)
+    return false;
+
+  // Module only needs to be split if it contains type metadata.
   for (auto &GO : M.global_objects()) {
     if (GO.hasMetadata(LLVMContext::MD_type))
       return true;
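The producer side of this flag is not part of this patch; a frontend presumably opts a module in roughly like this (a sketch, assuming the standard module-flag API):

```cpp
#include "llvm/IR/Module.h"

// Hedged sketch: emit the flag that requiresSplit() reads back through
// mdconst::extract_or_null above.
void markSplittable(llvm::Module &M) {
  M.addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit", 1);
}
```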
@@ -430,7 +441,7 @@
 void writeThinLTOBitcode(raw_ostream &OS, raw_ostream *ThinLinkOS,
                          function_ref<AAResults &(Function &)> AARGetter,
                          Module &M, const ModuleSummaryIndex *Index) {
-  // See if this module has any type metadata. If so, we need to split it.
+  // Split module if splitting is enabled and it contains any type metadata.
   if (requiresSplit(M))
     return splitAndWriteThinLTOBitcode(OS, ThinLinkOS, AARGetter, M);
 
diff --git a/lib/Transforms/IPO/WholeProgramDevirt.cpp b/lib/Transforms/IPO/WholeProgramDevirt.cpp
index b8f68d4..48bd0cd 100644
--- a/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -864,10 +864,13 @@
   Function *JT;
   if (isa<MDString>(Slot.TypeID)) {
     JT = Function::Create(FT, Function::ExternalLinkage,
+                          M.getDataLayout().getProgramAddressSpace(),
                           getGlobalName(Slot, {}, "branch_funnel"), &M);
     JT->setVisibility(GlobalValue::HiddenVisibility);
   } else {
-    JT = Function::Create(FT, Function::InternalLinkage, "branch_funnel", &M);
+    JT = Function::Create(FT, Function::InternalLinkage,
+                          M.getDataLayout().getProgramAddressSpace(),
+                          "branch_funnel", &M);
   }
   JT->addAttribute(1, Attribute::Nest);
 
@@ -1560,6 +1563,17 @@
       M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load));
   Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));
 
+  // If only some of the modules were split, we cannot correctly handle
+  // code that contains type tests or type checked loads.
+  if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
+      (ImportSummary && ImportSummary->partiallySplitLTOUnits())) {
+    if ((TypeTestFunc && !TypeTestFunc->use_empty()) ||
+        (TypeCheckedLoadFunc && !TypeCheckedLoadFunc->use_empty()))
+      report_fatal_error("inconsistent LTO Unit splitting with llvm.type.test "
+                         "or llvm.type.checked.load");
+    return false;
+  }
+
   // Normally if there are no users of the devirtualization intrinsics in the
   // module, this pass has nothing to do. But if we are exporting, we also need
   // to handle any users that appear only in the function summaries.
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index f5db853..6e196bf 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1649,15 +1649,14 @@
     // X - A*-B -> X + A*B
     // X - -A*B -> X + A*B
     Value *A, *B;
-    Constant *CI;
     if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
       return BinaryOperator::CreateAdd(Op0, Builder.CreateMul(A, B));
 
-    // X - A*CI -> X + A*-CI
+    // X - A*C -> X + A*-C
     // No need to handle commuted multiply because multiply handling will
     // ensure constant will be move to the right hand side.
-    if (match(Op1, m_Mul(m_Value(A), m_Constant(CI)))) {
-      Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(CI));
+    if (match(Op1, m_Mul(m_Value(A), m_Constant(C))) && !isa<ConstantExpr>(C)) {
+      Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(C));
       return BinaryOperator::CreateAdd(Op0, NewMul);
     }
   }
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 24a82ba..404c2ad 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1810,6 +1810,57 @@
   return LastInst;
 }
 
+/// Transform UB-safe variants of bitwise rotate to the funnel shift intrinsic.
+static Instruction *matchRotate(Instruction &Or) {
+  // TODO: Can we reduce the code duplication between this and the related
+  // rotate matching code under visitSelect and visitTrunc?
+  unsigned Width = Or.getType()->getScalarSizeInBits();
+  if (!isPowerOf2_32(Width))
+    return nullptr;
+
+  // First, find an or'd pair of opposite shifts with the same shifted operand:
+  // or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1)
+  Value *Or0 = Or.getOperand(0), *Or1 = Or.getOperand(1);
+  Value *ShVal, *ShAmt0, *ShAmt1;
+  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
+      !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
+    return nullptr;
+
+  auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
+  auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
+  if (ShiftOpcode0 == ShiftOpcode1)
+    return nullptr;
+
+  // Match the shift amount operands for a rotate pattern. This always matches
+  // a subtraction on the R operand.
+  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
+    // The shift amount may be masked with negation:
+    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
+    Value *X;
+    unsigned Mask = Width - 1;
+    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
+        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
+      return X;
+
+    return nullptr;
+  };
+
+  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
+  bool SubIsOnLHS = false;
+  if (!ShAmt) {
+    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
+    SubIsOnLHS = true;
+  }
+  if (!ShAmt)
+    return nullptr;
+
+  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
+                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
+  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
+  Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
+  return IntrinsicInst::Create(F, { ShVal, ShVal, ShAmt });
+}
+
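For context, the branch-free source idiom this matcher recognizes (illustrative):

```cpp
#include <cstdint>

// UB-safe rotate without a branch: masking both shift amounts with Width-1
// keeps the shifts in range, and the negated-then-masked amount makes n == 0
// come out as x. The whole expression becomes one llvm.fshl.i32 call.
uint32_t rotl32(uint32_t x, uint32_t n) {
  return (x << (n & 31)) | (x >> (-n & 31));
}
```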
 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
   unsigned NumElts = C1->getType()->getVectorNumElements();
@@ -2170,6 +2221,9 @@
   if (Instruction *BSwap = matchBSwap(I))
     return BSwap;
 
+  if (Instruction *Rotate = matchRotate(I))
+    return Rotate;
+
   Value *X, *Y;
   const APInt *CV;
   if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index ae158ae..aeb25d5 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -136,6 +136,14 @@
   if (Size > 8 || (Size&(Size-1)))
     return nullptr;  // If not 1/2/4/8 bytes, exit.
 
+  // If this is an atomic transfer and the alignment is less than the size,
+  // we would introduce an unaligned memory access, which CodeGen later
+  // transforms into a libcall. That is not an evident performance gain, so
+  // disable the transform for now.
+  if (isa<AtomicMemTransferInst>(MI))
+    if (CopyDstAlign < Size || CopySrcAlign < Size)
+      return nullptr;
+
   // Use an integer load+store unless we can find something better.
   unsigned SrcAddrSp =
     cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
@@ -174,6 +182,9 @@
     MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
   if (LoopMemParallelMD)
     L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
+  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
+  if (AccessGroupMD)
+    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
 
   StoreInst *S = Builder.CreateStore(L, Dest);
   // Alignment from the mem intrinsic will be better, so use it.
@@ -182,6 +193,8 @@
     S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
   if (LoopMemParallelMD)
     S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
+  if (AccessGroupMD)
+    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
 
   if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
     // non-atomics can be volatile
@@ -215,6 +228,18 @@
   Alignment = MI->getDestAlignment();
   assert(Len && "0-sized memory setting should be removed already.");
 
+  // For memset, an alignment of 0 is the same as an alignment of 1; for a
+  // store it is not, so normalize to 1 before creating the store below.
+  if (Alignment == 0)
+    Alignment = 1;
+
+  // If this is an atomic memset and the alignment is less than the size, we
+  // would introduce an unaligned memory access, which CodeGen later
+  // transforms into a libcall. That is not an evident performance gain, so
+  // disable the transform for now.
+  if (isa<AtomicMemSetInst>(MI))
+    if (Alignment < Len)
+      return nullptr;
+
   // memset(s,c,n) -> store s, c (for n=1,2,4,8)
   if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
     Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
@@ -224,9 +249,6 @@
     Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
     Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
 
-    // Alignment 0 is identity for alignment 1 for memset, but not store.
-    if (Alignment == 0) Alignment = 1;
-
     // Extract the fill value and store.
     uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
     StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
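
The fill computation above splats one byte across the widened integer; a short
sketch of the arithmetic (assumed example value 0xAB, not from the patch):

#include <cassert>
#include <cstdint>

int main() {
  // memset(p, 0xAB, 8) becomes a single i64 store of this splatted value.
  uint64_t Fill = 0xABULL * 0x0101010101010101ULL;
  assert(Fill == 0xABABABABABABABABULL);
  return 0;
}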
@@ -243,67 +265,6 @@
   return nullptr;
 }
 
-static Value *simplifyX86AddsSubs(const IntrinsicInst &II,
-                                  InstCombiner::BuilderTy &Builder) {
-  bool IsAddition;
-
-  switch (II.getIntrinsicID()) {
-  default: llvm_unreachable("Unexpected intrinsic!");
-  case Intrinsic::x86_sse2_padds_b:
-  case Intrinsic::x86_sse2_padds_w:
-  case Intrinsic::x86_avx2_padds_b:
-  case Intrinsic::x86_avx2_padds_w:
-  case Intrinsic::x86_avx512_padds_b_512:
-  case Intrinsic::x86_avx512_padds_w_512:
-    IsAddition = true;
-    break;
-  case Intrinsic::x86_sse2_psubs_b:
-  case Intrinsic::x86_sse2_psubs_w:
-  case Intrinsic::x86_avx2_psubs_b:
-  case Intrinsic::x86_avx2_psubs_w:
-  case Intrinsic::x86_avx512_psubs_b_512:
-  case Intrinsic::x86_avx512_psubs_w_512:
-    IsAddition = false;
-    break;
-  }
-
-  auto *Arg0 = dyn_cast<Constant>(II.getOperand(0));
-  auto *Arg1 = dyn_cast<Constant>(II.getOperand(1));
-  auto VT = cast<VectorType>(II.getType());
-  auto SVT = VT->getElementType();
-  unsigned NumElems = VT->getNumElements();
-
-  if (!Arg0 || !Arg1)
-    return nullptr;
-
-  SmallVector<Constant *, 64> Result;
-
-  APInt MaxValue = APInt::getSignedMaxValue(SVT->getIntegerBitWidth());
-  APInt MinValue = APInt::getSignedMinValue(SVT->getIntegerBitWidth());
-  for (unsigned i = 0; i < NumElems; ++i) {
-    auto *Elt0 = Arg0->getAggregateElement(i);
-    auto *Elt1 = Arg1->getAggregateElement(i);
-    if (isa<UndefValue>(Elt0) || isa<UndefValue>(Elt1)) {
-      Result.push_back(UndefValue::get(SVT));
-      continue;
-    }
-
-    if (!isa<ConstantInt>(Elt0) || !isa<ConstantInt>(Elt1))
-      return nullptr;
-
-    const APInt &Val0 = cast<ConstantInt>(Elt0)->getValue();
-    const APInt &Val1 = cast<ConstantInt>(Elt1)->getValue();
-    bool Overflow = false;
-    APInt ResultElem = IsAddition ? Val0.sadd_ov(Val1, Overflow)
-                                  : Val0.ssub_ov(Val1, Overflow);
-    if (Overflow)
-      ResultElem = Val0.isNegative() ? MinValue : MaxValue;
-    Result.push_back(Constant::getIntegerValue(SVT, ResultElem));
-  }
-
-  return ConstantVector::get(Result);
-}
-
 static Value *simplifyX86immShift(const IntrinsicInst &II,
                                   InstCombiner::BuilderTy &Builder) {
   bool LogicalShift = false;
@@ -2784,23 +2745,6 @@
     break;
   }
 
-  // Constant fold add/sub with saturation intrinsics.
-  case Intrinsic::x86_sse2_padds_b:
-  case Intrinsic::x86_sse2_padds_w:
-  case Intrinsic::x86_sse2_psubs_b:
-  case Intrinsic::x86_sse2_psubs_w:
-  case Intrinsic::x86_avx2_padds_b:
-  case Intrinsic::x86_avx2_padds_w:
-  case Intrinsic::x86_avx2_psubs_b:
-  case Intrinsic::x86_avx2_psubs_w:
-  case Intrinsic::x86_avx512_padds_b_512:
-  case Intrinsic::x86_avx512_padds_w_512:
-  case Intrinsic::x86_avx512_psubs_b_512:
-  case Intrinsic::x86_avx512_psubs_w_512:
-    if (Value *V = simplifyX86AddsSubs(*II, Builder))
-      return replaceInstUsesWith(*II, V);
-    break;
-
   // Constant fold ashr( <A x Bi>, Ci ).
   // Constant fold lshr( <A x Bi>, Ci ).
   // Constant fold shl( <A x Bi>, Ci ).
@@ -3833,6 +3777,11 @@
         // Promote to next legal integer type.
         unsigned Width = CmpType->getBitWidth();
         unsigned NewWidth = Width;
+
+        // Don't do anything for i1 comparisons.
+        if (Width == 1)
+          break;
+
         if (Width <= 16)
           NewWidth = 16;
         else if (Width <= 32)
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 582f69f..1201ac1 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -492,7 +492,7 @@
 }
 
 /// Rotate left/right may occur in a wider type than necessary because of type
-/// promotion rules. Try to narrow all of the component instructions.
+/// promotion rules. Try to narrow the inputs and convert to funnel shift.
 Instruction *InstCombiner::narrowRotate(TruncInst &Trunc) {
   assert((isa<VectorType>(Trunc.getSrcTy()) ||
           shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
@@ -563,23 +563,15 @@
 
   // We have an unnecessarily wide rotate!
   // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
-  // Narrow it down to eliminate the zext/trunc:
-  // or (lshr trunc(ShVal), ShAmt0'), (shl trunc(ShVal), ShAmt1')
+  // Narrow the inputs and convert to funnel shift intrinsic:
+  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
   Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
-  Value *NegShAmt = Builder.CreateNeg(NarrowShAmt);
-
-  // Mask both shift amounts to ensure there's no UB from oversized shifts.
-  Constant *MaskC = ConstantInt::get(DestTy, NarrowWidth - 1);
-  Value *MaskedShAmt = Builder.CreateAnd(NarrowShAmt, MaskC);
-  Value *MaskedNegShAmt = Builder.CreateAnd(NegShAmt, MaskC);
-
-  // Truncate the original value and use narrow ops.
   Value *X = Builder.CreateTrunc(ShVal, DestTy);
-  Value *NarrowShAmt0 = SubIsOnLHS ? MaskedNegShAmt : MaskedShAmt;
-  Value *NarrowShAmt1 = SubIsOnLHS ? MaskedShAmt : MaskedNegShAmt;
-  Value *NarrowSh0 = Builder.CreateBinOp(ShiftOpcode0, X, NarrowShAmt0);
-  Value *NarrowSh1 = Builder.CreateBinOp(ShiftOpcode1, X, NarrowShAmt1);
-  return BinaryOperator::CreateOr(NarrowSh0, NarrowSh1);
+  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
+                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
+  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
+  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
+  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
 }
 
 /// Try to narrow the width of math or bitwise logic instructions by pulling a
@@ -1107,12 +1099,9 @@
   Value *Src = CI.getOperand(0);
   Type *SrcTy = Src->getType(), *DestTy = CI.getType();
 
-  // Attempt to extend the entire input expression tree to the destination
-  // type.   Only do this if the dest type is a simple type, don't convert the
-  // expression tree to something weird like i93 unless the source is also
-  // strange.
+  // Try to extend the entire expression tree to the wide destination type.
   unsigned BitsToClear;
-  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
+  if (shouldChangeType(SrcTy, DestTy) &&
       canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
     assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
            "Can't clear more bits than in SrcTy");
@@ -1389,12 +1378,8 @@
     return replaceInstUsesWith(CI, ZExt);
   }
 
-  // Attempt to extend the entire input expression tree to the destination
-  // type.   Only do this if the dest type is a simple type, don't convert the
-  // expression tree to something weird like i93 unless the source is also
-  // strange.
-  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
-      canEvaluateSExtd(Src, DestTy)) {
+  // Try to extend the entire expression tree to the wide destination type.
+  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
     // Okay, we can transform this!  Insert the new expression now.
     LLVM_DEBUG(
         dbgs() << "ICE: EvaluateInDifferentType converting expression type"
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 3853993..b5bbb09 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2765,6 +2765,7 @@
 
   // Handle icmp {eq|ne} <intrinsic>, Constant.
   Type *Ty = II->getType();
+  unsigned BitWidth = C.getBitWidth();
   switch (II->getIntrinsicID()) {
   case Intrinsic::bswap:
     Worklist.Add(II);
@@ -2773,21 +2774,39 @@
     return &Cmp;
 
   case Intrinsic::ctlz:
-  case Intrinsic::cttz:
+  case Intrinsic::cttz: {
     // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
-    if (C == C.getBitWidth()) {
+    if (C == BitWidth) {
       Worklist.Add(II);
       Cmp.setOperand(0, II->getArgOperand(0));
       Cmp.setOperand(1, ConstantInt::getNullValue(Ty));
       return &Cmp;
     }
+
+    // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 has only bit C set
+    // and Mask1 has the low C+1 bits (0..C) set. Similarly for ctlz, but
+    // using the high bits.
+    // Limit to one use to ensure we don't increase instruction count.
+    unsigned Num = C.getLimitedValue(BitWidth);
+    if (Num != BitWidth && II->hasOneUse()) {
+      bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
+      APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
+                               : APInt::getHighBitsSet(BitWidth, Num + 1);
+      APInt Mask2 = IsTrailing
+        ? APInt::getOneBitSet(BitWidth, Num)
+        : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
+      Cmp.setOperand(0, Builder.CreateAnd(II->getArgOperand(0), Mask1));
+      Cmp.setOperand(1, ConstantInt::get(Ty, Mask2));
+      Worklist.Add(II);
+      return &Cmp;
+    }
     break;
+  }
 
   case Intrinsic::ctpop: {
     // popcount(A) == 0  ->  A == 0 and likewise for !=
     // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
     bool IsZero = C.isNullValue();
-    if (IsZero || C == C.getBitWidth()) {
+    if (IsZero || C == BitWidth) {
       Worklist.Add(II);
       Cmp.setOperand(0, II->getArgOperand(0));
       auto *NewOp =
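
A worked instance of the new mask fold (a sketch using the GCC/Clang
__builtin_ctz builtin): for i8 and C = 2, cttz(A) == 2 exactly when bit 2 is
set and bits 0..1 are clear, i.e. (A & 0x7) == 0x4.

#include <cassert>

int main() {
  for (unsigned A = 1; A < 256; ++A) {
    // Mask1 = low C+1 bits (0x7), Mask2 = 1 << C (0x4).
    bool Folded = (A & 0x7u) == 0x4u;
    assert((__builtin_ctz(A) == 2) == Folded);
  }
  return 0;
}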
diff --git a/lib/Transforms/InstCombine/InstCombineInternal.h b/lib/Transforms/InstCombine/InstCombineInternal.h
index e507630..2de41bd 100644
--- a/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -802,7 +802,8 @@
 
   Value *simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
                                                APInt DemandedElts,
-                                               int DmaskIdx = -1);
+                                               int DmaskIdx = -1,
+                                               int TFCIdx = -1);
 
   Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                     APInt &UndefElts, unsigned Depth = 0);
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 02ebb5e..76ab614 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -116,13 +116,10 @@
       }
 
       // Lifetime intrinsics can be handled by the caller.
-      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
-        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
-            II->getIntrinsicID() == Intrinsic::lifetime_end) {
-          assert(II->use_empty() && "Lifetime markers have no result to use!");
-          ToDelete.push_back(II);
-          continue;
-        }
+      if (I->isLifetimeStartOrEnd()) {
+        assert(I->use_empty() && "Lifetime markers have no result to use!");
+        ToDelete.push_back(I);
+        continue;
       }
 
       // If this isn't our memcpy/memmove, reject it as something we can't
@@ -493,6 +490,7 @@
     case LLVMContext::MD_noalias:
     case LLVMContext::MD_nontemporal:
     case LLVMContext::MD_mem_parallel_loop_access:
+    case LLVMContext::MD_access_group:
       // All of these directly apply.
       NewLoad->setMetadata(ID, N);
       break;
@@ -552,10 +550,10 @@
     case LLVMContext::MD_noalias:
     case LLVMContext::MD_nontemporal:
     case LLVMContext::MD_mem_parallel_loop_access:
+    case LLVMContext::MD_access_group:
       // All of these directly apply.
       NewStore->setMetadata(ID, N);
       break;
-
     case LLVMContext::MD_invariant_load:
     case LLVMContext::MD_nonnull:
     case LLVMContext::MD_range:
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index c348aec..7e99f3e 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -244,6 +244,11 @@
     return NewMul;
   }
 
+  // -X * Y --> -(X * Y)
+  // X * -Y --> -(X * Y)
+  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
+    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));
+
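(For the fold just added: a quick exhaustive check, as a sketch, that
(-X) * Y == -(X * Y) holds in wraparound arithmetic.)

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned Y = 0; Y < 256; ++Y)
      // i8 arithmetic modeled with explicit truncation to 8 bits.
      assert((uint8_t)((uint8_t)-X * Y) == (uint8_t)-(X * Y));
  return 0;
}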
   // (X / Y) *  Y = X - (X % Y)
   // (X / Y) * -Y = (X % Y) - X
   {
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index a71ebdc..7603cf4 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -608,6 +608,7 @@
     LLVMContext::MD_align,
     LLVMContext::MD_dereferenceable,
     LLVMContext::MD_dereferenceable_or_null,
+    LLVMContext::MD_access_group,
   };
 
   for (unsigned ID : KnownIDs)
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 19858ae..faf58a0 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -709,17 +709,18 @@
       match(Count, m_Trunc(m_Value(V))))
     Count = V;
 
+  // Check that 'Count' is a call to intrinsic cttz/ctlz. Also check that the
+  // input to the cttz/ctlz is used as LHS for the compare instruction.
+  if (!match(Count, m_Intrinsic<Intrinsic::cttz>(m_Specific(CmpLHS))) &&
+      !match(Count, m_Intrinsic<Intrinsic::ctlz>(m_Specific(CmpLHS))))
+    return nullptr;
+
+  IntrinsicInst *II = cast<IntrinsicInst>(Count);
+
   // Check if the value propagated on zero is a constant number equal to the
   // sizeof in bits of 'Count'.
   unsigned SizeOfInBits = Count->getType()->getScalarSizeInBits();
-  if (!match(ValueOnZero, m_SpecificInt(SizeOfInBits)))
-    return nullptr;
-
-  // Check that 'Count' is a call to intrinsic cttz/ctlz. Also check that the
-  // input to the cttz/ctlz is used as LHS for the compare instruction.
-  if (match(Count, m_Intrinsic<Intrinsic::cttz>(m_Specific(CmpLHS))) ||
-      match(Count, m_Intrinsic<Intrinsic::ctlz>(m_Specific(CmpLHS)))) {
-    IntrinsicInst *II = cast<IntrinsicInst>(Count);
+  if (match(ValueOnZero, m_SpecificInt(SizeOfInBits))) {
     // Explicitly clear the 'undef_on_zero' flag.
     IntrinsicInst *NewI = cast<IntrinsicInst>(II->clone());
     NewI->setArgOperand(1, ConstantInt::getFalse(NewI->getContext()));
@@ -727,6 +728,12 @@
     return Builder.CreateZExtOrTrunc(NewI, ValueOnZero->getType());
   }
 
+  // If the ValueOnZero is not the bitwidth, we can at least make use of the
+  // fact that the cttz/ctlz result will not be used if the input is zero, so
+  // it's okay to relax it to undef for that case.
+  if (II->hasOneUse() && !match(II->getArgOperand(1), m_One()))
+    II->setArgOperand(1, ConstantInt::getTrue(II->getContext()));
+
   return nullptr;
 }
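
The select pattern this fold targets, as it typically appears in C (a sketch
using the GCC/Clang builtin): the branch guards the zero input, so the whole
expression can collapse to a single zero-defined cttz call.

#include <cstdint>

unsigned count_trailing(uint32_t X) {
  // The X == 0 arm supplies the bitwidth, so InstCombine replaces the select
  // with llvm.cttz.i32(X, /*is_zero_undef=*/false).
  return X == 0 ? 32 : (unsigned)__builtin_ctz(X);
}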
 
@@ -1547,11 +1554,10 @@
 }
 
 /// Try to reduce a rotate pattern that includes a compare and select into a
-/// sequence of ALU ops only. Example:
+/// funnel shift intrinsic. Example:
 /// rotl32(a, b) --> (b == 0 ? a : ((a >> (32 - b)) | (a << b)))
-///              --> (a >> (-b & 31)) | (a << (b & 31))
-static Instruction *foldSelectRotate(SelectInst &Sel,
-                                     InstCombiner::BuilderTy &Builder) {
+///              --> call llvm.fshl.i32(a, a, b)
+static Instruction *foldSelectRotate(SelectInst &Sel) {
   // The false value of the select must be a rotate of the true value.
   Value *Or0, *Or1;
   if (!match(Sel.getFalseValue(), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
@@ -1593,17 +1599,12 @@
     return nullptr;
 
   // This is a rotate that avoids shift-by-bitwidth UB in a suboptimal way.
-  // Convert to safely bitmasked shifts.
-  // TODO: When we can canonicalize to funnel shift intrinsics without risk of
-  // performance regressions, replace this sequence with that call.
-  Value *NegShAmt = Builder.CreateNeg(ShAmt);
-  Value *MaskedShAmt = Builder.CreateAnd(ShAmt, Width - 1);
-  Value *MaskedNegShAmt = Builder.CreateAnd(NegShAmt, Width - 1);
-  Value *NewSA0 = ShAmt == SA0 ? MaskedShAmt : MaskedNegShAmt;
-  Value *NewSA1 = ShAmt == SA1 ? MaskedShAmt : MaskedNegShAmt;
-  Value *NewSh0 = Builder.CreateBinOp(ShiftOpcode0, TVal, NewSA0);
-  Value *NewSh1 = Builder.CreateBinOp(ShiftOpcode1, TVal, NewSA1);
-  return BinaryOperator::CreateOr(NewSh0, NewSh1);
+  // Convert to funnel shift intrinsic.
+  bool IsFshl = (ShAmt == SA0 && ShiftOpcode0 == BinaryOperator::Shl) ||
+                (ShAmt == SA1 && ShiftOpcode1 == BinaryOperator::Shl);
+  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
+  Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
+  return IntrinsicInst::Create(F, { TVal, TVal, ShAmt });
 }
 
 Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
@@ -2045,7 +2046,7 @@
   if (Instruction *Select = foldSelectBinOpIdentity(SI, TLI))
     return Select;
 
-  if (Instruction *Rot = foldSelectRotate(SI, Builder))
+  if (Instruction *Rot = foldSelectRotate(SI))
     return Rot;
 
   return nullptr;
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index a193dde..9bf87d0 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -969,11 +969,24 @@
 /// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
 Value *InstCombiner::simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
                                                            APInt DemandedElts,
-                                                           int DMaskIdx) {
+                                                           int DMaskIdx,
+                                                           int TFCIdx) {
   unsigned VWidth = II->getType()->getVectorNumElements();
   if (VWidth == 1)
     return nullptr;
 
+  // If the texture-fail-enable (TFE) or LOD-warn-enable (LWE) bit is set, the
+  // instruction returns an extra dword, which this transform does not handle.
+  ConstantInt *TFC = nullptr;
+  bool TFELWEEnabled = false;
+  if (TFCIdx > 0) {
+    TFC = dyn_cast<ConstantInt>(II->getArgOperand(TFCIdx));
+    // Guard against a non-constant operand before inspecting the bits.
+    TFELWEEnabled = TFC && ((TFC->getZExtValue() & 0x1) ||  // TFE
+                            (TFC->getZExtValue() & 0x2));   // LWE
+  }
+
+  if (TFELWEEnabled)
+    return nullptr; // TFE/LWE not yet supported
+
   ConstantInt *NewDMask = nullptr;
 
   if (DMaskIdx < 0) {
@@ -1619,10 +1632,15 @@
       break;
     case Intrinsic::amdgcn_buffer_load:
     case Intrinsic::amdgcn_buffer_load_format:
+    case Intrinsic::amdgcn_raw_buffer_load:
+    case Intrinsic::amdgcn_raw_buffer_load_format:
+    case Intrinsic::amdgcn_struct_buffer_load:
+    case Intrinsic::amdgcn_struct_buffer_load_format:
       return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts);
     default: {
       if (getAMDGPUImageDMaskIntrinsic(II->getIntrinsicID()))
-        return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts, 0);
+        return simplifyAMDGCNMemoryIntrinsicDemanded(
+            II, DemandedElts, 0, II->getNumArgOperands() - 2);
 
       break;
     }
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 7ad29a5..0ad1fc0 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -46,42 +46,34 @@
 #define DEBUG_TYPE "instcombine"
 
 /// Return true if the value is cheaper to scalarize than it is to leave as a
-/// vector operation. isConstant indicates whether we're extracting one known
-/// element. If false we're extracting a variable index.
-//
-// FIXME: It's possible to create more instructions that previously existed.
-static bool cheapToScalarize(Value *V, bool isConstant) {
-  if (Constant *C = dyn_cast<Constant>(V)) {
-    if (isConstant) return true;
+/// vector operation. IsConstantExtractIndex indicates whether we are extracting
+/// one known element from a vector constant.
+///
+/// FIXME: It's possible to create more instructions than previously existed.
+static bool cheapToScalarize(Value *V, bool IsConstantExtractIndex) {
+  // If we can pick a scalar constant value out of a vector, that is free.
+  if (auto *C = dyn_cast<Constant>(V))
+    return IsConstantExtractIndex || C->getSplatValue();
 
-    // If all elts are the same, we can extract it and use any of the values.
-    if (Constant *Op0 = C->getAggregateElement(0U)) {
-      for (unsigned i = 1, e = V->getType()->getVectorNumElements(); i != e;
-           ++i)
-        if (C->getAggregateElement(i) != Op0)
-          return false;
-      return true;
-    }
-  }
-  Instruction *I = dyn_cast<Instruction>(V);
-  if (!I) return false;
+  // An insertelement to the same constant index as our extract will simplify
+  // to the scalar inserted element. An insertelement to a different constant
+  // index is irrelevant to our extract.
+  if (match(V, m_InsertElement(m_Value(), m_Value(), m_ConstantInt())))
+    return IsConstantExtractIndex;
 
-  // Insert element gets simplified to the inserted element or is deleted if
-  // this is constant idx extract element and its a constant idx insertelt.
-  if (I->getOpcode() == Instruction::InsertElement && isConstant &&
-      isa<ConstantInt>(I->getOperand(2)))
+  if (match(V, m_OneUse(m_Load(m_Value()))))
     return true;
-  if (I->getOpcode() == Instruction::Load && I->hasOneUse())
-    return true;
-  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
-    if (BO->hasOneUse() &&
-        (cheapToScalarize(BO->getOperand(0), isConstant) ||
-         cheapToScalarize(BO->getOperand(1), isConstant)))
+
+  Value *V0, *V1;
+  if (match(V, m_OneUse(m_BinOp(m_Value(V0), m_Value(V1)))))
+    if (cheapToScalarize(V0, IsConstantExtractIndex) ||
+        cheapToScalarize(V1, IsConstantExtractIndex))
       return true;
-  if (CmpInst *CI = dyn_cast<CmpInst>(I))
-    if (CI->hasOneUse() &&
-        (cheapToScalarize(CI->getOperand(0), isConstant) ||
-         cheapToScalarize(CI->getOperand(1), isConstant)))
+
+  CmpInst::Predicate UnusedPred;
+  if (match(V, m_OneUse(m_Cmp(UnusedPred, m_Value(V0), m_Value(V1)))))
+    if (cheapToScalarize(V0, IsConstantExtractIndex) ||
+        cheapToScalarize(V1, IsConstantExtractIndex))
       return true;
 
   return false;
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 7c9dbcb..be7d43b 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -183,7 +183,10 @@
 /// a fundamental type in IR, and there are many specialized optimizations for
 /// i1 types.
 bool InstCombiner::shouldChangeType(Type *From, Type *To) const {
-  assert(From->isIntegerTy() && To->isIntegerTy());
+  // TODO: This could be extended to allow vectors. Datalayout changes might be
+  // needed to properly support that.
+  if (!From->isIntegerTy() || !To->isIntegerTy())
+    return false;
 
   unsigned FromWidth = From->getPrimitiveSizeInBits();
   unsigned ToWidth = To->getPrimitiveSizeInBits();
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 8728838..f1558c7 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -109,6 +109,7 @@
 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
 static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
 static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
+static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
 static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
 
@@ -440,8 +441,11 @@
     for (auto MDN : Globals->operands()) {
       // Metadata node contains the global and the fields of "Entry".
       assert(MDN->getNumOperands() == 5);
-      auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
+      auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
       // The optimizer may optimize away a global entirely.
+      if (!V) continue;
+      auto *StrippedV = V->stripPointerCasts();
+      auto *GV = dyn_cast<GlobalVariable>(StrippedV);
       if (!GV) continue;
       // We can already have an entry for GV if it was merged with another
       // global.
@@ -544,9 +548,12 @@
       Mapping.Offset = kSystemZ_ShadowOffset64;
     else if (IsFreeBSD && !IsMIPS64)
       Mapping.Offset = kFreeBSD_ShadowOffset64;
-    else if (IsNetBSD)
-      Mapping.Offset = kNetBSD_ShadowOffset64;
-    else if (IsPS4CPU)
+    else if (IsNetBSD) {
+      if (IsKasan)
+        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
+      else
+        Mapping.Offset = kNetBSD_ShadowOffset64;
+    } else if (IsPS4CPU)
       Mapping.Offset = kPS4CPU_ShadowOffset64;
     else if (IsLinux && IsX86_64) {
       if (IsKasan)
@@ -998,7 +1005,7 @@
     if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
     if (!ASan.UseAfterScope)
       return;
-    if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
+    if (!II.isLifetimeStartOrEnd())
       return;
     // Found lifetime intrinsic, add ASan instrumentation if necessary.
     ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
@@ -2143,6 +2150,10 @@
     NewGlobal->copyAttributesFrom(G);
     NewGlobal->setComdat(G->getComdat());
     NewGlobal->setAlignment(MinRZ);
+    // Don't fold globals with redzones. The ODR violation detector and
+    // redzone poisoning implicitly create a dependence on the global's
+    // address, so it is no longer valid for it to be marked unnamed_addr.
+    NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
 
     // Move null-terminated C strings to "__asan_cstring" section on Darwin.
     if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
@@ -2190,10 +2201,8 @@
           GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
     }
 
-    // ODR check is not useful for the following, but we see false reports
-    // caused by linker optimizations.
-    if (NewGlobal->hasLocalLinkage() || NewGlobal->hasGlobalUnnamedAddr() ||
-        NewGlobal->hasLinkOnceODRLinkage() || NewGlobal->hasWeakODRLinkage()) {
+    // ODR violations cannot happen with local linkage.
+    if (NewGlobal->hasLocalLinkage()) {
       ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
                                                IRB.getInt8PtrTy());
     } else if (UseOdrIndicator) {
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 8291239..9af64ed 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -185,7 +185,7 @@
 /// Prefer relative paths in the coverage notes. Clang also may split
 /// up absolute paths into a directory and filename component. When
 /// the relative path doesn't exist, reconstruct the absolute path.
-SmallString<128> getFilename(const DISubprogram *SP) {
+static SmallString<128> getFilename(const DISubprogram *SP) {
   SmallString<128> Path;
   StringRef RelPath = SP->getFilename();
   if (sys::fs::exists(RelPath))
diff --git a/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 9102160..d04c2b7 100644
--- a/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -152,6 +152,10 @@
                               cl::desc("create static frame descriptions"),
                               cl::Hidden, cl::init(true));
 
+static cl::opt<bool>
+    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
+                              cl::desc("instrument memory intrinsics"),
+                              cl::Hidden, cl::init(true));
+
 namespace {
 
 /// An instrumentation pass implementing detection of addressability bugs
@@ -182,6 +186,7 @@
   void instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
                                  unsigned AccessSizeIndex,
                                  Instruction *InsertBefore);
+  void instrumentMemIntrinsic(MemIntrinsic *MI);
   bool instrumentMemAccess(Instruction *I);
   Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                    uint64_t *TypeSize, unsigned *Alignment,
@@ -206,6 +211,7 @@
   LLVMContext *C;
   std::string CurModuleUniqueId;
   Triple TargetTriple;
+  Function *HWAsanMemmove, *HWAsanMemcpy, *HWAsanMemset;
 
   // Frame description is a way to pass names/sizes of local variables
   // to the run-time w/o adding extra executable code in every function.
@@ -258,6 +264,7 @@
 
   Function *HwasanTagMemoryFunc;
   Function *HwasanGenerateTagFunc;
+  Function *HwasanThreadEnterFunc;
 
   Constant *ShadowGlobal;
 
@@ -309,15 +316,24 @@
                                             kHwasanInitName,
                                             /*InitArgTypes=*/{},
                                             /*InitArgs=*/{});
-    appendToGlobalCtors(M, HwasanCtorFunction, 0);
-  }
+    Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
+    HwasanCtorFunction->setComdat(CtorComdat);
+    appendToGlobalCtors(M, HwasanCtorFunction, 0, HwasanCtorFunction);
 
-  // Create a call to __hwasan_init_frames.
-  if (HwasanCtorFunction) {
-    // Create a dummy frame description for the CTOR function.
-    // W/o it we would have to create the call to __hwasan_init_frames after
-    // all functions are instrumented (i.e. need to have a ModulePass).
-    createFrameGlobal(*HwasanCtorFunction, "");
+    // Create a zero-length global in __hwasan_frame so that the linker will
+    // always create start and stop symbols.
+    //
+    // N.B. If we ever start creating associated metadata in this pass this
+    // global will need to be associated with the ctor.
+    Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
+    auto GV =
+        new GlobalVariable(M, Int8Arr0Ty, /*isConstantGlobal*/ true,
+                           GlobalVariable::PrivateLinkage,
+                           Constant::getNullValue(Int8Arr0Ty), "__hwasan");
+    GV->setSection(getFrameSection());
+    GV->setComdat(CtorComdat);
+    appendToCompilerUsed(M, GV);
+
     IRBuilder<> IRBCtor(HwasanCtorFunction->getEntryBlock().getTerminator());
     IRBCtor.CreateCall(
         declareSanitizerInitFunction(M, "__hwasan_init_frames",
@@ -364,6 +380,21 @@
   if (Mapping.InGlobal)
     ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                        ArrayType::get(IRB.getInt8Ty(), 0));
+
+  const std::string MemIntrinCallbackPrefix =
+      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
+  HWAsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
+      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
+  HWAsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
+      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
+  HWAsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(),
+      IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy));
+
+  HwasanThreadEnterFunc = checkSanitizerInterfaceFunction(
+      M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy()));
 }
 
 Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
@@ -539,12 +570,36 @@
   IRB.CreateCall(Asm, PtrLong);
 }
 
+void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+  IRBuilder<> IRB(MI);
+  if (isa<MemTransferInst>(MI)) {
+    IRB.CreateCall(
+        isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
+        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
+         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
+  } else if (isa<MemSetInst>(MI)) {
+    IRB.CreateCall(
+        HWAsanMemset,
+        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
+         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
+  }
+  MI->eraseFromParent();
+}
+
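In effect the rewrite above turns a memcpy intrinsic into a runtime call of
the same shape; a sketch (hwasan_memcpy_checked is a hypothetical stand-in
for __hwasan_memcpy, minus the real tag checks):

#include <cstddef>
#include <cstring>

extern "C" void *hwasan_memcpy_checked(void *Dst, const void *Src, size_t N) {
  // The real callback validates the shadow tags of both ranges, then copies.
  return memcpy(Dst, Src, N);
}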
 bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
   LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
   bool IsWrite = false;
   unsigned Alignment = 0;
   uint64_t TypeSize = 0;
   Value *MaybeMask = nullptr;
+
+  if (ClInstrumentMemIntrinsics && isa<MemIntrinsic>(I)) {
+    instrumentMemIntrinsic(cast<MemIntrinsic>(I));
+    return true;
+  }
+
   Value *Addr =
       isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
 
@@ -703,10 +758,12 @@
 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
   if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
+    // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
+    // in Bionic's libc/private/bionic_tls.h.
     Function *ThreadPointerFunc =
         Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
     Value *SlotPtr = IRB.CreatePointerCast(
-        IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), 0x40),
+        IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), 0x30),
         Ty->getPointerTo(0));
     return SlotPtr;
   }
@@ -742,10 +799,9 @@
   GV->setSection(getFrameSection());
   appendToCompilerUsed(M, GV);
   // Put GV into F's Comdat so that if F is deleted GV can be deleted too.
-  if (&F != HwasanCtorFunction)
-    if (auto Comdat =
-            GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId))
-      GV->setComdat(Comdat);
+  if (auto Comdat =
+          GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId))
+    GV->setComdat(Comdat);
 }
 
 Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
@@ -756,14 +812,35 @@
   Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
   assert(SlotPtr);
 
-  Value *ThreadLong = IRB.CreateLoad(SlotPtr);
+  Instruction *ThreadLong = IRB.CreateLoad(SlotPtr);
+
+  Function *F = IRB.GetInsertBlock()->getParent();
+  if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
+    Value *ThreadLongEqZero =
+        IRB.CreateICmpEQ(ThreadLong, ConstantInt::get(IntptrTy, 0));
+    auto *Br = cast<BranchInst>(SplitBlockAndInsertIfThen(
+        ThreadLongEqZero, cast<Instruction>(ThreadLongEqZero)->getNextNode(),
+        false, MDBuilder(*C).createBranchWeights(1, 100000)));
+
+    IRB.SetInsertPoint(Br);
+    // FIXME: This should call a new runtime function with a custom calling
+    // convention to avoid needing to spill all arguments here.
+    IRB.CreateCall(HwasanThreadEnterFunc);
+    LoadInst *ReloadThreadLong = IRB.CreateLoad(SlotPtr);
+
+    IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
+    PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
+    ThreadLongPhi->addIncoming(ThreadLong, ThreadLong->getParent());
+    ThreadLongPhi->addIncoming(ReloadThreadLong, ReloadThreadLong->getParent());
+    ThreadLong = ThreadLongPhi;
+  }
+
   // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
   Value *ThreadLongMaybeUntagged =
       TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);
 
   if (WithFrameRecord) {
     // Prepare ring buffer data.
-    Function *F = IRB.GetInsertBlock()->getParent();
     auto PC = IRB.CreatePtrToInt(F, IntptrTy);
     auto GetStackPointerFn =
         Intrinsic::getDeclaration(F->getParent(), Intrinsic::frameaddress);
diff --git a/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index 7edc53d..58436c8 100644
--- a/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -19,7 +19,7 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Analysis/IndirectCallPromotionAnalysis.h"
-#include "llvm/Analysis/IndirectCallSiteVisitor.h"
+#include "llvm/Analysis/IndirectCallVisitor.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/Analysis/ProfileSummaryInfo.h"
 #include "llvm/IR/Attributes.h"
@@ -41,8 +41,8 @@
 #include "llvm/ProfileData/InstrProf.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Error.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Instrumentation.h"
 #include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
@@ -352,7 +352,7 @@
 bool ICallPromotionFunc::processFunction(ProfileSummaryInfo *PSI) {
   bool Changed = false;
   ICallPromotionAnalysis ICallAnalysis;
-  for (auto &I : findIndirectCallSites(F)) {
+  for (auto &I : findIndirectCalls(F)) {
     uint32_t NumVals, NumCandidates;
     uint64_t TotalCount;
     auto ICallProfDataRef = ICallAnalysis.getPromotionCandidatesForInstruction(
diff --git a/lib/Transforms/Instrumentation/InstrProfiling.cpp b/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 62da930..15b9438 100644
--- a/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -701,6 +701,7 @@
   // Use linker script magic to get data/cnts/name start/end.
   if (Triple(M.getTargetTriple()).isOSLinux() ||
       Triple(M.getTargetTriple()).isOSFreeBSD() ||
+      Triple(M.getTargetTriple()).isOSNetBSD() ||
       Triple(M.getTargetTriple()).isOSFuchsia() ||
       Triple(M.getTargetTriple()).isPS4CPU())
     return false;
diff --git a/lib/Transforms/Instrumentation/Instrumentation.cpp b/lib/Transforms/Instrumentation/Instrumentation.cpp
index eb6a373..c3e3236 100644
--- a/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -111,9 +111,9 @@
   initializePGOIndirectCallPromotionLegacyPassPass(Registry);
   initializePGOMemOPSizeOptLegacyPassPass(Registry);
   initializeInstrProfilingLegacyPassPass(Registry);
-  initializeMemorySanitizerPass(Registry);
+  initializeMemorySanitizerLegacyPassPass(Registry);
   initializeHWAddressSanitizerPass(Registry);
-  initializeThreadSanitizerPass(Registry);
+  initializeThreadSanitizerLegacyPassPass(Registry);
   initializeSanitizerCoverageModulePass(Registry);
   initializeDataFlowSanitizerPass(Registry);
   initializeEfficiencySanitizerPass(Registry);
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 0bbf3a9..e6573af 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -99,9 +99,8 @@
 /// also possible that the arguments only indicate the offset for a base taken
 /// from a segment register, so it's dangerous to treat any asm() arguments as
 /// pointers. We take a conservative approach generating calls to
-///   __msan_instrument_asm_load(ptr, size) and
 ///   __msan_instrument_asm_store(ptr, size)
-/// , which defer the memory checking/unpoisoning to the runtime library.
+/// , which defers the memory unpoisoning to the runtime library.
 /// The latter can perform more complex address checks to figure out whether
 /// it's safe to touch the shadow memory.
 /// Like with atomic operations, we call __msan_instrument_asm_store() before
@@ -141,6 +140,7 @@
 ///
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DepthFirstIterator.h"
@@ -150,7 +150,6 @@
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/Triple.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
@@ -188,6 +187,7 @@
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Instrumentation.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/ModuleUtils.h"
 #include <algorithm>
 #include <cassert>
@@ -321,7 +321,6 @@
        cl::desc("Define custom MSan OriginBase"),
        cl::Hidden, cl::init(0));
 
-static const char *const kMsanModuleCtorName = "msan.module_ctor";
 static const char *const kMsanInitName = "__msan_init";
 
 namespace {
@@ -447,19 +446,16 @@
 
 namespace {
 
-/// An instrumentation pass implementing detection of uninitialized
-/// reads.
+/// Instrument functions of a module to detect uninitialized reads.
 ///
-/// MemorySanitizer: instrument the code in module to find
-/// uninitialized reads.
-class MemorySanitizer : public FunctionPass {
+/// Instantiating MemorySanitizer inserts the msan runtime library API function
+/// declarations into the module if they don't exist already. Instantiating
+/// ensures the __msan_init function is in the list of global constructors for
+/// the module.
+class MemorySanitizer {
 public:
-  // Pass identification, replacement for typeid.
-  static char ID;
-
-  MemorySanitizer(int TrackOrigins = 0, bool Recover = false,
-                  bool EnableKmsan = false)
-      : FunctionPass(ID) {
+  MemorySanitizer(Module &M, int TrackOrigins = 0, bool Recover = false,
+                  bool EnableKmsan = false) {
     this->CompileKernel =
         ClEnableKmsan.getNumOccurrences() > 0 ? ClEnableKmsan : EnableKmsan;
     if (ClTrackOrigins.getNumOccurrences() > 0)
@@ -469,15 +465,16 @@
     this->Recover = ClKeepGoing.getNumOccurrences() > 0
                         ? ClKeepGoing
                         : (this->CompileKernel | Recover);
-  }
-  StringRef getPassName() const override { return "MemorySanitizer"; }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<TargetLibraryInfoWrapperPass>();
+    initializeModule(M);
   }
 
-  bool runOnFunction(Function &F) override;
-  bool doInitialization(Module &M) override;
+  // MSan cannot be moved or copied because of MapParams.
+  MemorySanitizer(MemorySanitizer &&) = delete;
+  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
+  MemorySanitizer(const MemorySanitizer &) = delete;
+  MemorySanitizer &operator=(const MemorySanitizer &) = delete;
+
+  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
 
 private:
   friend struct MemorySanitizerVisitor;
@@ -486,13 +483,13 @@
   friend struct VarArgAArch64Helper;
   friend struct VarArgPowerPC64Helper;
 
+  void initializeModule(Module &M);
   void initializeCallbacks(Module &M);
   void createKernelApi(Module &M);
   void createUserspaceApi(Module &M);
 
   /// True if we're compiling the Linux kernel.
   bool CompileKernel;
-
   /// Track origins (allocation points) of uninitialized values.
   int TrackOrigins;
   bool Recover;
@@ -570,7 +567,7 @@
   Value *MsanMetadataPtrForLoadN, *MsanMetadataPtrForStoreN;
   Value *MsanMetadataPtrForLoad_1_8[4];
   Value *MsanMetadataPtrForStore_1_8[4];
-  Value *MsanInstrumentAsmStoreFn, *MsanInstrumentAsmLoadFn;
+  Value *MsanInstrumentAsmStoreFn;
 
   /// Helper to choose between different MsanMetadataPtrXxx().
   Value *getKmsanShadowOriginAccessFn(bool isStore, int size);
@@ -589,25 +586,61 @@
 
   /// An empty volatile inline asm that prevents callback merge.
   InlineAsm *EmptyAsm;
+};
 
-  Function *MsanCtorFunction;
+/// A legacy function pass for msan instrumentation.
+///
+/// Instruments functions to detect uninitialized reads.
+struct MemorySanitizerLegacyPass : public FunctionPass {
+  // Pass identification, replacement for typeid.
+  static char ID;
+
+  MemorySanitizerLegacyPass(int TrackOrigins = 0, bool Recover = false,
+                            bool EnableKmsan = false)
+      : FunctionPass(ID), TrackOrigins(TrackOrigins), Recover(Recover),
+        EnableKmsan(EnableKmsan) {}
+  StringRef getPassName() const override { return "MemorySanitizerLegacyPass"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
+  }
+
+  bool runOnFunction(Function &F) override {
+    return MSan->sanitizeFunction(
+        F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());
+  }
+  bool doInitialization(Module &M) override;
+
+  Optional<MemorySanitizer> MSan;
+  int TrackOrigins;
+  bool Recover;
+  bool EnableKmsan;
 };
 
 } // end anonymous namespace
 
-char MemorySanitizer::ID = 0;
+PreservedAnalyses MemorySanitizerPass::run(Function &F,
+                                           FunctionAnalysisManager &FAM) {
+  MemorySanitizer Msan(*F.getParent(), TrackOrigins, Recover, EnableKmsan);
+  if (Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
+    return PreservedAnalyses::none();
+  return PreservedAnalyses::all();
+}
 
-INITIALIZE_PASS_BEGIN(
-    MemorySanitizer, "msan",
-    "MemorySanitizer: detects uninitialized reads.", false, false)
+char MemorySanitizerLegacyPass::ID = 0;
+
+INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan",
+                      "MemorySanitizer: detects uninitialized reads.", false,
+                      false)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_END(
-    MemorySanitizer, "msan",
-    "MemorySanitizer: detects uninitialized reads.", false, false)
+INITIALIZE_PASS_END(MemorySanitizerLegacyPass, "msan",
+                    "MemorySanitizer: detects uninitialized reads.", false,
+                    false)
 
-FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover,
-                                              bool CompileKernel) {
-  return new MemorySanitizer(TrackOrigins, Recover, CompileKernel);
+FunctionPass *llvm::createMemorySanitizerLegacyPassPass(int TrackOrigins,
+                                                        bool Recover,
+                                                        bool CompileKernel) {
+  return new MemorySanitizerLegacyPass(TrackOrigins, Recover, CompileKernel);
 }
 
 /// Create a non-const global initialized with the given string.
@@ -684,6 +717,14 @@
       "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
 }
 
+static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
+  return M.getOrInsertGlobal(Name, Ty, [&] {
+    return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
+                              nullptr, Name, nullptr,
+                              GlobalVariable::InitialExecTLSModel);
+  });
+}
+
 /// Insert declarations for userspace-specific functions and globals.
 void MemorySanitizer::createUserspaceApi(Module &M) {
   IRBuilder<> IRB(*C);
@@ -695,42 +736,31 @@
   WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
 
   // Create the global TLS variables.
-  RetvalTLS = new GlobalVariable(
-      M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
-      GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
-      GlobalVariable::InitialExecTLSModel);
+  RetvalTLS =
+      getOrInsertGlobal(M, "__msan_retval_tls",
+                        ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
 
-  RetvalOriginTLS = new GlobalVariable(
-      M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
-      "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
+  RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
 
-  ParamTLS = new GlobalVariable(
-      M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
-      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
-      GlobalVariable::InitialExecTLSModel);
+  ParamTLS =
+      getOrInsertGlobal(M, "__msan_param_tls",
+                        ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
 
-  ParamOriginTLS = new GlobalVariable(
-      M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
-      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
-      nullptr, GlobalVariable::InitialExecTLSModel);
+  ParamOriginTLS =
+      getOrInsertGlobal(M, "__msan_param_origin_tls",
+                        ArrayType::get(OriginTy, kParamTLSSize / 4));
 
-  VAArgTLS = new GlobalVariable(
-      M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
-      GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
-      GlobalVariable::InitialExecTLSModel);
+  VAArgTLS =
+      getOrInsertGlobal(M, "__msan_va_arg_tls",
+                        ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
 
-  VAArgOriginTLS = new GlobalVariable(
-      M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
-      GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_origin_tls",
-      nullptr, GlobalVariable::InitialExecTLSModel);
+  VAArgOriginTLS =
+      getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
+                        ArrayType::get(OriginTy, kParamTLSSize / 4));
 
-  VAArgOverflowSizeTLS = new GlobalVariable(
-      M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
-      "__msan_va_arg_overflow_size_tls", nullptr,
-      GlobalVariable::InitialExecTLSModel);
-  OriginTLS = new GlobalVariable(
-      M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
-      "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
+  VAArgOverflowSizeTLS =
+      getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
+  OriginTLS = getOrInsertGlobal(M, "__msan_origin_tls", IRB.getInt32Ty());
 
   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
        AccessSizeIndex++) {
@@ -779,9 +809,6 @@
                             StringRef(""), StringRef(""),
                             /*hasSideEffects=*/true);
 
-  MsanInstrumentAsmLoadFn =
-      M.getOrInsertFunction("__msan_instrument_asm_load", IRB.getVoidTy(),
-                            PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
   MsanInstrumentAsmStoreFn =
       M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
                             PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
@@ -812,9 +839,7 @@
 }
 
 /// Module-level initialization.
-///
-/// inserts a call to __msan_init to the module's constructor list.
-bool MemorySanitizer::doInitialization(Module &M) {
+void MemorySanitizer::initializeModule(Module &M) {
   auto &DL = M.getDataLayout();
 
   bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
@@ -888,27 +913,26 @@
   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
 
   if (!CompileKernel) {
-    std::tie(MsanCtorFunction, std::ignore) =
-        createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName,
-                                            kMsanInitName,
-                                            /*InitArgTypes=*/{},
-                                            /*InitArgs=*/{});
-    if (ClWithComdat) {
-      Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
-      MsanCtorFunction->setComdat(MsanCtorComdat);
-      appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
-    } else {
-      appendToGlobalCtors(M, MsanCtorFunction, 0);
-    }
+    getOrCreateInitFunction(M, kMsanInitName);
 
     if (TrackOrigins)
-      new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
-                         IRB.getInt32(TrackOrigins), "__msan_track_origins");
+      M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
+        return new GlobalVariable(
+            M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
+            IRB.getInt32(TrackOrigins), "__msan_track_origins");
+      });
 
     if (Recover)
-      new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
-                         IRB.getInt32(Recover), "__msan_keep_going");
-  }
+      M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
+        return new GlobalVariable(M, IRB.getInt32Ty(), true,
+                                  GlobalValue::WeakODRLinkage,
+                                  IRB.getInt32(Recover), "__msan_keep_going");
+      });
+  }
+}
+
+bool MemorySanitizerLegacyPass::doInitialization(Module &M) {
+  MSan.emplace(M, TrackOrigins, Recover, EnableKmsan);
   return true;
 }
 
@@ -989,8 +1013,9 @@
   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
   SmallVector<StoreInst *, 16> StoreList;
 
-  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
-      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
+  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
+                         const TargetLibraryInfo &TLI)
+      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
     bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
     InsertChecks = SanitizeFunction;
     PropagateShadow = SanitizeFunction;
@@ -999,7 +1024,6 @@
     // FIXME: Consider using SpecialCaseList to specify a list of functions that
     // must always return fully initialized values. For now, we hardcode "main".
     CheckReturnValue = SanitizeFunction && (F.getName() == "main");
-    TLI = &MS.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
 
     MS.initializeCallbacks(*F.getParent());
     if (MS.CompileKernel)
@@ -3103,6 +3127,12 @@
       handleVectorComparePackedIntrinsic(I);
       break;
 
+    case Intrinsic::is_constant:
+      // The result of llvm.is.constant() is always defined.
+      setShadow(&I, getCleanShadow(&I));
+      setOrigin(&I, getCleanOrigin());
+      break;
+
     default:
       if (!handleUnknownIntrinsic(I))
         visitInstruction(I);
@@ -3200,8 +3230,7 @@
     }
     LLVM_DEBUG(dbgs() << "  done with call args\n");
 
-    FunctionType *FT =
-      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
+    FunctionType *FT = CS.getFunctionType();
     if (FT->isVarArg()) {
       VAHelper->visitCallSite(CS, IRB);
     }
@@ -3482,19 +3511,17 @@
     Type *OpType = Operand->getType();
     // Check the operand value itself.
     insertShadowCheck(Operand, &I);
-    if (!OpType->isPointerTy()) {
+    if (!OpType->isPointerTy() || !isOutput) {
       assert(!isOutput);
       return;
     }
-    Value *Hook =
-        isOutput ? MS.MsanInstrumentAsmStoreFn : MS.MsanInstrumentAsmLoadFn;
     Type *ElType = OpType->getPointerElementType();
     if (!ElType->isSized())
       return;
     int Size = DL.getTypeStoreSize(ElType);
     Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
     Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
-    IRB.CreateCall(Hook, {Ptr, SizeVal});
+    IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
   }
 
   /// Get the number of output arguments returned by pointers.
@@ -4430,10 +4457,8 @@
     return new VarArgNoOpHelper(Func, Msan, Visitor);
 }
 
-bool MemorySanitizer::runOnFunction(Function &F) {
-  if (!CompileKernel && (&F == MsanCtorFunction))
-    return false;
-  MemorySanitizerVisitor Visitor(F, *this);
+bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
+  MemorySanitizerVisitor Visitor(F, *this, TLI);
 
   // Clear out readonly/readnone attributes.
   AttrBuilder B;
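
The MemorySanitizerLegacyPass pieces above follow the usual new-pass-manager porting recipe: the instrumentation logic lives in a plain class, and the legacy FunctionPass merely owns an Optional of it that is emplaced in doInitialization. A skeletal sketch of that split, with hypothetical names and pass registration omitted:

#include "llvm/ADT/Optional.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
// Pass logic, shared between both pass managers.
struct FooInstrumenter {
  FooInstrumenter(Module &M) { (void)M; /* declare runtime callbacks here */ }
  bool instrument(Function &F) { (void)F; return false; /* rewrite F here */ }
};

// Thin legacy-PM adapter in the style of MemorySanitizerLegacyPass.
struct FooLegacyPass : FunctionPass {
  static char ID;
  FooLegacyPass() : FunctionPass(ID) {}
  bool doInitialization(Module &M) override {
    Impl.emplace(M); // module-level setup runs exactly once
    return true;
  }
  bool runOnFunction(Function &F) override { return Impl->instrument(F); }
  Optional<FooInstrumenter> Impl;
};
char FooLegacyPass::ID = 0;
} // namespace
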
diff --git a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 876ae23..f043325 100644
--- a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -63,7 +63,7 @@
 #include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
 #include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/IndirectCallSiteVisitor.h"
+#include "llvm/Analysis/IndirectCallVisitor.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/IR/Attributes.h"
@@ -544,7 +544,7 @@
     MIVisitor.countMemIntrinsics(Func);
     NumOfPGOSelectInsts += SIVisitor.getNumOfSelectInsts();
     NumOfPGOMemIntrinsics += MIVisitor.getNumOfMemIntrinsics();
-    ValueSites[IPVK_IndirectCallTarget] = findIndirectCallSites(Func);
+    ValueSites[IPVK_IndirectCallTarget] = findIndirectCalls(Func);
     ValueSites[IPVK_MemOPSize] = MIVisitor.findMemIntrinsics(Func);
 
     FuncName = getPGOFuncName(F);
@@ -754,12 +754,12 @@
   if (DisableValueProfiling)
     return;
 
-  unsigned NumIndirectCallSites = 0;
+  unsigned NumIndirectCalls = 0;
   for (auto &I : FuncInfo.ValueSites[IPVK_IndirectCallTarget]) {
     CallSite CS(I);
     Value *Callee = CS.getCalledValue();
     LLVM_DEBUG(dbgs() << "Instrument one indirect call: CallSite Index = "
-                      << NumIndirectCallSites << "\n");
+                      << NumIndirectCalls << "\n");
     IRBuilder<> Builder(I);
     assert(Builder.GetInsertPoint() != I->getParent()->end() &&
            "Cannot get the Instrumentation point");
@@ -769,9 +769,9 @@
          Builder.getInt64(FuncInfo.FunctionHash),
          Builder.CreatePtrToInt(Callee, Builder.getInt64Ty()),
          Builder.getInt32(IPVK_IndirectCallTarget),
-         Builder.getInt32(NumIndirectCallSites++)});
+         Builder.getInt32(NumIndirectCalls++)});
   }
-  NumOfPGOICall += NumIndirectCallSites;
+  NumOfPGOICall += NumIndirectCalls;
 
   // Now instrument memop intrinsic calls.
   FuncInfo.MIVisitor.instrumentMemIntrinsics(
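
For the rename above: findIndirectCalls, from the new IndirectCallVisitor.h, collects the call instructions whose callee is not a known Function, which is exactly the set PGO annotates with value-profile sites. A usage sketch, assuming the signature at this revision (returning std::vector<Instruction *>):

#include "llvm/Analysis/IndirectCallVisitor.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Count the indirect call sites PGO would annotate in F.
static size_t countIndirectCalls(Function &F) {
  // Calls through function pointers; direct calls are filtered out.
  std::vector<Instruction *> Sites = findIndirectCalls(F);
  return Sites.size();
}
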
diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 7f683ad..0ba8d57 100644
--- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -264,7 +264,7 @@
   SecEnd->setVisibility(GlobalValue::HiddenVisibility);
   IRBuilder<> IRB(M.getContext());
   Value *SecEndPtr = IRB.CreatePointerCast(SecEnd, Ty);
-  if (TargetTriple.getObjectFormat() != Triple::COFF)
+  if (!TargetTriple.isOSBinFormatCOFF())
     return std::make_pair(IRB.CreatePointerCast(SecStart, Ty), SecEndPtr);
 
   // Account for the fact that on windows-msvc __start_* symbols actually
@@ -293,24 +293,15 @@
     appendToGlobalCtors(M, CtorFunc, SanCtorAndDtorPriority);
   }
 
-  if (TargetTriple.getObjectFormat() == Triple::COFF) {
+  if (TargetTriple.isOSBinFormatCOFF()) {
    // In COFF files, if the constructors are set as COMDAT (they are because
     // COFF supports COMDAT) and the linker flag /OPT:REF (strip unreferenced
     // functions and data) is used, the constructors get stripped. To prevent
-    // this, give the constructors weak ODR linkage and tell the linker to
-    // always include the sancov constructor. This way the linker can
-    // deduplicate the constructors but always leave one copy.
+    // this, give the constructors weak ODR linkage and ensure the linker knows
+    // to include the sancov constructor. This way the linker can deduplicate
+    // the constructors but always leave one copy.
     CtorFunc->setLinkage(GlobalValue::WeakODRLinkage);
-    SmallString<20> PartialIncDirective("/include:");
-    // Get constructor's mangled name in order to support i386.
-    SmallString<40> MangledName;
-    Mangler().getNameWithPrefix(MangledName, CtorFunc, true);
-    Twine IncDirective = PartialIncDirective + MangledName;
-    Metadata *Args[1] = {MDString::get(*C, IncDirective.str())};
-    MDNode *MetadataNode = MDNode::get(*C, Args);
-    NamedMDNode *NamedMetadata =
-        M.getOrInsertNamedMetadata("llvm.linker.options");
-    NamedMetadata->addOperand(MetadataNode);
+    appendToUsed(M, CtorFunc);
   }
   return CtorFunc;
 }
@@ -577,7 +568,7 @@
       *CurModule, ArrayTy, false, GlobalVariable::PrivateLinkage,
       Constant::getNullValue(ArrayTy), "__sancov_gen_");
 
-  if (TargetTriple.supportsCOMDAT())
+  if (TargetTriple.supportsCOMDAT() && !F.isInterposable())
     if (auto Comdat =
             GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId))
       Array->setComdat(Comdat);
@@ -833,7 +824,7 @@
 
 std::string
 SanitizerCoverageModule::getSectionName(const std::string &Section) const {
-  if (TargetTriple.getObjectFormat() == Triple::COFF) {
+  if (TargetTriple.isOSBinFormatCOFF()) {
     if (Section == SanCovCountersSectionName)
       return ".SCOV$CM";
     if (Section == SanCovPCsSectionName)
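
The COFF hunk above replaces the hand-built "/include:" linker-option metadata with appendToUsed, which places the constructor in llvm.used so that /OPT:REF-style linker GC cannot strip it, while weak ODR linkage still lets the linker deduplicate copies across TUs. A sketch of that combination (the helper name is illustrative):

#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

// Keep a module constructor alive under aggressive linker GC while still
// letting the linker deduplicate copies across translation units.
static void protectCtor(Module &M, Function *Ctor) {
  Ctor->setLinkage(GlobalValue::WeakODRLinkage); // dedupe across TUs
  appendToUsed(M, {Ctor}); // llvm.used pins one surviving copy
}
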
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index fa1e5a1..077364e 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -19,6 +19,7 @@
 // The rest is handled by the run-time library.
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
@@ -86,15 +87,16 @@
 namespace {
 
 /// ThreadSanitizer: instrument the code in module to find races.
-struct ThreadSanitizer : public FunctionPass {
-  ThreadSanitizer() : FunctionPass(ID) {}
-  StringRef getPassName() const override;
-  void getAnalysisUsage(AnalysisUsage &AU) const override;
-  bool runOnFunction(Function &F) override;
-  bool doInitialization(Module &M) override;
-  static char ID;  // Pass identification, replacement for typeid.
+///
+/// Instantiating ThreadSanitizer inserts the tsan runtime library API function
+/// declarations into the module if they don't exist already. It also ensures
+/// that the __tsan_init function is in the list of global constructors for
+/// the module.
+struct ThreadSanitizer {
+  ThreadSanitizer(Module &M);
+  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);
 
- private:
+private:
   void initializeCallbacks(Module &M);
   bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
   bool instrumentAtomic(Instruction *I, const DataLayout &DL);
@@ -130,27 +132,55 @@
   Function *MemmoveFn, *MemcpyFn, *MemsetFn;
   Function *TsanCtorFunction;
 };
+
+struct ThreadSanitizerLegacyPass : FunctionPass {
+  ThreadSanitizerLegacyPass() : FunctionPass(ID) {}
+  StringRef getPassName() const override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnFunction(Function &F) override;
+  bool doInitialization(Module &M) override;
+  static char ID; // Pass identification, replacement for typeid.
+private:
+  Optional<ThreadSanitizer> TSan;
+};
 }  // namespace
 
-char ThreadSanitizer::ID = 0;
-INITIALIZE_PASS_BEGIN(
-    ThreadSanitizer, "tsan",
-    "ThreadSanitizer: detects data races.",
-    false, false)
+PreservedAnalyses ThreadSanitizerPass::run(Function &F,
+                                           FunctionAnalysisManager &FAM) {
+  ThreadSanitizer TSan(*F.getParent());
+  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
+    return PreservedAnalyses::none();
+  return PreservedAnalyses::all();
+}
+
+char ThreadSanitizerLegacyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan",
+                      "ThreadSanitizer: detects data races.", false, false)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_END(
-    ThreadSanitizer, "tsan",
-    "ThreadSanitizer: detects data races.",
-    false, false)
+INITIALIZE_PASS_END(ThreadSanitizerLegacyPass, "tsan",
+                    "ThreadSanitizer: detects data races.", false, false)
 
-StringRef ThreadSanitizer::getPassName() const { return "ThreadSanitizer"; }
+StringRef ThreadSanitizerLegacyPass::getPassName() const {
+  return "ThreadSanitizerLegacyPass";
+}
 
-void ThreadSanitizer::getAnalysisUsage(AnalysisUsage &AU) const {
+void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<TargetLibraryInfoWrapperPass>();
 }
 
-FunctionPass *llvm::createThreadSanitizerPass() {
-  return new ThreadSanitizer();
+bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
+  TSan.emplace(M);
+  return true;
+}
+
+bool ThreadSanitizerLegacyPass::runOnFunction(Function &F) {
+  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+  TSan->sanitizeFunction(F, TLI);
+  return true;
+}
+
+FunctionPass *llvm::createThreadSanitizerLegacyPassPass() {
+  return new ThreadSanitizerLegacyPass();
 }
 
 void ThreadSanitizer::initializeCallbacks(Module &M) {
@@ -252,16 +282,16 @@
                             IRB.getInt32Ty(), IntptrTy));
 }
 
-bool ThreadSanitizer::doInitialization(Module &M) {
+ThreadSanitizer::ThreadSanitizer(Module &M) {
   const DataLayout &DL = M.getDataLayout();
   IntptrTy = DL.getIntPtrType(M.getContext());
-  std::tie(TsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
-      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
-      /*InitArgs=*/{});
-
-  appendToGlobalCtors(M, TsanCtorFunction, 0);
-
-  return true;
+  std::tie(TsanCtorFunction, std::ignore) =
+      getOrCreateSanitizerCtorAndInitFunctions(
+          M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
+          /*InitArgs=*/{},
+          // This callback is invoked when the functions are created the first
+          // time. Hook them into the global ctors list in that case:
+          [&](Function *Ctor, Function *) { appendToGlobalCtors(M, Ctor, 0); });
 }
 
 static bool isVtableAccess(Instruction *I) {
@@ -402,7 +432,8 @@
   }
 }
 
-bool ThreadSanitizer::runOnFunction(Function &F) {
+bool ThreadSanitizer::sanitizeFunction(Function &F,
+                                       const TargetLibraryInfo &TLI) {
   // This is required to prevent instrumenting a call to __tsan_init from within
   // the module constructor.
   if (&F == TsanCtorFunction)
@@ -416,8 +447,6 @@
   bool HasCalls = false;
   bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
   const DataLayout &DL = F.getParent()->getDataLayout();
-  const TargetLibraryInfo *TLI =
-      &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
 
   // Traverse all instructions, collect loads/stores/returns, check for calls.
   for (auto &BB : F) {
@@ -428,7 +457,7 @@
         LocalLoadsAndStores.push_back(&Inst);
       else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
         if (CallInst *CI = dyn_cast<CallInst>(&Inst))
-          maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
+          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
         if (isa<MemIntrinsic>(Inst))
           MemIntrinCalls.push_back(&Inst);
         HasCalls = true;
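
The constructor change above is what makes ThreadSanitizer safe to instantiate more than once per module: getOrCreateSanitizerCtorAndInitFunctions reuses an existing module ctor and invokes the callback only on first creation, so the ctor lands in llvm.global_ctors exactly once. A sketch with hypothetical runtime names:

#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <tuple>

using namespace llvm;

static void ensureRuntimeInit(Module &M) {
  Function *Ctor;
  std::tie(Ctor, std::ignore) = getOrCreateSanitizerCtorAndInitFunctions(
      M, "my_sanitizer.module_ctor", "__my_sanitizer_init",
      /*InitArgTypes=*/{}, /*InitArgs=*/{},
      // Invoked only when the functions were just created:
      [&](Function *C, Function *) { appendToGlobalCtors(M, C, 0); });
  (void)Ctor;
}
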
diff --git a/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h b/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
index ba4924c..7f6b157 100644
--- a/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
+++ b/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
@@ -26,6 +26,7 @@
 #include "llvm/ADT/StringRef.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Type.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -74,27 +75,27 @@
 
     switch (kind) {
     case ARCRuntimeEntryPointKind::AutoreleaseRV:
-      return getI8XRetI8XEntryPoint(AutoreleaseRV,
-                                    "objc_autoreleaseReturnValue", true);
+      return getIntrinsicEntryPoint(AutoreleaseRV,
+                                    Intrinsic::objc_autoreleaseReturnValue);
     case ARCRuntimeEntryPointKind::Release:
-      return getVoidRetI8XEntryPoint(Release, "objc_release");
+      return getIntrinsicEntryPoint(Release, Intrinsic::objc_release);
     case ARCRuntimeEntryPointKind::Retain:
-      return getI8XRetI8XEntryPoint(Retain, "objc_retain", true);
+      return getIntrinsicEntryPoint(Retain, Intrinsic::objc_retain);
     case ARCRuntimeEntryPointKind::RetainBlock:
-      return getI8XRetI8XEntryPoint(RetainBlock, "objc_retainBlock", false);
+      return getIntrinsicEntryPoint(RetainBlock, Intrinsic::objc_retainBlock);
     case ARCRuntimeEntryPointKind::Autorelease:
-      return getI8XRetI8XEntryPoint(Autorelease, "objc_autorelease", true);
+      return getIntrinsicEntryPoint(Autorelease, Intrinsic::objc_autorelease);
     case ARCRuntimeEntryPointKind::StoreStrong:
-      return getI8XRetI8XXI8XEntryPoint(StoreStrong, "objc_storeStrong");
+      return getIntrinsicEntryPoint(StoreStrong, Intrinsic::objc_storeStrong);
     case ARCRuntimeEntryPointKind::RetainRV:
-      return getI8XRetI8XEntryPoint(RetainRV,
-                                    "objc_retainAutoreleasedReturnValue", true);
+      return getIntrinsicEntryPoint(RetainRV,
+                                Intrinsic::objc_retainAutoreleasedReturnValue);
     case ARCRuntimeEntryPointKind::RetainAutorelease:
-      return getI8XRetI8XEntryPoint(RetainAutorelease, "objc_retainAutorelease",
-                                    true);
+      return getIntrinsicEntryPoint(RetainAutorelease,
+                                    Intrinsic::objc_retainAutorelease);
     case ARCRuntimeEntryPointKind::RetainAutoreleaseRV:
-      return getI8XRetI8XEntryPoint(RetainAutoreleaseRV,
-                                    "objc_retainAutoreleaseReturnValue", true);
+      return getIntrinsicEntryPoint(RetainAutoreleaseRV,
+                                Intrinsic::objc_retainAutoreleaseReturnValue);
     }
 
     llvm_unreachable("Switch should be a covered switch.");
@@ -131,54 +132,11 @@
   /// Declaration for objc_retainAutoreleaseReturnValue().
   Constant *RetainAutoreleaseRV = nullptr;
 
-  Constant *getVoidRetI8XEntryPoint(Constant *&Decl, StringRef Name) {
+  Constant *getIntrinsicEntryPoint(Constant *&Decl, Intrinsic::ID IntID) {
     if (Decl)
       return Decl;
 
-    LLVMContext &C = TheModule->getContext();
-    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
-    AttributeList Attr = AttributeList().addAttribute(
-        C, AttributeList::FunctionIndex, Attribute::NoUnwind);
-    FunctionType *Fty = FunctionType::get(Type::getVoidTy(C), Params,
-                                          /*isVarArg=*/false);
-    return Decl = TheModule->getOrInsertFunction(Name, Fty, Attr);
-  }
-
-  Constant *getI8XRetI8XEntryPoint(Constant *&Decl, StringRef Name,
-                                   bool NoUnwind = false) {
-    if (Decl)
-      return Decl;
-
-    LLVMContext &C = TheModule->getContext();
-    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
-    Type *Params[] = { I8X };
-    FunctionType *Fty = FunctionType::get(I8X, Params, /*isVarArg=*/false);
-    AttributeList Attr = AttributeList();
-
-    if (NoUnwind)
-      Attr = Attr.addAttribute(C, AttributeList::FunctionIndex,
-                               Attribute::NoUnwind);
-
-    return Decl = TheModule->getOrInsertFunction(Name, Fty, Attr);
-  }
-
-  Constant *getI8XRetI8XXI8XEntryPoint(Constant *&Decl, StringRef Name) {
-    if (Decl)
-      return Decl;
-
-    LLVMContext &C = TheModule->getContext();
-    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
-    Type *I8XX = PointerType::getUnqual(I8X);
-    Type *Params[] = { I8XX, I8X };
-
-    AttributeList Attr = AttributeList().addAttribute(
-        C, AttributeList::FunctionIndex, Attribute::NoUnwind);
-    Attr = Attr.addParamAttribute(C, 0, Attribute::NoCapture);
-
-    FunctionType *Fty = FunctionType::get(Type::getVoidTy(C), Params,
-                                          /*isVarArg=*/false);
-
-    return Decl = TheModule->getOrInsertFunction(Name, Fty, Attr);
+    return Decl = Intrinsic::getDeclaration(TheModule, IntID);
   }
 };
 
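
The wholesale deletion above is possible because the ObjC ARC runtime entry points are now LLVM intrinsics (llvm.objc.*), so their types and attributes come from the intrinsic tables rather than hand-rolled FunctionTypes. A sketch of the one-line lookup that replaces them:

#include "llvm/IR/Constant.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Constant *getRetainDecl(Module *M) {
  // Declares i8* @llvm.objc.retain(i8*) on first use; the signature and
  // attributes (e.g. nounwind) come from the intrinsic's .td definition.
  return Intrinsic::getDeclaration(M, Intrinsic::objc_retain);
}
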
diff --git a/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
index 52a5e8c..4bd5fd1 100644
--- a/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
+++ b/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -45,18 +45,15 @@
   default: break;
   }
 
-  ImmutableCallSite CS(Inst);
-  assert(CS && "Only calls can alter reference counts!");
+  const auto *Call = cast<CallBase>(Inst);
 
   // See if AliasAnalysis can help us with the call.
-  FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS);
+  FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(Call);
   if (AliasAnalysis::onlyReadsMemory(MRB))
     return false;
   if (AliasAnalysis::onlyAccessesArgPointees(MRB)) {
     const DataLayout &DL = Inst->getModule()->getDataLayout();
-    for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
-         I != E; ++I) {
-      const Value *Op = *I;
+    for (const Value *Op : Call->args()) {
       if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
           PA.related(Ptr, Op, DL))
         return true;
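
The DependencyAnalysis hunk above is a mechanical modernization: CallBase plus its args() range replaces ImmutableCallSite and its explicit iterator pair. An equivalent standalone sketch:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// True if any argument of the call/invoke is exactly the given pointer.
static bool callTakesPtr(const Instruction *Inst, const Value *Ptr) {
  const auto *Call = cast<CallBase>(Inst); // asserts Inst is a call or invoke
  for (const Value *Op : Call->args())
    if (Op == Ptr)
      return true;
  return false;
}
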
diff --git a/lib/Transforms/ObjCARC/ObjCARC.h b/lib/Transforms/ObjCARC/ObjCARC.h
index 1dbe72c..751c8f3 100644
--- a/lib/Transforms/ObjCARC/ObjCARC.h
+++ b/lib/Transforms/ObjCARC/ObjCARC.h
@@ -58,7 +58,7 @@
     // Replace the return value with the argument.
     assert((IsForwarding(GetBasicARCInstKind(CI)) ||
             (IsNoopOnNull(GetBasicARCInstKind(CI)) &&
-             isa<ConstantPointerNull>(OldArg))) &&
+             IsNullOrUndef(OldArg->stripPointerCasts()))) &&
            "Can't delete non-forwarding instruction with users!");
     CI->replaceAllUsesWith(OldArg);
   }
diff --git a/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index 1f1ea9f..abe2871 100644
--- a/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -522,7 +522,7 @@
         TailOkForStoreStrongs = false;
       return true;
     case ARCInstKind::IntrinsicUser:
-      // Remove calls to @clang.arc.use(...).
+      // Remove calls to @llvm.objc.clang.arc.use(...).
       Inst->eraseFromParent();
       return true;
     default:
diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index 6ffaadc..9a02174 100644
--- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -600,6 +600,17 @@
     }
   }
 
+  // Track PHIs which are equivalent to our Arg.
+  SmallDenseSet<const Value*, 2> EquivalentArgs;
+  EquivalentArgs.insert(Arg);
+
+  // Add PHIs that are equivalent to Arg to ArgUsers.
+  if (const PHINode *PN = dyn_cast<PHINode>(Arg)) {
+    SmallVector<const Value *, 2> ArgUsers;
+    getEquivalentPHIs(*PN, ArgUsers);
+    EquivalentArgs.insert(ArgUsers.begin(), ArgUsers.end());
+  }
+
   // Check for being preceded by an objc_autoreleaseReturnValue on the same
   // pointer. In this case, we can delete the pair.
   BasicBlock::iterator I = RetainRV->getIterator(),
@@ -609,7 +620,7 @@
       --I;
     while (I != Begin && IsNoopInstruction(&*I));
     if (GetBasicARCInstKind(&*I) == ARCInstKind::AutoreleaseRV &&
-        GetArgRCIdentityRoot(&*I) == Arg) {
+        EquivalentArgs.count(GetArgRCIdentityRoot(&*I))) {
       Changed = true;
       ++NumPeeps;
 
diff --git a/lib/Transforms/Scalar/BDCE.cpp b/lib/Transforms/Scalar/BDCE.cpp
index f63182e..d3c9b9a 100644
--- a/lib/Transforms/Scalar/BDCE.cpp
+++ b/lib/Transforms/Scalar/BDCE.cpp
@@ -96,30 +96,41 @@
     if (I.mayHaveSideEffects() && I.use_empty())
       continue;
 
-    if (I.getType()->isIntOrIntVectorTy() &&
-        !DB.getDemandedBits(&I).getBoolValue()) {
-      // For live instructions that have all dead bits, first make them dead by
-      // replacing all uses with something else. Then, if they don't need to
-      // remain live (because they have side effects, etc.) we can remove them.
-      LLVM_DEBUG(dbgs() << "BDCE: Trivializing: " << I << " (all bits dead)\n");
+    // Remove instructions that are dead, either because they were not reached
+    // during the analysis or because they have no demanded bits.
+    if (DB.isInstructionDead(&I) ||
+        (I.getType()->isIntOrIntVectorTy() &&
+         DB.getDemandedBits(&I).isNullValue() &&
+         wouldInstructionBeTriviallyDead(&I))) {
+      salvageDebugInfo(I);
+      Worklist.push_back(&I);
+      I.dropAllReferences();
+      Changed = true;
+      continue;
+    }
+
+    for (Use &U : I.operands()) {
+      // DemandedBits only detects dead integer uses.
+      if (!U->getType()->isIntOrIntVectorTy())
+        continue;
+
+      if (!isa<Instruction>(U) && !isa<Argument>(U))
+        continue;
+
+      if (!DB.isUseDead(&U))
+        continue;
+
+      LLVM_DEBUG(dbgs() << "BDCE: Trivializing: " << U << " (all bits dead)\n");
 
       clearAssumptionsOfUsers(&I, DB);
 
       // FIXME: In theory we could substitute undef here instead of zero.
       // This should be reconsidered once we settle on the semantics of
       // undef, poison, etc.
-      Value *Zero = ConstantInt::get(I.getType(), 0);
+      U.set(ConstantInt::get(U->getType(), 0));
       ++NumSimplified;
-      I.replaceNonMetadataUsesWith(Zero);
       Changed = true;
     }
-    if (!DB.isInstructionDead(&I))
-      continue;
-
-    salvageDebugInfo(I);
-    Worklist.push_back(&I);
-    I.dropAllReferences();
-    Changed = true;
   }
 
   for (Instruction *&I : Worklist) {
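
The BDCE rewrite above moves dead-bit trivialization from whole instructions down to individual uses: DemandedBits::isUseDead identifies operands whose bits are never observed, and each such use is rewritten to zero in place. A sketch of that query loop, assuming an already-computed DemandedBits result:

#include "llvm/Analysis/DemandedBits.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Zero out any integer-typed operand of I whose bits are never observed.
static bool zeroDeadUses(Instruction &I, DemandedBits &DB) {
  bool Changed = false;
  for (Use &U : I.operands()) {
    if (!U->getType()->isIntOrIntVectorTy())
      continue; // DemandedBits only reasons about integer uses
    if (!isa<Instruction>(U) && !isa<Argument>(U))
      continue; // constants and globals are not tracked
    if (!DB.isUseDead(&U))
      continue;
    U.set(ConstantInt::get(U->getType(), 0)); // conservative: zero, not undef
    Changed = true;
  }
  return Changed;
}
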
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 69112f3..469930c 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -834,7 +834,7 @@
       continue;
     }
 
-    if (auto CS = CallSite(&*BBI)) {
+    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
       // Remove allocation function calls from the list of dead stack objects;
       // there can't be any references before the definition.
       if (isAllocLikeFn(&*BBI, TLI))
@@ -842,15 +842,15 @@
 
       // If this call does not access memory, it can't be loading any of our
       // pointers.
-      if (AA->doesNotAccessMemory(CS))
+      if (AA->doesNotAccessMemory(Call))
         continue;
 
       // If the call might load from any of our allocas, then any store above
       // the call is live.
       DeadStackObjects.remove_if([&](Value *I) {
         // See if the call site touches the value.
-        return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI,
-                                                                BB.getParent())));
+        return isRefSet(AA->getModRefInfo(
+            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
       });
 
       // If all of the allocas were clobbered by the call then we're not going
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index 11009d5..1f09979 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -812,7 +812,8 @@
         LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
         continue;
       }
-      salvageDebugInfo(*Inst);
+      if (!salvageDebugInfo(*Inst))
+        replaceDbgUsesWithUndef(Inst);
       removeMSSA(Inst);
       Inst->eraseFromParent();
       Changed = true;
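
The EarlyCSE change above adopts the two-step protocol for deleting instructions that have debug users: first try to salvage the debug intrinsics in terms of the operands, and only if that fails point them at undef so they never reference a freed instruction. The same sequence as a standalone sketch:

#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

static void eraseWithDebugInfo(Instruction *Inst) {
  // Rewrite llvm.dbg.* users in terms of Inst's operands when possible;
  // otherwise mark the variable locations as unknown rather than dangling.
  if (!salvageDebugInfo(*Inst))
    replaceDbgUsesWithUndef(Inst);
  Inst->eraseFromParent();
}
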
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 440ea4a..9861948 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -437,7 +437,7 @@
 
     // Non-local case.
     const MemoryDependenceResults::NonLocalDepInfo &deps =
-      MD->getNonLocalCallDependency(CallSite(C));
+        MD->getNonLocalCallDependency(C);
     // FIXME: Move the checking logic to MemDep!
     CallInst* cdep = nullptr;
 
@@ -1645,10 +1645,12 @@
 }
 
 void GVN::assignBlockRPONumber(Function &F) {
+  BlockRPONumber.clear();
   uint32_t NextBlockNumber = 1;
   ReversePostOrderTraversal<Function *> RPOT(&F);
   for (BasicBlock *BB : RPOT)
     BlockRPONumber[BB] = NextBlockNumber++;
+  InvalidBlockRPONumbers = false;
 }
 
 // Tries to replace instruction with const, using information from
@@ -1992,6 +1994,7 @@
   ICF = &ImplicitCFT;
   VN.setMemDep(MD);
   ORE = RunORE;
+  InvalidBlockRPONumbers = true;
 
   bool Changed = false;
   bool ShouldContinue = true;
@@ -2021,7 +2024,6 @@
     // Fabricate val-num for dead-code in order to suppress assertion in
     // performPRE().
     assignValNumForDeadCode();
-    assignBlockRPONumber(F);
     bool PREChanged = true;
     while (PREChanged) {
       PREChanged = performPRE(F);
@@ -2079,10 +2081,9 @@
       salvageDebugInfo(*I);
       if (MD) MD->removeInstruction(I);
       LLVM_DEBUG(verifyRemoved(I));
+      ICF->removeInstruction(I);
       I->eraseFromParent();
     }
-
-    ICF->invalidateBlock(BB);
     InstrsToErase.clear();
 
     if (AtStart)
@@ -2184,6 +2185,10 @@
   BasicBlock *PREPred = nullptr;
   BasicBlock *CurrentBlock = CurInst->getParent();
 
+  // Update the RPO numbers for this function.
+  if (InvalidBlockRPONumbers)
+    assignBlockRPONumber(*CurrentBlock->getParent());
+
   SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
   for (BasicBlock *P : predecessors(CurrentBlock)) {
     // We're not interested in PRE where blocks with predecessors that are
@@ -2195,6 +2200,8 @@
     // It is not safe to do PRE when P->CurrentBlock is a loop backedge, and
     // when CurInst has operand defined in CurrentBlock (so it may be defined
     // by phi in the loop header).
+    assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
+           "Invalid BlockRPONumber map.");
     if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
         llvm::any_of(CurInst->operands(), [&](const Use &U) {
           if (auto *Inst = dyn_cast<Instruction>(U.get()))
@@ -2301,7 +2308,7 @@
   LLVM_DEBUG(verifyRemoved(CurInst));
   // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
   // some assertion failures.
-  ICF->invalidateBlock(CurrentBlock);
+  ICF->removeInstruction(CurInst);
   CurInst->eraseFromParent();
   ++NumGVNInstr;
 
@@ -2342,6 +2349,7 @@
       SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT));
   if (MD)
     MD->invalidateCachedPredecessors();
+  InvalidBlockRPONumbers = true;
   return BB;
 }
 
@@ -2356,6 +2364,7 @@
                       CriticalEdgeSplittingOptions(DT));
   } while (!toSplit.empty());
   if (MD) MD->invalidateCachedPredecessors();
+  InvalidBlockRPONumbers = true;
   return true;
 }
 
@@ -2382,6 +2391,7 @@
   BlockRPONumber.clear();
   TableAllocator.Reset();
   ICF->clear();
+  InvalidBlockRPONumbers = true;
 }
 
 /// Verify that the specified instruction does not occur in our
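
The InvalidBlockRPONumbers plumbing above is a lazy-recompute cache: every CFG-mutating path sets the flag, and performPRE rebuilds the numbering on demand instead of eagerly after each split. A generic sketch of the idiom (type and names hypothetical):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Lazily maintained RPO numbering; CFG edits just flip the flag.
struct RPOCache {
  DenseMap<const BasicBlock *, uint32_t> Number;
  bool Invalid = true; // set by anything that edits the CFG

  uint32_t get(const BasicBlock *BB) {
    if (Invalid) {
      Number.clear();
      uint32_t Next = 1;
      ReversePostOrderTraversal<const Function *> RPOT(BB->getParent());
      for (const BasicBlock *B : RPOT)
        Number[B] = Next++;
      Invalid = false;
    }
    return Number.lookup(BB);
  }
};
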
diff --git a/lib/Transforms/Scalar/GVNHoist.cpp b/lib/Transforms/Scalar/GVNHoist.cpp
index 0797ce9..76a42d7 100644
--- a/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/lib/Transforms/Scalar/GVNHoist.cpp
@@ -246,7 +246,7 @@
       LLVMContext::MD_tbaa,           LLVMContext::MD_alias_scope,
       LLVMContext::MD_noalias,        LLVMContext::MD_range,
       LLVMContext::MD_fpmath,         LLVMContext::MD_invariant_load,
-      LLVMContext::MD_invariant_group};
+      LLVMContext::MD_invariant_group, LLVMContext::MD_access_group};
   combineMetadata(ReplInst, I, KnownIDs, true);
 }
 
diff --git a/lib/Transforms/Scalar/GuardWidening.cpp b/lib/Transforms/Scalar/GuardWidening.cpp
index cbbd7b8..efc204d 100644
--- a/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/lib/Transforms/Scalar/GuardWidening.cpp
@@ -472,8 +472,7 @@
       return false;
     // TODO: diamond, triangle cases
     if (!PDT) return true;
-    return !PDT->dominates(DominatedGuard->getParent(),
-                           DominatingGuard->getParent());
+    return !PDT->dominates(DominatedBlock, DominatingBlock);
   };
 
   return MaybeHoistingOutOfIf() ? WS_IllegalOrNegative : WS_Neutral;
diff --git a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index c5ed6d5..1c701bb 100644
--- a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -133,34 +133,16 @@
 ///     taken by the containing loop's induction variable.
 ///
 class InductiveRangeCheck {
-  // Classifies a range check
-  enum RangeCheckKind : unsigned {
-    // Range check of the form "0 <= I".
-    RANGE_CHECK_LOWER = 1,
-
-    // Range check of the form "I < L" where L is known positive.
-    RANGE_CHECK_UPPER = 2,
-
-    // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER
-    // conditions.
-    RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER,
-
-    // Unrecognized range check condition.
-    RANGE_CHECK_UNKNOWN = (unsigned)-1
-  };
-
-  static StringRef rangeCheckKindToStr(RangeCheckKind);
 
   const SCEV *Begin = nullptr;
   const SCEV *Step = nullptr;
   const SCEV *End = nullptr;
   Use *CheckUse = nullptr;
-  RangeCheckKind Kind = RANGE_CHECK_UNKNOWN;
   bool IsSigned = true;
 
-  static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
-                                            ScalarEvolution &SE, Value *&Index,
-                                            Value *&Length, bool &IsSigned);
+  static bool parseRangeCheckICmp(Loop *L, ICmpInst *ICI, ScalarEvolution &SE,
+                                  Value *&Index, Value *&Length,
+                                  bool &IsSigned);
 
   static void
   extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
@@ -175,7 +157,6 @@
 
   void print(raw_ostream &OS) const {
     OS << "InductiveRangeCheck:\n";
-    OS << "  Kind: " << rangeCheckKindToStr(Kind) << "\n";
     OS << "  Begin: ";
     Begin->print(OS);
     OS << "  Step: ";
@@ -283,32 +264,11 @@
 INITIALIZE_PASS_END(IRCELegacyPass, "irce", "Inductive range check elimination",
                     false, false)
 
-StringRef InductiveRangeCheck::rangeCheckKindToStr(
-    InductiveRangeCheck::RangeCheckKind RCK) {
-  switch (RCK) {
-  case InductiveRangeCheck::RANGE_CHECK_UNKNOWN:
-    return "RANGE_CHECK_UNKNOWN";
-
-  case InductiveRangeCheck::RANGE_CHECK_UPPER:
-    return "RANGE_CHECK_UPPER";
-
-  case InductiveRangeCheck::RANGE_CHECK_LOWER:
-    return "RANGE_CHECK_LOWER";
-
-  case InductiveRangeCheck::RANGE_CHECK_BOTH:
-    return "RANGE_CHECK_BOTH";
-  }
-
-  llvm_unreachable("unknown range check type!");
-}
-
 /// Parse a single ICmp instruction, `ICI`, into a range check.  If `ICI` cannot
-/// be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and set
-/// `Index` and `Length` to `nullptr`.  Otherwise set `Index` to the value being
-/// range checked, and set `Length` to the upper limit `Index` is being range
-/// checked with if (and only if) the range check type is stronger or equal to
-/// RANGE_CHECK_UPPER.
-InductiveRangeCheck::RangeCheckKind
+/// be interpreted as a range check, return false and set `Index` and `Length`
+/// to `nullptr`.  Otherwise set `Index` to the value being range checked, and
+/// set `Length` to the upper limit `Index` is being range checked against.
+bool
 InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                          ScalarEvolution &SE, Value *&Index,
                                          Value *&Length, bool &IsSigned) {
@@ -322,7 +282,7 @@
 
   switch (Pred) {
   default:
-    return RANGE_CHECK_UNKNOWN;
+    return false;
 
   case ICmpInst::ICMP_SLE:
     std::swap(LHS, RHS);
@@ -331,9 +291,9 @@
     IsSigned = true;
     if (match(RHS, m_ConstantInt<0>())) {
       Index = LHS;
-      return RANGE_CHECK_LOWER;
+      return true; // Lower.
     }
-    return RANGE_CHECK_UNKNOWN;
+    return false;
 
   case ICmpInst::ICMP_SLT:
     std::swap(LHS, RHS);
@@ -342,15 +302,15 @@
     IsSigned = true;
     if (match(RHS, m_ConstantInt<-1>())) {
       Index = LHS;
-      return RANGE_CHECK_LOWER;
+      return true; // Lower.
     }
 
     if (IsLoopInvariant(LHS)) {
       Index = RHS;
       Length = LHS;
-      return RANGE_CHECK_UPPER;
+      return true; // Upper.
     }
-    return RANGE_CHECK_UNKNOWN;
+    return false;
 
   case ICmpInst::ICMP_ULT:
     std::swap(LHS, RHS);
@@ -360,9 +320,9 @@
     if (IsLoopInvariant(LHS)) {
       Index = RHS;
       Length = LHS;
-      return RANGE_CHECK_BOTH;
+      return true; // Both lower and upper.
     }
-    return RANGE_CHECK_UNKNOWN;
+    return false;
   }
 
   llvm_unreachable("default clause returns!");
@@ -391,8 +351,7 @@
 
   Value *Length = nullptr, *Index;
   bool IsSigned;
-  auto RCKind = parseRangeCheckICmp(L, ICI, SE, Index, Length, IsSigned);
-  if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
+  if (!parseRangeCheckICmp(L, ICI, SE, Index, Length, IsSigned))
     return;
 
   const auto *IndexAddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Index));
@@ -408,7 +367,6 @@
   if (Length)
     End = SE.getSCEV(Length);
   else {
-    assert(RCKind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!");
    // So far we can only reach this point for a signed range check. This may
    // change in the future; in that case we will need to pick the unsigned max
    // for the unsigned range check.
@@ -422,7 +380,6 @@
   IRC.Begin = IndexAddRec->getStart();
   IRC.Step = IndexAddRec->getStepRecurrence(SE);
   IRC.CheckUse = &ConditionUse;
-  IRC.Kind = RCKind;
   IRC.IsSigned = IsSigned;
   Checks.push_back(IRC);
 }
@@ -689,17 +646,6 @@
       PN->setIncomingBlock(i, ReplaceBy);
 }
 
-static bool CannotBeMaxInLoop(const SCEV *BoundSCEV, Loop *L,
-                              ScalarEvolution &SE, bool Signed) {
-  unsigned BitWidth = cast<IntegerType>(BoundSCEV->getType())->getBitWidth();
-  APInt Max = Signed ? APInt::getSignedMaxValue(BitWidth) :
-    APInt::getMaxValue(BitWidth);
-  auto Predicate = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
-  return SE.isAvailableAtLoopEntry(BoundSCEV, L) &&
-         SE.isLoopEntryGuardedByCond(L, Predicate, BoundSCEV,
-                                     SE.getConstant(Max));
-}
-
 /// Given a loop with a decreasing induction variable, is it possible to
 /// safely calculate the bounds of a new loop using the given Predicate.
 static bool isSafeDecreasingBound(const SCEV *Start,
@@ -795,31 +741,6 @@
           SE.isLoopEntryGuardedByCond(L, BoundPred, BoundSCEV, Limit));
 }
 
-static bool CannotBeMinInLoop(const SCEV *BoundSCEV, Loop *L,
-                              ScalarEvolution &SE, bool Signed) {
-  unsigned BitWidth = cast<IntegerType>(BoundSCEV->getType())->getBitWidth();
-  APInt Min = Signed ? APInt::getSignedMinValue(BitWidth) :
-    APInt::getMinValue(BitWidth);
-  auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
-  return SE.isAvailableAtLoopEntry(BoundSCEV, L) &&
-         SE.isLoopEntryGuardedByCond(L, Predicate, BoundSCEV,
-                                     SE.getConstant(Min));
-}
-
-static bool isKnownNonNegativeInLoop(const SCEV *BoundSCEV, const Loop *L,
-                                     ScalarEvolution &SE) {
-  const SCEV *Zero = SE.getZero(BoundSCEV->getType());
-  return SE.isAvailableAtLoopEntry(BoundSCEV, L) &&
-         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGE, BoundSCEV, Zero);
-}
-
-static bool isKnownNegativeInLoop(const SCEV *BoundSCEV, const Loop *L,
-                                  ScalarEvolution &SE) {
-  const SCEV *Zero = SE.getZero(BoundSCEV->getType());
-  return SE.isAvailableAtLoopEntry(BoundSCEV, L) &&
-         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, BoundSCEV, Zero);
-}
-
 Optional<LoopStructure>
 LoopStructure::parseLoopStructure(ScalarEvolution &SE,
                                   BranchProbabilityInfo *BPI, Loop &L,
@@ -977,12 +898,12 @@
         //   ...                          ...
         // }                            }
         if (IndVarBase->getNoWrapFlags(SCEV::FlagNUW) &&
-            CannotBeMinInLoop(RightSCEV, &L, SE, /*Signed*/false)) {
+            cannotBeMinInLoop(RightSCEV, &L, SE, /*Signed*/false)) {
           Pred = ICmpInst::ICMP_UGT;
           RightSCEV = SE.getMinusSCEV(RightSCEV,
                                       SE.getOne(RightSCEV->getType()));
           DecreasedRightValueByOne = true;
-        } else if (CannotBeMinInLoop(RightSCEV, &L, SE, /*Signed*/true)) {
+        } else if (cannotBeMinInLoop(RightSCEV, &L, SE, /*Signed*/true)) {
           Pred = ICmpInst::ICMP_SGT;
           RightSCEV = SE.getMinusSCEV(RightSCEV,
                                       SE.getOne(RightSCEV->getType()));
@@ -1042,11 +963,11 @@
         //   ...                          ...
         // }                            }
         if (IndVarBase->getNoWrapFlags(SCEV::FlagNUW) &&
-            CannotBeMaxInLoop(RightSCEV, &L, SE, /* Signed */ false)) {
+            cannotBeMaxInLoop(RightSCEV, &L, SE, /* Signed */ false)) {
           Pred = ICmpInst::ICMP_ULT;
           RightSCEV = SE.getAddExpr(RightSCEV, SE.getOne(RightSCEV->getType()));
           IncreasedRightValueByOne = true;
-        } else if (CannotBeMaxInLoop(RightSCEV, &L, SE, /* Signed */ true)) {
+        } else if (cannotBeMaxInLoop(RightSCEV, &L, SE, /* Signed */ true)) {
           Pred = ICmpInst::ICMP_SLT;
           RightSCEV = SE.getAddExpr(RightSCEV, SE.getOne(RightSCEV->getType()));
           IncreasedRightValueByOne = true;
@@ -1339,29 +1260,20 @@
 
   // EnterLoopCond - is it okay to start executing this `LS'?
   Value *EnterLoopCond = nullptr;
-  if (Increasing)
-    EnterLoopCond = IsSignedPredicate
-                        ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
-                        : B.CreateICmpULT(LS.IndVarStart, ExitSubloopAt);
-  else
-    EnterLoopCond = IsSignedPredicate
-                        ? B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt)
-                        : B.CreateICmpUGT(LS.IndVarStart, ExitSubloopAt);
+  auto Pred =
+      Increasing
+          ? (IsSignedPredicate ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT)
+          : (IsSignedPredicate ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
+  EnterLoopCond = B.CreateICmp(Pred, LS.IndVarStart, ExitSubloopAt);
 
   B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
   PreheaderJump->eraseFromParent();
 
   LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
   B.SetInsertPoint(LS.LatchBr);
-  Value *TakeBackedgeLoopCond = nullptr;
-  if (Increasing)
-    TakeBackedgeLoopCond = IsSignedPredicate
-                        ? B.CreateICmpSLT(LS.IndVarBase, ExitSubloopAt)
-                        : B.CreateICmpULT(LS.IndVarBase, ExitSubloopAt);
-  else
-    TakeBackedgeLoopCond = IsSignedPredicate
-                        ? B.CreateICmpSGT(LS.IndVarBase, ExitSubloopAt)
-                        : B.CreateICmpUGT(LS.IndVarBase, ExitSubloopAt);
+  Value *TakeBackedgeLoopCond = B.CreateICmp(Pred, LS.IndVarBase,
+                                             ExitSubloopAt);
+
   Value *CondForBranch = LS.LatchBrExitIdx == 1
                              ? TakeBackedgeLoopCond
                              : B.CreateNot(TakeBackedgeLoopCond);
@@ -1373,15 +1285,7 @@
   // IterationsLeft - are there any more iterations left, given the original
   // upper bound on the induction variable?  If not, we branch to the "real"
   // exit.
-  Value *IterationsLeft = nullptr;
-  if (Increasing)
-    IterationsLeft = IsSignedPredicate
-                         ? B.CreateICmpSLT(LS.IndVarBase, LS.LoopExitAt)
-                         : B.CreateICmpULT(LS.IndVarBase, LS.LoopExitAt);
-  else
-    IterationsLeft = IsSignedPredicate
-                         ? B.CreateICmpSGT(LS.IndVarBase, LS.LoopExitAt)
-                         : B.CreateICmpUGT(LS.IndVarBase, LS.LoopExitAt);
+  Value *IterationsLeft = B.CreateICmp(Pred, LS.IndVarBase, LS.LoopExitAt);
   B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);
 
   BranchInst *BranchToContinuation =
@@ -1513,16 +1417,14 @@
 
     if (Increasing)
       ExitPreLoopAtSCEV = *SR.LowLimit;
+    else if (cannotBeMinInLoop(*SR.HighLimit, &OriginalLoop, SE,
+                               IsSignedPredicate))
+      ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
     else {
-      if (CannotBeMinInLoop(*SR.HighLimit, &OriginalLoop, SE,
-                            IsSignedPredicate))
-        ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
-      else {
-        LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
-                          << "preloop exit limit.  HighLimit = "
-                          << *(*SR.HighLimit) << "\n");
-        return false;
-      }
+      LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
+                        << "preloop exit limit.  HighLimit = "
+                        << *(*SR.HighLimit) << "\n");
+      return false;
     }
 
     if (!isSafeToExpandAt(ExitPreLoopAtSCEV, InsertPt, SE)) {
@@ -1542,16 +1444,14 @@
 
     if (Increasing)
       ExitMainLoopAtSCEV = *SR.HighLimit;
+    else if (cannotBeMinInLoop(*SR.LowLimit, &OriginalLoop, SE,
+                               IsSignedPredicate))
+      ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
     else {
-      if (CannotBeMinInLoop(*SR.LowLimit, &OriginalLoop, SE,
-                            IsSignedPredicate))
-        ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
-      else {
-        LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
-                          << "mainloop exit limit.  LowLimit = "
-                          << *(*SR.LowLimit) << "\n");
-        return false;
-      }
+      LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
+                        << "mainloop exit limit.  LowLimit = "
+                        << *(*SR.LowLimit) << "\n");
+      return false;
     }
 
     if (!isSafeToExpandAt(ExitMainLoopAtSCEV, InsertPt, SE)) {
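
The IRCE cleanup above computes the ICmp predicate once, from the direction and signedness of the induction variable, and reuses it for the loop-entry, backedge, and iterations-left comparisons. The same trick in isolation:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static Value *makeBoundCheck(IRBuilder<> &B, Value *IV, Value *Bound,
                             bool Increasing, bool IsSigned) {
  // Pick the predicate once; every comparison against the bound reuses it.
  auto Pred = Increasing
                  ? (IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT)
                  : (IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
  return B.CreateICmp(Pred, IV, Bound);
}
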
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 1429629..48de56a 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1171,6 +1171,9 @@
     }
   }
 
+  if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
+    TryToUnfoldSelect(SI, BB);
+
   // Check for some cases that are worth simplifying.  Right now we want to look
   // for loads that are used by a switch or by the condition for the branch.  If
   // we see one, check to see if it's partially redundant.  If so, insert a PHI
@@ -2388,6 +2391,72 @@
   return true;
 }
 
+// Pred is a predecessor of BB with an unconditional branch to BB. SI is
+// a Select instruction in Pred. BB has other predecessors and SI is used in
+// a PHI node in BB. SI has no other use.
+// A new basic block, NewBB, is created and SI is converted to a compare and a
+// conditional branch. SI is erased from its parent.
+void JumpThreadingPass::UnfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB,
+                                          SelectInst *SI, PHINode *SIUse,
+                                          unsigned Idx) {
+  // Expand the select.
+  //
+  // Pred --
+  //  |    v
+  //  |  NewBB
+  //  |    |
+  //  |-----
+  //  v
+  // BB
+  BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
+  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
+                                         BB->getParent(), BB);
+  // Move the unconditional branch to NewBB.
+  PredTerm->removeFromParent();
+  NewBB->getInstList().insert(NewBB->end(), PredTerm);
+  // Create a conditional branch and update PHI nodes.
+  BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
+  SIUse->setIncomingValue(Idx, SI->getFalseValue());
+  SIUse->addIncoming(SI->getTrueValue(), NewBB);
+
+  // The select is now dead.
+  SI->eraseFromParent();
+  DTU->applyUpdates({{DominatorTree::Insert, NewBB, BB},
+                     {DominatorTree::Insert, Pred, NewBB}});
+
+  // Update any other PHI nodes in BB.
+  for (BasicBlock::iterator BI = BB->begin();
+       PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
+    if (Phi != SIUse)
+      Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
+}
+
+bool JumpThreadingPass::TryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) {
+  PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition());
+
+  if (!CondPHI || CondPHI->getParent() != BB)
+    return false;
+
+  for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) {
+    BasicBlock *Pred = CondPHI->getIncomingBlock(I);
+    SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I));
+
+    // The second and third conditions can potentially be relaxed. Currently
+    // they help to simplify the code and allow us to reuse existing code
+    // developed for TryToUnfoldSelect(CmpInst *, BasicBlock *).
+    if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse())
+      continue;
+
+    BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
+    if (!PredTerm || !PredTerm->isUnconditional())
+      continue;
+
+    UnfoldSelectInstr(Pred, BB, PredSI, CondPHI, I);
+    return true;
+  }
+  return false;
+}
+
 /// TryToUnfoldSelect - Look for blocks of the form
 /// bb1:
 ///   %a = select
@@ -2438,34 +2507,7 @@
     if ((LHSFolds != LazyValueInfo::Unknown ||
          RHSFolds != LazyValueInfo::Unknown) &&
         LHSFolds != RHSFolds) {
-      // Expand the select.
-      //
-      // Pred --
-      //  |    v
-      //  |  NewBB
-      //  |    |
-      //  |-----
-      //  v
-      // BB
-      BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
-                                             BB->getParent(), BB);
-      // Move the unconditional branch to NewBB.
-      PredTerm->removeFromParent();
-      NewBB->getInstList().insert(NewBB->end(), PredTerm);
-      // Create a conditional branch and update PHI nodes.
-      BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
-      CondLHS->setIncomingValue(I, SI->getFalseValue());
-      CondLHS->addIncoming(SI->getTrueValue(), NewBB);
-      // The select is now dead.
-      SI->eraseFromParent();
-
-      DTU->applyUpdates({{DominatorTree::Insert, NewBB, BB},
-                         {DominatorTree::Insert, Pred, NewBB}});
-      // Update any other PHI nodes in BB.
-      for (BasicBlock::iterator BI = BB->begin();
-           PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
-        if (Phi != CondLHS)
-          Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
+      UnfoldSelectInstr(Pred, BB, SI, CondLHS, I);
       return true;
     }
   }
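
UnfoldSelectInstr above ends by giving every other PHI in BB an incoming entry for the new block; since NewBB is reached only through Pred, each PHI simply repeats the value it already received from Pred. That fix-up step as a standalone sketch:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// BB gained the predecessor NewBB, which is reached only through Pred, so
// every PHI (other than the one being rewritten) repeats Pred's value.
static void copyPhiIncoming(BasicBlock *BB, BasicBlock *Pred,
                            BasicBlock *NewBB, PHINode *Skip) {
  for (PHINode &Phi : BB->phis())
    if (&Phi != Skip)
      Phi.addIncoming(Phi.getIncomingValueForBlock(Pred), NewBB);
}
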
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 695eaf6..d204654 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -46,11 +46,11 @@
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/CFG.h"
 #include "llvm/IR/Constants.h"
@@ -69,6 +69,7 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Scalar/LoopPassManager.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
 #include "llvm/Transforms/Utils/SSAUpdater.h"
 #include <algorithm>
@@ -106,16 +107,29 @@
 LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
                cl::desc("How many instruction to cross product using AA"));
 
+// Experimental option to allow imprecision in LICM (use the MemorySSA cap) in
+// pathological cases, in exchange for faster compile times. This is to be
+// removed if MemorySSA starts to address the same issue. The flag applies only
+// when LICM uses MemorySSA instead of the AliasSetTracker. When the flag is
+// disabled (the default), LICM calls MemorySSAWalker's
+// getClobberingMemoryAccess, which is precise. When the flag is enabled, LICM
+// will call into MemorySSA's getDefiningAccess, which may not be precise,
+// since optimizeUses is capped.
+static cl::opt<bool> EnableLicmCap(
+    "enable-licm-cap", cl::init(false), cl::Hidden,
+    cl::desc("Enable imprecision in LICM (uses MemorySSA cap) in "
+             "pathological cases, in exchange for faster compile"));
+
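
The option above uses the standard cl::opt shape for an experimental escape hatch: hidden, off by default, and readable as a plain bool wherever the pass consults it. A minimal sketch (the flag name is hypothetical, not part of this patch):

#include "llvm/Support/CommandLine.h"

// Hidden, off-by-default escape hatch, consulted as a plain bool.
static llvm::cl::opt<bool> MyExperimentalFlag(
    "my-experimental-flag", llvm::cl::init(false), llvm::cl::Hidden,
    llvm::cl::desc("Illustrative flag; not part of this patch"));
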
 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                   const LoopSafetyInfo *SafetyInfo,
                                   TargetTransformInfo *TTI, bool &FreeInLoop);
 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
-                  OptimizationRemarkEmitter *ORE);
+                  MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                  const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
-                 OptimizationRemarkEmitter *ORE, bool FreeInLoop);
+                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE,
+                 bool FreeInLoop);
 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                            const DominatorTree *DT,
                                            const Loop *CurLoop,
@@ -125,14 +139,14 @@
 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                      AliasSetTracker *CurAST, Loop *CurLoop,
                                      AliasAnalysis *AA);
-
-static Instruction *
-CloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN,
-                            const LoopInfo *LI,
-                            const LoopSafetyInfo *SafetyInfo);
+static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
+                                             Loop *CurLoop);
+static Instruction *CloneInstructionInExitBlock(
+    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
+    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
 
 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
-                             AliasSetTracker *AST);
+                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);
 
 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                   ICFLoopSafetyInfo &SafetyInfo);
@@ -194,8 +208,10 @@
     AU.addPreserved<DominatorTreeWrapperPass>();
     AU.addPreserved<LoopInfoWrapperPass>();
     AU.addRequired<TargetLibraryInfoWrapperPass>();
-    if (EnableMSSALoopDependency)
+    if (EnableMSSALoopDependency) {
       AU.addRequired<MemorySSAWrapperPass>();
+      AU.addPreserved<MemorySSAWrapperPass>();
+    }
     AU.addRequired<TargetTransformInfoWrapperPass>();
     getLoopAnalysisUsage(AU);
   }
@@ -275,7 +291,15 @@
 
   assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
 
-  std::unique_ptr<AliasSetTracker> CurAST = collectAliasInfoForLoop(L, LI, AA);
+  std::unique_ptr<AliasSetTracker> CurAST;
+  std::unique_ptr<MemorySSAUpdater> MSSAU;
+  if (!MSSA) {
+    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
+    CurAST = collectAliasInfoForLoop(L, LI, AA);
+  } else {
+    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA. Promotion disabled.\n");
+    MSSAU = make_unique<MemorySSAUpdater>(MSSA);
+  }
 
   // Get the preheader block to move instructions into...
   BasicBlock *Preheader = L->getLoopPreheader();
@@ -296,10 +320,10 @@
   //
   if (L->hasDedicatedExits())
     Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
-                          CurAST.get(), &SafetyInfo, ORE);
+                          CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
   if (Preheader)
     Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
-                           CurAST.get(), &SafetyInfo, ORE);
+                           CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
 
   // Now that all loop invariants have been removed from the loop, promote any
   // memory references to scalars that we can.
@@ -328,27 +352,30 @@
 
       bool Promoted = false;
 
-      // Loop over all of the alias sets in the tracker object.
-      for (AliasSet &AS : *CurAST) {
-        // We can promote this alias set if it has a store, if it is a "Must"
-        // alias set, if the pointer is loop invariant, and if we are not
-        // eliminating any volatile loads or stores.
-        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
-            !L->isLoopInvariant(AS.begin()->getValue()))
-          continue;
+      if (CurAST.get()) {
+        // Loop over all of the alias sets in the tracker object.
+        for (AliasSet &AS : *CurAST) {
+          // We can promote this alias set if it has a store, if it is a "Must"
+          // alias set, if the pointer is loop invariant, and if we are not
+          // eliminating any volatile loads or stores.
+          if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
+              !L->isLoopInvariant(AS.begin()->getValue()))
+            continue;
 
-        assert(
-            !AS.empty() &&
-            "Must alias set should have at least one pointer element in it!");
+          assert(
+              !AS.empty() &&
+              "Must alias set should have at least one pointer element in it!");
 
-        SmallSetVector<Value *, 8> PointerMustAliases;
-        for (const auto &ASI : AS)
-          PointerMustAliases.insert(ASI.getValue());
+          SmallSetVector<Value *, 8> PointerMustAliases;
+          for (const auto &ASI : AS)
+            PointerMustAliases.insert(ASI.getValue());
 
-        Promoted |= promoteLoopAccessesToScalars(
-            PointerMustAliases, ExitBlocks, InsertPts, PIC, LI, DT, TLI, L,
-            CurAST.get(), &SafetyInfo, ORE);
+          Promoted |= promoteLoopAccessesToScalars(
+              PointerMustAliases, ExitBlocks, InsertPts, PIC, LI, DT, TLI, L,
+              CurAST.get(), &SafetyInfo, ORE);
+        }
       }
+      // FIXME: Promotion is initially disabled when using MemorySSA.
 
       // Once we have promoted values across the loop body we have to
       // recursively reform LCSSA as any nested loop may now have values defined
@@ -372,9 +399,12 @@
 
   // If this loop is nested inside of another one, save the alias information
   // for when we process the outer loop.
-  if (L->getParentLoop() && !DeleteAST)
+  if (CurAST.get() && L->getParentLoop() && !DeleteAST)
     LoopToAliasSetMap[L] = std::move(CurAST);
 
+  if (MSSAU.get() && VerifyMemorySSA)
+    MSSAU->getMemorySSA()->verifyMemorySSA();
+
   if (Changed && SE)
     SE->forgetLoopDispositions(L);
   return Changed;
@@ -388,13 +418,16 @@
 bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                       DominatorTree *DT, TargetLibraryInfo *TLI,
                       TargetTransformInfo *TTI, Loop *CurLoop,
-                      AliasSetTracker *CurAST, ICFLoopSafetyInfo *SafetyInfo,
+                      AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
+                      ICFLoopSafetyInfo *SafetyInfo,
                       OptimizationRemarkEmitter *ORE) {
 
   // Verify inputs.
   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
-         CurLoop != nullptr && CurAST && SafetyInfo != nullptr &&
-         "Unexpected input to sinkRegion");
+         CurLoop != nullptr && SafetyInfo != nullptr &&
+         "Unexpected input to sinkRegion.");
+  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
+         "Either AliasSetTracker or MemorySSA should be initialized.");
 
   // We want to visit children before parents. We will enqueue all the parents
   // before their children in the worklist and process the worklist in reverse
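The new assertion encodes that exactly one memory analysis is supplied. A minimal standalone restatement, with stub types in place of the LLVM classes:

#include <cassert>

struct AliasSetTracker {};    // stand-in for llvm::AliasSetTracker
struct MemorySSAUpdater {};   // stand-in for llvm::MemorySSAUpdater

// Pointer null-ness XOR: one and only one of the analyses may be non-null.
void checkExclusive(const AliasSetTracker *CurAST,
                    const MemorySSAUpdater *MSSAU) {
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");
  (void)CurAST;
  (void)MSSAU;  // silence unused-parameter warnings in NDEBUG builds
}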
@@ -418,7 +451,7 @@
         LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
         salvageDebugInfo(I);
         ++II;
-        eraseInstruction(I, *SafetyInfo, CurAST);
+        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
         Changed = true;
         continue;
       }
@@ -430,21 +463,24 @@
       //
       bool FreeInLoop = false;
       if (isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
-          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, true, ORE) &&
+          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, ORE) &&
           !I.mayHaveSideEffects()) {
-        if (sink(I, LI, DT, CurLoop, SafetyInfo, ORE, FreeInLoop)) {
+        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE, FreeInLoop)) {
           if (!FreeInLoop) {
             ++II;
-            eraseInstruction(I, *SafetyInfo, CurAST);
+            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
           }
           Changed = true;
         }
       }
     }
   }
+  if (MSSAU && VerifyMemorySSA)
+    MSSAU->getMemorySSA()->verifyMemorySSA();
   return Changed;
 }
 
+namespace {
 // This is a helper class for hoistRegion, enabling it to hoist control flow
 // in order to hoist phis. The way this works is that we initially
 // start hoisting to the loop preheader, and when we see a loop invariant branch
@@ -458,6 +494,7 @@
   LoopInfo *LI;
   DominatorTree *DT;
   Loop *CurLoop;
+  MemorySSAUpdater *MSSAU;
 
   // A map of blocks in the loop to the block their instructions will be hoisted
   // to.
@@ -468,8 +505,9 @@
   DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
 
 public:
-  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop)
-      : LI(LI), DT(DT), CurLoop(CurLoop) {}
+  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
+                     MemorySSAUpdater *MSSAU)
+      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
 
   void registerPossiblyHoistableBranch(BranchInst *BI) {
     // We can only hoist conditional branches with loop invariant operands.
@@ -644,6 +682,9 @@
     if (HoistTarget == InitialPreheader) {
       // Phis in the loop header now need to use the new preheader.
       InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
+      if (MSSAU)
+        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
+            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
       // The new preheader dominates the loop header.
       DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
       DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
@@ -666,6 +707,7 @@
     return HoistDestinationMap[BB];
   }
 };
+} // namespace
 
 /// Walk the specified region of the CFG (defined by all blocks dominated by
 /// the specified block, and that are in the current loop) in depth first
@@ -674,14 +716,17 @@
 ///
 bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                        DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
-                       AliasSetTracker *CurAST, ICFLoopSafetyInfo *SafetyInfo,
+                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
+                       ICFLoopSafetyInfo *SafetyInfo,
                        OptimizationRemarkEmitter *ORE) {
   // Verify inputs.
   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
-         CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr &&
-         "Unexpected input to hoistRegion");
+         CurLoop != nullptr && SafetyInfo != nullptr &&
+         "Unexpected input to hoistRegion.");
+  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
+         "Either AliasSetTracker or MemorySSA should be initialized.");
 
-  ControlFlowHoister CFH(LI, DT, CurLoop);
+  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
 
   // Keep track of instructions that have been hoisted, as they may need to be
   // re-hoisted if they end up not dominating all of their uses.
@@ -708,10 +753,12 @@
               &I, I.getModule()->getDataLayout(), TLI)) {
         LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
                           << '\n');
-        CurAST->copyValue(&I, C);
+        if (CurAST)
+          CurAST->copyValue(&I, C);
+        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
         I.replaceAllUsesWith(C);
         if (isInstructionTriviallyDead(&I, TLI))
-          eraseInstruction(I, *SafetyInfo, CurAST);
+          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
         Changed = true;
         continue;
       }
@@ -723,11 +770,12 @@
       // and we have accurately duplicated the control flow from the loop header
       // to that block.
       if (CurLoop->hasLoopInvariantOperands(&I) &&
-          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, true, ORE) &&
+          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, ORE) &&
           isSafeToExecuteUnconditionally(
               I, DT, CurLoop, SafetyInfo, ORE,
               CurLoop->getLoopPreheader()->getTerminator())) {
-        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, ORE);
+        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
+              MSSAU, ORE);
         HoistedInstructions.push_back(&I);
         Changed = true;
         continue;
@@ -742,19 +790,19 @@
         auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
         auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
         ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
-        SafetyInfo->insertInstructionTo(I.getParent());
+        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
         ReciprocalDivisor->insertBefore(&I);
 
         auto Product =
             BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
         Product->setFastMathFlags(I.getFastMathFlags());
-        SafetyInfo->insertInstructionTo(I.getParent());
+        SafetyInfo->insertInstructionTo(Product, I.getParent());
         Product->insertAfter(&I);
         I.replaceAllUsesWith(Product);
-        eraseInstruction(I, *SafetyInfo, CurAST);
+        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
 
         hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
-              SafetyInfo, ORE);
+              SafetyInfo, MSSAU, ORE);
         HoistedInstructions.push_back(ReciprocalDivisor);
         Changed = true;
         continue;
@@ -767,7 +815,8 @@
           CurLoop->hasLoopInvariantOperands(&I) &&
           SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
           SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop)) {
-        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, ORE);
+        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
+              MSSAU, ORE);
         HoistedInstructions.push_back(&I);
         Changed = true;
         continue;
@@ -781,7 +830,7 @@
             PN->setIncomingBlock(
                 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
           hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
-                ORE);
+                MSSAU, ORE);
           assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
           Changed = true;
           continue;
@@ -809,22 +858,26 @@
                         [&](Use &U) { return DT->dominates(I, U); })) {
         BasicBlock *Dominator =
             DT->getNode(I->getParent())->getIDom()->getBlock();
-        LLVM_DEBUG(dbgs() << "LICM rehoisting to " << Dominator->getName()
-                          << ": " << *I << "\n");
-        if (!HoistPoint || HoistPoint->getParent() != Dominator) {
+        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
           if (HoistPoint)
             assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                    "New hoist point expected to dominate old hoist point");
           HoistPoint = Dominator->getTerminator();
         }
+        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
+                          << HoistPoint->getParent()->getName()
+                          << ": " << *I << "\n");
         moveInstructionBefore(*I, *HoistPoint, *SafetyInfo);
         HoistPoint = I;
         Changed = true;
       }
     }
   }
+  if (MSSAU && VerifyMemorySSA)
+    MSSAU->getMemorySSA()->verifyMemorySSA();
 
-  // Now that we've finished hoisting make sure that LI and DT are still valid.
+  // Now that we've finished hoisting, make sure that LI and DT are still
+  // valid.
 #ifndef NDEBUG
   if (Changed) {
     assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
@@ -904,25 +957,53 @@
           isa<ExtractValueInst>(I) || isa<InsertValueInst>(I));
 }
 /// Return true if all of the alias sets within this AST are known not to
-/// contain a Mod.
-bool isReadOnly(AliasSetTracker *CurAST) {
-  for (AliasSet &AS : *CurAST) {
-    if (!AS.isForwardingAliasSet() && AS.isMod()) {
-      return false;
+/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
+bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
+                const Loop *L) {
+  if (CurAST) {
+    for (AliasSet &AS : *CurAST) {
+      if (!AS.isForwardingAliasSet() && AS.isMod()) {
+        return false;
+      }
     }
+    return true;
+  } else { /*MSSAU*/
+    for (auto *BB : L->getBlocks())
+      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
+        return false;
+    return true;
   }
+}
+
+/// Return true if I is the only Instruction with a MemoryAccess in L.
+bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
+                        const MemorySSAUpdater *MSSAU) {
+  for (auto *BB : L->getBlocks())
+    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
+      int NotAPhi = 0;
+      for (const auto &Acc : *Accs) {
+        if (isa<MemoryPhi>(&Acc))
+          continue;
+        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
+        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
+          return false;
+      }
+    }
   return true;
 }
 }
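Schematically, the MemorySSA branch of isReadOnly is a per-block scan for definition lists; a hedged standalone equivalent against the in-tree API at this revision:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

// True when MemorySSA records no MemoryDefs (or MemoryPhis) in any block of
// L, i.e. nothing inside the loop may modify memory.
static bool loopIsReadOnly(const MemorySSA &MSSA, const Loop &L) {
  for (const BasicBlock *BB : L.getBlocks())
    if (MSSA.getBlockDefs(BB))
      return false;
  return true;
}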
 
 bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                               Loop *CurLoop, AliasSetTracker *CurAST,
+                              MemorySSAUpdater *MSSAU,
                               bool TargetExecutesOncePerLoop,
                               OptimizationRemarkEmitter *ORE) {
   // If we don't understand the instruction, bail early.
   if (!isHoistableAndSinkableInst(I))
     return false;
-  
+
+  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
+
   // Loads have extra constraints we have to verify before we can hoist them.
   if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
     if (!LI->isUnordered())
@@ -942,8 +1023,13 @@
     if (isLoadInvariantInLoop(LI, DT, CurLoop))
       return true;
 
-    bool Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI),
-                                                CurAST, CurLoop, AA);
+    bool Invalidated;
+    if (CurAST)
+      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
+                                             CurLoop, AA);
+    else
+      Invalidated = pointerInvalidatedByLoopWithMSSA(
+          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop);
     // Check loop-invariant address because this may also be a sinkable load
     // whose address is not necessarily loop-invariant.
     if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
@@ -968,7 +1054,7 @@
     if (match(CI, m_Intrinsic<Intrinsic::assume>()))
       // Assumes don't actually alias anything or throw
       return true;
-    
+
     // Handle simple cases by querying alias analysis.
     FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
     if (Behavior == FMRB_DoesNotAccessMemory)
@@ -980,17 +1066,24 @@
       if (AliasAnalysis::onlyAccessesArgPointees(Behavior)) {
         // TODO: expand to writeable arguments
         for (Value *Op : CI->arg_operands())
-          if (Op->getType()->isPointerTy() &&
-              pointerInvalidatedByLoop(
+          if (Op->getType()->isPointerTy()) {
+            bool Invalidated;
+            if (CurAST)
+              Invalidated = pointerInvalidatedByLoop(
                   MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()),
-                  CurAST, CurLoop, AA))
-            return false;
+                  CurAST, CurLoop, AA);
+            else
+              Invalidated = pointerInvalidatedByLoopWithMSSA(
+                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop);
+            if (Invalidated)
+              return false;
+          }
         return true;
       }
 
       // If this call only reads from memory and there are no writes to memory
       // in the loop, we can hoist or sink the call as appropriate.
-      if (isReadOnly(CurAST))
+      if (isReadOnly(CurAST, MSSAU, CurLoop))
         return true;
     }
 
@@ -1001,18 +1094,21 @@
   } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
     // Fences alias (most) everything to provide ordering.  For the moment,
     // just give up if there are any other memory operations in the loop.
-    auto Begin = CurAST->begin();
-    assert(Begin != CurAST->end() && "must contain FI");
-    if (std::next(Begin) != CurAST->end())
-      // constant memory for instance, TODO: handle better
-      return false;
-    auto *UniqueI = Begin->getUniqueInstruction();
-    if (!UniqueI)
-      // other memory op, give up
-      return false;
-    (void)FI; //suppress unused variable warning
-    assert(UniqueI == FI && "AS must contain FI");
-    return true;
+    if (CurAST) {
+      auto Begin = CurAST->begin();
+      assert(Begin != CurAST->end() && "must contain FI");
+      if (std::next(Begin) != CurAST->end())
+        // constant memory for instance, TODO: handle better
+        return false;
+      auto *UniqueI = Begin->getUniqueInstruction();
+      if (!UniqueI)
+        // other memory op, give up
+        return false;
+      (void)FI; // suppress unused variable warning
+      assert(UniqueI == FI && "AS must contain FI");
+      return true;
+    } else // MSSAU
+      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
   } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
     if (!SI->isUnordered())
       return false; // Don't sink/hoist volatile or ordered atomic store!
@@ -1022,17 +1118,29 @@
     // load store promotion instead.  TODO: We can extend this to cases where
     // there is exactly one write to the location and that write dominates an
     // arbitrary number of reads in the loop.
-    auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
+    if (CurAST) {
+      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
 
-    if (AS.isRef() || !AS.isMustAlias())
-      // Quick exit test, handled by the full path below as well.
+      if (AS.isRef() || !AS.isMustAlias())
+        // Quick exit test, handled by the full path below as well.
+        return false;
+      auto *UniqueI = AS.getUniqueInstruction();
+      if (!UniqueI)
+        // other memory op, give up
+        return false;
+      assert(UniqueI == SI && "AS must contain SI");
+      return true;
+    } else { // MSSAU
+      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
+        return true;
+      if (!EnableLicmCap) {
+        auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
+        if (MSSA->isLiveOnEntryDef(Source) ||
+            !CurLoop->contains(Source->getBlock()))
+          return true;
+      }
       return false;
-    auto *UniqueI = AS.getUniqueInstruction();
-    if (!UniqueI)
-      // other memory op, give up
-      return false;
-    assert(UniqueI == SI && "AS must contain SI");
-    return true;
+    }
   }
 
   assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
@@ -1116,10 +1224,9 @@
   return true;
 }
 
-static Instruction *
-CloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN,
-                            const LoopInfo *LI,
-                            const LoopSafetyInfo *SafetyInfo) {
+static Instruction *CloneInstructionInExitBlock(
+    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
+    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
   Instruction *New;
   if (auto *CI = dyn_cast<CallInst>(&I)) {
     const auto &BlockColors = SafetyInfo->getBlockColors();
@@ -1155,6 +1262,21 @@
   if (!I.getName().empty())
     New->setName(I.getName() + ".le");
 
+  MemoryAccess *OldMemAcc;
+  if (MSSAU && (OldMemAcc = MSSAU->getMemorySSA()->getMemoryAccess(&I))) {
+    // Create a new MemoryAccess and let MemorySSA set its defining access.
+    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
+        New, nullptr, New->getParent(), MemorySSA::Beginning);
+    if (NewMemAcc) {
+      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
+        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
+      else {
+        auto *MemUse = cast<MemoryUse>(NewMemAcc);
+        MSSAU->insertUse(MemUse);
+      }
+    }
+  }
+
   // Build LCSSA PHI nodes for any in-loop operands. Note that this is
   // particularly cheap because we can rip off the PHI node that we're
   // replacing for the number and blocks of the predecessors.
@@ -1178,9 +1300,11 @@
 }
 
 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
-                             AliasSetTracker *AST) {
+                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
   if (AST)
     AST->deleteValue(&I);
+  if (MSSAU)
+    MSSAU->removeMemoryAccess(&I);
   SafetyInfo.removeInstruction(&I);
   I.eraseFromParent();
 }
@@ -1188,14 +1312,15 @@
 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                   ICFLoopSafetyInfo &SafetyInfo) {
   SafetyInfo.removeInstruction(&I);
-  SafetyInfo.insertInstructionTo(Dest.getParent());
+  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
   I.moveBefore(&Dest);
 }
 
 static Instruction *sinkThroughTriviallyReplaceablePHI(
     PHINode *TPN, Instruction *I, LoopInfo *LI,
     SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
-    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop) {
+    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
+    MemorySSAUpdater *MSSAU) {
   assert(isTriviallyReplaceablePHI(*TPN, *I) &&
          "Expect only trivially replaceable PHI");
   BasicBlock *ExitBlock = TPN->getParent();
@@ -1204,8 +1329,8 @@
   if (It != SunkCopies.end())
     New = It->second;
   else
-    New = SunkCopies[ExitBlock] =
-        CloneInstructionInExitBlock(*I, *ExitBlock, *TPN, LI, SafetyInfo);
+    New = SunkCopies[ExitBlock] = CloneInstructionInExitBlock(
+        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
   return New;
 }
 
@@ -1229,7 +1354,8 @@
 
 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                         LoopInfo *LI, const Loop *CurLoop,
-                                        LoopSafetyInfo *SafetyInfo) {
+                                        LoopSafetyInfo *SafetyInfo,
+                                        MemorySSAUpdater *MSSAU) {
 #ifndef NDEBUG
   SmallVector<BasicBlock *, 32> ExitBlocks;
   CurLoop->getUniqueExitBlocks(ExitBlocks);
@@ -1279,7 +1405,7 @@
            "Expect all predecessors are in the loop");
     if (PN->getBasicBlockIndex(PredBB) >= 0) {
       BasicBlock *NewPred = SplitBlockPredecessors(
-          ExitBB, PredBB, ".split.loop.exit", DT, LI, nullptr, true);
+          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
       // Since we do not allow splitting EH-block with BlockColors in
       // canSplitPredecessors(), we can simply assign predecessor's color to
       // the new block.
@@ -1300,7 +1426,8 @@
 ///
 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                  const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
-                 OptimizationRemarkEmitter *ORE, bool FreeInLoop) {
+                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE,
+                 bool FreeInLoop) {
   LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
   ORE->emit([&]() {
     return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
@@ -1352,7 +1479,7 @@
 
     // Split predecessors of the PHI so that we can make users trivially
     // replaceable.
-    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo);
+    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
 
     // Should rebuild the iterators, as they may be invalidated by
     // splitPredecessorsOfLoopExit().
@@ -1387,10 +1514,10 @@
     assert(ExitBlockSet.count(PN->getParent()) &&
            "The LCSSA PHI is not in an exit block!");
     // The PHI must be trivially replaceable.
-    Instruction *New = sinkThroughTriviallyReplaceablePHI(PN, &I, LI, SunkCopies,
-                                                          SafetyInfo, CurLoop);
+    Instruction *New = sinkThroughTriviallyReplaceablePHI(
+        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
     PN->replaceAllUsesWith(New);
-    eraseInstruction(*PN, *SafetyInfo, nullptr);
+    eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
     Changed = true;
   }
   return Changed;
@@ -1401,7 +1528,7 @@
 ///
 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
-                  OptimizationRemarkEmitter *ORE) {
+                  MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
   LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I
                     << "\n");
   ORE->emit([&]() {
@@ -1426,6 +1553,13 @@
   else
     // Move the new node to the destination block, before its terminator.
     moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo);
+  if (MSSAU) {
+    // If moving, I just moved a load or store, so update MemorySSA.
+    MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
+        MSSAU->getMemorySSA()->getMemoryAccess(&I));
+    if (OldMemAcc)
+      MSSAU->moveToPlace(OldMemAcc, Dest, MemorySSA::End);
+  }
 
   // Do not retain debug locations when we are moving instructions to different
   // basic blocks, because we want to avoid jumpy line tables. Calls, however,
@@ -1830,7 +1964,7 @@
 
   // If the SSAUpdater didn't use the load in the preheader, just zap it now.
   if (PreheaderLoad->use_empty())
-    eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST);
+    eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, nullptr);
 
   return true;
 }
@@ -1960,6 +2094,18 @@
   return false;
 }
 
+static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
+                                             Loop *CurLoop) {
+  MemoryAccess *Source;
+  // See declaration of EnableLicmCap for usage details.
+  if (EnableLicmCap)
+    Source = MU->getDefiningAccess();
+  else
+    Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
+  return !MSSA->isLiveOnEntryDef(Source) &&
+         CurLoop->contains(Source->getBlock());
+}
+
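As a hedged source-level illustration (a hypothetical input, not code from this patch) of what the query buys: a load whose clobbering definition lies outside the loop is treated as invariant even where a single merged alias set would be pessimistic.

static int g;

// The loop contains no MemoryDefs, so the clobbering access for the load of
// g is the store before the loop; pointerInvalidatedByLoopWithMSSA returns
// false and LICM may hoist the load.
int sumScaled(const int *a, int n) {
  g = 42;                    // clobbering MemoryDef, outside the loop
  int s = 0;
  for (int i = 0; i < n; ++i)
    s += a[i] * g;           // MemoryUse with a loop-invariant clobber
  return s;
}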
 /// Little predicate that returns true if the specified basic block is in
 /// a subloop of the current one, not the current one itself.
 ///
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 241dbed..fbffa19 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -26,7 +26,7 @@
 // Future floating point idioms to recognize in -ffast-math mode:
 //   fpowi
 // Future integer operation idioms to recognize:
-//   ctpop, ctlz, cttz
+//   ctpop
 //
 // Beware that isel's default lowering for ctpop is highly inefficient for
 // i64 and larger types when i64 is legal and the value has few bits set.  It
@@ -187,9 +187,10 @@
   bool recognizePopcount();
   void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                                PHINode *CntPhi, Value *Var);
-  bool recognizeAndInsertCTLZ();
-  void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
-                                PHINode *CntPhi, Value *Var, Instruction *DefX,
+  bool recognizeAndInsertFFS();  /// Find First Set: ctlz or cttz
+  void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
+                                Instruction *CntInst, PHINode *CntPhi,
+                                Value *Var, Instruction *DefX,
                                 const DebugLoc &DL, bool ZeroCheck,
                                 bool IsCntPhiUsedOutsideLoop);
 
@@ -779,12 +780,13 @@
   // Get the location that may be stored across the loop.  Since the access is
   // strided positively through memory, we say that the modified location starts
   // at the pointer and has infinite size.
-  uint64_t AccessSize = MemoryLocation::UnknownSize;
+  LocationSize AccessSize = LocationSize::unknown();
 
   // If the loop iterates a fixed number of times, we can refine the access size
   // to be exactly the size of the memset, which is (BECount+1)*StoreSize
   if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
-    AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
+    AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
+                                       StoreSize);
 
   // TODO: For this to be really effective, we have to dive into the pointer
   // operand in the store.  Store to &A[i] of 100 will always return may alias
@@ -1107,15 +1109,17 @@
 }
 
 bool LoopIdiomRecognize::runOnNoncountableLoop() {
-  return recognizePopcount() || recognizeAndInsertCTLZ();
+  return recognizePopcount() || recognizeAndInsertFFS();
 }
 
 /// Check if the given conditional branch is based on the comparison between
-/// a variable and zero, and if the variable is non-zero, the control yields to
-/// the loop entry. If the branch matches the behavior, the variable involved
-/// in the comparison is returned. This function will be called to see if the
-/// precondition and postcondition of the loop are in desirable form.
-static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
+/// a variable and zero, and if the variable is non-zero (or zero, when
+/// JmpOnZero is true), control yields to the loop entry. If the branch matches
+/// this behavior, the variable involved in the comparison is returned. This
+/// function will be called to see if the precondition and postcondition of the
+/// loop are in desirable form.
+static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry,
+                             bool JmpOnZero = false) {
   if (!BI || !BI->isConditional())
     return nullptr;
 
@@ -1127,9 +1131,14 @@
   if (!CmpZero || !CmpZero->isZero())
     return nullptr;
 
+  BasicBlock *TrueSucc = BI->getSuccessor(0);
+  BasicBlock *FalseSucc = BI->getSuccessor(1);
+  if (JmpOnZero)
+    std::swap(TrueSucc, FalseSucc);
+
   ICmpInst::Predicate Pred = Cond->getPredicate();
-  if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
-      (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
+  if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) ||
+      (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry))
     return Cond->getOperand(0);
 
   return nullptr;
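For reference, a hedged source form (hypothetical C++) in which both the guard and the loop branch have the shape matchCondition accepts: a compare against zero, with control entering the loop on the non-zero side.

#include <cstdint>

uint32_t shiftCount(uint32_t n) {
  if (n == 0)           // precondition branch: ICMP_EQ, loop on the false arm
    return 0;
  uint32_t cnt = 0;
  while (n != 0) {      // loop branch: ICMP_NE, loop on the true arm
    n >>= 1;
    ++cnt;
  }
  return cnt;
}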
@@ -1305,14 +1314,14 @@
 ///
 /// loop-exit:
 /// \endcode
-static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
-                            Instruction *&CntInst, PHINode *&CntPhi,
-                            Instruction *&DefX) {
+static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
+                                      Intrinsic::ID &IntrinID, Value *&InitX,
+                                      Instruction *&CntInst, PHINode *&CntPhi,
+                                      Instruction *&DefX) {
   BasicBlock *LoopEntry;
   Value *VarX = nullptr;
 
   DefX = nullptr;
-  PhiX = nullptr;
   CntInst = nullptr;
   CntPhi = nullptr;
   LoopEntry = *(CurLoop->block_begin());
@@ -1324,20 +1333,28 @@
   else
     return false;
 
-  // step 2: detect instructions corresponding to "x.next = x >> 1"
-  if (!DefX || (DefX->getOpcode() != Instruction::AShr &&
-                DefX->getOpcode() != Instruction::LShr))
+  // step 2: detect instructions corresponding to "x.next = x >> 1" or
+  //         "x.next = x << 1"
+  if (!DefX || !DefX->isShift())
     return false;
+  IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
+                                                     Intrinsic::ctlz;
   ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
   if (!Shft || !Shft->isOne())
     return false;
   VarX = DefX->getOperand(0);
 
   // step 3: Check the recurrence of variable X
-  PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
+  PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
   if (!PhiX)
     return false;
 
+  InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader());
+
+  // Make sure the initial value can't be negative; otherwise the ashr in the
+  // loop might never reach zero, which would make the loop infinite.
+  if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL))
+    return false;
+
+  // step 4: Find the instruction that does the counting: cnt.next = cnt + 1
+  // TODO: We can skip this step. If the loop trip count is known (CTLZ),
+  //       then all uses of "cnt.next" could be optimized to the trip count
@@ -1369,17 +1386,25 @@
   return true;
 }
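A hedged example of the newly recognized left-shift shape; per the opcode check above, Shl selects Intrinsic::cttz while LShr/AShr continue to select Intrinsic::ctlz.

#include <cstdint>

uint32_t shlUntilZero(uint32_t x) {
  uint32_t cnt = 0;
  while (x != 0) {      // becomes countable: trip count is 32 - cttz(x)
    x <<= 1;            // DefX: x.next = x << 1  ->  Intrinsic::cttz
    ++cnt;              // CntInst: cnt.next = cnt + 1
  }
  return cnt;
}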
 
-/// Recognize CTLZ idiom in a non-countable loop and convert the loop
-/// to countable (with CTLZ trip count).
-/// If CTLZ inserted as a new trip count returns true; otherwise, returns false.
-bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
+/// Recognize CTLZ or CTTZ idiom in a non-countable loop and convert the loop
+/// to countable (with CTLZ / CTTZ trip count). If CTLZ / CTTZ inserted as a new
+/// trip count returns true; otherwise, returns false.
+bool LoopIdiomRecognize::recognizeAndInsertFFS() {
   // Give up if the loop has multiple blocks or multiple backedges.
   if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
     return false;
 
-  Instruction *CntInst, *DefX;
-  PHINode *CntPhi, *PhiX;
-  if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
+  Intrinsic::ID IntrinID;
+  Value *InitX;
+  Instruction *DefX = nullptr;
+  PHINode *CntPhi = nullptr;
+  Instruction *CntInst = nullptr;
+  // Helps decide whether the transformation is profitable. For the
+  // ShiftUntilZero idiom, this is always 6.
+  size_t IdiomCanonicalSize = 6;
+
+  if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX,
+                                 CntInst, CntPhi, DefX))
     return false;
 
   bool IsCntPhiUsedOutsideLoop = false;
@@ -1406,12 +1431,6 @@
   // It is safe to assume Preheader exist as it was checked in
   // parent function RunOnLoop.
   BasicBlock *PH = CurLoop->getLoopPreheader();
-  Value *InitX = PhiX->getIncomingValueForBlock(PH);
-
-  // Make sure the initial value can't be negative otherwise the ashr in the
-  // loop might never reach zero which would make the loop infinite.
-  if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, *DL))
-    return false;
 
   // If we are using the count instruction outside the loop, make sure we
   // have a zero check as a precondition. Without the check the loop would run
@@ -1429,8 +1448,10 @@
     ZeroCheck = true;
   }
 
-  // Check if CTLZ intrinsic is profitable. Assume it is always profitable
-  // if we delete the loop (the loop has only 6 instructions):
+  // Check if CTLZ / CTTZ intrinsic is profitable. Assume it is always
+  // profitable if we delete the loop.
+
+  // The loop has only 6 instructions:
   //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
   //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
   //  %shr = ashr %n.addr.0, 1
@@ -1441,12 +1462,12 @@
   const Value *Args[] =
       {InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
                         : ConstantInt::getFalse(InitX->getContext())};
-  if (CurLoop->getHeader()->size() != 6 &&
-      TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
-          TargetTransformInfo::TCC_Basic)
+  if (CurLoop->getHeader()->size() != IdiomCanonicalSize &&
+      TTI->getIntrinsicCost(IntrinID, InitX->getType(), Args) >
+        TargetTransformInfo::TCC_Basic)
     return false;
 
-  transformLoopToCountable(PH, CntInst, CntPhi, InitX, DefX,
+  transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
                            DefX->getDebugLoc(), ZeroCheck,
                            IsCntPhiUsedOutsideLoop);
   return true;
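The ZeroCheck requirement mirrors the undefined-at-zero contract of the count intrinsics; by analogy with the GCC/Clang builtin (a hedged sketch, not code from this patch):

#include <cstdint>

// __builtin_clz is undefined for a zero argument, just like
// llvm.ctlz(x, /*is_zero_undef=*/true); the transform therefore only passes
// true when a zero test guards the loop.
uint32_t safeLeadingZeros(uint32_t x) {
  return x == 0 ? 32u : static_cast<uint32_t>(__builtin_clz(x));
}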
@@ -1515,20 +1536,21 @@
   return CI;
 }
 
-static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
-                                     const DebugLoc &DL, bool ZeroCheck) {
+static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
+                                    const DebugLoc &DL, bool ZeroCheck,
+                                    Intrinsic::ID IID) {
   Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
   Type *Tys[] = {Val->getType()};
 
   Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
-  Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
+  Value *Func = Intrinsic::getDeclaration(M, IID, Tys);
   CallInst *CI = IRBuilder.CreateCall(Func, Ops);
   CI->setDebugLoc(DL);
 
   return CI;
 }
 
-/// Transform the following loop:
+/// Transform the following loop (using CTLZ; CTTZ is similar):
 /// loop:
 ///   CntPhi = PHI [Cnt0, CntInst]
 ///   PhiX = PHI [InitX, DefX]
@@ -1560,19 +1582,19 @@
 /// If LOOP_BODY is empty the loop will be deleted.
 /// If CntInst and DefX are not used in LOOP_BODY they will be removed.
 void LoopIdiomRecognize::transformLoopToCountable(
-    BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
-    Instruction *DefX, const DebugLoc &DL, bool ZeroCheck,
-    bool IsCntPhiUsedOutsideLoop) {
+    Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
+    PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
+    bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
   BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());
 
-  // Step 1: Insert the CTLZ instruction at the end of the preheader block
+  // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block
+  IRBuilder<> Builder(PreheaderBr);
+  Builder.SetCurrentDebugLocation(DL);
+  Value *FFS, *Count, *CountPrev, *NewCount, *InitXNext;
+
   //   Count = BitWidth - CTLZ(InitX);
   // If there are uses of CntPhi create:
   //   CountPrev = BitWidth - CTLZ(InitX >> 1);
-  IRBuilder<> Builder(PreheaderBr);
-  Builder.SetCurrentDebugLocation(DL);
-  Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;
-
   if (IsCntPhiUsedOutsideLoop) {
     if (DefX->getOpcode() == Instruction::AShr)
       InitXNext =
@@ -1580,29 +1602,30 @@
     else if (DefX->getOpcode() == Instruction::LShr)
       InitXNext =
           Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
+    else if (DefX->getOpcode() == Instruction::Shl) // cttz
+      InitXNext =
+          Builder.CreateShl(InitX, ConstantInt::get(InitX->getType(), 1));
     else
       llvm_unreachable("Unexpected opcode!");
   } else
     InitXNext = InitX;
-  CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
+  FFS = createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID);
   Count = Builder.CreateSub(
-      ConstantInt::get(CTLZ->getType(),
-                       CTLZ->getType()->getIntegerBitWidth()),
-      CTLZ);
+      ConstantInt::get(FFS->getType(),
+                       FFS->getType()->getIntegerBitWidth()),
+      FFS);
   if (IsCntPhiUsedOutsideLoop) {
     CountPrev = Count;
     Count = Builder.CreateAdd(
         CountPrev,
         ConstantInt::get(CountPrev->getType(), 1));
   }
-  if (IsCntPhiUsedOutsideLoop)
-    NewCount = Builder.CreateZExtOrTrunc(CountPrev,
-        cast<IntegerType>(CntInst->getType()));
-  else
-    NewCount = Builder.CreateZExtOrTrunc(Count,
-        cast<IntegerType>(CntInst->getType()));
 
-  // If the CTLZ counter's initial value is not zero, insert Add Inst.
+  NewCount =
+      Builder.CreateZExtOrTrunc(IsCntPhiUsedOutsideLoop ? CountPrev : Count,
+                                cast<IntegerType>(CntInst->getType()));
+
+  // If the counter's initial value is not zero, insert Add Inst.
   Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
   ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
   if (!InitConst || !InitConst->isZero())
@@ -1638,8 +1661,7 @@
   LbCond->setOperand(1, ConstantInt::get(Ty, 0));
 
   // Step 3: All the references to the original counter outside
-  //  the loop are replaced with the NewCount -- the value returned from
-  //  __builtin_ctlz(x).
+  //  the loop are replaced with the NewCount.
   if (IsCntPhiUsedOutsideLoop)
     CntPhi->replaceUsesOutsideBlock(NewCount, Body);
   else
diff --git a/lib/Transforms/Scalar/LoopPredication.cpp b/lib/Transforms/Scalar/LoopPredication.cpp
index ccaf101..5983c80 100644
--- a/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/lib/Transforms/Scalar/LoopPredication.cpp
@@ -180,6 +180,7 @@
 #include "llvm/Transforms/Scalar/LoopPredication.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/GuardUtils.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolution.h"
@@ -820,9 +821,8 @@
   SmallVector<IntrinsicInst *, 4> Guards;
   for (const auto BB : L->blocks())
     for (auto &I : *BB)
-      if (auto *II = dyn_cast<IntrinsicInst>(&I))
-        if (II->getIntrinsicID() == Intrinsic::experimental_guard)
-          Guards.push_back(II);
+      if (isGuard(&I))
+        Guards.push_back(cast<IntrinsicInst>(&I));
 
   if (Guards.empty())
     return false;
diff --git a/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
index c370efa..2e5927f 100644
--- a/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
+++ b/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
@@ -42,10 +42,14 @@
 #define DEBUG_TYPE "loop-simplifycfg"
 
 static cl::opt<bool> EnableTermFolding("enable-loop-simplifycfg-term-folding",
-                                       cl::init(true));
+                                       cl::init(false));
 
 STATISTIC(NumTerminatorsFolded,
           "Number of terminators folded to unconditional branches");
+STATISTIC(NumLoopBlocksDeleted,
+          "Number of loop blocks deleted");
+STATISTIC(NumLoopExitsDeleted,
+          "Number of loop exiting edges deleted");
 
 /// If \p BB is a switch or a conditional branch, but only one of its successors
 /// can be reached from this block in runtime, return this successor. Otherwise,
@@ -76,6 +80,7 @@
   return nullptr;
 }
 
+namespace {
 /// Helper class that can turn branches and switches with constant conditions
 /// into unconditional branches.
 class ConstantTerminatorFoldingImpl {
@@ -83,6 +88,7 @@
   Loop &L;
   LoopInfo &LI;
   DominatorTree &DT;
+  ScalarEvolution &SE;
   MemorySSAUpdater *MSSAU;
 
   // Whether or not the current loop has irreducible CFG.
@@ -102,7 +108,7 @@
   SmallPtrSet<BasicBlock *, 8> LiveLoopBlocks;
   // The blocks of the original loop that will become unreachable from entry
   // after the constant folding.
-  SmallPtrSet<BasicBlock *, 8> DeadLoopBlocks;
+  SmallVector<BasicBlock *, 8> DeadLoopBlocks;
   // The exits of the original loop that will still be reachable from entry
   // after the constant folding.
   SmallPtrSet<BasicBlock *, 8> LiveExitBlocks;
@@ -137,7 +143,7 @@
     PrintOutVector("Blocks in which we can constant-fold terminator:",
                    FoldCandidates);
     PrintOutSet("Live blocks from the original loop:", LiveLoopBlocks);
-    PrintOutSet("Dead blocks from the original loop:", DeadLoopBlocks);
+    PrintOutVector("Dead blocks from the original loop:", DeadLoopBlocks);
     PrintOutSet("Live exit blocks:", LiveExitBlocks);
     PrintOutVector("Dead exit blocks:", DeadExitBlocks);
     if (!DeleteCurrentLoop)
@@ -192,7 +198,7 @@
 
       // If a loop block wasn't marked as live so far, then it's dead.
       if (!LiveLoopBlocks.count(BB)) {
-        DeadLoopBlocks.insert(BB);
+        DeadLoopBlocks.push_back(BB);
         continue;
       }
 
@@ -269,6 +275,138 @@
            "All blocks that stay in loop should be live!");
   }
 
+  /// We need to preserve static reachability of all loop exit blocks (this is
+  /// required by the loop pass manager). To do so, we use the following
+  /// trick:
+  ///
+  ///  preheader:
+  ///    <preheader code>
+  ///    br label %loop_header
+  ///
+  ///  loop_header:
+  ///    ...
+  ///    br i1 false, label %dead_exit, label %loop_block
+  ///    ...
+  ///
+  /// We cannot simply remove the edge from the loop to a dead exit because in
+  /// this case dead_exit (and its successors) may become unreachable. To avoid
+  /// that, we insert the following fictitious preheader:
+  ///
+  ///  preheader:
+  ///    <preheader code>
+  ///    switch i32 0, label %preheader-split,
+  ///                  [i32 1, label %dead_exit_1],
+  ///                  [i32 2, label %dead_exit_2],
+  ///                  ...
+  ///                  [i32 N, label %dead_exit_N],
+  ///
+  ///  preheader-split:
+  ///    br label %loop_header
+  ///
+  ///  loop_header:
+  ///    ...
+  ///    br i1 false, label %dead_exit_N, label %loop_block
+  ///    ...
+  ///
+  /// Doing so, we preserve static reachability of all dead exits and can later
+  /// remove edges from the loop to these blocks.
+  void handleDeadExits() {
+    // If no dead exits, nothing to do.
+    if (DeadExitBlocks.empty())
+      return;
+
+    // Construct split preheader and the dummy switch to thread edges from it to
+    // dead exits.
+    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
+    BasicBlock *Preheader = L.getLoopPreheader();
+    BasicBlock *NewPreheader = Preheader->splitBasicBlock(
+        Preheader->getTerminator(),
+        Twine(Preheader->getName()).concat("-split"));
+    DTU.deleteEdge(Preheader, L.getHeader());
+    DTU.insertEdge(NewPreheader, L.getHeader());
+    DTU.insertEdge(Preheader, NewPreheader);
+    IRBuilder<> Builder(Preheader->getTerminator());
+    SwitchInst *DummySwitch =
+        Builder.CreateSwitch(Builder.getInt32(0), NewPreheader);
+    Preheader->getTerminator()->eraseFromParent();
+
+    unsigned DummyIdx = 1;
+    for (BasicBlock *BB : DeadExitBlocks) {
+      SmallVector<Instruction *, 4> DeadPhis;
+      for (auto &PN : BB->phis())
+        DeadPhis.push_back(&PN);
+
+      // Eliminate all Phis from dead exits.
+      for (Instruction *PN : DeadPhis) {
+        PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
+        PN->eraseFromParent();
+      }
+      assert(DummyIdx != 0 && "Too many dead exits!");
+      DummySwitch->addCase(Builder.getInt32(DummyIdx++), BB);
+      DTU.insertEdge(Preheader, BB);
+      ++NumLoopExitsDeleted;
+    }
+
+    assert(L.getLoopPreheader() == NewPreheader && "Malformed CFG?");
+    if (Loop *OuterLoop = LI.getLoopFor(Preheader)) {
+      OuterLoop->addBasicBlockToLoop(NewPreheader, LI);
+
+      // When we break dead edges, the outer loop may become unreachable from
+      // the current loop. We need to fix loop info accordingly. For this, we
+      // find the most nested loop that still contains L and remove L from all
+      // loops that are inside of it.
+      Loop *StillReachable = nullptr;
+      for (BasicBlock *BB : LiveExitBlocks) {
+        Loop *BBL = LI.getLoopFor(BB);
+        if (BBL && BBL->contains(L.getHeader()))
+          if (!StillReachable ||
+              BBL->getLoopDepth() > StillReachable->getLoopDepth())
+            StillReachable = BBL;
+      }
+
+      // Okay, our loop is no longer in the outer loop (and maybe not in some of
+      // its parents as well). Make the fixup.
+      if (StillReachable != OuterLoop) {
+        LI.changeLoopFor(NewPreheader, StillReachable);
+        for (Loop *NotContaining = OuterLoop; NotContaining != StillReachable;
+             NotContaining = NotContaining->getParentLoop()) {
+          NotContaining->removeBlockFromLoop(NewPreheader);
+          for (auto *BB : L.blocks())
+            NotContaining->removeBlockFromLoop(BB);
+        }
+        OuterLoop->removeChildLoop(&L);
+        if (StillReachable)
+          StillReachable->addChildLoop(&L);
+        else
+          LI.addTopLevelLoop(&L);
+      }
+    }
+  }
+
+  /// Delete loop blocks that have become unreachable after folding. Make all
+  /// relevant updates to DT and LI.
+  void deleteDeadLoopBlocks() {
+    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
+    if (MSSAU) {
+      SmallPtrSet<BasicBlock *, 8> DeadLoopBlocksSet(DeadLoopBlocks.begin(),
+                                                     DeadLoopBlocks.end());
+      MSSAU->removeBlocks(DeadLoopBlocksSet);
+    }
+    for (auto *BB : DeadLoopBlocks) {
+      assert(BB != L.getHeader() &&
+             "Header of the current loop cannot be dead!");
+      LLVM_DEBUG(dbgs() << "Deleting dead loop block " << BB->getName()
+                        << "\n");
+      if (LI.isLoopHeader(BB)) {
+        assert(LI.getLoopFor(BB) != &L && "Attempt to remove current loop!");
+        LI.erase(LI.getLoopFor(BB));
+      }
+      LI.removeBlock(BB);
+      DeleteDeadBlock(BB, &DTU);
+      ++NumLoopBlocksDeleted;
+    }
+  }
+
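Schematically (a hypothetical input, assuming earlier passes have already proven the branch condition constant at the IR level), the kind of loop the folding now fully handles:

// The branch on a known-false condition folds to an unconditional branch;
// the early-return block becomes a dead loop exit that handleDeadExits keeps
// statically reachable via the dummy switch, and deleteDeadLoopBlocks then
// erases the unreachable loop blocks while updating DT, LI, and MemorySSA.
int sumTo(int n, bool flag /* known false here */) {
  int s = 0;
  for (int i = 0; i < n; ++i) {
    if (flag)        // terminator with a known constant condition
      return -1;     // dead exit once the branch is folded
    s += i;
  }
  return s;
}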
   /// Constant-fold terminators of blocks accumulated in FoldCandidates into
   /// unconditional branches.
   void foldTerminators() {
@@ -323,8 +461,9 @@
 
 public:
   ConstantTerminatorFoldingImpl(Loop &L, LoopInfo &LI, DominatorTree &DT,
+                                ScalarEvolution &SE,
                                 MemorySSAUpdater *MSSAU)
-      : L(L), LI(LI), DT(DT), MSSAU(MSSAU) {}
+      : L(L), LI(LI), DT(DT), SE(SE), MSSAU(MSSAU) {}
   bool run() {
     assert(L.getLoopLatch() && "Should be single latch!");
 
@@ -358,26 +497,10 @@
       return false;
     }
 
-    // TODO: Support deletion of dead loop blocks.
-    if (!DeadLoopBlocks.empty()) {
-      LLVM_DEBUG(dbgs() << "Give up constant terminator folding in loop "
-                        << L.getHeader()->getName()
-                        << ": we don't currently"
-                           " support deletion of dead in-loop blocks.\n");
-      return false;
-    }
-
-    // TODO: Support dead loop exits.
-    if (!DeadExitBlocks.empty()) {
-      LLVM_DEBUG(dbgs() << "Give up constant terminator folding in loop "
-                        << L.getHeader()->getName()
-                        << ": we don't currently support dead loop exits.\n");
-      return false;
-    }
-
     // TODO: Support blocks that are not dead, but also not in loop after the
     // folding.
-    if (BlocksInLoopAfterFolding.size() != L.getNumBlocks()) {
+    if (BlocksInLoopAfterFolding.size() + DeadLoopBlocks.size() !=
+        L.getNumBlocks()) {
       LLVM_DEBUG(
           dbgs() << "Give up constant terminator folding in loop "
                  << L.getHeader()->getName()
@@ -387,6 +510,7 @@
       return false;
     }
 
+    SE.forgetTopmostLoop(&L);
     // Dump analysis results.
     LLVM_DEBUG(dump());
 
@@ -395,8 +519,16 @@
                       << "\n");
 
     // Make the actual transforms.
+    handleDeadExits();
     foldTerminators();
 
+    if (!DeadLoopBlocks.empty()) {
+      LLVM_DEBUG(dbgs() << "Deleting " << DeadLoopBlocks.size()
+                    << " dead blocks in loop " << L.getHeader()->getName()
+                    << "\n");
+      deleteDeadLoopBlocks();
+    }
+
 #ifndef NDEBUG
     // Make sure that we have preserved all data structures after the transform.
     DT.verify();
@@ -407,10 +539,12 @@
     return true;
   }
 };
+} // namespace
 
 /// Turn branches and switches with known constant conditions into unconditional
 /// branches.
 static bool constantFoldTerminators(Loop &L, DominatorTree &DT, LoopInfo &LI,
+                                    ScalarEvolution &SE,
                                     MemorySSAUpdater *MSSAU) {
   if (!EnableTermFolding)
     return false;
@@ -420,7 +554,7 @@
   if (!L.getLoopLatch())
     return false;
 
-  ConstantTerminatorFoldingImpl BranchFolder(L, LI, DT, MSSAU);
+  ConstantTerminatorFoldingImpl BranchFolder(L, LI, DT, SE, MSSAU);
   return BranchFolder.run();
 }
 
@@ -457,7 +591,7 @@
   bool Changed = false;
 
   // Constant-fold terminators with known constant conditions.
-  Changed |= constantFoldTerminators(L, DT, LI, MSSAU);
+  Changed |= constantFoldTerminators(L, DT, LI, SE, MSSAU);
 
   // Eliminate unconditional branches by merging blocks into their predecessors.
   Changed |= mergeBlocksIntoPredecessors(L, DT, LI, MSSAU);
diff --git a/lib/Transforms/Scalar/LoopSink.cpp b/lib/Transforms/Scalar/LoopSink.cpp
index 540d19f..2f7ad21 100644
--- a/lib/Transforms/Scalar/LoopSink.cpp
+++ b/lib/Transforms/Scalar/LoopSink.cpp
@@ -304,7 +304,7 @@
     // No need to check for instruction's operands are loop invariant.
     assert(L.hasLoopInvariantOperands(I) &&
            "Insts in a loop's preheader should have loop invariant operands!");
-    if (!canSinkOrHoistInst(*I, &AA, &DT, &L, &CurAST, false))
+    if (!canSinkOrHoistInst(*I, &AA, &DT, &L, &CurAST, nullptr, false))
       continue;
     if (sinkInstruction(L, *I, ColdLoopBBs, LoopBlockNumber, LI, DT, BFI))
       Changed = true;
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index b7baba6..38b80f4 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -965,13 +965,15 @@
     Loop *L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution &SE,
     const TargetTransformInfo &TTI, AssumptionCache &AC,
     OptimizationRemarkEmitter &ORE, bool PreserveLCSSA, int OptLevel,
-    Optional<unsigned> ProvidedCount, Optional<unsigned> ProvidedThreshold,
-    Optional<bool> ProvidedAllowPartial, Optional<bool> ProvidedRuntime,
-    Optional<bool> ProvidedUpperBound, Optional<bool> ProvidedAllowPeeling) {
+    bool OnlyWhenForced, Optional<unsigned> ProvidedCount,
+    Optional<unsigned> ProvidedThreshold, Optional<bool> ProvidedAllowPartial,
+    Optional<bool> ProvidedRuntime, Optional<bool> ProvidedUpperBound,
+    Optional<bool> ProvidedAllowPeeling) {
   LLVM_DEBUG(dbgs() << "Loop Unroll: F["
                     << L->getHeader()->getParent()->getName() << "] Loop %"
                     << L->getHeader()->getName() << "\n");
-  if (hasUnrollTransformation(L) & TM_Disable)
+  TransformationMode TM = hasUnrollTransformation(L);
+  if (TM & TM_Disable)
     return LoopUnrollResult::Unmodified;
   if (!L->isLoopSimplifyForm()) {
     LLVM_DEBUG(
@@ -979,6 +981,11 @@
     return LoopUnrollResult::Unmodified;
   }
 
+  // When automatic unrolling is disabled, do not unroll unless overridden for
+  // this loop.
+  if (OnlyWhenForced && !(TM & TM_Enable))
+    return LoopUnrollResult::Unmodified;
+
   unsigned NumInlineCandidates;
   bool NotDuplicatable;
   bool Convergent;
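A hedged usage sketch of the OnlyWhenForced mode: loops must opt in through explicit transform metadata, which clang typically attaches via a pragma.

// #pragma clang loop unroll(enable) emits llvm.loop.unroll.enable metadata,
// which hasUnrollTransformation() reports as TM_Enable, so this loop is still
// unrolled even when OnlyWhenForced suppresses cost-model-driven unrolling.
void scaleBy2(int *out, const int *in) {
#pragma clang loop unroll(enable)
  for (int i = 0; i < 8; ++i)
    out[i] = in[i] * 2;
}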
@@ -1119,6 +1126,12 @@
   static char ID; // Pass ID, replacement for typeid
 
   int OptLevel;
+
+  /// If false, use a cost model to determine whether unrolling of a loop is
+  /// profitable. If true, only loops that explicitly request unrolling via
+  /// metadata are considered. All other loops are skipped.
+  bool OnlyWhenForced;
+
   Optional<unsigned> ProvidedCount;
   Optional<unsigned> ProvidedThreshold;
   Optional<bool> ProvidedAllowPartial;
@@ -1126,15 +1139,16 @@
   Optional<bool> ProvidedUpperBound;
   Optional<bool> ProvidedAllowPeeling;
 
-  LoopUnroll(int OptLevel = 2, Optional<unsigned> Threshold = None,
+  LoopUnroll(int OptLevel = 2, bool OnlyWhenForced = false,
+             Optional<unsigned> Threshold = None,
              Optional<unsigned> Count = None,
              Optional<bool> AllowPartial = None, Optional<bool> Runtime = None,
              Optional<bool> UpperBound = None,
              Optional<bool> AllowPeeling = None)
-      : LoopPass(ID), OptLevel(OptLevel), ProvidedCount(std::move(Count)),
-        ProvidedThreshold(Threshold), ProvidedAllowPartial(AllowPartial),
-        ProvidedRuntime(Runtime), ProvidedUpperBound(UpperBound),
-        ProvidedAllowPeeling(AllowPeeling) {
+      : LoopPass(ID), OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced),
+        ProvidedCount(std::move(Count)), ProvidedThreshold(Threshold),
+        ProvidedAllowPartial(AllowPartial), ProvidedRuntime(Runtime),
+        ProvidedUpperBound(UpperBound), ProvidedAllowPeeling(AllowPeeling) {
     initializeLoopUnrollPass(*PassRegistry::getPassRegistry());
   }
 
@@ -1157,8 +1171,8 @@
     bool PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
 
     LoopUnrollResult Result = tryToUnrollLoop(
-        L, DT, LI, SE, TTI, AC, ORE, PreserveLCSSA, OptLevel, ProvidedCount,
-        ProvidedThreshold, ProvidedAllowPartial, ProvidedRuntime,
+        L, DT, LI, SE, TTI, AC, ORE, PreserveLCSSA, OptLevel, OnlyWhenForced,
+        ProvidedCount, ProvidedThreshold, ProvidedAllowPartial, ProvidedRuntime,
         ProvidedUpperBound, ProvidedAllowPeeling);
 
     if (Result == LoopUnrollResult::FullyUnrolled)
@@ -1188,14 +1202,16 @@
 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
 INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
 
-Pass *llvm::createLoopUnrollPass(int OptLevel, int Threshold, int Count,
-                                 int AllowPartial, int Runtime, int UpperBound,
+Pass *llvm::createLoopUnrollPass(int OptLevel, bool OnlyWhenForced,
+                                 int Threshold, int Count, int AllowPartial,
+                                 int Runtime, int UpperBound,
                                  int AllowPeeling) {
   // TODO: It would make more sense for this function to take the optionals
   // directly, but that's dangerous since it would silently break out of tree
   // callers.
   return new LoopUnroll(
-      OptLevel, Threshold == -1 ? None : Optional<unsigned>(Threshold),
+      OptLevel, OnlyWhenForced,
+      Threshold == -1 ? None : Optional<unsigned>(Threshold),
       Count == -1 ? None : Optional<unsigned>(Count),
       AllowPartial == -1 ? None : Optional<bool>(AllowPartial),
       Runtime == -1 ? None : Optional<bool>(Runtime),
@@ -1203,8 +1219,8 @@
       AllowPeeling == -1 ? None : Optional<bool>(AllowPeeling));
 }
 
-Pass *llvm::createSimpleLoopUnrollPass(int OptLevel) {
-  return createLoopUnrollPass(OptLevel, -1, -1, 0, 0, 0, 0);
+Pass *llvm::createSimpleLoopUnrollPass(int OptLevel, bool OnlyWhenForced) {
+  return createLoopUnrollPass(OptLevel, OnlyWhenForced, -1, -1, 0, 0, 0, 0);
 }
 
 PreservedAnalyses LoopFullUnrollPass::run(Loop &L, LoopAnalysisManager &AM,
@@ -1234,7 +1250,8 @@
 
   bool Changed =
       tryToUnrollLoop(&L, AR.DT, &AR.LI, AR.SE, AR.TTI, AR.AC, *ORE,
-                      /*PreserveLCSSA*/ true, OptLevel, /*Count*/ None,
+                      /*PreserveLCSSA*/ true, OptLevel, OnlyWhenForced,
+                      /*Count*/ None,
                       /*Threshold*/ None, /*AllowPartial*/ false,
                       /*Runtime*/ false, /*UpperBound*/ false,
                       /*AllowPeeling*/ false) != LoopUnrollResult::Unmodified;
@@ -1371,7 +1388,8 @@
     // flavors of unrolling during construction time (by setting UnrollOpts).
     LoopUnrollResult Result = tryToUnrollLoop(
         &L, DT, &LI, SE, TTI, AC, ORE,
-        /*PreserveLCSSA*/ true, UnrollOpts.OptLevel, /*Count*/ None,
+        /*PreserveLCSSA*/ true, UnrollOpts.OptLevel, UnrollOpts.OnlyWhenForced,
+        /*Count*/ None,
         /*Threshold*/ None, UnrollOpts.AllowPartial, UnrollOpts.AllowRuntime,
         UnrollOpts.AllowUpperBound, LocalAllowPeeling);
     Changed |= Result != LoopUnrollResult::Unmodified;
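
The new OnlyWhenForced flag threads through every unroll entry point above. As a minimal sketch (not part of the patch, and assuming the header defaults for the remaining createLoopUnrollPass parameters), a legacy pipeline that should unroll only loops carrying explicit unroll annotations could be set up as follows:

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/Transforms/Scalar.h"
    using namespace llvm;

    void addForcedOnlyUnrolling(legacy::PassManager &PM) {
      // With OnlyWhenForced=true the pass skips any loop that lacks user
      // annotations, which is what lower optimization levels want.
      PM.add(createLoopUnrollPass(/*OptLevel=*/1, /*OnlyWhenForced=*/true));
    }
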
diff --git a/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/lib/Transforms/Scalar/LoopVersioningLICM.cpp
index c0c59d2..83861b9 100644
--- a/lib/Transforms/Scalar/LoopVersioningLICM.cpp
+++ b/lib/Transforms/Scalar/LoopVersioningLICM.cpp
@@ -360,10 +360,11 @@
 bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
   assert(I != nullptr && "Null instruction found!");
   // Check function call safety
-  if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
-    LLVM_DEBUG(dbgs() << "    Unsafe call site found.\n");
-    return false;
-  }
+  if (auto *Call = dyn_cast<CallBase>(I))
+    if (!AA->doesNotAccessMemory(Call)) {
+      LLVM_DEBUG(dbgs() << "    Unsafe call site found.\n");
+      return false;
+    }
   // Avoid loops with the possibility of a throw
   if (I->mayThrow()) {
     LLVM_DEBUG(dbgs() << "    May throw instruction found in loop body\n");
@@ -633,6 +634,8 @@
     // Set Loop Versioning metadata for the versioned loop.
     addStringMetadataToLoop(LVer.getVersionedLoop(), LICMVersioningMetaData);
     // Set "llvm.mem.parallel_loop_access" metadata on the versioned loop.
+    // FIXME: "llvm.mem.parallel_loop_access" annotates memory access
+    // instructions, not loops.
     addStringMetadataToLoop(LVer.getVersionedLoop(),
                             "llvm.mem.parallel_loop_access");
     // Update version loop with aggressive aliasing assumption.
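
The CallSite-to-CallBase migration above recurs throughout this commit (Sink.cpp and InlineFunction.cpp below use the same pattern). A standalone sketch of the idiom, assuming an AAResults &AA is available:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    static bool isMemoryOpaqueCall(const Instruction *I, AAResults &AA) {
      // CallBase covers CallInst and InvokeInst alike, so a single dyn_cast
      // replaces the old CallSite wrapper and its validity check.
      if (const auto *Call = dyn_cast<CallBase>(I))
        return !AA.doesNotAccessMemory(Call);
      return false;
    }
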
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 8756a1a..ced923d 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -546,8 +546,8 @@
   // Memory locations of lifted instructions.
   SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
 
-  // Lifted callsites.
-  SmallVector<ImmutableCallSite, 8> CallSites;
+  // Lifted calls.
+  SmallVector<const CallBase *, 8> Calls;
 
   const MemoryLocation LoadLoc = MemoryLocation::get(LI);
 
@@ -565,10 +565,9 @@
       });
 
       if (!NeedLift)
-        NeedLift =
-            llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
-              return isModOrRefSet(AA.getModRefInfo(C, CS));
-            });
+        NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
+          return isModOrRefSet(AA.getModRefInfo(C, Call));
+        });
     }
 
     if (!NeedLift)
@@ -579,12 +578,12 @@
       // none of them may modify its source.
       if (isModSet(AA.getModRefInfo(C, LoadLoc)))
         return false;
-      else if (auto CS = ImmutableCallSite(C)) {
+      else if (const auto *Call = dyn_cast<CallBase>(C)) {
         // If we can't lift this before P, it's game over.
-        if (isModOrRefSet(AA.getModRefInfo(P, CS)))
+        if (isModOrRefSet(AA.getModRefInfo(P, Call)))
           return false;
 
-        CallSites.push_back(CS);
+        Calls.push_back(Call);
       } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
         // If we can't lift this before P, it's game over.
         auto ML = MemoryLocation::get(C);
@@ -675,13 +674,11 @@
           if (UseMemMove)
             M = Builder.CreateMemMove(
                 SI->getPointerOperand(), findStoreAlignment(DL, SI),
-                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size,
-                SI->isVolatile());
+                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
           else
             M = Builder.CreateMemCpy(
                 SI->getPointerOperand(), findStoreAlignment(DL, SI),
-                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size,
-                SI->isVolatile());
+                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
 
           LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                             << *M << "\n");
@@ -770,8 +767,8 @@
       if (!Align)
         Align = DL.getABITypeAlignment(T);
       IRBuilder<> Builder(SI);
-      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
-                                     Size, Align, SI->isVolatile());
+      auto *M =
+          Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, Align);
 
       LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
 
@@ -919,8 +916,7 @@
       continue;
     }
     if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
-      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
-          IT->getIntrinsicID() == Intrinsic::lifetime_end)
+      if (IT->isLifetimeStartOrEnd())
         continue;
 
     if (U != C && U != cpy)
@@ -945,10 +941,10 @@
   // the use analysis, we also need to know that it does not sneakily
   // access dest.  We rely on AA to figure this out for us.
   AliasAnalysis &AA = LookupAliasAnalysis();
-  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
+  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
   // If necessary, perform additional analysis.
   if (isModOrRefSet(MR))
-    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
+    MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
   if (isModOrRefSet(MR))
     return false;
 
@@ -996,7 +992,8 @@
   // handled here, but combineMetadata doesn't support them yet
   unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                          LLVMContext::MD_noalias,
-                         LLVMContext::MD_invariant_group};
+                         LLVMContext::MD_invariant_group,
+                         LLVMContext::MD_access_group};
   combineMetadata(C, cpy, KnownIDs, true);
 
   // Remove the memcpy.
@@ -1059,6 +1056,8 @@
     UseMemMove = true;
 
   // If all checks passed, then we can transform M.
+  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
+                    << *MDep << '\n' << *M << '\n');
 
   // TODO: Is this worth it if we're creating a less aligned memcpy? For
   // example we could be moving from movaps -> movq on x86.
@@ -1341,7 +1340,7 @@
   Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
   uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
   MemDepResult DepInfo = MD->getPointerDependencyFrom(
-      MemoryLocation(ByValArg, ByValSize), true,
+      MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
       CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
   if (!DepInfo.isClobber())
     return false;
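
The LocationSize conversions above reflect the reworked MemoryLocation API, which makes callers state whether a byte count is exact. A minimal sketch:

    #include "llvm/Analysis/MemoryLocation.h"
    using namespace llvm;

    MemoryLocation makePreciseLoc(const Value *Ptr, uint64_t Bytes) {
      // Raw integers no longer convert implicitly; wrap them explicitly.
      // LocationSize::upperBound(Bytes) is the alternative when only a
      // bound is known.
      return MemoryLocation(Ptr, LocationSize::precise(Bytes));
    }
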
diff --git a/lib/Transforms/Scalar/NewGVN.cpp b/lib/Transforms/Scalar/NewGVN.cpp
index 6e9ab44..7cbb0fe 100644
--- a/lib/Transforms/Scalar/NewGVN.cpp
+++ b/lib/Transforms/Scalar/NewGVN.cpp
@@ -4093,10 +4093,13 @@
           // It's about to be alive again.
           if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader))
             ProbablyDead.erase(cast<Instruction>(DominatingLeader));
-          // Copy instructions, however, are still dead because we use their
-          // operand as the leader.
-          if (LeaderUseCount == 0 && isSSACopy)
-            ProbablyDead.insert(II);
+          // For copy instructions, we use their operand as a leader,
+          // which means we remove a user of the copy and it may become dead.
+          if (isSSACopy) {
+            unsigned &IIUseCount = UseCounts[II];
+            if (--IIUseCount == 0)
+              ProbablyDead.insert(II);
+          }
           ++LeaderUseCount;
           AnythingReplaced = true;
         }
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index cf2ce03..42d7ed5 100644
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -347,7 +347,7 @@
   if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
     return containsGCPtrType(AT->getElementType());
   if (StructType *ST = dyn_cast<StructType>(Ty))
-    return llvm::any_of(ST->subtypes(), containsGCPtrType);
+    return llvm::any_of(ST->elements(), containsGCPtrType);
   return false;
 }
 
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 11cd7cc..2f6ed05 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -1172,7 +1172,7 @@
         return;
 
       Value *CopyOf = I->getOperand(0);
-      auto *PBranch = dyn_cast<PredicateBranch>(getPredicateInfoFor(I));
+      auto *PBranch = dyn_cast<PredicateBranch>(PI);
       if (!PBranch) {
         mergeInValue(ValueState[I], I, getValueState(CopyOf));
         return;
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index a8b9ee5..eab77cf 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -913,8 +913,7 @@
     if (!IsOffsetKnown)
       return PI.setAborted(&II);
 
-    if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
-        II.getIntrinsicID() == Intrinsic::lifetime_end) {
+    if (II.isLifetimeStartOrEnd()) {
       ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
       uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                                Length->getLimitedValue());
@@ -1807,8 +1806,7 @@
     if (!S.isSplittable())
       return false; // Skip any unsplittable intrinsics.
   } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
-    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
-        II->getIntrinsicID() != Intrinsic::lifetime_end)
+    if (!II->isLifetimeStartOrEnd())
       return false;
   } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
     // Disable vector promotion when there are loads or stores of an FCA.
@@ -2029,8 +2027,7 @@
     if (!S.isSplittable())
       return false; // Skip any unsplittable intrinsics.
   } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
-    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
-        II->getIntrinsicID() != Intrinsic::lifetime_end)
+    if (!II->isLifetimeStartOrEnd())
       return false;
   } else {
     return false;
@@ -2593,7 +2590,8 @@
     }
     V = convertValue(DL, IRB, V, NewAllocaTy);
     StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
-    Store->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access);
+    Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
+                             LLVMContext::MD_access_group});
     if (AATags)
       Store->setAAMetadata(AATags);
     Pass.DeadInsts.insert(&SI);
@@ -2662,7 +2660,8 @@
       NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
                                      SI.isVolatile());
     }
-    NewSI->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access);
+    NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
+                             LLVMContext::MD_access_group});
     if (AATags)
       NewSI->setAAMetadata(AATags);
     if (SI.isVolatile())
@@ -3011,8 +3010,7 @@
   }
 
   bool visitIntrinsicInst(IntrinsicInst &II) {
-    assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
-           II.getIntrinsicID() == Intrinsic::lifetime_end);
+    assert(II.isLifetimeStartOrEnd());
     LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
     assert(II.getArgOperand(1) == OldPtr);
 
@@ -3164,7 +3162,12 @@
   /// value (as opposed to the user).
   Use *U;
 
+  /// Used to calculate offsets, and hence alignment, of subobjects.
+  const DataLayout &DL;
+
 public:
+  AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
+
   /// Rewrite loads and stores through a pointer and all pointers derived from
   /// it.
   bool rewrite(Instruction &I) {
@@ -3208,10 +3211,22 @@
     /// split operations.
     Value *Ptr;
 
+    /// The base pointee type being GEPed into.
+    Type *BaseTy;
+
+    /// Known alignment of the base pointer.
+    unsigned BaseAlign;
+
+    /// To calculate offset of each component so we can correctly deduce
+    /// alignments.
+    const DataLayout &DL;
+
     /// Initialize the splitter with an insertion point, Ptr and start with a
     /// single zero GEP index.
-    OpSplitter(Instruction *InsertionPoint, Value *Ptr)
-        : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
+    OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
+               unsigned BaseAlign, const DataLayout &DL)
+        : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr),
+          BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {}
 
   public:
     /// Generic recursive split emission routine.
@@ -3228,8 +3243,11 @@
     /// \param Agg The aggregate value being built up or stored, depending on
     /// whether this is splitting a load or a store respectively.
     void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
-      if (Ty->isSingleValueType())
-        return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
+      if (Ty->isSingleValueType()) {
+        unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
+        return static_cast<Derived *>(this)->emitFunc(
+            Ty, Agg, MinAlign(BaseAlign, Offset), Name);
+      }
 
       if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
         unsigned OldSize = Indices.size();
@@ -3268,17 +3286,19 @@
   struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
     AAMDNodes AATags;
 
-    LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, AAMDNodes AATags)
-        : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr), AATags(AATags) {}
+    LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
+                   AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
+        : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
+                                     DL), AATags(AATags) {}
 
     /// Emit a leaf load of a single value. This is called at the leaves of the
     /// recursive emission to actually load values.
-    void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
+    void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
       assert(Ty->isSingleValueType());
       // Load the single value and insert it using the indices.
       Value *GEP =
           IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
-      LoadInst *Load = IRB.CreateLoad(GEP, Name + ".load");
+      LoadInst *Load = IRB.CreateAlignedLoad(GEP, Align, Name + ".load");
       if (AATags)
         Load->setAAMetadata(AATags);
       Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
@@ -3295,7 +3315,8 @@
     LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
     AAMDNodes AATags;
     LI.getAAMetadata(AATags);
-    LoadOpSplitter Splitter(&LI, *U, AATags);
+    LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags,
+                            getAdjustedAlignment(&LI, 0, DL), DL);
     Value *V = UndefValue::get(LI.getType());
     Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
     LI.replaceAllUsesWith(V);
@@ -3304,13 +3325,15 @@
   }
 
   struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
-    StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, AAMDNodes AATags)
-        : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr), AATags(AATags) {}
+    StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
+                    AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
+        : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
+                                      DL),
+          AATags(AATags) {}
     AAMDNodes AATags;
-
     /// Emit a leaf store of a single value. This is called at the leaves of the
     /// recursive emission to actually produce stores.
-    void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
+    void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
       assert(Ty->isSingleValueType());
       // Extract the single value and store it using the indices.
       //
@@ -3320,7 +3343,8 @@
           IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
       Value *InBoundsGEP =
           IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
-      StoreInst *Store = IRB.CreateStore(ExtractValue, InBoundsGEP);
+      StoreInst *Store =
+          IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Align);
       if (AATags)
         Store->setAAMetadata(AATags);
       LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
@@ -3338,7 +3362,8 @@
     LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
     AAMDNodes AATags;
     SI.getAAMetadata(AATags);
-    StoreOpSplitter Splitter(&SI, *U, AATags);
+    StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags,
+                             getAdjustedAlignment(&SI, 0, DL), DL);
     Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
     SI.eraseFromParent();
     return true;
@@ -3772,7 +3797,8 @@
                          PartPtrTy, BasePtr->getName() + "."),
           getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
           LI->getName());
-      PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
+      PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
+                                LLVMContext::MD_access_group});
 
       // Append this load onto the list of split loads so we can find it later
       // to rewrite the stores.
@@ -3828,7 +3854,8 @@
                            APInt(DL.getIndexSizeInBits(AS), PartOffset),
                            PartPtrTy, StoreBasePtr->getName() + "."),
             getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
-        PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
+        PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
+                                   LLVMContext::MD_access_group});
         LLVM_DEBUG(dbgs() << "      +" << PartOffset << ":" << *PStore << "\n");
       }
 
@@ -4356,7 +4383,7 @@
 
   // First, split any FCA loads and stores touching this alloca to promote
   // better splitting and promotion opportunities.
-  AggLoadStoreRewriter AggRewriter;
+  AggLoadStoreRewriter AggRewriter(DL);
   Changed |= AggRewriter.rewrite(AI);
 
   // Build the slices using a recursive instruction-visiting builder.
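
The repeated two-element copyMetadata calls above keep the legacy parallel-loop annotation and the new access-group annotation in sync on rewritten memory operations. Factored into a sketch:

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    void copyParallelAnnotations(Instruction *To, const Instruction &From) {
      // Instruction::copyMetadata accepts a list of kind IDs, so both
      // !llvm.mem.parallel_loop_access and !llvm.access.group travel
      // together onto the rewritten instruction.
      To->copyMetadata(From, {LLVMContext::MD_mem_parallel_loop_access,
                              LLVMContext::MD_access_group});
    }
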
diff --git a/lib/Transforms/Scalar/Scalarizer.cpp b/lib/Transforms/Scalar/Scalarizer.cpp
index 3a6f8e6..5eb3fda 100644
--- a/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/lib/Transforms/Scalar/Scalarizer.cpp
@@ -379,7 +379,8 @@
           || Tag == LLVMContext::MD_invariant_load
           || Tag == LLVMContext::MD_alias_scope
           || Tag == LLVMContext::MD_noalias
-          || Tag == ParallelLoopAccessMDKind);
+          || Tag == ParallelLoopAccessMDKind
+          || Tag == LLVMContext::MD_access_group);
 }
 
 // Transfer metadata from Op to the instructions in CV if it is known
diff --git a/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index abec918..5a67178 100644
--- a/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -2281,7 +2281,10 @@
   if (MSSAU && VerifyMemorySSA)
     MSSAU->getMemorySSA()->verifyMemorySSA();
 
-  ++NumBranches;
+  if (BI)
+    ++NumBranches;
+  else
+    ++NumSwitches;
 }
 
 /// Recursively compute the cost of a dominator subtree based on the per-block
diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index d1cdfab..c99da8f 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -76,14 +76,14 @@
       Inst->mayThrow())
     return false;
 
-  if (auto CS = CallSite(Inst)) {
+  if (auto *Call = dyn_cast<CallBase>(Inst)) {
     // Convergent operations cannot be made control-dependent on additional
     // values.
-    if (CS.hasFnAttr(Attribute::Convergent))
+    if (Call->hasFnAttr(Attribute::Convergent))
       return false;
 
     for (Instruction *S : Stores)
-      if (isModSet(AA.getModRefInfo(S, CS)))
+      if (isModSet(AA.getModRefInfo(S, Call)))
         return false;
   }
 
diff --git a/lib/Transforms/Utils/AddDiscriminators.cpp b/lib/Transforms/Utils/AddDiscriminators.cpp
index e3ef423..564537a 100644
--- a/lib/Transforms/Utils/AddDiscriminators.cpp
+++ b/lib/Transforms/Utils/AddDiscriminators.cpp
@@ -209,10 +209,18 @@
       // Only the lowest 7 bits are used to represent a discriminator to fit
       // it in 1 byte ULEB128 representation.
       unsigned Discriminator = R.second ? ++LDM[L] : LDM[L];
-      I.setDebugLoc(DIL->setBaseDiscriminator(Discriminator));
-      LLVM_DEBUG(dbgs() << DIL->getFilename() << ":" << DIL->getLine() << ":"
-                        << DIL->getColumn() << ":" << Discriminator << " " << I
-                        << "\n");
+      auto NewDIL = DIL->setBaseDiscriminator(Discriminator);
+      if (!NewDIL) {
+        LLVM_DEBUG(dbgs() << "Could not encode discriminator: "
+                          << DIL->getFilename() << ":" << DIL->getLine() << ":"
+                          << DIL->getColumn() << ":" << Discriminator << " "
+                          << I << "\n");
+      } else {
+        I.setDebugLoc(NewDIL.getValue());
+        LLVM_DEBUG(dbgs() << DIL->getFilename() << ":" << DIL->getLine() << ":"
+                   << DIL->getColumn() << ":" << Discriminator << " " << I
+                   << "\n");
+      }
       Changed = true;
     }
   }
@@ -224,23 +232,31 @@
   for (BasicBlock &B : F) {
     LocationSet CallLocations;
     for (auto &I : B.getInstList()) {
-      CallInst *Current = dyn_cast<CallInst>(&I);
       // We bypass intrinsic calls for the following two reasons:
       //  1) We want to avoid a non-deterministic assignment of
       //     discriminators.
       //  2) We want to minimize the number of base discriminators used.
-      if (!Current || isa<IntrinsicInst>(&I))
+      if (!isa<InvokeInst>(I) && (!isa<CallInst>(I) || isa<IntrinsicInst>(I)))
         continue;
 
-      DILocation *CurrentDIL = Current->getDebugLoc();
+      DILocation *CurrentDIL = I.getDebugLoc();
       if (!CurrentDIL)
         continue;
       Location L =
           std::make_pair(CurrentDIL->getFilename(), CurrentDIL->getLine());
       if (!CallLocations.insert(L).second) {
         unsigned Discriminator = ++LDM[L];
-        Current->setDebugLoc(CurrentDIL->setBaseDiscriminator(Discriminator));
-        Changed = true;
+        auto NewDIL = CurrentDIL->setBaseDiscriminator(Discriminator);
+        if (!NewDIL) {
+          LLVM_DEBUG(dbgs()
+                     << "Could not encode discriminator: "
+                     << CurrentDIL->getFilename() << ":"
+                     << CurrentDIL->getLine() << ":" << CurrentDIL->getColumn()
+                     << ":" << Discriminator << " " << I << "\n");
+        } else {
+          I.setDebugLoc(NewDIL.getValue());
+          Changed = true;
+        }
       }
     }
   }
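
Both hunks above adopt the same fallible idiom: setBaseDiscriminator now returns an Optional that is None when the discriminator cannot be encoded into the debug location. Reduced to its core (names are illustrative):

    #include "llvm/IR/DebugInfoMetadata.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    void applyDiscriminator(Instruction &I, const DILocation *DIL,
                            unsigned Discriminator) {
      // On encoding failure the original location is deliberately kept,
      // rather than attaching a bogus one.
      if (auto NewDIL = DIL->setBaseDiscriminator(Discriminator))
        I.setDebugLoc(NewDIL.getValue());
    }
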
diff --git a/lib/Transforms/Utils/BasicBlockUtils.cpp b/lib/Transforms/Utils/BasicBlockUtils.cpp
index 11a0114..7da7682 100644
--- a/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -49,46 +49,57 @@
 using namespace llvm;
 
 void llvm::DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU) {
-  assert((pred_begin(BB) == pred_end(BB) ||
-         // Can delete self loop.
-         BB->getSinglePredecessor() == BB) && "Block is not dead!");
-  Instruction *BBTerm = BB->getTerminator();
-  std::vector<DominatorTree::UpdateType> Updates;
+  SmallVector<BasicBlock *, 1> BBs = {BB};
+  DeleteDeadBlocks(BBs, DTU);
+}
 
-  // Loop through all of our successors and make sure they know that one
-  // of their predecessors is going away.
+void llvm::DeleteDeadBlocks(SmallVectorImpl<BasicBlock *> &BBs,
+                            DomTreeUpdater *DTU) {
+#ifndef NDEBUG
+  // Make sure that all predecessors of each dead block are also dead.
+  SmallPtrSet<BasicBlock *, 4> Dead(BBs.begin(), BBs.end());
+  assert(Dead.size() == BBs.size() && "Duplicating blocks?");
+  for (auto *BB : Dead)
+    for (BasicBlock *Pred : predecessors(BB))
+      assert(Dead.count(Pred) && "All predecessors must be dead!");
+#endif
+
+  SmallVector<DominatorTree::UpdateType, 4> Updates;
+  for (auto *BB : BBs) {
+    // Loop through all of our successors and make sure they know that one
+    // of their predecessors is going away.
+    for (BasicBlock *Succ : successors(BB)) {
+      Succ->removePredecessor(BB);
+      if (DTU)
+        Updates.push_back({DominatorTree::Delete, BB, Succ});
+    }
+
+    // Zap all the instructions in the block.
+    while (!BB->empty()) {
+      Instruction &I = BB->back();
+      // If this instruction is used, replace uses with an arbitrary value.
+      // Because control flow can't get here, we don't care what we replace the
+      // value with.  Note that since this block is unreachable and all values
+      // contained within it must dominate their uses, all uses will
+      // eventually be removed (they are themselves dead).
+      if (!I.use_empty())
+        I.replaceAllUsesWith(UndefValue::get(I.getType()));
+      BB->getInstList().pop_back();
+    }
+    new UnreachableInst(BB->getContext(), BB);
+    assert(BB->getInstList().size() == 1 &&
+           isa<UnreachableInst>(BB->getTerminator()) &&
+           "The successor list of BB isn't empty before "
+           "applying corresponding DTU updates.");
+  }
   if (DTU)
-    Updates.reserve(BBTerm->getNumSuccessors());
-  for (BasicBlock *Succ : successors(BBTerm)) {
-    Succ->removePredecessor(BB);
-    if (DTU)
-      Updates.push_back({DominatorTree::Delete, BB, Succ});
-  }
-
-  // Zap all the instructions in the block.
-  while (!BB->empty()) {
-    Instruction &I = BB->back();
-    // If this instruction is used, replace uses with an arbitrary value.
-    // Because control flow can't get here, we don't care what we replace the
-    // value with.  Note that since this block is unreachable, and all values
-    // contained within it must dominate their uses, that all uses will
-    // eventually be removed (they are themselves dead).
-    if (!I.use_empty())
-      I.replaceAllUsesWith(UndefValue::get(I.getType()));
-    BB->getInstList().pop_back();
-  }
-  new UnreachableInst(BB->getContext(), BB);
-  assert(BB->getInstList().size() == 1 &&
-         isa<UnreachableInst>(BB->getTerminator()) &&
-         "The successor list of BB isn't empty before "
-         "applying corresponding DTU updates.");
-
-  if (DTU) {
     DTU->applyUpdates(Updates, /*ForceRemoveDuplicates*/ true);
-    DTU->deleteBB(BB);
-  } else {
-    BB->eraseFromParent(); // Zap the block!
-  }
+
+  for (BasicBlock *BB : BBs)
+    if (DTU)
+      DTU->deleteBB(BB);
+    else
+      BB->eraseFromParent();
 }
 
 void llvm::FoldSingleEntryPHINodes(BasicBlock *BB,
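
DeleteDeadBlock is now a thin wrapper over the batched DeleteDeadBlocks, which queues all DomTree edge deletions and applies them in a single applyUpdates call. A sketch of a caller, assuming the blocks were already proven unreachable:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    void removeUnreachableRegion(SmallVectorImpl<BasicBlock *> &DeadBlocks,
                                 DomTreeUpdater *DTU) {
      // Precondition (asserted above in debug builds): no duplicates, and
      // every predecessor of a dead block is itself in DeadBlocks.
      DeleteDeadBlocks(DeadBlocks, DTU); // DTU may be null.
    }
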
diff --git a/lib/Transforms/Utils/CMakeLists.txt b/lib/Transforms/Utils/CMakeLists.txt
index eb3bad7..cb3dc17 100644
--- a/lib/Transforms/Utils/CMakeLists.txt
+++ b/lib/Transforms/Utils/CMakeLists.txt
@@ -6,6 +6,7 @@
   BuildLibCalls.cpp
   BypassSlowDivision.cpp
   CallPromotionUtils.cpp
+  CanonicalizeAliases.cpp
   CloneFunction.cpp
   CloneModule.cpp
   CodeExtractor.cpp
diff --git a/lib/Transforms/Utils/CanonicalizeAliases.cpp b/lib/Transforms/Utils/CanonicalizeAliases.cpp
new file mode 100644
index 0000000..cf41fd2
--- /dev/null
+++ b/lib/Transforms/Utils/CanonicalizeAliases.cpp
@@ -0,0 +1,105 @@
+//===- CanonicalizeAliases.cpp - ThinLTO Support: Canonicalize Aliases ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Currently this file implements partial alias canonicalization, to
+// flatten chains of aliases (also done by GlobalOpt, but GlobalOpt does not
+// run for O0 compiles). E.g.
+//  @a = alias i8, i8 *@b
+//  @b = alias i8, i8 *@g
+//
+// will be converted to:
+//  @a = alias i8, i8 *@g  <-- @a is now an alias to base object @g
+//  @b = alias i8, i8 *@g
+//
+// Eventually this file will implement full alias canonicalization, so that
+// all aliasees are private anonymous values. E.g.
+//  @a = alias i8, i8 *@g
+//  @g = global i8 0
+//
+// will be converted to:
+//  @0 = private global
+//  @a = alias i8, i8* @0
+//  @g = alias i8, i8* @0
+//
+// This simplifies optimization and ThinLTO linking of the original symbols.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
+
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+
+using namespace llvm;
+
+namespace {
+
+static Constant *canonicalizeAlias(Constant *C, bool &Changed) {
+  if (auto *GA = dyn_cast<GlobalAlias>(C)) {
+    auto *NewAliasee = canonicalizeAlias(GA->getAliasee(), Changed);
+    if (NewAliasee != GA->getAliasee()) {
+      GA->setAliasee(NewAliasee);
+      Changed = true;
+    }
+    return NewAliasee;
+  }
+
+  auto *CE = dyn_cast<ConstantExpr>(C);
+  if (!CE)
+    return C;
+
+  std::vector<Constant *> Ops;
+  for (Use &U : CE->operands())
+    Ops.push_back(canonicalizeAlias(cast<Constant>(U), Changed));
+  return CE->getWithOperands(Ops);
+}
+
+/// Convert aliases to canonical form.
+static bool canonicalizeAliases(Module &M) {
+  bool Changed = false;
+  for (auto &GA : M.aliases())
+    canonicalizeAlias(&GA, Changed);
+  return Changed;
+}
+
+// Legacy pass that canonicalizes aliases.
+class CanonicalizeAliasesLegacyPass : public ModulePass {
+
+public:
+  /// Pass identification, replacement for typeid
+  static char ID;
+
+  /// Specify pass name for debug output
+  StringRef getPassName() const override { return "Canonicalize Aliases"; }
+
+  explicit CanonicalizeAliasesLegacyPass() : ModulePass(ID) {}
+
+  bool runOnModule(Module &M) override { return canonicalizeAliases(M); }
+};
+char CanonicalizeAliasesLegacyPass::ID = 0;
+
+} // anonymous namespace
+
+PreservedAnalyses CanonicalizeAliasesPass::run(Module &M,
+                                               ModuleAnalysisManager &AM) {
+  if (!canonicalizeAliases(M))
+    return PreservedAnalyses::all();
+
+  return PreservedAnalyses::none();
+}
+
+INITIALIZE_PASS_BEGIN(CanonicalizeAliasesLegacyPass, "canonicalize-aliases",
+                      "Canonicalize aliases", false, false)
+INITIALIZE_PASS_END(CanonicalizeAliasesLegacyPass, "canonicalize-aliases",
+                    "Canonicalize aliases", false, false)
+
+namespace llvm {
+ModulePass *createCanonicalizeAliasesPass() {
+  return new CanonicalizeAliasesLegacyPass();
+}
+} // namespace llvm
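
A sketch of scheduling the new pass under either pass manager, using the entry points defined above (the legacy factory is assumed to be declared in the usual Transforms umbrella header):

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
    using namespace llvm;

    void scheduleCanonicalizeAliases(legacy::PassManager &LegacyPM,
                                     ModulePassManager &MPM) {
      LegacyPM.add(createCanonicalizeAliasesPass()); // legacy pass manager
      MPM.addPass(CanonicalizeAliasesPass());        // new pass manager
    }
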
diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp
index a6b0110..25d4ae5 100644
--- a/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/lib/Transforms/Utils/CodeExtractor.cpp
@@ -332,8 +332,7 @@
       default: {
         IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
         if (IntrInst) {
-          if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
-              IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
+          if (IntrInst->isLifetimeStartOrEnd())
             break;
           return false;
         }
@@ -884,9 +883,10 @@
 /// emitCallAndSwitchStatement - This method sets up the caller side by adding
 /// the call instruction, splitting any PHI nodes in the header block as
 /// necessary.
-void CodeExtractor::
-emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
-                           ValueSet &inputs, ValueSet &outputs) {
+CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
+                                                    BasicBlock *codeReplacer,
+                                                    ValueSet &inputs,
+                                                    ValueSet &outputs) {
   // Emit a call to the new function, passing in: *pointer to struct (if
   // aggregating parameters), or plain inputs and allocated memory for outputs
   std::vector<Value *> params, StructValues, ReloadOutputs, Reloads;
@@ -894,6 +894,7 @@
   Module *M = newFunction->getParent();
   LLVMContext &Context = M->getContext();
   const DataLayout &DL = M->getDataLayout();
+  CallInst *call = nullptr;
 
   // Add inputs as params, or to be filled into the struct
   for (Value *input : inputs)
@@ -944,8 +945,8 @@
   }
 
   // Emit the call to the function
-  CallInst *call = CallInst::Create(newFunction, params,
-                                    NumExitBlocks > 1 ? "targetBlock" : "");
+  call = CallInst::Create(newFunction, params,
+                          NumExitBlocks > 1 ? "targetBlock" : "");
   // Add debug location to the new call, if the original function has debug
   // info. In that case, the terminator of the entry block of the extracted
   // function contains the first debug location of the extracted function,
@@ -1117,6 +1118,8 @@
     TheSwitch->removeCase(SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
     break;
   }
+
+  return call;
 }
 
 void CodeExtractor::moveCodeToFunction(Function *newFunction) {
@@ -1178,6 +1181,71 @@
       MDBuilder(TI->getContext()).createBranchWeights(BranchWeights));
 }
 
+/// Scan the extraction region for lifetime markers which reference inputs.
+/// Erase these markers. Return the inputs which were referenced.
+///
+/// The extraction region is defined by a set of blocks (\p Blocks), and a set
+/// of allocas which will be moved from the caller function into the extracted
+/// function (\p SunkAllocas).
+static SetVector<Value *>
+eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
+                             const SetVector<Value *> &SunkAllocas) {
+  SetVector<Value *> InputObjectsWithLifetime;
+  for (BasicBlock *BB : Blocks) {
+    for (auto It = BB->begin(), End = BB->end(); It != End;) {
+      auto *II = dyn_cast<IntrinsicInst>(&*It);
+      ++It;
+      if (!II || !II->isLifetimeStartOrEnd())
+        continue;
+
+      // Get the memory operand of the lifetime marker. If the underlying
+      // object is a sunk alloca, or is otherwise defined in the extraction
+      // region, the lifetime marker must not be erased.
+      Value *Mem = II->getOperand(1)->stripInBoundsOffsets();
+      if (SunkAllocas.count(Mem) || definedInRegion(Blocks, Mem))
+        continue;
+
+      InputObjectsWithLifetime.insert(Mem);
+      II->eraseFromParent();
+    }
+  }
+  return InputObjectsWithLifetime;
+}
+
+/// Insert lifetime start/end markers surrounding the call to the new function
+/// for objects defined in the caller.
+static void insertLifetimeMarkersSurroundingCall(
+    Module *M, const SetVector<Value *> &InputObjectsWithLifetime,
+    CallInst *TheCall) {
+  if (InputObjectsWithLifetime.empty())
+    return;
+
+  LLVMContext &Ctx = M->getContext();
+  auto Int8PtrTy = Type::getInt8PtrTy(Ctx);
+  auto NegativeOne = ConstantInt::getSigned(Type::getInt64Ty(Ctx), -1);
+  auto LifetimeStartFn = llvm::Intrinsic::getDeclaration(
+      M, llvm::Intrinsic::lifetime_start, Int8PtrTy);
+  auto LifetimeEndFn = llvm::Intrinsic::getDeclaration(
+      M, llvm::Intrinsic::lifetime_end, Int8PtrTy);
+  for (Value *Mem : InputObjectsWithLifetime) {
+    assert((!isa<Instruction>(Mem) ||
+            cast<Instruction>(Mem)->getFunction() == TheCall->getFunction()) &&
+           "Input memory not defined in original function");
+    Value *MemAsI8Ptr = nullptr;
+    if (Mem->getType() == Int8PtrTy)
+      MemAsI8Ptr = Mem;
+    else
+      MemAsI8Ptr =
+          CastInst::CreatePointerCast(Mem, Int8PtrTy, "lt.cast", TheCall);
+
+    auto StartMarker =
+        CallInst::Create(LifetimeStartFn, {NegativeOne, MemAsI8Ptr});
+    StartMarker->insertBefore(TheCall);
+    auto EndMarker = CallInst::Create(LifetimeEndFn, {NegativeOne, MemAsI8Ptr});
+    EndMarker->insertAfter(TheCall);
+  }
+}
+
 Function *CodeExtractor::extractCodeRegion() {
   if (!isEligible())
     return nullptr;
@@ -1292,11 +1360,17 @@
       cast<Instruction>(II)->moveBefore(TI);
   }
 
+  // Collect objects which are inputs to the extraction region and also
+  // referenced by lifetime start/end markers within it. The effects of these
+  // markers must be replicated in the calling function to prevent the stack
+  // coloring pass from merging slots which store input objects.
+  ValueSet InputObjectsWithLifetime =
+      eraseLifetimeMarkersOnInputs(Blocks, SinkingCands);
+
   // Construct new function based on inputs/outputs & add allocas for all defs.
-  Function *newFunction = constructFunction(inputs, outputs, header,
-                                            newFuncRoot,
-                                            codeReplacer, oldFunction,
-                                            oldFunction->getParent());
+  Function *newFunction =
+      constructFunction(inputs, outputs, header, newFuncRoot, codeReplacer,
+                        oldFunction, oldFunction->getParent());
 
   // Update the entry count of the function.
   if (BFI) {
@@ -1307,10 +1381,16 @@
     BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency());
   }
 
-  emitCallAndSwitchStatement(newFunction, codeReplacer, inputs, outputs);
+  CallInst *TheCall =
+      emitCallAndSwitchStatement(newFunction, codeReplacer, inputs, outputs);
 
   moveCodeToFunction(newFunction);
 
+  // Replicate the effects of any lifetime start/end markers which referenced
+  // input objects in the extraction region by placing markers around the call.
+  insertLifetimeMarkersSurroundingCall(oldFunction->getParent(),
+                                       InputObjectsWithLifetime, TheCall);
+
   // Propagate personality info to the new function if there is one.
   if (oldFunction->hasPersonalityFn())
     newFunction->setPersonalityFn(oldFunction->getPersonalityFn());
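
The net effect of eraseLifetimeMarkersOnInputs plus insertLifetimeMarkersSurroundingCall is that markers for input objects move from inside the extracted region to around the call. A sketch of the bracketing for a single input, mirroring the helper above (names are illustrative):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void bracketWithLifetime(Module *M, Value *Mem, CallInst *TheCall) {
      LLVMContext &Ctx = M->getContext();
      auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
      // -1 encodes an unknown object size for the lifetime intrinsics.
      auto *NegOne = ConstantInt::getSigned(Type::getInt64Ty(Ctx), -1);
      IRBuilder<> Before(TheCall);
      Value *Cast = Before.CreatePointerCast(Mem, Int8PtrTy, "lt.cast");
      Before.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, Int8PtrTy),
          {NegOne, Cast});
      IRBuilder<> After(TheCall->getNextNode());
      After.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, Int8PtrTy),
          {NegOne, Cast});
    }
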
diff --git a/lib/Transforms/Utils/Evaluator.cpp b/lib/Transforms/Utils/Evaluator.cpp
index 992c8b9..e875cd6 100644
--- a/lib/Transforms/Utils/Evaluator.cpp
+++ b/lib/Transforms/Utils/Evaluator.cpp
@@ -483,8 +483,7 @@
           }
         }
 
-        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
-            II->getIntrinsicID() == Intrinsic::lifetime_end) {
+        if (II->isLifetimeStartOrEnd()) {
           LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
           ++CurInst;
           continue;
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index bda2ee2..623fe91 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -31,6 +31,7 @@
 #include "llvm/Analysis/ProfileSummaryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
@@ -770,14 +771,16 @@
   UnwindDest->removePredecessor(InvokeBB);
 }
 
-/// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
-/// that metadata should be propagated to all memory-accessing cloned
-/// instructions.
+/// When inlining a call site that has !llvm.mem.parallel_loop_access or
+/// llvm.access.group metadata, that metadata should be propagated to all
+/// memory-accessing cloned instructions.
 static void PropagateParallelLoopAccessMetadata(CallSite CS,
                                                 ValueToValueMapTy &VMap) {
   MDNode *M =
     CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
-  if (!M)
+  MDNode *CallAccessGroup =
+      CS.getInstruction()->getMetadata(LLVMContext::MD_access_group);
+  if (!M && !CallAccessGroup)
     return;
 
   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
@@ -789,11 +792,20 @@
     if (!NI)
       continue;
 
-    if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
-      M = MDNode::concatenate(PM, M);
-      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
-    } else if (NI->mayReadOrWriteMemory()) {
-      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
+    if (M) {
+      if (MDNode *PM =
+              NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
+        M = MDNode::concatenate(PM, M);
+        NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
+      } else if (NI->mayReadOrWriteMemory()) {
+        NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
+      }
+    }
+
+    if (NI->mayReadOrWriteMemory()) {
+      MDNode *UnitedAccGroups = uniteAccessGroups(
+          NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
+      NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
     }
   }
 }
@@ -987,22 +999,22 @@
         PtrArgs.push_back(CXI->getPointerOperand());
       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
         PtrArgs.push_back(RMWI->getPointerOperand());
-      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
+      else if (const auto *Call = dyn_cast<CallBase>(I)) {
         // If we know that the call does not access memory, then we'll still
         // know that about the inlined clone of this call site, and we don't
         // need to add metadata.
-        if (ICS.doesNotAccessMemory())
+        if (Call->doesNotAccessMemory())
           continue;
 
         IsFuncCall = true;
         if (CalleeAAR) {
-          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
+          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
           if (MRB == FMRB_OnlyAccessesArgumentPointees ||
               MRB == FMRB_OnlyReadsArgumentPointees)
             IsArgMemOnlyCall = true;
         }
 
-        for (Value *Arg : ICS.args()) {
+        for (Value *Arg : Call->args()) {
           // We need to check the underlying objects of all arguments, not just
           // the pointer arguments, because we might be passing pointers as
           // integers, etc.
@@ -1308,16 +1320,10 @@
 
 // Check whether this Value is used by a lifetime intrinsic.
 static bool isUsedByLifetimeMarker(Value *V) {
-  for (User *U : V->users()) {
-    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
-      switch (II->getIntrinsicID()) {
-      default: break;
-      case Intrinsic::lifetime_start:
-      case Intrinsic::lifetime_end:
+  for (User *U : V->users())
+    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
+      if (II->isLifetimeStartOrEnd())
         return true;
-      }
-    }
-  }
   return false;
 }
 
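
The inliner unites access groups (a cloned memory operation belongs to every group the original instruction or the call site belonged to), while combineMetadata in Local.cpp below intersects them (membership must hold on both merged instructions). Both helpers come from the newly included VectorUtils.h; a sketch of the union side:

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    void mergeAccessGroupsOnto(Instruction *NI, MDNode *CallAccessGroup) {
      if (!NI->mayReadOrWriteMemory())
        return;
      // Union semantics: keep every group from either source; either
      // argument may be null, meaning "no groups".
      MDNode *United = uniteAccessGroups(
          NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
      NI->setMetadata(LLVMContext::MD_access_group, United);
    }
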
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index fa0151d..499e611 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -34,6 +34,7 @@
 #include "llvm/Analysis/MemorySSAUpdater.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
@@ -392,8 +393,7 @@
       return true;
 
     // Lifetime intrinsics are dead when their right-hand is undef.
-    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
-        II->getIntrinsicID() == Intrinsic::lifetime_end)
+    if (II->isLifetimeStartOrEnd())
       return isa<UndefValue>(II->getArgOperand(1));
 
     // Assumptions are dead if their condition is trivially true.  Guards on
@@ -1297,33 +1297,6 @@
     return;
   }
 
-  // If an argument is zero extended then use argument directly. The ZExt
-  // may be zapped by an optimization pass in future.
-  Argument *ExtendedArg = nullptr;
-  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
-    ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
-  if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
-    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
-  if (ExtendedArg) {
-    // If this DII was already describing only a fragment of a variable, ensure
-    // that fragment is appropriately narrowed here.
-    // But if a fragment wasn't used, describe the value as the original
-    // argument (rather than the zext or sext) so that it remains described even
-    // if the sext/zext is optimized away. This widens the variable description,
-    // leaving it up to the consumer to know how the smaller value may be
-    // represented in a larger register.
-    if (auto Fragment = DIExpr->getFragmentInfo()) {
-      unsigned FragmentOffset = Fragment->OffsetInBits;
-      SmallVector<uint64_t, 3> Ops(DIExpr->elements_begin(),
-                                   DIExpr->elements_end() - 3);
-      Ops.push_back(dwarf::DW_OP_LLVM_fragment);
-      Ops.push_back(FragmentOffset);
-      const DataLayout &DL = DII->getModule()->getDataLayout();
-      Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
-      DIExpr = Builder.createExpression(Ops);
-    }
-    DV = ExtendedArg;
-  }
   if (!LdStHasDebugValue(DIVar, DIExpr, SI))
     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
                                     SI);
@@ -2324,6 +2297,10 @@
       case LLVMContext::MD_mem_parallel_loop_access:
         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
         break;
+      case LLVMContext::MD_access_group:
+        K->setMetadata(LLVMContext::MD_access_group,
+                       intersectAccessGroups(K, J));
+        break;
       case LLVMContext::MD_range:
 
         // If K does move, use most generic range. Otherwise keep the range of
@@ -2380,7 +2357,8 @@
       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
       LLVMContext::MD_dereferenceable,
-      LLVMContext::MD_dereferenceable_or_null};
+      LLVMContext::MD_dereferenceable_or_null,
+      LLVMContext::MD_access_group};
   combineMetadata(K, J, KnownIDs, KDominatesJ);
 }
 
@@ -2411,7 +2389,8 @@
       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
       LLVMContext::MD_noalias,         LLVMContext::MD_range,
       LLVMContext::MD_fpmath,          LLVMContext::MD_invariant_load,
-      LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull};
+      LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
+      LLVMContext::MD_access_group};
   combineMetadata(ReplInst, I, KnownIDs, false);
 }
 
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index efd8b92..da7ed2b 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -54,10 +54,10 @@
 static cl::opt<bool>
 UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden,
                     cl::desc("Verify domtree after unrolling"),
-#ifdef NDEBUG
-    cl::init(false)
-#else
+#ifdef EXPENSIVE_CHECKS
     cl::init(true)
+#else
+    cl::init(false)
 #endif
                     );
 
@@ -598,8 +598,15 @@
     for (BasicBlock *BB : L->getBlocks())
       for (Instruction &I : *BB)
         if (!isa<DbgInfoIntrinsic>(&I))
-          if (const DILocation *DIL = I.getDebugLoc())
-            I.setDebugLoc(DIL->cloneWithDuplicationFactor(Count));
+          if (const DILocation *DIL = I.getDebugLoc()) {
+            auto NewDIL = DIL->cloneWithDuplicationFactor(Count);
+            if (NewDIL)
+              I.setDebugLoc(NewDIL.getValue());
+            else
+              LLVM_DEBUG(dbgs()
+                         << "Failed to create new discriminator: "
+                         << DIL->getFilename() << " Line: " << DIL->getLine());
+          }
 
   for (unsigned It = 1; It != Count; ++It) {
     std::vector<BasicBlock*> NewBlocks;
diff --git a/lib/Transforms/Utils/LoopUnrollAndJam.cpp b/lib/Transforms/Utils/LoopUnrollAndJam.cpp
index b5d80f6..e267626 100644
--- a/lib/Transforms/Utils/LoopUnrollAndJam.cpp
+++ b/lib/Transforms/Utils/LoopUnrollAndJam.cpp
@@ -300,8 +300,15 @@
     for (BasicBlock *BB : L->getBlocks())
       for (Instruction &I : *BB)
         if (!isa<DbgInfoIntrinsic>(&I))
-          if (const DILocation *DIL = I.getDebugLoc())
-            I.setDebugLoc(DIL->cloneWithDuplicationFactor(Count));
+          if (const DILocation *DIL = I.getDebugLoc()) {
+            auto NewDIL = DIL->cloneWithDuplicationFactor(Count);
+            if (NewDIL)
+              I.setDebugLoc(NewDIL.getValue());
+            else
+              LLVM_DEBUG(dbgs()
+                         << "Failed to create new discriminator: "
+                         << DIL->getFilename() << " Line: " << DIL->getLine());
+          }
 
   // Copy all blocks
   for (unsigned It = 1; It != Count; ++It) {
diff --git a/lib/Transforms/Utils/LoopUnrollPeel.cpp b/lib/Transforms/Utils/LoopUnrollPeel.cpp
index 86ac1a7..151a285 100644
--- a/lib/Transforms/Utils/LoopUnrollPeel.cpp
+++ b/lib/Transforms/Utils/LoopUnrollPeel.cpp
@@ -615,7 +615,9 @@
       // the original loop body.
       if (Iter == 0)
         DT->changeImmediateDominator(Exit, cast<BasicBlock>(LVMap[Latch]));
+#ifdef EXPENSIVE_CHECKS
       assert(DT->verify(DominatorTree::VerificationLevel::Fast));
+#endif
     }
 
     auto *LatchBRCopy = cast<BranchInst>(VMap[LatchBR]);
diff --git a/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 3606ec4..00d2fd2 100644
--- a/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -70,6 +70,17 @@
                           BasicBlock *PreHeader, BasicBlock *NewPreHeader,
                           ValueToValueMapTy &VMap, DominatorTree *DT,
                           LoopInfo *LI, bool PreserveLCSSA) {
+  // Loop structure should be the following:
+  // Preheader
+  //  PrologHeader
+  //  ...
+  //  PrologLatch
+  //  PrologExit
+  //   NewPreheader
+  //    Header
+  //    ...
+  //    Latch
+  //      LatchExit
   BasicBlock *Latch = L->getLoopLatch();
   assert(Latch && "Loop must have a latch");
   BasicBlock *PrologLatch = cast<BasicBlock>(VMap[Latch]);
@@ -83,14 +94,21 @@
     for (PHINode &PN : Succ->phis()) {
       // Add a new PHI node to the prolog end block and add the
       // appropriate incoming values.
+      // TODO: This code assumes that the PrologExit (or the LatchExit block
+      // for the prolog loop) contains only one predecessor from the loop,
+      // i.e. the PrologLatch. When supporting loops with multiple exiting
+      // blocks, two or more blocks in the original loop can have the
+      // LatchExit as their target.
       PHINode *NewPN = PHINode::Create(PN.getType(), 2, PN.getName() + ".unr",
                                        PrologExit->getFirstNonPHI());
       // Adding a value to the new PHI node from the original loop preheader.
       // This is the value that skips all the prolog code.
       if (L->contains(&PN)) {
+        // Succ is loop header.
         NewPN->addIncoming(PN.getIncomingValueForBlock(NewPreHeader),
                            PreHeader);
       } else {
+        // Succ is LatchExit.
         NewPN->addIncoming(UndefValue::get(PN.getType()), PreHeader);
       }
 
@@ -787,10 +805,7 @@
   // Now the loop blocks are cloned and the other exiting blocks from the
   // remainder are connected to the original Loop's exit blocks. The remaining
   // work is to update the phi nodes in the original loop, and take in the
-  // values from the cloned region. Also update the dominator info for
-  // OtherExits and their immediate successors, since we have new edges into
-  // OtherExits.
-  SmallPtrSet<BasicBlock*, 8> ImmediateSuccessorsOfExitBlocks;
+  // values from the cloned region.
   for (auto *BB : OtherExits) {
    for (auto &II : *BB) {
 
@@ -825,27 +840,30 @@
              "Breaks the definition of dedicated exits!");
     }
 #endif
-   // Update the dominator info because the immediate dominator is no longer the
-   // header of the original Loop. BB has edges both from L and remainder code.
-   // Since the preheader determines which loop is run (L or directly jump to
-   // the remainder code), we set the immediate dominator as the preheader.
-   if (DT) {
-     DT->changeImmediateDominator(BB, PreHeader);
-     // Also update the IDom for immediate successors of BB.  If the current
-     // IDom is the header, update the IDom to be the preheader because that is
-     // the nearest common dominator of all predecessors of SuccBB.  We need to
-     // check for IDom being the header because successors of exit blocks can
-     // have edges from outside the loop, and we should not incorrectly update
-     // the IDom in that case.
-     for (BasicBlock *SuccBB: successors(BB))
-       if (ImmediateSuccessorsOfExitBlocks.insert(SuccBB).second) {
-         if (DT->getNode(SuccBB)->getIDom()->getBlock() == Header) {
-           assert(!SuccBB->getSinglePredecessor() &&
-                  "BB should be the IDom then!");
-           DT->changeImmediateDominator(SuccBB, PreHeader);
-         }
-       }
+  }
+
+  // Update the immediate dominator of the exit blocks and blocks that are
+  // reachable from the exit blocks. This is needed because we now have paths
+  // from both the original loop and the remainder code reaching the exit
+  // blocks. While the IDom of these exit blocks was inside the original
+  // loop, now the IDom is the preheader (which decides whether the original
+  // loop or remainder code should run).
+  if (DT && !L->getExitingBlock()) {
+    SmallVector<BasicBlock *, 16> ChildrenToUpdate;
+    // NB! We have to examine the dom children of all loop blocks, not just
+    // those which are the IDom of the exit blocks. This is because blocks
+    // reachable from the exit blocks can have their IDom as the nearest common
+    // dominator of the exit blocks.
+    for (auto *BB : L->blocks()) {
+      auto *DomNodeBB = DT->getNode(BB);
+      for (auto *DomChild : DomNodeBB->getChildren()) {
+        auto *DomChildBB = DomChild->getBlock();
+        if (!L->contains(LI->getLoopFor(DomChildBB)))
+          ChildrenToUpdate.push_back(DomChildBB);
+      }
     }
+    for (auto *BB : ChildrenToUpdate)
+      DT->changeImmediateDominator(BB, PreHeader);
   }
 
   // Loop structure should be the following:
@@ -909,6 +927,12 @@
   // of its parent loops, so the Scalar Evolution pass needs to be run again.
   SE->forgetTopmostLoop(L);
 
+  // Verify that the Dom Tree is correct.
+#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
+  if (DT)
+    assert(DT->verify(DominatorTree::VerificationLevel::Full));
+#endif
+
   // Canonicalize to LoopSimplifyForm both original and remainder loops. We
   // cannot rely on the LoopUnrollPass to do this because it only does
   // canonicalization for parent/subloops and not the sibling loops.
diff --git a/lib/Transforms/Utils/LoopUtils.cpp b/lib/Transforms/Utils/LoopUtils.cpp
index 1d4f07f..a93d1ae 100644
--- a/lib/Transforms/Utils/LoopUtils.cpp
+++ b/lib/Transforms/Utils/LoopUtils.cpp
@@ -187,44 +187,14 @@
   INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
 }
 
-static Optional<MDNode *> findOptionMDForLoopID(MDNode *LoopID,
-                                                StringRef Name) {
-  // Return none if LoopID is false.
-  if (!LoopID)
-    return None;
-
-  // First operand should refer to the loop id itself.
-  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
-  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
-
-  // Iterate over LoopID operands and look for MDString Metadata
-  for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
-    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
-    if (!MD)
-      continue;
-    MDString *S = dyn_cast<MDString>(MD->getOperand(0));
-    if (!S)
-      continue;
-    // Return true if MDString holds expected MetaData.
-    if (Name.equals(S->getString()))
-      return MD;
-  }
-  return None;
-}
-
-static Optional<MDNode *> findOptionMDForLoop(const Loop *TheLoop,
-                                              StringRef Name) {
-  return findOptionMDForLoopID(TheLoop->getLoopID(), Name);
-}
-
 /// Find string metadata for loop
 ///
 /// If it has a value (e.g. {"llvm.distribute", 1}) return the value as an
 /// operand or null otherwise.  If the string metadata is not found return
 /// Optional's not-a-value.
-Optional<const MDOperand *> llvm::findStringMetadataForLoop(Loop *TheLoop,
+Optional<const MDOperand *> llvm::findStringMetadataForLoop(const Loop *TheLoop,
                                                             StringRef Name) {
-  auto MD = findOptionMDForLoop(TheLoop, Name).getValueOr(nullptr);
+  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
   if (!MD)
     return None;
   switch (MD->getNumOperands()) {
@@ -239,19 +209,15 @@
 
 static Optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
                                                    StringRef Name) {
-  Optional<MDNode *> MD = findOptionMDForLoop(TheLoop, Name);
-  if (!MD.hasValue())
+  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
+  if (!MD)
     return None;
-  MDNode *OptionNode = MD.getValue();
-  if (OptionNode == nullptr)
-    return None;
-  switch (OptionNode->getNumOperands()) {
+  switch (MD->getNumOperands()) {
   case 1:
     // When the value is absent it is interpreted as 'attribute set'.
     return true;
   case 2:
-    return mdconst::extract_or_null<ConstantInt>(
-        OptionNode->getOperand(1).get());
+    return mdconst::extract_or_null<ConstantInt>(MD->getOperand(1).get());
   }
   llvm_unreachable("unexpected number of options");
 }
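
Clients do not call this static helper directly; at this revision the public wrapper getBooleanLoopAttribute in LoopUtils.h funnels into it. A minimal sketch of an explicit-disable check (Loop *L assumed):

    if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable"))
      return false; // the user pinned this loop; leave it alone
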
@@ -325,8 +291,7 @@
 
   bool HasAnyFollowup = false;
   for (StringRef OptionName : FollowupOptions) {
-    MDNode *FollowupNode =
-        findOptionMDForLoopID(OrigLoopID, OptionName).getValueOr(nullptr);
+    MDNode *FollowupNode = findOptionMDForLoopID(OrigLoopID, OptionName);
     if (!FollowupNode)
       continue;
 
@@ -963,3 +928,39 @@
       VecOp->andIRFlags(V);
   }
 }
+
+bool llvm::isKnownNegativeInLoop(const SCEV *S, const Loop *L,
+                                 ScalarEvolution &SE) {
+  const SCEV *Zero = SE.getZero(S->getType());
+  return SE.isAvailableAtLoopEntry(S, L) &&
+         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, S, Zero);
+}
+
+bool llvm::isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
+                                    ScalarEvolution &SE) {
+  const SCEV *Zero = SE.getZero(S->getType());
+  return SE.isAvailableAtLoopEntry(S, L) &&
+         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGE, S, Zero);
+}
+
+bool llvm::cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+                             bool Signed) {
+  unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
+  APInt Min = Signed ? APInt::getSignedMinValue(BitWidth) :
+    APInt::getMinValue(BitWidth);
+  auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+  return SE.isAvailableAtLoopEntry(S, L) &&
+         SE.isLoopEntryGuardedByCond(L, Predicate, S,
+                                     SE.getConstant(Min));
+}
+
+bool llvm::cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+                             bool Signed) {
+  unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
+  APInt Max = Signed ? APInt::getSignedMaxValue(BitWidth) :
+    APInt::getMaxValue(BitWidth);
+  auto Predicate = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+  return SE.isAvailableAtLoopEntry(S, L) &&
+         SE.isLoopEntryGuardedByCond(L, Predicate, S,
+                                     SE.getConstant(Max));
+}
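
The four new predicates are thin wrappers over loop-entry guard queries. A minimal caller sketch, assuming pass state (Loop *L, ScalarEvolution &SE) and some IR Value *Bound:

    const SCEV *BoundS = SE.getSCEV(Bound);
    if (isKnownNonNegativeInLoop(BoundS, L, SE) &&
        cannotBeMaxInLoop(BoundS, L, SE, /*Signed=*/true)) {
      // On entry, BoundS lies in [0, SINT_MAX), so adding one cannot
      // wrap in the signed sense.
      const SCEV *BoundPlusOne =
          SE.getAddExpr(BoundS, SE.getOne(BoundS->getType()));
      (void)BoundPlusOne;
    }
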
diff --git a/lib/Transforms/Utils/ModuleUtils.cpp b/lib/Transforms/Utils/ModuleUtils.cpp
index ba4b7f3..ae5e72e 100644
--- a/lib/Transforms/Utils/ModuleUtils.cpp
+++ b/lib/Transforms/Utils/ModuleUtils.cpp
@@ -174,6 +174,49 @@
   return std::make_pair(Ctor, InitFunction);
 }
 
+std::pair<Function *, Function *>
+llvm::getOrCreateSanitizerCtorAndInitFunctions(
+    Module &M, StringRef CtorName, StringRef InitName,
+    ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+    function_ref<void(Function *, Function *)> FunctionsCreatedCallback,
+    StringRef VersionCheckName) {
+  assert(!CtorName.empty() && "Expected ctor function name");
+
+  if (Function *Ctor = M.getFunction(CtorName))
+    // FIXME: Sink this logic into the module, similar to the handling of
+    // globals. This will make moving to a concurrent model much easier.
+    if (Ctor->arg_size() == 0 ||
+        Ctor->getReturnType() == Type::getVoidTy(M.getContext()))
+      return {Ctor, declareSanitizerInitFunction(M, InitName, InitArgTypes)};
+
+  Function *Ctor, *InitFunction;
+  std::tie(Ctor, InitFunction) = llvm::createSanitizerCtorAndInitFunctions(
+      M, CtorName, InitName, InitArgTypes, InitArgs, VersionCheckName);
+  FunctionsCreatedCallback(Ctor, InitFunction);
+  return std::make_pair(Ctor, InitFunction);
+}
+
+Function *llvm::getOrCreateInitFunction(Module &M, StringRef Name) {
+  assert(!Name.empty() && "Expected init function name");
+  if (Function *F = M.getFunction(Name)) {
+    if (F->arg_size() != 0 ||
+        F->getReturnType() != Type::getVoidTy(M.getContext())) {
+      std::string Err;
+      raw_string_ostream Stream(Err);
+      Stream << "Sanitizer interface function defined with wrong type: " << *F;
+      report_fatal_error(Stream.str());
+    }
+    return F;
+  }
+  Function *F = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      Name, AttributeList(), Type::getVoidTy(M.getContext())));
+  F->setLinkage(Function::ExternalLinkage);
+
+  appendToGlobalCtors(M, F, 0);
+
+  return F;
+}
+
 void llvm::filterDeadComdatFunctions(
     Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions) {
   // Build a map from the comdat to the number of entries in that comdat we
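
The callback overload above exists so an instrumentation pass can run registration logic only when the ctor/init pair is created for the first time. A hedged usage sketch (hypothetical pass; M is the current Module and the two symbol names are illustrative, not real runtime entry points):

    auto CtorAndInit = getOrCreateSanitizerCtorAndInitFunctions(
        M, "mysan.module_ctor", "__mysan_init",
        /*InitArgTypes=*/{}, /*InitArgs=*/{},
        [&](Function *NewCtor, Function *) {
          // Runs only when the pair is newly created, so the ctor is
          // registered in llvm.global_ctors exactly once per module.
          appendToGlobalCtors(M, NewCtor, /*Priority=*/0);
        });
    Function *Ctor = CtorAndInit.first;
    (void)Ctor;
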
diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index a53083c..91e4f42 100644
--- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -82,8 +82,7 @@
       if (SI->isVolatile())
         return false;
     } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
-      if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
-          II->getIntrinsicID() != Intrinsic::lifetime_end)
+      if (!II->isLifetimeStartOrEnd())
         return false;
     } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
       if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index b98f2ff..03b7395 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1321,7 +1321,8 @@
                              LLVMContext::MD_align,
                              LLVMContext::MD_dereferenceable,
                              LLVMContext::MD_dereferenceable_or_null,
-                             LLVMContext::MD_mem_parallel_loop_access};
+                             LLVMContext::MD_mem_parallel_loop_access,
+                             LLVMContext::MD_access_group};
       combineMetadata(I1, I2, KnownIDs, true);
 
       // I1 and I2 are being combined into a single instruction.  Its debug
@@ -1372,14 +1373,6 @@
     }
   }
 
-  // As the parent basic block terminator is a branch instruction which is
-  // removed at the end of the current transformation, use its previous
-  // non-debug instruction, as the reference insertion point, which will
-  // provide the debug location for generated select instructions. For BBs
-  // with only debug instructions, use an empty debug location.
-  Instruction *InsertPt =
-      BIParent->getTerminator()->getPrevNonDebugInstruction();
-
   // Okay, it is safe to hoist the terminator.
   Instruction *NT = I1->clone();
   BIParent->getInstList().insert(BI->getIterator(), NT);
@@ -1393,11 +1386,8 @@
   // it involves inlinable calls.
   NT->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc());
 
+  // PHIs created below will adopt NT's merged DebugLoc.
   IRBuilder<NoFolder> Builder(NT);
-  // If an earlier instruction in this BB had a location, adopt it, otherwise
-  // clear debug locations.
-  Builder.SetCurrentDebugLocation(InsertPt ? InsertPt->getDebugLoc()
-                                           : DebugLoc());
 
   // Hoisting one of the terminators from our successor is a great thing.
   // Unfortunately, the successors of the if/else blocks may have PHI nodes in
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index a50575b..1bb26ca 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -798,8 +798,11 @@
       Bitfield.setBit((unsigned char)C);
     Value *BitfieldC = B.getInt(Bitfield);
 
-    // First check that the bit field access is within bounds.
+    // Adjust width of "C" to the bitfield width, then mask off the high bits.
     Value *C = B.CreateZExtOrTrunc(CI->getArgOperand(1), BitfieldC->getType());
+    C = B.CreateAnd(C, B.getIntN(Width, 0xFF));
+
+    // First check that the bit field access is within bounds.
     Value *Bounds = B.CreateICmp(ICmpInst::ICMP_ULT, C, B.getIntN(Width, Width),
                                  "memchr.bounds");
 
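
The new AND is not cosmetic: C requires memchr to compare against its int argument converted to unsigned char, so only the low eight bits participate. A small host-side illustration of the contract the fold must preserve:

    #include <cassert>
    #include <cstring>
    int main() {
      const char Buf[] = "abc";
      // 0x161 converts to (unsigned char)0x61 == 'a'; both searches
      // must land on the same byte, which is what the 0xFF mask models.
      assert(std::memchr(Buf, 0x61, 3) == std::memchr(Buf, 0x161, 3));
    }
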
diff --git a/lib/Transforms/Utils/Utils.cpp b/lib/Transforms/Utils/Utils.cpp
index afd842f..95416de 100644
--- a/lib/Transforms/Utils/Utils.cpp
+++ b/lib/Transforms/Utils/Utils.cpp
@@ -26,6 +26,7 @@
 void llvm::initializeTransformUtils(PassRegistry &Registry) {
   initializeAddDiscriminatorsLegacyPassPass(Registry);
   initializeBreakCriticalEdgesPass(Registry);
+  initializeCanonicalizeAliasesLegacyPassPass(Registry);
   initializeInstNamerPass(Registry);
   initializeLCSSAWrapperPassPass(Registry);
   initializeLibCallsShrinkWrapLegacyPassPass(Registry);
diff --git a/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 36f1cbd..b44fe5a 100644
--- a/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -80,10 +80,11 @@
   return false;
 }
 
-LoopVectorizeHints::LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
+LoopVectorizeHints::LoopVectorizeHints(const Loop *L,
+                                       bool InterleaveOnlyWhenForced,
                                        OptimizationRemarkEmitter &ORE)
     : Width("vectorize.width", VectorizerParams::VectorizationFactor, HK_WIDTH),
-      Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
+      Interleave("interleave.count", InterleaveOnlyWhenForced, HK_UNROLL),
       Force("vectorize.enable", FK_Undefined, HK_FORCE),
       IsVectorized("isvectorized", 0, HK_ISVECTORIZED), TheLoop(L), ORE(ORE) {
   // Populate values with existing loop metadata.
@@ -98,19 +99,19 @@
     // consider the loop to have been already vectorized because there's
     // nothing more that we can do.
     IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
-  LLVM_DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
+  LLVM_DEBUG(if (InterleaveOnlyWhenForced && Interleave.Value == 1) dbgs()
              << "LV: Interleaving disabled by the pass manager\n");
 }
 
-bool LoopVectorizeHints::allowVectorization(Function *F, Loop *L,
-                                            bool AlwaysVectorize) const {
+bool LoopVectorizeHints::allowVectorization(
+    Function *F, Loop *L, bool VectorizeOnlyWhenForced) const {
   if (getForce() == LoopVectorizeHints::FK_Disabled) {
     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
     emitRemarkWithHints();
     return false;
   }
 
-  if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
+  if (VectorizeOnlyWhenForced && getForce() != LoopVectorizeHints::FK_Enabled) {
     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
     emitRemarkWithHints();
     return false;
@@ -713,10 +714,30 @@
           !isa<DbgInfoIntrinsic>(CI) &&
           !(CI->getCalledFunction() && TLI &&
             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
-        ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
-                  << "call instruction cannot be vectorized");
+        // If the call is a recognized math library call, it is likely that
+        // we can vectorize it given loosened floating-point constraints.
+        LibFunc Func;
+        bool IsMathLibCall =
+            TLI && CI->getCalledFunction() &&
+            CI->getType()->isFloatingPointTy() &&
+            TLI->getLibFunc(CI->getCalledFunction()->getName(), Func) &&
+            TLI->hasOptimizedCodeGen(Func);
+
+        if (IsMathLibCall) {
+          // TODO: Ideally, we should not use clang-specific language here,
+          // but it's hard to provide meaningful yet generic advice.
+          // Also, should this be guarded by allowExtraAnalysis() and/or be part
+          // of the returned info from isFunctionVectorizable()?
+          ORE->emit(createMissedAnalysis("CantVectorizeLibcall", CI)
+              << "library call cannot be vectorized. "
+                 "Try compiling with -fno-math-errno, -ffast-math, "
+                 "or similar flags");
+        } else {
+          ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
+                    << "call instruction cannot be vectorized");
+        }
         LLVM_DEBUG(
-            dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
+            dbgs() << "LV: Found a non-intrinsic callsite.\n");
         return false;
       }
 
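
An illustrative loop that would trip the new remark (sketch, not from the patch): built with errno-setting math, the default, the call below blocks vectorization and CantVectorizeLibcall suggests -fno-math-errno; with that flag the vectorizer can substitute a vector math routine where the target library provides one.

    #include <cmath>
    void scale(float *Out, const float *In, int N) {
      for (int I = 0; I < N; ++I)
        Out[I] = std::sin(In[I]); // floating-point library call in the loop
    }
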
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 0341cce..c45dee5 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -759,8 +759,15 @@
   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
     const DILocation *DIL = Inst->getDebugLoc();
     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
-        !isa<DbgInfoIntrinsic>(Inst))
-      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
+        !isa<DbgInfoIntrinsic>(Inst)) {
+      auto NewDIL = DIL->cloneWithDuplicationFactor(UF * VF);
+      if (NewDIL)
+        B.SetCurrentDebugLocation(NewDIL.getValue());
+      else
+        LLVM_DEBUG(dbgs()
+                   << "Failed to create new discriminator: "
+                   << DIL->getFilename() << " Line: " << DIL->getLine());
+    }
     else
       B.SetCurrentDebugLocation(DIL);
   } else
@@ -1359,7 +1366,8 @@
     return false;
 
   Function *Fn = OuterLp->getHeader()->getParent();
-  if (!Hints.allowVectorization(Fn, OuterLp, false /*AlwaysVectorize*/)) {
+  if (!Hints.allowVectorization(Fn, OuterLp,
+                                true /*VectorizeOnlyWhenForced*/)) {
     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
     return false;
   }
@@ -1415,10 +1423,11 @@
 
   LoopVectorizePass Impl;
 
-  explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
+  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
+                         bool VectorizeOnlyWhenForced = false)
       : FunctionPass(ID) {
-    Impl.DisableUnrolling = NoUnrolling;
-    Impl.AlwaysVectorize = AlwaysVectorize;
+    Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced;
+    Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced;
     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
   }
 
@@ -6022,8 +6031,9 @@
 
 namespace llvm {
 
-Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
-  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
+Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
+                              bool VectorizeOnlyWhenForced) {
+  return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
 }
 
 } // end namespace llvm
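
The rename flips parameter polarity but preserves the defaults, so existing pipelines behave identically. Mapping sketch (legacy pass manager PM assumed):

    //   before: createLoopVectorizePass(/*NoUnrolling=*/false,
    //                                   /*AlwaysVectorize=*/true);
    PM.add(createLoopVectorizePass(/*InterleaveOnlyWhenForced=*/false,
                                   /*VectorizeOnlyWhenForced=*/false));
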
@@ -7141,7 +7151,7 @@
                     << L->getHeader()->getParent()->getName() << "\" from "
                     << DebugLocStr << "\n");
 
-  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
+  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
 
   LLVM_DEBUG(
       dbgs() << "LV: Loop hints:"
@@ -7165,7 +7175,7 @@
   // less verbose reporting vectorized loops and unvectorized loops that may
   // benefit from vectorization, respectively.
 
-  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
+  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
     return false;
   }
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 87a8619..2e856a7 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -2164,7 +2164,7 @@
                 // extractelement/ext pair.
                 DeadCost -= TTI->getExtractWithExtendCost(
                     Ext->getOpcode(), Ext->getType(), VecTy, i);
-                // Add back the cost of s|zext which is subtracted seperately.
+                // Add back the cost of s|zext which is subtracted separately.
                 DeadCost += TTI->getCastInstrCost(
                     Ext->getOpcode(), Ext->getType(), E->getType(), Ext);
                 continue;
@@ -2536,13 +2536,13 @@
     // uses. However, we should not compute the cost of duplicate sequences.
     // For example, if we have a build vector (i.e., insertelement sequence)
     // that is used by more than one vector instruction, we only need to
-    // compute the cost of the insertelement instructions once. The redundent
+    // compute the cost of the insertelement instructions once. The redundant
     // instructions will be eliminated by CSE.
     //
     // We should consider not creating duplicate tree entries for gather
     // sequences, and instead add additional edges to the tree representing
     // their uses. Since such an approach results in fewer total entries,
-    // existing heuristics based on tree size may yeild different results.
+    // existing heuristics based on tree size may yield different results.
     //
     if (TE.NeedToGather &&
         std::any_of(std::next(VectorizableTree.begin(), I + 1),
@@ -4269,7 +4269,7 @@
     Worklist.push_back(I);
 
   // Traverse the expression tree in bottom-up order looking for loads. If we
-  // encounter an instruciton we don't yet handle, we give up.
+  // encounter an instruction we don't yet handle, we give up.
   auto MaxWidth = 0u;
   auto FoundUnknownInst = false;
   while (!Worklist.empty() && !FoundUnknownInst) {
diff --git a/projects/CMakeLists.txt b/projects/CMakeLists.txt
index 9afc30c..c98a882 100644
--- a/projects/CMakeLists.txt
+++ b/projects/CMakeLists.txt
@@ -31,6 +31,7 @@
     # dependent projects can see the target names of their dependencies.
     add_llvm_external_project(libunwind)
     add_llvm_external_project(libcxxabi)
+    add_llvm_external_project(pstl)
     add_llvm_external_project(libcxx)
   endif()
   if(NOT LLVM_BUILD_EXTERNAL_COMPILER_RT)
diff --git a/test/Analysis/BasicAA/128-bit-ptr.ll b/test/Analysis/BasicAA/128-bit-ptr.ll
new file mode 100644
index 0000000..59fc99d
--- /dev/null
+++ b/test/Analysis/BasicAA/128-bit-ptr.ll
@@ -0,0 +1,60 @@
+; This testcase consists of alias relations on 128-bit pointers that
+; should be completely resolvable by basicaa.
+
+; RUN: opt < %s -basicaa -aa-eval -print-no-aliases -print-may-aliases -print-must-aliases -disable-output 2>&1 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-i128:128:128-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128-p100:128:64:64-p101:128:64:64"
+
+
+; test0 is similar to SimpleCases.ll
+
+%T = type { i32, [10 x i8] }
+
+; CHECK:     Function: test0
+; CHECK-NOT:   MayAlias:
+define void @test0(%T addrspace(100)* %P) {
+  %A = getelementptr %T, %T addrspace(100)* %P, i64 0
+  %B = getelementptr %T, %T addrspace(100)* %P, i64 0, i32 0
+  %C = getelementptr %T, %T addrspace(100)* %P, i64 0, i32 1
+  %D = getelementptr %T, %T addrspace(100)* %P, i64 0, i32 1, i64 0
+  %E = getelementptr %T, %T addrspace(100)* %P, i64 0, i32 1, i64 5
+  ret void
+}
+
+; test1 checks that >64 bits of index can be considered.
+; If BasicAA is truncating the arithmetic, it will conclude
+; that %A and %B must alias when in fact they must not.
+
+; CHECK:     Function: test1
+; CHECK-NOT:   MustAlias:
+; CHECK:       NoAlias:
+; CHECK-SAME:  %A
+; CHECK-SAME:  %B
+define void @test1(double addrspace(100)* %P, i128 %i) {
+  ; 1180591620717411303424 is 2**70
+  ;  590295810358705651712 is 2**69
+  %i70 = add i128 %i, 1180591620717411303424
+  %i69 = add i128 %i, 590295810358705651712
+  %A = getelementptr double, double addrspace(100)* %P, i128 %i70
+  %B = getelementptr double, double addrspace(100)* %P, i128 %i69
+  ret void
+}
+
+; test2 checks that >64 bits of index can be considered
+; and computes the same address in two ways to ensure that
+; they are considered equivalent.
+
+; CHECK: Function: test2
+; CHECK: MustAlias:
+; CHECK-SAME: %A
+; CHECK-SAME: %C
+define void @test2(double addrspace(100)* %P, i128 %i) {
+  ; 1180591620717411303424 is 2**70
+  ;  590295810358705651712 is 2**69
+  %i70 = add i128 %i, 1180591620717411303424
+  %i69 = add i128 %i, 590295810358705651712
+  %j70 = add i128 %i69, 590295810358705651712
+  %A = getelementptr double, double addrspace(100)* %P, i128 %i70
+  %C = getelementptr double, double addrspace(100)* %P, i128 %j70
+  ret void
+}
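
Why 2**70 and 2**69: both offsets vanish modulo 2**64, so a BasicAA that truncates to 64 bits would see identical GEPs. A host-side check of the arithmetic, using the Clang/GCC __int128 extension:

    #include <cassert>
    #include <cstdint>
    int main() {
      unsigned __int128 One = 1;
      unsigned __int128 I70 = One << 70, I69 = One << 69;
      assert((uint64_t)I70 == 0 && (uint64_t)I69 == 0); // equal when truncated
      assert(I70 != I69);       // test1: distinct at full width => NoAlias
      assert(I69 + I69 == I70); // test2: the offsets coincide => MustAlias
    }
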
diff --git a/test/Analysis/BasicAA/gep-and-alias-64.ll b/test/Analysis/BasicAA/gep-and-alias-64.ll
new file mode 100644
index 0000000..c1a5354
--- /dev/null
+++ b/test/Analysis/BasicAA/gep-and-alias-64.ll
@@ -0,0 +1,43 @@
+; RUN: opt -S -basicaa -gvn < %s | FileCheck %s
+
+target datalayout = "e-m:o-p:64:64-f64:32:64-f80:128-n8:16:32-S128"
+target triple = "x86_64-apple-macosx10.6.0"
+
+; The load and store address in the loop body could alias so the load
+; can't be hoisted above the store and out of the loop.
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1)
+
+define i64 @foo(i64 %x, i64 %z, i64 %n) {
+entry:
+  %pool = alloca [59 x i64], align 4
+  %tmp = bitcast [59 x i64]* %pool to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 4 nonnull %tmp, i8 0, i64 236, i1 false)
+  %cmp3 = icmp eq i64 %n, 0
+  br i1 %cmp3, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph:                                   ; preds = %entry
+  %add = add i64 %z, %x
+  %and = and i64 %add, 9223372036854775807
+  %sub = add nsw i64 %and, -9223372036844814062
+  %arrayidx = getelementptr inbounds [59 x i64], [59 x i64]* %pool, i64 0, i64 %sub
+  %arrayidx1 = getelementptr inbounds [59 x i64], [59 x i64]* %pool, i64 0, i64 42
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.lr.ph, %for.body
+  %i.04 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+  store i64 %i.04, i64* %arrayidx, align 4
+  %tmp1 = load i64, i64* %arrayidx1, align 4
+  %inc = add nuw i64 %i.04, 1
+  %exitcond = icmp ne i64 %inc, %n
+  br i1 %exitcond, label %for.body, label %for.end.loopexit
+
+for.end.loopexit:                                 ; preds = %for.body
+  %lcssa = phi i64 [ %tmp1, %for.body ]
+  br label %for.end
+
+for.end:                                          ; preds = %for.end.loopexit, %entry
+  %s = phi i64 [ 0, %entry ], [ %lcssa, %for.end.loopexit ]
+; CHECK: ret i64 %s
+  ret i64 %s
+}
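
A quick feasibility check of the index arithmetic in @foo above: the masked value %and can reach the constant plus 42, so the store may hit the slot the load reads (constants copied from the test):

    #include <cassert>
    #include <cstdint>
    int main() {
      // One feasible value of %and = (x + z) & 0x7fffffffffffffff.
      uint64_t And = 9223372036844814104ULL;
      assert(And <= 9223372036854775807ULL);      // fits under the mask
      assert(And - 9223372036844814062ULL == 42); // %sub hits index 42
    }
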
diff --git a/test/Analysis/BasicAA/gep-and-alias.ll b/test/Analysis/BasicAA/gep-and-alias.ll
index e2e5811..0fa17b6 100644
--- a/test/Analysis/BasicAA/gep-and-alias.ll
+++ b/test/Analysis/BasicAA/gep-and-alias.ll
@@ -1,4 +1,5 @@
 ; RUN: opt -S -basicaa -gvn < %s | FileCheck %s
+; RUN: opt -S -basicaa -gvn -basicaa-force-at-least-64b=0 < %s | FileCheck %s
 
 target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
 target triple = "i386-apple-macosx10.6.0"
diff --git a/test/Analysis/ConstantFolding/bitcount.ll b/test/Analysis/ConstantFolding/bitcount.ll
new file mode 100644
index 0000000..6333c4d
--- /dev/null
+++ b/test/Analysis/ConstantFolding/bitcount.ll
@@ -0,0 +1,177 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -constprop -S | FileCheck %s
+
+declare i31 @llvm.ctpop.i31(i31 %val)
+declare i32 @llvm.cttz.i32(i32 %val, i1)
+declare i33 @llvm.ctlz.i33(i33 %val, i1)
+declare <2 x i31> @llvm.ctpop.v2i31(<2 x i31> %val)
+declare <2 x i32> @llvm.cttz.v2i32(<2 x i32> %val, i1)
+declare <2 x i33> @llvm.ctlz.v2i33(<2 x i33> %val, i1)
+
+define i31 @ctpop_const() {
+; CHECK-LABEL: @ctpop_const(
+; CHECK-NEXT:    ret i31 12
+;
+  %x = call i31 @llvm.ctpop.i31(i31 12415124)
+  ret i31 %x
+}
+
+define i32 @cttz_const() {
+; CHECK-LABEL: @cttz_const(
+; CHECK-NEXT:    ret i32 1
+;
+  %x = call i32 @llvm.cttz.i32(i32 87359874, i1 true)
+  ret i32 %x
+}
+
+define i33 @ctlz_const() {
+; CHECK-LABEL: @ctlz_const(
+; CHECK-NEXT:    ret i33 6
+;
+  %x = call i33 @llvm.ctlz.i33(i33 87359874, i1 true)
+  ret i33 %x
+}
+
+define i31 @ctpop_zero() {
+; CHECK-LABEL: @ctpop_zero(
+; CHECK-NEXT:    ret i31 0
+;
+  %x = call i31 @llvm.ctpop.i31(i31 0)
+  ret i31 %x
+}
+
+define i32 @cttz_zero_defined() {
+; CHECK-LABEL: @cttz_zero_defined(
+; CHECK-NEXT:    ret i32 32
+;
+  %x = call i32 @llvm.cttz.i32(i32 0, i1 false)
+  ret i32 %x
+}
+
+define i32 @cttz_zero_undefined() {
+; CHECK-LABEL: @cttz_zero_undefined(
+; CHECK-NEXT:    ret i32 undef
+;
+  %x = call i32 @llvm.cttz.i32(i32 0, i1 true)
+  ret i32 %x
+}
+
+define i33 @ctlz_zero_defined() {
+; CHECK-LABEL: @ctlz_zero_defined(
+; CHECK-NEXT:    ret i33 33
+;
+  %x = call i33 @llvm.ctlz.i33(i33 0, i1 false)
+  ret i33 %x
+}
+
+define i33 @ctlz_zero_undefined() {
+; CHECK-LABEL: @ctlz_zero_undefined(
+; CHECK-NEXT:    ret i33 undef
+;
+  %x = call i33 @llvm.ctlz.i33(i33 0, i1 true)
+  ret i33 %x
+}
+
+define i31 @ctpop_undef() {
+; CHECK-LABEL: @ctpop_undef(
+; CHECK-NEXT:    ret i31 0
+;
+  %x = call i31 @llvm.ctpop.i31(i31 undef)
+  ret i31 %x
+}
+
+define i32 @cttz_undef_defined() {
+; CHECK-LABEL: @cttz_undef_defined(
+; CHECK-NEXT:    ret i32 0
+;
+  %x = call i32 @llvm.cttz.i32(i32 undef, i1 false)
+  ret i32 %x
+}
+
+define i32 @cttz_undef_undefined() {
+; CHECK-LABEL: @cttz_undef_undefined(
+; CHECK-NEXT:    ret i32 undef
+;
+  %x = call i32 @llvm.cttz.i32(i32 undef, i1 true)
+  ret i32 %x
+}
+
+define i33 @ctlz_undef_defined() {
+; CHECK-LABEL: @ctlz_undef_defined(
+; CHECK-NEXT:    ret i33 0
+;
+  %x = call i33 @llvm.ctlz.i33(i33 undef, i1 false)
+  ret i33 %x
+}
+
+define i33 @ctlz_undef_undefined() {
+; CHECK-LABEL: @ctlz_undef_undefined(
+; CHECK-NEXT:    ret i33 undef
+;
+  %x = call i33 @llvm.ctlz.i33(i33 undef, i1 true)
+  ret i33 %x
+}
+
+define <2 x i31> @ctpop_vector() {
+; CHECK-LABEL: @ctpop_vector(
+; CHECK-NEXT:    ret <2 x i31> <i31 8, i31 1>
+;
+  %x = call <2 x i31> @llvm.ctpop.v2i31(<2 x i31> <i31 255, i31 16>)
+  ret <2 x i31> %x
+}
+
+define <2 x i31> @ctpop_vector_undef() {
+; CHECK-LABEL: @ctpop_vector_undef(
+; CHECK-NEXT:    ret <2 x i31> zeroinitializer
+;
+  %x = call <2 x i31> @llvm.ctpop.v2i31(<2 x i31> <i31 0, i31 undef>)
+  ret <2 x i31> %x
+}
+
+define <2 x i32> @cttz_vector() {
+; CHECK-LABEL: @cttz_vector(
+; CHECK-NEXT:    ret <2 x i32> <i32 0, i32 4>
+;
+  %x = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> <i32 255, i32 16>, i1 true)
+  ret <2 x i32> %x
+}
+
+define <2 x i32> @cttz_vector_undef_defined() {
+; CHECK-LABEL: @cttz_vector_undef_defined(
+; CHECK-NEXT:    ret <2 x i32> <i32 32, i32 0>
+;
+  %x = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> <i32 0, i32 undef>, i1 false)
+  ret <2 x i32> %x
+}
+
+define <2 x i32> @cttz_vector_undef_undefined() {
+; CHECK-LABEL: @cttz_vector_undef_undefined(
+; CHECK-NEXT:    ret <2 x i32> undef
+;
+  %x = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> <i32 0, i32 undef>, i1 true)
+  ret <2 x i32> %x
+}
+
+define <2 x i33> @ctlz_vector() {
+; CHECK-LABEL: @ctlz_vector(
+; CHECK-NEXT:    ret <2 x i33> <i33 25, i33 28>
+;
+  %x = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> <i33 255, i33 16>, i1 true)
+  ret <2 x i33> %x
+}
+
+define <2 x i33> @ctlz_vector_undef_defined() {
+; CHECK-LABEL: @ctlz_vector_undef_defined(
+; CHECK-NEXT:    ret <2 x i33> <i33 33, i33 0>
+;
+  %x = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> <i33 0, i33 undef>, i1 false)
+  ret <2 x i33> %x
+}
+
+define <2 x i33> @ctlz_vector_undef_undefined() {
+; CHECK-LABEL: @ctlz_vector_undef_undefined(
+; CHECK-NEXT:    ret <2 x i33> undef
+;
+  %x = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> <i33 0, i33 undef>, i1 true)
+  ret <2 x i33> %x
+}
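
The expected constants are easy to re-derive. A small reference check for the scalar cases (plain C++ stand-ins for the intrinsics; the loops terminate because the inputs below are nonzero, and W emulates the odd bit widths):

    #include <cassert>
    #include <cstdint>
    static int popcnt(uint64_t V) { int N = 0; for (; V; V &= V - 1) ++N; return N; }
    static int cttz(uint64_t V) { int N = 0; for (; !(V & 1); V >>= 1) ++N; return N; }
    static int ctlz(uint64_t V, int W) {
      int N = 0;
      for (int I = W - 1; I >= 0 && !((V >> I) & 1); --I) ++N;
      return N;
    }
    int main() {
      assert(popcnt(12415124) == 12);  // @ctpop_const
      assert(cttz(87359874) == 1);     // @cttz_const
      assert(ctlz(87359874, 33) == 6); // @ctlz_const
    }
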
diff --git a/test/Analysis/ConstantFolding/func-and-folding.ll b/test/Analysis/ConstantFolding/func-and-folding.ll
deleted file mode 100644
index 2dbbe67..0000000
--- a/test/Analysis/ConstantFolding/func-and-folding.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: opt < %s -constprop -S -o - | FileCheck %s
-
-; Function Attrs: minsize norecurse nounwind optsize readnone
-define void @foo1() #0 {
-entry:
-  ret void
-}
-
-; Function Attrs: minsize norecurse nounwind optsize readnone
-define void @foo2() align 4 {
-entry:
-  ret void
-}
-
-; Function Attrs: minsize nounwind optsize
-define i32 @main() local_unnamed_addr #1 {
-entry:
-; CHECK: ptrtoint
-  %call = tail call i32 bitcast (i32 (...)* @process to i32 (i32)*)(i32 and (i32 ptrtoint (void ()* @foo1 to i32), i32 2)) #3
-; CHECK-NEXT: ptrtoint
-  %call2 = tail call i32 bitcast (i32 (...)* @process to i32 (i32)*)(i32 and (i32 ptrtoint (void ()* @foo2 to i32), i32 2)) #3
-  ret i32 0
-}
-
-; Function Attrs: minsize optsize
-declare i32 @process(...) local_unnamed_addr #2
-
diff --git a/test/Analysis/ConstantFolding/funnel-shift.ll b/test/Analysis/ConstantFolding/funnel-shift.ll
index 8ccc714..fae29c9 100644
--- a/test/Analysis/ConstantFolding/funnel-shift.ll
+++ b/test/Analysis/ConstantFolding/funnel-shift.ll
@@ -81,3 +81,152 @@
   ret <4 x i8> %f
 }
 
+; Undef handling
+
+define i32 @fshl_scalar_all_undef() {
+; CHECK-LABEL: @fshl_scalar_all_undef(
+; CHECK-NEXT:    ret i32 undef
+;
+  %f = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 undef)
+  ret i32 %f
+}
+
+define i32 @fshr_scalar_all_undef() {
+; CHECK-LABEL: @fshr_scalar_all_undef(
+; CHECK-NEXT:    ret i32 undef
+;
+  %f = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 undef)
+  ret i32 %f
+}
+
+define i32 @fshl_scalar_undef_shamt() {
+; CHECK-LABEL: @fshl_scalar_undef_shamt(
+; CHECK-NEXT:    ret i32 1
+;
+  %f = call i32 @llvm.fshl.i32(i32 1, i32 2, i32 undef)
+  ret i32 %f
+}
+
+define i32 @fshr_scalar_undef_shamt() {
+; CHECK-LABEL: @fshr_scalar_undef_shamt(
+; CHECK-NEXT:    ret i32 2
+;
+  %f = call i32 @llvm.fshr.i32(i32 1, i32 2, i32 undef)
+  ret i32 %f
+}
+
+define i32 @fshl_scalar_undef_ops() {
+; CHECK-LABEL: @fshl_scalar_undef_ops(
+; CHECK-NEXT:    ret i32 undef
+;
+  %f = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 7)
+  ret i32 %f
+}
+
+define i32 @fshr_scalar_undef_ops() {
+; CHECK-LABEL: @fshr_scalar_undef_ops(
+; CHECK-NEXT:    ret i32 undef
+;
+  %f = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 7)
+  ret i32 %f
+}
+
+define i32 @fshl_scalar_undef_op1_zero_shift() {
+; CHECK-LABEL: @fshl_scalar_undef_op1_zero_shift(
+; CHECK-NEXT:    ret i32 undef
+;
+  %f = call i32 @llvm.fshl.i32(i32 undef, i32 1, i32 0)
+  ret i32 %f
+}
+
+define i32 @fshl_scalar_undef_op2_zero_shift() {
+; CHECK-LABEL: @fshl_scalar_undef_op2_zero_shift(
+; CHECK-NEXT:    ret i32 1
+;
+  %f = call i32 @llvm.fshl.i32(i32 1, i32 undef, i32 32)
+  ret i32 %f
+}
+
+define i32 @fshr_scalar_undef_op1_zero_shift() {
+; CHECK-LABEL: @fshr_scalar_undef_op1_zero_shift(
+; CHECK-NEXT:    ret i32 1
+;
+  %f = call i32 @llvm.fshr.i32(i32 undef, i32 1, i32 64)
+  ret i32 %f
+}
+
+define i32 @fshr_scalar_undef_op2_zero_shift() {
+; CHECK-LABEL: @fshr_scalar_undef_op2_zero_shift(
+; CHECK-NEXT:    ret i32 undef
+;
+  %f = call i32 @llvm.fshr.i32(i32 1, i32 undef, i32 0)
+  ret i32 %f
+}
+
+define i32 @fshl_scalar_undef_op1_nonzero_shift() {
+; CHECK-LABEL: @fshl_scalar_undef_op1_nonzero_shift(
+; CHECK-NEXT:    ret i32 255
+;
+  %f = call i32 @llvm.fshl.i32(i32 undef, i32 -1, i32 8)
+  ret i32 %f
+}
+
+define i32 @fshl_scalar_undef_op2_nonzero_shift() {
+; CHECK-LABEL: @fshl_scalar_undef_op2_nonzero_shift(
+; CHECK-NEXT:    ret i32 -256
+;
+  %f = call i32 @llvm.fshl.i32(i32 -1, i32 undef, i32 8)
+  ret i32 %f
+}
+
+define i32 @fshr_scalar_undef_op1_nonzero_shift() {
+; CHECK-LABEL: @fshr_scalar_undef_op1_nonzero_shift(
+; CHECK-NEXT:    ret i32 16777215
+;
+  %f = call i32 @llvm.fshr.i32(i32 undef, i32 -1, i32 8)
+  ret i32 %f
+}
+
+define i32 @fshr_scalar_undef_op2_nonzero_shift() {
+; CHECK-LABEL: @fshr_scalar_undef_op2_nonzero_shift(
+; CHECK-NEXT:    ret i32 -16777216
+;
+  %f = call i32 @llvm.fshr.i32(i32 -1, i32 undef, i32 8)
+  ret i32 %f
+}
+
+; Undef/Undef/Undef; 1/2/Undef; Undef/Undef/3; Undef/1/0
+define <4 x i8> @fshl_vector_mix1() {
+; CHECK-LABEL: @fshl_vector_mix1(
+; CHECK-NEXT:    ret <4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>
+;
+  %f = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
+  ret <4 x i8> %f
+}
+
+; 1/Undef/8; Undef/-1/2; -1/Undef/2; 7/8/4
+define <4 x i8> @fshl_vector_mix2() {
+; CHECK-LABEL: @fshl_vector_mix2(
+; CHECK-NEXT:    ret <4 x i8> <i8 1, i8 3, i8 -4, i8 112>
+;
+  %f = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
+  ret <4 x i8> %f
+}
+
+; Undef/Undef/Undef; 1/2/Undef; Undef/Undef/3; Undef/1/0
+define <4 x i8> @fshr_vector_mix1() {
+; CHECK-LABEL: @fshr_vector_mix1(
+; CHECK-NEXT:    ret <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>
+;
+  %f = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
+  ret <4 x i8> %f
+}
+
+; 1/Undef/8; Undef/-1/2; -1/Undef/2; 7/8/4
+define <4 x i8> @fshr_vector_mix2() {
+; CHECK-LABEL: @fshr_vector_mix2(
+; CHECK-NEXT:    ret <4 x i8> <i8 undef, i8 63, i8 -64, i8 112>
+;
+  %f = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
+  ret <4 x i8> %f
+}
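
For cross-checking the folded vectors, the LangRef semantics of fshl reduce to the reference helper below. Treating undef as zero reproduces the lanes where the fold commits to a value; that is a checking convenience here, not the folder's actual rule.

    #include <cassert>
    #include <cstdint>
    // llvm.fshl.i8: concatenate Hi:Lo, shift left by Amt mod 8, keep the
    // high byte.
    static uint8_t fshl8(uint8_t Hi, uint8_t Lo, unsigned Amt) {
      unsigned S = Amt % 8;
      return S ? (uint8_t)((Hi << S) | (Lo >> (8 - S))) : Hi;
    }
    int main() {
      assert(fshl8(7, 8, 4) == 112);           // @fshl_vector_mix2, last lane
      assert((int8_t)fshl8(0xFF, 0, 2) == -4); // third lane, undef low as 0
      assert(fshl8(1, 0, 8) == 1);             // shift amount taken mod 8
    }
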
diff --git a/test/Analysis/ConstantFolding/saturating-add-sub.ll b/test/Analysis/ConstantFolding/saturating-add-sub.ll
index 14c6a9f..7c6fdbf 100644
--- a/test/Analysis/ConstantFolding/saturating-add-sub.ll
+++ b/test/Analysis/ConstantFolding/saturating-add-sub.ll
@@ -1,9 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -constprop -S | FileCheck %s
 
-declare void @dummy(i8)
-declare void @dummy_vec(<2 x i8>)
-
 declare i8 @llvm.uadd.sat.i8(i8, i8)
 declare i8 @llvm.sadd.sat.i8(i8, i8)
 declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
@@ -14,98 +11,356 @@
 declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
 declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
 
-define void @test_add_scalar() {
-; CHECK-LABEL: @test_add_scalar(
-; CHECK-NEXT:    call void @dummy(i8 30)
-; CHECK-NEXT:    call void @dummy(i8 -1)
-; CHECK-NEXT:    call void @dummy(i8 -10)
-; CHECK-NEXT:    call void @dummy(i8 127)
-; CHECK-NEXT:    call void @dummy(i8 -128)
-; CHECK-NEXT:    ret void
+define i8 @test_uadd_scalar_no_sat() {
+; CHECK-LABEL: @test_uadd_scalar_no_sat(
+; CHECK-NEXT:    ret i8 30
 ;
-  %x1 = call i8 @llvm.uadd.sat.i8(i8 10, i8 20)
-  call void @dummy(i8 %x1)
-  %x2 = call i8 @llvm.uadd.sat.i8(i8 250, i8 100)
-  call void @dummy(i8 %x2)
-
-  %y1 = call i8 @llvm.sadd.sat.i8(i8 10, i8 -20)
-  call void @dummy(i8 %y1)
-  %y2 = call i8 @llvm.sadd.sat.i8(i8 120, i8 10)
-  call void @dummy(i8 %y2)
-  %y3 = call i8 @llvm.sadd.sat.i8(i8 -120, i8 -10)
-  call void @dummy(i8 %y3)
-
-  ret void
+  %x = call i8 @llvm.uadd.sat.i8(i8 10, i8 20)
+  ret i8 %x
 }
 
-define void @test_add_vector(<2 x i8> %a) {
-; CHECK-LABEL: @test_add_vector(
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 20, i8 30>)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -1, i8 -1>)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -10, i8 -30>)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 127, i8 127>)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -128, i8 -128>)
-; CHECK-NEXT:    ret void
+define i8 @test_uadd_scalar_sat() {
+; CHECK-LABEL: @test_uadd_scalar_sat(
+; CHECK-NEXT:    ret i8 -1
 ;
-  %x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 15>, <2 x i8> <i8 10, i8 15>)
-  call void @dummy_vec(<2 x i8> %x1)
-  %x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 100, i8 200>, <2 x i8> <i8 250, i8 100>)
-  call void @dummy_vec(<2 x i8> %x2)
-
-  %y1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 10, i8 -15>, <2 x i8> <i8 -20, i8 -15>)
-  call void @dummy_vec(<2 x i8> %y1)
-  %y2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 100, i8 10>, <2 x i8> <i8 30, i8 120>)
-  call void @dummy_vec(<2 x i8> %y2)
-  %y3 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -100, i8 -10>, <2 x i8> <i8 -30, i8 -120>)
-  call void @dummy_vec(<2 x i8> %y3)
-
-  ret void
+  %x = call i8 @llvm.uadd.sat.i8(i8 250, i8 100)
+  ret i8 %x
 }
 
-define void @test_usub_ssub_scalar() {
-; CHECK-LABEL: @test_usub_ssub_scalar(
-; CHECK-NEXT:    call void @dummy(i8 10)
-; CHECK-NEXT:    call void @dummy(i8 0)
-; CHECK-NEXT:    call void @dummy(i8 -30)
-; CHECK-NEXT:    call void @dummy(i8 127)
-; CHECK-NEXT:    call void @dummy(i8 -128)
-; CHECK-NEXT:    ret void
+define i8 @test_sadd_scalar_no_sat() {
+; CHECK-LABEL: @test_sadd_scalar_no_sat(
+; CHECK-NEXT:    ret i8 -10
 ;
-  %x1 = call i8 @llvm.usub.sat.i8(i8 20, i8 10)
-  call void @dummy(i8 %x1)
-  %x2 = call i8 @llvm.usub.sat.i8(i8 200, i8 250)
-  call void @dummy(i8 %x2)
-
-  %y1 = call i8 @llvm.ssub.sat.i8(i8 -10, i8 20)
-  call void @dummy(i8 %y1)
-  %y2 = call i8 @llvm.ssub.sat.i8(i8 120, i8 -10)
-  call void @dummy(i8 %y2)
-  %y3 = call i8 @llvm.ssub.sat.i8(i8 -120, i8 10)
-  call void @dummy(i8 %y3)
-
-  ret void
+  %x = call i8 @llvm.sadd.sat.i8(i8 10, i8 -20)
+  ret i8 %x
 }
 
-define void @test_sub_vector(<2 x i8> %a) {
-; CHECK-LABEL: @test_sub_vector(
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 10, i8 5>)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> zeroinitializer)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 30, i8 0>)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 127, i8 127>)
-; CHECK-NEXT:    call void @dummy_vec(<2 x i8> <i8 -128, i8 -128>)
-; CHECK-NEXT:    ret void
+define i8 @test_sadd_scalar_sat_pos() {
+; CHECK-LABEL: @test_sadd_scalar_sat_pos(
+; CHECK-NEXT:    ret i8 127
 ;
-  %x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 20, i8 15>, <2 x i8> <i8 10, i8 10>)
-  call void @dummy_vec(<2 x i8> %x1)
-  %x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 100, i8 200>, <2 x i8> <i8 150, i8 250>)
-  call void @dummy_vec(<2 x i8> %x2)
+  %x = call i8 @llvm.sadd.sat.i8(i8 120, i8 10)
+  ret i8 %x
+}
 
-  %y1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 10, i8 -15>, <2 x i8> <i8 -20, i8 -15>)
-  call void @dummy_vec(<2 x i8> %y1)
-  %y2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 100, i8 10>, <2 x i8> <i8 -30, i8 -120>)
-  call void @dummy_vec(<2 x i8> %y2)
-  %y3 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 -100, i8 -10>, <2 x i8> <i8 30, i8 120>)
-  call void @dummy_vec(<2 x i8> %y3)
+define i8 @test_sadd_scalar_sat_neg() {
+; CHECK-LABEL: @test_sadd_scalar_sat_neg(
+; CHECK-NEXT:    ret i8 -128
+;
+  %x = call i8 @llvm.sadd.sat.i8(i8 -120, i8 -10)
+  ret i8 %x
+}
 
-  ret void
+define <2 x i8> @test_uadd_vector_no_sat(<2 x i8> %a) {
+; CHECK-LABEL: @test_uadd_vector_no_sat(
+; CHECK-NEXT:    ret <2 x i8> <i8 20, i8 30>
+;
+  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 15>, <2 x i8> <i8 10, i8 15>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_uadd_vector_sat(<2 x i8> %a) {
+; CHECK-LABEL: @test_uadd_vector_sat(
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
+;
+  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 100, i8 200>, <2 x i8> <i8 250, i8 100>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_sadd_vector_no_sat(<2 x i8> %a) {
+; CHECK-LABEL: @test_sadd_vector_no_sat(
+; CHECK-NEXT:    ret <2 x i8> <i8 -10, i8 -30>
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 10, i8 -15>, <2 x i8> <i8 -20, i8 -15>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_sadd_vector_sat_pos(<2 x i8> %a) {
+; CHECK-LABEL: @test_sadd_vector_sat_pos(
+; CHECK-NEXT:    ret <2 x i8> <i8 127, i8 127>
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 100, i8 10>, <2 x i8> <i8 30, i8 120>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_sadd_vector_sat_neg(<2 x i8> %a) {
+; CHECK-LABEL: @test_sadd_vector_sat_neg(
+; CHECK-NEXT:    ret <2 x i8> <i8 -128, i8 -128>
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -100, i8 -10>, <2 x i8> <i8 -30, i8 -120>)
+  ret <2 x i8> %x
+}
+
+define i8 @test_usub_scalar_no_sat() {
+; CHECK-LABEL: @test_usub_scalar_no_sat(
+; CHECK-NEXT:    ret i8 10
+;
+  %x = call i8 @llvm.usub.sat.i8(i8 20, i8 10)
+  ret i8 %x
+}
+
+define i8 @test_usub_scalar_sat() {
+; CHECK-LABEL: @test_usub_scalar_sat(
+; CHECK-NEXT:    ret i8 0
+;
+  %x = call i8 @llvm.usub.sat.i8(i8 200, i8 250)
+  ret i8 %x
+}
+
+define i8 @test_ssub_scalar_no_sat() {
+; CHECK-LABEL: @test_ssub_scalar_no_sat(
+; CHECK-NEXT:    ret i8 -30
+;
+  %x = call i8 @llvm.ssub.sat.i8(i8 -10, i8 20)
+  ret i8 %x
+}
+
+define i8 @test_ssub_scalar_sat_pos() {
+; CHECK-LABEL: @test_ssub_scalar_sat_pos(
+; CHECK-NEXT:    ret i8 127
+;
+  %x = call i8 @llvm.ssub.sat.i8(i8 120, i8 -10)
+  ret i8 %x
+}
+
+define i8 @test_ssub_scalar_sat_neg() {
+; CHECK-LABEL: @test_ssub_scalar_sat_neg(
+; CHECK-NEXT:    ret i8 -128
+;
+  %x = call i8 @llvm.ssub.sat.i8(i8 -120, i8 10)
+  ret i8 %x
+}
+
+define <2 x i8> @test_usub_vector_no_sat(<2 x i8> %a) {
+; CHECK-LABEL: @test_usub_vector_no_sat(
+; CHECK-NEXT:    ret <2 x i8> <i8 10, i8 5>
+;
+  %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 20, i8 15>, <2 x i8> <i8 10, i8 10>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_usub_vector_sat(<2 x i8> %a) {
+; CHECK-LABEL: @test_usub_vector_sat(
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 100, i8 200>, <2 x i8> <i8 150, i8 250>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_ssub_vector_no_sat(<2 x i8> %a) {
+; CHECK-LABEL: @test_ssub_vector_no_sat(
+; CHECK-NEXT:    ret <2 x i8> <i8 30, i8 0>
+;
+  %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 10, i8 -15>, <2 x i8> <i8 -20, i8 -15>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_ssub_vector_sat_pos(<2 x i8> %a) {
+; CHECK-LABEL: @test_ssub_vector_sat_pos(
+; CHECK-NEXT:    ret <2 x i8> <i8 127, i8 127>
+;
+  %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 100, i8 10>, <2 x i8> <i8 -30, i8 -120>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_ssub_vector_sat_neg(<2 x i8> %a) {
+; CHECK-LABEL: @test_ssub_vector_sat_neg(
+; CHECK-NEXT:    ret <2 x i8> <i8 -128, i8 -128>
+;
+  %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 -100, i8 -10>, <2 x i8> <i8 30, i8 120>)
+  ret <2 x i8> %x
+}
+
+; Tests for undef handling
+
+define i8 @test_uadd_scalar_both_undef() {
+; CHECK-LABEL: @test_uadd_scalar_both_undef(
+; CHECK-NEXT:    ret i8 undef
+;
+  %x = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+  ret i8 %x
+}
+
+define i8 @test_sadd_scalar_both_undef() {
+; CHECK-LABEL: @test_sadd_scalar_both_undef(
+; CHECK-NEXT:    ret i8 undef
+;
+  %x = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+  ret i8 %x
+}
+
+define i8 @test_usub_scalar_both_undef() {
+; CHECK-LABEL: @test_usub_scalar_both_undef(
+; CHECK-NEXT:    ret i8 undef
+;
+  %x = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+  ret i8 %x
+}
+
+define i8 @test_ssub_scalar_both_undef() {
+; CHECK-LABEL: @test_ssub_scalar_both_undef(
+; CHECK-NEXT:    ret i8 undef
+;
+  %x = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+  ret i8 %x
+}
+
+define i8 @test_uadd_scalar_op2_undef() {
+; CHECK-LABEL: @test_uadd_scalar_op2_undef(
+; CHECK-NEXT:    ret i8 -1
+;
+  %x = call i8 @llvm.uadd.sat.i8(i8 10, i8 undef)
+  ret i8 %x
+}
+
+define i8 @test_sadd_scalar_op1_undef() {
+; CHECK-LABEL: @test_sadd_scalar_op1_undef(
+; CHECK-NEXT:    ret i8 -1
+;
+  %x = call i8 @llvm.sadd.sat.i8(i8 undef, i8 10)
+  ret i8 %x
+}
+
+define i8 @test_usub_scalar_op2_undef() {
+; CHECK-LABEL: @test_usub_scalar_op2_undef(
+; CHECK-NEXT:    ret i8 0
+;
+  %x = call i8 @llvm.usub.sat.i8(i8 10, i8 undef)
+  ret i8 %x
+}
+
+define i8 @test_usub_scalar_op1_undef() {
+; CHECK-LABEL: @test_usub_scalar_op1_undef(
+; CHECK-NEXT:    ret i8 0
+;
+  %x = call i8 @llvm.usub.sat.i8(i8 undef, i8 10)
+  ret i8 %x
+}
+
+define <2 x i8> @test_uadd_vector_both_undef_splat() {
+; CHECK-LABEL: @test_uadd_vector_both_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> undef
+;
+  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_sadd_vector_both_undef_splat() {
+; CHECK-LABEL: @test_sadd_vector_both_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> undef
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_usub_vector_both_undef_splat() {
+; CHECK-LABEL: @test_usub_vector_both_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> undef
+;
+  %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_ssub_vector_both_undef_splat() {
+; CHECK-LABEL: @test_ssub_vector_both_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> undef
+;
+  %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_uadd_vector_op2_undef_splat() {
+; CHECK-LABEL: @test_uadd_vector_op2_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
+;
+  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> undef)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_sadd_vector_op1_undef_splat() {
+; CHECK-LABEL: @test_sadd_vector_op1_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_usub_vector_op2_undef_splat() {
+; CHECK-LABEL: @test_usub_vector_op2_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> undef)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_ssub_vector_op1_undef_splat() {
+; CHECK-LABEL: @test_ssub_vector_op1_undef_splat(
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> <i8 10, i8 20>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_uadd_vector_op2_undef_mix1() {
+; CHECK-LABEL: @test_uadd_vector_op2_undef_mix1(
+; CHECK-NEXT:    ret <2 x i8> <i8 30, i8 undef>
+;
+  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 20, i8 undef>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_uadd_vector_op2_undef_mix2() {
+; CHECK-LABEL: @test_uadd_vector_op2_undef_mix2(
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
+;
+  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 undef, i8 20>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_sadd_vector_op1_undef_mix1() {
+; CHECK-LABEL: @test_sadd_vector_op1_undef_mix1(
+; CHECK-NEXT:    ret <2 x i8> <i8 undef, i8 30>
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 undef, i8 20>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_sadd_vector_op1_undef_mix2() {
+; CHECK-LABEL: @test_sadd_vector_op1_undef_mix2(
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
+;
+  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 20, i8 undef>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_usub_vector_op2_undef_mix1() {
+; CHECK-LABEL: @test_usub_vector_op2_undef_mix1(
+; CHECK-NEXT:    ret <2 x i8> <i8 0, i8 undef>
+;
+  %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 20, i8 undef>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_usub_vector_op2_undef_mix2() {
+; CHECK-LABEL: @test_usub_vector_op2_undef_mix2(
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 undef, i8 20>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_ssub_vector_op1_undef_mix1() {
+; CHECK-LABEL: @test_ssub_vector_op1_undef_mix1(
+; CHECK-NEXT:    ret <2 x i8> <i8 undef, i8 -10>
+;
+  %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 undef, i8 20>)
+  ret <2 x i8> %x
+}
+
+define <2 x i8> @test_ssub_vector_op1_undef_mix2() {
+; CHECK-LABEL: @test_ssub_vector_op1_undef_mix2(
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 20, i8 undef>)
+  ret <2 x i8> %x
 }
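
Reference semantics for the scalar cases, to sanity-check the new expected values (host-side stand-ins, not the folder itself):

    #include <cassert>
    #include <cstdint>
    static uint8_t uaddSat(uint8_t A, uint8_t B) {
      unsigned R = (unsigned)A + B;
      return R > 0xFF ? 0xFF : (uint8_t)R;
    }
    static int8_t saddSat(int8_t A, int8_t B) {
      int R = (int)A + B;
      return R > 127 ? 127 : R < -128 ? -128 : (int8_t)R;
    }
    static uint8_t usubSat(uint8_t A, uint8_t B) { return A > B ? A - B : 0; }
    int main() {
      assert(uaddSat(250, 100) == 0xFF);  // @test_uadd_scalar_sat (i8 -1)
      assert(saddSat(120, 10) == 127);    // @test_sadd_scalar_sat_pos
      assert(saddSat(-120, -10) == -128); // @test_sadd_scalar_sat_neg
      assert(usubSat(200, 250) == 0);     // @test_usub_scalar_sat
    }
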
diff --git a/test/Analysis/CostModel/X86/arith-fp.ll b/test/Analysis/CostModel/X86/arith-fp.ll
index a6b5cf6..bce7ca8 100644
--- a/test/Analysis/CostModel/X86/arith-fp.ll
+++ b/test/Analysis/CostModel/X86/arith-fp.ll
@@ -1,4 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -enable-no-nans-fp-math  -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=-sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE1
 ; RUN: opt < %s -enable-no-nans-fp-math  -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
 ; RUN: opt < %s -enable-no-nans-fp-math  -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
 ; RUN: opt < %s -enable-no-nans-fp-math  -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
@@ -14,6 +15,17 @@
 target triple = "x86_64-apple-macosx10.8.0"
 
 define i32 @fadd(i32 %arg) {
+; SSE1-LABEL: 'fadd'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %F32 = fadd float undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fadd <4 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fadd <8 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fadd <16 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = fadd double undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2F64 = fadd <2 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4F64 = fadd <4 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8F64 = fadd <8 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
 ; SSE2-LABEL: 'fadd'
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = fadd float undef, undef
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fadd <4 x float> undef, undef
@@ -116,6 +128,17 @@
 }
 
 define i32 @fsub(i32 %arg) {
+; SSE1-LABEL: 'fsub'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %F32 = fsub float undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fsub <4 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fsub <8 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fsub <16 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = fsub double undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2F64 = fsub <2 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4F64 = fsub <4 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8F64 = fsub <8 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
 ; SSE2-LABEL: 'fsub'
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = fsub float undef, undef
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fsub <4 x float> undef, undef
@@ -218,6 +241,17 @@
 }
 
 define i32 @fmul(i32 %arg) {
+; SSE1-LABEL: 'fmul'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = fmul float undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fmul <4 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fmul <8 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fmul <16 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = fmul double undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2F64 = fmul <2 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4F64 = fmul <4 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8F64 = fmul <8 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
 ; SSE2-LABEL: 'fmul'
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = fmul float undef, undef
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fmul <4 x float> undef, undef
@@ -320,6 +354,17 @@
 }
 
 define i32 @fdiv(i32 %arg) {
+; SSE1-LABEL: 'fdiv'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 17 for instruction: %F32 = fdiv float undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 34 for instruction: %V4F32 = fdiv <4 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 68 for instruction: %V8F32 = fdiv <8 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 136 for instruction: %V16F32 = fdiv <16 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = fdiv double undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2F64 = fdiv <2 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4F64 = fdiv <4 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8F64 = fdiv <8 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
 ; SSE2-LABEL: 'fdiv'
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 23 for instruction: %F32 = fdiv float undef, undef
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 39 for instruction: %V4F32 = fdiv <4 x float> undef, undef
@@ -422,16 +467,38 @@
 }
 
 define i32 @frem(i32 %arg) {
-; SSE-LABEL: 'frem'
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V4F32 = frem <4 x float> undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %V8F32 = frem <8 x float> undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V16F32 = frem <16 x float> undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V2F64 = frem <2 x double> undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 12 for instruction: %V4F64 = frem <4 x double> undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 24 for instruction: %V8F64 = frem <8 x double> undef, undef
-; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; SSE1-LABEL: 'frem'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V4F32 = frem <4 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %V8F32 = frem <8 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V16F32 = frem <16 x float> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2F64 = frem <2 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4F64 = frem <4 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8F64 = frem <8 x double> undef, undef
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE2-LABEL: 'frem'
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V4F32 = frem <4 x float> undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %V8F32 = frem <8 x float> undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V16F32 = frem <16 x float> undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V2F64 = frem <2 x double> undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 12 for instruction: %V4F64 = frem <4 x double> undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 24 for instruction: %V8F64 = frem <8 x double> undef, undef
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE42-LABEL: 'frem'
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V4F32 = frem <4 x float> undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %V8F32 = frem <8 x float> undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V16F32 = frem <16 x float> undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V2F64 = frem <2 x double> undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 12 for instruction: %V4F64 = frem <4 x double> undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 24 for instruction: %V8F64 = frem <8 x double> undef, undef
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX-LABEL: 'frem'
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
@@ -502,6 +569,17 @@
 }
 
 define i32 @fsqrt(i32 %arg) {
+; SSE1-LABEL: 'fsqrt'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %F32 = call float @llvm.sqrt.f32(float undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V4F32 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 112 for instruction: %V8F32 = call <8 x float> @llvm.sqrt.v8f32(<8 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 224 for instruction: %V16F32 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.sqrt.f64(double undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2F64 = call <2 x double> @llvm.sqrt.v2f64(<2 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4F64 = call <4 x double> @llvm.sqrt.v4f64(<4 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
 ; SSE2-LABEL: 'fsqrt'
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %F32 = call float @llvm.sqrt.f32(float undef)
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V4F32 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> undef)
@@ -604,16 +682,38 @@
 }
 
 define i32 @fabs(i32 %arg) {
-; SSE-LABEL: 'fabs'
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.fabs.f32(float undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = call double @llvm.fabs.f64(double undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; SSE1-LABEL: 'fabs'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.fabs.f32(float undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.fabs.f64(double undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2F64 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4F64 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8F64 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE2-LABEL: 'fabs'
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.fabs.f32(float undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = call double @llvm.fabs.f64(double undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE42-LABEL: 'fabs'
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.fabs.f32(float undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = call double @llvm.fabs.f64(double undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX-LABEL: 'fabs'
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.fabs.f32(float undef)
@@ -684,16 +784,38 @@
 }
 
 define i32 @fcopysign(i32 %arg) {
-; SSE-LABEL: 'fcopysign'
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = call double @llvm.copysign.f64(double undef, double undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; SSE1-LABEL: 'fcopysign'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F64 = call double @llvm.copysign.f64(double undef, double undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 20 for instruction: %V2F64 = call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 40 for instruction: %V4F64 = call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 80 for instruction: %V8F64 = call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
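+; The f64 copysign costs above show plain-SSE scalarization: every double
+; lane pays the scalar cost of 10, giving 20, 40 and 80 for the 2-, 4- and
+; 8-lane vectors, while the float side is handled 128 bits at a time
+; (presumably the usual sign-mask bitwise sequence) at 2 per half.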
+;
+; SSE2-LABEL: 'fcopysign'
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = call double @llvm.copysign.f64(double undef, double undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE42-LABEL: 'fcopysign'
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F64 = call double @llvm.copysign.f64(double undef, double undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX-LABEL: 'fcopysign'
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
@@ -764,16 +886,38 @@
 }
 
 define i32 @fma(i32 %arg) {
-; SSE-LABEL: 'fma'
-; SSE-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 43 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 86 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 172 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 84 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
-; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; SSE1-LABEL: 'fma'
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 43 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 86 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 172 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 20 for instruction: %V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 40 for instruction: %V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 80 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
+; SSE1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE2-LABEL: 'fma'
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 43 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 86 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 172 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 84 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE42-LABEL: 'fma'
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 43 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 86 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 172 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 84 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX-LABEL: 'fma'
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
diff --git a/test/Analysis/CostModel/X86/arith-ssat.ll b/test/Analysis/CostModel/X86/arith-ssat.ll
new file mode 100644
index 0000000..457573b
--- /dev/null
+++ b/test/Analysis/CostModel/X86/arith-ssat.ll
@@ -0,0 +1,445 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512DQ
+;
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=slm | FileCheck %s --check-prefixes=SLM
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=goldmont | FileCheck %s --check-prefixes=GLM
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=btver2 | FileCheck %s --check-prefixes=BTVER2
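+;
+; Each RUN line above analyzes the same functions for one subtarget, and
+; FileCheck only enforces the blocks whose prefix appears in that RUN's
+; --check-prefixes list, so subtargets with identical costs can share a
+; block. To refresh the assertions after a cost-model change, something
+; like the following should work (the opt path is an assumption about the
+; local build layout):
+;
+;   utils/update_analyze_test_checks.py --opt-binary=build/bin/opt \
+;       test/Analysis/CostModel/X86/arith-ssat.ll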
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+declare i64        @llvm.sadd.sat.i64(i64, i64)
+declare <2 x i64>  @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64>  @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64>  @llvm.sadd.sat.v8i64(<8 x i64>, <8 x i64>)
+
+declare i32        @llvm.sadd.sat.i32(i32, i32)
+declare <4 x i32>  @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32>  @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>)
+
+declare i16        @llvm.sadd.sat.i16(i16, i16)
+declare <8 x i16>  @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare i8         @llvm.sadd.sat.i8(i8,  i8)
+declare <16 x i8>  @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8>  @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8>  @llvm.sadd.sat.v64i8(<64 x i8>, <64 x i8>)
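+
+; The declarations above (mirrored by the ssub.sat set further down) cover
+; one scalar plus 128-, 256- and 512-bit vector variants per element width,
+; so the cost tables below show directly how each subtarget legalizes the
+; wider-than-native types.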
+
+define i32 @add(i32 %arg) {
+; SSE-LABEL: 'add'
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX1-LABEL: 'add'
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX2-LABEL: 'add'
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512F-LABEL: 'add'
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512BW-LABEL: 'add'
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512DQ-LABEL: 'add'
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SLM-LABEL: 'add'
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; GLM-LABEL: 'add'
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; BTVER2-LABEL: 'add'
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+  %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+  %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+  %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+  %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+
+  %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+  %V4I32  = call <4 x i32>  @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+  %V8I32  = call <8 x i32>  @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+  %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+
+  %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+  %V8I16  = call <8 x i16>  @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+  %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+  %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+
+  %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+  %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+  %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+  %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+
+  ret i32 undef
+}
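+
+; Working through the SSE costs above: <8 x i16> and <16 x i8> have native
+; saturating adds (cost 1), so the wider 16- and 8-bit types simply split
+; per 128-bit half (2 and 4). There is no native 32- or 64-bit form, so
+; <4 x i32> expands at cost 8 and <2 x i64> at cost 4, with the 256- and
+; 512-bit types doubling those: 16/32 and 8/16 respectively.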
+
+declare i64        @llvm.ssub.sat.i64(i64, i64)
+declare <2 x i64>  @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64>  @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64>  @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>)
+
+declare i32        @llvm.ssub.sat.i32(i32, i32)
+declare <4 x i32>  @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32>  @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>)
+
+declare i16        @llvm.ssub.sat.i16(i16, i16)
+declare <8 x i16>  @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare i8         @llvm.ssub.sat.i8(i8,  i8)
+declare <16 x i8>  @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8>  @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8>  @llvm.ssub.sat.v64i8(<64 x i8>, <64 x i8>)
+
+define i32 @sub(i32 %arg) {
+; SSE-LABEL: 'sub'
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX1-LABEL: 'sub'
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX2-LABEL: 'sub'
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512F-LABEL: 'sub'
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512BW-LABEL: 'sub'
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512DQ-LABEL: 'sub'
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SLM-LABEL: 'sub'
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; GLM-LABEL: 'sub'
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; BTVER2-LABEL: 'sub'
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+  %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+  %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+  %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+  %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+
+  %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+  %V4I32  = call <4 x i32>  @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+  %V8I32  = call <8 x i32>  @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+  %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+
+  %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+  %V8I16  = call <8 x i16>  @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+  %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+  %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+
+  %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+  %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+  %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+  %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+
+  ret i32 undef
+}
diff --git a/test/Analysis/CostModel/X86/arith-usat.ll b/test/Analysis/CostModel/X86/arith-usat.ll
new file mode 100644
index 0000000..4d0df9d
--- /dev/null
+++ b/test/Analysis/CostModel/X86/arith-usat.ll
@@ -0,0 +1,464 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512DQ
+;
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=slm | FileCheck %s --check-prefixes=SLM
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=goldmont | FileCheck %s --check-prefixes=GLM
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=btver2 | FileCheck %s --check-prefixes=BTVER2
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+declare i64        @llvm.uadd.sat.i64(i64, i64)
+declare <2 x i64>  @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64>  @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64>  @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>)
+
+declare i32        @llvm.uadd.sat.i32(i32, i32)
+declare <4 x i32>  @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32>  @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>)
+
+declare i16        @llvm.uadd.sat.i16(i16, i16)
+declare <8 x i16>  @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare i8         @llvm.uadd.sat.i8(i8,  i8)
+declare <16 x i8>  @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8>  @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8>  @llvm.uadd.sat.v64i8(<64 x i8>, <64 x i8>)
+
+define i32 @add(i32 %arg) {
+; SSE-LABEL: 'add'
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX1-LABEL: 'add'
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX2-LABEL: 'add'
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512F-LABEL: 'add'
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512BW-LABEL: 'add'
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512DQ-LABEL: 'add'
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SLM-LABEL: 'add'
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; GLM-LABEL: 'add'
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; BTVER2-LABEL: 'add'
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+  %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+  %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+  %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+  %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+
+  %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+  %V4I32  = call <4 x i32>  @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+  %V8I32  = call <8 x i32>  @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+  %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+
+  %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+  %V8I16  = call <8 x i16>  @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+  %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+  %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+
+  %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+  %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+  %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+  %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+
+  ret i32 undef
+}
+
+declare i64        @llvm.usub.sat.i64(i64, i64)
+declare <2 x i64>  @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64>  @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64>  @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
+
+declare i32        @llvm.usub.sat.i32(i32, i32)
+declare <4 x i32>  @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32>  @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
+
+declare i16        @llvm.usub.sat.i16(i16, i16)
+declare <8 x i16>  @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.usub.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare i8         @llvm.usub.sat.i8(i8,  i8)
+declare <16 x i8>  @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8>  @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8>  @llvm.usub.sat.v64i8(<64 x i8>, <64 x i8>)
+
+define i32 @sub(i32 %arg) {
+; SSSE3-LABEL: 'sub'
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SSE42-LABEL: 'sub'
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX1-LABEL: 'sub'
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX2-LABEL: 'sub'
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512F-LABEL: 'sub'
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512BW-LABEL: 'sub'
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512DQ-LABEL: 'sub'
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; AVX512DQ-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; SLM-LABEL: 'sub'
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; SLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; GLM-LABEL: 'sub'
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; GLM-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; BTVER2-LABEL: 'sub'
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+  %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+  %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+  %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+  %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+
+  %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+  %V4I32  = call <4 x i32>  @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+  %V8I32  = call <8 x i32>  @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+  %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+
+  %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+  %V8I16  = call <8 x i16>  @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+  %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+  %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+
+  %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+  %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+  %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+  %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+
+  ret i32 undef
+}
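Editor's note (not part of the patch): llvm.usub.sat is unsigned saturating subtraction, i.e. it returns a - b clamped at zero instead of wrapping. The per-target costs checked above can be reproduced with a minimal standalone test; this is a sketch assuming the legacy cost-model invocation these test files use (opt -cost-model -analyze), a hypothetical function name, and a target where the v8i16 case costs 1, as it does in every table above:

; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @usub_sat_example(<8 x i16> %a, <8 x i16> %b) {
  ; A v8i16 unsigned saturating subtract lowers to a single psubusw on x86,
  ; which is why the tables above report a cost of 1 for %V8I16.
  ; CHECK: Found an estimated cost of 1 for instruction: %r = call <8 x i16>
  %r = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %r
}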
diff --git a/test/Analysis/CostModel/X86/trunc.ll b/test/Analysis/CostModel/X86/trunc.ll
index e2b2cfa..9668da5 100644
--- a/test/Analysis/CostModel/X86/trunc.ll
+++ b/test/Analysis/CostModel/X86/trunc.ll
@@ -16,35 +16,41 @@
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i32>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i32>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i32>
+; SSE-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i32>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX1-LABEL: 'trunc_vXi32'
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i32>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i32>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i32>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 19 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i32>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX2-LABEL: 'trunc_vXi32'
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i32>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i32>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i32>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i32>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX512-LABEL: 'trunc_vXi32'
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i32>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i32>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i32>
+; AVX512-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i32>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; BTVER2-LABEL: 'trunc_vXi32'
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i32>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i32>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i32>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 19 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i32>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
   %V2i64 = trunc <2 x i64> undef to <2 x i32>
   %V4i64 = trunc <4 x i64> undef to <4 x i32>
   %V8i64 = trunc <8 x i64> undef to <8 x i32>
+  %V16i64 = trunc <16 x i64> undef to <16 x i32>
   ret i32 undef
 }
 
@@ -53,79 +59,117 @@
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 5 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; SSSE3-LABEL: 'trunc_vXi16'
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 5 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; SSE42-LABEL: 'trunc_vXi16'
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 13 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX1-LABEL: 'trunc_vXi16'
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 5 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 13 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX2-LABEL: 'trunc_vXi16'
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 13 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
-; AVX512-LABEL: 'trunc_vXi16'
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
-; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; AVX512F-LABEL: 'trunc_vXi16'
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; AVX512BW-LABEL: 'trunc_vXi16'
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; BTVER2-LABEL: 'trunc_vXi16'
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i16>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i16>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i16>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i16>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i16>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i16>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i16>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 5 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i16>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 6 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i16>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 13 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i16>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
   %V2i64 = trunc <2 x i64> undef to <2 x i16>
   %V4i64 = trunc <4 x i64> undef to <4 x i16>
   %V8i64 = trunc <8 x i64> undef to <8 x i16>
+  %V16i64 = trunc <16 x i64> undef to <16 x i16>
+  %V32i64 = trunc <32 x i64> undef to <32 x i16>
+
   %V2i32 = trunc <2 x i32> undef to <2 x i16>
   %V4i32 = trunc <4 x i32> undef to <4 x i16>
   %V8i32 = trunc <8 x i32> undef to <8 x i16>
   %V16i32 = trunc <16 x i32> undef to <16 x i16>
+  %V32i32 = trunc <32 x i32> undef to <32 x i16>
   ret i32 undef
 }
 
@@ -134,136 +178,190 @@
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; SSE2-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; SSE2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; SSSE3-LABEL: 'trunc_vXi8'
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; SSSE3-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; SSSE3-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; SSE42-LABEL: 'trunc_vXi8'
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; SSE42-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; SSE42-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX1-LABEL: 'trunc_vXi8'
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; AVX1-NEXT:  Cost Model: Found an estimated cost of 19 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; AVX1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX2-LABEL: 'trunc_vXi8'
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; AVX2-NEXT:  Cost Model: Found an estimated cost of 19 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; AVX2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX512F-LABEL: 'trunc_vXi8'
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; AVX512F-NEXT:  Cost Model: Found an estimated cost of 19 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; AVX512F-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; AVX512BW-LABEL: 'trunc_vXi8'
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; AVX512BW-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; BTVER2-LABEL: 'trunc_vXi8'
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i8>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i8>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i8>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i8>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i8>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i8>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 19 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i8>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
   %V2i64 = trunc <2 x i64> undef to <2 x i8>
   %V4i64 = trunc <4 x i64> undef to <4 x i8>
   %V8i64 = trunc <8 x i64> undef to <8 x i8>
+  %V16i64 = trunc <16 x i64> undef to <16 x i8>
+  %V32i64 = trunc <32 x i64> undef to <32 x i8>
+  %V64i64 = trunc <64 x i64> undef to <64 x i8>
 
   %V2i32 = trunc <2 x i32> undef to <2 x i8>
   %V4i32 = trunc <4 x i32> undef to <4 x i8>
   %V8i32 = trunc <8 x i32> undef to <8 x i8>
   %V16i32 = trunc <16 x i32> undef to <16 x i8>
+  %V32i32 = trunc <32 x i32> undef to <32 x i8>
+  %V64i32 = trunc <64 x i32> undef to <64 x i8>
 
   %V2i16 = trunc <2 x i16> undef to <2 x i8>
   %V4i16 = trunc <4 x i16> undef to <4 x i8>
   %V8i16 = trunc <8 x i16> undef to <8 x i8>
   %V16i16 = trunc <16 x i16> undef to <16 x i8>
   %V32i16 = trunc <32 x i16> undef to <32 x i8>
+  %V64i16 = trunc <64 x i16> undef to <64 x i8>
 
   ret i32 undef
 }
@@ -273,15 +371,21 @@
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i1>
+; SSE-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i1>
+; SSE-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i1>
+; SSE-NEXT:  Cost Model: Found an estimated cost of 31 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i1>
+; SSE-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i1>
+; SSE-NEXT:  Cost Model: Found an estimated cost of 15 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i1>
+; SSE-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i8 = trunc <32 x i8> undef to <32 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i8 = trunc <64 x i8> undef to <64 x i1>
 ; SSE-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
@@ -290,15 +394,21 @@
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i1>
+; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i1>
+; AVX-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i1>
+; AVX-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i1>
+; AVX-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i1>
+; AVX-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i1>
+; AVX-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i8 = trunc <32 x i8> undef to <32 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i8 = trunc <64 x i8> undef to <64 x i1>
 ; AVX-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
@@ -307,15 +417,21 @@
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i1>
+; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i1>
+; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i1>
+; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i1>
+; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i1>
+; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i1>
+; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i8 = trunc <32 x i8> undef to <32 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i8 = trunc <64 x i8> undef to <64 x i1>
 ; AVX512-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
@@ -324,15 +440,21 @@
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i64 = trunc <2 x i64> undef to <2 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i64 = trunc <4 x i64> undef to <4 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i64 = trunc <8 x i64> undef to <8 x i1>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i64 = trunc <16 x i64> undef to <16 x i1>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i64 = trunc <32 x i64> undef to <32 x i1>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i64 = trunc <64 x i64> undef to <64 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i32 = trunc <2 x i32> undef to <2 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i32 = trunc <4 x i32> undef to <4 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i32 = trunc <8 x i32> undef to <8 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i32 = trunc <16 x i32> undef to <16 x i1>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i32 = trunc <32 x i32> undef to <32 x i1>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i32 = trunc <64 x i32> undef to <64 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V2i16 = trunc <2 x i16> undef to <2 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V4i16 = trunc <4 x i16> undef to <4 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V8i16 = trunc <8 x i16> undef to <8 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V16i16 = trunc <16 x i16> undef to <16 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %V32i16 = trunc <32 x i16> undef to <32 x i1>
+; BTVER2-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %V64i16 = trunc <64 x i16> undef to <64 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V32i8 = trunc <32 x i8> undef to <32 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: %V64i8 = trunc <64 x i8> undef to <64 x i1>
 ; BTVER2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
@@ -340,17 +462,23 @@
   %V2i64 = trunc <2 x i64> undef to <2 x i1>
   %V4i64 = trunc <4 x i64> undef to <4 x i1>
   %V8i64 = trunc <8 x i64> undef to <8 x i1>
+  %V16i64 = trunc <16 x i64> undef to <16 x i1>
+  %V32i64 = trunc <32 x i64> undef to <32 x i1>
+  %V64i64 = trunc <64 x i64> undef to <64 x i1>
 
   %V2i32 = trunc <2 x i32> undef to <2 x i1>
   %V4i32 = trunc <4 x i32> undef to <4 x i1>
   %V8i32 = trunc <8 x i32> undef to <8 x i1>
   %V16i32 = trunc <16 x i32> undef to <16 x i1>
+  %V32i32 = trunc <32 x i32> undef to <32 x i1>
+  %V64i32 = trunc <64 x i32> undef to <64 x i1>
 
   %V2i16 = trunc <2 x i16> undef to <2 x i1>
   %V4i16 = trunc <4 x i16> undef to <4 x i1>
   %V8i16 = trunc <8 x i16> undef to <8 x i1>
   %V16i16 = trunc <16 x i16> undef to <16 x i1>
   %V32i16 = trunc <32 x i16> undef to <32 x i1>
+  %V64i16 = trunc <64 x i16> undef to <64 x i1>
 
   %V32i8 = trunc <32 x i8> undef to <32 x i1>
   %V64i8 = trunc <64 x i8> undef to <64 x i1>
diff --git a/test/Analysis/DependenceAnalysis/AA.ll b/test/Analysis/DependenceAnalysis/AA.ll
index 0d213e2..6f9c828 100644
--- a/test/Analysis/DependenceAnalysis/AA.ll
+++ b/test/Analysis/DependenceAnalysis/AA.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>"                            \
+; RUN: "-aa-pipeline=basic-aa,type-based-aa" 2>&1 | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -tbaa -da | FileCheck %s
 
 ; CHECK-LABEL: 'Dependence Analysis' for function 'test_no_noalias'
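The pair of RUN lines above is the pattern this whole directory migrates to: the legacy invocation is kept, and an equivalent new-pass-manager invocation is added in front of it. A minimal sketch of the correspondence, assuming an input file test.ll (the quotes around -passes=print<da> only protect the angle brackets from the shell):

; Legacy PM: -analyze prints the DependenceAnalysis report.
;   opt < test.ll -analyze -basicaa -tbaa -da
; New PM: print<da> is the printing pass, -aa-pipeline selects the AA stack,
; -disable-output suppresses the module dump, and 2>&1 folds the report into
; stdout so FileCheck can match it.
;   opt < test.ll -disable-output "-passes=print<da>" "-aa-pipeline=basic-aa,type-based-aa" 2>&1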
diff --git a/test/Analysis/DependenceAnalysis/Banerjee.ll b/test/Analysis/DependenceAnalysis/Banerjee.ll
index 8c28231..5dda3a9 100644
--- a/test/Analysis/DependenceAnalysis/Banerjee.ll
+++ b/test/Analysis/DependenceAnalysis/Banerjee.ll
@@ -1,4 +1,8 @@
+; RUN: opt < %s -disable-output -da-delinearize=false "-passes=print<da>"      \
+; RUN: -aa-pipeline=basic-aa 2>&1 | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da -da-delinearize=false | FileCheck %s
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s -check-prefix=DELIN
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s -check-prefix=DELIN
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/BasePtrBug.ll b/test/Analysis/DependenceAnalysis/BasePtrBug.ll
index 8de75df..694e26a 100644
--- a/test/Analysis/DependenceAnalysis/BasePtrBug.ll
+++ b/test/Analysis/DependenceAnalysis/BasePtrBug.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da  | FileCheck %s
 
 ; Test that the dependence analysis generates the correct results when using
diff --git a/test/Analysis/DependenceAnalysis/Constraints.ll b/test/Analysis/DependenceAnalysis/Constraints.ll
index 42dfac7..2fbee70 100644
--- a/test/Analysis/DependenceAnalysis/Constraints.ll
+++ b/test/Analysis/DependenceAnalysis/Constraints.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1
 ; RUN: opt < %s -analyze -basicaa -da
 ;; Check that this code doesn't abort. The test case is a reduced version of the LNT Polybench benchmark test case dynprog.
 
diff --git a/test/Analysis/DependenceAnalysis/Coupled.ll b/test/Analysis/DependenceAnalysis/Coupled.ll
index 7095671..0480087 100644
--- a/test/Analysis/DependenceAnalysis/Coupled.ll
+++ b/test/Analysis/DependenceAnalysis/Coupled.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/DADelin.ll b/test/Analysis/DependenceAnalysis/DADelin.ll
index 8438c2b..5c70624 100644
--- a/test/Analysis/DependenceAnalysis/DADelin.ll
+++ b/test/Analysis/DependenceAnalysis/DADelin.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
diff --git a/test/Analysis/DependenceAnalysis/ExactRDIV.ll b/test/Analysis/DependenceAnalysis/ExactRDIV.ll
index 5b2488c..44b399c 100644
--- a/test/Analysis/DependenceAnalysis/ExactRDIV.ll
+++ b/test/Analysis/DependenceAnalysis/ExactRDIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 ; ModuleID = 'ExactRDIV.bc'
diff --git a/test/Analysis/DependenceAnalysis/ExactSIV.ll b/test/Analysis/DependenceAnalysis/ExactSIV.ll
index e815799..f3fba82 100644
--- a/test/Analysis/DependenceAnalysis/ExactSIV.ll
+++ b/test/Analysis/DependenceAnalysis/ExactSIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/GCD.ll b/test/Analysis/DependenceAnalysis/GCD.ll
index b0fcda4..5f734b7 100644
--- a/test/Analysis/DependenceAnalysis/GCD.ll
+++ b/test/Analysis/DependenceAnalysis/GCD.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s -check-prefix=DELIN
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s -check-prefix=DELIN
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/Invariant.ll b/test/Analysis/DependenceAnalysis/Invariant.ll
index c964127..6835582 100644
--- a/test/Analysis/DependenceAnalysis/Invariant.ll
+++ b/test/Analysis/DependenceAnalysis/Invariant.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 ; Test for a bug, which caused an assert when an invalid
diff --git a/test/Analysis/DependenceAnalysis/MIVCheckConst.ll b/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
index 64e72df..d5dd105 100644
--- a/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
+++ b/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
@@ -1,4 +1,5 @@
 ; RUN: opt < %s -analyze -basicaa -da
+; RUN: opt < %s -passes="print<da>"
 
 ; Test that the dependence analysis pass does not seg-fault due to a null pointer
 ; dereference. The code in gcdMIVTest requires a null check for the result of
diff --git a/test/Analysis/DependenceAnalysis/NonAffineExpr.ll b/test/Analysis/DependenceAnalysis/NonAffineExpr.ll
index f12e20f..801b3f6 100644
--- a/test/Analysis/DependenceAnalysis/NonAffineExpr.ll
+++ b/test/Analysis/DependenceAnalysis/NonAffineExpr.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1
 ; RUN: opt < %s -analyze -basicaa -da
 ;
 ; CHECK: da analyze - consistent input [S S]!
diff --git a/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll b/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
index afeb061..2aba992 100644
--- a/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
+++ b/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s -check-prefix=DELIN
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s -check-prefix=DELIN
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -98,4 +100,4 @@
 
 !llvm.ident = !{!0}
 
-!0 = !{!"clang version 3.7.0 (https://vaivaswatha@bitbucket.org/compilertree/amd_clang.git 93a05fb75ee3411d24e8b2b184fc766a5318403e) (https://vaivaswatha@bitbucket.org/compilertree/amd_llvm.git 166d93d26efc912b517739f64d054a435e8e95cd)"}
+!0 = !{!"clang version 3.7.0"}
diff --git a/test/Analysis/DependenceAnalysis/PR21585.ll b/test/Analysis/DependenceAnalysis/PR21585.ll
index 23af449..ff42ba9 100644
--- a/test/Analysis/DependenceAnalysis/PR21585.ll
+++ b/test/Analysis/DependenceAnalysis/PR21585.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>"                            \
+; RUN: "-aa-pipeline=basic-aa,globals-aa" 2>&1 | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -globals-aa -da | FileCheck %s
 define void @i32_subscript(i32* %a) {
 entry:
diff --git a/test/Analysis/DependenceAnalysis/Preliminary.ll b/test/Analysis/DependenceAnalysis/Preliminary.ll
index 372d78a..2c7e5dd 100644
--- a/test/Analysis/DependenceAnalysis/Preliminary.ll
+++ b/test/Analysis/DependenceAnalysis/Preliminary.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/Propagating.ll b/test/Analysis/DependenceAnalysis/Propagating.ll
index 0aa2567..64d6a9c 100644
--- a/test/Analysis/DependenceAnalysis/Propagating.ll
+++ b/test/Analysis/DependenceAnalysis/Propagating.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/Separability.ll b/test/Analysis/DependenceAnalysis/Separability.ll
index 8e6c775..b6004b8 100644
--- a/test/Analysis/DependenceAnalysis/Separability.ll
+++ b/test/Analysis/DependenceAnalysis/Separability.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/StrongSIV.ll b/test/Analysis/DependenceAnalysis/StrongSIV.ll
index 4a6136e..f7528a5 100644
--- a/test/Analysis/DependenceAnalysis/StrongSIV.ll
+++ b/test/Analysis/DependenceAnalysis/StrongSIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll b/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
index 6e8b98c..34c1415 100644
--- a/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
+++ b/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 ; ModuleID = 'SymbolicRDIV.bc'
diff --git a/test/Analysis/DependenceAnalysis/SymbolicSIV.ll b/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
index 26c4770..b24547f 100644
--- a/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
+++ b/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/UsefulGEP.ll b/test/Analysis/DependenceAnalysis/UsefulGEP.ll
index cd46a27..283fd2c 100644
--- a/test/Analysis/DependenceAnalysis/UsefulGEP.ll
+++ b/test/Analysis/DependenceAnalysis/UsefulGEP.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1
 ; RUN: opt < %s -analyze -basicaa -da
 ;; Check this doesn't crash.
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll b/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
index 5b81ec1..119ae98 100644
--- a/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
+++ b/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 ; ModuleID = 'WeakCrossingSIV.bc'
diff --git a/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll b/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
index 128eb47..37c912c 100644
--- a/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
+++ b/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 ; ModuleID = 'WeakZeroDstSIV.bc'
diff --git a/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll b/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
index 43c3de1..602a791 100644
--- a/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
+++ b/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 ; ModuleID = 'WeakZeroSrcSIV.bc'
diff --git a/test/Analysis/DependenceAnalysis/ZIV.ll b/test/Analysis/DependenceAnalysis/ZIV.ll
index b321641..c2049a6 100644
--- a/test/Analysis/DependenceAnalysis/ZIV.ll
+++ b/test/Analysis/DependenceAnalysis/ZIV.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -disable-output "-passes=print<da>" -aa-pipeline=basic-aa 2>&1 \
+; RUN: | FileCheck %s
 ; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
 
 ; ModuleID = 'ZIV.bc'
diff --git a/test/Analysis/LoopInfo/annotated-parallel-complex.ll b/test/Analysis/LoopInfo/annotated-parallel-complex.ll
new file mode 100644
index 0000000..661e266
--- /dev/null
+++ b/test/Analysis/LoopInfo/annotated-parallel-complex.ll
@@ -0,0 +1,91 @@
+; RUN: opt -loops -analyze < %s | FileCheck %s
+;
+; void func(long n, double A[static const restrict 4*n], double B[static const restrict 4*n]) {
+;   for (long i = 0; i < n; i += 1)
+;     for (long j = 0; j < n; j += 1)
+;       for (long k = 0; k < n; k += 1)
+;         for (long l = 0; l < n; l += 1) {
+;           A[i + j + k + l] = 21;
+;           B[i + j + k + l] = 42;
+;         }
+; }
+;
+; Check that isAnnotatedParallel is working as expected.
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @func(i64 %n, double* noalias nonnull %A, double* noalias nonnull %B) {
+entry:
+  br label %for.cond
+
+for.cond:
+  %i.0 = phi i64 [ 0, %entry ], [ %add28, %for.inc27 ]
+  %cmp = icmp slt i64 %i.0, %n
+  br i1 %cmp, label %for.cond2, label %for.end29
+
+for.cond2:
+  %j.0 = phi i64 [ %add25, %for.inc24 ], [ 0, %for.cond ]
+  %cmp3 = icmp slt i64 %j.0, %n
+  br i1 %cmp3, label %for.cond6, label %for.inc27
+
+for.cond6:
+  %k.0 = phi i64 [ %add22, %for.inc21 ], [ 0, %for.cond2 ]
+  %cmp7 = icmp slt i64 %k.0, %n
+  br i1 %cmp7, label %for.cond10, label %for.inc24
+
+for.cond10:
+  %l.0 = phi i64 [ %add20, %for.body13 ], [ 0, %for.cond6 ]
+  %cmp11 = icmp slt i64 %l.0, %n
+  br i1 %cmp11, label %for.body13, label %for.inc21
+
+for.body13:
+  %add = add nuw nsw i64 %i.0, %j.0
+  %add14 = add nuw nsw i64 %add, %k.0
+  %add15 = add nuw nsw i64 %add14, %l.0
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %add15
+  store double 2.100000e+01, double* %arrayidx, align 8, !llvm.access.group !5
+  %add16 = add nuw nsw i64 %i.0, %j.0
+  %add17 = add nuw nsw i64 %add16, %k.0
+  %add18 = add nuw nsw i64 %add17, %l.0
+  %arrayidx19 = getelementptr inbounds double, double* %B, i64 %add18
+  store double 4.200000e+01, double* %arrayidx19, align 8, !llvm.access.group !6
+  %add20 = add nuw nsw i64 %l.0, 1
+  br label %for.cond10, !llvm.loop !11
+
+for.inc21:
+  %add22 = add nuw nsw i64 %k.0, 1
+  br label %for.cond6, !llvm.loop !14
+
+for.inc24:
+  %add25 = add nuw nsw i64 %j.0, 1
+  br label %for.cond2, !llvm.loop !16
+
+for.inc27:
+  %add28 = add nuw nsw i64 %i.0, 1
+  br label %for.cond, !llvm.loop !18
+
+for.end29:
+  ret void
+}
+
+; access groups
+!7 = distinct !{}
+!8 = distinct !{}
+!10 = distinct !{}
+
+; access group lists
+!5 = !{!7, !10}
+!6 = !{!7, !8, !10}
+
+; LoopIDs
+!11 = distinct !{!11, !{!"llvm.loop.parallel_accesses", !10}}
+!14 = distinct !{!14, !{!"llvm.loop.parallel_accesses", !8, !10}}
+!16 = distinct !{!16, !{!"llvm.loop.parallel_accesses", !8}}
+!18 = distinct !{!18, !{!"llvm.loop.parallel_accesses", !7}}
+
+
+; CHECK: Parallel Loop at depth 1
+; CHECK-NOT: Parallel
+; CHECK:     Loop at depth 2
+; CHECK:         Parallel Loop
+; CHECK:             Parallel Loop
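The CHECK lines encode the isAnnotatedParallel rule: a loop counts as parallel only if every memory access inside it belongs to at least one access group named by that loop's llvm.loop.parallel_accesses property. Worked through the metadata above:

; !18 (i, depth 1) lists !7:       both stores carry !7 (via !5 and !6)   -> Parallel
; !16 (j, depth 2) lists !8:       the store to A carries !5 = {!7, !10},
;                                  which does not contain !8              -> not Parallel
; !14 (k, depth 3) lists !8, !10:  both stores carry !10                  -> Parallel
; !11 (l, depth 4) lists !10:      both stores carry !10                  -> Parallel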
diff --git a/test/Analysis/LoopInfo/annotated-parallel-simple.ll b/test/Analysis/LoopInfo/annotated-parallel-simple.ll
new file mode 100644
index 0000000..4e25af8
--- /dev/null
+++ b/test/Analysis/LoopInfo/annotated-parallel-simple.ll
@@ -0,0 +1,37 @@
+; RUN: opt -loops -analyze < %s | FileCheck %s
+;
+; void func(long n, double A[static const restrict n]) {
+;   for (long i = 0; i < n; i += 1)
+;     A[i] = 21;
+; }
+;
+; Check that isAnnotatedParallel is working as expected.
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @func(i64 %n, double* noalias nonnull %A) {
+entry:
+  br label %for.cond
+
+for.cond:
+  %i.0 = phi i64 [ 0, %entry ], [ %add, %for.body ]
+  %cmp = icmp slt i64 %i.0, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %i.0
+  store double 2.100000e+01, double* %arrayidx, align 8, !llvm.access.group !6
+  %add = add nuw nsw i64 %i.0, 1
+  br label %for.cond, !llvm.loop !7
+
+for.end:
+  ret void
+}
+
+!6 = distinct !{} ; access group
+
+!7 = distinct !{!7, !9} ; LoopID
+!9 = !{!"llvm.loop.parallel_accesses", !6}
+
+
+; CHECK: Parallel Loop
diff --git a/test/Analysis/ValueTracking/gep-negative-issue.ll b/test/Analysis/ValueTracking/gep-negative-issue.ll
new file mode 100644
index 0000000..84088df
--- /dev/null
+++ b/test/Analysis/ValueTracking/gep-negative-issue.ll
@@ -0,0 +1,44 @@
+; RUN: opt -gvn -S < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-p100:128:64:64-p101:128:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+%ArrayImpl = type { i64, i64 addrspace(100)*, [1 x i64], [1 x i64], [1 x i64], i64, i64, double addrspace(100)*, double addrspace(100)*, i8, i64 }
+%_array = type { i64, %ArrayImpl addrspace(100)*, i8 }
+
+define void @test(i64 %n_chpl) {
+entry:
+  ; First section is some code
+  %0 = getelementptr inbounds %_array, %_array* null, i32 0, i32 1
+  %1 = load %ArrayImpl addrspace(100)*, %ArrayImpl addrspace(100)** %0
+  %2 = getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)* %1, i32 0, i32 8
+  %3 = load double addrspace(100)*, double addrspace(100)* addrspace(100)* %2
+  %4 = getelementptr inbounds double, double addrspace(100)* %3, i64 -1
+  ; Second section is that code repeated
+  %x0 = getelementptr inbounds %_array, %_array* null, i32 0, i32 1
+  %x1 = load %ArrayImpl addrspace(100)*, %ArrayImpl addrspace(100)** %x0
+  %x2 = getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)* %x1, i32 0, i32 8
+  %x3 = load double addrspace(100)*, double addrspace(100)* addrspace(100)* %x2
+  %x4 = getelementptr inbounds double, double addrspace(100)* %x3, i64 -1
+  ; These two stores refer to the same memory location
+  ; Even so, they are expected to remain separate stores here
+  store double 0.000000e+00, double addrspace(100)* %4
+  store double 0.000000e+00, double addrspace(100)* %x4
+  ; Third section is the repeated code again, with a later store
+  ; This third section is necessary to trigger the crash
+  %y1 = load %ArrayImpl addrspace(100)*, %ArrayImpl addrspace(100)** %0
+  %y2 = getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)* %y1, i32 0, i32 8
+  %y3 = load double addrspace(100)*, double addrspace(100)* addrspace(100)* %y2
+  %y4 = getelementptr inbounds double, double addrspace(100)* %y3, i64 -1
+  store double 0.000000e+00, double addrspace(100)* %y4
+  ret void
+; CHECK-LABEL: define void @test
+; CHECK: getelementptr inbounds double, double addrspace(100)* {{%.*}}, i64 -1
+; CHECK-NEXT: store double 0.000000e+00, double addrspace(100)* [[DST:%.*]]
+; CHECK-NEXT: store double 0.000000e+00, double addrspace(100)* [[DST]]
+; CHECK: load
+; CHECK: getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)*
+; CHECK: load
+; CHECK: getelementptr inbounds double, double addrspace(100)* {{%.*}}, i64 -1
+; CHECK: store double 0.000000e+00, double addrspace(100)*
+; CHECK: ret
+}
diff --git a/test/Assembler/2004-03-07-FunctionAddressAlignment.ll b/test/Assembler/2004-03-07-FunctionAddressAlignment.ll
new file mode 100644
index 0000000..7fa0802
--- /dev/null
+++ b/test/Assembler/2004-03-07-FunctionAddressAlignment.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s | llvm-dis | not grep ptrtoint
+; RUN: verify-uselistorder %s
+; All of these should be eliminable
+
+
+define i32 @foo() {
+	ret i32 and (i32 ptrtoint (i32()* @foo to i32), i32 1)
+}
+
+define i32 @foo2() {
+	ret i32 and (i32 1, i32 ptrtoint (i32()* @foo2 to i32))
+}
+
+define i1 @foo3() {
+	ret i1 icmp ne (i1()* @foo3, i1()* null)
+}
diff --git a/test/Assembler/DIEnumerator.ll b/test/Assembler/DIEnumerator.ll
index 9dc3d7c..fdb91d4 100644
--- a/test/Assembler/DIEnumerator.ll
+++ b/test/Assembler/DIEnumerator.ll
@@ -40,9 +40,9 @@
 ; CHECK: !DIEnumerator(name: "B0", value: 2147483647)
 
 
-!10 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E1", file: !3, line: 3, baseType: !6, size: 32, flags: DIFlagFixedEnum, elements: !11, identifier: "_ZTS2E1")
+!10 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E1", file: !3, line: 3, baseType: !6, size: 32, flags: DIFlagEnumClass, elements: !11, identifier: "_ZTS2E1")
 ; CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "E1"{{.*}}, baseType: ![[INT]]
-; CHECK-SAME: DIFlagFixedEnum
+; CHECK-SAME: DIFlagEnumClass
 !11 = !{!12, !13}
 !12 = !DIEnumerator(name: "A1", value: -2147483648)
 !13 = !DIEnumerator(name: "B1", value: 2147483647)
@@ -50,9 +50,9 @@
 ; CHECK: !DIEnumerator(name: "B1", value: 2147483647)
 
 
-!14 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E2", file: !3, line: 5, baseType: !15, size: 64, flags: DIFlagFixedEnum, elements: !16, identifier: "_ZTS2E2")
+!14 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E2", file: !3, line: 5, baseType: !15, size: 64, flags: DIFlagEnumClass, elements: !16, identifier: "_ZTS2E2")
 ; CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "E2"{{.*}}, baseType: ![[LONG:[0-9]+]]
-; CHECK-SAME: DIFlagFixedEnum
+; CHECK-SAME: DIFlagEnumClass
 !15 = !DIBasicType(name: "long long int", size: 64, encoding: DW_ATE_signed)
 ; CHECK: ![[LONG]] = !DIBasicType(name: "long long int", size: 64, encoding: DW_ATE_signed)
 !16 = !{!17, !18}
@@ -62,9 +62,9 @@
 ; CHECK: !DIEnumerator(name: "B2", value: 9223372036854775807)
 
 
-!19 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E3", file: !3, line: 7, baseType: !20, size: 64, flags: DIFlagFixedEnum, elements: !21, identifier: "_ZTS2E3")
+!19 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E3", file: !3, line: 7, baseType: !20, size: 64, flags: DIFlagEnumClass, elements: !21, identifier: "_ZTS2E3")
 ; CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "E3"{{.*}}, baseType: ![[ULONG:[0-9]+]]
-; CHECK-SAME: DIFlagFixedEnum
+; CHECK-SAME: DIFlagEnumClass
 !20 = !DIBasicType(name: "long long unsigned int", size: 64, encoding: DW_ATE_unsigned)
 ; CHECK: ![[ULONG]] = !DIBasicType(name: "long long unsigned int", size: 64, encoding: DW_ATE_unsigned)
 !21 = !{!22}
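For background, this is a pure rename: DIFlagFixedEnum became DIFlagEnumClass because the flag actually marks C++11 scoped enumerations (enum class). A source-to-metadata sketch; the C++ line is illustrative, not part of the test:

; enum class E1 : int { A1 = INT_MIN, B1 = INT_MAX };   // hypothetical C++ source
; !DICompositeType(tag: DW_TAG_enumeration_type, name: "E1",
;                  baseType: <int>, flags: DIFlagEnumClass, ...)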
diff --git a/test/Bitcode/dilocalvariable-3.9.ll b/test/Bitcode/dilocalvariable-3.9.ll
index 99f99db..a252c35 100644
--- a/test/Bitcode/dilocalvariable-3.9.ll
+++ b/test/Bitcode/dilocalvariable-3.9.ll
@@ -7,12 +7,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.1 (http://llvm.org/git/clang.git c3709e72d22432f53f8e2f14354def31a96734fe)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.1", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "main.c", directory: "/tmp")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 3.9.1 (http://llvm.org/git/clang.git c3709e72d22432f53f8e2f14354def31a96734fe)"}
+!5 = !{!"clang version 3.9.1"}
 !6 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: false, unit: !0, retainedNodes: !2)
 !7 = !DISubroutineType(types: !8)
 !8 = !{null}
diff --git a/test/Bitcode/thinlto-alias.ll b/test/Bitcode/thinlto-alias.ll
index 835d720..f3896d2 100644
--- a/test/Bitcode/thinlto-alias.ll
+++ b/test/Bitcode/thinlto-alias.ll
@@ -18,6 +18,7 @@
 ; CHECK-NEXT: <FUNCTION op0=4 op1=7
 ; CHECK:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; See if the call to func is registered.
 ; The value id 1 matches the second FUNCTION record above.
 ; CHECK-NEXT:    <PERMODULE {{.*}} op6=1/>
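These thinlto-* checks match record dumps of the bitcode summary (presumably produced by llvm-bcanalyzer -dump; the files' RUN lines sit above the hunks shown here), and each summary block now carries a FLAGS record immediately after VERSION, hence the new CHECK-NEXT line. Schematically:

;   <GLOBALVAL_SUMMARY_BLOCK
;     <VERSION ...
;     <FLAGS ...        <- newly emitted record, asserted by every test updated below
;     <PERMODULE ...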
diff --git a/test/Bitcode/thinlto-alias2.ll b/test/Bitcode/thinlto-alias2.ll
index 3d68e3f..8b04ee7 100644
--- a/test/Bitcode/thinlto-alias2.ll
+++ b/test/Bitcode/thinlto-alias2.ll
@@ -4,6 +4,7 @@
 
 ; CHECK:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; CHECK-NEXT:    <PERMODULE {{.*}} op4=0 op5=0 op6=[[ALIASID:[0-9]+]]/>
 ; CHECK-NEXT:    <PERMODULE {{.*}} op0=[[ALIASEEID:[0-9]+]]
 ; CHECK-NEXT:    <ALIAS {{.*}} op0=[[ALIASID]] {{.*}} op2=[[ALIASEEID]]/>
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-cast.ll b/test/Bitcode/thinlto-function-summary-callgraph-cast.ll
index 7964440..d4b4d54 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-cast.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-cast.ll
@@ -5,6 +5,7 @@
 
 ; CHECK:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; "op7" is a call to "callee" function.
 ; CHECK-NEXT:    <PERMODULE {{.*}} op8=3 op9=[[ALIASID:[0-9]+]]/>
 ; "another_caller" has only references but no calls.
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll b/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll
index e332224..b9613f7 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll
@@ -16,6 +16,7 @@
 ; CHECK-NEXT: <FUNCTION op0=4 op1=4
 ; CHECK:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; See if the call to func is registered, using the expected hotness type.
 ; CHECK-NEXT:    <PERMODULE_PROFILE {{.*}} op6=1 op7=2/>
 ; CHECK-NEXT:  </GLOBALVAL_SUMMARY_BLOCK>
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
index 31c99c18..0cd1098 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
@@ -46,6 +46,7 @@
 ; CHECK-NEXT: <FUNCTION op0=42 op1=5
 ; CHECK-LABEL:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; CHECK-NEXT:    <VALUE_GUID op0=25 op1=123/>
 ; op4=hot1 op6=cold op8=hot2 op10=hot4 op12=none1 op14=hot3 op16=none2 op18=none3 op20=123
 ; CHECK-NEXT:    <PERMODULE_PROFILE {{.*}} op6=1 op7=3 op8=5 op9=1 op10=2 op11=3 op12=4 op13=1 op14=6 op15=2 op16=3 op17=3 op18=7 op19=2 op20=8 op21=2 op22=25 op23=4/>
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-relbf.ll b/test/Bitcode/thinlto-function-summary-callgraph-relbf.ll
index 6c14465..7c7a6f6 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-relbf.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-relbf.ll
@@ -12,6 +12,7 @@
 ; CHECK-NEXT: <FUNCTION op0=17 op1=4
 ; CHECK:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; See if the call to func is registered.
 ; CHECK-NEXT:    <PERMODULE_RELBF {{.*}} op4=1 {{.*}} op8=256
 ; CHECK-NEXT:  </GLOBALVAL_SUMMARY_BLOCK>
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
index d1f980a..8bf65ab 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
@@ -29,6 +29,7 @@
 
 ; CHECK-LABEL:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; CHECK-NEXT:    <VALUE_GUID op0=26 op1=123/>
 ; op4=none1 op6=hot1 op8=cold1 op10=none2 op12=hot2 op14=cold2 op16=none3 op18=hot3 op20=cold3 op22=123
 ; CHECK-NEXT:    <PERMODULE_PROFILE {{.*}} op6=7 op7=0 op8=1 op9=3 op10=4 op11=1 op12=8 op13=0 op14=2 op15=3 op16=5 op17=1 op18=9 op19=0 op20=3 op21=3 op22=6 op23=1 op24=26 op25=4/>
diff --git a/test/Bitcode/thinlto-function-summary-callgraph.ll b/test/Bitcode/thinlto-function-summary-callgraph.ll
index a605b7e..0969b84 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph.ll
@@ -17,6 +17,7 @@
 ; CHECK-NEXT: <FUNCTION op0=17 op1=4
 ; CHECK:       <GLOBALVAL_SUMMARY_BLOCK
 ; CHECK-NEXT:    <VERSION
+; CHECK-NEXT:    <FLAGS
 ; See if the call to func is registered
 ; CHECK-NEXT:    <PERMODULE {{.*}} op4=1
 ; CHECK-NEXT:  </GLOBALVAL_SUMMARY_BLOCK>
diff --git a/test/Bitcode/thinlto-function-summary.ll b/test/Bitcode/thinlto-function-summary.ll
index be7e974..67c5037 100644
--- a/test/Bitcode/thinlto-function-summary.ll
+++ b/test/Bitcode/thinlto-function-summary.ll
@@ -19,6 +19,7 @@
 ; BC-NEXT: <ALIAS op0=67 op1=1
 ; BC: <GLOBALVAL_SUMMARY_BLOCK
 ; BC-NEXT: <VERSION
+; BC-NEXT: <FLAGS
 ; BC-NEXT: <PERMODULE {{.*}} op0=1 op1=0
 ; BC-NEXT: <PERMODULE {{.*}} op0=2 op1=0
 ; BC-NEXT: <PERMODULE {{.*}} op0=3 op1=7
diff --git a/test/BugPoint/compile-custom.ll.py b/test/BugPoint/compile-custom.ll.py
index 4b9b30c..b0062ac 100755
--- a/test/BugPoint/compile-custom.ll.py
+++ b/test/BugPoint/compile-custom.ll.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import sys
 
 # Currently any print-out from the custom tool is interpreted as a crash
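The __future__ import gives the script Python 3's print function under Python 2 as well, so the same source runs unchanged on both. A one-line illustration (hypothetical, not taken from the script):

print("custom tool output")  # function-call form; works on Python 2 (with the import) and Python 3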
diff --git a/test/BugPoint/func-attrs-keyval.ll b/test/BugPoint/func-attrs-keyval.ll
new file mode 100644
index 0000000..830d096
--- /dev/null
+++ b/test/BugPoint/func-attrs-keyval.ll
@@ -0,0 +1,11 @@
+; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crashfuncattr -silence-passes
+; RUN: llvm-dis %t-reduced-simplified.bc -o - | FileCheck %s
+; REQUIRES: loadable_module
+
+; CHECK: f() #[[ATTRS:[0-9]+]]
+define void @f() #0 {
+  ret void
+}
+
+; CHECK: attributes #[[ATTRS]] = { "bugpoint-crash"="sure" }
+attributes #0 = { "bugpoint-crash"="sure" noreturn "no-frame-pointer-elim-non-leaf" }
diff --git a/test/BugPoint/func-attrs.ll b/test/BugPoint/func-attrs.ll
new file mode 100644
index 0000000..3941e73
--- /dev/null
+++ b/test/BugPoint/func-attrs.ll
@@ -0,0 +1,11 @@
+; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crashfuncattr -silence-passes
+; RUN: llvm-dis %t-reduced-simplified.bc -o - | FileCheck %s
+; REQUIRES: loadable_module
+
+; CHECK: f() #[[ATTRS:[0-9]+]]
+define void @f() #0 {
+  ret void
+}
+
+; CHECK: attributes #[[ATTRS]] = { "bugpoint-crash" }
+attributes #0 = { noinline "bugpoint-crash" "no-frame-pointer-elim-non-leaf" }
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index b39086a..d2b2b8d 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -61,6 +61,7 @@
           dsymutil
           llvm-dwarfdump
           llvm-dwp
+          llvm-elfabi
           llvm-exegesis
           llvm-extract
           llvm-isel-fuzzer
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index a8fca90..48a5c84 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1173,12 +1173,12 @@
 ; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = G_LOAD [[LHSADDR]](p0)
 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = G_LOAD [[RHSADDR]](p0)
-; CHECK: [[TST:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
+; CHECK: [[TST:%[0-9]+]]:_(s1) = nnan ninf nsz arcp contract afn reassoc G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[BOOLADDR]](p0)
 define void @float_comparison(float* %a.addr, float* %b.addr, i1* %bool.addr) {
   %a = load float, float* %a.addr
   %b = load float, float* %b.addr
-  %res = fcmp oge float %a, %b
+  %res = fcmp nnan ninf nsz arcp contract afn reassoc oge float %a, %b
   store i1 %res, i1* %bool.addr
   ret void
 }
@@ -1338,9 +1338,9 @@
 ; CHECK-LABEL: name: test_pow_intrin
 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $s1
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPOW [[LHS]], [[RHS]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FPOW [[LHS]], [[RHS]]
 ; CHECK: $s0 = COPY [[RES]]
-  %res = call float @llvm.pow.f32(float %l, float %r)
+  %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.pow.f32(float %l, float %r)
   ret float %res
 }
 
@@ -1350,9 +1350,9 @@
 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $s2
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FMA [[A]], [[B]], [[C]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[A]], [[B]], [[C]]
 ; CHECK: $s0 = COPY [[RES]]
-  %res = call float @llvm.fma.f32(float %a, float %b, float %c)
+  %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fma.f32(float %a, float %b, float %c)
   ret float %res
 }
 
@@ -1360,9 +1360,9 @@
 define float @test_exp_intrin(float %a) {
 ; CHECK-LABEL: name: test_exp_intrin
 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP [[A]]
 ; CHECK: $s0 = COPY [[RES]]
-  %res = call float @llvm.exp.f32(float %a)
+  %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp.f32(float %a)
   ret float %res
 }
 
@@ -1370,9 +1370,9 @@
 define float @test_exp2_intrin(float %a) {
 ; CHECK-LABEL: name: test_exp2_intrin
 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP2 [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP2 [[A]]
 ; CHECK: $s0 = COPY [[RES]]
-  %res = call float @llvm.exp2.f32(float %a)
+  %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp2.f32(float %a)
   ret float %res
 }
 
@@ -1380,9 +1380,9 @@
 define float @test_log_intrin(float %a) {
 ; CHECK-LABEL: name: test_log_intrin
 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG [[A]]
 ; CHECK: $s0 = COPY [[RES]]
-  %res = call float @llvm.log.f32(float %a)
+  %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log.f32(float %a)
   ret float %res
 }
 
@@ -1400,9 +1400,9 @@
 define float @test_log10_intrin(float %a) {
 ; CHECK-LABEL: name: test_log10_intrin
 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG10 [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG10 [[A]]
 ; CHECK: $s0 = COPY [[RES]]
-  %res = call float @llvm.log10.f32(float %a)
+  %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log10.f32(float %a)
   ret float %res
 }
 
@@ -1410,9 +1410,9 @@
 define float @test_fabs_intrin(float %a) {
 ; CHECK-LABEL: name: test_fabs_intrin
 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FABS [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FABS [[A]]
 ; CHECK: $s0 = COPY [[RES]]
-  %res = call float @llvm.fabs.f32(float %a)
+  %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fabs.f32(float %a)
   ret float %res
 }
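Every hunk above applies the same rule: when the IR-level fcmp or intrinsic call carries fast-math flags, the IRTranslator now copies those flags onto the generic opcode it emits. Schematically, for any FP intrinsic:

;   %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.<op>.f32(...)
;   ; is expected to translate to:
;   %{{[0-9]+}}:_(s32) = nnan ninf nsz arcp contract afn reassoc G_<OP> ...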
 
@@ -2242,3 +2242,44 @@
   call void @llvm.invariant.end.p0i8({}* %inv, i64 8, i8* %y)
   ret void
 }
+
+declare float @llvm.ceil.f32(float)
+define float @test_ceil_f32(float %x) {
+  ; CHECK-LABEL: name:            test_ceil_f32
+  ; CHECK: %{{[0-9]+}}:_(s32) = G_FCEIL %{{[0-9]+}}
+  %y = call float @llvm.ceil.f32(float %x)
+  ret float %y
+}
+
+declare double @llvm.ceil.f64(double)
+define double @test_ceil_f64(double %x) {
+  ; CHECK-LABEL: name:            test_ceil_f64
+  ; CHECK: %{{[0-9]+}}:_(s64) = G_FCEIL %{{[0-9]+}}
+  %y = call double @llvm.ceil.f64(double %x)
+  ret double %y
+}
+
+declare <2 x float> @llvm.ceil.v2f32(<2 x float>)
+define <2 x float> @test_ceil_v2f32(<2 x float> %x) {
+  ; CHECK-LABEL: name:            test_ceil_v2f32
+  ; CHECK: %{{[0-9]+}}:_(<2 x s32>) = G_FCEIL %{{[0-9]+}}
+  %y = call <2 x float> @llvm.ceil.v2f32(<2 x float> %x)
+  ret <2 x float> %y
+}
+
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
+define <4 x float> @test_ceil_v4f32(<4 x float> %x) {
+  ; CHECK-LABEL: name:            test_ceil_v4f32
+  ; CHECK: %{{[0-9]+}}:_(<4 x s32>) = G_FCEIL %{{[0-9]+}}
+  ; SELECT: %{{[0-9]+}}:fpr128 = FRINTPv4f32 %{{[0-9]+}}
+  %y = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x)
+  ret <4 x float> %y
+}
+
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
+define <2 x double> @test_ceil_v2f64(<2 x double> %x) {
+  ; CHECK-LABEL: name:            test_ceil_v2f64
+  ; CHECK: %{{[0-9]+}}:_(<2 x s64>) = G_FCEIL %{{[0-9]+}}
+  %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x)
+  ret <2 x double> %y
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll b/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
new file mode 100644
index 0000000..64e8fe2
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
@@ -0,0 +1,34 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -O1 -stop-after=irtranslator -enable-cse-in-irtranslator=1 -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
+
+; CHECK-LABEL: name: test_split_struct
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.ptr)
+; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
+; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load 8 from %ir.ptr + 8)
+
+; CHECK: [[IMPDEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
+; CHECK: [[INS1:%[0-9]+]]:_(s128) = G_INSERT [[IMPDEF]], [[LO]](s64), 0
+; CHECK: [[INS2:%[0-9]+]]:_(s128) = G_INSERT [[INS1]], [[HI]](s64), 64
+; CHECK: [[EXTLO:%[0-9]+]]:_(s64) = G_EXTRACT [[INS2]](s128), 0
+; CHECK: [[EXTHI:%[0-9]+]]:_(s64) = G_EXTRACT [[INS2]](s128), 64
+
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
+; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[CST2]](s64)
+; CHECK: G_STORE [[EXTLO]](s64), [[GEP2]](p0) :: (store 8 into stack, align 0)
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
+; CHECK: [[CST3:%[0-9]+]]:_(s64) = COPY [[CST]]
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[CST3]](s64)
+; CHECK: G_STORE [[EXTHI]](s64), [[GEP3]](p0) :: (store 8 into stack + 8, align 0)
+define void @test_split_struct([2 x i64]* %ptr) {
+  %struct = load [2 x i64], [2 x i64]* %ptr
+  call void @take_split_struct([2 x i64]* null, i64 1, i64 2, i64 3,
+                               i64 4, i64 5, i64 6,
+                               [2 x i64] %struct)
+  ret void
+}
+
+declare void @take_split_struct([2 x i64]* %ptr, i64, i64, i64,
+                               i64, i64, i64,
+                               [2 x i64] %in) ;
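The check worth noticing above is [[CST3]]: with -enable-cse-in-irtranslator=1, the second use of the constant 8 (the "stack + 8" slot) is not re-materialized as a fresh G_CONSTANT; the CSE-aware builder reuses the earlier definition through a COPY. A sketch of the difference:

; without CSE (hypothetical): [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; with CSE (checked above):   [[CST3:%[0-9]+]]:_(s64) = COPY [[CST]]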
diff --git a/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option-fastisel.ll b/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option-fastisel.ll
new file mode 100644
index 0000000..73158f5
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option-fastisel.ll
@@ -0,0 +1,35 @@
+; REQUIRES: asserts
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN:   -verify-machineinstrs=0 -O0 -global-isel=false -debug-only=isel \
+; RUN:   | FileCheck %s --check-prefixes=DISABLED,FASTISEL
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN:   -verify-machineinstrs=0 -O1 -global-isel=false -debug-only=isel \
+; RUN:   | FileCheck %s --check-prefixes=DISABLED,NOFASTISEL
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN:   -verify-machineinstrs=0 -O0 -fast-isel=false -global-isel=false \
+; RUN:   -debug-only=isel \
+; RUN:   | FileCheck %s --check-prefixes=DISABLED,NOFASTISEL
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN:   -verify-machineinstrs=0 -O1 -fast-isel=false -global-isel=false \
+; RUN:   -debug-only=isel \
+; RUN:   | FileCheck %s --check-prefixes=DISABLED,NOFASTISEL
+
+; Check that the right instruction selector is chosen when using
+; -global-isel=false. FastISel should be used at -O0 (unless -fast-isel=false is
+; also present) and SelectionDAG otherwise.
+
+; DISABLED-NOT: IRTranslator
+
+; DISABLED: AArch64 Instruction Selection
+; DISABLED: Expand ISel Pseudo-instructions
+
+; FASTISEL: Enabling fast-isel
+; NOFASTISEL-NOT: Enabling fast-isel
+
+define void @empty() {
+  ret void
+}
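Condensing the four RUN lines into a table (the FASTISEL/NOFASTISEL prefixes key off the "Enabling fast-isel" debug message):

;   -O0 -global-isel=false                   -> FastISel      (FASTISEL)
;   -O1 -global-isel=false                   -> SelectionDAG  (NOFASTISEL)
;   -O0 -global-isel=false -fast-isel=false  -> SelectionDAG  (NOFASTISEL)
;   -O1 -global-isel=false -fast-isel=false  -> SelectionDAG  (NOFASTISEL)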
diff --git a/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll b/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll
index 87425b8..a044b34 100644
--- a/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll
+++ b/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll
@@ -45,6 +45,7 @@
 ; VERIFY-NEXT:   Verify generated machine code
 ; ENABLED-NEXT:  PreLegalizerCombiner
 ; VERIFY-NEXT:   Verify generated machine code
+; ENABLED-NEXT:  Analysis containing CSE Info
 ; ENABLED-NEXT:  Legalizer
 ; VERIFY-NEXT:   Verify generated machine code
 ; ENABLED-NEXT:  RegBankSelect
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-ext-cse.mir b/test/CodeGen/AArch64/GlobalISel/legalize-ext-cse.mir
new file mode 100644
index 0000000..92980dc
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-ext-cse.mir
@@ -0,0 +1,21 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=aarch64 -run-pass=legalizer %s -o - -enable-cse-in-legalizer=1 -O1 | FileCheck %s
+---
+name:            test_cse_in_legalizer
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_cse_in_legalizer
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[AND]](s32)
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
+    ; CHECK: $w0 = COPY [[AND]](s32)
+    %0:_(s64) = COPY $x0
+    %1:_(s8) = G_TRUNC %0(s64)
+    %19:_(s32) = G_ZEXT %1(s8)
+    $w0 = COPY %19(s32)
+    %2:_(s8) = G_TRUNC %0(s64)
+    %20:_(s32) = G_ZEXT %2(s8)
+    $w0 = COPY %20(s32)
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir b/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
index e46c9ad..f6cca29 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -march=aarch64 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -march=aarch64 -run-pass=legalizer -O0 %s -o - | FileCheck %s
 ---
 name:            test_implicit_def
 body: |
@@ -16,3 +16,58 @@
     %1:_(s64) = G_TRUNC %0(s128)
     $x0 = COPY %1(s64)
 ...
+
+---
+name: test_implicit_def_s3
+body: |
+  bb.0:
+    liveins:
+
+    ; CHECK-LABEL: name: test_implicit_def_s3
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 61
+    ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[DEF]], [[C]]
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]]
+    ; CHECK: $x0 = COPY [[ASHR]](s64)
+    %0:_(s3) = G_IMPLICIT_DEF
+    %1:_(s64) = G_SEXT %0
+    $x0 = COPY %1(s64)
+...
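The shift amount in the checks above is not arbitrary: after widening the s3 to s64, the sign extension is performed with a shift pair where 61 = 64 - 3:

#   G_SHL  x, 61   ; move the 3 payload bits into the top of the register
#   G_ASHR x, 61   ; smear the sign bit back down, completing the G_SEXT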
+
+# FIXME: s2 not correctly handled
+
+---
+name: test_implicit_def_v2s32
+body: |
+  bb.0:
+
+    ; CHECK-LABEL: name: test_implicit_def_v2s32
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[DEF3:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF1]](s32), [[DEF2]](s32), [[DEF3]](s32)
+    ; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK: $x0 = COPY [[UV]](<2 x s32>)
+    ; CHECK: $x1 = COPY [[UV1]](<2 x s32>)
+    %0:_(<4 x s32>) = G_IMPLICIT_DEF
+    %1:_(<2 x s32> ), %2:_(<2 x s32>) = G_UNMERGE_VALUES %0
+    $x0 = COPY %1
+    $x1 = COPY %2
+...
+
+---
+name: test_implicit_def_v4s64
+body: |
+  bb.0:
+
+    ; CHECK-LABEL: name: test_implicit_def_v4s64
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[DEF1:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: $q0 = COPY [[DEF]](<2 x s64>)
+    ; CHECK: $q1 = COPY [[DEF1]](<2 x s64>)
+    %0:_(<4 x s64>) = G_IMPLICIT_DEF
+    %1:_(<2 x s64> ), %2:_(<2 x s64>) = G_UNMERGE_VALUES %0
+    $q0 = COPY %1
+    $q1 = COPY %2
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 0c75cd3..b1bb9b6 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -7,7 +7,7 @@
 # REQUIRES: asserts
 
 # The main purpose of this test is to make sure we don't over-relax
-# LegalizerInfo validation and loose its ability to catch bugs.
+# LegalizerInfo validation and lose its ability to catch bugs.
 #
 # Watch out for every "SKIPPED: user-defined predicate detected" in the
 # check-lines below and keep each and every one of them justified.
@@ -46,7 +46,7 @@
 # DEBUG:      .. the first uncovered type index: 1, OK
 #
 # DEBUG-NEXT: G_IMPLICIT_DEF (opcode {{[0-9]+}}): 1 type index
-# DEBUG:      .. the first uncovered type index: 1, OK
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
 #
 # DEBUG-NEXT: G_PHI (opcode {{[0-9]+}}): 1 type index
 # DEBUG:      .. the first uncovered type index: 1, OK
@@ -329,6 +329,9 @@
 #
 # DEBUG-NEXT: G_BSWAP (opcode {{[0-9]+}}): 1 type index
 # DEBUG:      .. the first uncovered type index: 1, OK
+#
+# DEBUG-NEXT: G_FCEIL (opcode {{[0-9]+}}): 1 type index
+# DEBUG:      .. the first uncovered type index: 1, OK
 
 # CHECK-NOT: ill-defined
 
diff --git a/test/CodeGen/AArch64/GlobalISel/regbank-ceil.ll b/test/CodeGen/AArch64/GlobalISel/regbank-ceil.ll
new file mode 100644
index 0000000..b7bc230
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/regbank-ceil.ll
@@ -0,0 +1,16 @@
+; RUN: llc -O=0 -verify-machineinstrs -mtriple aarch64--- \
+; RUN: -stop-before=instruction-select -global-isel %s -o - | FileCheck %s
+
+; Make sure that we choose an FPR for the G_FCEIL and G_LOAD instead of a GPR.
+
+declare float @llvm.ceil.f32(float)
+
+; CHECK-LABEL: name:            foo
+define float @foo(float) {
+  store float %0, float* undef, align 4
+  ; CHECK: %2:fpr(s32) = G_LOAD %1(p0)
+  ; CHECK-NEXT: %3:fpr(s32) = G_FCEIL %2
+  %2 = load float, float* undef, align 4
+  %3 = call float @llvm.ceil.f32(float %2)
+  ret float %3
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/select-ceil.mir b/test/CodeGen/AArch64/GlobalISel/select-ceil.mir
new file mode 100644
index 0000000..5d42a2c
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-ceil.mir
@@ -0,0 +1,93 @@
+# RUN: llc -verify-machineinstrs -mtriple aarch64--- \
+# RUN: -run-pass=instruction-select -global-isel %s -o - | FileCheck %s
+...
+---
+name:            ceil_float
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name:            ceil_float
+    ; CHECK: %{{[0-9]+}}:fpr32 = FRINTPSr %{{[0-9]+}}
+    liveins: $s0
+    %0:fpr(s32) = COPY $s0
+    %1:fpr(s32) = G_FCEIL %0
+    $s0 = COPY %1(s32)
+
+...
+---
+name:            ceil_double
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name:            ceil_double
+    ; CHECK: %{{[0-9]+}}:fpr64 = FRINTPDr %{{[0-9]+}}
+    liveins: $d0
+    %0:fpr(s64) = COPY $d0
+    %1:fpr(s64) = G_FCEIL %0
+    $d0 = COPY %1(s64)
+
+...
+---
+name:            ceil_v2f32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name:            ceil_v2f32
+    ; CHECK: %{{[0-9]+}}:fpr64 = FRINTPv2f32 %{{[0-9]+}}
+    liveins: $d0
+    %0:fpr(<2 x s32>) = COPY $d0
+    %1:fpr(<2 x s32>) = G_FCEIL %0
+    $d0 = COPY %1(<2 x s32>)
+
+...
+---
+name:            ceil_v4f32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name:            ceil_v4f32
+    ; CHECK: %{{[0-9]+}}:fpr128 = FRINTPv4f32 %{{[0-9]+}}
+    liveins: $q0
+    %0:fpr(<4 x s32>) = COPY $q0
+    %1:fpr(<4 x s32>) = G_FCEIL %0
+    $q0 = COPY %1(<4 x s32>)
+
+...
+---
+name:            ceil_v2f64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name:            ceil_v2f64
+    ; CHECK: %{{[0-9]+}}:fpr128 = FRINTPv2f64 %{{[0-9]+}}
+    liveins: $q0
+    %0:fpr(<2 x s64>) = COPY $q0
+    %1:fpr(<2 x s64>) = G_FCEIL %0
+    $q0 = COPY %1(<2 x s64>)
+
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir b/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir
new file mode 100644
index 0000000..dafeee4
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir
@@ -0,0 +1,34 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+--- |
+  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+  define void @gmerge_s64_s32() { ret void }
+...
+
+---
+name:            gmerge_s64_s32
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: gmerge_s64_s32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY $w1
+    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
+    ; CHECK: [[SUBREG_TO_REG1:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_32
+    ; CHECK: [[BFMXri:%[0-9]+]]:gpr64 = BFMXri [[SUBREG_TO_REG]], [[SUBREG_TO_REG1]], 32, 31
+    ; CHECK: $x0 = COPY [[BFMXri]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
+    %2(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+    $x0 = COPY %2(s64)
+...
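The selected sequence implements the 32+32 -> 64 merge without shifts or ORs: each SUBREG_TO_REG places a $w register in the low half of a 64-bit register, and BFMXri ..., 32, 31 (the BFI x, y, #32, #32 alias) inserts the low 32 bits of the second register at bit 32 of the first:

#   result[31:0]  = $w0    ; low half, from the first SUBREG_TO_REG
#   result[63:32] = $w1    ; inserted by the bitfield move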
diff --git a/test/CodeGen/AArch64/O0-pipeline.ll b/test/CodeGen/AArch64/O0-pipeline.ll
index d85d126..aa9b1d0 100644
--- a/test/CodeGen/AArch64/O0-pipeline.ll
+++ b/test/CodeGen/AArch64/O0-pipeline.ll
@@ -32,8 +32,10 @@
 ; CHECK-NEXT:       Safe Stack instrumentation pass
 ; CHECK-NEXT:       Insert stack protectors
 ; CHECK-NEXT:       Module Verifier
+; CHECK-NEXT:       Analysis containing CSE Info
 ; CHECK-NEXT:       IRTranslator
 ; CHECK-NEXT:       AArch64PreLegalizerCombiner
+; CHECK-NEXT:       Analysis containing CSE Info
 ; CHECK-NEXT:       Legalizer
 ; CHECK-NEXT:       RegBankSelect
 ; CHECK-NEXT:       Localizer
@@ -50,6 +52,7 @@
 ; CHECK-NEXT:       Prologue/Epilogue Insertion & Frame Finalization
 ; CHECK-NEXT:       Post-RA pseudo instruction expansion pass
 ; CHECK-NEXT:       AArch64 pseudo instruction expansion pass
+; CHECK-NEXT:       AArch64 speculation hardening pass
 ; CHECK-NEXT:       Analyze Machine Code For Garbage Collection
 ; CHECK-NEXT:       Branch relaxation pass
 ; CHECK-NEXT:       AArch64 Branch Targets
diff --git a/test/CodeGen/AArch64/O3-pipeline.ll b/test/CodeGen/AArch64/O3-pipeline.ll
index a32da0b..98cef01 100644
--- a/test/CodeGen/AArch64/O3-pipeline.ll
+++ b/test/CodeGen/AArch64/O3-pipeline.ll
@@ -146,6 +146,7 @@
 ; CHECK-NEXT:       Post-RA pseudo instruction expansion pass
 ; CHECK-NEXT:       AArch64 pseudo instruction expansion pass
 ; CHECK-NEXT:       AArch64 load / store optimization pass
+; CHECK-NEXT:       AArch64 speculation hardening pass
 ; CHECK-NEXT:       MachineDominator Tree Construction
 ; CHECK-NEXT:       Machine Natural Loop Construction
 ; CHECK-NEXT:       Falkor HW Prefetch Fix Late Phase
@@ -154,6 +155,7 @@
 ; CHECK-NEXT:       Machine Block Frequency Analysis
 ; CHECK-NEXT:       MachinePostDominator Tree Construction
 ; CHECK-NEXT:       Branch Probability Basic Block Placement
+; CHECK-NEXT:       AArch64 load / store optimization pass
 ; CHECK-NEXT:       Branch relaxation pass
 ; CHECK-NEXT:       AArch64 Branch Targets
 ; CHECK-NEXT:       AArch64 Compress Jump Tables
diff --git a/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
index 708ae08..09eb5fe 100644
--- a/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
+++ b/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -disable-post-ra < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios -disable-fp-elim -disable-post-ra < %s | FileCheck %s --check-prefix=CHECK-MACHO
+; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios -frame-pointer=all -disable-post-ra < %s | FileCheck %s --check-prefix=CHECK-MACHO
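This file, and the arm64 tests below, switch from the retired llc option -disable-fp-elim to its replacement -frame-pointer=all; both spellings request the same behavior, namely keeping a frame pointer in every function:

;   old: llc -mtriple=arm64-apple-ios -disable-fp-elim   ...
;   new: llc -mtriple=arm64-apple-ios -frame-pointer=all ...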
 
 ; This test aims to check basic correctness of frame layout &
 ; frame access code. There are 8 functions in this test file,
diff --git a/test/CodeGen/AArch64/addr-of-ret-addr.ll b/test/CodeGen/AArch64/addr-of-ret-addr.ll
index 247b282..b099b18 100644
--- a/test/CodeGen/AArch64/addr-of-ret-addr.ll
+++ b/test/CodeGen/AArch64/addr-of-ret-addr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -mtriple=arm64-windows | FileCheck %s
+; RUN: llc < %s -frame-pointer=all -mtriple=arm64-windows | FileCheck %s
 
 ; Test generated from C code:
 ; #include <stdarg.h>
diff --git a/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll b/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
index 294680a..d9d12c3 100644
--- a/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
+++ b/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
@@ -28,7 +28,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!9, !10}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.0 (http://llvm.org/git/clang.git git:/git/puzzlebox/clang.git/ c4d1aea01c4444eb81bdbf391f1be309127c3cf1)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2)
 !1 = !DIFile(filename: "print.i", directory: "/Volumes/Ebi/echeng/radars/r9146594")
 !2 = !{!3}
 !3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression())
diff --git a/test/CodeGen/AArch64/arm64-abi_align.ll b/test/CodeGen/AArch64/arm64-abi_align.ll
index 85fd1ea..836b7b8 100644
--- a/test/CodeGen/AArch64/arm64-abi_align.ll
+++ b/test/CodeGen/AArch64/arm64-abi_align.ll
@@ -1,5 +1,5 @@
-; RUN: llc -fast-isel-sink-local-values < %s -mtriple=arm64-apple-darwin -mcpu=cyclone -enable-misched=false -disable-fp-elim | FileCheck %s
-; RUN: llc -fast-isel-sink-local-values < %s -mtriple=arm64-apple-darwin -O0 -disable-fp-elim -fast-isel | FileCheck -check-prefix=FAST %s
+; RUN: llc -fast-isel-sink-local-values < %s -mtriple=arm64-apple-darwin -mcpu=cyclone -enable-misched=false -frame-pointer=all | FileCheck %s
+; RUN: llc -fast-isel-sink-local-values < %s -mtriple=arm64-apple-darwin -O0 -frame-pointer=all -fast-isel | FileCheck -check-prefix=FAST %s
 
 ; rdar://12648441
 ; Generated from arm64-arguments.c with -O2.
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll b/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
index 256db18..1b65d9c 100644
--- a/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
+++ b/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
@@ -1,5 +1,5 @@
 ; This test should cause the TargetMaterializeAlloca to be invoked
-; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin -disable-fp-elim < %s | FileCheck %s
+; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin -frame-pointer=all < %s | FileCheck %s
 
 %struct.S1Ty = type { i64 }
 %struct.S2Ty = type { %struct.S1Ty, %struct.S1Ty }
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-call.ll b/test/CodeGen/AArch64/arm64-fast-isel-call.ll
index abbe655..dc1aac8 100644
--- a/test/CodeGen/AArch64/arm64-fast-isel-call.ll
+++ b/test/CodeGen/AArch64/arm64-fast-isel-call.ll
@@ -1,6 +1,6 @@
-; RUN: llc -fast-isel-sink-local-values -O0 -fast-isel -fast-isel-abort=2 -code-model=small -verify-machineinstrs -disable-fp-elim -mtriple=arm64-apple-darwin   < %s | FileCheck %s
-; RUN: llc -fast-isel-sink-local-values -O0 -fast-isel -fast-isel-abort=2 -code-model=large -verify-machineinstrs -disable-fp-elim -mtriple=arm64-apple-darwin   < %s | FileCheck %s --check-prefix=LARGE
-; RUN: llc -fast-isel-sink-local-values -O0 -fast-isel -fast-isel-abort=2 -code-model=small -verify-machineinstrs -disable-fp-elim -mtriple=aarch64_be-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-BE
+; RUN: llc -fast-isel-sink-local-values -O0 -fast-isel -fast-isel-abort=2 -code-model=small -verify-machineinstrs -frame-pointer=all -mtriple=arm64-apple-darwin   < %s | FileCheck %s
+; RUN: llc -fast-isel-sink-local-values -O0 -fast-isel -fast-isel-abort=2 -code-model=large -verify-machineinstrs -frame-pointer=all -mtriple=arm64-apple-darwin   < %s | FileCheck %s --check-prefix=LARGE
+; RUN: llc -fast-isel-sink-local-values -O0 -fast-isel -fast-isel-abort=2 -code-model=small -verify-machineinstrs -frame-pointer=all -mtriple=aarch64_be-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-BE
 
 define void @call0() nounwind {
 entry:
diff --git a/test/CodeGen/AArch64/arm64-hello.ll b/test/CodeGen/AArch64/arm64-hello.ll
index a8d1c24..5e1bd9d 100644
--- a/test/CodeGen/AArch64/arm64-hello.ll
+++ b/test/CodeGen/AArch64/arm64-hello.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-post-ra -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-post-ra -frame-pointer=all | FileCheck %s
 ; RUN: llc < %s -mtriple=arm64-linux-gnu -disable-post-ra | FileCheck %s --check-prefix=CHECK-LINUX
 
 ; CHECK-LABEL: main:
diff --git a/test/CodeGen/AArch64/arm64-large-frame.ll b/test/CodeGen/AArch64/arm64-large-frame.ll
index d1244e7..cfda00c 100644
--- a/test/CodeGen/AArch64/arm64-large-frame.ll
+++ b/test/CodeGen/AArch64/arm64-large-frame.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mtriple=arm64-none-linux-gnu -disable-fp-elim -disable-post-ra < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=arm64-none-linux-gnu -frame-pointer=all -disable-post-ra < %s | FileCheck %s
 declare void @use_addr(i8*)
 
 @addr = global i8* null
diff --git a/test/CodeGen/AArch64/arm64-neon-copy.ll b/test/CodeGen/AArch64/arm64-neon-copy.ll
index 0b6132b..0d4d2c7 100644
--- a/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -1,58 +1,81 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
-
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
 
 define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
 ; CHECK-LABEL: ins16bw:
-; CHECK: mov {{v[0-9]+}}.b[15], {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v0.b[15], w0
+; CHECK-NEXT:    ret
   %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 15
   ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
 ; CHECK-LABEL: ins8hw:
-; CHECK: mov {{v[0-9]+}}.h[6], {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v0.h[6], w0
+; CHECK-NEXT:    ret
   %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 6
   ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
 ; CHECK-LABEL: ins4sw:
-; CHECK: mov {{v[0-9]+}}.s[2], {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v0.s[2], w0
+; CHECK-NEXT:    ret
   %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 2
   ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
 ; CHECK-LABEL: ins2dw:
-; CHECK: mov {{v[0-9]+}}.d[1], {{x[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v0.d[1], x0
+; CHECK-NEXT:    ret
   %tmp3 = insertelement <2 x i64> %tmp1, i64 %tmp2, i32 1
   ret <2 x i64> %tmp3
 }
 
 define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
 ; CHECK-LABEL: ins8bw:
-; CHECK: mov {{v[0-9]+}}.b[5], {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v0.b[5], w0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 5
   ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
 ; CHECK-LABEL: ins4hw:
-; CHECK: mov {{v[0-9]+}}.h[3], {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v0.h[3], w0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 3
   ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
 ; CHECK-LABEL: ins2sw:
-; CHECK: mov {{v[0-9]+}}.s[1], {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v0.s[1], w0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
   ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
 ; CHECK-LABEL: ins16b16:
-; CHECK: mov {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v1.b[15], v0.b[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <16 x i8> %tmp1, i32 2
   %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
   ret <16 x i8> %tmp4
@@ -60,7 +83,10 @@
 
 define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
 ; CHECK-LABEL: ins8h8:
-; CHECK: mov {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v1.h[7], v0.h[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
   ret <8 x i16> %tmp4
@@ -68,7 +94,10 @@
 
 define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
 ; CHECK-LABEL: ins4s4:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v1.s[1], v0.s[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
   ret <4 x i32> %tmp4
@@ -76,7 +105,10 @@
 
 define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
 ; CHECK-LABEL: ins2d2:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x i64> %tmp1, i32 0
   %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
   ret <2 x i64> %tmp4
@@ -84,7 +116,10 @@
 
 define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
 ; CHECK-LABEL: ins4f4:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v1.s[1], v0.s[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x float> %tmp1, i32 2
   %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
   ret <4 x float> %tmp4
@@ -92,7 +127,10 @@
 
 define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
 ; CHECK-LABEL: ins2df2:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x double> %tmp1, i32 0
   %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
   ret <2 x double> %tmp4
@@ -100,7 +138,11 @@
 
 define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
 ; CHECK-LABEL: ins8b16:
-; CHECK: mov {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.b[15], v0.b[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i8> %tmp1, i32 2
   %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
   ret <16 x i8> %tmp4
@@ -108,7 +150,11 @@
 
 define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
 ; CHECK-LABEL: ins4h8:
-; CHECK: mov {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.h[7], v0.h[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
   ret <8 x i16> %tmp4
@@ -116,7 +162,11 @@
 
 define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
 ; CHECK-LABEL: ins2s4:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.s[1], v0.s[1]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x i32> %tmp1, i32 1
   %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
   ret <4 x i32> %tmp4
@@ -124,7 +174,11 @@
 
 define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
 ; CHECK-LABEL: ins1d2:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
   ret <2 x i64> %tmp4
@@ -132,7 +186,11 @@
 
 define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
 ; CHECK-LABEL: ins2f4:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.s[1], v0.s[1]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x float> %tmp1, i32 1
   %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
   ret <4 x float> %tmp4
@@ -140,7 +198,10 @@
 
 define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
 ; CHECK-LABEL: ins1f2:
-; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <1 x double> %tmp1, i32 0
   %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
   ret <2 x double> %tmp4
@@ -148,7 +209,11 @@
 
 define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
 ; CHECK-LABEL: ins16b8:
-; CHECK: mov {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v1.b[7], v0.b[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <16 x i8> %tmp1, i32 2
   %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
   ret <8 x i8> %tmp4
@@ -156,7 +221,11 @@
 
 define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
 ; CHECK-LABEL: ins8h4:
-; CHECK: mov {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v1.h[3], v0.h[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
   ret <4 x i16> %tmp4
@@ -164,7 +233,11 @@
 
 define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
 ; CHECK-LABEL: ins4s2:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v1.s[1], v0.s[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
   ret <2 x i32> %tmp4
@@ -172,7 +245,11 @@
 
 define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
 ; CHECK-LABEL: ins2d1:
-; CHECK: mov {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v1.d[0], v0.d[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x i64> %tmp1, i32 0
   %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
   ret <1 x i64> %tmp4
@@ -180,7 +257,11 @@
 
 define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
 ; CHECK-LABEL: ins4f2:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v1.s[1], v0.s[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x float> %tmp1, i32 2
   %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
   ret <2 x float> %tmp4
@@ -188,7 +269,10 @@
 
 define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
 ; CHECK-LABEL: ins2f1:
-; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.2d, v0.d[1]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x double> %tmp1, i32 1
   %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
   ret <1 x double> %tmp4
@@ -196,7 +280,12 @@
 
 define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
 ; CHECK-LABEL: ins8b8:
-; CHECK: mov {{v[0-9]+}}.b[4], {{v[0-9]+}}.b[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.b[4], v0.b[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i8> %tmp1, i32 2
   %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
   ret <8 x i8> %tmp4
@@ -204,7 +293,12 @@
 
 define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
 ; CHECK-LABEL: ins4h4:
-; CHECK: mov {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.h[3], v0.h[2]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
   ret <4 x i16> %tmp4
@@ -212,7 +306,12 @@
 
 define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
 ; CHECK-LABEL: ins2s2:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.s[1], v0.s[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x i32> %tmp1, i32 0
   %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
   ret <2 x i32> %tmp4
@@ -220,7 +319,12 @@
 
 define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
 ; CHECK-LABEL: ins1d1:
-; CHECK: mov {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.d[0], v0.d[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
   ret <1 x i64> %tmp4
@@ -228,7 +332,12 @@
 
 define <2 x float> @ins2f2(<2 x float> %tmp1, <2 x float> %tmp2) {
 ; CHECK-LABEL: ins2f2:
-; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.s[1], v0.s[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x float> %tmp1, i32 0
   %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
   ret <2 x float> %tmp4
@@ -236,7 +345,8 @@
 
 define <1 x double> @ins1df1(<1 x double> %tmp1, <1 x double> %tmp2) {
 ; CHECK-LABEL: ins1df1:
-; CHECK-NOT: mov {{v[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <1 x double> %tmp1, i32 0
   %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
   ret <1 x double> %tmp4
@@ -244,7 +354,9 @@
 
 define i32 @umovw16b(<16 x i8> %tmp1) {
 ; CHECK-LABEL: umovw16b:
-; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[8]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umov w0, v0.b[8]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <16 x i8> %tmp1, i32 8
   %tmp4 = zext i8 %tmp3 to i32
   ret i32 %tmp4
@@ -252,7 +364,9 @@
 
 define i32 @umovw8h(<8 x i16> %tmp1) {
 ; CHECK-LABEL: umovw8h:
-; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umov w0, v0.h[2]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = zext i16 %tmp3 to i32
   ret i32 %tmp4
@@ -260,21 +374,28 @@
 
 define i32 @umovw4s(<4 x i32> %tmp1) {
 ; CHECK-LABEL: umovw4s:
-; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w0, v0.s[2]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   ret i32 %tmp3
 }
 
 define i64 @umovx2d(<2 x i64> %tmp1) {
 ; CHECK-LABEL: umovx2d:
-; CHECK: mov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x0, v0.d[1]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x i64> %tmp1, i32 1
   ret i64 %tmp3
 }
 
 define i32 @umovw8b(<8 x i8> %tmp1) {
 ; CHECK-LABEL: umovw8b:
-; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w0, v0.b[7]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i8> %tmp1, i32 7
   %tmp4 = zext i8 %tmp3 to i32
   ret i32 %tmp4
@@ -282,7 +403,10 @@
 
 define i32 @umovw4h(<4 x i16> %tmp1) {
 ; CHECK-LABEL: umovw4h:
-; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w0, v0.h[2]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = zext i16 %tmp3 to i32
   ret i32 %tmp4
@@ -290,21 +414,30 @@
 
 define i32 @umovw2s(<2 x i32> %tmp1) {
 ; CHECK-LABEL: umovw2s:
-; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w0, v0.s[1]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x i32> %tmp1, i32 1
   ret i32 %tmp3
 }
 
 define i64 @umovx1d(<1 x i64> %tmp1) {
 ; CHECK-LABEL: umovx1d:
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   ret i64 %tmp3
 }
 
 define i32 @smovw16b(<16 x i8> %tmp1) {
 ; CHECK-LABEL: smovw16b:
-; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.b[8]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smov w8, v0.b[8]
+; CHECK-NEXT:    add w0, w8, w8
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <16 x i8> %tmp1, i32 8
   %tmp4 = sext i8 %tmp3 to i32
   %tmp5 = add i32 %tmp4, %tmp4
@@ -313,7 +446,10 @@
 
 define i32 @smovw8h(<8 x i16> %tmp1) {
 ; CHECK-LABEL: smovw8h:
-; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smov w8, v0.h[2]
+; CHECK-NEXT:    add w0, w8, w8
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = sext i16 %tmp3 to i32
   %tmp5 = add i32 %tmp4, %tmp4
@@ -322,7 +458,9 @@
 
 define i64 @smovx16b(<16 x i8> %tmp1) {
 ; CHECK-LABEL: smovx16b:
-; CHECK: smov {{x[0-9]+}}, {{v[0-9]+}}.b[8]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smov x0, v0.b[8]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <16 x i8> %tmp1, i32 8
   %tmp4 = sext i8 %tmp3 to i64
   ret i64 %tmp4
@@ -330,7 +468,9 @@
 
 define i64 @smovx8h(<8 x i16> %tmp1) {
 ; CHECK-LABEL: smovx8h:
-; CHECK: smov {{x[0-9]+}}, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smov x0, v0.h[2]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = sext i16 %tmp3 to i64
   ret i64 %tmp4
@@ -338,7 +478,9 @@
 
 define i64 @smovx4s(<4 x i32> %tmp1) {
 ; CHECK-LABEL: smovx4s:
-; CHECK: smov {{x[0-9]+}}, {{v[0-9]+}}.s[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smov x0, v0.s[2]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = sext i32 %tmp3 to i64
   ret i64 %tmp4
@@ -346,7 +488,11 @@
 
 define i32 @smovw8b(<8 x i8> %tmp1) {
 ; CHECK-LABEL: smovw8b:
-; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.b[4]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    smov w8, v0.b[4]
+; CHECK-NEXT:    add w0, w8, w8
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i8> %tmp1, i32 4
   %tmp4 = sext i8 %tmp3 to i32
   %tmp5 = add i32 %tmp4, %tmp4
@@ -355,7 +501,11 @@
 
 define i32 @smovw4h(<4 x i16> %tmp1) {
 ; CHECK-LABEL: smovw4h:
-; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    smov w8, v0.h[2]
+; CHECK-NEXT:    add w0, w8, w8
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = sext i16 %tmp3 to i32
   %tmp5 = add i32 %tmp4, %tmp4
@@ -364,7 +514,10 @@
 
 define i32 @smovx8b(<8 x i8> %tmp1) {
 ; CHECK-LABEL: smovx8b:
-; CHECK: smov {{[xw][0-9]+}}, {{v[0-9]+}}.b[6]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    smov w0, v0.b[6]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <8 x i8> %tmp1, i32 6
   %tmp4 = sext i8 %tmp3 to i32
   ret i32 %tmp4
@@ -372,7 +525,10 @@
 
 define i32 @smovx4h(<4 x i16> %tmp1) {
 ; CHECK-LABEL: smovx4h:
-; CHECK: smov {{[xw][0-9]+}}, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    smov w0, v0.h[2]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = sext i16 %tmp3 to i32
   ret i32 %tmp4
@@ -380,7 +536,10 @@
 
 define i64 @smovx2s(<2 x i32> %tmp1) {
 ; CHECK-LABEL: smovx2s:
-; CHECK: smov {{x[0-9]+}}, {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    smov x0, v0.s[1]
+; CHECK-NEXT:    ret
   %tmp3 = extractelement <2 x i32> %tmp1, i32 1
   %tmp4 = sext i32 %tmp3 to i64
   ret i64 %tmp4
@@ -388,35 +547,52 @@
 
 define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
 ; CHECK-LABEL: test_vcopy_lane_s8:
-; CHECK: mov  {{v[0-9]+}}.b[5], {{v[0-9]+}}.b[3]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.b[5], v1.b[3]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 11, i32 6, i32 7>
   ret <8 x i8> %vset_lane
 }
 
 define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
 ; CHECK-LABEL: test_vcopyq_laneq_s8:
-; CHECK: mov  {{v[0-9]+}}.b[14], {{v[0-9]+}}.b[6]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v0.b[14], v1.b[6]
+; CHECK-NEXT:    ret
   %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 22, i32 15>
   ret <16 x i8> %vset_lane
 }
 
 define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
 ; CHECK-LABEL: test_vcopy_lane_swap_s8:
-; CHECK: mov {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v1.b[7], v0.b[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 0>
   ret <8 x i8> %vset_lane
 }
 
 define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
 ; CHECK-LABEL: test_vcopyq_laneq_swap_s8:
-; CHECK: mov {{v[0-9]+}}.b[0], {{v[0-9]+}}.b[15]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v1.b[0], v0.b[15]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <16 x i8> %vset_lane
 }
 
 define <8 x i8> @test_vdup_n_u8(i8 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u8:
-; CHECK: dup {{v[0-9]+}}.8b, {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.8b, w0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <8 x i8> undef, i8 %v1, i32 0
   %vecinit1.i = insertelement <8 x i8> %vecinit.i, i8 %v1, i32 1
   %vecinit2.i = insertelement <8 x i8> %vecinit1.i, i8 %v1, i32 2
@@ -430,7 +606,9 @@
 
 define <4 x i16> @test_vdup_n_u16(i16 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u16:
-; CHECK: dup {{v[0-9]+}}.4h, {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.4h, w0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <4 x i16> undef, i16 %v1, i32 0
   %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %v1, i32 1
   %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %v1, i32 2
@@ -440,7 +618,9 @@
 
 define <2 x i32> @test_vdup_n_u32(i32 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u32:
-; CHECK: dup {{v[0-9]+}}.2s, {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.2s, w0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <2 x i32> undef, i32 %v1, i32 0
   %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %v1, i32 1
   ret <2 x i32> %vecinit1.i
@@ -448,14 +628,18 @@
 
 define <1 x i64> @test_vdup_n_u64(i64 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u64:
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <1 x i64> undef, i64 %v1, i32 0
   ret <1 x i64> %vecinit.i
 }
 
 define <16 x i8> @test_vdupq_n_u8(i8 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u8:
-; CHECK: dup {{v[0-9]+}}.16b, {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.16b, w0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <16 x i8> undef, i8 %v1, i32 0
   %vecinit1.i = insertelement <16 x i8> %vecinit.i, i8 %v1, i32 1
   %vecinit2.i = insertelement <16 x i8> %vecinit1.i, i8 %v1, i32 2
@@ -477,7 +661,9 @@
 
 define <8 x i16> @test_vdupq_n_u16(i16 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u16:
-; CHECK: dup {{v[0-9]+}}.8h, {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.8h, w0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <8 x i16> undef, i16 %v1, i32 0
   %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %v1, i32 1
   %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %v1, i32 2
@@ -491,7 +677,9 @@
 
 define <4 x i32> @test_vdupq_n_u32(i32 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u32:
-; CHECK: dup {{v[0-9]+}}.4s, {{w[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.4s, w0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <4 x i32> undef, i32 %v1, i32 0
   %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %v1, i32 1
   %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %v1, i32 2
@@ -501,7 +689,9 @@
 
 define <2 x i64> @test_vdupq_n_u64(i64 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u64:
-; CHECK: dup {{v[0-9]+}}.2d, {{x[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.2d, x0
+; CHECK-NEXT:    ret
   %vecinit.i = insertelement <2 x i64> undef, i64 %v1, i32 0
   %vecinit1.i = insertelement <2 x i64> %vecinit.i, i64 %v1, i32 1
   ret <2 x i64> %vecinit1.i
@@ -509,190 +699,252 @@
 
 define <8 x i8> @test_vdup_lane_s8(<8 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdup_lane_s8:
-; CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.8b, v0.b[5]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <8 x i8> %shuffle
 }
 
 define <4 x i16> @test_vdup_lane_s16(<4 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdup_lane_s16:
-; CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.4h, v0.h[2]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
   ret <4 x i16> %shuffle
 }
 
 define <2 x i32> @test_vdup_lane_s32(<2 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdup_lane_s32:
-; CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[1]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
   ret <2 x i32> %shuffle
 }
 
 define <16 x i8> @test_vdupq_lane_s8(<8 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s8:
-; CHECK: {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.16b, v0.b[5]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <16 x i8> %shuffle
 }
 
 define <8 x i16> @test_vdupq_lane_s16(<4 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s16:
-; CHECK: {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.8h, v0.h[2]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i16> %shuffle
 }
 
 define <4 x i32> @test_vdupq_lane_s32(<2 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s32:
-; CHECK: {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.4s, v0.s[1]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %shuffle
 }
 
 define <2 x i64> @test_vdupq_lane_s64(<1 x i64> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s64:
-; CHECK: {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2d, v0.d[0]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <1 x i64> %v1, <1 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %shuffle
 }
 
 define <8 x i8> @test_vdup_laneq_s8(<16 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdup_laneq_s8:
-; CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.8b, v0.b[5]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <8 x i8> %shuffle
 }
 
 define <4 x i16> @test_vdup_laneq_s16(<8 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdup_laneq_s16:
-; CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.4h, v0.h[2]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
   ret <4 x i16> %shuffle
 }
 
 define <2 x i32> @test_vdup_laneq_s32(<4 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdup_laneq_s32:
-; CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.2s, v0.s[1]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   ret <2 x i32> %shuffle
 }
 
 define <16 x i8> @test_vdupq_laneq_s8(<16 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s8:
-; CHECK: dup {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.16b, v0.b[5]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <16 x i8> %shuffle
 }
 
 define <8 x i16> @test_vdupq_laneq_s16(<8 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s16:
-; CHECK: {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.8h, v0.h[2]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i16> %shuffle
 }
 
 define <4 x i32> @test_vdupq_laneq_s32(<4 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s32:
-; CHECK: dup {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.4s, v0.s[1]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %shuffle
 }
 
 define <2 x i64> @test_vdupq_laneq_s64(<2 x i64> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s64:
-; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.2d, v0.d[0]
+; CHECK-NEXT:    ret
   %shuffle = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %shuffle
 }
 
 define i64 @test_bitcastv8i8toi64(<8 x i8> %in) {
 ; CHECK-LABEL: test_bitcastv8i8toi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
    %res = bitcast <8 x i8> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
    ret i64 %res
 }
 
 define i64 @test_bitcastv4i16toi64(<4 x i16> %in) {
 ; CHECK-LABEL: test_bitcastv4i16toi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
    %res = bitcast <4 x i16> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
    ret i64 %res
 }
 
 define i64 @test_bitcastv2i32toi64(<2 x i32> %in) {
 ; CHECK-LABEL: test_bitcastv2i32toi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
    %res = bitcast <2 x i32> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
    ret i64 %res
 }
 
 define i64 @test_bitcastv2f32toi64(<2 x float> %in) {
 ; CHECK-LABEL: test_bitcastv2f32toi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
    %res = bitcast <2 x float> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
    ret i64 %res
 }
 
 define i64 @test_bitcastv1i64toi64(<1 x i64> %in) {
 ; CHECK-LABEL: test_bitcastv1i64toi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
    %res = bitcast <1 x i64> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
    ret i64 %res
 }
 
 define i64 @test_bitcastv1f64toi64(<1 x double> %in) {
 ; CHECK-LABEL: test_bitcastv1f64toi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
    %res = bitcast <1 x double> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
    ret i64 %res
 }
 
 define <8 x i8> @test_bitcasti64tov8i8(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
    %res = bitcast i64 %in to <8 x i8>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
    ret <8 x i8> %res
 }
 
 define <4 x i16> @test_bitcasti64tov4i16(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
    %res = bitcast i64 %in to <4 x i16>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
    ret <4 x i16> %res
 }
 
 define <2 x i32> @test_bitcasti64tov2i32(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
    %res = bitcast i64 %in to <2 x i32>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
    ret <2 x i32> %res
 }
 
 define <2 x float> @test_bitcasti64tov2f32(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
    %res = bitcast i64 %in to <2 x float>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
    ret <2 x float> %res
 }
 
 define <1 x i64> @test_bitcasti64tov1i64(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
    %res = bitcast i64 %in to <1 x i64>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
    ret <1 x i64> %res
 }
 
 define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
    %res = bitcast i64 %in to <1 x double>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
    ret <1 x double> %res
 }
 
 define <1 x i64> @test_bitcastv8i8tov1f64(<8 x i8> %a) #0 {
 ; CHECK-LABEL: test_bitcastv8i8tov1f64:
-; CHECK: neg {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-; CHECK-NEXT: fcvtzs {{[xd][0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.8b, v0.8b
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
   %sub.i = sub <8 x i8> zeroinitializer, %a
   %1 = bitcast <8 x i8> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -701,8 +953,11 @@
 
 define <1 x i64> @test_bitcastv4i16tov1f64(<4 x i16> %a) #0 {
 ; CHECK-LABEL: test_bitcastv4i16tov1f64:
-; CHECK: neg {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-; CHECK-NEXT: fcvtzs {{[dx][0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.4h, v0.4h
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
   %sub.i = sub <4 x i16> zeroinitializer, %a
   %1 = bitcast <4 x i16> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -711,8 +966,11 @@
 
 define <1 x i64> @test_bitcastv2i32tov1f64(<2 x i32> %a) #0 {
 ; CHECK-LABEL: test_bitcastv2i32tov1f64:
-; CHECK: neg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-; CHECK-NEXT: fcvtzs {{[xd][0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
   %sub.i = sub <2 x i32> zeroinitializer, %a
   %1 = bitcast <2 x i32> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -721,8 +979,11 @@
 
 define <1 x i64> @test_bitcastv1i64tov1f64(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1i64tov1f64:
-; CHECK: neg {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK-NEXT: fcvtzs {{[dx][0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg d0, d0
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
   %sub.i = sub <1 x i64> zeroinitializer, %a
   %1 = bitcast <1 x i64> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -731,8 +992,11 @@
 
 define <1 x i64> @test_bitcastv2f32tov1f64(<2 x float> %a) #0 {
 ; CHECK-LABEL: test_bitcastv2f32tov1f64:
-; CHECK: fneg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-; CHECK-NEXT: fcvtzs {{[xd][0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fneg v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
   %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %a
   %1 = bitcast <2 x float> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -741,8 +1005,12 @@
 
 define <8 x i8> @test_bitcastv1f64tov8i8(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov8i8:
-; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
-; CHECK-NEXT: neg {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    scvtf d0, x8
+; CHECK-NEXT:    neg v0.8b, v0.8b
+; CHECK-NEXT:    ret
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <8 x i8>
   %sub.i = sub <8 x i8> zeroinitializer, %1
@@ -751,8 +1019,12 @@
 
 define <4 x i16> @test_bitcastv1f64tov4i16(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov4i16:
-; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
-; CHECK-NEXT: neg {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    scvtf d0, x8
+; CHECK-NEXT:    neg v0.4h, v0.4h
+; CHECK-NEXT:    ret
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <4 x i16>
   %sub.i = sub <4 x i16> zeroinitializer, %1
@@ -761,8 +1033,12 @@
 
 define <2 x i32> @test_bitcastv1f64tov2i32(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov2i32:
-; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
-; CHECK-NEXT: neg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    scvtf d0, x8
+; CHECK-NEXT:    neg v0.2s, v0.2s
+; CHECK-NEXT:    ret
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <2 x i32>
   %sub.i = sub <2 x i32> zeroinitializer, %1
@@ -771,8 +1047,12 @@
 
 define <1 x i64> @test_bitcastv1f64tov1i64(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov1i64:
-; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
-; CHECK-NEXT: neg {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    scvtf d0, x8
+; CHECK-NEXT:    neg d0, d0
+; CHECK-NEXT:    ret
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <1 x i64>
   %sub.i = sub <1 x i64> zeroinitializer, %1
@@ -781,8 +1061,12 @@
 
 define <2 x float> @test_bitcastv1f64tov2f32(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov2f32:
-; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
-; CHECK-NEXT: fneg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    scvtf d0, x8
+; CHECK-NEXT:    fneg v0.2s, v0.2s
+; CHECK-NEXT:    ret
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <2 x float>
   %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %1
@@ -882,7 +1166,9 @@
 
 define <8 x i8> @getl(<16 x i8> %x) #0 {
 ; CHECK-LABEL: getl:
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %vecext = extractelement <16 x i8> %x, i32 0
   %vecinit = insertelement <8 x i8> undef, i8 %vecext, i32 0
   %vecext1 = extractelement <16 x i8> %x, i32 1
@@ -902,15 +1188,22 @@
   ret <8 x i8> %vecinit14
 }
 
-; CHECK-LABEL: test_extracts_inserts_varidx_extract:
-; CHECK: str q0
-; CHECK-DAG: and [[MASKED_IDX:x[0-9]+]], x0, #0x7
-; CHECK: bfi [[PTR:x[0-9]+]], [[MASKED_IDX]], #1, #3
-; CHECK-DAG: ldr h[[R:[0-9]+]], {{\[}}[[PTR]]{{\]}}
-; CHECK-DAG: mov v[[R]].h[1], v0.h[1]
-; CHECK-DAG: mov v[[R]].h[2], v0.h[2]
-; CHECK-DAG: mov v[[R]].h[3], v0.h[3]
 define <4 x i16> @test_extracts_inserts_varidx_extract(<8 x i16> %x, i32 %idx) {
+; CHECK-LABEL: test_extracts_inserts_varidx_extract:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    str q0, [sp, #-16]!
+; CHECK-NEXT:    and x8, x0, #0x7
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    bfi x9, x8, #1, #3
+; CHECK-NEXT:    ldr h1, [x9]
+; CHECK-NEXT:    mov v1.h[1], v0.h[1]
+; CHECK-NEXT:    mov v1.h[2], v0.h[2]
+; CHECK-NEXT:    mov v1.h[3], v0.h[3]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    add sp, sp, #16 // =16
+; CHECK-NEXT:    ret
   %tmp = extractelement <8 x i16> %x, i32 %idx
   %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 0
   %tmp3 = extractelement <8 x i16> %x, i32 1
@@ -922,15 +1215,23 @@
   ret <4 x i16> %tmp8
 }
 
-; CHECK-LABEL: test_extracts_inserts_varidx_insert:
-; CHECK: and [[MASKED_IDX:x[0-9]+]], x0, #0x3
-; CHECK: bfi x9, [[MASKED_IDX]], #1, #2
-; CHECK: str h0, [x9]
-; CHECK-DAG: ldr d[[R:[0-9]+]]
-; CHECK-DAG: mov v[[R]].h[1], v0.h[1]
-; CHECK-DAG: mov v[[R]].h[2], v0.h[2]
-; CHECK-DAG: mov v[[R]].h[3], v0.h[3]
 define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
+; CHECK-LABEL: test_extracts_inserts_varidx_insert:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16 // =16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0x3
+; CHECK-NEXT:    add x9, sp, #8 // =8
+; CHECK-NEXT:    bfi x9, x8, #1, #2
+; CHECK-NEXT:    str h0, [x9]
+; CHECK-NEXT:    ldr d1, [sp, #8]
+; CHECK-NEXT:    mov v1.h[1], v0.h[1]
+; CHECK-NEXT:    mov v1.h[2], v0.h[2]
+; CHECK-NEXT:    mov v1.h[3], v0.h[3]
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    add sp, sp, #16 // =16
+; CHECK-NEXT:    ret
   %tmp = extractelement <8 x i16> %x, i32 0
   %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 %idx
   %tmp3 = extractelement <8 x i16> %x, i32 1
@@ -944,7 +1245,10 @@
 
 define <4 x i16> @test_dup_v2i32_v4i16(<2 x i32> %a) {
 ; CHECK-LABEL: test_dup_v2i32_v4i16:
-; CHECK: dup v0.4h, v0.h[2]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.4h, v0.h[2]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <2 x i32> %a, i32 1
   %vget_lane = trunc i32 %x to i16
@@ -957,7 +1261,9 @@
 
 define <8 x i16> @test_dup_v4i32_v8i16(<4 x i32> %a) {
 ; CHECK-LABEL: test_dup_v4i32_v8i16:
-; CHECK: dup v0.8h, v0.h[6]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    dup v0.8h, v0.h[6]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <4 x i32> %a, i32 3
   %vget_lane = trunc i32 %x to i16
@@ -974,7 +1280,10 @@
 
 define <4 x i16> @test_dup_v1i64_v4i16(<1 x i64> %a) {
 ; CHECK-LABEL: test_dup_v1i64_v4i16:
-; CHECK: dup v0.4h, v0.h[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.4h, v0.h[0]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <1 x i64> %a, i32 0
   %vget_lane = trunc i64 %x to i16
@@ -987,7 +1296,10 @@
 
 define <2 x i32> @test_dup_v1i64_v2i32(<1 x i64> %a) {
 ; CHECK-LABEL: test_dup_v1i64_v2i32:
-; CHECK: dup v0.2s, v0.s[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <1 x i64> %a, i32 0
   %vget_lane = trunc i64 %x to i32
@@ -998,7 +1310,9 @@
 
 define <8 x i16> @test_dup_v2i64_v8i16(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v8i16:
-; CHECK: dup v0.8h, v0.h[4]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    dup v0.8h, v0.h[4]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <2 x i64> %a, i32 1
   %vget_lane = trunc i64 %x to i16
@@ -1015,7 +1329,9 @@
 
 define <4 x i32> @test_dup_v2i64_v4i32(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v4i32:
-; CHECK: dup v0.4s, v0.s[2]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    dup v0.4s, v0.s[2]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <2 x i64> %a, i32 1
   %vget_lane = trunc i64 %x to i32
@@ -1028,7 +1344,9 @@
 
 define <4 x i16> @test_dup_v4i32_v4i16(<4 x i32> %a) {
 ; CHECK-LABEL: test_dup_v4i32_v4i16:
-; CHECK: dup v0.4h, v0.h[2]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    dup v0.4h, v0.h[2]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <4 x i32> %a, i32 1
   %vget_lane = trunc i32 %x to i16
@@ -1041,7 +1359,9 @@
 
 define <4 x i16> @test_dup_v2i64_v4i16(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v4i16:
-; CHECK: dup v0.4h, v0.h[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    dup v0.4h, v0.h[0]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <2 x i64> %a, i32 0
   %vget_lane = trunc i64 %x to i16
@@ -1054,7 +1374,9 @@
 
 define <2 x i32> @test_dup_v2i64_v2i32(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v2i32:
-; CHECK: dup v0.2s, v0.s[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    ret
 entry:
   %x = extractelement <2 x i64> %a, i32 0
   %vget_lane = trunc i64 %x to i32
@@ -1066,8 +1388,9 @@
 
 define <2 x float> @test_scalar_to_vector_f32_to_v2f32(<2 x float> %a) {
 ; CHECK-LABEL: test_scalar_to_vector_f32_to_v2f32:
-; CHECK: fmaxp s{{[0-9]+}}, v{{[0-9]+}}.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmaxp s0, v0.2s
+; CHECK-NEXT:    ret
 entry:
   %0 = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %a)
   %1 = insertelement <1 x float> undef, float %0, i32 0
@@ -1078,8 +1401,9 @@
 
 define <4 x float> @test_scalar_to_vector_f32_to_v4f32(<2 x float> %a) {
 ; CHECK-LABEL: test_scalar_to_vector_f32_to_v4f32:
-; CHECK: fmaxp s{{[0-9]+}}, v{{[0-9]+}}.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmaxp s0, v0.2s
+; CHECK-NEXT:    ret
 entry:
   %0 = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %a)
   %1 = insertelement <1 x float> undef, float %0, i32 0
@@ -1092,7 +1416,10 @@
 
 define <2 x i32> @test_concat_undef_v1i32(<2 x i32> %a) {
 ; CHECK-LABEL: test_concat_undef_v1i32:
-; CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x i32> %a, i32 0
   %vecinit1.i = insertelement <2 x i32> undef, i32 %0, i32 1
@@ -1103,8 +1430,10 @@
 
 define <2 x i32> @test_concat_v1i32_undef(i32 %a) {
 ; CHECK-LABEL: test_concat_v1i32_undef:
-; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    sqabs s0, s0
+; CHECK-NEXT:    ret
 entry:
   %b = tail call i32 @llvm.aarch64.neon.sqabs.i32(i32 %a)
   %vecinit.i432 = insertelement <2 x i32> undef, i32 %b, i32 0
@@ -1113,7 +1442,10 @@
 
 define <2 x i32> @test_concat_same_v1i32_v1i32(<2 x i32> %a) {
 ; CHECK-LABEL: test_concat_same_v1i32_v1i32:
-; CHECK: dup v{{[0-9]+}}.2s, v{{[0-9]+}}.s[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x i32> %a, i32 0
   %vecinit.i = insertelement <2 x i32> undef, i32 %0, i32 0
@@ -1123,9 +1455,15 @@
 
 define <2 x i32> @test_concat_diff_v1i32_v1i32(i32 %a, i32 %b) {
 ; CHECK-LABEL: test_concat_diff_v1i32_v1i32:
-; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
-; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
-; CHECK: mov {{v[0-9]+}}.s[1], w{{[0-9]+}}
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov s1, w1
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    sqabs s1, s1
+; CHECK-NEXT:    sqabs s0, s0
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    mov v0.s[1], w8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %c = tail call i32 @llvm.aarch64.neon.sqabs.i32(i32 %a)
   %d = insertelement <2 x i32> undef, i32 %c, i32 0
@@ -1137,7 +1475,9 @@
 
 define <16 x i8> @test_concat_v16i8_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v16i8_v16i8:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecinit30 = shufflevector <16 x i8> %x, <16 x i8> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
   ret <16 x i8> %vecinit30
@@ -1145,7 +1485,10 @@
 
 define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v8i8_v16i8:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <8 x i8> %x, i32 0
   %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
@@ -1169,7 +1512,10 @@
 
 define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v16i8_v8i8:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <16 x i8> %x, i32 0
   %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
@@ -1208,7 +1554,11 @@
 
 define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v8i8_v8i8:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <8 x i8> %x, i32 0
   %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
@@ -1247,7 +1597,9 @@
 
 define <8 x i16> @test_concat_v8i16_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v8i16_v8i16:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecinit14 = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
   ret <8 x i16> %vecinit14
@@ -1255,7 +1607,10 @@
 
 define <8 x i16> @test_concat_v8i16_v4i16_v8i16(<4 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v4i16_v8i16:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <4 x i16> %x, i32 0
   %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
@@ -1271,7 +1626,10 @@
 
 define <8 x i16> @test_concat_v8i16_v8i16_v4i16(<8 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v8i16_v4i16:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <8 x i16> %x, i32 0
   %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
@@ -1294,7 +1652,11 @@
 
 define <8 x i16> @test_concat_v8i16_v4i16_v4i16(<4 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v4i16_v4i16:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <4 x i16> %x, i32 0
   %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
@@ -1317,7 +1679,9 @@
 
 define <4 x i32> @test_concat_v4i32_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v4i32_v4i32:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecinit6 = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   ret <4 x i32> %vecinit6
@@ -1325,7 +1689,10 @@
 
 define <4 x i32> @test_concat_v4i32_v2i32_v4i32(<2 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v2i32_v4i32:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <2 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -1337,7 +1704,10 @@
 
 define <4 x i32> @test_concat_v4i32_v4i32_v2i32(<4 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v4i32_v2i32:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -1352,7 +1722,11 @@
 
 define <4 x i32> @test_concat_v4i32_v2i32_v2i32(<2 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v2i32_v2i32:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecinit6 = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %vecinit6
@@ -1360,7 +1734,9 @@
 
 define <2 x i64> @test_concat_v2i64_v2i64_v2i64(<2 x i64> %x, <2 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v2i64_v2i64:
-; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
 entry:
   %vecinit2 = shufflevector <2 x i64> %x, <2 x i64> %y, <2 x i32> <i32 0, i32 2>
   ret <2 x i64> %vecinit2
@@ -1368,7 +1744,10 @@
 
 define <2 x i64> @test_concat_v2i64_v1i64_v2i64(<1 x i64> %x, <2 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v1i64_v2i64:
-; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <1 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
@@ -1378,7 +1757,10 @@
 
 define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v2i64_v1i64:
-; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <2 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
@@ -1389,7 +1771,11 @@
 
 define <2 x i64> @test_concat_v2i64_v1i64_v1i64(<1 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v1i64_v1i64:
-; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
 entry:
   %vecext = extractelement <1 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
@@ -1401,84 +1787,113 @@
 
 define <4 x i16> @concat_vector_v4i16_const() {
 ; CHECK-LABEL: concat_vector_v4i16_const:
-; CHECK: movi {{v[0-9]+}}.2d, #0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <4 x i32> zeroinitializer
  ret <4 x i16> %r
 }
 
 define <4 x i16> @concat_vector_v4i16_const_one() {
 ; CHECK-LABEL: concat_vector_v4i16_const_one:
-; CHECK: movi {{v[0-9]+}}.4h, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.4h, #1
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <4 x i32> zeroinitializer
  ret <4 x i16> %r
 }
 
 define <4 x i32> @concat_vector_v4i32_const() {
 ; CHECK-LABEL: concat_vector_v4i32_const:
-; CHECK: movi {{v[0-9]+}}.2d, #0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i32> zeroinitializer, <1 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %r
 }
 
 define <8 x i8> @concat_vector_v8i8_const() {
 ; CHECK-LABEL: concat_vector_v8i8_const:
-; CHECK: movi {{v[0-9]+}}.2d, #0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <8 x i32> zeroinitializer
  ret <8 x i8> %r
 }
 
 define <8 x i16> @concat_vector_v8i16_const() {
 ; CHECK-LABEL: concat_vector_v8i16_const:
-; CHECK: movi {{v[0-9]+}}.2d, #0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <8 x i32> zeroinitializer
  ret <8 x i16> %r
 }
 
 define <8 x i16> @concat_vector_v8i16_const_one() {
 ; CHECK-LABEL: concat_vector_v8i16_const_one:
-; CHECK: movi {{v[0-9]+}}.8h, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.8h, #1
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <8 x i32> zeroinitializer
  ret <8 x i16> %r
 }
 
 define <16 x i8> @concat_vector_v16i8_const() {
 ; CHECK-LABEL: concat_vector_v16i8_const:
-; CHECK: movi {{v[0-9]+}}.2d, #0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <16 x i32> zeroinitializer
  ret <16 x i8> %r
 }
 
 define <4 x i16> @concat_vector_v4i16(<1 x i16> %a) {
 ; CHECK-LABEL: concat_vector_v4i16:
-; CHECK: dup v0.4h, v0.h[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.4h, v0.h[0]
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i16> %a, <1 x i16> undef, <4 x i32> zeroinitializer
  ret <4 x i16> %r
 }
 
 define <4 x i32> @concat_vector_v4i32(<1 x i32> %a) {
 ; CHECK-LABEL: concat_vector_v4i32:
-; CHECK: dup v0.4s, v0.s[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.4s, v0.s[0]
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i32> %a, <1 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %r
 }
 
 define <8 x i8> @concat_vector_v8i8(<1 x i8> %a) {
 ; CHECK-LABEL: concat_vector_v8i8:
-; CHECK: dup v0.8b, v0.b[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.8b, v0.b[0]
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i8> %a, <1 x i8> undef, <8 x i32> zeroinitializer
  ret <8 x i8> %r
 }
 
 define <8 x i16> @concat_vector_v8i16(<1 x i16> %a) {
 ; CHECK-LABEL: concat_vector_v8i16:
-; CHECK: dup v0.8h, v0.h[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.8h, v0.h[0]
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i16> %a, <1 x i16> undef, <8 x i32> zeroinitializer
  ret <8 x i16> %r
 }
 
 define <16 x i8> @concat_vector_v16i8(<1 x i8> %a) {
 ; CHECK-LABEL: concat_vector_v16i8:
-; CHECK: dup v0.16b, v0.b[0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.16b, v0.b[0]
+; CHECK-NEXT:    ret
  %r = shufflevector <1 x i8> %a, <1 x i8> undef, <16 x i32> zeroinitializer
  ret <16 x i8> %r
 }
diff --git a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index 7ae739e..b98cb7a 100644
--- a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -1,5 +1,5 @@
-; RUN: llc %s -o - -enable-shrink-wrap=true -disable-post-ra -disable-fp-elim | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
-; RUN: llc %s -o - -enable-shrink-wrap=false -disable-post-ra -disable-fp-elim | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
+; RUN: llc %s -o - -enable-shrink-wrap=true -disable-post-ra -frame-pointer=all | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
+; RUN: llc %s -o - -enable-shrink-wrap=false -disable-post-ra -frame-pointer=all | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios"
 
diff --git a/test/CodeGen/AArch64/arm64-vcvt.ll b/test/CodeGen/AArch64/arm64-vcvt.ll
index f7437bc..d236aea 100644
--- a/test/CodeGen/AArch64/arm64-vcvt.ll
+++ b/test/CodeGen/AArch64/arm64-vcvt.ll
@@ -1,4 +1,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -pass-remarks-missed=gisel-* \
+; RUN: -aarch64-neon-syntax=apple -global-isel -global-isel-abort=2 2>&1 | \
+; RUN: FileCheck %s --check-prefixes=FALLBACK,CHECK
 
 define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
 ;CHECK-LABEL: fcvtas_2s:
@@ -427,6 +430,7 @@
 declare <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double>) nounwind readnone
 
+; FALLBACK-NOT: remark{{.*}}frintp_2s
 define <2 x float> @frintp_2s(<2 x float> %A) nounwind {
 ;CHECK-LABEL: frintp_2s:
 ;CHECK-NOT: ld1
@@ -436,6 +440,7 @@
 	ret <2 x float> %tmp3
 }
 
+; FALLBACK-NOT: remark{{.*}}frintp_4s
 define <4 x float> @frintp_4s(<4 x float> %A) nounwind {
 ;CHECK-LABEL: frintp_4s:
 ;CHECK-NOT: ld1
@@ -445,6 +450,7 @@
 	ret <4 x float> %tmp3
 }
 
+; FALLBACK-NOT: remark{{.*}}frintp_2d
 define <2 x double> @frintp_2d(<2 x double> %A) nounwind {
 ;CHECK-LABEL: frintp_2d:
 ;CHECK-NOT: ld1
diff --git a/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll b/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
index 4e1de87..2d7976d 100644
--- a/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
+++ b/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
@@ -1,11 +1,325 @@
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mattr=-fullfp16 | FileCheck %s
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mattr=+fullfp16 | FileCheck %s --check-prefix=CHECK-FP16
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mattr=-fullfp16 \
+; RUN:     | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOFP16
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mattr=+fullfp16 \
+; RUN:     | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FP16
+
+;;; Half vectors
+
+%v4f16 = type <4 x half>
+
+define %v4f16 @test_v4f16.sqrt(%v4f16 %a) {
+  ; CHECK-LABEL:          test_v4f16.sqrt:
+  ; CHECK-NOFP16-COUNT-4: fsqrt s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           fsqrt.4h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v4f16 @llvm.sqrt.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.powi(%v4f16 %a, i32 %b) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.powi:
+  ; CHECK-COUNT-4: bl __powi
+  %1 = call %v4f16 @llvm.powi.v4f16(%v4f16 %a, i32 %b)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.sin(%v4f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.sin:
+  ; CHECK-COUNT-4: bl sinf
+  %1 = call %v4f16 @llvm.sin.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.cos(%v4f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.cos:
+  ; CHECK-COUNT-4: bl cosf
+  %1 = call %v4f16 @llvm.cos.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.pow(%v4f16 %a, %v4f16 %b) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.pow:
+  ; CHECK-COUNT-4: bl pow
+  %1 = call %v4f16 @llvm.pow.v4f16(%v4f16 %a, %v4f16 %b)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.exp(%v4f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.exp:
+  ; CHECK-COUNT-4: bl exp
+  %1 = call %v4f16 @llvm.exp.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.exp2(%v4f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.exp2:
+  ; CHECK-COUNT-4: bl exp2
+  %1 = call %v4f16 @llvm.exp2.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.log(%v4f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.log:
+  ; CHECK-COUNT-4: bl log
+  %1 = call %v4f16 @llvm.log.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.log10(%v4f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.log10:
+  ; CHECK-COUNT-4: bl log10
+  %1 = call %v4f16 @llvm.log10.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.log2(%v4f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v4f16.log2:
+  ; CHECK-COUNT-4: bl log2
+  %1 = call %v4f16 @llvm.log2.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.fma(%v4f16 %a, %v4f16 %b, %v4f16 %c) {
+  ; CHECK-LABEL:          test_v4f16.fma:
+  ; CHECK-NOFP16-COUNT-4: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           fmla.4h
+  %1 = call %v4f16 @llvm.fma.v4f16(%v4f16 %a, %v4f16 %b, %v4f16 %c)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.fabs(%v4f16 %a) {
+  ; CHECK-LABEL:          test_v4f16.fabs:
+  ; CHECK-NOFP16-COUNT-4: fabs s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           fabs.4h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v4f16 @llvm.fabs.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.floor(%v4f16 %a) {
+  ; CHECK-LABEL:          test_v4f16.floor:
+  ; CHECK-NOFP16-COUNT-4: frintm s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintm.4h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v4f16 @llvm.floor.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.ceil(%v4f16 %a) {
+  ; CHECK-LABEL:          test_v4f16.ceil:
+  ; CHECK-NOFP16-COUNT-4: frintp s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintp.4h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v4f16 @llvm.ceil.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.trunc(%v4f16 %a) {
+  ; CHECK-LABEL:          test_v4f16.trunc:
+  ; CHECK-NOFP16-COUNT-4: frintz s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintz.4h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v4f16 @llvm.trunc.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.rint(%v4f16 %a) {
+  ; CHECK-LABEL:          test_v4f16.rint:
+  ; CHECK-NOFP16-COUNT-4: frintx s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintx.4h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v4f16 @llvm.rint.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+define %v4f16 @test_v4f16.nearbyint(%v4f16 %a) {
+  ; CHECK-LABEL:          test_v4f16.nearbyint:
+  ; CHECK-NOFP16-COUNT-4: frinti s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frinti.4h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v4f16 @llvm.nearbyint.v4f16(%v4f16 %a)
+  ret %v4f16 %1
+}
+
+declare %v4f16 @llvm.sqrt.v4f16(%v4f16) #0
+declare %v4f16 @llvm.powi.v4f16(%v4f16, i32) #0
+declare %v4f16 @llvm.sin.v4f16(%v4f16) #0
+declare %v4f16 @llvm.cos.v4f16(%v4f16) #0
+declare %v4f16 @llvm.pow.v4f16(%v4f16, %v4f16) #0
+declare %v4f16 @llvm.exp.v4f16(%v4f16) #0
+declare %v4f16 @llvm.exp2.v4f16(%v4f16) #0
+declare %v4f16 @llvm.log.v4f16(%v4f16) #0
+declare %v4f16 @llvm.log10.v4f16(%v4f16) #0
+declare %v4f16 @llvm.log2.v4f16(%v4f16) #0
+declare %v4f16 @llvm.fma.v4f16(%v4f16, %v4f16, %v4f16) #0
+declare %v4f16 @llvm.fabs.v4f16(%v4f16) #0
+declare %v4f16 @llvm.floor.v4f16(%v4f16) #0
+declare %v4f16 @llvm.ceil.v4f16(%v4f16) #0
+declare %v4f16 @llvm.trunc.v4f16(%v4f16) #0
+declare %v4f16 @llvm.rint.v4f16(%v4f16) #0
+declare %v4f16 @llvm.nearbyint.v4f16(%v4f16) #0
+
+;;;
+
+%v8f16 = type <8 x half>
+
+define %v8f16 @test_v8f16.sqrt(%v8f16 %a) {
+  ; CHECK-LABEL:          test_v8f16.sqrt:
+  ; CHECK-NOFP16-COUNT-8: fsqrt s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           fsqrt.8h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v8f16 @llvm.sqrt.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.powi(%v8f16 %a, i32 %b) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.powi:
+  ; CHECK-COUNT-8: bl __powi
+  %1 = call %v8f16 @llvm.powi.v8f16(%v8f16 %a, i32 %b)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.sin(%v8f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.sin:
+  ; CHECK-COUNT-8: bl sinf
+  %1 = call %v8f16 @llvm.sin.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.cos(%v8f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.cos:
+  ; CHECK-COUNT-8: bl cosf
+  %1 = call %v8f16 @llvm.cos.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.pow(%v8f16 %a, %v8f16 %b) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.pow:
+  ; CHECK-COUNT-8: bl pow
+  %1 = call %v8f16 @llvm.pow.v8f16(%v8f16 %a, %v8f16 %b)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.exp(%v8f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.exp:
+  ; CHECK-COUNT-8: bl exp
+  %1 = call %v8f16 @llvm.exp.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.exp2(%v8f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.exp2:
+  ; CHECK-COUNT-8: bl exp2
+  %1 = call %v8f16 @llvm.exp2.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.log(%v8f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.log:
+  ; CHECK-COUNT-8: bl log
+  %1 = call %v8f16 @llvm.log.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.log10(%v8f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.log10:
+  ; CHECK-COUNT-8: bl log10
+  %1 = call %v8f16 @llvm.log10.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.log2(%v8f16 %a) {
+  ; This operation is expanded, whether with or without +fullfp16.
+  ; CHECK-LABEL:   test_v8f16.log2:
+  ; CHECK-COUNT-8: bl log2
+  %1 = call %v8f16 @llvm.log2.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.fma(%v8f16 %a, %v8f16 %b, %v8f16 %c) {
+  ; CHECK-LABEL:          test_v8f16.fma:
+  ; CHECK-NOFP16-COUNT-8: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           fmla.8h
+  %1 = call %v8f16 @llvm.fma.v8f16(%v8f16 %a, %v8f16 %b, %v8f16 %c)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.fabs(%v8f16 %a) {
+  ; CHECK-LABEL:          test_v8f16.fabs:
+  ; CHECK-NOFP16-COUNT-8: fabs s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           fabs.8h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v8f16 @llvm.fabs.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.floor(%v8f16 %a) {
+  ; CHECK-LABEL:          test_v8f16.floor:
+  ; CHECK-NOFP16-COUNT-8: frintm s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintm.8h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v8f16 @llvm.floor.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.ceil(%v8f16 %a) {
+  ; CHECK-LABEL:          test_v8f16.ceil:
+  ; CHECK-NOFP16-COUNT-8: frintp s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintp.8h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v8f16 @llvm.ceil.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.trunc(%v8f16 %a) {
+  ; CHECK-LABEL:          test_v8f16.trunc:
+  ; CHECK-NOFP16-COUNT-8: frintz s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintz.8h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v8f16 @llvm.trunc.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.rint(%v8f16 %a) {
+  ; CHECK-LABEL:          test_v8f16.rint:
+  ; CHECK-NOFP16-COUNT-8: frintx s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frintx.8h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v8f16 @llvm.rint.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+define %v8f16 @test_v8f16.nearbyint(%v8f16 %a) {
+  ; CHECK-LABEL:          test_v8f16.nearbyint:
+  ; CHECK-NOFP16-COUNT-8: frinti s{{[0-9]+}}, s{{[0-9]+}}
+  ; CHECK-FP16-NOT:       fcvt
+  ; CHECK-FP16:           frinti.8h
+  ; CHECK-FP16-NEXT:      ret
+  %1 = call %v8f16 @llvm.nearbyint.v8f16(%v8f16 %a)
+  ret %v8f16 %1
+}
+
+declare %v8f16 @llvm.sqrt.v8f16(%v8f16) #0
+declare %v8f16 @llvm.powi.v8f16(%v8f16, i32) #0
+declare %v8f16 @llvm.sin.v8f16(%v8f16) #0
+declare %v8f16 @llvm.cos.v8f16(%v8f16) #0
+declare %v8f16 @llvm.pow.v8f16(%v8f16, %v8f16) #0
+declare %v8f16 @llvm.exp.v8f16(%v8f16) #0
+declare %v8f16 @llvm.exp2.v8f16(%v8f16) #0
+declare %v8f16 @llvm.log.v8f16(%v8f16) #0
+declare %v8f16 @llvm.log10.v8f16(%v8f16) #0
+declare %v8f16 @llvm.log2.v8f16(%v8f16) #0
+declare %v8f16 @llvm.fma.v8f16(%v8f16, %v8f16, %v8f16) #0
+declare %v8f16 @llvm.fabs.v8f16(%v8f16) #0
+declare %v8f16 @llvm.floor.v8f16(%v8f16) #0
+declare %v8f16 @llvm.ceil.v8f16(%v8f16) #0
+declare %v8f16 @llvm.trunc.v8f16(%v8f16) #0
+declare %v8f16 @llvm.rint.v8f16(%v8f16) #0
+declare %v8f16 @llvm.nearbyint.v8f16(%v8f16) #0
 
 ;;; Float vectors
 
 %v2f32 = type <2 x float>
-%v4f16 = type <4 x half>
-%v8f16 = type <8 x half>
 
 ; CHECK-LABEL: test_v2f32.sqrt:
 define %v2f32 @test_v2f32.sqrt(%v2f32 %a) {
@@ -13,30 +327,6 @@
   %1 = call %v2f32 @llvm.sqrt.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.sqrt(%v4f16 %a) {
-; CHECK-LABEL: test_v4f16.sqrt:
-; CHECK:       fsqrt s{{.}}, s{{.}}
-; CHECK:       fsqrt s{{.}}, s{{.}}
-; CHECK:       fsqrt s{{.}}, s{{.}}
-; CHECK:       fsqrt s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.sqrt:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       fsqrt.4h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v4f16 @llvm.sqrt.v4f16(%v4f16 %a)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.sqrt(%v8f16 %a) {
-; Filechecks are unwieldy with 16 fcvt and 8 fsqrt tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.sqrt:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       fsqrt.8h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v8f16 @llvm.sqrt.v8f16(%v8f16 %a)
-  ret %v8f16 %1
-}
 ; CHECK: test_v2f32.powi:
 define %v2f32 @test_v2f32.powi(%v2f32 %a, i32 %b) {
   ; CHECK: pow
@@ -97,211 +387,44 @@
   %1 = call %v2f32 @llvm.fma.v2f32(%v2f32 %a, %v2f32 %b, %v2f32 %c)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.fma(%v4f16 %a, %v4f16 %b, %v4f16 %c) {
-; CHECK-LABEL: test_v4f16.fma:
-; CHECK:       fmadd s{{.}}, s{{.}}, s{{.}}, s{{.}}
-; CHECK:       fmadd s{{.}}, s{{.}}, s{{.}}, s{{.}}
-; CHECK:       fmadd s{{.}}, s{{.}}, s{{.}}, s{{.}}
-; CHECK:       fmadd s{{.}}, s{{.}}, s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.fma:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       fmla.4h
-  %1 = call %v4f16 @llvm.fma.v4f16(%v4f16 %a, %v4f16 %b, %v4f16 %c)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.fma(%v8f16 %a, %v8f16 %b, %v8f16 %c) {
-; Filechecks are unwieldy with 16 fcvt and 8 fma tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.fma:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       fmla.8h
-  %1 = call %v8f16 @llvm.fma.v8f16(%v8f16 %a, %v8f16 %b, %v8f16 %c)
-  ret %v8f16 %1
-}
 ; CHECK-LABEL: test_v2f32.fabs:
 define %v2f32 @test_v2f32.fabs(%v2f32 %a) {
   ; CHECK: fabs.2s
   %1 = call %v2f32 @llvm.fabs.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.fabs(%v4f16 %a) {
-; CHECK-LABEL: test_v4f16.fabs:
-; CHECK:       fabs s{{.}}, s{{.}}
-; CHECK:       fabs s{{.}}, s{{.}}
-; CHECK:       fabs s{{.}}, s{{.}}
-; CHECK:       fabs s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.fabs:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       fabs.4h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v4f16 @llvm.fabs.v4f16(%v4f16 %a)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.fabs(%v8f16 %a) {
-; Filechecks are unwieldy with 16 fcvt and 8 fabs tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.fabs:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       fabs.8h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v8f16 @llvm.fabs.v8f16(%v8f16 %a)
-  ret %v8f16 %1
-}
 ; CHECK-LABEL: test_v2f32.floor:
 define %v2f32 @test_v2f32.floor(%v2f32 %a) {
   ; CHECK: frintm.2s
   %1 = call %v2f32 @llvm.floor.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.floor(%v4f16 %a) {
-; CHECK-LABEL: test_v4f16.floor:
-; CHECK:       frintm s{{.}}, s{{.}}
-; CHECK:       frintm s{{.}}, s{{.}}
-; CHECK:       frintm s{{.}}, s{{.}}
-; CHECK:       frintm s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.floor:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frintm.4h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v4f16 @llvm.floor.v4f16(%v4f16 %a)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.floor(%v8f16 %a) {
-; Filechecks are unwieldy with 16 fcvt and 8 frintm tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.floor:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frintm.8h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v8f16 @llvm.floor.v8f16(%v8f16 %a)
-  ret %v8f16 %1
-}
 ; CHECK-LABEL: test_v2f32.ceil:
 define %v2f32 @test_v2f32.ceil(%v2f32 %a) {
   ; CHECK: frintp.2s
   %1 = call %v2f32 @llvm.ceil.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.ceil(%v4f16 %a) {
-; CHECK-LABEL: test_v4f16.ceil:
-; CHECK:       frintp s{{.}}, s{{.}}
-; CHECK:       frintp s{{.}}, s{{.}}
-; CHECK:       frintp s{{.}}, s{{.}}
-; CHECK:       frintp s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.ceil:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frintp.4h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v4f16 @llvm.ceil.v4f16(%v4f16 %a)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.ceil(%v8f16 %a) {
-; Filechecks are unwieldy with 16 fcvt and 8 frint tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.ceil:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frintp.8h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v8f16 @llvm.ceil.v8f16(%v8f16 %a)
-  ret %v8f16 %1
-}
 ; CHECK-LABEL: test_v2f32.trunc:
 define %v2f32 @test_v2f32.trunc(%v2f32 %a) {
   ; CHECK: frintz.2s
   %1 = call %v2f32 @llvm.trunc.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.trunc(%v4f16 %a) {
-; CHECK-LABEL: test_v4f16.trunc:
-; CHECK:       frintz s{{.}}, s{{.}}
-; CHECK:       frintz s{{.}}, s{{.}}
-; CHECK:       frintz s{{.}}, s{{.}}
-; CHECK:       frintz s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.trunc:
-; CHECK-FP16:       frintz.4h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v4f16 @llvm.trunc.v4f16(%v4f16 %a)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.trunc(%v8f16 %a) {
-; Filechecks are unwieldy with 16 fcvt and 8 frint tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.trunc:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frintz.8h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v8f16 @llvm.trunc.v8f16(%v8f16 %a)
-  ret %v8f16 %1
-}
 ; CHECK-LABEL: test_v2f32.rint:
 define %v2f32 @test_v2f32.rint(%v2f32 %a) {
   ; CHECK: frintx.2s
   %1 = call %v2f32 @llvm.rint.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.rint(%v4f16 %a) {
-; CHECK-LABEL: test_v4f16.rint:
-; CHECK:       frintx s{{.}}, s{{.}}
-; CHECK:       frintx s{{.}}, s{{.}}
-; CHECK:       frintx s{{.}}, s{{.}}
-; CHECK:       frintx s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.rint:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frintx.4h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v4f16 @llvm.rint.v4f16(%v4f16 %a)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.rint(%v8f16 %a) {
-; Filechecks are unwieldy with 16 fcvt and 8 frint tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.rint:
-; CHECK-FP16:       frintx.8h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v8f16 @llvm.rint.v8f16(%v8f16 %a)
-  ret %v8f16 %1
-}
 ; CHECK-LABEL: test_v2f32.nearbyint:
 define %v2f32 @test_v2f32.nearbyint(%v2f32 %a) {
   ; CHECK: frinti.2s
   %1 = call %v2f32 @llvm.nearbyint.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
-define %v4f16 @test_v4f16.nearbyint(%v4f16 %a) {
-; CHECK-LABEL: test_v4f16.nearbyint:
-; CHECK:       frinti s{{.}}, s{{.}}
-; CHECK:       frinti s{{.}}, s{{.}}
-; CHECK:       frinti s{{.}}, s{{.}}
-; CHECK:       frinti s{{.}}, s{{.}}
-
-; CHECK-FP16-LABEL: test_v4f16.nearbyint:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frinti.4h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v4f16 @llvm.nearbyint.v4f16(%v4f16 %a)
-  ret %v4f16 %1
-}
-define %v8f16 @test_v8f16.nearbyint(%v8f16 %a) {
-; Filechecks are unwieldy with 16 fcvt and 8 frint tests, so skipped for -fullfp16.
-
-; CHECK-FP16-LABEL: test_v8f16.nearbyint:
-; CHECK-FP16-NOT:   fcvt
-; CHECK-FP16:       frinti.8h
-; CHECK-FP16-NEXT:  ret
-  %1 = call %v8f16 @llvm.nearbyint.v8f16(%v8f16 %a)
-  ret %v8f16 %1
-}
 
 declare %v2f32 @llvm.sqrt.v2f32(%v2f32) #0
-declare %v4f16 @llvm.sqrt.v4f16(%v4f16) #0
-declare %v8f16 @llvm.sqrt.v8f16(%v8f16) #0
-
 declare %v2f32 @llvm.powi.v2f32(%v2f32, i32) #0
 declare %v2f32 @llvm.sin.v2f32(%v2f32) #0
 declare %v2f32 @llvm.cos.v2f32(%v2f32) #0
@@ -311,38 +434,18 @@
 declare %v2f32 @llvm.log.v2f32(%v2f32) #0
 declare %v2f32 @llvm.log10.v2f32(%v2f32) #0
 declare %v2f32 @llvm.log2.v2f32(%v2f32) #0
-
 declare %v2f32 @llvm.fma.v2f32(%v2f32, %v2f32, %v2f32) #0
-declare %v4f16 @llvm.fma.v4f16(%v4f16, %v4f16, %v4f16) #0
-declare %v8f16 @llvm.fma.v8f16(%v8f16, %v8f16, %v8f16) #0
-
 declare %v2f32 @llvm.fabs.v2f32(%v2f32) #0
-declare %v4f16 @llvm.fabs.v4f16(%v4f16) #0
-declare %v8f16 @llvm.fabs.v8f16(%v8f16) #0
-
 declare %v2f32 @llvm.floor.v2f32(%v2f32) #0
-declare %v4f16 @llvm.floor.v4f16(%v4f16) #0
-declare %v8f16 @llvm.floor.v8f16(%v8f16) #0
-
 declare %v2f32 @llvm.ceil.v2f32(%v2f32) #0
-declare %v4f16 @llvm.ceil.v4f16(%v4f16) #0
-declare %v8f16 @llvm.ceil.v8f16(%v8f16) #0
-
 declare %v2f32 @llvm.trunc.v2f32(%v2f32) #0
-declare %v4f16 @llvm.trunc.v4f16(%v4f16) #0
-declare %v8f16 @llvm.trunc.v8f16(%v8f16) #0
-
 declare %v2f32 @llvm.rint.v2f32(%v2f32) #0
-declare %v4f16 @llvm.rint.v4f16(%v4f16) #0
-declare %v8f16 @llvm.rint.v8f16(%v8f16) #0
-
 declare %v2f32 @llvm.nearbyint.v2f32(%v2f32) #0
-declare %v4f16 @llvm.nearbyint.v4f16(%v4f16) #0
-declare %v8f16 @llvm.nearbyint.v8f16(%v8f16) #0
 
 ;;;
 
 %v4f32 = type <4 x float>
+
 ; CHECK: test_v4f32.sqrt:
 define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
   ; CHECK: fsqrt.4s
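
The rewritten checks in this file lean on FileCheck's CHECK-<prefix>-COUNT-<n> directive, which matches the same pattern exactly n consecutive times. That is what lets the scalarized -fullfp16-off path, where each <4 x half> operation becomes four fcvt/op/fcvt round trips, be verified with a single CHECK-NOFP16-COUNT-4 line instead of four repeated directives, and it is also why the v8f16 cases that the old comments skipped as unwieldy are now covered.
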
diff --git a/test/CodeGen/AArch64/emutls.ll b/test/CodeGen/AArch64/emutls.ll
index c322058..25be391 100644
--- a/test/CodeGen/AArch64/emutls.ll
+++ b/test/CodeGen/AArch64/emutls.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -emulated-tls -mtriple=aarch64-linux-android \
-; RUN:     -relocation-model=pic -disable-fp-elim < %s | FileCheck -check-prefix=ARM64 %s
+; RUN:     -relocation-model=pic -frame-pointer=all < %s | FileCheck -check-prefix=ARM64 %s
 ; RUN: llc -mtriple=aarch64-linux-android \
-; RUN:     -relocation-model=pic -disable-fp-elim < %s | FileCheck -check-prefix=ARM64 %s
+; RUN:     -relocation-model=pic -frame-pointer=all < %s | FileCheck -check-prefix=ARM64 %s
 
 ; Copied from X86/emutls.ll
 
diff --git a/test/CodeGen/AArch64/extract-bits.ll b/test/CodeGen/AArch64/extract-bits.ll
index 5dbb719..b1a2034 100644
--- a/test/CodeGen/AArch64/extract-bits.ll
+++ b/test/CodeGen/AArch64/extract-bits.ll
@@ -232,6 +232,64 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_a0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    lsl x9, x9, x2
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_a1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    lsl w9, w9, w2
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %truncshifted
+  ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_a2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    lsl w9, w9, w2
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %zextmask = zext i32 %mask to i64
+  %masked = and i64 %zextmask, %shifted
+  %truncmasked = trunc i64 %masked to i32
+  ret i32 %truncmasked
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; Pattern b. 32-bit
 ; ---------------------------------------------------------------------------- ;
@@ -408,6 +466,67 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_b0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    lsl x9, x9, x2
+; CHECK-NEXT:    bic w0, w8, w9
+; CHECK-NEXT:    ret
+  %shiftedval = lshr i64 %val, %numskipbits
+  %widenumlowbits = zext i8 %numlowbits to i64
+  %notmask = shl nsw i64 -1, %widenumlowbits
+  %mask = xor i64 %notmask, -1
+  %wideres = and i64 %shiftedval, %mask
+  %res = trunc i64 %wideres to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_b1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    lsl w9, w9, w2
+; CHECK-NEXT:    bic w0, w8, w9
+; CHECK-NEXT:    ret
+  %shiftedval = lshr i64 %val, %numskipbits
+  %truncshiftedval = trunc i64 %shiftedval to i32
+  %widenumlowbits = zext i8 %numlowbits to i32
+  %notmask = shl nsw i32 -1, %widenumlowbits
+  %mask = xor i32 %notmask, -1
+  %res = and i32 %truncshiftedval, %mask
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_b2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    lsl w9, w9, w2
+; CHECK-NEXT:    bic w0, w8, w9
+; CHECK-NEXT:    ret
+  %shiftedval = lshr i64 %val, %numskipbits
+  %widenumlowbits = zext i8 %numlowbits to i32
+  %notmask = shl nsw i32 -1, %widenumlowbits
+  %mask = xor i32 %notmask, -1
+  %zextmask = zext i32 %mask to i64
+  %wideres = and i64 %shiftedval, %zextmask
+  %res = trunc i64 %wideres to i32
+  ret i32 %res
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; Pattern c. 32-bit
 ; ---------------------------------------------------------------------------- ;
@@ -598,6 +717,64 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_c0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x9, x2
+; CHECK-NEXT:    mov x10, #-1
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    lsr x9, x10, x9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %shifted
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_c1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w9, w2
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    lsr w9, w10, w9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %truncshifted
+  ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_c2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w9, w2
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    lsr w9, w10, w9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %zextmask = zext i32 %mask to i64
+  %masked = and i64 %zextmask, %shifted
+  %truncmasked = trunc i64 %masked to i32
+  ret i32 %truncmasked
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; Pattern d. 32-bit.
 ; ---------------------------------------------------------------------------- ;
@@ -748,12 +925,49 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_d0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    neg x9, x2
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %shifted, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_d1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    neg w9, w2
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
+; CHECK-NEXT:    ret
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %truncshifted, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; Constant
 ; ---------------------------------------------------------------------------- ;
 
 ; https://bugs.llvm.org/show_bug.cgi?id=38938
-define void @pr38938(i32* %a0, i64* %a1) {
+define void @pr38938(i32* %a0, i64* %a1) nounwind {
 ; CHECK-LABEL: pr38938:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x1]
@@ -774,7 +988,7 @@
 }
 
 ; The most canonical variant
-define i32 @c0_i32(i32 %arg) {
+define i32 @c0_i32(i32 %arg) nounwind {
 ; CHECK-LABEL: c0_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w0, w0, #19, #10
@@ -785,7 +999,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define i32 @c1_i32(i32 %arg) {
+define i32 @c1_i32(i32 %arg) nounwind {
 ; CHECK-LABEL: c1_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #19
@@ -797,7 +1011,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define i32 @c2_i32(i32 %arg) {
+define i32 @c2_i32(i32 %arg) nounwind {
 ; CHECK-LABEL: c2_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #19, #10
@@ -810,7 +1024,7 @@
 }
 
 ; The mask covers newly shifted-in bit
-define i32 @c4_i32_bad(i32 %arg) {
+define i32 @c4_i32_bad(i32 %arg) nounwind {
 ; CHECK-LABEL: c4_i32_bad:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #19
@@ -824,7 +1038,7 @@
 ; i64
 
 ; The most canonical variant
-define i64 @c0_i64(i64 %arg) {
+define i64 @c0_i64(i64 %arg) nounwind {
 ; CHECK-LABEL: c0_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x0, x0, #51, #10
@@ -835,7 +1049,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define i64 @c1_i64(i64 %arg) {
+define i64 @c1_i64(i64 %arg) nounwind {
 ; CHECK-LABEL: c1_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x8, x0, #51
@@ -847,7 +1061,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define i64 @c2_i64(i64 %arg) {
+define i64 @c2_i64(i64 %arg) nounwind {
 ; CHECK-LABEL: c2_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x8, x0, #51, #10
@@ -860,7 +1074,7 @@
 }
 
 ; The mask covers newly shifted-in bit
-define i64 @c4_i64_bad(i64 %arg) {
+define i64 @c4_i64_bad(i64 %arg) nounwind {
 ; CHECK-LABEL: c4_i64_bad:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x8, x0, #51
@@ -878,7 +1092,7 @@
 ; i32
 
 ; The most canonical variant
-define void @c5_i32(i32 %arg, i32* %ptr) {
+define void @c5_i32(i32 %arg, i32* %ptr) nounwind {
 ; CHECK-LABEL: c5_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #19, #10
@@ -891,7 +1105,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define void @c6_i32(i32 %arg, i32* %ptr) {
+define void @c6_i32(i32 %arg, i32* %ptr) nounwind {
 ; CHECK-LABEL: c6_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #19, #12
@@ -904,7 +1118,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define void @c7_i32(i32 %arg, i32* %ptr) {
+define void @c7_i32(i32 %arg, i32* %ptr) nounwind {
 ; CHECK-LABEL: c7_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #19, #10
@@ -921,7 +1135,7 @@
 ; i64
 
 ; The most canonical variant
-define void @c5_i64(i64 %arg, i64* %ptr) {
+define void @c5_i64(i64 %arg, i64* %ptr) nounwind {
 ; CHECK-LABEL: c5_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x8, x0, #51, #10
@@ -934,7 +1148,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define void @c6_i64(i64 %arg, i64* %ptr) {
+define void @c6_i64(i64 %arg, i64* %ptr) nounwind {
 ; CHECK-LABEL: c6_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x8, x0, #51, #12
@@ -947,7 +1161,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define void @c7_i64(i64 %arg, i64* %ptr) {
+define void @c7_i64(i64 %arg, i64* %ptr) nounwind {
 ; CHECK-LABEL: c7_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x8, x0, #51, #10
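
For orientation, the new 64-to-32-bit variants above all start from the same bit-field-extract idiom the earlier patterns in this file establish. A minimal standalone sketch of the pattern-c shape (the function name is illustrative only, not part of the patch):

define i32 @bextr32_c_sketch(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
  %shifted = lshr i32 %val, %numskipbits   ; drop the skipped low bits
  %numhighbits = sub i32 32, %numlowbits   ; how many top bits to clear
  %mask = lshr i32 -1, %numhighbits        ; low %numlowbits bits set
  %masked = and i32 %mask, %shifted        ; the extracted bit field
  ret i32 %masked
}

With constant shift amounts this idiom folds to a single ubfx, as the c0_i32/c0_i64 checks above show; the new bextr64_32_* tests pin down what happens when a 64-bit extract feeds a 32-bit result.
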
diff --git a/test/CodeGen/AArch64/f16-instructions.ll b/test/CodeGen/AArch64/f16-instructions.ll
index 352a275..04d9917 100644
--- a/test/CodeGen/AArch64/f16-instructions.ll
+++ b/test/CodeGen/AArch64/f16-instructions.ll
@@ -1,5 +1,16 @@
-; RUN: llc < %s -mtriple aarch64-unknown-unknown -aarch64-neon-syntax=apple -asm-verbose=false -disable-post-ra -disable-fp-elim | FileCheck %s --check-prefix=CHECK-CVT --check-prefix=CHECK-COMMON
-; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fullfp16 -aarch64-neon-syntax=apple -asm-verbose=false -disable-post-ra -disable-fp-elim | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-FP16
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -aarch64-neon-syntax=apple -asm-verbose=false -disable-post-ra -frame-pointer=all | FileCheck %s --check-prefix=CHECK-CVT --check-prefix=CHECK-COMMON
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fullfp16 -aarch64-neon-syntax=apple -asm-verbose=false -disable-post-ra -frame-pointer=all | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-FP16
+
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -aarch64-neon-syntax=apple \
+; RUN: -asm-verbose=false -disable-post-ra -frame-pointer=all -global-isel \
+; RUN: -global-isel-abort=2 -pass-remarks-missed=gisel-* 2>&1 | FileCheck %s \
+; RUN: --check-prefixes=FALLBACK,GISEL-CVT
+
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fullfp16 \
+; RUN: -aarch64-neon-syntax=apple -asm-verbose=false -disable-post-ra \
+; RUN: -frame-pointer=all -global-isel -global-isel-abort=2 \
+; RUN: -pass-remarks-missed=gisel-* 2>&1 | FileCheck %s \
+; RUN: --check-prefixes=FALLBACK-FP16,GISEL-FP16
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
@@ -1071,6 +1082,18 @@
 ; CHECK-FP16-NEXT: frintp h0, h0
 ; CHECK-FP16-NEXT: ret
 
+; FALLBACK-NOT: remark:{{.*}}test_ceil
+; FALLBACK-FP16-NOT: remark:{{.*}}test_ceil
+
+; GISEL-CVT-LABEL: test_ceil:
+; GISEL-CVT-NEXT: fcvt [[FLOAT32:s[0-9]+]], h0
+; GISEL-CVT-NEXT: frintp [[INT32:s[0-9]+]], [[FLOAT32]]
+; GISEL-CVT-NEXT: fcvt h0, [[INT32]]
+; GISEL-CVT-NEXT: ret
+
+; GISEL-FP16-LABEL: test_ceil:
+; GISEL-FP16-NEXT: frintp h0, h0
+; GISEL-FP16-NEXT: ret
 define half @test_ceil(half %a) #0 {
   %r = call half @llvm.ceil.f16(half %a)
   ret half %r
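
The GISEL-CVT sequence checked here is the usual promotion strategy when +fullfp16 is unavailable: the half value is widened to single precision with fcvt, rounded there with frintp, and narrowed back with fcvt, whereas +fullfp16 rounds directly on an h register. The FALLBACK-NOT/FALLBACK-FP16-NOT lines additionally confirm that GlobalISel handled the function itself, since with -global-isel-abort=2 a fallback to SelectionDAG would have emitted a gisel-* missed remark.
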
diff --git a/test/CodeGen/AArch64/fast-isel-erase.ll b/test/CodeGen/AArch64/fast-isel-erase.ll
new file mode 100644
index 0000000..e8265bc
--- /dev/null
+++ b/test/CodeGen/AArch64/fast-isel-erase.ll
@@ -0,0 +1,25 @@
+; RUN: llc -mtriple=arm64-apple-ios -o - %s -fast-isel=1 -O0 | FileCheck %s
+
+; The zext can be folded into the load and removed, but doing so can invalidate
+; pointers internal to FastISel and cause a crash, so it must be done carefully.
+define i32 @test() {
+; CHECK-LABEL: test:
+; CHECK: ldrh
+; CHECK: bl _callee
+; CHECK-NOT: uxth
+
+entry:
+  store i32 undef, i32* undef, align 4
+  %t81 = load i16, i16* undef, align 2
+  call void @callee()
+  %t82 = zext i16 %t81 to i32
+  %t83 = shl i32 %t82, 16
+  %t84 = or i32 undef, %t83
+  br label %end
+
+end:
+  %val = phi i32 [%t84, %entry]
+  ret i32 %val
+}
+
+declare void @callee()
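
A note on the checks: ldrh already zero-extends the loaded halfword into its 32-bit destination register, so once the zext is folded into the load there is no remaining work and no separate uxth should survive, which is exactly what the CHECK-NOT line pins down.
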
diff --git a/test/CodeGen/AArch64/fastcc.ll b/test/CodeGen/AArch64/fastcc.ll
index 3ea6df5..d4e1161 100644
--- a/test/CodeGen/AArch64/fastcc.ll
+++ b/test/CodeGen/AArch64/fastcc.ll
@@ -1,6 +1,6 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -disable-fp-elim -tailcallopt | FileCheck %s -check-prefix CHECK-TAIL
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -disable-fp-elim | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -disable-fp-elim -tailcallopt -aarch64-redzone | FileCheck %s -check-prefix CHECK-TAIL-RZ
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -frame-pointer=all -tailcallopt | FileCheck %s -check-prefix CHECK-TAIL
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -frame-pointer=all | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -frame-pointer=all -tailcallopt -aarch64-redzone | FileCheck %s -check-prefix CHECK-TAIL-RZ
 
 ; Without tailcallopt fastcc still means the caller cleans up the
 ; stack, so try to make sure this is respected.
diff --git a/test/CodeGen/AArch64/landingpad-ifcvt.ll b/test/CodeGen/AArch64/landingpad-ifcvt.ll
new file mode 100644
index 0000000..4437970
--- /dev/null
+++ b/test/CodeGen/AArch64/landingpad-ifcvt.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s | FileCheck %s
+
+; Make sure this doesn't crash (and the output is sane).
+; CHECK: ; %__except.ret
+; CHECK-NEXT: mov     x0, xzr
+
+target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-pc-windows-msvc19.11.0"
+
+define i64 @f(i32* %hwnd, i32 %message, i64 %wparam, i64 %lparam) personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+entry:
+  %call = invoke i64 @callee(i32* %hwnd, i32 %message, i64 %wparam, i64 %lparam)
+          to label %__try.cont unwind label %catch.dispatch
+
+catch.dispatch:                                   ; preds = %entry
+  %0 = catchswitch within none [label %__except.ret] unwind to caller
+
+__except.ret:                                     ; preds = %catch.dispatch
+  %1 = catchpad within %0 [i8* bitcast (i32 (i8*, i8*)* @filt to i8*)]
+  catchret from %1 to label %__try.cont
+
+__try.cont:                                       ; preds = %__except.ret, %entry
+  %rv.0 = phi i64 [ 0, %__except.ret ], [ %call, %entry ]
+  ret i64 %rv.0
+}
+
+declare dso_local i64 @callee(i32*, i32, i64, i64)
+declare i32 @filt(i8*, i8* nocapture readnone)
+declare dso_local i32 @__C_specific_handler(...)
diff --git a/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll b/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
new file mode 100644
index 0000000..468f773
--- /dev/null
+++ b/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=aarch64-arm < %s | FileCheck %s
+
+; Run at O3 to make sure we can optimize load/store instructions after Machine
+; Block Placement has taken place, using a tail duplication threshold of 4.
+
+define void @foo(i1 %cond, i64* %ptr) {
+; CHECK-LABEL: foo:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    tbz w0, #0, .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %if.then
+; CHECK-NEXT:    ldp x9, x8, [x1, #8]
+; CHECK-NEXT:    str xzr, [x1, #16]
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    b.lt .LBB0_3
+; CHECK-NEXT:    b .LBB0_4
+; CHECK-NEXT:  .LBB0_2: // %if.else
+; CHECK-NEXT:    ldp x8, x9, [x1]
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    b.ge .LBB0_4
+; CHECK-NEXT:  .LBB0_3: // %exit1
+; CHECK-NEXT:    str xzr, [x1, #8]
+; CHECK-NEXT:  .LBB0_4: // %exit2
+; CHECK-NEXT:    ret
+entry:
+  br i1 %cond, label %if.then, label %if.else
+
+if.then:
+  %0 = getelementptr inbounds i64, i64* %ptr, i64 2
+  %1 = load i64, i64* %0, align 8
+  store i64 0, i64* %0, align 8
+  br label %if.end
+
+if.else:
+  %2 = load i64, i64* %ptr, align 8
+  br label %if.end
+
+if.end:
+  %3 = phi i64 [ %1, %if.then ], [ %2, %if.else ]
+  %4 = getelementptr inbounds i64, i64* %ptr, i64 1
+  %5 = load i64, i64* %4, align 8
+  %6 = icmp slt i64 %3, %5
+  br i1 %6, label %exit1, label %exit2
+
+exit1:
+  store i64 0, i64* %4, align 8
+  ret void
+
+exit2:
+  ret void
+}
diff --git a/test/CodeGen/AArch64/ldst-opt.ll b/test/CodeGen/AArch64/ldst-opt.ll
index 7f6cba2..fe55806 100644
--- a/test/CodeGen/AArch64/ldst-opt.ll
+++ b/test/CodeGen/AArch64/ldst-opt.ll
@@ -1681,3 +1681,19 @@
   %add = add i64 %ld, 1
   ret i64 %add
 }
+
+; CHECK-LABEL: trunc_splat_zero:
+; CHECK-DAG: strh wzr, [x0]
+define void @trunc_splat_zero(<2 x i8>* %ptr) {
+  store <2 x i8> zeroinitializer, <2 x i8>* %ptr, align 2
+  ret void
+}
+
+; CHECK-LABEL: trunc_splat:
+; CHECK: mov [[VAL:w[0-9]+]], #42
+; CHECK: movk [[VAL]], #42, lsl #16
+; CHECK: str [[VAL]], [x0]
+define void @trunc_splat(<2 x i16>* %ptr) {
+  store <2 x i16> <i16 42, i16 42>, <2 x i16>* %ptr, align 4
+  ret void
+}
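
The expected code for these two tests is plain byte arithmetic: <2 x i8> zeroinitializer is two zero bytes, i.e. a single 16-bit store of wzr, and on a little-endian target <2 x i16> <i16 42, i16 42> occupies one 32-bit word, 0x002a002a = 42 * 0x10001 = 2752554, built with the checked mov/movk pair. An equivalent scalar form of the second store (a hypothetical function, shown only for illustration):

define void @trunc_splat_equiv(i32* %ptr) {
  ; 0x002a002a: two little-endian i16 lanes holding 42
  store i32 2752554, i32* %ptr, align 4
  ret void
}
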
diff --git a/test/CodeGen/AArch64/local_vars.ll b/test/CodeGen/AArch64/local_vars.ll
index a479572..cf5bdac 100644
--- a/test/CodeGen/AArch64/local_vars.ll
+++ b/test/CodeGen/AArch64/local_vars.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -disable-fp-elim | FileCheck -check-prefix CHECK-WITHFP-ARM64 %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -frame-pointer=all | FileCheck -check-prefix CHECK-WITHFP-ARM64 %s
 
 ; Make sure a reasonably sane prologue and epilogue are
 ; generated. This test is not robust in the face of an frame-handling
diff --git a/test/CodeGen/AArch64/misched-fusion-arith-logic.mir b/test/CodeGen/AArch64/misched-fusion-arith-logic.mir
new file mode 100644
index 0000000..6227677
--- /dev/null
+++ b/test/CodeGen/AArch64/misched-fusion-arith-logic.mir
@@ -0,0 +1,112 @@
+# RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mattr=fuse-arith-logic -run-pass=machine-scheduler -misched-print-dags | FileCheck %s
+# RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mcpu=exynos-m4         -run-pass=machine-scheduler -misched-print-dags | FileCheck %s
+# REQUIRES: asserts
+
+---
+name: arith
+body: |
+  bb.0.entry:
+    %0:gpr32 = SUBWrr undef $w0, undef $w1
+    %1:gpr32 = ADDWrr undef $w1, undef $w2
+    %2:gpr32 = SUBWrs %0, undef $w2, 0
+    %3:gpr32 = ADDWrs %1, undef $w3, 0
+
+    ; CHECK: SU(0): %0:gpr32 = SUBWrr undef $w0, undef $w1
+    ; CHECK: Successors:
+    ; CHECK: SU(2): Ord  Latency=0 Cluster
+    ; CHECK: SU(1): %1:gpr32 = ADDWrr undef $w1, undef $w2
+    ; CHECK: Successors:
+    ; CHECK: SU(3): Ord  Latency=0 Cluster
+    ; CHECK: SU(2): dead %2:gpr32 = SUBWrs %0:gpr32, undef $w2, 0
+    ; CHECK: Predecessors:
+    ; CHECK: SU(0): Ord  Latency=0 Cluster
+    ; CHECK: SU(3): dead %3:gpr32 = ADDWrs %1:gpr32, undef $w3, 0
+    ; CHECK: Predecessors:
+    ; CHECK: SU(1): Ord  Latency=0 Cluster
+...
+---
+name: compare
+body: |
+  bb.0.entry:
+    %0:gpr64 = ADDXrr undef $x0, undef $x1
+    %1:gpr64 = SUBXrs undef $x1, undef $x2, 0
+    %2:gpr64 = ADDSXrr %0, undef $x3, implicit-def $nzcv
+    %3:gpr64 = SUBSXrs %1, undef $x4, 0, implicit-def $nzcv
+
+    ; CHECK: SU(0): %0:gpr64 = ADDXrr undef $x0, undef $x1
+    ; CHECK: Successors:
+    ; CHECK: SU(2): Ord  Latency=0 Cluster
+    ; CHECK: SU(1): %1:gpr64 = SUBXrs undef $x1, undef $x2, 0
+    ; CHECK: Successors:
+    ; CHECK: SU(3): Ord  Latency=0 Cluster
+    ; CHECK: SU(2): dead %2:gpr64 = ADDSXrr %0:gpr64, undef $x3, implicit-def $nzcv
+    ; CHECK: Predecessors:
+    ; CHECK: SU(0): Ord  Latency=0 Cluster
+    ; CHECK: SU(3): dead %3:gpr64 = SUBSXrs %1:gpr64, undef $x4, 0, implicit-def $nzcv
+    ; CHECK: Predecessors:
+    ; CHECK: SU(1): Ord  Latency=0 Cluster
+...
+---
+name: logic
+body: |
+  bb.0.entry:
+    %0:gpr32 = ADDWrr undef $w0, undef $w1
+    %1:gpr64 = SUBXrs undef $x1, undef $x2, 0
+    %3:gpr32 = ANDWrs %0, undef $w3, 0
+    %4:gpr64 = ORRXrr %1, undef $x4
+
+    ; CHECK: SU(0): %0:gpr32 = ADDWrr undef $w0, undef $w1
+    ; CHECK: Successors:
+    ; CHECK: SU(2): Ord  Latency=0 Cluster
+    ; CHECK: SU(1): %1:gpr64 = SUBXrs undef $x1, undef $x2, 0
+    ; CHECK: Successors:
+    ; CHECK: SU(3): Ord  Latency=0 Cluster
+    ; CHECK: SU(2): dead %2:gpr32 = ANDWrs %0:gpr32, undef $w3, 0
+    ; CHECK: Predecessors:
+    ; CHECK: SU(0): Ord  Latency=0 Cluster
+    ; CHECK: SU(3): dead %3:gpr64 = ORRXrr %1:gpr64, undef $x4
+    ; CHECK: Predecessors:
+    ; CHECK: SU(1): Ord  Latency=0 Cluster
+...
+---
+name: nope
+body: |
+  bb.0.entry:
+    ; Shifted register.
+    %0:gpr32 = SUBWrr undef $w0, undef $w1
+    %1:gpr32 = SUBWrs %0, undef $w2, 1
+    ; CHECK: SU(0): %0:gpr32 = SUBWrr undef $w0, undef $w1
+    ; CHECK: Successors:
+    ; CHECK-NOT: SU(1): Ord  Latency=0 Cluster
+    ; CHECK: SU(1): dead %1:gpr32 = SUBWrs %0:gpr32, undef $w2, 1
+
+    ; Multiple successors.
+    %2:gpr64 = ADDXrr undef $x0, undef $x1
+    %3:gpr32 = EXTRACT_SUBREG %2, %subreg.sub_32
+    %4:gpr32 = ANDWrs %3, undef $w2, 0
+    %5:gpr64 = ADDSXrr %2, undef $x3, implicit-def $nzcv
+    ; CHECK: SU(2): %2:gpr64 = ADDXrr undef $x0, undef $x1
+    ; CHECK: Successors:
+    ; CHECK-NOT: SU(3): Ord  Latency=0 Cluster
+    ; CHECK: SU(5): Ord  Latency=0 Cluster
+    ; CHECK: SU(3): %3:gpr32 = EXTRACT_SUBREG %2:gpr64, %subreg.sub_32
+    ; CHECK: SU(5): dead %5:gpr64 = ADDSXrr %2:gpr64, undef $x3, implicit-def $nzcv
+
+    ; Different register sizes.
+    %6:gpr32 = SUBWrr undef $w0, undef $w1
+    %7:gpr64 = ADDXrr undef $x1, undef $x2
+    %8:gpr64 = SUBXrr %7, undef $x3
+    %9:gpr32 = ADDWrr %6, undef $w4
+    ; CHECK: SU(6): %6:gpr32 = SUBWrr undef $w0, undef $w1
+    ; CHECK: Successors:
+    ; CHECK-NOT: SU(8): Ord  Latency=0 Cluster
+    ; CHECK: SU(7): %7:gpr64 = ADDXrr undef $x1, undef $x2
+    ; CHECK: Successors:
+    ; CHECK-NOT: SU(9): Ord  Latency=0 Cluster
+    ; CHECK: SU(8): dead %8:gpr64 = SUBXrr %7:gpr64, undef $x3
+    ; CHECK: Predecessors:
+    ; CHECK: SU(7): Ord  Latency=0 Cluster
+    ; CHECK: SU(9): dead %9:gpr32 = ADDWrr %6:gpr32, undef $w4
+    ; CHECK: Predecessors:
+    ; CHECK: SU(6): Ord  Latency=0 Cluster
+...
diff --git a/test/CodeGen/AArch64/pr40091.ll b/test/CodeGen/AArch64/pr40091.ll
new file mode 100644
index 0000000..b70ae8a
--- /dev/null
+++ b/test/CodeGen/AArch64/pr40091.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-arm-none-eabi | FileCheck %s
+
+define i64 @test(i64 %aa) {
+; CHECK-LABEL: test:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v0.8b, #137
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+entry:
+  %a = bitcast i64 %aa to <1 x i64>
+  %k = icmp sgt <1 x i64> %a, zeroinitializer
+  %l = zext <1 x i1> %k to <1 x i64>
+  %o = and <1 x i64> %l, %a
+  %p = xor <1 x i64> %l, <i64 -1>
+  %q = and <1 x i64> %p, <i64 81985529216486895>
+  %r = or <1 x i64> %q, %o
+  %s = bitcast <1 x i64> %r to <8 x i8>
+  %t = shufflevector <8 x i8> %s, <8 x i8> %s, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %u = bitcast <8 x i8> %t to i64
+  ret i64 %u
+}
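+; Why this folds to a constant: %l is either 0 or 1 per lane, so %r is
+; 0x0123456789ABCDEF with at most its low bit replaced by a bit of %a.
+; Byte 3 (little-endian) of 0x0123456789ABCDEF is 0x89 either way, and
+; broadcasting it yields 0x89 in every lane: movi v0.8b, #137 (137 == 0x89).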
diff --git a/test/CodeGen/AArch64/regress-tblgen-chains.ll b/test/CodeGen/AArch64/regress-tblgen-chains.ll
index 50da7d1..bf2dece 100644
--- a/test/CodeGen/AArch64/regress-tblgen-chains.ll
+++ b/test/CodeGen/AArch64/regress-tblgen-chains.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios7.0 -disable-fp-elim -o - %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios7.0 -frame-pointer=all -o - %s | FileCheck %s
 
 ; When generating DAG selection tables, TableGen used to only flag an
 ; instruction as needing a chain on its own account if it had a built-in pattern
diff --git a/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
index 66a5ed6..2cbcad6 100644
--- a/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
+++ b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -disable-fp-elim < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -frame-pointer=all < %s | FileCheck %s
 @var = global i32 0
 
 declare void @bar()
diff --git a/test/CodeGen/AArch64/reloc-specifiers.mir b/test/CodeGen/AArch64/reloc-specifiers.mir
new file mode 100644
index 0000000..374a475
--- /dev/null
+++ b/test/CodeGen/AArch64/reloc-specifiers.mir
@@ -0,0 +1,21 @@
+# RUN: llc -mtriple=arm64-windows -start-after=prologepilog -show-mc-encoding \
+# RUN: -o - %s | FileCheck %s
+
+--- |
+  define void @bar() { ret void }
+...
+
+---
+name:            bar
+body: |
+  bb.0:
+    ; CHECK-LABEL: bar
+
+    ; CHECK: movz    x0, #:abs_g1_s:.Lfoo$frame_escape_0 ; encoding: [0bAAA00000,A,0b101AAAAA,0xd2]
+    ; CHECK: fixup A - offset: 0, value: :abs_g1_s:.Lfoo$frame_escape_0, kind: fixup_aarch64_movw
+    renamable $x0 = MOVZXi target-flags(aarch64-g1, aarch64-s) <mcsymbol .Lfoo$frame_escape_0>, 16
+
+    ; CHECK: movk    x0, #:abs_g0_nc:.Lfoo$frame_escape_0 ; encoding: [0bAAA00000,A,0b100AAAAA,0xf2]
+    ; CHECK: fixup A - offset: 0, value: :abs_g0_nc:.Lfoo$frame_escape_0, kind: fixup_aarch64_movw
+    renamable $x0 = MOVKXi $x0, target-flags(aarch64-g0, aarch64-nc) <mcsymbol .Lfoo$frame_escape_0>, 0
+...
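+# For context: :abs_g1_s: selects bits [31:16] of the absolute symbol value
+# (sign-checked, suiting movz) and :abs_g0_nc: selects bits [15:0] with no
+# overflow check (suiting movk), so the movz/movk pair above materializes a
+# 32-bit absolute address one 16-bit chunk at a time.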
diff --git a/test/CodeGen/AArch64/shrink-constant-multiple-users.ll b/test/CodeGen/AArch64/shrink-constant-multiple-users.ll
new file mode 100644
index 0000000..d787d36
--- /dev/null
+++ b/test/CodeGen/AArch64/shrink-constant-multiple-users.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple arm64-ios- %s -o - | FileCheck %s
+
+; Check that the -8 constant is shrunk even when the AND instruction has multiple users.
+
+; CHECK-LABEL:  _test:
+; CHECK:          and x0, x0, #0xfffffff8
+; CHECK-NEXT:     add x19, x0, #10
+; CHECK-NEXT:     bl  _user
+
+define i64 @test(i32 %a) {
+  %ext = zext i32 %a to i64
+  %v1 = and i64 %ext, -8
+  %v2 = add i64 %v1, 10
+  call void @user(i64 %v1)
+  ret i64 %v2
+}
+
+declare void @user(i64)
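+; Why the shrink is legal here: %ext is a zext from i32, so the high 32 bits
+; of the AND input are known zero. Masking with -8 is therefore equivalent to
+; masking with 0xfffffff8, which is the narrower immediate the check expects.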
diff --git a/test/CodeGen/AArch64/sign-return-address.ll b/test/CodeGen/AArch64/sign-return-address.ll
index c057c81..dfd52f8 100644
--- a/test/CodeGen/AArch64/sign-return-address.ll
+++ b/test/CodeGen/AArch64/sign-return-address.ll
@@ -24,17 +24,17 @@
 ; CHECK-LABEL: @leaf_sign_all
 ; CHECK: paciasp
 ; CHECK: autiasp
-; CHECK-NEXT: ret
+; CHECK: ret
 define i32 @leaf_sign_all(i32 %x) "sign-return-address"="all" {
   ret i32 %x
 }
 
 ; CHECK: @leaf_clobbers_lr
 ; CHECK: paciasp
-; CHECK-NEXT: str x30, [sp, #-16]!
+; CHECK: str x30, [sp, #-16]!
 ; CHECK: ldr  x30, [sp], #16
 ; CHECK-NEXT: autiasp
-; CHECK-NEXT: ret
+; CHECK: ret
 define i64 @leaf_clobbers_lr(i64 %x) "sign-return-address"="non-leaf"  {
   call void asm sideeffect "mov x30, $0", "r,~{lr}"(i64 %x) #1
   ret i64 %x
@@ -45,7 +45,7 @@
 ; CHECK: @non_leaf_sign_all
 ; CHECK: paciasp
 ; CHECK: autiasp
-; CHECK-NEXT: ret
+; CHECK: ret
 define i32 @non_leaf_sign_all(i32 %x) "sign-return-address"="all" {
   %call = call i32 @foo(i32 %x)
   ret i32 %call
@@ -53,10 +53,10 @@
 
 ; CHECK: @non_leaf_sign_non_leaf
 ; CHECK: paciasp
-; CHECK-NEXT: str x30, [sp, #-16]!
+; CHECK: str x30, [sp, #-16]!
 ; CHECK: ldr  x30, [sp], #16
-; CHECK-NEXT: autiasp
-; CHECK-NEXT: ret
+; CHECK: autiasp
+; CHECK: ret
 define i32 @non_leaf_sign_non_leaf(i32 %x) "sign-return-address"="non-leaf"  {
   %call = call i32 @foo(i32 %x)
   ret i32 %call
@@ -65,7 +65,7 @@
 ; CHECK-LABEL: @leaf_sign_all_v83
 ; CHECK: paciasp
 ; CHECK-NOT: ret
-; CHECK-NEXT: retaa
+; CHECK: retaa
 ; CHECK-NOT: ret
 define i32 @leaf_sign_all_v83(i32 %x) "sign-return-address"="all" "target-features"="+v8.3a" {
   ret i32 %x
@@ -75,10 +75,10 @@
 
 ; CHECK-LABEL: @spill_lr_and_tail_call
 ; CHECK: paciasp
-; CHECK-NEXT: str x30, [sp, #-16]!
+; CHECK: str x30, [sp, #-16]!
 ; CHECK: ldr  x30, [sp], #16
-; CHECK-NEXT: autiasp
-; CHECK-NEXT: b  bar
+; CHECK: autiasp
+; CHECK: b  bar
 define fastcc void @spill_lr_and_tail_call(i64 %x) "sign-return-address"="all" {
   call void asm sideeffect "mov x30, $0", "r,~{lr}"(i64 %x) #1
   tail call fastcc i64 @bar(i64 %x)
diff --git a/test/CodeGen/AArch64/speculation-hardening-dagisel.ll b/test/CodeGen/AArch64/speculation-hardening-dagisel.ll
new file mode 100644
index 0000000..4d13d98
--- /dev/null
+++ b/test/CodeGen/AArch64/speculation-hardening-dagisel.ll
@@ -0,0 +1,71 @@
+; RUN: sed -e 's/SLHATTR/speculative_load_hardening/' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefixes=CHECK,SLH --dump-input-on-failure
+; RUN: sed -e 's/SLHATTR//' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefixes=CHECK,NOSLH --dump-input-on-failure
+
+declare i64 @g(i64, i64) local_unnamed_addr
+define i64 @f_using_reserved_reg_x16(i64 %a, i64 %b) local_unnamed_addr SLHATTR {
+; CHECK-LABEL: f_using_reserved_reg_x16
+; SLH: dsb sy
+; SLH: isb
+; NOSLH-NOT: dsb sy
+; NOSLH-NOT: isb
+entry:
+  %cmp = icmp ugt i64 %a, %b
+  br i1 %cmp, label %if.then, label %cleanup
+
+; CHECK: b.ls
+; SLH: dsb sy
+; SLH: isb
+; NOSLH-NOT: dsb sy
+; NOSLH-NOT: isb
+if.then:
+  %0 = tail call i64 asm "autia1716", "={x17},{x16},0"(i64 %b, i64 %a)
+; CHECK: bl g
+; SLH: dsb sy
+; SLH: isb
+; NOSLH-NOT: dsb sy
+; NOSLH-NOT: isb
+; CHECK: ret
+  %call = tail call i64 @g(i64 %a, i64 %b) #3
+  %add = add i64 %call, %0
+  br label %cleanup
+
+cleanup:
+; SLH: dsb sy
+; SLH: isb
+; NOSLH-NOT: dsb sy
+; NOSLH-NOT: isb
+; SLH: ret
+  %retval.0 = phi i64 [ %add, %if.then ], [ %b, %entry ]
+  ret i64 %retval.0
+}
+
+define i32 @f_clobbered_reg_w16(i32 %a, i32 %b) local_unnamed_addr SLHATTR {
+; CHECK-LABEL: f_clobbered_reg_w16
+entry:
+; SLH: dsb sy
+; SLH: isb
+; NOSLH-NOT: dsb sy
+; NOSLH-NOT: isb
+  %cmp = icmp sgt i32 %a, %b
+  br i1 %cmp, label %if.then, label %if.end
+; CHECK: b.le
+
+if.then:
+; SLH: dsb sy
+; SLH: isb
+; NOSLH-NOT: dsb sy
+; NOSLH-NOT: isb
+; CHECK: mov w16, w0
+  tail call void asm sideeffect "mov w16, ${0:w}", "r,~{w16}"(i32 %a)
+  br label %if.end
+; SLH: ret
+
+if.end:
+  %add = add nsw i32 %b, %a
+  ret i32 %add
+; SLH: dsb sy
+; SLH: isb
+; NOSLH-NOT: dsb sy
+; NOSLH-NOT: isb
+; SLH: ret
+}
diff --git a/test/CodeGen/AArch64/speculation-hardening-loads.ll b/test/CodeGen/AArch64/speculation-hardening-loads.ll
new file mode 100644
index 0000000..0b8f8d3
--- /dev/null
+++ b/test/CodeGen/AArch64/speculation-hardening-loads.ll
@@ -0,0 +1,157 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu | FileCheck %s --dump-input-on-failure
+
+define i128 @ldp_single_csdb(i128* %p) speculative_load_hardening {
+entry:
+  %0 = load i128, i128* %p, align 16
+  ret i128 %0
+; CHECK-LABEL: ldp_single_csdb
+; CHECK:      ldp   x8, x1, [x0]
+; CHECK-NEXT: cmp sp, #0
+; CHECK-NEXT: csetm x16, ne
+; CHECK-NEXT: and   x8, x8, x16
+; CHECK-NEXT: and   x1, x1, x16
+; CHECK-NEXT: csdb
+; CHECK-NEXT: mov x17, sp
+; CHECK-NEXT: and x17, x17, x16
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: mov sp, x17
+; CHECK-NEXT: ret
+}
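+; The sequence above is the SLH masking recipe these tests revolve around
+; (a sketch, using the pass's fixed x16/x17 convention visible in the checks):
+;   cmp   sp, #0        ; sp is zeroed on a misspeculated path
+;   csetm x16, ne       ; x16 = all-ones if speculation was correct, else 0
+;   and   xN, xN, x16   ; mask every loaded value with the taint
+;   csdb                ; barrier so the masking cannot itself be speculated
+;   mov   x17, sp
+;   and   x17, x17, x16 ; fold the taint back into sp before returning
+;   mov   sp, x17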
+
+define double @ld_double(double* %p) speculative_load_hardening {
+entry:
+  %0 = load double, double* %p, align 8
+  ret double %0
+; Check that the address loaded from is masked for a floating point load.
+; CHECK-LABEL: ld_double
+; CHECK:      cmp sp, #0
+; CHECK-NEXT: csetm x16, ne
+; CHECK-NEXT: and   x0, x0, x16
+; CHECK-NEXT: csdb
+; CHECK-NEXT: ldr   d0, [x0]
+; CHECK-NEXT: mov x17, sp
+; CHECK-NEXT: and x17, x17, x16
+; CHECK-NEXT: mov sp, x17
+; CHECK-NEXT: ret
+}
+
+define i32 @csdb_emitted_for_subreg_use(i64* %p, i32 %b) speculative_load_hardening {
+entry:
+  %X = load i64, i64* %p, align 8
+  %X_trunc = trunc i64 %X to i32
+  %add = add i32 %b, %X_trunc
+  %iszero = icmp eq i64 %X, 0
+  %ret = select i1 %iszero, i32 %b, i32 %add
+  ret i32 %ret
+; Check that a csdb is emitted when a subregister of the masked loaded value is used.
+; CHECK-LABEL: csdb_emitted_for_subreg_use
+; CHECK:      ldr x8, [x0]
+; CHECK-NEXT: cmp sp, #0
+; CHECK-NEXT: csetm x16, ne
+; CHECK-NEXT: and x8, x8, x16
+; The csdb instruction must occur before the add instruction that uses w8.
+; CHECK-NEXT: csdb
+; CHECK-NEXT: mov x17, sp
+; CHECK-NEXT: add w9, w1, w8
+; CHECK-NEXT: cmp x8, #0
+; CHECK-NEXT: and x17, x17, x16
+; CHECK-NEXT: csel w0, w1, w9, eq
+; CHECK-NEXT: mov sp, x17
+; CHECK-NEXT: ret
+}
+
+define i64 @csdb_emitted_for_superreg_use(i32* %p, i64 %b) speculative_load_hardening {
+entry:
+  %X = load i32, i32* %p, align 4
+  %X_ext = zext i32 %X to i64
+  %add = add i64 %b, %X_ext
+  %iszero = icmp eq i32 %X, 0
+  %ret = select i1 %iszero, i64 %b, i64 %add
+  ret i64 %ret
+; Check that a csdb is emitted when a superregister of the masked loaded value is used.
+; CHECK-LABEL: csdb_emitted_for_superreg_use
+; CHECK:      ldr w8, [x0]
+; CHECK-NEXT: cmp sp, #0
+; CHECK-NEXT: csetm x16, ne
+; CHECK-NEXT: and w8, w8, w16
+; The csdb instruction must occur before the add instruction that uses x8.
+; CHECK-NEXT: csdb
+; CHECK-NEXT: mov x17, sp
+; CHECK-NEXT: add x9, x1, x8
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: and x17, x17, x16
+; CHECK-NEXT: csel x0, x1, x9, eq
+; CHECK-NEXT: mov sp, x17
+; CHECK-NEXT: ret
+}
+
+define i64 @no_masking_with_full_control_flow_barriers(i64 %a, i64 %b, i64* %p) speculative_load_hardening {
+; CHECK-LABEL: no_masking_with_full_control_flow_barriers
+; CHECK: dsb sy
+; CHECK: isb
+entry:
+  %0 = tail call i64 asm "autia1716", "={x17},{x16},0"(i64 %b, i64 %a)
+  %X = load i64, i64* %p, align 8
+  %ret = add i64 %X, %0
+; CHECK-NOT: csdb
+; CHECK-NOT: and
+; CHECK: ret
+  ret i64 %ret
+}
+
+define void @f_implicitdef_vector_load(<4 x i32>* %dst, <2 x i32>* %src) speculative_load_hardening {
+entry:
+  %0 = load <2 x i32>, <2 x i32>* %src, align 8
+  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+  store <4 x i32> %shuffle, <4 x i32>* %dst, align 4
+  ret void
+; CHECK-LABEL: f_implicitdef_vector_load
+; CHECK:       cmp     sp, #0
+; CHECK-NEXT:  csetm   x16, ne
+; CHECK-NEXT:  and     x1, x1, x16
+; CHECK-NEXT:  csdb
+; CHECK-NEXT:  ldr     d0, [x1]
+; CHECK-NEXT:  mov     x17, sp
+; CHECK-NEXT:  and     x17, x17, x16
+; CHECK-NEXT:  mov     v0.d[1], v0.d[0]
+; CHECK-NEXT:  str     q0, [x0]
+; CHECK-NEXT:  mov     sp, x17
+; CHECK-NEXT:  ret
+}
+
+define <2 x double> @f_usedefvectorload(double* %a, double* %b) speculative_load_hardening {
+entry:
+; CHECK-LABEL: f_usedefvectorload
+; CHECK:       cmp     sp, #0
+; CHECK-NEXT:  csetm   x16, ne
+; CHECK-NEXT:  movi    v0.2d, #0000000000000000
+; CHECK-NEXT:  and     x1, x1, x16
+; CHECK-NEXT:  csdb
+; CHECK-NEXT:  ld1     { v0.d }[0], [x1]
+; CHECK-NEXT:  mov     x17, sp
+; CHECK-NEXT:  and     x17, x17, x16
+; CHECK-NEXT:  mov     sp, x17
+; CHECK-NEXT:  ret
+  %0 = load double, double* %b, align 16
+  %vld1_lane = insertelement <2 x double> <double undef, double 0.000000e+00>, double %0, i32 0
+  ret <2 x double> %vld1_lane
+}
+
+define i32 @deadload() speculative_load_hardening {
+entry:
+; CHECK-LABEL: deadload
+; CHECK:       cmp     sp, #0
+; CHECK-NEXT:  csetm   x16, ne
+; CHECK-NEXT:  sub     sp, sp, #16
+; CHECK-NEXT:  .cfi_def_cfa_offset 16
+; CHECK-NEXT:  ldr     w8, [sp, #12]
+; CHECK-NEXT:  add     sp, sp, #16
+; CHECK-NEXT:  mov     x17, sp
+; CHECK-NEXT:  and     x17, x17, x16
+; CHECK-NEXT:  mov     sp, x17
+; CHECK-NEXT:  ret
+  %a = alloca i32, align 4
+  %val = load volatile i32, i32* %a, align 4
+  ret i32 undef
+}
diff --git a/test/CodeGen/AArch64/speculation-hardening.ll b/test/CodeGen/AArch64/speculation-hardening.ll
new file mode 100644
index 0000000..3535b63
--- /dev/null
+++ b/test/CodeGen/AArch64/speculation-hardening.ll
@@ -0,0 +1,156 @@
+; RUN: sed -e 's/SLHATTR/speculative_load_hardening/' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefixes=CHECK,SLH --dump-input-on-failure
+; RUN: sed -e 's/SLHATTR//' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefixes=CHECK,NOSLH --dump-input-on-failure
+; RUN: sed -e 's/SLHATTR/speculative_load_hardening/' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -global-isel | FileCheck %s --check-prefixes=CHECK,SLH --dump-input-on-failure
+; RUN: sed -e 's/SLHATTR//' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -global-isel | FileCheck %s --check-prefixes=CHECK,NOSLH --dump-input-on-failure
+; RUN: sed -e 's/SLHATTR/speculative_load_hardening/' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -fast-isel | FileCheck %s --check-prefixes=CHECK,SLH --dump-input-on-failure
+; RUN: sed -e 's/SLHATTR//' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -fast-isel | FileCheck %s --check-prefixes=CHECK,NOSLH --dump-input-on-failure
+
+define i32 @f(i8* nocapture readonly %p, i32 %i, i32 %N) local_unnamed_addr SLHATTR {
+; CHECK-LABEL: f
+entry:
+; SLH:  cmp sp, #0
+; SLH:  csetm x16, ne
+; NOSLH-NOT:  cmp sp, #0
+; NOSLH-NOT:  csetm x16, ne
+
+; SLH:  mov x17, sp
+; SLH:  and x17, x17, x16
+; SLH:  mov sp, x17
+; NOSLH-NOT:  mov x17, sp
+; NOSLH-NOT:  and x17, x17, x16
+; NOSLH-NOT:  mov sp, x17
+  %call = tail call i32 @tail_callee(i32 %i)
+; SLH:  cmp sp, #0
+; SLH:  csetm x16, ne
+; NOSLH-NOT:  cmp sp, #0
+; NOSLH-NOT:  csetm x16, ne
+  %cmp = icmp slt i32 %call, %N
+  br i1 %cmp, label %if.then, label %return
+; GlobalISel sometimes lowers the branch to a b.ne instead of the expected b.ge.
+; CHECK: b.[[COND:(ge)|(lt)|(ne)]]
+
+if.then:                                          ; preds = %entry
+; NOSLH-NOT: csel x16, x16, xzr, {{(lt)|(ge)|(eq)}}
+; SLH-DAG: csel x16, x16, xzr, {{(lt)|(ge)|(eq)}}
+  %idxprom = sext i32 %i to i64
+  %arrayidx = getelementptr inbounds i8, i8* %p, i64 %idxprom
+  %0 = load i8, i8* %arrayidx, align 1
+; CHECK-DAG:      ldrb [[LOADED:w[0-9]+]],
+  %conv = zext i8 %0 to i32
+  br label %return
+
+; SLH-DAG: csel x16, x16, xzr, [[COND]]
+; NOSLH-NOT: csel x16, x16, xzr, [[COND]]
+return:                                           ; preds = %entry, %if.then
+  %retval.0 = phi i32 [ %conv, %if.then ], [ 0, %entry ]
+; SLH:  mov x17, sp
+; SLH:  and x17, x17, x16
+; SLH:  mov sp, x17
+; NOSLH-NOT:  mov x17, sp
+; NOSLH-NOT:  and x17, x17, x16
+; NOSLH-NOT:  mov sp, x17
+  ret i32 %retval.0
+}
+
+; Make sure that for a tail call, taint doesn't get put into SP twice.
+define i32 @tail_caller(i32 %a) local_unnamed_addr SLHATTR {
+; CHECK-LABEL: tail_caller:
+; SLH:     mov     x17, sp
+; SLH:     and     x17, x17, x16
+; SLH:     mov     sp, x17
+; NOSLH-NOT:     mov     x17, sp
+; NOSLH-NOT:     and     x17, x17, x16
+; NOSLH-NOT:     mov     sp, x17
+;  GlobalISel does not optimize tail calls (yet?), so only check that the
+;  cross-call taint register setup code is missing when a tail call was
+;  actually produced.
+; SLH:     {{(bl tail_callee[[:space:]] cmp sp, #0)|(b tail_callee)}}
+; SLH-NOT: cmp sp, #0
+  %call = tail call i32 @tail_callee(i32 %a)
+  ret i32 %call
+}
+
+declare i32 @tail_callee(i32) local_unnamed_addr
+
+; Verify that no cb(n)z/tb(n)z instructions are produced when implementing
+; SLH
+define i32 @compare_branch_zero(i32, i32) SLHATTR {
+; CHECK-LABEL: compare_branch_zero
+  %3 = icmp eq i32 %0, 0
+  br i1 %3, label %then, label %else
+;SLH-NOT:   cb{{n?}}z
+;NOSLH:     cb{{n?}}z
+then:
+  %4 = sdiv i32 5, %1
+  ret i32 %4
+else:
+  %5 = sdiv i32 %1, %0
+  ret i32 %5
+}
+
+define i32 @test_branch_zero(i32, i32) SLHATTR {
+; CHECK-LABEL: test_branch_zero
+  %3 = and i32 %0, 16
+  %4 = icmp eq i32 %3, 0
+  br i1 %4, label %then, label %else
+;SLH-NOT:   tb{{n?}}z
+;NOSLH:     tb{{n?}}z
+then:
+  %5 = sdiv i32 5, %1
+  ret i32 %5
+else:
+  %6 = sdiv i32 %1, %0
+  ret i32 %6
+}
+
+define i32 @landingpad(i32 %l0, i32 %l1) SLHATTR personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK-LABEL: landingpad
+entry:
+; SLH:  cmp sp, #0
+; SLH:  csetm x16, ne
+; NOSLH-NOT:  cmp sp, #0
+; NOSLH-NOT:  csetm x16, ne
+; CHECK: bl _Z10throwing_fv
+  invoke void @_Z10throwing_fv()
+          to label %exit unwind label %lpad
+; SLH:  cmp sp, #0
+; SLH:  csetm x16, ne
+
+lpad:
+  %l4 = landingpad { i8*, i32 }
+          catch i8* null
+; SLH:  cmp sp, #0
+; SLH:  csetm x16, ne
+; NOSLH-NOT:  cmp sp, #0
+; NOSLH-NOT:  csetm x16, ne
+  %l5 = extractvalue { i8*, i32 } %l4, 0
+  %l6 = tail call i8* @__cxa_begin_catch(i8* %l5)
+  %l7 = icmp sgt i32 %l0, %l1
+  br i1 %l7, label %then, label %else
+; GlobalISel sometimes lowers the branch to a b.ne instead of the expected b.le.
+; CHECK: b.[[COND:(le)|(gt)|(ne)]]
+
+then:
+; SLH-DAG: csel x16, x16, xzr, [[COND]]
+  %l9 = sdiv i32 %l0, %l1
+  br label %postif
+
+else:
+; SLH-DAG: csel x16, x16, xzr, {{(gt)|(le)|(eq)}}
+  %l11 = sdiv i32 %l1, %l0
+  br label %postif
+
+postif:
+  %l13 = phi i32 [ %l9, %then ], [ %l11, %else ]
+  tail call void @__cxa_end_catch()
+  br label %exit
+
+exit:
+  %l15 = phi i32 [ %l13, %postif ], [ 0, %entry ]
+  ret i32 %l15
+}
+
+declare i32 @__gxx_personality_v0(...)
+declare void @_Z10throwing_fv() local_unnamed_addr
+declare i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+declare void @__cxa_end_catch() local_unnamed_addr
diff --git a/test/CodeGen/AArch64/speculation-hardening.mir b/test/CodeGen/AArch64/speculation-hardening.mir
new file mode 100644
index 0000000..cf8357d
--- /dev/null
+++ b/test/CodeGen/AArch64/speculation-hardening.mir
@@ -0,0 +1,117 @@
+# RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu \
+# RUN:     -start-before aarch64-speculation-hardening -o - %s \
+# RUN:   | FileCheck %s --dump-input-on-failure
+
+# Check that the speculation hardening pass generates code as expected for
+# basic blocks ending with a variety of branch patterns:
+# - (1) no branches (fallthrough)
+# - (2) one unconditional branch
+# - (3) one conditional branch + fall-through
+# - (4) one conditional branch + one unconditional branch
+# - other direct branches do not seem to be generated by the AArch64 codegen;
+#   a sketch of the expected instrumentation follows below.
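+#
+# A sketch of the per-successor instrumentation for a conditional branch,
+# matching the CHECK lines below: each successor receives a CSEL that zeroes
+# the x16 taint register unless its own edge condition actually held:
+#   b.lt .LBB_taken
+#   # fall-through block: csel x16, x16, xzr, ge
+#   # taken block:        csel x16, x16, xzr, lt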
+--- |
+  define void @nobranch_fallthrough(i32 %a, i32 %b) speculative_load_hardening {
+   ret void
+  }
+  define void @uncondbranch(i32 %a, i32 %b) speculative_load_hardening {
+   ret void
+  }
+  define void @condbranch_fallthrough(i32 %a, i32 %b) speculative_load_hardening {
+   ret void
+  }
+  define void @condbranch_uncondbranch(i32 %a, i32 %b) speculative_load_hardening {
+   ret void
+  }
+  define void @indirectbranch(i32 %a, i32 %b) speculative_load_hardening {
+   ret void
+  }
+...
+---
+name:            nobranch_fallthrough
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: nobranch_fallthrough
+  bb.0:
+    successors: %bb.1
+    liveins: $w0, $w1
+  ; CHECK-NOT: csel
+  bb.1:
+    liveins: $w0
+    RET undef $lr, implicit $w0
+...
+---
+name:            uncondbranch
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: uncondbranch
+  bb.0:
+    successors: %bb.1
+    liveins: $w0, $w1
+    B %bb.1
+  ; CHECK-NOT: csel
+  bb.1:
+    liveins: $w0
+    RET undef $lr, implicit $w0
+...
+---
+name:            condbranch_fallthrough
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: condbranch_fallthrough
+  bb.0:
+    successors: %bb.1, %bb.2
+    liveins: $w0, $w1
+    $wzr = SUBSWrs renamable $w0, renamable $w1, 0, implicit-def $nzcv
+    Bcc 11, %bb.2, implicit $nzcv
+  ; CHECK: b.lt [[BB_LT_T:\.LBB[0-9_]+]]
+
+  bb.1:
+    liveins: $nzcv, $w0
+  ; CHECK: csel x16, x16, xzr, ge
+    RET undef $lr, implicit $w0
+  bb.2:
+    liveins: $nzcv, $w0
+  ; CHECK: csel x16, x16, xzr, lt
+    RET undef $lr, implicit $w0
+...
+---
+name:            condbranch_uncondbranch
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: condbranch_uncondbranch
+  bb.0:
+    successors: %bb.1, %bb.2
+    liveins: $w0, $w1
+    $wzr = SUBSWrs renamable $w0, renamable $w1, 0, implicit-def $nzcv
+    Bcc 11, %bb.2, implicit $nzcv
+    B %bb.1, implicit $nzcv
+  ; CHECK: b.lt [[BB_LT_T:\.LBB[0-9_]+]]
+
+  bb.1:
+    liveins: $nzcv, $w0
+  ; CHECK: csel x16, x16, xzr, ge
+    RET undef $lr, implicit $w0
+  bb.2:
+    liveins: $nzcv, $w0
+  ; CHECK: csel x16, x16, xzr, lt
+    RET undef $lr, implicit $w0
+...
+---
+name:            indirectbranch
+tracksRegLiveness: true
+body:             |
+  ; Check that no instrumentation is done on indirect branches (for now).
+  ; CHECK-LABEL: indirectbranch
+  bb.0:
+    successors: %bb.1, %bb.2
+    liveins: $x0
+    BR $x0
+  bb.1:
+    liveins: $x0
+  ; CHECK-NOT: csel
+    RET undef $lr, implicit $x0
+  bb.2:
+    liveins: $x0
+  ; CHECK-NOT: csel
+    RET undef $lr, implicit $x0
+...
diff --git a/test/CodeGen/AArch64/sponentry.ll b/test/CodeGen/AArch64/sponentry.ll
index 5b3638a..8bd995f 100644
--- a/test/CodeGen/AArch64/sponentry.ll
+++ b/test/CodeGen/AArch64/sponentry.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-windows-msvc -disable-fp-elim %s -o - | FileCheck %s
-; RUN: llc -mtriple=aarch64-windows-msvc -fast-isel -disable-fp-elim %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-windows-msvc -frame-pointer=all %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-windows-msvc -fast-isel -frame-pointer=all %s -o - | FileCheck %s
 ; RUN: llc -mtriple=aarch64-windows-msvc %s -o - | FileCheck %s --check-prefix=NOFP
 ; RUN: llc -mtriple=aarch64-windows-msvc -fast-isel %s -o - | FileCheck %s --check-prefix=NOFP
 
diff --git a/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll b/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
index 8f1a7b4..946d1cc 100644
--- a/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
+++ b/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=pic -frame-pointer=all | FileCheck %s
 
 @__stack_chk_guard = external global i64*
 
diff --git a/test/CodeGen/AArch64/swifterror.ll b/test/CodeGen/AArch64/swifterror.ll
index 8ea8946..3c3ab60 100644
--- a/test/CodeGen/AArch64/swifterror.ll
+++ b/test/CodeGen/AArch64/swifterror.ll
@@ -1,5 +1,5 @@
-; RUN: llc -fast-isel-sink-local-values -verify-machineinstrs -disable-fp-elim -enable-shrink-wrap=false < %s -mtriple=aarch64-apple-ios -disable-post-ra | FileCheck -allow-deprecated-dag-overlap --check-prefix=CHECK-APPLE %s
-; RUN: llc -fast-isel-sink-local-values -verify-machineinstrs -disable-fp-elim -O0 -fast-isel < %s -mtriple=aarch64-apple-ios -disable-post-ra | FileCheck -allow-deprecated-dag-overlap --check-prefix=CHECK-O0 %s
+; RUN: llc -fast-isel-sink-local-values -verify-machineinstrs -frame-pointer=all -enable-shrink-wrap=false < %s -mtriple=aarch64-apple-ios -disable-post-ra | FileCheck -allow-deprecated-dag-overlap --check-prefix=CHECK-APPLE %s
+; RUN: llc -fast-isel-sink-local-values -verify-machineinstrs -frame-pointer=all -O0 -fast-isel < %s -mtriple=aarch64-apple-ios -disable-post-ra | FileCheck -allow-deprecated-dag-overlap --check-prefix=CHECK-O0 %s
 
 declare i8* @malloc(i64)
 declare void @free(i8*)
diff --git a/test/CodeGen/AArch64/tail-call-unused-zext.ll b/test/CodeGen/AArch64/tail-call-unused-zext.ll
new file mode 100644
index 0000000..1617d13
--- /dev/null
+++ b/test/CodeGen/AArch64/tail-call-unused-zext.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple=arm64--- -stop-after=expand-isel-pseudos -o - %s | FileCheck %s
+
+; Check that we ignore the zeroext attribute on the return type of the tail
+; call, since the return value is unused. This happens during CodeGenPrepare in
+; dupRetToEnableTailCallOpts, which calls attributesPermitTailCall to check if
+; the attributes of the caller and the callee match.
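+; In short: a zeroext/signext mismatch between caller and callee would
+; normally block the duplication, but since nothing reads the returned bit
+; the attribute can be ignored and the TCRETURNdi below is still formed.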
+
+declare zeroext i1 @zcallee()
+define void @zcaller() {
+; CHECK-LABEL: name: zcaller
+entry:
+  br i1 undef, label %calllabel, label %retlabel
+calllabel:
+; CHECK: bb.1.calllabel:
+; CHECK-NOT: BL @zcallee
+; CHECK-NEXT: TCRETURNdi @zcallee
+  %unused_result = tail call zeroext i1 @zcallee()
+  br label %retlabel
+retlabel:
+  ret void
+}
+
+declare signext i1 @scallee()
+define void @scaller() {
+; CHECK-LABEL: name: scaller
+entry:
+  br i1 undef, label %calllabel, label %retlabel
+calllabel:
+; CHECK: bb.1.calllabel:
+; CHECK-NOT: BL @scallee
+; CHECK-NEXT: TCRETURNdi @scallee
+  %unused_result = tail call signext i1 @scallee()
+  br label %retlabel
+retlabel:
+  ret void
+}
diff --git a/test/CodeGen/AArch64/wineh-mingw.ll b/test/CodeGen/AArch64/wineh-mingw.ll
new file mode 100644
index 0000000..ae26b06
--- /dev/null
+++ b/test/CodeGen/AArch64/wineh-mingw.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -exception-model=wineh -mtriple=aarch64-pc-mingw32 | FileCheck %s -check-prefix=WINEH
+; RUN: llc < %s -exception-model=wineh -mtriple=aarch64-pc-mingw32 -filetype=obj | llvm-readobj -s | FileCheck %s -check-prefix=WINEH-SECTIONS
+
+; Check emission of eh handler and handler data
+declare i32 @_d_eh_personality(i32, i32, i64, i8*, i8*)
+declare void @_d_eh_resume_unwind(i8*)
+
+declare i32 @bar()
+
+define i32 @foo4() #0 personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality {
+entry:
+  %step = alloca i32, align 4
+  store i32 0, i32* %step
+  %tmp = load i32, i32* %step
+
+  %tmp1 = invoke i32 @bar()
+          to label %finally unwind label %landingpad
+
+finally:
+  store i32 1, i32* %step
+  br label %endtryfinally
+
+landingpad:
+  %landing_pad = landingpad { i8*, i32 }
+          cleanup
+  %tmp3 = extractvalue { i8*, i32 } %landing_pad, 0
+  store i32 2, i32* %step
+  call void @_d_eh_resume_unwind(i8* %tmp3)
+  unreachable
+
+endtryfinally:
+  %tmp10 = load i32, i32* %step
+  ret i32 %tmp10
+}
+; WINEH-LABEL: foo4:
+; WINEH: .seh_proc foo4
+; WINEH: .seh_handler _d_eh_personality, @unwind, @except
+; WINEH: ret
+; WINEH: .section .xdata,"dr"
+; WINEH-NEXT: .seh_handlerdata
+; WINEH-NEXT: .text
+; WINEH-NEXT: .seh_endproc
+; WINEH: .section .xdata,"dr"
+; WINEH-NEXT: .p2align 2
+; WINEH-NEXT: GCC_except_table0:
+
+; WINEH-SECTIONS: Name: .xdata
+; WINEH-SECTIONS-NOT: Name: .gcc_except_table
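+; With the wineh model the LSDA lands in .xdata via .seh_handlerdata instead
+; of a standalone .gcc_except_table section, which is exactly what the
+; WINEH-SECTIONS lines assert.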
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
index f561cc7..48f3294 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
@@ -2,17 +2,72 @@
 # RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
 
 ---
-name:            test_and
+name: test_and_i32
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
 
-    ; CHECK-LABEL: name: test_and
+    ; CHECK-LABEL: name: test_and_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0 = COPY [[AND]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = G_AND %0, %1
     $vgpr0 = COPY %2
 ...
+
+---
+name: test_and_i1
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: test_and_i1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK: S_NOP 0, implicit [[AND]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = G_CONSTANT i64 0
+    %3:_(s1) = G_ICMP intpred(ne), %0, %2
+    %4:_(s1) = G_ICMP intpred(ne), %1, %2
+    %5:_(s32) = G_AND %0, %1
+    S_NOP 0, implicit %5
+...
+
+---
+name: test_and_i64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_and_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = G_AND %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: test_and_v2i32
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_and_v2i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0_vgpr1 = COPY [[AND]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<2 x s32>) = G_AND %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg.mir
new file mode 100644
index 0000000..738106d
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg.mir
@@ -0,0 +1,71 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomic_cmpxchg_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
+    ; CHECK-LABEL: name: atomic_cmpxchg_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr3
+    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p1), [[COPY1]], [[COPY2]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = COPY $sgpr3
+    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomic_cmpxchg_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomic_cmpxchg_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomic_cmpxchg_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
+    ; CHECK-LABEL: name: atomic_cmpxchg_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr3
+    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p1), [[COPY1]], [[COPY2]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = COPY $sgpr3
+    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomic_cmpxchg_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomic_cmpxchg_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-add.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-add.mir
new file mode 100644
index 0000000..a28718d
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-add.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_add_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_add_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_add_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_add_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_add_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_add_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_add_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_add_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-and.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-and.mir
new file mode 100644
index 0000000..5ea7c58
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-and.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_and_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_and_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_AND:%[0-9]+]]:_(s32) = G_ATOMICRMW_AND [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_AND %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_and_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_and_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_AND:%[0-9]+]]:_(s32) = G_ATOMICRMW_AND [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_AND %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_and_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_and_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_AND:%[0-9]+]]:_(s32) = G_ATOMICRMW_AND [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_AND %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_and_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_and_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_AND:%[0-9]+]]:_(s32) = G_ATOMICRMW_AND [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_AND %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-max.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-max.mir
new file mode 100644
index 0000000..c1b4648
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-max.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_max_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_max_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_MAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_MAX [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_max_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_max_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_MAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_MAX [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_max_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_max_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_MAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_MAX [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_max_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_max_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_MAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_MAX [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-min.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-min.mir
new file mode 100644
index 0000000..08ec0a0
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-min.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_min_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_min_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_MIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_MIN [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_min_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_min_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_MIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_MIN [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_min_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_min_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_MIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_MIN [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_min_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_min_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_MIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_MIN [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-nand.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-nand.mir
new file mode 100644
index 0000000..ccce93d
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-nand.mir
@@ -0,0 +1,22 @@
+# RUN: not llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -o - %s 2>&1| FileCheck -check-prefix=ERROR %s
+
+# This needs to be expanded into a cmpxchg loop.
+# TODO: Will AtomicExpand still do this?
+
+# ERROR: LLVM ERROR: unable to legalize instruction: %2:_(s32) = G_ATOMICRMW_NAND %0:_(p1), %1:_ :: (load store seq_cst 4, addrspace 1) (in function: atomicrmw_nand_global_i32)
+
+---
+name: atomicrmw_nand_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_nand_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_NAND:%[0-9]+]]:_(s32) = G_ATOMICRMW_NAND [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_NAND %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
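+# For reference, the cmpxchg loop such an expansion would produce (an
+# IR-level sketch, not something this test currently checks):
+#   loop:
+#     %old = phi i32 [ %init, %entry ], [ %loaded, %loop ]
+#     %tmp = and i32 %old, %val
+#     %nand = xor i32 %tmp, -1
+#     %res = cmpxchg i32* %ptr, i32 %old, i32 %nand seq_cst seq_cst
+#     %loaded = extractvalue { i32, i1 } %res, 0
+#     %ok = extractvalue { i32, i1 } %res, 1
+#     br i1 %ok, label %done, label %loop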
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-or.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-or.mir
new file mode 100644
index 0000000..e214ee6
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-or.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_or_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_or_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_OR:%[0-9]+]]:_(s32) = G_ATOMICRMW_OR [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_OR %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_or_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_or_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_OR:%[0-9]+]]:_(s32) = G_ATOMICRMW_OR [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_OR %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_or_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_or_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_OR:%[0-9]+]]:_(s32) = G_ATOMICRMW_OR [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_OR %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_or_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_or_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_OR:%[0-9]+]]:_(s32) = G_ATOMICRMW_OR [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_OR %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-sub.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-sub.mir
new file mode 100644
index 0000000..974cd3f
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-sub.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_sub_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_sub_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_SUB:%[0-9]+]]:_(s32) = G_ATOMICRMW_SUB [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_SUB %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_sub_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_sub_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_SUB:%[0-9]+]]:_(s32) = G_ATOMICRMW_SUB [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_SUB %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_sub_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_sub_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_SUB:%[0-9]+]]:_(s32) = G_ATOMICRMW_SUB [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_SUB %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_sub_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_sub_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_SUB:%[0-9]+]]:_(s32) = G_ATOMICRMW_SUB [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_SUB %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-umax.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-umax.mir
new file mode 100644
index 0000000..0fc62a4
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-umax.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_umax_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umax_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_UMAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMAX [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_umax_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_umax_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_UMAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMAX [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_umax_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umax_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_UMAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMAX [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_umax_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_umax_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_UMAX:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMAX [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-umin.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-umin.mir
new file mode 100644
index 0000000..a72b6d7
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-umin.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_umin_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umin_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_UMIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMIN [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_umin_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_umin_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_UMIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMIN [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_umin_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umin_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_UMIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMIN [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_umin_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_umin_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_UMIN:%[0-9]+]]:_(s32) = G_ATOMICRMW_UMIN [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xchg-flat.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xchg-flat.mir
new file mode 100644
index 0000000..96b33c1
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xchg-flat.mir
@@ -0,0 +1,36 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=bonaire -O0 -run-pass=legalizer -o - %s | FileCheck %s
+# RUN: not llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -o /dev/null %s 2>&1 | FileCheck -check-prefix=ERROR %s
+
+# ERROR: LLVM ERROR: unable to legalize instruction: %2:_(s32) = G_ATOMICRMW_XCHG %0:_(p0), %1:_ :: (load store seq_cst 4) (in function: atomicrmw_xchg_flat_i32)
+
+
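+# Flat (addrspace 0) atomics only exist from CI (bonaire) onwards, which is
+# why the SI (tahiti) run above is expected to fail to legalize.
+#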
+---
+name: atomicrmw_xchg_flat_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_xchg_flat_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:_(s32) = G_ATOMICRMW_XCHG [[COPY]](p0), [[COPY1]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_xchg_flat_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+    ; CHECK-LABEL: name: atomicrmw_xchg_flat_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $sgpr2_sgpr3
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:_(s64) = G_ATOMICRMW_XCHG [[COPY]](p0), [[COPY1]] :: (load store seq_cst 8)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s64) = COPY $sgpr2_sgpr3
+    %2:_(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 8, addrspace 0)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xchg.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xchg.mir
new file mode 100644
index 0000000..424ac33
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xchg.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_xchg_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_xchg_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:_(s32) = G_ATOMICRMW_XCHG [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_xchg_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_xchg_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:_(s32) = G_ATOMICRMW_XCHG [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_xchg_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+    ; CHECK-LABEL: name: atomicrmw_xchg_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $sgpr2_sgpr3
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:_(s64) = G_ATOMICRMW_XCHG [[COPY]](p1), [[COPY1]] :: (load store seq_cst 8, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s64) = COPY $sgpr2_sgpr3
+    %2:_(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 8, addrspace 1)
+...
+
+---
+name: atomicrmw_xchg_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr2_sgpr3
+    ; CHECK-LABEL: name: atomicrmw_xchg_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $sgpr2_sgpr3
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:_(s64) = G_ATOMICRMW_XCHG [[COPY]](p3), [[COPY1]] :: (load store seq_cst 8, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s64) = COPY $sgpr2_sgpr3
+    %2:_(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 8, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xor.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xor.mir
new file mode 100644
index 0000000..ce5f78f
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-atomicrmw-xor.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: atomicrmw_xor_global_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_xor_global_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+    ; CHECK: [[ATOMICRMW_XOR:%[0-9]+]]:_(s32) = G_ATOMICRMW_XOR [[COPY]](p1), [[COPY1]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_xor_local_i32
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_xor_local_i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+    ; CHECK: [[ATOMICRMW_XOR:%[0-9]+]]:_(s32) = G_ATOMICRMW_XOR [[COPY]](p3), [[COPY1]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
+
+---
+name: atomicrmw_xor_global_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+    ; CHECK-LABEL: name: atomicrmw_xor_global_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $sgpr2_sgpr3
+    ; CHECK: [[ATOMICRMW_XOR:%[0-9]+]]:_(s64) = G_ATOMICRMW_XOR [[COPY]](p1), [[COPY1]] :: (load store seq_cst 8, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s64) = COPY $sgpr2_sgpr3
+    %2:_(s64) = G_ATOMICRMW_XOR %0, %1 :: (load store seq_cst 8, addrspace 1)
+...
+
+---
+name: atomicrmw_xor_local_i64
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr2_sgpr3
+    ; CHECK-LABEL: name: atomicrmw_xor_local_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $sgpr2_sgpr3
+    ; CHECK: [[ATOMICRMW_XOR:%[0-9]+]]:_(s64) = G_ATOMICRMW_XOR [[COPY]](p3), [[COPY1]] :: (load store seq_cst 8, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s64) = COPY $sgpr2_sgpr3
+    %2:_(s64) = G_ATOMICRMW_XOR %0, %1 :: (load store seq_cst 8, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-brcond.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-brcond.mir
new file mode 100644
index 0000000..eb8b349
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-brcond.mir
@@ -0,0 +1,24 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: legal_brcond
+body:             |
+  ; CHECK-LABEL: name: legal_brcond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; CHECK:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+  ; CHECK:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; CHECK: bb.1:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $vgpr0, $vgpr1
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s1) = G_ICMP intpred(ne), %0, %1
+    G_BRCOND %2, %bb.1
+
+  bb.1:
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir
new file mode 100644
index 0000000..2768ff8
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-build-vector.mir
@@ -0,0 +1,586 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -run-pass=legalizer %s -o - | FileCheck %s
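+# All of the G_BUILD_VECTOR cases below are already legal for these types, so the legalizer is expected to pass them through unchanged.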
+
+---
+name: legal_v2s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: legal_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<2 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(<2 x s32>) = G_BUILD_VECTOR %0, %1
+    S_NOP 0, implicit %2
+...
+---
+name: legal_v3s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2
+    ; CHECK-LABEL: name: legal_v3s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(<3 x s32>) = G_BUILD_VECTOR %0, %1, %2
+    S_NOP 0, implicit %3
+...
+---
+name: legal_v4s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+    ; CHECK-LABEL: name: legal_v4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<4 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(<4 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3
+    S_NOP 0, implicit %4
+...
+---
+name: legal_v5s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+    ; CHECK-LABEL: name: legal_v5s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<5 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(<5 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4
+    S_NOP 0, implicit %5
+...
+---
+name: legal_v6s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-LABEL: name: legal_v6s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<6 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<6 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(<6 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5
+    S_NOP 0, implicit %6
+...
+---
+name: legal_v7s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+    ; CHECK-LABEL: name: legal_v7s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<7 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<7 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(<7 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6
+    S_NOP 0, implicit %7
+...
+---
+name: legal_v8s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
+    ; CHECK-LABEL: name: legal_v8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<8 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(<8 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7
+    S_NOP 0, implicit %8
+...
+---
+name: legal_v9s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+    ; CHECK-LABEL: name: legal_v9s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<9 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<9 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(<9 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8
+    S_NOP 0, implicit %9
+...
+---
+name: legal_v10s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9
+    ; CHECK-LABEL: name: legal_v10s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<10 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<10 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(s32) = COPY $vgpr9
+    %10:_(<10 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9
+    S_NOP 0, implicit %10
+...
+---
+name: legal_v11s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10
+    ; CHECK-LABEL: name: legal_v11s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
+    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<11 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<11 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(s32) = COPY $vgpr9
+    %10:_(s32) = COPY $vgpr10
+    %11:_(<11 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10
+    S_NOP 0, implicit %11
+...
+---
+name: legal_v12s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11
+    ; CHECK-LABEL: name: legal_v12s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
+    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
+    ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<12 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<12 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(s32) = COPY $vgpr9
+    %10:_(s32) = COPY $vgpr10
+    %11:_(s32) = COPY $vgpr11
+    %12:_(<12 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11
+    S_NOP 0, implicit %12
+...
+---
+name: legal_v13s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12
+    ; CHECK-LABEL: name: legal_v13s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
+    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
+    ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
+    ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<13 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<13 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(s32) = COPY $vgpr9
+    %10:_(s32) = COPY $vgpr10
+    %11:_(s32) = COPY $vgpr11
+    %12:_(s32) = COPY $vgpr12
+    %13:_(<13 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12
+    S_NOP 0, implicit %13
+...
+---
+name: legal_v14s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13
+    ; CHECK-LABEL: name: legal_v14s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
+    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
+    ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
+    ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
+    ; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<14 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<14 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(s32) = COPY $vgpr9
+    %10:_(s32) = COPY $vgpr10
+    %11:_(s32) = COPY $vgpr11
+    %12:_(s32) = COPY $vgpr12
+    %13:_(s32) = COPY $vgpr13
+    %14:_(<14 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13
+    S_NOP 0, implicit %14
+...
+---
+name: legal_v15s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14
+    ; CHECK-LABEL: name: legal_v15s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
+    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
+    ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
+    ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
+    ; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
+    ; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<15 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<15 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(s32) = COPY $vgpr9
+    %10:_(s32) = COPY $vgpr10
+    %11:_(s32) = COPY $vgpr11
+    %12:_(s32) = COPY $vgpr12
+    %13:_(s32) = COPY $vgpr13
+    %14:_(s32) = COPY $vgpr14
+    %15:_(<15 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14
+    S_NOP 0, implicit %15
+...
+---
+name: legal_v16s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
+    ; CHECK-LABEL: name: legal_v16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
+    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
+    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
+    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
+    ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
+    ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
+    ; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
+    ; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
+    ; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<16 x s32>)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s32) = COPY $vgpr4
+    %5:_(s32) = COPY $vgpr5
+    %6:_(s32) = COPY $vgpr6
+    %7:_(s32) = COPY $vgpr7
+    %8:_(s32) = COPY $vgpr8
+    %9:_(s32) = COPY $vgpr9
+    %10:_(s32) = COPY $vgpr10
+    %11:_(s32) = COPY $vgpr11
+    %12:_(s32) = COPY $vgpr12
+    %13:_(s32) = COPY $vgpr13
+    %14:_(s32) = COPY $vgpr14
+    %15:_(s32) = COPY $vgpr15
+    %16:_(<16 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15
+    S_NOP 0, implicit %16
+...
+---
+name: legal_v2s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+    ; CHECK-LABEL: name: legal_v2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<2 x s64>)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(<2 x s64>) = G_BUILD_VECTOR %0, %1
+    S_NOP 0, implicit %2
+...
+---
+name: legal_v3s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
+    ; CHECK-LABEL: name: legal_v3s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s64>)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = COPY $vgpr4_vgpr5
+    %3:_(<3 x s64>) = G_BUILD_VECTOR %0, %1, %2
+    S_NOP 0, implicit %3
+...
+---
+name: legal_v4s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5, $vgpr6_vgpr7
+    ; CHECK-LABEL: name: legal_v4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $vgpr6_vgpr7
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<4 x s64>)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = COPY $vgpr4_vgpr5
+    %3:_(s64) = COPY $vgpr6_vgpr7
+    %4:_(<4 x s64>) = G_BUILD_VECTOR %0, %1, %2, %3
+    S_NOP 0, implicit %4
+...
+---
+name: legal_v5s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5, $vgpr6_vgpr7, $vgpr8_vgpr9
+    ; CHECK-LABEL: name: legal_v5s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $vgpr6_vgpr7
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $vgpr8_vgpr9
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64), [[COPY4]](s64)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<5 x s64>)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = COPY $vgpr4_vgpr5
+    %3:_(s64) = COPY $vgpr6_vgpr7
+    %4:_(s64) = COPY $vgpr8_vgpr9
+    %5:_(<5 x s64>) = G_BUILD_VECTOR %0, %1, %2, %3, %4
+    S_NOP 0, implicit %5
+...
+---
+name: legal_v6s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5, $vgpr6_vgpr7, $vgpr8_vgpr9, $vgpr10_vgpr11
+    ; CHECK-LABEL: name: legal_v6s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $vgpr6_vgpr7
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $vgpr8_vgpr9
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $vgpr10_vgpr11
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<6 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64), [[COPY4]](s64), [[COPY5]](s64)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<6 x s64>)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = COPY $vgpr4_vgpr5
+    %3:_(s64) = COPY $vgpr6_vgpr7
+    %4:_(s64) = COPY $vgpr8_vgpr9
+    %5:_(s64) = COPY $vgpr10_vgpr11
+    %6:_(<6 x s64>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5
+    S_NOP 0, implicit %6
+...
+---
+name: legal_v7s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5, $vgpr6_vgpr7, $vgpr8_vgpr9, $vgpr10_vgpr11, $vgpr12_vgpr13
+    ; CHECK-LABEL: name: legal_v7s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $vgpr6_vgpr7
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $vgpr8_vgpr9
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $vgpr10_vgpr11
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $vgpr12_vgpr13
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<7 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64), [[COPY4]](s64), [[COPY5]](s64), [[COPY6]](s64)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<7 x s64>)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = COPY $vgpr4_vgpr5
+    %3:_(s64) = COPY $vgpr6_vgpr7
+    %4:_(s64) = COPY $vgpr8_vgpr9
+    %5:_(s64) = COPY $vgpr10_vgpr11
+    %6:_(s64) = COPY $vgpr12_vgpr13
+    %7:_(<7 x s64>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6
+    S_NOP 0, implicit %7
+...
+---
+name: legal_v8s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5, $vgpr6_vgpr7, $vgpr8_vgpr9, $vgpr10_vgpr11, $vgpr12_vgpr13, $vgpr14_vgpr15
+    ; CHECK-LABEL: name: legal_v8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $vgpr6_vgpr7
+    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $vgpr8_vgpr9
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $vgpr10_vgpr11
+    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $vgpr12_vgpr13
+    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $vgpr14_vgpr15
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64), [[COPY4]](s64), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64)
+    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<8 x s64>)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = COPY $vgpr4_vgpr5
+    %3:_(s64) = COPY $vgpr6_vgpr7
+    %4:_(s64) = COPY $vgpr8_vgpr9
+    %5:_(s64) = COPY $vgpr10_vgpr11
+    %6:_(s64) = COPY $vgpr12_vgpr13
+    %7:_(s64) = COPY $vgpr14_vgpr15
+    %8:_(<8 x s64>) = G_BUILD_VECTOR %0, %1, %2, %3, %4, %5, %6, %7
+    S_NOP 0, implicit %8
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-concat-vectors.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-concat-vectors.mir
new file mode 100644
index 0000000..c3e1bca
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-concat-vectors.mir
@@ -0,0 +1,129 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: concat_vectors_v2s32_v2s32
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+    ; CHECK-LABEL: name: concat_vectors_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
+...
+
+---
+name: concat_vectors_v2s16_v2s16
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: concat_vectors_v2s16_v2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $vgpr1
+    %2:_(<4 x s16>) = G_CONCAT_VECTORS %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: concat_vectors_v2s16_v2s16_v2s16_v2s16
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+    ; CHECK-LABEL: name: concat_vectors_v2s16_v2s16_v2s16_v2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>), [[COPY2]](<2 x s16>), [[COPY3]](<2 x s16>)
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<8 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $vgpr1
+    %2:_(<2 x s16>) = COPY $vgpr2
+    %3:_(<2 x s16>) = COPY $vgpr3
+    %4:_(<8 x s16>) = G_CONCAT_VECTORS %0, %1, %2, %3
+    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %4
+...
+
+---
+name: concat_vectors_v4s16_v4s16
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+    ; CHECK-LABEL: name: concat_vectors_v4s16_v4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[COPY]](<4 x s16>), [[COPY1]](<4 x s16>)
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<8 x s16>)
+    %0:_(<4 x s16>) = COPY $vgpr0_vgpr1
+    %1:_(<4 x s16>) = COPY $vgpr2_vgpr3
+    %2:_(<8 x s16>) = G_CONCAT_VECTORS %0, %1
+    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
+...
+
+---
+name: concat_vectors_v4s32_v4s32
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK-LABEL: name: concat_vectors_v4s32_v4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+    %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    %2:_(<8 x s32>) = G_CONCAT_VECTORS %0, %1
+    $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+...
+
+---
+name: concat_vectors_v2s32_v2s32_v2s32_v2s32
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5, $vgpr6_vgpr7
+    ; CHECK-LABEL: name: concat_vectors_v2s32_v2s32_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr6_vgpr7
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>), [[COPY2]](<2 x s32>), [[COPY3]](<2 x s32>)
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<2 x s32>) = COPY $vgpr4_vgpr5
+    %3:_(<2 x s32>) = COPY $vgpr6_vgpr7
+    %4:_(<8 x s32>) = G_CONCAT_VECTORS %0, %1, %2, %3
+    $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %4
+...
+
+---
+name: concat_vectors_v2s64_v2s64
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK-LABEL: name: concat_vectors_v2s64_v2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>)
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
+    %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1
+    $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-fabs.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-fabs.mir
new file mode 100644
index 0000000..31f1e41
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-fabs.mir
@@ -0,0 +1,25 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+---
+name: test_fabs_f32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_fabs_f32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_FABS %0
+...
+---
+name: test_fabs_f64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_fabs_f64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = G_FABS %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-fma.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-fma.mir
new file mode 100644
index 0000000..954859e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-fma.mir
@@ -0,0 +1,35 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_fma_f32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2
+
+    ; CHECK-LABEL: name: test_fma_f32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = G_FMA %0, %1, %2
+    $vgpr0 = COPY %3
+...
+---
+name: test_fma_f64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
+
+    ; CHECK-LABEL: name: test_fma_f64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[COPY]], [[COPY1]]
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = COPY $vgpr4_vgpr5
+    %3:_(s64) = G_FMA %0, %1, %2
+    $vgpr0_vgpr1 = COPY %3
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-fneg.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-fneg.mir
new file mode 100644
index 0000000..7d280f7
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-fneg.mir
@@ -0,0 +1,25 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+---
+name: test_fneg_f32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_fneg_f32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_FNEG %0
+...
+---
+name: test_fneg_f64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_fneg_f64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = G_FNEG %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-fptrunc.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-fptrunc.mir
new file mode 100644
index 0000000..40eae26
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-fptrunc.mir
@@ -0,0 +1,17 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+---
+name: test_fptrunc_f64_to_f32
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_fptrunc_f64_to_f32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[COPY]](s64)
+    ; CHECK: $vgpr0 = COPY [[FPTRUNC]](s32)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_FPTRUNC %0
+    $vgpr0 = COPY %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-fsub.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-fsub.mir
new file mode 100644
index 0000000..13012f4
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-fsub.mir
@@ -0,0 +1,36 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_fsub_f32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: test_fsub_f32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0 = COPY [[FSUB]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = G_FSUB %0, %1
+    $vgpr0 = COPY %2
+...
+---
+name: test_fsub_f64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_fsub_f64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
+    ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[FNEG]]
+    ; CHECK: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = G_FSUB %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
new file mode 100644
index 0000000..3deebe6
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
@@ -0,0 +1,25 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_intrinsic_round_f32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_intrinsic_round_f32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_INTRINSIC_ROUND %0
+...
+---
+name: test_intrinsic_round_f64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_intrinsic_round_f64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = G_INTRINSIC_ROUND %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir
new file mode 100644
index 0000000..d53f112
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-trunc.mir
@@ -0,0 +1,25 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_intrinsic_trunc_f32
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_intrinsic_trunc_f32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_INTRINSIC_TRUNC %0
+...
+---
+name: test_intrinsic_trunc_f64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_intrinsic_trunc_f64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = G_INTRINSIC_TRUNC %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
index 6e988da..380cd92 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
@@ -2,17 +2,72 @@
 # RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
 
 ---
-name:            test_or
+name: test_or_i32
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
 
-    ; CHECK-LABEL: name: test_or
+    ; CHECK-LABEL: name: test_or_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0 = COPY [[OR]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = G_OR %0, %1
     $vgpr0 = COPY %2
 ...
+
+---
+name: test_or_i1
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: test_or_i1
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne)
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne)
+    ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
+    ; CHECK: S_NOP 0, implicit [[OR]](s1)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s1) = G_ICMP intpred(ne), %0, %2
+    %4:_(s1) = G_ICMP intpred(ne), %1, %2
+    %5:_(s1) = G_OR %3, %4
+    S_NOP 0, implicit %5
+...
+
+---
+name: test_or_i64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_or_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0_vgpr1 = COPY [[OR]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = G_OR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: test_or_v2i32
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_or_v2i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0_vgpr1 = COPY [[OR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<2 x s32>) = G_OR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-xor.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-xor.mir
index e5f4400..c9fa4d7 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/legalize-xor.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-xor.mir
@@ -2,17 +2,72 @@
 # RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
 
 ---
-name: test_xor
+name: test_xor_i32
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
 
-    ; CHECK-LABEL: name: test_xor
+    ; CHECK-LABEL: name: test_xor_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0 = COPY [[XOR]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = G_XOR %0, %1
     $vgpr0 = COPY %2
 ...
+
+---
+name: test_xor_i1
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: test_xor_i1
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne)
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne)
+    ; CHECK: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[ICMP1]]
+    ; CHECK: S_NOP 0, implicit [[XOR]](s1)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s1) = G_ICMP intpred(ne), %0, %2
+    %4:_(s1) = G_ICMP intpred(ne), %1, %2
+    %5:_(s1) = G_XOR %3, %4
+    S_NOP 0, implicit %5
+...
+
+---
+name: test_xor_i64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_xor_i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0_vgpr1 = COPY [[XOR]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = G_XOR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: test_xor_v2i32
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: test_xor_v2i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK: $vgpr0_vgpr1 = COPY [[XOR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<2 x s32>) = G_XOR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn-wqm-vote.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn-wqm-vote.mir
new file mode 100644
index 0000000..a3a5994
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn-wqm-vote.mir
@@ -0,0 +1,58 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
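+# The boolean input of @llvm.amdgcn.wqm.vote is assigned to the sgpr bank, so results produced in the scc or vcc banks are first copied to sgpr.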
+
+---
+name: wqm_vote_scc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: wqm_vote_scc
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[INT:%[0-9]+]]:sgpr(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm.vote), [[COPY2]](s1)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s1) = G_ICMP intpred(ne), %0, %1
+    %3:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm.vote), %2
+...
+
+---
+name: wqm_vote_vcc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: wqm_vote_vcc
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[INT:%[0-9]+]]:sgpr(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm.vote), [[COPY2]](s1)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s1) = G_ICMP intpred(ne), %0, %1
+    %3:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm.vote), %2
+...
+
+---
+name: wqm_vote_sgpr
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: wqm_vote_sgpr
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[INT:%[0-9]+]]:sgpr(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm.vote), [[TRUNC]](s1)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s1) = G_TRUNC %0
+    %2:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm.vote), %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-and.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-and.mir
index e1864d8..5e51c0a 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-and.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-and.mir
@@ -3,13 +3,13 @@
 # RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
 
 ---
-name: and_ss
+name: and_i32_ss
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1
-    ; CHECK-LABEL: name: and_ss
+    ; CHECK-LABEL: name: and_i32_ss
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY]], [[COPY1]]
@@ -19,13 +19,13 @@
 ...
 
 ---
-name: and_sv
+name: and_i32_sv
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: and_sv
+    ; CHECK-LABEL: name: and_i32_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY]], [[COPY1]]
@@ -35,13 +35,13 @@
 ...
 
 ---
-name: and_vs
+name: and_i32_vs
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: and_vs
+    ; CHECK-LABEL: name: and_i32_vs
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
@@ -52,13 +52,13 @@
 ...
 
 ---
-name: and_vv
+name: and_i32_vv
 legalized: true
 
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
-    ; CHECK-LABEL: name: and_vv
+    ; CHECK-LABEL: name: and_i32_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
     ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY]], [[COPY1]]
@@ -66,3 +66,147 @@
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = G_AND %0, %1
 ...
+
+---
+name: and_i1_scc_scc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: and_i1_scc_scc
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[C]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[C]]
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[COPY3:%[0-9]+]]:sgpr(s1) = COPY [[ICMP1]](s1)
+    ; CHECK: [[AND:%[0-9]+]]:sgpr(s1) = G_AND [[COPY2]], [[COPY3]]
+    ; CHECK: S_NOP 0, implicit [[AND]](s1)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_CONSTANT i32 0
+    %4:_(s1) = G_ICMP intpred(ne), %0, %2
+    %5:_(s1) = G_ICMP intpred(ne), %1, %2
+    %6:_(s1) = G_AND %4, %5
+    S_NOP 0, implicit %6
+...
+
+---
+name: and_i1_vcc_vcc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: and_i1_vcc_vcc
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[C]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[C]]
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[COPY3:%[0-9]+]]:sgpr(s1) = COPY [[ICMP1]](s1)
+    ; CHECK: [[AND:%[0-9]+]]:sgpr(s1) = G_AND [[COPY2]], [[COPY3]]
+    ; CHECK: S_NOP 0, implicit [[AND]](s1)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = G_CONSTANT i32 0
+    %4:_(s1) = G_ICMP intpred(ne), %0, %2
+    %5:_(s1) = G_ICMP intpred(ne), %1, %2
+    %6:_(s1) = G_AND %4, %5
+    S_NOP 0, implicit %6
+...
+
+---
+name: and_i1_scc_vcc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: and_i1_scc_vcc
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[C]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[C]]
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[COPY3:%[0-9]+]]:sgpr(s1) = COPY [[ICMP1]](s1)
+    ; CHECK: [[AND:%[0-9]+]]:sgpr(s1) = G_AND [[COPY2]], [[COPY3]]
+    ; CHECK: S_NOP 0, implicit [[AND]](s1)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32) = G_CONSTANT i32 0
+    %4:_(s1) = G_ICMP intpred(ne), %0, %2
+    %5:_(s1) = G_ICMP intpred(ne), %1, %2
+    %6:_(s1) = G_AND %4, %5
+    S_NOP 0, implicit %6
+...
+
+---
+name: and_i1_sgpr_trunc_sgpr_trunc
+legalized: true
+body:             |
+  bb.0.entry:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: and_i1_sgpr_trunc_sgpr_trunc
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:sgpr(s1) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK: S_NOP 0, implicit [[AND]](s1)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s1) = G_TRUNC %0
+    %3:_(s1) = G_TRUNC %1
+    %4:_(s1) = G_AND %2, %3
+    S_NOP 0, implicit %4
+
+...
+
+---
+name: and_i1_trunc_scc
+legalized: true
+body:             |
+  bb.0.entry:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: and_i1_trunc_scc
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[AND:%[0-9]+]]:sgpr(s1) = G_AND [[TRUNC]], [[COPY2]]
+    ; CHECK: S_NOP 0, implicit [[AND]](s1)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s1) = G_TRUNC %0
+    %3:_(s1) = G_ICMP intpred(ne), %0, %1
+    %4:_(s1) = G_AND %2, %3
+    S_NOP 0, implicit %4
+...
+
+---
+name: and_i1_s_trunc_vcc
+legalized: true
+body:             |
+  bb.0.entry:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: and_i1_s_trunc_vcc
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[AND:%[0-9]+]]:sgpr(s1) = G_AND [[TRUNC]], [[COPY2]]
+    ; CHECK: S_NOP 0, implicit [[AND]](s1)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s1) = G_TRUNC %0
+    %3:_(s1) = G_ICMP intpred(ne), %0, %1
+    %4:_(s1) = G_AND %2, %3
+    S_NOP 0, implicit %4
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomic-cmpxchg.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomic-cmpxchg.mir
new file mode 100644
index 0000000..4681284
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomic-cmpxchg.mir
@@ -0,0 +1,66 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomic_cmpxchg_global_i32_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
+    ; CHECK-LABEL: name: atomic_cmpxchg_global_i32_sss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr3
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:vgpr(s32) = G_ATOMIC_CMPXCHG [[COPY3]](p1), [[COPY4]], [[COPY5]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = COPY $sgpr3
+    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomic_cmpxchg_flat_i32_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
+    ; CHECK-LABEL: name: atomic_cmpxchg_flat_i32_sss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr3
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:vgpr(s32) = G_ATOMIC_CMPXCHG [[COPY3]](p0), [[COPY4]], [[COPY5]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = COPY $sgpr3
+    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomic_cmpxchg_local_i32_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomic_cmpxchg_local_i32_sss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:vgpr(s32) = G_ATOMIC_CMPXCHG [[COPY3]](p3), [[COPY4]], [[COPY5]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-add.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-add.mir
new file mode 100644
index 0000000..fdcb8ff
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-add.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_add_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_add_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_ADD [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_add_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_add_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_ADD [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_add_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_add_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_ADD [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-and.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-and.mir
new file mode 100644
index 0000000..c075f7b
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-and.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_and_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_and_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_AND:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_AND [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_AND %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_and_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_and_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_AND:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_AND [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_AND %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_and_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_and_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_AND:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_AND [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_AND %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-max.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-max.mir
new file mode 100644
index 0000000..67e932d
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-max.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_max_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_max_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_MAX:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_MAX [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_max_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_max_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_MAX:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_MAX [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_max_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_max_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_MAX:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_MAX [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-min.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-min.mir
new file mode 100644
index 0000000..749f773
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-min.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_min_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_min_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_MIN:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_MIN [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_min_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_min_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_MIN:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_MIN [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_min_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_min_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_MIN:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_MIN [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-or.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-or.mir
new file mode 100644
index 0000000..5fbd24e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-or.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_or_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_or_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_OR:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_OR [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_OR %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_or_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_or_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_OR:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_OR [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_OR %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_or_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_or_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_OR:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_OR [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_OR %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-sub.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-sub.mir
new file mode 100644
index 0000000..0fa3243c
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-sub.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_sub_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_sub_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_SUB:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_SUB [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_SUB %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_sub_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_sub_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_SUB:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_SUB [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_SUB %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_sub_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_sub_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_SUB:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_SUB [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_SUB %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-umax.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-umax.mir
new file mode 100644
index 0000000..866c1e0
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-umax.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_umax_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umax_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_UMAX:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_UMAX [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_umax_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umax_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_UMAX:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_UMAX [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_umax_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_umax_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_UMAX:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_UMAX [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-umin.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-umin.mir
new file mode 100644
index 0000000..0de2e89
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-umin.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_umin_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umin_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_UMIN:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_UMIN [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_umin_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_umin_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_UMIN:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_UMIN [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_umin_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_umin_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_UMIN:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_UMIN [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-xchg.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-xchg.mir
new file mode 100644
index 0000000..d7d16af
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-xchg.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_xchg_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_xchg_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_XCHG [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_xchg_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_xchg_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_XCHG [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_xchg_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_xchg_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_XCHG:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_XCHG [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_XCHG %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-xor.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-xor.mir
new file mode 100644
index 0000000..c0b7206
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-atomicrmw-xor.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: atomicrmw_xor_global_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_xor_global_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_XOR:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_XOR [[COPY2]](p1), [[COPY3]] :: (load store seq_cst 4, addrspace 1)
+    %0:_(p1) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store seq_cst 4, addrspace 1)
+...
+
+---
+name: atomicrmw_xor_flat_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2
+    ; CHECK-LABEL: name: atomicrmw_xor_flat_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p0) = COPY $sgpr0_sgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p0) = COPY [[COPY]](p0)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_XOR:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_XOR [[COPY2]](p0), [[COPY3]] :: (load store seq_cst 4)
+    %0:_(p0) = COPY $sgpr0_sgpr1
+    %1:_(s32) = COPY $sgpr2
+    %2:_(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store seq_cst 4, addrspace 0)
+...
+
+---
+name: atomicrmw_xor_local_i32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: atomicrmw_xor_local_i32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p3) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(p3) = COPY [[COPY]](p3)
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[ATOMICRMW_XOR:%[0-9]+]]:vgpr(s32) = G_ATOMICRMW_XOR [[COPY2]](p3), [[COPY3]] :: (load store seq_cst 4, addrspace 3)
+    %0:_(p3) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store seq_cst 4, addrspace 3)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-brcond.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-brcond.mir
new file mode 100644
index 0000000..8b2ef91
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-brcond.mir
@@ -0,0 +1,180 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: brcond_vcc_cond
+legalized: true
+body:             |
+  ; CHECK-LABEL: name: brcond_vcc_cond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+  ; CHECK:   [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+  ; CHECK:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; CHECK: bb.1:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $vgpr0, $vgpr1
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s1) = G_ICMP intpred(ne), %0, %1
+    G_BRCOND %2, %bb.1
+
+  bb.1:
+...
+
+---
+name: brcond_scc_cond
+legalized: true
+body:             |
+  ; CHECK-LABEL: name: brcond_scc_cond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+  ; CHECK:   [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+  ; CHECK:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; CHECK: bb.1:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $sgpr0, $sgpr1
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s1) = G_ICMP intpred(ne), %0, %1
+    G_BRCOND %2, %bb.1
+
+  bb.1:
+...
+
+---
+name: brcond_sgpr_cond
+legalized: true
+body:             |
+  ; CHECK-LABEL: name: brcond_sgpr_cond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+  ; CHECK:   [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY]](s32)
+  ; CHECK:   [[COPY1:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+  ; CHECK:   G_BRCOND [[COPY1]](s1), %bb.1
+  ; CHECK: bb.1:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $sgpr0
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s1) = G_TRUNC %0
+    G_BRCOND %1, %bb.1
+
+  bb.1:
+...
+
+---
+name: brcond_vgpr_cond
+legalized: true
+body:             |
+  ; CHECK-LABEL: name: brcond_vgpr_cond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+  ; CHECK:   [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+  ; CHECK:   [[COPY1:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+  ; CHECK:   G_BRCOND [[COPY1]](s1), %bb.1
+  ; CHECK: bb.1:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s1) = G_TRUNC %0
+    G_BRCOND %1, %bb.1
+
+  bb.1:
+...
+
+
+# The terminator that needs handling is the only instruction in the
+# block.
+
+---
+name: empty_block_vgpr_brcond
+legalized: true
+body:             |
+  ; CHECK-LABEL: name: empty_block_vgpr_brcond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+  ; CHECK:   [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+  ; CHECK: bb.1:
+  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK:   [[COPY1:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+  ; CHECK:   G_BRCOND [[COPY1]](s1), %bb.1
+  ; CHECK: bb.2:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s1) = G_TRUNC %0
+
+  bb.1:
+    G_BRCOND %1, %bb.1
+
+  bb.2:
+...
+
+
+# Make sure the first instruction in the block isn't skipped.
+---
+name: copy_first_inst_brcond
+legalized: true
+body:             |
+  ; CHECK-LABEL: name: copy_first_inst_brcond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+  ; CHECK: bb.1:
+  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK:   [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+  ; CHECK:   [[COPY1:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+  ; CHECK:   G_BRCOND [[COPY1]](s1), %bb.1
+  ; CHECK: bb.2:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $vgpr0
+    %0:_(s32) = COPY $vgpr0
+
+  bb.1:
+    %1:_(s1) = G_TRUNC %0
+    G_BRCOND %1, %bb.1
+
+  bb.2:
+...
+
+# An extra instruction separates the brcond from the condition def.
+---
+name: copy_middle_inst_brcond
+legalized: true
+body:             |
+  ; CHECK-LABEL: name: copy_middle_inst_brcond
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+  ; CHECK: bb.1:
+  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK:   [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+  ; CHECK:   S_NOP 0
+  ; CHECK:   [[COPY1:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+  ; CHECK:   G_BRCOND [[COPY1]](s1), %bb.1
+  ; CHECK: bb.2:
+  bb.0.entry:
+    successors: %bb.1
+    liveins: $vgpr0
+    %0:_(s32) = COPY $vgpr0
+
+  bb.1:
+    %1:_(s1) = G_TRUNC %0
+    S_NOP 0
+    G_BRCOND %1, %bb.1
+
+  bb.2:
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fabs.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fabs.mir
new file mode 100644
index 0000000..c48d101
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fabs.mir
@@ -0,0 +1,35 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: fabs_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: fabs_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[FABS:%[0-9]+]]:sgpr(s32) = G_FABS [[COPY]]
+    ; CHECK: $vgpr0 = COPY [[FABS]](s32)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_FABS %0
+    $vgpr0 = COPY %1
+...
+
+---
+name: fabs_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: fabs_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[FABS:%[0-9]+]]:vgpr(s32) = G_FABS [[COPY]]
+    ; CHECK: $vgpr0 = COPY [[FABS]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_FABS %0
+    $vgpr0 = COPY %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fcmp.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fcmp.mir
index b866ab4..d07c9f3 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fcmp.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fcmp.mir
@@ -13,7 +13,7 @@
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[FCMP:%[0-9]+]]:sgpr(s1) = G_FCMP floatpred(uge), [[COPY]](s32), [[COPY2]]
+    ; CHECK: [[FCMP:%[0-9]+]]:vcc(s1) = G_FCMP floatpred(uge), [[COPY]](s32), [[COPY2]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s1) = G_FCMP floatpred(uge), %0(s32), %1
@@ -29,7 +29,7 @@
     ; CHECK-LABEL: name: fcmp_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK: [[FCMP:%[0-9]+]]:sgpr(s1) = G_FCMP floatpred(uge), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[FCMP:%[0-9]+]]:vcc(s1) = G_FCMP floatpred(uge), [[COPY]](s32), [[COPY1]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s1) = G_FCMP floatpred(uge), %0, %1
@@ -46,7 +46,7 @@
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
-    ; CHECK: [[FCMP:%[0-9]+]]:sgpr(s1) = G_FCMP floatpred(uge), [[COPY1]](s32), [[COPY2]]
+    ; CHECK: [[FCMP:%[0-9]+]]:vcc(s1) = G_FCMP floatpred(uge), [[COPY1]](s32), [[COPY2]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s1) = G_FCMP floatpred(uge), %1, %0
@@ -62,7 +62,7 @@
     ; CHECK-LABEL: name: fcmp_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s1) = G_ICMP floatpred(uge), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP floatpred(uge), [[COPY]](s32), [[COPY1]]
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s1) = G_ICMP floatpred(uge), %0, %1
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fexp2.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fexp2.mir
new file mode 100644
index 0000000..1b56ca3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fexp2.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: fexp2_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: fexp2_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[FEXP2_:%[0-9]+]]:vgpr(s32) = G_FEXP2 [[COPY]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_FEXP2 %0
+...
+
+---
+name: fexp2_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: fexp2_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[FEXP2_:%[0-9]+]]:vgpr(s32) = G_FEXP2 [[COPY]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_FEXP2 %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-flog2.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-flog2.mir
new file mode 100644
index 0000000..2915137
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-flog2.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: flog2_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: flog2_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[FLOG2_:%[0-9]+]]:vgpr(s32) = G_FLOG2 [[COPY]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_FLOG2 %0
+...
+
+---
+name: flog2_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: flog2_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[FLOG2_:%[0-9]+]]:vgpr(s32) = G_FLOG2 [[COPY]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_FLOG2 %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fma.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fma.mir
new file mode 100644
index 0000000..3f0dc22
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fma.mir
@@ -0,0 +1,148 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: fma_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; CHECK-LABEL: name: fma_sss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY3]], [[COPY4]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_FMA %0, %1, %2
+...
+---
+name: fma_vss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: fma_vss
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY3]], [[COPY4]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32) = COPY $sgpr1
+    %3:_(s32) = G_FMA %0, %1, %2
+...
+---
+name: fma_svs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0, $sgpr1
+    ; CHECK-LABEL: name: fma_svs
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY3]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32) = COPY $sgpr1
+    %3:_(s32) = G_FMA %0, %1, %2
+...
+---
+name: fma_ssv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $vgpr0
+    ; CHECK-LABEL: name: fma_ssv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY3]], [[COPY2]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $vgpr0
+    %3:_(s32) = G_FMA %0, %1, %2
+...
+---
+name: fma_vvs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $sgpr0
+    ; CHECK-LABEL: name: fma_vvs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY3]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $sgpr0
+    %3:_(s32) = G_FMA %0, %1, %2
+...
+---
+name: fma_vsv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0, $vgpr1
+    ; CHECK-LABEL: name: fma_vsv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY3]], [[COPY2]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32) = COPY $vgpr1
+    %3:_(s32) = G_FMA %0, %1, %2
+...
+---
+name: fma_svv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: fma_svv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32) = COPY $vgpr1
+    %3:_(s32) = G_FMA %0, %1, %2
+...
+---
+name: fma_vvv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2
+    ; CHECK-LABEL: name: fma_vvv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
+    ; CHECK: [[FMA:%[0-9]+]]:vgpr(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = G_FMA %0, %1, %2
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fneg.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fneg.mir
new file mode 100644
index 0000000..3438275
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fneg.mir
@@ -0,0 +1,35 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: fneg_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: fneg_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[FNEG:%[0-9]+]]:sgpr(s32) = G_FNEG [[COPY]]
+    ; CHECK: $vgpr0 = COPY [[FNEG]](s32)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_FNEG %0
+    $vgpr0 = COPY %1
+...
+
+---
+name: fneg_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: fneg_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[FNEG:%[0-9]+]]:vgpr(s32) = G_FNEG [[COPY]]
+    ; CHECK: $vgpr0 = COPY [[FNEG]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_FNEG %0
+    $vgpr0 = COPY %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fptrunc.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fptrunc.mir
new file mode 100644
index 0000000..02cb49a
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fptrunc.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: fptrunc_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: fptrunc_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+    ; CHECK: [[FPTRUNC:%[0-9]+]]:vgpr(s32) = G_FPTRUNC [[COPY]](s64)
+    %0:_(s64) = COPY $sgpr0_sgpr1
+    %1:_(s32) = G_FPTRUNC %0
+...
+
+---
+name: fptrunc_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+    ; CHECK-LABEL: name: fptrunc_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[FPTRUNC:%[0-9]+]]:vgpr(s32) = G_FPTRUNC [[COPY]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_FPTRUNC %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-frame-index.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-frame-index.mir
new file mode 100644
index 0000000..77a444d6
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-frame-index.mir
@@ -0,0 +1,23 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+--- |
+  target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+  define void @test_frame_index_p5() {
+    %ptr0 = alloca i32, addrspace(5)
+    ret void
+  }
+...
+---
+name: test_frame_index_p5
+legalized:       true
+stack:
+  - { id: 0, name: ptr0, offset: 0, size: 4, alignment: 4 }
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_frame_index_p5
+    ; CHECK: [[FRAME_INDEX:%[0-9]+]]:sgpr(p5) = G_FRAME_INDEX %stack.0.ptr0
+    %0:_(p5) = G_FRAME_INDEX %stack.0.ptr0
+
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fsub.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fsub.mir
new file mode 100644
index 0000000..a924e5e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-fsub.mir
@@ -0,0 +1,69 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: fsub_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: fsub_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[FSUB:%[0-9]+]]:vgpr(s32) = G_FSUB [[COPY]], [[COPY2]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_FSUB %0, %1
+...
+
+---
+name: fsub_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: fsub_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[FSUB:%[0-9]+]]:vgpr(s32) = G_FSUB [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32) = G_FSUB %0, %1
+...
+
+---
+name: fsub_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: fsub_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[FSUB:%[0-9]+]]:vgpr(s32) = G_FSUB [[COPY]], [[COPY2]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32) = G_FSUB %0, %1
+...
+
+---
+name: fsub_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: fsub_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[FSUB:%[0-9]+]]:vgpr(s32) = G_FSUB [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = G_FSUB %0, %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-icmp.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-icmp.mir
index dba76e8..15aa98b 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-icmp.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-icmp.mir
@@ -28,7 +28,7 @@
     ; CHECK-LABEL: name: icmp_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s1) = G_ICMP intpred(ne), %0, %1
@@ -44,7 +44,7 @@
     ; CHECK-LABEL: name: icmp_vs
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[COPY]]
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[COPY]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s1) = G_ICMP intpred(ne), %1, %0
@@ -60,7 +60,7 @@
     ; CHECK-LABEL: name: icmp_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s1) = G_ICMP intpred(ne), %0, %1
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-intrinsic-round.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-intrinsic-round.mir
new file mode 100644
index 0000000..456baf4
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-intrinsic-round.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: intrinsic_round_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: intrinsic_round_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_ROUND [[COPY]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_INTRINSIC_ROUND %0
+...
+
+---
+name: intrinsic_round_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: intrinsic_round_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_ROUND [[COPY]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_INTRINSIC_ROUND %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-intrinsic-trunc.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-intrinsic-trunc.mir
new file mode 100644
index 0000000..fef4d49
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-intrinsic-trunc.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: intrinsic_trunc_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: intrinsic_trunc_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[INTRINSIC_TRUNC:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_TRUNC [[COPY]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_INTRINSIC_TRUNC %0
+...
+
+---
+name: intrinsic_trunc_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: intrinsic_trunc_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[INTRINSIC_TRUNC:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_TRUNC [[COPY]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_INTRINSIC_TRUNC %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-or.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-or.mir
index 7381a5f..83d244b 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-or.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-or.mir
@@ -3,13 +3,13 @@
 # RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
 
 ---
-name: or_ss
+name: or_i32_ss
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1
-    ; CHECK-LABEL: name: or_ss
+    ; CHECK-LABEL: name: or_i32_ss
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[OR:%[0-9]+]]:sgpr(s32) = G_OR [[COPY]], [[COPY1]]
@@ -19,13 +19,13 @@
 ...
 
 ---
-name: or_sv
+name: or_i32_sv
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: or_sv
+    ; CHECK-LABEL: name: or_i32_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[COPY]], [[COPY1]]
@@ -35,13 +35,13 @@
 ...
 
 ---
-name: or_vs
+name: or_i32_vs
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: or_vs
+    ; CHECK-LABEL: name: or_i32_vs
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
@@ -52,13 +52,13 @@
 ...
 
 ---
-name: or_vv
+name: or_i32_vv
 legalized: true
 
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
-    ; CHECK-LABEL: name: or_vv
+    ; CHECK-LABEL: name: or_i32_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
     ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[COPY]], [[COPY1]]
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sadde.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sadde.mir
new file mode 100644
index 0000000..72fed92
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sadde.mir
@@ -0,0 +1,154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck -check-prefix=FAST %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck -check-prefix=GREEDY %s
+
+---
+name: sadde_s32_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: sadde_s32_sss
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[SADDE:%[0-9]+]]:sgpr(s32), [[SADDE1:%[0-9]+]]:scc(s1) = G_SADDE [[COPY]], [[COPY1]], [[ICMP]]
+    ; GREEDY-LABEL: name: sadde_s32_sss
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[SADDE:%[0-9]+]]:sgpr(s32), [[SADDE1:%[0-9]+]]:scc(s1) = G_SADDE [[COPY]], [[COPY1]], [[ICMP]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_SADDE %0, %1, %4
+...
+
+---
+name: sadde_s32_vss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0, $sgpr1
+    ; FAST-LABEL: name: sadde_s32_vss
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; FAST: [[SADDE:%[0-9]+]]:vgpr(s32), [[SADDE1:%[0-9]+]]:vcc(s1) = G_SADDE [[COPY]], [[COPY3]], [[COPY4]]
+    ; GREEDY-LABEL: name: sadde_s32_vss
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; GREEDY: [[SADDE:%[0-9]+]]:vgpr(s32), [[SADDE1:%[0-9]+]]:vcc(s1) = G_SADDE [[COPY]], [[COPY3]], [[COPY4]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32) = COPY $sgpr1
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_SADDE %0, %1, %4
+...
+---
+name: sadde_s32_ssv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $vgpr0
+    ; FAST-LABEL: name: sadde_s32_ssv
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[SADDE:%[0-9]+]]:vgpr(s32), [[SADDE1:%[0-9]+]]:vcc(s1) = G_SADDE [[COPY3]], [[COPY4]], [[COPY5]]
+    ; GREEDY-LABEL: name: sadde_s32_ssv
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[SADDE:%[0-9]+]]:vgpr(s32), [[SADDE1:%[0-9]+]]:vcc(s1) = G_SADDE [[COPY3]], [[COPY4]], [[COPY5]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $vgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_SADDE %0, %1, %3
+...
+
+---
+name: sadde_s32_vvs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $sgpr0
+    ; FAST-LABEL: name: sadde_s32_vvs
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[SADDE:%[0-9]+]]:vgpr(s32), [[SADDE1:%[0-9]+]]:vcc(s1) = G_SADDE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: sadde_s32_vvs
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[SADDE:%[0-9]+]]:vgpr(s32), [[SADDE1:%[0-9]+]]:vcc(s1) = G_SADDE [[COPY]], [[COPY1]], [[COPY3]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $sgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_SADDE %0, %1, %3
+...
+
+---
+name: sadde_s32_sss_noscc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: sadde_s32_sss_noscc
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:scc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[SADDE:%[0-9]+]]:sgpr(s32), [[SADDE1:%[0-9]+]]:scc(s1) = G_SADDE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: sadde_s32_sss_noscc
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[SADDE:%[0-9]+]]:vgpr(s32), [[SADDE1:%[0-9]+]]:vcc(s1) = G_SADDE [[COPY3]], [[COPY4]], [[COPY5]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_SADDE %0, %1, %3
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-saddo.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-saddo.mir
new file mode 100644
index 0000000..2f60834
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-saddo.mir
@@ -0,0 +1,68 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+---
+name: saddo_s32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: saddo_s32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[SADDO:%[0-9]+]]:sgpr(s32), [[SADDO1:%[0-9]+]]:scc(s1) = G_SADDO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32), %3:_(s1) = G_SADDO %0, %1
+...
+
+---
+name: saddo_s32_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: saddo_s32_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[SADDO:%[0-9]+]]:vgpr(s32), [[SADDO1:%[0-9]+]]:vcc(s1) = G_SADDO [[COPY2]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32), %3:_(s1) = G_SADDO %0, %1
+...
+
+---
+name: saddo_s32_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: saddo_s32_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[SADDO:%[0-9]+]]:vgpr(s32), [[SADDO1:%[0-9]+]]:vcc(s1) = G_SADDO [[COPY]], [[COPY2]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32), %3:_(s1) = G_SADDO %0, %1
+...
+
+---
+name: saddo_s32_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: saddo_s32_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[SADDO:%[0-9]+]]:vgpr(s32), [[SADDO1:%[0-9]+]]:vcc(s1) = G_SADDO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32), %3:_(s1) = G_SADDO %0, %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-select.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-select.mir
index 331de80..0a67b5f 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-select.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-select.mir
@@ -1,34 +1,20 @@
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -global-isel %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck --check-prefixes=GCN,FAST %s
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -global-isel %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck --check-prefixes=GCN,GREEDY %s
-
-# REQUIRES: global-isel
-
---- |
-  define void @select_sss() { ret void }
-  define void @select_ssv() { ret void }
-  define void @select_svs() { ret void }
-  define void @select_svv() { ret void }
-  define void @select_vss() { ret void }
-  define void @select_vsv() { ret void }
-  define void @select_vvs() { ret void }
-  define void @select_vvv() { ret void }
-...
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -global-isel %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -global-isel %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
 
 ---
 name: select_sss
 legalized: true
-
-# GCN-LABEL: name: select_sss
-# GCN: [[SGPR0:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
-# GCN: [[SGPR1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
-# GCN: [[SGPR2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
-# GCN: [[SGPR3:%[0-9]+]]:sgpr(s32) = COPY $sgpr3
-# GCN: [[SCC:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[SGPR0]](s32), [[SGPR1]]
-# GCN: {{%[0-9]+}}:sgpr(s32) = G_SELECT [[SCC]](s1), [[SGPR2]], [[SGPR3]]
-
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+    ; CHECK-LABEL: name: select_sss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:sgpr(s32) = COPY $sgpr3
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY2]], [[COPY3]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s32) = COPY $sgpr2
@@ -40,24 +26,21 @@
 ---
 name: select_ssv
 legalized: true
-
-# GCN-LABEL: name: select_ssv
-# GCN: [[SGPR0:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
-# GCN: [[SGPR1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
-# GCN: [[SGPR2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
-# GCN: [[VGPR0:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-# GCN: [[SCC:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[SGPR0]](s32), [[SGPR1]]
-# GCN: [[SCC_S:%[0-9]+]]:sgpr(s1) = COPY [[SCC]]
-# FAST:   [[SGPR2_V:%[0-9]+]]:vgpr(s32) = COPY [[SGPR2]]
-# FAST:   {{%[0-9]+}}:vgpr(s32) = G_SELECT [[SCC_S]](s1), [[SGPR2_V]], [[VGPR0]]
-# GREEDY: {{%[0-9]+}}:vgpr(s32) = G_SELECT [[SCC_S]](s1), [[SGPR2]], [[VGPR0]]
-
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+    ; CHECK-LABEL: name: select_ssv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY4]](s1), [[COPY5]], [[COPY3]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
-    %2:_(s32) = COPY $sgpr2 
+    %2:_(s32) = COPY $sgpr2
     %3:_(s32) = COPY $vgpr0
     %4:_(s1) = G_ICMP intpred(ne), %0, %1
     %5:_(s32) = G_SELECT %4, %2, %3
@@ -67,21 +50,18 @@
 ---
 name: select_svs
 legalized: true
-
-# GCN-LABEL: name: select_svs
-# GCN: [[SGPR0:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
-# GCN: [[SGPR1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
-# GCN: [[SGPR2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
-# GCN: [[VGPR0:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-# GCN: [[SCC:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[SGPR0]](s32), [[SGPR1]]
-# GCN: [[SCC_S:%[0-9]+]]:sgpr(s1) = COPY [[SCC]]
-# FAST:   [[SGPR2_V:%[0-9]+]]:vgpr(s32) = COPY [[SGPR2]]
-# FAST:   {{%[0-9]+}}:vgpr(s32) = G_SELECT [[SCC_S]](s1), [[VGPR0]], [[SGPR2_V]]
-# GREEDY: {{%[0-9]+}}:vgpr(s32) = G_SELECT [[SCC_S]](s1), [[VGPR0]], [[SGPR2]]
-
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+    ; CHECK-LABEL: name: select_svs
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY4]](s1), [[COPY3]], [[COPY5]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s32) = COPY $sgpr2
@@ -93,19 +73,17 @@
 ---
 name: select_svv
 legalized: true
-
-# GCN-LABEL: name: select_svv
-# GCN: [[SGPR0:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
-# GCN: [[SGPR1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
-# GCN: [[VGPR0:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-# GCN: [[VGPR1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-# GCN: [[SCC:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[SGPR0]](s32), [[SGPR1]]
-# GCN: [[SCC_S:%[0-9]+]]:sgpr(s1) = COPY [[SCC]]
-# GCN: {{%[0-9]+}}:vgpr(s32) = G_SELECT [[SCC_S]](s1), [[VGPR0]], [[VGPR1]]
-
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: select_svv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY4]](s1), [[COPY2]], [[COPY3]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s32) = COPY $vgpr0
@@ -117,21 +95,18 @@
 ---
 name: select_vss
 legalized: true
-
-# GCN-LABEL: name: select_vss
-# GCN:  [[SGPR0:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
-# GCN:  [[SGPR1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
-# GCN:  [[VGPR0:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-# GCN:  [[VGPR1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-# GCN:  [[VCC:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ne), [[VGPR0]](s32), [[VGPR1]]
-# FAST:  [[SGPR0_V:%[0-9]+]]:vgpr(s32) = COPY [[SGPR0]]
-# GCN:  [[SGPR1_V:%[0-9]+]]:vgpr(s32) = COPY [[SGPR1]]
-# FAST:  {{%[0-9]+}}:vgpr(s32) = G_SELECT [[VCC]](s1), [[SGPR0_V]], [[SGPR1_V]]
-# GREDY: {{%[0-9]+}}:vgpr(s32) = G_SELECT [[VCC]](s1), [[SGPR0]], [[SGPR1_V]]
-
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: select_vss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY2]](s32), [[COPY3]]
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[COPY5]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s32) = COPY $vgpr0
@@ -143,20 +118,17 @@
 ---
 name: select_vsv
 legalized: true
-
-# GCN-LABEL: name: select_vsv
-# GCN:  [[SGPR0:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
-# GCN:  [[VGPR0:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-# GCN:  [[VGPR1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-# GCN:  [[VGPR2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
-# GCN:  [[VCC:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ne), [[VGPR0]](s32), [[VGPR1]]
-# FAST:   [[SGPR0_V:%[0-9]+]]:vgpr(s32) = COPY [[SGPR0]]
-# FAST:   {{%[0-9]+}}:vgpr(s32) = G_SELECT [[VCC]](s1), [[SGPR0_V]], [[VGPR2]]
-# GREEDY: {{%[0-9]+}}:vgpr(s32) = G_SELECT [[VCC]](s1), [[SGPR0]], [[VGPR2]]
-
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr2
+    ; CHECK-LABEL: name: select_vsv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[COPY2]]
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[COPY3]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s32) = COPY $vgpr1
@@ -168,20 +140,17 @@
 ---
 name: select_vvs
 legalized: true
-
-# GCN-LABEL: name: select_vvs
-# GCN:  [[SGPR0:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
-# GCN:  [[VGPR0:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-# GCN:  [[VGPR1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-# GCN:  [[VGPR2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
-# GCN:  [[VCC:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ne), [[VGPR0]](s32), [[VGPR1]]
-# FAST:   [[SGPR0_V:%[0-9]+]]:vgpr(s32) = COPY [[SGPR0]]
-# FAST:   {{%[0-9]+}}:vgpr(s32) = G_SELECT [[VCC]](s1), [[VGPR2]], [[SGPR0_V]]
-# GREEDY: {{%[0-9]+}}:vgpr(s32) = G_SELECT [[VCC]](s1), [[VGPR2]], [[SGPR0]]
-
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr2
+    ; CHECK-LABEL: name: select_vvs
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[COPY2]]
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY3]], [[COPY4]]
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s32) = COPY $vgpr1
@@ -193,18 +162,16 @@
 ---
 name: select_vvv
 legalized: true
-
-# GCN-LABEL: name: select_vvv
-# GCN: [[VGPR0:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-# GCN: [[VGPR1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-# GCN: [[VGPR2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
-# GCN: [[VGPR3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3
-# GCN: [[VCC:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ne), [[VGPR0]](s32), [[VGPR1]]
-# GCN: {{%[0-9]+}}:vgpr(s32) = G_SELECT [[VCC]](s1), [[VGPR2]], [[VGPR3]]
-
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+    ; CHECK-LABEL: name: select_vvv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3
+    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY1]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY2]], [[COPY3]]
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = COPY $vgpr2
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sitofp.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sitofp.mir
new file mode 100644
index 0000000..e482921
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sitofp.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: sitofp_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: sitofp_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[SITOFP:%[0-9]+]]:vgpr(s32) = G_SITOFP [[COPY]](s32)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_SITOFP %0
+...
+
+---
+name: sitofp_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: sitofp_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[SITOFP:%[0-9]+]]:vgpr(s32) = G_SITOFP [[COPY]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_SITOFP %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssube.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssube.mir
new file mode 100644
index 0000000..7de8f9e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssube.mir
@@ -0,0 +1,154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck -check-prefix=FAST %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck -check-prefix=GREEDY %s
+
+---
+name: ssube_s32_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: ssube_s32_sss
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[SSUBE:%[0-9]+]]:sgpr(s32), [[SSUBE1:%[0-9]+]]:scc(s1) = G_SSUBE [[COPY]], [[COPY1]], [[ICMP]]
+    ; GREEDY-LABEL: name: ssube_s32_sss
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[SSUBE:%[0-9]+]]:sgpr(s32), [[SSUBE1:%[0-9]+]]:scc(s1) = G_SSUBE [[COPY]], [[COPY1]], [[ICMP]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_SSUBE %0, %1, %4
+...
+
+---
+name: ssube_s32_vss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0, $sgpr1
+    ; FAST-LABEL: name: ssube_s32_vss
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; FAST: [[SSUBE:%[0-9]+]]:vgpr(s32), [[SSUBE1:%[0-9]+]]:vcc(s1) = G_SSUBE [[COPY]], [[COPY3]], [[COPY4]]
+    ; GREEDY-LABEL: name: ssube_s32_vss
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; GREEDY: [[SSUBE:%[0-9]+]]:vgpr(s32), [[SSUBE1:%[0-9]+]]:vcc(s1) = G_SSUBE [[COPY]], [[COPY3]], [[COPY4]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32) = COPY $sgpr1
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_SSUBE %0, %1, %4
+...
+---
+name: ssube_s32_ssv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $vgpr0
+    ; FAST-LABEL: name: ssube_s32_ssv
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[SSUBE:%[0-9]+]]:vgpr(s32), [[SSUBE1:%[0-9]+]]:vcc(s1) = G_SSUBE [[COPY3]], [[COPY4]], [[COPY5]]
+    ; GREEDY-LABEL: name: ssube_s32_ssv
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[SSUBE:%[0-9]+]]:vgpr(s32), [[SSUBE1:%[0-9]+]]:vcc(s1) = G_SSUBE [[COPY3]], [[COPY4]], [[COPY5]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $vgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_SSUBE %0, %1, %3
+...
+
+---
+name: ssube_s32_vvs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $sgpr0
+    ; FAST-LABEL: name: ssube_s32_vvs
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[SSUBE:%[0-9]+]]:vgpr(s32), [[SSUBE1:%[0-9]+]]:vcc(s1) = G_SSUBE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: ssube_s32_vvs
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[SSUBE:%[0-9]+]]:vgpr(s32), [[SSUBE1:%[0-9]+]]:vcc(s1) = G_SSUBE [[COPY]], [[COPY1]], [[COPY3]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $sgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_SSUBE %0, %1, %3
+...
+
+---
+name: ssube_s32_sss_noscc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: ssube_s32_sss_noscc
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:scc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[SSUBE:%[0-9]+]]:sgpr(s32), [[SSUBE1:%[0-9]+]]:scc(s1) = G_SSUBE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: ssube_s32_sss_noscc
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[SSUBE:%[0-9]+]]:vgpr(s32), [[SSUBE1:%[0-9]+]]:vcc(s1) = G_SSUBE [[COPY3]], [[COPY4]], [[COPY5]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_SSUBE %0, %1, %3
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssubo.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssubo.mir
new file mode 100644
index 0000000..63834e4
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssubo.mir
@@ -0,0 +1,69 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: ssubo_s32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: ssubo_s32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[SSUBO:%[0-9]+]]:sgpr(s32), [[SSUBO1:%[0-9]+]]:scc(s1) = G_SSUBO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32), %3:_(s1) = G_SSUBO %0, %1
+...
+
+---
+name: ssubo_s32_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: ssubo_s32_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[SSUBO:%[0-9]+]]:vgpr(s32), [[SSUBO1:%[0-9]+]]:vcc(s1) = G_SSUBO [[COPY2]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32), %3:_(s1) = G_SSUBO %0, %1
+...
+
+---
+name: ssubo_s32_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: ssubo_s32_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[SSUBO:%[0-9]+]]:vgpr(s32), [[SSUBO1:%[0-9]+]]:vcc(s1) = G_SSUBO [[COPY]], [[COPY2]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32), %3:_(s1) = G_SSUBO %0, %1
+...
+
+---
+name: ssubo_s32_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: ssubo_s32_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[SSUBO:%[0-9]+]]:vgpr(s32), [[SSUBO1:%[0-9]+]]:vcc(s1) = G_SSUBO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32), %3:_(s1) = G_SSUBO %0, %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-trunc.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-trunc.mir
index 14de218..d171d9f 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-trunc.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-trunc.mir
@@ -10,7 +10,7 @@
   bb.0:
     liveins: $sgpr0_sgpr1
     ; CHECK-LABEL: name: trunc_i64_to_i32_s
-    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
     ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s32) = G_TRUNC [[COPY]](s64)
     %0:_(s64) = COPY $sgpr0_sgpr1
     %1:_(s32) = G_TRUNC %0
@@ -24,8 +24,63 @@
   bb.0:
     liveins: $vgpr0_vgpr1
     ; CHECK-LABEL: name: trunc_i64_to_i32_v
-    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
     ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s32) = G_TRUNC %0
 ...
+---
+name: trunc_i64_to_i1_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: trunc_i64_to_i1_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY]](s64)
+    %0:_(s64) = COPY $sgpr0_sgpr1
+    %1:_(s1) = G_TRUNC %0
+...
+
+---
+name: trunc_i64_to_i1_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+    ; CHECK-LABEL: name: trunc_i64_to_i1_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s1) = G_TRUNC %0
+...
+
+---
+name: trunc_i32_to_i1_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: trunc_i32_to_i1_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY]](s32)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s1) = G_TRUNC %0
+...
+
+---
+name: trunc_i32_to_i1_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: trunc_i32_to_i1_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s1) = G_TRUNC %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uadde.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uadde.mir
new file mode 100644
index 0000000..83b4513
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uadde.mir
@@ -0,0 +1,153 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck -check-prefix=FAST %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck -check-prefix=GREEDY %s
+---
+name: uadde_s32_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: uadde_s32_sss
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:scc(s1) = G_UADDE [[COPY]], [[COPY1]], [[ICMP]]
+    ; GREEDY-LABEL: name: uadde_s32_sss
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:scc(s1) = G_UADDE [[COPY]], [[COPY1]], [[ICMP]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_UADDE %0, %1, %4
+...
+
+---
+name: uadde_s32_vss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0, $sgpr1
+    ; FAST-LABEL: name: uadde_s32_vss
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; FAST: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY3]], [[COPY4]]
+    ; GREEDY-LABEL: name: uadde_s32_vss
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; GREEDY: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY3]], [[COPY4]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32) = COPY $sgpr1
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_UADDE %0, %1, %4
+...
+---
+name: uadde_s32_ssv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $vgpr0
+    ; FAST-LABEL: name: uadde_s32_ssv
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY3]], [[COPY4]], [[COPY5]]
+    ; GREEDY-LABEL: name: uadde_s32_ssv
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY3]], [[COPY4]], [[COPY5]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $vgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+...
+
+---
+name: uadde_s32_vvs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $sgpr0
+    ; FAST-LABEL: name: uadde_s32_vvs
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: uadde_s32_vvs
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY1]], [[COPY3]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $sgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+...
+
+---
+name: uadde_s32_sss_noscc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: uadde_s32_sss_noscc
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:scc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:scc(s1) = G_UADDE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: uadde_s32_sss_noscc
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY3]], [[COPY4]], [[COPY5]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uaddo.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uaddo.mir
new file mode 100644
index 0000000..c6234fd
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uaddo.mir
@@ -0,0 +1,69 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: uaddo_s32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: uaddo_s32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[UADDO:%[0-9]+]]:sgpr(s32), [[UADDO1:%[0-9]+]]:scc(s1) = G_UADDO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+...
+
+---
+name: uaddo_s32_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: uaddo_s32_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY2]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+...
+
+---
+name: uaddo_s32_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: uaddo_s32_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY2]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+...
+
+---
+name: uaddo_s32_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: uaddo_s32_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uitofp.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uitofp.mir
new file mode 100644
index 0000000..07f47c5
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uitofp.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: uitofp_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+    ; CHECK-LABEL: name: uitofp_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[UITOFP:%[0-9]+]]:vgpr(s32) = G_UITOFP [[COPY]](s32)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_UITOFP %0
+...
+
+---
+name: uitofp_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: uitofp_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[UITOFP:%[0-9]+]]:vgpr(s32) = G_UITOFP [[COPY]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_UITOFP %0
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-unmerge-values.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-unmerge-values.mir
new file mode 100644
index 0000000..5a30572
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-unmerge-values.mir
@@ -0,0 +1,38 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -global-isel %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+
+---
+name: test_unmerge_s64_s32_s
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: test_unmerge_s64_s32_s
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+    ; CHECK: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK: $vgpr0 = COPY [[UV]](s32)
+    ; CHECK: $vgpr2 = COPY [[UV1]](s32)
+    %0:_(s64) = COPY $sgpr0_sgpr1
+    %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0:_(s64)
+    $vgpr0 = COPY %1(s32)
+    $vgpr2 = COPY %2(s32)
+...
+
+---
+name: test_unmerge_s64_s32_v
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+    ; CHECK-LABEL: name: test_unmerge_s64_s32_v
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK: $vgpr0 = COPY [[UV]](s32)
+    ; CHECK: $vgpr2 = COPY [[UV1]](s32)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0:_(s64)
+    $vgpr0 = COPY %1(s32)
+    $vgpr2 = COPY %2(s32)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-usube.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-usube.mir
new file mode 100644
index 0000000..d410dc0
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-usube.mir
@@ -0,0 +1,154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck -check-prefix=FAST %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck -check-prefix=GREEDY %s
+
+---
+name: usube_s32_sss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: usube_s32_sss
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:scc(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]]
+    ; GREEDY-LABEL: name: usube_s32_sss
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:scc(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_USUBE %0, %1, %4
+...
+
+---
+name: usube_s32_vss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0, $sgpr1
+    ; FAST-LABEL: name: usube_s32_vss
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; FAST: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY3]], [[COPY4]]
+    ; GREEDY-LABEL: name: usube_s32_vss
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0
+    ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1)
+    ; GREEDY: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY3]], [[COPY4]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32) = COPY $sgpr1
+    %3:_(s32) = G_CONSTANT i64 0
+    %4:_(s1) = G_ICMP intpred(eq), %2, %3
+    %5:_(s32), %6:_(s1) = G_USUBE %0, %1, %4
+...
+---
+name: usube_s32_ssv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $vgpr0
+    ; FAST-LABEL: name: usube_s32_ssv
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; FAST: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; FAST: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY3]], [[COPY4]], [[COPY5]]
+    ; GREEDY-LABEL: name: usube_s32_ssv
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY3]], [[COPY4]], [[COPY5]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $vgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3
+...
+
+---
+name: usube_s32_vvs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $sgpr0
+    ; FAST-LABEL: name: usube_s32_vvs
+    ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: usube_s32_vvs
+    ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY1]], [[COPY3]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $sgpr0
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3
+...
+
+---
+name: usube_s32_sss_noscc
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    ; FAST-LABEL: name: usube_s32_sss_noscc
+    ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; FAST: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; FAST: [[COPY3:%[0-9]+]]:scc(s1) = COPY [[TRUNC]](s1)
+    ; FAST: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:scc(s1) = G_USUBE [[COPY]], [[COPY1]], [[COPY3]]
+    ; GREEDY-LABEL: name: usube_s32_sss_noscc
+    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+    ; GREEDY: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[COPY2]](s32)
+    ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; GREEDY: [[COPY5:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+    ; GREEDY: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY3]], [[COPY4]], [[COPY5]]
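+    ; For this all-SGPR case, FAST keeps the scalar form with an scc carry,
+    ; while GREEDY moves the operands to VGPRs and carries in vcc instead.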
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = COPY $sgpr2
+    %3:_(s1) = G_TRUNC %2
+    %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-usubo.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-usubo.mir
new file mode 100644
index 0000000..64ed8fc
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-usubo.mir
@@ -0,0 +1,69 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
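+# Both register-bank selection modes are expected to produce the same mappings
+# for these cases, so the two RUN lines share the default CHECK prefix.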
+
+---
+name: usubo_s32_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: usubo_s32_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[USUBO:%[0-9]+]]:sgpr(s32), [[USUBO1:%[0-9]+]]:scc(s1) = G_USUBO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+...
+
+---
+name: usubo_s32_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: usubo_s32_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+    ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY2]], [[COPY1]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+...
+
+---
+name: usubo_s32_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: usubo_s32_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY2]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+...
+
+---
+name: usubo_s32_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: usubo_s32_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY1]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-xor.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-xor.mir
index 848126a..fab5ae8 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/regbankselect-xor.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect-xor.mir
@@ -3,13 +3,13 @@
 # RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
 
 ---
-name: xor_ss
+name: xor_i32_ss
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1
-    ; CHECK-LABEL: name: xor_ss
+    ; CHECK-LABEL: name: xor_i32_ss
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[XOR:%[0-9]+]]:sgpr(s32) = G_XOR [[COPY]], [[COPY1]]
@@ -19,13 +19,13 @@
 ...
 
 ---
-name: xor_sv
+name: xor_i32_sv
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: xor_sv
+    ; CHECK-LABEL: name: xor_i32_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[XOR:%[0-9]+]]:vgpr(s32) = G_XOR [[COPY]], [[COPY1]]
@@ -35,13 +35,13 @@
 ...
 
 ---
-name: xor_vs
+name: xor_i32_vs
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: xor_vs
+    ; CHECK-LABEL: name: xor_i32_vs
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
@@ -52,13 +52,13 @@
 ...
 
 ---
-name: xor_vv
+name: xor_i32_vv
 legalized: true
 
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
-    ; CHECK-LABEL: name: xor_vv
+    ; CHECK-LABEL: name: xor_i32_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
     ; CHECK: [[XOR:%[0-9]+]]:vgpr(s32) = G_XOR [[COPY]], [[COPY1]]
diff --git a/test/CodeGen/AMDGPU/add3.ll b/test/CodeGen/AMDGPU/add3.ll
index 3505519..e49f57c 100644
--- a/test/CodeGen/AMDGPU/add3.ll
+++ b/test/CodeGen/AMDGPU/add3.ll
@@ -23,6 +23,32 @@
   ret float %bc
 }
 
+; V_MAD_U32_U24 is given higher priority.
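+; The shl/lshr-by-8 pairs clear the top 8 bits of each multiplicand, so both
+; multiplies have 24-bit operands and select to v_mad_u32_u24 rather than
+; feeding a v_add3_u32.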
+define amdgpu_ps float @mad_no_add3(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+; GFX9-LABEL: mad_no_add3:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_mad_u32_u24 v0, v0, v1, v4
+; GFX9-NEXT:    v_mad_u32_u24 v0, v2, v3, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %a0 = shl i32 %a, 8
+  %a1 = lshr i32 %a0, 8
+  %b0 = shl i32 %b, 8
+  %b1 = lshr i32 %b0, 8
+  %mul1 = mul i32 %a1, %b1
+
+  %c0 = shl i32 %c, 8
+  %c1 = lshr i32 %c0, 8
+  %d0 = shl i32 %d, 8
+  %d1 = lshr i32 %d0, 8
+  %mul2 = mul i32 %c1, %d1
+
+  %add0 = add i32 %e, %mul1
+  %add1 = add i32 %mul2, %add0
+
+  %bc = bitcast i32 %add1 to float
+  ret float %bc
+}
+
 ; ThreeOp instruction variant not used due to Constant Bus Limitations
 ; TODO: with reassociation it is possible to replace a v_add_u32_e32 with a s_add_i32
 define amdgpu_ps float @add3_vgpr_b(i32 inreg %a, i32 %b, i32 inreg %c) {
diff --git a/test/CodeGen/AMDGPU/bitcast-v4f16-v4i16.ll b/test/CodeGen/AMDGPU/bitcast-v4f16-v4i16.ll
new file mode 100644
index 0000000..24cc1c4
--- /dev/null
+++ b/test/CodeGen/AMDGPU/bitcast-v4f16-v4i16.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope %s
+
+; Creating v4i16->v4f16 and v4f16->v4i16 bitcasts in the selection DAG is rather
+; difficult, so this test has to throw in some llvm.amdgcn.wqm calls to get them.
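+; The wqm calls act as opaque pass-throughs here, keeping the bitcasts from
+; being folded away before selection.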
+
+; CHECK-LABEL: {{^}}test_to_i16:
+; CHECK: s_endpgm
+define amdgpu_ps void @test_to_i16(<4 x i32> inreg, <4 x half> inreg) #0 {
+  %a_tmp = call <4 x half> @llvm.amdgcn.wqm.v4f16(<4 x half> %1)
+  %a_i16_tmp = bitcast <4 x half> %a_tmp to <4 x i16>
+  %a_i16 = call <4 x i16> @llvm.amdgcn.wqm.v4i16(<4 x i16> %a_i16_tmp)
+
+  %a_i32 = bitcast <4 x i16> %a_i16 to <2 x i32>
+  call void @llvm.amdgcn.raw.buffer.store.v2i32(<2 x i32> %a_i32, <4 x i32> %0, i32 0, i32 0, i32 0)
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_to_half:
+; CHECK: s_endpgm
+define amdgpu_ps void @test_to_half(<4 x i32> inreg, <4 x i16> inreg) #0 {
+  %a_tmp = call <4 x i16> @llvm.amdgcn.wqm.v4i16(<4 x i16> %1)
+  %a_half_tmp = bitcast <4 x i16> %a_tmp to <4 x half>
+  %a_half = call <4 x half> @llvm.amdgcn.wqm.v4f16(<4 x half> %a_half_tmp)
+
+  %a_i32 = bitcast <4 x half> %a_half to <2 x i32>
+  call void @llvm.amdgcn.raw.buffer.store.v2i32(<2 x i32> %a_i32, <4 x i32> %0, i32 0, i32 0, i32 0)
+  ret void
+}
+
+declare <4 x half> @llvm.amdgcn.wqm.v4f16(<4 x half>) #1
+declare <4 x i16> @llvm.amdgcn.wqm.v4i16(<4 x i16>) #1
+declare void @llvm.amdgcn.raw.buffer.store.v2i32(<2 x i32>, <4 x i32>, i32, i32, i32) #0
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll b/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
index d8126fa..4c41565 100644
--- a/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
+++ b/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
@@ -37,9 +37,10 @@
 ; GCN: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
 ; GCN-NOT: v_cvt_f32_ubyte3_e32
 ; GCN-DAG: v_cvt_f32_ubyte2_e32 v[[HIRESULT:[0-9]+]], [[VAL]]
-; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, [[VAL]]
+; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[MDRESULT:[0-9]+]], [[VAL]]
 ; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[VAL]]
-; GCN: buffer_store_dwordx3 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+; SI: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[MDRESULT]]{{\]}},
+; VI: buffer_store_dwordx3 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
 define amdgpu_kernel void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %in, i32 %tid
@@ -281,3 +282,23 @@
   store float %cvt, float addrspace(1)* %out
   ret void
 }
+
+; GCN-LABEL: {{^}}cvt_ubyte0_or_multiuse:
+; GCN:     {{buffer|flat}}_load_dword [[LOADREG:v[0-9]+]],
+; GCN-DAG: v_or_b32_e32 [[OR:v[0-9]+]], 0x80000001, [[LOADREG]]
+; GCN-DAG: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[OR]]
+; GCN:     v_add_f32_e32 [[RES:v[0-9]+]], [[OR]], [[CONV]]
+; GCN:     buffer_store_dword [[RES]],
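+; -2147483647 is 0x80000001; the 'and' with 255 reads only byte 0 of %or, so it
+; folds into v_cvt_f32_ubyte0 while %or itself stays live for the v_add_f32.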
+define amdgpu_kernel void @cvt_ubyte0_or_multiuse(i32 addrspace(1)* %in, float addrspace(1)* %out) {
+bb:
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %lid
+  %load = load i32, i32 addrspace(1)* %gep
+  %or = or i32 %load, -2147483647
+  %and = and i32 %or, 255
+  %uitofp = uitofp i32 %and to float
+  %cast = bitcast i32 %or to float
+  %add = fadd float %cast, %uitofp
+  store float %add, float addrspace(1)* %out
+  ret void
+}
diff --git a/test/CodeGen/AMDGPU/debug-value2.ll b/test/CodeGen/AMDGPU/debug-value2.ll
index 5200315..3a13a33 100644
--- a/test/CodeGen/AMDGPU/debug-value2.ll
+++ b/test/CodeGen/AMDGPU/debug-value2.ll
@@ -278,7 +278,7 @@
 !opencl.ocl.version = !{!107}
 !llvm.ident = !{!108, !109}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0 (https://github.com/llvm-mirror/clang.git 3edc9a6d1f98fec61a944167cb5c36c40104918a) (https://github.com/llvm-mirror/llvm.git 90eddc791688f226397e600c287c043d9b0e35fa)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !74)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !74)
 !1 = !DIFile(filename: "tmp.cl", directory: "/home/yaxunl/h/git/llvm/assert")
 !2 = !{!3, !27, !37, !42, !46, !51, !55, !68}
 !3 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "BrdfType", file: !4, line: 1334, size: 32, elements: !5)
@@ -386,7 +386,7 @@
 !105 = !{i32 2, !"Debug Info Version", i32 3}
 !106 = !{i32 1, !"wchar_size", i32 4}
 !107 = !{i32 2, i32 0}
-!108 = !{!"clang version 7.0.0 (https://github.com/llvm-mirror/clang.git 3edc9a6d1f98fec61a944167cb5c36c40104918a) (https://github.com/llvm-mirror/llvm.git 90eddc791688f226397e600c287c043d9b0e35fa)"}
+!108 = !{!"clang version 7.0.0"}
 !109 = !{!"clang version 4.0 "}
 !110 = distinct !DISubprogram(name: "Scene_transformT", scope: !4, file: !4, line: 2182, type: !111, isLocal: false, isDefinition: true, scopeLine: 2183, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !116)
 !111 = !DISubroutineType(types: !112)
diff --git a/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll b/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
new file mode 100644
index 0000000..8d21050
--- /dev/null
+++ b/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+; This module creates a divergent branch. The branch is marked as divergent by
+; the divergence analysis but the condition is not. This test ensures that the
+; divergence of the branch is tested, not its condition, so that the branch is
+; correctly emitted as divergent.
+
+target triple = "amdgcn-mesa-mesa3d"
+
+define amdgpu_ps void @main(i32, float) {
+; CHECK-LABEL: main:
+; CHECK:       ; %bb.0: ; %start
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    s_mov_b32 m0, s0
+; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    v_interp_p1_f32_e32 v0, v1, attr0.x
+; CHECK-NEXT:    v_cmp_nlt_f32_e64 s[0:1], 0, v0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    ; implicit-def: $sgpr2_sgpr3
+; CHECK-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; CHECK-NEXT:  BB0_1: ; %loop
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 32, v1
+; CHECK-NEXT:    s_and_b64 vcc, exec, vcc
+; CHECK-NEXT:    s_or_b64 s[6:7], s[6:7], exec
+; CHECK-NEXT:    s_or_b64 s[2:3], s[2:3], exec
+; CHECK-NEXT:    s_cbranch_vccz BB0_5
+; CHECK-NEXT:  ; %bb.2: ; %endif1
+; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    s_mov_b64 s[6:7], -1
+; CHECK-NEXT:    s_and_saveexec_b64 s[8:9], s[0:1]
+; CHECK-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; CHECK-NEXT:    ; mask branch BB0_4
+; CHECK-NEXT:  BB0_3: ; %endif2
+; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    v_add_u32_e32 v1, 1, v1
+; CHECK-NEXT:    s_xor_b64 s[6:7], exec, -1
+; CHECK-NEXT:  BB0_4: ; %Flow1
+; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    s_or_b64 exec, exec, s[8:9]
+; CHECK-NEXT:    s_andn2_b64 s[2:3], s[2:3], exec
+; CHECK-NEXT:    s_branch BB0_6
+; CHECK-NEXT:  BB0_5: ; in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    ; implicit-def: $vgpr1
+; CHECK-NEXT:  BB0_6: ; %Flow
+; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    s_and_b64 s[8:9], exec, s[6:7]
+; CHECK-NEXT:    s_or_b64 s[8:9], s[8:9], s[4:5]
+; CHECK-NEXT:    s_mov_b64 s[4:5], s[8:9]
+; CHECK-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; CHECK-NEXT:    s_cbranch_execnz BB0_1
+; CHECK-NEXT:  ; %bb.7: ; %Flow2
+; CHECK-NEXT:    s_or_b64 exec, exec, s[8:9]
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; This is the divergent branch with the condition not marked as divergent.
+; CHECK-NEXT:    s_and_saveexec_b64 s[0:1], s[2:3]
+; CHECK-NEXT:    ; mask branch BB0_9
+; CHECK-NEXT:  BB0_8: ; %if1
+; CHECK-NEXT:    v_sqrt_f32_e32 v1, v0
+; CHECK-NEXT:  BB0_9: ; %endloop
+; CHECK-NEXT:    s_or_b64 exec, exec, s[0:1]
+; CHECK-NEXT:    exp mrt0 v1, v1, v1, v1 done vm
+; CHECK-NEXT:    s_endpgm
+start:
+  %v0 = call float @llvm.amdgcn.interp.p1(float %1, i32 0, i32 0, i32 %0)
+  br label %loop
+
+loop:
+  %v1 = phi i32 [ 0, %start ], [ %v5, %endif2 ]
+  %v2 = icmp ugt i32 %v1, 31
+  br i1 %v2, label %if1, label %endif1
+
+if1:
+  %v3 = call float @llvm.sqrt.f32(float %v0)
+  br label %endloop
+
+endif1:
+  %v4 = fcmp ogt float %v0, 0.000000e+00
+  br i1 %v4, label %endloop, label %endif2
+
+endif2:
+  %v5 = add i32 %v1, 1
+  br label %loop
+
+endloop:
+  %v6 = phi float [ %v3, %if1 ], [ 0.0, %endif1 ]
+  call void @llvm.amdgcn.exp.v4f32(i32 0, i32 15, float %v6, float %v6, float %v6, float %v6, i1 true, i1 true)
+  ret void
+}
+
+declare float @llvm.sqrt.f32(float) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare void @llvm.amdgcn.exp.v4f32(i32, i32, float, float, float, float, i1, i1) #0
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/early-if-convert-cost.ll b/test/CodeGen/AMDGPU/early-if-convert-cost.ll
index b8bb106..626a6e2 100644
--- a/test/CodeGen/AMDGPU/early-if-convert-cost.ll
+++ b/test/CodeGen/AMDGPU/early-if-convert-cost.ll
@@ -60,7 +60,8 @@
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
 
-; GCN-DAG: buffer_store_dwordx3
+; GCN-DAG: buffer_store_dword v
+; GCN-DAG: buffer_store_dwordx2
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle96(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %in, float %cnd) #0 {
 entry:
   %v = load <3 x i32>, <3 x i32> addrspace(1)* %in
diff --git a/test/CodeGen/AMDGPU/elf.metadata.ll b/test/CodeGen/AMDGPU/elf.metadata.ll
index 097310a..9214ca2 100644
--- a/test/CodeGen/AMDGPU/elf.metadata.ll
+++ b/test/CodeGen/AMDGPU/elf.metadata.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=amdgcn -mcpu=fiji -filetype=obj | llvm-readobj -symbols -s -sd - | FileCheck %s
 
 ; CHECK: Section {
-; CHECK: Name: .AMDGPU.metadata.info_1
+; CHECK: Name: .AMDGPU.comment.info_1
 ; CHECK: Type: SHT_PROGBITS (0x1)
 ; CHECK: Flags [ (0x0)
 ; CHECK: Size: 16
@@ -11,7 +11,7 @@
 ; CHECK: }
 
 ; CHECK: Section {
-; CHECK: Name: .AMDGPU.metadata.info_2
+; CHECK: Name: .AMDGPU.comment.info_2
 ; CHECK: Type: SHT_PROGBITS (0x1)
 ; CHECK: Flags [ (0x0)
 ; CHECK: Size: 16
@@ -21,7 +21,7 @@
 ; CHECK: }
 
 ; CHECK: Section {
-; CHECK: Name: .AMDGPU.metadata.info_3
+; CHECK: Name: .AMDGPU.comment.info_3
 ; CHECK: Type: SHT_PROGBITS (0x1)
 ; CHECK: Flags [ (0x0)
 ; CHECK: Size: 16
@@ -31,26 +31,26 @@
 ; CHECK: }
 
 ; CHECK: Symbol {
-; CHECK: Name: metadata_info_var_1
+; CHECK: Name: comment_info_var_1
 ; CHECK: Size: 16
 ; CHECK: Binding: Local
-; CHECK: Section: .AMDGPU.metadata.info_1
+; CHECK: Section: .AMDGPU.comment.info_1
 ; CHECK: }
 
 ; CHECK: Symbol {
-; CHECK: Name: metadata_info_var_2
+; CHECK: Name: comment_info_var_2
 ; CHECK: Size: 16
 ; CHECK: Binding: Global
-; CHECK: Section: .AMDGPU.metadata.info_2
+; CHECK: Section: .AMDGPU.comment.info_2
 ; CHECK: }
 
 ; CHECK: Symbol {
-; CHECK: Name: metadata_info_var_3
+; CHECK: Name: comment_info_var_3
 ; CHECK: Size: 16
 ; CHECK: Binding: Global
-; CHECK: Section: .AMDGPU.metadata.info_3
+; CHECK: Section: .AMDGPU.comment.info_3
 ; CHECK: }
 
-@metadata_info_var_1 = internal global [4 x i32][i32 826559809, i32 826559809, i32 826559809, i32 826559809], align 1, section ".AMDGPU.metadata.info_1"
-@metadata_info_var_2 = constant [4 x i32][i32 843337025, i32 843337025, i32 843337025, i32 843337025], align 1, section ".AMDGPU.metadata.info_2"
-@metadata_info_var_3 = global [4 x i32][i32 860114241, i32 860114241, i32 860114241, i32 860114241], align 1, section ".AMDGPU.metadata.info_3"
+@comment_info_var_1 = internal global [4 x i32][i32 826559809, i32 826559809, i32 826559809, i32 826559809], align 1, section ".AMDGPU.comment.info_1"
+@comment_info_var_2 = constant [4 x i32][i32 843337025, i32 843337025, i32 843337025, i32 843337025], align 1, section ".AMDGPU.comment.info_2"
+@comment_info_var_3 = global [4 x i32][i32 860114241, i32 860114241, i32 860114241, i32 860114241], align 1, section ".AMDGPU.comment.info_3"
diff --git a/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll b/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
index 3746aa8..a3f176b 100644
--- a/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
+++ b/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
@@ -131,10 +131,10 @@
 ; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
 ; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |v{{[0-9]+}}|, [[L]]
 ; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
 ; GCN-DENORM-DAG: v_rcp_f32_e32
 ; GCN-DENORM-DAG: v_rcp_f32_e32
 ; GCN-DENORM-DAG: v_rcp_f32_e32
@@ -166,10 +166,10 @@
 ; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
 ; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |v{{[0-9]+}}|, [[L]]
 ; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
 ; GCN-DENORM-DAG: v_rcp_f32_e32
 ; GCN-DENORM-DAG: v_rcp_f32_e32
 ; GCN-DENORM-DAG: v_rcp_f32_e32
@@ -246,7 +246,7 @@
 ; GCN-DAG:        v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
 
 ; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
 ; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP1:v[0-9]+]], v{{[0-9]+}}
 ; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[RCP1]]
 ; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP2:v[0-9]+]], v{{[0-9]+}}
@@ -288,7 +288,7 @@
 ; GCN-DAG:        v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
 
 ; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
 ; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP1:v[0-9]+]], v{{[0-9]+}}
 ; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[RCP1]]
 ; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP2:v[0-9]+]], v{{[0-9]+}}
diff --git a/test/CodeGen/AMDGPU/imm16.ll b/test/CodeGen/AMDGPU/imm16.ll
index dcf3b36..29b80da 100644
--- a/test/CodeGen/AMDGPU/imm16.ll
+++ b/test/CodeGen/AMDGPU/imm16.ll
@@ -266,7 +266,7 @@
 }
 
 ; GCN-LABEL: {{^}}add_inline_imm_neg_1_f16:
-; VI: v_add_u32_e32 [[REG:v[0-9]+]], vcc, -1
+; VI: v_add_u16_e32 [[REG:v[0-9]+]], -1, [[REG:v[0-9]+]]
 ; VI: buffer_store_short [[REG]]
 define amdgpu_kernel void @add_inline_imm_neg_1_f16(half addrspace(1)* %out, i16 addrspace(1)* %in) {
   %x = load i16, i16 addrspace(1)* %in
@@ -277,7 +277,7 @@
 }
 
 ; GCN-LABEL: {{^}}add_inline_imm_neg_2_f16:
-; VI: v_add_u32_e32 [[REG:v[0-9]+]], vcc, 0xfffe
+; VI: v_add_u16_e32 [[REG:v[0-9]+]], -2, [[REG:v[0-9]+]]
 ; VI: buffer_store_short [[REG]]
 define amdgpu_kernel void @add_inline_imm_neg_2_f16(half addrspace(1)* %out, i16 addrspace(1)* %in) {
   %x = load i16, i16 addrspace(1)* %in
@@ -288,7 +288,7 @@
 }
 
 ; GCN-LABEL: {{^}}add_inline_imm_neg_16_f16:
-; VI: v_add_u32_e32 [[REG:v[0-9]+]], vcc, 0xfff0
+; VI: v_add_u16_e32 [[REG:v[0-9]+]], -16, [[REG:v[0-9]+]]
 ; VI: buffer_store_short [[REG]]
 define amdgpu_kernel void @add_inline_imm_neg_16_f16(half addrspace(1)* %out, i16 addrspace(1)* %in) {
   %x = load i16, i16 addrspace(1)* %in
diff --git a/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll b/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll
index c8f7551..b266dd1 100644
--- a/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll
+++ b/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll
@@ -14,8 +14,9 @@
 ; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT15:[0-9]+]], s[[S_ELT15]]
 ; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]
 
+; GCN-DAG: v_add_u32_e32 [[IDX1:v[0-9]+]], 1, [[IDX0]]
+
 ; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
 ; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
 ; GCN: s_and_saveexec_b64 vcc, vcc
@@ -36,8 +37,8 @@
 ; GCN: s_mov_b64 [[MASK]], exec
 
 ; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
+; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX1]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX1]]
 ; GCN: s_and_saveexec_b64 vcc, vcc
 
 ; MOVREL: s_mov_b32 m0, [[READLANE]]
diff --git a/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll b/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll
index 544ab99..e9b640c 100644
--- a/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll
+++ b/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll
@@ -17,8 +17,9 @@
 ; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT15:[0-9]+]], s[[S_ELT15]]
 ; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]
 
+; GCN-DAG: v_add_{{i32|u32}}_e32 [[IDX1:v[0-9]+]], vcc, 1, [[IDX0]]
+
 ; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
 ; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
 ; GCN: s_and_saveexec_b64 vcc, vcc
@@ -39,8 +40,8 @@
 ; GCN: s_mov_b64 [[MASK]], exec
 
 ; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
+; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX1]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX1]]
 ; GCN: s_and_saveexec_b64 vcc, vcc
 
 ; MOVREL: s_mov_b32 m0, [[READLANE]]
diff --git a/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 081f623..3412ef5 100644
--- a/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -7,11 +7,12 @@
 ; indexing of vectors.
 
 ; GCN-LABEL: {{^}}extract_w_offset:
-; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
+; GCN-DAG: s_load_dword [[IN0:s[0-9]+]]
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
-; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
+; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 1.0
+; GCN-DAG: s_add_i32 [[IN:s[0-9]+]], [[IN0]], 1
 
 ; MOVREL-DAG: s_mov_b32 m0, [[IN]]
 ; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
@@ -29,16 +30,17 @@
 
 ; XXX: Could do v_or_b32 directly
 ; GCN-LABEL: {{^}}extract_w_offset_salu_use_vector:
+; GCN-DAG: s_or_b32
+; GCN-DAG: s_or_b32
+; GCN-DAG: s_or_b32
+; GCN-DAG: s_or_b32
 ; MOVREL: s_mov_b32 m0
-; GCN-DAG: s_or_b32
-; GCN-DAG: s_or_b32
-; GCN-DAG: s_or_b32
-; GCN-DAG: s_or_b32
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
 
+
 ; MOVREL: v_movrels_b32_e32
 
 ; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, src0{{$}}
@@ -176,7 +178,8 @@
 }
 
 ; GCN-LABEL: {{^}}insert_w_offset:
-; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
+; GCN-DAG: s_load_dword [[IN0:s[0-9]+]]
+; MOVREL-DAG: s_add_i32 [[IN:s[0-9]+]], [[IN0]], 1
 ; MOVREL-DAG: s_mov_b32 m0, [[IN]]
 ; GCN-DAG: v_mov_b32_e32 v[[ELT0:[0-9]+]], 1.0
 ; GCN-DAG: v_mov_b32_e32 v[[ELT1:[0-9]+]], 2.0
@@ -185,7 +188,7 @@
 ; GCN-DAG: v_mov_b32_e32 v[[ELT15:[0-9]+]], 0x41800000
 ; GCN-DAG: v_mov_b32_e32 v[[INS:[0-9]+]], 0x41880000
 
-; MOVREL: v_movreld_b32_e32 v[[ELT1]], v[[INS]]
+; MOVREL: v_movreld_b32_e32 v[[ELT0]], v[[INS]]
 ; MOVREL: buffer_store_dwordx4 v{{\[}}[[ELT0]]:[[ELT3]]{{\]}}
 define amdgpu_kernel void @insert_w_offset(<16 x float> addrspace(1)* %out, i32 %in) {
 entry:
@@ -195,6 +198,51 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}insert_unsigned_base_plus_offset:
+; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 2.0
+; GCN-DAG: s_and_b32 [[BASE:s[0-9]+]], [[IN]], 0xffff
+
+; MOVREL: s_mov_b32 m0, [[BASE]]
+; MOVREL: v_movreld_b32_e32 [[ELT1]], v{{[0-9]+}}
+
+; IDXMODE: s_set_gpr_idx_on [[BASE]], dst
+; IDXMODE-NEXT: v_mov_b32_e32 [[ELT1]], v{{[0-9]+}}
+; IDXMODE-NEXT: s_set_gpr_idx_off
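+; With a zero-extended base the +1 is folded into the movreld destination (it
+; starts at [[ELT1]]); the sign-extended variant below keeps the add in the
+; scalar index computation.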
+define amdgpu_kernel void @insert_unsigned_base_plus_offset(<16 x float> addrspace(1)* %out, i16 %in) {
+entry:
+  %base = zext i16 %in to i32
+  %add = add i32 %base, 1
+  %ins = insertelement <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, float 17.0, i32 %add
+  store <16 x float> %ins, <16 x float> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}insert_signed_base_plus_offset:
+; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 2.0
+
+; GCN-DAG: s_sext_i32_i16 [[BASE:s[0-9]+]], [[IN]]
+; GCN-DAG: s_add_i32 [[BASE_PLUS_OFFSET:s[0-9]+]], [[BASE]], 1
+
+; MOVREL: s_mov_b32 m0, [[BASE_PLUS_OFFSET]]
+; MOVREL: v_movreld_b32_e32 [[ELT0]], v{{[0-9]+}}
+
+; IDXMODE: s_set_gpr_idx_on [[BASE_PLUS_OFFSET]], dst
+; IDXMODE-NEXT: v_mov_b32_e32 [[ELT0]], v{{[0-9]+}}
+; IDXMODE-NEXT: s_set_gpr_idx_off
+define amdgpu_kernel void @insert_signed_base_plus_offset(<16 x float> addrspace(1)* %out, i16 %in) {
+entry:
+  %base = sext i16 %in to i32
+  %add = add i32 %base, 1
+  %ins = insertelement <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, float 17.0, i32 %add
+  store <16 x float> %ins, <16 x float> addrspace(1)* %out
+  ret void
+}
+
 ; GCN-LABEL: {{^}}insert_wo_offset:
 ; GCN: s_load_dword [[IN:s[0-9]+]]
 
@@ -354,8 +402,12 @@
 
 ; GCN: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec
 
+; GCN: s_waitcnt vmcnt(0)
+; PREGFX9: v_add_{{i32|u32}}_e32 [[IDX1:v[0-9]+]], vcc, 1, [[IDX0]]
+; GFX9: v_add_{{i32|u32}}_e32 [[IDX1:v[0-9]+]], 1, [[IDX0]]
+
 ; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
 ; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
 ; GCN: s_and_saveexec_b64 vcc, vcc
@@ -373,20 +425,20 @@
 ; FIXME: Redundant copy
 ; GCN: s_mov_b64 exec, [[MASK]]
 
-; GCN: v_mov_b32_e32 [[VEC_ELT1_2:v[0-9]+]], [[S_ELT1]]
+; GCN: v_mov_b32_e32 [[VEC_ELT0_2:v[0-9]+]], [[S_ELT0]]
 
 ; GCN: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec
 
 ; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
+; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX1]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX1]]
 ; GCN: s_and_saveexec_b64 vcc, vcc
 
 ; MOVREL: s_mov_b32 m0, [[READLANE]]
-; MOVREL-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]
+; MOVREL-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT0_2]]
 
 ; IDXMODE: s_set_gpr_idx_on [[READLANE]], src0
-; IDXMODE-NEXT: v_mov_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]
+; IDXMODE-NEXT: v_mov_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT0_2]]
 ; IDXMODE: s_set_gpr_idx_off
 
 ; GCN-NEXT: s_xor_b64 exec, exec, vcc
@@ -492,13 +544,15 @@
 
 ; The offset puts the index outside of the superregister boundaries, so clamp to the 1st element.
 ; GCN-LABEL: {{^}}extract_largest_inbounds_offset:
-; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\].* offset:48}}
-; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
+; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]
+; GCN-DAG: s_load_dword [[IDX0:s[0-9]+]]
+; GCN-DAG: s_add_i32 [[IDX:s[0-9]+]], [[IDX0]], 15
+
 ; MOVREL: s_mov_b32 m0, [[IDX]]
-; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
+; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
 
 ; IDXMODE: s_set_gpr_idx_on [[IDX]], src0
-; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
+; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
 ; IDXMODE: s_set_gpr_idx_off
 
 ; GCN: buffer_store_dword [[EXTRACT]]
@@ -514,10 +568,11 @@
 ; GCN-LABEL: {{^}}extract_out_of_bounds_offset:
 ; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
 ; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
-; MOVREL: s_add_i32 m0, [[IDX]], 16
+; GCN: s_add_i32 [[ADD_IDX:s[0-9]+]], [[IDX]], 16
+
+; MOVREL: s_mov_b32 m0, [[ADD_IDX]]
 ; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
 
-; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[IDX]], 16
 ; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], src0
 ; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
 ; IDXMODE: s_set_gpr_idx_off
@@ -532,18 +587,15 @@
   ret void
 }
 
-; Test that the or is folded into the base address register instead of
-; added to m0
-
 ; GCN-LABEL: {{^}}extractelement_v16i32_or_index:
 ; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
 ; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
-; GCN-NOT: [[IDX_SHL]]
+; GCN: s_or_b32 [[IDX_FIN:s[0-9]+]], [[IDX_SHL]], 1
 
-; MOVREL: s_mov_b32 m0, [[IDX_SHL]]
+; MOVREL: s_mov_b32 m0, [[IDX_FIN]]
 ; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 
-; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], src0
+; IDXMODE: s_set_gpr_idx_on [[IDX_FIN]], src0
 ; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 ; IDXMODE: s_set_gpr_idx_off
 define amdgpu_kernel void @extractelement_v16i32_or_index(i32 addrspace(1)* %out, <16 x i32> addrspace(1)* %in, i32 %idx.in) {
@@ -559,12 +611,12 @@
 ; GCN-LABEL: {{^}}insertelement_v16f32_or_index:
 ; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
 ; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
-; GCN-NOT: [[IDX_SHL]]
+; GCN: s_or_b32 [[IDX_FIN:s[0-9]+]], [[IDX_SHL]], 1
 
-; MOVREL: s_mov_b32 m0, [[IDX_SHL]]
+; MOVREL: s_mov_b32 m0, [[IDX_FIN]]
 ; MOVREL: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 
-; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], dst
+; IDXMODE: s_set_gpr_idx_on [[IDX_FIN]], dst
 ; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 ; IDXMODE: s_set_gpr_idx_off
 define amdgpu_kernel void @insertelement_v16f32_or_index(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %idx.in) nounwind {
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
index ba9f206..bcde25a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
@@ -195,7 +195,7 @@
 
 ;CHECK-LABEL: {{^}}buffer_load_x3_offen_merged:
 ;CHECK-NEXT: %bb.
-;CHECK-NEXT: buffer_load_dwordx3 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
+;VI-NEXT: buffer_load_dwordx3 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
 ;CHECK: s_waitcnt
 define amdgpu_ps void @buffer_load_x3_offen_merged(<4 x i32> inreg %rsrc, i32 %a) {
 main_body:
@@ -245,7 +245,7 @@
 
 ;CHECK-LABEL: {{^}}buffer_load_x3_offset_merged:
 ;CHECK-NEXT: %bb.
-;CHECK-NEXT: buffer_load_dwordx3 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4
+;VI-NEXT: buffer_load_dwordx3 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4
 ;CHECK: s_waitcnt
 define amdgpu_ps void @buffer_load_x3_offset_merged(<4 x i32> inreg %rsrc) {
 main_body:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
index 584fb43..06ac7da 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
@@ -4,6 +4,7 @@
 declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #0
 declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) #0
 declare i64 @llvm.amdgcn.icmp.i16(i16, i16, i32) #0
+declare i64 @llvm.amdgcn.icmp.i1(i1, i1, i32) #0
 
 ; No crash on invalid input
 ; GCN-LABEL: {{^}}v_icmp_i32_dynamic_cc:
@@ -314,4 +315,21 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}v_icmp_i1_ne0:
+; GCN: v_cmp_gt_u32_e64 s[[C0:\[[0-9]+:[0-9]+\]]],
+; GCN: v_cmp_gt_u32_e64 s[[C1:\[[0-9]+:[0-9]+\]]],
+; GCN: s_and_b64 s[[SRC:\[[0-9]+:[0-9]+\]]], s[[C0]], s[[C1]]
+; SI-NEXT: s_mov_b32 s{{[0-9]+}}, -1
+; GCN-NEXT: v_mov_b32_e32
+; GCN-NEXT: v_mov_b32_e32
+; GCN-NEXT: {{global|flat|buffer}}_store_dwordx2
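+; Condition code 33 is ICmpInst::ICMP_NE, so this returns the mask of lanes
+; where %src is true.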
+define amdgpu_kernel void @v_icmp_i1_ne0(i64 addrspace(1)* %out, i32 %a, i32 %b) {
+  %c0 = icmp ugt i32 %a, 1
+  %c1 = icmp ugt i32 %b, 2
+  %src = and i1 %c0, %c1
+  %result = call i64 @llvm.amdgcn.icmp.i1(i1 %src, i1 false, i32 33)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind readnone convergent }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll
index bf93ffa..b297aca 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll
@@ -1,6 +1,7 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,SIVI,PRT %s
+; RUN: llc -march=amdgcn -mcpu=fiji  -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,SIVI,PRT %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900,PRT %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-enable-prt-strict-null -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900,NOPRT %s
 
 ; GCN-LABEL: {{^}}load_1d:
 ; GCN: image_load v[0:3], v0, s[0:7] dmask:0xf unorm{{$}}
@@ -10,6 +11,52 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_1d_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v{{[0-9]+}}, s[0:7] dmask:0xf unorm tfe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
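+; With tfe set, the load returns one extra status dword (v4 here), which is the
+; value stored to %out.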
+define amdgpu_ps <4 x float> @load_1d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v{{[0-9]+}}, s[0:7] dmask:0xf unorm lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_1d_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+  %v = call {<4 x float>, i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 2, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_2d:
 ; GCN: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm{{$}}
 define amdgpu_ps <4 x float> @load_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
@@ -18,6 +65,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_2d_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_3d:
 ; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
 define amdgpu_ps <4 x float> @load_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %r) {
@@ -26,6 +96,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_3d_tfe_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_3d_tfe_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %r) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.3d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 3, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_cube:
 ; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm da{{$}}
 define amdgpu_ps <4 x float> @load_cube(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice) {
@@ -34,6 +127,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_cube_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm lwe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_cube_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.cube.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 2, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_1darray:
 ; GCN: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm da{{$}}
 define amdgpu_ps <4 x float> @load_1darray(<8 x i32> inreg %rsrc, i32 %s, i32 %slice) {
@@ -42,6 +158,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_1darray_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_1darray_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %slice) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1darray.v4f32i32.i32(i32 15, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_2darray:
 ; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm da{{$}}
 define amdgpu_ps <4 x float> @load_2darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice) {
@@ -50,6 +189,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_2darray_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm lwe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2darray_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2darray.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 2, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_2dmsaa:
 ; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
 define amdgpu_ps <4 x float> @load_2dmsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %fragid) {
@@ -58,6 +220,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_2dmsaa_both:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2dmsaa_both(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %fragid) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2dmsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 3, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_2darraymsaa:
 ; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm da{{$}}
 define amdgpu_ps <4 x float> @load_2darraymsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
@@ -66,6 +251,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_2darraymsaa_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2darraymsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_mip_1d:
 ; GCN: image_load_mip v[0:3], v[0:1], s[0:7] dmask:0xf unorm{{$}}
 define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i32 %s, i32 %mip) {
@@ -74,6 +282,29 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_mip_1d_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load_mip v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_mip_1d_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %mip) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.1d.v4f32i32.i32(i32 15, i32 %s, i32 %mip, <8 x i32> %rsrc, i32 2, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}load_mip_2d:
 ; GCN: image_load_mip v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
 define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
@@ -82,6 +313,191 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}load_mip_2d_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load_mip v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_mip_2d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %mip) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
+; Make sure that the error flag is returned even with dmask 0
+; GCN-LABEL: {{^}}load_1d_V2_tfe_dmask0:
+; GCN: v_mov_b32_e32 v1, 0
+; PRT-DAG: v_mov_b32_e32 v2, v1
+; PRT: image_load v[1:2], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT: image_load v[0:1], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_1d_V2_tfe_dmask0(<8 x i32> inreg %rsrc, i32 %s) {
+main_body:
+  %v = call {<2 x float>,i32} @llvm.amdgcn.image.load.1d.v2f32i32.i32(i32 0, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.err = extractvalue {<2 x float>, i32} %v, 1
+  %vv = bitcast i32 %v.err to float
+  ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_1d_V1_tfe_dmask0:
+; GCN: v_mov_b32_e32 v1, 0
+; PRT-DAG: v_mov_b32_e32 v2, v1
+; PRT: image_load v[1:2], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT: image_load v[0:1], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_1d_V1_tfe_dmask0(<8 x i32> inreg %rsrc, i32 %s) {
+main_body:
+  %v = call {float,i32} @llvm.amdgcn.image.load.1d.f32i32.i32(i32 0, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.err = extractvalue {float, i32} %v, 1
+  %vv = bitcast i32 %v.err to float
+  ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_mip_2d_tfe_dmask0:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_dmask0(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32 0, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  %vv = bitcast i32 %v.err to float
+  ret float %vv
+}
+
+; Do not make dmask 0 even if no result (other than tfe) is used.
+; GCN-LABEL: {{^}}load_mip_2d_tfe_nouse:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_nouse(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  %vv = bitcast i32 %v.err to float
+  ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_mip_2d_tfe_nouse_V2:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_nouse_V2(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+  %v = call {<2 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v2f32i32.i32(i32 6, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.err = extractvalue {<2 x float>, i32} %v, 1
+  %vv = bitcast i32 %v.err to float
+  ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_mip_2d_tfe_nouse_V1:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x2 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x2 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_nouse_V1(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+  %v = call {float, i32} @llvm.amdgcn.image.load.mip.2d.f32i32.i32(i32 2, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.err = extractvalue {float, i32} %v, 1
+  %vv = bitcast i32 %v.err to float
+  ret float %vv
+}
+
+; Check for dmask being materially smaller than the return type
+; GCN-LABEL: {{^}}load_1d_tfe_V4_dmask3:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v3, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; GCN: image_load v[0:3], v{{[0-9]+}}, s[0:7] dmask:0x7 unorm tfe{{$}}
+; SIVI: buffer_store_dword v3, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v3
+define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask3(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 7, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_tfe_V4_dmask2:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v2, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; GCN: image_load v[0:3], v{{[0-9]+}}, s[0:7] dmask:0x6 unorm tfe{{$}}
+; SIVI: buffer_store_dword v2, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v2
+define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask2(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 6, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_tfe_V4_dmask1:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v1, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; GCN: image_load v[0:1], v{{[0-9]+}}, s[0:7] dmask:0x8 unorm tfe{{$}}
+; SIVI: buffer_store_dword v1, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v1
+define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask1(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_tfe_V2_dmask1:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v1, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; GCN: image_load v[0:1], v{{[0-9]+}}, s[0:7] dmask:0x8 unorm tfe{{$}}
+; SIVI: buffer_store_dword v1, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v1
+define amdgpu_ps <2 x float> @load_1d_tfe_V2_dmask1(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+  %v = call {<2 x float>,i32} @llvm.amdgcn.image.load.1d.v2f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v.vec = extractvalue {<2 x float>, i32} %v, 0
+  %v.err = extractvalue {<2 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <2 x float> %v.vec
+}
+
+
 ; GCN-LABEL: {{^}}load_mip_3d:
 ; GCN: image_load_mip v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
 define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %r, i32 %mip) {
@@ -404,23 +820,37 @@
   store float 0.000000e+00, float addrspace(3)* %lds
   %c0 = extractelement <2 x i32> %c, i32 0
   %c1 = extractelement <2 x i32> %c, i32 1
-  %tex = call float @llvm.amdgcn.image.load.2d.f32.i32(i32 15, i32 %c0, i32 %c1, <8 x i32> %rsrc, i32 0, i32 0)
+  %tex = call float @llvm.amdgcn.image.load.2d.f32.i32(i32 1, i32 %c0, i32 %c1, <8 x i32> %rsrc, i32 0, i32 0)
   %tmp2 = getelementptr float, float addrspace(3)* %lds, i32 4
   store float 0.000000e+00, float addrspace(3)* %tmp2
   ret float %tex
 }
 
 declare <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {float,i32} @llvm.amdgcn.image.load.1d.f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {<2 x float>,i32} @llvm.amdgcn.image.load.1d.v2f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2d.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.3d.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.3d.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.cube.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.cube.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.1darray.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.1darray.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.2darray.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2darray.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.2dmsaa.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2dmsaa.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2darraymsaa.v4f32i32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 
 declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.mip.1d.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<2 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v2f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {float,i32} @llvm.amdgcn.image.load.mip.2d.f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.d16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.d16.ll
index 1fbfccb..fd2c6e7 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.d16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.d16.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
 
 ; GCN-LABEL: {{^}}load.f16.1d:
-; GCN: image_load v[0:1], v0, s[0:7] dmask:0x1 unorm a16 d16
+; GCN: image_load v0, v0, s[0:7] dmask:0x1 unorm a16 d16
 define amdgpu_ps <4 x half> @load.f16.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -10,7 +10,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.v2f16.1d:
-; GCN: image_load v[0:1], v0, s[0:7] dmask:0x3 unorm a16 d16
+; GCN: image_load v0, v0, s[0:7] dmask:0x3 unorm a16 d16
 define amdgpu_ps <4 x half> @load.v2f16.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -37,7 +37,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.f16.2d:
-; GCN: image_load v[0:1], v0, s[0:7] dmask:0x1 unorm a16 d16
+; GCN: image_load v0, v0, s[0:7] dmask:0x1 unorm a16 d16
 define amdgpu_ps <4 x half> @load.f16.2d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -47,7 +47,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.v2f16.2d:
-; GCN: image_load v[0:1], v0, s[0:7] dmask:0x3 unorm a16 d16
+; GCN: image_load v0, v0, s[0:7] dmask:0x3 unorm a16 d16
 define amdgpu_ps <4 x half> @load.v2f16.2d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -77,7 +77,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.f16.3d:
-; GCN: image_load v[0:1], v[0:1], s[0:7] dmask:0x1 unorm a16 d16
+; GCN: image_load v0, v[0:1], s[0:7] dmask:0x1 unorm a16 d16
 define amdgpu_ps <4 x half> @load.f16.3d(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
 main_body:
   %x = extractelement <2 x i16> %coords_lo, i32 0
@@ -88,7 +88,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.v2f16.3d:
-; GCN: image_load v[0:1], v[0:1], s[0:7] dmask:0x3 unorm a16 d16
+; GCN: image_load v0, v[0:1], s[0:7] dmask:0x3 unorm a16 d16
 define amdgpu_ps <4 x half> @load.v2f16.3d(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
 main_body:
   %x = extractelement <2 x i16> %coords_lo, i32 0
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.ll
index d857ae1..be579b8 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.a16.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
 
 ; GCN-LABEL: {{^}}load.f32.1d:
-; GCN: image_load v[0:3], v0, s[0:7] dmask:0x1 unorm a16
+; GCN: image_load v0, v0, s[0:7] dmask:0x1 unorm a16
 define amdgpu_ps <4 x float> @load.f32.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -10,7 +10,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.v2f32.1d:
-; GCN: image_load v[0:3], v0, s[0:7] dmask:0x3 unorm a16
+; GCN: image_load v[0:1], v0, s[0:7] dmask:0x3 unorm a16
 define amdgpu_ps <4 x float> @load.v2f32.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -37,7 +37,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.f32.2d:
-; GCN: image_load v[0:3], v0, s[0:7] dmask:0x1 unorm a16
+; GCN: image_load v0, v0, s[0:7] dmask:0x1 unorm a16
 define amdgpu_ps <4 x float> @load.f32.2d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -47,7 +47,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.v2f32.2d:
-; GCN: image_load v[0:3], v0, s[0:7] dmask:0x3 unorm a16
+; GCN: image_load v[0:1], v0, s[0:7] dmask:0x3 unorm a16
 define amdgpu_ps <4 x float> @load.v2f32.2d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
 main_body:
   %x = extractelement <2 x i16> %coords, i32 0
@@ -77,7 +77,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.f32.3d:
-; GCN: image_load v[0:3], v[0:1], s[0:7] dmask:0x1 unorm a16
+; GCN: image_load v0, v[0:1], s[0:7] dmask:0x1 unorm a16
 define amdgpu_ps <4 x float> @load.f32.3d(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
 main_body:
   %x = extractelement <2 x i16> %coords_lo, i32 0
@@ -88,7 +88,7 @@
 }
 
 ; GCN-LABEL: {{^}}load.v2f32.3d:
-; GCN: image_load v[0:3], v[0:1], s[0:7] dmask:0x3 unorm a16
+; GCN: image_load v[0:1], v[0:1], s[0:7] dmask:0x3 unorm a16
 define amdgpu_ps <4 x float> @load.v2f32.3d(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
 main_body:
   %x = extractelement <2 x i16> %coords_lo, i32 0
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.d16.dim.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.d16.dim.ll
index 9619304..b6260f4 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.d16.dim.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.d16.dim.ll
@@ -10,6 +10,19 @@
   ret half %tex
 }
 
+; GCN-LABEL: {{^}}image_sample_2d_f16_tfe:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0
+; PACKED: image_sample v[2:3], v[0:1], s[0:7], s[8:11] dmask:0x1 tfe d16{{$}}
+; UNPACKED: image_sample v[2:3], v[0:1], s[0:7], s[8:11] dmask:0x1 tfe d16{{$}}
+define amdgpu_ps half @image_sample_2d_f16_tfe(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, i32 addrspace(1)* inreg %out) {
+main_body:
+  %tex = call {half,i32} @llvm.amdgcn.image.sample.2d.f16i32.f32(i32 1, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 1, i32 0)
+  %tex.vec = extractvalue {half, i32} %tex, 0
+  %tex.err = extractvalue {half, i32} %tex, 1
+  store i32 %tex.err, i32 addrspace(1)* %out, align 4
+  ret half %tex.vec
+}
+
 ; GCN-LABEL: {{^}}image_sample_c_d_1d_v2f16:
 ; UNPACKED: image_sample_c_d v[0:1], v[0:3], s[0:7], s[8:11] dmask:0x3 d16{{$}}
 ; PACKED: image_sample_c_d v0, v[0:3], s[0:7], s[8:11] dmask:0x3 d16{{$}}
@@ -20,6 +33,22 @@
   ret float %r
 }
 
+; GCN-LABEL: {{^}}image_sample_c_d_1d_v2f16_tfe:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0
+; UNPACKED: image_sample_c_d v[{{[0-9]+:[0-9]+}}], v[0:3], s[0:7], s[8:11] dmask:0x3 tfe d16{{$}}
+; PACKED: image_sample_c_d v[{{[0-9]+:[0-9]+}}], v[0:3], s[0:7], s[8:11] dmask:0x3 tfe d16{{$}}
+define amdgpu_ps <2 x float> @image_sample_c_d_1d_v2f16_tfe(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %dsdh, float %dsdv, float %s) {
+main_body:
+  %tex = call {<2 x half>,i32} @llvm.amdgcn.image.sample.c.d.1d.v2f16i32.f32.f32(i32 3, float %zcompare, float %dsdh, float %dsdv, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 1, i32 0)
+  %tex.vec = extractvalue {<2 x half>, i32} %tex, 0
+  %tex.err = extractvalue {<2 x half>, i32} %tex, 1
+  %tex.vecf = bitcast <2 x half> %tex.vec to float
+  %r.0 = insertelement <2 x float> undef, float %tex.vecf, i32 0
+  %tex.errf = bitcast i32 %tex.err to float
+  %r = insertelement <2 x float> %r.0, float %tex.errf, i32 1
+  ret <2 x float> %r
+}
+
 ; GCN-LABEL: {{^}}image_sample_b_2d_v4f16:
 ; UNPACKED: image_sample_b v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf d16{{$}}
 ; PACKED: image_sample_b v[0:1], v[0:3], s[0:7], s[8:11] dmask:0xf d16{{$}}
@@ -30,9 +59,33 @@
   ret <2 x float> %r
 }
 
+; GCN-LABEL: {{^}}image_sample_b_2d_v4f16_tfe:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0
+; UNPACKED: image_sample_b v[{{[0-9]+:[0-9]+}}], v[0:3], s[0:7], s[8:11] dmask:0xf tfe d16{{$}}
+; PACKED: image_sample_b v[{{[0-9]+:[0-9]+}}], v[0:3], s[0:7], s[8:11] dmask:0xf tfe d16{{$}}
+define amdgpu_ps <4 x float> @image_sample_b_2d_v4f16_tfe(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, float %s, float %t) {
+main_body:
+  %tex = call {<4 x half>,i32} @llvm.amdgcn.image.sample.b.2d.v4f16i32.f32.f32(i32 15, float %bias, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 1, i32 0)
+  %tex.vec = extractvalue {<4 x half>, i32} %tex, 0
+  %tex.err = extractvalue {<4 x half>, i32} %tex, 1
+  %tex.vecf = bitcast <4 x half> %tex.vec to <2 x float>
+  %tex.vecf.0 = extractelement <2 x float> %tex.vecf, i32 0
+  %tex.vecf.1 = extractelement <2 x float> %tex.vecf, i32 1
+  %r.0 = insertelement <4 x float> undef, float %tex.vecf.0, i32 0
+  %r.1 = insertelement <4 x float> %r.0, float %tex.vecf.1, i32 1
+  %tex.errf = bitcast i32 %tex.err to float
+  %r = insertelement <4 x float> %r.1, float %tex.errf, i32 2
+  ret <4 x float> %r
+}
+
 declare half @llvm.amdgcn.image.sample.2d.f16.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {half,i32} @llvm.amdgcn.image.sample.2d.f16i32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare <4 x half> @llvm.amdgcn.image.sample.2d.v4f16.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {<2 x half>,i32} @llvm.amdgcn.image.sample.2d.v2f16i32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <2 x half> @llvm.amdgcn.image.sample.c.d.1d.v2f16.f32.f32(i32, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {<2 x half>,i32} @llvm.amdgcn.image.sample.c.d.1d.v2f16i32.f32.f32(i32, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <4 x half> @llvm.amdgcn.image.sample.b.2d.v4f16.f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {<4 x half>,i32} @llvm.amdgcn.image.sample.b.2d.v4f16i32.f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll
index 65f4b46..2ee69ac 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.dim.ll
@@ -9,6 +9,162 @@
   ret <4 x float> %v
 }
 
+; GCN-LABEL: {{^}}sample_1d_tfe:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: v_mov_b32_e32 v2, v0
+; GCN: v_mov_b32_e32 v3, v0
+; GCN: v_mov_b32_e32 v4, v0
+; GCN: image_sample v[0:7], v5, s[0:7], s[8:11] dmask:0xf tfe{{$}}
+define amdgpu_ps <4 x float> @sample_1d_tfe(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 addrspace(1)* inreg %out, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}sample_1d_tfe_adjust_writemask_1:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: image_sample v[0:1], v2, s[0:7], s[8:11] dmask:0x1 tfe{{$}}
+define amdgpu_ps <2 x float> @sample_1d_tfe_adjust_writemask_1(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 addrspace(1)* inreg %out, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %res.vec = extractvalue {<4 x float>,i32} %v, 0
+  %res.f = extractelement <4 x float> %res.vec, i32 0
+  %res.err = extractvalue {<4 x float>,i32} %v, 1
+  %res.errf = bitcast i32 %res.err to float
+  %res.tmp = insertelement <2 x float> undef, float %res.f, i32 0
+  %res = insertelement <2 x float> %res.tmp, float %res.errf, i32 1
+  ret <2 x float> %res
+}
+
+; GCN-LABEL: {{^}}sample_1d_tfe_adjust_writemask_2:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: image_sample v[0:1], v2, s[0:7], s[8:11] dmask:0x2 tfe{{$}}
+define amdgpu_ps <2 x float> @sample_1d_tfe_adjust_writemask_2(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %res.vec = extractvalue {<4 x float>,i32} %v, 0
+  %res.f = extractelement <4 x float> %res.vec, i32 1
+  %res.err = extractvalue {<4 x float>,i32} %v, 1
+  %res.errf = bitcast i32 %res.err to float
+  %res.tmp = insertelement <2 x float> undef, float %res.f, i32 0
+  %res = insertelement <2 x float> %res.tmp, float %res.errf, i32 1
+  ret <2 x float> %res
+}
+
+; GCN-LABEL: {{^}}sample_1d_tfe_adjust_writemask_3:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: image_sample v[0:1], v2, s[0:7], s[8:11] dmask:0x4 tfe{{$}}
+define amdgpu_ps <2 x float> @sample_1d_tfe_adjust_writemask_3(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %res.vec = extractvalue {<4 x float>,i32} %v, 0
+  %res.f = extractelement <4 x float> %res.vec, i32 2
+  %res.err = extractvalue {<4 x float>,i32} %v, 1
+  %res.errf = bitcast i32 %res.err to float
+  %res.tmp = insertelement <2 x float> undef, float %res.f, i32 0
+  %res = insertelement <2 x float> %res.tmp, float %res.errf, i32 1
+  ret <2 x float> %res
+}
+
+; GCN-LABEL: {{^}}sample_1d_tfe_adjust_writemask_4:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: image_sample v[0:1], v2, s[0:7], s[8:11] dmask:0x8 tfe{{$}}
+define amdgpu_ps <2 x float> @sample_1d_tfe_adjust_writemask_4(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %res.vec = extractvalue {<4 x float>,i32} %v, 0
+  %res.f = extractelement <4 x float> %res.vec, i32 3
+  %res.err = extractvalue {<4 x float>,i32} %v, 1
+  %res.errf = bitcast i32 %res.err to float
+  %res.tmp = insertelement <2 x float> undef, float %res.f, i32 0
+  %res = insertelement <2 x float> %res.tmp, float %res.errf, i32 1
+  ret <2 x float> %res
+}
+
+; GCN-LABEL: {{^}}sample_1d_tfe_adjust_writemask_12:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: v_mov_b32_e32 v2, v0
+; GCN: image_sample v[0:2], v3, s[0:7], s[8:11] dmask:0x3 tfe{{$}}
+define amdgpu_ps <4 x float> @sample_1d_tfe_adjust_writemask_12(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %res.vec = extractvalue {<4 x float>,i32} %v, 0
+  %res.f1 = extractelement <4 x float> %res.vec, i32 0
+  %res.f2 = extractelement <4 x float> %res.vec, i32 1
+  %res.err = extractvalue {<4 x float>,i32} %v, 1
+  %res.errf = bitcast i32 %res.err to float
+  %res.tmp1 = insertelement <4 x float> undef, float %res.f1, i32 0
+  %res.tmp2 = insertelement <4 x float> %res.tmp1, float %res.f2, i32 1
+  %res = insertelement <4 x float> %res.tmp2, float %res.errf, i32 2
+  ret <4 x float> %res
+}
+
+; GCN-LABEL: {{^}}sample_1d_tfe_adjust_writemask_24:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: v_mov_b32_e32 v2, v0
+; GCN: image_sample v[0:2], v3, s[0:7], s[8:11] dmask:0xa tfe{{$}}
+define amdgpu_ps <4 x float> @sample_1d_tfe_adjust_writemask_24(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %res.vec = extractvalue {<4 x float>,i32} %v, 0
+  %res.f1 = extractelement <4 x float> %res.vec, i32 1
+  %res.f2 = extractelement <4 x float> %res.vec, i32 3
+  %res.err = extractvalue {<4 x float>,i32} %v, 1
+  %res.errf = bitcast i32 %res.err to float
+  %res.tmp1 = insertelement <4 x float> undef, float %res.f1, i32 0
+  %res.tmp2 = insertelement <4 x float> %res.tmp1, float %res.f2, i32 1
+  %res = insertelement <4 x float> %res.tmp2, float %res.errf, i32 2
+  ret <4 x float> %res
+}
+
+; GCN-LABEL: {{^}}sample_1d_tfe_adjust_writemask_134:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: v_mov_b32_e32 v2, v0
+; GCN: v_mov_b32_e32 v3, v0
+; GCN: image_sample v[0:3], v4, s[0:7], s[8:11] dmask:0xd tfe{{$}}
+define amdgpu_ps <4 x float> @sample_1d_tfe_adjust_writemask_134(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %res.vec = extractvalue {<4 x float>,i32} %v, 0
+  %res.f1 = extractelement <4 x float> %res.vec, i32 0
+  %res.f2 = extractelement <4 x float> %res.vec, i32 2
+  %res.f3 = extractelement <4 x float> %res.vec, i32 3
+  %res.err = extractvalue {<4 x float>,i32} %v, 1
+  %res.errf = bitcast i32 %res.err to float
+  %res.tmp1 = insertelement <4 x float> undef, float %res.f1, i32 0
+  %res.tmp2 = insertelement <4 x float> %res.tmp1, float %res.f2, i32 1
+  %res.tmp3 = insertelement <4 x float> %res.tmp2, float %res.f3, i32 2
+  %res = insertelement <4 x float> %res.tmp3, float %res.errf, i32 3
+  ret <4 x float> %res
+}
+
+; GCN-LABEL: {{^}}sample_1d_lwe:
+; GCN: v_mov_b32_e32 v0, 0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN: v_mov_b32_e32 v2, v0
+; GCN: v_mov_b32_e32 v3, v0
+; GCN: v_mov_b32_e32 v4, v0
+; GCN: image_sample v[0:7], v5, s[0:7], s[8:11] dmask:0xf lwe{{$}}
+define amdgpu_ps <4 x float> @sample_1d_lwe(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 addrspace(1)* inreg %out, float %s) {
+main_body:
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 2, i32 0)
+  %v.vec = extractvalue {<4 x float>, i32} %v, 0
+  %v.err = extractvalue {<4 x float>, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret <4 x float> %v.vec
+}
+
 ; GCN-LABEL: {{^}}sample_2d:
 ; GCN: image_sample v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf{{$}}
 define amdgpu_ps <4 x float> @sample_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t) {
@@ -361,6 +517,17 @@
   ret float %v
 }
 
+; GCN-LABEL: {{^}}sample_c_d_o_2darray_V1_tfe:
+; GCN: image_sample_c_d_o v[9:10], v[0:15], s[0:7], s[8:11] dmask:0x4 tfe da{{$}}
+define amdgpu_ps float @sample_c_d_o_2darray_V1_tfe(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %dsdh, float %dtdh, float %dsdv, float %dtdv, float %s, float %t, float %slice, i32 addrspace(1)* inreg %out) {
+main_body:
+  %v = call {float,i32} @llvm.amdgcn.image.sample.c.d.o.2darray.f32i32.f32.f32(i32 4, i32 %offset, float %zcompare, float %dsdh, float %dtdh, float %dsdv, float %dtdv, float %s, float %t, float %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %v.vec = extractvalue {float, i32} %v, 0
+  %v.err = extractvalue {float, i32} %v, 1
+  store i32 %v.err, i32 addrspace(1)* %out, align 4
+  ret float %v.vec
+}
+
 ; GCN-LABEL: {{^}}sample_c_d_o_2darray_V2:
 ; GCN: image_sample_c_d_o v[0:1], v[0:15], s[0:7], s[8:11] dmask:0x6 da{{$}}
 define amdgpu_ps <2 x float> @sample_c_d_o_2darray_V2(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %dsdh, float %dtdh, float %dsdv, float %dtdv, float %s, float %t, float %slice) {
@@ -369,6 +536,22 @@
   ret <2 x float> %v
 }
 
+; GCN-LABEL: {{^}}sample_c_d_o_2darray_V2_tfe:
+; GCN: image_sample_c_d_o v[9:12], v[0:15], s[0:7], s[8:11] dmask:0x6 tfe da{{$}}
+define amdgpu_ps <4 x float> @sample_c_d_o_2darray_V2_tfe(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %dsdh, float %dtdh, float %dsdv, float %dtdv, float %s, float %t, float %slice) {
+main_body:
+  %v = call {<2 x float>, i32} @llvm.amdgcn.image.sample.c.d.o.2darray.v2f32i32.f32.f32(i32 6, i32 %offset, float %zcompare, float %dsdh, float %dtdh, float %dsdv, float %dtdv, float %s, float %t, float %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 1, i32 0)
+  %v.vec = extractvalue {<2 x float>, i32} %v, 0
+  %v.f1 = extractelement <2 x float> %v.vec, i32 0
+  %v.f2 = extractelement <2 x float> %v.vec, i32 1
+  %v.err = extractvalue {<2 x float>, i32} %v, 1
+  %v.errf = bitcast i32 %v.err to float
+  %res.0 = insertelement <4 x float> undef, float %v.f1, i32 0
+  %res.1 = insertelement <4 x float> %res.0, float %v.f2, i32 1
+  %res.2 = insertelement <4 x float> %res.1, float %v.errf, i32 2
+  ret <4 x float> %res.2
+}
+
 ; GCN-LABEL: {{^}}sample_1d_unorm:
 ; GCN: image_sample v[0:3], v0, s[0:7], s[8:11] dmask:0xf unorm{{$}}
 define amdgpu_ps <4 x float> @sample_1d_unorm(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
@@ -491,6 +674,7 @@
 }
 
 declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.v4f32i32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.sample.cube.v4f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
@@ -542,7 +726,9 @@
 declare <4 x float> @llvm.amdgcn.image.sample.c.lz.2d.v4f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 
 declare float @llvm.amdgcn.image.sample.c.d.o.2darray.f32.f32.f32(i32, i32, float, float, float, float, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {float, i32} @llvm.amdgcn.image.sample.c.d.o.2darray.f32i32.f32.f32(i32, i32, float, float, float, float, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <2 x float> @llvm.amdgcn.image.sample.c.d.o.2darray.v2f32.f32.f32(i32, i32, float, float, float, float, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {<2 x float>, i32} @llvm.amdgcn.image.sample.c.d.o.2darray.v2f32i32.f32.f32(i32, i32, float, float, float, float, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll
index dcae08d..33b2967 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll
@@ -97,12 +97,12 @@
   ret float %val
 }
 
-;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged:
+;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged_and:
 ;CHECK-NEXT: %bb.
 ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
 ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
 ;CHECK: s_waitcnt
-define amdgpu_ps void @buffer_load_x1_offen_merged(<4 x i32> inreg %rsrc, i32 %a) {
+define amdgpu_ps void @buffer_load_x1_offen_merged_and(<4 x i32> inreg %rsrc, i32 %a) {
 main_body:
   %a1 = add i32 %a, 4
   %a2 = add i32 %a, 8
@@ -121,6 +121,32 @@
   ret void
 }
 
+;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged_or:
+;CHECK-NEXT: %bb.
+;CHECK-NEXT: v_lshlrev_b32_e32 v{{[0-9]}}, 6, v0
+;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v{{[0-9]}}, s[0:3], 0 offen offset:4
+;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v{{[0-9]}}, s[0:3], 0 offen offset:28
+;CHECK: s_waitcnt
+define amdgpu_ps void @buffer_load_x1_offen_merged_or(<4 x i32> inreg %rsrc, i32 %inp) {
+main_body:
+  %a = shl i32 %inp, 6
+  %a1 = or i32 %a, 4
+  %a2 = or i32 %a, 8
+  %a3 = or i32 %a, 12
+  %a4 = or i32 %a, 16
+  %a5 = or i32 %a, 28
+  %a6 = or i32 %a, 32
+  %r1 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a1, i32 0, i32 0)
+  %r2 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a2, i32 0, i32 0)
+  %r3 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a3, i32 0, i32 0)
+  %r4 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a4, i32 0, i32 0)
+  %r5 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a5, i32 0, i32 0)
+  %r6 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a6, i32 0, i32 0)
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true)
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r5, float %r6, float undef, float undef, i1 true, i1 true)
+  ret void
+}
+
 ;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged_glc_slc:
 ;CHECK-NEXT: %bb.
 ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4{{$}}
@@ -146,11 +172,11 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}buffer_load_x2_offen_merged:
+;CHECK-LABEL: {{^}}buffer_load_x2_offen_merged_and:
 ;CHECK-NEXT: %bb.
 ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
 ;CHECK: s_waitcnt
-define amdgpu_ps void @buffer_load_x2_offen_merged(<4 x i32> inreg %rsrc, i32 %a) {
+define amdgpu_ps void @buffer_load_x2_offen_merged_and(<4 x i32> inreg %rsrc, i32 %a) {
 main_body:
   %a1 = add i32 %a, 4
   %a2 = add i32 %a, 12
@@ -164,6 +190,26 @@
   ret void
 }
 
+;CHECK-LABEL: {{^}}buffer_load_x2_offen_merged_or:
+;CHECK-NEXT: %bb.
+;CHECK-NEXT: v_lshlrev_b32_e32 v{{[0-9]}}, 4, v0
+;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v{{[0-9]}}, s[0:3], 0 offen offset:4
+;CHECK: s_waitcnt
+define amdgpu_ps void @buffer_load_x2_offen_merged_or(<4 x i32> inreg %rsrc, i32 %inp) {
+main_body:
+  %a = shl i32 %inp, 4
+  %a1 = add i32 %a, 4
+  %a2 = add i32 %a, 12
+  %vr1 = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %a1, i32 0, i32 0)
+  %vr2 = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %a2, i32 0, i32 0)
+  %r1 = extractelement <2 x float> %vr1, i32 0
+  %r2 = extractelement <2 x float> %vr1, i32 1
+  %r3 = extractelement <2 x float> %vr2, i32 0
+  %r4 = extractelement <2 x float> %vr2, i32 1
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true)
+  ret void
+}
+
 ;CHECK-LABEL: {{^}}buffer_load_x1_offset_merged:
 ;CHECK-NEXT: %bb.
 ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
index afb2ef8..4f39867 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
@@ -67,11 +67,11 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}buffer_store_x1_offen_merged:
+;CHECK-LABEL: {{^}}buffer_store_x1_offen_merged_and:
 ;CHECK-NOT: s_waitcnt
 ;CHECK-DAG: buffer_store_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
 ;CHECK-DAG: buffer_store_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
-define amdgpu_ps void @buffer_store_x1_offen_merged(<4 x i32> inreg %rsrc, i32 %a, float %v1, float %v2, float %v3, float %v4, float %v5, float %v6) {
+define amdgpu_ps void @buffer_store_x1_offen_merged_and(<4 x i32> inreg %rsrc, i32 %a, float %v1, float %v2, float %v3, float %v4, float %v5, float %v6) {
   %a1 = add i32 %a, 4
   %a2 = add i32 %a, 8
   %a3 = add i32 %a, 12
@@ -87,6 +87,28 @@
   ret void
 }
 
+;CHECK-LABEL: {{^}}buffer_store_x1_offen_merged_or:
+;CHECK-NOT: s_waitcnt
+;CHECK-DAG: buffer_store_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v{{[0-9]}}, s[0:3], 0 offen offset:4
+;CHECK-DAG: buffer_store_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v{{[0-9]}}, s[0:3], 0 offen offset:28
+define amdgpu_ps void @buffer_store_x1_offen_merged_or(<4 x i32> inreg %rsrc, i32 %inp, float %v1, float %v2, float %v3, float %v4, float %v5, float %v6) {
+  %a = shl i32 %inp, 6
+  %a1 = add i32 %a, 4
+  %a2 = add i32 %a, 8
+  %a3 = add i32 %a, 12
+  %a4 = add i32 %a, 16
+  %a5 = add i32 %a, 28
+  %a6 = add i32 %a, 32
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %v1, <4 x i32> %rsrc, i32 %a1, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %v2, <4 x i32> %rsrc, i32 %a2, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %v3, <4 x i32> %rsrc, i32 %a3, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %v4, <4 x i32> %rsrc, i32 %a4, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %v5, <4 x i32> %rsrc, i32 %a5, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %v6, <4 x i32> %rsrc, i32 %a6, i32 0, i32 0)
+  ret void
+}
+
+
 ;CHECK-LABEL: {{^}}buffer_store_x1_offen_merged_glc_slc:
 ;CHECK-NOT: s_waitcnt
 ;CHECK-DAG: buffer_store_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4{{$}}
@@ -108,10 +130,22 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}buffer_store_x2_offen_merged:
+;CHECK-LABEL: {{^}}buffer_store_x2_offen_merged_and:
 ;CHECK-NOT: s_waitcnt
 ;CHECK: buffer_store_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
-define amdgpu_ps void @buffer_store_x2_offen_merged(<4 x i32> inreg %rsrc, i32 %a, <2 x float> %v1, <2 x float> %v2) {
+define amdgpu_ps void @buffer_store_x2_offen_merged_and(<4 x i32> inreg %rsrc, i32 %a, <2 x float> %v1, <2 x float> %v2) {
+  %a1 = add i32 %a, 4
+  %a2 = add i32 %a, 12
+  call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %v1, <4 x i32> %rsrc, i32 %a1, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %v2, <4 x i32> %rsrc, i32 %a2, i32 0, i32 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_x2_offen_merged_or:
+;CHECK-NOT: s_waitcnt
+;CHECK: buffer_store_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v{{[0-9]}}, s[0:3], 0 offen offset:4
+define amdgpu_ps void @buffer_store_x2_offen_merged_or(<4 x i32> inreg %rsrc, i32 %inp, <2 x float> %v1, <2 x float> %v2) {
+  %a = shl i32 %inp, 4
   %a1 = add i32 %a, 4
   %a2 = add i32 %a, 12
   call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %v1, <4 x i32> %rsrc, i32 %a1, i32 0, i32 0)
diff --git a/test/CodeGen/AMDGPU/lshl64-to-32.ll b/test/CodeGen/AMDGPU/lshl64-to-32.ll
index b7e5eab..7119795 100644
--- a/test/CodeGen/AMDGPU/lshl64-to-32.ll
+++ b/test/CodeGen/AMDGPU/lshl64-to-32.ll
@@ -1,9 +1,19 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
-; GCN-LABEL: {{^}}zext_shl64_to_32:
-; GCN: s_lshl_b32
-; GCN-NOT: s_lshl_b64
 define amdgpu_kernel void @zext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) {
+; GCN-LABEL: zext_shl64_to_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshl_b32 s4, s4, 2
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
   %and = and i32 %x, 1073741823
   %ext = zext i32 %and to i64
   %shl = shl i64 %ext, 2
@@ -11,10 +21,20 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}sext_shl64_to_32:
-; GCN: s_lshl_b32
-; GCN-NOT: s_lshl_b64
 define amdgpu_kernel void @sext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) {
+; GCN-LABEL: sext_shl64_to_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s4, s4, 0x1fffffff
+; GCN-NEXT:    s_lshl_b32 s4, s4, 2
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
   %and = and i32 %x, 536870911
   %ext = sext i32 %and to i64
   %shl = shl i64 %ext, 2
@@ -22,10 +42,21 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}zext_shl64_overflow:
-; GCN: s_lshl_b64
-; GCN-NOT: s_lshl_b32
 define amdgpu_kernel void @zext_shl64_overflow(i64 addrspace(1)* nocapture %out, i32 %x) {
+; GCN-LABEL: zext_shl64_overflow:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s5, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s4, s2, 0x7fffffff
+; GCN-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
   %and = and i32 %x, 2147483647
   %ext = zext i32 %and to i64
   %shl = shl i64 %ext, 2
@@ -33,10 +64,21 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}sext_shl64_overflow:
-; GCN: s_lshl_b64
-; GCN-NOT: s_lshl_b32
 define amdgpu_kernel void @sext_shl64_overflow(i64 addrspace(1)* nocapture %out, i32 %x) {
+; GCN-LABEL: sext_shl64_overflow:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s5, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s4, s2, 0x7fffffff
+; GCN-NEXT:    s_lshl_b64 s[4:5], s[4:5], 2
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
   %and = and i32 %x, 2147483647
   %ext = sext i32 %and to i64
   %shl = shl i64 %ext, 2
@@ -44,10 +86,19 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}mulu24_shl64:
-; GCN: v_mul_u32_u24_e32 [[M:v[0-9]+]], 7, v{{[0-9]+}}
-; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 2, [[M]]
 define amdgpu_kernel void @mulu24_shl64(i32 addrspace(1)* nocapture %arg) {
+; GCN-LABEL: mulu24_shl64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    v_and_b32_e32 v0, 6, v0
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    v_mul_u32_u24_e32 v0, 7, v0
+; GCN-NEXT:    s_mov_b32 s2, 0
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    buffer_store_dword v1, v[0:1], s[0:3], 0 addr64
+; GCN-NEXT:    s_endpgm
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
   %tmp1 = and i32 %tmp, 6
@@ -58,10 +109,26 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}muli24_shl64:
-; GCN: v_mul_i32_i24_e32 [[M:v[0-9]+]], -7, v{{[0-9]+}}
-; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 3, [[M]]
 define amdgpu_kernel void @muli24_shl64(i64 addrspace(1)* nocapture %arg, i32 addrspace(1)* nocapture readonly %arg1) {
+; GCN-LABEL: muli24_shl64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, 0
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT:    buffer_load_dword v1, v[1:2], s[4:7], 0 addr64
+; GCN-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, 0x800000, v1
+; GCN-NEXT:    v_mul_i32_i24_e32 v1, 0xfffff9, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 3, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 3, v0
+; GCN-NEXT:    v_mov_b32_e32 v4, v2
+; GCN-NEXT:    buffer_store_dwordx2 v[1:2], v[3:4], s[0:3], 0 addr64
+; GCN-NEXT:    s_endpgm
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
   %tmp2 = sext i32 %tmp to i64
diff --git a/test/CodeGen/AMDGPU/merge-stores.ll b/test/CodeGen/AMDGPU/merge-stores.ll
index 7d0c0db..7fb0e35 100644
--- a/test/CodeGen/AMDGPU/merge-stores.ll
+++ b/test/CodeGen/AMDGPU/merge-stores.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=GCN-AA %s
 
 ; This test is mostly to test DAG store merging, so disable the vectorizer.
 ; Run with devices with different unaligned load restrictions.
@@ -65,8 +65,8 @@
 }
 
 ; GCN-LABEL: {{^}}merge_global_store_2_constants_i32:
-; SI-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0x1c8
-; SI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7b
+; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0x1c8
+; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7b
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
 define amdgpu_kernel void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
@@ -87,8 +87,8 @@
 }
 
 ; GCN-LABEL: {{^}}merge_global_store_2_constants_f32_i32:
-; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 4.0
-; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], 0x7b
+; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 4.0
+; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], 0x7b
 ; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
 define amdgpu_kernel void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
@@ -164,9 +164,10 @@
 }
 
 ; GCN-LABEL: {{^}}merge_global_store_3_constants_i32:
-; SI-DAG: buffer_store_dwordx3
-; SI-NOT: buffer_store_dwordx2
-; SI-NOT: buffer_store_dword
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dword v
+; CI-DAG: buffer_store_dwordx3
+; GCN-NOT: buffer_store_dword
 ; GCN: s_endpgm
 define amdgpu_kernel void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
@@ -274,9 +275,13 @@
 }
 
 ; GCN-LABEL: {{^}}merge_global_store_3_adjacent_loads_i32:
-; SI-DAG: buffer_load_dwordx3
+; SI-DAG: buffer_load_dwordx2
+; SI-DAG: buffer_load_dword v
+; CI-DAG: buffer_load_dwordx3
 ; GCN: s_waitcnt
-; SI-DAG: buffer_store_dwordx3 v
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dword v
+; CI-DAG: buffer_store_dwordx3
 ; GCN: s_endpgm
 define amdgpu_kernel void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
@@ -561,7 +566,9 @@
 
 ; GCN-LABEL: {{^}}merge_global_store_7_constants_i32:
 ; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx3
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dword v
+; CI: buffer_store_dwordx3
 define amdgpu_kernel void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
   store i32 34, i32 addrspace(1)* %out, align 4
   %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
@@ -608,11 +615,15 @@
 
 ; GCN-LABEL: {{^}}copy_v3i32_align4:
 ; GCN-NOT: SCRATCH_RSRC_DWORD
-; GCN-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
+; SI-DAG: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; CI-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 ; GCN-NOT: offen
 ; GCN: s_waitcnt vmcnt
 ; GCN-NOT: offen
-; GCN-DAG: buffer_store_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
+; CI-DAG: buffer_store_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 
 ; GCN: ScratchSize: 0{{$}}
 define amdgpu_kernel void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> addrspace(1)* noalias %in) #0 {
@@ -639,11 +650,15 @@
 
 ; GCN-LABEL: {{^}}copy_v3f32_align4:
 ; GCN-NOT: SCRATCH_RSRC_DWORD
-; GCN-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
+; SI-DAG: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; CI-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 ; GCN-NOT: offen
 ; GCN: s_waitcnt vmcnt
 ; GCN-NOT: offen
-; GCN-DAG: buffer_store_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
+; CI-DAG: buffer_store_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 ; GCN: ScratchSize: 0{{$}}
 define amdgpu_kernel void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x float> addrspace(1)* noalias %in) #0 {
   %vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
diff --git a/test/CodeGen/AMDGPU/promote-constOffset-to-imm.mir b/test/CodeGen/AMDGPU/promote-constOffset-to-imm.mir
index 55ef994..b5e4032 100644
--- a/test/CodeGen/AMDGPU/promote-constOffset-to-imm.mir
+++ b/test/CodeGen/AMDGPU/promote-constOffset-to-imm.mir
@@ -152,3 +152,39 @@
     %44:vreg_64 = REG_SEQUENCE %40, %subreg.sub0, %42, %subreg.sub1
     %45:vreg_64 = GLOBAL_LOAD_DWORDX2 %44, 0, 0, 0, implicit $exec
 ...
+---
+
+# Test for a successful compilation.
+name: assert_hit
+body:             |
+    bb.0.entry:
+    %0:sgpr_64 = COPY $sgpr0_sgpr1
+    %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0
+    %3:sreg_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
+    %4:sreg_32_xm0 = COPY $sgpr101
+    %5:sreg_32_xm0 = S_MOV_B32 0
+    $sgpr0_sgpr1_sgpr2_sgpr3 = COPY %3
+    $sgpr4 = COPY %4
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+    %6:vreg_64 = COPY $vgpr0_vgpr1
+    %7:vgpr_32 = V_AND_B32_e32 255, %6.sub0, implicit $exec
+    %8:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %9:vreg_64 = REG_SEQUENCE killed %7, %subreg.sub0, %8, %subreg.sub1
+    %10:vgpr_32 = V_LSHLREV_B32_e64 7, %6.sub0, implicit $exec
+    %11:vgpr_32 = V_AND_B32_e32 -32768, killed %10, implicit $exec
+    %12:sgpr_32 = COPY %1.sub1
+    %13:vgpr_32 = COPY %5
+    %14:vgpr_32, %15:sreg_64_xexec = V_ADD_I32_e64 %1.sub0, %11, implicit $exec
+    %16:vgpr_32 = COPY %12
+    %17:vgpr_32, dead %18:sreg_64_xexec = V_ADDC_U32_e64 %16, %13, killed %15, implicit $exec
+    %19:vreg_64 = REG_SEQUENCE %14, %subreg.sub0, %17, %subreg.sub1
+    %20:vreg_64 = V_LSHLREV_B64 3, %9, implicit $exec
+    %21:vgpr_32, %22:sreg_64_xexec = V_ADD_I32_e64 %14, %20.sub0, implicit $exec
+    %23:vgpr_32, dead %24:sreg_64_xexec = V_ADDC_U32_e64 %17, %20.sub1, killed %22, implicit $exec
+
+    %25:sgpr_32 = S_MOV_B32 6144
+    %26:vgpr_32, %27:sreg_64_xexec = V_ADD_I32_e64 %21, %25, implicit $exec
+    %28:vgpr_32, dead %29:sreg_64_xexec = V_ADDC_U32_e64 %23, 4294967295, killed %27, implicit $exec
+    %30:vreg_64 = REG_SEQUENCE %26, %subreg.sub0, %28, %subreg.sub1
+    %31:vreg_64 = GLOBAL_LOAD_DWORDX2 %30, 0, 0, 0, implicit $exec
+...
diff --git a/test/CodeGen/AMDGPU/regcoalesce-cannot-join-failures.mir b/test/CodeGen/AMDGPU/regcoalesce-cannot-join-failures.mir
new file mode 100644
index 0000000..846607b
--- /dev/null
+++ b/test/CodeGen/AMDGPU/regcoalesce-cannot-join-failures.mir
@@ -0,0 +1,118 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-coalescing -run-pass=simple-register-coalescing -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: couldnt_join_subrange_implicit_def_pred_block
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: couldnt_join_subrange_implicit_def_pred_block
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   undef %0.sub0:sreg_64_xexec = IMPLICIT_DEF
+  ; CHECK: bb.1:
+  ; CHECK:   successors: %bb.2(0x80000000)
+  ; CHECK:   %0.sub1:sreg_64_xexec = COPY %0.sub0
+  ; CHECK:   S_BRANCH %bb.2
+  ; CHECK: bb.2:
+  ; CHECK:   S_ENDPGM implicit %0
+  bb.0:
+    successors: %bb.1
+
+    undef %0.sub0:sreg_64_xexec = IMPLICIT_DEF
+
+  bb.1:
+    successors: %bb.2
+
+    %1:sreg_64 = COPY %0:sreg_64_xexec
+    %0.sub1:sreg_64_xexec = COPY %0.sub0:sreg_64_xexec
+    S_BRANCH %bb.2
+
+  bb.2:
+    dead %2:sreg_32_xm0 = COPY %0.sub0:sreg_64_xexec
+    S_ENDPGM implicit killed %1
+
+...
+---
+name: couldnt_join_subrange_no_implicit_def_inst
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name: couldnt_join_subrange_no_implicit_def_inst
+    ; CHECK: undef %0.sub0:sreg_64 = S_MOV_B32 0
+    ; CHECK: %0.sub1:sreg_64 = COPY %0.sub0
+    ; CHECK: S_ENDPGM implicit %0.sub1
+    undef %0.sub0:sreg_64 = S_MOV_B32 0
+    %1:sreg_64 = COPY %0:sreg_64
+    %0.sub1:sreg_64 = COPY %0.sub0:sreg_64
+    S_ENDPGM implicit %1.sub1:sreg_64
+
+...
+---
+name: couldnt_join_subrange0
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: couldnt_join_subrange0
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   undef %0.sub1:sreg_64 = S_MOV_B32 -1
+  ; CHECK: bb.1:
+  ; CHECK:   %0.sub0:sreg_64 = S_MOV_B32 0
+  ; CHECK:   [[COPY:%[0-9]+]]:sreg_64 = COPY %0
+  ; CHECK:   dead %0.sub1:sreg_64 = COPY %0.sub0
+  ; CHECK:   S_ENDPGM implicit [[COPY]].sub1
+  bb.0:
+    successors: %bb.1
+    undef %0.sub1:sreg_64 = S_MOV_B32 -1
+
+  bb.1:
+    %0.sub0:sreg_64 = S_MOV_B32 0
+    %1:sreg_64 = COPY %0:sreg_64
+    dead %0.sub1:sreg_64 = COPY %0.sub0:sreg_64
+    S_ENDPGM implicit %1.sub1:sreg_64
+
+...
+---
+name: lanes_not_tracked_subreg_join_couldnt_join_subrange
+tracksRegLiveness: true
+body:             |
+  bb.0:
+
+    ; CHECK-LABEL: name: lanes_not_tracked_subreg_join_couldnt_join_subrange
+    ; CHECK: undef %0.sub0:sreg_64_xexec = S_MOV_B32 0
+    ; CHECK: %0.sub1:sreg_64_xexec = S_MOV_B32 0
+    ; CHECK: S_NOP 0, implicit %0.sub1
+    ; CHECK: S_NOP 0, implicit %0
+    ; CHECK: S_ENDPGM
+    undef %0.sub0:sreg_64_xexec = S_MOV_B32 0
+    %1:sreg_64 = COPY %0
+    %0.sub1:sreg_64_xexec = S_MOV_B32 0
+    S_NOP 0, implicit %0.sub1
+    S_NOP 0, implicit %1
+    S_ENDPGM
+
+...
+---
+name: couldnt_join_subrange1
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: couldnt_join_subrange1
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   undef %0.sub0:sreg_64_xexec = S_MOV_B32 0
+  ; CHECK:   %0.sub1:sreg_64_xexec = COPY %0.sub0
+  ; CHECK: bb.1:
+  ; CHECK:   S_NOP 0, implicit %0.sub1
+  ; CHECK:   S_ENDPGM implicit %0
+  bb.0:
+    successors: %bb.1
+
+    undef %0.sub0:sreg_64_xexec = S_MOV_B32 0
+    %1:sreg_64 = COPY %0
+    %0.sub1:sreg_64_xexec = COPY %0.sub0
+
+  bb.1:
+
+    S_NOP 0, implicit %0.sub1
+    S_ENDPGM implicit %1
+
+...
diff --git a/test/CodeGen/AMDGPU/regcoalesce-keep-valid-lanes-implicit-def-bug39602.mir b/test/CodeGen/AMDGPU/regcoalesce-keep-valid-lanes-implicit-def-bug39602.mir
new file mode 100644
index 0000000..f7a9915
--- /dev/null
+++ b/test/CodeGen/AMDGPU/regcoalesce-keep-valid-lanes-implicit-def-bug39602.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-coalescing -run-pass=simple-register-coalescing -verify-machineinstrs -o - %s | FileCheck %s
+
+# Bug 39602: Avoid "Couldn't join subrange" error when clearing valid
+# lanes on an implicit_def that later cannot be erased.
+
+---
+name: lost_valid_lanes_maybe_erasable_implicit_def
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: lost_valid_lanes_maybe_erasable_implicit_def
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   undef %0.sub1:sreg_64 = IMPLICIT_DEF
+  ; CHECK: bb.1:
+  ; CHECK:   %0.sub0:sreg_64 = S_MOV_B32 0
+  ; CHECK:   [[COPY:%[0-9]+]]:sreg_64 = COPY %0
+  ; CHECK:   dead %0.sub1:sreg_64 = COPY %0.sub0
+  ; CHECK:   S_ENDPGM implicit [[COPY]].sub1
+  bb.0:
+    successors: %bb.1
+    undef %0.sub1:sreg_64 = IMPLICIT_DEF
+
+  bb.1:
+    %0.sub0:sreg_64 = S_MOV_B32 0
+    %1:sreg_64 = COPY %0:sreg_64
+    dead %0.sub1:sreg_64 = COPY %0.sub0:sreg_64
+    S_ENDPGM implicit %1.sub1:sreg_64
+
+...
+---
+# Same as the previous test, except with a real value instead of
+# IMPLICIT_DEF. Both cases should be handled the same way.
+
+name:  lost_valid_lanes_real_value
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: lost_valid_lanes_real_value
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   undef %0.sub1:sreg_64 = S_MOV_B32 -1
+  ; CHECK: bb.1:
+  ; CHECK:   %0.sub0:sreg_64 = S_MOV_B32 0
+  ; CHECK:   [[COPY:%[0-9]+]]:sreg_64 = COPY %0
+  ; CHECK:   dead %0.sub1:sreg_64 = COPY %0.sub0
+  ; CHECK:   S_ENDPGM implicit [[COPY]].sub1
+  bb.0:
+    successors: %bb.1
+    undef %0.sub1:sreg_64 = S_MOV_B32 -1
+
+  bb.1:
+    %0.sub0:sreg_64 = S_MOV_B32 0
+    %1:sreg_64 = COPY %0:sreg_64
+    dead %0.sub1:sreg_64 = COPY %0.sub0:sreg_64
+    S_ENDPGM implicit %1.sub1:sreg_64
+
+...
diff --git a/test/CodeGen/AMDGPU/scratch-simple.ll b/test/CodeGen/AMDGPU/scratch-simple.ll
index b2781a7..0ddd7b9 100644
--- a/test/CodeGen/AMDGPU/scratch-simple.ll
+++ b/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -1,6 +1,10 @@
 ; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=verde -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx803 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -filetype=obj < %s | llvm-readobj -relocations | FileCheck --check-prefix=RELS %s
+
+; RELS: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD0 0x0
+; RELS: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD1 0x0
 
 ; This used to fail due to a v_add_i32 instruction with an illegal immediate
 ; operand that was created during Local Stack Slot Allocation. Test case derived
diff --git a/test/CodeGen/AMDGPU/store-global.ll b/test/CodeGen/AMDGPU/store-global.ll
index 50f6b81..c0b9f6c 100644
--- a/test/CodeGen/AMDGPU/store-global.ll
+++ b/test/CodeGen/AMDGPU/store-global.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SIVI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SIVI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SIVI -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SIVI -check-prefix=VI -check-prefix=FUNC %s
 ; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=FUNC %s
 ; RUN: llc -march=r600 -mtriple=r600-- -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 ; RUN: llc -march=r600 -mtriple=r600-- -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
@@ -273,7 +273,10 @@
 }
 
 ; FUNC-LABEL: {{^}}store_v3i32:
-; SIVI-DAG: buffer_store_dwordx3
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dword v
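+; (SI has no buffer_store_dwordx3, so the 96-bit store is split there; VI
+; selects the single dwordx3 form.)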
+
+; VI-DAG: buffer_store_dwordx3
 
 ; GFX9-DAG: global_store_dwordx2
 ; GFX9-DAG: global_store_dword v
diff --git a/test/CodeGen/AMDGPU/store-v3i64.ll b/test/CodeGen/AMDGPU/store-v3i64.ll
index 534347f..7af1736 100644
--- a/test/CodeGen/AMDGPU/store-v3i64.ll
+++ b/test/CodeGen/AMDGPU/store-v3i64.ll
@@ -89,7 +89,9 @@
 }
 
 ; GCN-LABEL: {{^}}global_truncstore_v3i64_to_v3i32:
-; GCN-DAG: buffer_store_dwordx3
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dword v
+; VI-DAG: buffer_store_dwordx3
 define amdgpu_kernel void @global_truncstore_v3i64_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i64> %x) {
   %trunc = trunc <3 x i64> %x to <3 x i32>
   store <3 x i32> %trunc, <3 x i32> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/trunc-combine.ll b/test/CodeGen/AMDGPU/trunc-combine.ll
index 2325a3d..53ae976 100644
--- a/test/CodeGen/AMDGPU/trunc-combine.ll
+++ b/test/CodeGen/AMDGPU/trunc-combine.ll
@@ -25,7 +25,7 @@
 ; GCN: _load_dword
 ; GCN-NOT: _load_dword
 ; GCN-NOT: v_mov_b32
-; GCN: v_add_u32_e32 v0, vcc, 4, v0
+; GCN: v_add_u16_e32 v0, 4, v0
 define i16 @trunc_bitcast_v2i32_to_i16(<2 x i32> %bar) {
   %load0 = load i32, i32 addrspace(1)* undef
   %load1 = load i32, i32 addrspace(1)* null
@@ -42,7 +42,7 @@
 ; GCN: _load_dword
 ; GCN-NOT: _load_dword
 ; GCN-NOT: v_mov_b32
-; GCN: v_add_u32_e32 v0, vcc, 4, v0
+; GCN: v_add_u16_e32 v0, 4, v0
 define i16 @trunc_bitcast_v2f32_to_i16(<2 x float> %bar) {
   %load0 = load float, float addrspace(1)* undef
   %load1 = load float, float addrspace(1)* null
diff --git a/test/CodeGen/AMDGPU/waitcnt-loop-irreducible.mir b/test/CodeGen/AMDGPU/waitcnt-loop-irreducible.mir
index b0ca67f..7afb2ba 100644
--- a/test/CodeGen/AMDGPU/waitcnt-loop-irreducible.mir
+++ b/test/CodeGen/AMDGPU/waitcnt-loop-irreducible.mir
@@ -1,20 +1,26 @@
-# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefixes=GCN %s
+# RUN: llc -march=amdgcn -mcpu=gfx803 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefixes=GCN,GFX8 %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefixes=GCN,GFX9 %s
 
-# GCN-LABEL: name: irreducible_loop{{$}}
-# GCN: S_LOAD_DWORDX4_IMM
-# GCN: S_WAITCNT 127{{$}}
-# GCN: S_BUFFER_LOAD_DWORD_IMM
-# GCN: S_WAITCNT 127{{$}}
-# GCN: S_CMP_GE_I32
 --- |
 
   define amdgpu_ps void @irreducible_loop() {
-  main:
+    ret void
+  }
+  define amdgpu_ps void @irreducible_loop_extended() {
     ret void
   }
 
 ...
 ---
+
+# GCN-LABEL: name: irreducible_loop{{$}}
+# GCN: S_LOAD_DWORDX4_IMM
+# GFX8: S_WAITCNT 127{{$}}
+# GFX9: S_WAITCNT 49279{{$}}
+# GCN: S_BUFFER_LOAD_DWORD_IMM
+# GFX8: S_WAITCNT 127{{$}}
+# GFX9: S_WAITCNT 49279{{$}}
+# GCN: S_CMP_GE_I32
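+# (127 = 0x7f and 49279 = 0xc07f both decode to lgkmcnt(0) with vmcnt and
+# expcnt left unwaited; gfx9 widened the vmcnt field, hence the new immediate.)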
 name:            irreducible_loop
 body:             |
   bb.0:
@@ -45,3 +51,53 @@
     S_ENDPGM
 
 ...
+
+# GCN-LABEL: name: irreducible_loop_extended
+
+# GCN: S_LOAD_DWORDX4_IMM
+# GFX8: S_WAITCNT 127{{$}}
+# GFX9: S_WAITCNT 49279{{$}}
+# GCN: BUFFER_STORE_DWORD_OFFEN_exact
+# GFX8: S_WAITCNT 127{{$}}
+# GFX9: S_WAITCNT 49279{{$}}
+# GCN: BUFFER_STORE_DWORD_OFFEN_exact
+# GCN: S_LOAD_DWORDX4_IMM
+# GFX8: S_WAITCNT 127{{$}}
+# GFX9: S_WAITCNT 49279{{$}}
+# GCN: BUFFER_ATOMIC_ADD_OFFSET_RTN
+# GCN: S_WAITCNT 3952
+# GCN: FLAT_STORE_DWORD
+# GCN: S_ENDPGM
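+# (The shared GCN check for 3952 works because 0xf70 decodes to vmcnt(0), with
+# expcnt and lgkmcnt left unwaited, on both gfx8 and gfx9.)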
+name: irreducible_loop_extended
+
+body: |
+  bb.0:
+    successors: %bb.1, %bb.2
+    $sgpr4_sgpr5_sgpr6_sgpr7 = S_LOAD_DWORDX4_IMM renamable $sgpr2_sgpr3, 0, 0
+    S_CBRANCH_VCCZ %bb.2, implicit $vcc
+
+  bb.1:
+    successors: %bb.2
+    BUFFER_STORE_DWORD_OFFEN_exact killed renamable $vgpr3, renamable $vgpr2, renamable $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec
+
+  bb.2:
+    successors: %bb.3, %bb.6
+    S_CBRANCH_VCCNZ %bb.6, implicit $vcc
+
+  bb.3:
+    successors: %bb.4, %bb.5
+    BUFFER_STORE_DWORD_OFFEN_exact killed renamable $vgpr3, killed renamable $vgpr2, killed renamable $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec
+    S_CBRANCH_VCCNZ %bb.5, implicit $vcc
+
+  bb.4:
+    successors: %bb.5
+    renamable $sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM killed renamable $sgpr2_sgpr3, 64, 0
+    renamable $vgpr2 = BUFFER_ATOMIC_ADD_OFFSET_RTN killed renamable $vgpr2, killed renamable $sgpr12_sgpr13_sgpr14_sgpr15, 0, 0, 0, implicit $exec
+
+  bb.5:
+    successors: %bb.6
+
+  bb.6:
+    FLAT_STORE_DWORD $vgpr3_vgpr4, $vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr
+    S_ENDPGM
+...
diff --git a/test/CodeGen/AMDGPU/widen-smrd-loads.ll b/test/CodeGen/AMDGPU/widen-smrd-loads.ll
index 9a2e428..c950e2d 100644
--- a/test/CodeGen/AMDGPU/widen-smrd-loads.ll
+++ b/test/CodeGen/AMDGPU/widen-smrd-loads.ll
@@ -1,11 +1,38 @@
-; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
-; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
-; GCN-LABEL: {{^}}widen_i16_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_addk_i32 [[VAL]], 0x3e7
-; GCN: s_or_b32 [[OR:s[0-9]+]], [[VAL]], 4
 define amdgpu_kernel void @widen_i16_constant_load(i16 addrspace(4)* %arg) {
+; SI-LABEL: widen_i16_constant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_addk_i32 s0, 0x3e7
+; SI-NEXT:    s_or_b32 s0, s0, 4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i16_constant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_addk_i32 s0, 0x3e7
+; VI-NEXT:    s_or_b32 s0, s0, 4
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load i16, i16 addrspace(4)* %arg, align 4
   %add = add i16 %load, 999
   %or = or i16 %add, 4
@@ -13,12 +40,38 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i16_constant_load_zext_i32:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_and_b32 [[TRUNC:s[0-9]+]], [[VAL]], 0xffff{{$}}
-; GCN: s_addk_i32 [[TRUNC]], 0x3e7
-; GCN: s_or_b32 [[OR:s[0-9]+]], [[TRUNC]], 4
 define amdgpu_kernel void @widen_i16_constant_load_zext_i32(i16 addrspace(4)* %arg) {
+; SI-LABEL: widen_i16_constant_load_zext_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s0, s0, 0xffff
+; SI-NEXT:    s_addk_i32 s0, 0x3e7
+; SI-NEXT:    s_or_b32 s0, s0, 4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i16_constant_load_zext_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_addk_i32 s0, 0x3e7
+; VI-NEXT:    s_or_b32 s0, s0, 4
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load i16, i16 addrspace(4)* %arg, align 4
   %ext = zext i16 %load to i32
   %add = add i32 %ext, 999
@@ -27,12 +80,38 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i16_constant_load_sext_i32:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_sext_i32_i16 [[EXT:s[0-9]+]], [[VAL]]
-; GCN: s_addk_i32 [[EXT]], 0x3e7
-; GCN: s_or_b32 [[OR:s[0-9]+]], [[EXT]], 4
 define amdgpu_kernel void @widen_i16_constant_load_sext_i32(i16 addrspace(4)* %arg) {
+; SI-LABEL: widen_i16_constant_load_sext_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_sext_i32_i16 s0, s0
+; SI-NEXT:    s_addk_i32 s0, 0x3e7
+; SI-NEXT:    s_or_b32 s0, s0, 4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i16_constant_load_sext_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_sext_i32_i16 s0, s0
+; VI-NEXT:    s_addk_i32 s0, 0x3e7
+; VI-NEXT:    s_or_b32 s0, s0, 4
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load i16, i16 addrspace(4)* %arg, align 4
   %ext = sext i16 %load to i32
   %add = add i32 %ext, 999
@@ -41,12 +120,46 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i17_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_add_i32 [[ADD:s[0-9]+]], [[VAL]], 34
-; GCN: s_or_b32 [[OR:s[0-9]+]], [[ADD]], 4
-; GCN: s_bfe_u32 s{{[0-9]+}}, [[OR]], 0x10010
 define amdgpu_kernel void @widen_i17_constant_load(i17 addrspace(4)* %arg) {
+; SI-LABEL: widen_i17_constant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_add_i32 s0, s0, 34
+; SI-NEXT:    s_or_b32 s0, s0, 4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    s_bfe_u32 s0, s0, 0x10010
+; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT:    s_mov_b32 s4, 2
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i17_constant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v2, 2
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    v_mov_b32_e32 v3, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_add_i32 s0, s0, 34
+; VI-NEXT:    s_or_b32 s0, s0, 4
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    s_bfe_u32 s0, s0, 0x10010
+; VI-NEXT:    v_mov_b32_e32 v5, s0
+; VI-NEXT:    flat_store_short v[0:1], v4
+; VI-NEXT:    flat_store_byte v[2:3], v5
+; VI-NEXT:    s_endpgm
   %load = load i17, i17 addrspace(4)* %arg, align 4
   %add = add i17 %load, 34
   %or = or i17 %add, 4
@@ -54,13 +167,34 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_f16_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; SI: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[VAL]]
-; SI: v_add_f32_e32 [[ADD:v[0-9]+]], 4.0, [[CVT]]
-
-; VI: v_add_f16_e64 [[ADD:v[0-9]+]], [[VAL]], 4.0
 define amdgpu_kernel void @widen_f16_constant_load(half addrspace(4)* %arg) {
+; SI-LABEL: widen_f16_constant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_mov_b32 s1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s0
+; SI-NEXT:    s_mov_b32 s0, 0
+; SI-NEXT:    v_add_f32_e32 v0, 4.0, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_f16_constant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_f16_e64 v2, s0, 4.0
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load half, half addrspace(4)* %arg, align 4
   %add = fadd half %load, 4.0
   store half %add, half addrspace(1)* null
@@ -68,21 +202,49 @@
 }
 
 ; FIXME: valu usage on VI
-; GCN-LABEL: {{^}}widen_v2i8_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-
-; SI: s_add_i32
-; SI: s_or_b32
-; SI: s_addk_i32
-; SI: s_and_b32
-; SI: s_or_b32
-; SI: s_or_b32
-
-; VI: s_add_i32
-; VI: v_add_u32_sdwa
-; VI: v_or_b32_sdwa
-; VI: v_or_b32_e32
 define amdgpu_kernel void @widen_v2i8_constant_load(<2 x i8> addrspace(4)* %arg) {
+; SI-LABEL: widen_v2i8_constant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s1, s0, 0xff00
+; SI-NEXT:    s_and_b32 s0, s0, 0xffff
+; SI-NEXT:    s_add_i32 s0, s0, 12
+; SI-NEXT:    s_or_b32 s0, s0, 4
+; SI-NEXT:    s_addk_i32 s1, 0x2c00
+; SI-NEXT:    s_and_b32 s0, s0, 0xff
+; SI-NEXT:    s_or_b32 s0, s0, s1
+; SI-NEXT:    s_or_b32 s0, s0, 0x300
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_v2i8_constant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 44
+; VI-NEXT:    v_mov_b32_e32 v1, 3
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s1, s0, 0xffff
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    s_add_i32 s1, s1, 12
+; VI-NEXT:    v_add_u32_sdwa v0, vcc, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; VI-NEXT:    s_or_b32 s0, s1, 4
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    s_and_b32 s0, s0, 0xff
+; VI-NEXT:    v_or_b32_e32 v2, s0, v0
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load <2 x i8>, <2 x i8> addrspace(4)* %arg, align 4
   %add = add <2 x i8> %load, <i8 12, i8 44>
   %or = or <2 x i8> %add, <i8 4, i8 3>
@@ -90,9 +252,41 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}no_widen_i16_constant_divergent_load:
-; GCN: {{buffer|flat}}_load_ushort
 define amdgpu_kernel void @no_widen_i16_constant_divergent_load(i16 addrspace(4)* %arg) {
+; SI-LABEL: no_widen_i16_constant_divergent_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s2, 0
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_mov_b32 s1, 0
+; SI-NEXT:    s_mov_b32 s0, 0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, 0x3e7, v0
+; SI-NEXT:    v_or_b32_e32 v0, 4, v0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: no_widen_i16_constant_divergent_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_ushort v2, v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u16_e32 v2, 0x3e7, v2
+; VI-NEXT:    v_or_b32_e32 v2, 4, v2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = zext i32 %tid to i64
   %gep.arg = getelementptr inbounds i16, i16 addrspace(4)* %arg, i64 %tid.ext
@@ -103,22 +297,72 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i1_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_and_b32 {{s[0-9]+}}, [[VAL]], 1{{$}}
 define amdgpu_kernel void @widen_i1_constant_load(i1 addrspace(4)* %arg) {
+; SI-LABEL: widen_i1_constant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s0, s0, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i1_constant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load i1, i1 addrspace(4)* %arg, align 4
   %and = and i1 %load, true
   store i1 %and, i1 addrspace(1)* null
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i16_zextload_i64_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_and_b32 [[TRUNC:s[0-9]+]], [[VAL]], 0xffff{{$}}
-; GCN: s_addk_i32 [[TRUNC]], 0x3e7
-; GCN: s_or_b32 [[OR:s[0-9]+]], [[TRUNC]], 4
 define amdgpu_kernel void @widen_i16_zextload_i64_constant_load(i16 addrspace(4)* %arg) {
+; SI-LABEL: widen_i16_zextload_i64_constant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s0, s0, 0xffff
+; SI-NEXT:    s_addk_i32 s0, 0x3e7
+; SI-NEXT:    s_or_b32 s0, s0, 4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i16_zextload_i64_constant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_addk_i32 s0, 0x3e7
+; VI-NEXT:    s_or_b32 s0, s0, 4
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load i16, i16 addrspace(4)* %arg, align 4
   %zext = zext i16 %load to i32
   %add = add i32 %zext, 999
@@ -127,12 +371,40 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i1_zext_to_i64_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_and_b32 [[AND:s[0-9]+]], [[VAL]], 1
-; GCN: s_add_u32 [[ADD:s[0-9]+]], [[AND]], 0x3e7
-; GCN: s_addc_u32 s{{[0-9]+}}, 0, 0
 define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(i1 addrspace(4)* %arg) {
+; SI-LABEL: widen_i1_zext_to_i64_constant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s0, s0, 1
+; SI-NEXT:    s_add_u32 s0, s0, 0x3e7
+; SI-NEXT:    s_addc_u32 s1, 0, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i1_zext_to_i64_constant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 1
+; VI-NEXT:    s_add_u32 s0, s0, 0x3e7
+; VI-NEXT:    s_addc_u32 s1, 0, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    s_endpgm
   %load = load i1, i1 addrspace(4)* %arg, align 4
   %zext = zext i1 %load to i64
   %add = add i64 %zext, 999
@@ -140,11 +412,39 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i16_constant32_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_addk_i32 [[VAL]], 0x3e7
-; GCN: s_or_b32 [[OR:s[0-9]+]], [[VAL]], 4
 define amdgpu_kernel void @widen_i16_constant32_load(i16 addrspace(6)* %arg) {
+; SI-LABEL: widen_i16_constant32_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s1, 0
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_addk_i32 s0, 0x3e7
+; SI-NEXT:    s_or_b32 s0, s0, 4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i16_constant32_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s1, 0
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_addk_i32 s0, 0x3e7
+; VI-NEXT:    s_or_b32 s0, s0, 4
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load i16, i16 addrspace(6)* %arg, align 4
   %add = add i16 %load, 999
   %or = or i16 %add, 4
@@ -152,11 +452,37 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}widen_i16_global_invariant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_addk_i32 [[VAL]], 0x3e7
-; GCN: s_or_b32 [[OR:s[0-9]+]], [[VAL]], 1
 define amdgpu_kernel void @widen_i16_global_invariant_load(i16 addrspace(1)* %arg) {
+; SI-LABEL: widen_i16_global_invariant_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s4, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_addk_i32 s0, 0x3e7
+; SI-NEXT:    s_or_b32 s0, s0, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: widen_i16_global_invariant_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_addk_i32 s0, 0x3e7
+; VI-NEXT:    s_or_b32 s0, s0, 1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %load = load i16, i16 addrspace(1)* %arg, align 4, !invariant.load !0
   %add = add i16 %load, 999
   %or = or i16 %add, 1
diff --git a/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll b/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
index 489d4e4..d5570df 100644
--- a/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
+++ b/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
+; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -frame-pointer=all -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
 
 ; ModuleID = '<stdin>'
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll b/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
index 133fc05..b7a252b 100644
--- a/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
+++ b/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
+; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -frame-pointer=all -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
 
 ; ModuleID = '<stdin>'
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll b/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
index 30a388b..b33b333 100644
--- a/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
+++ b/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8
 
 ; rdar://9117613
 
diff --git a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
index 92bdd19..b526b8c 100644
--- a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
+++ b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 | FileCheck %s
 
 ; Do not form Thumb2 ldrd / strd if the offset is not a multiple of 4.
 ; rdar://9133587
diff --git a/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll b/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
index 83c7676..4567b7f 100644
--- a/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
+++ b/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 | FileCheck %s
 
 ; subs r4, #1
 ; cmp r4, 0
diff --git a/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll b/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
index ef33b2f..4ddc728 100644
--- a/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
+++ b/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv7-apple-ios -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 < %s
+; RUN: llc -mtriple=thumbv7-apple-ios -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 < %s
 
 ; CodeGen SplitCriticalEdge() shouldn't try to break an edge to a landing pad.
 ; rdar://11300144
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 4b9812c..f030a5a 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -64,8 +64,12 @@
   define void @test_stores() #0 { ret void }
 
   define void @test_gep() { ret void }
+
+  define void @test_MOVi32imm() #3 { ret void }
+
   define void @test_constant_imm() { ret void }
   define void @test_constant_cimm() { ret void }
+
   define void @test_pointer_constant_unconstrained() { ret void }
   define void @test_pointer_constant_constrained() { ret void }
 
@@ -1481,6 +1485,23 @@
     BX_RET 14, $noreg, implicit $r0
 ...
 ---
+name:            test_MOVi32imm
+# CHECK-LABEL: name: test_MOVi32imm
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+body:             |
+  bb.0:
+    %0(s32) = G_CONSTANT 65537
+    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi32imm 65537
+
+    $r0 = COPY %0(s32)
+    BX_RET 14, $noreg, implicit $r0
+...
+---
 name:            test_constant_imm
 # CHECK-LABEL: name: test_constant_imm
 legalized:       true
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir
new file mode 100644
index 0000000..d3248fe
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir
@@ -0,0 +1,57 @@
+# RUN: llc -mtriple arm-- -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple thumb-- -mattr=+v6t2 -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+  define void @test_constants() { ret void }
+...
+---
+name:            test_constants
+# CHECK-LABEL: name: test_constants
+legalized:       false
+# CHECK: legalized: true
+regBankSelected: false
+selected:        false
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+  - { id: 2, class: _ }
+  - { id: 3, class: _ }
+  - { id: 4, class: _ }
+  - { id: 5, class: _ }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %4(p0) = COPY $r0
+
+    %0(s32) = G_CONSTANT 42
+    ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT 42
+
+    %1(s16) = G_CONSTANT i16 21
+    G_STORE %1(s16), %4(p0) :: (store 2)
+    ; CHECK-NOT: G_CONSTANT i16
+    ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 21
+    ; CHECK: {{%[0-9]+}}:_(s16) = G_TRUNC [[EXT]](s32)
+    ; CHECK-NOT: G_CONSTANT i16
+
+    %2(s8) = G_CONSTANT i8 10
+    G_STORE %2(s8), %4(p0) :: (store 1)
+    ; CHECK-NOT: G_CONSTANT i8
+    ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+    ; CHECK: {{%[0-9]+}}:_(s8) = G_TRUNC [[EXT]](s32)
+    ; CHECK-NOT: G_CONSTANT i8
+
+    %3(s1) = G_CONSTANT i1 1
+    G_STORE %3(s1), %4(p0) :: (store 1)
+    ; CHECK-NOT: G_CONSTANT i1
+    ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK: {{%[0-9]+}}:_(s1) = G_TRUNC [[EXT]](s32)
+    ; CHECK-NOT: G_CONSTANT i1
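+    ; (The legalizer widens constants by sign-extending them, which is why the
+    ; i1 value 1 reappears above as the i32 value -1.)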
+
+    %5(p0) = G_CONSTANT 0
+    G_STORE %5(p0), %4(p0) :: (store 4)
+    ; CHECK: {{%[0-9]+}}:_(p0) = G_CONSTANT 0
+
+    $r0 = COPY %0(s32)
+    BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 0d65fca..bee8b7b 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -9,7 +9,7 @@
 
   define void @test_gep() { ret void }
 
-  define void @test_constants() { ret void }
+  define void @test_constants_s64() { ret void }
 
   define void @test_icmp_s8() { ret void }
   define void @test_icmp_s16() { ret void }
@@ -189,8 +189,8 @@
     BX_RET 14, $noreg, implicit $r0
 ...
 ---
-name:            test_constants
-# CHECK-LABEL: name: test_constants
+name:            test_constants_s64
+# CHECK-LABEL: name: test_constants_s64
 legalized:       false
 # CHECK: legalized: true
 regBankSelected: false
@@ -201,55 +201,21 @@
   - { id: 1, class: _ }
   - { id: 2, class: _ }
   - { id: 3, class: _ }
-  - { id: 4, class: _ }
-  - { id: 5, class: _ }
-  - { id: 6, class: _ }
-  - { id: 7, class: _ }
-  - { id: 8, class: _ }
 body:             |
   bb.0:
     liveins: $r0
 
-    %4(p0) = COPY $r0
+    %0(p0) = COPY $r0
 
-    %0(s32) = G_CONSTANT 42
-    ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT 42
-
-    %1(s16) = G_CONSTANT i16 21
-    G_STORE %1(s16), %4(p0) :: (store 2)
-    ; CHECK-NOT: G_CONSTANT i16
-    ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 21
-    ; CHECK: {{%[0-9]+}}:_(s16) = G_TRUNC [[EXT]](s32)
-    ; CHECK-NOT: G_CONSTANT i16
-
-    %2(s8) = G_CONSTANT i8 10
-    G_STORE %2(s8), %4(p0) :: (store 1)
-    ; CHECK-NOT: G_CONSTANT i8
-    ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
-    ; CHECK: {{%[0-9]+}}:_(s8) = G_TRUNC [[EXT]](s32)
-    ; CHECK-NOT: G_CONSTANT i8
-
-    %3(s1) = G_CONSTANT i1 1
-    G_STORE %3(s1), %4(p0) :: (store 1)
-    ; CHECK-NOT: G_CONSTANT i1
-    ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: {{%[0-9]+}}:_(s1) = G_TRUNC [[EXT]](s32)
-    ; CHECK-NOT: G_CONSTANT i1
-
-    %5(p0) = G_CONSTANT 0
-    G_STORE %5(p0), %4(p0) :: (store 4)
-    ; CHECK: {{%[0-9]+}}:_(p0) = G_CONSTANT 0
-
-    %6(s64) = G_CONSTANT i64 17179869200 ; = 4 * 2 ^ 32 + 16
-    %7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64)
-    G_STORE %7(s32), %4(p0) :: (store 4)
-    G_STORE %8(s32), %4(p0) :: (store 4)
+    %1(s64) = G_CONSTANT i64 17179869200 ; = 4 * 2 ^ 32 + 16
+    %2(s32), %3(s32) = G_UNMERGE_VALUES %1(s64)
+    G_STORE %2(s32), %0(p0) :: (store 4)
+    G_STORE %3(s32), %0(p0) :: (store 4)
     ; CHECK-DAG: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 4
     ; CHECK-DAG: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 16
     ; CHECK-NOT: G_CONSTANT i64
 
-    $r0 = COPY %0(s32)
-    BX_RET 14, $noreg, implicit $r0
+    BX_RET 14, $noreg
 ...
 ---
 name:            test_icmp_s8
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-arithmetic-ops.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-arithmetic-ops.mir
new file mode 100644
index 0000000..cf42a81
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-arithmetic-ops.mir
@@ -0,0 +1,251 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+  define void @test_add_regs() { ret void }
+  define void @test_add_fold_imm() { ret void }
+  define void @test_add_fold_imm12() { ret void }
+  define void @test_add_no_fold_imm() { ret void }
+
+  define void @test_sub_imm_lhs() { ret void }
+  define void @test_sub_imm_rhs() { ret void }
+
+  define void @test_mul() { ret void }
+  define void @test_mla() { ret void }
+...
+---
+name:            test_add_regs
+# CHECK-LABEL: name: test_add_regs
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0, $r1
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+    %1(s32) = COPY $r1
+    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+    %2(s32) = G_ADD %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_add_fold_imm
+# CHECK-LABEL: name: test_add_fold_imm
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+    %2(s32) = G_ADD %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDri [[VREGX]], 786444, 14, $noreg, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_add_fold_imm12
+# CHECK-LABEL: name: test_add_fold_imm12
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 4093
+    %2(s32) = G_ADD %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDri12 [[VREGX]], 4093, 14, $noreg
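+    ; (4093 fits the plain 12-bit immediate form but is not a Thumb2 modified
+    ; immediate, hence t2ADDri12 rather than t2ADDri.)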
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_add_no_fold_imm
+# CHECK-LABEL: name: test_add_no_fold_imm
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 185470479 ; 0x0b0e0e0f
+    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = t2MOVi32imm 185470479
+
+    %2(s32) = G_ADD %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_sub_imm_lhs
+# CHECK-LABEL: name: test_sub_imm_lhs
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+    %2(s32) = G_SUB %1, %0
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2RSBri [[VREGX]], 786444, 14, $noreg, $noreg
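+    ; (With the constant on the left of the subtraction, reverse-subtract is
+    ; selected so the immediate can still be encoded.)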
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_sub_imm_rhs
+# CHECK-LABEL: name: test_sub_imm_rhs
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+    %2(s32) = G_SUB %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2SUBri [[VREGX]], 786444, 14, $noreg, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_mul
+# CHECK-LABEL: name: test_mul
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0, $r1
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+    %1(s32) = COPY $r1
+    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+    %2(s32) = G_MUL %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MUL [[VREGX]], [[VREGY]], 14, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_mla
+# CHECK-LABEL: name: test_mla
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+  - { id: 3, class: gprb }
+  - { id: 4, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0, $r1, $r2
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+    %1(s32) = COPY $r1
+    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+    %2(s32) = COPY $r2
+    ; CHECK: [[VREGZ:%[0-9]+]]:rgpr = COPY $r2
+
+    %3(s32) = G_MUL %0, %1
+    %4(s32) = G_ADD %3, %2
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, $noreg
+
+    $r0 = COPY %4(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-binops.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-binops.mir
deleted file mode 100644
index f71cc1c..0000000
--- a/test/CodeGen/ARM/GlobalISel/thumb-select-binops.mir
+++ /dev/null
@@ -1,135 +0,0 @@
-# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
---- |
-  define void @test_add_regs() { ret void }
-
-  define void @test_mul() { ret void }
-  define void @test_mla() { ret void }
-
-  define void @test_and_regs() { ret void }
-...
----
-name:            test_add_regs
-# CHECK-LABEL: name: test_add_regs
-legalized:       true
-regBankSelected: true
-selected:        false
-# CHECK: selected: true
-registers:
-  - { id: 0, class: gprb }
-  - { id: 1, class: gprb }
-  - { id: 2, class: gprb }
-body:             |
-  bb.0:
-    liveins: $r0, $r1
-
-    %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
-
-    %1(s32) = COPY $r1
-    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
-
-    %2(s32) = G_ADD %0, %1
-    ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
-
-    $r0 = COPY %2(s32)
-    ; CHECK: $r0 = COPY [[VREGRES]]
-
-    BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14, $noreg, implicit $r0
-...
----
-name:            test_mul
-# CHECK-LABEL: name: test_mul
-legalized:       true
-regBankSelected: true
-selected:        false
-# CHECK: selected: true
-registers:
-  - { id: 0, class: gprb }
-  - { id: 1, class: gprb }
-  - { id: 2, class: gprb }
-body:             |
-  bb.0:
-    liveins: $r0, $r1
-
-    %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
-
-    %1(s32) = COPY $r1
-    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
-
-    %2(s32) = G_MUL %0, %1
-    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MUL [[VREGX]], [[VREGY]], 14, $noreg
-
-    $r0 = COPY %2(s32)
-    ; CHECK: $r0 = COPY [[VREGRES]]
-
-    BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14, $noreg, implicit $r0
-...
----
-name:            test_mla
-# CHECK-LABEL: name: test_mla
-legalized:       true
-regBankSelected: true
-selected:        false
-# CHECK: selected: true
-registers:
-  - { id: 0, class: gprb }
-  - { id: 1, class: gprb }
-  - { id: 2, class: gprb }
-  - { id: 3, class: gprb }
-  - { id: 4, class: gprb }
-body:             |
-  bb.0:
-    liveins: $r0, $r1, $r2
-
-    %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
-
-    %1(s32) = COPY $r1
-    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
-
-    %2(s32) = COPY $r2
-    ; CHECK: [[VREGZ:%[0-9]+]]:rgpr = COPY $r2
-
-    %3(s32) = G_MUL %0, %1
-    %4(s32) = G_ADD %3, %2
-    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, $noreg
-
-    $r0 = COPY %4(s32)
-    ; CHECK: $r0 = COPY [[VREGRES]]
-
-    BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14, $noreg, implicit $r0
-...
----
-name:            test_and_regs
-# CHECK-LABEL: name: test_and_regs
-legalized:       true
-regBankSelected: true
-selected:        false
-# CHECK: selected: true
-registers:
-  - { id: 0, class: gprb }
-  - { id: 1, class: gprb }
-  - { id: 2, class: gprb }
-body:             |
-  bb.0:
-    liveins: $r0, $r1
-
-    %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
-
-    %1(s32) = COPY $r1
-    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
-
-    %2(s32) = G_AND %0, %1
-    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2ANDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
-
-    $r0 = COPY %2(s32)
-    ; CHECK: $r0 = COPY [[VREGRES]]
-
-    BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14, $noreg, implicit $r0
-...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-imm.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-imm.mir
new file mode 100644
index 0000000..4979491
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-imm.mir
@@ -0,0 +1,66 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+  define void @test_movi() { ret void }
+  define void @test_movi16() { ret void }
+  define void @test_movi32() { ret void }
+...
+---
+name:            test_movi
+# CHECK-LABEL: name: test_movi
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+body:             |
+  bb.0:
+    %0(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MOVi 786444, 14, $noreg, $noreg
+
+    $r0 = COPY %0(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_movi16
+# CHECK-LABEL: name: test_movi16
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+body:             |
+  bb.0:
+    %0(s32) = G_CONSTANT i32 65533
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MOVi16 65533, 14, $noreg
+
+    $r0 = COPY %0(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_movi32
+# CHECK-LABEL: name: test_movi32
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+body:             |
+  bb.0:
+    %0(s32) = G_CONSTANT i32 185470479 ; 0x0b0e0e0f
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MOVi32imm 185470479
+
+    $r0 = COPY %0(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-logical-ops.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-logical-ops.mir
new file mode 100644
index 0000000..d63c599
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-logical-ops.mir
@@ -0,0 +1,219 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+  define void @test_and_regs() { ret void }
+  define void @test_and_imm() { ret void }
+
+  define void @test_bfc() { ret void }
+  define void @test_no_bfc_bad_mask() { ret void }
+
+  define void @test_mvn() { ret void }
+  define void @test_bic() { ret void }
+  define void @test_orn() { ret void }
+...
+---
+name:            test_and_regs
+# CHECK-LABEL: name: test_and_regs
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0, $r1
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+    %1(s32) = COPY $r1
+    ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+    %2(s32) = G_AND %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2ANDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_and_imm
+# CHECK-LABEL: name: test_and_imm
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+    %2(s32) = G_AND %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2ANDri [[VREGX]], 786444, 14, $noreg, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_bfc
+# CHECK-LABEL: name: test_bfc
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 -65529 ; 0xFFFF0007
+    %2(s32) = G_AND %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2BFC [[VREGX]], -65529, 14, $noreg
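+    ; (The zero bits of 0xFFFF0007 form the single contiguous range [15:3],
+    ; which is exactly the shape of mask BFC can clear.)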
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_no_bfc_bad_mask
+# CHECK-LABEL: name: test_no_bfc_bad_mask
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+    %2(s32) = G_AND %0, %1
+    ; CHECK-NOT: t2BFC
+
+    $r0 = COPY %2(s32)
+
+    BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_mvn
+# CHECK-LABEL: name: test_mvn
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0
+
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+    %1(s32) = G_CONSTANT i32 -1
+    %2(s32) = G_XOR %0, %1
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MVNr [[VREGX]], 14, $noreg, $noreg
+
+    $r0 = COPY %2(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_bic
+# CHECK-LABEL: name: test_bic
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+  - { id: 3, class: gprb }
+  - { id: 4, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0, $r1
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    ; CHECK-DAG: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+    ; CHECK-DAG: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+    %2(s32) = G_CONSTANT i32 -1
+    %3(s32) = G_XOR %1, %2
+
+    %4(s32) = G_AND %0, %3
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2BICrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+    $r0 = COPY %4(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name:            test_orn
+# CHECK-LABEL: name: test_orn
+legalized:       true
+regBankSelected: true
+selected:        false
+# CHECK: selected: true
+registers:
+  - { id: 0, class: gprb }
+  - { id: 1, class: gprb }
+  - { id: 2, class: gprb }
+  - { id: 3, class: gprb }
+  - { id: 4, class: gprb }
+body:             |
+  bb.0:
+    liveins: $r0, $r1
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    ; CHECK-DAG: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+    ; CHECK-DAG: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+    %2(s32) = G_CONSTANT i32 -1
+    %3(s32) = G_XOR %1, %2
+
+    %4(s32) = G_OR %0, %3
+    ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2ORNrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+    $r0 = COPY %4(s32)
+    ; CHECK: $r0 = COPY [[VREGRES]]
+
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/Windows/frame-register.ll b/test/CodeGen/ARM/Windows/frame-register.ll
index 7ecfc1a..6605ffc 100644
--- a/test/CodeGen/ARM/Windows/frame-register.ll
+++ b/test/CodeGen/ARM/Windows/frame-register.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple thumbv7-windows -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple thumbv7-windows -frame-pointer=all -filetype asm -o - %s \
 ; RUN:     | FileCheck %s
 
 declare void @callee(i32)
diff --git a/test/CodeGen/ARM/and-load-combine.ll b/test/CodeGen/ARM/and-load-combine.ll
index 8f08909..ef6a2ad 100644
--- a/test/CodeGen/ARM/and-load-combine.ll
+++ b/test/CodeGen/ARM/and-load-combine.ll
@@ -1549,3 +1549,39 @@
   %and = and i64 %1, -281474976710656
   ret i64 %and
 }
+
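+; (load & 0xff00) << 8 reduces to a byte load of the byte holding bits 15:8
+; followed by a 16-bit left shift, as the checks below verify.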
+; ARM-LABEL: test27:
+; ARM:       @ %bb.0:
+; ARM-NEXT:    ldrb r1, [r0, #1]
+; ARM-NEXT:    lsl r1, r1, #16
+; ARM-NEXT:    str r1, [r0]
+; ARM-NEXT:    bx lr
+;
+; ARMEB-LABEL: test27:
+; ARMEB:       @ %bb.0:
+; ARMEB-NEXT:    ldrb r1, [r0, #2]
+; ARMEB-NEXT:    lsl r1, r1, #16
+; ARMEB-NEXT:    str r1, [r0]
+; ARMEB-NEXT:    bx lr
+;
+; THUMB1-LABEL: test27:
+; THUMB1:       @ %bb.0:
+; THUMB1-NEXT:    ldrb r1, [r0, #1]
+; THUMB1-NEXT:    lsls r1, r1, #16
+; THUMB1-NEXT:    str r1, [r0]
+; THUMB1-NEXT:    bx lr
+;
+; THUMB2-LABEL: test27:
+; THUMB2:       @ %bb.0:
+; THUMB2-NEXT:    ldrb r1, [r0, #1]
+; THUMB2-NEXT:    lsls r1, r1, #16
+; THUMB2-NEXT:    str r1, [r0]
+; THUMB2-NEXT:    bx lr
+define void @test27(i32* nocapture %ptr) {
+entry:
+  %0 = load i32, i32* %ptr, align 4
+  %and = and i32 %0, 65280
+  %shl = shl i32 %and, 8
+  store i32 %shl, i32* %ptr, align 4
+  ret void
+}
diff --git a/test/CodeGen/ARM/atomic-ops-m33.ll b/test/CodeGen/ARM/atomic-ops-m33.ll
new file mode 100644
index 0000000..474ad89
--- /dev/null
+++ b/test/CodeGen/ARM/atomic-ops-m33.ll
@@ -0,0 +1,140 @@
+; RUN: llc -mtriple=thumbv7-none-eabi -mcpu=cortex-m33 -verify-machineinstrs -o -  %s | FileCheck %s
+
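+; Cortex-M33 implements ARMv8-M Mainline, which provides acquire/release loads
+; and stores (lda*/stl*, ldaex*/stlex*), so no dmb barriers should be emitted.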
+define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i8:
+  %old = atomicrmw add i8* @var8, i8 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+  ; r0 below is a reasonable guess but could change: the offset argument is
+  ;  certainly passed in there, though register allocation may move it.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+  ret i8 %old
+}
+
+define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i16:
+  %old = atomicrmw add i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+  ; r0 below is a reasonable guess but could change: the offset argument is
+  ;  certainly passed in there, though register allocation may move it.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+  ret i16 %old
+}
+
+define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i32:
+  %old = atomicrmw add i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+  ; r0 below is a reasonable guess but could change: the offset argument is
+  ;  certainly passed in there, though register allocation may move it.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+  ret i32 %old
+}
+
+define void @test_atomic_load_add_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i64:
+; CHECK: bl __sync_fetch_and_add_8
+   %old = atomicrmw add i64* @var64, i64 %offset monotonic
+  store i64 %old, i64* @var64
+  ret void
+}
+
+define i8 @test_load_acquire_i8(i8* %ptr) {
+; CHECK-LABEL: test_load_acquire_i8:
+; CHECK: ldab r0, [r0]
+  %val = load atomic i8, i8* %ptr seq_cst, align 1
+  ret i8 %val
+}
+
+define i16 @test_load_acquire_i16(i16* %ptr) {
+; CHECK-LABEL: test_load_acquire_i16:
+; CHECK: ldah r0, [r0]
+  %val = load atomic i16, i16* %ptr acquire, align 2
+  ret i16 %val
+}
+
+define i32 @test_load_acquire_i32(i32* %ptr) {
+; CHECK-LABEL: test_load_acquire_i32:
+; CHECK: lda r0, [r0]
+  %val = load atomic i32, i32* %ptr acquire, align 4
+  ret i32 %val
+}
+
+define i64 @test_load_acquire_i64(i64* %ptr) {
+; CHECK-LABEL: test_load_acquire_i64:
+; CHECK: bl __atomic_load
+  %val = load atomic i64, i64* %ptr acquire, align 4
+  ret i64 %val
+}
+
+define void @test_store_release_i8(i8 %val, i8* %ptr) {
+; CHECK-LABEL: test_store_release_i8:
+; CHECK: stlb r0, [r1]
+  store atomic i8 %val, i8* %ptr seq_cst, align 1
+  ret void
+}
+
+define void @test_store_release_i16(i16 %val, i16* %ptr) {
+; CHECK-LABEL: test_store_release_i16:
+; CHECK: stlh r0, [r1]
+  store atomic i16 %val, i16* %ptr release, align 2
+  ret void
+}
+
+define void @test_store_release_i32(i32 %val, i32* %ptr) {
+; CHECK-LABEL: test_store_release_i32:
+; CHECK: stl r0, [r1]
+  store atomic i32 %val, i32* %ptr seq_cst, align 4
+  ret void
+}
+
+define void @test_store_release_i64(i64 %val, i64* %ptr) {
+; CHECK-LABEL: test_store_release_i64:
+; CHECK: bl __atomic_store
+  store atomic i64 %val, i64* %ptr seq_cst, align 4
+  ret void
+}
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
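
Cortex-M33 implements ARMv8-M Mainline, which provides the acquire/release exclusives (ldaex*/stlex*) and the plain acquire/release loads and stores (lda*/stl*), so the file above checks that no dmb barriers are emitted around atomics; only the 64-bit cases fall back to libcalls. A sketch of the same mapping for another rmw operation, under the same RUN line (the function and global names are illustrative):

; RUN: llc -mtriple=thumbv7-none-eabi -mcpu=cortex-m33 -verify-machineinstrs %s -o -
@flag = global i32 0

define i32 @swap_acq_rel(i32 %new) {
  ; acq_rel should select an ldaex/stlex loop with no dmb on either side.
  %old = atomicrmw xchg i32* @flag, i32 %new acq_rel
  ret i32 %old
}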
diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll
index 3d0c941..fefe0c8 100644
--- a/test/CodeGen/ARM/build-attributes.ll
+++ b/test/CodeGen/ARM/build-attributes.ll
@@ -3,23 +3,23 @@
 
 ; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale -mattr=+strict-align | FileCheck %s --check-prefix=XSCALE
 ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST
 ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
 ; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M
-; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
+; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
 ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align | FileCheck %s --check-prefix=ARM1156T2F-S
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast  | FileCheck %s --check-prefix=ARM1156T2F-S-FAST
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast  | FileCheck %s --check-prefix=ARM1156T2F-S-FAST
 ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8
 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
@@ -31,35 +31,35 @@
 ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi | FileCheck %s --check-prefix=V8MMAINLINE
 ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi -mattr=+dsp | FileCheck %s --check-prefix=V8MMAINLINE_DSP
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,+d16 | FileCheck %s --check-prefix=CORTEX-A5-NONEON
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A5-NOFPU
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A8-HARD
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A12-NOFPU
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s --check-prefix=CORTEX-A17-DEFAULT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A17-NOFPU
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST
 
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-no-trapping-fp-math | FileCheck %s --check-prefix=NO-TRAPPING-MATH
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=ieee | FileCheck %s --check-prefix=DENORMAL-IEEE
@@ -74,87 +74,87 @@
 
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus | FileCheck %s --check-prefix=CORTEX-M0PLUS
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 | FileCheck %s --check-prefix=CORTEX-M1
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align | FileCheck %s --check-prefix=SC000
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST
 ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 | FileCheck %s --check-prefix=SC300
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST
 ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT
-; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST
 ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE
-; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST
 ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE
 ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23
 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=CORTEX-M33
-; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST
 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4 | FileCheck %s --check-prefix=CORTEX-R4
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4f | FileCheck %s --check-prefix=CORTEX-R4F
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5
-; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 | FileCheck %s --check-prefix=CORTEX-R7
-; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 | FileCheck %s --check-prefix=CORTEX-R8
-; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST
 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 | FileCheck %s --check-prefix=CORTEX-A32
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 | FileCheck %s --check-prefix=CORTEX-A35
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=CORTEX-A72
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a73 | FileCheck %s --check-prefix=CORTEX-A73
 ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1 | FileCheck %s --check-prefix=EXYNOS-M1
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2 | FileCheck %s --check-prefix=EXYNOS-M2
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 | FileCheck %s --check-prefix=EXYNOS-M3
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 | FileCheck %s --check-prefix=EXYNOS-M4
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
-; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST
+; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST
 ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s  --check-prefix=CORTEX-A7-CHECK
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s  --check-prefix=CORTEX-A7-CHECK-FAST
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s  --check-prefix=CORTEX-A7-CHECK-FAST
 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST
 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon  -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon  -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST
 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,+d16,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
 ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC
 ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER
@@ -1623,7 +1623,7 @@
 ; EXYNOS-M4:  .eabi_attribute 8, 1
 ; EXYNOS-M4:  .eabi_attribute 9, 2
 ; EXYNOS-M4:  .fpu crypto-neon-fp-armv8
-; EXYNOS-M4:  .eabi_attribute 12, 3
+; EXYNOS-M4:  .eabi_attribute 12, 4
 ; EXYNOS-M4-NOT:  .eabi_attribute 27
 ; EXYNOS-M4:  .eabi_attribute 36, 1
 ; EXYNOS-M4:  .eabi_attribute 42, 1
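
Two independent updates land in this file: the mechanical migration from the removed boolean -disable-fp-elim flag to the tri-state -frame-pointer option (all / non-leaf / none), and a bumped Tag_Advanced_SIMD_arch value (attribute 12) for Exynos-M4. A sketch of how the new flag behaves on a trivial function; the expectations are stated as comments rather than verified assertions:

; RUN: llc -mtriple=armv7-linux-gnueabi -frame-pointer=all %s -o -
; RUN: llc -mtriple=armv7-linux-gnueabi -frame-pointer=none %s -o -
define i32 @leaf(i32 %x) {
  ; With -frame-pointer=all the prologue should set up r11 even for this
  ; leaf function; with -frame-pointer=none no frame setup is expected.
  %r = add i32 %x, 1
  ret i32 %r
}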
diff --git a/test/CodeGen/ARM/constantpool-promote-dbg.ll b/test/CodeGen/ARM/constantpool-promote-dbg.ll
index 4a6a7fe..2611310 100644
--- a/test/CodeGen/ARM/constantpool-promote-dbg.ll
+++ b/test/CodeGen/ARM/constantpool-promote-dbg.ll
@@ -25,14 +25,14 @@
 !llvm.module.flags = !{!3, !4, !5, !6}
 !llvm.ident = !{!7}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0 (http://llvm.org/git/clang.git 075a2bc2570dfcbb6d6aed6c836e4c62b37afea6)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "/Users/jammol01/Code/test.c", directory: "/Users/jammol01/Code/llvm-git/build")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
 !5 = !{i32 1, !"wchar_size", i32 4}
 !6 = !{i32 1, !"min_enum_size", i32 4}
-!7 = !{!"clang version 3.9.0 (http://llvm.org/git/clang.git 075a2bc2570dfcbb6d6aed6c836e4c62b37afea6)"}
+!7 = !{!"clang version 3.9.0"}
 !8 = distinct !DISubprogram(name: "fn1", scope: !1, file: !1, line: 1, type: !9, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
 !9 = !DISubroutineType(types: !10)
 !10 = !{!11}
diff --git a/test/CodeGen/ARM/copy-by-struct-i32.ll b/test/CodeGen/ARM/copy-by-struct-i32.ll
new file mode 100644
index 0000000..a575068
--- /dev/null
+++ b/test/CodeGen/ARM/copy-by-struct-i32.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv7-unknown-linux < %s -stop-before=expand-isel-pseudos | FileCheck --check-prefix=BEFORE-EXPAND %s
+; RUN: llc -mtriple=armv7-unknown-linux < %s | FileCheck --check-prefix=ASSEMBLY %s
+
+; Check that COPY_STRUCT_BYVAL_I32 carries CPSR as an implicit-def operand.
+; BEFORE-EXPAND: COPY_STRUCT_BYVAL_I32 {{.*}} implicit-def dead $cpsr
+; BEFORE-EXPAND: COPY_STRUCT_BYVAL_I32 {{.*}} implicit-def dead $cpsr
+
+%struct.anon = type { i32, i32, i32, i32, i32, i32, i32, %struct.f, i32, i64, i32 }
+%struct.f = type { i32, i32, i32, i32, i32 }
+
+define arm_aapcscc void @s(i64* %q, %struct.anon* %p) {
+; ASSEMBLY-LABEL: s:
+; ASSEMBLY:       @ %bb.0: @ %entry
+; ASSEMBLY-NEXT:    push {r4, r5, r11, lr}
+; ASSEMBLY-NEXT:    sub sp, sp, #136
+; ASSEMBLY-NEXT:    ldrd r4, r5, [r0]
+; ASSEMBLY-NEXT:    add lr, sp, #56
+; ASSEMBLY-NEXT:    ldm r1, {r0, r12}
+; ASSEMBLY-NEXT:    subs r4, r4, #1
+; ASSEMBLY-NEXT:    sbc r5, r5, #0
+; ASSEMBLY-NEXT:    ldr r2, [r1, #8]
+; ASSEMBLY-NEXT:    ldr r3, [r1, #12]
+; ASSEMBLY-NEXT:    str r5, [sp, #132]
+; ASSEMBLY-NEXT:    add r5, r1, #16
+; ASSEMBLY-NEXT:    str r4, [sp, #128]
+; ASSEMBLY-NEXT:    mov r4, sp
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT:    movw r4, #72
+; ASSEMBLY-NEXT:  .LBB0_1: @ %entry
+; ASSEMBLY-NEXT:    @ =>This Inner Loop Header: Depth=1
+; ASSEMBLY-NEXT:    vld1.32 {d16}, [r1]!
+; ASSEMBLY-NEXT:    subs r4, r4, #8
+; ASSEMBLY-NEXT:    vst1.32 {d16}, [lr]!
+; ASSEMBLY-NEXT:    bne .LBB0_1
+; ASSEMBLY-NEXT:  @ %bb.2: @ %entry
+; ASSEMBLY-NEXT:    mov r1, r12
+; ASSEMBLY-NEXT:    bl r
+; ASSEMBLY-NEXT:    add sp, sp, #136
+; ASSEMBLY-NEXT:    pop {r4, r5, r11, pc}
+entry:
+  %0 = load i64, i64* %q, align 8
+  %sub = add nsw i64 %0, -1
+  tail call arm_aapcscc void bitcast (void (...)* @r to void (%struct.anon*, %struct.anon*, i64)*)(%struct.anon* byval nonnull align 8 %p, %struct.anon* byval nonnull align 8 %p, i64 %sub)
+  ret void
+}
+
+declare arm_aapcscc void @r(...)
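
The point of this new test is that the COPY_STRUCT_BYVAL_I32 pseudo must declare $cpsr as an implicit-def: its post-isel expansion emits a subs-based copy loop (the .LBB0_1 loop above), so the pseudo has to advertise the CPSR clobber to everything that runs before expansion. A reduced sketch of the trigger, with hypothetical names:

; RUN: llc -mtriple=armv7-unknown-linux -stop-before=expand-isel-pseudos %s -o -
%big = type { [32 x i32] }

declare arm_aapcscc void @sink(%big* byval align 4)

define arm_aapcscc void @copy(%big* %p) {
entry:
  ; The byval copy should appear as COPY_STRUCT_BYVAL_I32 with
  ; "implicit-def dead $cpsr" in the MIR printed before expansion.
  call arm_aapcscc void @sink(%big* byval align 4 %p)
  ret void
}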
diff --git a/test/CodeGen/ARM/crash-O0.ll b/test/CodeGen/ARM/crash-O0.ll
index bfbab8a..29110fc 100644
--- a/test/CodeGen/ARM/crash-O0.ll
+++ b/test/CodeGen/ARM/crash-O0.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -relocation-model=pic -disable-fp-elim -no-integrated-as
+; RUN: llc < %s -O0 -relocation-model=pic -frame-pointer=all -no-integrated-as
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64-n32"
 target triple = "armv6-apple-darwin10"
 
diff --git a/test/CodeGen/ARM/crash-greedy-v6.ll b/test/CodeGen/ARM/crash-greedy-v6.ll
index 287c081..d3c5057 100644
--- a/test/CodeGen/ARM/crash-greedy-v6.ll
+++ b/test/CodeGen/ARM/crash-greedy-v6.ll
@@ -1,5 +1,5 @@
-; RUN: llc -disable-fp-elim -relocation-model=pic < %s
-; RUN: llc -disable-fp-elim -relocation-model=pic -O0 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
+; RUN: llc -frame-pointer=all -relocation-model=pic < %s
+; RUN: llc -frame-pointer=all -relocation-model=pic -O0 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
 target triple = "armv6-apple-ios"
 
 ; Reduced from 177.mesa. This test causes a live range split before an LDR_POST instruction.
diff --git a/test/CodeGen/ARM/crash-greedy.ll b/test/CodeGen/ARM/crash-greedy.ll
index 5320a16..444505f 100644
--- a/test/CodeGen/ARM/crash-greedy.ll
+++ b/test/CodeGen/ARM/crash-greedy.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -regalloc=greedy -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -regalloc=greedy -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all -verify-machineinstrs | FileCheck %s
 ;
 ; ARM tests that crash or fail with the greedy register allocator.
 
diff --git a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
index 86291aa..de24681 100644
--- a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
+++ b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
@@ -1,50 +1,52 @@
 ; RUN: llc -mtriple armv7 %s -o - | FileCheck %s
 
-; CHECK-LABEL: f:
 define float @f(<4 x i16>* nocapture %in) {
-  ; CHECK: vld1
-  ; CHECK: vmovl.u16
-  ; CHECK-NOT: vand
+; CHECK-LABEL: f:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.16 {d16}, [r0:64]
+; CHECK-NEXT:    vmovl.u16 q8, d16
+; CHECK-NEXT:    vcvt.f32.u32 q0, q8
+; CHECK-NEXT:    vadd.f32 s4, s0, s1
+; CHECK-NEXT:    vadd.f32 s0, s4, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
   %1 = load <4 x i16>, <4 x i16>* %in
-  ; CHECK: vcvt.f32.u32
   %2 = uitofp <4 x i16> %1 to <4 x float>
   %3 = extractelement <4 x float> %2, i32 0
   %4 = extractelement <4 x float> %2, i32 1
   %5 = extractelement <4 x float> %2, i32 2
 
-  ; CHECK: vadd.f32
   %6 = fadd float %3, %4
   %7 = fadd float %6, %5
 
   ret float %7
 }
 
-; CHECK-LABEL: g:
 define float @g(<4 x i16>* nocapture %in) {
-  ; CHECK: vldr
+; CHECK-LABEL: g:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vmov.u16 r0, d16[0]
+; CHECK-NEXT:    vmov s0, r0
+; CHECK-NEXT:    vcvt.f32.u32 s0, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
   %1 = load <4 x i16>, <4 x i16>* %in
-
-  ; For now we're generating a vmov.16 and a uxth instruction.
-  ; The uxth is redundant, and we should be able to extend without
-  ; having to generate cross-domain copies. Once we can do this
-  ; we should modify the checks below.
-
-  ; CHECK: uxth
   %2 = extractelement <4 x i16> %1, i32 0
-  ; CHECK: vcvt.f32.u32
   %3 = uitofp i16 %2 to float
   ret float %3
 }
 
 ; Make sure we generate a zext from <4 x i8> to <4 x i32>.
-
-; CHECK-LABEL: h:
-; CHECK: vld1.32
-; CHECK: vmovl.u8 q8, d16
-; CHECK: vmovl.u16 q8, d16
-; CHECK: vmov r0, r1, d16
-; CHECK: vmov r2, r3, d17
 define <4 x i32> @h(<4 x i8> *%in) {
+; CHECK-LABEL: h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT:    vmovl.u8 q8, d16
+; CHECK-NEXT:    vmovl.u16 q8, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %1 = load <4 x i8>, <4 x i8>* %in, align 4
   %2 = extractelement <4 x i8> %1, i32 0
   %3 = zext i8 %2 to i32
@@ -60,3 +62,79 @@
   %13 = insertelement <4 x i32> %10, i32 %12, i32 3
   ret <4 x i32> %13
 }
+
+define float @i(<4 x i16>* nocapture %in) {
+  ; FIXME: The vmov.u + sxt pair could be folded into a single vmov.s
+; CHECK-LABEL: i:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vmov.u16 r0, d16[0]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmov s0, r0
+; CHECK-NEXT:    vcvt.f32.s32 s0, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  %1 = load <4 x i16>, <4 x i16>* %in
+  %2 = extractelement <4 x i16> %1, i32 0
+  %3 = sitofp i16 %2 to float
+  ret float %3
+}
+
+define float @j(<8 x i8>* nocapture %in) {
+; CHECK-LABEL: j:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vmov.u8 r0, d16[7]
+; CHECK-NEXT:    vmov s0, r0
+; CHECK-NEXT:    vcvt.f32.u32 s0, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  %1 = load <8 x i8>, <8 x i8>* %in
+  %2 = extractelement <8 x i8> %1, i32 7
+  %3 = uitofp i8 %2 to float
+  ret float %3
+}
+
+define float @k(<8 x i8>* nocapture %in) {
+; FIXME: The vmov.u + sxt pair could be folded into a single vmov.s
+; CHECK-LABEL: k:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vmov.u8 r0, d16[7]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmov s0, r0
+; CHECK-NEXT:    vcvt.f32.s32 s0, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  %1 = load <8 x i8>, <8 x i8>* %in
+  %2 = extractelement <8 x i8> %1, i32 7
+  %3 = sitofp i8 %2 to float
+  ret float %3
+}
+
+define float @KnownUpperZero(<4 x i16> %v) {
+; FIXME: the uxtb instructions are not required
+; CHECK-LABEL: KnownUpperZero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i16 d16, #0x3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vand d16, d17, d16
+; CHECK-NEXT:    vmov.u16 r0, d16[0]
+; CHECK-NEXT:    vmov.u16 r1, d16[3]
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    vmov s0, r0
+; CHECK-NEXT:    uxtb r0, r1
+; CHECK-NEXT:    vmov s2, r0
+; CHECK-NEXT:    vcvt.f32.s32 s0, s0
+; CHECK-NEXT:    vcvt.f32.s32 s2, s2
+; CHECK-NEXT:    vadd.f32 s0, s2, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  %1 = and <4 x i16> %v, <i16 3,i16 3,i16 3,i16 3>
+  %2 = extractelement <4 x i16> %1, i32 3
+  %3 = extractelement <4 x i16> %1, i32 0
+  %sinf1 = sitofp i16 %2 to float
+  %sinf2 = sitofp i16 %3 to float
+  %sum =   fadd float %sinf1, %sinf2
+  ret float %sum
+}
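
Converting this file to autogenerated assertions makes the two remaining FIXMEs visible in full: the unsigned lane move plus explicit sign-extend (vmov.u16 + sxth, vmov.u8 + sxtb) that could in principle be a single signed lane move, and the redundant uxtb in KnownUpperZero where the mask already proves the upper bits are zero. A sketch of the underlying signed-extract pattern on another lane (illustrative only):

; RUN: llc -mtriple=armv7 %s -o -
define float @signed_lane2(<4 x i16>* %in) {
  ; Currently expected to lower via vmov.u16 + sxth before the vcvt,
  ; like @i above; a single vmov.s16 would suffice.
  %v = load <4 x i16>, <4 x i16>* %in
  %e = extractelement <4 x i16> %v, i32 2
  %f = sitofp i16 %e to float
  ret float %f
}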
diff --git a/test/CodeGen/ARM/dbg-range-extension.mir b/test/CodeGen/ARM/dbg-range-extension.mir
index f2b174a..0a48ba8 100644
--- a/test/CodeGen/ARM/dbg-range-extension.mir
+++ b/test/CodeGen/ARM/dbg-range-extension.mir
@@ -122,7 +122,7 @@
   !4 = !{i32 2, !"Debug Info Version", i32 3}
   !5 = !{i32 1, !"wchar_size", i32 4}
   !6 = !{i32 1, !"min_enum_size", i32 4}
-  !7 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git b8f10df3679b36f51e1de7c4351b82d297825089) (http://llvm.org/git/llvm.git c2a5d16d1e3b8c49f5bbb1ff87a76ac4f88edb89)"}
+  !7 = !{!"clang version 4.0.0"}
   !8 = distinct !DISubprogram(name: "func", scope: !1, file: !1, line: 2, type: !9, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
   !9 = !DISubroutineType(types: !10)
   !10 = !{null, !11}
diff --git a/test/CodeGen/ARM/debug-frame-large-stack.ll b/test/CodeGen/ARM/debug-frame-large-stack.ll
index b816725..e5d2425 100644
--- a/test/CodeGen/ARM/debug-frame-large-stack.ll
+++ b/test/CodeGen/ARM/debug-frame-large-stack.ll
@@ -1,4 +1,4 @@
-; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-netbsd-eabi -disable-fp-elim| FileCheck %s --check-prefix=CHECK-ARM
+; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-netbsd-eabi -frame-pointer=all| FileCheck %s --check-prefix=CHECK-ARM
 ; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-netbsd-eabi | FileCheck %s --check-prefix=CHECK-ARM-FP-ELIM
 
 define void @test1() {
diff --git a/test/CodeGen/ARM/debug-frame-no-debug.ll b/test/CodeGen/ARM/debug-frame-no-debug.ll
index 8a07f26..8f3965a 100644
--- a/test/CodeGen/ARM/debug-frame-no-debug.ll
+++ b/test/CodeGen/ARM/debug-frame-no-debug.ll
@@ -8,7 +8,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP-ELIM
 
 ; RUN: llc -mtriple thumb-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-THUMB-FP
 
 ;-------------------------------------------------------------------------------
diff --git a/test/CodeGen/ARM/debug-frame-vararg.ll b/test/CodeGen/ARM/debug-frame-vararg.ll
index e675647..c9dcc0b 100644
--- a/test/CodeGen/ARM/debug-frame-vararg.ll
+++ b/test/CodeGen/ARM/debug-frame-vararg.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s | FileCheck %s --check-prefix=CHECK-FP
-; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-FP-ELIM
+; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s -frame-pointer=all | FileCheck %s --check-prefix=CHECK-FP-ELIM
 ; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s | FileCheck %s --check-prefix=CHECK-THUMB-FP
-; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
+; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s -frame-pointer=all | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
 
 ; Tests that the initial space allocated to the varargs on the stack is
 ; taken into account in the .cfi_ directives.
diff --git a/test/CodeGen/ARM/debug-frame.ll b/test/CodeGen/ARM/debug-frame.ll
index f033363..6efe58a 100644
--- a/test/CodeGen/ARM/debug-frame.ll
+++ b/test/CodeGen/ARM/debug-frame.ll
@@ -4,18 +4,18 @@
 ; are properly generated or not.
 
 ; We have to check several cases:
-; (1) arm with -disable-fp-elim
-; (2) arm without -disable-fp-elim
-; (3) armv7 with -disable-fp-elim
-; (4) armv7 without -disable-fp-elim
-; (5) thumb with -disable-fp-elim
-; (6) thumb without -disable-fp-elim
-; (7) thumbv7 with -disable-fp-elim
-; (8) thumbv7 without -disable-fp-elim
+; (1) arm with -frame-pointer=all
+; (2) arm without -frame-pointer=all
+; (3) armv7 with -frame-pointer=all
+; (4) armv7 without -frame-pointer=all
+; (5) thumb with -frame-pointer=all
+; (6) thumb without -frame-pointer=all
+; (7) thumbv7 with -frame-pointer=all
+; (8) thumbv7 without -frame-pointer=all
 ; (9) thumbv7 with -no-integrated-as
 
 ; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP
 
 ; RUN: llc -mtriple arm-unknown-linux-gnueabi \
@@ -23,7 +23,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP-ELIM
 
 ; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP
 
 ; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
@@ -31,7 +31,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
 
 ; RUN: llc -mtriple thumbv5-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-THUMB-FP
 
 ; RUN: llc -mtriple thumbv5-unknown-linux-gnueabi \
@@ -39,7 +39,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
 
 ; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP
 
 ; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
@@ -47,7 +47,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP-ELIM
 
 ; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -no-integrated-as -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -no-integrated-as -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP-NOIAS
 
 ;-------------------------------------------------------------------------------
diff --git a/test/CodeGen/ARM/disable-fp-elim.ll b/test/CodeGen/ARM/disable-fp-elim.ll
index dafeda2..ddbe365 100644
--- a/test/CodeGen/ARM/disable-fp-elim.ll
+++ b/test/CodeGen/ARM/disable-fp-elim.ll
@@ -1,9 +1,9 @@
 ; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -O1 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
-; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -disable-fp-elim -O1 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
-; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -disable-fp-elim=false -O1 | FileCheck %s --check-prefix=ENABLE-FP-ELIM
-; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -disable-fp-elim=false -O0 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
+; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -frame-pointer=all -O1 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
+; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -frame-pointer=none -O1 | FileCheck %s --check-prefix=ENABLE-FP-ELIM
+; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -frame-pointer=none -O0 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
 
-; Check that command line option "-disable-fp-elim" overrides function attribute
+; Check that command line option "-frame-pointer=all" overrides function attribute
 ; "no-frame-pointer-elim". Also, check frame pointer elimination is disabled
 ; when fast-isel is used.
 
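The interesting RUN-line change here is semantic, not just a rename: there is no spelled-out equivalent of the old -disable-fp-elim=false, so those runs become -frame-pointer=none, and the test still verifies that an explicit command-line choice beats the "no-frame-pointer-elim" function attribute. A sketch of the attribute side of that interaction (the attribute value and function are illustrative):

; RUN: llc -mtriple=armv7-none-linux-gnueabi -frame-pointer=none -O1 %s -o -
define i32 @wants_fp(i32 %x) "no-frame-pointer-elim"="true" {
  ; The attribute asks for a frame pointer, but -frame-pointer=none on the
  ; command line should win, as the DISABLE/ENABLE prefixes above check.
  %r = add i32 %x, 1
  ret i32 %r
}
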
diff --git a/test/CodeGen/ARM/ehabi-unwind.ll b/test/CodeGen/ARM/ehabi-unwind.ll
index a86f340..57d3eda 100644
--- a/test/CodeGen/ARM/ehabi-unwind.ll
+++ b/test/CodeGen/ARM/ehabi-unwind.ll
@@ -1,6 +1,6 @@
 ; Test that the EHABI unwind instruction generator does not encounter any
 ; unfamiliar instructions.
-; RUN: llc < %s -mtriple=thumbv7 -disable-fp-elim
+; RUN: llc < %s -mtriple=thumbv7 -frame-pointer=all
 ; RUN: llc < %s -mtriple=thumbv7
 
 define void @_Z1fv() nounwind {
diff --git a/test/CodeGen/ARM/ehabi.ll b/test/CodeGen/ARM/ehabi.ll
index f5a433b..5c4a2b6 100644
--- a/test/CodeGen/ARM/ehabi.ll
+++ b/test/CodeGen/ARM/ehabi.ll
@@ -13,13 +13,13 @@
 ;     nounwind function attribute.
 
 ; We have to check several cases:
-; (1) arm with -disable-fp-elim
-; (2) arm without -disable-fp-elim
-; (3) armv7 with -disable-fp-elim
-; (4) armv7 without -disable-fp-elim
+; (1) arm with -frame-pointer=all
+; (2) arm without -frame-pointer=all
+; (3) armv7 with -frame-pointer=all
+; (4) armv7 without -frame-pointer=all
 
 ; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP
 
 ; RUN: llc -mtriple arm-unknown-linux-gnueabi \
@@ -27,7 +27,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP-ELIM
 
 ; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP
 
 ; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
@@ -35,7 +35,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
 
 ; RUN: llc -mtriple arm-unknown-linux-musleabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP
 
 ; RUN: llc -mtriple arm-unknown-linux-musleabi \
@@ -43,7 +43,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP-ELIM
 
 ; RUN: llc -mtriple armv7-unknown-linux-musleabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP
 
 ; RUN: llc -mtriple armv7-unknown-linux-musleabi \
@@ -51,7 +51,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
 
 ; RUN: llc -mtriple arm-unknown-linux-androideabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP
 
 ; RUN: llc -mtriple arm-unknown-linux-androideabi \
@@ -59,7 +59,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-FP-ELIM
 
 ; RUN: llc -mtriple armv7-unknown-linux-androideabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP
 
 ; RUN: llc -mtriple armv7-unknown-linux-androideabi \
@@ -67,7 +67,7 @@
 ; RUN:   | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
 
 ; RUN: llc -mtriple arm-unknown-netbsd-eabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=DWARF-FP
 
 ; RUN: llc -mtriple arm-unknown-netbsd-eabi \
@@ -75,7 +75,7 @@
 ; RUN:   | FileCheck %s --check-prefix=DWARF-FP-ELIM
 
 ; RUN: llc -mtriple armv7-unknown-netbsd-eabi \
-; RUN:     -disable-fp-elim -filetype=asm -o - %s \
+; RUN:     -frame-pointer=all -filetype=asm -o - %s \
 ; RUN:   | FileCheck %s --check-prefix=DWARF-V7-FP
 
 ; RUN: llc -mtriple armv7-unknown-netbsd-eabi \
diff --git a/test/CodeGen/ARM/fold-stack-adjust.ll b/test/CodeGen/ARM/fold-stack-adjust.ll
index eb32ee5..6b86c6a 100644
--- a/test/CodeGen/ARM/fold-stack-adjust.ll
+++ b/test/CodeGen/ARM/fold-stack-adjust.ll
@@ -1,9 +1,9 @@
 ; Disable shrink-wrapping on the first test; otherwise we wouldn't
 ; exercise the path for PR18136.
 ; RUN: llc -mtriple=thumbv7-apple-none-macho < %s -enable-shrink-wrap=false | FileCheck %s
-; RUN: llc -mtriple=thumbv6m-apple-none-macho -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-T1
-; RUN: llc -mtriple=thumbv7-apple-darwin-ios -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-IOS
-; RUN: llc -mtriple=thumbv7--linux-gnueabi -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-LINUX
+; RUN: llc -mtriple=thumbv6m-apple-none-macho -frame-pointer=all < %s | FileCheck %s --check-prefix=CHECK-T1
+; RUN: llc -mtriple=thumbv7-apple-darwin-ios -frame-pointer=all < %s | FileCheck %s --check-prefix=CHECK-IOS
+; RUN: llc -mtriple=thumbv7--linux-gnueabi -frame-pointer=all < %s | FileCheck %s --check-prefix=CHECK-LINUX
 
 
 declare void @bar(i8*)
diff --git a/test/CodeGen/ARM/frame-register.ll b/test/CodeGen/ARM/frame-register.ll
index 0cc5005..c008b21 100644
--- a/test/CodeGen/ARM/frame-register.ll
+++ b/test/CodeGen/ARM/frame-register.ll
@@ -1,13 +1,13 @@
-; RUN: llc -mtriple arm-eabi -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple arm-eabi -frame-pointer=all -filetype asm -o - %s \
 ; RUN:     | FileCheck -check-prefix CHECK-ARM %s
 
-; RUN: llc -mtriple thumb-eabi -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple thumb-eabi -frame-pointer=all -filetype asm -o - %s \
 ; RUN:     | FileCheck -check-prefix CHECK-THUMB %s
 
-; RUN: llc -mtriple arm-darwin -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple arm-darwin -frame-pointer=all -filetype asm -o - %s \
 ; RUN:     | FileCheck -check-prefix CHECK-DARWIN-ARM %s
 
-; RUN: llc -mtriple thumb-darwin -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple thumb-darwin -frame-pointer=all -filetype asm -o - %s \
 ; RUN:     | FileCheck -check-prefix CHECK-DARWIN-THUMB %s
 
 declare void @callee(i32)
diff --git a/test/CodeGen/ARM/hello.ll b/test/CodeGen/ARM/hello.ll
index 2641059..bdeb41d 100644
--- a/test/CodeGen/ARM/hello.ll
+++ b/test/CodeGen/ARM/hello.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=arm-eabi %s -o /dev/null
 ; RUN: llc -mtriple=armv6-linux-gnueabi %s -o - | FileCheck %s
 
-; RUN: llc -mtriple=armv6-linux-gnu --disable-fp-elim %s -o - \
+; RUN: llc -mtriple=armv6-linux-gnu --frame-pointer=all %s -o - \
 ; RUN:  | FileCheck %s -check-prefix CHECK-FP-ELIM
 
 ; RUN: llc -mtriple=armv6-apple-ios %s -o - \
diff --git a/test/CodeGen/ARM/inline-asm-clobber.ll b/test/CodeGen/ARM/inline-asm-clobber.ll
index 458949a..cb2069c 100644
--- a/test/CodeGen/ARM/inline-asm-clobber.ll
+++ b/test/CodeGen/ARM/inline-asm-clobber.ll
@@ -3,7 +3,7 @@
 ; RUN: llc <%s -mtriple=arm-none-eabi -relocation-model=rwpi 2>&1 \
 ; RUN:   | FileCheck %s -check-prefix=RWPI
 
-; RUN: llc <%s -mtriple=arm-none-eabi --disable-fp-elim 2>&1 \
+; RUN: llc <%s -mtriple=arm-none-eabi --frame-pointer=all 2>&1 \
 ; RUN:   | FileCheck %s -check-prefix=NO_FP_ELIM
 
 ; CHECK: warning: inline asm clobber list contains reserved registers: SP, PC
diff --git a/test/CodeGen/ARM/ldstrex-m.ll b/test/CodeGen/ARM/ldstrex-m.ll
index 5b717f7..713fb9e 100644
--- a/test/CodeGen/ARM/ldstrex-m.ll
+++ b/test/CodeGen/ARM/ldstrex-m.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7m-none-eabi -mcpu=cortex-m4 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.main-none-eabi | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.base-none-eabi | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7m-none-eabi -mcpu=cortex-m4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V7
+; RUN: llc < %s -mtriple=thumbv8m.main-none-eabi | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8
+; RUN: llc < %s -mtriple=thumbv8m.base-none-eabi | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8
 
 ; CHECK-LABEL: f0:
 ; CHECK-NOT: ldrexd
@@ -28,7 +28,8 @@
 }
 
 ; CHECK-LABEL: f3:
-; CHECK: ldr
+; CHECK-V7: ldr
+; CHECK-V8: lda
 define i32 @f3(i32* %p) nounwind readonly {
 entry:
   %0 = load atomic i32, i32* %p seq_cst, align 4
@@ -36,7 +37,8 @@
 }
 
 ; CHECK-LABEL: f4:
-; CHECK: ldrb
+; CHECK-V7: ldrb
+; CHECK-V8: ldab
 define i8 @f4(i8* %p) nounwind readonly {
 entry:
   %0 = load atomic i8, i8* %p seq_cst, align 4
@@ -44,7 +46,8 @@
 }
 
 ; CHECK-LABEL: f5:
-; CHECK: str
+; CHECK-V7: str
+; CHECK-V8: stl
 define void @f5(i32* %p) nounwind readonly {
 entry:
   store atomic i32 0, i32* %p seq_cst, align 4
@@ -52,8 +55,10 @@
 }
 
 ; CHECK-LABEL: f6:
-; CHECK: ldrex
-; CHECK: strex
+; CHECK-V7: ldrex
+; CHECK-V7: strex
+; CHECK-V8: ldaex
+; CHECK-V8: stlex
 define i32 @f6(i32* %p) nounwind readonly {
 entry:
   %0 = atomicrmw add i32* %p, i32 1 seq_cst
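The new CHECK-V7/CHECK-V8 prefixes split expectations because ARMv8-M (baseline and mainline alike) provides load-acquire/store-release instructions: sequentially consistent atomics lower to lda/stl and ldaex/stlex instead of the plain ldr/str and ldrex/strex forms that v7-M must combine with barriers. A reduced sketch of the load case under the same prefix scheme (illustrative only):

; RUN: llc < %s -mtriple=thumbv8m.base-none-eabi | FileCheck %s --check-prefix=CHECK-V8
; CHECK-V8-LABEL: load_seq_cst:
; CHECK-V8: lda
define i32 @load_seq_cst(i32* %p) {
  ; seq_cst load: v8-M selects the load-acquire form, v7-M needs ldr + dmb
  %v = load atomic i32, i32* %p seq_cst, align 4
  ret i32 %v
}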
diff --git a/test/CodeGen/ARM/lowerMUL-newload.ll b/test/CodeGen/ARM/lowerMUL-newload.ll
index 93d765c..1d483c9 100644
--- a/test/CodeGen/ARM/lowerMUL-newload.ll
+++ b/test/CodeGen/ARM/lowerMUL-newload.ll
@@ -1,25 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm-eabi -mcpu=krait | FileCheck %s
 
 define void @func1(i16* %a, i16* %b, i16* %c) {
+; CHECK-LABEL: func1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    add r3, r1, #16
+; CHECK-NEXT:    vldr d18, [r2, #16]
+; CHECK-NEXT:    vld1.16 {d16}, [r3:64]
+; CHECK-NEXT:    vmovl.u16 q8, d16
+; CHECK-NEXT:    vaddw.s16 q10, q8, d18
+; CHECK-NEXT:    vmovn.i32 d19, q10
+; CHECK-NEXT:    vldr d20, [r0, #16]
+; CHECK-NEXT:    vstr d19, [r0, #16]
+; CHECK-NEXT:    vldr d19, [r2, #16]
+; CHECK-NEXT:    vmull.s16 q11, d18, d19
+; CHECK-NEXT:    vmovl.s16 q9, d19
+; CHECK-NEXT:    vmla.i32 q11, q8, q9
+; CHECK-NEXT:    vmovn.i32 d16, q11
+; CHECK-NEXT:    vstr d16, [r1, #16]
+; CHECK-NEXT:    vldr d16, [r2, #16]
+; CHECK-NEXT:    vmlal.s16 q11, d16, d20
+; CHECK-NEXT:    vmovn.i32 d16, q11
+; CHECK-NEXT:    vstr d16, [r0, #16]
+; CHECK-NEXT:    bx lr
 entry:
 ; The test case tries to vectorize the pseudo code below.
 ; a[i] = b[i] + c[i];
 ; b[i] = a[i] * c[i];
 ; a[i] = b[i] + a[i] * c[i];
-;
 ; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i]" is
 ; scheduled before the first vector store to "a[i] = b[i] + c[i]".
 ; Checking that there is no vector load a[i] scheduled between the vector
 ; stores to a[i], otherwise the load of a[i] will be polluted by the first
 ; vector store to a[i].
-;
 ; This test case checks that the chain information is updated during
 ; lowerMUL for the newly created Load SDNode.
 
-; CHECK: vldr {{.*}} [r0, #16]
-; CHECK: vstr {{.*}} [r0, #16]
-; CHECK-NOT: vldr {{.*}} [r0, #16]
-; CHECK: vstr {{.*}} [r0, #16]
 
   %scevgep0 = getelementptr i16, i16* %a, i32 8
   %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
@@ -57,26 +73,41 @@
 }
 
 define void @func2(i16* %a, i16* %b, i16* %c) {
+; CHECK-LABEL: func2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    add r3, r1, #16
+; CHECK-NEXT:    vldr d18, [r2, #16]
+; CHECK-NEXT:    vld1.16 {d16}, [r3:64]
+; CHECK-NEXT:    vmovl.u16 q8, d16
+; CHECK-NEXT:    vaddw.s16 q10, q8, d18
+; CHECK-NEXT:    vmovn.i32 d19, q10
+; CHECK-NEXT:    vldr d20, [r0, #16]
+; CHECK-NEXT:    vstr d19, [r0, #16]
+; CHECK-NEXT:    vldr d19, [r2, #16]
+; CHECK-NEXT:    vmull.s16 q11, d18, d19
+; CHECK-NEXT:    vmovl.s16 q9, d19
+; CHECK-NEXT:    vmla.i32 q11, q8, q9
+; CHECK-NEXT:    vmovn.i32 d16, q11
+; CHECK-NEXT:    vstr d16, [r1, #16]
+; CHECK-NEXT:    vldr d16, [r2, #16]
+; CHECK-NEXT:    vmlal.s16 q11, d16, d20
+; CHECK-NEXT:    vaddw.s16 q8, q11, d20
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vstr d16, [r0, #16]
+; CHECK-NEXT:    bx lr
 entry:
 ; The test case tries to vectorize the pseudo code below.
 ; a[i] = b[i] + c[i];
 ; b[i] = a[i] * c[i];
 ; a[i] = b[i] + a[i] * c[i] + a[i];
-;
 ; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i] + a[i]"
 ; is scheduled before the first vector store to "a[i] = b[i] + c[i]".
 ; Checking that there is no vector load a[i] scheduled between the first
 ; vector store to a[i] and the vector add of a[i], otherwise the load of
 ; a[i] will be polluted by the first vector store to a[i].
-;
 ; This test case checks that both the chain and value of the newly created
 ; Load SDNode are updated during lowerMUL.
 
-; CHECK: vldr {{.*}} [r0, #16]
-; CHECK: vstr {{.*}} [r0, #16]
-; CHECK-NOT: vldr {{.*}} [r0, #16]
-; CHECK: vaddw.s16
-; CHECK: vstr {{.*}} [r0, #16]
 
   %scevgep0 = getelementptr i16, i16* %a, i32 8
   %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
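The hand-written vldr/vstr ordering checks were replaced with full function bodies generated by the script named in the NOTE line, so future scheduling changes can regenerate the expectations rather than patching them by hand. A sketch of the invocation, assuming an in-tree build whose llc lives at build/bin/llc (the build path is an assumption):

  utils/update_llc_test_checks.py --llc-binary=build/bin/llc test/CodeGen/ARM/lowerMUL-newload.ll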
diff --git a/test/CodeGen/ARM/machine-licm.ll b/test/CodeGen/ARM/machine-licm.ll
index 9ed1a57..1cf291b 100644
--- a/test/CodeGen/ARM/machine-licm.ll
+++ b/test/CodeGen/ARM/machine-licm.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s -check-prefix=THUMB
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -disable-fp-elim   | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -disable-fp-elim -mattr=+v6t2 | FileCheck %s -check-prefix=MOVT
+; RUN: llc < %s -mtriple=thumb-apple-darwin -relocation-model=pic -frame-pointer=all | FileCheck %s -check-prefix=THUMB
+; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -frame-pointer=all   | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -frame-pointer=all -mattr=+v6t2 | FileCheck %s -check-prefix=MOVT
 ; rdar://7353541
 ; rdar://7354376
 ; rdar://8887598
diff --git a/test/CodeGen/ARM/macho-frame-offset.ll b/test/CodeGen/ARM/macho-frame-offset.ll
index f3dacf6..b61a7d7 100644
--- a/test/CodeGen/ARM/macho-frame-offset.ll
+++ b/test/CodeGen/ARM/macho-frame-offset.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple thumbv7m-apple-macho -disable-fp-elim -o - %s | FileCheck %s
+; RUN: llc -mtriple thumbv7m-apple-macho -frame-pointer=all -o - %s | FileCheck %s
 
 define void @func() {
 ; CHECK-LABEL: func:
diff --git a/test/CodeGen/ARM/none-macho.ll b/test/CodeGen/ARM/none-macho.ll
index fee459f..057da94 100644
--- a/test/CodeGen/ARM/none-macho.ll
+++ b/test/CodeGen/ARM/none-macho.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=thumbv7m-none-macho %s -o - -relocation-model=pic -disable-fp-elim | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NON-FAST
-; RUN: llc -mtriple=thumbv7m-none-macho -O0 %s -o - -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc -mtriple=thumbv7m-none-macho %s -o - -relocation-model=pic -frame-pointer=all | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NON-FAST
+; RUN: llc -mtriple=thumbv7m-none-macho -O0 %s -o - -relocation-model=pic -frame-pointer=all | FileCheck %s
 ; RUN: llc -mtriple=thumbv7m-none-macho -filetype=obj %s -o /dev/null
 
 @var = external global i32
diff --git a/test/CodeGen/ARM/setcc-logic.ll b/test/CodeGen/ARM/setcc-logic.ll
index 2c2792e..cf482f3 100644
--- a/test/CodeGen/ARM/setcc-logic.ll
+++ b/test/CodeGen/ARM/setcc-logic.ll
@@ -61,9 +61,8 @@
 ; CHECK-NEXT:    vceq.i32 q8, q9, q8
 ; CHECK-NEXT:    vld1.64 {d22, d23}, [r0]
 ; CHECK-NEXT:    vceq.i32 q9, q11, q10
+; CHECK-NEXT:    vand q8, q8, q9
 ; CHECK-NEXT:    vmovn.i32 d16, q8
-; CHECK-NEXT:    vmovn.i32 d17, q9
-; CHECK-NEXT:    vand d16, d16, d17
 ; CHECK-NEXT:    vmov r0, r1, d16
 ; CHECK-NEXT:    pop {r11, pc}
   %cmp1 = icmp eq <4 x i32> %a, %b
diff --git a/test/CodeGen/ARM/shuffle.ll b/test/CodeGen/ARM/shuffle.ll
index 7d6be4f..17ec7c5 100644
--- a/test/CodeGen/ARM/shuffle.ll
+++ b/test/CodeGen/ARM/shuffle.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -frame-pointer=all | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
 target triple = "thumbv7-apple-darwin"
diff --git a/test/CodeGen/ARM/smul.ll b/test/CodeGen/ARM/smul.ll
index 2b7be41..7091f8d 100644
--- a/test/CodeGen/ARM/smul.ll
+++ b/test/CodeGen/ARM/smul.ll
@@ -1,14 +1,14 @@
-; RUN: llc -mtriple=arm-eabi -mcpu=generic %s -o /dev/null
+; RUN: llc -mtriple=arm-eabi -mcpu=generic %s -o - | FileCheck %s --check-prefix=DISABLED
 ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
 ; RUN: llc -mtriple=thumb--none-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
 ; RUN: llc -mtriple=thumbv6t2-none-eabi %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumbv6-none-eabi %s -o - | FileCheck %s -check-prefix=CHECK-THUMBV6
+; RUN: llc -mtriple=thumbv6-none-eabi %s -o - | FileCheck %s -check-prefix=DISABLED
 
 define i32 @f1(i16 %x, i32 %y) {
 ; CHECK-LABEL: f1:
 ; CHECK-NOT: sxth
 ; CHECK: {{smulbt r0, r0, r1|smultb r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smulbt|smultb}}
+; DISABLED-NOT: {{smulbt|smultb}}
         %tmp1 = sext i16 %x to i32
         %tmp2 = ashr i32 %y, 16
         %tmp3 = mul i32 %tmp2, %tmp1
@@ -18,7 +18,7 @@
 define i32 @f2(i32 %x, i32 %y) {
 ; CHECK-LABEL: f2:
 ; CHECK: smultt
-; CHECK-THUMBV6-NOT: smultt
+; DISABLED-NOT: smultt
         %tmp1 = ashr i32 %x, 16
         %tmp3 = ashr i32 %y, 16
         %tmp4 = mul i32 %tmp3, %tmp1
@@ -29,7 +29,7 @@
 ; CHECK-LABEL: f3:
 ; CHECK-NOT: sxth
 ; CHECK: {{smlabt r0, r1, r2, r0|smlatb r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlabt|smlatb}}
+; DISABLED-NOT: {{smlabt|smlatb}}
         %tmp = sext i16 %x to i32
         %tmp2 = ashr i32 %y, 16
         %tmp3 = mul i32 %tmp2, %tmp
@@ -40,7 +40,7 @@
 define i32 @f4(i32 %a, i32 %x, i32 %y) {
 ; CHECK-LABEL: f4:
 ; CHECK: smlatt
-; CHECK-THUMBV6-NOT: smlatt
+; DISABLED-NOT: smlatt
         %tmp1 = ashr i32 %x, 16
         %tmp3 = ashr i32 %y, 16
         %tmp4 = mul i32 %tmp3, %tmp1
@@ -52,7 +52,7 @@
 ; CHECK-LABEL: f5:
 ; CHECK-NOT: sxth
 ; CHECK: smlabb
-; CHECK-THUMBV6-NOT: smlabb
+; DISABLED-NOT: smlabb
         %tmp1 = sext i16 %x to i32
         %tmp3 = sext i16 %y to i32
         %tmp4 = mul i32 %tmp3, %tmp1
@@ -64,7 +64,7 @@
 ; CHECK-LABEL: f6:
 ; CHECK-NOT: sxth
 ; CHECK: {{smlatb r0, r1, r2, r0|smlabt r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlatb|smlabt}}
+; DISABLED-NOT: {{smlatb|smlabt}}
         %tmp1 = sext i16 %y to i32
         %tmp2 = ashr i32 %x, 16
         %tmp3 = mul i32 %tmp2, %tmp1
@@ -75,7 +75,7 @@
 define i32 @f7(i32 %a, i32 %b, i32 %c) {
 ; CHECK-LABEL: f7:
 ; CHECK: smlawb r0, r0, r1, r2
-; CHECK-THUMBV6-NOT: smlawb
+; DISABLED-NOT: smlawb
         %shl = shl i32 %b, 16
         %shr = ashr exact i32 %shl, 16
         %conv = sext i32 %a to i64
@@ -91,7 +91,7 @@
 ; CHECK-LABEL: f8:
 ; CHECK-NOT: sxth
 ; CHECK: smlawb r0, r0, r1, r2
-; CHECK-THUMBV6-NOT: smlawb
+; DISABLED-NOT: smlawb
         %conv = sext i32 %a to i64
         %conv1 = sext i16 %b to i64
         %mul = mul nsw i64 %conv1, %conv
@@ -104,7 +104,7 @@
 define i32 @f9(i32 %a, i32 %b, i32 %c) {
 ; CHECK-LABEL: f9:
 ; CHECK: smlawt r0, r0, r1, r2
-; CHECK-THUMBV6-NOT: smlawt
+; DISABLED-NOT: smlawt
         %conv = sext i32 %a to i64
         %shr = ashr i32 %b, 16
         %conv1 = sext i32 %shr to i64
@@ -118,7 +118,7 @@
 define i32 @f10(i32 %a, i32 %b) {
 ; CHECK-LABEL: f10:
 ; CHECK: smulwb r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwb
+; DISABLED-NOT: smulwb
         %shl = shl i32 %b, 16
         %shr = ashr exact i32 %shl, 16
         %conv = sext i32 %a to i64
@@ -133,7 +133,7 @@
 ; CHECK-LABEL: f11:
 ; CHECK-NOT: sxth
 ; CHECK: smulwb r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwb
+; DISABLED-NOT: smulwb
         %conv = sext i32 %a to i64
         %conv1 = sext i16 %b to i64
         %mul = mul nsw i64 %conv1, %conv
@@ -145,7 +145,7 @@
 define i32 @f12(i32 %a, i32 %b) {
 ; CHECK-LABEL: f12:
 ; CHECK: smulwt r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwt
+; DISABLED-NOT: smulwt
         %conv = sext i32 %a to i64
         %shr = ashr i32 %b, 16
         %conv1 = sext i32 %shr to i64
@@ -159,7 +159,7 @@
 ; CHECK-LABEL: f13:
 ; CHECK-NOT: sxth
 ; CHECK: {{smultb r0, r0, r1|smulbt r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smultb|smulbt}}
+; DISABLED-NOT: {{smultb|smulbt}}
         %tmp1 = sext i16 %y to i32
         %tmp2 = ashr i32 %x, 16
         %tmp3 = mul i32 %tmp2, %tmp1
@@ -169,11 +169,11 @@
 define i32 @f14(i32 %x, i32 %y) {
 ; CHECK-LABEL: f14:
 ; CHECK-NOT: sxth
-; CHECK: {{smultb r0, r0, r1|smulbt r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smultb|smulbt}}
-        %tmp1 = shl i32 %y, 16
+; CHECK: {{smultb r0, r1, r0|smulbt r0, r0, r1}}
+; DISABLED-NOT: {{smultb|smulbt}}
+        %tmp1 = shl i32 %x, 16
         %tmp2 = ashr i32 %tmp1, 16
-        %tmp3 = ashr i32 %x, 16
+        %tmp3 = ashr i32 %y, 16
         %tmp4 = mul i32 %tmp3, %tmp2
         ret i32 %tmp4
 }
@@ -182,7 +182,7 @@
 ; CHECK-LABEL: f15:
 ; CHECK-NOT: sxth
 ; CHECK: {{smulbt r0, r0, r1|smultb r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smulbt|smultb}}
+; DISABLED-NOT: {{smulbt|smultb}}
         %tmp1 = shl i32 %x, 16
         %tmp2 = ashr i32 %tmp1, 16
         %tmp3 = ashr i32 %y, 16
@@ -194,7 +194,7 @@
 ; CHECK-LABEL: f16:
 ; CHECK-NOT: sxth
 ; CHECK: smulbb
-; CHECK-THUMBV6-NOT: smulbb
+; DISABLED-NOT: smulbb
         %tmp1 = sext i16 %x to i32
         %tmp2 = sext i16 %x to i32
         %tmp3 = mul i32 %tmp1, %tmp2
@@ -203,8 +203,9 @@
 
 define i32 @f17(i32 %x, i32 %y) {
 ; CHECK-LABEL: f17:
+; CHECK-NOT: sxth
 ; CHECK: smulbb
-; CHECK-THUMBV6-NOT: smulbb
+; DISABLED-NOT: smulbb
         %tmp1 = shl i32 %x, 16
         %tmp2 = shl i32 %y, 16
         %tmp3 = ashr i32 %tmp1, 16
@@ -215,8 +216,9 @@
 
 define i32 @f18(i32 %a, i32 %x, i32 %y) {
 ; CHECK-LABEL: f18:
+; CHECK-NOT: sxth
 ; CHECK: {{smlabt r0, r1, r2, r0|smlatb r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlabt|smlatb}}
+; DISABLED-NOT: {{smlabt|smlatb}}
         %tmp0 = shl i32 %x, 16
         %tmp1 = ashr i32 %tmp0, 16
         %tmp2 = ashr i32 %y, 16
@@ -227,20 +229,21 @@
 
 define i32 @f19(i32 %a, i32 %x, i32 %y) {
 ; CHECK-LABEL: f19:
-; CHECK: {{smlatb r0, r1, r2, r0|smlabt r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlatb|smlabt}}
-        %tmp0 = shl i32 %y, 16
+; CHECK: {{smlatb r0, r2, r1, r0|smlabt r0, r1, r2, r0}}
+; DISABLED-NOT: {{smlatb|smlabt}}
+        %tmp0 = shl i32 %x, 16
         %tmp1 = ashr i32 %tmp0, 16
-        %tmp2 = ashr i32 %x, 16
-        %tmp3 = mul i32 %tmp2, %tmp1
+        %tmp2 = ashr i32 %y, 16
+        %tmp3 = mul i32 %tmp1, %tmp2
         %tmp5 = add i32 %tmp3, %a
         ret i32 %tmp5
 }
 
 define i32 @f20(i32 %a, i32 %x, i32 %y) {
 ; CHECK-LABEL: f20:
+; CHECK-NOT: sxth
 ; CHECK: smlabb
-; CHECK-THUMBV6-NOT: smlabb
+; DISABLED-NOT: smlabb
         %tmp1 = shl i32 %x, 16
         %tmp2 = ashr i32 %tmp1, 16
         %tmp3 = shl i32 %y, 16
@@ -254,7 +257,7 @@
 ; CHECK-LABEL: f21:
 ; CHECK-NOT: sxth
 ; CHECK: smlabb
-; CHECK-THUMBV6-NOT: smlabb
+; DISABLED-NOT: smlabb
         %tmp1 = shl i32 %x, 16
         %tmp2 = ashr i32 %tmp1, 16
         %tmp3 = sext i16 %y to i32
@@ -263,12 +266,26 @@
         ret i32 %tmp5
 }
 
+define i32 @f21_b(i32 %a, i32 %x, i16 %y) {
+; CHECK-LABEL: f21_b:
+; CHECK-NOT: sxth
+; CHECK: smlabb
+; DISABLED-NOT: smlabb
+        %tmp1 = shl i32 %x, 16
+        %tmp2 = ashr i32 %tmp1, 16
+        %tmp3 = sext i16 %y to i32
+        %tmp4 = mul i32 %tmp3, %tmp2
+        %tmp5 = add i32 %a, %tmp4
+        ret i32 %tmp5
+}
+
 @global_b = external global i16, align 2
 
 define i32 @f22(i32 %a) {
 ; CHECK-LABEL: f22:
+; CHECK-NOT: sxth
 ; CHECK: smulwb r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwb
+; DISABLED-NOT: smulwb
         %b = load i16, i16* @global_b, align 2
         %sext = sext i16 %b to i64
         %conv = sext i32 %a to i64
@@ -280,8 +297,9 @@
 
 define i32 @f23(i32 %a, i32 %c) {
 ; CHECK-LABEL: f23:
+; CHECK-NOT: sxth
 ; CHECK: smlawb r0, r0, r2, r1
-; CHECK-THUMBV6-NOT: smlawb
+; DISABLED-NOT: smlawb
         %b = load i16, i16* @global_b, align 2
         %sext = sext i16 %b to i64
         %conv = sext i32 %a to i64
@@ -291,3 +309,102 @@
         %add = add nsw i32 %conv5, %c
         ret i32 %add
 }
+
+; CHECK-LABEL: f24:
+; CHECK-NOT: sxth
+; CHECK: smulbb
+define i32 @f24(i16* %a, i32* %b, i32* %c) {
+  %ld.0 = load i16, i16* %a, align 2
+  %ld.1 = load i32, i32* %b, align 4
+  %conv.0 = sext i16 %ld.0 to i32
+  %shift = shl i32 %ld.1, 16
+  %conv.1 = ashr i32 %shift, 16
+  %mul.0 = mul i32 %conv.0, %conv.1
+  store i32 %ld.1, i32* %c
+  ret i32 %mul.0
+}
+
+; CHECK-LABEL: f25:
+; CHECK-NOT: sxth
+; CHECK: smulbb
+define i32 @f25(i16* %a, i32 %b, i32* %c) {
+  %ld.0 = load i16, i16* %a, align 2
+  %conv.0 = sext i16 %ld.0 to i32
+  %shift = shl i32 %b, 16
+  %conv.1 = ashr i32 %shift, 16
+  %mul.0 = mul i32 %conv.0, %conv.1
+  store i32 %b, i32* %c
+  ret i32 %mul.0
+}
+
+; CHECK-LABEL: f25_b:
+; CHECK-NOT: sxth
+; CHECK: smulbb
+define i32 @f25_b(i16* %a, i32 %b, i32* %c) {
+  %ld.0 = load i16, i16* %a, align 2
+  %conv.0 = sext i16 %ld.0 to i32
+  %shift = shl i32 %b, 16
+  %conv.1 = ashr i32 %shift, 16
+  %mul.0 = mul i32 %conv.1, %conv.0
+  store i32 %b, i32* %c
+  ret i32 %mul.0
+}
+
+; CHECK-LABEL: f26:
+; CHECK-NOT: sxth
+; CHECK: {{smulbt|smultb}}
+define i32 @f26(i16* %a, i32 %b, i32* %c) {
+  %ld.0 = load i16, i16* %a, align 2
+  %conv.0 = sext i16 %ld.0 to i32
+  %conv.1 = ashr i32 %b, 16
+  %mul.0 = mul i32 %conv.0, %conv.1
+  store i32 %b, i32* %c
+  ret i32 %mul.0
+}
+
+; CHECK-LABEL: f26_b:
+; CHECK-NOT: sxth
+; CHECK: {{smulbt|smultb}}
+define i32 @f26_b(i16* %a, i32 %b, i32* %c) {
+  %ld.0 = load i16, i16* %a, align 2
+  %conv.0 = sext i16 %ld.0 to i32
+  %conv.1 = ashr i32 %b, 16
+  %mul.0 = mul i32 %conv.1, %conv.0
+  store i32 %b, i32* %c
+  ret i32 %mul.0
+}
+
+; CHECK-LABEL: f27:
+; CHECK-NOT: sxth
+; CHECK: smulbb
+; CHECK: {{smlabt|smlatb}}
+define i32 @f27(i16* %a, i32* %b) {
+  %ld.0 = load i16, i16* %a, align 2
+  %ld.1 = load i32, i32* %b, align 4
+  %conv.0 = sext i16 %ld.0 to i32
+  %shift = shl i32 %ld.1, 16
+  %conv.1 = ashr i32 %shift, 16
+  %conv.2 = ashr i32 %ld.1, 16
+  %mul.0 = mul i32 %conv.0, %conv.1
+  %mul.1 = mul i32 %conv.0, %conv.2
+  %add = add i32 %mul.0, %mul.1
+  ret i32 %add
+}
+
+; CHECK-LABEL: f27_b:
+; CHECK-NOT: sxth
+; CHECK: smulbb
+; CHECK: {{smlabt|smlatb}}
+define i32 @f27_b(i16* %a, i32* %b) {
+  %ld.0 = load i16, i16* %a, align 2
+  %ld.1 = load i32, i32* %b, align 4
+  %conv.0 = sext i16 %ld.0 to i32
+  %shift = shl i32 %ld.1, 16
+  %conv.1 = ashr i32 %shift, 16
+  %conv.2 = ashr i32 %ld.1, 16
+  %mul.0 = mul i32 %conv.0, %conv.1
+  %mul.1 = mul i32 %conv.2, %conv.0
+  %add = add i32 %mul.0, %mul.1
+  ret i32 %add
+}
+
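The new f24-f27 cases pin down smulbb/smulbt/smultb selection when one operand arrives through a load or a function argument; the common shape they exercise, distilled into a standalone function (name hypothetical), is a signed 16x16->32 multiply of sign-extended bottom halfwords:

define i32 @smulbb_sketch(i32 %x, i32 %y) {
  %xl = shl i32 %x, 16
  %xs = ashr i32 %xl, 16      ; sign-extend the bottom halfword of %x
  %yl = shl i32 %y, 16
  %ys = ashr i32 %yl, 16      ; sign-extend the bottom halfword of %y
  %m  = mul i32 %xs, %ys      ; expected to select smulbb on DSP-capable cores
  ret i32 %m
}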
diff --git a/test/CodeGen/ARM/ssp-data-layout.ll b/test/CodeGen/ARM/ssp-data-layout.ll
index b087fa9..feb0189 100644
--- a/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/test/CodeGen/ARM/ssp-data-layout.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -mcpu=cortex-a8 -mtriple arm-linux-gnu -target-abi=apcs -o - | FileCheck %s
+; RUN: llc < %s -frame-pointer=all -mcpu=cortex-a8 -mtriple arm-linux-gnu -target-abi=apcs -o - | FileCheck %s
 ;  This test is fairly fragile.  The goal is to ensure that "large" stack
 ;  objects are allocated closest to the stack protector (i.e., farthest away 
 ;  from the Stack Pointer).  In standard SSP mode this means that large (>=
diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll
index 1b40610..6166a94 100644
--- a/test/CodeGen/ARM/subreg-remat.ll
+++ b/test/CodeGen/ARM/subreg-remat.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -pre-RA-sched=source -no-integrated-as | FileCheck %s
+; RUN: llc < %s -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 -pre-RA-sched=source -no-integrated-as | FileCheck %s
 target triple = "thumbv7-apple-ios"
 ; <rdar://problem/10032939>
 ;
diff --git a/test/CodeGen/AVR/integration/blink.ll b/test/CodeGen/AVR/integration/blink.ll
index ef88e3e..29c3b7f 100644
--- a/test/CodeGen/AVR/integration/blink.ll
+++ b/test/CodeGen/AVR/integration/blink.ll
@@ -34,15 +34,8 @@
 define void @setup_ddr() {
 entry:
 
-  ; This should load the value of DDRB, OR it with the bit number and store
-  ; the result back to DDRB.
-
-  ; CHECK:      in      [[TMPREG:r[0-9]+]], 4
-  ; CHECK-NEXT: ori     [[TMPREG]], 32
-
-  ; CHECK-NOT: ori     {{r[0-9]+}}, 0
-
-  ; CHECK-NEXT: out     4, [[TMPREG]]
+  ; This should set the 5th bit of DDRB.
+  ; CHECK:      sbi     4, 5
   ; CHECK-NEXT: ret
 
   %0 = load volatile i8, i8* inttoptr (i16 36 to i8*), align 1
@@ -58,15 +51,8 @@
 define void @turn_on() {
 entry:
 
-  ; This should load the value of PORTB, OR it with the bit number and store
-  ; the result back to DDRB.
-
-  ; CHECK:      in      [[TMPREG:r[0-9]+]], 5
-  ; CHECK-NEXT: ori     [[TMPREG]], 32
-
-  ; CHECK-NOT: ori     {{r[0-9]+}}, 0
-
-  ; CHECK-NEXT: out     5, [[TMPREG]]
+  ; This should set the 5th bit of PORTB.
+  ; CHECK:      sbi     5, 5
   ; CHECK-NEXT: ret
 
   %0 = load volatile i8, i8* inttoptr (i16 37 to i8*), align 1
@@ -82,14 +68,8 @@
 define void @turn_off() {
 entry:
 
-  ; This should load the value of PORTB, OR it with the bit number and store
-  ; the result back to DDRB.
-
-
-  ; CHECK:      in      [[TMPREG:r[0-9]+]], 5
-  ; CHECK-NEXT: andi    [[TMPREG]], 223
-  ; CHECK-NEXT: andi    {{r[0-9]+}}, 0
-  ; CHECK-NEXT: out     5, [[TMPREG]]
+  ; This should clear the 5th bit of PORTB.
+  ; CHECK:      cbi     5, 5
   ; CHECK-NEXT: ret
 
   %0 = load volatile i8, i8* inttoptr (i16 37 to i8*), align 1
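The rewritten AVR checks reflect that a volatile read-modify-write of a low I/O address with a single-bit mask is now selected to the single-instruction sbi/cbi forms instead of an in/ori/out (or in/andi/out) sequence. Distilled sketch: data-space address 37 (0x25) is PORTB, which is I/O-space address 5 once the 0x20 offset is subtracted:

define void @set_portb_bit5() {
  %v = load volatile i8, i8* inttoptr (i16 37 to i8*), align 1
  %s = or i8 %v, 32                                   ; set bit 5
  store volatile i8 %s, i8* inttoptr (i16 37 to i8*), align 1
  ret void                                            ; expected: sbi 5, 5
}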
diff --git a/test/CodeGen/BPF/BTF/array-1d-char.ll b/test/CodeGen/BPF/BTF/array-1d-char.ll
new file mode 100644
index 0000000..5ef3e92
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/array-1d-char.ll
@@ -0,0 +1,68 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   char a[10];
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global [10 x i8] zeroinitializer, align 1, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!10, !11, !12}
+!llvm.ident = !{!13}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   56
+; CHECK-NEXT:        .long   56
+; CHECK-NEXT:        .long   26
+; CHECK-NEXT:        .long   0                       # BTF_KIND_ARRAY(id = 1)
+; CHECK-NEXT:        .long   50331648                # 0x3000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   10
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .long   21                      # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   16777224                # 0x1000008
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "__ARRAY_SIZE_TYPE__"   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "char"                  # string offset=21
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, size: 80, elements: !8)
+!7 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!8 = !{!9}
+!9 = !DISubrange(count: 10)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i32 1, !"wchar_size", i32 4}
+!13 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
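Every test in this BTF series opens with the same fixed-size header, so the directives are worth decoding once (field names as in the kernel's BTF format description):

;   .short 60319   magic (0xeb9f)
;   .byte  1       version
;   .byte  0       flags
;   .long  24      hdr_len
;   .long  0       type_off (offset of the type section)
;   .long  56      type_len (length of the type section)
;   .long  56      str_off  (offset of the string section)
;   .long  26      str_len  (length of the string section)

The second word of each type entry carries the kind in its high byte, which is how the inline comments arrive at 0x3000000 = BTF_KIND_ARRAY and 0x1000000 = BTF_KIND_INT.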
diff --git a/test/CodeGen/BPF/BTF/array-1d-int.ll b/test/CodeGen/BPF/BTF/array-1d-int.ll
new file mode 100644
index 0000000..8d81dbe
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/array-1d-int.ll
@@ -0,0 +1,68 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int a[10];
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global [10 x i32] zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!10, !11, !12}
+!llvm.ident = !{!13}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   56
+; CHECK-NEXT:        .long   56
+; CHECK-NEXT:        .long   25
+; CHECK-NEXT:        .long   0                       # BTF_KIND_ARRAY(id = 1)
+; CHECK-NEXT:        .long   50331648                # 0x3000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   10
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .long   21                      # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "__ARRAY_SIZE_TYPE__"   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=21
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, size: 320, elements: !8)
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{!9}
+!9 = !DISubrange(count: 10)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i32 1, !"wchar_size", i32 4}
+!13 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/array-2d-int.ll b/test/CodeGen/BPF/BTF/array-2d-int.ll
new file mode 100644
index 0000000..00db42e
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/array-2d-int.ll
@@ -0,0 +1,68 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int a[10][10];
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global [10 x [10 x i32]] zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!10, !11, !12}
+!llvm.ident = !{!13}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   56
+; CHECK-NEXT:        .long   56
+; CHECK-NEXT:        .long   25
+; CHECK-NEXT:        .long   0                       # BTF_KIND_ARRAY(id = 1)
+; CHECK-NEXT:        .long   50331648                # 0x3000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   100
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .long   21                      # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "__ARRAY_SIZE_TYPE__"   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=21
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, size: 3200, elements: !8)
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{!9, !9}
+!9 = !DISubrange(count: 10)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i32 1, !"wchar_size", i32 4}
+!13 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/array-size-0.ll b/test/CodeGen/BPF/BTF/array-size-0.ll
new file mode 100644
index 0000000..11e694a
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/array-size-0.ll
@@ -0,0 +1,70 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   struct t {};
+;   struct t a[10];
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+%struct.t = type {}
+
+@a = common dso_local local_unnamed_addr global [10 x %struct.t] zeroinitializer, align 1, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!10, !11, !12}
+!llvm.ident = !{!13}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   52
+; CHECK-NEXT:        .long   52
+; CHECK-NEXT:        .long   23
+; CHECK-NEXT:        .long   0                       # BTF_KIND_ARRAY(id = 1)
+; CHECK-NEXT:        .long   50331648                # 0x3000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .long   21                      # BTF_KIND_STRUCT(id = 3)
+; CHECK-NEXT:        .long   67108864                # 0x4000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "__ARRAY_SIZE_TYPE__"   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   116                     # string offset=21
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, elements: !8)
+!7 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t", file: !3, line: 1, elements: !4)
+!8 = !{!9}
+!9 = !DISubrange(count: 10)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i32 1, !"wchar_size", i32 4}
+!13 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/array-typedef.ll b/test/CodeGen/BPF/BTF/array-typedef.ll
new file mode 100644
index 0000000..9fe57c9
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/array-typedef.ll
@@ -0,0 +1,82 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   typedef unsigned _int;
+;   typedef _int __int;
+;   __int a[10];
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global [10 x i32] zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!12, !13, !14}
+!llvm.ident = !{!15}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   80
+; CHECK-NEXT:        .long   80
+; CHECK-NEXT:        .long   45
+; CHECK-NEXT:        .long   0                       # BTF_KIND_ARRAY(id = 1)
+; CHECK-NEXT:        .long   50331648                # 0x3000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   10
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .long   21                      # BTF_KIND_TYPEDEF(id = 3)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   27                      # BTF_KIND_TYPEDEF(id = 4)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   32                      # BTF_KIND_INT(id = 5)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "__ARRAY_SIZE_TYPE__"   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "__int"                 # string offset=21
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "_int"                  # string offset=27
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "unsigned int"          # string offset=32
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 3, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, size: 320, elements: !10)
+!7 = !DIDerivedType(tag: DW_TAG_typedef, name: "__int", file: !3, line: 2, baseType: !8)
+!8 = !DIDerivedType(tag: DW_TAG_typedef, name: "_int", file: !3, line: 1, baseType: !9)
+!9 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!10 = !{!11}
+!11 = !DISubrange(count: 10)
+!12 = !{i32 2, !"Dwarf Version", i32 4}
+!13 = !{i32 2, !"Debug Info Version", i32 3}
+!14 = !{i32 1, !"wchar_size", i32 4}
+!15 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/binary-format.ll b/test/CodeGen/BPF/BTF/binary-format.ll
new file mode 100644
index 0000000..39d699b
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/binary-format.ll
@@ -0,0 +1,68 @@
+; RUN: llc -march=bpfel -filetype=obj -o - %s | llvm-readelf -x ".BTF" -x ".BTF.ext" | FileCheck -check-prefixes=CHECK,CHECK-EL %s
+; RUN: llc -march=bpfeb -filetype=obj -o - %s | llvm-readelf -x ".BTF" -x ".BTF.ext" | FileCheck -check-prefixes=CHECK,CHECK-EB %s
+
+; Source code:
+;   int f(int a) { return a; }
+; Compilation flag:
+;   clang -target bpf -O2 -g -gdwarf-5 -gembed-source -S -emit-llvm t.c
+
+; Function Attrs: nounwind readnone
+define dso_local i32 @f(i32 returned %a) local_unnamed_addr #0 !dbg !7 {
+entry:
+  call void @llvm.dbg.value(metadata i32 %a, metadata !12, metadata !DIExpression()), !dbg !13
+  ret i32 %a, !dbg !14
+}
+
+; CHECK:    '.BTF'
+; CHECK-EL: 0x00000000 9feb0100 18000000 00000000 30000000
+; CHECK-EL: 0x00000010 30000000 33000000 2b000000 00000001
+; CHECK-EL: 0x00000020 04000000 20000001 00000000 0100000d
+; CHECK-EL: 0x00000030 01000000 2f000000 01000000 31000000
+; CHECK-EL: 0x00000040 0000000c 02000000 002e7465 7874002f
+; CHECK-EB: 0x00000000 eb9f0100 00000018 00000000 00000030
+; CHECK-EB: 0x00000010 00000030 00000033 0000002b 01000000
+; CHECK-EB: 0x00000020 00000004 01000020 00000000 0d000001
+; CHECK-EB: 0x00000030 00000001 0000002f 00000001 00000031
+; CHECK-EB: 0x00000040 0c000000 00000002 002e7465 7874002f
+; CHECK:    0x00000050 746d702f 742e6300 696e7420 6628696e
+; CHECK:    0x00000060 74206129 207b2072 65747572 6e20613b
+; CHECK:    0x00000070 207d0069 6e740061 006600
+; CHECK:    '.BTF.ext'
+; CHECK-EL: 0x00000000 9feb0100 18000000 00000000 14000000
+; CHECK-EL: 0x00000010 14000000 2c000000 08000000 01000000
+; CHECK-EL: 0x00000020 01000000 00000000 03000000 10000000
+; CHECK-EL: 0x00000030 01000000 02000000 00000000 07000000
+; CHECK-EL: 0x00000040 10000000 00040000 08000000 07000000
+; CHECK-EL: 0x00000050 10000000 10040000
+; CHECK-EB: 0x00000000 eb9f0100 00000018 00000000 00000014
+; CHECK-EB: 0x00000010 00000014 0000002c 00000008 00000001
+; CHECK-EB: 0x00000020 00000001 00000000 00000003 00000010
+; CHECK-EB: 0x00000030 00000001 00000002 00000000 00000007
+; CHECK-EB: 0x00000040 00000010 00000400 00000008 00000007
+; CHECK-EB: 0x00000050 00000010 00000410
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "t.c", directory: "/tmp", checksumkind: CSK_MD5, checksum: "1924f0d78deb326ceb76cd8e9f450775", source: "int f(int a) { return a; }\0A")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 5}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
+!7 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, isOptimized: true, unit: !0, retainedNodes: !11)
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10, !10}
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !{!12}
+!12 = !DILocalVariable(name: "a", arg: 1, scope: !7, file: !1, line: 1, type: !10)
+!13 = !DILocation(line: 1, column: 11, scope: !7)
+!14 = !DILocation(line: 1, column: 16, scope: !7)
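The CHECK-EL and CHECK-EB dumps are the same BTF blob with multi-byte fields byte-swapped; decoding the first dumped word shows the pattern:

  9feb0100   magic 0xeb9f in little-endian byte order (9f eb), then version 1, flags 0
  eb9f0100   the same three fields in big-endian byte order

The shared CHECK lines at the end cover the string and source data, which are byte-order independent.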
diff --git a/test/CodeGen/BPF/BTF/char.ll b/test/CodeGen/BPF/BTF/char.ll
new file mode 100644
index 0000000..122f698
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/char.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   char a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i8 0, align 1, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   6
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   16777224                # 0x1000008
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "char"                  # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/enum-basic.ll b/test/CodeGen/BPF/BTF/enum-basic.ll
new file mode 100644
index 0000000..5b71030
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/enum-basic.ll
@@ -0,0 +1,62 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   enum { A = -1, B = 2 } a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i32 0, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!11, !12, !13}
+!llvm.ident = !{!14}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   0                       # BTF_KIND_ENUM(id = 1)
+; CHECK-NEXT:        .long   100663298               # 0x6000002
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   -1
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .byte   65                      # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   66                      # string offset=3
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !5, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !10, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{!5}
+!5 = !DICompositeType(tag: DW_TAG_enumeration_type, file: !3, line: 1, baseType: !6, size: 32, elements: !7)
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{!8, !9}
+!8 = !DIEnumerator(name: "A", value: -1)
+!9 = !DIEnumerator(name: "B", value: 2)
+!10 = !{!0}
+!11 = !{i32 2, !"Dwarf Version", i32 4}
+!12 = !{i32 2, !"Debug Info Version", i32 3}
+!13 = !{i32 1, !"wchar_size", i32 4}
+!14 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
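The enum entry packs its kind and enumerator count into a single info word; decoded, the type section emitted above reads:

;   .long 0          name_off (anonymous enum)
;   .long 0x6000002  info: kind 6 (BTF_KIND_ENUM) in the high byte, vlen 2 in the low half
;   .long 4          size of the enum in bytes
;   .long 1, -1      enumerator "A" (string offset 1), value -1
;   .long 3, 2       enumerator "B" (string offset 3), value 2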
diff --git a/test/CodeGen/BPF/BTF/func-func-ptr.ll b/test/CodeGen/BPF/BTF/func-func-ptr.ll
new file mode 100644
index 0000000..df564f5
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/func-func-ptr.ll
@@ -0,0 +1,128 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   void (*a1)(int p1);
+;   struct t1 { void (*a1)(int p1); } b1;
+;   void f1(int p2) { }
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+%struct.t1 = type { void (i32)* }
+
+@a1 = common dso_local local_unnamed_addr global void (i32)* null, align 8, !dbg !0
+@b1 = common dso_local local_unnamed_addr global %struct.t1 zeroinitializer, align 8, !dbg !6
+
+; Function Attrs: nounwind readnone
+define dso_local void @f1(i32 %p2) local_unnamed_addr #0 !dbg !19 {
+entry:
+  call void @llvm.dbg.value(metadata i32 %p2, metadata !21, metadata !DIExpression()), !dbg !22
+  ret void, !dbg !23
+}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   104
+; CHECK-NEXT:        .long   104
+; CHECK-NEXT:        .long   32
+; CHECK-NEXT:        .long   16                      # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 2)
+; CHECK-NEXT:        .long   218103809               # 0xd000001
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   23                      # BTF_KIND_FUNC(id = 3)
+; CHECK-NEXT:        .long   201326592               # 0xc000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 4)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 5)
+; CHECK-NEXT:        .long   218103809               # 0xd000001
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   26                      # BTF_KIND_STRUCT(id = 6)
+; CHECK-NEXT:        .long   67108865                # 0x4000001
+; CHECK-NEXT:        .long   8
+; CHECK-NEXT:        .long   29
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  ".text"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "/tmp/t.c"              # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=16
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "p2"                    # string offset=20
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "f1"                    # string offset=23
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "t1"                    # string offset=26
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "a1"                    # string offset=29
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   1                       # FuncInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   16                      # LineInfo
+; CHECK-NEXT:        .long   1                       # LineInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Ltmp{{[0-9]+}}
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3091                    # Line 3 Col 19
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!15, !16, !17}
+!llvm.ident = !{!18}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a1", scope: !2, file: !3, line: 1, type: !11, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/tmp")
+!4 = !{}
+!5 = !{!0, !6}
+!6 = !DIGlobalVariableExpression(var: !7, expr: !DIExpression())
+!7 = distinct !DIGlobalVariable(name: "b1", scope: !2, file: !3, line: 2, type: !8, isLocal: false, isDefinition: true)
+!8 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1", file: !3, line: 2, size: 64, elements: !9)
+!9 = !{!10}
+!10 = !DIDerivedType(tag: DW_TAG_member, name: "a1", scope: !8, file: !3, line: 2, baseType: !11, size: 64)
+!11 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 64)
+!12 = !DISubroutineType(types: !13)
+!13 = !{null, !14}
+!14 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!15 = !{i32 2, !"Dwarf Version", i32 4}
+!16 = !{i32 2, !"Debug Info Version", i32 3}
+!17 = !{i32 1, !"wchar_size", i32 4}
+!18 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
+!19 = distinct !DISubprogram(name: "f1", scope: !3, file: !3, line: 3, type: !12, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !2, retainedNodes: !20)
+!20 = !{!21}
+!21 = !DILocalVariable(name: "p2", arg: 1, scope: !19, file: !3, line: 3, type: !14)
+!22 = !DILocation(line: 3, column: 13, scope: !19)
+!23 = !DILocation(line: 3, column: 19, scope: !19)
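+
+; Note on the .BTF layout checked above: the section opens with a fixed
+; 24-byte header (magic 0xeb9f, version 1, flags 0, hdr_len 24), followed by
+; the type section offset/length and string section offset/length; here the
+; type records occupy 104 bytes and the string table the next 32. Each type
+; record starts with a name offset plus an info word whose bits 24-27 hold
+; the BTF kind and whose low 16 bits hold the vlen, e.g. 0xd000001 is
+; BTF_KIND_FUNC_PROTO (kind 13) with one parameter.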
diff --git a/test/CodeGen/BPF/BTF/func-non-void.ll b/test/CodeGen/BPF/BTF/func-non-void.ll
new file mode 100644
index 0000000..e5361b9
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/func-non-void.ll
@@ -0,0 +1,97 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int f1(int a1) { return a1; }
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+; Function Attrs: nounwind readnone
+define dso_local i32 @f1(i32 returned) local_unnamed_addr #0 !dbg !7 {
+  call void @llvm.dbg.value(metadata i32 %0, metadata !12, metadata !DIExpression()), !dbg !13
+  ret i32 %0, !dbg !14
+}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   48
+; CHECK-NEXT:        .long   48
+; CHECK-NEXT:        .long   26
+; CHECK-NEXT:        .long   16                      # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 2)
+; CHECK-NEXT:        .long   218103809               # 0xd000001
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   23                      # BTF_KIND_FUNC(id = 3)
+; CHECK-NEXT:        .long   201326592               # 0xc000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  ".text"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "/tmp/t.c"              # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=16
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "a1"                    # string offset=20
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "f1"                    # string offset=23
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   44
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   1                       # FuncInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   16                      # LineInfo
+; CHECK-NEXT:        .long   1                       # LineInfo section string offset=1
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   1024                    # Line 1 Col 0
+; CHECK-NEXT:        .long   .Ltmp{{[0-9]+}}
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   1042                    # Line 1 Col 18
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 345562) (llvm/trunk 345560)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "t.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 345562) (llvm/trunk 345560)"}
+!7 = distinct !DISubprogram(name: "f1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !11)
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10, !10}
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !{!12}
+!12 = !DILocalVariable(name: "a1", arg: 1, scope: !7, file: !1, line: 1, type: !10)
+!13 = !DILocation(line: 1, column: 12, scope: !7)
+!14 = !DILocation(line: 1, column: 18, scope: !7)
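+
+; Note on the .BTF.ext records checked above: each FuncInfo record is an
+; (insn offset, type id) pair, tying .Lfunc_begin0 to type id 3, the
+; BTF_KIND_FUNC for f1. Each LineInfo record is (insn offset, file name
+; offset, line offset, line_col), where line_col packs the line number into
+; the upper bits and the column into the low 10 bits, so 1042 == (1 << 10) | 18
+; for line 1 column 18.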
diff --git a/test/CodeGen/BPF/BTF/func-source.ll b/test/CodeGen/BPF/BTF/func-source.ll
new file mode 100644
index 0000000..6e1bed3
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/func-source.ll
@@ -0,0 +1,80 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   void f(void) { }
+; Compilation flag:
+;   clang -target bpf -O2 -g -gdwarf-5 -gembed-source -S -emit-llvm t.c
+;
+; This test embeds the source code in the IR, so the line info should
+; correctly reference the source lines in the string table.
+
+; Function Attrs: norecurse nounwind readnone
+define dso_local void @f() local_unnamed_addr #0 !dbg !7 {
+entry:
+  ret void, !dbg !10
+}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   35
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 1)
+; CHECK-NEXT:        .long   218103808               # 0xd000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   33                      # BTF_KIND_FUNC(id = 2)
+; CHECK-NEXT:        .long   201326592               # 0xc000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  ".text"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "/tmp/t.c"              # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "void f(void) { }"      # string offset=16
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   102                     # string offset=33
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   1                       # FuncInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   16                      # LineInfo
+; CHECK-NEXT:        .long   1                       # LineInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Ltmp{{[0-9]+}}
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   1040                    # Line 1 Col 16
+
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "t.c", directory: "/tmp", checksumkind: CSK_MD5, checksum: "978599fafe3a080b456e3d95a3710359", source: "void f(void) { }\0A")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 5}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
+!7 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, isOptimized: true, unit: !0, retainedNodes: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DILocation(line: 1, column: 16, scope: !7)
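+
+; Note: with -gembed-source the source text itself lands in the BTF string
+; table ("void f(void) { }" at offset 16), and the LineInfo record's line
+; offset word references that string instead of the usual 0; that reference
+; is what this test verifies.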
diff --git a/test/CodeGen/BPF/BTF/func-typedef.ll b/test/CodeGen/BPF/BTF/func-typedef.ll
new file mode 100644
index 0000000..8deac0f
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/func-typedef.ll
@@ -0,0 +1,112 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   typedef int _int;
+;   typedef _int __int;
+;   __int f(__int a) { return a; }
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+; Function Attrs: nounwind readnone
+define dso_local i32 @f(i32 returned %a) local_unnamed_addr #0 !dbg !7 {
+entry:
+  call void @llvm.dbg.value(metadata i32 %a, metadata !14, metadata !DIExpression()), !dbg !15
+  ret i32 %a, !dbg !16
+}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   72
+; CHECK-NEXT:        .long   72
+; CHECK-NEXT:        .long   35
+; CHECK-NEXT:        .long   16                      # BTF_KIND_TYPEDEF(id = 1)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   22                      # BTF_KIND_TYPEDEF(id = 2)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   27                      # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 4)
+; CHECK-NEXT:        .long   218103809               # 0xd000001
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   31
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   33                      # BTF_KIND_FUNC(id = 5)
+; CHECK-NEXT:        .long   201326592               # 0xc000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  ".text"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "/tmp/t.c"              # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "__int"                 # string offset=16
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "_int"                  # string offset=22
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=27
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   97                      # string offset=31
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   102                     # string offset=33
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   44
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   1                       # FuncInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   16                      # LineInfo
+; CHECK-NEXT:        .long   1                       # LineInfo section string offset=1
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3072                    # Line 3 Col 0
+; CHECK-NEXT:        .long   .Ltmp{{[0-9]+}}
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3092                    # Line 3 Col 20
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "t.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
+!7 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !13)
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10, !10}
+!10 = !DIDerivedType(tag: DW_TAG_typedef, name: "__int", file: !1, line: 2, baseType: !11)
+!11 = !DIDerivedType(tag: DW_TAG_typedef, name: "_int", file: !1, line: 1, baseType: !12)
+!12 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!13 = !{!14}
+!14 = !DILocalVariable(name: "a", arg: 1, scope: !7, file: !1, line: 3, type: !10)
+!15 = !DILocation(line: 3, column: 15, scope: !7)
+!16 = !DILocation(line: 3, column: 20, scope: !7)
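+
+; Note: each BTF_KIND_TYPEDEF record (info word 0x8000000, kind 8, vlen 0)
+; uses its type word as a link, so "__int" (id 1) resolves to "_int" (id 2),
+; which resolves to the BTF_KIND_INT "int" (id 3); the function prototype
+; refers to the outermost typedef, id 1.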
diff --git a/test/CodeGen/BPF/BTF/func-unused-arg.ll b/test/CodeGen/BPF/BTF/func-unused-arg.ll
new file mode 100644
index 0000000..62e9d40
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/func-unused-arg.ll
@@ -0,0 +1,93 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int f1(int a1) { return 0; }
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+; Function Attrs: nounwind readnone
+define dso_local i32 @f1(i32) local_unnamed_addr #0 !dbg !7 {
+  call void @llvm.dbg.value(metadata i32 %0, metadata !12, metadata !DIExpression()), !dbg !13
+  ret i32 0, !dbg !14
+}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   48
+; CHECK-NEXT:        .long   48
+; CHECK-NEXT:        .long   26
+; CHECK-NEXT:        .long   16                      # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 2)
+; CHECK-NEXT:        .long   218103809               # 0xd000001
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   23                      # BTF_KIND_FUNC(id = 3)
+; CHECK-NEXT:        .long   201326592               # 0xc000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  ".text"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "/tmp/t.c"              # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=16
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "a1"                    # string offset=20
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "f1"                    # string offset=23
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   1                       # FuncInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   16                      # LineInfo
+; CHECK-NEXT:        .long   1                       # LineInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Ltmp{{[0-9]+}}
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   1042                    # Line 1 Col 18
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 345562) (llvm/trunk 345560)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "t.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 345562) (llvm/trunk 345560)"}
+!7 = distinct !DISubprogram(name: "f1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !11)
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10, !10}
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !{!12}
+!12 = !DILocalVariable(name: "a1", arg: 1, scope: !7, file: !1, line: 1, type: !10)
+!13 = !DILocation(line: 1, column: 12, scope: !7)
+!14 = !DILocation(line: 1, column: 18, scope: !7)
diff --git a/test/CodeGen/BPF/BTF/func-void.ll b/test/CodeGen/BPF/BTF/func-void.ll
new file mode 100644
index 0000000..f86643b
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/func-void.ll
@@ -0,0 +1,74 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   void f1(void) {}
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+; Function Attrs: norecurse nounwind readnone
+define dso_local void @f1() local_unnamed_addr #0 !dbg !7 {
+  ret void, !dbg !10
+}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   19
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 1)
+; CHECK-NEXT:        .long   218103808               # 0xd000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16                      # BTF_KIND_FUNC(id = 2)
+; CHECK-NEXT:        .long   201326592               # 0xc000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  ".text"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "/tmp/t.c"              # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "f1"                    # string offset=16
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   20
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   1                       # FuncInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Lfunc_begin0
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   16                      # LineInfo
+; CHECK-NEXT:        .long   1                       # LineInfo section string offset=1
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   .Ltmp{{[0-9]+}}
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   1040                    # Line 1 Col 16
+
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 345562) (llvm/trunk 345560)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "t.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 345562) (llvm/trunk 345560)"}
+!7 = distinct !DISubprogram(name: "f1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DILocation(line: 1, column: 16, scope: !7)
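+
+; Note: BTF has no explicit void type; type id 0 stands for void, so a
+; BTF_KIND_FUNC_PROTO with type word 0 and vlen 0 (info word 0xd000000)
+; encodes void f1(void).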
diff --git a/test/CodeGen/BPF/BTF/fwd-no-define.ll b/test/CodeGen/BPF/BTF/fwd-no-define.ll
new file mode 100644
index 0000000..c3d082e
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/fwd-no-define.ll
@@ -0,0 +1,73 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   struct t1;
+;   struct t2 {struct t1 *p;} a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+%struct.t2 = type { %struct.t1* }
+%struct.t1 = type opaque
+
+@a = common dso_local local_unnamed_addr global %struct.t2 zeroinitializer, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!11, !12, !13}
+!llvm.ident = !{!14}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   48
+; CHECK-NEXT:        .long   48
+; CHECK-NEXT:        .long   9
+; CHECK-NEXT:        .long   1                       # BTF_KIND_STRUCT(id = 1)
+; CHECK-NEXT:        .long   67108865                # 0x4000001
+; CHECK-NEXT:        .long   8
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 2)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   6                       # BTF_KIND_FWD(id = 3)
+; CHECK-NEXT:        .long   117440512               # 0x7000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "t2"                    # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   112                     # string offset=4
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "t1"                    # string offset=6
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t2", file: !3, line: 2, size: 64, elements: !7)
+!7 = !{!8}
+!8 = !DIDerivedType(tag: DW_TAG_member, name: "p", scope: !6, file: !3, line: 2, baseType: !9, size: 64)
+!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 64)
+!10 = !DICompositeType(tag: DW_TAG_structure_type, name: "t1", file: !3, line: 1, flags: DIFlagFwdDecl)
+!11 = !{i32 2, !"Dwarf Version", i32 4}
+!12 = !{i32 2, !"Debug Info Version", i32 3}
+!13 = !{i32 1, !"wchar_size", i32 4}
+!14 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/fwd-with-define.ll b/test/CodeGen/BPF/BTF/fwd-with-define.ll
new file mode 100644
index 0000000..8a88c89
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/fwd-with-define.ll
@@ -0,0 +1,66 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   struct t1;
+;   struct t1 {struct t1 *p;} a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+%struct.t1 = type { %struct.t1* }
+
+@a = common dso_local local_unnamed_addr global %struct.t1 zeroinitializer, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!10, !11, !12}
+!llvm.ident = !{!13}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   36
+; CHECK-NEXT:        .long   36
+; CHECK-NEXT:        .long   6
+; CHECK-NEXT:        .long   1                       # BTF_KIND_STRUCT(id = 1)
+; CHECK-NEXT:        .long   67108865                # 0x4000001
+; CHECK-NEXT:        .long   8
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 2)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "t1"                    # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   112                     # string offset=4
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1", file: !3, line: 2, size: 64, elements: !7)
+!7 = !{!8}
+!8 = !DIDerivedType(tag: DW_TAG_member, name: "p", scope: !6, file: !3, line: 2, baseType: !9, size: 64)
+!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !6, size: 64)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i32 1, !"wchar_size", i32 4}
+!13 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/int.ll b/test/CodeGen/BPF/BTF/int.ll
new file mode 100644
index 0000000..fd12cd0
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/int.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i32 0, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/longlong.ll b/test/CodeGen/BPF/BTF/longlong.ll
new file mode 100644
index 0000000..f936e1e
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/longlong.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   long long a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i64 0, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   15
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   8
+; CHECK-NEXT:        .long   16777280                # 0x1000040
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "long long int"         # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "long long int", size: 64, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-const-void.ll b/test/CodeGen/BPF/BTF/ptr-const-void.ll
new file mode 100644
index 0000000..ccc7b8d
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-const-void.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   const void *p;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@p = common dso_local local_unnamed_addr global i8* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!8, !9, !10}
+!llvm.ident = !{!11}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_CONST(id = 2)
+; CHECK-NEXT:        .long   167772160               # 0xa000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "p", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DIDerivedType(tag: DW_TAG_const_type, baseType: null)
+!8 = !{i32 2, !"Dwarf Version", i32 4}
+!9 = !{i32 2, !"Debug Info Version", i32 3}
+!10 = !{i32 1, !"wchar_size", i32 4}
+!11 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-func-1.ll b/test/CodeGen/BPF/BTF/ptr-func-1.ll
new file mode 100644
index 0000000..e80158b
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-func-1.ll
@@ -0,0 +1,55 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   void (*a)(void);
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global void ()* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!9, !10, !11}
+!llvm.ident = !{!12}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 2)
+; CHECK-NEXT:        .long   218103808               # 0xd000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DISubroutineType(types: !8)
+!8 = !{null}
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 3}
+!11 = !{i32 1, !"wchar_size", i32 4}
+!12 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-func-2.ll b/test/CodeGen/BPF/BTF/ptr-func-2.ll
new file mode 100644
index 0000000..2972121
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-func-2.ll
@@ -0,0 +1,73 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int (*a)(int a, char b);
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i32 (i32, i8)* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!11, !12, !13}
+!llvm.ident = !{!14}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   72
+; CHECK-NEXT:        .long   72
+; CHECK-NEXT:        .long   10
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 2)
+; CHECK-NEXT:        .long   218103810               # 0xd000002
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .long   5                       # BTF_KIND_INT(id = 4)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   16777224                # 0x1000008
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "char"                  # string offset=5
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!9, !9, !10}
+!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!10 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!11 = !{i32 2, !"Dwarf Version", i32 4}
+!12 = !{i32 2, !"Debug Info Version", i32 3}
+!13 = !{i32 1, !"wchar_size", i32 4}
+!14 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-func-3.ll b/test/CodeGen/BPF/BTF/ptr-func-3.ll
new file mode 100644
index 0000000..85a15b3
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-func-3.ll
@@ -0,0 +1,73 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   typedef int __int;
+;   __int (*a)(__int a, __int b);
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i32 (i32, i32)* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!11, !12, !13}
+!llvm.ident = !{!14}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   68
+; CHECK-NEXT:        .long   68
+; CHECK-NEXT:        .long   11
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_FUNC_PROTO(id = 2)
+; CHECK-NEXT:        .long   218103810               # 0xd000002
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   1                       # BTF_KIND_TYPEDEF(id = 3)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   7                       # BTF_KIND_INT(id = 4)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "__int"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!9, !9, !9}
+!9 = !DIDerivedType(tag: DW_TAG_typedef, name: "__int", file: !3, line: 1, baseType: !10)
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !{i32 2, !"Dwarf Version", i32 4}
+!12 = !{i32 2, !"Debug Info Version", i32 3}
+!13 = !{i32 1, !"wchar_size", i32 4}
+!14 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-int.ll b/test/CodeGen/BPF/BTF/ptr-int.ll
new file mode 100644
index 0000000..1203f79
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-int.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int *a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i32* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!8, !9, !10}
+!llvm.ident = !{!11}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   28
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{i32 2, !"Dwarf Version", i32 4}
+!9 = !{i32 2, !"Debug Info Version", i32 3}
+!10 = !{i32 1, !"wchar_size", i32 4}
+!11 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-void.ll b/test/CodeGen/BPF/BTF/ptr-void.ll
new file mode 100644
index 0000000..e83e130
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-void.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   void *a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i8* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   12
+; CHECK-NEXT:        .long   12
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-volatile-const-void.ll b/test/CodeGen/BPF/BTF/ptr-volatile-const-void.ll
new file mode 100644
index 0000000..3d5a227
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-volatile-const-void.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   volatile const void *p;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
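+; Expected type chain (kinds encoded as kind << 24 in the info word,
+; assuming the kernel's numbering): PTR(2, 0x2000000) -> CONST(10,
+; 0xa000000) -> VOLATILE(9, 0x9000000) -> void (type id 0). The
+; qualifiers chain in the same order as in the debug metadata below.
+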
+@p = common dso_local local_unnamed_addr global i8* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!9, !10, !11}
+!llvm.ident = !{!12}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   36
+; CHECK-NEXT:        .long   36
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_CONST(id = 2)
+; CHECK-NEXT:        .long   167772160               # 0xa000000
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   0                       # BTF_KIND_VOLATILE(id = 3)
+; CHECK-NEXT:        .long   150994944               # 0x9000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "p", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !8)
+!8 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: null)
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 3}
+!11 = !{i32 1, !"wchar_size", i32 4}
+!12 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ptr-volatile-void.ll b/test/CodeGen/BPF/BTF/ptr-volatile-void.ll
new file mode 100644
index 0000000..185eee0
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ptr-volatile-void.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   volatile void *p;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@p = common dso_local local_unnamed_addr global i8* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!8, !9, !10}
+!llvm.ident = !{!11}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 1)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_VOLATILE(id = 2)
+; CHECK-NEXT:        .long   150994944               # 0x9000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "p", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: null)
+!8 = !{i32 2, !"Dwarf Version", i32 4}
+!9 = !{i32 2, !"Debug Info Version", i32 3}
+!10 = !{i32 1, !"wchar_size", i32 4}
+!11 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/restrict-ptr.ll b/test/CodeGen/BPF/BTF/restrict-ptr.ll
new file mode 100644
index 0000000..97cbbe4
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/restrict-ptr.ll
@@ -0,0 +1,61 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   int * restrict p;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@p = common dso_local local_unnamed_addr global i32* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!9, !10, !11}
+!llvm.ident = !{!12}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   40
+; CHECK-NEXT:        .long   40
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   0                       # BTF_KIND_RESTRICT(id = 1)
+; CHECK-NEXT:        .long   184549376               # 0xb000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0                       # BTF_KIND_PTR(id = 2)
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "p", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_restrict_type, baseType: !7)
+!7 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !8, size: 64)
+!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 3}
+!11 = !{i32 1, !"wchar_size", i32 4}
+!12 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/short.ll b/test/CodeGen/BPF/BTF/short.ll
new file mode 100644
index 0000000..56b779c
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/short.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   short a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+
+@a = common dso_local local_unnamed_addr global i16 0, align 2, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   16777232                # 0x1000010
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "short"                 # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "short", size: 16, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/struct-anon.ll b/test/CodeGen/BPF/BTF/struct-anon.ll
new file mode 100644
index 0000000..3be1752
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/struct-anon.ll
@@ -0,0 +1,76 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   struct { struct {int m;}; } a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+%struct.anon = type { %struct.anon.0 }
+%struct.anon.0 = type { i32 }
+
+@a = common dso_local local_unnamed_addr global %struct.anon zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!13, !14, !15}
+!llvm.ident = !{!16}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   64
+; CHECK-NEXT:        .long   64
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   0                       # BTF_KIND_STRUCT(id = 1)
+; CHECK-NEXT:        .long   67108865                # 0x4000001
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   0                       # BTF_KIND_STRUCT(id = 2)
+; CHECK-NEXT:        .long   67108865                # 0x4000001
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   3                       # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .byte   109                     # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=3
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, file: !3, line: 1, size: 32, elements: !7)
+!7 = !{!8}
+!8 = !DIDerivedType(tag: DW_TAG_member, scope: !6, file: !3, line: 1, baseType: !9, size: 32)
+!9 = distinct !DICompositeType(tag: DW_TAG_structure_type, scope: !6, file: !3, line: 1, size: 32, elements: !10)
+!10 = !{!11}
+!11 = !DIDerivedType(tag: DW_TAG_member, name: "m", scope: !9, file: !3, line: 1, baseType: !12, size: 32)
+!12 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!13 = !{i32 2, !"Dwarf Version", i32 4}
+!14 = !{i32 2, !"Debug Info Version", i32 3}
+!15 = !{i32 1, !"wchar_size", i32 4}
+!16 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/struct-basic.ll b/test/CodeGen/BPF/BTF/struct-basic.ll
new file mode 100644
index 0000000..69dc0ca
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/struct-basic.ll
@@ -0,0 +1,81 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   struct t1 {char m1; int n1;} a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
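+; A sketch of what the struct entry below encodes (assuming the kernel's
+; BTF layout): the info word 0x4000002 is BTF_KIND_STRUCT(4) << 24 with
+; vlen = 2 members, the size word is 8 bytes, and each member is three
+; words: name offset, member type id, and bit offset. Here "m1" is a
+; char (id 2) at bit 0 and "n1" an int (id 3) at bit 32.
+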
+%struct.t1 = type { i8, i32 }
+
+@a = common dso_local local_unnamed_addr global %struct.t1 zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!12, !13, !14}
+!llvm.ident = !{!15}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   68
+; CHECK-NEXT:        .long   68
+; CHECK-NEXT:        .long   19
+; CHECK-NEXT:        .long   1                       # BTF_KIND_STRUCT(id = 1)
+; CHECK-NEXT:        .long   67108866                # 0x4000002
+; CHECK-NEXT:        .long   8
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   7
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   32
+; CHECK-NEXT:        .long   10                      # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   16777224                # 0x1000008
+; CHECK-NEXT:        .long   15                      # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "t1"                    # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "m1"                    # string offset=4
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "n1"                    # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "char"                  # string offset=10
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=15
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1", file: !3, line: 1, size: 64, elements: !7)
+!7 = !{!8, !10}
+!8 = !DIDerivedType(tag: DW_TAG_member, name: "m1", scope: !6, file: !3, line: 1, baseType: !9, size: 8)
+!9 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!10 = !DIDerivedType(tag: DW_TAG_member, name: "n1", scope: !6, file: !3, line: 1, baseType: !11, size: 32, offset: 32)
+!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!12 = !{i32 2, !"Dwarf Version", i32 4}
+!13 = !{i32 2, !"Debug Info Version", i32 3}
+!14 = !{i32 1, !"wchar_size", i32 4}
+!15 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/struct-bitfield-typedef.ll b/test/CodeGen/BPF/BTF/struct-bitfield-typedef.ll
new file mode 100644
index 0000000..502efb0
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/struct-bitfield-typedef.ll
@@ -0,0 +1,99 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   typedef int _int;
+;   typedef _int __int;
+;   struct {char m:2; __int n:3; char p;} a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
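+; The struct's info word 0x84000003 has kind_flag (bit 31) set, so each
+; member's offset word packs the bitfield size and bit offset. A sketch
+; using a hypothetical helper, assuming the kernel's encoding:
+;
+;   #define BTF_MEMBER_OFFS(size, off)  (((size) << 24) | (off))
+;
+; e.g. 0x3000002 below is the 3-bit field "n" at bit offset 2, while the
+; plain member "p" gets bitfield size 0 with bit offset 8.
+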
+%struct.anon = type { i8, i8, [2 x i8] }
+
+@a = common dso_local local_unnamed_addr global %struct.anon zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!15, !16, !17}
+!llvm.ident = !{!18}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   104
+; CHECK-NEXT:        .long   104
+; CHECK-NEXT:        .long   27
+; CHECK-NEXT:        .long   0                       # BTF_KIND_STRUCT(id = 1)
+; CHECK-NEXT:        .long   2214592515              # 0x84000003
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   50331650                # 0x3000002
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   8                       # 0x8
+; CHECK-NEXT:        .long   7                       # BTF_KIND_INT(id = 2)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   16777224                # 0x1000008
+; CHECK-NEXT:        .long   12                      # BTF_KIND_TYPEDEF(id = 3)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   18                      # BTF_KIND_TYPEDEF(id = 4)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   23                      # BTF_KIND_INT(id = 5)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .byte   109                     # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   110                     # string offset=3
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   112                     # string offset=5
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "char"                  # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "__int"                 # string offset=12
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "_int"                  # string offset=18
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=23
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 3, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, file: !3, line: 3, size: 32, elements: !7)
+!7 = !{!8, !10, !14}
+!8 = !DIDerivedType(tag: DW_TAG_member, name: "m", scope: !6, file: !3, line: 3, baseType: !9, size: 2, flags: DIFlagBitField, extraData: i64 0)
+!9 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!10 = !DIDerivedType(tag: DW_TAG_member, name: "n", scope: !6, file: !3, line: 3, baseType: !11, size: 3, offset: 2, flags: DIFlagBitField, extraData: i64 0)
+!11 = !DIDerivedType(tag: DW_TAG_typedef, name: "__int", file: !3, line: 2, baseType: !12)
+!12 = !DIDerivedType(tag: DW_TAG_typedef, name: "_int", file: !3, line: 1, baseType: !13)
+!13 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!14 = !DIDerivedType(tag: DW_TAG_member, name: "p", scope: !6, file: !3, line: 3, baseType: !9, size: 8, offset: 8)
+!15 = !{i32 2, !"Dwarf Version", i32 4}
+!16 = !{i32 2, !"Debug Info Version", i32 3}
+!17 = !{i32 1, !"wchar_size", i32 4}
+!18 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/struct-enum.ll b/test/CodeGen/BPF/BTF/struct-enum.ll
new file mode 100644
index 0000000..c5d9243
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/struct-enum.ll
@@ -0,0 +1,86 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   enum t1 { A , B };
+;   struct t2 { enum t1 m:2; enum t1 n; } a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
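+; The enum entry below (info 0x6000002, i.e. BTF_KIND_ENUM(6) << 24 with
+; vlen = 2, assuming the kernel's numbering) is followed by one name
+; offset/value pair per enumerator: A = 0 and B = 1.
+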
+%struct.t2 = type { i8, i32 }
+
+@a = common dso_local local_unnamed_addr global %struct.t2 zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!15, !16, !17}
+!llvm.ident = !{!18}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   64
+; CHECK-NEXT:        .long   64
+; CHECK-NEXT:        .long   15
+; CHECK-NEXT:        .long   1                       # BTF_KIND_STRUCT(id = 1)
+; CHECK-NEXT:        .long   2214592514              # 0x84000002
+; CHECK-NEXT:        .long   8
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   33554432                # 0x2000000
+; CHECK-NEXT:        .long   6
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .long   8                       # BTF_KIND_ENUM(id = 2)
+; CHECK-NEXT:        .long   100663298               # 0x6000002
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   11
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   13
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "t2"                    # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   109                     # string offset=4
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   110                     # string offset=6
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "t1"                    # string offset=8
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   65                      # string offset=11
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   66                      # string offset=13
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 2, type: !11, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !10, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{!5}
+!5 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "t1", file: !3, line: 1, baseType: !6, size: 32, elements: !7)
+!6 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!7 = !{!8, !9}
+!8 = !DIEnumerator(name: "A", value: 0, isUnsigned: true)
+!9 = !DIEnumerator(name: "B", value: 1, isUnsigned: true)
+!10 = !{!0}
+!11 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t2", file: !3, line: 2, size: 64, elements: !12)
+!12 = !{!13, !14}
+!13 = !DIDerivedType(tag: DW_TAG_member, name: "m", scope: !11, file: !3, line: 2, baseType: !5, size: 2, flags: DIFlagBitField, extraData: i64 0)
+!14 = !DIDerivedType(tag: DW_TAG_member, name: "n", scope: !11, file: !3, line: 2, baseType: !5, size: 32, offset: 32)
+!15 = !{i32 2, !"Dwarf Version", i32 4}
+!16 = !{i32 2, !"Debug Info Version", i32 3}
+!17 = !{i32 1, !"wchar_size", i32 4}
+!18 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/uchar.ll b/test/CodeGen/BPF/BTF/uchar.ll
new file mode 100644
index 0000000..19dad38
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/uchar.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   unsigned char a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
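+; BTF_KIND_INT carries one extra 32-bit word after the size. A sketch of
+; its layout, assuming the kernel's encoding (encoding in bits 24-27,
+; bit offset in bits 16-23, bit width in bits 0-15):
+;
+;   #define BTF_INT_ENC(encoding, offset, bits) \
+;     (((encoding) << 24) | ((offset) << 16) | (bits))
+;
+; so the 0x8 below means encoding 0 (unsigned), offset 0, width 8 bits.
+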
+@a = common dso_local local_unnamed_addr global i8 0, align 1, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   15
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   8                       # 0x8
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "unsigned char"         # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "unsigned char", size: 8, encoding: DW_ATE_unsigned_char)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/uint.ll b/test/CodeGen/BPF/BTF/uint.ll
new file mode 100644
index 0000000..b87b2ab
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/uint.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   unsigned a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i32 0, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   14
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "unsigned int"          # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/ulonglong.ll b/test/CodeGen/BPF/BTF/ulonglong.ll
new file mode 100644
index 0000000..b8ff144
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ulonglong.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   unsigned long long a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i64 0, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   8
+; CHECK-NEXT:        .long   64                      # 0x40
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "long long unsigned int" # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "long long unsigned int", size: 64, encoding: DW_ATE_unsigned)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/BTF/union-array-typedef.ll b/test/CodeGen/BPF/BTF/union-array-typedef.ll
new file mode 100644
index 0000000..53a4c60
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/union-array-typedef.ll
@@ -0,0 +1,103 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   typedef int _int;
+;   union t {char m[4]; _int n;} a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
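+; The array entry below (info 0x3000000, BTF_KIND_ARRAY(3) << 24,
+; assuming the kernel's numbering) has an unused size word of 0 followed
+; by three words: element type id (4, char), index type id (3, the
+; __ARRAY_SIZE_TYPE__ int) and the number of elements (4).
+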
+%union.t = type { i32 }
+
+@a = common dso_local local_unnamed_addr global %union.t zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!16, !17, !18}
+!llvm.ident = !{!19}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   120
+; CHECK-NEXT:        .long   120
+; CHECK-NEXT:        .long   41
+; CHECK-NEXT:        .long   1                       # BTF_KIND_UNION(id = 1)
+; CHECK-NEXT:        .long   83886082                # 0x5000002
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   5
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   0                       # BTF_KIND_ARRAY(id = 2)
+; CHECK-NEXT:        .long   50331648                # 0x3000000
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   3
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   7                       # BTF_KIND_INT(id = 3)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   32                      # 0x20
+; CHECK-NEXT:        .long   27                      # BTF_KIND_INT(id = 4)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   1
+; CHECK-NEXT:        .long   16777224                # 0x1000008
+; CHECK-NEXT:        .long   32                      # BTF_KIND_TYPEDEF(id = 5)
+; CHECK-NEXT:        .long   134217728               # 0x8000000
+; CHECK-NEXT:        .long   6
+; CHECK-NEXT:        .long   37                      # BTF_KIND_INT(id = 6)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   16777248                # 0x1000020
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .byte   116                     # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   109                     # string offset=3
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .byte   110                     # string offset=5
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "__ARRAY_SIZE_TYPE__"   # string offset=7
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "char"                  # string offset=27
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "_int"                  # string offset=32
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .ascii  "int"                   # string offset=37
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_union_type, name: "t", file: !3, line: 2, size: 32, elements: !7)
+!7 = !{!8, !13}
+!8 = !DIDerivedType(tag: DW_TAG_member, name: "m", scope: !6, file: !3, line: 2, baseType: !9, size: 32)
+!9 = !DICompositeType(tag: DW_TAG_array_type, baseType: !10, size: 32, elements: !11)
+!10 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!11 = !{!12}
+!12 = !DISubrange(count: 4)
+!13 = !DIDerivedType(tag: DW_TAG_member, name: "n", scope: !6, file: !3, line: 2, baseType: !14, size: 32)
+!14 = !DIDerivedType(tag: DW_TAG_typedef, name: "_int", file: !3, line: 1, baseType: !15)
+!15 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!16 = !{i32 2, !"Dwarf Version", i32 4}
+!17 = !{i32 2, !"Debug Info Version", i32 3}
+!18 = !{i32 1, !"wchar_size", i32 4}
+!19 = !{!"clang version 8.0.0 (trunk 345296) (llvm/trunk 345297)"}
diff --git a/test/CodeGen/BPF/BTF/ushort.ll b/test/CodeGen/BPF/BTF/ushort.ll
new file mode 100644
index 0000000..2eb28a31
--- /dev/null
+++ b/test/CodeGen/BPF/BTF/ushort.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK %s
+
+; Source code:
+;   unsigned short a;
+; Compilation flag:
+;   clang -target bpf -O2 -g -S -emit-llvm t.c
+
+@a = common dso_local local_unnamed_addr global i16 0, align 2, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+; CHECK:             .section        .BTF,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   16
+; CHECK-NEXT:        .long   1                       # BTF_KIND_INT(id = 1)
+; CHECK-NEXT:        .long   16777216                # 0x1000000
+; CHECK-NEXT:        .long   2
+; CHECK-NEXT:        .long   16                      # 0x10
+; CHECK-NEXT:        .byte   0                       # string offset=0
+; CHECK-NEXT:        .ascii  "unsigned short"        # string offset=1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .section        .BTF.ext,"",@progbits
+; CHECK-NEXT:        .short  60319                   # 0xeb9f
+; CHECK-NEXT:        .byte   1
+; CHECK-NEXT:        .byte   0
+; CHECK-NEXT:        .long   24
+; CHECK-NEXT:        .long   0
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   4
+; CHECK-NEXT:        .long   8                       # FuncInfo
+; CHECK-NEXT:        .long   16                      # LineInfo
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.c", directory: "/home/yhs/tmp")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "unsigned short", size: 16, encoding: DW_ATE_unsigned)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{!"clang version 8.0.0 (trunk 344789) (llvm/trunk 344782)"}
diff --git a/test/CodeGen/BPF/reloc-btf.ll b/test/CodeGen/BPF/reloc-btf.ll
new file mode 100644
index 0000000..4d6e3f5
--- /dev/null
+++ b/test/CodeGen/BPF/reloc-btf.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=bpfel -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
+
+; Function Attrs: norecurse nounwind readnone
+define dso_local i32 @test() local_unnamed_addr #0 !dbg !7 {
+entry:
+  ret i32 0, !dbg !11
+}
+
+; CHECK-RELOC: file format ELF64-BPF
+; CHECK-RELOC: RELOCATION RECORDS FOR [.rel.debug_info]:
+; CHECK-RELOC: R_BPF_64_32 .debug_abbrev
+; CHECK-RELOC: R_BPF_64_64 .text
+; CHECK-RELOC: RELOCATION RECORDS FOR [.rel.BTF.ext]:
+; CHECK-RELOC: R_BPF_NONE .text
+
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 350573) (llvm/trunk 350569)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "tt.c", directory: "/home/yhs/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 350573) (llvm/trunk 350569)"}
+!7 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, type: !8, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10}
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !DILocation(line: 1, column: 14, scope: !7)
diff --git a/test/CodeGen/Hexagon/autohvx/bitwise-pred-128b.ll b/test/CodeGen/Hexagon/autohvx/bitwise-pred-128b.ll
index 129c952..0fc8ba4 100644
--- a/test/CodeGen/Hexagon/autohvx/bitwise-pred-128b.ll
+++ b/test/CodeGen/Hexagon/autohvx/bitwise-pred-128b.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
 ; CHECK-LABEL: t00
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <128 x i8> @t00(<128 x i8> %a0, <128 x i8> %a1) #0 {
   %q0 = trunc <128 x i8> %a0 to <128 x i1>
   %q1 = trunc <128 x i8> %a1 to <128 x i1>
@@ -13,7 +13,7 @@
 declare <1024 x i1> @llvm.hexagon.vandvrt.128B(<128 x i8>, i32)
 
 ; CHECK-LABEL: t01
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <128 x i8> @t01(<128 x i8> %a0, <128 x i8> %a1) #0 {
   %q0 = trunc <128 x i8> %a0 to <128 x i1>
   %q1 = trunc <128 x i8> %a1 to <128 x i1>
@@ -23,7 +23,7 @@
 }
 
 ; CHECK-LABEL: t02
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <128 x i8> @t02(<128 x i8> %a0, <128 x i8> %a1) #0 {
   %q0 = trunc <128 x i8> %a0 to <128 x i1>
   %q1 = trunc <128 x i8> %a1 to <128 x i1>
@@ -33,7 +33,7 @@
 }
 
 ; CHECK-LABEL: t10
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i16> @t10(<64 x i16> %a0, <64 x i16> %a1) #0 {
   %q0 = trunc <64 x i16> %a0 to <64 x i1>
   %q1 = trunc <64 x i16> %a1 to <64 x i1>
@@ -43,7 +43,7 @@
 }
 
 ; CHECK-LABEL: t11
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i16> @t11(<64 x i16> %a0, <64 x i16> %a1) #0 {
   %q0 = trunc <64 x i16> %a0 to <64 x i1>
   %q1 = trunc <64 x i16> %a1 to <64 x i1>
@@ -53,7 +53,7 @@
 }
 
 ; CHECK-LABEL: t12
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i16> @t12(<64 x i16> %a0, <64 x i16> %a1) #0 {
   %q0 = trunc <64 x i16> %a0 to <64 x i1>
   %q1 = trunc <64 x i16> %a1 to <64 x i1>
@@ -63,7 +63,7 @@
 }
 
 ; CHECK-LABEL: t20
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i32> @t20(<32 x i32> %a0, <32 x i32> %a1) #0 {
   %q0 = trunc <32 x i32> %a0 to <32 x i1>
   %q1 = trunc <32 x i32> %a1 to <32 x i1>
@@ -73,7 +73,7 @@
 }
 
 ; CHECK-LABEL: t21
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i32> @t21(<32 x i32> %a0, <32 x i32> %a1) #0 {
   %q0 = trunc <32 x i32> %a0 to <32 x i1>
   %q1 = trunc <32 x i32> %a1 to <32 x i1>
@@ -83,7 +83,7 @@
 }
 
 ; CHECK-LABEL: t22
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i32> @t22(<32 x i32> %a0, <32 x i32> %a1) #0 {
   %q0 = trunc <32 x i32> %a0 to <32 x i1>
   %q1 = trunc <32 x i32> %a1 to <32 x i1>
diff --git a/test/CodeGen/Hexagon/autohvx/bitwise-pred-64b.ll b/test/CodeGen/Hexagon/autohvx/bitwise-pred-64b.ll
index 1b547f7..3895670 100644
--- a/test/CodeGen/Hexagon/autohvx/bitwise-pred-64b.ll
+++ b/test/CodeGen/Hexagon/autohvx/bitwise-pred-64b.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
 ; CHECK-LABEL: t00
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i8> @t00(<64 x i8> %a0, <64 x i8> %a1) #0 {
   %q0 = trunc <64 x i8> %a0 to <64 x i1>
   %q1 = trunc <64 x i8> %a1 to <64 x i1>
@@ -11,7 +11,7 @@
 }
 
 ; CHECK-LABEL: t01
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i8> @t01(<64 x i8> %a0, <64 x i8> %a1) #0 {
   %q0 = trunc <64 x i8> %a0 to <64 x i1>
   %q1 = trunc <64 x i8> %a1 to <64 x i1>
@@ -21,7 +21,7 @@
 }
 
 ; CHECK-LABEL: t02
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i8> @t02(<64 x i8> %a0, <64 x i8> %a1) #0 {
   %q0 = trunc <64 x i8> %a0 to <64 x i1>
   %q1 = trunc <64 x i8> %a1 to <64 x i1>
@@ -31,7 +31,7 @@
 }
 
 ; CHECK-LABEL: t10
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i16> @t10(<32 x i16> %a0, <32 x i16> %a1) #0 {
   %q0 = trunc <32 x i16> %a0 to <32 x i1>
   %q1 = trunc <32 x i16> %a1 to <32 x i1>
@@ -41,7 +41,7 @@
 }
 
 ; CHECK-LABEL: t11
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i16> @t11(<32 x i16> %a0, <32 x i16> %a1) #0 {
   %q0 = trunc <32 x i16> %a0 to <32 x i1>
   %q1 = trunc <32 x i16> %a1 to <32 x i1>
@@ -51,7 +51,7 @@
 }
 
 ; CHECK-LABEL: t12
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i16> @t12(<32 x i16> %a0, <32 x i16> %a1) #0 {
   %q0 = trunc <32 x i16> %a0 to <32 x i1>
   %q1 = trunc <32 x i16> %a1 to <32 x i1>
@@ -61,7 +61,7 @@
 }
 
 ; CHECK-LABEL: t20
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <16 x i32> @t20(<16 x i32> %a0, <16 x i32> %a1) #0 {
   %q0 = trunc <16 x i32> %a0 to <16 x i1>
   %q1 = trunc <16 x i32> %a1 to <16 x i1>
@@ -71,7 +71,7 @@
 }
 
 ; CHECK-LABEL: t21
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <16 x i32> @t21(<16 x i32> %a0, <16 x i32> %a1) #0 {
   %q0 = trunc <16 x i32> %a0 to <16 x i1>
   %q1 = trunc <16 x i32> %a1 to <16 x i1>
@@ -81,7 +81,7 @@
 }
 
 ; CHECK-LABEL: t22
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <16 x i32> @t22(<16 x i32> %a0, <16 x i32> %a1) #0 {
   %q0 = trunc <16 x i32> %a0 to <16 x i1>
   %q1 = trunc <16 x i32> %a1 to <16 x i1>
diff --git a/test/CodeGen/Hexagon/cfi-late.ll b/test/CodeGen/Hexagon/cfi-late.ll
index 460b645..6748542 100644
--- a/test/CodeGen/Hexagon/cfi-late.ll
+++ b/test/CodeGen/Hexagon/cfi-late.ll
@@ -41,7 +41,7 @@
 !llvm.module.flags = !{!11, !12}
 !llvm.ident = !{!13}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0 (http://llvm.org/git/clang.git 15506a21305e212c406f980ed9b6b1bac785df56)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "cfi-late.c", directory: "/test")
 !2 = !{}
 !4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !8)
@@ -53,7 +53,7 @@
 !10 = !DILocalVariable(name: "y", arg: 2, scope: !4, file: !1, line: 3, type: !7)
 !11 = !{i32 2, !"Dwarf Version", i32 4}
 !12 = !{i32 2, !"Debug Info Version", i32 3}
-!13 = !{!"clang version 3.8.0 (http://llvm.org/git/clang.git 15506a21305e212c406f980ed9b6b1bac785df56)"}
+!13 = !{!"clang version 3.8.0"}
 !14 = !DIExpression()
 !15 = !DILocation(line: 3, column: 13, scope: !4)
 !16 = !DILocation(line: 3, column: 20, scope: !4)
diff --git a/test/CodeGen/Hexagon/copy-to-combine-dbg.ll b/test/CodeGen/Hexagon/copy-to-combine-dbg.ll
index 9a4d432..837e97d 100644
--- a/test/CodeGen/Hexagon/copy-to-combine-dbg.ll
+++ b/test/CodeGen/Hexagon/copy-to-combine-dbg.ll
@@ -36,7 +36,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!3, !4}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 37afcb099ac2b001f4c826da7ca1d077b67a508c) (http://llvm.org/git/llvm.git 5887f1c75b3ba216850c834b186efdd3e54b7d4f)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "file.c", directory: "/")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
diff --git a/test/CodeGen/Hexagon/funnel-shift.ll b/test/CodeGen/Hexagon/funnel-shift.ll
new file mode 100644
index 0000000..fcf623f
--- /dev/null
+++ b/test/CodeGen/Hexagon/funnel-shift.ll
@@ -0,0 +1,265 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
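+; As a reading aid (semantics per the LangRef): fshl returns the top N
+; bits of the 2N-bit concatenation a:b shifted left by s % N, and fshr
+; the bottom N bits of a:b shifted right, i.e. for s % N != 0:
+;   fshl(a, b, s) = (a << (s % N)) | (b >> (N - s % N))
+;   fshr(a, b, s) = (a << (N - s % N)) | (b >> (s % N))
+; With both inputs equal these reduce to rotates, as in f8-f15.
+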
+; CHECK-LABEL: f0:
+; CHECK: r[[R00:[0-9]+]]:[[R01:[0-9]+]] = combine(r0,r1)
+; CHECK: r[[R02:[0-9]+]]:[[R03:[0-9]+]] = asl(r[[R00]]:[[R01]],#17)
+define i32 @f0(i32 %a0, i32 %a1) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a1, i32 17)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f1:
+; CHECK: r[[R10:[0-9]+]]:[[R11:[0-9]+]] = combine(r0,r1)
+; CHECK: r[[R12:[0-9]+]]:[[R13:[0-9]+]] = asl(r[[R10]]:[[R11]],r2)
+define i32 @f1(i32 %a0, i32 %a1, i32 %a2) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a1, i32 %a2)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f2:
+; CHECK: r[[R20:[0-9]+]]:[[R21:[0-9]+]] = asl(r3:2,#17)
+; CHECK: r[[R20]]:[[R21]] |= lsr(r1:0,#47)
+define i64 @f2(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 17)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f3:
+; CHECK: r[[R30:[0-9]+]]:[[R31:[0-9]+]] = asl(r1:0,r4)
+; CHECK: r[[R32:[0-9]+]] = sub(#64,r4)
+; CHECK: r[[R30]]:[[R31]] |= lsr(r3:2,r[[R32]])
+define i64 @f3(i64 %a0, i64 %a1, i64 %a2) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 %a2)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f4:
+; CHECK: r[[R40:[0-9]+]]:[[R41:[0-9]+]] = combine(r0,r1)
+; CHECK: r[[R42:[0-9]+]]:[[R43:[0-9]+]] = lsr(r[[R40]]:[[R41]],#17)
+define i32 @f4(i32 %a0, i32 %a1) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a1, i32 17)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f5:
+; CHECK: r[[R50:[0-9]+]]:[[R51:[0-9]+]] = combine(r0,r1)
+; CHECK: r[[R52:[0-9]+]]:[[R53:[0-9]+]] = lsr(r[[R50]]:[[R51]],r2)
+define i32 @f5(i32 %a0, i32 %a1, i32 %a2) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a1, i32 %a2)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f6:
+; CHECK: r[[R60:[0-9]+]]:[[R61:[0-9]+]] = lsr(r3:2,#17)
+; CHECK: r[[R60]]:[[R61]] |= asl(r1:0,#47)
+define i64 @f6(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 17)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f7:
+; CHECK: r[[R70:[0-9]+]]:[[R71:[0-9]+]] = lsr(r3:2,r4)
+; CHECK: r[[R72:[0-9]+]] = sub(#64,r4)
+; CHECK: r[[R70]]:[[R71]] |= asl(r1:0,r[[R72]])
+define i64 @f7(i64 %a0, i64 %a1, i64 %a2) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 %a2)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f8:
+; CHECK: r[[R80:[0-9]+]] = rol(r0,#17)
+define i32 @f8(i32 %a0) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a0, i32 17)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f9:
+; CHECK: r[[R90:[0-9]+]]:[[R91:[0-9]+]] = combine(r0,r0)
+; CHECK: r[[R92:[0-9]+]]:[[R93:[0-9]+]] = asl(r[[R90]]:[[R91]],r1)
+define i32 @f9(i32 %a0, i32 %a1) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a0, i32 %a1)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f10:
+; CHECK: r[[RA0:[0-9]+]]:[[RA1:[0-9]+]] = rol(r1:0,#17)
+define i64 @f10(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a0, i64 17)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f11:
+; CHECK: r[[RB0:[0-9]+]]:[[RB1:[0-9]+]] = asl(r1:0,r2)
+; CHECK: r[[RB2:[0-9]+]] = sub(#64,r2)
+; CHECK: r[[RB0]]:[[RB1]] |= lsr(r1:0,r[[RB2]])
+define i64 @f11(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a0, i64 %a1)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f12:
+; CHECK: r[[RC0:[0-9]+]] = rol(r0,#15)
+define i32 @f12(i32 %a0, i32 %a1) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a0, i32 17)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f13:
+; CHECK: r[[RD0:[0-9]+]]:[[RD1:[0-9]+]] = combine(r0,r0)
+; CHECK: r[[RD2:[0-9]+]]:[[RD3:[0-9]+]] = lsr(r[[RD0]]:[[RD1]],r1)
+define i32 @f13(i32 %a0, i32 %a1) #1 {
+b0:
+  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a0, i32 %a1)
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f14:
+; CHECK: r[[RE0:[0-9]+]]:[[RE1:[0-9]+]] = rol(r1:0,#47)
+define i64 @f14(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a0, i64 17)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f15:
+; CHECK: r[[RF0:[0-9]+]]:[[RF1:[0-9]+]] = lsr(r1:0,r2)
+; CHECK: r[[RF2:[0-9]+]] = sub(#64,r2)
+; CHECK: r[[RF0]]:[[RF1]] |= asl(r1:0,r[[RF2]])
+define i64 @f15(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a0, i64 %a1)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f16:
+; CHECK: r[[RG0:[0-9]+]]:[[RG1:[0-9]+]] = valignb(r1:0,r3:2,#7)
+define i64 @f16(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 8)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f17:
+; CHECK: r[[RH0:[0-9]+]]:[[RH1:[0-9]+]] = valignb(r1:0,r3:2,#6)
+define i64 @f17(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 16)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f18:
+; CHECK: r[[RI0:[0-9]+]]:[[RI1:[0-9]+]] = valignb(r1:0,r3:2,#5)
+define i64 @f18(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 24)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f19:
+; CHECK: r[[RJ0:[0-9]+]]:[[RJ1:[0-9]+]] = valignb(r1:0,r3:2,#4)
+define i64 @f19(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 32)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f20:
+; CHECK: r[[RK0:[0-9]+]]:[[RK1:[0-9]+]] = valignb(r1:0,r3:2,#3)
+define i64 @f20(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 40)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f21:
+; CHECK: r[[RL0:[0-9]+]]:[[RL1:[0-9]+]] = valignb(r1:0,r3:2,#2)
+define i64 @f21(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 48)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f22:
+; CHECK: r[[RM0:[0-9]+]]:[[RM1:[0-9]+]] = valignb(r1:0,r3:2,#1)
+define i64 @f22(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 56)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f23:
+; CHECK: r[[RN0:[0-9]+]]:[[RN1:[0-9]+]] = valignb(r1:0,r3:2,#1)
+define i64 @f23(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 8)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f24:
+; CHECK: r[[RO0:[0-9]+]]:[[RO1:[0-9]+]] = valignb(r1:0,r3:2,#2)
+define i64 @f24(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 16)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f25:
+; CHECK: r[[RP0:[0-9]+]]:[[RP1:[0-9]+]] = valignb(r1:0,r3:2,#3)
+define i64 @f25(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 24)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f26:
+; CHECK: r[[RQ0:[0-9]+]]:[[RQ1:[0-9]+]] = valignb(r1:0,r3:2,#4)
+define i64 @f26(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 32)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f27:
+; CHECK: r[[RR0:[0-9]+]]:[[RR1:[0-9]+]] = valignb(r1:0,r3:2,#5)
+define i64 @f27(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 40)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f28:
+; CHECK: r[[RS0:[0-9]+]]:[[RS1:[0-9]+]] = valignb(r1:0,r3:2,#6)
+define i64 @f28(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 48)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f29:
+; CHECK: r[[RT0:[0-9]+]]:[[RT1:[0-9]+]] = valignb(r1:0,r3:2,#7)
+define i64 @f29(i64 %a0, i64 %a1) #1 {
+b0:
+  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 56)
+  ret i64 %v0
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32) #0
+declare i32 @llvm.fshr.i32(i32, i32, i32) #0
+declare i64 @llvm.fshl.i64(i64, i64, i64) #0
+declare i64 @llvm.fshr.i64(i64, i64, i64) #0
+
+attributes #0 = { nounwind readnone speculatable }
+attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="-packets" }
diff --git a/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll b/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll
new file mode 100644
index 0000000..995ce06
--- /dev/null
+++ b/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; This used to crash with a "cannot select" error.
+; CHECK: vlsrh(r1:0,#4)
+
+target triple = "hexagon-unknown-linux-gnu"
+
+define <2 x i16> @foo(<2 x i32>* nocapture %v) nounwind {
+  %vec = load <2 x i32>, <2 x i32>* %v, align 8
+  %trunc = trunc <2 x i32> %vec to <2 x i16>
+  %r = lshr <2 x i16> %trunc, <i16 4, i16 4>
+  ret <2 x i16> %r
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+
diff --git a/test/CodeGen/Hexagon/misaligned-const-load.ll b/test/CodeGen/Hexagon/misaligned-const-load.ll
index 0a43796..37d1155 100644
--- a/test/CodeGen/Hexagon/misaligned-const-load.ll
+++ b/test/CodeGen/Hexagon/misaligned-const-load.ll
@@ -17,7 +17,7 @@
 !llvm.module.flags = !{!6, !7, !8}
 !llvm.ident = !{!9}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (http://llvm.org/git/clang.git 3fb90d137ea16e5c3a4580b9db5fd18d93df1a90) (http://llvm.org/git/llvm.git daf385e5698c00fdd693fac736acc96b95ccccd3)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3)
 !1 = !DIFile(filename: "misaligned-const-load.c", directory: "/test")
 !2 = !{}
 !3 = !{!4}
@@ -26,7 +26,7 @@
 !6 = !{i32 2, !"Dwarf Version", i32 4}
 !7 = !{i32 2, !"Debug Info Version", i32 3}
 !8 = !{i32 1, !"wchar_size", i32 4}
-!9 = !{!"clang version 8.0.0 (http://llvm.org/git/clang.git 3fb90d137ea16e5c3a4580b9db5fd18d93df1a90) (http://llvm.org/git/llvm.git daf385e5698c00fdd693fac736acc96b95ccccd3)"}
+!9 = !{!"clang version 8.0.0"}
 !10 = distinct !DISubprogram(name: "bad_load", scope: !1, file: !1, line: 1, type: !11, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
 !11 = !DISubroutineType(types: !12)
 !12 = !{!5}
diff --git a/test/CodeGen/Hexagon/misaligned-const-store.ll b/test/CodeGen/Hexagon/misaligned-const-store.ll
index b24cd77..311a56e 100644
--- a/test/CodeGen/Hexagon/misaligned-const-store.ll
+++ b/test/CodeGen/Hexagon/misaligned-const-store.ll
@@ -17,7 +17,7 @@
 !llvm.module.flags = !{!6, !7, !8}
 !llvm.ident = !{!9}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (http://llvm.org/git/clang.git 3fb90d137ea16e5c3a4580b9db5fd18d93df1a90) (http://llvm.org/git/llvm.git daf385e5698c00fdd693fac736acc96b95ccccd3)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3)
 !1 = !DIFile(filename: "misaligned-const-store.c", directory: "/test")
 !2 = !{}
 !3 = !{!4}
@@ -26,7 +26,7 @@
 !6 = !{i32 2, !"Dwarf Version", i32 4}
 !7 = !{i32 2, !"Debug Info Version", i32 3}
 !8 = !{i32 1, !"wchar_size", i32 4}
-!9 = !{!"clang version 8.0.0 (http://llvm.org/git/clang.git 3fb90d137ea16e5c3a4580b9db5fd18d93df1a90) (http://llvm.org/git/llvm.git daf385e5698c00fdd693fac736acc96b95ccccd3)"}
+!9 = !{!"clang version 8.0.0"}
 !10 = distinct !DISubprogram(name: "bad_store", scope: !1, file: !1, line: 1, type: !11, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
 !11 = !DISubroutineType(types: !12)
 !12 = !{!5}
diff --git a/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll b/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll
index 9bd33bc..c83389e 100644
--- a/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll
+++ b/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll
@@ -79,12 +79,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0 (http://llvm.org/git/clang.git 4b380bc1db8b0c72bdbdaf0e4697b1a84100a369) (http://llvm.org/git/llvm.git 6217a62bc009d55e160dbb694f2e94a22c80809f)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "bug.c", directory: "/")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 3.9.0 (http://llvm.org/git/clang.git 4b380bc1db8b0c72bdbdaf0e4697b1a84100a369) (http://llvm.org/git/llvm.git 6217a62bc009d55e160dbb694f2e94a22c80809f)"}
+!5 = !{!"clang version 3.9.0"}
 !6 = distinct !DISubprogram(name: "fred", scope: !1, file: !1, line: 138, type: !7, isLocal: false, isDefinition: true, scopeLine: 139, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !25)
 !7 = !DISubroutineType(types: !8)
 !8 = !{null, !9, !15}
diff --git a/test/CodeGen/Hexagon/rotate.ll b/test/CodeGen/Hexagon/rotate.ll
index bcc978f..3015a5a 100644
--- a/test/CodeGen/Hexagon/rotate.ll
+++ b/test/CodeGen/Hexagon/rotate.ll
@@ -13,13 +13,10 @@
 }
 
 ; CHECK-LABEL: f1
-; No variable-shift rotates. Check for the default expansion code.
-; This is a rotate left by %a1(r1).
-; CHECK: r[[R10:[0-9]+]] = sub(#32,r1)
-; CHECK: r[[R11:[0-9]+]] = and(r1,#31)
-; CHECK: r[[R12:[0-9]+]] = and(r[[R10]],#31)
-; CHECK: r[[R13:[0-9]+]] = asl(r0,r[[R11]])
-; CHECK: r[[R13]] |= lsr(r0,r[[R12]])
+; This is a rotate left by %a1(r1). Use a register-pair shift to implement it.
+; CHECK: r[[R10:[0-9]+]]:[[R11:[0-9]+]] = combine(r0,r0)
+; CHECK: r[[R12:[0-9]+]]:[[R13:[0-9]+]] = asl(r[[R10]]:[[R11]],r1)
+; CHECK: r0 = r[[R12]]
 define i32 @f1(i32 %a0, i32 %a1) #0 {
 b0:
   %v0 = shl i32 %a0, %a1
@@ -40,13 +37,9 @@
 }
 
 ; CHECK-LABEL: f3
-; No variable-shift rotates. Check for the default expansion code.
-; This is a rotate right by %a1(r1) that became a rotate left by 32-%a1.
-; CHECK: r[[R30:[0-9]+]] = sub(#32,r1)
-; CHECK: r[[R31:[0-9]+]] = and(r1,#31)
-; CHECK: r[[R32:[0-9]+]] = and(r[[R30]],#31)
-; CHECK: r[[R33:[0-9]+]] = asl(r0,r[[R32]])
-; CHECK: r[[R33]] |= lsr(r0,r[[R31]])
+; This is a rotate right by %a1(r1). Use a register-pair shift to implement it.
+; CHECK: r[[R30:[0-9]+]]:[[R31:[0-9]+]] = combine(r0,r0)
+; CHECK: r[[R32:[0-9]+]]:[[R33:[0-9]+]] = lsr(r[[R30]]:[[R31]],r1)
 define i32 @f3(i32 %a0, i32 %a1) #0 {
 b0:
   %v0 = lshr i32 %a0, %a1
@@ -67,13 +60,10 @@
 }
 
 ; CHECK-LABEL: f5
-; No variable-shift rotates. Check for the default expansion code.
 ; This is a rotate left by %a1(r2).
-; CHECK: r[[R50:[0-9]+]] = sub(#64,r2)
-; CHECK: r[[R51:[0-9]+]] = and(r2,#63)
-; CHECK: r[[R52:[0-9]+]] = and(r[[R50]],#63)
-; CHECK: r[[R53:[0-9]+]]:[[R54:[0-9]+]] = asl(r1:0,r[[R51]])
-; CHECK: r[[R53]]:[[R54]] |= lsr(r1:0,r[[R52]])
+; CHECK: r[[R50:[0-9]+]]:[[R51:[0-9]+]] = asl(r1:0,r2)
+; CHECK: r[[R52:[0-9]+]] = sub(#64,r2)
+; CHECK: r[[R50]]:[[R51]] |= lsr(r1:0,r[[R52]])
 define i64 @f5(i64 %a0, i32 %a1) #0 {
 b0:
   %v0 = zext i32 %a1 to i64
@@ -96,13 +86,10 @@
 }
 
 ; CHECK-LABEL: f7
-; No variable-shift rotates. Check for the default expansion code.
-; This is a rotate right by %a1(r2) that became a rotate left by 64-%a1.
-; CHECK: r[[R70:[0-9]+]] = sub(#64,r2)
-; CHECK: r[[R71:[0-9]+]] = and(r2,#63)
-; CHECK: r[[R72:[0-9]+]] = and(r[[R70]],#63)
-; CHECK: r[[R73:[0-9]+]]:[[R75:[0-9]+]] = asl(r1:0,r[[R72]])
-; CHECK: r[[R73]]:[[R75]] |= lsr(r1:0,r[[R71]])
+; This is a rotate right by %a1(r2).
+; CHECK: r[[R70:[0-9]+]]:[[R71:[0-9]+]] = lsr(r1:0,r2)
+; CHECK: r[[R72:[0-9]+]] = sub(#64,r2)
+; CHECK: r[[R70]]:[[R71]] |= asl(r1:0,r[[R72]])
 define i64 @f7(i64 %a0, i32 %a1) #0 {
 b0:
   %v0 = zext i32 %a1 to i64
diff --git a/test/CodeGen/Hexagon/vect/vect-vaslw.ll b/test/CodeGen/Hexagon/vect/vect-vaslw.ll
index c662b0b..23c1676 100644
--- a/test/CodeGen/Hexagon/vect/vect-vaslw.ll
+++ b/test/CodeGen/Hexagon/vect/vect-vaslw.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: vaslw
+; CHECK: vaslh
 
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon-unknown-linux-gnu"
diff --git a/test/CodeGen/MIR/AArch64/cfi.mir b/test/CodeGen/MIR/AArch64/cfi.mir
index 747d58e..04380e0 100644
--- a/test/CodeGen/MIR/AArch64/cfi.mir
+++ b/test/CodeGen/MIR/AArch64/cfi.mir
@@ -45,4 +45,6 @@
     ; CHECK: CFI_INSTRUCTION escape 0x61, 0x62, 0x63
     CFI_INSTRUCTION window_save
     ; CHECK: CFI_INSTRUCTION window_save
+    CFI_INSTRUCTION negate_ra_sign_state
+    ; CHECK: CFI_INSTRUCTION negate_ra_sign_state
     RET_ReallyLR
diff --git a/test/CodeGen/MIR/AArch64/return-address-signing.mir b/test/CodeGen/MIR/AArch64/return-address-signing.mir
new file mode 100644
index 0000000..1489c67
--- /dev/null
+++ b/test/CodeGen/MIR/AArch64/return-address-signing.mir
@@ -0,0 +1,50 @@
+# RUN: llc -mtriple=aarch64-arm-none-eabi -run-pass=prologepilog -o - %s 2>&1 | FileCheck %s
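+# Check that the prologepilog pass inserts PACIASP/AUTIASP for the a_key,
+# EMITBKEY plus PACIBSP/AUTIBSP for the b_key, and the matching CFI.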
+--- |
+  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64-arm-none-eabi"
+
+  define dso_local i32 @foo() "sign-return-address"="all" "sign-return-address-key"="a_key" {
+  entry:
+    ret i32 2
+  }
+
+  define dso_local i32 @bar() "sign-return-address"="all" "sign-return-address-key"="b_key" {
+  entry:
+    ret i32 2
+  }
+...
+---
+#CHECK: foo
+name:            foo
+alignment:       2
+tracksRegLiveness: true
+frameInfo:
+  maxCallFrameSize: 0
+#CHECK:    frame-setup PACIASP implicit-def $lr, implicit $lr, implicit $sp
+#CHECK:    frame-setup CFI_INSTRUCTION negate_ra_sign_state
+#CHECK:    frame-destroy AUTIASP implicit-def $lr, implicit $lr, implicit $sp
+body:             |
+  bb.0.entry:
+    $w0 = MOVi32imm 2
+    RET_ReallyLR implicit killed $w0
+
+...
+---
+#CHECK: bar
+name:            bar
+alignment:       2
+tracksRegLiveness: true
+frameInfo:
+  maxCallFrameSize: 0
+#CHECK:    frame-setup EMITBKEY
+#CHECK:    frame-setup PACIBSP implicit-def $lr, implicit $lr, implicit $sp
+#CHECK:    frame-setup CFI_INSTRUCTION negate_ra_sign_state
+#CHECK:    frame-destroy AUTIBSP implicit-def $lr, implicit $lr, implicit $sp
+body:             |
+  bb.0.entry:
+    $w0 = MOVi32imm 2
+    RET_ReallyLR implicit killed $w0
+
+...
diff --git a/test/CodeGen/MSP430/Inst16mm.ll b/test/CodeGen/MSP430/Inst16mm.ll
index 21fab42..af00a18 100644
--- a/test/CodeGen/MSP430/Inst16mm.ll
+++ b/test/CodeGen/MSP430/Inst16mm.ll
@@ -67,3 +67,24 @@
 ; CHECK-DAG:	mov	2(r1), 6(r1)
 ; CHECK-DAG:	mov	0(r1), 4(r1)
 }
+
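+; Check that a compare of two loaded values is selected as a single cmp
+; with indexed memory operands.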
+define void @cmp(i16* %g, i16* %i) {
+entry:
+; CHECK-LABEL: cmp:
+; CHECK: cmp 8(r12), 4(r13)
+  %add.ptr = getelementptr inbounds i16, i16* %g, i16 4
+  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr1 = getelementptr inbounds i16, i16* %i, i16 2
+  %1 = load i16, i16* %add.ptr1, align 2
+  %cmp = icmp sgt i16 %0, %1
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  store i16 0, i16* %g, align 2
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
diff --git a/test/CodeGen/MSP430/Inst8mm.ll b/test/CodeGen/MSP430/Inst8mm.ll
index b9848dc..5709728 100644
--- a/test/CodeGen/MSP430/Inst8mm.ll
+++ b/test/CodeGen/MSP430/Inst8mm.ll
@@ -53,3 +53,21 @@
 	ret void
 }
 
+define void @cmp(i8* %g, i8* %i) {
+entry:
+; CHECK-LABEL: cmp:
+; CHECK: cmp.b 4(r12), 2(r13)
+  %add.ptr = getelementptr inbounds i8, i8* %g, i16 4
+  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr1 = getelementptr inbounds i8, i8* %i, i16 2
+  %1 = load i8, i8* %add.ptr1, align 1
+  %cmp = icmp sgt i8 %0, %1
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  store i8 0, i8* %g, align 2
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
diff --git a/test/CodeGen/MSP430/InstII.ll b/test/CodeGen/MSP430/InstII.ll
new file mode 100644
index 0000000..596d5b0
--- /dev/null
+++ b/test/CodeGen/MSP430/InstII.ll
@@ -0,0 +1,71 @@
+; RUN: llc -march=msp430 < %s | FileCheck %s
+target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8"
+target triple = "msp430-generic-generic"
+
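+; Check that format II (single-operand) instructions are selected for
+; operations performed directly on memory.
+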
+define void @rra8m(i8* %i) {
+entry:
+; CHECK-LABEL: rra8m:
+; CHECK: rra.b 2(r12)
+  %0 = getelementptr inbounds i8, i8* %i, i16 2
+  %1 = load i8, i8* %0, align 1
+  %shr = ashr i8 %1, 1
+  store i8 %shr, i8* %0, align 1
+  ret void
+}
+
+define void @rra16m(i16* %i) {
+entry:
+; CHECK-LABEL: rra16m:
+; CHECK: rra 4(r12)
+  %0 = getelementptr inbounds i16, i16* %i, i16 2
+  %1 = load i16, i16* %0, align 2
+  %shr = ashr i16 %1, 1
+  store i16 %shr, i16* %0, align 2
+  ret void
+}
+
+; TODO: `clrc; rrc.b 2(r12)` is expected
+define void @rrc8m(i8* %g) {
+entry:
+; CHECK-LABEL: rrc8m:
+; CHECK: mov.b 2(r12), r13
+; CHECK: clrc
+; CHECK: rrc.b r13
+; CHECK: mov.b r13, 2(r12)
+  %add.ptr = getelementptr inbounds i8, i8* %g, i16 2
+  %0 = load i8, i8* %add.ptr, align 1
+  %1 = lshr i8 %0, 1
+  store i8 %1, i8* %add.ptr, align 1
+  ret void
+}
+
+; TODO: `clrc; rrc 4(r12)` is expected
+define void @rrc16m(i16* %g) {
+entry:
+; CHECK-LABEL: rrc16m:
+; CHECK: mov 4(r12), r13
+; CHECK: clrc
+; CHECK: rrc r13
+; CHECK: mov r13, 4(r12)
+  %add.ptr = getelementptr inbounds i16, i16* %g, i16 2
+  %0 = load i16, i16* %add.ptr, align 2
+  %shr = lshr i16 %0, 1
+  store i16 %shr, i16* %add.ptr, align 2
+  ret void
+}
+
+define void @sxt16m(i16* %x) {
+entry:
+; CHECK-LABEL: sxt16m:
+; CHECK: sxt 4(r12)
+  %add.ptr = getelementptr inbounds i16, i16* %x, i16 2
+  %0 = bitcast i16* %add.ptr to i8*
+  %1 = load i8, i8* %0, align 1
+  %conv = sext i8 %1 to i16
+  store i16 %conv, i16* %add.ptr, align 2
+  ret void
+}
+
diff --git a/test/CodeGen/MSP430/fp.ll b/test/CodeGen/MSP430/fp.ll
index 87c4055..e7d7c51 100644
--- a/test/CodeGen/MSP430/fp.ll
+++ b/test/CodeGen/MSP430/fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -disable-fp-elim < %s | FileCheck %s
+; RUN: llc -O0 -frame-pointer=all < %s | FileCheck %s
 
 target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
 target triple = "msp430---elf"
diff --git a/test/CodeGen/MSP430/shifts.ll b/test/CodeGen/MSP430/shifts.ll
index 0732519..5a1fc9c 100644
--- a/test/CodeGen/MSP430/shifts.ll
+++ b/test/CodeGen/MSP430/shifts.ll
@@ -74,3 +74,16 @@
   %shr = lshr i16 %a, 10
   ret i16 %shr
 }
+
+define i16 @lshl10_i16(i16 %a) #0 {
+entry:
+; CHECK-LABEL: lshl10_i16:
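+; A shift left by 10 is assembled from byte operations: mov.b clears the
+; high byte, swpb shifts by 8, and the two adds shift by 2 more.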
+; CHECK:      mov.b r12, r12
+; CHECK-NEXT: swpb r12
+; CHECK-NEXT: add r12, r12
+; CHECK-NEXT: add r12, r12
+  %shl = shl i16 %a, 10
+  ret i16 %shl
+}
diff --git a/test/CodeGen/MSP430/stacksave_restore.ll b/test/CodeGen/MSP430/stacksave_restore.ll
new file mode 100644
index 0000000..47c4553
--- /dev/null
+++ b/test/CodeGen/MSP430/stacksave_restore.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=msp430
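+; Check that llvm.stacksave/llvm.stackrestore lower without crashing.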
+
+target triple = "msp430"
+
+define void @foo() {
+entry:
+  %0 = tail call i8* @llvm.stacksave()
+  tail call void @llvm.stackrestore(i8* %0)
+  ret void
+}
+
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
diff --git a/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll b/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll
index 421f8ff..982284b 100644
--- a/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll
+++ b/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=mipsel -mcpu=mips32 -fast-isel -disable-fp-elim -relocation-model=pic < %s
+; RUN: llc -march=mipsel -mcpu=mips32 -fast-isel -frame-pointer=all -relocation-model=pic < %s
 
 ; Test that negative array accesses don't crash constant synthesis when fast isel
 ; generates negative offsets.
diff --git a/test/CodeGen/Mips/GlobalISel/instruction-select/bitwise.mir b/test/CodeGen/Mips/GlobalISel/instruction-select/bitwise.mir
index 1939811..710b00e 100644
--- a/test/CodeGen/Mips/GlobalISel/instruction-select/bitwise.mir
+++ b/test/CodeGen/Mips/GlobalISel/instruction-select/bitwise.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @and(i32, i32) {entry: ret void}
-  define void @or(i32, i32) {entry: ret void}
-  define void @xor(i32, i32) {entry: ret void}
+  define void @and_i32() {entry: ret void}
+  define void @or_i32() {entry: ret void}
+  define void @xor_i32() {entry: ret void}
   define void @shl(i32) {entry: ret void}
   define void @ashr(i32) {entry: ret void}
   define void @lshr(i32) {entry: ret void}
@@ -14,7 +14,7 @@
 
 ...
 ---
-name:            and
+name:            and_i32
 alignment:       2
 legalized:       true
 regBankSelected: true
@@ -23,7 +23,7 @@
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: and
+    ; MIPS32-LABEL: name: and_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
@@ -38,7 +38,7 @@
 
 ...
 ---
-name:            or
+name:            or_i32
 alignment:       2
 legalized:       true
 regBankSelected: true
@@ -47,7 +47,7 @@
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: or
+    ; MIPS32-LABEL: name: or_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
@@ -62,7 +62,7 @@
 
 ...
 ---
-name:            xor
+name:            xor_i32
 alignment:       2
 legalized:       true
 regBankSelected: true
@@ -71,7 +71,7 @@
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: xor
+    ; MIPS32-LABEL: name: xor_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
diff --git a/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div.mir b/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div.mir
new file mode 100644
index 0000000..5559433
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div.mir
@@ -0,0 +1,112 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
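+# Check that G_SDIV/G_UDIV select to PseudoSDIV/PseudoUDIV followed by
+# PseudoMFLO, and that G_SREM/G_UREM read the remainder with PseudoMFHI.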
+--- |
+
+  define void @sdiv_i32() {entry: ret void}
+  define void @srem_i32() {entry: ret void}
+  define void @udiv_i32() {entry: ret void}
+  define void @urem_i32() {entry: ret void}
+
+...
+---
+name:            sdiv_i32
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: sdiv_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; MIPS32: [[PseudoSDIV:%[0-9]+]]:acc64 = PseudoSDIV [[COPY1]], [[COPY]]
+    ; MIPS32: [[PseudoMFLO:%[0-9]+]]:gpr32 = PseudoMFLO [[PseudoSDIV]]
+    ; MIPS32: $v0 = COPY [[PseudoMFLO]]
+    ; MIPS32: RetRA implicit $v0
+    %0:gprb(s32) = COPY $a0
+    %1:gprb(s32) = COPY $a1
+    %2:gprb(s32) = G_SDIV %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            srem_i32
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: srem_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; MIPS32: [[PseudoSDIV:%[0-9]+]]:acc64 = PseudoSDIV [[COPY1]], [[COPY]]
+    ; MIPS32: [[PseudoMFHI:%[0-9]+]]:gpr32 = PseudoMFHI [[PseudoSDIV]]
+    ; MIPS32: $v0 = COPY [[PseudoMFHI]]
+    ; MIPS32: RetRA implicit $v0
+    %0:gprb(s32) = COPY $a0
+    %1:gprb(s32) = COPY $a1
+    %2:gprb(s32) = G_SREM %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            udiv_i32
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: udiv_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; MIPS32: [[PseudoUDIV:%[0-9]+]]:acc64 = PseudoUDIV [[COPY1]], [[COPY]]
+    ; MIPS32: [[PseudoMFLO:%[0-9]+]]:gpr32 = PseudoMFLO [[PseudoUDIV]]
+    ; MIPS32: $v0 = COPY [[PseudoMFLO]]
+    ; MIPS32: RetRA implicit $v0
+    %0:gprb(s32) = COPY $a0
+    %1:gprb(s32) = COPY $a1
+    %2:gprb(s32) = G_UDIV %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            urem_i32
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: urem_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; MIPS32: [[PseudoUDIV:%[0-9]+]]:acc64 = PseudoUDIV [[COPY1]], [[COPY]]
+    ; MIPS32: [[PseudoMFHI:%[0-9]+]]:gpr32 = PseudoMFHI [[PseudoUDIV]]
+    ; MIPS32: $v0 = COPY [[PseudoMFHI]]
+    ; MIPS32: RetRA implicit $v0
+    %0:gprb(s32) = COPY $a0
+    %1:gprb(s32) = COPY $a1
+    %2:gprb(s32) = G_UREM %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
diff --git a/test/CodeGen/Mips/GlobalISel/instruction-select/select.mir b/test/CodeGen/Mips/GlobalISel/instruction-select/select.mir
new file mode 100644
index 0000000..38f90ae
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/instruction-select/select.mir
@@ -0,0 +1,74 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
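+# Check that G_SELECT on i32 and pointer values selects to MOVN_I_I, with
+# the condition first masked down to its lowest bit.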
+--- |
+
+  define void @select_i32() {entry: ret void}
+  define void @select_ptr() {entry: ret void}
+
+...
+---
+name:            select_i32
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_i32
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; MIPS32: [[LUi:%[0-9]+]]:gpr32 = LUi 0
+    ; MIPS32: [[ORi:%[0-9]+]]:gpr32 = ORi [[LUi]], 1
+    ; MIPS32: [[AND:%[0-9]+]]:gpr32 = AND [[COPY]], [[ORi]]
+    ; MIPS32: [[MOVN_I_I:%[0-9]+]]:gpr32 = MOVN_I_I [[COPY1]], [[AND]], [[COPY2]]
+    ; MIPS32: $v0 = COPY [[MOVN_I_I]]
+    ; MIPS32: RetRA implicit $v0
+    %3:gprb(s32) = COPY $a0
+    %1:gprb(s32) = COPY $a1
+    %2:gprb(s32) = COPY $a2
+    %6:gprb(s32) = G_CONSTANT i32 1
+    %7:gprb(s32) = COPY %3(s32)
+    %5:gprb(s32) = G_AND %7, %6
+    %4:gprb(s32) = G_SELECT %5(s32), %1, %2
+    $v0 = COPY %4(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            select_ptr
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_ptr
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; MIPS32: [[LUi:%[0-9]+]]:gpr32 = LUi 0
+    ; MIPS32: [[ORi:%[0-9]+]]:gpr32 = ORi [[LUi]], 1
+    ; MIPS32: [[AND:%[0-9]+]]:gpr32 = AND [[COPY]], [[ORi]]
+    ; MIPS32: [[MOVN_I_I:%[0-9]+]]:gpr32 = MOVN_I_I [[COPY1]], [[AND]], [[COPY2]]
+    ; MIPS32: $v0 = COPY [[MOVN_I_I]]
+    ; MIPS32: RetRA implicit $v0
+    %3:gprb(s32) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %6:gprb(s32) = G_CONSTANT i32 1
+    %7:gprb(s32) = COPY %3(s32)
+    %5:gprb(s32) = G_AND %7, %6
+    %4:gprb(p0) = G_SELECT %5(s32), %1, %2
+    $v0 = COPY %4(p0)
+    RetRA implicit $v0
+
+...
diff --git a/test/CodeGen/Mips/GlobalISel/irtranslator/bitwise.ll b/test/CodeGen/Mips/GlobalISel/irtranslator/bitwise.ll
index 917a67a..fa34ea4 100644
--- a/test/CodeGen/Mips/GlobalISel/irtranslator/bitwise.ll
+++ b/test/CodeGen/Mips/GlobalISel/irtranslator/bitwise.ll
@@ -1,48 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 
-define i32 @and(i32 %a, i32 %b) {
-  ; MIPS32-LABEL: name: and
-  ; MIPS32: bb.1.entry:
-  ; MIPS32:   liveins: $a0, $a1
-  ; MIPS32:   [[COPY:%[0-9]+]]:_(s32) = COPY $a0
-  ; MIPS32:   [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
-  ; MIPS32:   [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[COPY]]
-  ; MIPS32:   $v0 = COPY [[AND]](s32)
-  ; MIPS32:   RetRA implicit $v0
-entry:
-  %and = and i32 %b, %a
-  ret i32 %and
-}
-
-define i32 @or(i32 %a, i32 %b) {
-  ; MIPS32-LABEL: name: or
-  ; MIPS32: bb.1.entry:
-  ; MIPS32:   liveins: $a0, $a1
-  ; MIPS32:   [[COPY:%[0-9]+]]:_(s32) = COPY $a0
-  ; MIPS32:   [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
-  ; MIPS32:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY1]], [[COPY]]
-  ; MIPS32:   $v0 = COPY [[OR]](s32)
-  ; MIPS32:   RetRA implicit $v0
-entry:
-  %or = or i32 %b, %a
-  ret i32 %or
-}
-
-define i32 @xor(i32 %a, i32 %b) {
-  ; MIPS32-LABEL: name: xor
-  ; MIPS32: bb.1.entry:
-  ; MIPS32:   liveins: $a0, $a1
-  ; MIPS32:   [[COPY:%[0-9]+]]:_(s32) = COPY $a0
-  ; MIPS32:   [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
-  ; MIPS32:   [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[COPY]]
-  ; MIPS32:   $v0 = COPY [[XOR]](s32)
-  ; MIPS32:   RetRA implicit $v0
-entry:
-  %xor = xor i32 %b, %a
-  ret i32 %xor
-}
-
 define i32 @shl(i32 %a) {
   ; MIPS32-LABEL: name: shl
   ; MIPS32: bb.1.entry:
diff --git a/test/CodeGen/Mips/GlobalISel/legalizer/add.mir b/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
index ff9ae06..c90b8e6 100644
--- a/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ b/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -10,6 +10,7 @@
   define void @add_i16_zext() {entry: ret void}
   define void @add_i16_aext() {entry: ret void}
   define void @add_i64() {entry: ret void}
+  define void @add_i128() {entry: ret void}
 
 ...
 ---
@@ -226,11 +227,19 @@
     ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
     ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
-    ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY]]
-    ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[COPY1]]
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[COPY1]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
+    ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[AND]]
     ; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[COPY3]]
-    ; MIPS32: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[ICMP]]
-    ; MIPS32: $v0 = COPY [[ADD2]](s32)
+    ; MIPS32: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
+    ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
+    ; MIPS32: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[AND1]]
+    ; MIPS32: $v0 = COPY [[ADD3]](s32)
     ; MIPS32: $v1 = COPY [[ADD1]](s32)
     ; MIPS32: RetRA implicit $v0, implicit $v1
     %2:_(s32) = COPY $a0
@@ -246,3 +255,84 @@
     RetRA implicit $v0, implicit $v1
 
 ...
+---
+name:            add_i128
+alignment:       2
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 28, size: 4, alignment: 4, stack-id: 0, isImmutable: true }
+  - { id: 1, offset: 24, size: 4, alignment: 8, stack-id: 0, isImmutable: true }
+  - { id: 2, offset: 20, size: 4, alignment: 4, stack-id: 0, isImmutable: true }
+  - { id: 3, offset: 16, size: 4, alignment: 8, stack-id: 0, isImmutable: true }
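+# The 128-bit add is legalized into four 32-bit adds; each carry is
+# computed as (sum ult operand) and masked to a single bit.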
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: add_i128
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1, align 0)
+    ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 0)
+    ; MIPS32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+    ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3, align 0)
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[COPY]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
+    ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[AND]]
+    ; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[LOAD]]
+    ; MIPS32: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[COPY1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
+    ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
+    ; MIPS32: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[AND1]]
+    ; MIPS32: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[LOAD1]]
+    ; MIPS32: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LOAD2]], [[COPY2]]
+    ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
+    ; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C3]]
+    ; MIPS32: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[AND2]]
+    ; MIPS32: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD5]](s32), [[LOAD2]]
+    ; MIPS32: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[LOAD3]], [[COPY3]]
+    ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ICMP2]](s32)
+    ; MIPS32: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C4]]
+    ; MIPS32: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD6]], [[AND3]]
+    ; MIPS32: $v0 = COPY [[ADD1]](s32)
+    ; MIPS32: $v1 = COPY [[ADD3]](s32)
+    ; MIPS32: $a0 = COPY [[ADD5]](s32)
+    ; MIPS32: $a1 = COPY [[ADD7]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %0:_(s128) = G_MERGE_VALUES %2(s32), %3(s32), %4(s32), %5(s32)
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.3
+    %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 0)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 0)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 0)
+    %13:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %1:_(s128) = G_MERGE_VALUES %6(s32), %7(s32), %8(s32), %9(s32)
+    %14:_(s128) = G_ADD %1, %0
+    %15:_(s32), %16:_(s32), %17:_(s32), %18:_(s32) = G_UNMERGE_VALUES %14(s128)
+    $v0 = COPY %15(s32)
+    $v1 = COPY %16(s32)
+    $a0 = COPY %17(s32)
+    $a1 = COPY %18(s32)
+    RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
+
+...
diff --git a/test/CodeGen/Mips/GlobalISel/legalizer/bitwise.mir b/test/CodeGen/Mips/GlobalISel/legalizer/bitwise.mir
index 80944aa..1e5d50f 100644
--- a/test/CodeGen/Mips/GlobalISel/legalizer/bitwise.mir
+++ b/test/CodeGen/Mips/GlobalISel/legalizer/bitwise.mir
@@ -2,9 +2,21 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @and(i32, i32) {entry: ret void}
-  define void @or(i32, i32) {entry: ret void}
-  define void @xor(i32, i32) {entry: ret void}
+  define void @and_i1() {entry: ret void}
+  define void @and_i8() {entry: ret void}
+  define void @and_i16() {entry: ret void}
+  define void @and_i32() {entry: ret void}
+  define void @and_i64() {entry: ret void}
+  define void @or_i1() {entry: ret void}
+  define void @or_i8() {entry: ret void}
+  define void @or_i16() {entry: ret void}
+  define void @or_i32() {entry: ret void}
+  define void @or_i64() {entry: ret void}
+  define void @xor_i1() {entry: ret void}
+  define void @xor_i8() {entry: ret void}
+  define void @xor_i16() {entry: ret void}
+  define void @xor_i32() {entry: ret void}
+  define void @xor_i64() {entry: ret void}
   define void @shl(i32) {entry: ret void}
   define void @ashr(i32) {entry: ret void}
   define void @lshr(i32) {entry: ret void}
@@ -14,14 +26,100 @@
 
 ...
 ---
-name:            and
+name:            and_i1
 alignment:       2
 tracksRegLiveness: true
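+# Narrow (s1/s8/s16) bitwise operations are widened to s32; s64 operations
+# are split into two s32 operations.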
 body:             |
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: and
+    ; MIPS32-LABEL: name: and_i1
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[AND]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s1) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s1) = G_TRUNC %3(s32)
+    %4:_(s1) = G_AND %1, %0
+    %5:_(s32) = G_ANYEXT %4(s1)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            and_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: and_i8
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[AND]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_AND %1, %0
+    %5:_(s32) = G_ANYEXT %4(s8)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            and_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: and_i16
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[AND]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_AND %1, %0
+    %5:_(s32) = G_ANYEXT %4(s16)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            and_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: and_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
@@ -36,14 +132,130 @@
 
 ...
 ---
-name:            or
+name:            and_i64
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: and_i64
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY]]
+    ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[COPY1]]
+    ; MIPS32: $v0 = COPY [[AND]](s32)
+    ; MIPS32: $v1 = COPY [[AND1]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+    %6:_(s64) = G_AND %1, %0
+    %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+    $v0 = COPY %7(s32)
+    $v1 = COPY %8(s32)
+    RetRA implicit $v0, implicit $v1
+
+...
+---
+name:            or_i1
 alignment:       2
 tracksRegLiveness: true
 body:             |
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: or
+    ; MIPS32-LABEL: name: or_i1
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s1) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s1) = G_TRUNC %3(s32)
+    %4:_(s1) = G_OR %1, %0
+    %5:_(s32) = G_ANYEXT %4(s1)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            or_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: or_i8
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_OR %1, %0
+    %5:_(s32) = G_ANYEXT %4(s8)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            or_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: or_i16
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_OR %1, %0
+    %5:_(s32) = G_ANYEXT %4(s16)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            or_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: or_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
@@ -58,14 +270,130 @@
 
 ...
 ---
-name:            xor
+name:            or_i64
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: or_i64
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY2]], [[COPY]]
+    ; MIPS32: [[OR1:%[0-9]+]]:_(s32) = G_OR [[COPY3]], [[COPY1]]
+    ; MIPS32: $v0 = COPY [[OR]](s32)
+    ; MIPS32: $v1 = COPY [[OR1]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+    %6:_(s64) = G_OR %1, %0
+    %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+    $v0 = COPY %7(s32)
+    $v1 = COPY %8(s32)
+    RetRA implicit $v0, implicit $v1
+
+...
+---
+name:            xor_i1
 alignment:       2
 tracksRegLiveness: true
 body:             |
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: xor
+    ; MIPS32-LABEL: name: xor_i1
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[XOR]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s1) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s1) = G_TRUNC %3(s32)
+    %4:_(s1) = G_XOR %1, %0
+    %5:_(s32) = G_ANYEXT %4(s1)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            xor_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: xor_i8
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[XOR]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_XOR %1, %0
+    %5:_(s32) = G_ANYEXT %4(s8)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            xor_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: xor_i16
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[COPY3]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[XOR]](s32)
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_XOR %1, %0
+    %5:_(s32) = G_ANYEXT %4(s16)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            xor_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: xor_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
@@ -80,6 +408,38 @@
 
 ...
 ---
+name:            xor_i64
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: xor_i64
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[COPY]]
+    ; MIPS32: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY3]], [[COPY1]]
+    ; MIPS32: $v0 = COPY [[XOR]](s32)
+    ; MIPS32: $v1 = COPY [[XOR1]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+    %6:_(s64) = G_XOR %1, %0
+    %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+    $v0 = COPY %7(s32)
+    $v1 = COPY %8(s32)
+    RetRA implicit $v0, implicit $v1
+
+...
+---
 name:            shl
 alignment:       2
 tracksRegLiveness: true
diff --git a/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div.mir b/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div.mir
new file mode 100644
index 0000000..f722e8e
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
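+# Check that i8/i16 signed div/rem operands are sign-extended with shl/ashr
+# pairs and that 64-bit division is legalized to a runtime call.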
+--- |
+
+  define void @sdiv_i8() {entry: ret void}
+  define void @sdiv_i16() {entry: ret void}
+  define void @sdiv_i32() {entry: ret void}
+  define void @sdiv_i64() {entry: ret void}
+  define void @srem_i8() {entry: ret void}
+  define void @srem_i16() {entry: ret void}
+  define void @srem_i32() {entry: ret void}
+  define void @srem_i64() {entry: ret void}
+  define void @udiv_i8() {entry: ret void}
+  define void @udiv_i16() {entry: ret void}
+  define void @udiv_i32() {entry: ret void}
+  define void @udiv_i64() {entry: ret void}
+  define void @urem_i8() {entry: ret void}
+  define void @urem_i16() {entry: ret void}
+  define void @urem_i32() {entry: ret void}
+  define void @urem_i64() {entry: ret void}
+
+...
+---
+name:            sdiv_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: sdiv_i8
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]]
+    ; MIPS32: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]]
+    ; MIPS32: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
+    ; MIPS32: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR2]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_SDIV %1, %0
+    %5:_(s32) = G_SEXT %4(s8)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            sdiv_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: sdiv_i16
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]]
+    ; MIPS32: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]]
+    ; MIPS32: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
+    ; MIPS32: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR2]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_SDIV %1, %0
+    %5:_(s32) = G_SEXT %4(s16)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            sdiv_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: sdiv_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[SDIV]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_SDIV %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            sdiv_i64
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: sdiv_i64
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $a0 = COPY [[COPY2]](s32)
+    ; MIPS32: $a1 = COPY [[COPY3]](s32)
+    ; MIPS32: $a2 = COPY [[COPY]](s32)
+    ; MIPS32: $a3 = COPY [[COPY1]](s32)
+    ; MIPS32: JAL &__divdi3, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0, implicit-def $v1
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY $v0
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY $v1
+    ; MIPS32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: $v1 = COPY [[COPY5]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+    %6:_(s64) = G_SDIV %1, %0
+    %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+    $v0 = COPY %7(s32)
+    $v1 = COPY %8(s32)
+    RetRA implicit $v0, implicit $v1
+
+...
+---
+name:            srem_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: srem_i8
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]]
+    ; MIPS32: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]]
+    ; MIPS32: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[ASHR]], [[ASHR1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SREM]](s32)
+    ; MIPS32: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR2]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_SREM %1, %0
+    %5:_(s32) = G_SEXT %4(s8)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            srem_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: srem_i16
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]]
+    ; MIPS32: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]]
+    ; MIPS32: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[ASHR]], [[ASHR1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SREM]](s32)
+    ; MIPS32: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR2]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_SREM %1, %0
+    %5:_(s32) = G_SEXT %4(s16)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            srem_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: srem_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[SREM]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_SREM %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            srem_i64
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: srem_i64
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $a0 = COPY [[COPY2]](s32)
+    ; MIPS32: $a1 = COPY [[COPY3]](s32)
+    ; MIPS32: $a2 = COPY [[COPY]](s32)
+    ; MIPS32: $a3 = COPY [[COPY1]](s32)
+    ; MIPS32: JAL &__moddi3, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0, implicit-def $v1
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY $v0
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY $v1
+    ; MIPS32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: $v1 = COPY [[COPY5]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+    %6:_(s64) = G_SREM %1, %0
+    %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+    $v0 = COPY %7(s32)
+    $v1 = COPY %8(s32)
+    RetRA implicit $v0, implicit $v1
+
+...
+---
+name:            udiv_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: udiv_i8
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+    ; MIPS32: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[AND]], [[AND1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_UDIV %1, %0
+    %5:_(s32) = G_SEXT %4(s8)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            udiv_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: udiv_i16
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+    ; MIPS32: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[AND]], [[AND1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_UDIV %1, %0
+    %5:_(s32) = G_SEXT %4(s16)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            udiv_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: udiv_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[UDIV]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_UDIV %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            udiv_i64
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: udiv_i64
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $a0 = COPY [[COPY2]](s32)
+    ; MIPS32: $a1 = COPY [[COPY3]](s32)
+    ; MIPS32: $a2 = COPY [[COPY]](s32)
+    ; MIPS32: $a3 = COPY [[COPY1]](s32)
+    ; MIPS32: JAL &__udivdi3, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0, implicit-def $v1
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY $v0
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY $v1
+    ; MIPS32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: $v1 = COPY [[COPY5]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+    %6:_(s64) = G_UDIV %1, %0
+    %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+    $v0 = COPY %7(s32)
+    $v1 = COPY %8(s32)
+    RetRA implicit $v0, implicit $v1
+
+...
+---
+name:            urem_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: urem_i8
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+    ; MIPS32: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[AND]], [[AND1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UREM]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_UREM %1, %0
+    %5:_(s32) = G_SEXT %4(s8)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            urem_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: urem_i16
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+    ; MIPS32: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[AND]], [[AND1]]
+    ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UREM]](s32)
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]]
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C2]]
+    ; MIPS32: $v0 = COPY [[ASHR]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %2:_(s32) = COPY $a0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_UREM %1, %0
+    %5:_(s32) = G_SEXT %4(s16)
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            urem_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: urem_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[UREM]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_UREM %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            urem_i64
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: urem_i64
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $a0 = COPY [[COPY2]](s32)
+    ; MIPS32: $a1 = COPY [[COPY3]](s32)
+    ; MIPS32: $a2 = COPY [[COPY]](s32)
+    ; MIPS32: $a3 = COPY [[COPY1]](s32)
+    ; MIPS32: JAL &__umoddi3, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0, implicit-def $v1
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY $v0
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY $v1
+    ; MIPS32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
+    ; MIPS32: $v0 = COPY [[COPY4]](s32)
+    ; MIPS32: $v1 = COPY [[COPY5]](s32)
+    ; MIPS32: RetRA implicit $v0, implicit $v1
+    %2:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %4:_(s32) = COPY $a2
+    %5:_(s32) = COPY $a3
+    %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+    %6:_(s64) = G_UREM %1, %0
+    %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+    $v0 = COPY %7(s32)
+    $v1 = COPY %8(s32)
+    RetRA implicit $v0, implicit $v1
+
+...
diff --git a/test/CodeGen/Mips/GlobalISel/legalizer/select.mir b/test/CodeGen/Mips/GlobalISel/legalizer/select.mir
new file mode 100644
index 0000000..249c4d3
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/legalizer/select.mir
@@ -0,0 +1,172 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
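+# These tests show how the legalizer handles G_SELECT: the s1 condition is
+# masked down to its low bit with G_AND, narrow (s8/s16) values are widened
+# and selected as s32, and p0 values are selected directly as pointers.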
+--- |
+
+  define void @select_i8() {entry: ret void}
+  define void @select_i16() {entry: ret void}
+  define void @select_i32() {entry: ret void}
+  define void @select_ptr() {entry: ret void}
+  define void @select_with_negation() {entry: ret void}
+
+...
+---
+name:            select_i8
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_i8
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY3]], [[COPY4]]
+    ; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+    ; MIPS32: $v0 = COPY [[COPY6]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %3:_(s32) = COPY $a0
+    %0:_(s1) = G_TRUNC %3(s32)
+    %4:_(s32) = COPY $a1
+    %1:_(s8) = G_TRUNC %4(s32)
+    %5:_(s32) = COPY $a2
+    %2:_(s8) = G_TRUNC %5(s32)
+    %6:_(s8) = G_SELECT %0(s1), %1, %2
+    %7:_(s32) = G_ANYEXT %6(s8)
+    $v0 = COPY %7(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            select_i16
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_i16
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY3]], [[COPY4]]
+    ; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+    ; MIPS32: $v0 = COPY [[COPY6]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %3:_(s32) = COPY $a0
+    %0:_(s1) = G_TRUNC %3(s32)
+    %4:_(s32) = COPY $a1
+    %1:_(s16) = G_TRUNC %4(s32)
+    %5:_(s32) = COPY $a2
+    %2:_(s16) = G_TRUNC %5(s32)
+    %6:_(s16) = G_SELECT %0(s1), %1, %2
+    %7:_(s32) = G_ANYEXT %6(s16)
+    $v0 = COPY %7(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            select_i32
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_i32
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY1]], [[COPY2]]
+    ; MIPS32: $v0 = COPY [[SELECT]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %3:_(s32) = COPY $a0
+    %0:_(s1) = G_TRUNC %3(s32)
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = COPY $a2
+    %4:_(s32) = G_SELECT %0(s1), %1, %2
+    $v0 = COPY %4(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            select_ptr
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_ptr
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[AND]](s32), [[COPY1]], [[COPY2]]
+    ; MIPS32: $v0 = COPY [[SELECT]](p0)
+    ; MIPS32: RetRA implicit $v0
+    %3:_(s32) = COPY $a0
+    %0:_(s1) = G_TRUNC %3(s32)
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %4:_(p0) = G_SELECT %0(s1), %1, %2
+    $v0 = COPY %4(p0)
+    RetRA implicit $v0
+
+...
+---
+name:            select_with_negation
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: select_with_negation
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY]](s32), [[COPY1]]
+    ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
+    ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; MIPS32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY4]], [[COPY5]]
+    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[XOR]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C1]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY2]], [[COPY3]]
+    ; MIPS32: $v0 = COPY [[SELECT]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = COPY $a2
+    %3:_(s32) = COPY $a3
+    %5:_(s1) = G_CONSTANT i1 true
+    %4:_(s1) = G_ICMP intpred(slt), %0(s32), %1
+    %6:_(s1) = G_XOR %4, %5
+    %7:_(s32) = G_SELECT %6(s1), %2, %3
+    $v0 = COPY %7(s32)
+    RetRA implicit $v0
+
+...
diff --git a/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll b/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll
index cf6d1e2..a1fb026 100644
--- a/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll
+++ b/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll
@@ -90,14 +90,73 @@
 define i64 @add_i64(i64 %a, i64 %b) {
 ; MIPS32-LABEL: add_i64:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    addu $5, $7, $5
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 0
 ; MIPS32-NEXT:    addu $4, $6, $4
-; MIPS32-NEXT:    sltu $6, $4, $6
-; MIPS32-NEXT:    addu $3, $5, $6
-; MIPS32-NEXT:    move $2, $4
+; MIPS32-NEXT:    lui $2, 0
+; MIPS32-NEXT:    ori $2, $2, 1
+; MIPS32-NEXT:    and $1, $1, $2
+; MIPS32-NEXT:    addu $1, $4, $1
+; MIPS32-NEXT:    sltu $2, $1, $6
+; MIPS32-NEXT:    addu $4, $7, $5
+; MIPS32-NEXT:    lui $5, 0
+; MIPS32-NEXT:    ori $5, $5, 1
+; MIPS32-NEXT:    and $2, $2, $5
+; MIPS32-NEXT:    addu $3, $4, $2
+; MIPS32-NEXT:    move $2, $1
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
   %add = add i64 %b, %a
   ret i64 %add
-}
\ No newline at end of file
+}
+
+define i128 @add_i128(i128 %a, i128 %b) {
+; MIPS32-LABEL: add_i128:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -8
+; MIPS32-NEXT:    .cfi_def_cfa_offset 8
+; MIPS32-NEXT:    addiu $1, $sp, 24
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 28
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 32
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    addiu $8, $sp, 36
+; MIPS32-NEXT:    lw $8, 0($8)
+; MIPS32-NEXT:    lui $9, 0
+; MIPS32-NEXT:    ori $9, $9, 0
+; MIPS32-NEXT:    addu $4, $1, $4
+; MIPS32-NEXT:    lui $10, 0
+; MIPS32-NEXT:    ori $10, $10, 1
+; MIPS32-NEXT:    and $9, $9, $10
+; MIPS32-NEXT:    addu $4, $4, $9
+; MIPS32-NEXT:    sltu $1, $4, $1
+; MIPS32-NEXT:    addu $5, $2, $5
+; MIPS32-NEXT:    lui $9, 0
+; MIPS32-NEXT:    ori $9, $9, 1
+; MIPS32-NEXT:    and $1, $1, $9
+; MIPS32-NEXT:    addu $1, $5, $1
+; MIPS32-NEXT:    sltu $2, $1, $2
+; MIPS32-NEXT:    addu $5, $3, $6
+; MIPS32-NEXT:    lui $6, 0
+; MIPS32-NEXT:    ori $6, $6, 1
+; MIPS32-NEXT:    and $2, $2, $6
+; MIPS32-NEXT:    addu $2, $5, $2
+; MIPS32-NEXT:    sltu $3, $2, $3
+; MIPS32-NEXT:    addu $5, $8, $7
+; MIPS32-NEXT:    lui $6, 0
+; MIPS32-NEXT:    ori $6, $6, 1
+; MIPS32-NEXT:    and $3, $3, $6
+; MIPS32-NEXT:    addu $5, $5, $3
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $2, $4
+; MIPS32-NEXT:    move $3, $1
+; MIPS32-NEXT:    lw $4, 4($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    addiu $sp, $sp, 8
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %add = add i128 %b, %a
+  ret i128 %add
+}
diff --git a/test/CodeGen/Mips/GlobalISel/llvm-ir/bitwise.ll b/test/CodeGen/Mips/GlobalISel/llvm-ir/bitwise.ll
index 0ecd52d..9d8671e 100644
--- a/test/CodeGen/Mips/GlobalISel/llvm-ir/bitwise.ll
+++ b/test/CodeGen/Mips/GlobalISel/llvm-ir/bitwise.ll
@@ -1,9 +1,41 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32 @and(i32 %a, i32 %b) {
-; MIPS32-LABEL: and:
+define i1 @and_i1(i1 %a, i1 %b) {
+; MIPS32-LABEL: and_i1:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    and $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %and = and i1 %b, %a
+  ret i1 %and
+}
+
+define i8 @and_i8(i8 %a, i8 %b) {
+; MIPS32-LABEL: and_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    and $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %and = and i8 %b, %a
+  ret i8 %and
+}
+
+define i16 @and_i16(i16 %a, i16 %b) {
+; MIPS32-LABEL: and_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    and $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %and = and i16 %b, %a
+  ret i16 %and
+}
+
+define i32 @and_i32(i32 %a, i32 %b) {
+; MIPS32-LABEL: and_i32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    and $2, $5, $4
 ; MIPS32-NEXT:    jr $ra
@@ -13,8 +45,53 @@
   ret i32 %and
 }
 
-define i32 @or(i32 %a, i32 %b) {
-; MIPS32-LABEL: or:
+define i64 @and_i64(i64 %a, i64 %b) {
+; MIPS32-LABEL: and_i64:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    and $2, $6, $4
+; MIPS32-NEXT:    and $3, $7, $5
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %and = and i64 %b, %a
+  ret i64 %and
+}
+
+define i1 @or_i1(i1 %a, i1 %b) {
+; MIPS32-LABEL: or_i1:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    or $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %or = or i1 %b, %a
+  ret i1 %or
+}
+
+define i8 @or_i8(i8 %a, i8 %b) {
+; MIPS32-LABEL: or_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    or $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %or = or i8 %b, %a
+  ret i8 %or
+}
+
+define i16 @or_i16(i16 %a, i16 %b) {
+; MIPS32-LABEL: or_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    or $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %or = or i16 %b, %a
+  ret i16 %or
+}
+
+define i32 @or_i32(i32 %a, i32 %b) {
+; MIPS32-LABEL: or_i32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    or $2, $5, $4
 ; MIPS32-NEXT:    jr $ra
@@ -24,8 +101,53 @@
   ret i32 %or
 }
 
-define i32 @xor(i32 %a, i32 %b) {
-; MIPS32-LABEL: xor:
+define i64 @or_i64(i64 %a, i64 %b) {
+; MIPS32-LABEL: or_i64:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    or $2, $6, $4
+; MIPS32-NEXT:    or $3, $7, $5
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %or = or i64 %b, %a
+  ret i64 %or
+}
+
+define i1 @xor_i1(i1 %a, i1 %b) {
+; MIPS32-LABEL: xor_i1:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    xor $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %xor = xor i1 %b, %a
+  ret i1 %xor
+}
+
+define i8 @xor_i8(i8 %a, i8 %b) {
+; MIPS32-LABEL: xor_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    xor $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %xor = xor i8 %b, %a
+  ret i8 %xor
+}
+
+define i16 @xor_i16(i16 %a, i16 %b) {
+; MIPS32-LABEL: xor_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    xor $2, $5, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %xor = xor i16 %b, %a
+  ret i16 %xor
+}
+
+define i32 @xor_i32(i32 %a, i32 %b) {
+; MIPS32-LABEL: xor_i32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    xor $2, $5, $4
 ; MIPS32-NEXT:    jr $ra
@@ -35,6 +157,18 @@
   ret i32 %xor
 }
 
+define i64 @xor_i64(i64 %a, i64 %b) {
+; MIPS32-LABEL: xor_i64:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    xor $2, $6, $4
+; MIPS32-NEXT:    xor $3, $7, $5
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %xor = xor i64 %b, %a
+  ret i64 %xor
+}
+
 define i32 @shl(i32 %a) {
 ; MIPS32-LABEL: shl:
 ; MIPS32:       # %bb.0: # %entry
diff --git a/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll b/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll
new file mode 100644
index 0000000..b17c603
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll
@@ -0,0 +1,314 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
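+; i8/i16 operands are sign-extended with sll/sra pairs for the signed
+; operations and masked with 255/65535 for the unsigned ones before div/divu;
+; a teq against $zero traps on division by zero, and the i64 operations are
+; lowered to libcalls (__divdi3, __moddi3, __udivdi3, __umoddi3).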
+
+; sdiv
+define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
+; MIPS32-LABEL: sdiv_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    sll $5, $5, 24
+; MIPS32-NEXT:    sra $5, $5, 24
+; MIPS32-NEXT:    sll $4, $4, 24
+; MIPS32-NEXT:    sra $4, $4, 24
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mflo $4
+; MIPS32-NEXT:    sll $4, $4, 24
+; MIPS32-NEXT:    sra $2, $4, 24
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = sdiv i8 %b, %a
+  ret i8 %div
+}
+
+define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
+; MIPS32-LABEL: sdiv_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    sll $5, $5, 16
+; MIPS32-NEXT:    sra $5, $5, 16
+; MIPS32-NEXT:    sll $4, $4, 16
+; MIPS32-NEXT:    sra $4, $4, 16
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mflo $4
+; MIPS32-NEXT:    sll $4, $4, 16
+; MIPS32-NEXT:    sra $2, $4, 16
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = sdiv i16 %b, %a
+  ret i16 %div
+}
+
+define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b) {
+; MIPS32-LABEL: sdiv_i32:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mflo $2
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = sdiv i32 %b, %a
+  ret i32 %div
+}
+
+define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
+; MIPS32-LABEL: sdiv_i64:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -32
+; MIPS32-NEXT:    .cfi_def_cfa_offset 32
+; MIPS32-NEXT:    sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    .cfi_offset 31, -4
+; MIPS32-NEXT:    sw $4, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $4, $6
+; MIPS32-NEXT:    sw $5, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $5, $7
+; MIPS32-NEXT:    lw $6, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $7, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    jal __divdi3
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:    lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    addiu $sp, $sp, 32
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = sdiv i64 %b, %a
+  ret i64 %div
+}
+
+; srem
+define signext i8 @srem_i8(i8 signext %a, i8 signext %b) {
+; MIPS32-LABEL: srem_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    sll $5, $5, 24
+; MIPS32-NEXT:    sra $5, $5, 24
+; MIPS32-NEXT:    sll $4, $4, 24
+; MIPS32-NEXT:    sra $4, $4, 24
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mfhi $4
+; MIPS32-NEXT:    sll $4, $4, 24
+; MIPS32-NEXT:    sra $2, $4, 24
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = srem i8 %b, %a
+  ret i8 %rem
+}
+
+define signext i16 @srem_i16(i16 signext %a, i16 signext %b) {
+; MIPS32-LABEL: srem_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    sll $5, $5, 16
+; MIPS32-NEXT:    sra $5, $5, 16
+; MIPS32-NEXT:    sll $4, $4, 16
+; MIPS32-NEXT:    sra $4, $4, 16
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mfhi $4
+; MIPS32-NEXT:    sll $4, $4, 16
+; MIPS32-NEXT:    sra $2, $4, 16
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = srem i16 %b, %a
+  ret i16 %rem
+}
+
+define signext i32 @srem_i32(i32 signext %a, i32 signext %b) {
+; MIPS32-LABEL: srem_i32:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mfhi $2
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = srem i32 %b, %a
+  ret i32 %rem
+}
+
+define signext i64 @srem_i64(i64 signext %a, i64 signext %b) {
+; MIPS32-LABEL: srem_i64:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -32
+; MIPS32-NEXT:    .cfi_def_cfa_offset 32
+; MIPS32-NEXT:    sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    .cfi_offset 31, -4
+; MIPS32-NEXT:    sw $4, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $4, $6
+; MIPS32-NEXT:    sw $5, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $5, $7
+; MIPS32-NEXT:    lw $6, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $7, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    jal __moddi3
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:    lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    addiu $sp, $sp, 32
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = srem i64 %b, %a
+  ret i64 %rem
+}
+
+; udiv
+define signext i8 @udiv_i8(i8 signext %a, i8 signext %b) {
+; MIPS32-LABEL: udiv_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 255
+; MIPS32-NEXT:    and $1, $5, $1
+; MIPS32-NEXT:    lui $5, 0
+; MIPS32-NEXT:    ori $5, $5, 255
+; MIPS32-NEXT:    and $4, $4, $5
+; MIPS32-NEXT:    divu $zero, $1, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mflo $1
+; MIPS32-NEXT:    sll $1, $1, 24
+; MIPS32-NEXT:    sra $2, $1, 24
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = udiv i8 %b, %a
+  ret i8 %div
+}
+
+define signext i16 @udiv_i16(i16 signext %a, i16 signext %b) {
+; MIPS32-LABEL: udiv_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 65535
+; MIPS32-NEXT:    and $1, $5, $1
+; MIPS32-NEXT:    lui $5, 0
+; MIPS32-NEXT:    ori $5, $5, 65535
+; MIPS32-NEXT:    and $4, $4, $5
+; MIPS32-NEXT:    divu $zero, $1, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mflo $1
+; MIPS32-NEXT:    sll $1, $1, 16
+; MIPS32-NEXT:    sra $2, $1, 16
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = udiv i16 %b, %a
+  ret i16 %div
+}
+
+define signext i32 @udiv_i32(i32 signext %a, i32 signext %b) {
+; MIPS32-LABEL: udiv_i32:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    divu $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mflo $2
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = udiv i32 %b, %a
+  ret i32 %div
+}
+
+define signext i64 @udiv_i64(i64 signext %a, i64 signext %b) {
+; MIPS32-LABEL: udiv_i64:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -32
+; MIPS32-NEXT:    .cfi_def_cfa_offset 32
+; MIPS32-NEXT:    sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    .cfi_offset 31, -4
+; MIPS32-NEXT:    sw $4, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $4, $6
+; MIPS32-NEXT:    sw $5, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $5, $7
+; MIPS32-NEXT:    lw $6, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $7, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    jal __udivdi3
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:    lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    addiu $sp, $sp, 32
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %div = udiv i64 %b, %a
+  ret i64 %div
+}
+
+; urem
+define signext i8 @urem_i8(i8 signext %a, i8 signext %b) {
+; MIPS32-LABEL: urem_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 255
+; MIPS32-NEXT:    and $1, $5, $1
+; MIPS32-NEXT:    lui $5, 0
+; MIPS32-NEXT:    ori $5, $5, 255
+; MIPS32-NEXT:    and $4, $4, $5
+; MIPS32-NEXT:    divu $zero, $1, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mfhi $1
+; MIPS32-NEXT:    sll $1, $1, 24
+; MIPS32-NEXT:    sra $2, $1, 24
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = urem i8 %b, %a
+  ret i8 %rem
+}
+
+define signext i16 @urem_i16(i16 signext %a, i16 signext %b) {
+; MIPS32-LABEL: urem_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 65535
+; MIPS32-NEXT:    and $1, $5, $1
+; MIPS32-NEXT:    lui $5, 0
+; MIPS32-NEXT:    ori $5, $5, 65535
+; MIPS32-NEXT:    and $4, $4, $5
+; MIPS32-NEXT:    divu $zero, $1, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mfhi $1
+; MIPS32-NEXT:    sll $1, $1, 16
+; MIPS32-NEXT:    sra $2, $1, 16
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = urem i16 %b, %a
+  ret i16 %rem
+}
+
+define signext i32 @urem_i32(i32 signext %a, i32 signext %b) {
+; MIPS32-LABEL: urem_i32:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    divu $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
+; MIPS32-NEXT:    mfhi $2
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = urem i32 %b, %a
+  ret i32 %rem
+}
+
+define signext i64 @urem_i64(i64 signext %a, i64 signext %b) {
+; MIPS32-LABEL: urem_i64:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -32
+; MIPS32-NEXT:    .cfi_def_cfa_offset 32
+; MIPS32-NEXT:    sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    .cfi_offset 31, -4
+; MIPS32-NEXT:    sw $4, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $4, $6
+; MIPS32-NEXT:    sw $5, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    move $5, $7
+; MIPS32-NEXT:    lw $6, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $7, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    jal __umoddi3
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:    lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    addiu $sp, $sp, 32
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %rem = urem i64 %b, %a
+  ret i64 %rem
+}
diff --git a/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll b/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
new file mode 100644
index 0000000..f15977c
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
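+; Each select lowers to a conditional move: the i1 test is masked down to its
+; low bit and movn then picks between the two incoming values.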
+
+define i8 @select_i8(i1 %test, i8 %a, i8 %b) {
+; MIPS32-LABEL: select_i8:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 1
+; MIPS32-NEXT:    and $1, $4, $1
+; MIPS32-NEXT:    movn $6, $5, $1
+; MIPS32-NEXT:    move $2, $6
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %cond = select i1 %test, i8 %a, i8 %b
+  ret i8 %cond
+}
+
+define i16 @select_i16(i1 %test, i16 %a, i16 %b) {
+; MIPS32-LABEL: select_i16:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 1
+; MIPS32-NEXT:    and $1, $4, $1
+; MIPS32-NEXT:    movn $6, $5, $1
+; MIPS32-NEXT:    move $2, $6
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %cond = select i1 %test, i16 %a, i16 %b
+  ret i16 %cond
+}
+
+define i32 @select_i32(i1 %test, i32 %a, i32 %b) {
+; MIPS32-LABEL: select_i32:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 1
+; MIPS32-NEXT:    and $1, $4, $1
+; MIPS32-NEXT:    movn $6, $5, $1
+; MIPS32-NEXT:    move $2, $6
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %cond = select i1 %test, i32 %a, i32 %b
+  ret i32 %cond
+}
+
+define i32* @select_ptr(i1 %test, i32* %a, i32* %b) {
+; MIPS32-LABEL: select_ptr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 0
+; MIPS32-NEXT:    ori $1, $1, 1
+; MIPS32-NEXT:    and $1, $4, $1
+; MIPS32-NEXT:    movn $6, $5, $1
+; MIPS32-NEXT:    move $2, $6
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %cond = select i1 %test, i32* %a, i32* %b
+  ret i32* %cond
+}
+
+define i32 @select_with_negation(i32 %a, i32 %b, i32 %x, i32 %y) {
+; MIPS32-LABEL: select_with_negation:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lui $1, 65535
+; MIPS32-NEXT:    ori $1, $1, 65535
+; MIPS32-NEXT:    slt $4, $4, $5
+; MIPS32-NEXT:    xor $1, $4, $1
+; MIPS32-NEXT:    lui $4, 0
+; MIPS32-NEXT:    ori $4, $4, 1
+; MIPS32-NEXT:    and $1, $1, $4
+; MIPS32-NEXT:    movn $7, $6, $1
+; MIPS32-NEXT:    move $2, $7
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %cmp = icmp slt i32 %a, %b
+  %lneg = xor i1 %cmp, true
+  %cond = select i1 %lneg, i32 %x, i32 %y
+  ret i32 %cond
+}
diff --git a/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/tryCombine.mir b/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/tryCombine.mir
new file mode 100644
index 0000000..a8b2db1
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/tryCombine.mir
@@ -0,0 +1,38 @@
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=mips-prelegalizer-combiner -verify-machineinstrs -debug %s -o - 2>&1 | FileCheck %s -check-prefixes=MIPS32
+# REQUIRES: asserts
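+# The -debug stream used by the checks below is only available in builds with
+# assertions enabled, hence the REQUIRES: asserts line above.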
+--- |
+
+  define void @f() {entry: ret void}
+
+...
+---
+# Check that we report attempts to combine each instruction from the input
+# since none of them gets changed in this test.
+
+# MIPS32-LABEL: Generic MI Combiner for: f
+# MIPS32: Try combining %0:_(s32) = COPY $a0
+# MIPS32: Try combining %1:_(s32) = COPY $a1
+# MIPS32: Try combining %2:_(s32) = G_ADD %1:_, %0:_
+# MIPS32: Try combining $v0 = COPY %2:_(s32)
+# MIPS32: Try combining RetRA implicit $v0
+name:            f
+alignment:       2
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: f
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[ADD]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_ADD %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
diff --git a/test/CodeGen/Mips/GlobalISel/regbankselect/bitwise.mir b/test/CodeGen/Mips/GlobalISel/regbankselect/bitwise.mir
index 5df2c20..595b4a1 100644
--- a/test/CodeGen/Mips/GlobalISel/regbankselect/bitwise.mir
+++ b/test/CodeGen/Mips/GlobalISel/regbankselect/bitwise.mir
@@ -3,9 +3,9 @@
 --- |
 
 
-  define void @and(i32, i32) {entry: ret void}
-  define void @or(i32, i32) {entry: ret void}
-  define void @xor(i32, i32) {entry: ret void}
+  define void @and_i32() {entry: ret void}
+  define void @or_i32() {entry: ret void}
+  define void @xor_i32() {entry: ret void}
   define void @shl(i32) {entry: ret void}
   define void @ashr(i32) {entry: ret void}
   define void @lshr(i32) {entry: ret void}
@@ -15,7 +15,7 @@
 
 ...
 ---
-name:            and
+name:            and_i32
 alignment:       2
 legalized:       true
 tracksRegLiveness: true
@@ -23,7 +23,7 @@
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: and
+    ; MIPS32-LABEL: name: and_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
@@ -38,7 +38,7 @@
 
 ...
 ---
-name:            or
+name:            or_i32
 alignment:       2
 legalized:       true
 tracksRegLiveness: true
@@ -46,7 +46,7 @@
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: or
+    ; MIPS32-LABEL: name: or_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
@@ -61,7 +61,7 @@
 
 ...
 ---
-name:            xor
+name:            xor_i32
 alignment:       2
 legalized:       true
 tracksRegLiveness: true
@@ -69,7 +69,7 @@
   bb.1.entry:
     liveins: $a0, $a1
 
-    ; MIPS32-LABEL: name: xor
+    ; MIPS32-LABEL: name: xor_i32
     ; MIPS32: liveins: $a0, $a1
     ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
     ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
diff --git a/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div.mir b/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div.mir
new file mode 100644
index 0000000..bee3630
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div.mir
@@ -0,0 +1,102 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
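+# Division and remainder on MIPS32 only involve general purpose registers, so
+# regbankselect is expected to assign every operand to the gprb bank.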
+--- |
+
+  define void @sdiv_i32() {entry: ret void}
+  define void @srem_i32() {entry: ret void}
+  define void @udiv_i32() {entry: ret void}
+  define void @urem_i32() {entry: ret void}
+
+...
+---
+name:            sdiv_i32
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: sdiv_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[SDIV:%[0-9]+]]:gprb(s32) = G_SDIV [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[SDIV]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_SDIV %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            srem_i32
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: srem_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[SREM:%[0-9]+]]:gprb(s32) = G_SREM [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[SREM]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_SREM %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            udiv_i32
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: udiv_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[UDIV:%[0-9]+]]:gprb(s32) = G_UDIV [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[UDIV]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_UDIV %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            urem_i32
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: urem_i32
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[UREM:%[0-9]+]]:gprb(s32) = G_UREM [[COPY1]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[UREM]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = G_UREM %1, %0
+    $v0 = COPY %2(s32)
+    RetRA implicit $v0
+
+...
diff --git a/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir b/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir
new file mode 100644
index 0000000..98aae8a
--- /dev/null
+++ b/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir
@@ -0,0 +1,70 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
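+# Both the s32 and the p0 forms of G_SELECT map onto the gprb register bank.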
+--- |
+
+  define void @select_i32(i32, i32) {entry: ret void}
+  define void @select_ptr(i32, i32) {entry: ret void}
+
+...
+---
+name:            select_i32
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_i32
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY3]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND]](s32), [[COPY1]], [[COPY2]]
+    ; MIPS32: $v0 = COPY [[SELECT]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %3:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = COPY $a2
+    %6:_(s32) = G_CONSTANT i32 1
+    %7:_(s32) = COPY %3(s32)
+    %5:_(s32) = G_AND %7, %6
+    %4:_(s32) = G_SELECT %5(s32), %1, %2
+    $v0 = COPY %4(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            select_ptr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: select_ptr
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY3]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:gprb(p0) = G_SELECT [[AND]](s32), [[COPY1]], [[COPY2]]
+    ; MIPS32: $v0 = COPY [[SELECT]](p0)
+    ; MIPS32: RetRA implicit $v0
+    %3:_(s32) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %6:_(s32) = G_CONSTANT i32 1
+    %7:_(s32) = COPY %3(s32)
+    %5:_(s32) = G_AND %7, %6
+    %4:_(p0) = G_SELECT %5(s32), %1, %2
+    $v0 = COPY %4(p0)
+    RetRA implicit $v0
+
+...
diff --git a/test/CodeGen/Mips/llvm-ir/ashr.ll b/test/CodeGen/Mips/llvm-ir/ashr.ll
index dc19c9e..3aa384e 100644
--- a/test/CodeGen/Mips/llvm-ir/ashr.ll
+++ b/test/CodeGen/Mips/llvm-ir/ashr.ll
@@ -274,26 +274,21 @@
 
 define signext i64 @ashr_i64(i64 signext %a, i64 signext %b) {
 ; MIPS-LABEL: ashr_i64:
-; MIPS:       # %bb.0: # %entry
-; MIPS-NEXT:    srav $2, $4, $7
-; MIPS-NEXT:    andi $6, $7, 32
-; MIPS-NEXT:    beqz $6, $BB4_3
-; MIPS-NEXT:    move $3, $2
-; MIPS-NEXT:  # %bb.1: # %entry
-; MIPS-NEXT:    bnez $6, $BB4_4
-; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB4_2: # %entry
-; MIPS-NEXT:    jr $ra
-; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB4_3: # %entry
-; MIPS-NEXT:    srlv $1, $5, $7
-; MIPS-NEXT:    not $3, $7
-; MIPS-NEXT:    sll $5, $4, 1
-; MIPS-NEXT:    sllv $3, $5, $3
-; MIPS-NEXT:    beqz $6, $BB4_2
-; MIPS-NEXT:    or $3, $3, $1
-; MIPS-NEXT:  $BB4_4:
-; MIPS-NEXT:    jr $ra
+; MIPS:       # %bb.0:
+; MIPS-NEXT:    andi  $1, $7, 32
+; MIPS-NEXT:    bnez  $1, $BB4_2
+; MIPS-NEXT:    srav  $3, $4, $7
+; MIPS-NEXT:  # %bb.1:
+; MIPS-NEXT:    srlv  $1, $5, $7
+; MIPS-NEXT:    not $2, $7
+; MIPS-NEXT:    sll $4, $4, 1
+; MIPS-NEXT:    sllv  $2, $4, $2
+; MIPS-NEXT:    or  $1, $2, $1
+; MIPS-NEXT:    move  $2, $3
+; MIPS-NEXT:    jr  $ra
+; MIPS-NEXT:    move  $3, $1
+; MIPS-NEXT:  $BB4_2:
+; MIPS-NEXT:    jr  $ra
 ; MIPS-NEXT:    sra $2, $4, 31
 ;
 ; MIPS32-LABEL: ashr_i64:
@@ -400,133 +395,114 @@
 
 define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
 ; MIPS-LABEL: ashr_i128:
-; MIPS:       # %bb.0: # %entry
-; MIPS-NEXT:    addiu $sp, $sp, -8
-; MIPS-NEXT:    .cfi_def_cfa_offset 8
-; MIPS-NEXT:    sw $17, 4($sp) # 4-byte Folded Spill
-; MIPS-NEXT:    sw $16, 0($sp) # 4-byte Folded Spill
-; MIPS-NEXT:    .cfi_offset 17, -4
-; MIPS-NEXT:    .cfi_offset 16, -8
-; MIPS-NEXT:    lw $25, 36($sp)
+; MIPS:       # %bb.0:
+; MIPS-NEXT:    lw  $2, 28($sp)
 ; MIPS-NEXT:    addiu $1, $zero, 64
-; MIPS-NEXT:    subu $11, $1, $25
-; MIPS-NEXT:    sllv $9, $5, $11
-; MIPS-NEXT:    andi $13, $11, 32
-; MIPS-NEXT:    addiu $2, $zero, 0
-; MIPS-NEXT:    bnez $13, $BB5_2
-; MIPS-NEXT:    addiu $3, $zero, 0
-; MIPS-NEXT:  # %bb.1: # %entry
-; MIPS-NEXT:    move $3, $9
-; MIPS-NEXT:  $BB5_2: # %entry
-; MIPS-NEXT:    not $gp, $25
-; MIPS-NEXT:    srlv $12, $6, $25
-; MIPS-NEXT:    andi $8, $25, 32
-; MIPS-NEXT:    bnez $8, $BB5_4
-; MIPS-NEXT:    move $15, $12
-; MIPS-NEXT:  # %bb.3: # %entry
-; MIPS-NEXT:    srlv $1, $7, $25
-; MIPS-NEXT:    sll $10, $6, 1
-; MIPS-NEXT:    sllv $10, $10, $gp
-; MIPS-NEXT:    or $15, $10, $1
-; MIPS-NEXT:  $BB5_4: # %entry
-; MIPS-NEXT:    addiu $10, $25, -64
-; MIPS-NEXT:    sll $17, $4, 1
-; MIPS-NEXT:    srav $14, $4, $10
-; MIPS-NEXT:    andi $24, $10, 32
-; MIPS-NEXT:    bnez $24, $BB5_6
-; MIPS-NEXT:    move $16, $14
-; MIPS-NEXT:  # %bb.5: # %entry
-; MIPS-NEXT:    srlv $1, $5, $10
-; MIPS-NEXT:    not $10, $10
-; MIPS-NEXT:    sllv $10, $17, $10
-; MIPS-NEXT:    or $16, $10, $1
-; MIPS-NEXT:  $BB5_6: # %entry
-; MIPS-NEXT:    sltiu $10, $25, 64
-; MIPS-NEXT:    beqz $10, $BB5_8
+; MIPS-NEXT:    subu  $9, $1, $2
+; MIPS-NEXT:    sllv  $10, $5, $9
+; MIPS-NEXT:    andi  $13, $9, 32
+; MIPS-NEXT:    andi  $3, $2, 32
+; MIPS-NEXT:    addiu $11, $zero, 0
+; MIPS-NEXT:    bnez  $13, $BB5_2
+; MIPS-NEXT:    addiu $12, $zero, 0
+; MIPS-NEXT:  # %bb.1:
+; MIPS-NEXT:    move  $12, $10
+; MIPS-NEXT:  $BB5_2:
+; MIPS-NEXT:    not $8, $2
+; MIPS-NEXT:    bnez  $3, $BB5_5
+; MIPS-NEXT:    srlv  $14, $6, $2
+; MIPS-NEXT:  # %bb.3:
+; MIPS-NEXT:    sll $1, $6, 1
+; MIPS-NEXT:    srlv  $11, $7, $2
+; MIPS-NEXT:    sllv  $1, $1, $8
+; MIPS-NEXT:    or  $15, $1, $11
+; MIPS-NEXT:    bnez  $13, $BB5_7
+; MIPS-NEXT:    move  $11, $14
+; MIPS-NEXT:  # %bb.4:
+; MIPS-NEXT:    b $BB5_6
 ; MIPS-NEXT:    nop
-; MIPS-NEXT:  # %bb.7:
-; MIPS-NEXT:    or $16, $15, $3
-; MIPS-NEXT:  $BB5_8: # %entry
-; MIPS-NEXT:    srav $15, $4, $25
-; MIPS-NEXT:    beqz $8, $BB5_20
-; MIPS-NEXT:    move $3, $15
-; MIPS-NEXT:  # %bb.9: # %entry
-; MIPS-NEXT:    sltiu $gp, $25, 1
-; MIPS-NEXT:    beqz $gp, $BB5_21
+; MIPS-NEXT:  $BB5_5:
+; MIPS-NEXT:    bnez  $13, $BB5_7
+; MIPS-NEXT:    move  $15, $14
+; MIPS-NEXT:  $BB5_6:
+; MIPS-NEXT:    sllv  $1, $4, $9
+; MIPS-NEXT:    not $9, $9
+; MIPS-NEXT:    srl $10, $5, 1
+; MIPS-NEXT:    srlv  $9, $10, $9
+; MIPS-NEXT:    or  $10, $1, $9
+; MIPS-NEXT:  $BB5_7:
+; MIPS-NEXT:    addiu $24, $2, -64
+; MIPS-NEXT:    sll $13, $4, 1
+; MIPS-NEXT:    srav  $14, $4, $24
+; MIPS-NEXT:    andi  $1, $24, 32
+; MIPS-NEXT:    bnez  $1, $BB5_10
+; MIPS-NEXT:    sra $9, $4, 31
+; MIPS-NEXT:  # %bb.8:
+; MIPS-NEXT:    srlv  $1, $5, $24
+; MIPS-NEXT:    not $24, $24
+; MIPS-NEXT:    sllv  $24, $13, $24
+; MIPS-NEXT:    or  $25, $24, $1
+; MIPS-NEXT:    move  $24, $14
+; MIPS-NEXT:    sltiu $14, $2, 64
+; MIPS-NEXT:    beqz  $14, $BB5_12
 ; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB5_10: # %entry
-; MIPS-NEXT:    beqz $10, $BB5_22
-; MIPS-NEXT:    sra $25, $4, 31
-; MIPS-NEXT:  $BB5_11: # %entry
-; MIPS-NEXT:    beqz $13, $BB5_23
+; MIPS-NEXT:  # %bb.9:
+; MIPS-NEXT:    b $BB5_11
 ; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB5_12: # %entry
-; MIPS-NEXT:    beqz $8, $BB5_24
+; MIPS-NEXT:  $BB5_10:
+; MIPS-NEXT:    move  $25, $14
+; MIPS-NEXT:    sltiu $14, $2, 64
+; MIPS-NEXT:    beqz  $14, $BB5_12
+; MIPS-NEXT:    move  $24, $9
+; MIPS-NEXT:  $BB5_11:
+; MIPS-NEXT:    or  $25, $15, $12
+; MIPS-NEXT:  $BB5_12:
+; MIPS-NEXT:    sltiu $12, $2, 1
+; MIPS-NEXT:    beqz  $12, $BB5_18
 ; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB5_13: # %entry
-; MIPS-NEXT:    beqz $24, $BB5_25
-; MIPS-NEXT:    move $4, $25
-; MIPS-NEXT:  $BB5_14: # %entry
-; MIPS-NEXT:    bnez $10, $BB5_26
+; MIPS-NEXT:  # %bb.13:
+; MIPS-NEXT:    bnez  $14, $BB5_19
 ; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB5_15: # %entry
-; MIPS-NEXT:    beqz $gp, $BB5_27
+; MIPS-NEXT:  $BB5_14:
+; MIPS-NEXT:    beqz  $12, $BB5_20
 ; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB5_16: # %entry
-; MIPS-NEXT:    beqz $8, $BB5_28
-; MIPS-NEXT:    move $2, $25
-; MIPS-NEXT:  $BB5_17: # %entry
-; MIPS-NEXT:    bnez $10, $BB5_19
+; MIPS-NEXT:  $BB5_15:
+; MIPS-NEXT:    bnez  $3, $BB5_21
+; MIPS-NEXT:    srav  $4, $4, $2
+; MIPS-NEXT:  $BB5_16:
+; MIPS-NEXT:    srlv  $1, $5, $2
+; MIPS-NEXT:    sllv  $2, $13, $8
+; MIPS-NEXT:    or  $3, $2, $1
+; MIPS-NEXT:    bnez  $14, $BB5_23
+; MIPS-NEXT:    move  $2, $4
+; MIPS-NEXT:  # %bb.17:
+; MIPS-NEXT:    b $BB5_22
 ; MIPS-NEXT:    nop
-; MIPS-NEXT:  $BB5_18: # %entry
-; MIPS-NEXT:    move $2, $25
-; MIPS-NEXT:  $BB5_19: # %entry
-; MIPS-NEXT:    move $4, $6
-; MIPS-NEXT:    move $5, $7
-; MIPS-NEXT:    lw $16, 0($sp) # 4-byte Folded Reload
-; MIPS-NEXT:    lw $17, 4($sp) # 4-byte Folded Reload
-; MIPS-NEXT:    jr $ra
-; MIPS-NEXT:    addiu $sp, $sp, 8
-; MIPS-NEXT:  $BB5_20: # %entry
-; MIPS-NEXT:    srlv $1, $5, $25
-; MIPS-NEXT:    sllv $3, $17, $gp
-; MIPS-NEXT:    sltiu $gp, $25, 1
-; MIPS-NEXT:    bnez $gp, $BB5_10
-; MIPS-NEXT:    or $3, $3, $1
-; MIPS-NEXT:  $BB5_21: # %entry
-; MIPS-NEXT:    move $7, $16
-; MIPS-NEXT:    bnez $10, $BB5_11
-; MIPS-NEXT:    sra $25, $4, 31
-; MIPS-NEXT:  $BB5_22: # %entry
-; MIPS-NEXT:    bnez $13, $BB5_12
-; MIPS-NEXT:    move $3, $25
-; MIPS-NEXT:  $BB5_23: # %entry
-; MIPS-NEXT:    not $1, $11
-; MIPS-NEXT:    srl $5, $5, 1
-; MIPS-NEXT:    sllv $4, $4, $11
-; MIPS-NEXT:    srlv $1, $5, $1
-; MIPS-NEXT:    bnez $8, $BB5_13
-; MIPS-NEXT:    or $9, $4, $1
-; MIPS-NEXT:  $BB5_24: # %entry
-; MIPS-NEXT:    move $2, $12
-; MIPS-NEXT:    bnez $24, $BB5_14
-; MIPS-NEXT:    move $4, $25
-; MIPS-NEXT:  $BB5_25: # %entry
-; MIPS-NEXT:    beqz $10, $BB5_15
-; MIPS-NEXT:    move $4, $14
-; MIPS-NEXT:  $BB5_26:
-; MIPS-NEXT:    bnez $gp, $BB5_16
-; MIPS-NEXT:    or $4, $2, $9
-; MIPS-NEXT:  $BB5_27: # %entry
-; MIPS-NEXT:    move $6, $4
-; MIPS-NEXT:    bnez $8, $BB5_17
-; MIPS-NEXT:    move $2, $25
-; MIPS-NEXT:  $BB5_28: # %entry
-; MIPS-NEXT:    bnez $10, $BB5_19
-; MIPS-NEXT:    move $2, $15
-; MIPS-NEXT:  # %bb.29: # %entry
-; MIPS-NEXT:    b $BB5_18
+; MIPS-NEXT:  $BB5_18:
+; MIPS-NEXT:    beqz  $14, $BB5_14
+; MIPS-NEXT:    move  $7, $25
+; MIPS-NEXT:  $BB5_19:
+; MIPS-NEXT:    bnez  $12, $BB5_15
+; MIPS-NEXT:    or  $24, $11, $10
+; MIPS-NEXT:  $BB5_20:
+; MIPS-NEXT:    move  $6, $24
+; MIPS-NEXT:    beqz  $3, $BB5_16
+; MIPS-NEXT:    srav  $4, $4, $2
+; MIPS-NEXT:  $BB5_21:
+; MIPS-NEXT:    move  $2, $9
+; MIPS-NEXT:    bnez  $14, $BB5_23
+; MIPS-NEXT:    move  $3, $4
+; MIPS-NEXT:  $BB5_22:
+; MIPS-NEXT:    move  $2, $9
+; MIPS-NEXT:  $BB5_23:
+; MIPS-NEXT:    bnez  $14, $BB5_25
 ; MIPS-NEXT:    nop
+; MIPS-NEXT:  # %bb.24:
+; MIPS-NEXT:    move  $3, $9
+; MIPS-NEXT:  $BB5_25:
+; MIPS-NEXT:    move  $4, $6
+; MIPS-NEXT:    jr  $ra
+; MIPS-NEXT:    move  $5, $7
 ;
 ; MIPS32-LABEL: ashr_i128:
 ; MIPS32:       # %bb.0: # %entry
@@ -715,27 +691,23 @@
 ;
 ; MIPS3-LABEL: ashr_i128:
 ; MIPS3:       # %bb.0: # %entry
-; MIPS3-NEXT:    sll $8, $7, 0
-; MIPS3-NEXT:    dsrav $2, $4, $7
-; MIPS3-NEXT:    andi $6, $8, 64
-; MIPS3-NEXT:    beqz $6, .LBB5_3
-; MIPS3-NEXT:    move $3, $2
-; MIPS3-NEXT:  # %bb.1: # %entry
-; MIPS3-NEXT:    bnez $6, .LBB5_4
-; MIPS3-NEXT:    nop
-; MIPS3-NEXT:  .LBB5_2: # %entry
-; MIPS3-NEXT:    jr $ra
-; MIPS3-NEXT:    nop
-; MIPS3-NEXT:  .LBB5_3: # %entry
+; MIPS3-NEXT:    sll $2, $7, 0
+; MIPS3-NEXT:    andi  $1, $2, 64
+; MIPS3-NEXT:    bnez  $1, .LBB5_2
+; MIPS3-NEXT:    dsrav $3, $4, $7
+; MIPS3-NEXT:  # %bb.1:
 ; MIPS3-NEXT:    dsrlv $1, $5, $7
-; MIPS3-NEXT:    dsll $3, $4, 1
-; MIPS3-NEXT:    not $5, $8
-; MIPS3-NEXT:    dsllv $3, $3, $5
-; MIPS3-NEXT:    beqz $6, .LBB5_2
-; MIPS3-NEXT:    or $3, $3, $1
-; MIPS3-NEXT:  .LBB5_4:
-; MIPS3-NEXT:    jr $ra
-; MIPS3-NEXT:    dsra $2, $4, 63
+; MIPS3-NEXT:    dsll  $4, $4, 1
+; MIPS3-NEXT:    not $2, $2
+; MIPS3-NEXT:    dsllv $2, $4, $2
+; MIPS3-NEXT:    or  $1, $2, $1
+; MIPS3-NEXT:    move  $2, $3
+; MIPS3-NEXT:    jr  $ra
+; MIPS3-NEXT:    move  $3, $1
+; MIPS3-NEXT:  .LBB5_2:
+; MIPS3-NEXT:    jr  $ra
+; MIPS3-NEXT:    dsra  $2, $4, 63
+
 ;
 ; MIPS64-LABEL: ashr_i128:
 ; MIPS64:       # %bb.0: # %entry
diff --git a/test/CodeGen/Mips/llvm-ir/lshr.ll b/test/CodeGen/Mips/llvm-ir/lshr.ll
index 8068240..57b1b81 100644
--- a/test/CodeGen/Mips/llvm-ir/lshr.ll
+++ b/test/CodeGen/Mips/llvm-ir/lshr.ll
@@ -298,28 +298,22 @@
 
 define signext i64 @lshr_i64(i64 signext %a, i64 signext %b) {
 ; MIPS2-LABEL: lshr_i64:
-; MIPS2:       # %bb.0: # %entry
-; MIPS2-NEXT:    srlv $6, $4, $7
-; MIPS2-NEXT:    andi $8, $7, 32
-; MIPS2-NEXT:    beqz $8, $BB4_3
-; MIPS2-NEXT:    move $3, $6
-; MIPS2-NEXT:  # %bb.1: # %entry
-; MIPS2-NEXT:    beqz $8, $BB4_4
+; MIPS2:       # %bb.0:
+; MIPS2-NEXT:    srlv  $6, $4, $7
+; MIPS2-NEXT:    andi  $1, $7, 32
+; MIPS2-NEXT:    bnez  $1, $BB4_2
 ; MIPS2-NEXT:    addiu $2, $zero, 0
-; MIPS2-NEXT:  $BB4_2: # %entry
-; MIPS2-NEXT:    jr $ra
-; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB4_3: # %entry
-; MIPS2-NEXT:    srlv $1, $5, $7
+; MIPS2-NEXT:  # %bb.1:
+; MIPS2-NEXT:    srlv  $1, $5, $7
 ; MIPS2-NEXT:    not $2, $7
 ; MIPS2-NEXT:    sll $3, $4, 1
-; MIPS2-NEXT:    sllv $2, $3, $2
-; MIPS2-NEXT:    or $3, $2, $1
-; MIPS2-NEXT:    bnez $8, $BB4_2
-; MIPS2-NEXT:    addiu $2, $zero, 0
-; MIPS2-NEXT:  $BB4_4: # %entry
-; MIPS2-NEXT:    jr $ra
-; MIPS2-NEXT:    move $2, $6
+; MIPS2-NEXT:    sllv  $2, $3, $2
+; MIPS2-NEXT:    or  $3, $2, $1
+; MIPS2-NEXT:    jr  $ra
+; MIPS2-NEXT:    move  $2, $6
+; MIPS2-NEXT:  $BB4_2:
+; MIPS2-NEXT:    jr  $ra
+; MIPS2-NEXT:    move  $3, $6
 ;
 ; MIPS32-LABEL: lshr_i64:
 ; MIPS32:       # %bb.0: # %entry
@@ -423,131 +417,119 @@
 
 define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
 ; MIPS2-LABEL: lshr_i128:
-; MIPS2:       # %bb.0: # %entry
-; MIPS2-NEXT:    addiu $sp, $sp, -8
-; MIPS2-NEXT:    .cfi_def_cfa_offset 8
-; MIPS2-NEXT:    sw $17, 4($sp) # 4-byte Folded Spill
-; MIPS2-NEXT:    sw $16, 0($sp) # 4-byte Folded Spill
-; MIPS2-NEXT:    .cfi_offset 17, -4
-; MIPS2-NEXT:    .cfi_offset 16, -8
-; MIPS2-NEXT:    lw $2, 36($sp)
+; MIPS2:       # %bb.0:
+; MIPS2-NEXT:    lw  $2, 28($sp)
 ; MIPS2-NEXT:    addiu $1, $zero, 64
-; MIPS2-NEXT:    subu $10, $1, $2
-; MIPS2-NEXT:    sllv $9, $5, $10
-; MIPS2-NEXT:    andi $13, $10, 32
-; MIPS2-NEXT:    addiu $8, $zero, 0
-; MIPS2-NEXT:    bnez $13, $BB5_2
-; MIPS2-NEXT:    addiu $25, $zero, 0
-; MIPS2-NEXT:  # %bb.1: # %entry
-; MIPS2-NEXT:    move $25, $9
-; MIPS2-NEXT:  $BB5_2: # %entry
-; MIPS2-NEXT:    not $3, $2
-; MIPS2-NEXT:    srlv $11, $6, $2
-; MIPS2-NEXT:    andi $12, $2, 32
-; MIPS2-NEXT:    bnez $12, $BB5_4
-; MIPS2-NEXT:    move $16, $11
-; MIPS2-NEXT:  # %bb.3: # %entry
-; MIPS2-NEXT:    srlv $1, $7, $2
-; MIPS2-NEXT:    sll $14, $6, 1
-; MIPS2-NEXT:    sllv $14, $14, $3
-; MIPS2-NEXT:    or $16, $14, $1
-; MIPS2-NEXT:  $BB5_4: # %entry
-; MIPS2-NEXT:    addiu $24, $2, -64
-; MIPS2-NEXT:    sll $17, $4, 1
-; MIPS2-NEXT:    srlv $14, $4, $24
-; MIPS2-NEXT:    andi $15, $24, 32
-; MIPS2-NEXT:    bnez $15, $BB5_6
-; MIPS2-NEXT:    move $gp, $14
-; MIPS2-NEXT:  # %bb.5: # %entry
-; MIPS2-NEXT:    srlv $1, $5, $24
-; MIPS2-NEXT:    not $24, $24
-; MIPS2-NEXT:    sllv $24, $17, $24
-; MIPS2-NEXT:    or $gp, $24, $1
-; MIPS2-NEXT:  $BB5_6: # %entry
-; MIPS2-NEXT:    sltiu $24, $2, 64
-; MIPS2-NEXT:    beqz $24, $BB5_8
-; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  # %bb.7:
-; MIPS2-NEXT:    or $gp, $16, $25
-; MIPS2-NEXT:  $BB5_8: # %entry
-; MIPS2-NEXT:    srlv $25, $4, $2
-; MIPS2-NEXT:    bnez $12, $BB5_10
-; MIPS2-NEXT:    move $16, $25
-; MIPS2-NEXT:  # %bb.9: # %entry
-; MIPS2-NEXT:    srlv $1, $5, $2
-; MIPS2-NEXT:    sllv $3, $17, $3
-; MIPS2-NEXT:    or $16, $3, $1
-; MIPS2-NEXT:  $BB5_10: # %entry
-; MIPS2-NEXT:    bnez $12, $BB5_12
+; MIPS2-NEXT:    subu  $12, $1, $2
+; MIPS2-NEXT:    sllv  $10, $5, $12
+; MIPS2-NEXT:    andi  $15, $12, 32
+; MIPS2-NEXT:    andi  $8, $2, 32
 ; MIPS2-NEXT:    addiu $3, $zero, 0
-; MIPS2-NEXT:  # %bb.11: # %entry
-; MIPS2-NEXT:    move $3, $25
-; MIPS2-NEXT:  $BB5_12: # %entry
-; MIPS2-NEXT:    addiu $1, $zero, 63
-; MIPS2-NEXT:    sltiu $25, $2, 1
-; MIPS2-NEXT:    beqz $25, $BB5_22
-; MIPS2-NEXT:    sltu $17, $1, $2
-; MIPS2-NEXT:  # %bb.13: # %entry
-; MIPS2-NEXT:    beqz $17, $BB5_23
+; MIPS2-NEXT:    bnez  $15, $BB5_2
+; MIPS2-NEXT:    addiu $13, $zero, 0
+; MIPS2-NEXT:  # %bb.1:
+; MIPS2-NEXT:    move  $13, $10
+; MIPS2-NEXT:  $BB5_2:
+; MIPS2-NEXT:    not $9, $2
+; MIPS2-NEXT:    bnez  $8, $BB5_5
+; MIPS2-NEXT:    srlv  $24, $6, $2
+; MIPS2-NEXT:  # %bb.3:
+; MIPS2-NEXT:    sll $1, $6, 1
+; MIPS2-NEXT:    srlv  $11, $7, $2
+; MIPS2-NEXT:    sllv  $1, $1, $9
+; MIPS2-NEXT:    or  $14, $1, $11
+; MIPS2-NEXT:    bnez  $15, $BB5_7
+; MIPS2-NEXT:    move  $11, $24
+; MIPS2-NEXT:  # %bb.4:
+; MIPS2-NEXT:    b $BB5_6
+; MIPS2-NEXT:    nop
+; MIPS2-NEXT:  $BB5_5:
+; MIPS2-NEXT:    addiu $11, $zero, 0
+; MIPS2-NEXT:    bnez  $15, $BB5_7
+; MIPS2-NEXT:    move  $14, $24
+; MIPS2-NEXT:  $BB5_6:
+; MIPS2-NEXT:    sllv  $1, $4, $12
+; MIPS2-NEXT:    not $10, $12
+; MIPS2-NEXT:    srl $12, $5, 1
+; MIPS2-NEXT:    srlv  $10, $12, $10
+; MIPS2-NEXT:    or  $10, $1, $10
+; MIPS2-NEXT:  $BB5_7:
+; MIPS2-NEXT:    addiu $15, $2, -64
+; MIPS2-NEXT:    sll $12, $4, 1
+; MIPS2-NEXT:    andi  $1, $15, 32
+; MIPS2-NEXT:    bnez  $1, $BB5_10
+; MIPS2-NEXT:    srlv  $25, $4, $15
+; MIPS2-NEXT:  # %bb.8:
+; MIPS2-NEXT:    srlv  $1, $5, $15
+; MIPS2-NEXT:    not $15, $15
+; MIPS2-NEXT:    sllv  $15, $12, $15
+; MIPS2-NEXT:    or  $24, $15, $1
+; MIPS2-NEXT:    move  $15, $25
+; MIPS2-NEXT:    sltiu $25, $2, 64
+; MIPS2-NEXT:    beqz  $25, $BB5_12
+; MIPS2-NEXT:    nop
+; MIPS2-NEXT:  # %bb.9:
+; MIPS2-NEXT:    b $BB5_11
+; MIPS2-NEXT:    nop
+; MIPS2-NEXT:  $BB5_10:
+; MIPS2-NEXT:    move  $24, $25
+; MIPS2-NEXT:    sltiu $25, $2, 64
+; MIPS2-NEXT:    beqz  $25, $BB5_12
+; MIPS2-NEXT:    addiu $15, $zero, 0
+; MIPS2-NEXT:  $BB5_11:
+; MIPS2-NEXT:    or  $24, $14, $13
+; MIPS2-NEXT:  $BB5_12:
+; MIPS2-NEXT:    sltiu $13, $2, 1
+; MIPS2-NEXT:    beqz  $13, $BB5_19
+; MIPS2-NEXT:    nop
+; MIPS2-NEXT:  # %bb.13:
+; MIPS2-NEXT:    bnez  $25, $BB5_20
+; MIPS2-NEXT:    nop
+; MIPS2-NEXT:  $BB5_14:
+; MIPS2-NEXT:    bnez  $13, $BB5_16
+; MIPS2-NEXT:    addiu $10, $zero, 63
+; MIPS2-NEXT:  $BB5_15:
+; MIPS2-NEXT:    move  $6, $15
+; MIPS2-NEXT:  $BB5_16:
+; MIPS2-NEXT:    sltu  $10, $10, $2
+; MIPS2-NEXT:    bnez  $8, $BB5_22
+; MIPS2-NEXT:    srlv  $11, $4, $2
+; MIPS2-NEXT:  # %bb.17:
+; MIPS2-NEXT:    srlv  $1, $5, $2
+; MIPS2-NEXT:    sllv  $2, $12, $9
+; MIPS2-NEXT:    or  $4, $2, $1
+; MIPS2-NEXT:    move  $5, $11
+; MIPS2-NEXT:    bnez  $10, $BB5_24
 ; MIPS2-NEXT:    addiu $2, $zero, 0
-; MIPS2-NEXT:  $BB5_14: # %entry
-; MIPS2-NEXT:    beqz $17, $BB5_24
-; MIPS2-NEXT:    addiu $3, $zero, 0
-; MIPS2-NEXT:  $BB5_15: # %entry
-; MIPS2-NEXT:    beqz $13, $BB5_25
+; MIPS2-NEXT:  # %bb.18:
+; MIPS2-NEXT:    b $BB5_23
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_16: # %entry
-; MIPS2-NEXT:    beqz $12, $BB5_26
-; MIPS2-NEXT:    addiu $4, $zero, 0
-; MIPS2-NEXT:  $BB5_17: # %entry
-; MIPS2-NEXT:    beqz $15, $BB5_27
+; MIPS2-NEXT:  $BB5_19:
+; MIPS2-NEXT:    beqz  $25, $BB5_14
+; MIPS2-NEXT:    move  $7, $24
+; MIPS2-NEXT:  $BB5_20:
+; MIPS2-NEXT:    or  $15, $11, $10
+; MIPS2-NEXT:    bnez  $13, $BB5_16
+; MIPS2-NEXT:    addiu $10, $zero, 63
+; MIPS2-NEXT:  # %bb.21:
+; MIPS2-NEXT:    b $BB5_15
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_18: # %entry
-; MIPS2-NEXT:    bnez $24, $BB5_28
-; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_19: # %entry
-; MIPS2-NEXT:    bnez $25, $BB5_21
-; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_20: # %entry
-; MIPS2-NEXT:    move $6, $8
-; MIPS2-NEXT:  $BB5_21: # %entry
-; MIPS2-NEXT:    move $4, $6
-; MIPS2-NEXT:    move $5, $7
-; MIPS2-NEXT:    lw $16, 0($sp) # 4-byte Folded Reload
-; MIPS2-NEXT:    lw $17, 4($sp) # 4-byte Folded Reload
-; MIPS2-NEXT:    jr $ra
-; MIPS2-NEXT:    addiu $sp, $sp, 8
-; MIPS2-NEXT:  $BB5_22: # %entry
-; MIPS2-NEXT:    move $7, $gp
-; MIPS2-NEXT:    bnez $17, $BB5_14
+; MIPS2-NEXT:  $BB5_22:
+; MIPS2-NEXT:    addiu $5, $zero, 0
+; MIPS2-NEXT:    move  $4, $11
+; MIPS2-NEXT:    bnez  $10, $BB5_24
 ; MIPS2-NEXT:    addiu $2, $zero, 0
-; MIPS2-NEXT:  $BB5_23: # %entry
-; MIPS2-NEXT:    move $2, $3
-; MIPS2-NEXT:    bnez $17, $BB5_15
-; MIPS2-NEXT:    addiu $3, $zero, 0
-; MIPS2-NEXT:  $BB5_24: # %entry
-; MIPS2-NEXT:    bnez $13, $BB5_16
-; MIPS2-NEXT:    move $3, $16
-; MIPS2-NEXT:  $BB5_25: # %entry
-; MIPS2-NEXT:    not $1, $10
-; MIPS2-NEXT:    srl $5, $5, 1
-; MIPS2-NEXT:    sllv $4, $4, $10
-; MIPS2-NEXT:    srlv $1, $5, $1
-; MIPS2-NEXT:    or $9, $4, $1
-; MIPS2-NEXT:    bnez $12, $BB5_17
-; MIPS2-NEXT:    addiu $4, $zero, 0
-; MIPS2-NEXT:  $BB5_26: # %entry
-; MIPS2-NEXT:    bnez $15, $BB5_18
-; MIPS2-NEXT:    move $4, $11
-; MIPS2-NEXT:  $BB5_27: # %entry
-; MIPS2-NEXT:    beqz $24, $BB5_19
-; MIPS2-NEXT:    move $8, $14
-; MIPS2-NEXT:  $BB5_28:
-; MIPS2-NEXT:    bnez $25, $BB5_21
-; MIPS2-NEXT:    or $8, $4, $9
-; MIPS2-NEXT:  # %bb.29:
-; MIPS2-NEXT:    b $BB5_20
+; MIPS2-NEXT:  $BB5_23:
+; MIPS2-NEXT:    move  $2, $5
+; MIPS2-NEXT:  $BB5_24:
+; MIPS2-NEXT:    bnez  $10, $BB5_26
 ; MIPS2-NEXT:    nop
+; MIPS2-NEXT:  # %bb.25:
+; MIPS2-NEXT:    move  $3, $4
+; MIPS2-NEXT:  $BB5_26:
+; MIPS2-NEXT:    move  $4, $6
+; MIPS2-NEXT:    jr  $ra
+; MIPS2-NEXT:    move  $5, $7
 ;
 ; MIPS32-LABEL: lshr_i128:
 ; MIPS32:       # %bb.0: # %entry
@@ -731,29 +713,23 @@
 ; MIPS32R6-NEXT:    addiu $sp, $sp, 8
 ;
 ; MIPS3-LABEL: lshr_i128:
-; MIPS3:       # %bb.0: # %entry
-; MIPS3-NEXT:    sll $2, $7, 0
+; MIPS3:       # %bb.0:
+; MIPS3-NEXT:    sll $3, $7, 0
 ; MIPS3-NEXT:    dsrlv $6, $4, $7
-; MIPS3-NEXT:    andi $8, $2, 64
-; MIPS3-NEXT:    beqz $8, .LBB5_3
-; MIPS3-NEXT:    move $3, $6
-; MIPS3-NEXT:  # %bb.1: # %entry
-; MIPS3-NEXT:    beqz $8, .LBB5_4
-; MIPS3-NEXT:    daddiu $2, $zero, 0
-; MIPS3-NEXT:  .LBB5_2: # %entry
-; MIPS3-NEXT:    jr $ra
-; MIPS3-NEXT:    nop
-; MIPS3-NEXT:  .LBB5_3: # %entry
+; MIPS3-NEXT:    andi  $1, $3, 64
+; MIPS3-NEXT:    bnez  $1, .LBB5_2
+; MIPS3-NEXT:    daddiu  $2, $zero, 0
+; MIPS3-NEXT:  # %bb.1:
 ; MIPS3-NEXT:    dsrlv $1, $5, $7
-; MIPS3-NEXT:    dsll $3, $4, 1
-; MIPS3-NEXT:    not $2, $2
-; MIPS3-NEXT:    dsllv $2, $3, $2
-; MIPS3-NEXT:    or $3, $2, $1
-; MIPS3-NEXT:    bnez $8, .LBB5_2
-; MIPS3-NEXT:    daddiu $2, $zero, 0
-; MIPS3-NEXT:  .LBB5_4: # %entry
-; MIPS3-NEXT:    jr $ra
-; MIPS3-NEXT:    move $2, $6
+; MIPS3-NEXT:    dsll  $2, $4, 1
+; MIPS3-NEXT:    not $3, $3
+; MIPS3-NEXT:    dsllv $2, $2, $3
+; MIPS3-NEXT:    or  $3, $2, $1
+; MIPS3-NEXT:    jr  $ra
+; MIPS3-NEXT:    move  $2, $6
+; MIPS3-NEXT:  .LBB5_2:
+; MIPS3-NEXT:    jr  $ra
+; MIPS3-NEXT:    move  $3, $6
 ;
 ; MIPS4-LABEL: lshr_i128:
 ; MIPS4:       # %bb.0: # %entry
diff --git a/test/CodeGen/Mips/llvm-ir/shl.ll b/test/CodeGen/Mips/llvm-ir/shl.ll
index 6c34f63..bd69a3f 100644
--- a/test/CodeGen/Mips/llvm-ir/shl.ll
+++ b/test/CodeGen/Mips/llvm-ir/shl.ll
@@ -330,28 +330,28 @@
 
 define signext i64 @shl_i64(i64 signext %a, i64 signext %b) {
 ; MIPS2-LABEL: shl_i64:
-; MIPS2:       # %bb.0: # %entry
-; MIPS2-NEXT:    sllv $6, $5, $7
-; MIPS2-NEXT:    andi $8, $7, 32
-; MIPS2-NEXT:    beqz $8, $BB4_3
-; MIPS2-NEXT:    move $2, $6
-; MIPS2-NEXT:  # %bb.1: # %entry
-; MIPS2-NEXT:    beqz $8, $BB4_4
+; MIPS2:       # %bb.0:
+; MIPS2-NEXT:    sllv  $6, $5, $7
+; MIPS2-NEXT:    andi  $8, $7, 32
+; MIPS2-NEXT:    beqz  $8, $BB4_3
+; MIPS2-NEXT:    move  $2, $6
+; MIPS2-NEXT:  # %bb.1:
+; MIPS2-NEXT:    beqz  $8, $BB4_4
 ; MIPS2-NEXT:    addiu $3, $zero, 0
-; MIPS2-NEXT:  $BB4_2: # %entry
-; MIPS2-NEXT:    jr $ra
+; MIPS2-NEXT:  $BB4_2:
+; MIPS2-NEXT:    jr  $ra
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB4_3: # %entry
-; MIPS2-NEXT:    sllv $1, $4, $7
+; MIPS2-NEXT:  $BB4_3:
+; MIPS2-NEXT:    sllv  $1, $4, $7
 ; MIPS2-NEXT:    not $2, $7
 ; MIPS2-NEXT:    srl $3, $5, 1
-; MIPS2-NEXT:    srlv $2, $3, $2
-; MIPS2-NEXT:    or $2, $1, $2
-; MIPS2-NEXT:    bnez $8, $BB4_2
+; MIPS2-NEXT:    srlv  $2, $3, $2
+; MIPS2-NEXT:    or  $2, $1, $2
+; MIPS2-NEXT:    bnez  $8, $BB4_2
 ; MIPS2-NEXT:    addiu $3, $zero, 0
-; MIPS2-NEXT:  $BB4_4: # %entry
-; MIPS2-NEXT:    jr $ra
-; MIPS2-NEXT:    move $3, $6
+; MIPS2-NEXT:  $BB4_4:
+; MIPS2-NEXT:    jr  $ra
+; MIPS2-NEXT:    move  $3, $6
 ;
 ; MIPS32-LABEL: shl_i64:
 ; MIPS32:       # %bb.0: # %entry
@@ -455,132 +455,131 @@
 
 define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
 ; MIPS2-LABEL: shl_i128:
-; MIPS2:       # %bb.0: # %entry
+; MIPS2:       # %bb.0:
 ; MIPS2-NEXT:    addiu $sp, $sp, -8
 ; MIPS2-NEXT:    .cfi_def_cfa_offset 8
-; MIPS2-NEXT:    sw $17, 4($sp) # 4-byte Folded Spill
-; MIPS2-NEXT:    sw $16, 0($sp) # 4-byte Folded Spill
+; MIPS2-NEXT:    sw  $17, 4($sp)
+; MIPS2-NEXT:    sw  $16, 0($sp)
 ; MIPS2-NEXT:    .cfi_offset 17, -4
 ; MIPS2-NEXT:    .cfi_offset 16, -8
-; MIPS2-NEXT:    lw $8, 36($sp)
+; MIPS2-NEXT:    lw  $8, 36($sp)
 ; MIPS2-NEXT:    addiu $1, $zero, 64
-; MIPS2-NEXT:    subu $10, $1, $8
-; MIPS2-NEXT:    srlv $3, $6, $10
-; MIPS2-NEXT:    andi $13, $10, 32
+; MIPS2-NEXT:    subu  $3, $1, $8
+; MIPS2-NEXT:    srlv  $9, $6, $3
+; MIPS2-NEXT:    andi  $1, $3, 32
+; MIPS2-NEXT:    bnez  $1, $BB5_2
 ; MIPS2-NEXT:    addiu $2, $zero, 0
-; MIPS2-NEXT:    bnez $13, $BB5_2
-; MIPS2-NEXT:    addiu $25, $zero, 0
-; MIPS2-NEXT:  # %bb.1: # %entry
-; MIPS2-NEXT:    move $25, $3
-; MIPS2-NEXT:  $BB5_2: # %entry
-; MIPS2-NEXT:    not $9, $8
-; MIPS2-NEXT:    sllv $11, $5, $8
-; MIPS2-NEXT:    andi $12, $8, 32
-; MIPS2-NEXT:    bnez $12, $BB5_4
-; MIPS2-NEXT:    move $16, $11
-; MIPS2-NEXT:  # %bb.3: # %entry
-; MIPS2-NEXT:    sllv $1, $4, $8
-; MIPS2-NEXT:    srl $14, $5, 1
-; MIPS2-NEXT:    srlv $14, $14, $9
-; MIPS2-NEXT:    or $16, $1, $14
-; MIPS2-NEXT:  $BB5_4: # %entry
-; MIPS2-NEXT:    addiu $24, $8, -64
-; MIPS2-NEXT:    srl $17, $7, 1
-; MIPS2-NEXT:    sllv $14, $7, $24
-; MIPS2-NEXT:    andi $15, $24, 32
-; MIPS2-NEXT:    bnez $15, $BB5_6
-; MIPS2-NEXT:    move $gp, $14
-; MIPS2-NEXT:  # %bb.5: # %entry
-; MIPS2-NEXT:    sllv $1, $6, $24
-; MIPS2-NEXT:    not $24, $24
-; MIPS2-NEXT:    srlv $24, $17, $24
-; MIPS2-NEXT:    or $gp, $1, $24
-; MIPS2-NEXT:  $BB5_6: # %entry
-; MIPS2-NEXT:    sltiu $24, $8, 64
-; MIPS2-NEXT:    beqz $24, $BB5_8
+; MIPS2-NEXT:  # %bb.1:
+; MIPS2-NEXT:    srlv  $1, $7, $3
+; MIPS2-NEXT:    not $3, $3
+; MIPS2-NEXT:    sll $10, $6, 1
+; MIPS2-NEXT:    sllv  $3, $10, $3
+; MIPS2-NEXT:    or  $3, $3, $1
+; MIPS2-NEXT:    b $BB5_3
+; MIPS2-NEXT:    move  $15, $9
+; MIPS2-NEXT:  $BB5_2:
+; MIPS2-NEXT:    addiu $15, $zero, 0
+; MIPS2-NEXT:    move  $3, $9
+; MIPS2-NEXT:  $BB5_3:
+; MIPS2-NEXT:    not $13, $8
+; MIPS2-NEXT:    sllv  $9, $5, $8
+; MIPS2-NEXT:    andi  $10, $8, 32
+; MIPS2-NEXT:    bnez  $10, $BB5_5
+; MIPS2-NEXT:    move  $25, $9
+; MIPS2-NEXT:  # %bb.4:
+; MIPS2-NEXT:    sllv  $1, $4, $8
+; MIPS2-NEXT:    srl $11, $5, 1
+; MIPS2-NEXT:    srlv  $11, $11, $13
+; MIPS2-NEXT:    or  $25, $1, $11
+; MIPS2-NEXT:  $BB5_5:
+; MIPS2-NEXT:    addiu $14, $8, -64
+; MIPS2-NEXT:    srl $24, $7, 1
+; MIPS2-NEXT:    sllv  $11, $7, $14
+; MIPS2-NEXT:    andi  $12, $14, 32
+; MIPS2-NEXT:    bnez  $12, $BB5_7
+; MIPS2-NEXT:    move  $gp, $11
+; MIPS2-NEXT:  # %bb.6:
+; MIPS2-NEXT:    sllv  $1, $6, $14
+; MIPS2-NEXT:    not $14, $14
+; MIPS2-NEXT:    srlv  $14, $24, $14
+; MIPS2-NEXT:    or  $gp, $1, $14
+; MIPS2-NEXT:  $BB5_7:
+; MIPS2-NEXT:    sltiu $14, $8, 64
+; MIPS2-NEXT:    beqz  $14, $BB5_9
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  # %bb.7:
-; MIPS2-NEXT:    or $gp, $16, $25
-; MIPS2-NEXT:  $BB5_8: # %entry
-; MIPS2-NEXT:    sllv $25, $7, $8
-; MIPS2-NEXT:    bnez $12, $BB5_10
-; MIPS2-NEXT:    move $16, $25
-; MIPS2-NEXT:  # %bb.9: # %entry
-; MIPS2-NEXT:    sllv $1, $6, $8
-; MIPS2-NEXT:    srlv $9, $17, $9
-; MIPS2-NEXT:    or $16, $1, $9
-; MIPS2-NEXT:  $BB5_10: # %entry
-; MIPS2-NEXT:    bnez $12, $BB5_12
-; MIPS2-NEXT:    addiu $9, $zero, 0
-; MIPS2-NEXT:  # %bb.11: # %entry
-; MIPS2-NEXT:    move $9, $25
-; MIPS2-NEXT:  $BB5_12: # %entry
+; MIPS2-NEXT:  # %bb.8:
+; MIPS2-NEXT:    or  $gp, $25, $15
+; MIPS2-NEXT:  $BB5_9:
+; MIPS2-NEXT:    sllv  $25, $7, $8
+; MIPS2-NEXT:    bnez  $10, $BB5_11
+; MIPS2-NEXT:    addiu $17, $zero, 0
+; MIPS2-NEXT:  # %bb.10:
+; MIPS2-NEXT:    move  $17, $25
+; MIPS2-NEXT:  $BB5_11:
 ; MIPS2-NEXT:    addiu $1, $zero, 63
-; MIPS2-NEXT:    sltiu $25, $8, 1
-; MIPS2-NEXT:    beqz $25, $BB5_22
-; MIPS2-NEXT:    sltu $17, $1, $8
-; MIPS2-NEXT:  # %bb.13: # %entry
-; MIPS2-NEXT:    beqz $17, $BB5_23
-; MIPS2-NEXT:    addiu $8, $zero, 0
-; MIPS2-NEXT:  $BB5_14: # %entry
-; MIPS2-NEXT:    beqz $17, $BB5_24
-; MIPS2-NEXT:    addiu $9, $zero, 0
-; MIPS2-NEXT:  $BB5_15: # %entry
-; MIPS2-NEXT:    beqz $13, $BB5_25
+; MIPS2-NEXT:    sltiu $15, $8, 1
+; MIPS2-NEXT:    beqz  $15, $BB5_21
+; MIPS2-NEXT:    sltu  $16, $1, $8
+; MIPS2-NEXT:  # %bb.12:
+; MIPS2-NEXT:    beqz  $16, $BB5_22
+; MIPS2-NEXT:    addiu $7, $zero, 0
+; MIPS2-NEXT:  $BB5_13:
+; MIPS2-NEXT:    beqz  $10, $BB5_23
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_16: # %entry
-; MIPS2-NEXT:    beqz $12, $BB5_26
+; MIPS2-NEXT:  $BB5_14:
+; MIPS2-NEXT:    beqz  $16, $BB5_24
 ; MIPS2-NEXT:    addiu $6, $zero, 0
-; MIPS2-NEXT:  $BB5_17: # %entry
-; MIPS2-NEXT:    beqz $15, $BB5_27
+; MIPS2-NEXT:  $BB5_15:
+; MIPS2-NEXT:    beqz  $10, $BB5_25
+; MIPS2-NEXT:    addiu $8, $zero, 0
+; MIPS2-NEXT:  $BB5_16:
+; MIPS2-NEXT:    beqz  $12, $BB5_26
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_18: # %entry
-; MIPS2-NEXT:    bnez $24, $BB5_28
+; MIPS2-NEXT:  $BB5_17:
+; MIPS2-NEXT:    bnez  $14, $BB5_27
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_19: # %entry
-; MIPS2-NEXT:    bnez $25, $BB5_21
+; MIPS2-NEXT:  $BB5_18:
+; MIPS2-NEXT:    bnez  $15, $BB5_20
 ; MIPS2-NEXT:    nop
-; MIPS2-NEXT:  $BB5_20: # %entry
-; MIPS2-NEXT:    move $5, $2
-; MIPS2-NEXT:  $BB5_21: # %entry
-; MIPS2-NEXT:    move $2, $4
-; MIPS2-NEXT:    move $3, $5
-; MIPS2-NEXT:    move $4, $9
-; MIPS2-NEXT:    move $5, $8
-; MIPS2-NEXT:    lw $16, 0($sp) # 4-byte Folded Reload
-; MIPS2-NEXT:    lw $17, 4($sp) # 4-byte Folded Reload
-; MIPS2-NEXT:    jr $ra
+; MIPS2-NEXT:  $BB5_19:
+; MIPS2-NEXT:    move  $5, $2
+; MIPS2-NEXT:  $BB5_20:
+; MIPS2-NEXT:    move  $2, $4
+; MIPS2-NEXT:    move  $3, $5
+; MIPS2-NEXT:    move  $4, $6
+; MIPS2-NEXT:    move  $5, $7
+; MIPS2-NEXT:    lw  $16, 0($sp)
+; MIPS2-NEXT:    lw  $17, 4($sp)
+; MIPS2-NEXT:    jr  $ra
 ; MIPS2-NEXT:    addiu $sp, $sp, 8
-; MIPS2-NEXT:  $BB5_22: # %entry
-; MIPS2-NEXT:    move $4, $gp
-; MIPS2-NEXT:    bnez $17, $BB5_14
-; MIPS2-NEXT:    addiu $8, $zero, 0
-; MIPS2-NEXT:  $BB5_23: # %entry
-; MIPS2-NEXT:    move $8, $9
-; MIPS2-NEXT:    bnez $17, $BB5_15
-; MIPS2-NEXT:    addiu $9, $zero, 0
-; MIPS2-NEXT:  $BB5_24: # %entry
-; MIPS2-NEXT:    bnez $13, $BB5_16
-; MIPS2-NEXT:    move $9, $16
-; MIPS2-NEXT:  $BB5_25: # %entry
-; MIPS2-NEXT:    not $1, $10
-; MIPS2-NEXT:    sll $3, $6, 1
-; MIPS2-NEXT:    srlv $6, $7, $10
-; MIPS2-NEXT:    sllv $1, $3, $1
-; MIPS2-NEXT:    or $3, $1, $6
-; MIPS2-NEXT:    bnez $12, $BB5_17
+; MIPS2-NEXT:  $BB5_21:
+; MIPS2-NEXT:    move  $4, $gp
+; MIPS2-NEXT:    bnez  $16, $BB5_13
+; MIPS2-NEXT:    addiu $7, $zero, 0
+; MIPS2-NEXT:  $BB5_22:
+; MIPS2-NEXT:    bnez  $10, $BB5_14
+; MIPS2-NEXT:    move  $7, $17
+; MIPS2-NEXT:  $BB5_23:
+; MIPS2-NEXT:    sllv  $1, $6, $8
+; MIPS2-NEXT:    srlv  $6, $24, $13
+; MIPS2-NEXT:    or  $25, $1, $6
+; MIPS2-NEXT:    bnez  $16, $BB5_15
 ; MIPS2-NEXT:    addiu $6, $zero, 0
-; MIPS2-NEXT:  $BB5_26: # %entry
-; MIPS2-NEXT:    bnez $15, $BB5_18
-; MIPS2-NEXT:    move $6, $11
-; MIPS2-NEXT:  $BB5_27: # %entry
-; MIPS2-NEXT:    beqz $24, $BB5_19
-; MIPS2-NEXT:    move $2, $14
-; MIPS2-NEXT:  $BB5_28:
-; MIPS2-NEXT:    bnez $25, $BB5_21
-; MIPS2-NEXT:    or $2, $6, $3
-; MIPS2-NEXT:  # %bb.29:
-; MIPS2-NEXT:    b $BB5_20
+; MIPS2-NEXT:  $BB5_24:
+; MIPS2-NEXT:    move  $6, $25
+; MIPS2-NEXT:    bnez  $10, $BB5_16
+; MIPS2-NEXT:    addiu $8, $zero, 0
+; MIPS2-NEXT:  $BB5_25:
+; MIPS2-NEXT:    bnez  $12, $BB5_17
+; MIPS2-NEXT:    move  $8, $9
+; MIPS2-NEXT:  $BB5_26:
+; MIPS2-NEXT:    beqz  $14, $BB5_18
+; MIPS2-NEXT:    move  $2, $11
+; MIPS2-NEXT:  $BB5_27:
+; MIPS2-NEXT:    bnez  $15, $BB5_20
+; MIPS2-NEXT:    or  $2, $8, $3
+; MIPS2-NEXT:  # %bb.28:
+; MIPS2-NEXT:    b $BB5_19
 ; MIPS2-NEXT:    nop
 ;
 ; MIPS32-LABEL: shl_i128:
@@ -760,29 +759,29 @@
 ; MIPS32R6-NEXT:    move $5, $1
 ;
 ; MIPS3-LABEL: shl_i128:
-; MIPS3:       # %bb.0: # %entry
+; MIPS3:       # %bb.0:
 ; MIPS3-NEXT:    sll $3, $7, 0
 ; MIPS3-NEXT:    dsllv $6, $5, $7
-; MIPS3-NEXT:    andi $8, $3, 64
-; MIPS3-NEXT:    beqz $8, .LBB5_3
-; MIPS3-NEXT:    move $2, $6
-; MIPS3-NEXT:  # %bb.1: # %entry
-; MIPS3-NEXT:    beqz $8, .LBB5_4
-; MIPS3-NEXT:    daddiu $3, $zero, 0
-; MIPS3-NEXT:  .LBB5_2: # %entry
-; MIPS3-NEXT:    jr $ra
+; MIPS3-NEXT:    andi  $8, $3, 64
+; MIPS3-NEXT:    beqz  $8, .LBB5_3
+; MIPS3-NEXT:    move  $2, $6
+; MIPS3-NEXT:  # %bb.1:
+; MIPS3-NEXT:    beqz  $8, .LBB5_4
+; MIPS3-NEXT:    daddiu  $3, $zero, 0
+; MIPS3-NEXT:  .LBB5_2:
+; MIPS3-NEXT:    jr  $ra
 ; MIPS3-NEXT:    nop
-; MIPS3-NEXT:  .LBB5_3: # %entry
+; MIPS3-NEXT:  .LBB5_3:
 ; MIPS3-NEXT:    dsllv $1, $4, $7
-; MIPS3-NEXT:    dsrl $2, $5, 1
+; MIPS3-NEXT:    dsrl  $2, $5, 1
 ; MIPS3-NEXT:    not $3, $3
 ; MIPS3-NEXT:    dsrlv $2, $2, $3
-; MIPS3-NEXT:    or $2, $1, $2
-; MIPS3-NEXT:    bnez $8, .LBB5_2
-; MIPS3-NEXT:    daddiu $3, $zero, 0
-; MIPS3-NEXT:  .LBB5_4: # %entry
-; MIPS3-NEXT:    jr $ra
-; MIPS3-NEXT:    move $3, $6
+; MIPS3-NEXT:    or  $2, $1, $2
+; MIPS3-NEXT:    bnez  $8, .LBB5_2
+; MIPS3-NEXT:    daddiu  $3, $zero, 0
+; MIPS3-NEXT:  .LBB5_4:
+; MIPS3-NEXT:    jr  $ra
+; MIPS3-NEXT:    move  $3, $6
 ;
 ; MIPS4-LABEL: shl_i128:
 ; MIPS4:       # %bb.0: # %entry
diff --git a/test/CodeGen/Mips/micromips-b-range.ll b/test/CodeGen/Mips/micromips-b-range.ll
index f761d1c..5831ae8 100644
--- a/test/CodeGen/Mips/micromips-b-range.ll
+++ b/test/CodeGen/Mips/micromips-b-range.ll
@@ -44,8 +44,7 @@
 ; CHECK-NEXT:    9a:	ff fd 00 00 	lw	$ra, 0($sp)
 ; CHECK-NEXT:    9e:	00 01 0f 3c 	jr	$1
 ; CHECK-NEXT:    a2:	33 bd 00 08 	addiu	$sp, $sp, 8
-
-; CHECK:      10466:	00 00 00 00 	nop
+; CHECK:                ...
 ; CHECK-NEXT: 1046a:	94 00 00 02 	b	8 <foo+0x10472>
 ; CHECK-NEXT: 1046e:	00 00 00 00 	nop
 ; CHECK-NEXT: 10472:	33 bd ff f8 	addiu	$sp, $sp, -8
diff --git a/test/CodeGen/NVPTX/calls-with-phi.ll b/test/CodeGen/NVPTX/calls-with-phi.ll
new file mode 100644
index 0000000..6e010ea
--- /dev/null
+++ b/test/CodeGen/NVPTX/calls-with-phi.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=nvptx 2>&1 | FileCheck %s
+; Make sure the example doesn't crash with a segfault
+
+; CHECK: .visible .func ({{.*}}) loop
+define i32 @loop(i32, i32) {
+entry:
+  br label %loop
+
+loop:
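+  ; %i takes %0 on entry and the call result %res on the back edge, so the
+  ; call sits inside a CFG cycle.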
+  %i = phi i32 [ %0, %entry ], [ %res, %loop ]
+  %res = call i32 @div(i32 %i, i32 %1)
+
+  %exitcond = icmp eq i32 %res, %0
+  br i1 %exitcond, label %exit, label %loop
+
+exit:
+  ret i32 %res
+}
+
+define i32 @div(i32, i32) {
+  ret i32 0
+}
diff --git a/test/CodeGen/NVPTX/f16-instructions.ll b/test/CodeGen/NVPTX/f16-instructions.ll
index 4548d55..7788adc 100644
--- a/test/CodeGen/NVPTX/f16-instructions.ll
+++ b/test/CodeGen/NVPTX/f16-instructions.ll
@@ -1,20 +1,20 @@
 ; ## Full FP16 support enabled by default.
 ; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN:          -O0 -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
 ; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOFTZ,CHECK-F16,CHECK-F16-NOFTZ %s
 ; ## Full FP16 with FTZ
 ; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN:          -O0 -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
 ; RUN:          -nvptx-f32ftz \
 ; RUN: | FileCheck -check-prefixes CHECK,CHECK-F16,CHECK-F16-FTZ %s
 ; ## FP16 support explicitly disabled.
 ; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN:          -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math \
 ; RUN:           -verify-machineinstrs \
 ; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOFTZ,CHECK-NOF16 %s
 ; ## FP16 is not supported by hardware.
 ; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
-; RUN:          -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN:          -disable-post-ra -frame-pointer=all -verify-machineinstrs \
 ; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOFTZ,CHECK-NOF16 %s
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/test/CodeGen/NVPTX/f16x2-instructions.ll b/test/CodeGen/NVPTX/f16x2-instructions.ll
index 77ce45a..a899681 100644
--- a/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -1,15 +1,15 @@
 ; ## Full FP16 support enabled by default.
 ; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN:          -O0 -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
 ; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK,CHECK-F16 %s
 ; ## FP16 support explicitly disabled.
 ; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN:          -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math \
 ; RUN:           -verify-machineinstrs \
 ; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK,CHECK-NOF16 %s
 ; ## FP16 is not supported by hardware.
 ; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
-; RUN:          -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN:          -disable-post-ra -frame-pointer=all -verify-machineinstrs \
 ; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK,CHECK-NOF16 %s
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/test/CodeGen/NVPTX/ld-st-addrrspace.py b/test/CodeGen/NVPTX/ld-st-addrrspace.py
index c944066..7c28620 100644
--- a/test/CodeGen/NVPTX/ld-st-addrrspace.py
+++ b/test/CodeGen/NVPTX/ld-st-addrrspace.py
@@ -5,6 +5,8 @@
 # RUN: llc < %t.ll -march=nvptx64 -mcpu=sm_30 | FileCheck -check-prefixes=CHECK,CHECK_P64 %t.ll
 # RUN: llc < %t.ll -march=nvptx -mcpu=sm_30 | FileCheck -check-prefixes=CHECK,CHECK_P32 %t.ll
 
+from __future__ import print_function
+
 from itertools import product
 from string import Template
 
diff --git a/test/CodeGen/NVPTX/libcall-fulfilled.ll b/test/CodeGen/NVPTX/libcall-fulfilled.ll
new file mode 100644
index 0000000..9d6777d
--- /dev/null
+++ b/test/CodeGen/NVPTX/libcall-fulfilled.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -march=nvptx 2>&1 | FileCheck %s
+; Allow making libcalls to functions that are defined in the current module
+
+; Underlying libcall declaration
+; CHECK: .visible .func  (.param .align 16 .b8 func_retval0[16]) __umodti3
+
+define i128 @remainder(i128, i128) {
+bb0:
+  ; CHECK:      { // callseq 0, 0
+  ; CHECK:      call.uni (retval0),
+  ; CHECK-NEXT: __umodti3,
+  ; CHECK-NEXT: (
+  ; CHECK-NEXT: param0,
+  ; CHECK-NEXT: param1
+  ; CHECK-NEXT: );
+  ; CHECK-NEXT: ld.param.v2.b64 {%[[REG0:rd[0-9]+]], %[[REG1:rd[0-9]+]]}, [retval0+0];
+  ; CHECK-NEXT: } // callseq 0
+  %a = urem i128 %0, %1
+  br label %bb1
+
+bb1:
+  ; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%[[REG0]], %[[REG1]]};
+  ; CHECK-NEXT: ret;
+  ret i128 %a
+}
+
+; Underlying libcall definition
+; CHECK: .visible .func  (.param .align 16 .b8 func_retval0[16]) __umodti3(
+define i128 @__umodti3(i128, i128) {
+  ret i128 0
+}
diff --git a/test/CodeGen/NVPTX/libcall-instruction.ll b/test/CodeGen/NVPTX/libcall-instruction.ll
new file mode 100644
index 0000000..a40a504
--- /dev/null
+++ b/test/CodeGen/NVPTX/libcall-instruction.ll
@@ -0,0 +1,8 @@
+; RUN: not llc < %s -march=nvptx 2>&1 | FileCheck %s
+; used to crash on a failed assertion and now fails with an "Undefined external symbol" error
+
+; CHECK: LLVM ERROR: Undefined external symbol "__umodti3"
+define hidden i128 @remainder(i128, i128) {
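+  ; i128 urem lowers to a __umodti3 libcall, and no such symbol is declared
+  ; or defined in this module.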
+  %3 = urem i128 %0, %1
+  ret i128 %3
+}
diff --git a/test/CodeGen/NVPTX/libcall-intrinsic.ll b/test/CodeGen/NVPTX/libcall-intrinsic.ll
new file mode 100644
index 0000000..0b5e022
--- /dev/null
+++ b/test/CodeGen/NVPTX/libcall-intrinsic.ll
@@ -0,0 +1,10 @@
+; RUN: not llc < %s -march=nvptx 2>&1 | FileCheck %s
+; used to segfault and now fails with an "Undefined external symbol" error
+
+; CHECK: LLVM ERROR: Undefined external symbol "__powidf2"
+define double @powi(double, i32) {
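+  ; The llvm.powi.f64 intrinsic lowers to the __powidf2 libcall, which is not
+  ; defined in this module.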
+  %a = call double @llvm.powi.f64(double %0, i32 %1)
+  ret double %a
+}
+
+declare double @llvm.powi.f64(double, i32) nounwind readnone
diff --git a/test/CodeGen/NVPTX/proxy-reg-erasure-mir.ll b/test/CodeGen/NVPTX/proxy-reg-erasure-mir.ll
new file mode 100644
index 0000000..6bfbe2a
--- /dev/null
+++ b/test/CodeGen/NVPTX/proxy-reg-erasure-mir.ll
@@ -0,0 +1,25 @@
+; RUN: llc -march=nvptx64 -stop-before=nvptx-proxyreg-erasure < %s 2>&1 \
+; RUN:   | FileCheck %s --check-prefix=MIR --check-prefix=MIR-BEFORE
+
+; RUN: llc -march=nvptx64 -stop-after=nvptx-proxyreg-erasure < %s 2>&1 \
+; RUN:   | FileCheck %s --check-prefix=MIR --check-prefix=MIR-AFTER
+
+; Check the MIR manipulation performed by the ProxyRegErasure pass.
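+; Before the pass, the loaded call results are routed through ProxyRegI32
+; instructions; after it, StoreRetvalV4I32 consumes the loads directly.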
+
+declare <4 x i32> @callee_vec_i32()
+define  <4 x i32> @check_vec_i32() {
+  ; MIR: body:
+  ; MIR-DAG: Callseq_Start {{[0-9]+}}, {{[0-9]+}}
+  ; MIR-DAG: %0:int32regs, %1:int32regs, %2:int32regs, %3:int32regs = LoadParamMemV4I32 0
+  ; MIR-DAG: Callseq_End {{[0-9]+}}
+
+  ; MIR-BEFORE-DAG: %4:int32regs = ProxyRegI32 killed %0
+  ; MIR-BEFORE-DAG: %5:int32regs = ProxyRegI32 killed %1
+  ; MIR-BEFORE-DAG: %6:int32regs = ProxyRegI32 killed %2
+  ; MIR-BEFORE-DAG: %7:int32regs = ProxyRegI32 killed %3
+  ; MIR-BEFORE-DAG: StoreRetvalV4I32 killed %4, killed %5, killed %6, killed %7, 0
+  ; MIR-AFTER-DAG:  StoreRetvalV4I32 killed %0, killed %1, killed %2, killed %3, 0
+
+  %ret = call <4 x i32> @callee_vec_i32()
+  ret <4 x i32> %ret
+}
diff --git a/test/CodeGen/NVPTX/proxy-reg-erasure-ptx.ll b/test/CodeGen/NVPTX/proxy-reg-erasure-ptx.ll
new file mode 100644
index 0000000..7a3b29b
--- /dev/null
+++ b/test/CodeGen/NVPTX/proxy-reg-erasure-ptx.ll
@@ -0,0 +1,183 @@
+; RUN: llc -march=nvptx64 -stop-before=nvptx-proxyreg-erasure < %s 2>&1 \
+; RUN:   | llc -x mir -march=nvptx64 -start-before=nvptx-proxyreg-erasure 2>&1 \
+; RUN:   | FileCheck %s --check-prefix=PTX --check-prefix=PTX-WITH
+
+; RUN: llc -march=nvptx64 -stop-before=nvptx-proxyreg-erasure < %s 2>&1 \
+; RUN:   | llc -x mir -march=nvptx64 -start-after=nvptx-proxyreg-erasure 2>&1 \
+; RUN:   | FileCheck %s --check-prefix=PTX --check-prefix=PTX-WITHOUT
+
+; Thorough testing of ProxyRegErasure: PTX assembly with and without the pass.
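+; Without the pass, each proxy register survives as an extra mov between the
+; retval load and the return-value store; with the pass, the load feeds the
+; store directly.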
+
+declare i1 @callee_i1()
+define i1 @check_i1() {
+  ; PTX-LABEL: check_i1
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%r[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: and.b32 [[RES:%r[0-9]+]], [[PROXY]], 1;
+  ; PTX-WITH-DAG:    and.b32 [[RES:%r[0-9]+]], [[LD]], 1;
+
+  ; PTX-DAG: st.param.b32 [func_retval0+0], [[RES]];
+
+  %ret = call i1 @callee_i1()
+  ret i1 %ret
+}
+
+declare i16 @callee_i16()
+define  i16 @check_i16() {
+  ; PTX-LABEL: check_i16
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%r[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: and.b32 [[RES:%r[0-9]+]], [[PROXY]], 65535;
+  ; PTX-WITH-DAG:    and.b32 [[RES:%r[0-9]+]], [[LD]], 65535;
+
+  ; PTX-DAG: st.param.b32 [func_retval0+0], [[RES]];
+
+  %ret = call i16 @callee_i16()
+  ret i16 %ret
+}
+
+declare i32 @callee_i32()
+define  i32 @check_i32() {
+  ; PTX-LABEL: check_i32
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%r[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: st.param.b32 [func_retval0+0], [[PROXY]];
+  ; PTX-WITH-DAG:    st.param.b32 [func_retval0+0], [[LD]];
+
+  %ret = call i32 @callee_i32()
+  ret i32 %ret
+}
+
+declare i64 @callee_i64()
+define  i64 @check_i64() {
+  ; PTX-LABEL: check_i64
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.b64 [[LD:%rd[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b64 [[PROXY:%rd[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: st.param.b64 [func_retval0+0], [[PROXY]];
+  ; PTX-WITH-DAG:    st.param.b64 [func_retval0+0], [[LD]];
+
+  %ret = call i64 @callee_i64()
+  ret i64 %ret
+}
+
+declare i128 @callee_i128()
+define  i128 @check_i128() {
+  ; PTX-LABEL: check_i128
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.v2.b64 {[[LD0:%rd[0-9]+]], [[LD1:%rd[0-9]+]]}, [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b64 [[PROXY0:%rd[0-9]+]], [[LD0]];
+  ; PTX-WITHOUT-DAG: mov.b64 [[PROXY1:%rd[0-9]+]], [[LD1]];
+  ; PTX-WITHOUT-DAG: st.param.v2.b64 [func_retval0+0], {[[PROXY0]], [[PROXY1]]};
+  ; PTX-WITH-DAG:    st.param.v2.b64 [func_retval0+0], {[[LD0]], [[LD1]]};
+
+  %ret = call i128 @callee_i128()
+  ret i128 %ret
+}
+
+declare half @callee_f16()
+define  half @check_f16() {
+  ; PTX-LABEL: check_f16
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.b16 [[LD:%h[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b16 [[PROXY:%h[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: st.param.b16 [func_retval0+0], [[PROXY]];
+  ; PTX-WITH-DAG:    st.param.b16 [func_retval0+0], [[LD]];
+
+  %ret = call half @callee_f16()
+  ret half %ret
+}
+
+declare float @callee_f32()
+define  float @check_f32() {
+  ; PTX-LABEL: check_f32
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.f32 [[LD:%f[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.f32 [[PROXY:%f[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: st.param.f32 [func_retval0+0], [[PROXY]];
+  ; PTX-WITH-DAG:    st.param.f32 [func_retval0+0], [[LD]];
+
+  %ret = call float @callee_f32()
+  ret float %ret
+}
+
+declare double @callee_f64()
+define  double @check_f64() {
+  ; PTX-LABEL: check_f64
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.f64 [[LD:%fd[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.f64 [[PROXY:%fd[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: st.param.f64 [func_retval0+0], [[PROXY]];
+  ; PTX-WITH-DAG:    st.param.f64 [func_retval0+0], [[LD]];
+
+  %ret = call double @callee_f64()
+  ret double %ret
+}
+
+declare <4 x i32> @callee_vec_i32()
+define  <4 x i32> @check_vec_i32() {
+  ; PTX-LABEL: check_vec_i32
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.v4.b32 {[[LD0:%r[0-9]+]], [[LD1:%r[0-9]+]], [[LD2:%r[0-9]+]], [[LD3:%r[0-9]+]]}, [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY0:%r[0-9]+]], [[LD0]];
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY1:%r[0-9]+]], [[LD1]];
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY2:%r[0-9]+]], [[LD2]];
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY3:%r[0-9]+]], [[LD3]];
+  ; PTX-WITHOUT-DAG: st.param.v4.b32 [func_retval0+0], {[[PROXY0]], [[PROXY1]], [[PROXY2]], [[PROXY3]]};
+  ; PTX-WITH-DAG:    st.param.v4.b32 [func_retval0+0], {[[LD0]], [[LD1]], [[LD2]], [[LD3]]};
+
+  %ret = call <4 x i32> @callee_vec_i32()
+  ret <4 x i32> %ret
+}
+
+declare <2 x half> @callee_vec_f16()
+define  <2 x half> @check_vec_f16() {
+  ; PTX-LABEL: check_vec_f16
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.b32 [[LD:%hh[0-9]+]], [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%hh[0-9]+]], [[LD]];
+  ; PTX-WITHOUT-DAG: st.param.b32 [func_retval0+0], [[PROXY]];
+  ; PTX-WITH-DAG:    st.param.b32 [func_retval0+0], [[LD]];
+
+  %ret = call <2 x half> @callee_vec_f16()
+  ret <2 x half> %ret
+}
+
+declare <2 x double> @callee_vec_f64()
+define  <2 x double> @check_vec_f64() {
+  ; PTX-LABEL: check_vec_f64
+  ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}}
+  ; PTX-DAG: ld.param.v2.f64 {[[LD0:%fd[0-9]+]], [[LD1:%fd[0-9]+]]}, [retval0+0];
+  ; PTX-DAG: } // callseq {{[0-9]+}}
+
+  ; PTX-WITHOUT-DAG: mov.f64 [[PROXY0:%fd[0-9]+]], [[LD0]];
+  ; PTX-WITHOUT-DAG: mov.f64 [[PROXY1:%fd[0-9]+]], [[LD1]];
+  ; PTX-WITHOUT-DAG: st.param.v2.f64 [func_retval0+0], {[[PROXY0]], [[PROXY1]]};
+  ; PTX-WITH-DAG:    st.param.v2.f64 [func_retval0+0], {[[LD0]], [[LD1]]};
+
+  %ret = call <2 x double> @callee_vec_f64()
+  ret <2 x double> %ret
+}
diff --git a/test/CodeGen/NVPTX/wmma.py b/test/CodeGen/NVPTX/wmma.py
index bec5a27..14bbfd7 100644
--- a/test/CodeGen/NVPTX/wmma.py
+++ b/test/CodeGen/NVPTX/wmma.py
@@ -4,6 +4,8 @@
 # RUN: python %s > %t.ll
 # RUN: llc < %t.ll -march=nvptx64 -mcpu=sm_70 -mattr=+ptx61 | FileCheck %t.ll
 
+from __future__ import print_function
+
 from itertools import product
 from string import Template
 
diff --git a/test/CodeGen/NVPTX/zero-cs.ll b/test/CodeGen/NVPTX/zero-cs.ll
deleted file mode 100644
index 7a7a990..0000000
--- a/test/CodeGen/NVPTX/zero-cs.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llc < %s -march=nvptx 2>&1 | FileCheck %s
-; used to seqfault and now fails with a "Cannot select"
-
-; CHECK: LLVM ERROR: Cannot select: {{t7|0x[0-9a-f]+}}: i32 = ExternalSymbol'__powidf2'
-define double @powi() {
-  %1 = call double @llvm.powi.f64(double 1.000000e+00, i32 undef)
-  ret double %1
-}
-
-declare double @llvm.powi.f64(double, i32) nounwind readnone
diff --git a/test/CodeGen/Nios2/add-sub.ll b/test/CodeGen/Nios2/add-sub.ll
deleted file mode 100644
index 7c9a289..0000000
--- a/test/CodeGen/Nios2/add-sub.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=nios2 2>&1 | FileCheck %s
-; RUN: llc < %s -march=nios2 -target-abi=nios2r2 2>&1 | FileCheck %s
-
-define i32 @add_reg(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: add_reg:
-; CHECK:   add r2, r4, r5
-  %c = add i32 %a, %b
-  ret i32 %c
-}
-
-define i32 @sub_reg(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: sub_reg:
-; CHECK:   sub r2, r4, r5
-  %c = sub i32 %a, %b
-  ret i32 %c
-}
-
diff --git a/test/CodeGen/Nios2/lit.local.cfg b/test/CodeGen/Nios2/lit.local.cfg
deleted file mode 100644
index 84c8b03..0000000
--- a/test/CodeGen/Nios2/lit.local.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-if not 'Nios2' in config.root.targets:
-    config.unsupported = True
-
diff --git a/test/CodeGen/Nios2/mul-div.ll b/test/CodeGen/Nios2/mul-div.ll
deleted file mode 100644
index 8327823..0000000
--- a/test/CodeGen/Nios2/mul-div.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=nios2 2>&1 | FileCheck %s
-; RUN: llc < %s -march=nios2 -target-abi=nios2r2 2>&1 | FileCheck %s
-
-define i32 @mul_reg(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: mul_reg:
-; CHECK:   mul r2, r4, r5
-  %c = mul i32 %a, %b
-  ret i32 %c
-}
-
-define i32 @div_signed(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: div_signed:
-; CHECK:   div r2, r4, r5
-  %c = sdiv i32 %a, %b
-  ret i32 %c
-}
-
-define i32 @div_unsigned(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: div_unsigned:
-; CHECK:   divu r2, r4, r5
-  %c = udiv i32 %a, %b
-  ret i32 %c
-}
-
diff --git a/test/CodeGen/Nios2/proc_support.ll b/test/CodeGen/Nios2/proc_support.ll
deleted file mode 100644
index c83c105..0000000
--- a/test/CodeGen/Nios2/proc_support.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; This tests that llc accepts Nios2 processors.
-
-; RUN: not not llc < %s -asm-verbose=false -march=nios2 -mcpu=nios2r1 2>&1 | FileCheck %s --check-prefix=ARCH
-; RUN: not not llc < %s -asm-verbose=false -march=nios2 -mcpu=nios2r2 2>&1 | FileCheck %s --check-prefix=ARCH
-
-; ARCH-NOT: is not a recognized processor
-
-define i32 @f(i32 %i) {
-  ret i32 %i
-}
diff --git a/test/CodeGen/Nios2/ret_generated.ll b/test/CodeGen/Nios2/ret_generated.ll
deleted file mode 100644
index 986c657..0000000
--- a/test/CodeGen/Nios2/ret_generated.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; This tests that llc generates 'ret' instruction in assembly output.
-
-; RUN: llc < %s -march=nios2 2>&1 | FileCheck %s --check-prefix=ARCH
-
-; ARCH: ret
-
-define i32 @f(i32 %i) {
-  ret i32 %i
-}
diff --git a/test/CodeGen/Nios2/shift-rotate.ll b/test/CodeGen/Nios2/shift-rotate.ll
deleted file mode 100644
index d3084b5..0000000
--- a/test/CodeGen/Nios2/shift-rotate.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=nios2 2>&1 | FileCheck %s
-; RUN: llc < %s -march=nios2 -target-abi=nios2r2 2>&1 | FileCheck %s
-
-define i32 @sll_reg(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: sll_reg:
-; CHECK:   sll r2, r4, r5
-  %c = shl i32 %a, %b
-  ret i32 %c
-}
-
-define i32 @srl_reg(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: srl_reg:
-; CHECK:   srl r2, r4, r5
-  %c = lshr i32 %a, %b
-  ret i32 %c
-}
-
-define i32 @sra_reg(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: sra_reg:
-; CHECK:   sra r2, r4, r5
-  %c = ashr i32 %a, %b
-  ret i32 %c
-}
diff --git a/test/CodeGen/Nios2/target_support.ll b/test/CodeGen/Nios2/target_support.ll
deleted file mode 100644
index 90e7020..0000000
--- a/test/CodeGen/Nios2/target_support.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; This tests that llc accepts Nios2 target.
-
-; RUN: not not llc < %s -asm-verbose=false -march=nios2 2>&1 | FileCheck %s --check-prefix=ARCH
-; RUN: not not llc < %s -asm-verbose=false -mtriple=nios2 2>&1 | FileCheck %s --check-prefix=TRIPLE
-
-; ARCH-NOT: invalid target
-; TRIPLE-NOT: unable to get target
-
-define i32 @f(i32 %i) {
-  ret i32 %i
-}
diff --git a/test/CodeGen/PowerPC/2010-02-04-EmptyGlobal.ll b/test/CodeGen/PowerPC/2010-02-04-EmptyGlobal.ll
index 66f0566..a9a2e46 100644
--- a/test/CodeGen/PowerPC/2010-02-04-EmptyGlobal.ll
+++ b/test/CodeGen/PowerPC/2010-02-04-EmptyGlobal.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -relocation-model=pic -frame-pointer=all | FileCheck %s
 ; <rdar://problem/7604010>
 
 %cmd.type = type { }
diff --git a/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll b/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll
index c858823..f6577e9 100644
--- a/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll
+++ b/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -disable-fp-elim < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -frame-pointer=all < %s | FileCheck %s
 ; PR8749
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
 target triple = "powerpc-unknown-linux-gnu.8"
diff --git a/test/CodeGen/PowerPC/Frames-alloca.ll b/test/CodeGen/PowerPC/Frames-alloca.ll
index 2f48f3f..034231e 100644
--- a/test/CodeGen/PowerPC/Frames-alloca.ll
+++ b/test/CodeGen/PowerPC/Frames-alloca.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK-PPC32
 ; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK-PPC64
-; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim | FileCheck %s -check-prefix=CHECK-PPC32-NOFP
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -disable-fp-elim | FileCheck %s -check-prefix=CHECK-PPC64-NOFP
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -frame-pointer=all | FileCheck %s -check-prefix=CHECK-PPC32-NOFP
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -frame-pointer=all | FileCheck %s -check-prefix=CHECK-PPC64-NOFP
 ; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK-PPC32
 
 ; CHECK-PPC32: stwu 1, -32(1)
diff --git a/test/CodeGen/PowerPC/Frames-large.ll b/test/CodeGen/PowerPC/Frames-large.ll
index a2e60c1..1c49de5 100644
--- a/test/CodeGen/PowerPC/Frames-large.ll
+++ b/test/CodeGen/PowerPC/Frames-large.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s -check-prefix=PPC32-NOFP
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim | FileCheck %s -check-prefix=PPC32-FP
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -frame-pointer=all | FileCheck %s -check-prefix=PPC32-FP
 
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s -check-prefix=PPC64-NOFP
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -disable-fp-elim | FileCheck %s -check-prefix=PPC64-FP
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -frame-pointer=all | FileCheck %s -check-prefix=PPC64-FP
 
 define i32* @f1() nounwind {
         %tmp = alloca i32, i32 8191             ; <i32*> [#uses=1]
diff --git a/test/CodeGen/PowerPC/Frames-leaf.ll b/test/CodeGen/PowerPC/Frames-leaf.ll
index 6dce8e2..d06364a 100644
--- a/test/CodeGen/PowerPC/Frames-leaf.ll
+++ b/test/CodeGen/PowerPC/Frames-leaf.ll
@@ -6,13 +6,13 @@
 ; RUN:   not grep "addi r1, r1, "
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | \
 ; RUN:   not grep "lwz r31, 20(r1)"
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -frame-pointer=all | \
 ; RUN:   not grep "stw r31, 20(r1)"
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -frame-pointer=all | \
 ; RUN:   not grep "stwu r1, -.*(r1)"
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -frame-pointer=all | \
 ; RUN:   not grep "addi r1, r1, "
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -frame-pointer=all | \
 ; RUN:   not grep "lwz r31, 20(r1)"
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- | \
 ; RUN:   not grep "std r31, 40(r1)"
@@ -22,13 +22,13 @@
 ; RUN:   not grep "addi r1, r1, "
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- | \
 ; RUN:   not grep "ld r31, 40(r1)"
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -frame-pointer=all | \
 ; RUN:   not grep "stw r31, 40(r1)"
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -frame-pointer=all | \
 ; RUN:   not grep "stdu r1, -.*(r1)"
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -frame-pointer=all | \
 ; RUN:   not grep "addi r1, r1, "
-; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -disable-fp-elim | \
+; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -frame-pointer=all | \
 ; RUN:   not grep "ld r31, 40(r1)"
 
 define i32* @f1() {
diff --git a/test/CodeGen/PowerPC/Frames-small.ll b/test/CodeGen/PowerPC/Frames-small.ll
index 57dbe3f..1fa9908 100644
--- a/test/CodeGen/PowerPC/Frames-small.ll
+++ b/test/CodeGen/PowerPC/Frames-small.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s -check-prefix=PPC32-FP
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim | FileCheck %s -check-prefix=PPC32-NOFP
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -frame-pointer=all | FileCheck %s -check-prefix=PPC32-NOFP
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s -check-prefix=PPC64-FP
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -disable-fp-elim | FileCheck %s -check-prefix=PPC64-NOFP
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -frame-pointer=all | FileCheck %s -check-prefix=PPC64-NOFP
 
 ;PPC32-FP: f1:
 ;PPC32-FP: stwu 1, -16400(1)
diff --git a/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir b/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
new file mode 100644
index 0000000..be28ac3
--- /dev/null
+++ b/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
@@ -0,0 +1,121 @@
+# RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
+# RUN:   virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck %s
+
+--- |
+  ; ModuleID = 'a.ll'
+  source_filename = "a.c"
+  target datalayout = "e-m:e-i64:64-n32:64"
+  target triple = "powerpc64le-unknown-linux-gnu"
+  
+  ; Function Attrs: nounwind
+  define void @test(i32 signext %a6, i32 signext %a7, i32 signext %a17) local_unnamed_addr #0 {
+  entry:
+    %cmp27 = icmp slt i32 %a6, %a7
+    %cmp29 = icmp sgt i32 %a6, %a17
+    %or.cond781 = or i1 %cmp27, %cmp29
+    tail call void asm sideeffect "# nothing", "~{cr0},~{cr1},~{cr2},~{cr3},~{cr4},~{cr5},~{cr6},~{cr7},~{memory}"() #1, !srcloc !1
+    br label %if.end326
+  
+  if.end326:                                        ; preds = %entry
+    br i1 %or.cond781, label %if.then330, label %if.end331
+  
+  if.then330:                                       ; preds = %if.end326
+    unreachable
+  
+  if.end331:                                        ; preds = %if.end326
+    ret void
+  }
+  
+  ; Function Attrs: nounwind
+  declare void @llvm.stackprotector(i8*, i8**) #1
+  
+  attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+  attributes #1 = { nounwind }
+  
+  !llvm.ident = !{!0}
+  
+  !0 = !{!"clang version 8.0.0 (trunk 349357)"}
+  !1 = !{i32 3373}
+
+...
+---
+name:            test
+alignment:       4
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:         
+  - { reg: '$x3', virtual-reg: '' }
+  - { reg: '$x4', virtual-reg: '' }
+  - { reg: '$x5', virtual-reg: '' }
+frameInfo:       
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       0
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 4294967295
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:           
+  - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4, 
+      stack-id: 0, callee-saved-register: '', callee-saved-restored: true, 
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4, 
+      stack-id: 0, callee-saved-register: '', callee-saved-restored: true, 
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 2, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4, 
+      stack-id: 0, callee-saved-register: '', callee-saved-restored: true, 
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+constants:       []
+body:             |
+  bb.0.entry:
+    liveins: $x3, $x4, $x5
+  
+    renamable $cr0 = CMPW renamable $r3, renamable $r4, implicit $x4
+    renamable $cr1 = CMPW renamable $r3, renamable $r5, implicit $x5, implicit killed $x3
+    renamable $cr5lt = CRNOR renamable $cr0lt, renamable $cr1gt, implicit killed $cr0
+    renamable $cr5gt = COPY renamable $cr1gt, implicit $cr1
+    ; CHECK: crnor 4*cr5+lt, lt, 4*cr1+gt
+    ; CHECK: cror 4*cr5+gt, 4*cr1+gt, 4*cr1+gt
+    SPILL_CRBIT killed renamable $cr5lt, 0, %stack.0 :: (store 4 into %stack.0)
+    renamable $cr1 = CMPW renamable $r4, renamable $r5, implicit killed $x5, implicit killed $x4
+    SPILL_CRBIT killed renamable $cr5gt, 0, %stack.1 :: (store 4 into %stack.1)
+    SPILL_CRBIT killed renamable $cr1gt, 0, %stack.2 :: (store 4 into %stack.2)
+    INLINEASM &"# nothing", 25, 12, implicit-def dead early-clobber $cr0, 12, implicit-def dead early-clobber $cr1, 12, implicit-def dead early-clobber $cr2, 12, implicit-def dead early-clobber $cr3, 12, implicit-def dead early-clobber $cr4, 12, implicit-def dead early-clobber $cr5, 12, implicit-def dead early-clobber $cr6, 12, implicit-def dead early-clobber $cr7, !1
+    BLR8 implicit $lr8, implicit $rm
+  
+  bb.1.if.end326:
+    successors: %bb.2(0x00000001), %bb.3(0x7fffffff)
+  
+    renamable $cr5lt = RESTORE_CRBIT 0, %stack.0 :: (load 4 from %stack.0)
+    renamable $cr5gt = RESTORE_CRBIT 0, %stack.1 :: (load 4 from %stack.1)
+    renamable $cr5lt = CROR killed renamable $cr5lt, killed renamable $cr5gt
+    BCn killed renamable $cr5lt, %bb.3
+    B %bb.2
+  
+  bb.2.if.then330:
+    successors: 
+  
+  
+  bb.3.if.end331:
+    BLR8 implicit $lr8, implicit $rm
+
+...
diff --git a/test/CodeGen/PowerPC/PR33671.ll b/test/CodeGen/PowerPC/PR33671.ll
index 0edd2e8..a613387f 100644
--- a/test/CodeGen/PowerPC/PR33671.ll
+++ b/test/CodeGen/PowerPC/PR33671.ll
@@ -27,6 +27,6 @@
 ; CHECK-LABEL: test2
 ; CHECK: addi 3, 3, 8
 ; CHECK: lxvx [[LD:[0-9]+]], 0, 3
-; CHECK: addi 3, 4, 4
-; CHECK: stxvx [[LD]], 0, 3
+; CHECK: addi [[REG:[0-9]+]], 4, 4
+; CHECK: stxvx [[LD]], 0, [[REG]]
 }
diff --git a/test/CodeGen/PowerPC/PR35812-neg-cmpxchg.ll b/test/CodeGen/PowerPC/PR35812-neg-cmpxchg.ll
index f249dd8..aa0e1fc 100644
--- a/test/CodeGen/PowerPC/PR35812-neg-cmpxchg.ll
+++ b/test/CodeGen/PowerPC/PR35812-neg-cmpxchg.ll
@@ -2,7 +2,7 @@
 ; Make sure that a negative value for the compare-and-swap is zero extended
 ; from i8/i16 to i32 since it will be compared for equality.
 ; RUN: llc -mtriple=powerpc64le-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=powerpc64le-linux-gnu -mcpu=pwr7 < %s | FileCheck %s --check-prefix=CHECK-P7
+; RUN: llc -mtriple=powerpc64le-linux-gnu -mcpu=pwr7 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-P7
 
 @str = private unnamed_addr constant [46 x i8] c"FAILED: __atomic_compare_exchange_n() failed.\00"
 @str.1 = private unnamed_addr constant [59 x i8] c"FAILED: __atomic_compare_exchange_n() set the wrong value.\00"
@@ -50,7 +50,7 @@
 ; CHECK-P7:  .LBB0_1: # %L.entry
 ; CHECK-P7:    lwarx 9, 0, 4
 ; CHECK-P7:    and 6, 9, 5
-; CHECK-P7:    cmpw 0, 6, 8
+; CHECK-P7:    cmpw 6, 8
 ; CHECK-P7:    bne 0, .LBB0_3
 ; CHECK-P7:    andc 9, 9, 5
 ; CHECK-P7:    or 9, 9, 7
diff --git a/test/CodeGen/PowerPC/VSX-XForm-Scalars.ll b/test/CodeGen/PowerPC/VSX-XForm-Scalars.ll
index 643ec90..937264f 100644
--- a/test/CodeGen/PowerPC/VSX-XForm-Scalars.ll
+++ b/test/CodeGen/PowerPC/VSX-XForm-Scalars.ll
@@ -33,12 +33,12 @@
 ; CHECK-P9:    xxpermdi vs0, f0, f0, 2
 ; CHECK-P9:    xxspltw vs0, vs0, 3
 ; CHECK-P9:    stxvx vs0, 0, r4
-; CHECK-P9:    lis r4, 1024
 ; CHECK-P9:    lfiwax f0, 0, r3
 ; CHECK-P9:    addis r3, r2, .LC1@toc@ha
 ; CHECK-P9:    ld r3, .LC1@toc@l(r3)
 ; CHECK-P9:    xscvsxdsp f0, f0
 ; CHECK-P9:    ld r3, 0(r3)
+; CHECK-P9:    lis r4, 1024
 ; CHECK-P9:    stfsx f0, r3, r4
 ; CHECK-P9:    blr
 entry:
diff --git a/test/CodeGen/PowerPC/adde_return_type.ll b/test/CodeGen/PowerPC/adde_return_type.ll
new file mode 100644
index 0000000..7ce1107
--- /dev/null
+++ b/test/CodeGen/PowerPC/adde_return_type.ll
@@ -0,0 +1,11 @@
+; REQUIRES: asserts
+; RUN: llc -mtriple=powerpc64le-unknown-unknown -debug-only=legalize-types \
+; RUN:   < %s -o /dev/null 2>&1 | FileCheck %s
+
+define i64 @testAddeReturnType(i64 %X, i64 %Z) {
+; CHECK: Legally typed node: {{.*}}: i64,glue = adde {{.*}}
+  %cmp = icmp ne i64 %Z, 0
+  %conv1 = zext i1 %cmp to i64
+  %add = add nsw i64 %conv1, %X
+  ret i64 %add
+}
diff --git a/test/CodeGen/PowerPC/atomic-2.ll b/test/CodeGen/PowerPC/atomic-2.ll
index c1fbb43..29d16ef 100644
--- a/test/CodeGen/PowerPC/atomic-2.ll
+++ b/test/CodeGen/PowerPC/atomic-2.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=ppc64-- | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
-; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
-; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=ppc64-- -mcpu=pwr7 | FileCheck %s
-; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=ppc64-- -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-P8U
+; RUN: llc -verify-machineinstrs < %s -ppc-asm-full-reg-names -mtriple=ppc64-- | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc -verify-machineinstrs < %s -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -verify-machineinstrs < %s -ppc-asm-full-reg-names -mtriple=ppc64-- -mcpu=pwr7 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -ppc-asm-full-reg-names -mtriple=ppc64-- -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-P8U
 
 define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
 ; CHECK-LABEL: exchange_and_add:
diff --git a/test/CodeGen/PowerPC/atomic-minmax.ll b/test/CodeGen/PowerPC/atomic-minmax.ll
index 5b9a153..f7369f3 100644
--- a/test/CodeGen/PowerPC/atomic-minmax.ll
+++ b/test/CodeGen/PowerPC/atomic-minmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
@@ -244,7 +244,7 @@
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: srw [[SMOLDV:[0-9]+]], [[MOLDV]], [[SA]]
 ; CHECK: extsh [[SESMOLDV:[0-9]+]], [[SMOLDV]]
-; CHECK: cmpw 0, 4, [[SESMOLDV]]
+; CHECK: cmpw 4, [[SESMOLDV]]
 ; CHECK: bgelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
@@ -271,7 +271,7 @@
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: srw [[SMOLDV:[0-9]+]], [[MOLDV]], [[SA]]
 ; CHECK: extsh [[SESMOLDV:[0-9]+]], [[SMOLDV]]
-; CHECK: cmpw 0, 4, [[SESMOLDV]]
+; CHECK: cmpw 4, [[SESMOLDV]]
 ; CHECK: blelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
@@ -296,7 +296,7 @@
 ; CHECK-DAG: and [[SMV:[0-9]+]], [[SV]], [[M]]
 ; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
-; CHECK: cmplw 0, 4, [[MOLDV]]
+; CHECK: cmplw 4, [[MOLDV]]
 ; CHECK: bgelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
@@ -321,7 +321,7 @@
 ; CHECK-DAG: and [[SMV:[0-9]+]], [[SV]], [[M]]
 ; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
-; CHECK: cmplw 0, 4, [[MOLDV]]
+; CHECK: cmplw 4, [[MOLDV]]
 ; CHECK: blelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
@@ -347,7 +347,7 @@
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: srw [[SMOLDV:[0-9]+]], [[MOLDV]], [[SA]]
 ; CHECK: extsb [[SESMOLDV:[0-9]+]], [[SMOLDV]]
-; CHECK: cmpw 0, 4, [[SESMOLDV]]
+; CHECK: cmpw 4, [[SESMOLDV]]
 ; CHECK: bgelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
@@ -373,7 +373,7 @@
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: srw [[SMOLDV:[0-9]+]], [[MOLDV]], [[SA]]
 ; CHECK: extsb [[SESMOLDV:[0-9]+]], [[SMOLDV]]
-; CHECK: cmpw 0, 4, [[SESMOLDV]]
+; CHECK: cmpw 4, [[SESMOLDV]]
 ; CHECK: blelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
@@ -397,7 +397,7 @@
 ; CHECK-DAG: and [[SMV:[0-9]+]], [[SV]], [[M]]
 ; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
-; CHECK: cmplw 0, 4, [[MOLDV]]
+; CHECK: cmplw 4, [[MOLDV]]
 ; CHECK: bgelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
@@ -421,7 +421,7 @@
 ; CHECK-DAG: and [[SMV:[0-9]+]], [[SV]], [[M]]
 ; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
 ; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
-; CHECK: cmplw 0, 4, [[MOLDV]]
+; CHECK: cmplw 4, [[MOLDV]]
 ; CHECK: blelr 0
 ; CHECK: andc [[NOLDV:[0-9]+]], [[OLDV]], [[M]]
 ; CHECK: or [[NEWV:[0-9]+]], [[SMV]], [[NOLDV]]
diff --git a/test/CodeGen/PowerPC/atomics.ll b/test/CodeGen/PowerPC/atomics.ll
index 1abc9dc..c964218 100644
--- a/test/CodeGen/PowerPC/atomics.ll
+++ b/test/CodeGen/PowerPC/atomics.ll
@@ -1,7 +1,6 @@
-; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -verify-machineinstrs  -ppc-asm-full-reg-names | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32
-; FIXME: -verify-machineinstrs currently fail on ppc64 (mismatched register/instruction).
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -ppc-asm-full-reg-names | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32
 ; This is already checked for in Atomics-64.ll
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu  -ppc-asm-full-reg-names | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -ppc-asm-full-reg-names | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64
 
 ; FIXME: we don't currently check for the operations themselves with CHECK-NEXT,
 ;   because they are implemented in a very messy way with lwarx/stwcx.
diff --git a/test/CodeGen/PowerPC/bitfieldinsert.ll b/test/CodeGen/PowerPC/bitfieldinsert.ll
index 76a648b..97d86f2 100644
--- a/test/CodeGen/PowerPC/bitfieldinsert.ll
+++ b/test/CodeGen/PowerPC/bitfieldinsert.ll
@@ -60,3 +60,61 @@
   ret void
 }
 
+; Test cases that include ISD::TRUNCATE
+; equivalent C code
+;   struct s64b {
+;     int a:4;
+;     int b:16;
+;     int c:24;
+;   };
+;   void bitfieldinsert64b(struct s64b *p, unsigned char v) {
+;     p->b = v;
+;   }
+
+%struct.s64b = type { i24, i24 }
+
+define void @bitfieldinsert64b(%struct.s64b* nocapture %p, i8 zeroext %v) {
+; CHECK-LABEL: @bitfieldinsert64b
+; CHECK: lwz [[REG1:[0-9]+]], 0(3)
+; CHECK-NEXT: rlwimi [[REG1]], 4, 4, 12, 27
+; CHECK-NEXT: stw [[REG1]], 0(3)
+; CHECK-NEXT: blr
+entry:
+  %conv = zext i8 %v to i32
+  %0 = bitcast %struct.s64b* %p to i32*
+  %bf.load = load i32, i32* %0, align 4
+  %bf.shl = shl nuw nsw i32 %conv, 4
+  %bf.clear = and i32 %bf.load, -1048561
+  %bf.set = or i32 %bf.clear, %bf.shl
+  store i32 %bf.set, i32* %0, align 4
+  ret void
+}
+
+; equivalent C code
+;   struct s64c {
+;     int a:5;
+;     int b:16;
+;     long c:10;
+;   };
+;   void bitfieldinsert64c(struct s64c *p, unsigned short v) {
+;     p->b = v;
+;   }
+
+%struct.s64c = type { i32, [4 x i8] }
+
+define void @bitfieldinsert64c(%struct.s64c* nocapture %p, i16 zeroext %v) {
+; CHECK-LABEL: @bitfieldinsert64c
+; CHECK: lwz [[REG1:[0-9]+]], 0(3)
+; CHECK-NEXT: rlwimi [[REG1]], 4, 5, 11, 26
+; CHECK-NEXT: stw [[REG1]], 0(3)
+; CHECK-NEXT: blr
+entry:
+  %conv = zext i16 %v to i32
+  %0 = getelementptr inbounds %struct.s64c, %struct.s64c* %p, i64 0, i32 0
+  %bf.load = load i32, i32* %0, align 8
+  %bf.shl = shl nuw nsw i32 %conv, 5
+  %bf.clear = and i32 %bf.load, -2097121
+  %bf.set = or i32 %bf.clear, %bf.shl
+  store i32 %bf.set, i32* %0, align 8
+  ret void
+}
diff --git a/test/CodeGen/PowerPC/build-vector-tests.ll b/test/CodeGen/PowerPC/build-vector-tests.ll
index 6f65b189..3fc5ffe 100644
--- a/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -1244,15 +1244,15 @@
 ; P9LE-LABEL: fromRegsConvftoi
 ; P8BE-LABEL: fromRegsConvftoi
 ; P8LE-LABEL: fromRegsConvftoi
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
-; P9BE-DAG: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9BE-DAG: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
+; P9BE: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
+; P9BE: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
 ; P9BE: vmrgew v2, [[REG3]], [[REG4]]
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
-; P9LE-DAG: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9LE-DAG: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9LE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
+; P9LE: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
+; P9LE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
+; P9LE: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
 ; P9LE: vmrgew v2, [[REG4]], [[REG3]]
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
@@ -1516,15 +1516,15 @@
 ; P9LE-LABEL: fromRegsConvdtoi
 ; P8BE-LABEL: fromRegsConvdtoi
 ; P8LE-LABEL: fromRegsConvdtoi
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
-; P9BE-DAG: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9BE-DAG: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
+; P9BE: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
+; P9BE: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
 ; P9BE: vmrgew v2, [[REG3]], [[REG4]]
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
-; P9LE-DAG: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9LE-DAG: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9LE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
+; P9LE: xvcvdpsxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
+; P9LE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
+; P9LE: xvcvdpsxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
 ; P9LE: vmrgew v2, [[REG4]], [[REG3]]
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
@@ -1642,8 +1642,8 @@
 ; P9LE: lfd
 ; P9LE: lfd
 ; P9LE: xxmrghd
-; P9LE: xxmrghd
 ; P9LE: xvcvdpsxws
+; P9LE: xxmrghd
 ; P9LE: xvcvdpsxws
 ; P9LE: vmrgew v2
 ; P8BE: lfdx
@@ -1711,8 +1711,8 @@
 ; P9LE: lfd
 ; P9LE: lfd
 ; P9LE: xxmrghd
-; P9LE: xxmrghd
 ; P9LE: xvcvdpsxws
+; P9LE: xxmrghd
 ; P9LE: xvcvdpsxws
 ; P9LE: vmrgew v2
 ; P8BE: lfdux
@@ -1780,8 +1780,8 @@
 ; P9LE: lfd
 ; P9LE: lfd
 ; P9LE: xxmrghd
-; P9LE: xxmrghd
 ; P9LE: xvcvdpsxws
+; P9LE: xxmrghd
 ; P9LE: xvcvdpsxws
 ; P9LE: vmrgew v2
 ; P8BE: lfdux
@@ -2376,15 +2376,15 @@
 ; P9LE-LABEL: fromRegsConvftoui
 ; P8BE-LABEL: fromRegsConvftoui
 ; P8LE-LABEL: fromRegsConvftoui
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
-; P9BE-DAG: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9BE-DAG: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
+; P9BE: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
+; P9BE: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
 ; P9BE: vmrgew v2, [[REG3]], [[REG4]]
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
-; P9LE-DAG: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9LE-DAG: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9LE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
+; P9LE: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
+; P9LE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
+; P9LE: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
 ; P9LE: vmrgew v2, [[REG4]], [[REG3]]
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
@@ -2648,15 +2648,15 @@
 ; P9LE-LABEL: fromRegsConvdtoui
 ; P8BE-LABEL: fromRegsConvdtoui
 ; P8LE-LABEL: fromRegsConvdtoui
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
-; P9BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
-; P9BE-DAG: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9BE-DAG: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
+; P9BE: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9BE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
+; P9BE: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
 ; P9BE: vmrgew v2, [[REG3]], [[REG4]]
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
-; P9LE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
-; P9LE-DAG: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
-; P9LE-DAG: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
+; P9LE: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs3, vs1
+; P9LE: xvcvdpuxws [[REG3:v[0-9]+]], {{[vs]+}}[[REG1]]
+; P9LE: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs4, vs2
+; P9LE: xvcvdpuxws [[REG4:v[0-9]+]], {{[vs]+}}[[REG2]]
 ; P9LE: vmrgew v2, [[REG4]], [[REG3]]
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG1:[0-9]+]], vs1, vs3
 ; P8BE-DAG: xxmrghd {{[vs]+}}[[REG2:[0-9]+]], vs2, vs4
@@ -2774,8 +2774,8 @@
 ; P9LE: lfd
 ; P9LE: lfd
 ; P9LE: xxmrghd
-; P9LE: xxmrghd
 ; P9LE: xvcvdpuxws
+; P9LE: xxmrghd
 ; P9LE: xvcvdpuxws
 ; P9LE: vmrgew v2
 ; P8BE: lfdx
@@ -2843,8 +2843,8 @@
 ; P9LE: lfd
 ; P9LE: lfd
 ; P9LE: xxmrghd
-; P9LE: xxmrghd
 ; P9LE: xvcvdpuxws
+; P9LE: xxmrghd
 ; P9LE: xvcvdpuxws
 ; P9LE: vmrgew v2
 ; P8BE: lfdux
@@ -2912,8 +2912,8 @@
 ; P9LE: lfd
 ; P9LE: lfd
 ; P9LE: xxmrghd
-; P9LE: xxmrghd
 ; P9LE: xvcvdpuxws
+; P9LE: xxmrghd
 ; P9LE: xvcvdpuxws
 ; P9LE: vmrgew v2
 ; P8BE: lfdux
diff --git a/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll b/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
index dbfd817..e3ffc0f 100644
--- a/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
+++ b/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
@@ -112,8 +112,8 @@
   %2 = call fp128 @llvm.ppc.scalar.insert.exp.qp(fp128 %0, i64 %1)
   ret fp128 %2
 ; CHECK-LABEL: insert_exp_qp
-; CHECK: mtvsrd [[FPREG:f[0-9]+]], r3
-; CHECK: lxvx [[VECREG:v[0-9]+]]
+; CHECK-DAG: mtvsrd [[FPREG:f[0-9]+]], r3
+; CHECK-DAG: lxvx [[VECREG:v[0-9]+]]
 ; CHECK: xsiexpqp v2, [[VECREG]], [[FPREG]]
 ; CHECK: blr
 }
diff --git a/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir b/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
index e210ec5..eb058bc 100644
--- a/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
+++ b/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
@@ -1,5 +1,5 @@
-# RUN: llc -run-pass ppc-mi-peepholes -ppc-convert-rr-to-ri %s -o - | FileCheck %s
-# RUN: llc -start-after ppc-mi-peepholes -ppc-late-peephole %s -o - | FileCheck %s --check-prefix=CHECK-LATE
+# RUN: llc -run-pass ppc-mi-peepholes -ppc-convert-rr-to-ri %s -o - -verify-machineinstrs | FileCheck %s
+# RUN: llc -start-after ppc-mi-peepholes -ppc-late-peephole %s -o - -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-LATE
 
 --- |
   ; ModuleID = 'convert-rr-to-ri-instrs.ll'
diff --git a/test/CodeGen/PowerPC/crsave.ll b/test/CodeGen/PowerPC/crsave.ll
index d7be2fd..980749a 100644
--- a/test/CodeGen/PowerPC/crsave.ll
+++ b/test/CodeGen/PowerPC/crsave.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -disable-fp-elim -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 < %s | FileCheck %s -check-prefix=PPC32
+; RUN: llc -O0 -frame-pointer=all -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 < %s | FileCheck %s -check-prefix=PPC32
 ; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5 < %s | FileCheck %s -check-prefix=PPC64
 ; RUN: llc -O0 -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s -check-prefix=PPC64-ELFv2
 
diff --git a/test/CodeGen/PowerPC/direct-move-profit.ll b/test/CodeGen/PowerPC/direct-move-profit.ll
index 7205d11..2e2da7c 100644
--- a/test/CodeGen/PowerPC/direct-move-profit.ll
+++ b/test/CodeGen/PowerPC/direct-move-profit.ll
@@ -74,7 +74,7 @@
 
 }
 
-!0 = !{!"clang version 3.9.0 (http://llvm.org/git/clang.git b88a395e7ba26c0fb96cd99a2a004d76f4f41d0c) (http://llvm.org/git/llvm.git 1ac3fbac0f5b037c17c0b0f9d271c32c4d7ca1b5)"}
+!0 = !{!"clang version 3.9.0"}
 !1 = !{!2, !2, i64 0}
 !2 = !{!"int", !3, i64 0}
 !3 = !{!"omnipotent char", !4, i64 0}
diff --git a/test/CodeGen/PowerPC/empty-functions.ll b/test/CodeGen/PowerPC/empty-functions.ll
index 4f36c49..f3238ba 100644
--- a/test/CodeGen/PowerPC/empty-functions.ll
+++ b/test/CodeGen/PowerPC/empty-functions.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-linux-gnu | FileCheck -check-prefix=LINUX-NO-FP %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-linux-gnu -disable-fp-elim | FileCheck -check-prefix=LINUX-FP %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-linux-gnu -frame-pointer=all | FileCheck -check-prefix=LINUX-FP %s
 
 define void @func() {
 entry:
diff --git a/test/CodeGen/PowerPC/f128-aggregates.ll b/test/CodeGen/PowerPC/f128-aggregates.ll
index d671c76..9d16103 100644
--- a/test/CodeGen/PowerPC/f128-aggregates.ll
+++ b/test/CodeGen/PowerPC/f128-aggregates.ll
@@ -82,7 +82,6 @@
                             align 16 %a) {
 ; CHECK-LABEL: testStruct_03:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    lxv v2, 128(r1)
 ; CHECK-NEXT:    std r10, 88(r1)
 ; CHECK-NEXT:    std r9, 80(r1)
 ; CHECK-NEXT:    std r8, 72(r1)
@@ -91,11 +90,11 @@
 ; CHECK-NEXT:    std r5, 48(r1)
 ; CHECK-NEXT:    std r4, 40(r1)
 ; CHECK-NEXT:    std r3, 32(r1)
+; CHECK-NEXT:    lxv v2, 128(r1)
 ; CHECK-NEXT:    blr
 
 ; CHECK-BE-LABEL: testStruct_03:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv v2, 144(r1)
 ; CHECK-BE-NEXT:    std r10, 104(r1)
 ; CHECK-BE-NEXT:    std r9, 96(r1)
 ; CHECK-BE-NEXT:    std r8, 88(r1)
@@ -104,6 +103,7 @@
 ; CHECK-BE-NEXT:    std r5, 64(r1)
 ; CHECK-BE-NEXT:    std r4, 56(r1)
 ; CHECK-BE-NEXT:    std r3, 48(r1)
+; CHECK-BE-NEXT:    lxv v2, 144(r1)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a7 = getelementptr inbounds %struct.With9fp128params,
@@ -228,12 +228,12 @@
 define fp128 @testMixedAggregate_03([4 x i128] %sa.coerce) {
 ; CHECK-LABEL: testMixedAggregate_03:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-DAG:     mtvsrwa v2, r3
-; CHECK-DAG:     mtvsrdd v3, r6, r5
-; CHECK:         mtvsrd v4, r10
+; CHECK:         mtvsrwa v2, r3
 ; CHECK:         xscvsdqp v2, v2
-; CHECK-DAG:     xscvsdqp v[[REG:[0-9]+]], v4
-; CHECK-DAG:     xsaddqp v2, v3, v2
+; CHECK:         mtvsrdd v3, r6, r5
+; CHECK:         xsaddqp v2, v3, v2
+; CHECK:         mtvsrd v[[REG1:[0-9]+]], r10
+; CHECK:         xscvsdqp v[[REG:[0-9]+]], v[[REG1]]
 ; CHECK:         xsaddqp v2, v2, v[[REG]]
 ; CHECK-NEXT:    blr
 entry:
@@ -260,11 +260,11 @@
 ; CHECK-NEXT:    std r7, 64(r1)
 ; CHECK-NEXT:    std r10, 88(r1)
 ; CHECK-NEXT:    std r9, 80(r1)
-; CHECK-NEXT:    lxv v2, 64(r1)
 ; CHECK-NEXT:    std r6, 56(r1)
 ; CHECK-NEXT:    std r5, 48(r1)
 ; CHECK-NEXT:    std r4, 40(r1)
 ; CHECK-NEXT:    std r3, 32(r1)
+; CHECK-NEXT:    lxv v2, 64(r1)
 ; CHECK-NEXT:    blr
 
 ; CHECK-BE-LABEL: testNestedAggregate:
@@ -273,11 +273,11 @@
 ; CHECK-BE-NEXT:    std r7, 80(r1)
 ; CHECK-BE-NEXT:    std r10, 104(r1)
 ; CHECK-BE-NEXT:    std r9, 96(r1)
-; CHECK-BE-NEXT:    lxv v2, 80(r1)
 ; CHECK-BE-NEXT:    std r6, 72(r1)
 ; CHECK-BE-NEXT:    std r5, 64(r1)
 ; CHECK-BE-NEXT:    std r4, 56(r1)
 ; CHECK-BE-NEXT:    std r3, 48(r1)
+; CHECK-BE-NEXT:    lxv v2, 80(r1)
 ; CHECK-BE-NEXT:    blr
 entry:
   %c = getelementptr inbounds %struct.MixedC, %struct.MixedC* %a, i64 0, i32 1, i32 1
@@ -337,25 +337,25 @@
 define fp128 @sum_float128(i32 signext %count, ...) {
 ; CHECK-LABEL: sum_float128:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addis r11, r2, .LCPI17_0@toc@ha
-; CHECK-NEXT:    cmpwi cr0, r3, 1
 ; CHECK-NEXT:    std r10, 88(r1)
 ; CHECK-NEXT:    std r9, 80(r1)
 ; CHECK-NEXT:    std r8, 72(r1)
 ; CHECK-NEXT:    std r7, 64(r1)
 ; CHECK-NEXT:    std r6, 56(r1)
-; CHECK-NEXT:    std r5, 48(r1)
+; CHECK-NEXT:    cmpwi cr0, r3, 1
 ; CHECK-NEXT:    std r4, 40(r1)
-; CHECK-NEXT:    addi r11, r11, .LCPI17_0@toc@l
-; CHECK-NEXT:    lxvx v2, 0, r11
+; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LCPI17_0@toc@ha
+; CHECK-NEXT:    addi [[REG1:r[0-9]+]], [[REG]], .LCPI17_0@toc@l
+; CHECK-NEXT:    lxvx v2, 0, [[REG1]]
+; CHECK-NEXT:    std r5, 48(r1)
 ; CHECK-NEXT:    bltlr cr0
 ; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    addi r3, r1, 40
 ; CHECK-NEXT:    lxvx v3, 0, r3
 ; CHECK-NEXT:    xsaddqp v2, v3, v2
+; CHECK-NEXT:    addi [[REG2:r[0-9]+]], r1, 72
+; CHECK-NEXT:    std [[REG2]], -8(r1)
 ; CHECK-NEXT:    lxv v3, 16(r3)
-; CHECK-NEXT:    addi r3, r1, 72
-; CHECK-NEXT:    std r3, -8(r1)
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
 entry:
diff --git a/test/CodeGen/PowerPC/f128-arith.ll b/test/CodeGen/PowerPC/f128-arith.ll
index 955d59d..fde472d 100644
--- a/test/CodeGen/PowerPC/f128-arith.ll
+++ b/test/CodeGen/PowerPC/f128-arith.ll
@@ -283,7 +283,7 @@
                      fp128* nocapture %res) {
 ; CHECK-LABEL: qp_powi:
 ; CHECK:         lxv v2, 0(r3)
-; CHECK:         lwz r3, 0(r4)
+; CHECK:         lwz r5, 0(r4)
 ; CHECK:         bl __powikf2
 ; CHECK:         blr
 entry:
diff --git a/test/CodeGen/PowerPC/f128-conv.ll b/test/CodeGen/PowerPC/f128-conv.ll
index ef433bd..4c64341 100644
--- a/test/CodeGen/PowerPC/f128-conv.ll
+++ b/test/CodeGen/PowerPC/f128-conv.ll
@@ -444,10 +444,10 @@
 ; CHECK-LABEL: qpConv2dp_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC7@toc@ha
-; CHECK-NEXT:    sldi r4, r4, 3
 ; CHECK-NEXT:    ld r5, .LC7@toc@l(r5)
 ; CHECK-NEXT:    lxvx v2, 0, r5
 ; CHECK-NEXT:    xscvqpdp v2, v2
+; CHECK-NEXT:    sldi r4, r4, 3
 ; CHECK-NEXT:    stxsdx v2, r3, r4
 ; CHECK-NEXT:    blr
 entry:
@@ -517,11 +517,11 @@
 ; CHECK-LABEL: qpConv2sp_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC7@toc@ha
-; CHECK-NEXT:    sldi r4, r4, 2
 ; CHECK-NEXT:    ld r5, .LC7@toc@l(r5)
 ; CHECK-NEXT:    lxv v2, 48(r5)
 ; CHECK-NEXT:    xscvqpdpo v2, v2
 ; CHECK-NEXT:    xsrsp f0, v2
+; CHECK-NEXT:    sldi r4, r4, 2
 ; CHECK-NEXT:    stfsx f0, r3, r4
 ; CHECK-NEXT:    blr
 entry:
@@ -609,8 +609,8 @@
 ; CHECK-LABEL: dpConv2qp_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscpsgndp v2, f1, f1
-; CHECK-NEXT:    sldi r4, r4, 4
-; CHECK-NEXT:    xscvdpqp v2, v2
+; CHECK-DAG:     sldi r4, r4, 4
+; CHECK-DAG:     xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, r3, r4
 ; CHECK-NEXT:    blr
 entry:
@@ -689,8 +689,8 @@
 ; CHECK-LABEL: spConv2qp_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscpsgndp v2, f1, f1
-; CHECK-NEXT:    sldi r4, r4, 4
-; CHECK-NEXT:    xscvdpqp v2, v2
+; CHECK-DAG:     sldi r4, r4, 4
+; CHECK-DAG:     xscvdpqp v2, v2
 ; CHECK-NEXT:    stxvx v2, r3, r4
 ; CHECK-NEXT:    blr
 entry:
diff --git a/test/CodeGen/PowerPC/f128-passByValue.ll b/test/CodeGen/PowerPC/f128-passByValue.ll
index 467e555..7d400b8 100644
--- a/test/CodeGen/PowerPC/f128-passByValue.ll
+++ b/test/CodeGen/PowerPC/f128-passByValue.ll
@@ -63,8 +63,8 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sldi r4, r4, 4
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    add r4, r3, r4
-; CHECK-NEXT:    lxv v3, -16(r4)
+; CHECK-NEXT:    add [[REG:r[0-9]+]], r3, r4
+; CHECK-NEXT:    lxv v3, -16([[REG]])
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
                          i32 signext %loopcnt, fp128* nocapture readnone %sum) {
@@ -85,7 +85,6 @@
 ; CHECK-LABEL: maxVecParam:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
-; CHECK-NEXT:    lxv v[[REG0:[0-9]+]], 224(r1)
 ; CHECK-NEXT:    xsaddqp v2, v2, v4
 ; CHECK-NEXT:    xsaddqp v2, v2, v5
 ; CHECK-NEXT:    xsaddqp v2, v2, v6
@@ -96,6 +95,7 @@
 ; CHECK-NEXT:    xsaddqp v2, v2, v11
 ; CHECK-NEXT:    xsaddqp v2, v2, v12
 ; CHECK-NEXT:    xsaddqp v2, v2, v13
+; CHECK-NEXT:    lxv v[[REG0:[0-9]+]], 224(r1)
 ; CHECK-NEXT:    xssubqp v2, v2, v[[REG0]]
 ; CHECK-NEXT:    blr
                           fp128 %p6, fp128 %p7, fp128 %p8, fp128 %p9, fp128 %p10,
@@ -121,9 +121,9 @@
 define fp128 @mixParam_01(fp128 %a, i32 signext %i, fp128 %b) {
 ; CHECK-LABEL: mixParam_01:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mtvsrwa v4, r5
-; CHECK-NEXT:    xsaddqp v2, v2, v3
-; CHECK-NEXT:    xscvsdqp v[[REG0:[0-9]+]], v4
+; CHECK-DAG:     mtvsrwa [[REG1:v[0-9]+]], r5
+; CHECK-DAG:     xsaddqp v2, v2, v3
+; CHECK-NEXT:    xscvsdqp v[[REG0:[0-9]+]], [[REG1]]
 ; CHECK-NEXT:    xsaddqp v2, v2, v[[REG0]]
 ; CHECK-NEXT:    blr
 entry:
@@ -136,8 +136,8 @@
 define fastcc fp128 @mixParam_01f(fp128 %a, i32 signext %i, fp128 %b) {
 ; CHECK-LABEL: mixParam_01f:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mtvsrwa v[[REG0:[0-9]+]], r3
-; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK-DAG:     mtvsrwa v[[REG0:[0-9]+]], r3
+; CHECK-DAG:     xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvsdqp v[[REG1:[0-9]+]], v[[REG0]]
 ; CHECK-NEXT:    xsaddqp v2, v2, v[[REG1]]
 ; CHECK-NEXT:    blr
@@ -152,17 +152,17 @@
 define fp128 @mixParam_02(fp128 %p1, double %p2, i64* nocapture %p3,
 ; CHECK-LABEL: mixParam_02:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-DAG:     lwz r3, 96(r1)
+; CHECK:         lwz r3, 96(r1)
 ; CHECK:         add r4, r7, r9
-; CHECK-NEXT:    xscpsgndp v[[REG0:[0-9]+]], f1, f1
-; CHECK-DAG:     add r4, r4, r10
+; CHECK:         add r4, r4, r10
+; CHECK:         add r3, r4, r3
+; CHECK:         clrldi r3, r3, 32
+; CHECK:         std r3, 0(r6)
+; CHECK:         lxv v[[REG1:[0-9]+]], 0(r8)
+; CHECK:         xscpsgndp v[[REG0:[0-9]+]], f1, f1
 ; CHECK:         xscvdpqp v[[REG0]], v[[REG0]]
-; CHECK-NEXT:    add r3, r4, r3
-; CHECK-NEXT:    clrldi r3, r3, 32
-; CHECK-NEXT:    std r3, 0(r6)
-; CHECK-NEXT:    lxv v[[REG1:[0-9]+]], 0(r8)
-; CHECK-NEXT:    xsaddqp v2, v[[REG1]], v2
-; CHECK-NEXT:    xsaddqp v2, v2, v3
+; CHECK:         xsaddqp v2, v[[REG1]], v2
+; CHECK:         xsaddqp v2, v2, v3
 ; CHECK-NEXT:    blr
                           i16 signext %p4, fp128* nocapture readonly %p5,
                           i32 signext %p6, i8 zeroext %p7, i32 zeroext %p8) {
@@ -186,13 +186,13 @@
 ; CHECK-LABEL: mixParam_02f:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    add r4, r4, r6
-; CHECK-NEXT:    xscpsgndp v[[REG0:[0-9]+]], f1, f1
 ; CHECK-NEXT:    add r4, r4, r7
-; CHECK-NEXT:    xscvdpqp v[[REG0]], v[[REG0]]
 ; CHECK-NEXT:    add r4, r4, r8
 ; CHECK-NEXT:    clrldi r4, r4, 32
-; CHECK-NEXT:    std r4, 0(r3)
-; CHECK-NEXT:    lxv v[[REG1:[0-9]+]], 0(r5)
+; CHECK-DAG:     std r4, 0(r3)
+; CHECK-DAG:     lxv v[[REG1:[0-9]+]], 0(r5)
+; CHECK-NEXT:    xscpsgndp v[[REG0:[0-9]+]], f1, f1
+; CHECK-NEXT:    xscvdpqp v[[REG0]], v[[REG0]]
 ; CHECK-NEXT:    xsaddqp v2, v[[REG1]], v2
+; CHECK-NEXT:    xsaddqp v2, v2, v[[REG0]]
 ; CHECK-NEXT:    blr
@@ -219,11 +219,11 @@
 ; CHECK-LABEL: mixParam_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-DAG:     ld r3, 104(r1)
-; CHECK-DAG:     mtvsrwa v[[REG2:[0-9]+]], r10
 ; CHECK-DAG:     stxv v2, 0(r9)
-; CHECK-DAG:     xscvsdqp v[[REG1:[0-9]+]], v[[REG2]]
 ; CHECK:         stxvx v3, 0, r3
-; CHECK-NEXT:    lxv v2, 0(r9)
+; CHECK:         mtvsrwa v[[REG2:[0-9]+]], r10
+; CHECK-DAG:     xscvsdqp v[[REG1:[0-9]+]], v[[REG2]]
+; CHECK-DAG:     lxv v2, 0(r9)
 ; CHECK-NEXT:    xsaddqp v2, v2, v[[REG1]]
 ; CHECK-NEXT:    xscvqpdp v2, v2
 ; CHECK-NEXT:    stxsd v2, 0(r5)
@@ -245,10 +245,10 @@
 define fastcc void @mixParam_03f(fp128 %f1, double* nocapture %d1, <4 x i32> %vec1,
 ; CHECK-LABEL: mixParam_03f:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mtvsrwa v[[REG0:[0-9]+]], r5
-; CHECK-NEXT:    stxv v[[REG1:[0-9]+]], 0(r4)
-; CHECK-NEXT:    stxv v[[REG2:[0-9]+]], 0(r7)
-; CHECK-NEXT:    lxv v[[REG1]], 0(r4)
+; CHECK-DAG:     mtvsrwa v[[REG0:[0-9]+]], r5
+; CHECK-DAG:     stxv v[[REG1:[0-9]+]], 0(r4)
+; CHECK-DAG:     stxv v[[REG2:[0-9]+]], 0(r7)
+; CHECK-DAG:     lxv v[[REG1]], 0(r4)
 ; CHECK-NEXT:    xscvsdqp v[[REG3:[0-9]+]], v[[REG0]]
 ; CHECK-NEXT:    xsaddqp v[[REG4:[0-9]+]], v[[REG1]], v[[REG3]]
 ; CHECK-NEXT:    xscvqpdp v2, v[[REG4]]
diff --git a/test/CodeGen/PowerPC/f128-truncateNconv.ll b/test/CodeGen/PowerPC/f128-truncateNconv.ll
index d346683..ebded9d 100644
--- a/test/CodeGen/PowerPC/f128-truncateNconv.ll
+++ b/test/CodeGen/PowerPC/f128-truncateNconv.ll
@@ -53,10 +53,10 @@
   ret i64 %conv
 
 ; CHECK-LABEL: qpConv2sdw_03
+; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
 ; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
-; CHECK-DAG: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
-; CHECK-DAG: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
+; CHECK: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
+; CHECK: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
 ; CHECK: xsaddqp v[[REG]], v[[REG]], v[[REG1]]
 ; CHECK-NEXT: xscvqpsdz v[[CONV:[0-9]+]], v[[REG]]
 ; CHECK-NEXT: mfvsrd r3, v[[CONV]]
@@ -97,7 +97,7 @@
 
 ; CHECK-LABEL: qpConv2sdw_testXForm
 ; CHECK: xscvqpsdz v[[CONV:[0-9]+]],
-; CHECK-NEXT: stxsdx v[[CONV]], r3, r4
+; CHECK: stxsdx v[[CONV]], r3, r4
 ; CHECK-NEXT: blr
 }
 
@@ -146,10 +146,10 @@
   ret i64 %conv
 
 ; CHECK-LABEL: qpConv2udw_03
+; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
 ; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
 ; CHECK-DAG: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
 ; CHECK-DAG: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
 ; CHECK: xsaddqp v[[REG]], v[[REG]], v[[REG1]]
 ; CHECK-NEXT: xscvqpudz v[[CONV:[0-9]+]], v[[REG]]
 ; CHECK-NEXT: mfvsrd r3, v[[CONV]]
@@ -190,7 +190,7 @@
 
 ; CHECK-LABEL: qpConv2udw_testXForm
 ; CHECK: xscvqpudz v[[CONV:[0-9]+]],
-; CHECK-NEXT: stxsdx v[[CONV]], r3, r4
+; CHECK: stxsdx v[[CONV]], r3, r4
 ; CHECK-NEXT: blr
 }
 
@@ -240,10 +240,10 @@
   ret i32 %conv
 
 ; CHECK-LABEL: qpConv2sw_03
+; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
 ; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
 ; CHECK-DAG: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
 ; CHECK-DAG: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
 ; CHECK-NEXT: xsaddqp v[[ADD:[0-9]+]], v[[REG]], v[[REG1]]
 ; CHECK-NEXT: xscvqpswz v[[CONV:[0-9]+]], v[[ADD]]
 ; CHECK-NEXT: mfvsrwz r[[REG2:[0-9]+]], v[[CONV]]
@@ -316,10 +316,10 @@
   ret i32 %conv
 
 ; CHECK-LABEL: qpConv2uw_03
+; CHECK: lxv v[[REG:[0-9]+]], 0(r3)
 ; CHECK: addis r[[REG0:[0-9]+]], r2, .LC0@toc@ha
 ; CHECK-DAG: ld r[[REG0]], .LC0@toc@l(r[[REG0]])
 ; CHECK-DAG: lxv v[[REG1:[0-9]+]], 16(r[[REG0]])
-; CHECK-DAG: lxv v[[REG:[0-9]+]], 0(r3)
 ; CHECK-NEXT: xsaddqp v[[ADD:[0-9]+]], v[[REG]], v[[REG1]]
 ; CHECK-NEXT: xscvqpuwz v[[CONV:[0-9]+]], v[[ADD]]
 ; CHECK-NEXT: mfvsrwz r3, v[[CONV]]
@@ -355,7 +355,7 @@
 ; CHECK-NEXT:    lxv v2, 0(r3)
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
-; CHECK-NEXT:    extsh r3, r3
+; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -386,14 +386,14 @@
 define signext i16 @qpConv2shw_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2shw_03:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
-; CHECK-NEXT:    lxv v3, 16(r4)
+; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
+; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
+; CHECK-NEXT:    lxv v3, 16([[REG1]])
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
-; CHECK-NEXT:    extsh r3, r3
+; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -463,10 +463,10 @@
 define zeroext i16 @qpConv2uhw_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2uhw_03:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
-; CHECK-NEXT:    lxv v3, 16(r4)
+; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
+; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
+; CHECK-NEXT:    lxv v3, 16([[REG1]])
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    lxv v2, 0(r3)
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
-; CHECK-NEXT:    extsb r3, r3
+; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -540,14 +540,14 @@
 define signext i8 @qpConv2sb_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2sb_03:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
-; CHECK-NEXT:    lxv v3, 16(r4)
+; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
+; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
+; CHECK-NEXT:    lxv v3, 16([[REG1]])
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
-; CHECK-NEXT:    extsb r3, r3
+; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
   %0 = load fp128, fp128* %a, align 16
@@ -617,10 +617,10 @@
 define zeroext i8 @qpConv2ub_03(fp128* nocapture readonly %a) {
 ; CHECK-LABEL: qpConv2ub_03:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    lxv v2, 0(r3)
-; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
-; CHECK-NEXT:    lxv v3, 16(r4)
+; CHECK-NEXT:    addis [[REG:r[0-9]+]], r2, .LC0@toc@ha
+; CHECK-NEXT:    ld [[REG1:r[0-9]+]], .LC0@toc@l([[REG]])
+; CHECK-NEXT:    lxv v3, 16([[REG1]])
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
 ; CHECK-NEXT:    xscvqpswz v2, v2
 ; CHECK-NEXT:    mfvsrwz r3, v2
diff --git a/test/CodeGen/PowerPC/fast-isel-fcmp-nan.ll b/test/CodeGen/PowerPC/fast-isel-fcmp-nan.ll
index f060395..84d4614 100644
--- a/test/CodeGen/PowerPC/fast-isel-fcmp-nan.ll
+++ b/test/CodeGen/PowerPC/fast-isel-fcmp-nan.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple powerpc64le-unknown-linux-gnu -fast-isel -O0 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple powerpc64le-unknown-linux-gnu -fast-isel -O0 < %s | FileCheck %s
 
 define i1 @TestULT(double %t0) {
 ; CHECK-LABEL: TestULT:
@@ -17,7 +17,7 @@
 
 define i1 @TestULE(double %t0) {
 ; CHECK-LABEL: TestULE:
-; CHECK: fcmpu
+; CHECK: xscmpudp
 ; CHECK-NEXT: ble
 ; CHECK: blr
 entry:
@@ -33,7 +33,7 @@
 
 define i1 @TestUNE(double %t0) {
 ; CHECK-LABEL: TestUNE:
-; CHECK: fcmpu
+; CHECK: xscmpudp
 ; CHECK-NEXT: bne
 ; CHECK: blr
 entry:
@@ -79,7 +79,7 @@
 
 define i1 @TestUGE(double %t0) {
 ; CHECK-LABEL: TestUGE:
-; CHECK: fcmpu
+; CHECK: xscmpudp
 ; CHECK-NEXT: bge
 ; CHECK: blr
 entry:
@@ -95,7 +95,7 @@
 
 define i1 @TestOLT(double %t0) {
 ; CHECK-LABEL: TestOLT:
-; CHECK: fcmpu
+; CHECK: xscmpudp
 ; CHECK-NEXT: blt
 ; CHECK: blr
 entry:
@@ -141,7 +141,7 @@
 
 define i1 @TestOEQ(double %t0) {
 ; CHECK-LABEL: TestOEQ:
-; CHECK: fcmpu
+; CHECK: xscmpudp
 ; CHECK-NEXT: beq
 ; CHECK: blr
 entry:
@@ -157,7 +157,7 @@
 
 define i1 @TestOGT(double %t0) {
 ; CHECK-LABEL: TestOGT:
-; CHECK: fcmpu
+; CHECK: xscmpudp
 ; CHECK-NEXT: bgt
 ; CHECK: blr
 entry:
diff --git a/test/CodeGen/PowerPC/memcmpIR.ll b/test/CodeGen/PowerPC/memcmpIR.ll
index 9888519..4c0de72 100644
--- a/test/CodeGen/PowerPC/memcmpIR.ll
+++ b/test/CodeGen/PowerPC/memcmpIR.ll
@@ -17,10 +17,14 @@
   ; CHECK-NEXT: br label %endblock
 
   ; CHECK-LABEL: loadbb1:{{.*}}
-  ; CHECK: [[GEP1:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
-  ; CHECK-NEXT: [[GEP2:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
-  ; CHECK-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[GEP1]]
-  ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[GEP2]]
+  ; CHECK: [[BCC1:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+  ; CHECK-NEXT: [[BCC2:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+  ; CHECK-NEXT: [[GEP1:%[0-9]+]] = getelementptr i8, i8* [[BCC2]], i8 8
+  ; CHECK-NEXT: [[BCL1:%[0-9]+]] = bitcast i8* [[GEP1]] to i64*
+  ; CHECK-NEXT: [[GEP2:%[0-9]+]] = getelementptr i8, i8* [[BCC1]], i8 8
+  ; CHECK-NEXT: [[BCL2:%[0-9]+]] = bitcast i8* [[GEP2]] to i64*
+  ; CHECK-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[BCL1]]
+  ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[BCL2]]
   ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
   ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
   ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
@@ -38,10 +42,14 @@
   ; CHECK-BE-NEXT: br label %endblock
 
   ; CHECK-BE-LABEL: loadbb1:{{.*}}
-  ; CHECK-BE: [[GEP1:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
-  ; CHECK-BE-NEXT: [[GEP2:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
-  ; CHECK-BE-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[GEP1]]
-  ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[GEP2]]
+  ; CHECK-BE: [[BCC1:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+  ; CHECK-BE-NEXT: [[BCC2:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+  ; CHECK-BE-NEXT: [[GEP1:%[0-9]+]] = getelementptr i8, i8* [[BCC2]], i8 8
+  ; CHECK-BE-NEXT: [[BCL1:%[0-9]+]] = bitcast i8* [[GEP1]] to i64*
+  ; CHECK-BE-NEXT: [[GEP2:%[0-9]+]] = getelementptr i8, i8* [[BCC1]], i8 8
+  ; CHECK-BE-NEXT: [[BCL2:%[0-9]+]] = bitcast i8* [[GEP2]] to i64*
+  ; CHECK-BE-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[BCL1]]
+  ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[BCL2]]
   ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
   ; CHECK-BE-NEXT:  br i1 [[ICMP]], label %endblock, label %res_block
 
diff --git a/test/CodeGen/PowerPC/mi-scheduling-lhs.ll b/test/CodeGen/PowerPC/mi-scheduling-lhs.ll
new file mode 100644
index 0000000..2e09cb1
--- /dev/null
+++ b/test/CodeGen/PowerPC/mi-scheduling-lhs.ll
@@ -0,0 +1,49 @@
+; RUN: llc -relocation-model=pic -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 <%s | FileCheck %s
+
+%struct.Record = type { %struct.Record*, i32 }
+
+@n = local_unnamed_addr global i32 500000000, align 4
+@m = common global %struct.Record zeroinitializer, align 8
+@a = hidden local_unnamed_addr global %struct.Record* @m, align 8
+@o = common global %struct.Record zeroinitializer, align 8
+@b = hidden local_unnamed_addr global %struct.Record* @o, align 8
+
+define signext i32 @foo() local_unnamed_addr {
+entry:
+  %0 = load i64, i64* bitcast (%struct.Record** @b to i64*), align 8
+  %1 = load i64*, i64** bitcast (%struct.Record** @a to i64**), align 8
+  store i64 %0, i64* %1, align 8
+  %2 = load i32, i32* @n, align 4
+  %cmp9 = icmp eq i32 %2, 0
+  br i1 %cmp9, label %for.end, label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %3 = load %struct.Record*, %struct.Record** @a, align 8
+  %IntComp = getelementptr inbounds %struct.Record, %struct.Record* %3, i64 0, i32 1
+  store i32 5, i32* %IntComp, align 8
+  %PtrComp2 = getelementptr inbounds %struct.Record, %struct.Record* %3, i64 0, i32 0
+  %4 = load %struct.Record*, %struct.Record** %PtrComp2, align 8
+  %IntComp3 = getelementptr inbounds %struct.Record, %struct.Record* %4, i64 0, i32 1
+  store i32 5, i32* %IntComp3, align 8
+  %PtrComp6 = getelementptr inbounds %struct.Record, %struct.Record* %4, i64 0, i32 0
+  store %struct.Record* %4, %struct.Record** %PtrComp6, align 8
+  %inc = add nuw i32 %i.010, 1
+  %cmp = icmp ult i32 %inc, %2
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body, %entry
+  ret i32 0
+
+; CHECK-LABEL: foo
+; CHECK: addis [[REG1:[0-9]+]], 2, a@toc@ha
+; CHECK: li [[REG4:[0-9]+]], 5
+; CHECK: [[LAB:[a-z0-9A-Z_.]+]]:
+; CHECK: ld [[REG2:[0-9]+]], a@toc@l([[REG1]])
+; CHECK: stw [[REG4]], 8([[REG2]])
+; CHECK: ld [[REG3:[0-9]+]], 0([[REG2]])
+; CHECK: stw [[REG4]], 8([[REG3]])
+; CHECK: std [[REG3]], 0([[REG3]])
+; CHECK: bdnz [[LAB]]
+}
+
diff --git a/test/CodeGen/PowerPC/mulld.ll b/test/CodeGen/PowerPC/mulld.ll
index e42286d..c867d9a 100644
--- a/test/CodeGen/PowerPC/mulld.ll
+++ b/test/CodeGen/PowerPC/mulld.ll
@@ -11,8 +11,8 @@
 ; CHECK-LABEL: bn_mul_comba8:
 ; CHECK:    mulhdu
 ; CHECK-NEXT:    mulld
-; CHECK-NEXT:    mulhdu
-; CHECK-NEXT:    mulld
+; CHECK:         mulhdu
+; CHECK:         mulld
 ; CHECK-NEXT:    mulhdu
 
 
diff --git a/test/CodeGen/PowerPC/ppc-prologue.ll b/test/CodeGen/PowerPC/ppc-prologue.ll
index 8675318..342728c 100644
--- a/test/CodeGen/PowerPC/ppc-prologue.ll
+++ b/test/CodeGen/PowerPC/ppc-prologue.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -frame-pointer=all | FileCheck %s
 
 define i32 @_Z4funci(i32 %a) ssp {
 ; CHECK:       mflr 0
diff --git a/test/CodeGen/PowerPC/ppc64-P9-setb.ll b/test/CodeGen/PowerPC/ppc64-P9-setb.ll
new file mode 100644
index 0000000..d141e4a
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-P9-setb.ll
@@ -0,0 +1,1330 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
+; RUN:   -enable-ppc-quad-precision -ppc-asm-full-reg-names < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
+; RUN:   -ppc-asm-full-reg-names < %s | FileCheck %s -check-prefix=CHECK-PWR8    \
+; RUN:   -implicit-check-not "\<setb\>"
+
+; Test different patterns with type i64
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setne)), setlt
+define i64 @setb1(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %a, %b
+  %t2 = icmp ne i64 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb1:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb1
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setne)), setgt
+define i64 @setb2(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %b, %a
+  %t2 = icmp ne i64 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb2:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb2
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setne)), setlt
+define i64 @setb3(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %a, %b
+  %t2 = icmp ne i64 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb3:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb3
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setne)), setgt
+define i64 @setb4(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %b, %a
+  %t2 = icmp ne i64 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb4:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb4
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setgt)), setlt
+define i64 @setb5(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %a, %b
+  %t2 = icmp sgt i64 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb5:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb5
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setgt)), setgt
+define i64 @setb6(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %b, %a
+  %t2 = icmp sgt i64 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb6:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb6
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setlt)), setlt
+define i64 @setb7(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %a, %b
+  %t2 = icmp slt i64 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb7:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb7
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setlt)), setgt
+define i64 @setb8(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %b, %a
+  %t2 = icmp slt i64 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb8:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb8
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setne)), setgt
+define i64 @setb9(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %a, %b
+  %t2 = icmp ne i64 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb9:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb9
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setne)), setlt
+define i64 @setb10(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %b, %a
+  %t2 = icmp ne i64 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb10:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb10
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setne)), setgt
+define i64 @setb11(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %a, %b
+  %t2 = icmp ne i64 %b, %a
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb11:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb11
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setne)), setlt
+define i64 @setb12(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %b, %a
+  %t2 = icmp ne i64 %b, %a
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb12:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb12
+; CHECK-PWR8-DAG: xor
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setlt)), setgt
+define i64 @setb13(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %a, %b
+  %t2 = icmp slt i64 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb13:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb13
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8-DAG: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setlt)), setlt
+define i64 @setb14(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %b, %a
+  %t2 = icmp slt i64 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb14:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb14
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8-DAG: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setgt)), setgt
+define i64 @setb15(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %a, %b
+  %t2 = icmp sgt i64 %b, %a
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb15:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb15
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8-DAG: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setgt)), setlt
+define i64 @setb16(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %b, %a
+  %t2 = icmp sgt i64 %b, %a
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb16:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb16
+; CHECK-PWR8-DAG: sradi
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8-DAG: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc lhs, rhs, 1, -1, setgt), seteq
+define i64 @setb17(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %a, %b
+  %t2 = icmp sgt i64 %a, %b
+  %t3 = select i1 %t2, i64 1, i64 -1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb17:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb17
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc rhs, lhs, 1, -1, setgt), seteq
+define i64 @setb18(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %b, %a
+  %t2 = icmp sgt i64 %a, %b
+  %t3 = select i1 %t2, i64 1, i64 -1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb18:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb18
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc rhs, lhs, 1, -1, setlt), seteq
+define i64 @setb19(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %a, %b
+  %t2 = icmp slt i64 %b, %a
+  %t3 = select i1 %t2, i64 1, i64 -1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb19:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb19
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc lhs, rhs, 1, -1, setlt), seteq
+define i64 @setb20(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %b, %a
+  %t2 = icmp slt i64 %b, %a
+  %t3 = select i1 %t2, i64 1, i64 -1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb20:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb20
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc lhs, rhs, -1, 1, setlt), seteq
+define i64 @setb21(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %a, %b
+  %t2 = icmp slt i64 %a, %b
+  %t3 = select i1 %t2, i64 -1, i64 1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb21:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb21
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc rhs, lhs, -1, 1, setlt), seteq
+define i64 @setb22(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %b, %a
+  %t2 = icmp slt i64 %a, %b
+  %t3 = select i1 %t2, i64 -1, i64 1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb22:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb22
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc rhs, lhs, -1, 1, setgt), seteq
+define i64 @setb23(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %a, %b
+  %t2 = icmp sgt i64 %b, %a
+  %t3 = select i1 %t2, i64 -1, i64 1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb23:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb23
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc lhs, rhs, -1, 1, setgt), seteq
+define i64 @setb24(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %b, %a
+  %t2 = icmp sgt i64 %b, %a
+  %t3 = select i1 %t2, i64 -1, i64 1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb24:
+; CHECK-NOT: li
+; CHECK-NOT: cmpld
+; CHECK-NOT: isel
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb24
+; CHECK-PWR8: cmpd
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmpld
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+; End of all pattern tests for i64
+
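+; Note: setb (Power ISA 3.0) materializes a three-way comparison result from
+; a CR field: -1 if the LT bit is set, 1 if the GT bit is set, 0 otherwise.
+; Each pattern above should therefore fold to a single compare plus setb on
+; pwr9; the CHECK-PWR8 lines pin the older multi-instruction expansion.
+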
+; Tests with the input parameters swapped
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setne)), setlt
+define i64 @setb25(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %b, %a
+  %t2 = icmp ne i64 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb25:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK-NOT: cmpd
+; CHECK: cmpd {{c?r?(0, )?}}r4, r3
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb25
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setne)), setgt
+define i64 @setb26(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %a, %b
+  %t2 = icmp ne i64 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setb26:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r4, r3
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb26
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; Tests with a different scalar integer type for the selected value:
+; i32/i16/i8 rather than the i64 used above
+
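+; Note: when the selected value is narrower than i64, the setb result is
+; re-extended where needed (the extsw checked in setb27/setb28); the compare
+; itself still operates on the full-width i64 inputs.
+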
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setne)), setlt
+define i64 @setb27(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %a, %b
+  %t2 = icmp ne i64 %b, %a
+  %t3 = zext i1 %t2 to i32
+  %t4 = select i1 %t1, i32 -1, i32 %t3
+  %t5 = sext i32 %t4 to i64
+  ret i64 %t5
+; CHECK-LABEL: setb27:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: extsw
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb27
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: extsw
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setne)), setgt
+define i64 @setb28(i64 %a, i64 %b) {
+  %t1 = icmp sgt i64 %b, %a
+  %t2 = icmp ne i64 %b, %a
+  %t3 = zext i1 %t2 to i16
+  %t4 = select i1 %t1, i16 -1, i16 %t3
+  %t5 = sext i16 %t4 to i64
+  ret i64 %t5
+; CHECK-LABEL: setb28:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: addic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: extsw
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb28
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: addic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: extsw
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setgt)), setlt
+define i64 @setb29(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %a, %b
+  %t2 = icmp sgt i64 %a, %b
+  %t3 = zext i1 %t2 to i8
+  %t4 = select i1 %t1, i8 -1, i8 %t3
+  %t5 = zext i8 %t4 to i64
+  ret i64 %t5
+; CHECK-LABEL: setb29:
+; CHECK-NOT: sradi
+; CHECK-NOT: rldicl
+; CHECK-NOT: li
+; CHECK: cmpd {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: adde
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setb29
+; CHECK-PWR8-DAG: cmpd
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: adde
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; Tests covering different comparison opcodes
+; Tests with integer types i32/i16/i8 for the input parameters
+
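+; Note: narrower signed operands are sign-extended first (extsb for i8,
+; slwi+srawi for the odd i4 case), so the compare lowers to cmpw rather
+; than cmpd; the setb folding itself is unchanged.
+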
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setne)), setlt
+define i64 @setbsw1(i32 %a, i32 %b) {
+  %t1 = icmp slt i32 %a, %b
+  %t2 = icmp ne i32 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsw1:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpw {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: cntlzw
+; CHECK-NOT: srwi
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsw1
+; CHECK-PWR8-DAG: cntlzw
+; CHECK-PWR8-DAG: cmpw
+; CHECK-PWR8-DAG: srwi
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setne)), setgt
+define i64 @setbsw2(i32 %a, i32 %b) {
+  %t1 = icmp sgt i32 %b, %a
+  %t2 = icmp ne i32 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsw2:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpw {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: cntlzw
+; CHECK-NOT: srwi
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsw2
+; CHECK-PWR8-DAG: cntlzw
+; CHECK-PWR8-DAG: cmpw
+; CHECK-PWR8-DAG: srwi
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc rhs, lhs, -1, 1, setgt), seteq
+define i64 @setbsw3(i32 %a, i32 %b) {
+  %t1 = icmp eq i32 %a, %b
+  %t2 = icmp sgt i32 %b, %a
+  %t3 = select i1 %t2, i64 -1, i64 1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsw3:
+; CHECK-NOT: li
+; CHECK: cmpw {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK-NOT: cmplw
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsw3
+; CHECK-PWR8: cmpw
+; CHECK-PWR8: isel
+; CHECK-PWR8: cmplw
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setne)), setlt
+define i64 @setbsh1(i16 signext %a, i16 signext %b) {
+  %t1 = icmp slt i16 %a, %b
+  %t2 = icmp ne i16 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsh1:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpw {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: cntlzw
+; CHECK-NOT: srwi
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsh1
+; CHECK-PWR8-DAG: cntlzw
+; CHECK-PWR8-DAG: cmpw
+; CHECK-PWR8-DAG: srwi
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setne)), setgt
+define i64 @setbsh2(i16 signext %a, i16 signext %b) {
+  %t1 = icmp sgt i16 %b, %a
+  %t2 = icmp ne i16 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsh2:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpw {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: cntlzw
+; CHECK-NOT: srwi
+; CHECK-NOT: xori
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsh2
+; CHECK-PWR8-DAG: cmpw
+; CHECK-PWR8-DAG: cntlzw
+; CHECK-PWR8-DAG: srwi
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setgt)), setlt
+define i64 @setbsc1(i8 %a, i8 %b) {
+  %t1 = icmp slt i8 %a, %b
+  %t2 = icmp sgt i8 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsc1:
+; CHECK-DAG: extsb [[RA:r[0-9]+]], r3
+; CHECK-DAG: extsb [[RB:r[0-9]+]], r4
+; CHECK-NOT: li
+; CHECK-NOT: sub
+; CHECK: cmpw {{c?r?(0, )?}}[[RA]], [[RB]]
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: rldicl
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsc1
+; CHECK-PWR8-DAG: extsb
+; CHECK-PWR8-DAG: extsb
+; CHECK-PWR8-DAG: extsw
+; CHECK-PWR8-DAG: extsw
+; CHECK-PWR8-DAG: cmpw
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setgt)), setgt
+define i64 @setbsc2(i8 %a, i8 %b) {
+  %t1 = icmp sgt i8 %b, %a
+  %t2 = icmp sgt i8 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsc2:
+; CHECK-DAG: extsb [[RA:r[0-9]+]], r3
+; CHECK-DAG: extsb [[RB:r[0-9]+]], r4
+; CHECK-NOT: li
+; CHECK-NOT: sub
+; CHECK: cmpw {{c?r?(0, )?}}[[RA]], [[RB]]
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: rldicl
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsc2
+; CHECK-PWR8-DAG: extsb
+; CHECK-PWR8-DAG: extsb
+; CHECK-PWR8-DAG: extsw
+; CHECK-PWR8-DAG: extsw
+; CHECK-PWR8-DAG: cmpw
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setlt)), setlt
+define i64 @setbsc3(i4 %a, i4 %b) {
+  %t1 = icmp slt i4 %a, %b
+  %t2 = icmp slt i4 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbsc3:
+; CHECK-DAG: slwi [[RA:r[0-9]+]], r3, 28
+; CHECK-DAG: slwi [[RB:r[0-9]+]], r4, 28
+; CHECK-NOT: li
+; CHECK-DAG: srawi [[RA1:r[0-9]+]], [[RA]], 28
+; CHECK-DAG: srawi [[RB1:r[0-9]+]], [[RB]], 28
+; CHECK-NOT: sub
+; CHECK: cmpw {{c?r?(0, )?}}[[RA1]], [[RB1]]
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: rldicl
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbsc3
+; CHECK-PWR8-DAG: slwi
+; CHECK-PWR8-DAG: slwi
+; CHECK-PWR8-DAG: srawi
+; CHECK-PWR8-DAG: srawi
+; CHECK-PWR8-DAG: extsw
+; CHECK-PWR8-DAG: extsw
+; CHECK-PWR8-DAG: cmpw
+; CHECK-PWR8-DAG: rldicl
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; Tests with unsigned integer types i64/i32/i16/i8 for the input parameters
+
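+; Note: unsigned comparisons lower to the logical compares cmpld/cmplw
+; (with rlwinm zero-extension for the narrow i16/i8 inputs); setb reads
+; the resulting CR field exactly as in the signed cases.
+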
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setult)), setugt
+define i64 @setbud1(i64 %a, i64 %b) {
+  %t1 = icmp ugt i64 %b, %a
+  %t2 = icmp ult i64 %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbud1:
+; CHECK-NOT: li
+; CHECK: cmpld {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfc
+; CHECK-NOT: subfe
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbud1
+; CHECK-PWR8-DAG: subfc
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8-DAG: cmpld
+; CHECK-PWR8-DAG: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setne)), setugt
+define i64 @setbud2(i64 %a, i64 %b) {
+  %t1 = icmp ugt i64 %a, %b
+  %t2 = icmp ne i64 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbud2:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmpld {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: subfic
+; CHECK-NOT: subfe
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbud2
+; CHECK-PWR8-DAG: cmpld
+; CHECK-PWR8-DAG: subfic
+; CHECK-PWR8-DAG: subfe
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc lhs, rhs, -1, 1, setugt), seteq
+define i64 @setbud3(i64 %a, i64 %b) {
+  %t1 = icmp eq i64 %b, %a
+  %t2 = icmp ugt i64 %b, %a
+  %t3 = select i1 %t2, i64 -1, i64 1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbud3:
+; CHECK-NOT: li
+; CHECK: cmpld {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: li
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbud3
+; CHECK-PWR8-DAG: cmpld
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8-DAG: li
+; CHECK-PWR8: isel
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setne)), setult
+define i64 @setbuw1(i32 %a, i32 %b) {
+  %t1 = icmp ult i32 %b, %a
+  %t2 = icmp ne i32 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbuw1:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmplw {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: cntlzw
+; CHECK-NOT: srwi
+; CHECK-NOT: xori
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbuw1
+; CHECK-PWR8-DAG: cntlzw
+; CHECK-PWR8-DAG: cmplw
+; CHECK-PWR8-DAG: srwi
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8-DAG: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setne)), setugt
+define i64 @setbuw2(i32 %a, i32 %b) {
+  %t1 = icmp ugt i32 %a, %b
+  %t2 = icmp ne i32 %b, %a
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbuw2:
+; CHECK-NOT: xor
+; CHECK-NOT: li
+; CHECK: cmplw {{c?r?(0, )?}}r3, r4
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: cntlzw
+; CHECK-NOT: srwi
+; CHECK-NOT: xori
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbuw2
+; CHECK-PWR8-DAG: cntlzw
+; CHECK-PWR8-DAG: cmplw
+; CHECK-PWR8-DAG: srwi
+; CHECK-PWR8-DAG: xori
+; CHECK-PWR8-DAG: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setne)), setult
+define i64 @setbuh(i16 %a, i16 %b) {
+  %t1 = icmp ult i16 %b, %a
+  %t2 = icmp ne i16 %b, %a
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbuh:
+; CHECK-DAG: rlwinm [[RA:r[0-9]+]], r3, 0, 16, 31
+; CHECK-DAG: rlwinm [[RB:r[0-9]+]], r4, 0, 16, 31
+; CHECK-NOT: li
+; CHECK-NOT: xor
+; CHECK: cmplw {{c?r?(0, )?}}[[RA]], [[RB]]
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: cntlzw
+; CHECK-NOT: srwi
+; CHECK-NOT: xori
+; CHECK-NOT: neg
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbuh
+; CHECK-PWR8: rlwinm
+; CHECK-PWR8: rlwinm
+; CHECK-PWR8-DAG: cmplw
+; CHECK-PWR8-DAG: cntlzw
+; CHECK-PWR8: srwi
+; CHECK-PWR8: xori
+; CHECK-PWR8: neg
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setult)), setugt
+define i64 @setbuc(i8 %a, i8 %b) {
+  %t1 = icmp ugt i8 %a, %b
+  %t2 = icmp ult i8 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbuc:
+; CHECK-DAG: rlwinm [[RA:r[0-9]+]], r3, 0, 24, 31
+; CHECK-DAG: rlwinm [[RB:r[0-9]+]], r4, 0, 24, 31
+; CHECK-NOT: li
+; CHECK-NOT: clrldi
+; CHECK: cmplw {{c?r?(0, )?}}[[RA]], [[RB]]
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: sub
+; CHECK-NOT: sradi
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbuc
+; CHECK-PWR8: rlwinm
+; CHECK-PWR8: rlwinm
+; CHECK-PWR8-DAG: clrldi
+; CHECK-PWR8-DAG: clrldi
+; CHECK-PWR8-DAG: cmplw
+; CHECK-PWR8: sradi
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; Tests with float/double/float128 input parameters
+
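+; Note: the FP compares (fcmpu, xscmpudp, xscmpuqp) all set cr0 here, so
+; the same compare-plus-setb folding applies; the fast flag appears to be
+; required, since the unflagged variants in setbn2/setbn3 below keep isel.
+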
+; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setlt)), setlt
+define i64 @setbf1(float %a, float %b) {
+  %t1 = fcmp fast olt float %a, %b
+  %t2 = fcmp fast olt float %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbf1:
+; CHECK-NOT: li
+; CHECK: fcmpu cr0, f1, f2
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK-NOT: li
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbf1
+; CHECK-PWR8: isel
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setlt)), setgt
+define i64 @setbf2(float %a, float %b) {
+  %t1 = fcmp fast ogt float %b, %a
+  %t2 = fcmp fast olt float %b, %a
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbf2:
+; CHECK-NOT: li
+; CHECK: fcmpu cr0, f1, f2
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK-NOT: li
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbf2
+; CHECK-PWR8: isel
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 0, (select_cc lhs, rhs, -1, 1, setgt), seteq
+define i64 @setbdf1(double %a, double %b) {
+  %t1 = fcmp fast oeq double %b, %a
+  %t2 = fcmp fast ogt double %b, %a
+  %t3 = select i1 %t2, i64 -1, i64 1
+  %t4 = select i1 %t1, i64 0, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbdf1:
+; CHECK: xscmpudp cr0, f1, f2
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: li
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbdf1
+; CHECK-PWR8: isel
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setgt)), setlt
+define i64 @setbdf2(double %a, double %b) {
+  %t1 = fcmp fast olt double %b, %a
+  %t2 = fcmp fast ogt double %b, %a
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbdf2:
+; CHECK-NOT: fcmpu
+; CHECK-NOT: li
+; CHECK: xscmpudp cr0, f1, f2
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: li
+; CHECK-NOT: isel
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbdf2
+; CHECK-PWR8: isel
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+define i64 @setbf128(fp128 %a, fp128 %b) {
+  %t1 = fcmp fast ogt fp128 %a, %b
+  %t2 = fcmp fast olt fp128 %a, %b
+  %t3 = sext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbf128:
+; CHECK-NOT: li
+; CHECK: xscmpuqp cr0, v2, v3
+; CHECK-NEXT: setb r3, cr0
+; CHECK-NOT: isel
+; CHECK-NOT: li
+; CHECK: blr
+; CHECK-PWR8-LABEL: setbf128
+; CHECK-PWR8: isel
+; CHECK-PWR8: blr
+}
+
+; Some cases where we cannot leverage setb
+
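+; Note: in these functions the two conditions do not form the -1/0/1
+; three-way mapping setb provides (setbn1 maps eq to 1 rather than 0, and
+; the FP variants lack the fast flag), so a conditional select remains.
+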
+define i64 @setbn1(i64 %a, i64 %b) {
+  %t1 = icmp slt i64 %a, %b
+  %t2 = icmp eq i64 %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbn1:
+; CHECK-NOT: {{\<setb\>}}
+; CHECK: isel
+; CHECK: blr
+}
+
+define i64 @setbn2(double %a, double %b) {
+  %t1 = fcmp olt double %a, %b
+  %t2 = fcmp one double %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbn2:
+; CHECK-NOT: {{\<setb\>}}
+; CHECK: isel
+; CHECK: blr
+}
+
+define i64 @setbn3(float %a, float %b) {
+  %t1 = fcmp ult float %a, %b
+  %t2 = fcmp une float %a, %b
+  %t3 = zext i1 %t2 to i64
+  %t4 = select i1 %t1, i64 -1, i64 %t3
+  ret i64 %t4
+; CHECK-LABEL: setbn3:
+; CHECK-NOT: {{\<setb\>}}
+; CHECK: isel
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
index 5768f0c..653b212 100644
--- a/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
+++ b/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
@@ -1,37 +1,41 @@
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-PWR8 -implicit-check-not vabsdu
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-PWR8 -implicit-check-not vabsdu
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-PWR7 -implicit-check-not vmaxsd
 
-; Function Attrs: nounwind readnone
 define <4 x i32> @simple_absv_32(<4 x i32> %a) local_unnamed_addr {
 entry:
   %sub.i = sub <4 x i32> zeroinitializer, %a
   %0 = tail call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %a, <4 x i32> %sub.i)
   ret <4 x i32> %0
 ; CHECK-LABEL: simple_absv_32
-; CHECK-DAG: vxor {{[0-9]+}}, [[REG:[0-9]+]], [[REG]]
-; CHECK-DAG: xvnegsp 34, 34
-; CHECK-DAG: xvnegsp 35, {{[0-9]+}}
-; CHECK-NEXT: vabsduw 2, 2, {{[0-9]+}}
+; CHECK-NOT:  vxor
+; CHECK-NOT:  vabsduw
+; CHECK:      vnegw v[[REG:[0-9]+]], v2
+; CHECK-NEXT: vmaxsw v2, v2, v[[REG]]
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: simple_absv_32
 ; CHECK-PWR8: xxlxor
 ; CHECK-PWR8: vsubuwm
 ; CHECK-PWR8: vmaxsw
 ; CHECK-PWR8: blr
+; CHECK-PWR7-LABEL: simple_absv_32
+; CHECK-PWR7: xxlxor
+; CHECK-PWR7: vsubuwm
+; CHECK-PWR7: vmaxsw
+; CHECK-PWR7: blr
 }
 
-; Function Attrs: nounwind readnone
 define <4 x i32> @simple_absv_32_swap(<4 x i32> %a) local_unnamed_addr {
 entry:
   %sub.i = sub <4 x i32> zeroinitializer, %a
   %0 = tail call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %sub.i, <4 x i32> %a)
   ret <4 x i32> %0
 ; CHECK-LABEL: simple_absv_32_swap
-; CHECK-DAG: vxor {{[0-9]+}}, [[REG:[0-9]+]], [[REG]]
-; CHECK-DAG: xvnegsp 34, 34
-; CHECK-DAG: xvnegsp 35, {{[0-9]+}}
-; CHECK-NEXT: vabsduw 2, 2, {{[0-9]+}}
+; CHECK-NOT:  vxor
+; CHECK-NOT:  vabsduw
+; CHECK:      vnegw  v[[REG:[0-9]+]], v2
+; CHECK-NEXT: vmaxsw v2, v2, v[[REG]]
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: simple_absv_32_swap
 ; CHECK-PWR8: xxlxor
@@ -46,37 +50,72 @@
   %0 = tail call <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16> %a, <8 x i16> %sub.i)
   ret <8 x i16> %0
 ; CHECK-LABEL: simple_absv_16
-; CHECK: mtvsrws {{[0-9]+}}, {{[0-9]+}}
-; CHECK-NEXT: vadduhm 2, 2, [[IMM:[0-9]+]]
-; CHECK-NEXT: vabsduh 2, 2, [[IMM]]
+; CHECK-NOT:  mtvsrws
+; CHECK-NOT:  vabsduh
+; CHECK:      xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-NEXT: vsubuhm v[[REG:[0-9]+]], v[[ZERO]], v2
+; CHECK-NEXT: vmaxsh v2, v2, v[[REG]]
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: simple_absv_16
 ; CHECK-PWR8: xxlxor
 ; CHECK-PWR8: vsubuhm
 ; CHECK-PWR8: vmaxsh
 ; CHECK-PWR8: blr
+; CHECK-PWR7-LABEL: simple_absv_16
+; CHECK-PWR7: xxlxor
+; CHECK-PWR7: vsubuhm
+; CHECK-PWR7: vmaxsh
+; CHECK-PWR7: blr
 }
 
-; Function Attrs: nounwind readnone
 define <16 x i8> @simple_absv_8(<16 x i8> %a) local_unnamed_addr {
 entry:
   %sub.i = sub <16 x i8> zeroinitializer, %a
   %0 = tail call <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8> %a, <16 x i8> %sub.i)
   ret <16 x i8> %0
 ; CHECK-LABEL: simple_absv_8
-; CHECK: xxspltib {{[0-9]+}}, 128
-; CHECK-NEXT: vaddubm 2, 2, [[IMM:[0-9]+]]
-; CHECK-NEXT: vabsdub 2, 2, [[IMM]]
+; CHECK-NOT:  xxspltib
+; CHECK-NOT:  vabsdub
+; CHECK:      xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-NEXT: vsububm v[[REG:[0-9]+]], v[[ZERO]], v2
+; CHECK-NEXT: vmaxsb v2, v2, v[[REG]]
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: simple_absv_8
 ; CHECK-PWR8: xxlxor
 ; CHECK-PWR8: vsububm
 ; CHECK-PWR8: vmaxsb
 ; CHECK-PWR8: blr
+; CHECK-PWR7-LABEL: simple_absv_8
+; CHECK-PWR7: xxlxor
+; CHECK-PWR7: vsububm
+; CHECK-PWR7: vmaxsb
+; CHECK-PWR7: blr
+}
+
+; v2i64 vmax (vmaxsd) is not available on pwr7 (introduced in ISA 2.07),
+; so the PWR7 run only checks that no vmaxsd is emitted.
+define <2 x i64> @sub_absv_64(<2 x i64> %a, <2 x i64> %b) local_unnamed_addr {
+entry:
+  %0 = sub nsw <2 x i64> %a, %b
+  %1 = icmp sgt <2 x i64> %0, <i64 -1, i64 -1>
+  %2 = sub <2 x i64> zeroinitializer, %0
+  %3 = select <2 x i1> %1, <2 x i64> %0, <2 x i64> %2
+  ret <2 x i64> %3
+; CHECK-LABEL: sub_absv_64
+; CHECK: vsubudm
+; CHECK: vnegd
+; CHECK: vmaxsd
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: sub_absv_64
+; CHECK-PWR8-DAG: vsubudm
+; CHECK-PWR8-DAG: xxlxor
+; CHECK-PWR8: vmaxsd
+; CHECK-PWR8: blr
+; CHECK-PWR7-LABEL: sub_absv_64
+; CHECK-PWR7-NOT: vmaxsd
+; CHECK-PWR7: blr
 }
 
 ; The select pattern can only be detected for v4i32.
-; Function Attrs: norecurse nounwind readnone
 define <4 x i32> @sub_absv_32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr {
 entry:
   %0 = sub nsw <4 x i32> %a, %b
@@ -85,14 +124,77 @@
   %3 = select <4 x i1> %1, <4 x i32> %0, <4 x i32> %2
   ret <4 x i32> %3
 ; CHECK-LABEL: sub_absv_32
-; CHECK-DAG: xvnegsp 34, 34
-; CHECK-DAG: xvnegsp 35, 35
-; CHECK-NEXT: vabsduw 2, 2, 3
+; CHECK-NOT:  vsubuwm
+; CHECK-NOT:  vnegw
+; CHECK-NOT:  vmaxsw
+; CHECK-DAG:  xvnegsp v2, v2
+; CHECK-DAG:  xvnegsp v3, v3
+; CHECK-NEXT: vabsduw v2, v{{[23]}}, v{{[23]}}
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: sub_absv_32
-; CHECK-PWR8: vsubuwm
-; CHECK-PWR8: xxlxor
+; CHECK-PWR8-DAG: vsubuwm
+; CHECK-PWR8-DAG: xxlxor
+; CHECK-PWR8: vmaxsw
 ; CHECK-PWR8: blr
+; CHECK-PWR7-LABEL: sub_absv_32
+; CHECK-PWR7-DAG: vsubuwm
+; CHECK-PWR7-DAG: xxlxor
+; CHECK-PWR7: vmaxsw
+; CHECK-PWR7: blr
+}
+
+define <8 x i16> @sub_absv_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
+entry:
+  %0 = sub nsw <8 x i16> %a, %b
+  %1 = icmp sgt <8 x i16> %0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %2 = sub <8 x i16> zeroinitializer, %0
+  %3 = select <8 x i1> %1, <8 x i16> %0, <8 x i16> %2
+  ret <8 x i16> %3
+; CHECK-LABEL: sub_absv_16
+; CHECK-NOT:  vabsduh
+; CHECK-DAG:  xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-DAG:  vsubuhm v[[SUB:[0-9]+]], v2, v3
+; CHECK:      vsubuhm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]]
+; CHECK-NEXT: vmaxsh v2, v[[SUB]], v[[SUB1]]
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: sub_absv_16
+; CHECK-PWR8-DAG:  xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-PWR8-DAG:  vsubuhm v[[SUB:[0-9]+]], v2, v3
+; CHECK-PWR8:      vsubuhm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]]
+; CHECK-PWR8-NEXT: vmaxsh v2, v[[SUB]], v[[SUB1]]
+; CHECK-PWR8-NEXT: blr
+; CHECK-PWR7-LABEL: sub_absv_16
+; CHECK-PWR7-DAG: vsubuhm
+; CHECK-PWR7-DAG: xxlxor
+; CHECK-PWR7: vmaxsh
+; CHECK-PWR7-NEXT: blr
+}
+
+define <16 x i8> @sub_absv_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
+entry:
+  %0 = sub nsw <16 x i8> %a, %b
+  %1 = icmp sgt <16 x i8> %0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %2 = sub <16 x i8> zeroinitializer, %0
+  %3 = select <16 x i1> %1, <16 x i8> %0, <16 x i8> %2
+  ret <16 x i8> %3
+; CHECK-LABEL: sub_absv_8
+; CHECK-NOT:  vabsdub
+; CHECK-DAG:  xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-DAG:  vsububm v[[SUB:[0-9]+]], v2, v3
+; CHECK:      vsububm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]]
+; CHECK-NEXT: vmaxsb v2, v[[SUB]], v[[SUB1]]
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: sub_absv_8
+; CHECK-PWR8-DAG:  xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-PWR8-DAG:  vsububm v[[SUB:[0-9]+]], v2, v3
+; CHECK-PWR8:      vsububm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]]
+; CHECK-PWR8-NEXT: vmaxsb v2, v[[SUB]], v[[SUB1]]
+; CHECK-PWR8-NEXT: blr
+; CHECK-PWR7-LABEL: sub_absv_8
+; CHECK-PWR7-DAG:  xxlxor
+; CHECK-PWR7-DAG:  vsububm
+; CHECK-PWR7: vmaxsb
+; CHECK-PWR7-NEXT: blr
 }
 
 ; FIXME: This does not produce the ISD::ABS that we are looking for.
@@ -100,8 +202,7 @@
 ; We do manage to find the word version of ABS but not the halfword.
 ; Therefore, we end up doing more work than is required with a pair of abs for word
 ;  instead of just one for the halfword.
-; Function Attrs: norecurse nounwind readnone
-define <8 x i16> @sub_absv_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
+define <8 x i16> @sub_absv_16_ext(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
 entry:
   %0 = sext <8 x i16> %a to <8 x i32>
   %1 = sext <8 x i16> %b to <8 x i32>
@@ -111,23 +212,25 @@
   %5 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %4
   %6 = trunc <8 x i32> %5 to <8 x i16>
   ret <8 x i16> %6
-; CHECK-LABEL: sub_absv_16
+; CHECK-LABEL: sub_absv_16_ext
 ; CHECK-NOT: vabsduh
 ; CHECK: vabsduw
+; CHECK-NOT: vnegw
 ; CHECK-NOT: vabsduh
 ; CHECK: vabsduw
+; CHECK-NOT: vnegw
 ; CHECK-NOT: vabsduh
 ; CHECK: blr
 ; CHECK-PWR8-LABEL: sub_absv_16
-; CHECK-PWR8: vsubuwm
-; CHECK-PWR8: xxlxor
+; CHECK-PWR8-DAG: vsubuwm
+; CHECK-PWR8-DAG: xxlxor
 ; CHECK-PWR8: blr
 }
 
 ; FIXME: This does not produce ISD::ABS. This does not even vectorize correctly!
 ; This function should look like sub_absv_32 and sub_absv_16 except that the type is v16i8.
 ; Function Attrs: norecurse nounwind readnone
-define <16 x i8> @sub_absv_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
+define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
 entry:
   %vecext = extractelement <16 x i8> %a, i32 0
   %conv = zext i8 %vecext to i32
@@ -290,20 +393,19 @@
   %conv122 = trunc i32 %15 to i8
   %vecins123 = insertelement <16 x i8> %vecins115, i8 %conv122, i32 15
   ret <16 x i8> %vecins123
-; CHECK-LABEL: sub_absv_8
+; CHECK-LABEL: sub_absv_8_ext
 ; CHECK-NOT: vabsdub
 ; CHECK: subf
 ; CHECK-NOT: vabsdub
 ; CHECK: xor
 ; CHECK-NOT: vabsdub
 ; CHECK: blr
-; CHECK-PWR8-LABEL: sub_absv_8
+; CHECK-PWR8-LABEL: sub_absv_8_ext
 ; CHECK-PWR8: subf
 ; CHECK-PWR8: xor
 ; CHECK-PWR8: blr
 }
 
-; Function Attrs: nounwind readnone
 define <4 x i32> @sub_absv_vec_32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr {
 entry:
   %sub = sub <4 x i32> %a, %b
@@ -311,16 +413,20 @@
   %0 = tail call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %sub, <4 x i32> %sub.i)
   ret <4 x i32> %0
 ; CHECK-LABEL: sub_absv_vec_32
-; CHECK: vabsduw 2, 2, 3
+; CHECK-NOT:  vsubuwm
+; CHECK-NOT:  vnegw
+; CHECK-NOT:  vmaxsw
+; CHECK-DAG:  xvnegsp v2, v2
+; CHECK-DAG:  xvnegsp v3, v3
+; CHECK-NEXT: vabsduw v2, v{{[23]}}, v{{[23]}}
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: sub_absv_vec_32
-; CHECK-PWR8: xxlxor
-; CHECK-PWR8: vsubuwm
+; CHECK-PWR8-DAG: xxlxor
+; CHECK-PWR8-DAG: vsubuwm
 ; CHECK-PWR8: vmaxsw
 ; CHECK-PWR8: blr
 }
 
-; Function Attrs: nounwind readnone
 define <8 x i16> @sub_absv_vec_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
 entry:
   %sub = sub <8 x i16> %a, %b
@@ -328,16 +434,20 @@
   %0 = tail call <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16> %sub, <8 x i16> %sub.i)
   ret <8 x i16> %0
 ; CHECK-LABEL: sub_absv_vec_16
-; CHECK: vabsduh 2, 2, 3
+; CHECK-NOT:  mtvsrws
+; CHECK-NOT:  vabsduh
+; CHECK-DAG:  xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-DAG:  vsubuhm v[[SUB:[0-9]+]], v2, v3
+; CHECK:      vsubuhm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]]
+; CHECK-NEXT: vmaxsh v2, v[[SUB]], v[[SUB1]]
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: sub_absv_vec_16
-; CHECK-PWR8: xxlxor
-; CHECK-PWR8: vsubuhm
+; CHECK-PWR8-DAG: xxlxor
+; CHECK-PWR8-DAG: vsubuhm
 ; CHECK-PWR8: vmaxsh
 ; CHECK-PWR8: blr
 }
 
-; Function Attrs: nounwind readnone
 define <16 x i8> @sub_absv_vec_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
 entry:
   %sub = sub <16 x i8> %a, %b
@@ -345,22 +455,313 @@
   %0 = tail call <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8> %sub, <16 x i8> %sub.i)
   ret <16 x i8> %0
 ; CHECK-LABEL: sub_absv_vec_8
-; CHECK: vabsdub 2, 2, 3
+; CHECK-NOT:  xxspltib
+; CHECK-NOT:  vabsdub
+; CHECK-DAG:  xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]]
+; CHECK-DAG:  vsububm v[[SUB:[0-9]+]], v2, v3
+; CHECK:      vsububm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]]
+; CHECK-NEXT: vmaxsb v2, v[[SUB]], v[[SUB1]]
 ; CHECK-NEXT: blr
 ; CHECK-PWR8-LABEL: sub_absv_vec_8
-; CHECK-PWR8: xxlxor
-; CHECK-PWR8: vsububm
+; CHECK-PWR8-DAG: xxlxor
+; CHECK-PWR8-DAG: vsububm
 ; CHECK-PWR8: vmaxsb
 ; CHECK-PWR8: blr
 }
 
+define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
+    %3 = zext <4 x i16> %0 to <4 x i32>
+    %4 = zext <4 x i16> %1 to <4 x i32>
+    %5 = sub <4 x i32> %3, %4
+    %6 = sub <4 x i32> zeroinitializer, %5
+    %7 = tail call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %5, <4 x i32> %6)
+    ret <4 x i32> %7
+; CHECK-LABEL: zext_sub_absd32
+; CHECK-NOT: xvnegsp
+; CHECK:     vabsduw
+; CHECK:     blr
+; CHECK-PWR8-LABEL: zext_sub_absd32
+; CHECK-PWR8: vmaxsw
+; CHECK-PWR8: blr
+}
 
-; Function Attrs: nounwind readnone
+define <8 x i16> @zext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr {
+    %3 = zext <8 x i8> %0 to <8 x i16>
+    %4 = zext <8 x i8> %1 to <8 x i16>
+    %5 = sub <8 x i16> %3, %4
+    %6 = sub <8 x i16> zeroinitializer, %5
+    %7 = tail call <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16> %5, <8 x i16> %6)
+    ret <8 x i16> %7
+; CHECK-LABEL: zext_sub_absd16
+; CHECK-NOT: vadduhm
+; CHECK:     vabsduh
+; CHECK:     blr
+; CHECK-PWR8-LABEL: zext_sub_absd16
+; CHECK-PWR8: vmaxsh
+; CHECK-PWR8: blr
+}
+
+define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr {
+    %3 = zext <16 x i4> %0 to <16 x i8>
+    %4 = zext <16 x i4> %1 to <16 x i8>
+    %5 = sub <16 x i8> %3, %4
+    %6 = sub <16 x i8> zeroinitializer, %5
+    %7 = tail call <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8> %5, <16 x i8> %6)
+    ret <16 x i8> %7
+; CHECK-LABEL: zext_sub_absd8
+; CHECK-NOT: vaddubm
+; CHECK:     vabsdub
+; CHECK:     blr
+; CHECK-PWR8-LABEL: zext_sub_absd8
+; CHECK-PWR8: vmaxsb
+; CHECK-PWR8: blr
+}
+
+; Verify vabsdu* exploitation for the ucmp + sub + select sequence
+
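+; Note: select (icmp ugt/uge a, b), (sub a, b), (sub b, a) -- and the
+; ult/ule forms with the arms swapped -- compute the unsigned absolute
+; difference, which maps to a single vabsduw/vabsduh/vabsdub on pwr9.
+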
+define <4 x i32> @absd_int32_ugt(<4 x i32>, <4 x i32>) {
+  %3 = icmp ugt <4 x i32> %0, %1
+  %4 = sub <4 x i32> %0, %1
+  %5 = sub <4 x i32> %1, %0
+  %6 = select <4 x i1> %3, <4 x i32> %4, <4 x i32> %5
+  ret <4 x i32> %6
+; CHECK-LABEL: absd_int32_ugt
+; CHECK-NOT: vcmpgtuw
+; CHECK-NOT: xxsel
+; CHECK: vabsduw v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int32_ugt
+; CHECK-PWR8: vcmpgtuw
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <4 x i32> @absd_int32_uge(<4 x i32>, <4 x i32>) {
+  %3 = icmp uge <4 x i32> %0, %1
+  %4 = sub <4 x i32> %0, %1
+  %5 = sub <4 x i32> %1, %0
+  %6 = select <4 x i1> %3, <4 x i32> %4, <4 x i32> %5
+  ret <4 x i32> %6
+; CHECK-LABEL: absd_int32_uge
+; CHECK-NOT: vcmpgtuw
+; CHECK-NOT: xxsel
+; CHECK: vabsduw v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int32_uge
+; CHECK-PWR8: vcmpgtuw
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <4 x i32> @absd_int32_ult(<4 x i32>, <4 x i32>) {
+  %3 = icmp ult <4 x i32> %0, %1
+  %4 = sub <4 x i32> %0, %1
+  %5 = sub <4 x i32> %1, %0
+  %6 = select <4 x i1> %3, <4 x i32> %5, <4 x i32> %4
+  ret <4 x i32> %6
+; CHECK-LABEL: absd_int32_ult
+; CHECK-NOT: vcmpgtuw
+; CHECK-NOT: xxsel
+; CHECK: vabsduw v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int32_ult
+; CHECK-PWR8: vcmpgtuw
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <4 x i32> @absd_int32_ule(<4 x i32>, <4 x i32>) {
+  %3 = icmp ule <4 x i32> %0, %1
+  %4 = sub <4 x i32> %0, %1
+  %5 = sub <4 x i32> %1, %0
+  %6 = select <4 x i1> %3, <4 x i32> %5, <4 x i32> %4
+  ret <4 x i32> %6
+; CHECK-LABEL: absd_int32_ule
+; CHECK-NOT: vcmpgtuw
+; CHECK-NOT: xxsel
+; CHECK: vabsduw v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int32_ule
+; CHECK-PWR8: vcmpgtuw
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <8 x i16> @absd_int16_ugt(<8 x i16>, <8 x i16>) {
+  %3 = icmp ugt <8 x i16> %0, %1
+  %4 = sub <8 x i16> %0, %1
+  %5 = sub <8 x i16> %1, %0
+  %6 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> %5
+  ret <8 x i16> %6
+; CHECK-LABEL: absd_int16_ugt
+; CHECK-NOT: vcmpgtuh
+; CHECK-NOT: xxsel
+; CHECK: vabsduh v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int16_ugt
+; CHECK-PWR8: vcmpgtuh
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <8 x i16> @absd_int16_uge(<8 x i16>, <8 x i16>) {
+  %3 = icmp uge <8 x i16> %0, %1
+  %4 = sub <8 x i16> %0, %1
+  %5 = sub <8 x i16> %1, %0
+  %6 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> %5
+  ret <8 x i16> %6
+; CHECK-LABEL: absd_int16_uge
+; CHECK-NOT: vcmpgtuh
+; CHECK-NOT: xxsel
+; CHECK: vabsduh v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int16_uge
+; CHECK-PWR8: vcmpgtuh
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <8 x i16> @absd_int16_ult(<8 x i16>, <8 x i16>) {
+  %3 = icmp ult <8 x i16> %0, %1
+  %4 = sub <8 x i16> %0, %1
+  %5 = sub <8 x i16> %1, %0
+  %6 = select <8 x i1> %3, <8 x i16> %5, <8 x i16> %4
+  ret <8 x i16> %6
+; CHECK-LABEL: absd_int16_ult
+; CHECK-NOT: vcmpgtuh
+; CHECK-NOT: xxsel
+; CHECK: vabsduh v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int16_ult
+; CHECK-PWR8: vcmpgtuh
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <8 x i16> @absd_int16_ule(<8 x i16>, <8 x i16>) {
+  %3 = icmp ule <8 x i16> %0, %1
+  %4 = sub <8 x i16> %0, %1
+  %5 = sub <8 x i16> %1, %0
+  %6 = select <8 x i1> %3, <8 x i16> %5, <8 x i16> %4
+  ret <8 x i16> %6
+; CHECK-LABEL: absd_int16_ule
+; CHECK-NOT: vcmpgtuh
+; CHECK-NOT: xxsel
+; CHECK: vabsduh v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int16_ule
+; CHECK-PWR8: vcmpgtuh
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <16 x i8> @absd_int8_ugt(<16 x i8>, <16 x i8>) {
+  %3 = icmp ugt <16 x i8> %0, %1
+  %4 = sub <16 x i8> %0, %1
+  %5 = sub <16 x i8> %1, %0
+  %6 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> %5
+  ret <16 x i8> %6
+; CHECK-LABEL: absd_int8_ugt
+; CHECK-NOT: vcmpgtub
+; CHECK-NOT: xxsel
+; CHECK: vabsdub v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int8_ugt
+; CHECK-PWR8: vcmpgtub
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <16 x i8> @absd_int8_uge(<16 x i8>, <16 x i8>) {
+  %3 = icmp uge <16 x i8> %0, %1
+  %4 = sub <16 x i8> %0, %1
+  %5 = sub <16 x i8> %1, %0
+  %6 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> %5
+  ret <16 x i8> %6
+; CHECK-LABEL: absd_int8_uge
+; CHECK-NOT: vcmpgtub
+; CHECK-NOT: xxsel
+; CHECK: vabsdub v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int8_uge
+; CHECK-PWR8: vcmpgtub
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <16 x i8> @absd_int8_ult(<16 x i8>, <16 x i8>) {
+  %3 = icmp ult <16 x i8> %0, %1
+  %4 = sub <16 x i8> %0, %1
+  %5 = sub <16 x i8> %1, %0
+  %6 = select <16 x i1> %3, <16 x i8> %5, <16 x i8> %4
+  ret <16 x i8> %6
+; CHECK-LABEL: absd_int8_ult
+; CHECK-NOT: vcmpgtub
+; CHECK-NOT: xxsel
+; CHECK: vabsdub v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int8_ult
+; CHECK-PWR8: vcmpgtub
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <16 x i8> @absd_int8_ule(<16 x i8>, <16 x i8>) {
+  %3 = icmp ule <16 x i8> %0, %1
+  %4 = sub <16 x i8> %0, %1
+  %5 = sub <16 x i8> %1, %0
+  %6 = select <16 x i1> %3, <16 x i8> %5, <16 x i8> %4
+  ret <16 x i8> %6
+; CHECK-LABEL: absd_int8_ule
+; CHECK-NOT: vcmpgtub
+; CHECK-NOT: xxsel
+; CHECK: vabsdub v2, v2, v3
+; CHECK-NEXT: blr
+; CHECK-PWR8-LABEL: absd_int8_ule
+; CHECK-PWR8: vcmpgtub
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+; Some cases we are unable to optimize:
+; verify the combine does not fire beyond its intended scope
+define <4 x i32> @absd_int32_ugt_opp(<4 x i32>, <4 x i32>) {
+  %3 = icmp ugt <4 x i32> %0, %1
+  %4 = sub <4 x i32> %0, %1
+  %5 = sub <4 x i32> %1, %0
+  %6 = select <4 x i1> %3, <4 x i32> %5, <4 x i32> %4
+  ret <4 x i32> %6
+; CHECK-LABEL: absd_int32_ugt_opp
+; CHECK-NOT: vabsduw
+; CHECK: vcmpgtuw
+; CHECK: xxsel
+; CHECK: blr
+; CHECK-PWR8-LABEL: absd_int32_ugt_opp
+; CHECK-PWR8: vcmpgtuw
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
+define <2 x i64> @absd_int64_ugt(<2 x i64>, <2 x i64>) {
+  %3 = icmp ugt <2 x i64> %0, %1
+  %4 = sub <2 x i64> %0, %1
+  %5 = sub <2 x i64> %1, %0
+  %6 = select <2 x i1> %3, <2 x i64> %4, <2 x i64> %5
+  ret <2 x i64> %6
+; CHECK-LABEL: absd_int64_ugt
+; CHECK-NOT: vabsduw
+; CHECK: vcmpgtud
+; CHECK: xxsel
+; CHECK: blr
+; CHECK-PWR8-LABEL: absd_int64_ugt
+; CHECK-PWR8: vcmpgtud
+; CHECK-PWR8: xxsel
+; CHECK-PWR8: blr
+}
+
 declare <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32>, <4 x i32>)
 
-; Function Attrs: nounwind readnone
 declare <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16>, <8 x i16>)
 
-; Function Attrs: nounwind readnone
 declare <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8>, <16 x i8>)
 
diff --git a/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll b/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
index dfa6ec0..e19ab11 100644
--- a/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
+++ b/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
@@ -1,4 +1,4 @@
-; RUN: not llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: not llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
 ;
 ; Check that misuse of anyregcc results in a compile time error.
 
diff --git a/test/CodeGen/PowerPC/ppc64-anyregcc.ll b/test/CodeGen/PowerPC/ppc64-anyregcc.ll
index 06ec561..b8c62c3 100644
--- a/test/CodeGen/PowerPC/ppc64-anyregcc.ll
+++ b/test/CodeGen/PowerPC/ppc64-anyregcc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
diff --git a/test/CodeGen/PowerPC/ppc64-patchpoint.ll b/test/CodeGen/PowerPC/ppc64-patchpoint.ll
index d10ea98..7a6f5ac 100644
--- a/test/CodeGen/PowerPC/ppc64-patchpoint.ll
+++ b/test/CodeGen/PowerPC/ppc64-patchpoint.ll
@@ -1,7 +1,7 @@
-; RUN: llc                             < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
-; RUN: llc -fast-isel -fast-isel-abort=1 < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu                             < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -fast-isel -fast-isel-abort=1 < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
 
 target triple = "powerpc64-unknown-linux-gnu"
 
diff --git a/test/CodeGen/PowerPC/ppc64-stackmap.ll b/test/CodeGen/PowerPC/ppc64-stackmap.ll
index 5abc2a2..8b2466b 100644
--- a/test/CodeGen/PowerPC/ppc64-stackmap.ll
+++ b/test/CodeGen/PowerPC/ppc64-stackmap.ll
@@ -1,4 +1,4 @@
-; RUN: llc                             < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
 ;
 ; Note: Print verbose stackmaps using -debug-only=stackmaps.
 
diff --git a/test/CodeGen/PowerPC/pr39815.ll b/test/CodeGen/PowerPC/pr39815.ll
new file mode 100644
index 0000000..a01c8be
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr39815.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu < %s \
+; RUN:   -verify-machineinstrs | FileCheck %s
+
+@b = common dso_local local_unnamed_addr global i64* null, align 8
+@a = common dso_local local_unnamed_addr global i8 0, align 1
+
+define void @testADDEPromoteResult() {
+entry:
+  %0 = load i64*, i64** @b, align 8
+  %1 = load i64, i64* %0, align 8
+  %cmp = icmp ne i64* %0, null
+  %conv1 = zext i1 %cmp to i64
+  %add = add nsw i64 %1, %conv1
+  %2 = trunc i64 %add to i8
+  %conv2 = and i8 %2, 5
+  store i8 %conv2, i8* @a, align 1
+  ret void
+
+; CHECK-LABEL: @testADDEPromoteResult
+; CHECK:      # %bb.0:
+; CHECK-DAG:   addis [[REG1:[0-9]+]], [[REG2:[0-9]+]], [[VAR1:[a-z0-9A-Z_.]+]]@toc@ha
+; CHECK-DAG:   ld [[REG3:[0-9]+]], [[VAR1]]@toc@l([[REG1]])
+; CHECK-DAG:   lbz [[REG4:[0-9]+]], 0([[REG3]])
+; CHECK-DAG:   addic [[REG5:[0-9]+]], [[REG3]], -1
+; CHECK-DAG:   extsb [[REG6:[0-9]+]], [[REG4]]
+; CHECK-DAG:   addze [[REG7:[0-9]+]], [[REG6]]
+; CHECK-DAG:   addis [[REG8:[0-9]+]], [[REG2]], [[VAR2:[a-z0-9A-Z_.]+]]@toc@ha
+; CHECK-DAG:   andi. [[REG9:[0-9]+]], [[REG7]], 5
+; CHECK-DAG:   stb [[REG9]], [[VAR2]]@toc@l([[REG8]])
+; CHECK:       blr
+}
diff --git a/test/CodeGen/PowerPC/pre-inc-disable.ll b/test/CodeGen/PowerPC/pre-inc-disable.ll
index f7b8294..65f2e6c 100644
--- a/test/CodeGen/PowerPC/pre-inc-disable.ll
+++ b/test/CodeGen/PowerPC/pre-inc-disable.ll
@@ -10,107 +10,107 @@
 define signext i32 @test_pre_inc_disable_1(i8* nocapture readonly %pix1, i32 signext %i_stride_pix1, i8* nocapture readonly %pix2) {
 ; CHECK-LABEL: test_pre_inc_disable_1:
 ; CHECK:   # %bb.0: # %entry
-; CHECK:    addis r6, r2
-; CHECK:    addis r7, r2,
 ; CHECK:    lfd f0, 0(r5)
-; CHECK:    xxlxor v4, v4, v4
-; CHECK:    addi r5, r6,
-; CHECK:    addi r6, r7,
+; CHECK:    addis r5, r2
+; CHECK:    addi r5, r5,
 ; CHECK:    lxvx v2, 0, r5
-; CHECK:    lxvx v3, 0, r6
+; CHECK:    addis r5, r2,
+; CHECK:    addi r5, r5,
+; CHECK:    lxvx v4, 0, r5
 ; CHECK:    xxpermdi v5, f0, f0, 2
-; CHECK:    vperm v0, v4, v5, v2
-; CHECK:    vperm v5, v5, v4, v3
-; CHECK:    xvnegsp v5, v5
-; CHECK:    xvnegsp v0, v0
+; CHECK:    xxlxor v3, v3, v3
+; CHECK-DAG: vperm v[[VR1:[0-9]+]], v5, v3, v4
+; CHECK-DAG: vperm v[[VR2:[0-9]+]], v3, v5, v2
+; CHECK-DAG: xvnegsp v[[VR3:[0-9]+]], v[[VR1]]
+; CHECK-DAG: xvnegsp v[[VR4:[0-9]+]], v[[VR2]]
 
 ; CHECK:  .LBB0_1: # %for.cond1.preheader
 ; CHECK:    lfd f0, 0(r3)
 ; CHECK:    xxpermdi v1, f0, f0, 2
-; CHECK:    vperm v6, v1, v4, v3
-; CHECK:    vperm v1, v4, v1, v2
-; CHECK:    xvnegsp v6, v6
-; CHECK:    xvnegsp v1, v1
-; CHECK:    vabsduw v1, v1, v0
-; CHECK:    vabsduw v6, v6, v5
-; CHECK:    vadduwm v1, v6, v1
+; CHECK:    vperm v6, v3, v1, v2
+; CHECK:    vperm v1, v1, v3, v4
+; CHECK-DAG:    xvnegsp v6, v6
+; CHECK-DAG:    xvnegsp v1, v1
+; CHECK-DAG: vabsduw v1, v1, v[[VR3]]
+; CHECK-DAG: vabsduw v6, v6, v[[VR4]]
+; CHECK:    vadduwm v1, v1, v6
 ; CHECK:    xxswapd v6, v1
 ; CHECK:    vadduwm v1, v1, v6
 ; CHECK:    xxspltw v6, v1, 2
 ; CHECK:    vadduwm v1, v1, v6
-; CHECK:    vextuwrx r7, r6, v1
+; CHECK:    vextuwrx r7, r5, v1
 ; CHECK:    ldux r8, r3, r4
 ; CHECK:    add r3, r3, r4
-; CHECK:    add r5, r7, r5
+; CHECK:    add r6, r7, r6
 ; CHECK:    mtvsrd f0, r8
 ; CHECK:    xxswapd v1, vs0
-; CHECK:    vperm v6, v1, v4, v3
-; CHECK:    vperm v1, v4, v1, v2
-; CHECK:    xvnegsp v6, v6
-; CHECK:    xvnegsp v1, v1
-; CHECK:    vabsduw v1, v1, v0
-; CHECK:    vabsduw v6, v6, v5
-; CHECK:    vadduwm v1, v6, v1
+; CHECK:    vperm v6, v3, v1, v2
+; CHECK:    vperm v1, v1, v3, v4
+; CHECK-DAG: xvnegsp v6, v6
+; CHECK-DAG: xvnegsp v1, v1
+; CHECK-DAG: vabsduw v1, v1, v[[VR3]]
+; CHECK-DAG: vabsduw v6, v6, v[[VR4]]
+; CHECK:    vadduwm v1, v1, v6
 ; CHECK:    xxswapd v6, v1
 ; CHECK:    vadduwm v1, v1, v6
 ; CHECK:    xxspltw v6, v1, 2
 ; CHECK:    vadduwm v1, v1, v6
-; CHECK:    vextuwrx r8, r6, v1
-; CHECK:    add r5, r8, r5
+; CHECK:    vextuwrx r7, r5, v1
+; CHECK:    add r6, r7, r6
 ; CHECK:    bdnz .LBB0_1
-; CHECK:    extsw r3, r5
+; CHECK:    extsw r3, r6
 ; CHECK:    blr
 
 ; P9BE-LABEL: test_pre_inc_disable_1:
-; P9BE:    addis r6, r2,
-; P9BE:    addis r7, r2,
 ; P9BE:    lfd f0, 0(r5)
-; P9BE:    xxlxor v4, v4, v4
-; P9BE:    addi r5, r6,
-; P9BE:    addi r6, r7,
+; P9BE:    addis r5, r2,
+; P9BE:    addi r5, r5,
 ; P9BE:    lxvx v2, 0, r5
-; P9BE:    lxvx v3, 0, r6
+; P9BE:    addis r5, r2,
+; P9BE:    addi r5, r5,
+; P9BE:    lxvx v4, 0, r5
 ; P9BE:    xxlor v5, vs0, vs0
-; P9BE:    li r6, 0
-; P9BE:    vperm v0, v4, v5, v2
-; P9BE:    vperm v5, v4, v5, v3
-; P9BE:    xvnegsp v5, v5
-; P9BE:    xvnegsp v0, v0
+; P9BE:    xxlxor v3, v3, v3
+; P9BE-DAG: li r5, 0
+; P9BE-DAG: vperm v[[VR1:[0-9]+]], v3, v5, v2
+; P9BE-DAG: vperm v[[VR2:[0-9]+]], v3, v5, v4
+; P9BE-DAG: xvnegsp v[[VR3:[0-9]+]], v[[VR1]]
+; P9BE-DAG: xvnegsp v[[VR4:[0-9]+]], v[[VR2]]
 
 ; P9BE:  .LBB0_1: # %for.cond1.preheader
 ; P9BE:    lfd f0, 0(r3)
 ; P9BE:    xxlor v1, vs0, vs0
-; P9BE:    vperm v6, v4, v1, v3
-; P9BE:    vperm v1, v4, v1, v2
-; P9BE:    xvnegsp v6, v6
-; P9BE:    xvnegsp v1, v1
-; P9BE:    vabsduw v1, v1, v0
-; P9BE:    vabsduw v6, v6, v5
+; P9BE:    vperm v6, v3, v1, v4
+; P9BE:    vperm v1, v3, v1, v2
+; P9BE-DAG: xvnegsp v6, v6
+; P9BE-DAG: xvnegsp v1, v1
+; P9BE-DAG: vabsduw v1, v1, v[[VR3]]
+; P9BE-DAG: vabsduw v6, v6, v[[VR4]]
 ; P9BE:    vadduwm v1, v6, v1
 ; P9BE:    xxswapd v6, v1
 ; P9BE:    vadduwm v1, v1, v6
 ; P9BE:    xxspltw v6, v1, 1
 ; P9BE:    vadduwm v1, v1, v6
-; P9BE:    vextuwlx r7, r6, v1
-; P9BE:    ldux r8, r3, r4
+; P9BE:    vextuwlx r[[GR1:[0-9]+]], r5, v1
+; P9BE:    add r6, r[[GR1]], r6
+; P9BE:    ldux r[[GR2:[0-9]+]], r3, r4
 ; P9BE:    add r3, r3, r4
-; P9BE:    add r5, r7, r5
-; P9BE:    mtvsrd v1, r8
-; P9BE:    vperm v6, v4, v1, v3
-; P9BE:    vperm v1, v4, v1, v2
-; P9BE:    xvnegsp v6, v6
-; P9BE:    xvnegsp v1, v1
-; P9BE:    vabsduw v1, v1, v0
-; P9BE:    vabsduw v6, v6, v5
-; P9BE:    vadduwm v1, v6, v1
+; P9BE:    mtvsrd v1, r[[GR2]]
+; P9BE:    vperm v6, v3, v1, v2
+; P9BE:    vperm v1, v3, v1, v4
+; P9BE-DAG: xvnegsp v6, v6
+; P9BE-DAG: xvnegsp v1, v1
+; P9BE-DAG: vabsduw v1, v1, v[[VR4]]
+; P9BE-DAG: vabsduw v6, v6, v[[VR3]]
+; P9BE:    vadduwm v1, v1, v6
 ; P9BE:    xxswapd v6, v1
 ; P9BE:    vadduwm v1, v1, v6
 ; P9BE:    xxspltw v6, v1, 1
 ; P9BE:    vadduwm v1, v1, v6
-; P9BE:    vextuwlx r8, r6, v1
-; P9BE:    add r5, r8, r5
+; P9BE:    vextuwlx r7, r5, v1
+; P9BE:    add r6, r7, r6
 ; P9BE:    bdnz .LBB0_1
-; P9BE:    extsw r3, r5
+; P9BE:    extsw r3, r6
 ; P9BE:    blr
 entry:
   %idx.ext = sext i32 %i_stride_pix1 to i64
@@ -166,28 +166,24 @@
 ; Function Attrs: norecurse nounwind readonly
 define signext i32 @test_pre_inc_disable_2(i8* nocapture readonly %pix1, i8* nocapture readonly %pix2) {
 ; CHECK-LABEL: test_pre_inc_disable_2:
-; CHECK:    addis r5, r2,
-; CHECK:    addis r6, r2,
 ; CHECK:    lfd f0, 0(r3)
-; CHECK:    lfd f1, 0(r4)
-; CHECK:    xxlxor v0, v0, v0
-; CHECK:    addi r3, r5, .LCPI1_0@toc@l
-; CHECK:    addi r4, r6, .LCPI1_1@toc@l
-; CHECK:    lxvx v2, 0, r3
-; CHECK:    lxvx v3, 0, r4
-; CHECK:    xxpermdi v4, f0, f0, 2
-; CHECK:    xxpermdi v5, f1, f1, 2
-; CHECK:    vperm v1, v4, v0, v2
-; CHECK:    vperm v4, v0, v4, v3
-; CHECK:    vperm v2, v5, v0, v2
-; CHECK:    vperm v3, v0, v5, v3
-; CHECK:    xvnegsp v5, v1
-; CHECK:    xvnegsp v4, v4
-; CHECK:    xvnegsp v2, v2
-; CHECK:    xvnegsp v3, v3
-; CHECK:    vabsduw v3, v4, v3
-; CHECK:    vabsduw v2, v5, v2
-; CHECK:    vadduwm v2, v2, v3
+; CHECK:    addis r3, r2,
+; CHECK:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK:    lxvx v4, 0, r3
+; CHECK:    addis r3, r2,
+; CHECK:    xxpermdi v2, f0, f0, 2
+; CHECK:    lfd f0, 0(r4)
+; CHECK:    addi r3, r3, .LCPI1_1@toc@l
+; CHECK:    xxlxor v3, v3, v3
+; CHECK:    lxvx v0, 0, r3
+; CHECK:    xxpermdi v1, f0, f0, 2
+; CHECK:    vperm v5, v2, v3, v4
+; CHECK:    vperm v2, v3, v2, v0
+; CHECK:    vperm v0, v3, v1, v0
+; CHECK:    vperm v3, v1, v3, v4
+; CHECK:    vabsduw v2, v2, v0
+; CHECK:    vabsduw v3, v5, v3
+; CHECK:    vadduwm v2, v3, v2
 ; CHECK:    xxswapd v3, v2
 ; CHECK:    vadduwm v2, v2, v3
 ; CHECK:    xxspltw v3, v2, 2
@@ -197,28 +193,24 @@
 ; CHECK:    blr
 
 ; P9BE-LABEL: test_pre_inc_disable_2:
-; P9BE:    addis r5, r2,
-; P9BE:    addis r6, r2,
 ; P9BE:    lfd f0, 0(r3)
-; P9BE:    lfd f1, 0(r4)
-; P9BE:    xxlxor v5, v5, v5
-; P9BE:    addi r3, r5,
-; P9BE:    addi r4, r6,
-; P9BE:    lxvx v2, 0, r3
-; P9BE:    lxvx v3, 0, r4
-; P9BE:    xxlor v4, vs0, vs0
-; P9BE:    xxlor v0, vs1, vs1
-; P9BE:    vperm v1, v5, v4, v2
-; P9BE:    vperm v4, v5, v4, v3
-; P9BE:    vperm v2, v5, v0, v2
-; P9BE:    vperm v3, v5, v0, v3
-; P9BE:    xvnegsp v5, v1
-; P9BE:    xvnegsp v4, v4
-; P9BE:    xvnegsp v2, v2
-; P9BE:    xvnegsp v3, v3
-; P9BE:    vabsduw v3, v4, v3
-; P9BE:    vabsduw v2, v5, v2
-; P9BE:    vadduwm v2, v2, v3
+; P9BE:    addis r3, r2,
+; P9BE:    addi r3, r3,
+; P9BE:    lxvx v4, 0, r3
+; P9BE:    addis r3, r2,
+; P9BE:    addi r3, r3,
+; P9BE:    xxlor v2, vs0, vs0
+; P9BE:    lfd f0, 0(r4)
+; P9BE:    lxvx v0, 0, r3
+; P9BE:    xxlxor v3, v3, v3
+; P9BE:    xxlor v1, vs0, vs0
+; P9BE:    vperm v5, v3, v2, v4
+; P9BE:    vperm v2, v3, v2, v0
+; P9BE:    vperm v0, v3, v1, v0
+; P9BE:    vperm v3, v3, v1, v4
+; P9BE:    vabsduw v2, v2, v0
+; P9BE:    vabsduw v3, v5, v3
+; P9BE:    vadduwm v2, v3, v2
 ; P9BE:    xxswapd v3, v2
 ; P9BE:    vadduwm v2, v2, v3
 ; P9BE:    xxspltw v3, v2, 1
diff --git a/test/CodeGen/PowerPC/remove-implicit-use.mir b/test/CodeGen/PowerPC/remove-implicit-use.mir
new file mode 100644
index 0000000..9a70ce3
--- /dev/null
+++ b/test/CodeGen/PowerPC/remove-implicit-use.mir
@@ -0,0 +1,78 @@
+# RUN: llc -mtriple=powerpc64le-unknown-unknown -start-after=ppc-mi-peepholes \
+# RUN: -stop-before=ppc-expand-isel -verify-machineinstrs %s -o - | FileCheck %s
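+# The CMPW below carries a stale implicit $x4 operand; the CHECK lines in
+# the body verify it has been removed by the time ISEL expansion is reached.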
+--- |
+  ; ModuleID = 'a.ll'
+  source_filename = "a.c"   
+  target datalayout = "e-m:e-i64:64-n32:64"
+  target triple = "powerpc64le-unknown-linux-gnu"
+
+  ; Function Attrs: norecurse nounwind readnone
+  define signext i32 @test(i32 signext %a, i32 signext %b, i32 signext %c) local_unnamed_addr #0 {
+  entry:
+    %cmp = icmp sgt i32 %a, %b
+    %add = add nsw i32 %c, %b
+    %cond = select i1 %cmp, i32 %a, i32 %add
+    ret i32 %cond
+  }
+
+  
+  !llvm.module.flags = !{!0, !1}
+  !llvm.ident = !{!2}
+  
+  !0 = !{i32 1, !"wchar_size", i32 4}
+  !1 = !{i32 7, !"PIC Level", i32 2}
+  !2 = !{!"clang version 8.0.0 (trunk 347251)"}
+
+...
+---
+name:            test
+alignment:       4
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:         
+  - { reg: '$x3', virtual-reg: '' }
+  - { reg: '$x4', virtual-reg: '' }
+  - { reg: '$x5', virtual-reg: '' }
+frameInfo:       
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       0
+  offsetAdjustment: 0
+  maxAlignment:    0
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:           []
+constants:       []
+body:             |
+  bb.0.entry:
+    liveins: $x3, $x5
+
+    renamable $r4 = LI 0
+    renamable $r5 = nsw ADD4 killed renamable $r5, renamable $r5, implicit $x5
+    renamable $cr0 = CMPW renamable $r3, killed renamable $r4, implicit $x4
+    ; CHECK: ADD4
+    ; CHECK-NOT: implicit $x4
+    renamable $r3 = ISEL killed renamable $r3, killed renamable $r5, killed renamable $cr0gt, implicit $cr0, implicit $x3
+    renamable $x3 = EXTSW_32_64 killed renamable $r3
+    BLR8 implicit $lr8, implicit $rm, implicit killed $x3
+
+...
+
diff --git a/test/CodeGen/PowerPC/scalar_vector_test_1.ll b/test/CodeGen/PowerPC/scalar_vector_test_1.ll
index d8f44f1..5ad28e0 100644
--- a/test/CodeGen/PowerPC/scalar_vector_test_1.ll
+++ b/test/CodeGen/PowerPC/scalar_vector_test_1.ll
@@ -7,6 +7,7 @@
 ; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
 ; RUN:    -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
 
+
 ; Function Attrs: norecurse nounwind readonly
 define <2 x i64> @s2v_test1(i64* nocapture readonly %int64, <2 x i64> %vec) {
 ; P9LE-LABEL: s2v_test1:
diff --git a/test/CodeGen/PowerPC/scalar_vector_test_2.ll b/test/CodeGen/PowerPC/scalar_vector_test_2.ll
index da1b8bc..96c823b 100644
--- a/test/CodeGen/PowerPC/scalar_vector_test_2.ll
+++ b/test/CodeGen/PowerPC/scalar_vector_test_2.ll
@@ -65,12 +65,12 @@
 ; P9LE:       # %bb.0:
 ; P9LE-NEXT:    lfiwzx f0, 0, r3
 ; P9LE-NEXT:    lfiwzx f1, 0, r4
-; P9LE-NEXT:    mr r3, r5
 ; P9LE-NEXT:    xxpermdi vs0, f0, f0, 2
 ; P9LE-NEXT:    xxpermdi vs1, f1, f1, 2
 ; P9LE-NEXT:    xvsubsp vs0, vs0, vs1
 ; P9LE-NEXT:    xxsldwi vs0, vs0, vs0, 3
 ; P9LE-NEXT:    xscvspdpn f0, vs0
+; P9LE-NEXT:    mr r3, r5
 ; P9LE-NEXT:    stfs f0, 0(r5)
 ; P9LE-NEXT:    blr
 
@@ -78,11 +78,11 @@
 ; P9BE:       # %bb.0:
 ; P9BE-NEXT:    lfiwzx f0, 0, r3
 ; P9BE-NEXT:    lfiwzx f1, 0, r4
-; P9BE-NEXT:    mr r3, r5
 ; P9BE-NEXT:    xxsldwi vs0, f0, f0, 1
 ; P9BE-NEXT:    xxsldwi vs1, f1, f1, 1
 ; P9BE-NEXT:    xvsubsp vs0, vs0, vs1
 ; P9BE-NEXT:    xscvspdpn f0, vs0
+; P9BE-NEXT:    mr r3, r5
 ; P9BE-NEXT:    stfs f0, 0(r5)
 ; P9BE-NEXT:    blr
 
diff --git a/test/CodeGen/PowerPC/scalar_vector_test_4.ll b/test/CodeGen/PowerPC/scalar_vector_test_4.ll
index aaaf0ba..2a7e177 100644
--- a/test/CodeGen/PowerPC/scalar_vector_test_4.ll
+++ b/test/CodeGen/PowerPC/scalar_vector_test_4.ll
@@ -172,8 +172,8 @@
 ; P9LE-LABEL: s2v_test_f2:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    addi r3, r3, 4
-; P9LE-NEXT:    xxspltw v2, v2, 2
-; P9LE-NEXT:    lfiwzx f0, 0, r3
+; P9LE-DAG:     xxspltw v2, v2, 2
+; P9LE-DAG:     lfiwzx f0, 0, r3
 ; P9LE-NEXT:    xxpermdi v3, f0, f0, 2
 ; P9LE-NEXT:    vmrglw v2, v2, v3
 ; P9LE-NEXT:    blr
@@ -181,8 +181,8 @@
 ; P9BE-LABEL: s2v_test_f2:
 ; P9BE:       # %bb.0: # %entry
 ; P9BE:       addi r3, r3, 4
-; P9BE:       xxspltw v2, v2, 1
-; P9BE:       lfiwzx f0, 0, r3
+; P9BE-DAG:   xxspltw v2, v2, 1
+; P9BE-DAG:   lfiwzx f0, 0, r3
 ; P9BE-NEXT:  xxsldwi v3, f0, f0, 1
 ; P9BE:       vmrghw v2, v3, v2
 ; P9BE-NEXT:  blr
@@ -216,18 +216,18 @@
 ; P9LE-LABEL: s2v_test_f3:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    sldi r4, r7, 2
-; P9LE-NEXT:    xxspltw v2, v2, 2
 ; P9LE-NEXT:    lfiwzx f0, r3, r4
-; P9LE-NEXT:    xxpermdi v3, f0, f0, 2
+; P9LE-DAG:     xxspltw v2, v2, 2
+; P9LE-DAG:     xxpermdi v3, f0, f0, 2
 ; P9LE-NEXT:    vmrglw v2, v2, v3
 ; P9LE-NEXT:    blr
 
 ; P9BE-LABEL: s2v_test_f3:
 ; P9BE:       # %bb.0: # %entry
 ; P9BE:         sldi r4, r7, 2
-; P9BE:         xxspltw v2, v2, 1
 ; P9BE:         lfiwzx f0, r3, r4
-; P9BE-NEXT:    xxsldwi v3, f0, f0, 1
+; P9BE-DAG:     xxspltw v2, v2, 1
+; P9BE-DAG:     xxsldwi v3, f0, f0, 1
 ; P9BE:         vmrghw v2, v3, v2
 ; P9BE-NEXT:    blr
 
@@ -261,18 +261,18 @@
 ; P9LE-LABEL: s2v_test_f4:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    addi r3, r3, 4
-; P9LE-NEXT:    xxspltw v2, v2, 2
 ; P9LE-NEXT:    lfiwzx f0, 0, r3
-; P9LE-NEXT:    xxpermdi v3, f0, f0, 2
+; P9LE-DAG:     xxspltw v2, v2, 2
+; P9LE-DAG:     xxpermdi v3, f0, f0, 2
 ; P9LE-NEXT:    vmrglw v2, v2, v3
 ; P9LE-NEXT:    blr
 
 ; P9BE-LABEL: s2v_test_f4:
 ; P9BE:       # %bb.0: # %entry
 ; P9BE:         addi r3, r3, 4
-; P9BE:         xxspltw v2, v2, 1
 ; P9BE:         lfiwzx f0, 0, r3
-; P9BE-NEXT:    xxsldwi v3, f0, f0, 1
+; P9BE-DAG:     xxspltw v2, v2, 1
+; P9BE-DAG:     xxsldwi v3, f0, f0, 1
 ; P9BE:         vmrghw v2, v3, v2
 ; P9BE-NEXT:    blr
 
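The hunks above (and the similar ones in store_fptoi.ll below) swap `CHECK-NEXT` for `CHECK-DAG`: `-NEXT` pins an exact instruction order, while a run of adjacent `-DAG` directives accepts its matches in any order, which is the right tool when the scheduler may legally reorder independent instructions. A minimal illustration (hand-written input, not taken from this patch):

    ; CHECK:      lfs 0, 0(3)
    ; CHECK-DAG:  sldi 4, 5, 3
    ; CHECK-DAG:  xscvdpsxds 1, 0
    ; CHECK-NEXT: stxsdx 1, 4, 4

This passes whether the scheduler emits the sldi or the conversion first; the trailing `CHECK-NEXT` still anchors the store to the line immediately after the later of the two `-DAG` matches, so the checks stay tight without hard-coding one schedule.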
diff --git a/test/CodeGen/PowerPC/select-i1-vs-i1.ll b/test/CodeGen/PowerPC/select-i1-vs-i1.ll
index a2df182..51bd26d 100644
--- a/test/CodeGen/PowerPC/select-i1-vs-i1.ll
+++ b/test/CodeGen/PowerPC/select-i1-vs-i1.ll
@@ -928,10 +928,8 @@
 ; CHECK-LABEL: @testv4floateq
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 3, 4
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
-; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
-; CHECK: bc 12, [[REG1]], .LBB[[BB1:[0-9_]+]]
-; CHECK: vmr 3, 2
-; CHECK: .LBB[[BB1]]
+; CHECK: creqv [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK: bclr 12, [[REG1]], 0
 ; CHECK: vmr 2, 3
 ; CHECK: blr
 }
@@ -1065,7 +1063,7 @@
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 3, 4
 ; CHECK: bc 12, 2, .LBB[[BB:[0-9_]+]]
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
-; CHECK: bc 4, 2, .LBB[[BB]]
+; CHECK: bclr 12, 2, 0
 ; CHECK: .LBB[[BB]]:
 ; CHECK: vmr 2, 3
 ; CHECK: blr
@@ -1083,7 +1081,7 @@
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 3, 4
 ; CHECK: bc 4, 2, .LBB[[BB:[0-9_]+]]
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
-; CHECK: bc 12, 2, .LBB[[BB]]
+; CHECK: bclr 4, 2, 0
 ; CHECK: .LBB[[BB]]:
 ; CHECK: vmr 2, 3
 ; CHECK: blr
@@ -1134,10 +1132,8 @@
 ; CHECK-LABEL: @testv2doubleeq
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 3, 4
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
-; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
-; CHECK: bc 12, [[REG1]], .LBB[[BB55:[0-9_]+]]
-; CHECK: vmr 3, 2
-; CHECK: .LBB[[BB55]]
+; CHECK: creqv [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK: bclr 12, [[REG1]], 0
 ; CHECK: vmr 2, 3
 ; CHECK: blr
 }
@@ -1188,7 +1184,7 @@
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 3, 4
 ; CHECK: bc 4, 2, .LBB[[BB:[0-9_]+]]
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
-; CHECK: bc 12, 2, .LBB[[BB]]
+; CHECK: bclr 4, 2, 0
 ; CHECK: .LBB[[BB]]
 ; CHECK: vmr 2, 3
 ; CHECK: blr
@@ -1206,7 +1202,7 @@
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 3, 4
 ; CHECK: bc 12, 2, .LBB[[BB:[0-9_]+]]
 ; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
-; CHECK: bc 4, 2, .LBB[[BB]]
+; CHECK: bclr 12, 2, 0
 ; CHECK: .LBB[[BB]]
 ; CHECK: vmr 2, 3
 ; CHECK: blr
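The rewritten checks in this file reflect a branch-to-return fold: `bc BO, BI, target` is the generic conditional branch, and `bclr BO, BI, BH` applies the same condition to a return through the link register, so a conditional branch whose only job is to skip to a bare `blr` collapses into one instruction. Schematically (labels are illustrative, not from the patch):

        bc 12, 2, .LBB0_2    # taken when cr0's EQ bit is set
        vmr 2, 3
    .LBB0_2:
        blr

    becomes

        bclr 12, 2, 0        # conditional return; no target block needed
        vmr 2, 3
        blr

In the BO encoding, 12 means "branch when the CR bit is set" and 4 means "branch when it is clear", which is why some of the updated checks flip between 4 and 12 (returning early needs the opposite condition from branching to the tail), while the first and fourth hunks instead invert the condition computation itself, crxor becoming creqv.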
diff --git a/test/CodeGen/PowerPC/setcr_bc3.mir b/test/CodeGen/PowerPC/setcr_bc3.mir
new file mode 100644
index 0000000..49df90a
--- /dev/null
+++ b/test/CodeGen/PowerPC/setcr_bc3.mir
@@ -0,0 +1,108 @@
+# RUN: llc -verify-machineinstrs -start-before=ppc-pre-emit-peephole %s -o - | FileCheck %s
+--- |
+  target datalayout = "e-m:e-i64:64-n32:64"
+  target triple = "powerpc64le-unknown-linux-gnu"
+
+  declare signext i32 @callee(i32 signext)
+
+  define signext i32 @func(i32 signext %v) {
+    ret i32 0
+  }
+...
+---
+name:            func
+alignment:       4
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+registers:
+liveins:
+  - { reg: '$x3', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       48
+  offsetAdjustment: 0
+  maxAlignment:    0
+  adjustsStack:    true
+  hasCalls:        true
+  stackProtector:  ''
+  maxCallFrameSize: 32
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:
+  - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, stack-id: 0,
+      callee-saved-register: '$x30', callee-saved-restored: true, debug-info-variable: '',
+      debug-info-expression: '', debug-info-location: '' }
+stack:
+constants:
+
+body:             |
+  bb.0:
+    successors: %bb.2(0x30000000), %bb.1(0x50000000)
+    liveins: $x3, $x30
+
+    ; bc should be converted into b
+    ; CHECK-LABEL: func
+    ; CHECK: # %bb.1
+    ; CHECK: creqv
+    ; CHECK-NOT: bc
+    ; CHECK: b .LBB0_3
+    ; CHECK: .LBB0_2
+
+    $x0 = MFLR8 implicit $lr8
+    STD killed $x0, 16, $x1
+    $x1 = STDU $x1, -48, $x1
+    STD killed $x30, 32, $x1 :: (store 8 into %fixed-stack.0, align 16)
+    $x30 = OR8 $x3, $x3
+    BL8_NOP @callee, csr_svr464_altivec, implicit-def dead $lr8, implicit $rm, implicit killed $x3, implicit $x2, implicit-def $r1, implicit-def $x3
+    renamable $cr0 = CMPLWI renamable $r3, 0
+    BCC 76, killed renamable $cr0, %bb.2
+
+  bb.1:
+    successors: %bb.5(0x40000000), %bb.2(0x40000000)
+    liveins: $x3
+
+    renamable $x3 = EXTSW_32_64 killed renamable $r3, implicit $x3
+    BL8_NOP @callee, csr_svr464_altivec, implicit-def dead $lr8, implicit $rm, implicit killed $x3, implicit $x2, implicit-def $r1, implicit-def $x3
+    renamable $cr2un = CRSET
+    $cr2gt = CROR $cr2un, $cr2un
+    $x30 = OR8 killed $x3, $x3
+    BC killed renamable $cr2un, %bb.5
+    B %bb.2
+
+  bb.4:
+    successors: %bb.5(0x80000000)
+    liveins: $x30
+
+    $x3 = LI8 0
+    BL8_NOP @callee, csr_svr464_altivec, implicit-def dead $lr8, implicit $rm, implicit killed $x3, implicit $x2, implicit-def $r1, implicit-def dead $x3
+
+  bb.5:
+    liveins: $x30, $cr0gt
+
+    renamable $x3 = EXTSW_32_64 killed renamable $r30, implicit $x30
+    $x30 = LD 32, $x1 :: (load 8 from %fixed-stack.0, align 16)
+    $x1 = ADDI8 $x1, 48
+    $x0 = LD 16, $x1
+    MTLR8 killed $x0, implicit-def $lr8
+    BLR8 implicit $lr8, implicit $rm, implicit killed $x3
+
+  bb.2:
+    successors: %bb.5(0x40000000), %bb.4(0x40000000)
+    liveins: $x30
+
+    renamable $cr0 = CMPWI renamable $r30, -1
+    BCn killed renamable $cr0gt, %bb.4
+    B %bb.5
+
+...
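As the comment in the body says, the interesting assertion here is that the conditional branch disappears: the CR bit consumed by the BC comes from a CRSET, so the pre-emit peephole can prove the branch always taken and emit a plain `b`. Two notes for reading the CHECK lines: `crset bx` is an extended mnemonic for `creqv bx, bx, bx` (x XNOR x is always 1), which is why the bit-setting instruction is matched as `creqv`; and the `CHECK-NOT: bc` between it and the unconditional `b` is what pins the transformation. Schematically (labels illustrative):

        creqv 4*cr2+un, 4*cr2+un, 4*cr2+un   # bit known set
        bc 12, 4*cr2+un, .LBB0_3             # before the peephole
    =>
        creqv 4*cr2+un, 4*cr2+un, 4*cr2+un
        b .LBB0_3                            # condition provably true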
diff --git a/test/CodeGen/PowerPC/stack-realign.ll b/test/CodeGen/PowerPC/stack-realign.ll
index d92f93b..21de346 100644
--- a/test/CodeGen/PowerPC/stack-realign.ll
+++ b/test/CodeGen/PowerPC/stack-realign.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
-; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -disable-fp-elim < %s | FileCheck -check-prefix=CHECK-FP %s
-; RUN: llc -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim < %s | FileCheck -check-prefix=CHECK-32 %s
-; RUN: llc -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim -relocation-model=pic < %s | FileCheck -check-prefix=CHECK-32-PIC %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -frame-pointer=all < %s | FileCheck -check-prefix=CHECK-FP %s
+; RUN: llc -mtriple=powerpc-unknown-linux-gnu -frame-pointer=all < %s | FileCheck -check-prefix=CHECK-32 %s
+; RUN: llc -mtriple=powerpc-unknown-linux-gnu -frame-pointer=all -relocation-model=pic < %s | FileCheck -check-prefix=CHECK-32-PIC %s
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
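The RUN-line change here tracks an llc option rename rather than a behavioural change: the boolean `-disable-fp-elim` was replaced by `-frame-pointer=<value>`, where the accepted values are `all`, `non-leaf`, and `none`, and `all` reproduces the old flag's behaviour of keeping a frame pointer in every function. For example (file name illustrative):

    llc -mtriple=powerpc64-unknown-linux-gnu -frame-pointer=all foo.ll    # formerly -disable-fp-elim
    llc -mtriple=powerpc64-unknown-linux-gnu -frame-pointer=none foo.ll   # frame pointer may be eliminated

The same one-for-one substitution appears in structsinmem.ll and structsinregs.ll below.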
diff --git a/test/CodeGen/PowerPC/store_fptoi.ll b/test/CodeGen/PowerPC/store_fptoi.ll
index bac874a..0978061 100644
--- a/test/CodeGen/PowerPC/store_fptoi.ll
+++ b/test/CodeGen/PowerPC/store_fptoi.ll
@@ -296,8 +296,8 @@
 
 ; CHECK-LABEL: spConv2sdw_x
 ; CHECK: lfs [[LD:[0-9]+]], 0(3)
-; CHECK-NEXT: sldi [[REG:[0-9]+]], 5, 3
-; CHECK-NEXT: xscvdpsxds [[CONV:[0-9]+]], [[LD]]
+; CHECK-DAG:  sldi [[REG:[0-9]+]], 5, 3
+; CHECK-DAG:  xscvdpsxds [[CONV:[0-9]+]], [[LD]]
 ; CHECK-NEXT: stxsdx [[CONV]], 4, [[REG]]
 ; CHECK-NEXT: blr
 
@@ -322,8 +322,8 @@
 
 ; CHECK-LABEL: spConv2sw_x
 ; CHECK: lfs [[LD:[0-9]+]], 0(3)
-; CHECK-NEXT: sldi [[REG:[0-9]+]], 5, 2
-; CHECK-NEXT: xscvdpsxws [[CONV:[0-9]+]], [[LD]]
+; CHECK-DAG:  sldi [[REG:[0-9]+]], 5, 2
+; CHECK-DAG:  xscvdpsxws [[CONV:[0-9]+]], [[LD]]
 ; CHECK-NEXT: stfiwx [[CONV]], 4, [[REG]]
 ; CHECK-NEXT: blr
 
@@ -348,8 +348,8 @@
 
 ; CHECK-LABEL: spConv2shw_x
 ; CHECK: lfs [[LD:[0-9]+]], 0(3)
-; CHECK: sldi [[REG:[0-9]+]], 5, 1
-; CHECK: xscvdpsxws [[CONV:[0-9]+]], [[LD]]
+; CHECK-DAG:  sldi [[REG:[0-9]+]], 5, 1
+; CHECK-DAG:  xscvdpsxws [[CONV:[0-9]+]], [[LD]]
 ; CHECK-NEXT: stxsihx [[CONV]], 4, [[REG]]
 ; CHECK-NEXT: blr
 
@@ -680,8 +680,8 @@
 
 ; CHECK-LABEL: spConv2udw_x
 ; CHECK: lfs [[LD:[0-9]+]], 0(3)
-; CHECK-NEXT: sldi [[REG:[0-9]+]], 5, 3
-; CHECK-NEXT: xscvdpuxds [[CONV:[0-9]+]], [[LD]]
+; CHECK-DAG:  sldi [[REG:[0-9]+]], 5, 3
+; CHECK-DAG:  xscvdpuxds [[CONV:[0-9]+]], [[LD]]
 ; CHECK-NEXT: stxsdx [[CONV]], 4, [[REG]]
 ; CHECK-NEXT: blr
 
@@ -706,8 +706,8 @@
 
 ; CHECK-LABEL: spConv2uw_x
 ; CHECK: lfs [[LD:[0-9]+]], 0(3)
-; CHECK-NEXT: sldi [[REG:[0-9]+]], 5, 2
-; CHECK-NEXT: xscvdpuxws [[CONV:[0-9]+]], [[LD]]
+; CHECK-DAG:  sldi [[REG:[0-9]+]], 5, 2
+; CHECK-DAG:  xscvdpuxws [[CONV:[0-9]+]], [[LD]]
 ; CHECK-NEXT: stfiwx [[CONV]], 4, [[REG]]
 ; CHECK-NEXT: blr
 
@@ -732,8 +732,8 @@
 
 ; CHECK-LABEL: spConv2uhw_x
 ; CHECK: lfs [[LD:[0-9]+]], 0(3)
-; CHECK: sldi [[REG:[0-9]+]], 5, 1
-; CHECK: xscvdpuxws [[CONV:[0-9]+]], [[LD]]
+; CHECK-DAG:  sldi [[REG:[0-9]+]], 5, 1
+; CHECK-DAG:  xscvdpuxws [[CONV:[0-9]+]], [[LD]]
 ; CHECK-NEXT: stxsihx [[CONV]], 4, [[REG]]
 ; CHECK-NEXT: blr
 
diff --git a/test/CodeGen/PowerPC/structsinmem.ll b/test/CodeGen/PowerPC/structsinmem.ll
index bbe8289..b994aae 100644
--- a/test/CodeGen/PowerPC/structsinmem.ll
+++ b/test/CodeGen/PowerPC/structsinmem.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mcpu=ppc64 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=ppc64 -O0 -frame-pointer=all -fast-isel=false < %s | FileCheck %s
 
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/structsinregs.ll b/test/CodeGen/PowerPC/structsinregs.ll
index 52976ca..08f8ba9 100644
--- a/test/CodeGen/PowerPC/structsinregs.ll
+++ b/test/CodeGen/PowerPC/structsinregs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mcpu=ppc64 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=ppc64 -O0 -frame-pointer=all -fast-isel=false < %s | FileCheck %s
 
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/stwu-sched.ll b/test/CodeGen/PowerPC/stwu-sched.ll
index a8a8896..1c5cf0b 100644
--- a/test/CodeGen/PowerPC/stwu-sched.ll
+++ b/test/CodeGen/PowerPC/stwu-sched.ll
@@ -11,8 +11,8 @@
 ; Function Attrs: norecurse nounwind writeonly
 define void @initCombList(%0* nocapture, i32 signext) local_unnamed_addr #0 {
 ; CHECK-LABEL: initCombList:
-; CHECK:   addi 3, 3, -8
-; CHECK-NEXT: stwu 5, 64(4)
+; CHECK: addi 4, 4, -8
+; CHECK: stwu 5, 64(3)
 
 ; CHECK-ITIN-LABEL: initCombList:
 ; CHECK-ITIN: stwu 5, 64(4)
diff --git a/test/CodeGen/PowerPC/swaps-le-6.ll b/test/CodeGen/PowerPC/swaps-le-6.ll
index 0811287..a746721 100644
--- a/test/CodeGen/PowerPC/swaps-le-6.ll
+++ b/test/CodeGen/PowerPC/swaps-le-6.ll
@@ -9,7 +9,7 @@
 
 ; RUN: llc -relocation-model=pic -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu -O3 \
 ; RUN:   -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs \
-; RUN:   -mattr=-power9-vector < %s | FileCheck %s
+; RUN:   -mattr=-power9-vector < %s | FileCheck %s --check-prefix=CHECK-P9-NOVECTOR
 
 ; These tests verify that VSX swap optimization works when loading a scalar
 ; into a vector register.
@@ -31,18 +31,29 @@
 ; CHECK:     stxvd2x vs0, 0, r3
 ; CHECK:     blr
 ;
+; CHECK-P9-NOVECTOR-LABEL: bar0:
+; CHECK-P9-NOVECTOR:   # %bb.0: # %entry
+; CHECK-P9-NOVECTOR:     addis r3, r2, .LC0@toc@ha
+; CHECK-P9-NOVECTOR:     ld r3, .LC0@toc@l(r3)
+; CHECK-P9-NOVECTOR:     addis r3, r2, .LC1@toc@ha
+; CHECK-P9-NOVECTOR:     addis r3, r2, .LC2@toc@ha
+; CHECK-P9-NOVECTOR:     ld r3, .LC2@toc@l(r3)
+; CHECK-P9-NOVECTOR:     xxpermdi vs0, vs1, vs0, 1
+; CHECK-P9-NOVECTOR:     stxvd2x vs0, 0, r3
+; CHECK-P9-NOVECTOR:     blr
+;
 ; CHECK-P9-LABEL: bar0:
 ; CHECK-P9:   # %bb.0: # %entry
 ; CHECK-P9:     addis r3, r2, .LC0@toc@ha
-; CHECK-P9:     addis r4, r2, .LC1@toc@ha
 ; CHECK-P9:     ld r3, .LC0@toc@l(r3)
-; CHECK-P9:     ld r4, .LC1@toc@l(r4)
-; CHECK-P9:     lfd f0, 0(r3)
-; CHECK-P9:     lxvx vs1, 0, r4
+; CHECK-P9:     lxvx vs0, 0, r3
+; CHECK-P9:     addis r3, r2, .LC1@toc@ha
+; CHECK-P9:     ld r3, .LC1@toc@l(r3)
+; CHECK-P9:     lfd f1, 0(r3)
 ; CHECK-P9:     addis r3, r2, .LC2@toc@ha
 ; CHECK-P9:     ld r3, .LC2@toc@l(r3)
-; CHECK-P9:     xxpermdi vs0, f0, f0, 2
-; CHECK-P9:     xxpermdi vs0, vs1, vs0, 1
+; CHECK-P9:     xxpermdi vs1, f1, f1, 2
+; CHECK-P9:     xxpermdi vs0, vs0, vs1, 1
 ; CHECK-P9:     stxvx vs0, 0, r3
 ; CHECK-P9:     blr
 entry:
@@ -65,18 +76,29 @@
 ; CHECK:     stxvd2x vs0, 0, r3
 ; CHECK:     blr
 ;
+; CHECK-P9-NOVECTOR-LABEL: bar1:
+; CHECK-P9-NOVECTOR:   # %bb.0: # %entry
+; CHECK-P9-NOVECTOR:     addis r3, r2, .LC0@toc@ha
+; CHECK-P9-NOVECTOR:     ld r3, .LC0@toc@l(r3)
+; CHECK-P9-NOVECTOR:     addis r3, r2, .LC1@toc@ha
+; CHECK-P9-NOVECTOR:     addis r3, r2, .LC2@toc@ha
+; CHECK-P9-NOVECTOR:     ld r3, .LC2@toc@l(r3)
+; CHECK-P9-NOVECTOR:     xxmrghd vs0, vs0, vs1
+; CHECK-P9-NOVECTOR:     stxvd2x vs0, 0, r3
+; CHECK-P9-NOVECTOR:     blr
+;
 ; CHECK-P9-LABEL: bar1:
 ; CHECK-P9:   # %bb.0: # %entry
 ; CHECK-P9:     addis r3, r2, .LC0@toc@ha
-; CHECK-P9:     addis r4, r2, .LC1@toc@ha
 ; CHECK-P9:     ld r3, .LC0@toc@l(r3)
-; CHECK-P9:     ld r4, .LC1@toc@l(r4)
-; CHECK-P9:     lfd f0, 0(r3)
-; CHECK-P9:     lxvx vs1, 0, r4
+; CHECK-P9:     lxvx vs0, 0, r3
+; CHECK-P9:     addis r3, r2, .LC1@toc@ha
+; CHECK-P9:     ld r3, .LC1@toc@l(r3)
+; CHECK-P9:     lfd f1, 0(r3)
 ; CHECK-P9:     addis r3, r2, .LC2@toc@ha
 ; CHECK-P9:     ld r3, .LC2@toc@l(r3)
-; CHECK-P9:     xxpermdi vs0, f0, f0, 2
-; CHECK-P9:     xxmrgld vs0, vs0, vs1
+; CHECK-P9:     xxpermdi vs1, f1, f1, 2
+; CHECK-P9:     xxmrgld vs0, vs1, vs0
 ; CHECK-P9:     stxvx vs0, 0, r3
 ; CHECK-P9:     blr
 entry:
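The first hunk gives the pwr9 run with `-mattr=-power9-vector` its own prefix instead of reusing plain CHECK. Recall that `--check-prefix` replaces FileCheck's default `CHECK` prefix rather than adding to it:

    ; RUN: ... | FileCheck %s                                    <- consults ; CHECK: lines
    ; RUN: ... | FileCheck %s --check-prefix=CHECK-P9-NOVECTOR   <- consults only the new prefix

so once the RUN line names the new prefix, the bare `; CHECK:` blocks are invisible to that run and the dedicated `CHECK-P9-NOVECTOR` blocks added above are required. (`--check-prefixes=CHECK,CHECK-P9-NOVECTOR` would have been the alternative for matching both.)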
diff --git a/test/CodeGen/PowerPC/toc-float.ll b/test/CodeGen/PowerPC/toc-float.ll
index ff892e0..5ecc6a5 100644
--- a/test/CodeGen/PowerPC/toc-float.ll
+++ b/test/CodeGen/PowerPC/toc-float.ll
@@ -6,9 +6,10 @@
 define double @doubleConstant1() {
   ret double 1.400000e+01
 
-; CHECK-LABEL: doubleConstant1:
+; CHECK-P9-LABEL: doubleConstant1:
 ; CHECK-P9: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P9: lfs {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
+; CHECK-P8-LABEL: doubleConstant1:
 ; CHECK-P8: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P8: lfs {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
 }
@@ -18,9 +19,10 @@
 define double @doubleConstant2() {
   ret double 2.408904e+01
 
-; CHECK-LABEL: doubleConstant2:
+; CHECK-P9-LABEL: doubleConstant2:
 ; CHECK-P9: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P9: lfd {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
+; CHECK-P8-LABEL: doubleConstant2:
 ; CHECK-P8: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P8: lfd {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
 }
@@ -32,9 +34,10 @@
   %2 = fadd float %1, 0x400B333340000000
   ret float %2
 
-; CHECK-LABEL: floatConstantArray 
+; CHECK-P9-LABEL: floatConstantArray
 ; CHECK-P9: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha+[[REG2:[0-9]+]]
 ; CHECK-P9: lfs {{[0-9]+}}, [[VAR]]@toc@l+[[REG2]]([[REG1]])
+; CHECK-P8-LABEL: floatConstantArray
 ; CHECK-P8: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P8: addi [[REG2:[0-9]+]], [[REG1]], [[VAR]]@toc@l
 ; CHECK-P8: lfs {{[0-9]+}}, 12([[REG2]])
@@ -43,9 +46,10 @@
 define float @floatConstant() {
   ret float 0x400470A3E0000000
 
-; CHECK-LABEL: floatConstant:
+; CHECK-P9-LABEL: floatConstant:
 ; CHECK-P9: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P9: lfs {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
+; CHECK-P8-LABEL: floatConstant:
 ; CHECK-P8: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P8: lfs {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
 }
@@ -59,9 +63,10 @@
   %2 = fadd double %1, 6.880000e+00
   ret double %2
 
-; CHECK-LABEL: doubleConstantArray
+; CHECK-P9-LABEL: doubleConstantArray
 ; CHECK-P9: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha+[[REG2:[0-9]+]]
 ; CHECK-P9: lfd {{[0-9]+}}, [[VAR]]@toc@l+[[REG2]]([[REG1]])
+; CHECK-P8-LABEL: doubleConstantArray
 ; CHECK-P8: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P8: addi [[REG2:[0-9]+]], [[REG1]], [[VAR]]@toc@l
 ; CHECK-P8: lfd {{[0-9]+}}, 24([[REG2]])
@@ -75,12 +80,13 @@
   ret double %2
 
 ; Access an element with an offset that doesn't fit in the displacement field of LFD. 
-; CHECK-LABEL: doubleLargeConstantArray
+; CHECK-P9-LABEL: doubleLargeConstantArray
 ; CHECK-P9: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P9: li [[REG2:[0-9]+]], 0 
 ; CHECK-P9: addi [[REG3:[0-9]+]], [[REG1]], [[VAR:[a-z0-9A-Z_.]+]]@toc@l
 ; CHECK-P9: ori [[REG4:[0-9]+]], [[REG2]], 32768 
 ; CHECK-P9: lfdx {{[0-9]+}}, [[REG3]], [[REG4]] 
+; CHECK-P8-LABEL: doubleLargeConstantArray
 ; CHECK-P8: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P8: li [[REG2:[0-9]+]], 0 
 ; CHECK-P8: addi [[REG3:[0-9]+]], [[REG1]], [[VAR:[a-z0-9A-Z_.]+]]@toc@l
@@ -95,10 +101,11 @@
   %0 = load <4 x i32>, <4 x i32>* getelementptr inbounds ([10 x <4 x i32>], [10 x <4 x i32>]* @vec_arr, i64 0, i64 2), align 16
   ret <4 x i32> %0
 
-; CHECK-LABEL: vectorArray
+; CHECK-P9-LABEL: vectorArray
 ; CHECK-P9: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P9: ld [[REG2:[0-9]+]], [[VAR]]@toc@l([[REG1]])
 ; CHECK-P9: lxv {{[0-9]+}}, 32([[REG2]])
+; CHECK-P8-LABEL: vectorArray
 ; CHECK-P8: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-P8: ld [[REG2:[0-9]+]], [[VAR]]@toc@l([[REG1]])
 ; CHECK-P8: addi [[REG3:[0-9]+]], [[REG2]], 32
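The LABEL churn in this file follows from the same prefix rule: the RUN lines (outside this hunk) evidently select `CHECK-P9` or `CHECK-P8` explicitly, so a bare `; CHECK-LABEL:` was never consulted by either run and the per-function checks were not anchored to their functions at all. Splitting each label per prefix, e.g.

    ; CHECK-P9-LABEL: doubleConstant1:
    ; CHECK-P8-LABEL: doubleConstant1:

restores CHECK-LABEL's partitioning, so a match leaking in from a neighbouring function can no longer satisfy a check.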
diff --git a/test/CodeGen/PowerPC/vec-itofp.ll b/test/CodeGen/PowerPC/vec-itofp.ll
index 852b7c8..d9528e2 100644
--- a/test/CodeGen/PowerPC/vec-itofp.ll
+++ b/test/CodeGen/PowerPC/vec-itofp.ll
@@ -16,12 +16,12 @@
   ret void
 ; CHECK-P9-LABEL: @test8
 ; CHECK-P9: vperm
-; CHECK-P9: vperm
-; CHECK-P9: vperm
+; CHECK-P9: xvcvuxddp
 ; CHECK-P9: vperm
 ; CHECK-P9: xvcvuxddp
+; CHECK-P9: vperm
 ; CHECK-P9: xvcvuxddp
-; CHECK-P9: xvcvuxddp
+; CHECK-P9: vperm
 ; CHECK-P9: xvcvuxddp
 ; CHECK-P8-LABEL: @test8
 ; CHECK-P8: vperm
@@ -42,8 +42,8 @@
   ret void
 ; CHECK-P9-LABEL: @test4
 ; CHECK-P9: vperm
-; CHECK-P9: vperm
 ; CHECK-P9: xvcvuxddp
+; CHECK-P9: vperm
 ; CHECK-P9: xvcvuxddp
 ; CHECK-P8-LABEL: @test4
 ; CHECK-P8: vperm
@@ -113,16 +113,16 @@
   ret void
 ; CHECK-P9-LABEL: @stest8
 ; CHECK-P9: vperm
-; CHECK-P9: vperm
-; CHECK-P9: vperm
-; CHECK-P9: vperm
-; CHECK-P9: vextsh2d
-; CHECK-P9: vextsh2d
-; CHECK-P9: vextsh2d
 ; CHECK-P9: vextsh2d
 ; CHECK-P9: xvcvsxddp
+; CHECK-P9: vperm
+; CHECK-P9: vextsh2d
 ; CHECK-P9: xvcvsxddp
+; CHECK-P9: vperm
+; CHECK-P9: vextsh2d
 ; CHECK-P9: xvcvsxddp
+; CHECK-P9: vperm
+; CHECK-P9: vextsh2d
 ; CHECK-P9: xvcvsxddp
 }
 
@@ -134,10 +134,10 @@
   ret void
 ; CHECK-P9-LABEL: @stest4
 ; CHECK-P9: vperm
-; CHECK-P9: vperm
-; CHECK-P9: vextsh2d
 ; CHECK-P9: vextsh2d
 ; CHECK-P9: xvcvsxddp
+; CHECK-P9: vperm
+; CHECK-P9: vextsh2d
 ; CHECK-P9: xvcvsxddp
 }
 
diff --git a/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll b/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll
index 9977410..631e3c4 100644
--- a/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll
@@ -36,35 +36,35 @@
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xxsldwi vs1, v2, v2, 3
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs1
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    li r3, 0
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrd f0, r3
-; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mtvsrd v2, r3
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
 ; CHECK-BE-NEXT:    vmrghh v2, v2, v3
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -111,62 +111,62 @@
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-P9-NEXT:    xxswapd vs1, v2
-; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, v2
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    mfvsrwz r5, f3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f0
-; CHECK-P9-NEXT:    mfvsrwz r4, f1
-; CHECK-P9-NEXT:    mfvsrwz r6, f2
-; CHECK-P9-NEXT:    mtvsrd f2, r5
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, v2
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, v2
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglh v2, v4, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-BE-NEXT:    xxswapd vs1, v2
-; CHECK-BE-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, v2
 ; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
 ; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    mfvsrwz r5, f3
-; CHECK-BE-NEXT:    sldi r5, r5, 48
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mfvsrwz r6, f2
-; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    xxswapd vs0, v2
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xscvspdpn f0, v2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
-; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    vmrghh v2, v4, v2
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -238,119 +238,119 @@
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs3, vs1
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs6, vs0
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    xxswapd vs2, vs1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    xscvspdpn f2, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd vs1, vs0
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    mfvsrwz r5, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f2
-; CHECK-P9-NEXT:    mfvsrwz r4, f3
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f6, r9
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
-; CHECK-P9-NEXT:    vmrglh v4, v1, v0
-; CHECK-P9-NEXT:    vmrglh v5, v6, v7
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
 ; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs3, vs1
-; CHECK-BE-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs6, vs0
-; CHECK-BE-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
 ; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r5, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
-; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    mfvsrwz r3, f2
-; CHECK-BE-NEXT:    mfvsrwz r4, f3
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v6, r9
+; CHECK-BE-NEXT:    xxswapd vs2, vs1
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
-; CHECK-BE-NEXT:    vmrghh v4, v1, v0
-; CHECK-BE-NEXT:    vmrghh v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -481,258 +481,234 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 16(r4)
-; CHECK-P9-NEXT:    lxv vs3, 0(r4)
-; CHECK-P9-NEXT:    lxv vs0, 48(r4)
-; CHECK-P9-NEXT:    lxv vs1, 32(r4)
-; CHECK-P9-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-P9-NEXT:    xxswapd vs5, vs3
-; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-P9-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-P9-NEXT:    xxswapd vs8, vs2
-; CHECK-P9-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-P9-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs11, vs1
-; CHECK-P9-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    lxv vs1, 0(r4)
+; CHECK-P9-NEXT:    lxv vs3, 16(r4)
+; CHECK-P9-NEXT:    xscvspdpn f5, vs1
+; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
+; CHECK-P9-NEXT:    xscvspdpn f8, vs3
+; CHECK-P9-NEXT:    xxswapd vs4, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs1, vs1, 1
 ; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
+; CHECK-P9-NEXT:    xscvdpsxws f5, f5
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f8, f8
+; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 3
+; CHECK-P9-NEXT:    xxswapd vs7, vs3
 ; CHECK-P9-NEXT:    xscvspdpn f6, vs6
+; CHECK-P9-NEXT:    xxsldwi vs3, vs3, vs3, 1
 ; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xscvspdpn f8, vs8
+; CHECK-P9-NEXT:    xscvspdpn f3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f4, f4
+; CHECK-P9-NEXT:    xscvdpsxws f6, f6
+; CHECK-P9-NEXT:    mfvsrwz r5, f5
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    mtvsrd f5, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f8
+; CHECK-P9-NEXT:    mtvsrd f8, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f2
+; CHECK-P9-NEXT:    lxv vs0, 32(r4)
+; CHECK-P9-NEXT:    xxsldwi vs9, vs0, vs0, 3
+; CHECK-P9-NEXT:    xxswapd vs10, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f9, vs9
 ; CHECK-P9-NEXT:    xscvspdpn f10, vs10
-; CHECK-P9-NEXT:    xscvspdpn f11, vs11
-; CHECK-P9-NEXT:    xscvspdpn f12, vs12
-; CHECK-P9-NEXT:    xscvspdpn f13, vs13
-; CHECK-P9-NEXT:    xscvspdpn v2, v2
-; CHECK-P9-NEXT:    xscvspdpn v3, v3
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
 ; CHECK-P9-NEXT:    xscvdpsxws f9, f9
 ; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
-; CHECK-P9-NEXT:    mfvsrwz r4, f3
-; CHECK-P9-NEXT:    mfvsrwz r5, f2
-; CHECK-P9-NEXT:    mfvsrwz r12, f1
-; CHECK-P9-NEXT:    mfvsrwz r0, f0
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r9, f7
-; CHECK-P9-NEXT:    mfvsrwz r10, f8
-; CHECK-P9-NEXT:    mfvsrwz r11, f9
-; CHECK-P9-NEXT:    mfvsrwz r30, f10
-; CHECK-P9-NEXT:    mfvsrwz r29, f11
-; CHECK-P9-NEXT:    mfvsrwz r28, f12
-; CHECK-P9-NEXT:    mfvsrwz r27, f13
-; CHECK-P9-NEXT:    mfvsrwz r26, v2
-; CHECK-P9-NEXT:    mfvsrwz r25, v3
-; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    mtvsrd f2, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f1
 ; CHECK-P9-NEXT:    mtvsrd f1, r5
-; CHECK-P9-NEXT:    mtvsrd f8, r12
-; CHECK-P9-NEXT:    mtvsrd f9, r0
-; CHECK-P9-NEXT:    mtvsrd f2, r6
-; CHECK-P9-NEXT:    mtvsrd f3, r7
-; CHECK-P9-NEXT:    mtvsrd f4, r8
-; CHECK-P9-NEXT:    mtvsrd f5, r9
-; CHECK-P9-NEXT:    mtvsrd f6, r10
-; CHECK-P9-NEXT:    mtvsrd f7, r11
-; CHECK-P9-NEXT:    mtvsrd f10, r30
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f11, r29
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f12, r28
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f13, r27
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v2, r26
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v3, r25
-; CHECK-P9-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs2
+; CHECK-P9-NEXT:    mfvsrwz r5, f6
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    xxswapd v3, vs4
+; CHECK-P9-NEXT:    xscvspdpn f2, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mtvsrd f6, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f7
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    lxv vs1, 48(r4)
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs5
+; CHECK-P9-NEXT:    mtvsrd f7, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f3
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs6
+; CHECK-P9-NEXT:    xxswapd v5, vs7
+; CHECK-P9-NEXT:    mtvsrd f3, r5
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    xxswapd v0, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs6
-; CHECK-P9-NEXT:    xxswapd v8, vs1
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs10
-; CHECK-P9-NEXT:    xxswapd v11, vs11
-; CHECK-P9-NEXT:    xxswapd v12, vs8
-; CHECK-P9-NEXT:    xxswapd v13, vs12
-; CHECK-P9-NEXT:    xxswapd v14, vs13
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs9
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglh v5, v0, v5
-; CHECK-P9-NEXT:    vmrglh v4, v4, v1
-; CHECK-P9-NEXT:    vmrglh v0, v7, v6
-; CHECK-P9-NEXT:    vmrglh v1, v8, v9
-; CHECK-P9-NEXT:    vmrglh v6, v11, v10
-; CHECK-P9-NEXT:    vmrglh v7, v12, v13
-; CHECK-P9-NEXT:    vmrglh v2, v2, v14
-; CHECK-P9-NEXT:    vmrglh v3, v15, v3
-; CHECK-P9-NEXT:    vmrglw v4, v4, v5
-; CHECK-P9-NEXT:    vmrglw v5, v1, v0
-; CHECK-P9-NEXT:    vmrglw v0, v7, v6
+; CHECK-P9-NEXT:    vmrglh v4, v5, v4
+; CHECK-P9-NEXT:    xxswapd v5, vs8
+; CHECK-P9-NEXT:    vmrglh v5, v5, v0
+; CHECK-P9-NEXT:    mfvsrwz r4, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    xxmrgld vs0, v5, v4
-; CHECK-P9-NEXT:    xxmrgld vs1, v2, v0
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxmrgld vs2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs1, vs1, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    vmrglh v2, v4, v2
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs1, vs1, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r5, f9
+; CHECK-P9-NEXT:    mtvsrd f9, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f10
+; CHECK-P9-NEXT:    mtvsrd f10, r5
+; CHECK-P9-NEXT:    xxswapd v0, vs9
+; CHECK-P9-NEXT:    xxswapd v1, vs10
+; CHECK-P9-NEXT:    vmrglh v0, v1, v0
+; CHECK-P9-NEXT:    vmrglw v2, v2, v0
+; CHECK-P9-NEXT:    stxv vs2, 0(r3)
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld vs0, v3, v2
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 0(r4)
-; CHECK-BE-NEXT:    lxv vs3, 16(r4)
-; CHECK-BE-NEXT:    lxv vs0, 32(r4)
-; CHECK-BE-NEXT:    lxv vs1, 48(r4)
-; CHECK-BE-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-BE-NEXT:    xxswapd vs5, vs3
-; CHECK-BE-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-BE-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-BE-NEXT:    xxswapd vs8, vs2
-; CHECK-BE-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-BE-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs11, vs1
-; CHECK-BE-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd v2, vs0
-; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    lxv vs1, 16(r4)
+; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvspdpn f8, vs8
-; CHECK-BE-NEXT:    xscvspdpn f9, vs9
-; CHECK-BE-NEXT:    xscvspdpn f10, vs10
-; CHECK-BE-NEXT:    xscvspdpn f11, vs11
-; CHECK-BE-NEXT:    xscvspdpn f12, vs12
-; CHECK-BE-NEXT:    xscvspdpn f13, vs13
-; CHECK-BE-NEXT:    xscvspdpn v2, v2
-; CHECK-BE-NEXT:    xscvspdpn v3, v3
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs1
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
 ; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r4, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
 ; CHECK-BE-NEXT:    mfvsrwz r5, f2
-; CHECK-BE-NEXT:    mfvsrwz r12, f1
-; CHECK-BE-NEXT:    mfvsrwz r0, f0
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r9, f7
-; CHECK-BE-NEXT:    mfvsrwz r10, f8
-; CHECK-BE-NEXT:    mfvsrwz r11, f9
-; CHECK-BE-NEXT:    mfvsrwz r30, f10
-; CHECK-BE-NEXT:    mfvsrwz r29, f11
-; CHECK-BE-NEXT:    mfvsrwz r28, f12
-; CHECK-BE-NEXT:    mfvsrwz r27, f13
-; CHECK-BE-NEXT:    mfvsrwz r26, v2
-; CHECK-BE-NEXT:    mfvsrwz r25, v3
-; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvspdpn f4, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r12, r12, 48
-; CHECK-BE-NEXT:    sldi r0, r0, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
-; CHECK-BE-NEXT:    sldi r11, r11, 48
-; CHECK-BE-NEXT:    sldi r30, r30, 48
-; CHECK-BE-NEXT:    sldi r29, r29, 48
-; CHECK-BE-NEXT:    sldi r28, r28, 48
-; CHECK-BE-NEXT:    sldi r27, r27, 48
-; CHECK-BE-NEXT:    sldi r26, r26, 48
-; CHECK-BE-NEXT:    sldi r25, r25, 48
-; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    mtvsrd v2, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f4
+; CHECK-BE-NEXT:    lxv vs0, 0(r4)
+; CHECK-BE-NEXT:    xxsldwi vs2, vs0, vs0, 3
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
 ; CHECK-BE-NEXT:    mtvsrd v3, r5
-; CHECK-BE-NEXT:    mtvsrd v10, r12
-; CHECK-BE-NEXT:    mtvsrd v14, r0
-; CHECK-BE-NEXT:    mtvsrd v4, r6
-; CHECK-BE-NEXT:    mtvsrd v5, r7
-; CHECK-BE-NEXT:    mtvsrd v0, r8
-; CHECK-BE-NEXT:    mtvsrd v1, r9
-; CHECK-BE-NEXT:    mtvsrd v6, r10
-; CHECK-BE-NEXT:    mtvsrd v7, r11
-; CHECK-BE-NEXT:    mtvsrd v8, r30
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v9, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r28
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v12, r27
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v13, r26
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v15, r25
-; CHECK-BE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-BE-NEXT:    vmrghh v2, v3, v2
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f2
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v5, r5
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
 ; CHECK-BE-NEXT:    vmrghh v4, v5, v4
-; CHECK-BE-NEXT:    vmrghh v2, v2, v0
-; CHECK-BE-NEXT:    vmrghh v5, v6, v1
-; CHECK-BE-NEXT:    vmrghh v3, v3, v7
-; CHECK-BE-NEXT:    vmrghh v0, v9, v8
-; CHECK-BE-NEXT:    vmrghh v1, v10, v11
-; CHECK-BE-NEXT:    vmrghh v6, v13, v12
-; CHECK-BE-NEXT:    vmrghh v7, v14, v15
-; CHECK-BE-NEXT:    vmrghw v2, v2, v4
-; CHECK-BE-NEXT:    vmrghw v3, v3, v5
-; CHECK-BE-NEXT:    vmrghw v4, v1, v0
-; CHECK-BE-NEXT:    vmrghw v5, v7, v6
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    lxv vs1, 48(r4)
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 32(r4)
+; CHECK-BE-NEXT:    xscvspdpn f5, vs1
+; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xscvdpsxws f5, f5
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xxswapd vs3, vs1
+; CHECK-BE-NEXT:    mtvsrd v0, r5
+; CHECK-BE-NEXT:    vmrghh v5, v5, v0
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    mfvsrwz r4, f5
+; CHECK-BE-NEXT:    xxmrghd vs4, v3, v2
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f2
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    stxv vs4, 0(r3)
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f3
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghh v2, v2, v4
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r4
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd vs0, v3, v2
-; CHECK-BE-NEXT:    xxmrghd vs1, v5, v4
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x float>, <16 x float>* %0, align 64
@@ -768,35 +744,35 @@
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xxsldwi vs1, v2, v2, 3
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs1
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    li r3, 0
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrd f0, r3
-; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mtvsrd v2, r3
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
 ; CHECK-BE-NEXT:    vmrghh v2, v2, v3
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -843,62 +819,62 @@
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-P9-NEXT:    xxswapd vs1, v2
-; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, v2
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    mfvsrwz r5, f3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f0
-; CHECK-P9-NEXT:    mfvsrwz r4, f1
-; CHECK-P9-NEXT:    mfvsrwz r6, f2
-; CHECK-P9-NEXT:    mtvsrd f2, r5
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, v2
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, v2
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglh v2, v4, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-BE-NEXT:    xxswapd vs1, v2
-; CHECK-BE-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, v2
 ; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
 ; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    mfvsrwz r5, f3
-; CHECK-BE-NEXT:    sldi r5, r5, 48
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mfvsrwz r6, f2
-; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    xxswapd vs0, v2
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xscvspdpn f0, v2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
-; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    vmrghh v2, v4, v2
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -970,119 +946,119 @@
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs3, vs1
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs6, vs0
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    xxswapd vs2, vs1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    xscvspdpn f2, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd vs1, vs0
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    mfvsrwz r5, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f2
-; CHECK-P9-NEXT:    mfvsrwz r4, f3
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f6, r9
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
-; CHECK-P9-NEXT:    vmrglh v4, v1, v0
-; CHECK-P9-NEXT:    vmrglh v5, v6, v7
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
 ; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs3, vs1
-; CHECK-BE-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs6, vs0
-; CHECK-BE-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
 ; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r5, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
-; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    mfvsrwz r3, f2
-; CHECK-BE-NEXT:    mfvsrwz r4, f3
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v6, r9
+; CHECK-BE-NEXT:    xxswapd vs2, vs1
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
-; CHECK-BE-NEXT:    vmrghh v4, v1, v0
-; CHECK-BE-NEXT:    vmrghh v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -1213,258 +1189,234 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 16(r4)
-; CHECK-P9-NEXT:    lxv vs3, 0(r4)
-; CHECK-P9-NEXT:    lxv vs0, 48(r4)
-; CHECK-P9-NEXT:    lxv vs1, 32(r4)
-; CHECK-P9-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-P9-NEXT:    xxswapd vs5, vs3
-; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-P9-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-P9-NEXT:    xxswapd vs8, vs2
-; CHECK-P9-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-P9-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs11, vs1
-; CHECK-P9-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    lxv vs1, 0(r4)
+; CHECK-P9-NEXT:    lxv vs3, 16(r4)
+; CHECK-P9-NEXT:    xscvspdpn f5, vs1
+; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
+; CHECK-P9-NEXT:    xscvspdpn f8, vs3
+; CHECK-P9-NEXT:    xxswapd vs4, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs1, vs1, 1
 ; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
+; CHECK-P9-NEXT:    xscvdpsxws f5, f5
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f8, f8
+; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 3
+; CHECK-P9-NEXT:    xxswapd vs7, vs3
 ; CHECK-P9-NEXT:    xscvspdpn f6, vs6
+; CHECK-P9-NEXT:    xxsldwi vs3, vs3, vs3, 1
 ; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xscvspdpn f8, vs8
+; CHECK-P9-NEXT:    xscvspdpn f3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f4, f4
+; CHECK-P9-NEXT:    xscvdpsxws f6, f6
+; CHECK-P9-NEXT:    mfvsrwz r5, f5
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    mtvsrd f5, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f8
+; CHECK-P9-NEXT:    mtvsrd f8, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f2
+; CHECK-P9-NEXT:    lxv vs0, 32(r4)
+; CHECK-P9-NEXT:    xxsldwi vs9, vs0, vs0, 3
+; CHECK-P9-NEXT:    xxswapd vs10, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f9, vs9
 ; CHECK-P9-NEXT:    xscvspdpn f10, vs10
-; CHECK-P9-NEXT:    xscvspdpn f11, vs11
-; CHECK-P9-NEXT:    xscvspdpn f12, vs12
-; CHECK-P9-NEXT:    xscvspdpn f13, vs13
-; CHECK-P9-NEXT:    xscvspdpn v2, v2
-; CHECK-P9-NEXT:    xscvspdpn v3, v3
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
 ; CHECK-P9-NEXT:    xscvdpsxws f9, f9
 ; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
-; CHECK-P9-NEXT:    mfvsrwz r4, f3
-; CHECK-P9-NEXT:    mfvsrwz r5, f2
-; CHECK-P9-NEXT:    mfvsrwz r12, f1
-; CHECK-P9-NEXT:    mfvsrwz r0, f0
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r9, f7
-; CHECK-P9-NEXT:    mfvsrwz r10, f8
-; CHECK-P9-NEXT:    mfvsrwz r11, f9
-; CHECK-P9-NEXT:    mfvsrwz r30, f10
-; CHECK-P9-NEXT:    mfvsrwz r29, f11
-; CHECK-P9-NEXT:    mfvsrwz r28, f12
-; CHECK-P9-NEXT:    mfvsrwz r27, f13
-; CHECK-P9-NEXT:    mfvsrwz r26, v2
-; CHECK-P9-NEXT:    mfvsrwz r25, v3
-; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    mtvsrd f2, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f1
 ; CHECK-P9-NEXT:    mtvsrd f1, r5
-; CHECK-P9-NEXT:    mtvsrd f8, r12
-; CHECK-P9-NEXT:    mtvsrd f9, r0
-; CHECK-P9-NEXT:    mtvsrd f2, r6
-; CHECK-P9-NEXT:    mtvsrd f3, r7
-; CHECK-P9-NEXT:    mtvsrd f4, r8
-; CHECK-P9-NEXT:    mtvsrd f5, r9
-; CHECK-P9-NEXT:    mtvsrd f6, r10
-; CHECK-P9-NEXT:    mtvsrd f7, r11
-; CHECK-P9-NEXT:    mtvsrd f10, r30
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f11, r29
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f12, r28
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f13, r27
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v2, r26
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v3, r25
-; CHECK-P9-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs2
+; CHECK-P9-NEXT:    mfvsrwz r5, f6
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    xxswapd v3, vs4
+; CHECK-P9-NEXT:    xscvspdpn f2, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mtvsrd f6, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f7
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    lxv vs1, 48(r4)
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs5
+; CHECK-P9-NEXT:    mtvsrd f7, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f3
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs6
+; CHECK-P9-NEXT:    xxswapd v5, vs7
+; CHECK-P9-NEXT:    mtvsrd f3, r5
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    xxswapd v0, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs6
-; CHECK-P9-NEXT:    xxswapd v8, vs1
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs10
-; CHECK-P9-NEXT:    xxswapd v11, vs11
-; CHECK-P9-NEXT:    xxswapd v12, vs8
-; CHECK-P9-NEXT:    xxswapd v13, vs12
-; CHECK-P9-NEXT:    xxswapd v14, vs13
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs9
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglh v5, v0, v5
-; CHECK-P9-NEXT:    vmrglh v4, v4, v1
-; CHECK-P9-NEXT:    vmrglh v0, v7, v6
-; CHECK-P9-NEXT:    vmrglh v1, v8, v9
-; CHECK-P9-NEXT:    vmrglh v6, v11, v10
-; CHECK-P9-NEXT:    vmrglh v7, v12, v13
-; CHECK-P9-NEXT:    vmrglh v2, v2, v14
-; CHECK-P9-NEXT:    vmrglh v3, v15, v3
-; CHECK-P9-NEXT:    vmrglw v4, v4, v5
-; CHECK-P9-NEXT:    vmrglw v5, v1, v0
-; CHECK-P9-NEXT:    vmrglw v0, v7, v6
+; CHECK-P9-NEXT:    vmrglh v4, v5, v4
+; CHECK-P9-NEXT:    xxswapd v5, vs8
+; CHECK-P9-NEXT:    vmrglh v5, v5, v0
+; CHECK-P9-NEXT:    mfvsrwz r4, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    xxmrgld vs0, v5, v4
-; CHECK-P9-NEXT:    xxmrgld vs1, v2, v0
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxmrgld vs2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs1, vs1, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    vmrglh v2, v4, v2
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs1, vs1, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r5, f9
+; CHECK-P9-NEXT:    mtvsrd f9, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f10
+; CHECK-P9-NEXT:    mtvsrd f10, r5
+; CHECK-P9-NEXT:    xxswapd v0, vs9
+; CHECK-P9-NEXT:    xxswapd v1, vs10
+; CHECK-P9-NEXT:    vmrglh v0, v1, v0
+; CHECK-P9-NEXT:    vmrglw v2, v2, v0
+; CHECK-P9-NEXT:    stxv vs2, 0(r3)
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld vs0, v3, v2
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 0(r4)
-; CHECK-BE-NEXT:    lxv vs3, 16(r4)
-; CHECK-BE-NEXT:    lxv vs0, 32(r4)
-; CHECK-BE-NEXT:    lxv vs1, 48(r4)
-; CHECK-BE-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-BE-NEXT:    xxswapd vs5, vs3
-; CHECK-BE-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-BE-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-BE-NEXT:    xxswapd vs8, vs2
-; CHECK-BE-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-BE-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs11, vs1
-; CHECK-BE-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd v2, vs0
-; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    lxv vs1, 16(r4)
+; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvspdpn f8, vs8
-; CHECK-BE-NEXT:    xscvspdpn f9, vs9
-; CHECK-BE-NEXT:    xscvspdpn f10, vs10
-; CHECK-BE-NEXT:    xscvspdpn f11, vs11
-; CHECK-BE-NEXT:    xscvspdpn f12, vs12
-; CHECK-BE-NEXT:    xscvspdpn f13, vs13
-; CHECK-BE-NEXT:    xscvspdpn v2, v2
-; CHECK-BE-NEXT:    xscvspdpn v3, v3
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs1
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
 ; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r4, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
 ; CHECK-BE-NEXT:    mfvsrwz r5, f2
-; CHECK-BE-NEXT:    mfvsrwz r12, f1
-; CHECK-BE-NEXT:    mfvsrwz r0, f0
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r9, f7
-; CHECK-BE-NEXT:    mfvsrwz r10, f8
-; CHECK-BE-NEXT:    mfvsrwz r11, f9
-; CHECK-BE-NEXT:    mfvsrwz r30, f10
-; CHECK-BE-NEXT:    mfvsrwz r29, f11
-; CHECK-BE-NEXT:    mfvsrwz r28, f12
-; CHECK-BE-NEXT:    mfvsrwz r27, f13
-; CHECK-BE-NEXT:    mfvsrwz r26, v2
-; CHECK-BE-NEXT:    mfvsrwz r25, v3
-; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvspdpn f4, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r12, r12, 48
-; CHECK-BE-NEXT:    sldi r0, r0, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
-; CHECK-BE-NEXT:    sldi r11, r11, 48
-; CHECK-BE-NEXT:    sldi r30, r30, 48
-; CHECK-BE-NEXT:    sldi r29, r29, 48
-; CHECK-BE-NEXT:    sldi r28, r28, 48
-; CHECK-BE-NEXT:    sldi r27, r27, 48
-; CHECK-BE-NEXT:    sldi r26, r26, 48
-; CHECK-BE-NEXT:    sldi r25, r25, 48
-; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    mtvsrd v2, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f4
+; CHECK-BE-NEXT:    lxv vs0, 0(r4)
+; CHECK-BE-NEXT:    xxsldwi vs2, vs0, vs0, 3
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
 ; CHECK-BE-NEXT:    mtvsrd v3, r5
-; CHECK-BE-NEXT:    mtvsrd v10, r12
-; CHECK-BE-NEXT:    mtvsrd v14, r0
-; CHECK-BE-NEXT:    mtvsrd v4, r6
-; CHECK-BE-NEXT:    mtvsrd v5, r7
-; CHECK-BE-NEXT:    mtvsrd v0, r8
-; CHECK-BE-NEXT:    mtvsrd v1, r9
-; CHECK-BE-NEXT:    mtvsrd v6, r10
-; CHECK-BE-NEXT:    mtvsrd v7, r11
-; CHECK-BE-NEXT:    mtvsrd v8, r30
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v9, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r28
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v12, r27
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v13, r26
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v15, r25
-; CHECK-BE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-BE-NEXT:    vmrghh v2, v3, v2
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f2
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v5, r5
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
 ; CHECK-BE-NEXT:    vmrghh v4, v5, v4
-; CHECK-BE-NEXT:    vmrghh v2, v2, v0
-; CHECK-BE-NEXT:    vmrghh v5, v6, v1
-; CHECK-BE-NEXT:    vmrghh v3, v3, v7
-; CHECK-BE-NEXT:    vmrghh v0, v9, v8
-; CHECK-BE-NEXT:    vmrghh v1, v10, v11
-; CHECK-BE-NEXT:    vmrghh v6, v13, v12
-; CHECK-BE-NEXT:    vmrghh v7, v14, v15
-; CHECK-BE-NEXT:    vmrghw v2, v2, v4
-; CHECK-BE-NEXT:    vmrghw v3, v3, v5
-; CHECK-BE-NEXT:    vmrghw v4, v1, v0
-; CHECK-BE-NEXT:    vmrghw v5, v7, v6
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    lxv vs1, 48(r4)
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 32(r4)
+; CHECK-BE-NEXT:    xscvspdpn f5, vs1
+; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xscvdpsxws f5, f5
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xxswapd vs3, vs1
+; CHECK-BE-NEXT:    mtvsrd v0, r5
+; CHECK-BE-NEXT:    vmrghh v5, v5, v0
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    mfvsrwz r4, f5
+; CHECK-BE-NEXT:    xxmrghd vs4, v3, v2
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f2
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    stxv vs4, 0(r3)
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f3
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghh v2, v2, v4
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r4
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd vs0, v3, v2
-; CHECK-BE-NEXT:    xxmrghd vs1, v5, v4
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x float>, <16 x float>* %0, align 64
diff --git a/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll b/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll
index 3a733f0..6b945d4 100644
--- a/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll
@@ -35,10 +35,10 @@
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrd f0, r3
-; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
+; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
 ; CHECK-BE-NEXT:    xvcvdpuxds v2, vs0
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -72,34 +72,34 @@
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 3
 ; CHECK-P9-NEXT:    xxswapd vs1, v2
-; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, v2
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
 ; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
+; CHECK-P9-NEXT:    xscvspdpn f1, v2
+; CHECK-P9-NEXT:    xxmrghd vs1, vs1, vs2
 ; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
 ; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 1
-; CHECK-BE-NEXT:    xxsldwi vs1, v2, v2, 3
+; CHECK-BE-NEXT:    xxsldwi vs1, v2, v2, 1
+; CHECK-BE-NEXT:    xscvspdpn f0, v2
 ; CHECK-BE-NEXT:    xxswapd vs2, v2
-; CHECK-BE-NEXT:    xscvspdpn f3, v2
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, v2, v2, 3
 ; CHECK-BE-NEXT:    xscvspdpn f1, vs1
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xxmrghd vs0, vs3, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = fptoui <4 x float> %a to <4 x i64>
@@ -149,66 +149,66 @@
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r4)
-; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs3, vs1
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs6, vs0
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 1
+; CHECK-P9-NEXT:    lxv vs0, 0(r4)
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xxswapd vs2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvspdpn f3, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xxmrghd vs1, vs2, vs1
+; CHECK-P9-NEXT:    lxv vs2, 16(r4)
+; CHECK-P9-NEXT:    xxmrghd vs0, vs3, vs0
+; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
+; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-P9-NEXT:    xxsldwi vs3, vs2, vs2, 3
+; CHECK-P9-NEXT:    xxswapd vs4, vs2
 ; CHECK-P9-NEXT:    xscvspdpn f3, vs3
 ; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs1, vs1, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs6, vs5
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs7
-; CHECK-P9-NEXT:    xvcvdpuxds vs2, vs2
-; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    xxmrghd vs3, vs4, vs3
+; CHECK-P9-NEXT:    xscvspdpn f4, vs2
+; CHECK-P9-NEXT:    xxsldwi vs2, vs2, vs2, 1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
 ; CHECK-P9-NEXT:    xvcvdpuxds vs3, vs3
-; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
-; CHECK-P9-NEXT:    stxv vs0, 48(r3)
+; CHECK-P9-NEXT:    xxmrghd vs2, vs4, vs2
+; CHECK-P9-NEXT:    xvcvdpuxds vs2, vs2
 ; CHECK-P9-NEXT:    stxv vs3, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs2, 0(r3)
+; CHECK-P9-NEXT:    stxv vs2, 48(r3)
+; CHECK-P9-NEXT:    stxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
 ; CHECK-BE-NEXT:    lxv vs1, 0(r4)
-; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs3, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs4, vs1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 1
-; CHECK-BE-NEXT:    xxsldwi vs6, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xxsldwi vs3, vs1, vs1, 1
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
 ; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    lxv vs0, 16(r4)
+; CHECK-BE-NEXT:    xxsldwi vs4, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs4, vs3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs5
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
+; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs3
+; CHECK-BE-NEXT:    xxsldwi vs3, vs1, vs1, 3
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs3
+; CHECK-BE-NEXT:    xscvspdpn f3, vs0
+; CHECK-BE-NEXT:    xxmrghd vs3, vs3, vs4
+; CHECK-BE-NEXT:    xxsldwi vs4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxds vs2, vs2
-; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs3, vs3
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs0, 32(r3)
-; CHECK-BE-NEXT:    stxv vs2, 16(r3)
-; CHECK-BE-NEXT:    stxv vs1, 0(r3)
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    stxv vs0, 48(r3)
+; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x float>, <8 x float>* %0, align 32
@@ -295,130 +295,122 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r4)
-; CHECK-P9-NEXT:    lxv vs2, 48(r4)
-; CHECK-P9-NEXT:    lxv vs3, 32(r4)
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs5, vs1
-; CHECK-P9-NEXT:    xxsldwi vs6, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs8, vs0
-; CHECK-P9-NEXT:    xxsldwi vs9, vs0, vs0, 1
-; CHECK-P9-NEXT:    xxsldwi vs10, vs3, vs3, 3
-; CHECK-P9-NEXT:    xxswapd vs11, vs3
-; CHECK-P9-NEXT:    xxsldwi vs12, vs3, vs3, 1
-; CHECK-P9-NEXT:    xxsldwi vs13, vs2, vs2, 3
-; CHECK-P9-NEXT:    xxswapd v2, vs2
-; CHECK-P9-NEXT:    xxsldwi v3, vs2, vs2, 1
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
+; CHECK-P9-NEXT:    lxv vs4, 16(r4)
+; CHECK-P9-NEXT:    xxsldwi vs5, vs4, vs4, 3
+; CHECK-P9-NEXT:    xxswapd vs6, vs4
+; CHECK-P9-NEXT:    lxv vs0, 0(r4)
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xxswapd vs2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f5, vs5
 ; CHECK-P9-NEXT:    xscvspdpn f6, vs6
+; CHECK-P9-NEXT:    xxmrghd vs5, vs6, vs5
+; CHECK-P9-NEXT:    xscvspdpn f6, vs4
+; CHECK-P9-NEXT:    xxsldwi vs4, vs4, vs4, 1
+; CHECK-P9-NEXT:    lxv vs3, 32(r4)
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xxswapd vs7, vs3
 ; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xscvspdpn f8, vs8
-; CHECK-P9-NEXT:    xscvspdpn f9, vs9
-; CHECK-P9-NEXT:    xscvspdpn f10, vs10
-; CHECK-P9-NEXT:    xscvspdpn f11, vs11
-; CHECK-P9-NEXT:    xscvspdpn f12, vs12
-; CHECK-P9-NEXT:    xscvspdpn f13, vs13
-; CHECK-P9-NEXT:    xscvspdpn f31, v2
-; CHECK-P9-NEXT:    xscvspdpn f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs4, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs1, vs1, vs6
-; CHECK-P9-NEXT:    xxmrghd vs5, vs8, vs7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs9
-; CHECK-P9-NEXT:    xxmrghd vs6, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs3, vs3, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs31, vs13
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxmrghd vs2, vs2, vs30
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpuxds vs4, vs4
+; CHECK-P9-NEXT:    xscvspdpn f4, vs4
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xxmrghd vs1, vs2, vs1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xxmrghd vs0, vs2, vs0
+; CHECK-P9-NEXT:    xxmrghd vs4, vs6, vs4
+; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 3
 ; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
 ; CHECK-P9-NEXT:    xvcvdpuxds vs5, vs5
+; CHECK-P9-NEXT:    xscvspdpn f6, vs6
+; CHECK-P9-NEXT:    xxmrghd vs6, vs7, vs6
+; CHECK-P9-NEXT:    xscvspdpn f7, vs3
+; CHECK-P9-NEXT:    xxsldwi vs3, vs3, vs3, 1
+; CHECK-P9-NEXT:    lxv vs2, 48(r4)
+; CHECK-P9-NEXT:    xxswapd vs8, vs2
+; CHECK-P9-NEXT:    xscvspdpn f8, vs8
 ; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-P9-NEXT:    stxv vs5, 32(r3)
 ; CHECK-P9-NEXT:    xvcvdpuxds vs6, vs6
+; CHECK-P9-NEXT:    xscvspdpn f3, vs3
+; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs3
+; CHECK-P9-NEXT:    xxsldwi vs7, vs2, vs2, 3
+; CHECK-P9-NEXT:    xscvspdpn f7, vs7
+; CHECK-P9-NEXT:    xxmrghd vs7, vs8, vs7
+; CHECK-P9-NEXT:    xscvspdpn f8, vs2
+; CHECK-P9-NEXT:    xxsldwi vs2, vs2, vs2, 1
+; CHECK-P9-NEXT:    stxv vs6, 64(r3)
+; CHECK-P9-NEXT:    xvcvdpuxds vs4, vs4
 ; CHECK-P9-NEXT:    xvcvdpuxds vs3, vs3
 ; CHECK-P9-NEXT:    xvcvdpuxds vs7, vs7
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    stxv vs3, 80(r3)
+; CHECK-P9-NEXT:    xxmrghd vs2, vs8, vs2
 ; CHECK-P9-NEXT:    xvcvdpuxds vs2, vs2
-; CHECK-P9-NEXT:    stxv vs0, 48(r3)
-; CHECK-P9-NEXT:    stxv vs5, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs4, 0(r3)
 ; CHECK-P9-NEXT:    stxv vs2, 112(r3)
 ; CHECK-P9-NEXT:    stxv vs7, 96(r3)
-; CHECK-P9-NEXT:    stxv vs3, 80(r3)
-; CHECK-P9-NEXT:    stxv vs6, 64(r3)
+; CHECK-P9-NEXT:    stxv vs4, 48(r3)
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    stxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
-; CHECK-BE-NEXT:    lxv vs1, 0(r4)
-; CHECK-BE-NEXT:    lxv vs2, 48(r4)
-; CHECK-BE-NEXT:    lxv vs3, 32(r4)
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-BE-NEXT:    xxsldwi vs8, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs9, vs0
-; CHECK-BE-NEXT:    xxsldwi vs10, vs3, vs3, 1
-; CHECK-BE-NEXT:    xxsldwi vs11, vs3, vs3, 3
-; CHECK-BE-NEXT:    xxswapd vs12, vs3
-; CHECK-BE-NEXT:    xxsldwi vs13, vs2, vs2, 1
-; CHECK-BE-NEXT:    xxsldwi v2, vs2, vs2, 3
-; CHECK-BE-NEXT:    xxswapd v3, vs2
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    lxv vs0, 0(r4)
+; CHECK-BE-NEXT:    lxv vs4, 16(r4)
+; CHECK-BE-NEXT:    xxsldwi vs2, vs0, vs0, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 3
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
 ; CHECK-BE-NEXT:    xscvspdpn f5, vs5
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xxsldwi vs6, vs4, vs4, 1
 ; CHECK-BE-NEXT:    xscvspdpn f6, vs6
+; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs5
+; CHECK-BE-NEXT:    xscvspdpn f5, vs4
+; CHECK-BE-NEXT:    lxv vs3, 32(r4)
+; CHECK-BE-NEXT:    xxsldwi vs7, vs3, vs3, 1
 ; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvspdpn f8, vs8
-; CHECK-BE-NEXT:    xscvspdpn f9, vs9
-; CHECK-BE-NEXT:    xscvspdpn f10, vs10
-; CHECK-BE-NEXT:    xscvspdpn f11, vs11
-; CHECK-BE-NEXT:    xscvspdpn f12, vs12
-; CHECK-BE-NEXT:    xscvspdpn f13, vs13
-; CHECK-BE-NEXT:    xscvspdpn f31, v2
-; CHECK-BE-NEXT:    xscvspdpn f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs4
-; CHECK-BE-NEXT:    xxmrghd vs4, vs6, vs5
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs7
-; CHECK-BE-NEXT:    xxmrghd vs5, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs3, vs3, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs12, vs11
-; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs13
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
+; CHECK-BE-NEXT:    xxmrghd vs5, vs5, vs6
+; CHECK-BE-NEXT:    xxsldwi vs6, vs4, vs4, 3
+; CHECK-BE-NEXT:    xxswapd vs4, vs4
+; CHECK-BE-NEXT:    xscvspdpn f6, vs6
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs2
+; CHECK-BE-NEXT:    lxv vs2, 48(r4)
+; CHECK-BE-NEXT:    xxsldwi vs8, vs2, vs2, 1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
-; CHECK-BE-NEXT:    xvcvdpuxds vs4, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
 ; CHECK-BE-NEXT:    xvcvdpuxds vs5, vs5
-; CHECK-BE-NEXT:    xvcvdpuxds vs3, vs3
+; CHECK-BE-NEXT:    xscvspdpn f8, vs8
+; CHECK-BE-NEXT:    xxmrghd vs4, vs4, vs6
+; CHECK-BE-NEXT:    xscvspdpn f6, vs3
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    xxmrghd vs6, vs6, vs7
+; CHECK-BE-NEXT:    xxsldwi vs7, vs3, vs3, 3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    xscvspdpn f7, vs7
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xxmrghd vs3, vs3, vs7
+; CHECK-BE-NEXT:    xscvspdpn f7, vs2
+; CHECK-BE-NEXT:    xxmrghd vs7, vs7, vs8
+; CHECK-BE-NEXT:    xxsldwi vs8, vs2, vs2, 3
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    xscvspdpn f8, vs8
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs8
+; CHECK-BE-NEXT:    stxv vs5, 32(r3)
+; CHECK-BE-NEXT:    xvcvdpuxds vs4, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxds vs6, vs6
-; CHECK-BE-NEXT:    xvcvdpuxds vs2, vs2
+; CHECK-BE-NEXT:    xvcvdpuxds vs3, vs3
 ; CHECK-BE-NEXT:    xvcvdpuxds vs7, vs7
-; CHECK-BE-NEXT:    stxv vs5, 48(r3)
-; CHECK-BE-NEXT:    stxv vs0, 32(r3)
-; CHECK-BE-NEXT:    stxv vs4, 16(r3)
+; CHECK-BE-NEXT:    stxv vs3, 80(r3)
+; CHECK-BE-NEXT:    stxv vs7, 96(r3)
+; CHECK-BE-NEXT:    xvcvdpuxds vs2, vs2
+; CHECK-BE-NEXT:    stxv vs2, 112(r3)
+; CHECK-BE-NEXT:    stxv vs6, 64(r3)
+; CHECK-BE-NEXT:    stxv vs4, 48(r3)
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
-; CHECK-BE-NEXT:    stxv vs7, 112(r3)
-; CHECK-BE-NEXT:    stxv vs2, 96(r3)
-; CHECK-BE-NEXT:    stxv vs6, 80(r3)
-; CHECK-BE-NEXT:    stxv vs3, 64(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x float>, <16 x float>* %0, align 64
@@ -453,10 +445,10 @@
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrd f0, r3
-; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
+; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
 ; CHECK-BE-NEXT:    xvcvdpuxds v2, vs0
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -490,34 +482,34 @@
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 3
 ; CHECK-P9-NEXT:    xxswapd vs1, v2
-; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, v2
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
 ; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
+; CHECK-P9-NEXT:    xscvspdpn f1, v2
+; CHECK-P9-NEXT:    xxmrghd vs1, vs1, vs2
 ; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
 ; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 1
-; CHECK-BE-NEXT:    xxsldwi vs1, v2, v2, 3
+; CHECK-BE-NEXT:    xxsldwi vs1, v2, v2, 1
+; CHECK-BE-NEXT:    xscvspdpn f0, v2
 ; CHECK-BE-NEXT:    xxswapd vs2, v2
-; CHECK-BE-NEXT:    xscvspdpn f3, v2
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, v2, v2, 3
 ; CHECK-BE-NEXT:    xscvspdpn f1, vs1
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xxmrghd vs0, vs3, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = fptoui <4 x float> %a to <4 x i64>
@@ -567,66 +559,66 @@
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r4)
-; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs3, vs1
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs6, vs0
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 1
+; CHECK-P9-NEXT:    lxv vs0, 0(r4)
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xxswapd vs2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvspdpn f3, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xxmrghd vs1, vs2, vs1
+; CHECK-P9-NEXT:    lxv vs2, 16(r4)
+; CHECK-P9-NEXT:    xxmrghd vs0, vs3, vs0
+; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
+; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-P9-NEXT:    xxsldwi vs3, vs2, vs2, 3
+; CHECK-P9-NEXT:    xxswapd vs4, vs2
 ; CHECK-P9-NEXT:    xscvspdpn f3, vs3
 ; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs1, vs1, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs6, vs5
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs7
-; CHECK-P9-NEXT:    xvcvdpuxds vs2, vs2
-; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    xxmrghd vs3, vs4, vs3
+; CHECK-P9-NEXT:    xscvspdpn f4, vs2
+; CHECK-P9-NEXT:    xxsldwi vs2, vs2, vs2, 1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
 ; CHECK-P9-NEXT:    xvcvdpuxds vs3, vs3
-; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
-; CHECK-P9-NEXT:    stxv vs0, 48(r3)
+; CHECK-P9-NEXT:    xxmrghd vs2, vs4, vs2
+; CHECK-P9-NEXT:    xvcvdpuxds vs2, vs2
 ; CHECK-P9-NEXT:    stxv vs3, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs2, 0(r3)
+; CHECK-P9-NEXT:    stxv vs2, 48(r3)
+; CHECK-P9-NEXT:    stxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
 ; CHECK-BE-NEXT:    lxv vs1, 0(r4)
-; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs3, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs4, vs1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 1
-; CHECK-BE-NEXT:    xxsldwi vs6, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xxsldwi vs3, vs1, vs1, 1
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
 ; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    lxv vs0, 16(r4)
+; CHECK-BE-NEXT:    xxsldwi vs4, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs4, vs3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs5
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
+; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs3
+; CHECK-BE-NEXT:    xxsldwi vs3, vs1, vs1, 3
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs3
+; CHECK-BE-NEXT:    xscvspdpn f3, vs0
+; CHECK-BE-NEXT:    xxmrghd vs3, vs3, vs4
+; CHECK-BE-NEXT:    xxsldwi vs4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxds vs2, vs2
-; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs3, vs3
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs0, 32(r3)
-; CHECK-BE-NEXT:    stxv vs2, 16(r3)
-; CHECK-BE-NEXT:    stxv vs1, 0(r3)
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    stxv vs0, 48(r3)
+; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x float>, <8 x float>* %0, align 32
@@ -713,130 +705,122 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r4)
-; CHECK-P9-NEXT:    lxv vs2, 48(r4)
-; CHECK-P9-NEXT:    lxv vs3, 32(r4)
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs5, vs1
-; CHECK-P9-NEXT:    xxsldwi vs6, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs8, vs0
-; CHECK-P9-NEXT:    xxsldwi vs9, vs0, vs0, 1
-; CHECK-P9-NEXT:    xxsldwi vs10, vs3, vs3, 3
-; CHECK-P9-NEXT:    xxswapd vs11, vs3
-; CHECK-P9-NEXT:    xxsldwi vs12, vs3, vs3, 1
-; CHECK-P9-NEXT:    xxsldwi vs13, vs2, vs2, 3
-; CHECK-P9-NEXT:    xxswapd v2, vs2
-; CHECK-P9-NEXT:    xxsldwi v3, vs2, vs2, 1
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
+; CHECK-P9-NEXT:    lxv vs4, 16(r4)
+; CHECK-P9-NEXT:    xxsldwi vs5, vs4, vs4, 3
+; CHECK-P9-NEXT:    xxswapd vs6, vs4
+; CHECK-P9-NEXT:    lxv vs0, 0(r4)
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xxswapd vs2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f5, vs5
 ; CHECK-P9-NEXT:    xscvspdpn f6, vs6
+; CHECK-P9-NEXT:    xxmrghd vs5, vs6, vs5
+; CHECK-P9-NEXT:    xscvspdpn f6, vs4
+; CHECK-P9-NEXT:    xxsldwi vs4, vs4, vs4, 1
+; CHECK-P9-NEXT:    lxv vs3, 32(r4)
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xxswapd vs7, vs3
 ; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xscvspdpn f8, vs8
-; CHECK-P9-NEXT:    xscvspdpn f9, vs9
-; CHECK-P9-NEXT:    xscvspdpn f10, vs10
-; CHECK-P9-NEXT:    xscvspdpn f11, vs11
-; CHECK-P9-NEXT:    xscvspdpn f12, vs12
-; CHECK-P9-NEXT:    xscvspdpn f13, vs13
-; CHECK-P9-NEXT:    xscvspdpn f31, v2
-; CHECK-P9-NEXT:    xscvspdpn f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs4, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs1, vs1, vs6
-; CHECK-P9-NEXT:    xxmrghd vs5, vs8, vs7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs9
-; CHECK-P9-NEXT:    xxmrghd vs6, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs3, vs3, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs31, vs13
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxmrghd vs2, vs2, vs30
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpuxds vs4, vs4
+; CHECK-P9-NEXT:    xscvspdpn f4, vs4
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xxmrghd vs1, vs2, vs1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xxmrghd vs0, vs2, vs0
+; CHECK-P9-NEXT:    xxmrghd vs4, vs6, vs4
+; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 3
 ; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
 ; CHECK-P9-NEXT:    xvcvdpuxds vs5, vs5
+; CHECK-P9-NEXT:    xscvspdpn f6, vs6
+; CHECK-P9-NEXT:    xxmrghd vs6, vs7, vs6
+; CHECK-P9-NEXT:    xscvspdpn f7, vs3
+; CHECK-P9-NEXT:    xxsldwi vs3, vs3, vs3, 1
+; CHECK-P9-NEXT:    lxv vs2, 48(r4)
+; CHECK-P9-NEXT:    xxswapd vs8, vs2
+; CHECK-P9-NEXT:    xscvspdpn f8, vs8
 ; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
+; CHECK-P9-NEXT:    stxv vs5, 32(r3)
 ; CHECK-P9-NEXT:    xvcvdpuxds vs6, vs6
+; CHECK-P9-NEXT:    xscvspdpn f3, vs3
+; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs3
+; CHECK-P9-NEXT:    xxsldwi vs7, vs2, vs2, 3
+; CHECK-P9-NEXT:    xscvspdpn f7, vs7
+; CHECK-P9-NEXT:    xxmrghd vs7, vs8, vs7
+; CHECK-P9-NEXT:    xscvspdpn f8, vs2
+; CHECK-P9-NEXT:    xxsldwi vs2, vs2, vs2, 1
+; CHECK-P9-NEXT:    stxv vs6, 64(r3)
+; CHECK-P9-NEXT:    xvcvdpuxds vs4, vs4
 ; CHECK-P9-NEXT:    xvcvdpuxds vs3, vs3
 ; CHECK-P9-NEXT:    xvcvdpuxds vs7, vs7
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    stxv vs3, 80(r3)
+; CHECK-P9-NEXT:    xxmrghd vs2, vs8, vs2
 ; CHECK-P9-NEXT:    xvcvdpuxds vs2, vs2
-; CHECK-P9-NEXT:    stxv vs0, 48(r3)
-; CHECK-P9-NEXT:    stxv vs5, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs4, 0(r3)
 ; CHECK-P9-NEXT:    stxv vs2, 112(r3)
 ; CHECK-P9-NEXT:    stxv vs7, 96(r3)
-; CHECK-P9-NEXT:    stxv vs3, 80(r3)
-; CHECK-P9-NEXT:    stxv vs6, 64(r3)
+; CHECK-P9-NEXT:    stxv vs4, 48(r3)
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    stxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
-; CHECK-BE-NEXT:    lxv vs1, 0(r4)
-; CHECK-BE-NEXT:    lxv vs2, 48(r4)
-; CHECK-BE-NEXT:    lxv vs3, 32(r4)
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-BE-NEXT:    xxsldwi vs8, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs9, vs0
-; CHECK-BE-NEXT:    xxsldwi vs10, vs3, vs3, 1
-; CHECK-BE-NEXT:    xxsldwi vs11, vs3, vs3, 3
-; CHECK-BE-NEXT:    xxswapd vs12, vs3
-; CHECK-BE-NEXT:    xxsldwi vs13, vs2, vs2, 1
-; CHECK-BE-NEXT:    xxsldwi v2, vs2, vs2, 3
-; CHECK-BE-NEXT:    xxswapd v3, vs2
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    lxv vs0, 0(r4)
+; CHECK-BE-NEXT:    lxv vs4, 16(r4)
+; CHECK-BE-NEXT:    xxsldwi vs2, vs0, vs0, 1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 3
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
 ; CHECK-BE-NEXT:    xscvspdpn f5, vs5
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xxsldwi vs6, vs4, vs4, 1
 ; CHECK-BE-NEXT:    xscvspdpn f6, vs6
+; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs5
+; CHECK-BE-NEXT:    xscvspdpn f5, vs4
+; CHECK-BE-NEXT:    lxv vs3, 32(r4)
+; CHECK-BE-NEXT:    xxsldwi vs7, vs3, vs3, 1
 ; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvspdpn f8, vs8
-; CHECK-BE-NEXT:    xscvspdpn f9, vs9
-; CHECK-BE-NEXT:    xscvspdpn f10, vs10
-; CHECK-BE-NEXT:    xscvspdpn f11, vs11
-; CHECK-BE-NEXT:    xscvspdpn f12, vs12
-; CHECK-BE-NEXT:    xscvspdpn f13, vs13
-; CHECK-BE-NEXT:    xscvspdpn f31, v2
-; CHECK-BE-NEXT:    xscvspdpn f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs4
-; CHECK-BE-NEXT:    xxmrghd vs4, vs6, vs5
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs7
-; CHECK-BE-NEXT:    xxmrghd vs5, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs3, vs3, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs12, vs11
-; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs13
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
+; CHECK-BE-NEXT:    xxmrghd vs5, vs5, vs6
+; CHECK-BE-NEXT:    xxsldwi vs6, vs4, vs4, 3
+; CHECK-BE-NEXT:    xxswapd vs4, vs4
+; CHECK-BE-NEXT:    xscvspdpn f6, vs6
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xxmrghd vs1, vs1, vs2
+; CHECK-BE-NEXT:    lxv vs2, 48(r4)
+; CHECK-BE-NEXT:    xxsldwi vs8, vs2, vs2, 1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
-; CHECK-BE-NEXT:    xvcvdpuxds vs4, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
 ; CHECK-BE-NEXT:    xvcvdpuxds vs5, vs5
-; CHECK-BE-NEXT:    xvcvdpuxds vs3, vs3
+; CHECK-BE-NEXT:    xscvspdpn f8, vs8
+; CHECK-BE-NEXT:    xxmrghd vs4, vs4, vs6
+; CHECK-BE-NEXT:    xscvspdpn f6, vs3
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    xxmrghd vs6, vs6, vs7
+; CHECK-BE-NEXT:    xxsldwi vs7, vs3, vs3, 3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    xscvspdpn f7, vs7
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xxmrghd vs3, vs3, vs7
+; CHECK-BE-NEXT:    xscvspdpn f7, vs2
+; CHECK-BE-NEXT:    xxmrghd vs7, vs7, vs8
+; CHECK-BE-NEXT:    xxsldwi vs8, vs2, vs2, 3
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    xscvspdpn f8, vs8
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs8
+; CHECK-BE-NEXT:    stxv vs5, 32(r3)
+; CHECK-BE-NEXT:    xvcvdpuxds vs4, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxds vs6, vs6
-; CHECK-BE-NEXT:    xvcvdpuxds vs2, vs2
+; CHECK-BE-NEXT:    xvcvdpuxds vs3, vs3
 ; CHECK-BE-NEXT:    xvcvdpuxds vs7, vs7
-; CHECK-BE-NEXT:    stxv vs5, 48(r3)
-; CHECK-BE-NEXT:    stxv vs0, 32(r3)
-; CHECK-BE-NEXT:    stxv vs4, 16(r3)
+; CHECK-BE-NEXT:    stxv vs3, 80(r3)
+; CHECK-BE-NEXT:    stxv vs7, 96(r3)
+; CHECK-BE-NEXT:    xvcvdpuxds vs2, vs2
+; CHECK-BE-NEXT:    stxv vs2, 112(r3)
+; CHECK-BE-NEXT:    stxv vs6, 64(r3)
+; CHECK-BE-NEXT:    stxv vs4, 48(r3)
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
-; CHECK-BE-NEXT:    stxv vs7, 112(r3)
-; CHECK-BE-NEXT:    stxv vs2, 96(r3)
-; CHECK-BE-NEXT:    stxv vs6, 80(r3)
-; CHECK-BE-NEXT:    stxv vs3, 64(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x float>, <16 x float>* %0, align 64
diff --git a/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll b/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll
index 708e844..de38a82 100644
--- a/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll
@@ -39,18 +39,18 @@
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xxsldwi vs1, v2, v2, 3
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    addi r3, r1, -2
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs1
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    vmrglb v2, v3, v2
 ; CHECK-P9-NEXT:    vsldoi v2, v2, v2, 8
+; CHECK-P9-NEXT:    addi r3, r1, -2
 ; CHECK-P9-NEXT:    stxsihx v2, 0, r3
 ; CHECK-P9-NEXT:    lhz r3, -2(r1)
 ; CHECK-P9-NEXT:    blr
@@ -58,18 +58,18 @@
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrd f0, r3
-; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mtvsrd v2, r3
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    addi r3, r1, -2
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
 ; CHECK-BE-NEXT:    vmrghb v2, v2, v3
 ; CHECK-BE-NEXT:    vsldoi v2, v2, v2, 10
 ; CHECK-BE-NEXT:    stxsihx v2, 0, r3
@@ -118,64 +118,64 @@
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-P9-NEXT:    xxswapd vs1, v2
-; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, v2
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    mfvsrwz r5, f3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f0
-; CHECK-P9-NEXT:    mfvsrwz r4, f1
-; CHECK-P9-NEXT:    mfvsrwz r6, f2
-; CHECK-P9-NEXT:    mtvsrd f2, r5
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, v2
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, v2
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    li r3, 0
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    vmrglb v2, v3, v2
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    vmrglb v2, v4, v2
+; CHECK-P9-NEXT:    vmrglh v2, v2, v3
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-BE-NEXT:    xxswapd vs1, v2
-; CHECK-BE-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, v2
 ; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
 ; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    mfvsrwz r5, f3
-; CHECK-BE-NEXT:    sldi r5, r5, 56
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mfvsrwz r6, f2
-; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    xxswapd vs0, v2
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xscvspdpn f0, v2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
 ; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    vmrghb v2, v3, v2
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
-; CHECK-BE-NEXT:    vmrghh v2, v3, v2
+; CHECK-BE-NEXT:    vmrghb v2, v4, v2
+; CHECK-BE-NEXT:    vmrghh v2, v2, v3
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -249,120 +249,120 @@
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs3, vs1
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs6, vs0
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    xxswapd vs2, vs1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    xscvspdpn f2, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    vmrglb v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd vs1, vs0
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    mfvsrwz r5, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f2
-; CHECK-P9-NEXT:    mfvsrwz r4, f3
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f6, r9
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
-; CHECK-P9-NEXT:    vmrglb v2, v3, v2
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
-; CHECK-P9-NEXT:    vmrglb v4, v1, v0
-; CHECK-P9-NEXT:    vmrglb v5, v6, v7
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v5, v4
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs3, vs1
-; CHECK-BE-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs6, vs0
-; CHECK-BE-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
 ; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r5, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    mfvsrwz r3, f2
-; CHECK-BE-NEXT:    mfvsrwz r4, f3
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v6, r9
+; CHECK-BE-NEXT:    xxswapd vs2, vs1
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    vmrghb v2, v3, v2
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
-; CHECK-BE-NEXT:    vmrghb v4, v1, v0
-; CHECK-BE-NEXT:    vmrghb v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -494,251 +494,231 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 16(r3)
-; CHECK-P9-NEXT:    lxv vs3, 0(r3)
-; CHECK-P9-NEXT:    lxv vs0, 48(r3)
-; CHECK-P9-NEXT:    lxv vs1, 32(r3)
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-P9-NEXT:    xxswapd vs5, vs3
-; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-P9-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-P9-NEXT:    xxswapd vs8, vs2
-; CHECK-P9-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-P9-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs11, vs1
-; CHECK-P9-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    lxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r4, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    xxswapd v2, vs1
+; CHECK-P9-NEXT:    xxswapd vs1, vs0
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    lxv vs2, 48(r3)
+; CHECK-P9-NEXT:    lxv vs3, 32(r3)
+; CHECK-P9-NEXT:    lxv vs4, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xscvspdpn f8, vs8
-; CHECK-P9-NEXT:    xscvspdpn f9, vs9
-; CHECK-P9-NEXT:    xscvspdpn f10, vs10
-; CHECK-P9-NEXT:    xscvspdpn f11, vs11
-; CHECK-P9-NEXT:    xscvspdpn f12, vs12
-; CHECK-P9-NEXT:    xscvspdpn f13, vs13
-; CHECK-P9-NEXT:    xscvspdpn v2, v2
-; CHECK-P9-NEXT:    xscvspdpn v3, v3
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
-; CHECK-P9-NEXT:    xscvdpsxws f9, f9
-; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
-; CHECK-P9-NEXT:    mfvsrwz r3, f3
-; CHECK-P9-NEXT:    mfvsrwz r4, f2
-; CHECK-P9-NEXT:    mfvsrwz r11, f1
-; CHECK-P9-NEXT:    mfvsrwz r12, f0
-; CHECK-P9-NEXT:    mfvsrwz r5, f4
-; CHECK-P9-NEXT:    mfvsrwz r6, f5
-; CHECK-P9-NEXT:    mfvsrwz r7, f6
-; CHECK-P9-NEXT:    mfvsrwz r8, f7
-; CHECK-P9-NEXT:    mfvsrwz r9, f8
-; CHECK-P9-NEXT:    mfvsrwz r10, f9
-; CHECK-P9-NEXT:    mfvsrwz r0, f10
-; CHECK-P9-NEXT:    mfvsrwz r30, f11
-; CHECK-P9-NEXT:    mfvsrwz r29, f12
-; CHECK-P9-NEXT:    mfvsrwz r28, f13
-; CHECK-P9-NEXT:    mfvsrwz r27, v2
-; CHECK-P9-NEXT:    mfvsrwz r26, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f8, r11
-; CHECK-P9-NEXT:    mtvsrd f9, r12
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    mtvsrd f10, r0
-; CHECK-P9-NEXT:    mtvsrd f11, r30
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f12, r29
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f13, r28
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v2, r27
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v3, r26
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
 ; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs2
-; CHECK-P9-NEXT:    xxswapd v0, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs6
-; CHECK-P9-NEXT:    xxswapd v8, vs1
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs10
-; CHECK-P9-NEXT:    xxswapd v11, vs11
-; CHECK-P9-NEXT:    xxswapd v12, vs8
-; CHECK-P9-NEXT:    xxswapd v13, vs12
-; CHECK-P9-NEXT:    xxswapd v14, vs13
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs9
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglb v5, v0, v5
-; CHECK-P9-NEXT:    vmrglb v4, v4, v1
-; CHECK-P9-NEXT:    vmrglb v0, v7, v6
-; CHECK-P9-NEXT:    vmrglb v1, v8, v9
-; CHECK-P9-NEXT:    vmrglb v6, v11, v10
-; CHECK-P9-NEXT:    vmrglb v7, v12, v13
-; CHECK-P9-NEXT:    vmrglb v2, v2, v14
-; CHECK-P9-NEXT:    vmrglb v3, v15, v3
-; CHECK-P9-NEXT:    vmrglh v4, v4, v5
-; CHECK-P9-NEXT:    vmrglh v5, v1, v0
-; CHECK-P9-NEXT:    vmrglh v0, v7, v6
+; CHECK-P9-NEXT:    xxsldwi vs0, vs4, vs4, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    vmrglb v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
-; CHECK-P9-NEXT:    vmrglw v2, v2, v0
-; CHECK-P9-NEXT:    xxmrgld v2, v2, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs4
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs4, vs4, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs3, vs3, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs3, vs3, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs2, vs2, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs2
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v4, v5, v4
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs2, vs2, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v0, vs0
+; CHECK-P9-NEXT:    vmrglb v5, v5, v0
+; CHECK-P9-NEXT:    vmrglh v4, v5, v4
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xxsldwi vs4, vs3, vs3, 3
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-BE-NEXT:    xxswapd vs5, vs3
-; CHECK-BE-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-BE-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-BE-NEXT:    xxswapd vs8, vs2
-; CHECK-BE-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-BE-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs11, vs1
-; CHECK-BE-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd v2, vs0
-; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvspdpn f8, vs8
-; CHECK-BE-NEXT:    xscvspdpn f9, vs9
-; CHECK-BE-NEXT:    xscvspdpn f10, vs10
-; CHECK-BE-NEXT:    xscvspdpn f11, vs11
-; CHECK-BE-NEXT:    xscvspdpn f12, vs12
-; CHECK-BE-NEXT:    xscvspdpn f13, vs13
-; CHECK-BE-NEXT:    xscvspdpn v2, v2
-; CHECK-BE-NEXT:    xscvspdpn v3, v3
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r3, f3
-; CHECK-BE-NEXT:    mfvsrwz r4, f2
-; CHECK-BE-NEXT:    mfvsrwz r11, f1
-; CHECK-BE-NEXT:    mfvsrwz r12, f0
-; CHECK-BE-NEXT:    mfvsrwz r5, f4
-; CHECK-BE-NEXT:    mfvsrwz r6, f5
-; CHECK-BE-NEXT:    mfvsrwz r7, f6
-; CHECK-BE-NEXT:    mfvsrwz r8, f7
-; CHECK-BE-NEXT:    mfvsrwz r9, f8
-; CHECK-BE-NEXT:    mfvsrwz r10, f9
-; CHECK-BE-NEXT:    mfvsrwz r0, f10
-; CHECK-BE-NEXT:    mfvsrwz r30, f11
-; CHECK-BE-NEXT:    mfvsrwz r29, f12
-; CHECK-BE-NEXT:    mfvsrwz r28, f13
-; CHECK-BE-NEXT:    mfvsrwz r27, v2
-; CHECK-BE-NEXT:    mfvsrwz r26, v3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    xxswapd vs4, vs3
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r11, r11, 56
-; CHECK-BE-NEXT:    sldi r12, r12, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
-; CHECK-BE-NEXT:    sldi r0, r0, 56
-; CHECK-BE-NEXT:    sldi r30, r30, 56
-; CHECK-BE-NEXT:    sldi r29, r29, 56
-; CHECK-BE-NEXT:    sldi r28, r28, 56
-; CHECK-BE-NEXT:    sldi r27, r27, 56
-; CHECK-BE-NEXT:    sldi r26, r26, 56
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v10, r11
-; CHECK-BE-NEXT:    mtvsrd v14, r12
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    mtvsrd v7, r10
-; CHECK-BE-NEXT:    mtvsrd v8, r0
-; CHECK-BE-NEXT:    mtvsrd v9, r30
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v12, r28
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v13, r27
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v15, r26
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    vmrghb v4, v5, v4
-; CHECK-BE-NEXT:    vmrghb v2, v2, v0
-; CHECK-BE-NEXT:    vmrghb v5, v6, v1
-; CHECK-BE-NEXT:    vmrghb v3, v3, v7
-; CHECK-BE-NEXT:    vmrghb v0, v9, v8
-; CHECK-BE-NEXT:    vmrghb v1, v10, v11
-; CHECK-BE-NEXT:    vmrghb v6, v13, v12
-; CHECK-BE-NEXT:    vmrghb v7, v14, v15
-; CHECK-BE-NEXT:    vmrghh v2, v2, v4
-; CHECK-BE-NEXT:    vmrghh v3, v3, v5
-; CHECK-BE-NEXT:    vmrghh v4, v1, v0
-; CHECK-BE-NEXT:    vmrghh v5, v7, v6
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    xscvspdpn f4, vs3
+; CHECK-BE-NEXT:    xxsldwi vs3, vs3, vs3, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    vmrghb v2, v3, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xxsldwi vs3, vs2, vs2, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs2
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    vmrghh v2, v3, v2
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvspdpn f3, vs2
+; CHECK-BE-NEXT:    xxsldwi vs2, vs2, vs2, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs1
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghb v4, v5, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v0, r3
+; CHECK-BE-NEXT:    vmrghb v5, v5, v0
+; CHECK-BE-NEXT:    vmrghh v4, v5, v4
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -777,18 +757,18 @@
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xxsldwi vs1, v2, v2, 3
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    addi r3, r1, -2
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs1
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    vmrglb v2, v3, v2
 ; CHECK-P9-NEXT:    vsldoi v2, v2, v2, 8
+; CHECK-P9-NEXT:    addi r3, r1, -2
 ; CHECK-P9-NEXT:    stxsihx v2, 0, r3
 ; CHECK-P9-NEXT:    lhz r3, -2(r1)
 ; CHECK-P9-NEXT:    blr
@@ -796,18 +776,18 @@
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrd f0, r3
-; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mtvsrd v2, r3
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    addi r3, r1, -2
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
 ; CHECK-BE-NEXT:    vmrghb v2, v2, v3
 ; CHECK-BE-NEXT:    vsldoi v2, v2, v2, 10
 ; CHECK-BE-NEXT:    stxsihx v2, 0, r3
@@ -856,64 +836,64 @@
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-P9-NEXT:    xxswapd vs1, v2
-; CHECK-P9-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, v2
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    mfvsrwz r5, f3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f0
-; CHECK-P9-NEXT:    mfvsrwz r4, f1
-; CHECK-P9-NEXT:    mfvsrwz r6, f2
-; CHECK-P9-NEXT:    mtvsrd f2, r5
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, v2
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, v2
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    li r3, 0
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    vmrglb v2, v3, v2
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    vmrglb v2, v4, v2
+; CHECK-P9-NEXT:    vmrglh v2, v2, v3
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 3
-; CHECK-BE-NEXT:    xxswapd vs1, v2
-; CHECK-BE-NEXT:    xxsldwi vs2, v2, v2, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, v2
 ; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
 ; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    mfvsrwz r5, f3
-; CHECK-BE-NEXT:    sldi r5, r5, 56
 ; CHECK-BE-NEXT:    mfvsrwz r3, f0
-; CHECK-BE-NEXT:    mfvsrwz r4, f1
-; CHECK-BE-NEXT:    mfvsrwz r6, f2
-; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    xxswapd vs0, v2
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xscvspdpn f0, v2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    xxsldwi vs0, v2, v2, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
 ; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    vmrghb v2, v3, v2
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
-; CHECK-BE-NEXT:    vmrghh v2, v3, v2
+; CHECK-BE-NEXT:    vmrghb v2, v4, v2
+; CHECK-BE-NEXT:    vmrghh v2, v2, v3
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -987,120 +967,120 @@
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
 ; CHECK-P9-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs3, vs1
-; CHECK-P9-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd vs6, vs0
-; CHECK-P9-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f1, vs1
-; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvspdpn f2, vs2
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    xxswapd vs2, vs1
+; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    xscvspdpn f2, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    vmrglb v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd vs1, vs0
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    mfvsrwz r5, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f2
-; CHECK-P9-NEXT:    mfvsrwz r4, f3
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f6, r9
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
-; CHECK-P9-NEXT:    vmrglb v2, v3, v2
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
-; CHECK-P9-NEXT:    vmrglb v4, v1, v0
-; CHECK-P9-NEXT:    vmrglb v5, v6, v7
-; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v5, v4
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs3, vs1
-; CHECK-BE-NEXT:    xxsldwi vs4, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs5, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd vs6, vs0
-; CHECK-BE-NEXT:    xxsldwi vs7, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
 ; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
 ; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r5, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    mfvsrwz r3, f2
-; CHECK-BE-NEXT:    mfvsrwz r4, f3
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v6, r9
+; CHECK-BE-NEXT:    xxswapd vs2, vs1
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    vmrghb v2, v3, v2
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
-; CHECK-BE-NEXT:    vmrghb v4, v1, v0
-; CHECK-BE-NEXT:    vmrghb v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -1232,251 +1212,231 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 16(r3)
-; CHECK-P9-NEXT:    lxv vs3, 0(r3)
-; CHECK-P9-NEXT:    lxv vs0, 48(r3)
-; CHECK-P9-NEXT:    lxv vs1, 32(r3)
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-P9-NEXT:    xxswapd vs5, vs3
-; CHECK-P9-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-P9-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-P9-NEXT:    xxswapd vs8, vs2
-; CHECK-P9-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-P9-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-P9-NEXT:    xxswapd vs11, vs1
-; CHECK-P9-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-P9-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-P9-NEXT:    xscvspdpn f3, vs3
-; CHECK-P9-NEXT:    xscvspdpn f2, vs2
+; CHECK-P9-NEXT:    lxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xxsldwi vs1, vs0, vs0, 3
 ; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r4, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    xxswapd v2, vs1
+; CHECK-P9-NEXT:    xxswapd vs1, vs0
+; CHECK-P9-NEXT:    xscvspdpn f1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    lxv vs2, 48(r3)
+; CHECK-P9-NEXT:    lxv vs3, 32(r3)
+; CHECK-P9-NEXT:    lxv vs4, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xscvspdpn f1, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs0, vs0, 1
 ; CHECK-P9-NEXT:    xscvspdpn f0, vs0
-; CHECK-P9-NEXT:    xscvspdpn f4, vs4
-; CHECK-P9-NEXT:    xscvspdpn f5, vs5
-; CHECK-P9-NEXT:    xscvspdpn f6, vs6
-; CHECK-P9-NEXT:    xscvspdpn f7, vs7
-; CHECK-P9-NEXT:    xscvspdpn f8, vs8
-; CHECK-P9-NEXT:    xscvspdpn f9, vs9
-; CHECK-P9-NEXT:    xscvspdpn f10, vs10
-; CHECK-P9-NEXT:    xscvspdpn f11, vs11
-; CHECK-P9-NEXT:    xscvspdpn f12, vs12
-; CHECK-P9-NEXT:    xscvspdpn f13, vs13
-; CHECK-P9-NEXT:    xscvspdpn v2, v2
-; CHECK-P9-NEXT:    xscvspdpn v3, v3
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
-; CHECK-P9-NEXT:    xscvdpsxws f9, f9
-; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
-; CHECK-P9-NEXT:    mfvsrwz r3, f3
-; CHECK-P9-NEXT:    mfvsrwz r4, f2
-; CHECK-P9-NEXT:    mfvsrwz r11, f1
-; CHECK-P9-NEXT:    mfvsrwz r12, f0
-; CHECK-P9-NEXT:    mfvsrwz r5, f4
-; CHECK-P9-NEXT:    mfvsrwz r6, f5
-; CHECK-P9-NEXT:    mfvsrwz r7, f6
-; CHECK-P9-NEXT:    mfvsrwz r8, f7
-; CHECK-P9-NEXT:    mfvsrwz r9, f8
-; CHECK-P9-NEXT:    mfvsrwz r10, f9
-; CHECK-P9-NEXT:    mfvsrwz r0, f10
-; CHECK-P9-NEXT:    mfvsrwz r30, f11
-; CHECK-P9-NEXT:    mfvsrwz r29, f12
-; CHECK-P9-NEXT:    mfvsrwz r28, f13
-; CHECK-P9-NEXT:    mfvsrwz r27, v2
-; CHECK-P9-NEXT:    mfvsrwz r26, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f8, r11
-; CHECK-P9-NEXT:    mtvsrd f9, r12
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    mtvsrd f10, r0
-; CHECK-P9-NEXT:    mtvsrd f11, r30
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f12, r29
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd f13, r28
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v2, r27
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrd v3, r26
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
 ; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs2
-; CHECK-P9-NEXT:    xxswapd v0, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs6
-; CHECK-P9-NEXT:    xxswapd v8, vs1
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs10
-; CHECK-P9-NEXT:    xxswapd v11, vs11
-; CHECK-P9-NEXT:    xxswapd v12, vs8
-; CHECK-P9-NEXT:    xxswapd v13, vs12
-; CHECK-P9-NEXT:    xxswapd v14, vs13
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs9
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglb v5, v0, v5
-; CHECK-P9-NEXT:    vmrglb v4, v4, v1
-; CHECK-P9-NEXT:    vmrglb v0, v7, v6
-; CHECK-P9-NEXT:    vmrglb v1, v8, v9
-; CHECK-P9-NEXT:    vmrglb v6, v11, v10
-; CHECK-P9-NEXT:    vmrglb v7, v12, v13
-; CHECK-P9-NEXT:    vmrglb v2, v2, v14
-; CHECK-P9-NEXT:    vmrglb v3, v15, v3
-; CHECK-P9-NEXT:    vmrglh v4, v4, v5
-; CHECK-P9-NEXT:    vmrglh v5, v1, v0
-; CHECK-P9-NEXT:    vmrglh v0, v7, v6
+; CHECK-P9-NEXT:    xxsldwi vs0, vs4, vs4, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    vmrglb v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
-; CHECK-P9-NEXT:    vmrglw v2, v2, v0
-; CHECK-P9-NEXT:    xxmrgld v2, v2, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs4
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs4, vs4, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs3, vs3, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v4, v3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs3, vs3, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs2, vs2, 3
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    xxswapd vs0, vs2
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xscvspdpn f0, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v4, v5, v4
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    xxsldwi vs0, vs2, vs2, 1
+; CHECK-P9-NEXT:    xscvspdpn f0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v0, vs0
+; CHECK-P9-NEXT:    vmrglb v5, v5, v0
+; CHECK-P9-NEXT:    vmrglh v4, v5, v4
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xxsldwi vs4, vs3, vs3, 3
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxsldwi vs4, vs3, vs3, 3
-; CHECK-BE-NEXT:    xxswapd vs5, vs3
-; CHECK-BE-NEXT:    xxsldwi vs6, vs3, vs3, 1
-; CHECK-BE-NEXT:    xxsldwi vs7, vs2, vs2, 3
-; CHECK-BE-NEXT:    xxswapd vs8, vs2
-; CHECK-BE-NEXT:    xxsldwi vs9, vs2, vs2, 1
-; CHECK-BE-NEXT:    xxsldwi vs10, vs1, vs1, 3
-; CHECK-BE-NEXT:    xxswapd vs11, vs1
-; CHECK-BE-NEXT:    xxsldwi vs12, vs1, vs1, 1
-; CHECK-BE-NEXT:    xxsldwi vs13, vs0, vs0, 3
-; CHECK-BE-NEXT:    xxswapd v2, vs0
-; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 1
-; CHECK-BE-NEXT:    xscvspdpn f3, vs3
-; CHECK-BE-NEXT:    xscvspdpn f2, vs2
-; CHECK-BE-NEXT:    xscvspdpn f1, vs1
-; CHECK-BE-NEXT:    xscvspdpn f0, vs0
-; CHECK-BE-NEXT:    xscvspdpn f4, vs4
-; CHECK-BE-NEXT:    xscvspdpn f5, vs5
-; CHECK-BE-NEXT:    xscvspdpn f6, vs6
-; CHECK-BE-NEXT:    xscvspdpn f7, vs7
-; CHECK-BE-NEXT:    xscvspdpn f8, vs8
-; CHECK-BE-NEXT:    xscvspdpn f9, vs9
-; CHECK-BE-NEXT:    xscvspdpn f10, vs10
-; CHECK-BE-NEXT:    xscvspdpn f11, vs11
-; CHECK-BE-NEXT:    xscvspdpn f12, vs12
-; CHECK-BE-NEXT:    xscvspdpn f13, vs13
-; CHECK-BE-NEXT:    xscvspdpn v2, v2
-; CHECK-BE-NEXT:    xscvspdpn v3, v3
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r3, f3
-; CHECK-BE-NEXT:    mfvsrwz r4, f2
-; CHECK-BE-NEXT:    mfvsrwz r11, f1
-; CHECK-BE-NEXT:    mfvsrwz r12, f0
-; CHECK-BE-NEXT:    mfvsrwz r5, f4
-; CHECK-BE-NEXT:    mfvsrwz r6, f5
-; CHECK-BE-NEXT:    mfvsrwz r7, f6
-; CHECK-BE-NEXT:    mfvsrwz r8, f7
-; CHECK-BE-NEXT:    mfvsrwz r9, f8
-; CHECK-BE-NEXT:    mfvsrwz r10, f9
-; CHECK-BE-NEXT:    mfvsrwz r0, f10
-; CHECK-BE-NEXT:    mfvsrwz r30, f11
-; CHECK-BE-NEXT:    mfvsrwz r29, f12
-; CHECK-BE-NEXT:    mfvsrwz r28, f13
-; CHECK-BE-NEXT:    mfvsrwz r27, v2
-; CHECK-BE-NEXT:    mfvsrwz r26, v3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    xxswapd vs4, vs3
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r11, r11, 56
-; CHECK-BE-NEXT:    sldi r12, r12, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
-; CHECK-BE-NEXT:    sldi r0, r0, 56
-; CHECK-BE-NEXT:    sldi r30, r30, 56
-; CHECK-BE-NEXT:    sldi r29, r29, 56
-; CHECK-BE-NEXT:    sldi r28, r28, 56
-; CHECK-BE-NEXT:    sldi r27, r27, 56
-; CHECK-BE-NEXT:    sldi r26, r26, 56
+; CHECK-BE-NEXT:    xscvspdpn f4, vs4
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v10, r11
-; CHECK-BE-NEXT:    mtvsrd v14, r12
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    mtvsrd v7, r10
-; CHECK-BE-NEXT:    mtvsrd v8, r0
-; CHECK-BE-NEXT:    mtvsrd v9, r30
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v12, r28
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v13, r27
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v15, r26
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    vmrghb v4, v5, v4
-; CHECK-BE-NEXT:    vmrghb v2, v2, v0
-; CHECK-BE-NEXT:    vmrghb v5, v6, v1
-; CHECK-BE-NEXT:    vmrghb v3, v3, v7
-; CHECK-BE-NEXT:    vmrghb v0, v9, v8
-; CHECK-BE-NEXT:    vmrghb v1, v10, v11
-; CHECK-BE-NEXT:    vmrghb v6, v13, v12
-; CHECK-BE-NEXT:    vmrghb v7, v14, v15
-; CHECK-BE-NEXT:    vmrghh v2, v2, v4
-; CHECK-BE-NEXT:    vmrghh v3, v3, v5
-; CHECK-BE-NEXT:    vmrghh v4, v1, v0
-; CHECK-BE-NEXT:    vmrghh v5, v7, v6
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    xscvspdpn f4, vs3
+; CHECK-BE-NEXT:    xxsldwi vs3, vs3, vs3, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    vmrghb v2, v3, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xxsldwi vs3, vs2, vs2, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs2
+; CHECK-BE-NEXT:    xscvspdpn f3, vs3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    vmrghh v2, v3, v2
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvspdpn f3, vs2
+; CHECK-BE-NEXT:    xxsldwi vs2, vs2, vs2, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xxsldwi vs2, vs1, vs1, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs1
+; CHECK-BE-NEXT:    xscvspdpn f2, vs2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvspdpn f2, vs1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs1, vs1, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghb v3, v4, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxsldwi vs1, vs0, vs0, 3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs0
+; CHECK-BE-NEXT:    xscvspdpn f1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvspdpn f1, vs0
+; CHECK-BE-NEXT:    xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    xscvspdpn f0, vs0
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    vmrghb v4, v5, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v0, r3
+; CHECK-BE-NEXT:    vmrghb v5, v5, v0
+; CHECK-BE-NEXT:    vmrghh v4, v5, v4
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
diff --git a/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll b/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll
index a713b52..9e87876 100644
--- a/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll
@@ -28,33 +28,33 @@
 ;
 ; CHECK-P9-LABEL: test2elt:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscvdpsxws f0, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xscvdpsxws f1, v2
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    mtvsrd f1, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    vmrglh v2, v2, v3
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    xscvdpsxws f0, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    xxswapd vs0, v2
-; CHECK-BE-NEXT:    xscvdpsxws f1, v2
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    vmrghh v2, v2, v3
+; CHECK-BE-NEXT:    vmrghh v2, v3, v2
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -96,56 +96,56 @@
 ;
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs2, vs1
-; CHECK-P9-NEXT:    xxswapd vs3, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r5, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mfvsrwz r4, f2
-; CHECK-P9-NEXT:    mfvsrwz r6, f3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
 ; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    vmrglh v2, v2, v3
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    xxswapd vs2, vs1
-; CHECK-BE-NEXT:    xxswapd vs3, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
-; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f2
-; CHECK-BE-NEXT:    mfvsrwz r6, f3
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghh v2, v2, v3
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -213,103 +213,103 @@
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    lxv vs3, 0(r3)
+; CHECK-P9-NEXT:    xscvdpsxws f4, f3
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    lxv vs0, 48(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 32(r3)
 ; CHECK-P9-NEXT:    lxv vs2, 16(r3)
-; CHECK-P9-NEXT:    lxv vs3, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs4, vs3
-; CHECK-P9-NEXT:    xxswapd vs5, vs2
-; CHECK-P9-NEXT:    xxswapd vs6, vs1
-; CHECK-P9-NEXT:    xxswapd vs7, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f3
-; CHECK-P9-NEXT:    mfvsrwz r5, f2
-; CHECK-P9-NEXT:    mfvsrwz r7, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mfvsrwz r4, f4
-; CHECK-P9-NEXT:    mfvsrwz r6, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs4
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
 ; CHECK-P9-NEXT:    vmrglh v2, v2, v3
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
-; CHECK-P9-NEXT:    vmrglh v4, v0, v1
-; CHECK-P9-NEXT:    vmrglh v5, v6, v7
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
 ; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xscvdpsxws f4, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    lxv vs2, 32(r3)
-; CHECK-BE-NEXT:    lxv vs3, 48(r3)
-; CHECK-BE-NEXT:    xxswapd vs4, vs3
-; CHECK-BE-NEXT:    xxswapd vs5, vs2
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r3, f3
-; CHECK-BE-NEXT:    mfvsrwz r5, f2
-; CHECK-BE-NEXT:    mfvsrwz r7, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f4
-; CHECK-BE-NEXT:    mfvsrwz r6, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghh v2, v2, v3
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
-; CHECK-BE-NEXT:    vmrghh v4, v0, v1
-; CHECK-BE-NEXT:    vmrghh v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -428,226 +428,202 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 48(r4)
-; CHECK-P9-NEXT:    lxv vs4, 32(r4)
-; CHECK-P9-NEXT:    lxv vs5, 16(r4)
-; CHECK-P9-NEXT:    lxv vs6, 0(r4)
+; CHECK-P9-NEXT:    lxv vs4, 0(r4)
+; CHECK-P9-NEXT:    lxv vs3, 16(r4)
+; CHECK-P9-NEXT:    lxv vs2, 32(r4)
+; CHECK-P9-NEXT:    xscvdpsxws f5, f4
+; CHECK-P9-NEXT:    lxv vs1, 48(r4)
+; CHECK-P9-NEXT:    xscvdpsxws f6, f3
+; CHECK-P9-NEXT:    lxv vs0, 64(r4)
+; CHECK-P9-NEXT:    xscvdpsxws f7, f2
+; CHECK-P9-NEXT:    xscvdpsxws f8, f1
+; CHECK-P9-NEXT:    xxswapd vs4, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f4, f4
+; CHECK-P9-NEXT:    mfvsrwz r5, f5
+; CHECK-P9-NEXT:    xscvdpsxws f9, f0
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    mtvsrd f5, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f6
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mtvsrd f6, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f7
+; CHECK-P9-NEXT:    mtvsrd f7, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f8
+; CHECK-P9-NEXT:    mtvsrd f8, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f9
+; CHECK-P9-NEXT:    mtvsrd f9, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f3
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    xxswapd v2, vs5
+; CHECK-P9-NEXT:    xxswapd v5, vs8
+; CHECK-P9-NEXT:    xxswapd v0, vs9
+; CHECK-P9-NEXT:    mtvsrd f3, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r5
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    xxswapd v1, vs2
+; CHECK-P9-NEXT:    lxv vs2, 80(r4)
+; CHECK-P9-NEXT:    xxswapd v3, vs4
+; CHECK-P9-NEXT:    vmrglh v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs6
+; CHECK-P9-NEXT:    xxswapd v4, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    mfvsrwz r5, f1
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs7
+; CHECK-P9-NEXT:    mtvsrd f1, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v1
+; CHECK-P9-NEXT:    xxswapd v1, vs1
+; CHECK-P9-NEXT:    mtvsrd f0, r5
+; CHECK-P9-NEXT:    vmrglh v5, v5, v1
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xxswapd v1, vs0
 ; CHECK-P9-NEXT:    lxv vs0, 112(r4)
 ; CHECK-P9-NEXT:    lxv vs1, 96(r4)
-; CHECK-P9-NEXT:    lxv vs3, 80(r4)
-; CHECK-P9-NEXT:    lxv vs7, 64(r4)
-; CHECK-P9-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxswapd vs8, vs6
-; CHECK-P9-NEXT:    xxswapd vs9, vs5
-; CHECK-P9-NEXT:    xxswapd vs10, vs4
-; CHECK-P9-NEXT:    xxswapd vs11, vs2
-; CHECK-P9-NEXT:    xxswapd vs12, vs7
-; CHECK-P9-NEXT:    xxswapd vs13, vs3
-; CHECK-P9-NEXT:    xxswapd v2, vs1
-; CHECK-P9-NEXT:    xxswapd v3, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
-; CHECK-P9-NEXT:    xscvdpsxws f9, f9
-; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
-; CHECK-P9-NEXT:    mfvsrwz r4, f6
-; CHECK-P9-NEXT:    mfvsrwz r5, f5
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f2
-; CHECK-P9-NEXT:    mfvsrwz r12, f7
-; CHECK-P9-NEXT:    mfvsrwz r0, f3
-; CHECK-P9-NEXT:    mfvsrwz r30, f1
-; CHECK-P9-NEXT:    mfvsrwz r29, f0
-; CHECK-P9-NEXT:    mfvsrwz r8, f8
-; CHECK-P9-NEXT:    mfvsrwz r9, f9
-; CHECK-P9-NEXT:    mfvsrwz r10, f10
-; CHECK-P9-NEXT:    mfvsrwz r11, f11
-; CHECK-P9-NEXT:    mfvsrwz r28, f12
-; CHECK-P9-NEXT:    mfvsrwz r27, f13
-; CHECK-P9-NEXT:    mfvsrwz r26, v2
-; CHECK-P9-NEXT:    mfvsrwz r25, v3
-; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    mtvsrd f1, r5
-; CHECK-P9-NEXT:    mtvsrd f2, r6
-; CHECK-P9-NEXT:    mtvsrd f3, r7
-; CHECK-P9-NEXT:    mtvsrd f8, r12
-; CHECK-P9-NEXT:    mtvsrd f9, r0
-; CHECK-P9-NEXT:    mtvsrd f10, r30
-; CHECK-P9-NEXT:    mtvsrd f11, r29
-; CHECK-P9-NEXT:    mtvsrd f4, r8
-; CHECK-P9-NEXT:    mtvsrd f5, r9
-; CHECK-P9-NEXT:    mtvsrd f6, r10
-; CHECK-P9-NEXT:    mtvsrd f7, r11
-; CHECK-P9-NEXT:    mtvsrd f12, r28
-; CHECK-P9-NEXT:    mtvsrd f13, r27
-; CHECK-P9-NEXT:    mtvsrd v2, r26
-; CHECK-P9-NEXT:    mtvsrd v3, r25
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs1
-; CHECK-P9-NEXT:    xxswapd v0, vs2
-; CHECK-P9-NEXT:    xxswapd v1, vs3
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v6, vs4
-; CHECK-P9-NEXT:    xxswapd v7, vs5
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v8, vs6
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs8
-; CHECK-P9-NEXT:    xxswapd v11, vs12
-; CHECK-P9-NEXT:    xxswapd v12, vs9
-; CHECK-P9-NEXT:    xxswapd v13, vs13
-; CHECK-P9-NEXT:    xxswapd v14, vs10
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs11
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglh v4, v4, v6
-; CHECK-P9-NEXT:    vmrglh v5, v5, v7
-; CHECK-P9-NEXT:    vmrglh v0, v0, v8
-; CHECK-P9-NEXT:    vmrglh v1, v1, v9
-; CHECK-P9-NEXT:    vmrglh v6, v10, v11
-; CHECK-P9-NEXT:    vmrglh v7, v12, v13
-; CHECK-P9-NEXT:    vmrglh v2, v14, v2
-; CHECK-P9-NEXT:    vmrglh v3, v15, v3
-; CHECK-P9-NEXT:    vmrglw v4, v5, v4
-; CHECK-P9-NEXT:    vmrglw v5, v1, v0
-; CHECK-P9-NEXT:    vmrglw v0, v7, v6
+; CHECK-P9-NEXT:    mfvsrwz r4, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f2
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    xxmrgld vs0, v5, v4
-; CHECK-P9-NEXT:    xxmrgld vs1, v2, v0
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    xxmrgld vs4, v3, v2
+; CHECK-P9-NEXT:    xxswapd v2, vs3
+; CHECK-P9-NEXT:    vmrglh v0, v0, v1
+; CHECK-P9-NEXT:    mtvsrd f2, r4
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r4, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    vmrglh v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    vmrglw v2, v2, v0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld vs0, v3, v2
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    stxv vs4, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 0(r4)
-; CHECK-BE-NEXT:    lxv vs4, 16(r4)
-; CHECK-BE-NEXT:    lxv vs5, 32(r4)
-; CHECK-BE-NEXT:    lxv vs6, 48(r4)
-; CHECK-BE-NEXT:    lxv vs0, 64(r4)
-; CHECK-BE-NEXT:    lxv vs1, 80(r4)
-; CHECK-BE-NEXT:    lxv vs3, 96(r4)
-; CHECK-BE-NEXT:    lxv vs7, 112(r4)
-; CHECK-BE-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxswapd vs8, vs6
-; CHECK-BE-NEXT:    xxswapd vs9, vs5
-; CHECK-BE-NEXT:    xxswapd vs10, vs4
-; CHECK-BE-NEXT:    xxswapd vs11, vs2
-; CHECK-BE-NEXT:    xxswapd vs12, vs7
-; CHECK-BE-NEXT:    xxswapd vs13, vs3
-; CHECK-BE-NEXT:    xxswapd v2, vs1
-; CHECK-BE-NEXT:    xxswapd v3, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f5, f4
+; CHECK-BE-NEXT:    xxswapd vs4, vs4
+; CHECK-BE-NEXT:    lxv vs3, 32(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f6, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
 ; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r4, f6
 ; CHECK-BE-NEXT:    mfvsrwz r5, f5
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f2
-; CHECK-BE-NEXT:    mfvsrwz r12, f7
-; CHECK-BE-NEXT:    mfvsrwz r0, f3
-; CHECK-BE-NEXT:    mfvsrwz r30, f1
-; CHECK-BE-NEXT:    mfvsrwz r29, f0
-; CHECK-BE-NEXT:    mfvsrwz r8, f8
-; CHECK-BE-NEXT:    mfvsrwz r9, f9
-; CHECK-BE-NEXT:    mfvsrwz r10, f10
-; CHECK-BE-NEXT:    mfvsrwz r11, f11
-; CHECK-BE-NEXT:    mfvsrwz r28, f12
-; CHECK-BE-NEXT:    mfvsrwz r27, f13
-; CHECK-BE-NEXT:    mfvsrwz r26, v2
-; CHECK-BE-NEXT:    mfvsrwz r25, v3
-; CHECK-BE-NEXT:    sldi r4, r4, 48
 ; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r12, r12, 48
-; CHECK-BE-NEXT:    sldi r0, r0, 48
-; CHECK-BE-NEXT:    sldi r30, r30, 48
-; CHECK-BE-NEXT:    sldi r29, r29, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
-; CHECK-BE-NEXT:    sldi r11, r11, 48
-; CHECK-BE-NEXT:    sldi r28, r28, 48
-; CHECK-BE-NEXT:    sldi r27, r27, 48
-; CHECK-BE-NEXT:    sldi r26, r26, 48
-; CHECK-BE-NEXT:    sldi r25, r25, 48
-; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    lxv vs2, 16(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f7, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v2, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f4
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    lxv vs1, 0(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f4, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    mtvsrd v3, r5
-; CHECK-BE-NEXT:    mtvsrd v4, r6
-; CHECK-BE-NEXT:    mtvsrd v5, r7
-; CHECK-BE-NEXT:    mtvsrd v8, r12
-; CHECK-BE-NEXT:    mtvsrd v10, r0
-; CHECK-BE-NEXT:    mtvsrd v12, r30
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v0, r8
-; CHECK-BE-NEXT:    mtvsrd v1, r9
-; CHECK-BE-NEXT:    mtvsrd v6, r10
-; CHECK-BE-NEXT:    mtvsrd v7, r11
-; CHECK-BE-NEXT:    mtvsrd v9, r28
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r27
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v13, r26
-; CHECK-BE-NEXT:    mtvsrd v14, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v15, r25
-; CHECK-BE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    vmrghh v2, v2, v0
-; CHECK-BE-NEXT:    vmrghh v3, v3, v1
-; CHECK-BE-NEXT:    vmrghh v4, v4, v6
-; CHECK-BE-NEXT:    vmrghh v5, v5, v7
-; CHECK-BE-NEXT:    vmrghh v0, v8, v9
-; CHECK-BE-NEXT:    vmrghh v1, v10, v11
-; CHECK-BE-NEXT:    vmrghh v6, v12, v13
-; CHECK-BE-NEXT:    vmrghh v7, v14, v15
+; CHECK-BE-NEXT:    mfvsrwz r5, f6
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    lxv vs0, 112(r4)
+; CHECK-BE-NEXT:    vmrghh v2, v2, v3
+; CHECK-BE-NEXT:    mtvsrd v3, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f7
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f4
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v0, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f2
+; CHECK-BE-NEXT:    lxv vs2, 96(r4)
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v1, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    lxv vs1, 80(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    vmrghh v4, v4, v1
+; CHECK-BE-NEXT:    mtvsrd v1, r5
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghh v5, v5, v1
+; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 64(r4)
+; CHECK-BE-NEXT:    mfvsrwz r4, f3
+; CHECK-BE-NEXT:    sldi r4, r4, 48
 ; CHECK-BE-NEXT:    vmrghw v3, v5, v4
-; CHECK-BE-NEXT:    vmrghw v4, v1, v0
-; CHECK-BE-NEXT:    vmrghw v5, v7, v6
+; CHECK-BE-NEXT:    xxmrghd vs3, v3, v2
+; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    vmrghh v2, v2, v3
+; CHECK-BE-NEXT:    mfvsrwz r4, f2
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v1, r5
+; CHECK-BE-NEXT:    vmrghh v0, v0, v1
+; CHECK-BE-NEXT:    vmrghw v2, v2, v0
+; CHECK-BE-NEXT:    stxv vs3, 0(r3)
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r4
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd vs0, v3, v2
-; CHECK-BE-NEXT:    xxmrghd vs1, v5, v4
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x double>, <16 x double>* %0, align 128
@@ -675,33 +651,33 @@
 ;
 ; CHECK-P9-LABEL: test2elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscvdpsxws f0, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xscvdpsxws f1, v2
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    mtvsrd f1, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    vmrglh v2, v2, v3
+; CHECK-P9-NEXT:    vmrglh v2, v3, v2
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    xscvdpsxws f0, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    xxswapd vs0, v2
-; CHECK-BE-NEXT:    xscvdpsxws f1, v2
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    vmrghh v2, v2, v3
+; CHECK-BE-NEXT:    vmrghh v2, v3, v2
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -743,56 +719,56 @@
 ;
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs2, vs1
-; CHECK-P9-NEXT:    xxswapd vs3, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r5, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mfvsrwz r4, f2
-; CHECK-P9-NEXT:    mfvsrwz r6, f3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
 ; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    vmrglh v2, v2, v3
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    xxswapd vs2, vs1
-; CHECK-BE-NEXT:    xxswapd vs3, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
-; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f2
-; CHECK-BE-NEXT:    mfvsrwz r6, f3
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghh v2, v2, v3
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -860,103 +836,103 @@
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    lxv vs3, 0(r3)
+; CHECK-P9-NEXT:    xscvdpsxws f4, f3
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    lxv vs0, 48(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 32(r3)
 ; CHECK-P9-NEXT:    lxv vs2, 16(r3)
-; CHECK-P9-NEXT:    lxv vs3, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs4, vs3
-; CHECK-P9-NEXT:    xxswapd vs5, vs2
-; CHECK-P9-NEXT:    xxswapd vs6, vs1
-; CHECK-P9-NEXT:    xxswapd vs7, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f3
-; CHECK-P9-NEXT:    mfvsrwz r5, f2
-; CHECK-P9-NEXT:    mfvsrwz r7, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mfvsrwz r4, f4
-; CHECK-P9-NEXT:    mfvsrwz r6, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs4
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
 ; CHECK-P9-NEXT:    vmrglh v2, v2, v3
-; CHECK-P9-NEXT:    vmrglh v3, v4, v5
-; CHECK-P9-NEXT:    vmrglh v4, v0, v1
-; CHECK-P9-NEXT:    vmrglh v5, v6, v7
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
 ; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xscvdpsxws f4, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    lxv vs2, 32(r3)
-; CHECK-BE-NEXT:    lxv vs3, 48(r3)
-; CHECK-BE-NEXT:    xxswapd vs4, vs3
-; CHECK-BE-NEXT:    xxswapd vs5, vs2
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r3, f3
-; CHECK-BE-NEXT:    mfvsrwz r5, f2
-; CHECK-BE-NEXT:    mfvsrwz r7, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
 ; CHECK-BE-NEXT:    sldi r3, r3, 48
-; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
-; CHECK-BE-NEXT:    mfvsrwz r4, f4
-; CHECK-BE-NEXT:    mfvsrwz r6, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    sldi r4, r4, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghh v2, v2, v3
-; CHECK-BE-NEXT:    vmrghh v3, v4, v5
-; CHECK-BE-NEXT:    vmrghh v4, v0, v1
-; CHECK-BE-NEXT:    vmrghh v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 48
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -1075,226 +1051,202 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 48(r4)
-; CHECK-P9-NEXT:    lxv vs4, 32(r4)
-; CHECK-P9-NEXT:    lxv vs5, 16(r4)
-; CHECK-P9-NEXT:    lxv vs6, 0(r4)
+; CHECK-P9-NEXT:    lxv vs4, 0(r4)
+; CHECK-P9-NEXT:    lxv vs3, 16(r4)
+; CHECK-P9-NEXT:    lxv vs2, 32(r4)
+; CHECK-P9-NEXT:    xscvdpsxws f5, f4
+; CHECK-P9-NEXT:    lxv vs1, 48(r4)
+; CHECK-P9-NEXT:    xscvdpsxws f6, f3
+; CHECK-P9-NEXT:    lxv vs0, 64(r4)
+; CHECK-P9-NEXT:    xscvdpsxws f7, f2
+; CHECK-P9-NEXT:    xscvdpsxws f8, f1
+; CHECK-P9-NEXT:    xxswapd vs4, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f4, f4
+; CHECK-P9-NEXT:    mfvsrwz r5, f5
+; CHECK-P9-NEXT:    xscvdpsxws f9, f0
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    mtvsrd f5, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f6
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mtvsrd f6, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f7
+; CHECK-P9-NEXT:    mtvsrd f7, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f8
+; CHECK-P9-NEXT:    mtvsrd f8, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f9
+; CHECK-P9-NEXT:    mtvsrd f9, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f3
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    xxswapd v2, vs5
+; CHECK-P9-NEXT:    xxswapd v5, vs8
+; CHECK-P9-NEXT:    xxswapd v0, vs9
+; CHECK-P9-NEXT:    mtvsrd f3, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r5
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    xxswapd v1, vs2
+; CHECK-P9-NEXT:    lxv vs2, 80(r4)
+; CHECK-P9-NEXT:    xxswapd v3, vs4
+; CHECK-P9-NEXT:    vmrglh v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs6
+; CHECK-P9-NEXT:    xxswapd v4, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    mfvsrwz r5, f1
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs7
+; CHECK-P9-NEXT:    mtvsrd f1, r5
+; CHECK-P9-NEXT:    mfvsrwz r5, f0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v1
+; CHECK-P9-NEXT:    xxswapd v1, vs1
+; CHECK-P9-NEXT:    mtvsrd f0, r5
+; CHECK-P9-NEXT:    vmrglh v5, v5, v1
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    xxswapd v1, vs0
 ; CHECK-P9-NEXT:    lxv vs0, 112(r4)
 ; CHECK-P9-NEXT:    lxv vs1, 96(r4)
-; CHECK-P9-NEXT:    lxv vs3, 80(r4)
-; CHECK-P9-NEXT:    lxv vs7, 64(r4)
-; CHECK-P9-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxswapd vs8, vs6
-; CHECK-P9-NEXT:    xxswapd vs9, vs5
-; CHECK-P9-NEXT:    xxswapd vs10, vs4
-; CHECK-P9-NEXT:    xxswapd vs11, vs2
-; CHECK-P9-NEXT:    xxswapd vs12, vs7
-; CHECK-P9-NEXT:    xxswapd vs13, vs3
-; CHECK-P9-NEXT:    xxswapd v2, vs1
-; CHECK-P9-NEXT:    xxswapd v3, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
-; CHECK-P9-NEXT:    xscvdpsxws f9, f9
-; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
-; CHECK-P9-NEXT:    mfvsrwz r4, f6
-; CHECK-P9-NEXT:    mfvsrwz r5, f5
-; CHECK-P9-NEXT:    mfvsrwz r6, f4
-; CHECK-P9-NEXT:    mfvsrwz r7, f2
-; CHECK-P9-NEXT:    mfvsrwz r12, f7
-; CHECK-P9-NEXT:    mfvsrwz r0, f3
-; CHECK-P9-NEXT:    mfvsrwz r30, f1
-; CHECK-P9-NEXT:    mfvsrwz r29, f0
-; CHECK-P9-NEXT:    mfvsrwz r8, f8
-; CHECK-P9-NEXT:    mfvsrwz r9, f9
-; CHECK-P9-NEXT:    mfvsrwz r10, f10
-; CHECK-P9-NEXT:    mfvsrwz r11, f11
-; CHECK-P9-NEXT:    mfvsrwz r28, f12
-; CHECK-P9-NEXT:    mfvsrwz r27, f13
-; CHECK-P9-NEXT:    mfvsrwz r26, v2
-; CHECK-P9-NEXT:    mfvsrwz r25, v3
-; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    mtvsrd f1, r5
-; CHECK-P9-NEXT:    mtvsrd f2, r6
-; CHECK-P9-NEXT:    mtvsrd f3, r7
-; CHECK-P9-NEXT:    mtvsrd f8, r12
-; CHECK-P9-NEXT:    mtvsrd f9, r0
-; CHECK-P9-NEXT:    mtvsrd f10, r30
-; CHECK-P9-NEXT:    mtvsrd f11, r29
-; CHECK-P9-NEXT:    mtvsrd f4, r8
-; CHECK-P9-NEXT:    mtvsrd f5, r9
-; CHECK-P9-NEXT:    mtvsrd f6, r10
-; CHECK-P9-NEXT:    mtvsrd f7, r11
-; CHECK-P9-NEXT:    mtvsrd f12, r28
-; CHECK-P9-NEXT:    mtvsrd f13, r27
-; CHECK-P9-NEXT:    mtvsrd v2, r26
-; CHECK-P9-NEXT:    mtvsrd v3, r25
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs1
-; CHECK-P9-NEXT:    xxswapd v0, vs2
-; CHECK-P9-NEXT:    xxswapd v1, vs3
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v6, vs4
-; CHECK-P9-NEXT:    xxswapd v7, vs5
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v8, vs6
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs8
-; CHECK-P9-NEXT:    xxswapd v11, vs12
-; CHECK-P9-NEXT:    xxswapd v12, vs9
-; CHECK-P9-NEXT:    xxswapd v13, vs13
-; CHECK-P9-NEXT:    xxswapd v14, vs10
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs11
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglh v4, v4, v6
-; CHECK-P9-NEXT:    vmrglh v5, v5, v7
-; CHECK-P9-NEXT:    vmrglh v0, v0, v8
-; CHECK-P9-NEXT:    vmrglh v1, v1, v9
-; CHECK-P9-NEXT:    vmrglh v6, v10, v11
-; CHECK-P9-NEXT:    vmrglh v7, v12, v13
-; CHECK-P9-NEXT:    vmrglh v2, v14, v2
-; CHECK-P9-NEXT:    vmrglh v3, v15, v3
-; CHECK-P9-NEXT:    vmrglw v4, v5, v4
-; CHECK-P9-NEXT:    vmrglw v5, v1, v0
-; CHECK-P9-NEXT:    vmrglw v0, v7, v6
+; CHECK-P9-NEXT:    mfvsrwz r4, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f2
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
-; CHECK-P9-NEXT:    xxmrgld vs0, v5, v4
-; CHECK-P9-NEXT:    xxmrgld vs1, v2, v0
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    vmrglw v3, v5, v4
+; CHECK-P9-NEXT:    xxmrgld vs4, v3, v2
+; CHECK-P9-NEXT:    xxswapd v2, vs3
+; CHECK-P9-NEXT:    vmrglh v0, v0, v1
+; CHECK-P9-NEXT:    mtvsrd f2, r4
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r4, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r4, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r4
+; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    vmrglh v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    vmrglh v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    vmrglw v2, v2, v0
+; CHECK-P9-NEXT:    mtvsrd f0, r4
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglh v4, v4, v5
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld vs0, v3, v2
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    stxv vs4, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 0(r4)
-; CHECK-BE-NEXT:    lxv vs4, 16(r4)
-; CHECK-BE-NEXT:    lxv vs5, 32(r4)
-; CHECK-BE-NEXT:    lxv vs6, 48(r4)
-; CHECK-BE-NEXT:    lxv vs0, 64(r4)
-; CHECK-BE-NEXT:    lxv vs1, 80(r4)
-; CHECK-BE-NEXT:    lxv vs3, 96(r4)
-; CHECK-BE-NEXT:    lxv vs7, 112(r4)
-; CHECK-BE-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxswapd vs8, vs6
-; CHECK-BE-NEXT:    xxswapd vs9, vs5
-; CHECK-BE-NEXT:    xxswapd vs10, vs4
-; CHECK-BE-NEXT:    xxswapd vs11, vs2
-; CHECK-BE-NEXT:    xxswapd vs12, vs7
-; CHECK-BE-NEXT:    xxswapd vs13, vs3
-; CHECK-BE-NEXT:    xxswapd v2, vs1
-; CHECK-BE-NEXT:    xxswapd v3, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f5, f4
+; CHECK-BE-NEXT:    xxswapd vs4, vs4
+; CHECK-BE-NEXT:    lxv vs3, 32(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f6, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
 ; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r4, f6
 ; CHECK-BE-NEXT:    mfvsrwz r5, f5
-; CHECK-BE-NEXT:    mfvsrwz r6, f4
-; CHECK-BE-NEXT:    mfvsrwz r7, f2
-; CHECK-BE-NEXT:    mfvsrwz r12, f7
-; CHECK-BE-NEXT:    mfvsrwz r0, f3
-; CHECK-BE-NEXT:    mfvsrwz r30, f1
-; CHECK-BE-NEXT:    mfvsrwz r29, f0
-; CHECK-BE-NEXT:    mfvsrwz r8, f8
-; CHECK-BE-NEXT:    mfvsrwz r9, f9
-; CHECK-BE-NEXT:    mfvsrwz r10, f10
-; CHECK-BE-NEXT:    mfvsrwz r11, f11
-; CHECK-BE-NEXT:    mfvsrwz r28, f12
-; CHECK-BE-NEXT:    mfvsrwz r27, f13
-; CHECK-BE-NEXT:    mfvsrwz r26, v2
-; CHECK-BE-NEXT:    mfvsrwz r25, v3
-; CHECK-BE-NEXT:    sldi r4, r4, 48
 ; CHECK-BE-NEXT:    sldi r5, r5, 48
-; CHECK-BE-NEXT:    sldi r6, r6, 48
-; CHECK-BE-NEXT:    sldi r7, r7, 48
-; CHECK-BE-NEXT:    sldi r12, r12, 48
-; CHECK-BE-NEXT:    sldi r0, r0, 48
-; CHECK-BE-NEXT:    sldi r30, r30, 48
-; CHECK-BE-NEXT:    sldi r29, r29, 48
-; CHECK-BE-NEXT:    sldi r8, r8, 48
-; CHECK-BE-NEXT:    sldi r9, r9, 48
-; CHECK-BE-NEXT:    sldi r10, r10, 48
-; CHECK-BE-NEXT:    sldi r11, r11, 48
-; CHECK-BE-NEXT:    sldi r28, r28, 48
-; CHECK-BE-NEXT:    sldi r27, r27, 48
-; CHECK-BE-NEXT:    sldi r26, r26, 48
-; CHECK-BE-NEXT:    sldi r25, r25, 48
-; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    lxv vs2, 16(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f7, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v2, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f4
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    lxv vs1, 0(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f4, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
 ; CHECK-BE-NEXT:    mtvsrd v3, r5
-; CHECK-BE-NEXT:    mtvsrd v4, r6
-; CHECK-BE-NEXT:    mtvsrd v5, r7
-; CHECK-BE-NEXT:    mtvsrd v8, r12
-; CHECK-BE-NEXT:    mtvsrd v10, r0
-; CHECK-BE-NEXT:    mtvsrd v12, r30
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v0, r8
-; CHECK-BE-NEXT:    mtvsrd v1, r9
-; CHECK-BE-NEXT:    mtvsrd v6, r10
-; CHECK-BE-NEXT:    mtvsrd v7, r11
-; CHECK-BE-NEXT:    mtvsrd v9, r28
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r27
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v13, r26
-; CHECK-BE-NEXT:    mtvsrd v14, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v15, r25
-; CHECK-BE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    vmrghh v2, v2, v0
-; CHECK-BE-NEXT:    vmrghh v3, v3, v1
-; CHECK-BE-NEXT:    vmrghh v4, v4, v6
-; CHECK-BE-NEXT:    vmrghh v5, v5, v7
-; CHECK-BE-NEXT:    vmrghh v0, v8, v9
-; CHECK-BE-NEXT:    vmrghh v1, v10, v11
-; CHECK-BE-NEXT:    vmrghh v6, v12, v13
-; CHECK-BE-NEXT:    vmrghh v7, v14, v15
+; CHECK-BE-NEXT:    mfvsrwz r5, f6
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    lxv vs0, 112(r4)
+; CHECK-BE-NEXT:    vmrghh v2, v2, v3
+; CHECK-BE-NEXT:    mtvsrd v3, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f7
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mtvsrd v4, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f4
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f3
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v0, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f2
+; CHECK-BE-NEXT:    lxv vs2, 96(r4)
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v1, r5
+; CHECK-BE-NEXT:    mfvsrwz r5, f1
+; CHECK-BE-NEXT:    lxv vs1, 80(r4)
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    vmrghh v4, v4, v1
+; CHECK-BE-NEXT:    mtvsrd v1, r5
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    vmrghh v5, v5, v1
+; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 64(r4)
+; CHECK-BE-NEXT:    mfvsrwz r4, f3
+; CHECK-BE-NEXT:    sldi r4, r4, 48
 ; CHECK-BE-NEXT:    vmrghw v3, v5, v4
-; CHECK-BE-NEXT:    vmrghw v4, v1, v0
-; CHECK-BE-NEXT:    vmrghw v5, v7, v6
+; CHECK-BE-NEXT:    xxmrghd vs3, v3, v2
+; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    vmrghh v2, v2, v3
+; CHECK-BE-NEXT:    mfvsrwz r4, f2
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v3, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    vmrghh v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r4, f1
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v4, r4
+; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    sldi r5, r5, 48
+; CHECK-BE-NEXT:    mtvsrd v1, r5
+; CHECK-BE-NEXT:    vmrghh v0, v0, v1
+; CHECK-BE-NEXT:    vmrghw v2, v2, v0
+; CHECK-BE-NEXT:    stxv vs3, 0(r3)
+; CHECK-BE-NEXT:    sldi r4, r4, 48
+; CHECK-BE-NEXT:    mtvsrd v5, r4
+; CHECK-BE-NEXT:    vmrghh v4, v4, v5
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd vs0, v3, v2
-; CHECK-BE-NEXT:    xxmrghd vs1, v5, v4
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x double>, <16 x double>* %0, align 128
diff --git a/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll b/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll
index 5f35f80..f3431f8 100644
--- a/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll
@@ -28,27 +28,27 @@
 ;
 ; CHECK-P9-LABEL: test2elt:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscvdpuxws f0, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xscvdpuxws f1, v2
+; CHECK-P9-NEXT:    mtvsrws v3, r3
 ; CHECK-P9-NEXT:    xscvdpuxws f0, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
-; CHECK-P9-NEXT:    mtvsrws v3, r4
-; CHECK-P9-NEXT:    vmrglw v2, v2, v3
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    xscvdpuxws f0, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    xxswapd vs0, v2
-; CHECK-BE-NEXT:    xscvdpuxws f1, v2
+; CHECK-BE-NEXT:    mtvsrws v3, r3
 ; CHECK-BE-NEXT:    xscvdpuxws f0, f0
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
-; CHECK-BE-NEXT:    mfvsrwz r4, f0
-; CHECK-BE-NEXT:    mtvsrws v3, r4
-; CHECK-BE-NEXT:    vmrghw v2, v2, v3
+; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -129,42 +129,42 @@
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 32(r4)
-; CHECK-P9-NEXT:    lxv vs1, 48(r4)
 ; CHECK-P9-NEXT:    lxv vs2, 0(r4)
 ; CHECK-P9-NEXT:    lxv vs3, 16(r4)
 ; CHECK-P9-NEXT:    xxmrgld vs4, vs3, vs2
 ; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-P9-NEXT:    lxv vs0, 32(r4)
+; CHECK-P9-NEXT:    lxv vs1, 48(r4)
 ; CHECK-P9-NEXT:    xvcvdpuxws v2, vs4
 ; CHECK-P9-NEXT:    xvcvdpuxws v3, vs2
-; CHECK-P9-NEXT:    xvcvdpuxws v4, vs3
-; CHECK-P9-NEXT:    xvcvdpuxws v5, vs0
+; CHECK-P9-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-P9-NEXT:    xvcvdpuxws v4, vs0
 ; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
+; CHECK-P9-NEXT:    xvcvdpuxws v3, vs2
 ; CHECK-P9-NEXT:    stxv v2, 0(r3)
+; CHECK-P9-NEXT:    vmrgew v3, v4, v3
+; CHECK-P9-NEXT:    stxv v3, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 48(r4)
-; CHECK-BE-NEXT:    lxv vs1, 32(r4)
 ; CHECK-BE-NEXT:    lxv vs2, 16(r4)
 ; CHECK-BE-NEXT:    lxv vs3, 0(r4)
 ; CHECK-BE-NEXT:    xxmrgld vs4, vs3, vs2
 ; CHECK-BE-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-BE-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-BE-NEXT:    lxv vs0, 48(r4)
+; CHECK-BE-NEXT:    lxv vs1, 32(r4)
 ; CHECK-BE-NEXT:    xvcvdpuxws v2, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxws v3, vs2
-; CHECK-BE-NEXT:    xvcvdpuxws v4, vs3
-; CHECK-BE-NEXT:    xvcvdpuxws v5, vs0
+; CHECK-BE-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-BE-NEXT:    xvcvdpuxws v4, vs0
 ; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    xvcvdpuxws v3, vs2
 ; CHECK-BE-NEXT:    stxv v2, 0(r3)
+; CHECK-BE-NEXT:    vmrgew v3, v4, v3
+; CHECK-BE-NEXT:    stxv v3, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x double>, <8 x double>* %0, align 64
@@ -227,74 +227,74 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 32(r4)
-; CHECK-P9-NEXT:    lxv vs1, 48(r4)
-; CHECK-P9-NEXT:    lxv vs2, 0(r4)
-; CHECK-P9-NEXT:    lxv vs3, 16(r4)
-; CHECK-P9-NEXT:    lxv vs4, 96(r4)
-; CHECK-P9-NEXT:    lxv vs5, 112(r4)
-; CHECK-P9-NEXT:    lxv vs6, 64(r4)
-; CHECK-P9-NEXT:    lxv vs7, 80(r4)
-; CHECK-P9-NEXT:    xxmrgld vs8, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrgld vs1, vs7, vs6
+; CHECK-P9-NEXT:    lxv vs6, 0(r4)
+; CHECK-P9-NEXT:    lxv vs7, 16(r4)
+; CHECK-P9-NEXT:    xxmrgld vs8, vs7, vs6
 ; CHECK-P9-NEXT:    xxmrghd vs6, vs7, vs6
+; CHECK-P9-NEXT:    lxv vs4, 32(r4)
+; CHECK-P9-NEXT:    lxv vs5, 48(r4)
 ; CHECK-P9-NEXT:    xxmrgld vs7, vs5, vs4
 ; CHECK-P9-NEXT:    xxmrghd vs4, vs5, vs4
 ; CHECK-P9-NEXT:    xvcvdpuxws v2, vs8
-; CHECK-P9-NEXT:    xvcvdpuxws v3, vs2
-; CHECK-P9-NEXT:    xvcvdpuxws v4, vs3
-; CHECK-P9-NEXT:    xvcvdpuxws v5, vs0
-; CHECK-P9-NEXT:    xvcvdpuxws v0, vs1
-; CHECK-P9-NEXT:    xvcvdpuxws v1, vs6
-; CHECK-P9-NEXT:    xvcvdpuxws v6, vs7
-; CHECK-P9-NEXT:    xvcvdpuxws v7, vs4
+; CHECK-P9-NEXT:    xvcvdpuxws v3, vs6
+; CHECK-P9-NEXT:    lxv vs2, 64(r4)
+; CHECK-P9-NEXT:    lxv vs3, 80(r4)
+; CHECK-P9-NEXT:    xvcvdpuxws v4, vs7
 ; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
+; CHECK-P9-NEXT:    xvcvdpuxws v3, vs4
+; CHECK-P9-NEXT:    xxmrgld vs4, vs3, vs2
+; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
+; CHECK-P9-NEXT:    lxv vs0, 96(r4)
+; CHECK-P9-NEXT:    lxv vs1, 112(r4)
 ; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
+; CHECK-P9-NEXT:    xvcvdpuxws v5, vs2
+; CHECK-P9-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-P9-NEXT:    xvcvdpuxws v0, vs0
+; CHECK-P9-NEXT:    vmrgew v3, v3, v4
+; CHECK-P9-NEXT:    xvcvdpuxws v4, vs4
+; CHECK-P9-NEXT:    stxv v3, 16(r3)
+; CHECK-P9-NEXT:    vmrgew v4, v5, v4
 ; CHECK-P9-NEXT:    stxv v4, 32(r3)
+; CHECK-P9-NEXT:    xvcvdpuxws v5, vs2
+; CHECK-P9-NEXT:    vmrgew v5, v0, v5
+; CHECK-P9-NEXT:    stxv v5, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 48(r4)
-; CHECK-BE-NEXT:    lxv vs1, 32(r4)
-; CHECK-BE-NEXT:    lxv vs2, 16(r4)
-; CHECK-BE-NEXT:    lxv vs3, 0(r4)
-; CHECK-BE-NEXT:    lxv vs4, 112(r4)
-; CHECK-BE-NEXT:    lxv vs5, 96(r4)
-; CHECK-BE-NEXT:    lxv vs6, 80(r4)
-; CHECK-BE-NEXT:    lxv vs7, 64(r4)
-; CHECK-BE-NEXT:    xxmrgld vs8, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-BE-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrgld vs1, vs7, vs6
+; CHECK-BE-NEXT:    lxv vs6, 16(r4)
+; CHECK-BE-NEXT:    lxv vs7, 0(r4)
+; CHECK-BE-NEXT:    xxmrgld vs8, vs7, vs6
 ; CHECK-BE-NEXT:    xxmrghd vs6, vs7, vs6
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    lxv vs5, 32(r4)
 ; CHECK-BE-NEXT:    xxmrgld vs7, vs5, vs4
 ; CHECK-BE-NEXT:    xxmrghd vs4, vs5, vs4
 ; CHECK-BE-NEXT:    xvcvdpuxws v2, vs8
-; CHECK-BE-NEXT:    xvcvdpuxws v3, vs2
-; CHECK-BE-NEXT:    xvcvdpuxws v4, vs3
-; CHECK-BE-NEXT:    xvcvdpuxws v5, vs0
-; CHECK-BE-NEXT:    xvcvdpuxws v0, vs1
-; CHECK-BE-NEXT:    xvcvdpuxws v1, vs6
-; CHECK-BE-NEXT:    xvcvdpuxws v6, vs7
-; CHECK-BE-NEXT:    xvcvdpuxws v7, vs4
+; CHECK-BE-NEXT:    xvcvdpuxws v3, vs6
+; CHECK-BE-NEXT:    lxv vs2, 80(r4)
+; CHECK-BE-NEXT:    lxv vs3, 64(r4)
+; CHECK-BE-NEXT:    xvcvdpuxws v4, vs7
 ; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    vmrgew v4, v1, v0
-; CHECK-BE-NEXT:    vmrgew v5, v7, v6
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    xvcvdpuxws v3, vs4
+; CHECK-BE-NEXT:    xxmrgld vs4, vs3, vs2
+; CHECK-BE-NEXT:    xxmrghd vs2, vs3, vs2
+; CHECK-BE-NEXT:    lxv vs0, 112(r4)
+; CHECK-BE-NEXT:    lxv vs1, 96(r4)
 ; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
+; CHECK-BE-NEXT:    xvcvdpuxws v5, vs2
+; CHECK-BE-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-BE-NEXT:    xvcvdpuxws v0, vs0
+; CHECK-BE-NEXT:    vmrgew v3, v3, v4
+; CHECK-BE-NEXT:    xvcvdpuxws v4, vs4
+; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    vmrgew v4, v5, v4
 ; CHECK-BE-NEXT:    stxv v4, 32(r3)
+; CHECK-BE-NEXT:    xvcvdpuxws v5, vs2
+; CHECK-BE-NEXT:    vmrgew v5, v0, v5
+; CHECK-BE-NEXT:    stxv v5, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x double>, <16 x double>* %0, align 128
@@ -322,27 +322,27 @@
 ;
 ; CHECK-P9-LABEL: test2elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscvdpsxws f0, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xscvdpsxws f1, v2
+; CHECK-P9-NEXT:    mtvsrws v3, r3
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
-; CHECK-P9-NEXT:    mtvsrws v3, r4
-; CHECK-P9-NEXT:    vmrglw v2, v2, v3
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    xscvdpsxws f0, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    xxswapd vs0, v2
-; CHECK-BE-NEXT:    xscvdpsxws f1, v2
+; CHECK-BE-NEXT:    mtvsrws v3, r3
 ; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
-; CHECK-BE-NEXT:    mfvsrwz r4, f0
-; CHECK-BE-NEXT:    mtvsrws v3, r4
-; CHECK-BE-NEXT:    vmrghw v2, v2, v3
+; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -423,42 +423,42 @@
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 32(r4)
-; CHECK-P9-NEXT:    lxv vs1, 48(r4)
 ; CHECK-P9-NEXT:    lxv vs2, 0(r4)
 ; CHECK-P9-NEXT:    lxv vs3, 16(r4)
 ; CHECK-P9-NEXT:    xxmrgld vs4, vs3, vs2
 ; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-P9-NEXT:    lxv vs0, 32(r4)
+; CHECK-P9-NEXT:    lxv vs1, 48(r4)
 ; CHECK-P9-NEXT:    xvcvdpsxws v2, vs4
 ; CHECK-P9-NEXT:    xvcvdpsxws v3, vs2
-; CHECK-P9-NEXT:    xvcvdpsxws v4, vs3
-; CHECK-P9-NEXT:    xvcvdpsxws v5, vs0
+; CHECK-P9-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-P9-NEXT:    xvcvdpsxws v4, vs0
 ; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
+; CHECK-P9-NEXT:    xvcvdpsxws v3, vs2
 ; CHECK-P9-NEXT:    stxv v2, 0(r3)
+; CHECK-P9-NEXT:    vmrgew v3, v4, v3
+; CHECK-P9-NEXT:    stxv v3, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 48(r4)
-; CHECK-BE-NEXT:    lxv vs1, 32(r4)
 ; CHECK-BE-NEXT:    lxv vs2, 16(r4)
 ; CHECK-BE-NEXT:    lxv vs3, 0(r4)
 ; CHECK-BE-NEXT:    xxmrgld vs4, vs3, vs2
 ; CHECK-BE-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-BE-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-BE-NEXT:    lxv vs0, 48(r4)
+; CHECK-BE-NEXT:    lxv vs1, 32(r4)
 ; CHECK-BE-NEXT:    xvcvdpsxws v2, vs4
 ; CHECK-BE-NEXT:    xvcvdpsxws v3, vs2
-; CHECK-BE-NEXT:    xvcvdpsxws v4, vs3
-; CHECK-BE-NEXT:    xvcvdpsxws v5, vs0
+; CHECK-BE-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-BE-NEXT:    xvcvdpsxws v4, vs0
 ; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    xvcvdpsxws v3, vs2
 ; CHECK-BE-NEXT:    stxv v2, 0(r3)
+; CHECK-BE-NEXT:    vmrgew v3, v4, v3
+; CHECK-BE-NEXT:    stxv v3, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x double>, <8 x double>* %0, align 64
@@ -521,74 +521,74 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 32(r4)
-; CHECK-P9-NEXT:    lxv vs1, 48(r4)
-; CHECK-P9-NEXT:    lxv vs2, 0(r4)
-; CHECK-P9-NEXT:    lxv vs3, 16(r4)
-; CHECK-P9-NEXT:    lxv vs4, 96(r4)
-; CHECK-P9-NEXT:    lxv vs5, 112(r4)
-; CHECK-P9-NEXT:    lxv vs6, 64(r4)
-; CHECK-P9-NEXT:    lxv vs7, 80(r4)
-; CHECK-P9-NEXT:    xxmrgld vs8, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrgld vs1, vs7, vs6
+; CHECK-P9-NEXT:    lxv vs6, 0(r4)
+; CHECK-P9-NEXT:    lxv vs7, 16(r4)
+; CHECK-P9-NEXT:    xxmrgld vs8, vs7, vs6
 ; CHECK-P9-NEXT:    xxmrghd vs6, vs7, vs6
+; CHECK-P9-NEXT:    lxv vs4, 32(r4)
+; CHECK-P9-NEXT:    lxv vs5, 48(r4)
 ; CHECK-P9-NEXT:    xxmrgld vs7, vs5, vs4
 ; CHECK-P9-NEXT:    xxmrghd vs4, vs5, vs4
 ; CHECK-P9-NEXT:    xvcvdpsxws v2, vs8
-; CHECK-P9-NEXT:    xvcvdpsxws v3, vs2
-; CHECK-P9-NEXT:    xvcvdpsxws v4, vs3
-; CHECK-P9-NEXT:    xvcvdpsxws v5, vs0
-; CHECK-P9-NEXT:    xvcvdpsxws v0, vs1
-; CHECK-P9-NEXT:    xvcvdpsxws v1, vs6
-; CHECK-P9-NEXT:    xvcvdpsxws v6, vs7
-; CHECK-P9-NEXT:    xvcvdpsxws v7, vs4
+; CHECK-P9-NEXT:    xvcvdpsxws v3, vs6
+; CHECK-P9-NEXT:    lxv vs2, 64(r4)
+; CHECK-P9-NEXT:    lxv vs3, 80(r4)
+; CHECK-P9-NEXT:    xvcvdpsxws v4, vs7
 ; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
+; CHECK-P9-NEXT:    xvcvdpsxws v3, vs4
+; CHECK-P9-NEXT:    xxmrgld vs4, vs3, vs2
+; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
+; CHECK-P9-NEXT:    lxv vs0, 96(r4)
+; CHECK-P9-NEXT:    lxv vs1, 112(r4)
 ; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
+; CHECK-P9-NEXT:    xvcvdpsxws v5, vs2
+; CHECK-P9-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-P9-NEXT:    xvcvdpsxws v0, vs0
+; CHECK-P9-NEXT:    vmrgew v3, v3, v4
+; CHECK-P9-NEXT:    xvcvdpsxws v4, vs4
+; CHECK-P9-NEXT:    stxv v3, 16(r3)
+; CHECK-P9-NEXT:    vmrgew v4, v5, v4
 ; CHECK-P9-NEXT:    stxv v4, 32(r3)
+; CHECK-P9-NEXT:    xvcvdpsxws v5, vs2
+; CHECK-P9-NEXT:    vmrgew v5, v0, v5
+; CHECK-P9-NEXT:    stxv v5, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 48(r4)
-; CHECK-BE-NEXT:    lxv vs1, 32(r4)
-; CHECK-BE-NEXT:    lxv vs2, 16(r4)
-; CHECK-BE-NEXT:    lxv vs3, 0(r4)
-; CHECK-BE-NEXT:    lxv vs4, 112(r4)
-; CHECK-BE-NEXT:    lxv vs5, 96(r4)
-; CHECK-BE-NEXT:    lxv vs6, 80(r4)
-; CHECK-BE-NEXT:    lxv vs7, 64(r4)
-; CHECK-BE-NEXT:    xxmrgld vs8, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-BE-NEXT:    xxmrgld vs3, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrgld vs1, vs7, vs6
+; CHECK-BE-NEXT:    lxv vs6, 16(r4)
+; CHECK-BE-NEXT:    lxv vs7, 0(r4)
+; CHECK-BE-NEXT:    xxmrgld vs8, vs7, vs6
 ; CHECK-BE-NEXT:    xxmrghd vs6, vs7, vs6
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    lxv vs5, 32(r4)
 ; CHECK-BE-NEXT:    xxmrgld vs7, vs5, vs4
 ; CHECK-BE-NEXT:    xxmrghd vs4, vs5, vs4
 ; CHECK-BE-NEXT:    xvcvdpsxws v2, vs8
-; CHECK-BE-NEXT:    xvcvdpsxws v3, vs2
-; CHECK-BE-NEXT:    xvcvdpsxws v4, vs3
-; CHECK-BE-NEXT:    xvcvdpsxws v5, vs0
-; CHECK-BE-NEXT:    xvcvdpsxws v0, vs1
-; CHECK-BE-NEXT:    xvcvdpsxws v1, vs6
-; CHECK-BE-NEXT:    xvcvdpsxws v6, vs7
-; CHECK-BE-NEXT:    xvcvdpsxws v7, vs4
+; CHECK-BE-NEXT:    xvcvdpsxws v3, vs6
+; CHECK-BE-NEXT:    lxv vs2, 80(r4)
+; CHECK-BE-NEXT:    lxv vs3, 64(r4)
+; CHECK-BE-NEXT:    xvcvdpsxws v4, vs7
 ; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    vmrgew v4, v1, v0
-; CHECK-BE-NEXT:    vmrgew v5, v7, v6
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    xvcvdpsxws v3, vs4
+; CHECK-BE-NEXT:    xxmrgld vs4, vs3, vs2
+; CHECK-BE-NEXT:    xxmrghd vs2, vs3, vs2
+; CHECK-BE-NEXT:    lxv vs0, 112(r4)
+; CHECK-BE-NEXT:    lxv vs1, 96(r4)
 ; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
+; CHECK-BE-NEXT:    xvcvdpsxws v5, vs2
+; CHECK-BE-NEXT:    xxmrgld vs2, vs1, vs0
+; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
+; CHECK-BE-NEXT:    xvcvdpsxws v0, vs0
+; CHECK-BE-NEXT:    vmrgew v3, v3, v4
+; CHECK-BE-NEXT:    xvcvdpsxws v4, vs4
+; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    vmrgew v4, v5, v4
 ; CHECK-BE-NEXT:    stxv v4, 32(r3)
+; CHECK-BE-NEXT:    xvcvdpsxws v5, vs2
+; CHECK-BE-NEXT:    vmrgew v5, v0, v5
+; CHECK-BE-NEXT:    stxv v5, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x double>, <16 x double>* %0, align 128
diff --git a/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll b/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll
index ef7b9c1..5e4751e 100644
--- a/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll
@@ -31,17 +31,17 @@
 ;
 ; CHECK-P9-LABEL: test2elt:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscvdpsxws f0, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xscvdpsxws f1, v2
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    addi r3, r1, -2
-; CHECK-P9-NEXT:    mtvsrd f1, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    vmrglb v2, v2, v3
+; CHECK-P9-NEXT:    vmrglb v2, v3, v2
 ; CHECK-P9-NEXT:    vsldoi v2, v2, v2, 8
 ; CHECK-P9-NEXT:    stxsihx v2, 0, r3
 ; CHECK-P9-NEXT:    lhz r3, -2(r1)
@@ -49,17 +49,17 @@
 ;
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    xscvdpsxws f0, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    xxswapd vs0, v2
-; CHECK-BE-NEXT:    xscvdpsxws f1, v2
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    addi r3, r1, -2
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    vmrghb v2, v2, v3
+; CHECK-BE-NEXT:    vmrghb v2, v3, v2
 ; CHECK-BE-NEXT:    vsldoi v2, v2, v2, 10
 ; CHECK-BE-NEXT:    stxsihx v2, 0, r3
 ; CHECK-BE-NEXT:    lhz r3, -2(r1)
@@ -103,58 +103,58 @@
 ;
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs2, vs1
-; CHECK-P9-NEXT:    xxswapd vs3, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r5, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    mfvsrwz r4, f2
-; CHECK-P9-NEXT:    mfvsrwz r6, f3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
 ; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    vmrglb v2, v2, v3
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    li r3, 0
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    xxswapd vs2, vs1
-; CHECK-BE-NEXT:    xxswapd vs3, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
-; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f2
-; CHECK-BE-NEXT:    mfvsrwz r6, f3
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghb v2, v2, v3
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    li r3, 0
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -224,104 +224,104 @@
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    lxv vs3, 0(r3)
+; CHECK-P9-NEXT:    xscvdpsxws f4, f3
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    lxv vs0, 48(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 32(r3)
 ; CHECK-P9-NEXT:    lxv vs2, 16(r3)
-; CHECK-P9-NEXT:    lxv vs3, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs4, vs3
-; CHECK-P9-NEXT:    xxswapd vs5, vs2
-; CHECK-P9-NEXT:    xxswapd vs6, vs1
-; CHECK-P9-NEXT:    xxswapd vs7, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f3
-; CHECK-P9-NEXT:    mfvsrwz r5, f2
-; CHECK-P9-NEXT:    mfvsrwz r7, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mfvsrwz r4, f4
-; CHECK-P9-NEXT:    mfvsrwz r6, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs4
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
 ; CHECK-P9-NEXT:    vmrglb v2, v2, v3
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
-; CHECK-P9-NEXT:    vmrglb v4, v0, v1
-; CHECK-P9-NEXT:    vmrglb v5, v6, v7
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v5, v4
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xscvdpsxws f4, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    lxv vs2, 32(r3)
-; CHECK-BE-NEXT:    lxv vs3, 48(r3)
-; CHECK-BE-NEXT:    xxswapd vs4, vs3
-; CHECK-BE-NEXT:    xxswapd vs5, vs2
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r3, f3
-; CHECK-BE-NEXT:    mfvsrwz r5, f2
-; CHECK-BE-NEXT:    mfvsrwz r7, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f4
-; CHECK-BE-NEXT:    mfvsrwz r6, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghb v2, v2, v3
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
-; CHECK-BE-NEXT:    vmrghb v4, v0, v1
-; CHECK-BE-NEXT:    vmrghb v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -441,219 +441,199 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 48(r3)
-; CHECK-P9-NEXT:    lxv vs3, 32(r3)
-; CHECK-P9-NEXT:    lxv vs4, 16(r3)
-; CHECK-P9-NEXT:    lxv vs5, 0(r3)
+; CHECK-P9-NEXT:    lxv vs7, 0(r3)
+; CHECK-P9-NEXT:    xscvdpsxws f8, f7
+; CHECK-P9-NEXT:    xxswapd vs7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f7, f7
 ; CHECK-P9-NEXT:    lxv vs0, 112(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 96(r3)
-; CHECK-P9-NEXT:    lxv vs6, 80(r3)
-; CHECK-P9-NEXT:    lxv vs7, 64(r3)
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxswapd vs8, vs5
-; CHECK-P9-NEXT:    xxswapd vs9, vs4
-; CHECK-P9-NEXT:    xxswapd vs10, vs3
-; CHECK-P9-NEXT:    xxswapd vs11, vs2
-; CHECK-P9-NEXT:    xxswapd vs12, vs7
-; CHECK-P9-NEXT:    xxswapd vs13, vs6
-; CHECK-P9-NEXT:    xxswapd v2, vs1
-; CHECK-P9-NEXT:    xxswapd v3, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    lxv vs2, 80(r3)
+; CHECK-P9-NEXT:    lxv vs3, 64(r3)
+; CHECK-P9-NEXT:    lxv vs4, 48(r3)
+; CHECK-P9-NEXT:    lxv vs5, 32(r3)
+; CHECK-P9-NEXT:    lxv vs6, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f8
+; CHECK-P9-NEXT:    mtvsrd f8, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f7
+; CHECK-P9-NEXT:    xxswapd v2, vs8
+; CHECK-P9-NEXT:    mtvsrd f7, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f7, f6
+; CHECK-P9-NEXT:    xxswapd vs6, vs6
 ; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
-; CHECK-P9-NEXT:    xscvdpsxws f9, f9
-; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f7
+; CHECK-P9-NEXT:    mtvsrd f7, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f6
+; CHECK-P9-NEXT:    mtvsrd f6, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs6
+; CHECK-P9-NEXT:    xscvdpsxws f6, f5
+; CHECK-P9-NEXT:    xxswapd vs5, vs5
+; CHECK-P9-NEXT:    xscvdpsxws f5, f5
+; CHECK-P9-NEXT:    mfvsrwz r3, f6
+; CHECK-P9-NEXT:    mtvsrd f6, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f5
-; CHECK-P9-NEXT:    mfvsrwz r4, f4
-; CHECK-P9-NEXT:    mfvsrwz r5, f3
-; CHECK-P9-NEXT:    mfvsrwz r6, f2
-; CHECK-P9-NEXT:    mfvsrwz r11, f7
-; CHECK-P9-NEXT:    mfvsrwz r12, f6
-; CHECK-P9-NEXT:    mfvsrwz r0, f1
-; CHECK-P9-NEXT:    mfvsrwz r30, f0
-; CHECK-P9-NEXT:    mfvsrwz r7, f8
-; CHECK-P9-NEXT:    mfvsrwz r8, f9
-; CHECK-P9-NEXT:    mfvsrwz r9, f10
-; CHECK-P9-NEXT:    mfvsrwz r10, f11
-; CHECK-P9-NEXT:    mfvsrwz r29, f12
-; CHECK-P9-NEXT:    mfvsrwz r28, f13
-; CHECK-P9-NEXT:    mfvsrwz r27, v2
-; CHECK-P9-NEXT:    mfvsrwz r26, v3
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f8, r11
-; CHECK-P9-NEXT:    mtvsrd f9, r12
-; CHECK-P9-NEXT:    mtvsrd f10, r0
-; CHECK-P9-NEXT:    mtvsrd f11, r30
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    mtvsrd f12, r29
-; CHECK-P9-NEXT:    mtvsrd f13, r28
-; CHECK-P9-NEXT:    mtvsrd v2, r27
-; CHECK-P9-NEXT:    mtvsrd v3, r26
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs1
-; CHECK-P9-NEXT:    xxswapd v0, vs2
-; CHECK-P9-NEXT:    xxswapd v1, vs3
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v6, vs4
-; CHECK-P9-NEXT:    xxswapd v7, vs5
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v8, vs6
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs8
-; CHECK-P9-NEXT:    xxswapd v11, vs12
-; CHECK-P9-NEXT:    xxswapd v12, vs9
-; CHECK-P9-NEXT:    xxswapd v13, vs13
-; CHECK-P9-NEXT:    xxswapd v14, vs10
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs11
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglb v4, v4, v6
-; CHECK-P9-NEXT:    vmrglb v5, v5, v7
-; CHECK-P9-NEXT:    vmrglb v0, v0, v8
-; CHECK-P9-NEXT:    vmrglb v1, v1, v9
-; CHECK-P9-NEXT:    vmrglb v6, v10, v11
-; CHECK-P9-NEXT:    vmrglb v7, v12, v13
-; CHECK-P9-NEXT:    vmrglb v2, v14, v2
-; CHECK-P9-NEXT:    vmrglb v3, v15, v3
-; CHECK-P9-NEXT:    vmrglh v4, v5, v4
-; CHECK-P9-NEXT:    vmrglh v5, v1, v0
-; CHECK-P9-NEXT:    vmrglh v0, v7, v6
+; CHECK-P9-NEXT:    vmrglb v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs7
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
-; CHECK-P9-NEXT:    vmrglw v2, v2, v0
-; CHECK-P9-NEXT:    xxmrgld v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs6
+; CHECK-P9-NEXT:    mtvsrd f5, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs5
+; CHECK-P9-NEXT:    xscvdpsxws f5, f4
+; CHECK-P9-NEXT:    xxswapd vs4, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f4, f4
+; CHECK-P9-NEXT:    mfvsrwz r3, f5
+; CHECK-P9-NEXT:    mtvsrd f5, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f4, f3
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs5
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs4
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs3
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    xxswapd v5, vs1
+; CHECK-P9-NEXT:    xxswapd v0, vs0
+; CHECK-P9-NEXT:    vmrglb v5, v5, v0
+; CHECK-P9-NEXT:    vmrglh v4, v5, v4
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 64(r3)
-; CHECK-BE-NEXT:    lxv vs3, 80(r3)
-; CHECK-BE-NEXT:    lxv vs4, 96(r3)
-; CHECK-BE-NEXT:    lxv vs5, 112(r3)
+; CHECK-BE-NEXT:    lxv vs7, 112(r3)
+; CHECK-BE-NEXT:    xscvdpsxws f8, f7
+; CHECK-BE-NEXT:    xxswapd vs7, vs7
+; CHECK-BE-NEXT:    xscvdpsxws f7, f7
+; CHECK-BE-NEXT:    lxv vs6, 96(r3)
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    lxv vs6, 32(r3)
-; CHECK-BE-NEXT:    lxv vs7, 48(r3)
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxswapd vs8, vs5
-; CHECK-BE-NEXT:    xxswapd vs9, vs4
-; CHECK-BE-NEXT:    xxswapd vs10, vs3
-; CHECK-BE-NEXT:    xxswapd vs11, vs2
-; CHECK-BE-NEXT:    xxswapd vs12, vs7
-; CHECK-BE-NEXT:    xxswapd vs13, vs6
-; CHECK-BE-NEXT:    xxswapd v2, vs1
-; CHECK-BE-NEXT:    xxswapd v3, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r3, f5
-; CHECK-BE-NEXT:    mfvsrwz r4, f4
-; CHECK-BE-NEXT:    mfvsrwz r5, f3
-; CHECK-BE-NEXT:    mfvsrwz r6, f2
-; CHECK-BE-NEXT:    mfvsrwz r11, f7
-; CHECK-BE-NEXT:    mfvsrwz r12, f6
-; CHECK-BE-NEXT:    mfvsrwz r0, f1
-; CHECK-BE-NEXT:    mfvsrwz r30, f0
-; CHECK-BE-NEXT:    mfvsrwz r7, f8
-; CHECK-BE-NEXT:    mfvsrwz r8, f9
-; CHECK-BE-NEXT:    mfvsrwz r9, f10
-; CHECK-BE-NEXT:    mfvsrwz r10, f11
-; CHECK-BE-NEXT:    mfvsrwz r29, f12
-; CHECK-BE-NEXT:    mfvsrwz r28, f13
-; CHECK-BE-NEXT:    mfvsrwz r27, v2
-; CHECK-BE-NEXT:    mfvsrwz r26, v3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
+; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    lxv vs4, 64(r3)
+; CHECK-BE-NEXT:    lxv vs5, 80(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f8
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r11, r11, 56
-; CHECK-BE-NEXT:    sldi r12, r12, 56
-; CHECK-BE-NEXT:    sldi r0, r0, 56
-; CHECK-BE-NEXT:    sldi r30, r30, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
-; CHECK-BE-NEXT:    sldi r29, r29, 56
-; CHECK-BE-NEXT:    sldi r28, r28, 56
-; CHECK-BE-NEXT:    sldi r27, r27, 56
-; CHECK-BE-NEXT:    sldi r26, r26, 56
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v8, r11
-; CHECK-BE-NEXT:    mtvsrd v10, r12
-; CHECK-BE-NEXT:    mtvsrd v12, r0
-; CHECK-BE-NEXT:    mtvsrd v14, r30
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    mtvsrd v7, r10
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v9, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r28
-; CHECK-BE-NEXT:    mtvsrd v13, r27
-; CHECK-BE-NEXT:    mtvsrd v15, r26
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    vmrghb v2, v2, v0
-; CHECK-BE-NEXT:    vmrghb v3, v3, v1
-; CHECK-BE-NEXT:    vmrghb v4, v4, v6
-; CHECK-BE-NEXT:    vmrghb v5, v5, v7
-; CHECK-BE-NEXT:    vmrghb v0, v8, v9
-; CHECK-BE-NEXT:    vmrghb v1, v10, v11
-; CHECK-BE-NEXT:    vmrghb v6, v12, v13
-; CHECK-BE-NEXT:    vmrghb v7, v14, v15
+; CHECK-BE-NEXT:    mfvsrwz r3, f7
+; CHECK-BE-NEXT:    xscvdpsxws f7, f6
+; CHECK-BE-NEXT:    xxswapd vs6, vs6
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f6, f6
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    vmrghb v2, v2, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f7
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f6
+; CHECK-BE-NEXT:    xscvdpsxws f6, f5
+; CHECK-BE-NEXT:    xxswapd vs5, vs5
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f5, f5
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f6
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v5, v4
-; CHECK-BE-NEXT:    vmrghh v4, v1, v0
-; CHECK-BE-NEXT:    vmrghh v5, v7, v6
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f5
+; CHECK-BE-NEXT:    xscvdpsxws f5, f4
+; CHECK-BE-NEXT:    xxswapd vs4, vs4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f5
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    xscvdpsxws f4, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v0, r3
+; CHECK-BE-NEXT:    vmrghb v5, v5, v0
+; CHECK-BE-NEXT:    vmrghh v4, v5, v4
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -684,17 +664,17 @@
 ;
 ; CHECK-P9-LABEL: test2elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscvdpsxws f0, v2
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs0
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xscvdpsxws f1, v2
 ; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r4, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    addi r3, r1, -2
-; CHECK-P9-NEXT:    mtvsrd f1, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    vmrglb v2, v2, v3
+; CHECK-P9-NEXT:    vmrglb v2, v3, v2
 ; CHECK-P9-NEXT:    vsldoi v2, v2, v2, 8
 ; CHECK-P9-NEXT:    stxsihx v2, 0, r3
 ; CHECK-P9-NEXT:    lhz r3, -2(r1)
@@ -702,17 +682,17 @@
 ;
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    xscvdpsxws f0, v2
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
 ; CHECK-BE-NEXT:    xxswapd vs0, v2
-; CHECK-BE-NEXT:    xscvdpsxws f1, v2
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f0
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
 ; CHECK-BE-NEXT:    addi r3, r1, -2
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    vmrghb v2, v2, v3
+; CHECK-BE-NEXT:    vmrghb v2, v3, v2
 ; CHECK-BE-NEXT:    vsldoi v2, v2, v2, 10
 ; CHECK-BE-NEXT:    stxsihx v2, 0, r3
 ; CHECK-BE-NEXT:    lhz r3, -2(r1)
@@ -756,58 +736,58 @@
 ;
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs2, vs1
-; CHECK-P9-NEXT:    xxswapd vs3, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
 ; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    lxv vs0, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f1
-; CHECK-P9-NEXT:    mfvsrwz r5, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    mfvsrwz r4, f2
-; CHECK-P9-NEXT:    mfvsrwz r6, f3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    xxswapd v2, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
 ; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
 ; CHECK-P9-NEXT:    vmrglb v2, v2, v3
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
+; CHECK-P9-NEXT:    xxswapd v3, vs1
+; CHECK-P9-NEXT:    xxswapd v4, vs0
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
+; CHECK-P9-NEXT:    li r3, 0
 ; CHECK-P9-NEXT:    vextuwrx r3, r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    xxswapd vs2, vs1
-; CHECK-BE-NEXT:    xxswapd vs3, vs0
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
 ; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    mfvsrwz r3, f1
-; CHECK-BE-NEXT:    mfvsrwz r5, f0
+; CHECK-BE-NEXT:    lxv vs0, 0(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f2
-; CHECK-BE-NEXT:    mfvsrwz r6, f3
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    li r3, 0
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghb v2, v2, v3
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    li r3, 0
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -877,104 +857,104 @@
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    lxv vs3, 0(r3)
+; CHECK-P9-NEXT:    xscvdpsxws f4, f3
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
 ; CHECK-P9-NEXT:    lxv vs0, 48(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 32(r3)
 ; CHECK-P9-NEXT:    lxv vs2, 16(r3)
-; CHECK-P9-NEXT:    lxv vs3, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs4, vs3
-; CHECK-P9-NEXT:    xxswapd vs5, vs2
-; CHECK-P9-NEXT:    xxswapd vs6, vs1
-; CHECK-P9-NEXT:    xxswapd vs7, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f3
-; CHECK-P9-NEXT:    mfvsrwz r5, f2
-; CHECK-P9-NEXT:    mfvsrwz r7, f1
-; CHECK-P9-NEXT:    mfvsrwz r9, f0
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mfvsrwz r4, f4
-; CHECK-P9-NEXT:    mfvsrwz r6, f5
-; CHECK-P9-NEXT:    mfvsrwz r8, f6
-; CHECK-P9-NEXT:    mfvsrwz r10, f7
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxswapd v2, vs4
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
 ; CHECK-P9-NEXT:    xxswapd v4, vs2
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    xxswapd v0, vs4
-; CHECK-P9-NEXT:    xxswapd v6, vs6
-; CHECK-P9-NEXT:    xxswapd v3, vs1
-; CHECK-P9-NEXT:    xxswapd v5, vs3
-; CHECK-P9-NEXT:    xxswapd v1, vs5
-; CHECK-P9-NEXT:    xxswapd v7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
 ; CHECK-P9-NEXT:    vmrglb v2, v2, v3
-; CHECK-P9-NEXT:    vmrglb v3, v4, v5
-; CHECK-P9-NEXT:    vmrglb v4, v0, v1
-; CHECK-P9-NEXT:    vmrglb v5, v6, v7
+; CHECK-P9-NEXT:    xxswapd v3, vs3
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglh v3, v5, v4
+; CHECK-P9-NEXT:    xxswapd v3, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs1
+; CHECK-P9-NEXT:    xxswapd v5, vs0
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
 ; CHECK-P9-NEXT:    vmrglw v2, v3, v2
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xscvdpsxws f4, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    lxv vs2, 32(r3)
-; CHECK-BE-NEXT:    lxv vs3, 48(r3)
-; CHECK-BE-NEXT:    xxswapd vs4, vs3
-; CHECK-BE-NEXT:    xxswapd vs5, vs2
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    mfvsrwz r3, f3
-; CHECK-BE-NEXT:    mfvsrwz r5, f2
-; CHECK-BE-NEXT:    mfvsrwz r7, f1
-; CHECK-BE-NEXT:    mfvsrwz r9, f0
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
-; CHECK-BE-NEXT:    mfvsrwz r4, f4
-; CHECK-BE-NEXT:    mfvsrwz r6, f5
-; CHECK-BE-NEXT:    mfvsrwz r8, f6
-; CHECK-BE-NEXT:    mfvsrwz r10, f7
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v7, r10
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v3, r3
 ; CHECK-BE-NEXT:    vmrghb v2, v2, v3
-; CHECK-BE-NEXT:    vmrghb v3, v4, v5
-; CHECK-BE-NEXT:    vmrghb v4, v0, v1
-; CHECK-BE-NEXT:    vmrghb v5, v6, v7
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
@@ -1094,219 +1074,199 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs2, 48(r3)
-; CHECK-P9-NEXT:    lxv vs3, 32(r3)
-; CHECK-P9-NEXT:    lxv vs4, 16(r3)
-; CHECK-P9-NEXT:    lxv vs5, 0(r3)
+; CHECK-P9-NEXT:    lxv vs7, 0(r3)
+; CHECK-P9-NEXT:    xscvdpsxws f8, f7
+; CHECK-P9-NEXT:    xxswapd vs7, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f7, f7
 ; CHECK-P9-NEXT:    lxv vs0, 112(r3)
 ; CHECK-P9-NEXT:    lxv vs1, 96(r3)
-; CHECK-P9-NEXT:    lxv vs6, 80(r3)
-; CHECK-P9-NEXT:    lxv vs7, 64(r3)
-; CHECK-P9-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxswapd vs8, vs5
-; CHECK-P9-NEXT:    xxswapd vs9, vs4
-; CHECK-P9-NEXT:    xxswapd vs10, vs3
-; CHECK-P9-NEXT:    xxswapd vs11, vs2
-; CHECK-P9-NEXT:    xxswapd vs12, vs7
-; CHECK-P9-NEXT:    xxswapd vs13, vs6
-; CHECK-P9-NEXT:    xxswapd v2, vs1
-; CHECK-P9-NEXT:    xxswapd v3, vs0
-; CHECK-P9-NEXT:    xscvdpsxws f5, f5
-; CHECK-P9-NEXT:    xscvdpsxws f4, f4
-; CHECK-P9-NEXT:    xscvdpsxws f3, f3
-; CHECK-P9-NEXT:    xscvdpsxws f2, f2
-; CHECK-P9-NEXT:    xscvdpsxws f7, f7
+; CHECK-P9-NEXT:    lxv vs2, 80(r3)
+; CHECK-P9-NEXT:    lxv vs3, 64(r3)
+; CHECK-P9-NEXT:    lxv vs4, 48(r3)
+; CHECK-P9-NEXT:    lxv vs5, 32(r3)
+; CHECK-P9-NEXT:    lxv vs6, 16(r3)
+; CHECK-P9-NEXT:    mfvsrwz r3, f8
+; CHECK-P9-NEXT:    mtvsrd f8, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f7
+; CHECK-P9-NEXT:    xxswapd v2, vs8
+; CHECK-P9-NEXT:    mtvsrd f7, r3
+; CHECK-P9-NEXT:    xxswapd v3, vs7
+; CHECK-P9-NEXT:    xscvdpsxws f7, f6
+; CHECK-P9-NEXT:    xxswapd vs6, vs6
 ; CHECK-P9-NEXT:    xscvdpsxws f6, f6
-; CHECK-P9-NEXT:    xscvdpsxws f1, f1
-; CHECK-P9-NEXT:    xscvdpsxws f0, f0
-; CHECK-P9-NEXT:    xscvdpsxws f8, f8
-; CHECK-P9-NEXT:    xscvdpsxws f9, f9
-; CHECK-P9-NEXT:    xscvdpsxws f10, f10
-; CHECK-P9-NEXT:    xscvdpsxws f11, f11
-; CHECK-P9-NEXT:    xscvdpsxws f12, f12
-; CHECK-P9-NEXT:    xscvdpsxws f13, f13
-; CHECK-P9-NEXT:    xscvdpsxws v2, v2
-; CHECK-P9-NEXT:    xscvdpsxws v3, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f7
+; CHECK-P9-NEXT:    mtvsrd f7, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f6
+; CHECK-P9-NEXT:    mtvsrd f6, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs6
+; CHECK-P9-NEXT:    xscvdpsxws f6, f5
+; CHECK-P9-NEXT:    xxswapd vs5, vs5
+; CHECK-P9-NEXT:    xscvdpsxws f5, f5
+; CHECK-P9-NEXT:    mfvsrwz r3, f6
+; CHECK-P9-NEXT:    mtvsrd f6, r3
 ; CHECK-P9-NEXT:    mfvsrwz r3, f5
-; CHECK-P9-NEXT:    mfvsrwz r4, f4
-; CHECK-P9-NEXT:    mfvsrwz r5, f3
-; CHECK-P9-NEXT:    mfvsrwz r6, f2
-; CHECK-P9-NEXT:    mfvsrwz r11, f7
-; CHECK-P9-NEXT:    mfvsrwz r12, f6
-; CHECK-P9-NEXT:    mfvsrwz r0, f1
-; CHECK-P9-NEXT:    mfvsrwz r30, f0
-; CHECK-P9-NEXT:    mfvsrwz r7, f8
-; CHECK-P9-NEXT:    mfvsrwz r8, f9
-; CHECK-P9-NEXT:    mfvsrwz r9, f10
-; CHECK-P9-NEXT:    mfvsrwz r10, f11
-; CHECK-P9-NEXT:    mfvsrwz r29, f12
-; CHECK-P9-NEXT:    mfvsrwz r28, f13
-; CHECK-P9-NEXT:    mfvsrwz r27, v2
-; CHECK-P9-NEXT:    mfvsrwz r26, v3
-; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    mtvsrd f1, r4
-; CHECK-P9-NEXT:    mtvsrd f2, r5
-; CHECK-P9-NEXT:    mtvsrd f3, r6
-; CHECK-P9-NEXT:    mtvsrd f8, r11
-; CHECK-P9-NEXT:    mtvsrd f9, r12
-; CHECK-P9-NEXT:    mtvsrd f10, r0
-; CHECK-P9-NEXT:    mtvsrd f11, r30
-; CHECK-P9-NEXT:    mtvsrd f4, r7
-; CHECK-P9-NEXT:    mtvsrd f5, r8
-; CHECK-P9-NEXT:    mtvsrd f6, r9
-; CHECK-P9-NEXT:    mtvsrd f7, r10
-; CHECK-P9-NEXT:    mtvsrd f12, r29
-; CHECK-P9-NEXT:    mtvsrd f13, r28
-; CHECK-P9-NEXT:    mtvsrd v2, r27
-; CHECK-P9-NEXT:    mtvsrd v3, r26
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    xxswapd v5, vs1
-; CHECK-P9-NEXT:    xxswapd v0, vs2
-; CHECK-P9-NEXT:    xxswapd v1, vs3
-; CHECK-P9-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v6, vs4
-; CHECK-P9-NEXT:    xxswapd v7, vs5
-; CHECK-P9-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xxswapd v8, vs6
-; CHECK-P9-NEXT:    xxswapd v9, vs7
-; CHECK-P9-NEXT:    xxswapd v10, vs8
-; CHECK-P9-NEXT:    xxswapd v11, vs12
-; CHECK-P9-NEXT:    xxswapd v12, vs9
-; CHECK-P9-NEXT:    xxswapd v13, vs13
-; CHECK-P9-NEXT:    xxswapd v14, vs10
-; CHECK-P9-NEXT:    xxswapd v2, v2
-; CHECK-P9-NEXT:    xxswapd v15, vs11
-; CHECK-P9-NEXT:    xxswapd v3, v3
-; CHECK-P9-NEXT:    vmrglb v4, v4, v6
-; CHECK-P9-NEXT:    vmrglb v5, v5, v7
-; CHECK-P9-NEXT:    vmrglb v0, v0, v8
-; CHECK-P9-NEXT:    vmrglb v1, v1, v9
-; CHECK-P9-NEXT:    vmrglb v6, v10, v11
-; CHECK-P9-NEXT:    vmrglb v7, v12, v13
-; CHECK-P9-NEXT:    vmrglb v2, v14, v2
-; CHECK-P9-NEXT:    vmrglb v3, v15, v3
-; CHECK-P9-NEXT:    vmrglh v4, v5, v4
-; CHECK-P9-NEXT:    vmrglh v5, v1, v0
-; CHECK-P9-NEXT:    vmrglh v0, v7, v6
+; CHECK-P9-NEXT:    vmrglb v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs7
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
 ; CHECK-P9-NEXT:    vmrglh v2, v3, v2
-; CHECK-P9-NEXT:    vmrglw v3, v5, v4
-; CHECK-P9-NEXT:    vmrglw v2, v2, v0
-; CHECK-P9-NEXT:    xxmrgld v2, v2, v3
+; CHECK-P9-NEXT:    xxswapd v3, vs6
+; CHECK-P9-NEXT:    mtvsrd f5, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs5
+; CHECK-P9-NEXT:    xscvdpsxws f5, f4
+; CHECK-P9-NEXT:    xxswapd vs4, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f4, f4
+; CHECK-P9-NEXT:    mfvsrwz r3, f5
+; CHECK-P9-NEXT:    mtvsrd f5, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs4
+; CHECK-P9-NEXT:    xscvdpsxws f4, f3
+; CHECK-P9-NEXT:    xxswapd vs3, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f3
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs5
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f4
+; CHECK-P9-NEXT:    mtvsrd f4, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    xxswapd v4, vs3
+; CHECK-P9-NEXT:    xscvdpsxws f3, f2
+; CHECK-P9-NEXT:    xxswapd vs2, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f2
+; CHECK-P9-NEXT:    mfvsrwz r3, f3
+; CHECK-P9-NEXT:    mtvsrd f3, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs2
+; CHECK-P9-NEXT:    xscvdpsxws f2, f1
+; CHECK-P9-NEXT:    xxswapd vs1, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f1
+; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    xxswapd v3, vs4
+; CHECK-P9-NEXT:    vmrglb v3, v3, v4
+; CHECK-P9-NEXT:    xxswapd v4, vs3
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    vmrglh v3, v4, v3
+; CHECK-P9-NEXT:    mfvsrwz r3, f2
+; CHECK-P9-NEXT:    mtvsrd f2, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    xxswapd v4, vs2
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    xxswapd v5, vs1
+; CHECK-P9-NEXT:    xscvdpsxws f1, f0
+; CHECK-P9-NEXT:    xxswapd vs0, vs0
+; CHECK-P9-NEXT:    xscvdpsxws f0, f0
+; CHECK-P9-NEXT:    mfvsrwz r3, f1
+; CHECK-P9-NEXT:    mtvsrd f1, r3
+; CHECK-P9-NEXT:    mfvsrwz r3, f0
+; CHECK-P9-NEXT:    mtvsrd f0, r3
+; CHECK-P9-NEXT:    vmrglb v4, v4, v5
+; CHECK-P9-NEXT:    xxswapd v5, vs1
+; CHECK-P9-NEXT:    xxswapd v0, vs0
+; CHECK-P9-NEXT:    vmrglb v5, v5, v0
+; CHECK-P9-NEXT:    vmrglh v4, v5, v4
+; CHECK-P9-NEXT:    vmrglw v3, v4, v3
+; CHECK-P9-NEXT:    xxmrgld v2, v3, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 64(r3)
-; CHECK-BE-NEXT:    lxv vs3, 80(r3)
-; CHECK-BE-NEXT:    lxv vs4, 96(r3)
-; CHECK-BE-NEXT:    lxv vs5, 112(r3)
+; CHECK-BE-NEXT:    lxv vs7, 112(r3)
+; CHECK-BE-NEXT:    xscvdpsxws f8, f7
+; CHECK-BE-NEXT:    xxswapd vs7, vs7
+; CHECK-BE-NEXT:    xscvdpsxws f7, f7
+; CHECK-BE-NEXT:    lxv vs6, 96(r3)
 ; CHECK-BE-NEXT:    lxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    lxv vs6, 32(r3)
-; CHECK-BE-NEXT:    lxv vs7, 48(r3)
-; CHECK-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxswapd vs8, vs5
-; CHECK-BE-NEXT:    xxswapd vs9, vs4
-; CHECK-BE-NEXT:    xxswapd vs10, vs3
-; CHECK-BE-NEXT:    xxswapd vs11, vs2
-; CHECK-BE-NEXT:    xxswapd vs12, vs7
-; CHECK-BE-NEXT:    xxswapd vs13, vs6
-; CHECK-BE-NEXT:    xxswapd v2, vs1
-; CHECK-BE-NEXT:    xxswapd v3, vs0
-; CHECK-BE-NEXT:    xscvdpsxws f5, f5
-; CHECK-BE-NEXT:    xscvdpsxws f4, f4
-; CHECK-BE-NEXT:    xscvdpsxws f3, f3
-; CHECK-BE-NEXT:    xscvdpsxws f2, f2
-; CHECK-BE-NEXT:    xscvdpsxws f7, f7
-; CHECK-BE-NEXT:    xscvdpsxws f6, f6
-; CHECK-BE-NEXT:    xscvdpsxws f1, f1
-; CHECK-BE-NEXT:    xscvdpsxws f0, f0
-; CHECK-BE-NEXT:    xscvdpsxws f8, f8
-; CHECK-BE-NEXT:    xscvdpsxws f9, f9
-; CHECK-BE-NEXT:    xscvdpsxws f10, f10
-; CHECK-BE-NEXT:    xscvdpsxws f11, f11
-; CHECK-BE-NEXT:    xscvdpsxws f12, f12
-; CHECK-BE-NEXT:    xscvdpsxws f13, f13
-; CHECK-BE-NEXT:    xscvdpsxws v2, v2
-; CHECK-BE-NEXT:    xscvdpsxws v3, v3
-; CHECK-BE-NEXT:    mfvsrwz r3, f5
-; CHECK-BE-NEXT:    mfvsrwz r4, f4
-; CHECK-BE-NEXT:    mfvsrwz r5, f3
-; CHECK-BE-NEXT:    mfvsrwz r6, f2
-; CHECK-BE-NEXT:    mfvsrwz r11, f7
-; CHECK-BE-NEXT:    mfvsrwz r12, f6
-; CHECK-BE-NEXT:    mfvsrwz r0, f1
-; CHECK-BE-NEXT:    mfvsrwz r30, f0
-; CHECK-BE-NEXT:    mfvsrwz r7, f8
-; CHECK-BE-NEXT:    mfvsrwz r8, f9
-; CHECK-BE-NEXT:    mfvsrwz r9, f10
-; CHECK-BE-NEXT:    mfvsrwz r10, f11
-; CHECK-BE-NEXT:    mfvsrwz r29, f12
-; CHECK-BE-NEXT:    mfvsrwz r28, f13
-; CHECK-BE-NEXT:    mfvsrwz r27, v2
-; CHECK-BE-NEXT:    mfvsrwz r26, v3
+; CHECK-BE-NEXT:    lxv vs2, 32(r3)
+; CHECK-BE-NEXT:    lxv vs3, 48(r3)
+; CHECK-BE-NEXT:    lxv vs4, 64(r3)
+; CHECK-BE-NEXT:    lxv vs5, 80(r3)
+; CHECK-BE-NEXT:    mfvsrwz r3, f8
 ; CHECK-BE-NEXT:    sldi r3, r3, 56
-; CHECK-BE-NEXT:    sldi r4, r4, 56
-; CHECK-BE-NEXT:    sldi r5, r5, 56
-; CHECK-BE-NEXT:    sldi r6, r6, 56
-; CHECK-BE-NEXT:    sldi r11, r11, 56
-; CHECK-BE-NEXT:    sldi r12, r12, 56
-; CHECK-BE-NEXT:    sldi r0, r0, 56
-; CHECK-BE-NEXT:    sldi r30, r30, 56
-; CHECK-BE-NEXT:    sldi r7, r7, 56
-; CHECK-BE-NEXT:    sldi r8, r8, 56
-; CHECK-BE-NEXT:    sldi r9, r9, 56
-; CHECK-BE-NEXT:    sldi r10, r10, 56
-; CHECK-BE-NEXT:    sldi r29, r29, 56
-; CHECK-BE-NEXT:    sldi r28, r28, 56
-; CHECK-BE-NEXT:    sldi r27, r27, 56
-; CHECK-BE-NEXT:    sldi r26, r26, 56
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    mtvsrd v3, r4
-; CHECK-BE-NEXT:    mtvsrd v4, r5
-; CHECK-BE-NEXT:    mtvsrd v5, r6
-; CHECK-BE-NEXT:    mtvsrd v8, r11
-; CHECK-BE-NEXT:    mtvsrd v10, r12
-; CHECK-BE-NEXT:    mtvsrd v12, r0
-; CHECK-BE-NEXT:    mtvsrd v14, r30
-; CHECK-BE-NEXT:    mtvsrd v0, r7
-; CHECK-BE-NEXT:    mtvsrd v1, r8
-; CHECK-BE-NEXT:    mtvsrd v6, r9
-; CHECK-BE-NEXT:    mtvsrd v7, r10
-; CHECK-BE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v9, r29
-; CHECK-BE-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrd v11, r28
-; CHECK-BE-NEXT:    mtvsrd v13, r27
-; CHECK-BE-NEXT:    mtvsrd v15, r26
-; CHECK-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    vmrghb v2, v2, v0
-; CHECK-BE-NEXT:    vmrghb v3, v3, v1
-; CHECK-BE-NEXT:    vmrghb v4, v4, v6
-; CHECK-BE-NEXT:    vmrghb v5, v5, v7
-; CHECK-BE-NEXT:    vmrghb v0, v8, v9
-; CHECK-BE-NEXT:    vmrghb v1, v10, v11
-; CHECK-BE-NEXT:    vmrghb v6, v12, v13
-; CHECK-BE-NEXT:    vmrghb v7, v14, v15
+; CHECK-BE-NEXT:    mfvsrwz r3, f7
+; CHECK-BE-NEXT:    xscvdpsxws f7, f6
+; CHECK-BE-NEXT:    xxswapd vs6, vs6
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f6, f6
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    vmrghb v2, v2, v3
+; CHECK-BE-NEXT:    mfvsrwz r3, f7
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f6
+; CHECK-BE-NEXT:    xscvdpsxws f6, f5
+; CHECK-BE-NEXT:    xxswapd vs5, vs5
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f5, f5
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f6
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghh v2, v3, v2
-; CHECK-BE-NEXT:    vmrghh v3, v5, v4
-; CHECK-BE-NEXT:    vmrghh v4, v1, v0
-; CHECK-BE-NEXT:    vmrghh v5, v7, v6
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f5
+; CHECK-BE-NEXT:    xscvdpsxws f5, f4
+; CHECK-BE-NEXT:    xxswapd vs4, vs4
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f4, f4
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f5
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    xscvdpsxws f4, f3
+; CHECK-BE-NEXT:    xxswapd vs3, vs3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f3, f3
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f4
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
 ; CHECK-BE-NEXT:    vmrghw v2, v3, v2
-; CHECK-BE-NEXT:    vmrghw v3, v5, v4
+; CHECK-BE-NEXT:    mtvsrd v3, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    xscvdpsxws f3, f2
+; CHECK-BE-NEXT:    xxswapd vs2, vs2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f2, f2
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    vmrghb v3, v3, v4
+; CHECK-BE-NEXT:    mfvsrwz r3, f3
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    xscvdpsxws f2, f1
+; CHECK-BE-NEXT:    xxswapd vs1, vs1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f1, f1
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f2
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    vmrghh v3, v4, v3
+; CHECK-BE-NEXT:    mtvsrd v4, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    xscvdpsxws f1, f0
+; CHECK-BE-NEXT:    xxswapd vs0, vs0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    xscvdpsxws f0, f0
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    vmrghb v4, v4, v5
+; CHECK-BE-NEXT:    mfvsrwz r3, f1
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v5, r3
+; CHECK-BE-NEXT:    mfvsrwz r3, f0
+; CHECK-BE-NEXT:    sldi r3, r3, 56
+; CHECK-BE-NEXT:    mtvsrd v0, r3
+; CHECK-BE-NEXT:    vmrghb v5, v5, v0
+; CHECK-BE-NEXT:    vmrghh v4, v5, v4
+; CHECK-BE-NEXT:    vmrghw v3, v4, v3
 ; CHECK-BE-NEXT:    xxmrghd v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
diff --git a/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll b/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll
index a5097b0..a059e56 100644
--- a/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll
@@ -163,58 +163,58 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 48(r4)
-; CHECK-P9-NEXT:    lxv vs1, 32(r4)
-; CHECK-P9-NEXT:    lxv vs2, 16(r4)
-; CHECK-P9-NEXT:    lxv vs3, 0(r4)
-; CHECK-P9-NEXT:    lxv vs4, 112(r4)
-; CHECK-P9-NEXT:    lxv vs5, 96(r4)
-; CHECK-P9-NEXT:    lxv vs6, 80(r4)
-; CHECK-P9-NEXT:    lxv vs7, 64(r4)
+; CHECK-P9-NEXT:    lxv vs0, 112(r4)
+; CHECK-P9-NEXT:    lxv vs1, 96(r4)
+; CHECK-P9-NEXT:    lxv vs2, 80(r4)
+; CHECK-P9-NEXT:    lxv vs3, 64(r4)
+; CHECK-P9-NEXT:    lxv vs4, 48(r4)
+; CHECK-P9-NEXT:    xvcvdpuxds vs4, vs4
+; CHECK-P9-NEXT:    lxv vs5, 32(r4)
+; CHECK-P9-NEXT:    lxv vs6, 16(r4)
+; CHECK-P9-NEXT:    lxv vs7, 0(r4)
+; CHECK-P9-NEXT:    xvcvdpuxds vs7, vs7
+; CHECK-P9-NEXT:    xvcvdpuxds vs6, vs6
+; CHECK-P9-NEXT:    xvcvdpuxds vs5, vs5
 ; CHECK-P9-NEXT:    xvcvdpuxds vs3, vs3
 ; CHECK-P9-NEXT:    xvcvdpuxds vs2, vs2
 ; CHECK-P9-NEXT:    xvcvdpuxds vs1, vs1
 ; CHECK-P9-NEXT:    xvcvdpuxds vs0, vs0
-; CHECK-P9-NEXT:    xvcvdpuxds vs7, vs7
-; CHECK-P9-NEXT:    xvcvdpuxds vs6, vs6
-; CHECK-P9-NEXT:    xvcvdpuxds vs5, vs5
-; CHECK-P9-NEXT:    xvcvdpuxds vs4, vs4
-; CHECK-P9-NEXT:    stxv vs0, 48(r3)
-; CHECK-P9-NEXT:    stxv vs1, 32(r3)
-; CHECK-P9-NEXT:    stxv vs2, 16(r3)
-; CHECK-P9-NEXT:    stxv vs3, 0(r3)
-; CHECK-P9-NEXT:    stxv vs4, 112(r3)
-; CHECK-P9-NEXT:    stxv vs5, 96(r3)
-; CHECK-P9-NEXT:    stxv vs6, 80(r3)
-; CHECK-P9-NEXT:    stxv vs7, 64(r3)
+; CHECK-P9-NEXT:    stxv vs0, 112(r3)
+; CHECK-P9-NEXT:    stxv vs1, 96(r3)
+; CHECK-P9-NEXT:    stxv vs2, 80(r3)
+; CHECK-P9-NEXT:    stxv vs3, 64(r3)
+; CHECK-P9-NEXT:    stxv vs4, 48(r3)
+; CHECK-P9-NEXT:    stxv vs5, 32(r3)
+; CHECK-P9-NEXT:    stxv vs6, 16(r3)
+; CHECK-P9-NEXT:    stxv vs7, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 48(r4)
-; CHECK-BE-NEXT:    lxv vs1, 32(r4)
-; CHECK-BE-NEXT:    lxv vs2, 16(r4)
-; CHECK-BE-NEXT:    lxv vs3, 0(r4)
-; CHECK-BE-NEXT:    lxv vs4, 112(r4)
-; CHECK-BE-NEXT:    lxv vs5, 96(r4)
-; CHECK-BE-NEXT:    lxv vs6, 80(r4)
-; CHECK-BE-NEXT:    lxv vs7, 64(r4)
+; CHECK-BE-NEXT:    lxv vs0, 112(r4)
+; CHECK-BE-NEXT:    lxv vs1, 96(r4)
+; CHECK-BE-NEXT:    lxv vs2, 80(r4)
+; CHECK-BE-NEXT:    lxv vs3, 64(r4)
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    xvcvdpuxds vs4, vs4
+; CHECK-BE-NEXT:    lxv vs5, 32(r4)
+; CHECK-BE-NEXT:    lxv vs6, 16(r4)
+; CHECK-BE-NEXT:    lxv vs7, 0(r4)
+; CHECK-BE-NEXT:    xvcvdpuxds vs7, vs7
+; CHECK-BE-NEXT:    xvcvdpuxds vs6, vs6
+; CHECK-BE-NEXT:    xvcvdpuxds vs5, vs5
 ; CHECK-BE-NEXT:    xvcvdpuxds vs3, vs3
 ; CHECK-BE-NEXT:    xvcvdpuxds vs2, vs2
 ; CHECK-BE-NEXT:    xvcvdpuxds vs1, vs1
 ; CHECK-BE-NEXT:    xvcvdpuxds vs0, vs0
-; CHECK-BE-NEXT:    xvcvdpuxds vs7, vs7
-; CHECK-BE-NEXT:    xvcvdpuxds vs6, vs6
-; CHECK-BE-NEXT:    xvcvdpuxds vs5, vs5
-; CHECK-BE-NEXT:    xvcvdpuxds vs4, vs4
-; CHECK-BE-NEXT:    stxv vs0, 48(r3)
-; CHECK-BE-NEXT:    stxv vs1, 32(r3)
-; CHECK-BE-NEXT:    stxv vs2, 16(r3)
-; CHECK-BE-NEXT:    stxv vs3, 0(r3)
-; CHECK-BE-NEXT:    stxv vs4, 112(r3)
-; CHECK-BE-NEXT:    stxv vs5, 96(r3)
-; CHECK-BE-NEXT:    stxv vs6, 80(r3)
-; CHECK-BE-NEXT:    stxv vs7, 64(r3)
+; CHECK-BE-NEXT:    stxv vs0, 112(r3)
+; CHECK-BE-NEXT:    stxv vs1, 96(r3)
+; CHECK-BE-NEXT:    stxv vs2, 80(r3)
+; CHECK-BE-NEXT:    stxv vs3, 64(r3)
+; CHECK-BE-NEXT:    stxv vs4, 48(r3)
+; CHECK-BE-NEXT:    stxv vs5, 32(r3)
+; CHECK-BE-NEXT:    stxv vs6, 16(r3)
+; CHECK-BE-NEXT:    stxv vs7, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x double>, <16 x double>* %0, align 128
@@ -377,58 +377,58 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 48(r4)
-; CHECK-P9-NEXT:    lxv vs1, 32(r4)
-; CHECK-P9-NEXT:    lxv vs2, 16(r4)
-; CHECK-P9-NEXT:    lxv vs3, 0(r4)
-; CHECK-P9-NEXT:    lxv vs4, 112(r4)
-; CHECK-P9-NEXT:    lxv vs5, 96(r4)
-; CHECK-P9-NEXT:    lxv vs6, 80(r4)
-; CHECK-P9-NEXT:    lxv vs7, 64(r4)
+; CHECK-P9-NEXT:    lxv vs0, 112(r4)
+; CHECK-P9-NEXT:    lxv vs1, 96(r4)
+; CHECK-P9-NEXT:    lxv vs2, 80(r4)
+; CHECK-P9-NEXT:    lxv vs3, 64(r4)
+; CHECK-P9-NEXT:    lxv vs4, 48(r4)
+; CHECK-P9-NEXT:    xvcvdpsxds vs4, vs4
+; CHECK-P9-NEXT:    lxv vs5, 32(r4)
+; CHECK-P9-NEXT:    lxv vs6, 16(r4)
+; CHECK-P9-NEXT:    lxv vs7, 0(r4)
+; CHECK-P9-NEXT:    xvcvdpsxds vs7, vs7
+; CHECK-P9-NEXT:    xvcvdpsxds vs6, vs6
+; CHECK-P9-NEXT:    xvcvdpsxds vs5, vs5
 ; CHECK-P9-NEXT:    xvcvdpsxds vs3, vs3
 ; CHECK-P9-NEXT:    xvcvdpsxds vs2, vs2
 ; CHECK-P9-NEXT:    xvcvdpsxds vs1, vs1
 ; CHECK-P9-NEXT:    xvcvdpsxds vs0, vs0
-; CHECK-P9-NEXT:    xvcvdpsxds vs7, vs7
-; CHECK-P9-NEXT:    xvcvdpsxds vs6, vs6
-; CHECK-P9-NEXT:    xvcvdpsxds vs5, vs5
-; CHECK-P9-NEXT:    xvcvdpsxds vs4, vs4
-; CHECK-P9-NEXT:    stxv vs0, 48(r3)
-; CHECK-P9-NEXT:    stxv vs1, 32(r3)
-; CHECK-P9-NEXT:    stxv vs2, 16(r3)
-; CHECK-P9-NEXT:    stxv vs3, 0(r3)
-; CHECK-P9-NEXT:    stxv vs4, 112(r3)
-; CHECK-P9-NEXT:    stxv vs5, 96(r3)
-; CHECK-P9-NEXT:    stxv vs6, 80(r3)
-; CHECK-P9-NEXT:    stxv vs7, 64(r3)
+; CHECK-P9-NEXT:    stxv vs0, 112(r3)
+; CHECK-P9-NEXT:    stxv vs1, 96(r3)
+; CHECK-P9-NEXT:    stxv vs2, 80(r3)
+; CHECK-P9-NEXT:    stxv vs3, 64(r3)
+; CHECK-P9-NEXT:    stxv vs4, 48(r3)
+; CHECK-P9-NEXT:    stxv vs5, 32(r3)
+; CHECK-P9-NEXT:    stxv vs6, 16(r3)
+; CHECK-P9-NEXT:    stxv vs7, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 48(r4)
-; CHECK-BE-NEXT:    lxv vs1, 32(r4)
-; CHECK-BE-NEXT:    lxv vs2, 16(r4)
-; CHECK-BE-NEXT:    lxv vs3, 0(r4)
-; CHECK-BE-NEXT:    lxv vs4, 112(r4)
-; CHECK-BE-NEXT:    lxv vs5, 96(r4)
-; CHECK-BE-NEXT:    lxv vs6, 80(r4)
-; CHECK-BE-NEXT:    lxv vs7, 64(r4)
+; CHECK-BE-NEXT:    lxv vs0, 112(r4)
+; CHECK-BE-NEXT:    lxv vs1, 96(r4)
+; CHECK-BE-NEXT:    lxv vs2, 80(r4)
+; CHECK-BE-NEXT:    lxv vs3, 64(r4)
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    xvcvdpsxds vs4, vs4
+; CHECK-BE-NEXT:    lxv vs5, 32(r4)
+; CHECK-BE-NEXT:    lxv vs6, 16(r4)
+; CHECK-BE-NEXT:    lxv vs7, 0(r4)
+; CHECK-BE-NEXT:    xvcvdpsxds vs7, vs7
+; CHECK-BE-NEXT:    xvcvdpsxds vs6, vs6
+; CHECK-BE-NEXT:    xvcvdpsxds vs5, vs5
 ; CHECK-BE-NEXT:    xvcvdpsxds vs3, vs3
 ; CHECK-BE-NEXT:    xvcvdpsxds vs2, vs2
 ; CHECK-BE-NEXT:    xvcvdpsxds vs1, vs1
 ; CHECK-BE-NEXT:    xvcvdpsxds vs0, vs0
-; CHECK-BE-NEXT:    xvcvdpsxds vs7, vs7
-; CHECK-BE-NEXT:    xvcvdpsxds vs6, vs6
-; CHECK-BE-NEXT:    xvcvdpsxds vs5, vs5
-; CHECK-BE-NEXT:    xvcvdpsxds vs4, vs4
-; CHECK-BE-NEXT:    stxv vs0, 48(r3)
-; CHECK-BE-NEXT:    stxv vs1, 32(r3)
-; CHECK-BE-NEXT:    stxv vs2, 16(r3)
-; CHECK-BE-NEXT:    stxv vs3, 0(r3)
-; CHECK-BE-NEXT:    stxv vs4, 112(r3)
-; CHECK-BE-NEXT:    stxv vs5, 96(r3)
-; CHECK-BE-NEXT:    stxv vs6, 80(r3)
-; CHECK-BE-NEXT:    stxv vs7, 64(r3)
+; CHECK-BE-NEXT:    stxv vs0, 112(r3)
+; CHECK-BE-NEXT:    stxv vs1, 96(r3)
+; CHECK-BE-NEXT:    stxv vs2, 80(r3)
+; CHECK-BE-NEXT:    stxv vs3, 64(r3)
+; CHECK-BE-NEXT:    stxv vs4, 48(r3)
+; CHECK-BE-NEXT:    stxv vs5, 32(r3)
+; CHECK-BE-NEXT:    stxv vs6, 16(r3)
+; CHECK-BE-NEXT:    stxv vs7, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x double>, <16 x double>* %0, align 128
diff --git a/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll b/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll
index b522155..f8538ac 100644
--- a/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll
@@ -35,20 +35,20 @@
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
 ; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 2
 ; CHECK-P9-NEXT:    vextuhrx r3, r3, v2
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
 ; CHECK-P9-NEXT:    rlwinm r3, r3, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 16, 31
 ; CHECK-P9-NEXT:    mtvsrwz f0, r3
-; CHECK-P9-NEXT:    mtvsrwz f1, r4
+; CHECK-P9-NEXT:    li r3, 2
 ; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
 ; CHECK-P9-NEXT:    xscvdpspn vs0, f0
-; CHECK-P9-NEXT:    xscvdpspn vs1, f1
+; CHECK-P9-NEXT:    vextuhrx r3, r3, v2
+; CHECK-P9-NEXT:    rlwinm r3, r3, 0, 16, 31
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
+; CHECK-P9-NEXT:    mtvsrwz f0, r3
+; CHECK-P9-NEXT:    xscvuxdsp f0, f0
+; CHECK-P9-NEXT:    xscvdpspn vs0, f0
 ; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 1
-; CHECK-P9-NEXT:    xxsldwi v3, vs1, vs1, 1
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
@@ -56,18 +56,18 @@
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
 ; CHECK-BE-NEXT:    li r3, 2
-; CHECK-BE-NEXT:    li r4, 0
 ; CHECK-BE-NEXT:    vextuhlx r3, r3, v2
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
 ; CHECK-BE-NEXT:    rlwinm r3, r3, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 16, 31
 ; CHECK-BE-NEXT:    mtvsrwz f0, r3
-; CHECK-BE-NEXT:    mtvsrwz f1, r4
+; CHECK-BE-NEXT:    li r3, 0
 ; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
+; CHECK-BE-NEXT:    vextuhlx r3, r3, v2
+; CHECK-BE-NEXT:    rlwinm r3, r3, 0, 16, 31
+; CHECK-BE-NEXT:    xscvdpspn v3, f0
+; CHECK-BE-NEXT:    mtvsrwz f0, r3
+; CHECK-BE-NEXT:    xscvuxdsp f0, f0
 ; CHECK-BE-NEXT:    xscvdpspn v2, f0
-; CHECK-BE-NEXT:    xscvdpspn v3, f1
-; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -80,90 +80,37 @@
 define <4 x float> @test4elt(i64 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI1_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r3
-; CHECK-P8-NEXT:    mfvsrd r3, f0
-; CHECK-P8-NEXT:    clrldi r4, r3, 48
-; CHECK-P8-NEXT:    rldicl r5, r3, 32, 48
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r4
-; CHECK-P8-NEXT:    rldicl r4, r3, 48, 48
-; CHECK-P8-NEXT:    rldicl r3, r3, 16, 48
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    rlwinm r3, r3, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f1, r5
-; CHECK-P8-NEXT:    mtvsrwz f2, r4
-; CHECK-P8-NEXT:    mtvsrwz f3, r3
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
+; CHECK-P8-NEXT:    addi r3, r4, .LCPI1_0@toc@l
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P8-NEXT:    xvcvuxwsp v2, v2
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 4
-; CHECK-P9-NEXT:    li r5, 2
-; CHECK-P9-NEXT:    li r6, 6
+; CHECK-P9-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P9-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r3
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    vextuhrx r3, r3, v2
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
-; CHECK-P9-NEXT:    vextuhrx r5, r5, v2
-; CHECK-P9-NEXT:    vextuhrx r6, r6, v2
-; CHECK-P9-NEXT:    rlwinm r3, r3, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r3
-; CHECK-P9-NEXT:    mtvsrwz f1, r4
-; CHECK-P9-NEXT:    mtvsrwz f2, r5
-; CHECK-P9-NEXT:    mtvsrwz f3, r6
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxwsp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r4, 6
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    li r3, 2
-; CHECK-BE-NEXT:    li r5, 4
-; CHECK-BE-NEXT:    li r6, 0
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
-; CHECK-BE-NEXT:    vextuhlx r3, r3, v2
-; CHECK-BE-NEXT:    vextuhlx r5, r5, v2
-; CHECK-BE-NEXT:    vextuhlx r6, r6, v2
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r3, r3, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r4
-; CHECK-BE-NEXT:    mtvsrwz f1, r3
-; CHECK-BE-NEXT:    mtvsrwz f2, r5
-; CHECK-BE-NEXT:    mtvsrwz f3, r6
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
+; CHECK-BE-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-BE-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r3
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    vperm v2, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxwsp v2, v2
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i64 %a.coerce to <4 x i16>
@@ -174,166 +121,54 @@
 define void @test8elt(<8 x float>* noalias nocapture sret %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    mfvsrd r5, v2
-; CHECK-P8-NEXT:    xxswapd vs0, v2
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI2_1@toc@ha
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI2_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r4
+; CHECK-P8-NEXT:    addi r4, r5, .LCPI2_1@toc@l
+; CHECK-P8-NEXT:    lvx v5, 0, r4
 ; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    clrldi r6, r5, 48
-; CHECK-P8-NEXT:    rldicl r7, r5, 32, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    mfvsrd r8, f0
-; CHECK-P8-NEXT:    rlwinm r7, r7, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f1, r6
-; CHECK-P8-NEXT:    rldicl r6, r5, 48, 48
-; CHECK-P8-NEXT:    rldicl r5, r5, 16, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r7
-; CHECK-P8-NEXT:    mtvsrwz f2, r6
-; CHECK-P8-NEXT:    clrldi r6, r8, 48
-; CHECK-P8-NEXT:    mtvsrwz f3, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 32, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 48, 48
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f5, r5
-; CHECK-P8-NEXT:    rlwinm r5, r6, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f6, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 16, 48
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    mtvsrwz f7, r5
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    xscvuxdsp f4, f4
-; CHECK-P8-NEXT:    xscvuxdsp f5, f5
-; CHECK-P8-NEXT:    xscvuxdsp f6, f6
-; CHECK-P8-NEXT:    xscvuxdsp f7, f7
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P8-NEXT:    xxmrghd vs1, vs5, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs4, vs7, vs6
-; CHECK-P8-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs2
-; CHECK-P8-NEXT:    vmrgew v3, v4, v3
-; CHECK-P8-NEXT:    vmrgew v2, v5, v2
+; CHECK-P8-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P8-NEXT:    vperm v2, v4, v2, v5
+; CHECK-P8-NEXT:    xvcvuxwsp v3, v3
+; CHECK-P8-NEXT:    xvcvuxwsp v2, v2
 ; CHECK-P8-NEXT:    stvx v3, 0, r3
 ; CHECK-P8-NEXT:    stvx v2, r3, r4
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    li r4, 8
-; CHECK-P9-NEXT:    li r5, 12
-; CHECK-P9-NEXT:    li r6, 10
-; CHECK-P9-NEXT:    li r7, 14
-; CHECK-P9-NEXT:    li r8, 0
-; CHECK-P9-NEXT:    li r9, 4
-; CHECK-P9-NEXT:    li r10, 2
-; CHECK-P9-NEXT:    li r11, 6
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
-; CHECK-P9-NEXT:    vextuhrx r5, r5, v2
-; CHECK-P9-NEXT:    vextuhrx r6, r6, v2
-; CHECK-P9-NEXT:    vextuhrx r7, r7, v2
-; CHECK-P9-NEXT:    vextuhrx r8, r8, v2
-; CHECK-P9-NEXT:    vextuhrx r9, r9, v2
-; CHECK-P9-NEXT:    vextuhrx r10, r10, v2
-; CHECK-P9-NEXT:    vextuhrx r11, r11, v2
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r7, r7, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r8, r8, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r9, r9, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r10, r10, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r11, r11, 0, 16, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r4
-; CHECK-P9-NEXT:    mtvsrwz f1, r5
-; CHECK-P9-NEXT:    mtvsrwz f2, r6
-; CHECK-P9-NEXT:    mtvsrwz f3, r7
-; CHECK-P9-NEXT:    mtvsrwz f4, r8
-; CHECK-P9-NEXT:    mtvsrwz f5, r9
-; CHECK-P9-NEXT:    mtvsrwz f6, r10
-; CHECK-P9-NEXT:    mtvsrwz f7, r11
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xscvuxdsp f4, f4
-; CHECK-P9-NEXT:    xscvuxdsp f5, f5
-; CHECK-P9-NEXT:    xscvuxdsp f6, f6
-; CHECK-P9-NEXT:    xscvuxdsp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 0(r3)
-; CHECK-P9-NEXT:    stxv v2, 16(r3)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxwsp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxwsp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r4, 12
-; CHECK-BE-NEXT:    li r5, 8
-; CHECK-BE-NEXT:    li r6, 10
-; CHECK-BE-NEXT:    li r7, 14
-; CHECK-BE-NEXT:    li r8, 6
-; CHECK-BE-NEXT:    li r9, 2
-; CHECK-BE-NEXT:    li r10, 4
-; CHECK-BE-NEXT:    li r11, 0
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
-; CHECK-BE-NEXT:    vextuhlx r5, r5, v2
-; CHECK-BE-NEXT:    vextuhlx r6, r6, v2
-; CHECK-BE-NEXT:    vextuhlx r7, r7, v2
-; CHECK-BE-NEXT:    vextuhlx r8, r8, v2
-; CHECK-BE-NEXT:    vextuhlx r9, r9, v2
-; CHECK-BE-NEXT:    vextuhlx r10, r10, v2
-; CHECK-BE-NEXT:    vextuhlx r11, r11, v2
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r7, r7, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r8, r8, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r9, r9, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r10, r10, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r11, r11, 0, 16, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r4
-; CHECK-BE-NEXT:    mtvsrwz f1, r5
-; CHECK-BE-NEXT:    mtvsrwz f2, r6
-; CHECK-BE-NEXT:    mtvsrwz f3, r7
-; CHECK-BE-NEXT:    mtvsrwz f4, r8
-; CHECK-BE-NEXT:    mtvsrwz f5, r9
-; CHECK-BE-NEXT:    mtvsrwz f6, r10
-; CHECK-BE-NEXT:    mtvsrwz f7, r11
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xscvuxdsp f4, f4
-; CHECK-BE-NEXT:    xscvuxdsp f5, f5
-; CHECK-BE-NEXT:    xscvuxdsp f6, f6
-; CHECK-BE-NEXT:    xscvuxdsp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs2, vs3
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    vmrgew v2, v2, v3
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    stxv v3, 0(r3)
-; CHECK-BE-NEXT:    stxv v2, 16(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxwsp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxwsp vs1, v2
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <8 x i16> %a to <8 x float>
@@ -344,341 +179,80 @@
 define void @test16elt(<16 x float>* noalias nocapture sret %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_0@toc@ha
+; CHECK-P8-NEXT:    addis r6, r2, .LCPI3_1@toc@ha
+; CHECK-P8-NEXT:    xxlxor v3, v3, v3
+; CHECK-P8-NEXT:    lvx v4, 0, r4
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI3_0@toc@l
+; CHECK-P8-NEXT:    addi r6, r6, .LCPI3_1@toc@l
+; CHECK-P8-NEXT:    lvx v2, 0, r5
 ; CHECK-P8-NEXT:    li r5, 16
-; CHECK-P8-NEXT:    lvx v3, 0, r4
-; CHECK-P8-NEXT:    lvx v2, r4, r5
-; CHECK-P8-NEXT:    mfvsrd r7, v3
-; CHECK-P8-NEXT:    xxswapd vs8, v3
-; CHECK-P8-NEXT:    mfvsrd r6, v2
-; CHECK-P8-NEXT:    xxswapd vs2, v2
-; CHECK-P8-NEXT:    clrldi r4, r6, 48
-; CHECK-P8-NEXT:    rldicl r8, r6, 32, 48
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    rlwinm r8, r8, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r4
-; CHECK-P8-NEXT:    rldicl r4, r6, 48, 48
-; CHECK-P8-NEXT:    rldicl r6, r6, 16, 48
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f1, r8
-; CHECK-P8-NEXT:    clrldi r8, r7, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f3, r4
-; CHECK-P8-NEXT:    rlwinm r4, r8, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r7, 32, 48
-; CHECK-P8-NEXT:    mtvsrwz f5, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 48, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    mfvsrd r8, f2
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f2, r6
-; CHECK-P8-NEXT:    rldicl r6, r7, 16, 48
-; CHECK-P8-NEXT:    mtvsrwz f6, r4
-; CHECK-P8-NEXT:    clrldi r4, r8, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f7, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 32, 48
-; CHECK-P8-NEXT:    mtvsrwz f9, r4
-; CHECK-P8-NEXT:    rldicl r4, r8, 48, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    mtvsrwz f10, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 16, 48
-; CHECK-P8-NEXT:    mtvsrwz f11, r4
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    mfvsrd r4, f8
-; CHECK-P8-NEXT:    mtvsrwz f8, r6
-; CHECK-P8-NEXT:    clrldi r6, r4, 48
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xscvuxdsp f4, f4
-; CHECK-P8-NEXT:    mtvsrwz f12, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 32, 48
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    xscvuxdsp f5, f5
-; CHECK-P8-NEXT:    mtvsrwz f13, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 48, 48
-; CHECK-P8-NEXT:    rldicl r4, r4, 16, 48
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    mtvsrwz v2, r6
-; CHECK-P8-NEXT:    mtvsrwz v3, r4
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
+; CHECK-P8-NEXT:    lvx v0, 0, r6
+; CHECK-P8-NEXT:    li r6, 32
+; CHECK-P8-NEXT:    lvx v5, r4, r5
 ; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    xscvuxdsp f6, f6
-; CHECK-P8-NEXT:    xscvuxdsp f7, f7
-; CHECK-P8-NEXT:    xscvuxdsp f9, f9
-; CHECK-P8-NEXT:    xscvuxdsp f10, f10
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs5
-; CHECK-P8-NEXT:    xscvuxdsp f11, f11
-; CHECK-P8-NEXT:    xscvuxdsp f8, f8
-; CHECK-P8-NEXT:    xscvuxdsp f12, f12
-; CHECK-P8-NEXT:    xscvuxdsp f13, f13
-; CHECK-P8-NEXT:    xxmrghd vs5, vs7, vs6
-; CHECK-P8-NEXT:    xscvuxdsp f1, v2
-; CHECK-P8-NEXT:    xscvuxdsp f4, v3
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs0, vs10, vs9
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs3
-; CHECK-P8-NEXT:    xxmrghd vs3, vs8, vs11
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xxmrghd vs2, vs13, vs12
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs5
-; CHECK-P8-NEXT:    xvcvdpsp v0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs4, vs1
-; CHECK-P8-NEXT:    xvcvdpsp v1, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v6, vs2
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
-; CHECK-P8-NEXT:    xvcvdpsp v7, vs1
-; CHECK-P8-NEXT:    vmrgew v3, v5, v4
-; CHECK-P8-NEXT:    vmrgew v4, v1, v0
-; CHECK-P8-NEXT:    stvx v2, r3, r4
-; CHECK-P8-NEXT:    li r4, 32
-; CHECK-P8-NEXT:    vmrgew v5, v7, v6
+; CHECK-P8-NEXT:    vperm v1, v3, v4, v2
+; CHECK-P8-NEXT:    vperm v2, v3, v5, v2
+; CHECK-P8-NEXT:    vperm v5, v3, v5, v0
+; CHECK-P8-NEXT:    vperm v3, v3, v4, v0
+; CHECK-P8-NEXT:    xvcvuxwsp v4, v1
+; CHECK-P8-NEXT:    xvcvuxwsp v2, v2
+; CHECK-P8-NEXT:    xvcvuxwsp v5, v5
+; CHECK-P8-NEXT:    xvcvuxwsp v3, v3
+; CHECK-P8-NEXT:    stvx v4, 0, r3
+; CHECK-P8-NEXT:    stvx v2, r3, r6
+; CHECK-P8-NEXT:    stvx v5, r3, r4
 ; CHECK-P8-NEXT:    stvx v3, r3, r5
-; CHECK-P8-NEXT:    stvx v4, r3, r4
-; CHECK-P8-NEXT:    stvx v5, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv v3, 0(r4)
 ; CHECK-P9-NEXT:    lxv v2, 16(r4)
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 4
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 6
-; CHECK-P9-NEXT:    li r8, 8
-; CHECK-P9-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r9, 12
-; CHECK-P9-NEXT:    li r10, 10
-; CHECK-P9-NEXT:    li r11, 14
-; CHECK-P9-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    vextuhrx r12, r4, v3
-; CHECK-P9-NEXT:    vextuhrx r0, r5, v3
-; CHECK-P9-NEXT:    vextuhrx r30, r6, v3
-; CHECK-P9-NEXT:    vextuhrx r29, r7, v3
-; CHECK-P9-NEXT:    vextuhrx r28, r8, v3
-; CHECK-P9-NEXT:    vextuhrx r27, r9, v3
-; CHECK-P9-NEXT:    vextuhrx r26, r10, v3
-; CHECK-P9-NEXT:    vextuhrx r25, r11, v3
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
-; CHECK-P9-NEXT:    vextuhrx r5, r5, v2
-; CHECK-P9-NEXT:    vextuhrx r6, r6, v2
-; CHECK-P9-NEXT:    vextuhrx r7, r7, v2
-; CHECK-P9-NEXT:    vextuhrx r8, r8, v2
-; CHECK-P9-NEXT:    vextuhrx r9, r9, v2
-; CHECK-P9-NEXT:    vextuhrx r10, r10, v2
-; CHECK-P9-NEXT:    vextuhrx r11, r11, v2
-; CHECK-P9-NEXT:    rlwinm r12, r12, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r0, r0, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r30, r30, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r29, r29, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r28, r28, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r27, r27, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r26, r26, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r25, r25, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r7, r7, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r8, r8, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r9, r9, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r10, r10, 0, 16, 31
-; CHECK-P9-NEXT:    rlwinm r11, r11, 0, 16, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r12
-; CHECK-P9-NEXT:    mtvsrwz f1, r0
-; CHECK-P9-NEXT:    mtvsrwz f2, r30
-; CHECK-P9-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f3, r29
-; CHECK-P9-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f4, r28
-; CHECK-P9-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f5, r27
-; CHECK-P9-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f6, r26
-; CHECK-P9-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f7, r25
-; CHECK-P9-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f8, r4
-; CHECK-P9-NEXT:    mtvsrwz f9, r5
-; CHECK-P9-NEXT:    mtvsrwz f10, r6
-; CHECK-P9-NEXT:    mtvsrwz f11, r7
-; CHECK-P9-NEXT:    mtvsrwz f12, r8
-; CHECK-P9-NEXT:    mtvsrwz f13, r9
-; CHECK-P9-NEXT:    mtvsrwz v2, r10
-; CHECK-P9-NEXT:    mtvsrwz v3, r11
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xscvuxdsp f4, f4
-; CHECK-P9-NEXT:    xscvuxdsp f5, f5
-; CHECK-P9-NEXT:    xscvuxdsp f6, f6
-; CHECK-P9-NEXT:    xscvuxdsp f7, f7
-; CHECK-P9-NEXT:    xscvuxdsp f8, f8
-; CHECK-P9-NEXT:    xscvuxdsp f9, f9
-; CHECK-P9-NEXT:    xscvuxdsp f10, f10
-; CHECK-P9-NEXT:    xscvuxdsp f11, f11
-; CHECK-P9-NEXT:    xscvuxdsp f12, f12
-; CHECK-P9-NEXT:    xscvuxdsp f13, f13
-; CHECK-P9-NEXT:    xscvuxdsp f31, v2
-; CHECK-P9-NEXT:    xscvuxdsp f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P9-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    xvcvdpsp v0, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v1, vs5
-; CHECK-P9-NEXT:    xvcvdpsp v6, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v7, vs7
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
-; CHECK-P9-NEXT:    stxv v4, 32(r3)
+; CHECK-P9-NEXT:    lxv v3, 0(r4)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-P9-NEXT:    lxvx v4, 0, r4
+; CHECK-P9-NEXT:    xxlxor v5, v5, v5
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-P9-NEXT:    vperm v0, v5, v3, v4
+; CHECK-P9-NEXT:    xvcvuxwsp vs0, v0
+; CHECK-P9-NEXT:    lxvx v0, 0, r4
+; CHECK-P9-NEXT:    vperm v3, v5, v3, v0
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxwsp vs1, v3
+; CHECK-P9-NEXT:    vperm v3, v5, v2, v4
+; CHECK-P9-NEXT:    vperm v2, v5, v2, v0
+; CHECK-P9-NEXT:    xvcvuxwsp vs2, v3
+; CHECK-P9-NEXT:    xvcvuxwsp vs3, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv v3, 0(r4)
 ; CHECK-BE-NEXT:    lxv v2, 16(r4)
-; CHECK-BE-NEXT:    li r4, 6
-; CHECK-BE-NEXT:    li r5, 2
-; CHECK-BE-NEXT:    li r6, 4
-; CHECK-BE-NEXT:    li r7, 0
-; CHECK-BE-NEXT:    li r8, 14
-; CHECK-BE-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r9, 10
-; CHECK-BE-NEXT:    li r10, 12
-; CHECK-BE-NEXT:    li r11, 8
-; CHECK-BE-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    vextuhlx r12, r4, v3
-; CHECK-BE-NEXT:    vextuhlx r0, r5, v3
-; CHECK-BE-NEXT:    vextuhlx r30, r6, v3
-; CHECK-BE-NEXT:    vextuhlx r29, r7, v3
-; CHECK-BE-NEXT:    vextuhlx r28, r8, v3
-; CHECK-BE-NEXT:    vextuhlx r27, r9, v3
-; CHECK-BE-NEXT:    vextuhlx r26, r10, v3
-; CHECK-BE-NEXT:    vextuhlx r25, r11, v3
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
-; CHECK-BE-NEXT:    vextuhlx r5, r5, v2
-; CHECK-BE-NEXT:    vextuhlx r6, r6, v2
-; CHECK-BE-NEXT:    vextuhlx r7, r7, v2
-; CHECK-BE-NEXT:    vextuhlx r8, r8, v2
-; CHECK-BE-NEXT:    vextuhlx r9, r9, v2
-; CHECK-BE-NEXT:    vextuhlx r10, r10, v2
-; CHECK-BE-NEXT:    vextuhlx r11, r11, v2
-; CHECK-BE-NEXT:    rlwinm r12, r12, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r0, r0, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r30, r30, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r29, r29, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r28, r28, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r27, r27, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r26, r26, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r25, r25, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r7, r7, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r8, r8, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r9, r9, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r10, r10, 0, 16, 31
-; CHECK-BE-NEXT:    rlwinm r11, r11, 0, 16, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r12
-; CHECK-BE-NEXT:    mtvsrwz f1, r0
-; CHECK-BE-NEXT:    mtvsrwz f2, r30
-; CHECK-BE-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f3, r29
-; CHECK-BE-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f4, r28
-; CHECK-BE-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f5, r27
-; CHECK-BE-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f6, r26
-; CHECK-BE-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f7, r25
-; CHECK-BE-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f8, r4
-; CHECK-BE-NEXT:    mtvsrwz f9, r5
-; CHECK-BE-NEXT:    mtvsrwz f10, r6
-; CHECK-BE-NEXT:    mtvsrwz f11, r7
-; CHECK-BE-NEXT:    mtvsrwz f12, r8
-; CHECK-BE-NEXT:    mtvsrwz f13, r9
-; CHECK-BE-NEXT:    mtvsrwz v2, r10
-; CHECK-BE-NEXT:    mtvsrwz v3, r11
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xscvuxdsp f4, f4
-; CHECK-BE-NEXT:    xscvuxdsp f5, f5
-; CHECK-BE-NEXT:    xscvuxdsp f6, f6
-; CHECK-BE-NEXT:    xscvuxdsp f7, f7
-; CHECK-BE-NEXT:    xscvuxdsp f8, f8
-; CHECK-BE-NEXT:    xscvuxdsp f9, f9
-; CHECK-BE-NEXT:    xscvuxdsp f10, f10
-; CHECK-BE-NEXT:    xscvuxdsp f11, f11
-; CHECK-BE-NEXT:    xscvuxdsp f12, f12
-; CHECK-BE-NEXT:    xscvuxdsp f13, f13
-; CHECK-BE-NEXT:    xscvuxdsp f31, v2
-; CHECK-BE-NEXT:    xscvuxdsp f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v0, vs4
-; CHECK-BE-NEXT:    xvcvdpsp v1, vs5
-; CHECK-BE-NEXT:    xvcvdpsp v6, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v7, vs7
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    vmrgew v4, v1, v0
-; CHECK-BE-NEXT:    vmrgew v5, v7, v6
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
-; CHECK-BE-NEXT:    stxv v4, 32(r3)
+; CHECK-BE-NEXT:    lxv v3, 0(r4)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    xxlxor v5, v5, v5
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-BE-NEXT:    vperm v0, v3, v5, v4
+; CHECK-BE-NEXT:    xvcvuxwsp vs0, v0
+; CHECK-BE-NEXT:    lxvx v0, 0, r4
+; CHECK-BE-NEXT:    vperm v3, v5, v3, v0
+; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxwsp vs1, v3
+; CHECK-BE-NEXT:    vperm v3, v2, v5, v4
+; CHECK-BE-NEXT:    vperm v2, v5, v2, v0
+; CHECK-BE-NEXT:    xvcvuxwsp vs2, v3
+; CHECK-BE-NEXT:    xvcvuxwsp vs3, v2
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    stxv vs3, 48(r3)
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i16>, <16 x i16>* %0, align 32
@@ -713,20 +287,20 @@
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
 ; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 2
 ; CHECK-P9-NEXT:    vextuhrx r3, r3, v2
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
 ; CHECK-P9-NEXT:    extsh r3, r3
-; CHECK-P9-NEXT:    extsh r4, r4
 ; CHECK-P9-NEXT:    mtvsrwa f0, r3
-; CHECK-P9-NEXT:    mtvsrwa f1, r4
+; CHECK-P9-NEXT:    li r3, 2
 ; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
 ; CHECK-P9-NEXT:    xscvdpspn vs0, f0
-; CHECK-P9-NEXT:    xscvdpspn vs1, f1
+; CHECK-P9-NEXT:    vextuhrx r3, r3, v2
+; CHECK-P9-NEXT:    extsh r3, r3
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
+; CHECK-P9-NEXT:    mtvsrwa f0, r3
+; CHECK-P9-NEXT:    xscvsxdsp f0, f0
+; CHECK-P9-NEXT:    xscvdpspn vs0, f0
 ; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 1
-; CHECK-P9-NEXT:    xxsldwi v3, vs1, vs1, 1
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
@@ -734,18 +308,18 @@
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
 ; CHECK-BE-NEXT:    li r3, 2
-; CHECK-BE-NEXT:    li r4, 0
 ; CHECK-BE-NEXT:    vextuhlx r3, r3, v2
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
 ; CHECK-BE-NEXT:    extsh r3, r3
-; CHECK-BE-NEXT:    extsh r4, r4
 ; CHECK-BE-NEXT:    mtvsrwa f0, r3
-; CHECK-BE-NEXT:    mtvsrwa f1, r4
+; CHECK-BE-NEXT:    li r3, 0
 ; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
+; CHECK-BE-NEXT:    vextuhlx r3, r3, v2
+; CHECK-BE-NEXT:    extsh r3, r3
+; CHECK-BE-NEXT:    xscvdpspn v3, f0
+; CHECK-BE-NEXT:    mtvsrwa f0, r3
+; CHECK-BE-NEXT:    xscvsxdsp f0, f0
 ; CHECK-BE-NEXT:    xscvdpspn v2, f0
-; CHECK-BE-NEXT:    xscvdpspn v3, f1
-; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -759,89 +333,30 @@
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    mtvsrd f0, r3
-; CHECK-P8-NEXT:    mfvsrd r3, f0
-; CHECK-P8-NEXT:    clrldi r4, r3, 48
-; CHECK-P8-NEXT:    rldicl r5, r3, 32, 48
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f0, r4
-; CHECK-P8-NEXT:    rldicl r4, r3, 48, 48
-; CHECK-P8-NEXT:    rldicl r3, r3, 16, 48
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    extsh r3, r3
-; CHECK-P8-NEXT:    mtvsrwa f1, r5
-; CHECK-P8-NEXT:    mtvsrwa f2, r4
-; CHECK-P8-NEXT:    mtvsrwa f3, r3
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
+; CHECK-P8-NEXT:    vspltisw v3, 8
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    vadduwm v3, v3, v3
+; CHECK-P8-NEXT:    vmrglh v2, v2, v2
+; CHECK-P8-NEXT:    vslw v2, v2, v3
+; CHECK-P8-NEXT:    vsraw v2, v2, v3
+; CHECK-P8-NEXT:    xvcvsxwsp v2, v2
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrd f0, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 4
-; CHECK-P9-NEXT:    li r5, 2
-; CHECK-P9-NEXT:    li r6, 6
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    vextuhrx r3, r3, v2
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
-; CHECK-P9-NEXT:    vextuhrx r5, r5, v2
-; CHECK-P9-NEXT:    vextuhrx r6, r6, v2
-; CHECK-P9-NEXT:    extsh r3, r3
-; CHECK-P9-NEXT:    extsh r4, r4
-; CHECK-P9-NEXT:    extsh r5, r5
-; CHECK-P9-NEXT:    extsh r6, r6
-; CHECK-P9-NEXT:    mtvsrwa f0, r3
-; CHECK-P9-NEXT:    mtvsrwa f1, r4
-; CHECK-P9-NEXT:    mtvsrwa f2, r5
-; CHECK-P9-NEXT:    mtvsrwa f3, r6
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
+; CHECK-P9-NEXT:    vmrglh v2, v2, v2
+; CHECK-P9-NEXT:    vextsh2w v2, v2
+; CHECK-P9-NEXT:    xvcvsxwsp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r4, 6
 ; CHECK-BE-NEXT:    mtvsrd v2, r3
-; CHECK-BE-NEXT:    li r3, 2
-; CHECK-BE-NEXT:    li r5, 4
-; CHECK-BE-NEXT:    li r6, 0
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
-; CHECK-BE-NEXT:    vextuhlx r3, r3, v2
-; CHECK-BE-NEXT:    vextuhlx r5, r5, v2
-; CHECK-BE-NEXT:    vextuhlx r6, r6, v2
-; CHECK-BE-NEXT:    extsh r4, r4
-; CHECK-BE-NEXT:    extsh r3, r3
-; CHECK-BE-NEXT:    extsh r5, r5
-; CHECK-BE-NEXT:    extsh r6, r6
-; CHECK-BE-NEXT:    mtvsrwa f0, r4
-; CHECK-BE-NEXT:    mtvsrwa f1, r3
-; CHECK-BE-NEXT:    mtvsrwa f2, r5
-; CHECK-BE-NEXT:    mtvsrwa f3, r6
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
+; CHECK-BE-NEXT:    vmrghh v2, v2, v2
+; CHECK-BE-NEXT:    vextsh2w v2, v2
+; CHECK-BE-NEXT:    xvcvsxwsp v2, v2
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i64 %a.coerce to <4 x i16>
@@ -852,166 +367,47 @@
 define void @test8elt_signed(<8 x float>* noalias nocapture sret %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    mfvsrd r5, v2
-; CHECK-P8-NEXT:    xxswapd vs0, v2
+; CHECK-P8-NEXT:    vmrglh v4, v2, v2
+; CHECK-P8-NEXT:    vspltisw v3, 8
 ; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    clrldi r6, r5, 48
-; CHECK-P8-NEXT:    rldicl r7, r5, 32, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mfvsrd r8, f0
-; CHECK-P8-NEXT:    extsh r7, r7
-; CHECK-P8-NEXT:    mtvsrwa f1, r6
-; CHECK-P8-NEXT:    rldicl r6, r5, 48, 48
-; CHECK-P8-NEXT:    rldicl r5, r5, 16, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f0, r7
-; CHECK-P8-NEXT:    mtvsrwa f2, r6
-; CHECK-P8-NEXT:    clrldi r6, r8, 48
-; CHECK-P8-NEXT:    mtvsrwa f3, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 32, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 48, 48
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f5, r5
-; CHECK-P8-NEXT:    extsh r5, r6
-; CHECK-P8-NEXT:    mtvsrwa f6, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 16, 48
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    mtvsrwa f7, r5
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    xscvsxdsp f4, f4
-; CHECK-P8-NEXT:    xscvsxdsp f5, f5
-; CHECK-P8-NEXT:    xscvsxdsp f6, f6
-; CHECK-P8-NEXT:    xscvsxdsp f7, f7
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P8-NEXT:    xxmrghd vs1, vs5, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs4, vs7, vs6
-; CHECK-P8-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs2
-; CHECK-P8-NEXT:    vmrgew v3, v4, v3
-; CHECK-P8-NEXT:    vmrgew v2, v5, v2
+; CHECK-P8-NEXT:    vmrghh v2, v2, v2
+; CHECK-P8-NEXT:    vadduwm v3, v3, v3
+; CHECK-P8-NEXT:    vslw v4, v4, v3
+; CHECK-P8-NEXT:    vslw v2, v2, v3
+; CHECK-P8-NEXT:    vsraw v4, v4, v3
+; CHECK-P8-NEXT:    vsraw v2, v2, v3
+; CHECK-P8-NEXT:    xvcvsxwsp v3, v4
+; CHECK-P8-NEXT:    xvcvsxwsp v2, v2
 ; CHECK-P8-NEXT:    stvx v3, 0, r3
 ; CHECK-P8-NEXT:    stvx v2, r3, r4
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    li r4, 8
-; CHECK-P9-NEXT:    li r5, 12
-; CHECK-P9-NEXT:    li r6, 10
-; CHECK-P9-NEXT:    li r7, 14
-; CHECK-P9-NEXT:    li r8, 0
-; CHECK-P9-NEXT:    li r9, 4
-; CHECK-P9-NEXT:    li r10, 2
-; CHECK-P9-NEXT:    li r11, 6
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
-; CHECK-P9-NEXT:    vextuhrx r5, r5, v2
-; CHECK-P9-NEXT:    vextuhrx r6, r6, v2
-; CHECK-P9-NEXT:    vextuhrx r7, r7, v2
-; CHECK-P9-NEXT:    vextuhrx r8, r8, v2
-; CHECK-P9-NEXT:    vextuhrx r9, r9, v2
-; CHECK-P9-NEXT:    vextuhrx r10, r10, v2
-; CHECK-P9-NEXT:    vextuhrx r11, r11, v2
-; CHECK-P9-NEXT:    extsh r4, r4
-; CHECK-P9-NEXT:    extsh r5, r5
-; CHECK-P9-NEXT:    extsh r6, r6
-; CHECK-P9-NEXT:    extsh r7, r7
-; CHECK-P9-NEXT:    extsh r8, r8
-; CHECK-P9-NEXT:    extsh r9, r9
-; CHECK-P9-NEXT:    extsh r10, r10
-; CHECK-P9-NEXT:    extsh r11, r11
-; CHECK-P9-NEXT:    mtvsrwa f0, r4
-; CHECK-P9-NEXT:    mtvsrwa f1, r5
-; CHECK-P9-NEXT:    mtvsrwa f2, r6
-; CHECK-P9-NEXT:    mtvsrwa f3, r7
-; CHECK-P9-NEXT:    mtvsrwa f4, r8
-; CHECK-P9-NEXT:    mtvsrwa f5, r9
-; CHECK-P9-NEXT:    mtvsrwa f6, r10
-; CHECK-P9-NEXT:    mtvsrwa f7, r11
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xscvsxdsp f4, f4
-; CHECK-P9-NEXT:    xscvsxdsp f5, f5
-; CHECK-P9-NEXT:    xscvsxdsp f6, f6
-; CHECK-P9-NEXT:    xscvsxdsp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 0(r3)
-; CHECK-P9-NEXT:    stxv v2, 16(r3)
+; CHECK-P9-NEXT:    vmrglh v3, v2, v2
+; CHECK-P9-NEXT:    vmrghh v2, v2, v2
+; CHECK-P9-NEXT:    vextsh2w v3, v3
+; CHECK-P9-NEXT:    vextsh2w v2, v2
+; CHECK-P9-NEXT:    xvcvsxwsp vs0, v3
+; CHECK-P9-NEXT:    xvcvsxwsp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r4, 12
-; CHECK-BE-NEXT:    li r5, 8
-; CHECK-BE-NEXT:    li r6, 10
-; CHECK-BE-NEXT:    li r7, 14
-; CHECK-BE-NEXT:    li r8, 6
-; CHECK-BE-NEXT:    li r9, 2
-; CHECK-BE-NEXT:    li r10, 4
-; CHECK-BE-NEXT:    li r11, 0
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
-; CHECK-BE-NEXT:    vextuhlx r5, r5, v2
-; CHECK-BE-NEXT:    vextuhlx r6, r6, v2
-; CHECK-BE-NEXT:    vextuhlx r7, r7, v2
-; CHECK-BE-NEXT:    vextuhlx r8, r8, v2
-; CHECK-BE-NEXT:    vextuhlx r9, r9, v2
-; CHECK-BE-NEXT:    vextuhlx r10, r10, v2
-; CHECK-BE-NEXT:    vextuhlx r11, r11, v2
-; CHECK-BE-NEXT:    extsh r4, r4
-; CHECK-BE-NEXT:    extsh r5, r5
-; CHECK-BE-NEXT:    extsh r6, r6
-; CHECK-BE-NEXT:    extsh r7, r7
-; CHECK-BE-NEXT:    extsh r8, r8
-; CHECK-BE-NEXT:    extsh r9, r9
-; CHECK-BE-NEXT:    extsh r10, r10
-; CHECK-BE-NEXT:    extsh r11, r11
-; CHECK-BE-NEXT:    mtvsrwa f0, r4
-; CHECK-BE-NEXT:    mtvsrwa f1, r5
-; CHECK-BE-NEXT:    mtvsrwa f2, r6
-; CHECK-BE-NEXT:    mtvsrwa f3, r7
-; CHECK-BE-NEXT:    mtvsrwa f4, r8
-; CHECK-BE-NEXT:    mtvsrwa f5, r9
-; CHECK-BE-NEXT:    mtvsrwa f6, r10
-; CHECK-BE-NEXT:    mtvsrwa f7, r11
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xscvsxdsp f4, f4
-; CHECK-BE-NEXT:    xscvsxdsp f5, f5
-; CHECK-BE-NEXT:    xscvsxdsp f6, f6
-; CHECK-BE-NEXT:    xscvsxdsp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs2, vs3
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    vmrgew v2, v2, v3
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    stxv v3, 0(r3)
-; CHECK-BE-NEXT:    stxv v2, 16(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    vmrghh v2, v2, v2
+; CHECK-BE-NEXT:    vextsh2w v3, v3
+; CHECK-BE-NEXT:    vextsh2w v2, v2
+; CHECK-BE-NEXT:    xvcvsxwsp vs0, v3
+; CHECK-BE-NEXT:    xvcvsxwsp vs1, v2
+; CHECK-BE-NEXT:    stxv vs1, 0(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <8 x i16> %a to <8 x float>
@@ -1023,340 +419,80 @@
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
-; CHECK-P8-NEXT:    lvx v3, 0, r4
-; CHECK-P8-NEXT:    lvx v2, r4, r5
-; CHECK-P8-NEXT:    mfvsrd r7, v3
-; CHECK-P8-NEXT:    xxswapd vs8, v3
-; CHECK-P8-NEXT:    mfvsrd r6, v2
-; CHECK-P8-NEXT:    xxswapd vs2, v2
-; CHECK-P8-NEXT:    clrldi r4, r6, 48
-; CHECK-P8-NEXT:    rldicl r8, r6, 32, 48
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    extsh r8, r8
-; CHECK-P8-NEXT:    mtvsrwa f0, r4
-; CHECK-P8-NEXT:    rldicl r4, r6, 48, 48
-; CHECK-P8-NEXT:    rldicl r6, r6, 16, 48
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f1, r8
-; CHECK-P8-NEXT:    clrldi r8, r7, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f3, r4
-; CHECK-P8-NEXT:    extsh r4, r8
-; CHECK-P8-NEXT:    mtvsrwa f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r7, 32, 48
-; CHECK-P8-NEXT:    mtvsrwa f5, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 48, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mfvsrd r8, f2
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f2, r6
-; CHECK-P8-NEXT:    rldicl r6, r7, 16, 48
-; CHECK-P8-NEXT:    mtvsrwa f6, r4
-; CHECK-P8-NEXT:    clrldi r4, r8, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f7, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 32, 48
-; CHECK-P8-NEXT:    mtvsrwa f9, r4
-; CHECK-P8-NEXT:    rldicl r4, r8, 48, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f10, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 16, 48
-; CHECK-P8-NEXT:    mtvsrwa f11, r4
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mfvsrd r4, f8
-; CHECK-P8-NEXT:    mtvsrwa f8, r6
-; CHECK-P8-NEXT:    clrldi r6, r4, 48
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xscvsxdsp f4, f4
-; CHECK-P8-NEXT:    mtvsrwa f12, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 32, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    xscvsxdsp f5, f5
-; CHECK-P8-NEXT:    mtvsrwa f13, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 48, 48
-; CHECK-P8-NEXT:    rldicl r4, r4, 16, 48
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    mtvsrwa v2, r6
-; CHECK-P8-NEXT:    mtvsrwa v3, r4
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
+; CHECK-P8-NEXT:    lvx v2, 0, r4
+; CHECK-P8-NEXT:    vspltisw v5, 8
+; CHECK-P8-NEXT:    li r6, 32
+; CHECK-P8-NEXT:    lvx v3, r4, r5
 ; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    xscvsxdsp f6, f6
-; CHECK-P8-NEXT:    xscvsxdsp f7, f7
-; CHECK-P8-NEXT:    xscvsxdsp f9, f9
-; CHECK-P8-NEXT:    xscvsxdsp f10, f10
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs5
-; CHECK-P8-NEXT:    xscvsxdsp f11, f11
-; CHECK-P8-NEXT:    xscvsxdsp f8, f8
-; CHECK-P8-NEXT:    xscvsxdsp f12, f12
-; CHECK-P8-NEXT:    xscvsxdsp f13, f13
-; CHECK-P8-NEXT:    xxmrghd vs5, vs7, vs6
-; CHECK-P8-NEXT:    xscvsxdsp f1, v2
-; CHECK-P8-NEXT:    xscvsxdsp f4, v3
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs0, vs10, vs9
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs3
-; CHECK-P8-NEXT:    xxmrghd vs3, vs8, vs11
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xxmrghd vs2, vs13, vs12
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs5
-; CHECK-P8-NEXT:    xvcvdpsp v0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs4, vs1
-; CHECK-P8-NEXT:    xvcvdpsp v1, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v6, vs2
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
-; CHECK-P8-NEXT:    xvcvdpsp v7, vs1
-; CHECK-P8-NEXT:    vmrgew v3, v5, v4
-; CHECK-P8-NEXT:    vmrgew v4, v1, v0
-; CHECK-P8-NEXT:    stvx v2, r3, r4
-; CHECK-P8-NEXT:    li r4, 32
-; CHECK-P8-NEXT:    vmrgew v5, v7, v6
-; CHECK-P8-NEXT:    stvx v3, r3, r5
-; CHECK-P8-NEXT:    stvx v4, r3, r4
-; CHECK-P8-NEXT:    stvx v5, 0, r3
+; CHECK-P8-NEXT:    vmrglh v4, v2, v2
+; CHECK-P8-NEXT:    vmrglh v0, v3, v3
+; CHECK-P8-NEXT:    vmrghh v3, v3, v3
+; CHECK-P8-NEXT:    vmrghh v2, v2, v2
+; CHECK-P8-NEXT:    vadduwm v5, v5, v5
+; CHECK-P8-NEXT:    vslw v4, v4, v5
+; CHECK-P8-NEXT:    vslw v0, v0, v5
+; CHECK-P8-NEXT:    vslw v3, v3, v5
+; CHECK-P8-NEXT:    vslw v2, v2, v5
+; CHECK-P8-NEXT:    vsraw v4, v4, v5
+; CHECK-P8-NEXT:    vsraw v0, v0, v5
+; CHECK-P8-NEXT:    vsraw v3, v3, v5
+; CHECK-P8-NEXT:    vsraw v2, v2, v5
+; CHECK-P8-NEXT:    xvcvsxwsp v4, v4
+; CHECK-P8-NEXT:    xvcvsxwsp v5, v0
+; CHECK-P8-NEXT:    xvcvsxwsp v3, v3
+; CHECK-P8-NEXT:    xvcvsxwsp v2, v2
+; CHECK-P8-NEXT:    stvx v4, 0, r3
+; CHECK-P8-NEXT:    stvx v5, r3, r6
+; CHECK-P8-NEXT:    stvx v3, r3, r4
+; CHECK-P8-NEXT:    stvx v2, r3, r5
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    lxv v3, 0(r4)
 ; CHECK-P9-NEXT:    lxv v2, 16(r4)
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 4
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 6
-; CHECK-P9-NEXT:    li r8, 8
-; CHECK-P9-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r9, 12
-; CHECK-P9-NEXT:    li r10, 10
-; CHECK-P9-NEXT:    li r11, 14
-; CHECK-P9-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    vextuhrx r12, r4, v3
-; CHECK-P9-NEXT:    vextuhrx r0, r5, v3
-; CHECK-P9-NEXT:    vextuhrx r30, r6, v3
-; CHECK-P9-NEXT:    vextuhrx r29, r7, v3
-; CHECK-P9-NEXT:    vextuhrx r28, r8, v3
-; CHECK-P9-NEXT:    vextuhrx r27, r9, v3
-; CHECK-P9-NEXT:    vextuhrx r26, r10, v3
-; CHECK-P9-NEXT:    vextuhrx r25, r11, v3
-; CHECK-P9-NEXT:    vextuhrx r4, r4, v2
-; CHECK-P9-NEXT:    vextuhrx r5, r5, v2
-; CHECK-P9-NEXT:    vextuhrx r6, r6, v2
-; CHECK-P9-NEXT:    vextuhrx r7, r7, v2
-; CHECK-P9-NEXT:    vextuhrx r8, r8, v2
-; CHECK-P9-NEXT:    vextuhrx r9, r9, v2
-; CHECK-P9-NEXT:    vextuhrx r10, r10, v2
-; CHECK-P9-NEXT:    vextuhrx r11, r11, v2
-; CHECK-P9-NEXT:    extsh r12, r12
-; CHECK-P9-NEXT:    extsh r0, r0
-; CHECK-P9-NEXT:    extsh r30, r30
-; CHECK-P9-NEXT:    extsh r29, r29
-; CHECK-P9-NEXT:    extsh r28, r28
-; CHECK-P9-NEXT:    extsh r27, r27
-; CHECK-P9-NEXT:    extsh r26, r26
-; CHECK-P9-NEXT:    extsh r25, r25
-; CHECK-P9-NEXT:    extsh r4, r4
-; CHECK-P9-NEXT:    extsh r5, r5
-; CHECK-P9-NEXT:    extsh r6, r6
-; CHECK-P9-NEXT:    extsh r7, r7
-; CHECK-P9-NEXT:    extsh r8, r8
-; CHECK-P9-NEXT:    extsh r9, r9
-; CHECK-P9-NEXT:    extsh r10, r10
-; CHECK-P9-NEXT:    extsh r11, r11
-; CHECK-P9-NEXT:    mtvsrwa f0, r12
-; CHECK-P9-NEXT:    mtvsrwa f1, r0
-; CHECK-P9-NEXT:    mtvsrwa f2, r30
-; CHECK-P9-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f3, r29
-; CHECK-P9-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f4, r28
-; CHECK-P9-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f5, r27
-; CHECK-P9-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f6, r26
-; CHECK-P9-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f7, r25
-; CHECK-P9-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f8, r4
-; CHECK-P9-NEXT:    mtvsrwa f9, r5
-; CHECK-P9-NEXT:    mtvsrwa f10, r6
-; CHECK-P9-NEXT:    mtvsrwa f11, r7
-; CHECK-P9-NEXT:    mtvsrwa f12, r8
-; CHECK-P9-NEXT:    mtvsrwa f13, r9
-; CHECK-P9-NEXT:    mtvsrwa v2, r10
-; CHECK-P9-NEXT:    mtvsrwa v3, r11
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xscvsxdsp f4, f4
-; CHECK-P9-NEXT:    xscvsxdsp f5, f5
-; CHECK-P9-NEXT:    xscvsxdsp f6, f6
-; CHECK-P9-NEXT:    xscvsxdsp f7, f7
-; CHECK-P9-NEXT:    xscvsxdsp f8, f8
-; CHECK-P9-NEXT:    xscvsxdsp f9, f9
-; CHECK-P9-NEXT:    xscvsxdsp f10, f10
-; CHECK-P9-NEXT:    xscvsxdsp f11, f11
-; CHECK-P9-NEXT:    xscvsxdsp f12, f12
-; CHECK-P9-NEXT:    xscvsxdsp f13, f13
-; CHECK-P9-NEXT:    xscvsxdsp f31, v2
-; CHECK-P9-NEXT:    xscvsxdsp f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P9-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    xvcvdpsp v0, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v1, vs5
-; CHECK-P9-NEXT:    xvcvdpsp v6, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v7, vs7
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
-; CHECK-P9-NEXT:    stxv v4, 32(r3)
+; CHECK-P9-NEXT:    vmrglh v4, v3, v3
+; CHECK-P9-NEXT:    vmrghh v3, v3, v3
+; CHECK-P9-NEXT:    vextsh2w v3, v3
+; CHECK-P9-NEXT:    xvcvsxwsp vs1, v3
+; CHECK-P9-NEXT:    vmrglh v3, v2, v2
+; CHECK-P9-NEXT:    vmrghh v2, v2, v2
+; CHECK-P9-NEXT:    vextsh2w v4, v4
+; CHECK-P9-NEXT:    xvcvsxwsp vs0, v4
+; CHECK-P9-NEXT:    vextsh2w v3, v3
+; CHECK-P9-NEXT:    vextsh2w v2, v2
+; CHECK-P9-NEXT:    xvcvsxwsp vs2, v3
+; CHECK-P9-NEXT:    xvcvsxwsp vs3, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv v3, 0(r4)
 ; CHECK-BE-NEXT:    lxv v2, 16(r4)
-; CHECK-BE-NEXT:    li r4, 6
-; CHECK-BE-NEXT:    li r5, 2
-; CHECK-BE-NEXT:    li r6, 4
-; CHECK-BE-NEXT:    li r7, 0
-; CHECK-BE-NEXT:    li r8, 14
-; CHECK-BE-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r9, 10
-; CHECK-BE-NEXT:    li r10, 12
-; CHECK-BE-NEXT:    li r11, 8
-; CHECK-BE-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    vextuhlx r12, r4, v3
-; CHECK-BE-NEXT:    vextuhlx r0, r5, v3
-; CHECK-BE-NEXT:    vextuhlx r30, r6, v3
-; CHECK-BE-NEXT:    vextuhlx r29, r7, v3
-; CHECK-BE-NEXT:    vextuhlx r28, r8, v3
-; CHECK-BE-NEXT:    vextuhlx r27, r9, v3
-; CHECK-BE-NEXT:    vextuhlx r26, r10, v3
-; CHECK-BE-NEXT:    vextuhlx r25, r11, v3
-; CHECK-BE-NEXT:    vextuhlx r4, r4, v2
-; CHECK-BE-NEXT:    vextuhlx r5, r5, v2
-; CHECK-BE-NEXT:    vextuhlx r6, r6, v2
-; CHECK-BE-NEXT:    vextuhlx r7, r7, v2
-; CHECK-BE-NEXT:    vextuhlx r8, r8, v2
-; CHECK-BE-NEXT:    vextuhlx r9, r9, v2
-; CHECK-BE-NEXT:    vextuhlx r10, r10, v2
-; CHECK-BE-NEXT:    vextuhlx r11, r11, v2
-; CHECK-BE-NEXT:    extsh r12, r12
-; CHECK-BE-NEXT:    extsh r0, r0
-; CHECK-BE-NEXT:    extsh r30, r30
-; CHECK-BE-NEXT:    extsh r29, r29
-; CHECK-BE-NEXT:    extsh r28, r28
-; CHECK-BE-NEXT:    extsh r27, r27
-; CHECK-BE-NEXT:    extsh r26, r26
-; CHECK-BE-NEXT:    extsh r25, r25
-; CHECK-BE-NEXT:    extsh r4, r4
-; CHECK-BE-NEXT:    extsh r5, r5
-; CHECK-BE-NEXT:    extsh r6, r6
-; CHECK-BE-NEXT:    extsh r7, r7
-; CHECK-BE-NEXT:    extsh r8, r8
-; CHECK-BE-NEXT:    extsh r9, r9
-; CHECK-BE-NEXT:    extsh r10, r10
-; CHECK-BE-NEXT:    extsh r11, r11
-; CHECK-BE-NEXT:    mtvsrwa f0, r12
-; CHECK-BE-NEXT:    mtvsrwa f1, r0
-; CHECK-BE-NEXT:    mtvsrwa f2, r30
-; CHECK-BE-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f3, r29
-; CHECK-BE-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f4, r28
-; CHECK-BE-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f5, r27
-; CHECK-BE-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f6, r26
-; CHECK-BE-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f7, r25
-; CHECK-BE-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f8, r4
-; CHECK-BE-NEXT:    mtvsrwa f9, r5
-; CHECK-BE-NEXT:    mtvsrwa f10, r6
-; CHECK-BE-NEXT:    mtvsrwa f11, r7
-; CHECK-BE-NEXT:    mtvsrwa f12, r8
-; CHECK-BE-NEXT:    mtvsrwa f13, r9
-; CHECK-BE-NEXT:    mtvsrwa v2, r10
-; CHECK-BE-NEXT:    mtvsrwa v3, r11
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xscvsxdsp f4, f4
-; CHECK-BE-NEXT:    xscvsxdsp f5, f5
-; CHECK-BE-NEXT:    xscvsxdsp f6, f6
-; CHECK-BE-NEXT:    xscvsxdsp f7, f7
-; CHECK-BE-NEXT:    xscvsxdsp f8, f8
-; CHECK-BE-NEXT:    xscvsxdsp f9, f9
-; CHECK-BE-NEXT:    xscvsxdsp f10, f10
-; CHECK-BE-NEXT:    xscvsxdsp f11, f11
-; CHECK-BE-NEXT:    xscvsxdsp f12, f12
-; CHECK-BE-NEXT:    xscvsxdsp f13, f13
-; CHECK-BE-NEXT:    xscvsxdsp f31, v2
-; CHECK-BE-NEXT:    xscvsxdsp f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v0, vs4
-; CHECK-BE-NEXT:    xvcvdpsp v1, vs5
-; CHECK-BE-NEXT:    xvcvdpsp v6, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v7, vs7
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    vmrgew v4, v1, v0
-; CHECK-BE-NEXT:    vmrgew v5, v7, v6
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
-; CHECK-BE-NEXT:    stxv v4, 32(r3)
+; CHECK-BE-NEXT:    lxv v3, 0(r4)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_0@toc@l
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    xxlxor v5, v5, v5
+; CHECK-BE-NEXT:    vperm v0, v5, v3, v4
+; CHECK-BE-NEXT:    vperm v4, v5, v2, v4
+; CHECK-BE-NEXT:    vmrghh v3, v3, v3
+; CHECK-BE-NEXT:    vmrghh v2, v2, v2
+; CHECK-BE-NEXT:    vextsh2w v0, v0
+; CHECK-BE-NEXT:    vextsh2w v4, v4
+; CHECK-BE-NEXT:    vextsh2w v3, v3
+; CHECK-BE-NEXT:    vextsh2w v2, v2
+; CHECK-BE-NEXT:    xvcvsxwsp vs0, v0
+; CHECK-BE-NEXT:    xvcvsxwsp vs1, v4
+; CHECK-BE-NEXT:    xvcvsxwsp vs2, v3
+; CHECK-BE-NEXT:    xvcvsxwsp vs3, v2
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    stxv vs2, 0(r3)
+; CHECK-BE-NEXT:    stxv vs1, 48(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i16>, <16 x i16>* %0, align 32
diff --git a/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll b/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll
index a8f814c..f03d069 100644
--- a/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll
@@ -24,23 +24,23 @@
 ;
 ; CHECK-P9-LABEL: test2elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    addis r4, r2, .LCPI0_0@toc@ha
-; CHECK-P9-NEXT:    mtvsrws v3, r3
+; CHECK-P9-NEXT:    mtvsrws v2, r3
+; CHECK-P9-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P9-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r3
 ; CHECK-P9-NEXT:    xxlxor v4, v4, v4
-; CHECK-P9-NEXT:    addi r4, r4, .LCPI0_0@toc@l
-; CHECK-P9-NEXT:    lxvx v2, 0, r4
-; CHECK-P9-NEXT:    vperm v2, v4, v3, v2
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
 ; CHECK-P9-NEXT:    xvcvuxddp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    addis r4, r2, .LCPI0_0@toc@ha
-; CHECK-BE-NEXT:    mtvsrws v3, r3
+; CHECK-BE-NEXT:    mtvsrws v2, r3
+; CHECK-BE-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-BE-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r3
 ; CHECK-BE-NEXT:    xxlxor v4, v4, v4
-; CHECK-BE-NEXT:    addi r4, r4, .LCPI0_0@toc@l
-; CHECK-BE-NEXT:    lxvx v2, 0, r4
-; CHECK-BE-NEXT:    vperm v2, v3, v4, v2
+; CHECK-BE-NEXT:    vperm v2, v2, v4, v3
 ; CHECK-BE-NEXT:    xvcvuxddp v2, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -74,39 +74,39 @@
 ;
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    addis r5, r2, .LCPI1_0@toc@ha
-; CHECK-P9-NEXT:    addis r6, r2, .LCPI1_1@toc@ha
 ; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    xxlxor v5, v5, v5
-; CHECK-P9-NEXT:    addi r5, r5, .LCPI1_0@toc@l
-; CHECK-P9-NEXT:    addi r6, r6, .LCPI1_1@toc@l
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    lxvx v2, 0, r5
-; CHECK-P9-NEXT:    lxvx v3, 0, r6
-; CHECK-P9-NEXT:    vperm v2, v5, v4, v2
-; CHECK-P9-NEXT:    vperm v3, v5, v4, v3
-; CHECK-P9-NEXT:    xvcvuxddp vs0, v2
-; CHECK-P9-NEXT:    xvcvuxddp vs1, v3
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI1_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI1_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI1_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI1_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    addis r5, r2, .LCPI1_0@toc@ha
-; CHECK-BE-NEXT:    addis r6, r2, .LCPI1_1@toc@ha
-; CHECK-BE-NEXT:    mtvsrd v4, r4
-; CHECK-BE-NEXT:    xxlxor v5, v5, v5
-; CHECK-BE-NEXT:    addi r5, r5, .LCPI1_0@toc@l
-; CHECK-BE-NEXT:    addi r6, r6, .LCPI1_1@toc@l
-; CHECK-BE-NEXT:    lxvx v2, 0, r5
-; CHECK-BE-NEXT:    lxvx v3, 0, r6
-; CHECK-BE-NEXT:    vperm v2, v4, v5, v2
-; CHECK-BE-NEXT:    vperm v3, v5, v4, v3
-; CHECK-BE-NEXT:    xvcvuxddp vs0, v2
-; CHECK-BE-NEXT:    xvcvuxddp vs1, v3
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI1_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI1_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI1_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI1_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxddp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs1, v2
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i64 %a.coerce to <4 x i16>
@@ -155,59 +155,59 @@
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
-; CHECK-P9-NEXT:    addis r5, r2, .LCPI2_1@toc@ha
-; CHECK-P9-NEXT:    addis r6, r2, .LCPI2_2@toc@ha
-; CHECK-P9-NEXT:    addis r7, r2, .LCPI2_3@toc@ha
-; CHECK-P9-NEXT:    xxlxor v1, v1, v1
 ; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_0@toc@l
-; CHECK-P9-NEXT:    addi r5, r5, .LCPI2_1@toc@l
-; CHECK-P9-NEXT:    addi r6, r6, .LCPI2_2@toc@l
-; CHECK-P9-NEXT:    addi r7, r7, .LCPI2_3@toc@l
 ; CHECK-P9-NEXT:    lxvx v3, 0, r4
-; CHECK-P9-NEXT:    lxvx v4, 0, r5
-; CHECK-P9-NEXT:    lxvx v5, 0, r6
-; CHECK-P9-NEXT:    lxvx v0, 0, r7
-; CHECK-P9-NEXT:    vperm v3, v1, v2, v3
-; CHECK-P9-NEXT:    vperm v4, v1, v2, v4
-; CHECK-P9-NEXT:    vperm v5, v1, v2, v5
-; CHECK-P9-NEXT:    vperm v2, v1, v2, v0
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-P9-NEXT:    xvcvuxddp vs0, v3
-; CHECK-P9-NEXT:    xvcvuxddp vs1, v4
-; CHECK-P9-NEXT:    xvcvuxddp vs2, v5
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
 ; CHECK-P9-NEXT:    xvcvuxddp vs3, v2
 ; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
-; CHECK-BE-NEXT:    addis r5, r2, .LCPI2_1@toc@ha
-; CHECK-BE-NEXT:    addis r6, r2, .LCPI2_2@toc@ha
-; CHECK-BE-NEXT:    addis r7, r2, .LCPI2_3@toc@ha
-; CHECK-BE-NEXT:    xxlxor v1, v1, v1
 ; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_0@toc@l
-; CHECK-BE-NEXT:    addi r5, r5, .LCPI2_1@toc@l
-; CHECK-BE-NEXT:    addi r6, r6, .LCPI2_2@toc@l
-; CHECK-BE-NEXT:    addi r7, r7, .LCPI2_3@toc@l
 ; CHECK-BE-NEXT:    lxvx v3, 0, r4
-; CHECK-BE-NEXT:    lxvx v4, 0, r5
-; CHECK-BE-NEXT:    lxvx v5, 0, r6
-; CHECK-BE-NEXT:    lxvx v0, 0, r7
-; CHECK-BE-NEXT:    vperm v3, v2, v1, v3
-; CHECK-BE-NEXT:    vperm v4, v1, v2, v4
-; CHECK-BE-NEXT:    vperm v5, v1, v2, v5
-; CHECK-BE-NEXT:    vperm v2, v1, v2, v0
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
 ; CHECK-BE-NEXT:    xvcvuxddp vs0, v3
-; CHECK-BE-NEXT:    xvcvuxddp vs1, v4
-; CHECK-BE-NEXT:    xvcvuxddp vs2, v5
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_2@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs1, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_3@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs2, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    xvcvuxddp vs3, v2
 ; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <8 x i16> %a to <8 x double>
@@ -276,88 +276,88 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    addis r5, r2, .LCPI3_0@toc@ha
-; CHECK-P9-NEXT:    addis r6, r2, .LCPI3_1@toc@ha
-; CHECK-P9-NEXT:    addis r7, r2, .LCPI3_2@toc@ha
-; CHECK-P9-NEXT:    addis r8, r2, .LCPI3_3@toc@ha
-; CHECK-P9-NEXT:    lxv v0, 0(r4)
-; CHECK-P9-NEXT:    lxv v1, 16(r4)
-; CHECK-P9-NEXT:    xxlxor v6, v6, v6
-; CHECK-P9-NEXT:    addi r5, r5, .LCPI3_0@toc@l
-; CHECK-P9-NEXT:    addi r6, r6, .LCPI3_1@toc@l
-; CHECK-P9-NEXT:    addi r7, r7, .LCPI3_2@toc@l
-; CHECK-P9-NEXT:    addi r8, r8, .LCPI3_3@toc@l
-; CHECK-P9-NEXT:    lxvx v2, 0, r5
-; CHECK-P9-NEXT:    lxvx v3, 0, r6
-; CHECK-P9-NEXT:    lxvx v4, 0, r7
-; CHECK-P9-NEXT:    lxvx v5, 0, r8
-; CHECK-P9-NEXT:    vperm v7, v6, v0, v2
-; CHECK-P9-NEXT:    vperm v8, v6, v0, v3
-; CHECK-P9-NEXT:    vperm v9, v6, v0, v4
-; CHECK-P9-NEXT:    vperm v0, v6, v0, v5
-; CHECK-P9-NEXT:    vperm v2, v6, v1, v2
-; CHECK-P9-NEXT:    vperm v3, v6, v1, v3
-; CHECK-P9-NEXT:    vperm v4, v6, v1, v4
-; CHECK-P9-NEXT:    vperm v5, v6, v1, v5
-; CHECK-P9-NEXT:    xvcvuxddp vs0, v7
-; CHECK-P9-NEXT:    xvcvuxddp vs1, v8
-; CHECK-P9-NEXT:    xvcvuxddp vs2, v9
-; CHECK-P9-NEXT:    xvcvuxddp vs3, v0
-; CHECK-P9-NEXT:    xvcvuxddp vs4, v2
-; CHECK-P9-NEXT:    xvcvuxddp vs5, v3
-; CHECK-P9-NEXT:    xvcvuxddp vs6, v4
-; CHECK-P9-NEXT:    xvcvuxddp vs7, v5
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    lxv v2, 16(r4)
+; CHECK-P9-NEXT:    lxv v3, 0(r4)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-P9-NEXT:    lxvx v4, 0, r4
+; CHECK-P9-NEXT:    xxlxor v5, v5, v5
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-P9-NEXT:    vperm v0, v5, v3, v4
+; CHECK-P9-NEXT:    xvcvuxddp vs0, v0
+; CHECK-P9-NEXT:    lxvx v0, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_2@toc@l
+; CHECK-P9-NEXT:    vperm v1, v5, v3, v0
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs1, v1
+; CHECK-P9-NEXT:    lxvx v1, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_3@toc@l
+; CHECK-P9-NEXT:    vperm v6, v5, v3, v1
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs2, v6
+; CHECK-P9-NEXT:    lxvx v6, 0, r4
+; CHECK-P9-NEXT:    vperm v3, v5, v3, v6
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs3, v3
+; CHECK-P9-NEXT:    vperm v3, v5, v2, v4
+; CHECK-P9-NEXT:    xvcvuxddp vs4, v3
+; CHECK-P9-NEXT:    vperm v3, v5, v2, v0
+; CHECK-P9-NEXT:    xvcvuxddp vs5, v3
+; CHECK-P9-NEXT:    vperm v3, v5, v2, v1
+; CHECK-P9-NEXT:    vperm v2, v5, v2, v6
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs6, v3
+; CHECK-P9-NEXT:    xvcvuxddp vs7, v2
+; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    stxv vs5, 80(r3)
 ; CHECK-P9-NEXT:    stxv vs7, 112(r3)
 ; CHECK-P9-NEXT:    stxv vs6, 96(r3)
-; CHECK-P9-NEXT:    stxv vs5, 80(r3)
-; CHECK-P9-NEXT:    stxv vs4, 64(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    addis r5, r2, .LCPI3_0@toc@ha
-; CHECK-BE-NEXT:    addis r6, r2, .LCPI3_1@toc@ha
-; CHECK-BE-NEXT:    addis r7, r2, .LCPI3_2@toc@ha
-; CHECK-BE-NEXT:    addis r8, r2, .LCPI3_3@toc@ha
-; CHECK-BE-NEXT:    lxv v0, 0(r4)
-; CHECK-BE-NEXT:    lxv v1, 16(r4)
-; CHECK-BE-NEXT:    xxlxor v6, v6, v6
-; CHECK-BE-NEXT:    addi r5, r5, .LCPI3_0@toc@l
-; CHECK-BE-NEXT:    addi r6, r6, .LCPI3_1@toc@l
-; CHECK-BE-NEXT:    addi r7, r7, .LCPI3_2@toc@l
-; CHECK-BE-NEXT:    addi r8, r8, .LCPI3_3@toc@l
-; CHECK-BE-NEXT:    lxvx v2, 0, r5
-; CHECK-BE-NEXT:    lxvx v3, 0, r6
-; CHECK-BE-NEXT:    lxvx v4, 0, r7
-; CHECK-BE-NEXT:    lxvx v5, 0, r8
-; CHECK-BE-NEXT:    vperm v7, v0, v6, v2
-; CHECK-BE-NEXT:    vperm v8, v6, v0, v3
-; CHECK-BE-NEXT:    vperm v9, v6, v0, v4
-; CHECK-BE-NEXT:    vperm v0, v6, v0, v5
-; CHECK-BE-NEXT:    vperm v2, v1, v6, v2
-; CHECK-BE-NEXT:    vperm v3, v6, v1, v3
-; CHECK-BE-NEXT:    vperm v4, v6, v1, v4
-; CHECK-BE-NEXT:    vperm v5, v6, v1, v5
-; CHECK-BE-NEXT:    xvcvuxddp vs0, v7
-; CHECK-BE-NEXT:    xvcvuxddp vs1, v8
-; CHECK-BE-NEXT:    xvcvuxddp vs2, v9
-; CHECK-BE-NEXT:    xvcvuxddp vs3, v0
-; CHECK-BE-NEXT:    xvcvuxddp vs4, v2
-; CHECK-BE-NEXT:    xvcvuxddp vs5, v3
-; CHECK-BE-NEXT:    xvcvuxddp vs6, v4
-; CHECK-BE-NEXT:    xvcvuxddp vs7, v5
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    lxv v2, 16(r4)
+; CHECK-BE-NEXT:    lxv v3, 0(r4)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    xxlxor v5, v5, v5
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-BE-NEXT:    vperm v0, v3, v5, v4
+; CHECK-BE-NEXT:    xvcvuxddp vs0, v0
+; CHECK-BE-NEXT:    lxvx v0, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_2@toc@l
+; CHECK-BE-NEXT:    vperm v1, v5, v3, v0
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs1, v1
+; CHECK-BE-NEXT:    lxvx v1, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_3@toc@l
+; CHECK-BE-NEXT:    vperm v6, v5, v3, v1
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs2, v6
+; CHECK-BE-NEXT:    lxvx v6, 0, r4
+; CHECK-BE-NEXT:    vperm v3, v5, v3, v6
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs3, v3
+; CHECK-BE-NEXT:    vperm v3, v2, v5, v4
+; CHECK-BE-NEXT:    xvcvuxddp vs4, v3
+; CHECK-BE-NEXT:    vperm v3, v5, v2, v0
+; CHECK-BE-NEXT:    xvcvuxddp vs5, v3
+; CHECK-BE-NEXT:    vperm v3, v5, v2, v1
+; CHECK-BE-NEXT:    vperm v2, v5, v2, v6
+; CHECK-BE-NEXT:    stxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs6, v3
+; CHECK-BE-NEXT:    xvcvuxddp vs7, v2
+; CHECK-BE-NEXT:    stxv vs4, 64(r3)
+; CHECK-BE-NEXT:    stxv vs5, 80(r3)
 ; CHECK-BE-NEXT:    stxv vs7, 112(r3)
 ; CHECK-BE-NEXT:    stxv vs6, 96(r3)
-; CHECK-BE-NEXT:    stxv vs5, 80(r3)
-; CHECK-BE-NEXT:    stxv vs4, 64(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i16>, <16 x i16>* %0, align 32
@@ -369,37 +369,39 @@
 define <2 x double> @test2elt_signed(i32 %a.coerce) local_unnamed_addr #0 {
 ; CHECK-P8-LABEL: test2elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI4_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r3
-; CHECK-P8-NEXT:    mfvsrd r3, f0
-; CHECK-P8-NEXT:    clrldi r4, r3, 48
-; CHECK-P8-NEXT:    rldicl r3, r3, 48, 48
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    extsh r3, r3
-; CHECK-P8-NEXT:    mtvsrwa f0, r4
-; CHECK-P8-NEXT:    mtvsrwa f1, r3
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    xxmrghd v2, vs1, vs0
+; CHECK-P8-NEXT:    addi r3, r4, .LCPI4_0@toc@l
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI4_1@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI4_1@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    vsld v2, v2, v3
+; CHECK-P8-NEXT:    vsrad v2, v2, v3
+; CHECK-P8-NEXT:    xvcvsxddp v2, v2
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test2elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    addis r4, r2, .LCPI4_0@toc@ha
-; CHECK-P9-NEXT:    mtvsrws v3, r3
-; CHECK-P9-NEXT:    addi r4, r4, .LCPI4_0@toc@l
-; CHECK-P9-NEXT:    lxvx v2, 0, r4
-; CHECK-P9-NEXT:    vperm v2, v3, v3, v2
+; CHECK-P9-NEXT:    mtvsrws v2, r3
+; CHECK-P9-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-P9-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r3
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
 ; CHECK-P9-NEXT:    vextsh2d v2, v2
 ; CHECK-P9-NEXT:    xvcvsxddp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    addis r4, r2, .LCPI4_0@toc@ha
-; CHECK-BE-NEXT:    mtvsrws v3, r3
-; CHECK-BE-NEXT:    addi r4, r4, .LCPI4_0@toc@l
-; CHECK-BE-NEXT:    lxvx v2, 0, r4
-; CHECK-BE-NEXT:    vperm v2, v3, v3, v2
+; CHECK-BE-NEXT:    mtvsrws v2, r3
+; CHECK-BE-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-BE-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r3
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
 ; CHECK-BE-NEXT:    vextsh2d v2, v2
 ; CHECK-BE-NEXT:    xvcvsxddp v2, v2
 ; CHECK-BE-NEXT:    blr
@@ -412,27 +414,27 @@
 define void @test4elt_signed(<4 x double>* noalias nocapture sret %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI5_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r4
-; CHECK-P8-NEXT:    mfvsrd r4, f0
-; CHECK-P8-NEXT:    clrldi r5, r4, 48
-; CHECK-P8-NEXT:    rldicl r6, r4, 48, 48
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f0, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 32, 48
-; CHECK-P8-NEXT:    rldicl r4, r4, 16, 48
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f1, r6
-; CHECK-P8-NEXT:    mtvsrwa f2, r5
-; CHECK-P8-NEXT:    mtvsrwa f3, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI5_2@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI5_0@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI5_2@toc@l
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    lvx v4, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI5_1@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI5_1@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r4
 ; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    xscvsxddp f2, f2
-; CHECK-P8-NEXT:    xscvsxddp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
+; CHECK-P8-NEXT:    vperm v2, v3, v3, v2
+; CHECK-P8-NEXT:    vperm v3, v3, v3, v4
+; CHECK-P8-NEXT:    xxswapd v4, vs0
+; CHECK-P8-NEXT:    vsld v2, v2, v4
+; CHECK-P8-NEXT:    vsld v3, v3, v4
+; CHECK-P8-NEXT:    vsrad v2, v2, v4
+; CHECK-P8-NEXT:    vsrad v3, v3, v4
+; CHECK-P8-NEXT:    xvcvsxddp vs0, v2
+; CHECK-P8-NEXT:    xvcvsxddp vs1, v3
 ; CHECK-P8-NEXT:    xxswapd vs0, vs0
 ; CHECK-P8-NEXT:    xxswapd vs1, vs1
 ; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
@@ -441,41 +443,41 @@
 ;
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    addis r5, r2, .LCPI5_0@toc@ha
-; CHECK-P9-NEXT:    addis r6, r2, .LCPI5_1@toc@ha
 ; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    addi r5, r5, .LCPI5_0@toc@l
-; CHECK-P9-NEXT:    addi r6, r6, .LCPI5_1@toc@l
-; CHECK-P9-NEXT:    xxswapd v4, vs0
-; CHECK-P9-NEXT:    lxvx v2, 0, r5
-; CHECK-P9-NEXT:    lxvx v3, 0, r6
-; CHECK-P9-NEXT:    vperm v2, v4, v4, v2
-; CHECK-P9-NEXT:    vperm v3, v4, v4, v3
-; CHECK-P9-NEXT:    vextsh2d v2, v2
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI5_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI5_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    xxswapd v2, vs0
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI5_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI5_1@toc@l
 ; CHECK-P9-NEXT:    vextsh2d v3, v3
-; CHECK-P9-NEXT:    xvcvsxddp vs0, v2
-; CHECK-P9-NEXT:    xvcvsxddp vs1, v3
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    xvcvsxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    vextsh2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    addis r5, r2, .LCPI5_0@toc@ha
-; CHECK-BE-NEXT:    addis r6, r2, .LCPI5_1@toc@ha
-; CHECK-BE-NEXT:    mtvsrd v4, r4
-; CHECK-BE-NEXT:    xxlxor v5, v5, v5
-; CHECK-BE-NEXT:    addi r5, r5, .LCPI5_0@toc@l
-; CHECK-BE-NEXT:    addi r6, r6, .LCPI5_1@toc@l
-; CHECK-BE-NEXT:    lxvx v2, 0, r5
-; CHECK-BE-NEXT:    lxvx v3, 0, r6
-; CHECK-BE-NEXT:    vperm v2, v5, v4, v2
-; CHECK-BE-NEXT:    vperm v3, v4, v4, v3
-; CHECK-BE-NEXT:    vextsh2d v2, v2
+; CHECK-BE-NEXT:    mtvsrd v2, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI5_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI5_0@toc@l
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    xxlxor v3, v3, v3
+; CHECK-BE-NEXT:    vperm v3, v3, v2, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI5_1@toc@ha
 ; CHECK-BE-NEXT:    vextsh2d v3, v3
-; CHECK-BE-NEXT:    xvcvsxddp vs0, v2
-; CHECK-BE-NEXT:    xvcvsxddp vs1, v3
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI5_1@toc@l
+; CHECK-BE-NEXT:    xvcvsxddp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
 ; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    vextsh2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs1, v2
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -488,121 +490,114 @@
 define void @test8elt_signed(<8 x double>* noalias nocapture sret %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    mfvsrd r4, v2
-; CHECK-P8-NEXT:    xxswapd vs0, v2
-; CHECK-P8-NEXT:    clrldi r5, r4, 48
-; CHECK-P8-NEXT:    rldicl r6, r4, 48, 48
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    mfvsrd r7, f0
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f1, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 32, 48
-; CHECK-P8-NEXT:    rldicl r4, r4, 16, 48
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f0, r6
-; CHECK-P8-NEXT:    mtvsrwa f2, r5
-; CHECK-P8-NEXT:    clrldi r5, r7, 48
-; CHECK-P8-NEXT:    mtvsrwa f3, r4
-; CHECK-P8-NEXT:    extsh r4, r5
-; CHECK-P8-NEXT:    rldicl r5, r7, 16, 48
-; CHECK-P8-NEXT:    mtvsrwa f4, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 48, 48
-; CHECK-P8-NEXT:    extsh r5, r5
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f7, r5
-; CHECK-P8-NEXT:    li r5, 32
-; CHECK-P8-NEXT:    mtvsrwa f5, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 32, 48
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    mtvsrwa f6, r4
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI6_2@toc@ha
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
+; CHECK-P8-NEXT:    addis r6, r2, .LCPI6_3@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI6_2@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI6_0@toc@l
+; CHECK-P8-NEXT:    addi r6, r6, .LCPI6_3@toc@l
+; CHECK-P8-NEXT:    lvx v4, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI6_4@toc@ha
+; CHECK-P8-NEXT:    lvx v3, 0, r4
+; CHECK-P8-NEXT:    lvx v5, 0, r6
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI6_4@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI6_1@toc@l
+; CHECK-P8-NEXT:    lvx v0, 0, r5
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r4
 ; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    xscvsxddp f2, f2
-; CHECK-P8-NEXT:    xscvsxddp f3, f3
-; CHECK-P8-NEXT:    xscvsxddp f4, f4
-; CHECK-P8-NEXT:    xscvsxddp f5, f5
-; CHECK-P8-NEXT:    xscvsxddp f6, f6
-; CHECK-P8-NEXT:    xscvsxddp f7, f7
-; CHECK-P8-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xxswapd vs0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P8-NEXT:    xxswapd vs1, vs1
-; CHECK-P8-NEXT:    xxmrghd vs3, vs7, vs6
+; CHECK-P8-NEXT:    li r5, 32
+; CHECK-P8-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P8-NEXT:    vperm v4, v2, v2, v4
+; CHECK-P8-NEXT:    vperm v5, v2, v2, v5
+; CHECK-P8-NEXT:    vperm v2, v2, v2, v0
+; CHECK-P8-NEXT:    xxswapd v0, vs0
+; CHECK-P8-NEXT:    vsld v3, v3, v0
+; CHECK-P8-NEXT:    vsld v4, v4, v0
+; CHECK-P8-NEXT:    vsld v5, v5, v0
+; CHECK-P8-NEXT:    vsld v2, v2, v0
+; CHECK-P8-NEXT:    vsrad v3, v3, v0
+; CHECK-P8-NEXT:    vsrad v2, v2, v0
+; CHECK-P8-NEXT:    vsrad v4, v4, v0
+; CHECK-P8-NEXT:    vsrad v5, v5, v0
+; CHECK-P8-NEXT:    xvcvsxddp vs2, v2
+; CHECK-P8-NEXT:    xvcvsxddp vs0, v3
+; CHECK-P8-NEXT:    xvcvsxddp vs1, v5
+; CHECK-P8-NEXT:    xvcvsxddp vs3, v4
 ; CHECK-P8-NEXT:    xxswapd vs2, vs2
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
-; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    stxvd2x vs0, r3, r5
+; CHECK-P8-NEXT:    xxswapd vs0, vs0
+; CHECK-P8-NEXT:    xxswapd vs1, vs1
 ; CHECK-P8-NEXT:    xxswapd vs3, vs3
+; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
+; CHECK-P8-NEXT:    li r4, 16
+; CHECK-P8-NEXT:    stxvd2x vs1, r3, r5
 ; CHECK-P8-NEXT:    stxvd2x vs3, r3, r4
-; CHECK-P8-NEXT:    stxvd2x vs2, 0, r3
+; CHECK-P8-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
-; CHECK-P9-NEXT:    addis r5, r2, .LCPI6_1@toc@ha
-; CHECK-P9-NEXT:    addis r6, r2, .LCPI6_2@toc@ha
-; CHECK-P9-NEXT:    addis r7, r2, .LCPI6_3@toc@ha
 ; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_0@toc@l
-; CHECK-P9-NEXT:    addi r5, r5, .LCPI6_1@toc@l
-; CHECK-P9-NEXT:    addi r6, r6, .LCPI6_2@toc@l
-; CHECK-P9-NEXT:    addi r7, r7, .LCPI6_3@toc@l
 ; CHECK-P9-NEXT:    lxvx v3, 0, r4
-; CHECK-P9-NEXT:    lxvx v4, 0, r5
-; CHECK-P9-NEXT:    lxvx v5, 0, r6
-; CHECK-P9-NEXT:    lxvx v0, 0, r7
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_1@toc@l
 ; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
-; CHECK-P9-NEXT:    vperm v4, v2, v2, v4
-; CHECK-P9-NEXT:    vperm v5, v2, v2, v5
-; CHECK-P9-NEXT:    vperm v2, v2, v2, v0
 ; CHECK-P9-NEXT:    vextsh2d v3, v3
-; CHECK-P9-NEXT:    vextsh2d v4, v4
-; CHECK-P9-NEXT:    vextsh2d v5, v5
-; CHECK-P9-NEXT:    vextsh2d v2, v2
 ; CHECK-P9-NEXT:    xvcvsxddp vs0, v3
-; CHECK-P9-NEXT:    xvcvsxddp vs1, v4
-; CHECK-P9-NEXT:    xvcvsxddp vs2, v5
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    vextsh2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    vextsh2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    vextsh2d v2, v2
 ; CHECK-P9-NEXT:    xvcvsxddp vs3, v2
 ; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
-; CHECK-BE-NEXT:    addis r5, r2, .LCPI6_1@toc@ha
-; CHECK-BE-NEXT:    addis r6, r2, .LCPI6_2@toc@ha
-; CHECK-BE-NEXT:    addis r7, r2, .LCPI6_3@toc@ha
-; CHECK-BE-NEXT:    xxlxor v1, v1, v1
 ; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_0@toc@l
-; CHECK-BE-NEXT:    addi r5, r5, .LCPI6_1@toc@l
-; CHECK-BE-NEXT:    addi r6, r6, .LCPI6_2@toc@l
-; CHECK-BE-NEXT:    addi r7, r7, .LCPI6_3@toc@l
 ; CHECK-BE-NEXT:    lxvx v3, 0, r4
-; CHECK-BE-NEXT:    lxvx v4, 0, r5
-; CHECK-BE-NEXT:    lxvx v5, 0, r6
-; CHECK-BE-NEXT:    lxvx v0, 0, r7
-; CHECK-BE-NEXT:    vperm v3, v1, v2, v3
-; CHECK-BE-NEXT:    vperm v4, v1, v2, v4
-; CHECK-BE-NEXT:    vperm v5, v2, v2, v5
-; CHECK-BE-NEXT:    vperm v2, v2, v2, v0
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_1@toc@l
 ; CHECK-BE-NEXT:    vextsh2d v3, v3
-; CHECK-BE-NEXT:    vextsh2d v4, v4
-; CHECK-BE-NEXT:    vextsh2d v5, v5
-; CHECK-BE-NEXT:    vextsh2d v2, v2
 ; CHECK-BE-NEXT:    xvcvsxddp vs0, v3
-; CHECK-BE-NEXT:    xvcvsxddp vs1, v4
-; CHECK-BE-NEXT:    xvcvsxddp vs2, v5
-; CHECK-BE-NEXT:    xvcvsxddp vs3, v2
-; CHECK-BE-NEXT:    stxv vs1, 48(r3)
-; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_2@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    vextsh2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs1, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_3@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs1, 48(r3)
+; CHECK-BE-NEXT:    vextsh2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs2, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
 ; CHECK-BE-NEXT:    stxv vs2, 0(r3)
+; CHECK-BE-NEXT:    vextsh2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs3, v2
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <8 x i16> %a to <8 x double>
@@ -613,212 +608,180 @@
 define void @test16elt_signed(<16 x double>* noalias nocapture sret %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    li r5, 16
-; CHECK-P8-NEXT:    lvx v3, 0, r4
-; CHECK-P8-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT:    lvx v2, r4, r5
-; CHECK-P8-NEXT:    mfvsrd r7, v3
-; CHECK-P8-NEXT:    xxswapd vs8, v3
-; CHECK-P8-NEXT:    mfvsrd r6, v2
-; CHECK-P8-NEXT:    xxswapd vs2, v2
-; CHECK-P8-NEXT:    clrldi r4, r6, 48
-; CHECK-P8-NEXT:    rldicl r8, r6, 48, 48
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    extsh r8, r8
-; CHECK-P8-NEXT:    mtvsrwa f0, r4
-; CHECK-P8-NEXT:    rldicl r4, r6, 32, 48
-; CHECK-P8-NEXT:    rldicl r6, r6, 16, 48
-; CHECK-P8-NEXT:    mtvsrwa f1, r8
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    clrldi r8, r7, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f3, r4
-; CHECK-P8-NEXT:    extsh r4, r8
-; CHECK-P8-NEXT:    mtvsrwa f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r7, 48, 48
-; CHECK-P8-NEXT:    mtvsrwa f5, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 32, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mfvsrd r8, f2
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f2, r6
-; CHECK-P8-NEXT:    rldicl r6, r7, 16, 48
-; CHECK-P8-NEXT:    mtvsrwa f6, r4
-; CHECK-P8-NEXT:    clrldi r4, r8, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f7, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 48, 48
-; CHECK-P8-NEXT:    mtvsrwa f9, r4
-; CHECK-P8-NEXT:    rldicl r4, r8, 32, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f10, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 16, 48
-; CHECK-P8-NEXT:    mtvsrwa f11, r4
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    mfvsrd r4, f8
-; CHECK-P8-NEXT:    mtvsrwa f8, r6
-; CHECK-P8-NEXT:    clrldi r6, r4, 48
-; CHECK-P8-NEXT:    xscvsxddp f3, f3
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    xscvsxddp f4, f4
-; CHECK-P8-NEXT:    mtvsrwa f12, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 48, 48
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    mtvsrwa f13, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 32, 48
-; CHECK-P8-NEXT:    rldicl r4, r4, 16, 48
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    extsh r6, r6
-; CHECK-P8-NEXT:    extsh r4, r4
-; CHECK-P8-NEXT:    xscvsxddp f5, f5
-; CHECK-P8-NEXT:    xscvsxddp f2, f2
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
-; CHECK-P8-NEXT:    mtvsrwa v2, r6
-; CHECK-P8-NEXT:    li r6, 32
-; CHECK-P8-NEXT:    mtvsrwa v3, r4
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_0@toc@ha
+; CHECK-P8-NEXT:    addis r6, r2, .LCPI7_2@toc@ha
+; CHECK-P8-NEXT:    lvx v4, 0, r4
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_0@toc@l
+; CHECK-P8-NEXT:    addi r6, r6, .LCPI7_2@toc@l
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_3@toc@ha
+; CHECK-P8-NEXT:    lvx v3, 0, r6
+; CHECK-P8-NEXT:    addis r6, r2, .LCPI7_4@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_3@toc@l
+; CHECK-P8-NEXT:    addi r6, r6, .LCPI7_4@toc@l
+; CHECK-P8-NEXT:    lvx v5, 0, r5
+; CHECK-P8-NEXT:    lvx v0, 0, r6
+; CHECK-P8-NEXT:    li r6, 16
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_1@toc@ha
+; CHECK-P8-NEXT:    lvx v7, r4, r6
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_1@toc@l
+; CHECK-P8-NEXT:    vperm v1, v4, v4, v2
 ; CHECK-P8-NEXT:    li r4, 112
-; CHECK-P8-NEXT:    xscvsxddp f6, f6
-; CHECK-P8-NEXT:    xscvsxddp f7, f7
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xscvsxddp f9, f9
-; CHECK-P8-NEXT:    xscvsxddp f10, f10
-; CHECK-P8-NEXT:    xxmrghd vs1, vs2, vs5
-; CHECK-P8-NEXT:    xscvsxddp f11, f11
-; CHECK-P8-NEXT:    xxswapd vs2, vs3
-; CHECK-P8-NEXT:    xscvsxddp f8, f8
-; CHECK-P8-NEXT:    xxswapd vs0, vs0
-; CHECK-P8-NEXT:    xscvsxddp f12, f12
-; CHECK-P8-NEXT:    xxswapd vs1, vs1
-; CHECK-P8-NEXT:    xscvsxddp f13, f13
-; CHECK-P8-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P8-NEXT:    xscvsxddp f4, v2
-; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
-; CHECK-P8-NEXT:    li r4, 96
-; CHECK-P8-NEXT:    xscvsxddp f31, v3
-; CHECK-P8-NEXT:    xxmrghd vs5, vs10, vs9
+; CHECK-P8-NEXT:    vperm v6, v4, v4, v3
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r5
+; CHECK-P8-NEXT:    li r5, 96
+; CHECK-P8-NEXT:    vperm v8, v4, v4, v5
+; CHECK-P8-NEXT:    vperm v4, v4, v4, v0
+; CHECK-P8-NEXT:    vperm v5, v7, v7, v5
+; CHECK-P8-NEXT:    xxswapd v9, vs0
+; CHECK-P8-NEXT:    vperm v0, v7, v7, v0
+; CHECK-P8-NEXT:    vperm v2, v7, v7, v2
+; CHECK-P8-NEXT:    vperm v3, v7, v7, v3
+; CHECK-P8-NEXT:    vsld v1, v1, v9
+; CHECK-P8-NEXT:    vsld v6, v6, v9
+; CHECK-P8-NEXT:    vsld v5, v5, v9
+; CHECK-P8-NEXT:    vsld v0, v0, v9
+; CHECK-P8-NEXT:    vsld v2, v2, v9
+; CHECK-P8-NEXT:    vsld v3, v3, v9
+; CHECK-P8-NEXT:    vsrad v5, v5, v9
+; CHECK-P8-NEXT:    vsrad v0, v0, v9
+; CHECK-P8-NEXT:    vsld v7, v8, v9
+; CHECK-P8-NEXT:    vsld v4, v4, v9
+; CHECK-P8-NEXT:    vsrad v2, v2, v9
+; CHECK-P8-NEXT:    vsrad v3, v3, v9
+; CHECK-P8-NEXT:    xvcvsxddp vs2, v5
+; CHECK-P8-NEXT:    xvcvsxddp vs3, v0
+; CHECK-P8-NEXT:    vsrad v1, v1, v9
+; CHECK-P8-NEXT:    vsrad v6, v6, v9
+; CHECK-P8-NEXT:    vsrad v7, v7, v9
+; CHECK-P8-NEXT:    vsrad v4, v4, v9
+; CHECK-P8-NEXT:    xvcvsxddp vs1, v2
+; CHECK-P8-NEXT:    xxswapd vs2, vs2
+; CHECK-P8-NEXT:    xvcvsxddp vs4, v3
 ; CHECK-P8-NEXT:    xxswapd vs3, vs3
-; CHECK-P8-NEXT:    stxvd2x vs0, r3, r4
-; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    xxmrghd vs6, vs8, vs11
-; CHECK-P8-NEXT:    xxmrghd vs7, vs13, vs12
+; CHECK-P8-NEXT:    xvcvsxddp vs0, v7
+; CHECK-P8-NEXT:    xvcvsxddp vs5, v4
+; CHECK-P8-NEXT:    xvcvsxddp vs6, v1
 ; CHECK-P8-NEXT:    stxvd2x vs3, r3, r4
 ; CHECK-P8-NEXT:    li r4, 80
-; CHECK-P8-NEXT:    xxswapd vs0, vs6
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r6
-; CHECK-P8-NEXT:    li r6, 64
-; CHECK-P8-NEXT:    xxmrghd vs2, vs31, vs4
-; CHECK-P8-NEXT:    xxswapd vs4, vs5
-; CHECK-P8-NEXT:    xxswapd vs5, vs7
-; CHECK-P8-NEXT:    stxvd2x vs0, r3, r4
-; CHECK-P8-NEXT:    xxswapd vs2, vs2
-; CHECK-P8-NEXT:    stxvd2x vs4, r3, r6
+; CHECK-P8-NEXT:    xvcvsxddp vs7, v6
 ; CHECK-P8-NEXT:    stxvd2x vs2, r3, r5
-; CHECK-P8-NEXT:    stxvd2x vs5, 0, r3
-; CHECK-P8-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    li r5, 64
+; CHECK-P8-NEXT:    xxswapd vs1, vs1
+; CHECK-P8-NEXT:    xxswapd vs4, vs4
+; CHECK-P8-NEXT:    xxswapd vs0, vs0
+; CHECK-P8-NEXT:    xxswapd vs5, vs5
+; CHECK-P8-NEXT:    xxswapd vs3, vs6
+; CHECK-P8-NEXT:    stxvd2x vs4, r3, r4
+; CHECK-P8-NEXT:    li r4, 48
+; CHECK-P8-NEXT:    xxswapd vs2, vs7
+; CHECK-P8-NEXT:    stxvd2x vs1, r3, r5
+; CHECK-P8-NEXT:    li r5, 32
+; CHECK-P8-NEXT:    stxvd2x vs5, r3, r4
+; CHECK-P8-NEXT:    stxvd2x vs0, r3, r5
+; CHECK-P8-NEXT:    stxvd2x vs2, r3, r6
+; CHECK-P8-NEXT:    stxvd2x vs3, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    addis r5, r2, .LCPI7_0@toc@ha
-; CHECK-P9-NEXT:    addis r6, r2, .LCPI7_1@toc@ha
-; CHECK-P9-NEXT:    addis r7, r2, .LCPI7_2@toc@ha
-; CHECK-P9-NEXT:    addis r8, r2, .LCPI7_3@toc@ha
-; CHECK-P9-NEXT:    lxv v0, 0(r4)
-; CHECK-P9-NEXT:    lxv v1, 16(r4)
 ; CHECK-P9-NEXT:    addi r5, r5, .LCPI7_0@toc@l
-; CHECK-P9-NEXT:    addi r6, r6, .LCPI7_1@toc@l
-; CHECK-P9-NEXT:    addi r7, r7, .LCPI7_2@toc@l
-; CHECK-P9-NEXT:    addi r8, r8, .LCPI7_3@toc@l
-; CHECK-P9-NEXT:    lxvx v2, 0, r5
-; CHECK-P9-NEXT:    lxvx v3, 0, r6
-; CHECK-P9-NEXT:    lxvx v4, 0, r7
-; CHECK-P9-NEXT:    lxvx v5, 0, r8
-; CHECK-P9-NEXT:    vperm v6, v0, v0, v2
-; CHECK-P9-NEXT:    vperm v7, v0, v0, v3
-; CHECK-P9-NEXT:    vperm v8, v0, v0, v4
-; CHECK-P9-NEXT:    vperm v0, v0, v0, v5
-; CHECK-P9-NEXT:    vperm v2, v1, v1, v2
-; CHECK-P9-NEXT:    vperm v3, v1, v1, v3
-; CHECK-P9-NEXT:    vperm v4, v1, v1, v4
-; CHECK-P9-NEXT:    vperm v5, v1, v1, v5
-; CHECK-P9-NEXT:    vextsh2d v1, v6
-; CHECK-P9-NEXT:    vextsh2d v6, v7
-; CHECK-P9-NEXT:    vextsh2d v7, v8
-; CHECK-P9-NEXT:    vextsh2d v0, v0
-; CHECK-P9-NEXT:    vextsh2d v2, v2
-; CHECK-P9-NEXT:    vextsh2d v3, v3
+; CHECK-P9-NEXT:    lxv v2, 0(r4)
+; CHECK-P9-NEXT:    lxvx v3, 0, r5
+; CHECK-P9-NEXT:    addis r5, r2, .LCPI7_1@toc@ha
+; CHECK-P9-NEXT:    addi r5, r5, .LCPI7_1@toc@l
+; CHECK-P9-NEXT:    lxvx v5, 0, r5
+; CHECK-P9-NEXT:    addis r5, r2, .LCPI7_2@toc@ha
+; CHECK-P9-NEXT:    vperm v4, v2, v2, v3
+; CHECK-P9-NEXT:    addi r5, r5, .LCPI7_2@toc@l
 ; CHECK-P9-NEXT:    vextsh2d v4, v4
-; CHECK-P9-NEXT:    vextsh2d v5, v5
-; CHECK-P9-NEXT:    xvcvsxddp vs0, v1
-; CHECK-P9-NEXT:    xvcvsxddp vs1, v6
-; CHECK-P9-NEXT:    xvcvsxddp vs2, v7
-; CHECK-P9-NEXT:    xvcvsxddp vs3, v0
-; CHECK-P9-NEXT:    xvcvsxddp vs4, v2
-; CHECK-P9-NEXT:    xvcvsxddp vs5, v3
-; CHECK-P9-NEXT:    xvcvsxddp vs6, v4
-; CHECK-P9-NEXT:    xvcvsxddp vs7, v5
+; CHECK-P9-NEXT:    lxvx v0, 0, r5
+; CHECK-P9-NEXT:    addis r5, r2, .LCPI7_3@toc@ha
+; CHECK-P9-NEXT:    xvcvsxddp vs0, v4
+; CHECK-P9-NEXT:    vperm v4, v2, v2, v5
+; CHECK-P9-NEXT:    addi r5, r5, .LCPI7_3@toc@l
+; CHECK-P9-NEXT:    lxvx v1, 0, r5
+; CHECK-P9-NEXT:    vextsh2d v4, v4
+; CHECK-P9-NEXT:    xvcvsxddp vs1, v4
+; CHECK-P9-NEXT:    vperm v4, v2, v2, v0
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v1
+; CHECK-P9-NEXT:    vextsh2d v4, v4
+; CHECK-P9-NEXT:    xvcvsxddp vs2, v4
+; CHECK-P9-NEXT:    lxv v4, 16(r4)
+; CHECK-P9-NEXT:    vextsh2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp vs3, v2
+; CHECK-P9-NEXT:    vperm v2, v4, v4, v3
+; CHECK-P9-NEXT:    vextsh2d v2, v2
 ; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    xvcvsxddp vs4, v2
+; CHECK-P9-NEXT:    vperm v2, v4, v4, v5
+; CHECK-P9-NEXT:    vextsh2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp vs5, v2
+; CHECK-P9-NEXT:    vperm v2, v4, v4, v0
+; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    stxv vs5, 80(r3)
+; CHECK-P9-NEXT:    vextsh2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp vs6, v2
+; CHECK-P9-NEXT:    vperm v2, v4, v4, v1
+; CHECK-P9-NEXT:    vextsh2d v2, v2
+; CHECK-P9-NEXT:    stxv vs6, 96(r3)
+; CHECK-P9-NEXT:    xvcvsxddp vs7, v2
+; CHECK-P9-NEXT:    stxv vs7, 112(r3)
 ; CHECK-P9-NEXT:    stxv vs2, 32(r3)
 ; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs7, 112(r3)
-; CHECK-P9-NEXT:    stxv vs6, 96(r3)
-; CHECK-P9-NEXT:    stxv vs5, 80(r3)
-; CHECK-P9-NEXT:    stxv vs4, 64(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    addis r5, r2, .LCPI7_0@toc@ha
-; CHECK-BE-NEXT:    addis r6, r2, .LCPI7_1@toc@ha
-; CHECK-BE-NEXT:    addis r7, r2, .LCPI7_2@toc@ha
-; CHECK-BE-NEXT:    addis r8, r2, .LCPI7_3@toc@ha
-; CHECK-BE-NEXT:    lxv v2, 16(r4)
-; CHECK-BE-NEXT:    lxv v3, 0(r4)
-; CHECK-BE-NEXT:    xxlxor v6, v6, v6
 ; CHECK-BE-NEXT:    addi r5, r5, .LCPI7_0@toc@l
-; CHECK-BE-NEXT:    addi r6, r6, .LCPI7_1@toc@l
-; CHECK-BE-NEXT:    addi r7, r7, .LCPI7_2@toc@l
-; CHECK-BE-NEXT:    addi r8, r8, .LCPI7_3@toc@l
-; CHECK-BE-NEXT:    lxvx v4, 0, r5
-; CHECK-BE-NEXT:    lxvx v5, 0, r6
-; CHECK-BE-NEXT:    lxvx v0, 0, r7
-; CHECK-BE-NEXT:    lxvx v1, 0, r8
-; CHECK-BE-NEXT:    vperm v7, v6, v3, v4
-; CHECK-BE-NEXT:    vperm v8, v6, v3, v5
-; CHECK-BE-NEXT:    vperm v4, v6, v2, v4
-; CHECK-BE-NEXT:    vperm v5, v6, v2, v5
-; CHECK-BE-NEXT:    vperm v6, v3, v3, v0
-; CHECK-BE-NEXT:    vperm v3, v3, v3, v1
-; CHECK-BE-NEXT:    vperm v0, v2, v2, v0
-; CHECK-BE-NEXT:    vperm v2, v2, v2, v1
-; CHECK-BE-NEXT:    vextsh2d v1, v7
-; CHECK-BE-NEXT:    vextsh2d v7, v8
-; CHECK-BE-NEXT:    vextsh2d v4, v4
-; CHECK-BE-NEXT:    vextsh2d v5, v5
-; CHECK-BE-NEXT:    vextsh2d v6, v6
-; CHECK-BE-NEXT:    vextsh2d v3, v3
-; CHECK-BE-NEXT:    vextsh2d v0, v0
+; CHECK-BE-NEXT:    lxvx v2, 0, r5
+; CHECK-BE-NEXT:    lxv v4, 0(r4)
+; CHECK-BE-NEXT:    lxv v1, 16(r4)
+; CHECK-BE-NEXT:    addis r5, r2, .LCPI7_1@toc@ha
+; CHECK-BE-NEXT:    addi r5, r5, .LCPI7_1@toc@l
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_2@toc@ha
+; CHECK-BE-NEXT:    xxlxor v5, v5, v5
+; CHECK-BE-NEXT:    vperm v0, v5, v4, v2
+; CHECK-BE-NEXT:    lxvx v3, 0, r5
+; CHECK-BE-NEXT:    vperm v2, v5, v1, v2
 ; CHECK-BE-NEXT:    vextsh2d v2, v2
-; CHECK-BE-NEXT:    xvcvsxddp vs0, v1
-; CHECK-BE-NEXT:    xvcvsxddp vs1, v7
-; CHECK-BE-NEXT:    xvcvsxddp vs2, v4
-; CHECK-BE-NEXT:    xvcvsxddp vs3, v5
-; CHECK-BE-NEXT:    xvcvsxddp vs4, v6
-; CHECK-BE-NEXT:    xvcvsxddp vs5, v3
-; CHECK-BE-NEXT:    xvcvsxddp vs6, v0
-; CHECK-BE-NEXT:    xvcvsxddp vs7, v2
-; CHECK-BE-NEXT:    stxv vs3, 112(r3)
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_2@toc@l
+; CHECK-BE-NEXT:    vextsh2d v0, v0
+; CHECK-BE-NEXT:    xvcvsxddp vs2, v2
+; CHECK-BE-NEXT:    vperm v2, v5, v1, v3
+; CHECK-BE-NEXT:    vextsh2d v2, v2
 ; CHECK-BE-NEXT:    stxv vs2, 80(r3)
+; CHECK-BE-NEXT:    xvcvsxddp vs3, v2
+; CHECK-BE-NEXT:    lxvx v2, 0, r4
+; CHECK-BE-NEXT:    xvcvsxddp vs0, v0
+; CHECK-BE-NEXT:    vperm v0, v5, v4, v3
+; CHECK-BE-NEXT:    vperm v3, v4, v4, v2
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_3@toc@ha
+; CHECK-BE-NEXT:    vextsh2d v0, v0
+; CHECK-BE-NEXT:    xvcvsxddp vs1, v0
 ; CHECK-BE-NEXT:    stxv vs1, 48(r3)
-; CHECK-BE-NEXT:    stxv vs0, 16(r3)
-; CHECK-BE-NEXT:    stxv vs7, 96(r3)
+; CHECK-BE-NEXT:    vextsh2d v3, v3
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_3@toc@l
+; CHECK-BE-NEXT:    xvcvsxddp vs4, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v1, v1, v2
+; CHECK-BE-NEXT:    vextsh2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs6, v2
+; CHECK-BE-NEXT:    vperm v2, v1, v1, v3
+; CHECK-BE-NEXT:    vperm v4, v4, v4, v3
+; CHECK-BE-NEXT:    vextsh2d v4, v4
+; CHECK-BE-NEXT:    vextsh2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs7, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs5, v4
+; CHECK-BE-NEXT:    stxv vs3, 112(r3)
 ; CHECK-BE-NEXT:    stxv vs6, 64(r3)
-; CHECK-BE-NEXT:    stxv vs5, 32(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    stxv vs4, 0(r3)
+; CHECK-BE-NEXT:    stxv vs7, 96(r3)
+; CHECK-BE-NEXT:    stxv vs5, 32(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i16>, <16 x i16>* %0, align 32
diff --git a/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll b/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll
index 39b88ba..2609cb3 100644
--- a/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll
@@ -105,38 +105,38 @@
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r4)
 ; CHECK-P9-NEXT:    xxmrglw v2, vs1, vs1
-; CHECK-P9-NEXT:    xxmrghw v3, vs1, vs1
-; CHECK-P9-NEXT:    xxmrglw v4, vs0, vs0
-; CHECK-P9-NEXT:    xxmrghw v5, vs0, vs0
+; CHECK-P9-NEXT:    lxv vs0, 16(r4)
+; CHECK-P9-NEXT:    xvcvuxwdp vs2, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs1, vs1
+; CHECK-P9-NEXT:    xvcvuxwdp vs1, v2
+; CHECK-P9-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-P9-NEXT:    xvcvuxwdp vs3, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-P9-NEXT:    stxv vs2, 0(r3)
 ; CHECK-P9-NEXT:    xvcvuxwdp vs0, v2
-; CHECK-P9-NEXT:    xvcvuxwdp vs1, v3
-; CHECK-P9-NEXT:    xvcvuxwdp vs2, v4
-; CHECK-P9-NEXT:    xvcvuxwdp vs3, v5
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
 ; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    stxv vs3, 32(r3)
+; CHECK-P9-NEXT:    stxv vs0, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
 ; CHECK-BE-NEXT:    lxv vs1, 0(r4)
 ; CHECK-BE-NEXT:    xxmrghw v2, vs1, vs1
-; CHECK-BE-NEXT:    xxmrglw v3, vs1, vs1
-; CHECK-BE-NEXT:    xxmrghw v4, vs0, vs0
-; CHECK-BE-NEXT:    xxmrglw v5, vs0, vs0
+; CHECK-BE-NEXT:    lxv vs0, 16(r4)
+; CHECK-BE-NEXT:    xvcvuxwdp vs2, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs1, vs1
+; CHECK-BE-NEXT:    xvcvuxwdp vs1, v2
+; CHECK-BE-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-BE-NEXT:    xvcvuxwdp vs3, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    xvcvuxwdp vs0, v2
-; CHECK-BE-NEXT:    xvcvuxwdp vs1, v3
-; CHECK-BE-NEXT:    xvcvuxwdp vs2, v4
-; CHECK-BE-NEXT:    xvcvuxwdp vs3, v5
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    stxv vs0, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x i32>, <8 x i32>* %0, align 32
@@ -195,66 +195,66 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r4)
-; CHECK-P9-NEXT:    lxv vs2, 48(r4)
-; CHECK-P9-NEXT:    lxv vs3, 32(r4)
-; CHECK-P9-NEXT:    xxmrglw v2, vs1, vs1
-; CHECK-P9-NEXT:    xxmrghw v3, vs1, vs1
-; CHECK-P9-NEXT:    xxmrglw v4, vs0, vs0
-; CHECK-P9-NEXT:    xxmrghw v5, vs0, vs0
-; CHECK-P9-NEXT:    xxmrglw v0, vs3, vs3
-; CHECK-P9-NEXT:    xxmrghw v1, vs3, vs3
-; CHECK-P9-NEXT:    xxmrglw v6, vs2, vs2
-; CHECK-P9-NEXT:    xxmrghw v7, vs2, vs2
+; CHECK-P9-NEXT:    lxv vs0, 0(r4)
+; CHECK-P9-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-P9-NEXT:    lxv vs2, 16(r4)
+; CHECK-P9-NEXT:    lxv vs4, 48(r4)
+; CHECK-P9-NEXT:    xvcvuxwdp vs1, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-P9-NEXT:    lxv vs5, 32(r4)
 ; CHECK-P9-NEXT:    xvcvuxwdp vs0, v2
-; CHECK-P9-NEXT:    xvcvuxwdp vs1, v3
-; CHECK-P9-NEXT:    xvcvuxwdp vs2, v4
-; CHECK-P9-NEXT:    xvcvuxwdp vs3, v5
-; CHECK-P9-NEXT:    xvcvuxwdp vs4, v0
-; CHECK-P9-NEXT:    xvcvuxwdp vs5, v1
-; CHECK-P9-NEXT:    xvcvuxwdp vs6, v6
-; CHECK-P9-NEXT:    xvcvuxwdp vs7, v7
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs7, 112(r3)
-; CHECK-P9-NEXT:    stxv vs6, 96(r3)
+; CHECK-P9-NEXT:    xxmrglw v2, vs2, vs2
+; CHECK-P9-NEXT:    xvcvuxwdp vs3, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs2, vs2
+; CHECK-P9-NEXT:    stxv vs1, 0(r3)
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    xvcvuxwdp vs2, v2
+; CHECK-P9-NEXT:    xxmrglw v2, vs5, vs5
+; CHECK-P9-NEXT:    xvcvuxwdp vs6, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs5, vs5
+; CHECK-P9-NEXT:    stxv vs3, 32(r3)
+; CHECK-P9-NEXT:    stxv vs2, 48(r3)
+; CHECK-P9-NEXT:    xvcvuxwdp vs5, v2
+; CHECK-P9-NEXT:    xxmrglw v2, vs4, vs4
+; CHECK-P9-NEXT:    xvcvuxwdp vs7, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs4, vs4
+; CHECK-P9-NEXT:    stxv vs6, 64(r3)
 ; CHECK-P9-NEXT:    stxv vs5, 80(r3)
-; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    xvcvuxwdp vs4, v2
+; CHECK-P9-NEXT:    stxv vs7, 96(r3)
+; CHECK-P9-NEXT:    stxv vs4, 112(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
-; CHECK-BE-NEXT:    lxv vs1, 0(r4)
-; CHECK-BE-NEXT:    lxv vs2, 48(r4)
-; CHECK-BE-NEXT:    lxv vs3, 32(r4)
-; CHECK-BE-NEXT:    xxmrghw v2, vs1, vs1
-; CHECK-BE-NEXT:    xxmrglw v3, vs1, vs1
-; CHECK-BE-NEXT:    xxmrghw v4, vs0, vs0
-; CHECK-BE-NEXT:    xxmrglw v5, vs0, vs0
-; CHECK-BE-NEXT:    xxmrghw v0, vs3, vs3
-; CHECK-BE-NEXT:    xxmrglw v1, vs3, vs3
-; CHECK-BE-NEXT:    xxmrghw v6, vs2, vs2
-; CHECK-BE-NEXT:    xxmrglw v7, vs2, vs2
+; CHECK-BE-NEXT:    lxv vs0, 0(r4)
+; CHECK-BE-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-BE-NEXT:    lxv vs2, 16(r4)
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    xvcvuxwdp vs1, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-BE-NEXT:    lxv vs5, 32(r4)
 ; CHECK-BE-NEXT:    xvcvuxwdp vs0, v2
-; CHECK-BE-NEXT:    xvcvuxwdp vs1, v3
-; CHECK-BE-NEXT:    xvcvuxwdp vs2, v4
-; CHECK-BE-NEXT:    xvcvuxwdp vs3, v5
-; CHECK-BE-NEXT:    xvcvuxwdp vs4, v0
-; CHECK-BE-NEXT:    xvcvuxwdp vs5, v1
-; CHECK-BE-NEXT:    xvcvuxwdp vs6, v6
-; CHECK-BE-NEXT:    xvcvuxwdp vs7, v7
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs7, 112(r3)
-; CHECK-BE-NEXT:    stxv vs6, 96(r3)
+; CHECK-BE-NEXT:    xxmrghw v2, vs2, vs2
+; CHECK-BE-NEXT:    xvcvuxwdp vs3, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs2, vs2
+; CHECK-BE-NEXT:    stxv vs1, 0(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    xvcvuxwdp vs2, v2
+; CHECK-BE-NEXT:    xxmrghw v2, vs5, vs5
+; CHECK-BE-NEXT:    xvcvuxwdp vs6, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs5, vs5
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    stxv vs2, 48(r3)
+; CHECK-BE-NEXT:    xvcvuxwdp vs5, v2
+; CHECK-BE-NEXT:    xxmrghw v2, vs4, vs4
+; CHECK-BE-NEXT:    xvcvuxwdp vs7, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs4, vs4
+; CHECK-BE-NEXT:    stxv vs6, 64(r3)
 ; CHECK-BE-NEXT:    stxv vs5, 80(r3)
-; CHECK-BE-NEXT:    stxv vs4, 64(r3)
+; CHECK-BE-NEXT:    xvcvuxwdp vs4, v2
+; CHECK-BE-NEXT:    stxv vs7, 96(r3)
+; CHECK-BE-NEXT:    stxv vs4, 112(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i32>, <16 x i32>* %0, align 64
@@ -359,38 +359,38 @@
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
 ; CHECK-P9-NEXT:    lxv vs1, 0(r4)
 ; CHECK-P9-NEXT:    xxmrglw v2, vs1, vs1
-; CHECK-P9-NEXT:    xxmrghw v3, vs1, vs1
-; CHECK-P9-NEXT:    xxmrglw v4, vs0, vs0
-; CHECK-P9-NEXT:    xxmrghw v5, vs0, vs0
+; CHECK-P9-NEXT:    lxv vs0, 16(r4)
+; CHECK-P9-NEXT:    xvcvsxwdp vs2, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs1, vs1
+; CHECK-P9-NEXT:    xvcvsxwdp vs1, v2
+; CHECK-P9-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-P9-NEXT:    xvcvsxwdp vs3, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-P9-NEXT:    stxv vs2, 0(r3)
 ; CHECK-P9-NEXT:    xvcvsxwdp vs0, v2
-; CHECK-P9-NEXT:    xvcvsxwdp vs1, v3
-; CHECK-P9-NEXT:    xvcvsxwdp vs2, v4
-; CHECK-P9-NEXT:    xvcvsxwdp vs3, v5
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
 ; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    stxv vs3, 32(r3)
+; CHECK-P9-NEXT:    stxv vs0, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
 ; CHECK-BE-NEXT:    lxv vs1, 0(r4)
 ; CHECK-BE-NEXT:    xxmrghw v2, vs1, vs1
-; CHECK-BE-NEXT:    xxmrglw v3, vs1, vs1
-; CHECK-BE-NEXT:    xxmrghw v4, vs0, vs0
-; CHECK-BE-NEXT:    xxmrglw v5, vs0, vs0
+; CHECK-BE-NEXT:    lxv vs0, 16(r4)
+; CHECK-BE-NEXT:    xvcvsxwdp vs2, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs1, vs1
+; CHECK-BE-NEXT:    xvcvsxwdp vs1, v2
+; CHECK-BE-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-BE-NEXT:    xvcvsxwdp vs3, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    xvcvsxwdp vs0, v2
-; CHECK-BE-NEXT:    xvcvsxwdp vs1, v3
-; CHECK-BE-NEXT:    xvcvsxwdp vs2, v4
-; CHECK-BE-NEXT:    xvcvsxwdp vs3, v5
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    stxv vs0, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x i32>, <8 x i32>* %0, align 32
@@ -449,66 +449,66 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r4)
-; CHECK-P9-NEXT:    lxv vs2, 48(r4)
-; CHECK-P9-NEXT:    lxv vs3, 32(r4)
-; CHECK-P9-NEXT:    xxmrglw v2, vs1, vs1
-; CHECK-P9-NEXT:    xxmrghw v3, vs1, vs1
-; CHECK-P9-NEXT:    xxmrglw v4, vs0, vs0
-; CHECK-P9-NEXT:    xxmrghw v5, vs0, vs0
-; CHECK-P9-NEXT:    xxmrglw v0, vs3, vs3
-; CHECK-P9-NEXT:    xxmrghw v1, vs3, vs3
-; CHECK-P9-NEXT:    xxmrglw v6, vs2, vs2
-; CHECK-P9-NEXT:    xxmrghw v7, vs2, vs2
+; CHECK-P9-NEXT:    lxv vs0, 0(r4)
+; CHECK-P9-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-P9-NEXT:    lxv vs2, 16(r4)
+; CHECK-P9-NEXT:    lxv vs4, 48(r4)
+; CHECK-P9-NEXT:    xvcvsxwdp vs1, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-P9-NEXT:    lxv vs5, 32(r4)
 ; CHECK-P9-NEXT:    xvcvsxwdp vs0, v2
-; CHECK-P9-NEXT:    xvcvsxwdp vs1, v3
-; CHECK-P9-NEXT:    xvcvsxwdp vs2, v4
-; CHECK-P9-NEXT:    xvcvsxwdp vs3, v5
-; CHECK-P9-NEXT:    xvcvsxwdp vs4, v0
-; CHECK-P9-NEXT:    xvcvsxwdp vs5, v1
-; CHECK-P9-NEXT:    xvcvsxwdp vs6, v6
-; CHECK-P9-NEXT:    xvcvsxwdp vs7, v7
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs7, 112(r3)
-; CHECK-P9-NEXT:    stxv vs6, 96(r3)
+; CHECK-P9-NEXT:    xxmrglw v2, vs2, vs2
+; CHECK-P9-NEXT:    xvcvsxwdp vs3, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs2, vs2
+; CHECK-P9-NEXT:    stxv vs1, 0(r3)
+; CHECK-P9-NEXT:    stxv vs0, 16(r3)
+; CHECK-P9-NEXT:    xvcvsxwdp vs2, v2
+; CHECK-P9-NEXT:    xxmrglw v2, vs5, vs5
+; CHECK-P9-NEXT:    xvcvsxwdp vs6, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs5, vs5
+; CHECK-P9-NEXT:    stxv vs3, 32(r3)
+; CHECK-P9-NEXT:    stxv vs2, 48(r3)
+; CHECK-P9-NEXT:    xvcvsxwdp vs5, v2
+; CHECK-P9-NEXT:    xxmrglw v2, vs4, vs4
+; CHECK-P9-NEXT:    xvcvsxwdp vs7, v2
+; CHECK-P9-NEXT:    xxmrghw v2, vs4, vs4
+; CHECK-P9-NEXT:    stxv vs6, 64(r3)
 ; CHECK-P9-NEXT:    stxv vs5, 80(r3)
-; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    xvcvsxwdp vs4, v2
+; CHECK-P9-NEXT:    stxv vs7, 96(r3)
+; CHECK-P9-NEXT:    stxv vs4, 112(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 16(r4)
-; CHECK-BE-NEXT:    lxv vs1, 0(r4)
-; CHECK-BE-NEXT:    lxv vs2, 48(r4)
-; CHECK-BE-NEXT:    lxv vs3, 32(r4)
-; CHECK-BE-NEXT:    xxmrghw v2, vs1, vs1
-; CHECK-BE-NEXT:    xxmrglw v3, vs1, vs1
-; CHECK-BE-NEXT:    xxmrghw v4, vs0, vs0
-; CHECK-BE-NEXT:    xxmrglw v5, vs0, vs0
-; CHECK-BE-NEXT:    xxmrghw v0, vs3, vs3
-; CHECK-BE-NEXT:    xxmrglw v1, vs3, vs3
-; CHECK-BE-NEXT:    xxmrghw v6, vs2, vs2
-; CHECK-BE-NEXT:    xxmrglw v7, vs2, vs2
+; CHECK-BE-NEXT:    lxv vs0, 0(r4)
+; CHECK-BE-NEXT:    xxmrghw v2, vs0, vs0
+; CHECK-BE-NEXT:    lxv vs2, 16(r4)
+; CHECK-BE-NEXT:    lxv vs4, 48(r4)
+; CHECK-BE-NEXT:    xvcvsxwdp vs1, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs0, vs0
+; CHECK-BE-NEXT:    lxv vs5, 32(r4)
 ; CHECK-BE-NEXT:    xvcvsxwdp vs0, v2
-; CHECK-BE-NEXT:    xvcvsxwdp vs1, v3
-; CHECK-BE-NEXT:    xvcvsxwdp vs2, v4
-; CHECK-BE-NEXT:    xvcvsxwdp vs3, v5
-; CHECK-BE-NEXT:    xvcvsxwdp vs4, v0
-; CHECK-BE-NEXT:    xvcvsxwdp vs5, v1
-; CHECK-BE-NEXT:    xvcvsxwdp vs6, v6
-; CHECK-BE-NEXT:    xvcvsxwdp vs7, v7
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs7, 112(r3)
-; CHECK-BE-NEXT:    stxv vs6, 96(r3)
+; CHECK-BE-NEXT:    xxmrghw v2, vs2, vs2
+; CHECK-BE-NEXT:    xvcvsxwdp vs3, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs2, vs2
+; CHECK-BE-NEXT:    stxv vs1, 0(r3)
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    xvcvsxwdp vs2, v2
+; CHECK-BE-NEXT:    xxmrghw v2, vs5, vs5
+; CHECK-BE-NEXT:    xvcvsxwdp vs6, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs5, vs5
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
+; CHECK-BE-NEXT:    stxv vs2, 48(r3)
+; CHECK-BE-NEXT:    xvcvsxwdp vs5, v2
+; CHECK-BE-NEXT:    xxmrghw v2, vs4, vs4
+; CHECK-BE-NEXT:    xvcvsxwdp vs7, v2
+; CHECK-BE-NEXT:    xxmrglw v2, vs4, vs4
+; CHECK-BE-NEXT:    stxv vs6, 64(r3)
 ; CHECK-BE-NEXT:    stxv vs5, 80(r3)
-; CHECK-BE-NEXT:    stxv vs4, 64(r3)
+; CHECK-BE-NEXT:    xvcvsxwdp vs4, v2
+; CHECK-BE-NEXT:    stxv vs7, 96(r3)
+; CHECK-BE-NEXT:    stxv vs4, 112(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i32>, <16 x i32>* %0, align 64
diff --git a/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll b/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
index cae556e..08e6f70 100644
--- a/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
@@ -28,14 +28,14 @@
 ; CHECK-P9-LABEL: test2elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xxlor vs1, v2, v2
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
 ; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvdpspn vs1, f1
 ; CHECK-P9-NEXT:    xscvdpspn vs0, f0
-; CHECK-P9-NEXT:    xxsldwi v3, vs1, vs1, 1
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
+; CHECK-P9-NEXT:    xxlor vs0, v2, v2
+; CHECK-P9-NEXT:    xscvuxdsp f0, f0
+; CHECK-P9-NEXT:    xscvdpspn vs0, f0
 ; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 1
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
@@ -62,51 +62,35 @@
 ; CHECK-P8-NEXT:    li r4, 16
 ; CHECK-P8-NEXT:    lxvd2x vs1, 0, r3
 ; CHECK-P8-NEXT:    lxvd2x vs0, r3, r4
-; CHECK-P8-NEXT:    xxswapd vs3, vs1
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    xxswapd vs2, vs0
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P8-NEXT:    xxmrghd vs1, vs2, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    xvcvuxdsp vs1, v3
+; CHECK-P8-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-P8-NEXT:    xxsldwi v3, vs1, vs1, 3
+; CHECK-P8-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P8-NEXT:    vpkudum v2, v2, v3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
-; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs2, vs1
-; CHECK-P9-NEXT:    xxswapd vs3, vs0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs2
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
+; CHECK-P9-NEXT:    lxv v3, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v3
+; CHECK-P9-NEXT:    lxv v2, 16(r3)
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P9-NEXT:    vpkudum v2, v2, v3
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
-; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    xxswapd vs2, vs1
-; CHECK-BE-NEXT:    xxswapd vs3, vs0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v2, v3
+; CHECK-BE-NEXT:    lxv v3, 16(r3)
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v3
+; CHECK-BE-NEXT:    lxv v2, 0(r3)
+; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-BE-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-BE-NEXT:    vpkudum v2, v2, v3
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <4 x i64>, <4 x i64>* %0, align 32
@@ -124,94 +108,62 @@
 ; CHECK-P8-NEXT:    li r5, 16
 ; CHECK-P8-NEXT:    lxvd2x vs1, r4, r6
 ; CHECK-P8-NEXT:    lxvd2x vs2, r4, r5
-; CHECK-P8-NEXT:    xxswapd vs7, vs3
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xxswapd vs4, vs0
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    xxswapd vs5, vs1
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    xxswapd vs6, vs2
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    xscvuxdsp f4, f4
-; CHECK-P8-NEXT:    xscvuxdsp f5, f5
-; CHECK-P8-NEXT:    xscvuxdsp f6, f6
-; CHECK-P8-NEXT:    xscvuxdsp f7, f7
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs2, vs3
-; CHECK-P8-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    xxmrghd vs0, vs6, vs7
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs0
-; CHECK-P8-NEXT:    vmrgew v2, v4, v2
-; CHECK-P8-NEXT:    vmrgew v3, v5, v3
+; CHECK-P8-NEXT:    xxswapd v5, vs3
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    xxswapd v4, vs2
+; CHECK-P8-NEXT:    xvcvuxdsp vs3, v5
+; CHECK-P8-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-P8-NEXT:    xvcvuxdsp vs1, v3
+; CHECK-P8-NEXT:    xvcvuxdsp vs2, v4
+; CHECK-P8-NEXT:    xxsldwi v5, vs3, vs3, 3
+; CHECK-P8-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P8-NEXT:    xxsldwi v3, vs1, vs1, 3
+; CHECK-P8-NEXT:    xxsldwi v4, vs2, vs2, 3
+; CHECK-P8-NEXT:    vpkudum v2, v3, v2
+; CHECK-P8-NEXT:    vpkudum v3, v4, v5
 ; CHECK-P8-NEXT:    stvx v2, r3, r5
 ; CHECK-P8-NEXT:    stvx v3, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 48(r4)
-; CHECK-P9-NEXT:    lxv vs1, 32(r4)
-; CHECK-P9-NEXT:    lxv vs2, 16(r4)
-; CHECK-P9-NEXT:    lxv vs3, 0(r4)
-; CHECK-P9-NEXT:    xxswapd vs4, vs3
-; CHECK-P9-NEXT:    xxswapd vs5, vs2
-; CHECK-P9-NEXT:    xxswapd vs6, vs1
-; CHECK-P9-NEXT:    xxswapd vs7, vs0
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f4, f4
-; CHECK-P9-NEXT:    xscvuxdsp f5, f5
-; CHECK-P9-NEXT:    xscvuxdsp f6, f6
-; CHECK-P9-NEXT:    xscvuxdsp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs2, vs2, vs3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P9-NEXT:    xxmrghd vs4, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
+; CHECK-P9-NEXT:    lxv v5, 0(r4)
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v5
+; CHECK-P9-NEXT:    lxv v4, 16(r4)
+; CHECK-P9-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v4
+; CHECK-P9-NEXT:    lxv v3, 32(r4)
+; CHECK-P9-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v3
+; CHECK-P9-NEXT:    lxv v2, 48(r4)
+; CHECK-P9-NEXT:    vpkudum v3, v4, v5
+; CHECK-P9-NEXT:    stxv v3, 0(r3)
+; CHECK-P9-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P9-NEXT:    vpkudum v2, v2, v4
+; CHECK-P9-NEXT:    stxv v2, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 32(r4)
-; CHECK-BE-NEXT:    lxv vs1, 48(r4)
-; CHECK-BE-NEXT:    lxv vs2, 0(r4)
-; CHECK-BE-NEXT:    lxv vs3, 16(r4)
-; CHECK-BE-NEXT:    xxswapd vs4, vs3
-; CHECK-BE-NEXT:    xxswapd vs5, vs2
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f4, f4
-; CHECK-BE-NEXT:    xscvuxdsp f5, f5
-; CHECK-BE-NEXT:    xscvuxdsp f6, f6
-; CHECK-BE-NEXT:    xscvuxdsp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-BE-NEXT:    xxmrghd vs3, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs1, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v2, v3
-; CHECK-BE-NEXT:    vmrgew v3, v4, v5
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
+; CHECK-BE-NEXT:    lxv v5, 16(r4)
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v5
+; CHECK-BE-NEXT:    lxv v4, 0(r4)
+; CHECK-BE-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v4
+; CHECK-BE-NEXT:    lxv v3, 48(r4)
+; CHECK-BE-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v3
+; CHECK-BE-NEXT:    lxv v2, 32(r4)
+; CHECK-BE-NEXT:    vpkudum v3, v4, v5
+; CHECK-BE-NEXT:    stxv v3, 0(r3)
+; CHECK-BE-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-BE-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-BE-NEXT:    vpkudum v2, v2, v4
+; CHECK-BE-NEXT:    stxv v2, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x i64>, <8 x i64>* %0, align 64
@@ -223,69 +175,49 @@
 define void @test16elt(<16 x float>* noalias nocapture sret %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    li r7, 64
 ; CHECK-P8-NEXT:    li r5, 32
 ; CHECK-P8-NEXT:    li r6, 48
-; CHECK-P8-NEXT:    lxvd2x vs11, 0, r4
-; CHECK-P8-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT:    lxvd2x vs8, r4, r7
-; CHECK-P8-NEXT:    li r7, 80
-; CHECK-P8-NEXT:    lxvd2x vs6, r4, r5
-; CHECK-P8-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT:    lxvd2x vs7, r4, r6
+; CHECK-P8-NEXT:    li r7, 64
+; CHECK-P8-NEXT:    lxvd2x vs4, 0, r4
+; CHECK-P8-NEXT:    lxvd2x vs0, r4, r5
+; CHECK-P8-NEXT:    lxvd2x vs1, r4, r6
 ; CHECK-P8-NEXT:    lxvd2x vs2, r4, r7
-; CHECK-P8-NEXT:    li r7, 96
+; CHECK-P8-NEXT:    li r7, 80
 ; CHECK-P8-NEXT:    lxvd2x vs3, r4, r7
+; CHECK-P8-NEXT:    li r7, 96
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lxvd2x vs0, r4, r7
 ; CHECK-P8-NEXT:    li r7, 112
-; CHECK-P8-NEXT:    xscvuxdsp f30, f11
-; CHECK-P8-NEXT:    xxswapd vs11, vs11
-; CHECK-P8-NEXT:    lxvd2x vs4, r4, r7
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    lxvd2x vs1, r4, r7
 ; CHECK-P8-NEXT:    li r7, 16
-; CHECK-P8-NEXT:    xscvuxdsp f0, f6
-; CHECK-P8-NEXT:    xxswapd vs6, vs6
-; CHECK-P8-NEXT:    xscvuxdsp f1, f7
-; CHECK-P8-NEXT:    lxvd2x vs9, r4, r7
-; CHECK-P8-NEXT:    xxswapd vs7, vs7
-; CHECK-P8-NEXT:    xscvuxdsp f5, f8
-; CHECK-P8-NEXT:    xxswapd vs8, vs8
-; CHECK-P8-NEXT:    xscvuxdsp f10, f2
-; CHECK-P8-NEXT:    xxswapd vs2, vs2
-; CHECK-P8-NEXT:    xscvuxdsp f12, f3
-; CHECK-P8-NEXT:    xxswapd vs3, vs3
-; CHECK-P8-NEXT:    xscvuxdsp f13, f4
-; CHECK-P8-NEXT:    xxswapd vs4, vs4
-; CHECK-P8-NEXT:    xscvuxdsp f31, f9
-; CHECK-P8-NEXT:    xxswapd vs9, vs9
-; CHECK-P8-NEXT:    xscvuxdsp f6, f6
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xscvuxdsp f7, f7
-; CHECK-P8-NEXT:    xscvuxdsp f8, f8
-; CHECK-P8-NEXT:    xxmrghd vs5, vs10, vs5
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs10, vs13, vs12
-; CHECK-P8-NEXT:    xscvuxdsp f4, f4
-; CHECK-P8-NEXT:    xscvuxdsp f1, f9
-; CHECK-P8-NEXT:    xscvuxdsp f9, f11
-; CHECK-P8-NEXT:    xxmrghd vs11, vs31, vs30
-; CHECK-P8-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs0, vs7, vs6
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs8
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs5
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs10
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs11
-; CHECK-P8-NEXT:    xvcvdpsp v0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs1, vs9
-; CHECK-P8-NEXT:    xvcvdpsp v1, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v6, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v7, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v0, v2
-; CHECK-P8-NEXT:    vmrgew v3, v1, v3
-; CHECK-P8-NEXT:    vmrgew v4, v6, v4
-; CHECK-P8-NEXT:    vmrgew v5, v7, v5
+; CHECK-P8-NEXT:    xxswapd v4, vs2
+; CHECK-P8-NEXT:    lxvd2x vs2, r4, r7
+; CHECK-P8-NEXT:    xxswapd v5, vs3
+; CHECK-P8-NEXT:    xvcvuxdsp vs3, v2
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    xvcvuxdsp vs0, v3
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    xvcvuxdsp vs1, v4
+; CHECK-P8-NEXT:    xxswapd v4, vs2
+; CHECK-P8-NEXT:    xvcvuxdsp vs2, v5
+; CHECK-P8-NEXT:    xxswapd v5, vs4
+; CHECK-P8-NEXT:    xvcvuxdsp vs4, v2
+; CHECK-P8-NEXT:    xvcvuxdsp vs5, v3
+; CHECK-P8-NEXT:    xvcvuxdsp vs6, v4
+; CHECK-P8-NEXT:    xxsldwi v2, vs3, vs3, 3
+; CHECK-P8-NEXT:    xvcvuxdsp vs7, v5
+; CHECK-P8-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-P8-NEXT:    xxsldwi v4, vs1, vs1, 3
+; CHECK-P8-NEXT:    xxsldwi v5, vs2, vs2, 3
+; CHECK-P8-NEXT:    xxsldwi v0, vs4, vs4, 3
+; CHECK-P8-NEXT:    vpkudum v2, v3, v2
+; CHECK-P8-NEXT:    xxsldwi v1, vs5, vs5, 3
+; CHECK-P8-NEXT:    xxsldwi v6, vs6, vs6, 3
+; CHECK-P8-NEXT:    vpkudum v3, v5, v4
+; CHECK-P8-NEXT:    xxsldwi v7, vs7, vs7, 3
+; CHECK-P8-NEXT:    vpkudum v4, v1, v0
+; CHECK-P8-NEXT:    vpkudum v5, v6, v7
 ; CHECK-P8-NEXT:    stvx v2, r3, r7
 ; CHECK-P8-NEXT:    stvx v3, r3, r5
 ; CHECK-P8-NEXT:    stvx v4, r3, r6
@@ -294,130 +226,74 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs4, 48(r4)
-; CHECK-P9-NEXT:    lxv vs5, 32(r4)
-; CHECK-P9-NEXT:    lxv vs6, 16(r4)
-; CHECK-P9-NEXT:    lxv vs7, 0(r4)
-; CHECK-P9-NEXT:    lxv vs8, 112(r4)
-; CHECK-P9-NEXT:    lxv vs9, 96(r4)
-; CHECK-P9-NEXT:    lxv vs10, 80(r4)
-; CHECK-P9-NEXT:    lxv vs11, 64(r4)
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxswapd vs0, vs7
-; CHECK-P9-NEXT:    xxswapd vs1, vs6
-; CHECK-P9-NEXT:    xxswapd vs2, vs5
-; CHECK-P9-NEXT:    xxswapd vs3, vs4
-; CHECK-P9-NEXT:    xxswapd vs12, vs11
-; CHECK-P9-NEXT:    xxswapd vs13, vs10
-; CHECK-P9-NEXT:    xxswapd vs31, vs9
-; CHECK-P9-NEXT:    xxswapd vs30, vs8
-; CHECK-P9-NEXT:    xscvuxdsp f7, f7
-; CHECK-P9-NEXT:    xscvuxdsp f6, f6
-; CHECK-P9-NEXT:    xscvuxdsp f5, f5
-; CHECK-P9-NEXT:    xscvuxdsp f4, f4
-; CHECK-P9-NEXT:    xscvuxdsp f11, f11
-; CHECK-P9-NEXT:    xscvuxdsp f10, f10
-; CHECK-P9-NEXT:    xscvuxdsp f9, f9
-; CHECK-P9-NEXT:    xscvuxdsp f8, f8
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xscvuxdsp f12, f12
-; CHECK-P9-NEXT:    xscvuxdsp f13, f13
-; CHECK-P9-NEXT:    xscvuxdsp f31, f31
-; CHECK-P9-NEXT:    xscvuxdsp f30, f30
-; CHECK-P9-NEXT:    xxmrghd vs6, vs6, vs7
-; CHECK-P9-NEXT:    xxmrghd vs4, vs4, vs5
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs10, vs11
-; CHECK-P9-NEXT:    xxmrghd vs3, vs8, vs9
-; CHECK-P9-NEXT:    xxmrghd vs5, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v0, vs5
-; CHECK-P9-NEXT:    xvcvdpsp v1, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v6, vs7
-; CHECK-P9-NEXT:    xvcvdpsp v7, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v2, v4
-; CHECK-P9-NEXT:    vmrgew v3, v3, v5
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
+; CHECK-P9-NEXT:    lxv v7, 0(r4)
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v7
+; CHECK-P9-NEXT:    lxv v6, 16(r4)
+; CHECK-P9-NEXT:    xxsldwi v7, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v6
+; CHECK-P9-NEXT:    lxv v1, 32(r4)
+; CHECK-P9-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v1
+; CHECK-P9-NEXT:    lxv v0, 48(r4)
+; CHECK-P9-NEXT:    vpkudum v1, v6, v7
+; CHECK-P9-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v0
+; CHECK-P9-NEXT:    lxv v5, 64(r4)
+; CHECK-P9-NEXT:    xxsldwi v0, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v5
+; CHECK-P9-NEXT:    lxv v4, 80(r4)
+; CHECK-P9-NEXT:    vpkudum v0, v0, v6
+; CHECK-P9-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-P9-NEXT:    lxv v3, 96(r4)
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v4
+; CHECK-P9-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v3
+; CHECK-P9-NEXT:    lxv v2, 112(r4)
+; CHECK-P9-NEXT:    stxv v0, 16(r3)
+; CHECK-P9-NEXT:    stxv v1, 0(r3)
+; CHECK-P9-NEXT:    vpkudum v4, v4, v5
 ; CHECK-P9-NEXT:    stxv v4, 32(r3)
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P9-NEXT:    vpkudum v2, v2, v3
+; CHECK-P9-NEXT:    stxv v2, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 32(r4)
-; CHECK-BE-NEXT:    lxv vs3, 48(r4)
-; CHECK-BE-NEXT:    lxv vs4, 0(r4)
-; CHECK-BE-NEXT:    lxv vs5, 16(r4)
-; CHECK-BE-NEXT:    lxv vs6, 96(r4)
-; CHECK-BE-NEXT:    lxv vs7, 112(r4)
-; CHECK-BE-NEXT:    lxv vs8, 64(r4)
-; CHECK-BE-NEXT:    lxv vs9, 80(r4)
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxswapd vs0, vs5
-; CHECK-BE-NEXT:    xxswapd vs1, vs4
-; CHECK-BE-NEXT:    xxswapd vs10, vs3
-; CHECK-BE-NEXT:    xxswapd vs11, vs2
-; CHECK-BE-NEXT:    xxswapd vs12, vs9
-; CHECK-BE-NEXT:    xxswapd vs13, vs8
-; CHECK-BE-NEXT:    xxswapd vs31, vs7
-; CHECK-BE-NEXT:    xxswapd vs30, vs6
-; CHECK-BE-NEXT:    xscvuxdsp f5, f5
-; CHECK-BE-NEXT:    xscvuxdsp f4, f4
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f9, f9
-; CHECK-BE-NEXT:    xscvuxdsp f8, f8
-; CHECK-BE-NEXT:    xscvuxdsp f7, f7
-; CHECK-BE-NEXT:    xscvuxdsp f6, f6
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f10, f10
-; CHECK-BE-NEXT:    xscvuxdsp f11, f11
-; CHECK-BE-NEXT:    xscvuxdsp f12, f12
-; CHECK-BE-NEXT:    xscvuxdsp f13, f13
-; CHECK-BE-NEXT:    xscvuxdsp f31, f31
-; CHECK-BE-NEXT:    xscvuxdsp f30, f30
-; CHECK-BE-NEXT:    xxmrghd vs4, vs4, vs5
-; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs3
-; CHECK-BE-NEXT:    xxmrghd vs3, vs8, vs9
-; CHECK-BE-NEXT:    xxmrghd vs5, vs6, vs7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs4
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v0, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v6, vs5
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v1, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v7, vs7
-; CHECK-BE-NEXT:    vmrgew v2, v2, v4
-; CHECK-BE-NEXT:    vmrgew v3, v3, v5
-; CHECK-BE-NEXT:    vmrgew v4, v0, v1
-; CHECK-BE-NEXT:    vmrgew v5, v6, v7
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
+; CHECK-BE-NEXT:    lxv v7, 16(r4)
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v7
+; CHECK-BE-NEXT:    lxv v6, 0(r4)
+; CHECK-BE-NEXT:    xxsldwi v7, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v6
+; CHECK-BE-NEXT:    lxv v1, 48(r4)
+; CHECK-BE-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v1
+; CHECK-BE-NEXT:    lxv v0, 32(r4)
+; CHECK-BE-NEXT:    vpkudum v1, v6, v7
+; CHECK-BE-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v0
+; CHECK-BE-NEXT:    lxv v5, 80(r4)
+; CHECK-BE-NEXT:    xxsldwi v0, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v5
+; CHECK-BE-NEXT:    lxv v4, 64(r4)
+; CHECK-BE-NEXT:    vpkudum v0, v0, v6
+; CHECK-BE-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-BE-NEXT:    lxv v3, 112(r4)
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v4
+; CHECK-BE-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v3
+; CHECK-BE-NEXT:    lxv v2, 96(r4)
+; CHECK-BE-NEXT:    stxv v0, 16(r3)
+; CHECK-BE-NEXT:    stxv v1, 0(r3)
+; CHECK-BE-NEXT:    vpkudum v4, v4, v5
 ; CHECK-BE-NEXT:    stxv v4, 32(r3)
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
+; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvuxdsp vs0, v2
+; CHECK-BE-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-BE-NEXT:    vpkudum v2, v2, v3
+; CHECK-BE-NEXT:    stxv v2, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i64>, <16 x i64>* %0, align 128
@@ -445,14 +321,14 @@
 ; CHECK-P9-LABEL: test2elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    xxswapd vs0, v2
-; CHECK-P9-NEXT:    xxlor vs1, v2, v2
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
 ; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvdpspn vs1, f1
 ; CHECK-P9-NEXT:    xscvdpspn vs0, f0
-; CHECK-P9-NEXT:    xxsldwi v3, vs1, vs1, 1
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
+; CHECK-P9-NEXT:    xxlor vs0, v2, v2
+; CHECK-P9-NEXT:    xscvsxdsp f0, f0
+; CHECK-P9-NEXT:    xscvdpspn vs0, f0
 ; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 1
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
@@ -479,51 +355,35 @@
 ; CHECK-P8-NEXT:    li r4, 16
 ; CHECK-P8-NEXT:    lxvd2x vs1, 0, r3
 ; CHECK-P8-NEXT:    lxvd2x vs0, r3, r4
-; CHECK-P8-NEXT:    xxswapd vs3, vs1
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    xxswapd vs2, vs0
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P8-NEXT:    xxmrghd vs1, vs2, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    xvcvsxdsp vs1, v3
+; CHECK-P8-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-P8-NEXT:    xxsldwi v3, vs1, vs1, 3
+; CHECK-P8-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P8-NEXT:    vpkudum v2, v2, v3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 16(r3)
-; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxswapd vs2, vs1
-; CHECK-P9-NEXT:    xxswapd vs3, vs0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P9-NEXT:    xxmrghd vs2, vs3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs2
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
+; CHECK-P9-NEXT:    lxv v3, 0(r3)
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v3
+; CHECK-P9-NEXT:    lxv v2, 16(r3)
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P9-NEXT:    vpkudum v2, v2, v3
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 0(r3)
-; CHECK-BE-NEXT:    lxv vs1, 16(r3)
-; CHECK-BE-NEXT:    xxswapd vs2, vs1
-; CHECK-BE-NEXT:    xxswapd vs3, vs0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v2, v3
+; CHECK-BE-NEXT:    lxv v3, 16(r3)
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v3
+; CHECK-BE-NEXT:    lxv v2, 0(r3)
+; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-BE-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-BE-NEXT:    vpkudum v2, v2, v3
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <4 x i64>, <4 x i64>* %0, align 32
@@ -541,94 +401,62 @@
 ; CHECK-P8-NEXT:    li r5, 16
 ; CHECK-P8-NEXT:    lxvd2x vs1, r4, r6
 ; CHECK-P8-NEXT:    lxvd2x vs2, r4, r5
-; CHECK-P8-NEXT:    xxswapd vs7, vs3
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xxswapd vs4, vs0
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    xxswapd vs5, vs1
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    xxswapd vs6, vs2
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    xscvsxdsp f4, f4
-; CHECK-P8-NEXT:    xscvsxdsp f5, f5
-; CHECK-P8-NEXT:    xscvsxdsp f6, f6
-; CHECK-P8-NEXT:    xscvsxdsp f7, f7
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs2, vs3
-; CHECK-P8-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    xxmrghd vs0, vs6, vs7
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs0
-; CHECK-P8-NEXT:    vmrgew v2, v4, v2
-; CHECK-P8-NEXT:    vmrgew v3, v5, v3
+; CHECK-P8-NEXT:    xxswapd v5, vs3
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    xxswapd v4, vs2
+; CHECK-P8-NEXT:    xvcvsxdsp vs3, v5
+; CHECK-P8-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-P8-NEXT:    xvcvsxdsp vs1, v3
+; CHECK-P8-NEXT:    xvcvsxdsp vs2, v4
+; CHECK-P8-NEXT:    xxsldwi v5, vs3, vs3, 3
+; CHECK-P8-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P8-NEXT:    xxsldwi v3, vs1, vs1, 3
+; CHECK-P8-NEXT:    xxsldwi v4, vs2, vs2, 3
+; CHECK-P8-NEXT:    vpkudum v2, v3, v2
+; CHECK-P8-NEXT:    vpkudum v3, v4, v5
 ; CHECK-P8-NEXT:    stvx v2, r3, r5
 ; CHECK-P8-NEXT:    stvx v3, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs0, 48(r4)
-; CHECK-P9-NEXT:    lxv vs1, 32(r4)
-; CHECK-P9-NEXT:    lxv vs2, 16(r4)
-; CHECK-P9-NEXT:    lxv vs3, 0(r4)
-; CHECK-P9-NEXT:    xxswapd vs4, vs3
-; CHECK-P9-NEXT:    xxswapd vs5, vs2
-; CHECK-P9-NEXT:    xxswapd vs6, vs1
-; CHECK-P9-NEXT:    xxswapd vs7, vs0
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f4, f4
-; CHECK-P9-NEXT:    xscvsxdsp f5, f5
-; CHECK-P9-NEXT:    xscvsxdsp f6, f6
-; CHECK-P9-NEXT:    xscvsxdsp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs2, vs2, vs3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-P9-NEXT:    xxmrghd vs4, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
+; CHECK-P9-NEXT:    lxv v5, 0(r4)
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v5
+; CHECK-P9-NEXT:    lxv v4, 16(r4)
+; CHECK-P9-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v4
+; CHECK-P9-NEXT:    lxv v3, 32(r4)
+; CHECK-P9-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v3
+; CHECK-P9-NEXT:    lxv v2, 48(r4)
+; CHECK-P9-NEXT:    vpkudum v3, v4, v5
+; CHECK-P9-NEXT:    stxv v3, 0(r3)
+; CHECK-P9-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P9-NEXT:    vpkudum v2, v2, v4
+; CHECK-P9-NEXT:    stxv v2, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs0, 32(r4)
-; CHECK-BE-NEXT:    lxv vs1, 48(r4)
-; CHECK-BE-NEXT:    lxv vs2, 0(r4)
-; CHECK-BE-NEXT:    lxv vs3, 16(r4)
-; CHECK-BE-NEXT:    xxswapd vs4, vs3
-; CHECK-BE-NEXT:    xxswapd vs5, vs2
-; CHECK-BE-NEXT:    xxswapd vs6, vs1
-; CHECK-BE-NEXT:    xxswapd vs7, vs0
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f4, f4
-; CHECK-BE-NEXT:    xscvsxdsp f5, f5
-; CHECK-BE-NEXT:    xscvsxdsp f6, f6
-; CHECK-BE-NEXT:    xscvsxdsp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs0, vs1
-; CHECK-BE-NEXT:    xxmrghd vs3, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs1, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v2, v3
-; CHECK-BE-NEXT:    vmrgew v3, v4, v5
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
+; CHECK-BE-NEXT:    lxv v5, 16(r4)
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v5
+; CHECK-BE-NEXT:    lxv v4, 0(r4)
+; CHECK-BE-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v4
+; CHECK-BE-NEXT:    lxv v3, 48(r4)
+; CHECK-BE-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v3
+; CHECK-BE-NEXT:    lxv v2, 32(r4)
+; CHECK-BE-NEXT:    vpkudum v3, v4, v5
+; CHECK-BE-NEXT:    stxv v3, 0(r3)
+; CHECK-BE-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-BE-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-BE-NEXT:    vpkudum v2, v2, v4
+; CHECK-BE-NEXT:    stxv v2, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <8 x i64>, <8 x i64>* %0, align 64
@@ -640,69 +468,49 @@
 define void @test16elt_signed(<16 x float>* noalias nocapture sret %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    li r7, 64
 ; CHECK-P8-NEXT:    li r5, 32
 ; CHECK-P8-NEXT:    li r6, 48
-; CHECK-P8-NEXT:    lxvd2x vs11, 0, r4
-; CHECK-P8-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT:    lxvd2x vs8, r4, r7
-; CHECK-P8-NEXT:    li r7, 80
-; CHECK-P8-NEXT:    lxvd2x vs6, r4, r5
-; CHECK-P8-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT:    lxvd2x vs7, r4, r6
+; CHECK-P8-NEXT:    li r7, 64
+; CHECK-P8-NEXT:    lxvd2x vs4, 0, r4
+; CHECK-P8-NEXT:    lxvd2x vs0, r4, r5
+; CHECK-P8-NEXT:    lxvd2x vs1, r4, r6
 ; CHECK-P8-NEXT:    lxvd2x vs2, r4, r7
-; CHECK-P8-NEXT:    li r7, 96
+; CHECK-P8-NEXT:    li r7, 80
 ; CHECK-P8-NEXT:    lxvd2x vs3, r4, r7
+; CHECK-P8-NEXT:    li r7, 96
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lxvd2x vs0, r4, r7
 ; CHECK-P8-NEXT:    li r7, 112
-; CHECK-P8-NEXT:    xscvsxdsp f30, f11
-; CHECK-P8-NEXT:    xxswapd vs11, vs11
-; CHECK-P8-NEXT:    lxvd2x vs4, r4, r7
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    lxvd2x vs1, r4, r7
 ; CHECK-P8-NEXT:    li r7, 16
-; CHECK-P8-NEXT:    xscvsxdsp f0, f6
-; CHECK-P8-NEXT:    xxswapd vs6, vs6
-; CHECK-P8-NEXT:    xscvsxdsp f1, f7
-; CHECK-P8-NEXT:    lxvd2x vs9, r4, r7
-; CHECK-P8-NEXT:    xxswapd vs7, vs7
-; CHECK-P8-NEXT:    xscvsxdsp f5, f8
-; CHECK-P8-NEXT:    xxswapd vs8, vs8
-; CHECK-P8-NEXT:    xscvsxdsp f10, f2
-; CHECK-P8-NEXT:    xxswapd vs2, vs2
-; CHECK-P8-NEXT:    xscvsxdsp f12, f3
-; CHECK-P8-NEXT:    xxswapd vs3, vs3
-; CHECK-P8-NEXT:    xscvsxdsp f13, f4
-; CHECK-P8-NEXT:    xxswapd vs4, vs4
-; CHECK-P8-NEXT:    xscvsxdsp f31, f9
-; CHECK-P8-NEXT:    xxswapd vs9, vs9
-; CHECK-P8-NEXT:    xscvsxdsp f6, f6
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xscvsxdsp f7, f7
-; CHECK-P8-NEXT:    xscvsxdsp f8, f8
-; CHECK-P8-NEXT:    xxmrghd vs5, vs10, vs5
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs10, vs13, vs12
-; CHECK-P8-NEXT:    xscvsxdsp f4, f4
-; CHECK-P8-NEXT:    xscvsxdsp f1, f9
-; CHECK-P8-NEXT:    xscvsxdsp f9, f11
-; CHECK-P8-NEXT:    xxmrghd vs11, vs31, vs30
-; CHECK-P8-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs0, vs7, vs6
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs8
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs5
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs10
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs11
-; CHECK-P8-NEXT:    xvcvdpsp v0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs1, vs9
-; CHECK-P8-NEXT:    xvcvdpsp v1, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v6, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v7, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v0, v2
-; CHECK-P8-NEXT:    vmrgew v3, v1, v3
-; CHECK-P8-NEXT:    vmrgew v4, v6, v4
-; CHECK-P8-NEXT:    vmrgew v5, v7, v5
+; CHECK-P8-NEXT:    xxswapd v4, vs2
+; CHECK-P8-NEXT:    lxvd2x vs2, r4, r7
+; CHECK-P8-NEXT:    xxswapd v5, vs3
+; CHECK-P8-NEXT:    xvcvsxdsp vs3, v2
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    xvcvsxdsp vs0, v3
+; CHECK-P8-NEXT:    xxswapd v3, vs1
+; CHECK-P8-NEXT:    xvcvsxdsp vs1, v4
+; CHECK-P8-NEXT:    xxswapd v4, vs2
+; CHECK-P8-NEXT:    xvcvsxdsp vs2, v5
+; CHECK-P8-NEXT:    xxswapd v5, vs4
+; CHECK-P8-NEXT:    xvcvsxdsp vs4, v2
+; CHECK-P8-NEXT:    xvcvsxdsp vs5, v3
+; CHECK-P8-NEXT:    xvcvsxdsp vs6, v4
+; CHECK-P8-NEXT:    xxsldwi v2, vs3, vs3, 3
+; CHECK-P8-NEXT:    xvcvsxdsp vs7, v5
+; CHECK-P8-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-P8-NEXT:    xxsldwi v4, vs1, vs1, 3
+; CHECK-P8-NEXT:    xxsldwi v5, vs2, vs2, 3
+; CHECK-P8-NEXT:    xxsldwi v0, vs4, vs4, 3
+; CHECK-P8-NEXT:    vpkudum v2, v3, v2
+; CHECK-P8-NEXT:    xxsldwi v1, vs5, vs5, 3
+; CHECK-P8-NEXT:    xxsldwi v6, vs6, vs6, 3
+; CHECK-P8-NEXT:    vpkudum v3, v5, v4
+; CHECK-P8-NEXT:    xxsldwi v7, vs7, vs7, 3
+; CHECK-P8-NEXT:    vpkudum v4, v1, v0
+; CHECK-P8-NEXT:    vpkudum v5, v6, v7
 ; CHECK-P8-NEXT:    stvx v2, r3, r7
 ; CHECK-P8-NEXT:    stvx v3, r3, r5
 ; CHECK-P8-NEXT:    stvx v4, r3, r6
@@ -711,130 +519,74 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv vs4, 48(r4)
-; CHECK-P9-NEXT:    lxv vs5, 32(r4)
-; CHECK-P9-NEXT:    lxv vs6, 16(r4)
-; CHECK-P9-NEXT:    lxv vs7, 0(r4)
-; CHECK-P9-NEXT:    lxv vs8, 112(r4)
-; CHECK-P9-NEXT:    lxv vs9, 96(r4)
-; CHECK-P9-NEXT:    lxv vs10, 80(r4)
-; CHECK-P9-NEXT:    lxv vs11, 64(r4)
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    xxswapd vs0, vs7
-; CHECK-P9-NEXT:    xxswapd vs1, vs6
-; CHECK-P9-NEXT:    xxswapd vs2, vs5
-; CHECK-P9-NEXT:    xxswapd vs3, vs4
-; CHECK-P9-NEXT:    xxswapd vs12, vs11
-; CHECK-P9-NEXT:    xxswapd vs13, vs10
-; CHECK-P9-NEXT:    xxswapd vs31, vs9
-; CHECK-P9-NEXT:    xxswapd vs30, vs8
-; CHECK-P9-NEXT:    xscvsxdsp f7, f7
-; CHECK-P9-NEXT:    xscvsxdsp f6, f6
-; CHECK-P9-NEXT:    xscvsxdsp f5, f5
-; CHECK-P9-NEXT:    xscvsxdsp f4, f4
-; CHECK-P9-NEXT:    xscvsxdsp f11, f11
-; CHECK-P9-NEXT:    xscvsxdsp f10, f10
-; CHECK-P9-NEXT:    xscvsxdsp f9, f9
-; CHECK-P9-NEXT:    xscvsxdsp f8, f8
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xscvsxdsp f12, f12
-; CHECK-P9-NEXT:    xscvsxdsp f13, f13
-; CHECK-P9-NEXT:    xscvsxdsp f31, f31
-; CHECK-P9-NEXT:    xscvsxdsp f30, f30
-; CHECK-P9-NEXT:    xxmrghd vs6, vs6, vs7
-; CHECK-P9-NEXT:    xxmrghd vs4, vs4, vs5
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs10, vs11
-; CHECK-P9-NEXT:    xxmrghd vs3, vs8, vs9
-; CHECK-P9-NEXT:    xxmrghd vs5, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v0, vs5
-; CHECK-P9-NEXT:    xvcvdpsp v1, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v6, vs7
-; CHECK-P9-NEXT:    xvcvdpsp v7, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v2, v4
-; CHECK-P9-NEXT:    vmrgew v3, v3, v5
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
+; CHECK-P9-NEXT:    lxv v7, 0(r4)
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v7
+; CHECK-P9-NEXT:    lxv v6, 16(r4)
+; CHECK-P9-NEXT:    xxsldwi v7, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v6
+; CHECK-P9-NEXT:    lxv v1, 32(r4)
+; CHECK-P9-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v1
+; CHECK-P9-NEXT:    lxv v0, 48(r4)
+; CHECK-P9-NEXT:    vpkudum v1, v6, v7
+; CHECK-P9-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v0
+; CHECK-P9-NEXT:    lxv v5, 64(r4)
+; CHECK-P9-NEXT:    xxsldwi v0, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v5
+; CHECK-P9-NEXT:    lxv v4, 80(r4)
+; CHECK-P9-NEXT:    vpkudum v0, v0, v6
+; CHECK-P9-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-P9-NEXT:    lxv v3, 96(r4)
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v4
+; CHECK-P9-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v3
+; CHECK-P9-NEXT:    lxv v2, 112(r4)
+; CHECK-P9-NEXT:    stxv v0, 16(r3)
+; CHECK-P9-NEXT:    stxv v1, 0(r3)
+; CHECK-P9-NEXT:    vpkudum v4, v4, v5
 ; CHECK-P9-NEXT:    stxv v4, 32(r3)
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-P9-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-P9-NEXT:    vpkudum v2, v2, v3
+; CHECK-P9-NEXT:    stxv v2, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv vs2, 32(r4)
-; CHECK-BE-NEXT:    lxv vs3, 48(r4)
-; CHECK-BE-NEXT:    lxv vs4, 0(r4)
-; CHECK-BE-NEXT:    lxv vs5, 16(r4)
-; CHECK-BE-NEXT:    lxv vs6, 96(r4)
-; CHECK-BE-NEXT:    lxv vs7, 112(r4)
-; CHECK-BE-NEXT:    lxv vs8, 64(r4)
-; CHECK-BE-NEXT:    lxv vs9, 80(r4)
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    xxswapd vs0, vs5
-; CHECK-BE-NEXT:    xxswapd vs1, vs4
-; CHECK-BE-NEXT:    xxswapd vs10, vs3
-; CHECK-BE-NEXT:    xxswapd vs11, vs2
-; CHECK-BE-NEXT:    xxswapd vs12, vs9
-; CHECK-BE-NEXT:    xxswapd vs13, vs8
-; CHECK-BE-NEXT:    xxswapd vs31, vs7
-; CHECK-BE-NEXT:    xxswapd vs30, vs6
-; CHECK-BE-NEXT:    xscvsxdsp f5, f5
-; CHECK-BE-NEXT:    xscvsxdsp f4, f4
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f9, f9
-; CHECK-BE-NEXT:    xscvsxdsp f8, f8
-; CHECK-BE-NEXT:    xscvsxdsp f7, f7
-; CHECK-BE-NEXT:    xscvsxdsp f6, f6
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f10, f10
-; CHECK-BE-NEXT:    xscvsxdsp f11, f11
-; CHECK-BE-NEXT:    xscvsxdsp f12, f12
-; CHECK-BE-NEXT:    xscvsxdsp f13, f13
-; CHECK-BE-NEXT:    xscvsxdsp f31, f31
-; CHECK-BE-NEXT:    xscvsxdsp f30, f30
-; CHECK-BE-NEXT:    xxmrghd vs4, vs4, vs5
-; CHECK-BE-NEXT:    xxmrghd vs2, vs2, vs3
-; CHECK-BE-NEXT:    xxmrghd vs3, vs8, vs9
-; CHECK-BE-NEXT:    xxmrghd vs5, vs6, vs7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs4
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v0, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v6, vs5
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v1, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v7, vs7
-; CHECK-BE-NEXT:    vmrgew v2, v2, v4
-; CHECK-BE-NEXT:    vmrgew v3, v3, v5
-; CHECK-BE-NEXT:    vmrgew v4, v0, v1
-; CHECK-BE-NEXT:    vmrgew v5, v6, v7
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
+; CHECK-BE-NEXT:    lxv v7, 16(r4)
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v7
+; CHECK-BE-NEXT:    lxv v6, 0(r4)
+; CHECK-BE-NEXT:    xxsldwi v7, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v6
+; CHECK-BE-NEXT:    lxv v1, 48(r4)
+; CHECK-BE-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v1
+; CHECK-BE-NEXT:    lxv v0, 32(r4)
+; CHECK-BE-NEXT:    vpkudum v1, v6, v7
+; CHECK-BE-NEXT:    xxsldwi v6, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v0
+; CHECK-BE-NEXT:    lxv v5, 80(r4)
+; CHECK-BE-NEXT:    xxsldwi v0, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v5
+; CHECK-BE-NEXT:    lxv v4, 64(r4)
+; CHECK-BE-NEXT:    vpkudum v0, v0, v6
+; CHECK-BE-NEXT:    xxsldwi v5, vs0, vs0, 3
+; CHECK-BE-NEXT:    lxv v3, 112(r4)
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v4
+; CHECK-BE-NEXT:    xxsldwi v4, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v3
+; CHECK-BE-NEXT:    lxv v2, 96(r4)
+; CHECK-BE-NEXT:    stxv v0, 16(r3)
+; CHECK-BE-NEXT:    stxv v1, 0(r3)
+; CHECK-BE-NEXT:    vpkudum v4, v4, v5
 ; CHECK-BE-NEXT:    stxv v4, 32(r3)
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
+; CHECK-BE-NEXT:    xxsldwi v3, vs0, vs0, 3
+; CHECK-BE-NEXT:    xvcvsxdsp vs0, v2
+; CHECK-BE-NEXT:    xxsldwi v2, vs0, vs0, 3
+; CHECK-BE-NEXT:    vpkudum v2, v2, v3
+; CHECK-BE-NEXT:    stxv v2, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i64>, <16 x i64>* %0, align 128
diff --git a/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll b/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll
index 9d44a88..432598f 100644
--- a/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll
@@ -35,20 +35,20 @@
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
 ; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 1
 ; CHECK-P9-NEXT:    vextubrx r3, r3, v2
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
 ; CHECK-P9-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
 ; CHECK-P9-NEXT:    mtvsrwz f0, r3
-; CHECK-P9-NEXT:    mtvsrwz f1, r4
+; CHECK-P9-NEXT:    li r3, 1
 ; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
 ; CHECK-P9-NEXT:    xscvdpspn vs0, f0
-; CHECK-P9-NEXT:    xscvdpspn vs1, f1
+; CHECK-P9-NEXT:    vextubrx r3, r3, v2
+; CHECK-P9-NEXT:    rlwinm r3, r3, 0, 24, 31
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
+; CHECK-P9-NEXT:    mtvsrwz f0, r3
+; CHECK-P9-NEXT:    xscvuxdsp f0, f0
+; CHECK-P9-NEXT:    xscvdpspn vs0, f0
 ; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 1
-; CHECK-P9-NEXT:    xxsldwi v3, vs1, vs1, 1
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
@@ -56,18 +56,18 @@
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
 ; CHECK-BE-NEXT:    li r3, 1
-; CHECK-BE-NEXT:    li r4, 0
 ; CHECK-BE-NEXT:    vextublx r3, r3, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
 ; CHECK-BE-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
 ; CHECK-BE-NEXT:    mtvsrwz f0, r3
-; CHECK-BE-NEXT:    mtvsrwz f1, r4
+; CHECK-BE-NEXT:    li r3, 0
 ; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
+; CHECK-BE-NEXT:    vextublx r3, r3, v2
+; CHECK-BE-NEXT:    rlwinm r3, r3, 0, 24, 31
+; CHECK-BE-NEXT:    xscvdpspn v3, f0
+; CHECK-BE-NEXT:    mtvsrwz f0, r3
+; CHECK-BE-NEXT:    xscvuxdsp f0, f0
 ; CHECK-BE-NEXT:    xscvdpspn v2, f0
-; CHECK-BE-NEXT:    xscvdpspn v3, f1
-; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -80,89 +80,36 @@
 define <4 x float> @test4elt(i32 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI1_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r3
-; CHECK-P8-NEXT:    mfvsrd r3, f0
-; CHECK-P8-NEXT:    clrldi r4, r3, 56
-; CHECK-P8-NEXT:    rldicl r5, r3, 48, 56
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r4
-; CHECK-P8-NEXT:    rldicl r4, r3, 56, 56
-; CHECK-P8-NEXT:    rldicl r3, r3, 40, 56
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f1, r5
-; CHECK-P8-NEXT:    mtvsrwz f2, r4
-; CHECK-P8-NEXT:    mtvsrwz f3, r3
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
+; CHECK-P8-NEXT:    addi r3, r4, .LCPI1_0@toc@l
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P8-NEXT:    xvcvuxwsp v2, v2
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 2
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 3
-; CHECK-P9-NEXT:    vextubrx r3, r3, v2
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r3
-; CHECK-P9-NEXT:    mtvsrwz f1, r4
-; CHECK-P9-NEXT:    mtvsrwz f2, r5
-; CHECK-P9-NEXT:    mtvsrwz f3, r6
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
+; CHECK-P9-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P9-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r3
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxwsp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
-; CHECK-BE-NEXT:    li r3, 3
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    li r5, 2
-; CHECK-BE-NEXT:    li r6, 0
-; CHECK-BE-NEXT:    vextublx r3, r3, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r3
-; CHECK-BE-NEXT:    mtvsrwz f1, r4
-; CHECK-BE-NEXT:    mtvsrwz f2, r5
-; CHECK-BE-NEXT:    mtvsrwz f3, r6
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
+; CHECK-BE-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-BE-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r3
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    vperm v2, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxwsp v2, v2
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i32 %a.coerce to <4 x i8>
@@ -173,168 +120,59 @@
 define void @test8elt(<8 x float>* noalias nocapture sret %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI2_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r4
-; CHECK-P8-NEXT:    li r5, 16
-; CHECK-P8-NEXT:    mfvsrd r4, f0
-; CHECK-P8-NEXT:    clrldi r6, r4, 56
-; CHECK-P8-NEXT:    rldicl r7, r4, 48, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 56, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f1, r7
-; CHECK-P8-NEXT:    rldicl r7, r4, 40, 56
-; CHECK-P8-NEXT:    mtvsrwz f2, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 32, 56
-; CHECK-P8-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f3, r7
-; CHECK-P8-NEXT:    rldicl r7, r4, 16, 56
-; CHECK-P8-NEXT:    mtvsrwz f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 24, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 8, 56
-; CHECK-P8-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f5, r7
-; CHECK-P8-NEXT:    mtvsrwz f6, r6
-; CHECK-P8-NEXT:    mtvsrwz f7, r4
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xscvuxdsp f4, f4
-; CHECK-P8-NEXT:    xscvuxdsp f5, f5
-; CHECK-P8-NEXT:    xscvuxdsp f6, f6
-; CHECK-P8-NEXT:    xscvuxdsp f7, f7
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
-; CHECK-P8-NEXT:    vmrgew v3, v5, v4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI2_0@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    lvx v5, 0, r4
+; CHECK-P8-NEXT:    li r4, 16
+; CHECK-P8-NEXT:    vperm v2, v4, v3, v2
+; CHECK-P8-NEXT:    vperm v3, v4, v3, v5
+; CHECK-P8-NEXT:    xvcvuxwsp v2, v2
+; CHECK-P8-NEXT:    xvcvuxwsp v3, v3
 ; CHECK-P8-NEXT:    stvx v2, 0, r3
-; CHECK-P8-NEXT:    stvx v3, r3, r5
+; CHECK-P8-NEXT:    stvx v3, r3, r4
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 2
-; CHECK-P9-NEXT:    li r6, 1
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 6
-; CHECK-P9-NEXT:    li r10, 5
-; CHECK-P9-NEXT:    li r11, 7
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r4
-; CHECK-P9-NEXT:    mtvsrwz f1, r5
-; CHECK-P9-NEXT:    mtvsrwz f2, r6
-; CHECK-P9-NEXT:    mtvsrwz f3, r7
-; CHECK-P9-NEXT:    mtvsrwz f4, r8
-; CHECK-P9-NEXT:    mtvsrwz f5, r9
-; CHECK-P9-NEXT:    mtvsrwz f6, r10
-; CHECK-P9-NEXT:    mtvsrwz f7, r11
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xscvuxdsp f4, f4
-; CHECK-P9-NEXT:    xscvuxdsp f5, f5
-; CHECK-P9-NEXT:    xscvuxdsp f6, f6
-; CHECK-P9-NEXT:    xscvuxdsp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxwsp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxwsp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r5, 3
 ; CHECK-BE-NEXT:    mtvsrd v2, r4
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    li r6, 2
-; CHECK-BE-NEXT:    li r7, 0
-; CHECK-BE-NEXT:    li r8, 7
-; CHECK-BE-NEXT:    li r9, 5
-; CHECK-BE-NEXT:    li r10, 6
-; CHECK-BE-NEXT:    li r11, 4
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r5
-; CHECK-BE-NEXT:    mtvsrwz f1, r4
-; CHECK-BE-NEXT:    mtvsrwz f2, r6
-; CHECK-BE-NEXT:    mtvsrwz f3, r7
-; CHECK-BE-NEXT:    mtvsrwz f4, r8
-; CHECK-BE-NEXT:    mtvsrwz f5, r9
-; CHECK-BE-NEXT:    mtvsrwz f6, r10
-; CHECK-BE-NEXT:    mtvsrwz f7, r11
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xscvuxdsp f4, f4
-; CHECK-BE-NEXT:    xscvuxdsp f5, f5
-; CHECK-BE-NEXT:    xscvuxdsp f6, f6
-; CHECK-BE-NEXT:    xscvuxdsp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxwsp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxwsp vs1, v2
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i64 %a.coerce to <8 x i8>
@@ -346,348 +184,92 @@
 define void @test16elt(<16 x float>* noalias nocapture sret %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    mfvsrd r4, v2
-; CHECK-P8-NEXT:    xxswapd vs2, v2
-; CHECK-P8-NEXT:    clrldi r5, r4, 56
-; CHECK-P8-NEXT:    rldicl r6, r4, 48, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 40, 56
-; CHECK-P8-NEXT:    rldicl r7, r4, 56, 56
-; CHECK-P8-NEXT:    mtvsrwz f1, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 32, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f4, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 16, 56
-; CHECK-P8-NEXT:    mtvsrwz f3, r7
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f5, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 24, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 8, 56
-; CHECK-P8-NEXT:    mfvsrd r7, f2
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f2, r5
-; CHECK-P8-NEXT:    rlwinm r5, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f6, r5
-; CHECK-P8-NEXT:    clrldi r5, r7, 56
-; CHECK-P8-NEXT:    mtvsrwz f7, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 48, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f8, r5
-; CHECK-P8-NEXT:    rldicl r5, r7, 56, 56
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f9, r4
-; CHECK-P8-NEXT:    rlwinm r4, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rldicl r5, r7, 8, 56
-; CHECK-P8-NEXT:    mtvsrwz f10, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 40, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    xscvuxdsp f0, f0
-; CHECK-P8-NEXT:    mtvsrwz f11, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 32, 56
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    xscvuxdsp f1, f1
-; CHECK-P8-NEXT:    xscvuxdsp f3, f3
-; CHECK-P8-NEXT:    xscvuxdsp f4, f4
-; CHECK-P8-NEXT:    mtvsrwz f12, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 16, 56
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    xscvuxdsp f5, f5
-; CHECK-P8-NEXT:    mtvsrwz f13, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 24, 56
-; CHECK-P8-NEXT:    xscvuxdsp f2, f2
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    mtvsrwz v2, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_2@toc@ha
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI3_2@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_3@toc@ha
+; CHECK-P8-NEXT:    lvx v5, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_1@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI3_3@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI3_1@toc@l
+; CHECK-P8-NEXT:    lvx v0, 0, r4
+; CHECK-P8-NEXT:    lvx v1, 0, r5
 ; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    mtvsrwz v3, r5
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
 ; CHECK-P8-NEXT:    li r5, 32
-; CHECK-P8-NEXT:    xscvuxdsp f6, f6
-; CHECK-P8-NEXT:    xscvuxdsp f7, f7
-; CHECK-P8-NEXT:    xscvuxdsp f8, f8
-; CHECK-P8-NEXT:    xscvuxdsp f9, f9
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs5
-; CHECK-P8-NEXT:    xscvuxdsp f10, f10
-; CHECK-P8-NEXT:    xscvuxdsp f11, f11
-; CHECK-P8-NEXT:    xscvuxdsp f12, f12
-; CHECK-P8-NEXT:    xscvuxdsp f13, f13
-; CHECK-P8-NEXT:    xxmrghd vs5, vs7, vs6
-; CHECK-P8-NEXT:    xscvuxdsp f1, v2
-; CHECK-P8-NEXT:    xscvuxdsp f4, v3
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs0, vs9, vs8
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs3
-; CHECK-P8-NEXT:    xxmrghd vs3, vs11, vs10
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xxmrghd vs2, vs13, vs12
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs5
-; CHECK-P8-NEXT:    xvcvdpsp v0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs4, vs1
-; CHECK-P8-NEXT:    xvcvdpsp v1, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v6, vs2
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
-; CHECK-P8-NEXT:    xvcvdpsp v7, vs1
-; CHECK-P8-NEXT:    vmrgew v3, v5, v4
-; CHECK-P8-NEXT:    vmrgew v4, v1, v0
-; CHECK-P8-NEXT:    stvx v2, r3, r5
-; CHECK-P8-NEXT:    vmrgew v5, v7, v6
-; CHECK-P8-NEXT:    stvx v3, r3, r4
-; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    stvx v4, 0, r3
+; CHECK-P8-NEXT:    vperm v5, v4, v2, v5
+; CHECK-P8-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P8-NEXT:    vperm v0, v4, v2, v0
+; CHECK-P8-NEXT:    vperm v2, v4, v2, v1
+; CHECK-P8-NEXT:    xvcvuxwsp v4, v5
+; CHECK-P8-NEXT:    xvcvuxwsp v3, v3
+; CHECK-P8-NEXT:    xvcvuxwsp v5, v0
+; CHECK-P8-NEXT:    xvcvuxwsp v2, v2
+; CHECK-P8-NEXT:    stvx v4, r3, r5
+; CHECK-P8-NEXT:    stvx v3, 0, r3
 ; CHECK-P8-NEXT:    stvx v5, r3, r4
+; CHECK-P8-NEXT:    li r4, 16
+; CHECK-P8-NEXT:    stvx v2, r3, r4
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r5, 2
-; CHECK-P9-NEXT:    li r6, 1
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 6
-; CHECK-P9-NEXT:    li r10, 5
-; CHECK-P9-NEXT:    li r11, 7
-; CHECK-P9-NEXT:    li r12, 8
-; CHECK-P9-NEXT:    li r0, 10
-; CHECK-P9-NEXT:    li r30, 9
-; CHECK-P9-NEXT:    li r29, 11
-; CHECK-P9-NEXT:    li r28, 12
-; CHECK-P9-NEXT:    li r27, 14
-; CHECK-P9-NEXT:    li r26, 13
-; CHECK-P9-NEXT:    li r25, 15
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    vextubrx r12, r12, v2
-; CHECK-P9-NEXT:    vextubrx r0, r0, v2
-; CHECK-P9-NEXT:    vextubrx r30, r30, v2
-; CHECK-P9-NEXT:    vextubrx r29, r29, v2
-; CHECK-P9-NEXT:    vextubrx r28, r28, v2
-; CHECK-P9-NEXT:    vextubrx r27, r27, v2
-; CHECK-P9-NEXT:    vextubrx r26, r26, v2
-; CHECK-P9-NEXT:    vextubrx r25, r25, v2
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r12, r12, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r0, r0, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r30, r30, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r29, r29, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r28, r28, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r27, r27, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r26, r26, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r25, r25, 0, 24, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r4
-; CHECK-P9-NEXT:    mtvsrwz f1, r5
-; CHECK-P9-NEXT:    mtvsrwz f2, r6
-; CHECK-P9-NEXT:    mtvsrwz f3, r7
-; CHECK-P9-NEXT:    mtvsrwz f4, r8
-; CHECK-P9-NEXT:    mtvsrwz f5, r9
-; CHECK-P9-NEXT:    mtvsrwz f6, r10
-; CHECK-P9-NEXT:    mtvsrwz f7, r11
-; CHECK-P9-NEXT:    mtvsrwz f8, r12
-; CHECK-P9-NEXT:    mtvsrwz f9, r0
-; CHECK-P9-NEXT:    mtvsrwz f10, r30
-; CHECK-P9-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f11, r29
-; CHECK-P9-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f12, r28
-; CHECK-P9-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f13, r27
-; CHECK-P9-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz v2, r26
-; CHECK-P9-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz v3, r25
-; CHECK-P9-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xscvuxdsp f0, f0
-; CHECK-P9-NEXT:    xscvuxdsp f1, f1
-; CHECK-P9-NEXT:    xscvuxdsp f2, f2
-; CHECK-P9-NEXT:    xscvuxdsp f3, f3
-; CHECK-P9-NEXT:    xscvuxdsp f4, f4
-; CHECK-P9-NEXT:    xscvuxdsp f5, f5
-; CHECK-P9-NEXT:    xscvuxdsp f6, f6
-; CHECK-P9-NEXT:    xscvuxdsp f7, f7
-; CHECK-P9-NEXT:    xscvuxdsp f8, f8
-; CHECK-P9-NEXT:    xscvuxdsp f9, f9
-; CHECK-P9-NEXT:    xscvuxdsp f10, f10
-; CHECK-P9-NEXT:    xscvuxdsp f11, f11
-; CHECK-P9-NEXT:    xscvuxdsp f12, f12
-; CHECK-P9-NEXT:    xscvuxdsp f13, f13
-; CHECK-P9-NEXT:    xscvuxdsp f31, v2
-; CHECK-P9-NEXT:    xscvuxdsp f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P9-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    xvcvdpsp v0, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v1, vs5
-; CHECK-P9-NEXT:    xvcvdpsp v6, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v7, vs7
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
-; CHECK-P9-NEXT:    stxv v4, 32(r3)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxwsp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxwsp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    xvcvuxwsp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    xvcvuxwsp vs3, v2
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r4, 3
-; CHECK-BE-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r5, 1
-; CHECK-BE-NEXT:    li r6, 2
-; CHECK-BE-NEXT:    li r7, 0
-; CHECK-BE-NEXT:    li r8, 7
-; CHECK-BE-NEXT:    li r9, 5
-; CHECK-BE-NEXT:    li r10, 6
-; CHECK-BE-NEXT:    li r11, 4
-; CHECK-BE-NEXT:    li r12, 11
-; CHECK-BE-NEXT:    li r0, 9
-; CHECK-BE-NEXT:    li r30, 10
-; CHECK-BE-NEXT:    li r29, 8
-; CHECK-BE-NEXT:    li r28, 15
-; CHECK-BE-NEXT:    li r27, 13
-; CHECK-BE-NEXT:    li r26, 14
-; CHECK-BE-NEXT:    li r25, 12
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    vextublx r12, r12, v2
-; CHECK-BE-NEXT:    vextublx r0, r0, v2
-; CHECK-BE-NEXT:    vextublx r30, r30, v2
-; CHECK-BE-NEXT:    vextublx r29, r29, v2
-; CHECK-BE-NEXT:    vextublx r28, r28, v2
-; CHECK-BE-NEXT:    vextublx r27, r27, v2
-; CHECK-BE-NEXT:    vextublx r26, r26, v2
-; CHECK-BE-NEXT:    vextublx r25, r25, v2
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r12, r12, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r0, r0, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r30, r30, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r29, r29, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r28, r28, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r27, r27, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r26, r26, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r25, r25, 0, 24, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r4
-; CHECK-BE-NEXT:    mtvsrwz f1, r5
-; CHECK-BE-NEXT:    mtvsrwz f2, r6
-; CHECK-BE-NEXT:    mtvsrwz f3, r7
-; CHECK-BE-NEXT:    mtvsrwz f4, r8
-; CHECK-BE-NEXT:    mtvsrwz f5, r9
-; CHECK-BE-NEXT:    mtvsrwz f6, r10
-; CHECK-BE-NEXT:    mtvsrwz f7, r11
-; CHECK-BE-NEXT:    mtvsrwz f8, r12
-; CHECK-BE-NEXT:    mtvsrwz f9, r0
-; CHECK-BE-NEXT:    mtvsrwz f10, r30
-; CHECK-BE-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f11, r29
-; CHECK-BE-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f12, r28
-; CHECK-BE-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f13, r27
-; CHECK-BE-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz v2, r26
-; CHECK-BE-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz v3, r25
-; CHECK-BE-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xscvuxdsp f0, f0
-; CHECK-BE-NEXT:    xscvuxdsp f1, f1
-; CHECK-BE-NEXT:    xscvuxdsp f2, f2
-; CHECK-BE-NEXT:    xscvuxdsp f3, f3
-; CHECK-BE-NEXT:    xscvuxdsp f4, f4
-; CHECK-BE-NEXT:    xscvuxdsp f5, f5
-; CHECK-BE-NEXT:    xscvuxdsp f6, f6
-; CHECK-BE-NEXT:    xscvuxdsp f7, f7
-; CHECK-BE-NEXT:    xscvuxdsp f8, f8
-; CHECK-BE-NEXT:    xscvuxdsp f9, f9
-; CHECK-BE-NEXT:    xscvuxdsp f10, f10
-; CHECK-BE-NEXT:    xscvuxdsp f11, f11
-; CHECK-BE-NEXT:    xscvuxdsp f12, f12
-; CHECK-BE-NEXT:    xscvuxdsp f13, f13
-; CHECK-BE-NEXT:    xscvuxdsp f31, v2
-; CHECK-BE-NEXT:    xscvuxdsp f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v0, vs4
-; CHECK-BE-NEXT:    xvcvdpsp v1, vs5
-; CHECK-BE-NEXT:    xvcvdpsp v6, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v7, vs7
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    vmrgew v4, v1, v0
-; CHECK-BE-NEXT:    vmrgew v5, v7, v6
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
-; CHECK-BE-NEXT:    stxv v4, 32(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxwsp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_2@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxwsp vs1, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_3@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    xvcvuxwsp vs2, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
+; CHECK-BE-NEXT:    xvcvuxwsp vs3, v2
+; CHECK-BE-NEXT:    stxv vs3, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <16 x i8> %a to <16 x float>
@@ -721,20 +303,20 @@
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
 ; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 1
 ; CHECK-P9-NEXT:    vextubrx r3, r3, v2
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
 ; CHECK-P9-NEXT:    extsb r3, r3
-; CHECK-P9-NEXT:    extsb r4, r4
 ; CHECK-P9-NEXT:    mtvsrwa f0, r3
-; CHECK-P9-NEXT:    mtvsrwa f1, r4
+; CHECK-P9-NEXT:    li r3, 1
 ; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
 ; CHECK-P9-NEXT:    xscvdpspn vs0, f0
-; CHECK-P9-NEXT:    xscvdpspn vs1, f1
+; CHECK-P9-NEXT:    vextubrx r3, r3, v2
+; CHECK-P9-NEXT:    extsb r3, r3
+; CHECK-P9-NEXT:    xxsldwi v3, vs0, vs0, 1
+; CHECK-P9-NEXT:    mtvsrwa f0, r3
+; CHECK-P9-NEXT:    xscvsxdsp f0, f0
+; CHECK-P9-NEXT:    xscvdpspn vs0, f0
 ; CHECK-P9-NEXT:    xxsldwi v2, vs0, vs0, 1
-; CHECK-P9-NEXT:    xxsldwi v3, vs1, vs1, 1
-; CHECK-P9-NEXT:    vmrglw v2, v3, v2
+; CHECK-P9-NEXT:    vmrglw v2, v2, v3
 ; CHECK-P9-NEXT:    mfvsrld r3, v2
 ; CHECK-P9-NEXT:    blr
 ;
@@ -742,18 +324,18 @@
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
 ; CHECK-BE-NEXT:    li r3, 1
-; CHECK-BE-NEXT:    li r4, 0
 ; CHECK-BE-NEXT:    vextublx r3, r3, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
 ; CHECK-BE-NEXT:    extsb r3, r3
-; CHECK-BE-NEXT:    extsb r4, r4
 ; CHECK-BE-NEXT:    mtvsrwa f0, r3
-; CHECK-BE-NEXT:    mtvsrwa f1, r4
+; CHECK-BE-NEXT:    li r3, 0
 ; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
+; CHECK-BE-NEXT:    vextublx r3, r3, v2
+; CHECK-BE-NEXT:    extsb r3, r3
+; CHECK-BE-NEXT:    xscvdpspn v3, f0
+; CHECK-BE-NEXT:    mtvsrwa f0, r3
+; CHECK-BE-NEXT:    xscvsxdsp f0, f0
 ; CHECK-BE-NEXT:    xscvdpspn v2, f0
-; CHECK-BE-NEXT:    xscvdpspn v3, f1
-; CHECK-BE-NEXT:    vmrghw v2, v3, v2
+; CHECK-BE-NEXT:    vmrghw v2, v2, v3
 ; CHECK-BE-NEXT:    mfvsrd r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -766,89 +348,39 @@
 define <4 x float> @test4elt_signed(i32 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI5_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r3
-; CHECK-P8-NEXT:    mfvsrd r3, f0
-; CHECK-P8-NEXT:    clrldi r4, r3, 56
-; CHECK-P8-NEXT:    rldicl r5, r3, 48, 56
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f0, r4
-; CHECK-P8-NEXT:    rldicl r4, r3, 56, 56
-; CHECK-P8-NEXT:    rldicl r3, r3, 40, 56
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    extsb r3, r3
-; CHECK-P8-NEXT:    mtvsrwa f1, r5
-; CHECK-P8-NEXT:    mtvsrwa f2, r4
-; CHECK-P8-NEXT:    mtvsrwa f3, r3
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
+; CHECK-P8-NEXT:    addi r3, r4, .LCPI5_0@toc@l
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P8-NEXT:    vspltisw v3, 12
+; CHECK-P8-NEXT:    vadduwm v3, v3, v3
+; CHECK-P8-NEXT:    vslw v2, v2, v3
+; CHECK-P8-NEXT:    vsraw v2, v2, v3
+; CHECK-P8-NEXT:    xvcvsxwsp v2, v2
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 2
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 3
-; CHECK-P9-NEXT:    vextubrx r3, r3, v2
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    extsb r3, r3
-; CHECK-P9-NEXT:    extsb r4, r4
-; CHECK-P9-NEXT:    extsb r5, r5
-; CHECK-P9-NEXT:    extsb r6, r6
-; CHECK-P9-NEXT:    mtvsrwa f0, r3
-; CHECK-P9-NEXT:    mtvsrwa f1, r4
-; CHECK-P9-NEXT:    mtvsrwa f2, r5
-; CHECK-P9-NEXT:    mtvsrwa f3, r6
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
+; CHECK-P9-NEXT:    addis r3, r2, .LCPI5_0@toc@ha
+; CHECK-P9-NEXT:    addi r3, r3, .LCPI5_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r3
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P9-NEXT:    vextsb2w v2, v2
+; CHECK-P9-NEXT:    xvcvsxwsp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
-; CHECK-BE-NEXT:    li r3, 3
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    li r5, 2
-; CHECK-BE-NEXT:    li r6, 0
-; CHECK-BE-NEXT:    vextublx r3, r3, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    extsb r3, r3
-; CHECK-BE-NEXT:    extsb r4, r4
-; CHECK-BE-NEXT:    extsb r5, r5
-; CHECK-BE-NEXT:    extsb r6, r6
-; CHECK-BE-NEXT:    mtvsrwa f0, r3
-; CHECK-BE-NEXT:    mtvsrwa f1, r4
-; CHECK-BE-NEXT:    mtvsrwa f2, r5
-; CHECK-BE-NEXT:    mtvsrwa f3, r6
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
+; CHECK-BE-NEXT:    addis r3, r2, .LCPI5_0@toc@ha
+; CHECK-BE-NEXT:    addi r3, r3, .LCPI5_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r3
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
+; CHECK-BE-NEXT:    vextsb2w v2, v2
+; CHECK-BE-NEXT:    xvcvsxwsp v2, v2
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i32 %a.coerce to <4 x i8>
@@ -859,168 +391,67 @@
 define void @test8elt_signed(<8 x float>* noalias nocapture sret %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI6_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r4
-; CHECK-P8-NEXT:    li r5, 16
-; CHECK-P8-NEXT:    mfvsrd r4, f0
-; CHECK-P8-NEXT:    clrldi r6, r4, 56
-; CHECK-P8-NEXT:    rldicl r7, r4, 48, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    extsb r7, r7
-; CHECK-P8-NEXT:    mtvsrwa f0, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 56, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f1, r7
-; CHECK-P8-NEXT:    rldicl r7, r4, 40, 56
-; CHECK-P8-NEXT:    mtvsrwa f2, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 32, 56
-; CHECK-P8-NEXT:    extsb r7, r7
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f3, r7
-; CHECK-P8-NEXT:    rldicl r7, r4, 16, 56
-; CHECK-P8-NEXT:    mtvsrwa f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 24, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 8, 56
-; CHECK-P8-NEXT:    extsb r7, r7
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f5, r7
-; CHECK-P8-NEXT:    mtvsrwa f6, r6
-; CHECK-P8-NEXT:    mtvsrwa f7, r4
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xscvsxdsp f4, f4
-; CHECK-P8-NEXT:    xscvsxdsp f5, f5
-; CHECK-P8-NEXT:    xscvsxdsp f6, f6
-; CHECK-P8-NEXT:    xscvsxdsp f7, f7
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P8-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
-; CHECK-P8-NEXT:    vmrgew v3, v5, v4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-P8-NEXT:    vspltisw v5, 12
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI6_0@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI6_1@toc@l
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    lvx v4, 0, r4
+; CHECK-P8-NEXT:    li r4, 16
+; CHECK-P8-NEXT:    vperm v2, v3, v3, v2
+; CHECK-P8-NEXT:    vperm v3, v3, v3, v4
+; CHECK-P8-NEXT:    vadduwm v4, v5, v5
+; CHECK-P8-NEXT:    vslw v2, v2, v4
+; CHECK-P8-NEXT:    vslw v3, v3, v4
+; CHECK-P8-NEXT:    vsraw v2, v2, v4
+; CHECK-P8-NEXT:    vsraw v3, v3, v4
+; CHECK-P8-NEXT:    xvcvsxwsp v2, v2
+; CHECK-P8-NEXT:    xvcvsxwsp v3, v3
 ; CHECK-P8-NEXT:    stvx v2, 0, r3
-; CHECK-P8-NEXT:    stvx v3, r3, r5
+; CHECK-P8-NEXT:    stvx v3, r3, r4
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 2
-; CHECK-P9-NEXT:    li r6, 1
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 6
-; CHECK-P9-NEXT:    li r10, 5
-; CHECK-P9-NEXT:    li r11, 7
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    extsb r4, r4
-; CHECK-P9-NEXT:    extsb r5, r5
-; CHECK-P9-NEXT:    extsb r6, r6
-; CHECK-P9-NEXT:    extsb r7, r7
-; CHECK-P9-NEXT:    extsb r8, r8
-; CHECK-P9-NEXT:    extsb r9, r9
-; CHECK-P9-NEXT:    extsb r10, r10
-; CHECK-P9-NEXT:    extsb r11, r11
-; CHECK-P9-NEXT:    mtvsrwa f0, r4
-; CHECK-P9-NEXT:    mtvsrwa f1, r5
-; CHECK-P9-NEXT:    mtvsrwa f2, r6
-; CHECK-P9-NEXT:    mtvsrwa f3, r7
-; CHECK-P9-NEXT:    mtvsrwa f4, r8
-; CHECK-P9-NEXT:    mtvsrwa f5, r9
-; CHECK-P9-NEXT:    mtvsrwa f6, r10
-; CHECK-P9-NEXT:    mtvsrwa f7, r11
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xscvsxdsp f4, f4
-; CHECK-P9-NEXT:    xscvsxdsp f5, f5
-; CHECK-P9-NEXT:    xscvsxdsp f6, f6
-; CHECK-P9-NEXT:    xscvsxdsp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_1@toc@l
+; CHECK-P9-NEXT:    vextsb2w v3, v3
+; CHECK-P9-NEXT:    xvcvsxwsp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    vextsb2w v2, v2
+; CHECK-P9-NEXT:    xvcvsxwsp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r5, 3
 ; CHECK-BE-NEXT:    mtvsrd v2, r4
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    li r6, 2
-; CHECK-BE-NEXT:    li r7, 0
-; CHECK-BE-NEXT:    li r8, 7
-; CHECK-BE-NEXT:    li r9, 5
-; CHECK-BE-NEXT:    li r10, 6
-; CHECK-BE-NEXT:    li r11, 4
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    extsb r5, r5
-; CHECK-BE-NEXT:    extsb r4, r4
-; CHECK-BE-NEXT:    extsb r6, r6
-; CHECK-BE-NEXT:    extsb r7, r7
-; CHECK-BE-NEXT:    extsb r8, r8
-; CHECK-BE-NEXT:    extsb r9, r9
-; CHECK-BE-NEXT:    extsb r10, r10
-; CHECK-BE-NEXT:    extsb r11, r11
-; CHECK-BE-NEXT:    mtvsrwa f0, r5
-; CHECK-BE-NEXT:    mtvsrwa f1, r4
-; CHECK-BE-NEXT:    mtvsrwa f2, r6
-; CHECK-BE-NEXT:    mtvsrwa f3, r7
-; CHECK-BE-NEXT:    mtvsrwa f4, r8
-; CHECK-BE-NEXT:    mtvsrwa f5, r9
-; CHECK-BE-NEXT:    mtvsrwa f6, r10
-; CHECK-BE-NEXT:    mtvsrwa f7, r11
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xscvsxdsp f4, f4
-; CHECK-BE-NEXT:    xscvsxdsp f5, f5
-; CHECK-BE-NEXT:    xscvsxdsp f6, f6
-; CHECK-BE-NEXT:    xscvsxdsp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_0@toc@l
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    xxlxor v3, v3, v3
+; CHECK-BE-NEXT:    vperm v3, v3, v2, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-BE-NEXT:    vextsb2w v3, v3
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_1@toc@l
+; CHECK-BE-NEXT:    xvcvsxwsp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    vextsb2w v2, v2
+; CHECK-BE-NEXT:    xvcvsxwsp vs1, v2
+; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i64 %a.coerce to <8 x i8>
@@ -1032,348 +463,108 @@
 define void @test16elt_signed(<16 x float>* noalias nocapture sret %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    mfvsrd r4, v2
-; CHECK-P8-NEXT:    xxswapd vs2, v2
-; CHECK-P8-NEXT:    clrldi r5, r4, 56
-; CHECK-P8-NEXT:    rldicl r6, r4, 48, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f0, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 40, 56
-; CHECK-P8-NEXT:    rldicl r7, r4, 56, 56
-; CHECK-P8-NEXT:    mtvsrwa f1, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 32, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    extsb r7, r7
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f4, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 16, 56
-; CHECK-P8-NEXT:    mtvsrwa f3, r7
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f5, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 24, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 8, 56
-; CHECK-P8-NEXT:    mfvsrd r7, f2
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f2, r5
-; CHECK-P8-NEXT:    extsb r5, r6
-; CHECK-P8-NEXT:    mtvsrwa f6, r5
-; CHECK-P8-NEXT:    clrldi r5, r7, 56
-; CHECK-P8-NEXT:    mtvsrwa f7, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 48, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f8, r5
-; CHECK-P8-NEXT:    rldicl r5, r7, 56, 56
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f9, r4
-; CHECK-P8-NEXT:    extsb r4, r5
-; CHECK-P8-NEXT:    rldicl r5, r7, 8, 56
-; CHECK-P8-NEXT:    mtvsrwa f10, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 40, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    xscvsxdsp f0, f0
-; CHECK-P8-NEXT:    mtvsrwa f11, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 32, 56
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    xscvsxdsp f1, f1
-; CHECK-P8-NEXT:    xscvsxdsp f3, f3
-; CHECK-P8-NEXT:    xscvsxdsp f4, f4
-; CHECK-P8-NEXT:    mtvsrwa f12, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 16, 56
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    xscvsxdsp f5, f5
-; CHECK-P8-NEXT:    mtvsrwa f13, r4
-; CHECK-P8-NEXT:    rldicl r4, r7, 24, 56
-; CHECK-P8-NEXT:    xscvsxdsp f2, f2
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    mtvsrwa v2, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_0@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_2@toc@ha
+; CHECK-P8-NEXT:    vspltisw v1, 12
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI7_0@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_2@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_3@toc@ha
+; CHECK-P8-NEXT:    lvx v4, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_1@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI7_3@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_1@toc@l
+; CHECK-P8-NEXT:    lvx v5, 0, r4
+; CHECK-P8-NEXT:    lvx v0, 0, r5
 ; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    mtvsrwa v3, r5
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
 ; CHECK-P8-NEXT:    li r5, 32
-; CHECK-P8-NEXT:    xscvsxdsp f6, f6
-; CHECK-P8-NEXT:    xscvsxdsp f7, f7
-; CHECK-P8-NEXT:    xscvsxdsp f8, f8
-; CHECK-P8-NEXT:    xscvsxdsp f9, f9
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs5
-; CHECK-P8-NEXT:    xscvsxdsp f10, f10
-; CHECK-P8-NEXT:    xscvsxdsp f11, f11
-; CHECK-P8-NEXT:    xscvsxdsp f12, f12
-; CHECK-P8-NEXT:    xscvsxdsp f13, f13
-; CHECK-P8-NEXT:    xxmrghd vs5, vs7, vs6
-; CHECK-P8-NEXT:    xscvsxdsp f1, v2
-; CHECK-P8-NEXT:    xscvsxdsp f4, v3
-; CHECK-P8-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P8-NEXT:    xxmrghd vs0, vs9, vs8
-; CHECK-P8-NEXT:    xvcvdpsp v3, vs3
-; CHECK-P8-NEXT:    xxmrghd vs3, vs11, vs10
-; CHECK-P8-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P8-NEXT:    xxmrghd vs2, vs13, vs12
-; CHECK-P8-NEXT:    xvcvdpsp v5, vs5
-; CHECK-P8-NEXT:    xvcvdpsp v0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs4, vs1
-; CHECK-P8-NEXT:    xvcvdpsp v1, vs3
-; CHECK-P8-NEXT:    xvcvdpsp v6, vs2
-; CHECK-P8-NEXT:    vmrgew v2, v3, v2
-; CHECK-P8-NEXT:    xvcvdpsp v7, vs1
-; CHECK-P8-NEXT:    vmrgew v3, v5, v4
-; CHECK-P8-NEXT:    vmrgew v4, v1, v0
-; CHECK-P8-NEXT:    stvx v2, r3, r5
-; CHECK-P8-NEXT:    vmrgew v5, v7, v6
-; CHECK-P8-NEXT:    stvx v3, r3, r4
-; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    stvx v4, 0, r3
+; CHECK-P8-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P8-NEXT:    vperm v4, v2, v2, v4
+; CHECK-P8-NEXT:    vperm v5, v2, v2, v5
+; CHECK-P8-NEXT:    vperm v2, v2, v2, v0
+; CHECK-P8-NEXT:    vadduwm v0, v1, v1
+; CHECK-P8-NEXT:    vslw v3, v3, v0
+; CHECK-P8-NEXT:    vslw v4, v4, v0
+; CHECK-P8-NEXT:    vslw v5, v5, v0
+; CHECK-P8-NEXT:    vslw v2, v2, v0
+; CHECK-P8-NEXT:    vsraw v3, v3, v0
+; CHECK-P8-NEXT:    vsraw v4, v4, v0
+; CHECK-P8-NEXT:    vsraw v5, v5, v0
+; CHECK-P8-NEXT:    vsraw v2, v2, v0
+; CHECK-P8-NEXT:    xvcvsxwsp v3, v3
+; CHECK-P8-NEXT:    xvcvsxwsp v4, v4
+; CHECK-P8-NEXT:    xvcvsxwsp v5, v5
+; CHECK-P8-NEXT:    xvcvsxwsp v2, v2
+; CHECK-P8-NEXT:    stvx v3, 0, r3
+; CHECK-P8-NEXT:    stvx v4, r3, r5
 ; CHECK-P8-NEXT:    stvx v5, r3, r4
+; CHECK-P8-NEXT:    li r4, 16
+; CHECK-P8-NEXT:    stvx v2, r3, r4
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r5, 2
-; CHECK-P9-NEXT:    li r6, 1
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 6
-; CHECK-P9-NEXT:    li r10, 5
-; CHECK-P9-NEXT:    li r11, 7
-; CHECK-P9-NEXT:    li r12, 8
-; CHECK-P9-NEXT:    li r0, 10
-; CHECK-P9-NEXT:    li r30, 9
-; CHECK-P9-NEXT:    li r29, 11
-; CHECK-P9-NEXT:    li r28, 12
-; CHECK-P9-NEXT:    li r27, 14
-; CHECK-P9-NEXT:    li r26, 13
-; CHECK-P9-NEXT:    li r25, 15
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    vextubrx r12, r12, v2
-; CHECK-P9-NEXT:    vextubrx r0, r0, v2
-; CHECK-P9-NEXT:    vextubrx r30, r30, v2
-; CHECK-P9-NEXT:    vextubrx r29, r29, v2
-; CHECK-P9-NEXT:    vextubrx r28, r28, v2
-; CHECK-P9-NEXT:    vextubrx r27, r27, v2
-; CHECK-P9-NEXT:    vextubrx r26, r26, v2
-; CHECK-P9-NEXT:    vextubrx r25, r25, v2
-; CHECK-P9-NEXT:    extsb r4, r4
-; CHECK-P9-NEXT:    extsb r5, r5
-; CHECK-P9-NEXT:    extsb r6, r6
-; CHECK-P9-NEXT:    extsb r7, r7
-; CHECK-P9-NEXT:    extsb r8, r8
-; CHECK-P9-NEXT:    extsb r9, r9
-; CHECK-P9-NEXT:    extsb r10, r10
-; CHECK-P9-NEXT:    extsb r11, r11
-; CHECK-P9-NEXT:    extsb r12, r12
-; CHECK-P9-NEXT:    extsb r0, r0
-; CHECK-P9-NEXT:    extsb r30, r30
-; CHECK-P9-NEXT:    extsb r29, r29
-; CHECK-P9-NEXT:    extsb r28, r28
-; CHECK-P9-NEXT:    extsb r27, r27
-; CHECK-P9-NEXT:    extsb r26, r26
-; CHECK-P9-NEXT:    extsb r25, r25
-; CHECK-P9-NEXT:    mtvsrwa f0, r4
-; CHECK-P9-NEXT:    mtvsrwa f1, r5
-; CHECK-P9-NEXT:    mtvsrwa f2, r6
-; CHECK-P9-NEXT:    mtvsrwa f3, r7
-; CHECK-P9-NEXT:    mtvsrwa f4, r8
-; CHECK-P9-NEXT:    mtvsrwa f5, r9
-; CHECK-P9-NEXT:    mtvsrwa f6, r10
-; CHECK-P9-NEXT:    mtvsrwa f7, r11
-; CHECK-P9-NEXT:    mtvsrwa f8, r12
-; CHECK-P9-NEXT:    mtvsrwa f9, r0
-; CHECK-P9-NEXT:    mtvsrwa f10, r30
-; CHECK-P9-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f11, r29
-; CHECK-P9-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f12, r28
-; CHECK-P9-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f13, r27
-; CHECK-P9-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa v2, r26
-; CHECK-P9-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa v3, r25
-; CHECK-P9-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xscvsxdsp f0, f0
-; CHECK-P9-NEXT:    xscvsxdsp f1, f1
-; CHECK-P9-NEXT:    xscvsxdsp f2, f2
-; CHECK-P9-NEXT:    xscvsxdsp f3, f3
-; CHECK-P9-NEXT:    xscvsxdsp f4, f4
-; CHECK-P9-NEXT:    xscvsxdsp f5, f5
-; CHECK-P9-NEXT:    xscvsxdsp f6, f6
-; CHECK-P9-NEXT:    xscvsxdsp f7, f7
-; CHECK-P9-NEXT:    xscvsxdsp f8, f8
-; CHECK-P9-NEXT:    xscvsxdsp f9, f9
-; CHECK-P9-NEXT:    xscvsxdsp f10, f10
-; CHECK-P9-NEXT:    xscvsxdsp f11, f11
-; CHECK-P9-NEXT:    xscvsxdsp f12, f12
-; CHECK-P9-NEXT:    xscvsxdsp f13, f13
-; CHECK-P9-NEXT:    xscvsxdsp f31, v2
-; CHECK-P9-NEXT:    xscvsxdsp f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P9-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xvcvdpsp v2, vs0
-; CHECK-P9-NEXT:    xvcvdpsp v3, vs1
-; CHECK-P9-NEXT:    xvcvdpsp v4, vs2
-; CHECK-P9-NEXT:    xvcvdpsp v5, vs3
-; CHECK-P9-NEXT:    xvcvdpsp v0, vs4
-; CHECK-P9-NEXT:    xvcvdpsp v1, vs5
-; CHECK-P9-NEXT:    xvcvdpsp v6, vs6
-; CHECK-P9-NEXT:    xvcvdpsp v7, vs7
-; CHECK-P9-NEXT:    vmrgew v2, v3, v2
-; CHECK-P9-NEXT:    vmrgew v3, v5, v4
-; CHECK-P9-NEXT:    vmrgew v4, v1, v0
-; CHECK-P9-NEXT:    vmrgew v5, v7, v6
-; CHECK-P9-NEXT:    stxv v3, 16(r3)
-; CHECK-P9-NEXT:    stxv v2, 0(r3)
-; CHECK-P9-NEXT:    stxv v5, 48(r3)
-; CHECK-P9-NEXT:    stxv v4, 32(r3)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    vextsb2w v3, v3
+; CHECK-P9-NEXT:    xvcvsxwsp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    vextsb2w v3, v3
+; CHECK-P9-NEXT:    xvcvsxwsp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    vextsb2w v3, v3
+; CHECK-P9-NEXT:    xvcvsxwsp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    vextsb2w v2, v2
+; CHECK-P9-NEXT:    xvcvsxwsp vs3, v2
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r4, 3
-; CHECK-BE-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r5, 1
-; CHECK-BE-NEXT:    li r6, 2
-; CHECK-BE-NEXT:    li r7, 0
-; CHECK-BE-NEXT:    li r8, 7
-; CHECK-BE-NEXT:    li r9, 5
-; CHECK-BE-NEXT:    li r10, 6
-; CHECK-BE-NEXT:    li r11, 4
-; CHECK-BE-NEXT:    li r12, 11
-; CHECK-BE-NEXT:    li r0, 9
-; CHECK-BE-NEXT:    li r30, 10
-; CHECK-BE-NEXT:    li r29, 8
-; CHECK-BE-NEXT:    li r28, 15
-; CHECK-BE-NEXT:    li r27, 13
-; CHECK-BE-NEXT:    li r26, 14
-; CHECK-BE-NEXT:    li r25, 12
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    vextublx r12, r12, v2
-; CHECK-BE-NEXT:    vextublx r0, r0, v2
-; CHECK-BE-NEXT:    vextublx r30, r30, v2
-; CHECK-BE-NEXT:    vextublx r29, r29, v2
-; CHECK-BE-NEXT:    vextublx r28, r28, v2
-; CHECK-BE-NEXT:    vextublx r27, r27, v2
-; CHECK-BE-NEXT:    vextublx r26, r26, v2
-; CHECK-BE-NEXT:    vextublx r25, r25, v2
-; CHECK-BE-NEXT:    extsb r4, r4
-; CHECK-BE-NEXT:    extsb r5, r5
-; CHECK-BE-NEXT:    extsb r6, r6
-; CHECK-BE-NEXT:    extsb r7, r7
-; CHECK-BE-NEXT:    extsb r8, r8
-; CHECK-BE-NEXT:    extsb r9, r9
-; CHECK-BE-NEXT:    extsb r10, r10
-; CHECK-BE-NEXT:    extsb r11, r11
-; CHECK-BE-NEXT:    extsb r12, r12
-; CHECK-BE-NEXT:    extsb r0, r0
-; CHECK-BE-NEXT:    extsb r30, r30
-; CHECK-BE-NEXT:    extsb r29, r29
-; CHECK-BE-NEXT:    extsb r28, r28
-; CHECK-BE-NEXT:    extsb r27, r27
-; CHECK-BE-NEXT:    extsb r26, r26
-; CHECK-BE-NEXT:    extsb r25, r25
-; CHECK-BE-NEXT:    mtvsrwa f0, r4
-; CHECK-BE-NEXT:    mtvsrwa f1, r5
-; CHECK-BE-NEXT:    mtvsrwa f2, r6
-; CHECK-BE-NEXT:    mtvsrwa f3, r7
-; CHECK-BE-NEXT:    mtvsrwa f4, r8
-; CHECK-BE-NEXT:    mtvsrwa f5, r9
-; CHECK-BE-NEXT:    mtvsrwa f6, r10
-; CHECK-BE-NEXT:    mtvsrwa f7, r11
-; CHECK-BE-NEXT:    mtvsrwa f8, r12
-; CHECK-BE-NEXT:    mtvsrwa f9, r0
-; CHECK-BE-NEXT:    mtvsrwa f10, r30
-; CHECK-BE-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f11, r29
-; CHECK-BE-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f12, r28
-; CHECK-BE-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f13, r27
-; CHECK-BE-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa v2, r26
-; CHECK-BE-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa v3, r25
-; CHECK-BE-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xscvsxdsp f0, f0
-; CHECK-BE-NEXT:    xscvsxdsp f1, f1
-; CHECK-BE-NEXT:    xscvsxdsp f2, f2
-; CHECK-BE-NEXT:    xscvsxdsp f3, f3
-; CHECK-BE-NEXT:    xscvsxdsp f4, f4
-; CHECK-BE-NEXT:    xscvsxdsp f5, f5
-; CHECK-BE-NEXT:    xscvsxdsp f6, f6
-; CHECK-BE-NEXT:    xscvsxdsp f7, f7
-; CHECK-BE-NEXT:    xscvsxdsp f8, f8
-; CHECK-BE-NEXT:    xscvsxdsp f9, f9
-; CHECK-BE-NEXT:    xscvsxdsp f10, f10
-; CHECK-BE-NEXT:    xscvsxdsp f11, f11
-; CHECK-BE-NEXT:    xscvsxdsp f12, f12
-; CHECK-BE-NEXT:    xscvsxdsp f13, f13
-; CHECK-BE-NEXT:    xscvsxdsp f31, v2
-; CHECK-BE-NEXT:    xscvsxdsp f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xvcvdpsp v2, vs0
-; CHECK-BE-NEXT:    xvcvdpsp v3, vs1
-; CHECK-BE-NEXT:    xvcvdpsp v4, vs2
-; CHECK-BE-NEXT:    xvcvdpsp v5, vs3
-; CHECK-BE-NEXT:    xvcvdpsp v0, vs4
-; CHECK-BE-NEXT:    xvcvdpsp v1, vs5
-; CHECK-BE-NEXT:    xvcvdpsp v6, vs6
-; CHECK-BE-NEXT:    xvcvdpsp v7, vs7
-; CHECK-BE-NEXT:    vmrgew v2, v3, v2
-; CHECK-BE-NEXT:    vmrgew v3, v5, v4
-; CHECK-BE-NEXT:    vmrgew v4, v1, v0
-; CHECK-BE-NEXT:    vmrgew v5, v7, v6
-; CHECK-BE-NEXT:    stxv v3, 16(r3)
-; CHECK-BE-NEXT:    stxv v2, 0(r3)
-; CHECK-BE-NEXT:    stxv v5, 48(r3)
-; CHECK-BE-NEXT:    stxv v4, 32(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_1@toc@l
+; CHECK-BE-NEXT:    vextsb2w v3, v3
+; CHECK-BE-NEXT:    xvcvsxwsp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_2@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    vextsb2w v3, v3
+; CHECK-BE-NEXT:    xvcvsxwsp vs1, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_3@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs1, 48(r3)
+; CHECK-BE-NEXT:    vextsb2w v3, v3
+; CHECK-BE-NEXT:    xvcvsxwsp vs2, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs2, 0(r3)
+; CHECK-BE-NEXT:    vextsb2w v2, v2
+; CHECK-BE-NEXT:    xvcvsxwsp vs3, v2
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <16 x i8> %a to <16 x float>
diff --git a/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll b/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll
index bafc18b..1a97901 100644
--- a/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll
@@ -12,49 +12,36 @@
 define <2 x double> @test2elt(i16 %a.coerce) local_unnamed_addr #0 {
 ; CHECK-P8-LABEL: test2elt:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI0_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r3
-; CHECK-P8-NEXT:    mfvsrd r3, f0
-; CHECK-P8-NEXT:    clrldi r4, r3, 56
-; CHECK-P8-NEXT:    rldicl r3, r3, 56, 56
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r4
-; CHECK-P8-NEXT:    mtvsrwz f1, r3
-; CHECK-P8-NEXT:    xscvuxddp f0, f0
-; CHECK-P8-NEXT:    xscvuxddp f1, f1
-; CHECK-P8-NEXT:    xxmrghd v2, vs1, vs0
+; CHECK-P8-NEXT:    addi r3, r4, .LCPI0_0@toc@l
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P8-NEXT:    xvcvuxddp v2, v2
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test2elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 1
-; CHECK-P9-NEXT:    vextubrx r3, r3, v2
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r3
-; CHECK-P9-NEXT:    mtvsrwz f1, r4
-; CHECK-P9-NEXT:    xscvuxddp f0, f0
-; CHECK-P9-NEXT:    xscvuxddp f1, f1
-; CHECK-P9-NEXT:    xxmrghd v2, vs1, vs0
+; CHECK-P9-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P9-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r3
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxddp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
-; CHECK-BE-NEXT:    li r3, 1
-; CHECK-BE-NEXT:    li r4, 0
-; CHECK-BE-NEXT:    vextublx r3, r3, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    rlwinm r3, r3, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r3
-; CHECK-BE-NEXT:    mtvsrwz f1, r4
-; CHECK-BE-NEXT:    xscvuxddp f0, f0
-; CHECK-BE-NEXT:    xscvuxddp f1, f1
-; CHECK-BE-NEXT:    xxmrghd v2, vs1, vs0
+; CHECK-BE-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-BE-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r3
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    vperm v2, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxddp v2, v2
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i16 %a.coerce to <2 x i8>
@@ -65,27 +52,20 @@
 define void @test4elt(<4 x double>* noalias nocapture sret %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI1_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r4
-; CHECK-P8-NEXT:    mfvsrd r4, f0
-; CHECK-P8-NEXT:    clrldi r5, r4, 56
-; CHECK-P8-NEXT:    rldicl r6, r4, 56, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 48, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 40, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f1, r6
-; CHECK-P8-NEXT:    mtvsrwz f2, r5
-; CHECK-P8-NEXT:    mtvsrwz f3, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI1_1@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI1_0@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI1_1@toc@l
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    lvx v5, 0, r4
 ; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    xscvuxddp f0, f0
-; CHECK-P8-NEXT:    xscvuxddp f1, f1
-; CHECK-P8-NEXT:    xscvuxddp f2, f2
-; CHECK-P8-NEXT:    xscvuxddp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
+; CHECK-P8-NEXT:    vperm v2, v4, v3, v2
+; CHECK-P8-NEXT:    vperm v3, v4, v3, v5
+; CHECK-P8-NEXT:    xvcvuxddp vs0, v2
+; CHECK-P8-NEXT:    xvcvuxddp vs1, v3
 ; CHECK-P8-NEXT:    xxswapd vs0, vs0
 ; CHECK-P8-NEXT:    xxswapd vs1, vs1
 ; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
@@ -95,59 +75,37 @@
 ; CHECK-P9-LABEL: test4elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r4
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r4
-; CHECK-P9-NEXT:    mtvsrwz f1, r5
-; CHECK-P9-NEXT:    mtvsrwz f2, r6
-; CHECK-P9-NEXT:    mtvsrwz f3, r7
-; CHECK-P9-NEXT:    xscvuxddp f0, f0
-; CHECK-P9-NEXT:    xscvuxddp f1, f1
-; CHECK-P9-NEXT:    xscvuxddp f2, f2
-; CHECK-P9-NEXT:    xscvuxddp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI1_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI1_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI1_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI1_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r4
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    li r5, 0
-; CHECK-BE-NEXT:    li r6, 3
-; CHECK-BE-NEXT:    li r7, 2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r4
-; CHECK-BE-NEXT:    mtvsrwz f1, r5
-; CHECK-BE-NEXT:    mtvsrwz f2, r6
-; CHECK-BE-NEXT:    mtvsrwz f3, r7
-; CHECK-BE-NEXT:    xscvuxddp f0, f0
-; CHECK-BE-NEXT:    xscvuxddp f1, f1
-; CHECK-BE-NEXT:    xscvuxddp f2, f2
-; CHECK-BE-NEXT:    xscvuxddp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI1_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI1_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI1_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI1_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxddp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs1, v2
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i32 %a.coerce to <4 x i8>
@@ -159,161 +117,100 @@
 define void @test8elt(<8 x double>* noalias nocapture sret %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI2_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r4
-; CHECK-P8-NEXT:    mfvsrd r4, f0
-; CHECK-P8-NEXT:    clrldi r5, r4, 56
-; CHECK-P8-NEXT:    rldicl r6, r4, 56, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 48, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f1, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 40, 56
-; CHECK-P8-NEXT:    mtvsrwz f2, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 32, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f3, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 24, 56
-; CHECK-P8-NEXT:    mtvsrwz f4, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 16, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 8, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f5, r6
-; CHECK-P8-NEXT:    mtvsrwz f6, r5
-; CHECK-P8-NEXT:    li r5, 32
-; CHECK-P8-NEXT:    mtvsrwz f7, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI2_2@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI2_0@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI2_2@toc@l
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI2_3@toc@ha
+; CHECK-P8-NEXT:    lvx v5, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI2_3@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-P8-NEXT:    lvx v0, 0, r5
+; CHECK-P8-NEXT:    lvx v1, 0, r4
 ; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    xscvuxddp f4, f4
-; CHECK-P8-NEXT:    xscvuxddp f5, f5
-; CHECK-P8-NEXT:    xscvuxddp f6, f6
-; CHECK-P8-NEXT:    xscvuxddp f7, f7
-; CHECK-P8-NEXT:    xscvuxddp f0, f0
-; CHECK-P8-NEXT:    xscvuxddp f1, f1
-; CHECK-P8-NEXT:    xscvuxddp f2, f2
-; CHECK-P8-NEXT:    xscvuxddp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs4, vs5, vs4
-; CHECK-P8-NEXT:    xxmrghd vs5, vs7, vs6
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xxswapd vs2, vs5
-; CHECK-P8-NEXT:    xxswapd vs3, vs4
+; CHECK-P8-NEXT:    li r5, 32
+; CHECK-P8-NEXT:    vperm v2, v4, v3, v2
+; CHECK-P8-NEXT:    vperm v5, v4, v3, v5
+; CHECK-P8-NEXT:    vperm v0, v4, v3, v0
+; CHECK-P8-NEXT:    vperm v3, v4, v3, v1
+; CHECK-P8-NEXT:    xvcvuxddp vs0, v2
+; CHECK-P8-NEXT:    xvcvuxddp vs1, v5
+; CHECK-P8-NEXT:    xvcvuxddp vs2, v0
+; CHECK-P8-NEXT:    xvcvuxddp vs3, v3
 ; CHECK-P8-NEXT:    xxswapd vs0, vs0
 ; CHECK-P8-NEXT:    xxswapd vs1, vs1
+; CHECK-P8-NEXT:    xxswapd vs2, vs2
+; CHECK-P8-NEXT:    xxswapd vs3, vs3
 ; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
 ; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    stxvd2x vs3, r3, r5
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
+; CHECK-P8-NEXT:    stxvd2x vs1, r3, r5
+; CHECK-P8-NEXT:    stxvd2x vs3, r3, r4
 ; CHECK-P8-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 5
-; CHECK-P9-NEXT:    li r10, 6
-; CHECK-P9-NEXT:    li r11, 7
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r4
-; CHECK-P9-NEXT:    mtvsrwz f1, r5
-; CHECK-P9-NEXT:    mtvsrwz f2, r6
-; CHECK-P9-NEXT:    mtvsrwz f3, r7
-; CHECK-P9-NEXT:    mtvsrwz f4, r8
-; CHECK-P9-NEXT:    mtvsrwz f5, r9
-; CHECK-P9-NEXT:    mtvsrwz f6, r10
-; CHECK-P9-NEXT:    mtvsrwz f7, r11
-; CHECK-P9-NEXT:    xscvuxddp f0, f0
-; CHECK-P9-NEXT:    xscvuxddp f1, f1
-; CHECK-P9-NEXT:    xscvuxddp f2, f2
-; CHECK-P9-NEXT:    xscvuxddp f3, f3
-; CHECK-P9-NEXT:    xscvuxddp f4, f4
-; CHECK-P9-NEXT:    xscvuxddp f5, f5
-; CHECK-P9-NEXT:    xscvuxddp f6, f6
-; CHECK-P9-NEXT:    xscvuxddp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI2_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI2_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs3, v2
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r5, 1
 ; CHECK-BE-NEXT:    mtvsrd v2, r4
-; CHECK-BE-NEXT:    li r4, 0
-; CHECK-BE-NEXT:    li r6, 3
-; CHECK-BE-NEXT:    li r7, 2
-; CHECK-BE-NEXT:    li r8, 5
-; CHECK-BE-NEXT:    li r9, 4
-; CHECK-BE-NEXT:    li r10, 7
-; CHECK-BE-NEXT:    li r11, 6
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r5
-; CHECK-BE-NEXT:    mtvsrwz f1, r4
-; CHECK-BE-NEXT:    mtvsrwz f2, r6
-; CHECK-BE-NEXT:    mtvsrwz f3, r7
-; CHECK-BE-NEXT:    mtvsrwz f4, r8
-; CHECK-BE-NEXT:    mtvsrwz f5, r9
-; CHECK-BE-NEXT:    mtvsrwz f6, r10
-; CHECK-BE-NEXT:    mtvsrwz f7, r11
-; CHECK-BE-NEXT:    xscvuxddp f0, f0
-; CHECK-BE-NEXT:    xscvuxddp f1, f1
-; CHECK-BE-NEXT:    xscvuxddp f2, f2
-; CHECK-BE-NEXT:    xscvuxddp f3, f3
-; CHECK-BE-NEXT:    xscvuxddp f4, f4
-; CHECK-BE-NEXT:    xscvuxddp f5, f5
-; CHECK-BE-NEXT:    xscvuxddp f6, f6
-; CHECK-BE-NEXT:    xscvuxddp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxddp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_2@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs1, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI2_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI2_3@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs2, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs3, v2
 ; CHECK-BE-NEXT:    stxv vs3, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
@@ -326,338 +223,176 @@
 define void @test16elt(<16 x double>* noalias nocapture sret %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    mfvsrd r5, v2
-; CHECK-P8-NEXT:    xxswapd vs2, v2
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_1@toc@ha
+; CHECK-P8-NEXT:    xxlxor v4, v4, v4
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI3_1@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_2@toc@ha
+; CHECK-P8-NEXT:    lvx v5, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_4@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI3_2@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI3_4@toc@l
+; CHECK-P8-NEXT:    lvx v0, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_6@toc@ha
+; CHECK-P8-NEXT:    lvx v1, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_7@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI3_6@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI3_7@toc@l
+; CHECK-P8-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P8-NEXT:    lvx v6, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_5@toc@ha
+; CHECK-P8-NEXT:    lvx v7, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_3@toc@ha
+; CHECK-P8-NEXT:    vperm v5, v4, v2, v5
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI3_5@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI3_3@toc@l
+; CHECK-P8-NEXT:    vperm v0, v4, v2, v0
+; CHECK-P8-NEXT:    lvx v8, 0, r4
+; CHECK-P8-NEXT:    lvx v9, 0, r5
+; CHECK-P8-NEXT:    vperm v1, v4, v2, v1
 ; CHECK-P8-NEXT:    li r4, 112
-; CHECK-P8-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT:    clrldi r6, r5, 56
-; CHECK-P8-NEXT:    rldicl r7, r5, 56, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f0, r6
-; CHECK-P8-NEXT:    rldicl r6, r5, 40, 56
-; CHECK-P8-NEXT:    rldicl r8, r5, 48, 56
-; CHECK-P8-NEXT:    mtvsrwz f1, r7
-; CHECK-P8-NEXT:    rldicl r7, r5, 32, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-P8-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r5, 24, 56
-; CHECK-P8-NEXT:    mtvsrwz f3, r8
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f5, r7
-; CHECK-P8-NEXT:    rldicl r7, r5, 16, 56
-; CHECK-P8-NEXT:    rldicl r5, r5, 8, 56
-; CHECK-P8-NEXT:    mfvsrd r8, f2
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f2, r6
-; CHECK-P8-NEXT:    rlwinm r6, r7, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f6, r6
-; CHECK-P8-NEXT:    clrldi r6, r8, 56
-; CHECK-P8-NEXT:    mtvsrwz f7, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 56, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f8, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 48, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f9, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 40, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f10, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 32, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f11, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 24, 56
-; CHECK-P8-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz f12, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 16, 56
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    xscvuxddp f6, f6
-; CHECK-P8-NEXT:    xscvuxddp f7, f7
-; CHECK-P8-NEXT:    mtvsrwz f13, r5
-; CHECK-P8-NEXT:    rlwinm r5, r6, 0, 24, 31
-; CHECK-P8-NEXT:    mtvsrwz v2, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 8, 56
-; CHECK-P8-NEXT:    xscvuxddp f5, f5
-; CHECK-P8-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P8-NEXT:    xscvuxddp f2, f2
-; CHECK-P8-NEXT:    xscvuxddp f0, f0
-; CHECK-P8-NEXT:    xscvuxddp f1, f1
-; CHECK-P8-NEXT:    xxmrghd vs6, vs7, vs6
-; CHECK-P8-NEXT:    mtvsrwz v3, r5
-; CHECK-P8-NEXT:    li r5, 64
-; CHECK-P8-NEXT:    xscvuxddp f3, f3
-; CHECK-P8-NEXT:    xscvuxddp f4, f4
-; CHECK-P8-NEXT:    xscvuxddp f31, v2
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs5
-; CHECK-P8-NEXT:    xscvuxddp f7, v3
-; CHECK-P8-NEXT:    xscvuxddp f8, f8
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xscvuxddp f9, f9
-; CHECK-P8-NEXT:    xxswapd vs1, vs6
-; CHECK-P8-NEXT:    xscvuxddp f10, f10
-; CHECK-P8-NEXT:    xxswapd vs2, vs2
-; CHECK-P8-NEXT:    xscvuxddp f12, f12
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
-; CHECK-P8-NEXT:    xscvuxddp f13, f13
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
-; CHECK-P8-NEXT:    li r4, 96
-; CHECK-P8-NEXT:    xscvuxddp f11, f11
-; CHECK-P8-NEXT:    xxmrghd vs6, vs7, vs31
-; CHECK-P8-NEXT:    xxswapd vs3, vs3
+; CHECK-P8-NEXT:    li r5, 96
+; CHECK-P8-NEXT:    vperm v6, v4, v2, v6
+; CHECK-P8-NEXT:    vperm v7, v4, v2, v7
+; CHECK-P8-NEXT:    vperm v8, v4, v2, v8
+; CHECK-P8-NEXT:    vperm v2, v4, v2, v9
+; CHECK-P8-NEXT:    xvcvuxddp vs0, v0
+; CHECK-P8-NEXT:    xvcvuxddp vs1, v1
+; CHECK-P8-NEXT:    xvcvuxddp vs2, v6
+; CHECK-P8-NEXT:    xvcvuxddp vs3, v7
+; CHECK-P8-NEXT:    xvcvuxddp vs4, v8
+; CHECK-P8-NEXT:    xvcvuxddp vs5, v2
+; CHECK-P8-NEXT:    xvcvuxddp vs6, v3
 ; CHECK-P8-NEXT:    xxswapd vs0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
-; CHECK-P8-NEXT:    li r4, 80
-; CHECK-P8-NEXT:    xxswapd vs2, vs6
-; CHECK-P8-NEXT:    stxvd2x vs3, r3, r4
-; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    stxvd2x vs0, r3, r5
-; CHECK-P8-NEXT:    li r5, 32
-; CHECK-P8-NEXT:    xxmrghd vs5, vs13, vs12
-; CHECK-P8-NEXT:    xxswapd vs4, vs4
-; CHECK-P8-NEXT:    xxmrghd vs1, vs11, vs10
-; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
-; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    xxswapd vs5, vs5
+; CHECK-P8-NEXT:    xvcvuxddp vs7, v5
 ; CHECK-P8-NEXT:    xxswapd vs1, vs1
-; CHECK-P8-NEXT:    stxvd2x vs5, r3, r5
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
-; CHECK-P8-NEXT:    stxvd2x vs4, 0, r3
-; CHECK-P8-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    xxswapd vs2, vs2
+; CHECK-P8-NEXT:    xxswapd vs3, vs3
+; CHECK-P8-NEXT:    xxswapd vs4, vs4
+; CHECK-P8-NEXT:    xxswapd vs5, vs5
+; CHECK-P8-NEXT:    stxvd2x vs3, r3, r4
+; CHECK-P8-NEXT:    stxvd2x vs2, r3, r5
+; CHECK-P8-NEXT:    li r4, 80
+; CHECK-P8-NEXT:    li r5, 64
+; CHECK-P8-NEXT:    xxswapd vs2, vs7
+; CHECK-P8-NEXT:    xxswapd vs3, vs6
+; CHECK-P8-NEXT:    stxvd2x vs4, r3, r4
+; CHECK-P8-NEXT:    li r4, 48
+; CHECK-P8-NEXT:    stxvd2x vs1, r3, r5
+; CHECK-P8-NEXT:    li r5, 32
+; CHECK-P8-NEXT:    stxvd2x vs5, r3, r4
+; CHECK-P8-NEXT:    li r4, 16
+; CHECK-P8-NEXT:    stxvd2x vs0, r3, r5
+; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
+; CHECK-P8-NEXT:    stxvd2x vs3, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 5
-; CHECK-P9-NEXT:    li r10, 6
-; CHECK-P9-NEXT:    li r11, 7
-; CHECK-P9-NEXT:    li r12, 8
-; CHECK-P9-NEXT:    li r0, 9
-; CHECK-P9-NEXT:    li r30, 10
-; CHECK-P9-NEXT:    li r29, 11
-; CHECK-P9-NEXT:    li r28, 12
-; CHECK-P9-NEXT:    li r27, 13
-; CHECK-P9-NEXT:    li r26, 14
-; CHECK-P9-NEXT:    li r25, 15
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    vextubrx r12, r12, v2
-; CHECK-P9-NEXT:    vextubrx r0, r0, v2
-; CHECK-P9-NEXT:    vextubrx r30, r30, v2
-; CHECK-P9-NEXT:    vextubrx r29, r29, v2
-; CHECK-P9-NEXT:    vextubrx r28, r28, v2
-; CHECK-P9-NEXT:    vextubrx r27, r27, v2
-; CHECK-P9-NEXT:    vextubrx r26, r26, v2
-; CHECK-P9-NEXT:    vextubrx r25, r25, v2
-; CHECK-P9-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r12, r12, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r0, r0, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r30, r30, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r29, r29, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r28, r28, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r27, r27, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r26, r26, 0, 24, 31
-; CHECK-P9-NEXT:    rlwinm r25, r25, 0, 24, 31
-; CHECK-P9-NEXT:    mtvsrwz f0, r4
-; CHECK-P9-NEXT:    mtvsrwz f1, r5
-; CHECK-P9-NEXT:    mtvsrwz f2, r6
-; CHECK-P9-NEXT:    mtvsrwz f3, r7
-; CHECK-P9-NEXT:    mtvsrwz f4, r8
-; CHECK-P9-NEXT:    mtvsrwz f5, r9
-; CHECK-P9-NEXT:    mtvsrwz f6, r10
-; CHECK-P9-NEXT:    mtvsrwz f7, r11
-; CHECK-P9-NEXT:    mtvsrwz f8, r12
-; CHECK-P9-NEXT:    mtvsrwz f9, r0
-; CHECK-P9-NEXT:    mtvsrwz f10, r30
-; CHECK-P9-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f11, r29
-; CHECK-P9-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f12, r28
-; CHECK-P9-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz f13, r27
-; CHECK-P9-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz v2, r26
-; CHECK-P9-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwz v3, r25
-; CHECK-P9-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xscvuxddp f0, f0
-; CHECK-P9-NEXT:    xscvuxddp f1, f1
-; CHECK-P9-NEXT:    xscvuxddp f2, f2
-; CHECK-P9-NEXT:    xscvuxddp f3, f3
-; CHECK-P9-NEXT:    xscvuxddp f4, f4
-; CHECK-P9-NEXT:    xscvuxddp f5, f5
-; CHECK-P9-NEXT:    xscvuxddp f6, f6
-; CHECK-P9-NEXT:    xscvuxddp f7, f7
-; CHECK-P9-NEXT:    xscvuxddp f8, f8
-; CHECK-P9-NEXT:    xscvuxddp f9, f9
-; CHECK-P9-NEXT:    xscvuxddp f10, f10
-; CHECK-P9-NEXT:    xscvuxddp f11, f11
-; CHECK-P9-NEXT:    xscvuxddp f12, f12
-; CHECK-P9-NEXT:    xscvuxddp f13, f13
-; CHECK-P9-NEXT:    xscvuxddp f31, v2
-; CHECK-P9-NEXT:    xscvuxddp f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P9-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    xxlxor v4, v4, v4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    xvcvuxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs4, 64(r3)
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs7, 112(r3)
-; CHECK-P9-NEXT:    stxv vs6, 96(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_4@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_4@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs3, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_5@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_5@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs4, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_6@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_6@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs5, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI3_7@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI3_7@toc@l
+; CHECK-P9-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-P9-NEXT:    stxv vs5, 80(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs6, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v4, v2, v3
+; CHECK-P9-NEXT:    stxv vs6, 96(r3)
+; CHECK-P9-NEXT:    xvcvuxddp vs7, v2
+; CHECK-P9-NEXT:    stxv vs7, 112(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r5, 0
-; CHECK-BE-NEXT:    li r6, 3
-; CHECK-BE-NEXT:    li r7, 2
-; CHECK-BE-NEXT:    li r8, 5
-; CHECK-BE-NEXT:    li r9, 4
-; CHECK-BE-NEXT:    li r10, 7
-; CHECK-BE-NEXT:    li r11, 6
-; CHECK-BE-NEXT:    li r12, 9
-; CHECK-BE-NEXT:    li r0, 8
-; CHECK-BE-NEXT:    li r30, 11
-; CHECK-BE-NEXT:    li r29, 10
-; CHECK-BE-NEXT:    li r28, 13
-; CHECK-BE-NEXT:    li r27, 12
-; CHECK-BE-NEXT:    li r26, 15
-; CHECK-BE-NEXT:    li r25, 14
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    vextublx r12, r12, v2
-; CHECK-BE-NEXT:    vextublx r0, r0, v2
-; CHECK-BE-NEXT:    vextublx r30, r30, v2
-; CHECK-BE-NEXT:    vextublx r29, r29, v2
-; CHECK-BE-NEXT:    vextublx r28, r28, v2
-; CHECK-BE-NEXT:    vextublx r27, r27, v2
-; CHECK-BE-NEXT:    vextublx r26, r26, v2
-; CHECK-BE-NEXT:    vextublx r25, r25, v2
-; CHECK-BE-NEXT:    rlwinm r4, r4, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r6, r6, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r7, r7, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r8, r8, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r9, r9, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r10, r10, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r11, r11, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r12, r12, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r0, r0, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r30, r30, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r29, r29, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r28, r28, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r27, r27, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r26, r26, 0, 24, 31
-; CHECK-BE-NEXT:    rlwinm r25, r25, 0, 24, 31
-; CHECK-BE-NEXT:    mtvsrwz f0, r4
-; CHECK-BE-NEXT:    mtvsrwz f1, r5
-; CHECK-BE-NEXT:    mtvsrwz f2, r6
-; CHECK-BE-NEXT:    mtvsrwz f3, r7
-; CHECK-BE-NEXT:    mtvsrwz f4, r8
-; CHECK-BE-NEXT:    mtvsrwz f5, r9
-; CHECK-BE-NEXT:    mtvsrwz f6, r10
-; CHECK-BE-NEXT:    mtvsrwz f7, r11
-; CHECK-BE-NEXT:    mtvsrwz f8, r12
-; CHECK-BE-NEXT:    mtvsrwz f9, r0
-; CHECK-BE-NEXT:    mtvsrwz f10, r30
-; CHECK-BE-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f11, r29
-; CHECK-BE-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f12, r28
-; CHECK-BE-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz f13, r27
-; CHECK-BE-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz v2, r26
-; CHECK-BE-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwz v3, r25
-; CHECK-BE-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xscvuxddp f0, f0
-; CHECK-BE-NEXT:    xscvuxddp f1, f1
-; CHECK-BE-NEXT:    xscvuxddp f2, f2
-; CHECK-BE-NEXT:    xscvuxddp f3, f3
-; CHECK-BE-NEXT:    xscvuxddp f4, f4
-; CHECK-BE-NEXT:    xscvuxddp f5, f5
-; CHECK-BE-NEXT:    xscvuxddp f6, f6
-; CHECK-BE-NEXT:    xscvuxddp f7, f7
-; CHECK-BE-NEXT:    xscvuxddp f8, f8
-; CHECK-BE-NEXT:    xscvuxddp f9, f9
-; CHECK-BE-NEXT:    xscvuxddp f10, f10
-; CHECK-BE-NEXT:    xscvuxddp f11, f11
-; CHECK-BE-NEXT:    xscvuxddp f12, f12
-; CHECK-BE-NEXT:    xscvuxddp f13, f13
-; CHECK-BE-NEXT:    xscvuxddp f31, v2
-; CHECK-BE-NEXT:    xscvuxddp f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_1@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v4, v3
+; CHECK-BE-NEXT:    xvcvuxddp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_2@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs4, 64(r3)
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs1, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_3@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs7, 112(r3)
-; CHECK-BE-NEXT:    stxv vs6, 96(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs2, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_4@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_4@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs3, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_5@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_5@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs3, 48(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs4, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_6@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_6@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs4, 64(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs5, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI3_7@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI3_7@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
 ; CHECK-BE-NEXT:    stxv vs5, 80(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs6, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs6, 96(r3)
+; CHECK-BE-NEXT:    xvcvuxddp vs7, v2
+; CHECK-BE-NEXT:    stxv vs7, 112(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <16 x i8> %a to <16 x double>
@@ -668,49 +403,41 @@
 define <2 x double> @test2elt_signed(i16 %a.coerce) local_unnamed_addr #0 {
 ; CHECK-P8-LABEL: test2elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI4_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r3
-; CHECK-P8-NEXT:    mfvsrd r3, f0
-; CHECK-P8-NEXT:    clrldi r4, r3, 56
-; CHECK-P8-NEXT:    rldicl r3, r3, 56, 56
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    extsb r3, r3
-; CHECK-P8-NEXT:    mtvsrwa f0, r4
-; CHECK-P8-NEXT:    mtvsrwa f1, r3
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    xxmrghd v2, vs1, vs0
+; CHECK-P8-NEXT:    addi r3, r4, .LCPI4_0@toc@l
+; CHECK-P8-NEXT:    xxswapd v2, vs0
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI4_1@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI4_1@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    vsld v2, v2, v3
+; CHECK-P8-NEXT:    vsrad v2, v2, v3
+; CHECK-P8-NEXT:    xvcvsxddp v2, v2
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test2elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r3
-; CHECK-P9-NEXT:    li r3, 0
-; CHECK-P9-NEXT:    li r4, 1
-; CHECK-P9-NEXT:    vextubrx r3, r3, v2
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    extsb r3, r3
-; CHECK-P9-NEXT:    extsb r4, r4
-; CHECK-P9-NEXT:    mtvsrwa f0, r3
-; CHECK-P9-NEXT:    mtvsrwa f1, r4
-; CHECK-P9-NEXT:    xscvsxddp f0, f0
-; CHECK-P9-NEXT:    xscvsxddp f1, f1
-; CHECK-P9-NEXT:    xxmrghd v2, vs1, vs0
+; CHECK-P9-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-P9-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r3
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P9-NEXT:    vextsb2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp v2, v2
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test2elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r3
-; CHECK-BE-NEXT:    li r3, 1
-; CHECK-BE-NEXT:    li r4, 0
-; CHECK-BE-NEXT:    vextublx r3, r3, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    extsb r3, r3
-; CHECK-BE-NEXT:    extsb r4, r4
-; CHECK-BE-NEXT:    mtvsrwa f0, r3
-; CHECK-BE-NEXT:    mtvsrwa f1, r4
-; CHECK-BE-NEXT:    xscvsxddp f0, f0
-; CHECK-BE-NEXT:    xscvsxddp f1, f1
-; CHECK-BE-NEXT:    xxmrghd v2, vs1, vs0
+; CHECK-BE-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-BE-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r3
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
+; CHECK-BE-NEXT:    vextsb2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp v2, v2
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i16 %a.coerce to <2 x i8>
@@ -721,27 +448,27 @@
 define void @test4elt_signed(<4 x double>* noalias nocapture sret %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI5_0@toc@ha
 ; CHECK-P8-NEXT:    mtvsrd f0, r4
-; CHECK-P8-NEXT:    mfvsrd r4, f0
-; CHECK-P8-NEXT:    clrldi r5, r4, 56
-; CHECK-P8-NEXT:    rldicl r6, r4, 56, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f0, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 48, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 40, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f1, r6
-; CHECK-P8-NEXT:    mtvsrwa f2, r5
-; CHECK-P8-NEXT:    mtvsrwa f3, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI5_2@toc@ha
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI5_0@toc@l
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI5_2@toc@l
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    lvx v4, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI5_1@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI5_1@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r4
 ; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    xscvsxddp f2, f2
-; CHECK-P8-NEXT:    xscvsxddp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
+; CHECK-P8-NEXT:    vperm v2, v3, v3, v2
+; CHECK-P8-NEXT:    vperm v3, v3, v3, v4
+; CHECK-P8-NEXT:    xxswapd v4, vs0
+; CHECK-P8-NEXT:    vsld v2, v2, v4
+; CHECK-P8-NEXT:    vsld v3, v3, v4
+; CHECK-P8-NEXT:    vsrad v2, v2, v4
+; CHECK-P8-NEXT:    vsrad v3, v3, v4
+; CHECK-P8-NEXT:    xvcvsxddp vs0, v2
+; CHECK-P8-NEXT:    xvcvsxddp vs1, v3
 ; CHECK-P8-NEXT:    xxswapd vs0, vs0
 ; CHECK-P8-NEXT:    xxswapd vs1, vs1
 ; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
@@ -751,59 +478,40 @@
 ; CHECK-P9-LABEL: test4elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrws v2, r4
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    extsb r4, r4
-; CHECK-P9-NEXT:    extsb r5, r5
-; CHECK-P9-NEXT:    extsb r6, r6
-; CHECK-P9-NEXT:    extsb r7, r7
-; CHECK-P9-NEXT:    mtvsrwa f0, r4
-; CHECK-P9-NEXT:    mtvsrwa f1, r5
-; CHECK-P9-NEXT:    mtvsrwa f2, r6
-; CHECK-P9-NEXT:    mtvsrwa f3, r7
-; CHECK-P9-NEXT:    xscvsxddp f0, f0
-; CHECK-P9-NEXT:    xscvsxddp f1, f1
-; CHECK-P9-NEXT:    xscvsxddp f2, f2
-; CHECK-P9-NEXT:    xscvsxddp f3, f3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI5_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI5_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI5_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI5_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    vextsb2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp vs1, v2
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test4elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mtvsrws v2, r4
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    li r5, 0
-; CHECK-BE-NEXT:    li r6, 3
-; CHECK-BE-NEXT:    li r7, 2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    extsb r4, r4
-; CHECK-BE-NEXT:    extsb r5, r5
-; CHECK-BE-NEXT:    extsb r6, r6
-; CHECK-BE-NEXT:    extsb r7, r7
-; CHECK-BE-NEXT:    mtvsrwa f0, r4
-; CHECK-BE-NEXT:    mtvsrwa f1, r5
-; CHECK-BE-NEXT:    mtvsrwa f2, r6
-; CHECK-BE-NEXT:    mtvsrwa f3, r7
-; CHECK-BE-NEXT:    xscvsxddp f0, f0
-; CHECK-BE-NEXT:    xscvsxddp f1, f1
-; CHECK-BE-NEXT:    xscvsxddp f2, f2
-; CHECK-BE-NEXT:    xscvsxddp f3, f3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI5_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI5_0@toc@l
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    xxlxor v3, v3, v3
+; CHECK-BE-NEXT:    vperm v3, v3, v2, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI5_1@toc@ha
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI5_1@toc@l
+; CHECK-BE-NEXT:    xvcvsxddp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    vextsb2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs1, v2
+; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i32 %a.coerce to <4 x i8>
@@ -816,161 +524,118 @@
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    mtvsrd f0, r4
-; CHECK-P8-NEXT:    mfvsrd r4, f0
-; CHECK-P8-NEXT:    clrldi r5, r4, 56
-; CHECK-P8-NEXT:    rldicl r6, r4, 56, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f0, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 48, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f1, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 40, 56
-; CHECK-P8-NEXT:    mtvsrwa f2, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 32, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f3, r6
-; CHECK-P8-NEXT:    rldicl r6, r4, 24, 56
-; CHECK-P8-NEXT:    mtvsrwa f4, r5
-; CHECK-P8-NEXT:    rldicl r5, r4, 16, 56
-; CHECK-P8-NEXT:    rldicl r4, r4, 8, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    extsb r4, r4
-; CHECK-P8-NEXT:    mtvsrwa f5, r6
-; CHECK-P8-NEXT:    mtvsrwa f6, r5
-; CHECK-P8-NEXT:    li r5, 32
-; CHECK-P8-NEXT:    mtvsrwa f7, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI6_2@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI6_0@toc@ha
+; CHECK-P8-NEXT:    addis r6, r2, .LCPI6_3@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI6_2@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI6_0@toc@l
+; CHECK-P8-NEXT:    addi r6, r6, .LCPI6_3@toc@l
+; CHECK-P8-NEXT:    lvx v4, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI6_4@toc@ha
+; CHECK-P8-NEXT:    lvx v2, 0, r5
+; CHECK-P8-NEXT:    xxswapd v3, vs0
+; CHECK-P8-NEXT:    lvx v5, 0, r6
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI6_1@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI6_4@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI6_1@toc@l
+; CHECK-P8-NEXT:    lvx v0, 0, r4
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r5
 ; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    xscvsxddp f4, f4
-; CHECK-P8-NEXT:    xscvsxddp f5, f5
-; CHECK-P8-NEXT:    xscvsxddp f6, f6
-; CHECK-P8-NEXT:    xscvsxddp f7, f7
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    xscvsxddp f2, f2
-; CHECK-P8-NEXT:    xscvsxddp f3, f3
-; CHECK-P8-NEXT:    xxmrghd vs4, vs5, vs4
-; CHECK-P8-NEXT:    xxmrghd vs5, vs7, vs6
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P8-NEXT:    xxswapd vs2, vs5
-; CHECK-P8-NEXT:    xxswapd vs3, vs4
+; CHECK-P8-NEXT:    li r5, 32
+; CHECK-P8-NEXT:    vperm v2, v3, v3, v2
+; CHECK-P8-NEXT:    vperm v4, v3, v3, v4
+; CHECK-P8-NEXT:    vperm v5, v3, v3, v5
+; CHECK-P8-NEXT:    vperm v3, v3, v3, v0
+; CHECK-P8-NEXT:    xxswapd v0, vs0
+; CHECK-P8-NEXT:    vsld v2, v2, v0
+; CHECK-P8-NEXT:    vsld v4, v4, v0
+; CHECK-P8-NEXT:    vsld v5, v5, v0
+; CHECK-P8-NEXT:    vsld v3, v3, v0
+; CHECK-P8-NEXT:    vsrad v2, v2, v0
+; CHECK-P8-NEXT:    vsrad v3, v3, v0
+; CHECK-P8-NEXT:    vsrad v4, v4, v0
+; CHECK-P8-NEXT:    vsrad v5, v5, v0
+; CHECK-P8-NEXT:    xvcvsxddp vs2, v3
+; CHECK-P8-NEXT:    xvcvsxddp vs0, v2
+; CHECK-P8-NEXT:    xvcvsxddp vs1, v5
+; CHECK-P8-NEXT:    xvcvsxddp vs3, v4
+; CHECK-P8-NEXT:    xxswapd vs2, vs2
 ; CHECK-P8-NEXT:    xxswapd vs0, vs0
 ; CHECK-P8-NEXT:    xxswapd vs1, vs1
+; CHECK-P8-NEXT:    xxswapd vs3, vs3
 ; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
 ; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    stxvd2x vs3, r3, r5
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
+; CHECK-P8-NEXT:    stxvd2x vs1, r3, r5
+; CHECK-P8-NEXT:    stxvd2x vs3, r3, r4
 ; CHECK-P8-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test8elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    mtvsrd f0, r4
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 5
-; CHECK-P9-NEXT:    li r10, 6
-; CHECK-P9-NEXT:    li r11, 7
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
 ; CHECK-P9-NEXT:    xxswapd v2, vs0
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    extsb r4, r4
-; CHECK-P9-NEXT:    extsb r5, r5
-; CHECK-P9-NEXT:    extsb r6, r6
-; CHECK-P9-NEXT:    extsb r7, r7
-; CHECK-P9-NEXT:    extsb r8, r8
-; CHECK-P9-NEXT:    extsb r9, r9
-; CHECK-P9-NEXT:    extsb r10, r10
-; CHECK-P9-NEXT:    extsb r11, r11
-; CHECK-P9-NEXT:    mtvsrwa f0, r4
-; CHECK-P9-NEXT:    mtvsrwa f1, r5
-; CHECK-P9-NEXT:    mtvsrwa f2, r6
-; CHECK-P9-NEXT:    mtvsrwa f3, r7
-; CHECK-P9-NEXT:    mtvsrwa f4, r8
-; CHECK-P9-NEXT:    mtvsrwa f5, r9
-; CHECK-P9-NEXT:    mtvsrwa f6, r10
-; CHECK-P9-NEXT:    mtvsrwa f7, r11
-; CHECK-P9-NEXT:    xscvsxddp f0, f0
-; CHECK-P9-NEXT:    xscvsxddp f1, f1
-; CHECK-P9-NEXT:    xscvsxddp f2, f2
-; CHECK-P9-NEXT:    xscvsxddp f3, f3
-; CHECK-P9-NEXT:    xscvsxddp f4, f4
-; CHECK-P9-NEXT:    xscvsxddp f5, f5
-; CHECK-P9-NEXT:    xscvsxddp f6, f6
-; CHECK-P9-NEXT:    xscvsxddp f7, f7
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_1@toc@l
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI6_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI6_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    vextsb2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp vs3, v2
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test8elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    li r5, 1
 ; CHECK-BE-NEXT:    mtvsrd v2, r4
-; CHECK-BE-NEXT:    li r4, 0
-; CHECK-BE-NEXT:    li r6, 3
-; CHECK-BE-NEXT:    li r7, 2
-; CHECK-BE-NEXT:    li r8, 5
-; CHECK-BE-NEXT:    li r9, 4
-; CHECK-BE-NEXT:    li r10, 7
-; CHECK-BE-NEXT:    li r11, 6
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    extsb r5, r5
-; CHECK-BE-NEXT:    extsb r4, r4
-; CHECK-BE-NEXT:    extsb r6, r6
-; CHECK-BE-NEXT:    extsb r7, r7
-; CHECK-BE-NEXT:    extsb r8, r8
-; CHECK-BE-NEXT:    extsb r9, r9
-; CHECK-BE-NEXT:    extsb r10, r10
-; CHECK-BE-NEXT:    extsb r11, r11
-; CHECK-BE-NEXT:    mtvsrwa f0, r5
-; CHECK-BE-NEXT:    mtvsrwa f1, r4
-; CHECK-BE-NEXT:    mtvsrwa f2, r6
-; CHECK-BE-NEXT:    mtvsrwa f3, r7
-; CHECK-BE-NEXT:    mtvsrwa f4, r8
-; CHECK-BE-NEXT:    mtvsrwa f5, r9
-; CHECK-BE-NEXT:    mtvsrwa f6, r10
-; CHECK-BE-NEXT:    mtvsrwa f7, r11
-; CHECK-BE-NEXT:    xscvsxddp f0, f0
-; CHECK-BE-NEXT:    xscvsxddp f1, f1
-; CHECK-BE-NEXT:    xscvsxddp f2, f2
-; CHECK-BE-NEXT:    xscvsxddp f3, f3
-; CHECK-BE-NEXT:    xscvsxddp f4, f4
-; CHECK-BE-NEXT:    xscvsxddp f5, f5
-; CHECK-BE-NEXT:    xscvsxddp f6, f6
-; CHECK-BE-NEXT:    xscvsxddp f7, f7
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_0@toc@l
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    xxlxor v4, v4, v4
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_1@toc@ha
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_1@toc@l
+; CHECK-BE-NEXT:    xvcvsxddp vs0, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_2@toc@l
+; CHECK-BE-NEXT:    vperm v3, v4, v2, v3
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs1, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI6_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI6_3@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs1, 48(r3)
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs2, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs2, 0(r3)
+; CHECK-BE-NEXT:    vextsb2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs3, v2
+; CHECK-BE-NEXT:    stxv vs3, 32(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = bitcast i64 %a.coerce to <8 x i8>
@@ -982,338 +647,210 @@
 define void @test16elt_signed(<16 x double>* noalias nocapture sret %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
-; CHECK-P8-NEXT:    mfvsrd r5, v2
-; CHECK-P8-NEXT:    xxswapd vs2, v2
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_0@toc@ha
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_2@toc@ha
+; CHECK-P8-NEXT:    addis r6, r2, .LCPI7_3@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI7_0@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_2@toc@l
+; CHECK-P8-NEXT:    addi r6, r6, .LCPI7_3@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_4@toc@ha
+; CHECK-P8-NEXT:    lvx v4, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_5@toc@ha
+; CHECK-P8-NEXT:    lvx v5, 0, r6
+; CHECK-P8-NEXT:    addis r6, r2, .LCPI7_1@toc@ha
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI7_4@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_5@toc@l
+; CHECK-P8-NEXT:    addi r6, r6, .LCPI7_1@toc@l
+; CHECK-P8-NEXT:    lvx v0, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_6@toc@ha
+; CHECK-P8-NEXT:    lvx v1, 0, r5
+; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_7@toc@ha
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r6
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI7_6@toc@l
+; CHECK-P8-NEXT:    addi r5, r5, .LCPI7_7@toc@l
+; CHECK-P8-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P8-NEXT:    lvx v6, 0, r4
+; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_8@toc@ha
+; CHECK-P8-NEXT:    lvx v7, 0, r5
+; CHECK-P8-NEXT:    vperm v4, v2, v2, v4
+; CHECK-P8-NEXT:    li r5, 96
+; CHECK-P8-NEXT:    addi r4, r4, .LCPI7_8@toc@l
+; CHECK-P8-NEXT:    vperm v5, v2, v2, v5
+; CHECK-P8-NEXT:    xxswapd v9, vs0
+; CHECK-P8-NEXT:    lvx v8, 0, r4
+; CHECK-P8-NEXT:    vperm v0, v2, v2, v0
 ; CHECK-P8-NEXT:    li r4, 112
-; CHECK-P8-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT:    clrldi r6, r5, 56
-; CHECK-P8-NEXT:    rldicl r7, r5, 56, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    extsb r7, r7
-; CHECK-P8-NEXT:    mtvsrwa f0, r6
-; CHECK-P8-NEXT:    rldicl r6, r5, 40, 56
-; CHECK-P8-NEXT:    rldicl r8, r5, 48, 56
-; CHECK-P8-NEXT:    mtvsrwa f1, r7
-; CHECK-P8-NEXT:    rldicl r7, r5, 32, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    extsb r8, r8
-; CHECK-P8-NEXT:    extsb r7, r7
-; CHECK-P8-NEXT:    mtvsrwa f4, r6
-; CHECK-P8-NEXT:    rldicl r6, r5, 24, 56
-; CHECK-P8-NEXT:    mtvsrwa f3, r8
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f5, r7
-; CHECK-P8-NEXT:    rldicl r7, r5, 16, 56
-; CHECK-P8-NEXT:    rldicl r5, r5, 8, 56
-; CHECK-P8-NEXT:    mfvsrd r8, f2
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f2, r6
-; CHECK-P8-NEXT:    extsb r6, r7
-; CHECK-P8-NEXT:    mtvsrwa f6, r6
-; CHECK-P8-NEXT:    clrldi r6, r8, 56
-; CHECK-P8-NEXT:    mtvsrwa f7, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 56, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f8, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 48, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f9, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 40, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f10, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 32, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    mtvsrwa f11, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 24, 56
-; CHECK-P8-NEXT:    extsb r6, r6
-; CHECK-P8-NEXT:    mtvsrwa f12, r6
-; CHECK-P8-NEXT:    rldicl r6, r8, 16, 56
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    xscvsxddp f6, f6
-; CHECK-P8-NEXT:    xscvsxddp f7, f7
-; CHECK-P8-NEXT:    mtvsrwa f13, r5
-; CHECK-P8-NEXT:    extsb r5, r6
-; CHECK-P8-NEXT:    mtvsrwa v2, r5
-; CHECK-P8-NEXT:    rldicl r5, r8, 8, 56
-; CHECK-P8-NEXT:    xscvsxddp f5, f5
-; CHECK-P8-NEXT:    extsb r5, r5
-; CHECK-P8-NEXT:    xscvsxddp f2, f2
-; CHECK-P8-NEXT:    xscvsxddp f0, f0
-; CHECK-P8-NEXT:    xscvsxddp f1, f1
-; CHECK-P8-NEXT:    xxmrghd vs6, vs7, vs6
-; CHECK-P8-NEXT:    mtvsrwa v3, r5
-; CHECK-P8-NEXT:    li r5, 64
-; CHECK-P8-NEXT:    xscvsxddp f3, f3
-; CHECK-P8-NEXT:    xscvsxddp f4, f4
-; CHECK-P8-NEXT:    xscvsxddp f31, v2
-; CHECK-P8-NEXT:    xxmrghd vs2, vs2, vs5
-; CHECK-P8-NEXT:    xscvsxddp f7, v3
-; CHECK-P8-NEXT:    xscvsxddp f8, f8
-; CHECK-P8-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P8-NEXT:    xscvsxddp f9, f9
-; CHECK-P8-NEXT:    xxswapd vs1, vs6
-; CHECK-P8-NEXT:    xscvsxddp f10, f10
+; CHECK-P8-NEXT:    vperm v1, v2, v2, v1
+; CHECK-P8-NEXT:    vperm v6, v2, v2, v6
+; CHECK-P8-NEXT:    vperm v7, v2, v2, v7
+; CHECK-P8-NEXT:    vperm v2, v2, v2, v8
+; CHECK-P8-NEXT:    vsld v3, v3, v9
+; CHECK-P8-NEXT:    vsld v0, v0, v9
+; CHECK-P8-NEXT:    vsld v1, v1, v9
+; CHECK-P8-NEXT:    vsld v6, v6, v9
+; CHECK-P8-NEXT:    vsld v7, v7, v9
+; CHECK-P8-NEXT:    vsld v2, v2, v9
+; CHECK-P8-NEXT:    vsrad v7, v7, v9
+; CHECK-P8-NEXT:    vsrad v2, v2, v9
+; CHECK-P8-NEXT:    vsld v4, v4, v9
+; CHECK-P8-NEXT:    vsld v5, v5, v9
+; CHECK-P8-NEXT:    vsrad v6, v6, v9
+; CHECK-P8-NEXT:    vsrad v0, v0, v9
+; CHECK-P8-NEXT:    vsrad v1, v1, v9
+; CHECK-P8-NEXT:    xvcvsxddp vs2, v7
+; CHECK-P8-NEXT:    xvcvsxddp vs3, v2
+; CHECK-P8-NEXT:    vsrad v3, v3, v9
+; CHECK-P8-NEXT:    vsrad v4, v4, v9
+; CHECK-P8-NEXT:    vsrad v5, v5, v9
+; CHECK-P8-NEXT:    xvcvsxddp vs4, v6
+; CHECK-P8-NEXT:    xvcvsxddp vs1, v1
 ; CHECK-P8-NEXT:    xxswapd vs2, vs2
-; CHECK-P8-NEXT:    xscvsxddp f12, f12
-; CHECK-P8-NEXT:    xxmrghd vs3, vs4, vs3
-; CHECK-P8-NEXT:    xscvsxddp f13, f13
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
-; CHECK-P8-NEXT:    li r4, 96
-; CHECK-P8-NEXT:    xscvsxddp f11, f11
-; CHECK-P8-NEXT:    xxmrghd vs6, vs7, vs31
+; CHECK-P8-NEXT:    xvcvsxddp vs5, v0
 ; CHECK-P8-NEXT:    xxswapd vs3, vs3
-; CHECK-P8-NEXT:    xxswapd vs0, vs0
-; CHECK-P8-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
-; CHECK-P8-NEXT:    li r4, 80
-; CHECK-P8-NEXT:    xxswapd vs2, vs6
+; CHECK-P8-NEXT:    xvcvsxddp vs0, v5
+; CHECK-P8-NEXT:    xvcvsxddp vs6, v3
+; CHECK-P8-NEXT:    xvcvsxddp vs7, v4
 ; CHECK-P8-NEXT:    stxvd2x vs3, r3, r4
-; CHECK-P8-NEXT:    li r4, 48
-; CHECK-P8-NEXT:    stxvd2x vs0, r3, r5
-; CHECK-P8-NEXT:    li r5, 32
-; CHECK-P8-NEXT:    xxmrghd vs5, vs13, vs12
+; CHECK-P8-NEXT:    li r4, 80
 ; CHECK-P8-NEXT:    xxswapd vs4, vs4
-; CHECK-P8-NEXT:    xxmrghd vs1, vs11, vs10
-; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
-; CHECK-P8-NEXT:    li r4, 16
-; CHECK-P8-NEXT:    xxswapd vs5, vs5
+; CHECK-P8-NEXT:    stxvd2x vs2, r3, r5
+; CHECK-P8-NEXT:    li r5, 64
 ; CHECK-P8-NEXT:    xxswapd vs1, vs1
-; CHECK-P8-NEXT:    stxvd2x vs5, r3, r5
-; CHECK-P8-NEXT:    stxvd2x vs1, r3, r4
-; CHECK-P8-NEXT:    stxvd2x vs4, 0, r3
-; CHECK-P8-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT:    xxswapd vs5, vs5
+; CHECK-P8-NEXT:    xxswapd vs0, vs0
+; CHECK-P8-NEXT:    stxvd2x vs4, r3, r4
+; CHECK-P8-NEXT:    li r4, 48
+; CHECK-P8-NEXT:    xxswapd vs3, vs6
+; CHECK-P8-NEXT:    stxvd2x vs1, r3, r5
+; CHECK-P8-NEXT:    li r5, 32
+; CHECK-P8-NEXT:    xxswapd vs2, vs7
+; CHECK-P8-NEXT:    stxvd2x vs5, r3, r4
+; CHECK-P8-NEXT:    li r4, 16
+; CHECK-P8-NEXT:    stxvd2x vs0, r3, r5
+; CHECK-P8-NEXT:    stxvd2x vs2, r3, r4
+; CHECK-P8-NEXT:    stxvd2x vs3, 0, r3
 ; CHECK-P8-NEXT:    blr
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r4, 0
-; CHECK-P9-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    li r5, 1
-; CHECK-P9-NEXT:    li r6, 2
-; CHECK-P9-NEXT:    li r7, 3
-; CHECK-P9-NEXT:    li r8, 4
-; CHECK-P9-NEXT:    li r9, 5
-; CHECK-P9-NEXT:    li r10, 6
-; CHECK-P9-NEXT:    li r11, 7
-; CHECK-P9-NEXT:    li r12, 8
-; CHECK-P9-NEXT:    li r0, 9
-; CHECK-P9-NEXT:    li r30, 10
-; CHECK-P9-NEXT:    li r29, 11
-; CHECK-P9-NEXT:    li r28, 12
-; CHECK-P9-NEXT:    li r27, 13
-; CHECK-P9-NEXT:    li r26, 14
-; CHECK-P9-NEXT:    li r25, 15
-; CHECK-P9-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-P9-NEXT:    vextubrx r4, r4, v2
-; CHECK-P9-NEXT:    vextubrx r5, r5, v2
-; CHECK-P9-NEXT:    vextubrx r6, r6, v2
-; CHECK-P9-NEXT:    vextubrx r7, r7, v2
-; CHECK-P9-NEXT:    vextubrx r8, r8, v2
-; CHECK-P9-NEXT:    vextubrx r9, r9, v2
-; CHECK-P9-NEXT:    vextubrx r10, r10, v2
-; CHECK-P9-NEXT:    vextubrx r11, r11, v2
-; CHECK-P9-NEXT:    vextubrx r12, r12, v2
-; CHECK-P9-NEXT:    vextubrx r0, r0, v2
-; CHECK-P9-NEXT:    vextubrx r30, r30, v2
-; CHECK-P9-NEXT:    vextubrx r29, r29, v2
-; CHECK-P9-NEXT:    vextubrx r28, r28, v2
-; CHECK-P9-NEXT:    vextubrx r27, r27, v2
-; CHECK-P9-NEXT:    vextubrx r26, r26, v2
-; CHECK-P9-NEXT:    vextubrx r25, r25, v2
-; CHECK-P9-NEXT:    extsb r4, r4
-; CHECK-P9-NEXT:    extsb r5, r5
-; CHECK-P9-NEXT:    extsb r6, r6
-; CHECK-P9-NEXT:    extsb r7, r7
-; CHECK-P9-NEXT:    extsb r8, r8
-; CHECK-P9-NEXT:    extsb r9, r9
-; CHECK-P9-NEXT:    extsb r10, r10
-; CHECK-P9-NEXT:    extsb r11, r11
-; CHECK-P9-NEXT:    extsb r12, r12
-; CHECK-P9-NEXT:    extsb r0, r0
-; CHECK-P9-NEXT:    extsb r30, r30
-; CHECK-P9-NEXT:    extsb r29, r29
-; CHECK-P9-NEXT:    extsb r28, r28
-; CHECK-P9-NEXT:    extsb r27, r27
-; CHECK-P9-NEXT:    extsb r26, r26
-; CHECK-P9-NEXT:    extsb r25, r25
-; CHECK-P9-NEXT:    mtvsrwa f0, r4
-; CHECK-P9-NEXT:    mtvsrwa f1, r5
-; CHECK-P9-NEXT:    mtvsrwa f2, r6
-; CHECK-P9-NEXT:    mtvsrwa f3, r7
-; CHECK-P9-NEXT:    mtvsrwa f4, r8
-; CHECK-P9-NEXT:    mtvsrwa f5, r9
-; CHECK-P9-NEXT:    mtvsrwa f6, r10
-; CHECK-P9-NEXT:    mtvsrwa f7, r11
-; CHECK-P9-NEXT:    mtvsrwa f8, r12
-; CHECK-P9-NEXT:    mtvsrwa f9, r0
-; CHECK-P9-NEXT:    mtvsrwa f10, r30
-; CHECK-P9-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f11, r29
-; CHECK-P9-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f12, r28
-; CHECK-P9-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa f13, r27
-; CHECK-P9-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa v2, r26
-; CHECK-P9-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    mtvsrwa v3, r25
-; CHECK-P9-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    xscvsxddp f0, f0
-; CHECK-P9-NEXT:    xscvsxddp f1, f1
-; CHECK-P9-NEXT:    xscvsxddp f2, f2
-; CHECK-P9-NEXT:    xscvsxddp f3, f3
-; CHECK-P9-NEXT:    xscvsxddp f4, f4
-; CHECK-P9-NEXT:    xscvsxddp f5, f5
-; CHECK-P9-NEXT:    xscvsxddp f6, f6
-; CHECK-P9-NEXT:    xscvsxddp f7, f7
-; CHECK-P9-NEXT:    xscvsxddp f8, f8
-; CHECK-P9-NEXT:    xscvsxddp f9, f9
-; CHECK-P9-NEXT:    xscvsxddp f10, f10
-; CHECK-P9-NEXT:    xscvsxddp f11, f11
-; CHECK-P9-NEXT:    xscvsxddp f12, f12
-; CHECK-P9-NEXT:    xscvsxddp f13, f13
-; CHECK-P9-NEXT:    xscvsxddp f31, v2
-; CHECK-P9-NEXT:    xscvsxddp f30, v3
-; CHECK-P9-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-P9-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-P9-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-P9-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-P9-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-P9-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-P9-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-P9-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-P9-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-P9-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_0@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_0@toc@l
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_1@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_1@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs0, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_2@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_2@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
 ; CHECK-P9-NEXT:    stxv vs0, 0(r3)
-; CHECK-P9-NEXT:    stxv vs4, 64(r3)
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs1, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_3@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_3@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
 ; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs7, 112(r3)
-; CHECK-P9-NEXT:    stxv vs6, 96(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs2, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_4@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_4@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs3, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_5@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_5@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs4, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_6@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_6@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs5, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    addis r4, r2, .LCPI7_7@toc@ha
+; CHECK-P9-NEXT:    addi r4, r4, .LCPI7_7@toc@l
+; CHECK-P9-NEXT:    vperm v3, v2, v2, v3
 ; CHECK-P9-NEXT:    stxv vs5, 80(r3)
+; CHECK-P9-NEXT:    vextsb2d v3, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs6, v3
+; CHECK-P9-NEXT:    lxvx v3, 0, r4
+; CHECK-P9-NEXT:    vperm v2, v2, v2, v3
+; CHECK-P9-NEXT:    stxv vs6, 96(r3)
+; CHECK-P9-NEXT:    vextsb2d v2, v2
+; CHECK-P9-NEXT:    xvcvsxddp vs7, v2
+; CHECK-P9-NEXT:    stxv vs7, 112(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    std r25, -72(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r26, -64(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r27, -56(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r28, -48(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    std r29, -40(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r4, 1
-; CHECK-BE-NEXT:    std r30, -32(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    li r5, 0
-; CHECK-BE-NEXT:    li r6, 3
-; CHECK-BE-NEXT:    li r7, 2
-; CHECK-BE-NEXT:    li r8, 5
-; CHECK-BE-NEXT:    li r9, 4
-; CHECK-BE-NEXT:    li r10, 7
-; CHECK-BE-NEXT:    li r11, 6
-; CHECK-BE-NEXT:    li r12, 9
-; CHECK-BE-NEXT:    li r0, 8
-; CHECK-BE-NEXT:    li r30, 11
-; CHECK-BE-NEXT:    li r29, 10
-; CHECK-BE-NEXT:    li r28, 13
-; CHECK-BE-NEXT:    li r27, 12
-; CHECK-BE-NEXT:    li r26, 15
-; CHECK-BE-NEXT:    li r25, 14
-; CHECK-BE-NEXT:    stfd f30, -16(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    stfd f31, -8(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    vextublx r4, r4, v2
-; CHECK-BE-NEXT:    vextublx r5, r5, v2
-; CHECK-BE-NEXT:    vextublx r6, r6, v2
-; CHECK-BE-NEXT:    vextublx r7, r7, v2
-; CHECK-BE-NEXT:    vextublx r8, r8, v2
-; CHECK-BE-NEXT:    vextublx r9, r9, v2
-; CHECK-BE-NEXT:    vextublx r10, r10, v2
-; CHECK-BE-NEXT:    vextublx r11, r11, v2
-; CHECK-BE-NEXT:    vextublx r12, r12, v2
-; CHECK-BE-NEXT:    vextublx r0, r0, v2
-; CHECK-BE-NEXT:    vextublx r30, r30, v2
-; CHECK-BE-NEXT:    vextublx r29, r29, v2
-; CHECK-BE-NEXT:    vextublx r28, r28, v2
-; CHECK-BE-NEXT:    vextublx r27, r27, v2
-; CHECK-BE-NEXT:    vextublx r26, r26, v2
-; CHECK-BE-NEXT:    vextublx r25, r25, v2
-; CHECK-BE-NEXT:    extsb r4, r4
-; CHECK-BE-NEXT:    extsb r5, r5
-; CHECK-BE-NEXT:    extsb r6, r6
-; CHECK-BE-NEXT:    extsb r7, r7
-; CHECK-BE-NEXT:    extsb r8, r8
-; CHECK-BE-NEXT:    extsb r9, r9
-; CHECK-BE-NEXT:    extsb r10, r10
-; CHECK-BE-NEXT:    extsb r11, r11
-; CHECK-BE-NEXT:    extsb r12, r12
-; CHECK-BE-NEXT:    extsb r0, r0
-; CHECK-BE-NEXT:    extsb r30, r30
-; CHECK-BE-NEXT:    extsb r29, r29
-; CHECK-BE-NEXT:    extsb r28, r28
-; CHECK-BE-NEXT:    extsb r27, r27
-; CHECK-BE-NEXT:    extsb r26, r26
-; CHECK-BE-NEXT:    extsb r25, r25
-; CHECK-BE-NEXT:    mtvsrwa f0, r4
-; CHECK-BE-NEXT:    mtvsrwa f1, r5
-; CHECK-BE-NEXT:    mtvsrwa f2, r6
-; CHECK-BE-NEXT:    mtvsrwa f3, r7
-; CHECK-BE-NEXT:    mtvsrwa f4, r8
-; CHECK-BE-NEXT:    mtvsrwa f5, r9
-; CHECK-BE-NEXT:    mtvsrwa f6, r10
-; CHECK-BE-NEXT:    mtvsrwa f7, r11
-; CHECK-BE-NEXT:    mtvsrwa f8, r12
-; CHECK-BE-NEXT:    mtvsrwa f9, r0
-; CHECK-BE-NEXT:    mtvsrwa f10, r30
-; CHECK-BE-NEXT:    ld r30, -32(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f11, r29
-; CHECK-BE-NEXT:    ld r29, -40(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f12, r28
-; CHECK-BE-NEXT:    ld r28, -48(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa f13, r27
-; CHECK-BE-NEXT:    ld r27, -56(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa v2, r26
-; CHECK-BE-NEXT:    ld r26, -64(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    mtvsrwa v3, r25
-; CHECK-BE-NEXT:    ld r25, -72(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    xscvsxddp f0, f0
-; CHECK-BE-NEXT:    xscvsxddp f1, f1
-; CHECK-BE-NEXT:    xscvsxddp f2, f2
-; CHECK-BE-NEXT:    xscvsxddp f3, f3
-; CHECK-BE-NEXT:    xscvsxddp f4, f4
-; CHECK-BE-NEXT:    xscvsxddp f5, f5
-; CHECK-BE-NEXT:    xscvsxddp f6, f6
-; CHECK-BE-NEXT:    xscvsxddp f7, f7
-; CHECK-BE-NEXT:    xscvsxddp f8, f8
-; CHECK-BE-NEXT:    xscvsxddp f9, f9
-; CHECK-BE-NEXT:    xscvsxddp f10, f10
-; CHECK-BE-NEXT:    xscvsxddp f11, f11
-; CHECK-BE-NEXT:    xscvsxddp f12, f12
-; CHECK-BE-NEXT:    xscvsxddp f13, f13
-; CHECK-BE-NEXT:    xscvsxddp f31, v2
-; CHECK-BE-NEXT:    xscvsxddp f30, v3
-; CHECK-BE-NEXT:    xxmrghd vs0, vs1, vs0
-; CHECK-BE-NEXT:    xxmrghd vs1, vs3, vs2
-; CHECK-BE-NEXT:    xxmrghd vs2, vs5, vs4
-; CHECK-BE-NEXT:    xxmrghd vs3, vs7, vs6
-; CHECK-BE-NEXT:    xxmrghd vs4, vs9, vs8
-; CHECK-BE-NEXT:    xxmrghd vs5, vs11, vs10
-; CHECK-BE-NEXT:    xxmrghd vs6, vs13, vs12
-; CHECK-BE-NEXT:    xxmrghd vs7, vs30, vs31
-; CHECK-BE-NEXT:    lfd f31, -8(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    lfd f30, -16(r1) # 8-byte Folded Reload
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
-; CHECK-BE-NEXT:    stxv vs4, 64(r3)
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs7, 112(r3)
-; CHECK-BE-NEXT:    stxv vs6, 96(r3)
-; CHECK-BE-NEXT:    stxv vs5, 80(r3)
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_0@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_0@toc@l
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    xxlxor v3, v3, v3
+; CHECK-BE-NEXT:    vperm v4, v3, v2, v4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_1@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_1@toc@l
+; CHECK-BE-NEXT:    vextsb2d v4, v4
+; CHECK-BE-NEXT:    xvcvsxddp vs0, v4
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_2@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_2@toc@l
+; CHECK-BE-NEXT:    vperm v4, v3, v2, v4
+; CHECK-BE-NEXT:    stxv vs0, 16(r3)
+; CHECK-BE-NEXT:    vextsb2d v4, v4
+; CHECK-BE-NEXT:    xvcvsxddp vs1, v4
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_3@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_3@toc@l
+; CHECK-BE-NEXT:    vperm v4, v3, v2, v4
+; CHECK-BE-NEXT:    stxv vs1, 48(r3)
+; CHECK-BE-NEXT:    vextsb2d v4, v4
+; CHECK-BE-NEXT:    xvcvsxddp vs2, v4
+; CHECK-BE-NEXT:    lxvx v4, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_4@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_4@toc@l
+; CHECK-BE-NEXT:    vperm v3, v3, v2, v4
+; CHECK-BE-NEXT:    stxv vs2, 80(r3)
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs3, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_5@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_5@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs3, 112(r3)
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs4, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_6@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_6@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs4, 0(r3)
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs5, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    addis r4, r2, .LCPI7_7@toc@ha
+; CHECK-BE-NEXT:    addi r4, r4, .LCPI7_7@toc@l
+; CHECK-BE-NEXT:    vperm v3, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs5, 32(r3)
+; CHECK-BE-NEXT:    vextsb2d v3, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs6, v3
+; CHECK-BE-NEXT:    lxvx v3, 0, r4
+; CHECK-BE-NEXT:    vperm v2, v2, v2, v3
+; CHECK-BE-NEXT:    stxv vs6, 64(r3)
+; CHECK-BE-NEXT:    vextsb2d v2, v2
+; CHECK-BE-NEXT:    xvcvsxddp vs7, v2
+; CHECK-BE-NEXT:    stxv vs7, 96(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <16 x i8> %a to <16 x double>
diff --git a/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll b/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll
index 5bf15e3..45e306e 100644
--- a/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll
+++ b/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll
@@ -163,58 +163,58 @@
 ;
 ; CHECK-P9-LABEL: test16elt:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv v2, 48(r4)
-; CHECK-P9-NEXT:    lxv v3, 32(r4)
-; CHECK-P9-NEXT:    lxv v4, 16(r4)
-; CHECK-P9-NEXT:    lxv v5, 0(r4)
-; CHECK-P9-NEXT:    lxv v0, 112(r4)
-; CHECK-P9-NEXT:    lxv v1, 96(r4)
-; CHECK-P9-NEXT:    lxv v6, 80(r4)
-; CHECK-P9-NEXT:    lxv v7, 64(r4)
-; CHECK-P9-NEXT:    xvcvuxddp vs0, v5
-; CHECK-P9-NEXT:    xvcvuxddp vs1, v4
-; CHECK-P9-NEXT:    xvcvuxddp vs2, v3
-; CHECK-P9-NEXT:    xvcvuxddp vs3, v2
-; CHECK-P9-NEXT:    xvcvuxddp vs4, v7
-; CHECK-P9-NEXT:    xvcvuxddp vs5, v6
-; CHECK-P9-NEXT:    xvcvuxddp vs6, v1
-; CHECK-P9-NEXT:    xvcvuxddp vs7, v0
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    lxv v2, 112(r4)
+; CHECK-P9-NEXT:    lxv v3, 96(r4)
+; CHECK-P9-NEXT:    lxv v4, 80(r4)
+; CHECK-P9-NEXT:    lxv v5, 64(r4)
+; CHECK-P9-NEXT:    lxv v0, 48(r4)
+; CHECK-P9-NEXT:    xvcvuxddp vs3, v0
+; CHECK-P9-NEXT:    lxv v1, 32(r4)
+; CHECK-P9-NEXT:    lxv v6, 16(r4)
+; CHECK-P9-NEXT:    lxv v7, 0(r4)
+; CHECK-P9-NEXT:    xvcvuxddp vs0, v7
+; CHECK-P9-NEXT:    xvcvuxddp vs1, v6
+; CHECK-P9-NEXT:    xvcvuxddp vs2, v1
+; CHECK-P9-NEXT:    xvcvuxddp vs4, v5
+; CHECK-P9-NEXT:    xvcvuxddp vs5, v4
+; CHECK-P9-NEXT:    xvcvuxddp vs6, v3
+; CHECK-P9-NEXT:    xvcvuxddp vs7, v2
 ; CHECK-P9-NEXT:    stxv vs7, 112(r3)
 ; CHECK-P9-NEXT:    stxv vs6, 96(r3)
 ; CHECK-P9-NEXT:    stxv vs5, 80(r3)
 ; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv v2, 48(r4)
-; CHECK-BE-NEXT:    lxv v3, 32(r4)
-; CHECK-BE-NEXT:    lxv v4, 16(r4)
-; CHECK-BE-NEXT:    lxv v5, 0(r4)
-; CHECK-BE-NEXT:    lxv v0, 112(r4)
-; CHECK-BE-NEXT:    lxv v1, 96(r4)
-; CHECK-BE-NEXT:    lxv v6, 80(r4)
-; CHECK-BE-NEXT:    lxv v7, 64(r4)
-; CHECK-BE-NEXT:    xvcvuxddp vs0, v5
-; CHECK-BE-NEXT:    xvcvuxddp vs1, v4
-; CHECK-BE-NEXT:    xvcvuxddp vs2, v3
-; CHECK-BE-NEXT:    xvcvuxddp vs3, v2
-; CHECK-BE-NEXT:    xvcvuxddp vs4, v7
-; CHECK-BE-NEXT:    xvcvuxddp vs5, v6
-; CHECK-BE-NEXT:    xvcvuxddp vs6, v1
-; CHECK-BE-NEXT:    xvcvuxddp vs7, v0
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    lxv v2, 112(r4)
+; CHECK-BE-NEXT:    lxv v3, 96(r4)
+; CHECK-BE-NEXT:    lxv v4, 80(r4)
+; CHECK-BE-NEXT:    lxv v5, 64(r4)
+; CHECK-BE-NEXT:    lxv v0, 48(r4)
+; CHECK-BE-NEXT:    xvcvuxddp vs3, v0
+; CHECK-BE-NEXT:    lxv v1, 32(r4)
+; CHECK-BE-NEXT:    lxv v6, 16(r4)
+; CHECK-BE-NEXT:    lxv v7, 0(r4)
+; CHECK-BE-NEXT:    xvcvuxddp vs0, v7
+; CHECK-BE-NEXT:    xvcvuxddp vs1, v6
+; CHECK-BE-NEXT:    xvcvuxddp vs2, v1
+; CHECK-BE-NEXT:    xvcvuxddp vs4, v5
+; CHECK-BE-NEXT:    xvcvuxddp vs5, v4
+; CHECK-BE-NEXT:    xvcvuxddp vs6, v3
+; CHECK-BE-NEXT:    xvcvuxddp vs7, v2
 ; CHECK-BE-NEXT:    stxv vs7, 112(r3)
 ; CHECK-BE-NEXT:    stxv vs6, 96(r3)
 ; CHECK-BE-NEXT:    stxv vs5, 80(r3)
 ; CHECK-BE-NEXT:    stxv vs4, 64(r3)
+; CHECK-BE-NEXT:    stxv vs3, 48(r3)
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i64>, <16 x i64>* %0, align 128
@@ -377,58 +377,58 @@
 ;
 ; CHECK-P9-LABEL: test16elt_signed:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lxv v2, 48(r4)
-; CHECK-P9-NEXT:    lxv v3, 32(r4)
-; CHECK-P9-NEXT:    lxv v4, 16(r4)
-; CHECK-P9-NEXT:    lxv v5, 0(r4)
-; CHECK-P9-NEXT:    lxv v0, 112(r4)
-; CHECK-P9-NEXT:    lxv v1, 96(r4)
-; CHECK-P9-NEXT:    lxv v6, 80(r4)
-; CHECK-P9-NEXT:    lxv v7, 64(r4)
-; CHECK-P9-NEXT:    xvcvsxddp vs0, v5
-; CHECK-P9-NEXT:    xvcvsxddp vs1, v4
-; CHECK-P9-NEXT:    xvcvsxddp vs2, v3
-; CHECK-P9-NEXT:    xvcvsxddp vs3, v2
-; CHECK-P9-NEXT:    xvcvsxddp vs4, v7
-; CHECK-P9-NEXT:    xvcvsxddp vs5, v6
-; CHECK-P9-NEXT:    xvcvsxddp vs6, v1
-; CHECK-P9-NEXT:    xvcvsxddp vs7, v0
-; CHECK-P9-NEXT:    stxv vs3, 48(r3)
-; CHECK-P9-NEXT:    stxv vs2, 32(r3)
-; CHECK-P9-NEXT:    stxv vs1, 16(r3)
-; CHECK-P9-NEXT:    stxv vs0, 0(r3)
+; CHECK-P9-NEXT:    lxv v2, 112(r4)
+; CHECK-P9-NEXT:    lxv v3, 96(r4)
+; CHECK-P9-NEXT:    lxv v4, 80(r4)
+; CHECK-P9-NEXT:    lxv v5, 64(r4)
+; CHECK-P9-NEXT:    lxv v0, 48(r4)
+; CHECK-P9-NEXT:    xvcvsxddp vs3, v0
+; CHECK-P9-NEXT:    lxv v1, 32(r4)
+; CHECK-P9-NEXT:    lxv v6, 16(r4)
+; CHECK-P9-NEXT:    lxv v7, 0(r4)
+; CHECK-P9-NEXT:    xvcvsxddp vs0, v7
+; CHECK-P9-NEXT:    xvcvsxddp vs1, v6
+; CHECK-P9-NEXT:    xvcvsxddp vs2, v1
+; CHECK-P9-NEXT:    xvcvsxddp vs4, v5
+; CHECK-P9-NEXT:    xvcvsxddp vs5, v4
+; CHECK-P9-NEXT:    xvcvsxddp vs6, v3
+; CHECK-P9-NEXT:    xvcvsxddp vs7, v2
 ; CHECK-P9-NEXT:    stxv vs7, 112(r3)
 ; CHECK-P9-NEXT:    stxv vs6, 96(r3)
 ; CHECK-P9-NEXT:    stxv vs5, 80(r3)
 ; CHECK-P9-NEXT:    stxv vs4, 64(r3)
+; CHECK-P9-NEXT:    stxv vs3, 48(r3)
+; CHECK-P9-NEXT:    stxv vs2, 32(r3)
+; CHECK-P9-NEXT:    stxv vs1, 16(r3)
+; CHECK-P9-NEXT:    stxv vs0, 0(r3)
 ; CHECK-P9-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: test16elt_signed:
 ; CHECK-BE:       # %bb.0: # %entry
-; CHECK-BE-NEXT:    lxv v2, 48(r4)
-; CHECK-BE-NEXT:    lxv v3, 32(r4)
-; CHECK-BE-NEXT:    lxv v4, 16(r4)
-; CHECK-BE-NEXT:    lxv v5, 0(r4)
-; CHECK-BE-NEXT:    lxv v0, 112(r4)
-; CHECK-BE-NEXT:    lxv v1, 96(r4)
-; CHECK-BE-NEXT:    lxv v6, 80(r4)
-; CHECK-BE-NEXT:    lxv v7, 64(r4)
-; CHECK-BE-NEXT:    xvcvsxddp vs0, v5
-; CHECK-BE-NEXT:    xvcvsxddp vs1, v4
-; CHECK-BE-NEXT:    xvcvsxddp vs2, v3
-; CHECK-BE-NEXT:    xvcvsxddp vs3, v2
-; CHECK-BE-NEXT:    xvcvsxddp vs4, v7
-; CHECK-BE-NEXT:    xvcvsxddp vs5, v6
-; CHECK-BE-NEXT:    xvcvsxddp vs6, v1
-; CHECK-BE-NEXT:    xvcvsxddp vs7, v0
-; CHECK-BE-NEXT:    stxv vs3, 48(r3)
-; CHECK-BE-NEXT:    stxv vs2, 32(r3)
-; CHECK-BE-NEXT:    stxv vs1, 16(r3)
-; CHECK-BE-NEXT:    stxv vs0, 0(r3)
+; CHECK-BE-NEXT:    lxv v2, 112(r4)
+; CHECK-BE-NEXT:    lxv v3, 96(r4)
+; CHECK-BE-NEXT:    lxv v4, 80(r4)
+; CHECK-BE-NEXT:    lxv v5, 64(r4)
+; CHECK-BE-NEXT:    lxv v0, 48(r4)
+; CHECK-BE-NEXT:    xvcvsxddp vs3, v0
+; CHECK-BE-NEXT:    lxv v1, 32(r4)
+; CHECK-BE-NEXT:    lxv v6, 16(r4)
+; CHECK-BE-NEXT:    lxv v7, 0(r4)
+; CHECK-BE-NEXT:    xvcvsxddp vs0, v7
+; CHECK-BE-NEXT:    xvcvsxddp vs1, v6
+; CHECK-BE-NEXT:    xvcvsxddp vs2, v1
+; CHECK-BE-NEXT:    xvcvsxddp vs4, v5
+; CHECK-BE-NEXT:    xvcvsxddp vs5, v4
+; CHECK-BE-NEXT:    xvcvsxddp vs6, v3
+; CHECK-BE-NEXT:    xvcvsxddp vs7, v2
 ; CHECK-BE-NEXT:    stxv vs7, 112(r3)
 ; CHECK-BE-NEXT:    stxv vs6, 96(r3)
 ; CHECK-BE-NEXT:    stxv vs5, 80(r3)
 ; CHECK-BE-NEXT:    stxv vs4, 64(r3)
+; CHECK-BE-NEXT:    stxv vs3, 48(r3)
+; CHECK-BE-NEXT:    stxv vs2, 32(r3)
+; CHECK-BE-NEXT:    stxv vs1, 16(r3)
+; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
   %a = load <16 x i64>, <16 x i64>* %0, align 128
diff --git a/test/CodeGen/PowerPC/vsx-p9.ll b/test/CodeGen/PowerPC/vsx-p9.ll
index d7bea34..0dec6dd 100644
--- a/test/CodeGen/PowerPC/vsx-p9.ll
+++ b/test/CodeGen/PowerPC/vsx-p9.ll
@@ -37,7 +37,7 @@
   %add.i = add <16 x i8> %1, %0
   tail call void (...) @sink(<16 x i8> %add.i)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vaddubm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -46,7 +46,7 @@
   %add.i22 = add <16 x i8> %3, %2
   tail call void (...) @sink(<16 x i8> %add.i22)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vaddubm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -55,7 +55,7 @@
   %add.i21 = add <8 x i16> %5, %4
   tail call void (...) @sink(<8 x i16> %add.i21)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vadduhm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -64,7 +64,7 @@
   %add.i20 = add <8 x i16> %7, %6
   tail call void (...) @sink(<8 x i16> %add.i20)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vadduhm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -73,7 +73,7 @@
   %add.i19 = add <4 x i32> %9, %8
   tail call void (...) @sink(<4 x i32> %add.i19)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vadduwm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -82,7 +82,7 @@
   %add.i18 = add <4 x i32> %11, %10
   tail call void (...) @sink(<4 x i32> %add.i18)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vadduwm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -91,7 +91,7 @@
   %add.i17 = add <2 x i64> %13, %12
   tail call void (...) @sink(<2 x i64> %add.i17)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vaddudm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -100,7 +100,7 @@
   %add.i16 = add <2 x i64> %15, %14
   tail call void (...) @sink(<2 x i64> %add.i16)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vaddudm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -109,7 +109,7 @@
   %add.i15 = add <1 x i128> %17, %16
   tail call void (...) @sink(<1 x i128> %add.i15)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vadduqm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -118,7 +118,7 @@
   %add.i14 = add <1 x i128> %19, %18
   tail call void (...) @sink(<1 x i128> %add.i14)
 ; CHECK: lxvx 34, 0, 3
-; CHECK: lxvx 35, 0, 4
+; CHECK: lxvx 35, 0, 3
 ; CHECK: vadduqm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -127,7 +127,7 @@
   %add.i13 = fadd <4 x float> %20, %21
   tail call void (...) @sink(<4 x float> %add.i13)
 ; CHECK: lxvx 0, 0, 3
-; CHECK: lxvx 1, 0, 4
+; CHECK: lxvx 1, 0, 3
 ; CHECK: xvaddsp 34, 0, 1
 ; CHECK: stxv 34,
 ; CHECK: bl sink
@@ -136,7 +136,7 @@
   %add.i12 = fadd <2 x double> %22, %23
   tail call void (...) @sink(<2 x double> %add.i12)
 ; CHECK: lxvx 0, 0, 3
-; CHECK: lxvx 1, 0, 4
+; CHECK: lxvx 1, 0, 3
 ; CHECK: xvadddp 0, 0, 1
 ; CHECK: stxv 0,
 ; CHECK: bl sink
diff --git a/test/CodeGen/PowerPC/vsx-self-copy.ll b/test/CodeGen/PowerPC/vsx-self-copy.ll
index 787ac4b..6a6008d 100644
--- a/test/CodeGen/PowerPC/vsx-self-copy.ll
+++ b/test/CodeGen/PowerPC/vsx-self-copy.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck %s
-; RUN: llc -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s -verify-machineinstrs | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 < %s -verify-machineinstrs | FileCheck %s
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
diff --git a/test/CodeGen/PowerPC/vsx-spill.ll b/test/CodeGen/PowerPC/vsx-spill.ll
index d46664b..6c87861 100644
--- a/test/CodeGen/PowerPC/vsx-spill.ll
+++ b/test/CodeGen/PowerPC/vsx-spill.ll
@@ -93,8 +93,8 @@
 
 ; CHECK-P9-REG-LABEL: foo3
 ; CHECK-P9-REG: stdu r1, -400(r1)
-; CHECK-P9-REG: lfd f30, 384(r1)
-; CHECK-P9-REG: xsadddp f1, f0, f0
+; CHECK-P9-REG-DAG: lfd f30, 384(r1)
+; CHECK-P9-REG-DAG: xsadddp f1, f0, f0
 
 ; CHECK-P9-FISL-LABEL: foo3
 ; CHECK-P9-FISL: stdu r1, -400(r1)
diff --git a/test/CodeGen/PowerPC/vsx.ll b/test/CodeGen/PowerPC/vsx.ll
index d6a5ed3..4b1c2ed 100644
--- a/test/CodeGen/PowerPC/vsx.ll
+++ b/test/CodeGen/PowerPC/vsx.ll
@@ -1099,20 +1099,15 @@
   ret <2 x double> %w
 
 ; CHECK-LABEL: @test69
-; CHECK-DAG: lfiwax f0, 0, r3
-; CHECK-DAG: lfiwax f1, 0, r3
-; CHECK-DAG: xscvsxddp f0, f0
-; CHECK-DAG: xscvsxddp f1, f1
-; CHECK: xxmrghd v2, vs1, vs0
+; CHECK-DAG: lxvd2x v2, 0, r3
+; CHECK-DAG: xvcvsxddp v2, v2
 ; CHECK: blr
 
 ; CHECK-LE-LABEL: @test69
-; CHECK-LE: mfvsrd
-; CHECK-LE: mtvsrwa
-; CHECK-LE: mtvsrwa
-; CHECK-LE: xscvsxddp
-; CHECK-LE: xscvsxddp
-; CHECK-LE: xxmrghd
+; CHECK-LE: vperm
+; CHECK-LE: vsld
+; CHECK-LE: vsrad
+; CHECK-LE: xvcvsxddp v2, v2
 ; CHECK-LE: blr
 }
 
@@ -1122,20 +1117,15 @@
   ret <2 x double> %w
 
 ; CHECK-LABEL: @test70
-; CHECK-DAG: lfiwax f0, 0, r3
-; CHECK-DAG: lfiwax f1, 0, r3
-; CHECK-DAG: xscvsxddp f0, f0
-; CHECK-DAG: xscvsxddp f1, f1
-; CHECK: xxmrghd v2, vs1, vs0
+; CHECK-DAG: lxvd2x v2, 0, r3
+; CHECK-DAG: xvcvsxddp v2, v2
 ; CHECK: blr
 
 ; CHECK-LE-LABEL: @test70
-; CHECK-LE: mfvsrd
-; CHECK-LE: mtvsrwa
-; CHECK-LE: mtvsrwa
-; CHECK-LE: xscvsxddp
-; CHECK-LE: xscvsxddp
-; CHECK-LE: xxmrghd
+; CHECK-LE: vperm
+; CHECK-LE: vsld
+; CHECK-LE: vsrad
+; CHECK-LE: xvcvsxddp v2, v2
 ; CHECK-LE: blr
 }
 
diff --git a/test/CodeGen/PowerPC/vsx_insert_extract_le.ll b/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
index ef7d8f3..0c081f7 100644
--- a/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
+++ b/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
@@ -31,10 +31,10 @@
 ;
 ; CHECK-P9-LABEL: testi0:
 ; CHECK-P9:       # %bb.0:
-; CHECK-P9-NEXT:    lfd f0, 0(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxpermdi vs0, f0, f0, 2
-; CHECK-P9-NEXT:    xxpermdi v2, vs1, vs0, 1
+; CHECK-P9-NEXT:    lfd [[REG:f[0-9]+]], 0(r4)
+; CHECK-P9-NEXT:    lxv [[REG1:vs[0-9]+]], 0(r3)
+; CHECK-P9-NEXT:    xxpermdi [[REG2:vs[0-9]+]], [[REG]], [[REG]], 2
+; CHECK-P9-NEXT:    xxpermdi v2, [[REG1]], [[REG2]], 1
 ; CHECK-P9-NEXT:    blr
   %v = load <2 x double>, <2 x double>* %p1
   %s = load double, double* %p2
@@ -65,10 +65,10 @@
 ;
 ; CHECK-P9-LABEL: testi1:
 ; CHECK-P9:       # %bb.0:
-; CHECK-P9-NEXT:    lfd f0, 0(r4)
-; CHECK-P9-NEXT:    lxv vs1, 0(r3)
-; CHECK-P9-NEXT:    xxpermdi vs0, f0, f0, 2
-; CHECK-P9-NEXT:    xxmrgld v2, vs0, vs1
+; CHECK-P9-NEXT:    lfd [[REG:f[0-9]+]], 0(r4)
+; CHECK-P9-NEXT:    lxv [[REG1:vs[0-9]+]], 0(r3)
+; CHECK-P9-NEXT:    xxpermdi [[REG2:vs[0-9]+]], [[REG]], [[REG]], 2
+; CHECK-P9-NEXT:    xxmrgld v2, [[REG2]], [[REG1]]
 ; CHECK-P9-NEXT:    blr
   %v = load <2 x double>, <2 x double>* %p1
   %s = load double, double* %p2
diff --git a/test/CodeGen/RISCV/alu32.ll b/test/CodeGen/RISCV/alu32.ll
index 3776e53..d6f667c 100644
--- a/test/CodeGen/RISCV/alu32.ll
+++ b/test/CodeGen/RISCV/alu32.ll
@@ -181,7 +181,7 @@
 ;
 ; RV64I-LABEL: sll:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -235,8 +235,6 @@
   ret i32 %1
 }
 
-; TODO: should select srlw for RV64.
-
 define i32 @srl(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: srl:
 ; RV32I:       # %bb.0:
@@ -245,16 +243,12 @@
 ;
 ; RV64I-LABEL: srl:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
 }
 
-; TODO: should select sraw for RV64.
-
 define i32 @sra(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sra:
 ; RV32I:       # %bb.0:
@@ -263,8 +257,7 @@
 ;
 ; RV64I-LABEL: sra:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
diff --git a/test/CodeGen/RISCV/alu64.ll b/test/CodeGen/RISCV/alu64.ll
index 021211b..e66d1d6 100644
--- a/test/CodeGen/RISCV/alu64.ll
+++ b/test/CodeGen/RISCV/alu64.ll
@@ -444,13 +444,10 @@
   ret i32 %1
 }
 
-; TODO: should select sllw for RV64.
-
 define signext i32 @sllw(i32 signext %a, i32 zeroext %b) {
 ; RV64I-LABEL: sllw:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sllw:
@@ -461,15 +458,10 @@
   ret i32 %1
 }
 
-; TODO: should select srlw for RV64.
-
 define signext i32 @srlw(i32 signext %a, i32 zeroext %b) {
 ; RV64I-LABEL: srlw:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: srlw:
@@ -480,13 +472,10 @@
   ret i32 %1
 }
 
-; TODO: should select sraw for RV64.
-
 define signext i32 @sraw(i64 %a, i32 zeroext %b) {
 ; RV64I-LABEL: sraw:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sraw:
diff --git a/test/CodeGen/RISCV/atomic-cmpxchg.ll b/test/CodeGen/RISCV/atomic-cmpxchg.ll
index 85cd169..19b85b6 100644
--- a/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
 ; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic:
@@ -41,6 +43,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB0_1
 ; RV32IA-NEXT:  .LBB0_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
   ret void
 }
@@ -82,6 +97,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB1_1
 ; RV32IA-NEXT:  .LBB1_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
   ret void
 }
@@ -123,6 +151,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB2_1
 ; RV32IA-NEXT:  .LBB2_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
   ret void
 }
@@ -164,6 +205,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB3_1
 ; RV32IA-NEXT:  .LBB3_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
   ret void
 }
@@ -205,6 +259,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB4_1
 ; RV32IA-NEXT:  .LBB4_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
   ret void
 }
@@ -246,6 +313,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB5_1
 ; RV32IA-NEXT:  .LBB5_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
   ret void
 }
@@ -287,6 +367,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB6_1
 ; RV32IA-NEXT:  .LBB6_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
   ret void
 }
@@ -328,6 +421,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB7_1
 ; RV32IA-NEXT:  .LBB7_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
   ret void
 }
@@ -369,6 +475,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB8_1
 ; RV32IA-NEXT:  .LBB8_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
   ret void
 }
@@ -410,6 +529,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB9_1
 ; RV32IA-NEXT:  .LBB9_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
   ret void
 }
@@ -452,6 +584,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB10_1
 ; RV32IA-NEXT:  .LBB10_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
   ret void
 }
@@ -494,6 +639,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB11_1
 ; RV32IA-NEXT:  .LBB11_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
   ret void
 }
@@ -536,6 +694,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB12_1
 ; RV32IA-NEXT:  .LBB12_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
   ret void
 }
@@ -578,6 +749,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB13_1
 ; RV32IA-NEXT:  .LBB13_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
   ret void
 }
@@ -620,6 +804,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB14_1
 ; RV32IA-NEXT:  .LBB14_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
   ret void
 }
@@ -662,6 +859,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB15_1
 ; RV32IA-NEXT:  .LBB15_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
   ret void
 }
@@ -704,6 +914,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB16_1
 ; RV32IA-NEXT:  .LBB16_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
   ret void
 }
@@ -746,6 +969,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB17_1
 ; RV32IA-NEXT:  .LBB17_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
   ret void
 }
@@ -788,6 +1024,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB18_1
 ; RV32IA-NEXT:  .LBB18_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
   ret void
 }
@@ -830,6 +1079,19 @@
 ; RV32IA-NEXT:    bnez a5, .LBB19_1
 ; RV32IA-NEXT:  .LBB19_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
   ret void
 }
@@ -858,6 +1120,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB20_1
 ; RV32IA-NEXT:  .LBB20_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
   ret void
 }
@@ -886,6 +1161,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB21_1
 ; RV32IA-NEXT:  .LBB21_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
   ret void
 }
@@ -914,6 +1202,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB22_1
 ; RV32IA-NEXT:  .LBB22_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
   ret void
 }
@@ -942,6 +1243,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB23_1
 ; RV32IA-NEXT:  .LBB23_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
   ret void
 }
@@ -970,6 +1284,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB24_1
 ; RV32IA-NEXT:  .LBB24_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
   ret void
 }
@@ -998,6 +1325,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB25_1
 ; RV32IA-NEXT:  .LBB25_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
   ret void
 }
@@ -1026,6 +1366,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB26_1
 ; RV32IA-NEXT:  .LBB26_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
   ret void
 }
@@ -1054,6 +1407,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB27_1
 ; RV32IA-NEXT:  .LBB27_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
   ret void
 }
@@ -1082,6 +1448,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB28_1
 ; RV32IA-NEXT:  .LBB28_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
   ret void
 }
@@ -1110,6 +1489,19 @@
 ; RV32IA-NEXT:    bnez a4, .LBB29_1
 ; RV32IA-NEXT:  .LBB29_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
   ret void
 }
@@ -1146,6 +1538,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
   ret void
 }
@@ -1184,6 +1589,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
   ret void
 }
@@ -1220,6 +1638,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
   ret void
 }
@@ -1258,6 +1689,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
   ret void
 }
@@ -1296,6 +1740,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
   ret void
 }
@@ -1334,6 +1791,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
   ret void
 }
@@ -1372,6 +1842,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
   ret void
 }
@@ -1410,6 +1893,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
   ret void
 }
@@ -1448,6 +1944,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
   ret void
 }
@@ -1484,6 +1993,19 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
   ret void
 }
diff --git a/test/CodeGen/RISCV/atomic-fence.ll b/test/CodeGen/RISCV/atomic-fence.ll
index 174977d..120f65b 100644
--- a/test/CodeGen/RISCV/atomic-fence.ll
+++ b/test/CodeGen/RISCV/atomic-fence.ll
@@ -3,12 +3,21 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define void @fence_acquire() nounwind {
 ; RV32I-LABEL: fence_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence r, rw
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence r, rw
+; RV64I-NEXT:    ret
   fence acquire
   ret void
 }
@@ -18,6 +27,11 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence rw, w
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence rw, w
+; RV64I-NEXT:    ret
   fence release
   ret void
 }
@@ -27,6 +41,11 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence.tso
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence.tso
+; RV64I-NEXT:    ret
   fence acq_rel
   ret void
 }
@@ -36,6 +55,11 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence rw, rw
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence rw, rw
+; RV64I-NEXT:    ret
   fence seq_cst
   ret void
 }
diff --git a/test/CodeGen/RISCV/atomic-load-store.ll b/test/CodeGen/RISCV/atomic-load-store.ll
index ba2b594..2df3a9f 100644
--- a/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/test/CodeGen/RISCV/atomic-load-store.ll
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i8_unordered:
@@ -19,6 +21,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a unordered, align 1
   ret i8 %1
 }
@@ -38,6 +50,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a monotonic, align 1
   ret i8 %1
 }
@@ -58,6 +80,16 @@
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a acquire, align 1
   ret i8 %1
 }
@@ -79,6 +111,16 @@
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a seq_cst, align 1
   ret i8 %1
 }
@@ -98,6 +140,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a unordered, align 2
   ret i16 %1
 }
@@ -117,6 +169,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a monotonic, align 2
   ret i16 %1
 }
@@ -137,6 +199,16 @@
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a acquire, align 2
   ret i16 %1
 }
@@ -158,6 +230,16 @@
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a seq_cst, align 2
   ret i16 %1
 }
@@ -177,6 +259,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a unordered, align 4
   ret i32 %1
 }
@@ -196,6 +288,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a monotonic, align 4
   ret i32 %1
 }
@@ -216,6 +318,16 @@
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a acquire, align 4
   ret i32 %1
 }
@@ -237,6 +349,16 @@
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a seq_cst, align 4
   ret i32 %1
 }
@@ -261,6 +383,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a unordered, align 8
   ret i64 %1
 }
@@ -285,6 +417,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a monotonic, align 8
   ret i64 %1
 }
@@ -309,6 +451,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a acquire, align 8
   ret i64 %1
 }
@@ -333,6 +485,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a seq_cst, align 8
   ret i64 %1
 }
@@ -352,6 +514,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a unordered, align 1
   ret void
 }
@@ -371,6 +543,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a monotonic, align 1
   ret void
 }
@@ -391,6 +573,16 @@
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a release, align 1
   ret void
 }
@@ -411,6 +603,16 @@
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a seq_cst, align 1
   ret void
 }
@@ -430,6 +632,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a unordered, align 2
   ret void
 }
@@ -449,6 +661,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a monotonic, align 2
   ret void
 }
@@ -469,6 +691,16 @@
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a release, align 2
   ret void
 }
@@ -489,6 +721,16 @@
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a seq_cst, align 2
   ret void
 }
@@ -508,6 +750,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a unordered, align 4
   ret void
 }
@@ -527,6 +779,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a monotonic, align 4
   ret void
 }
@@ -547,6 +809,16 @@
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a release, align 4
   ret void
 }
@@ -567,6 +839,16 @@
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a seq_cst, align 4
   ret void
 }
@@ -591,6 +873,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a unordered, align 8
   ret void
 }
@@ -615,6 +907,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a monotonic, align 8
   ret void
 }
@@ -639,6 +941,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a release, align 8
   ret void
 }
@@ -663,6 +975,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a seq_cst, align 8
   ret void
 }
diff --git a/test/CodeGen/RISCV/atomic-rmw.ll b/test/CodeGen/RISCV/atomic-rmw.ll
index f27afbb..256a669 100644
--- a/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/test/CodeGen/RISCV/atomic-rmw.ll
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) {
 ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic:
@@ -35,6 +37,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -70,6 +82,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -105,6 +127,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b release
   ret i8 %1
 }
@@ -140,6 +172,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -175,6 +217,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -210,6 +262,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -245,6 +307,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -280,6 +352,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b release
   ret i8 %1
 }
@@ -315,6 +397,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -350,6 +442,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -385,6 +487,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -420,6 +532,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -455,6 +577,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b release
   ret i8 %1
 }
@@ -490,6 +622,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -525,6 +667,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -554,6 +706,16 @@
 ; RV32IA-NEXT:    amoand.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -583,6 +745,16 @@
 ; RV32IA-NEXT:    amoand.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -612,6 +784,16 @@
 ; RV32IA-NEXT:    amoand.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b release
   ret i8 %1
 }
@@ -641,6 +823,16 @@
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -670,6 +862,16 @@
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -706,6 +908,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -742,6 +954,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -778,6 +1000,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b release
   ret i8 %1
 }
@@ -814,6 +1046,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -850,6 +1092,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -875,6 +1127,16 @@
 ; RV32IA-NEXT:    amoor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -900,6 +1162,16 @@
 ; RV32IA-NEXT:    amoor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -925,6 +1197,16 @@
 ; RV32IA-NEXT:    amoor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b release
   ret i8 %1
 }
@@ -950,6 +1232,16 @@
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -975,6 +1267,16 @@
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1000,6 +1302,16 @@
 ; RV32IA-NEXT:    amoxor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1025,6 +1337,16 @@
 ; RV32IA-NEXT:    amoxor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1050,6 +1372,16 @@
 ; RV32IA-NEXT:    amoxor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1075,6 +1407,16 @@
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1100,6 +1442,16 @@
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1177,6 +1529,49 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB35_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB35_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB35_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB35_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1257,6 +1652,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB36_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB36_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB36_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB36_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB36_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB36_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1337,6 +1778,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB37_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB37_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB37_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB37_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB37_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB37_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1420,6 +1907,55 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB38_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB38_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB38_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB38_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB38_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB38_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1500,6 +2036,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB39_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB39_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB39_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB39_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB39_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB39_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1577,6 +2159,49 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB40_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB40_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB40_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB40_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB40_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB40_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1657,6 +2282,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB41_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB41_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB41_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB41_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB41_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB41_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1737,6 +2408,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB42_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB42_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB42_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB42_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB42_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB42_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1820,6 +2537,55 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB43_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB43_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB43_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB43_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB43_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB43_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1900,6 +2666,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB44_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB44_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB44_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB44_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB44_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB44_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1970,6 +2782,47 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB45_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB45_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB45_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB45_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB45_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB45_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -2043,6 +2896,50 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB46_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB46_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB46_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB46_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB46_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB46_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -2116,6 +3013,50 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB47_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB47_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB47_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB47_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB47_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB47_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b release
   ret i8 %1
 }
@@ -2192,6 +3133,53 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB48_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB48_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB48_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB48_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB48_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB48_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2265,6 +3253,50 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB49_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB49_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB49_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB49_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB49_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB49_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -2335,6 +3367,47 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB50_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB50_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB50_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB50_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB50_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB50_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -2408,6 +3481,50 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB51_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB51_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB51_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB51_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB51_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB51_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -2481,6 +3598,50 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB52_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB52_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB52_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB52_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB52_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB52_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b release
   ret i8 %1
 }
@@ -2557,6 +3718,53 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB53_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB53_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB53_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB53_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB53_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB53_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2630,6 +3838,50 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB54_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB54_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB54_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB54_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB54_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB54_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -2666,6 +3918,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2702,6 +3964,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2738,6 +4010,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2774,6 +4056,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2810,6 +4102,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2846,6 +4148,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2882,6 +4194,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2918,6 +4240,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2954,6 +4286,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2990,6 +4332,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3026,6 +4378,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3062,6 +4424,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3098,6 +4470,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3134,6 +4516,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3170,6 +4562,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3200,6 +4602,16 @@
 ; RV32IA-NEXT:    amoand.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3230,6 +4642,16 @@
 ; RV32IA-NEXT:    amoand.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3260,6 +4682,16 @@
 ; RV32IA-NEXT:    amoand.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3290,6 +4722,16 @@
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3320,6 +4762,16 @@
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3357,6 +4809,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3394,6 +4856,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3431,6 +4903,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3468,6 +4950,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3505,6 +4997,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3532,6 +5034,16 @@
 ; RV32IA-NEXT:    amoor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3559,6 +5071,16 @@
 ; RV32IA-NEXT:    amoor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3586,6 +5108,16 @@
 ; RV32IA-NEXT:    amoor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3613,6 +5145,16 @@
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3640,6 +5182,16 @@
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3667,6 +5219,16 @@
 ; RV32IA-NEXT:    amoxor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3694,6 +5256,16 @@
 ; RV32IA-NEXT:    amoxor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3721,6 +5293,16 @@
 ; RV32IA-NEXT:    amoxor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3748,6 +5330,16 @@
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3775,6 +5367,16 @@
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3853,6 +5455,49 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:  .LBB90_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB90_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB90_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB90_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB90_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB90_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3934,6 +5579,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB91_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB91_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB91_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB91_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB91_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB91_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -4015,6 +5706,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB92_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB92_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB92_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB92_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB92_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB92_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b release
   ret i16 %1
 }
@@ -4099,6 +5836,55 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB93_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB93_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB93_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB93_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB93_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB93_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -4180,6 +5966,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB94_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB94_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB94_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB94_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB94_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB94_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -4258,6 +6090,49 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:  .LBB95_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB95_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB95_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB95_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB95_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB95_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -4339,6 +6214,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB96_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB96_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB96_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB96_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB96_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB96_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -4420,6 +6341,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB97_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB97_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB97_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB97_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB97_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB97_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b release
   ret i16 %1
 }
@@ -4504,6 +6471,55 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB98_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB98_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB98_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB98_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB98_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB98_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -4585,6 +6601,52 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB99_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB99_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB99_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB99_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB99_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB99_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -4660,6 +6722,51 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s5, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:  .LBB100_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB100_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB100_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB100_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB100_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB100_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -4738,6 +6845,54 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB101_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s6, a1, .LBB101_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB101_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB101_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB101_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB101_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -4816,6 +6971,54 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s6, s2, s1
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB102_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s6, a1, .LBB102_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB102_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB102_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB102_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB102_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b release
   ret i16 %1
 }
@@ -4897,6 +7100,57 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -80
+; RV64I-NEXT:    sd ra, 72(sp)
+; RV64I-NEXT:    sd s1, 64(sp)
+; RV64I-NEXT:    sd s2, 56(sp)
+; RV64I-NEXT:    sd s3, 48(sp)
+; RV64I-NEXT:    sd s4, 40(sp)
+; RV64I-NEXT:    sd s5, 32(sp)
+; RV64I-NEXT:    sd s6, 24(sp)
+; RV64I-NEXT:    sd s7, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s7, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB103_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s7, a1, .LBB103_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB103_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB103_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB103_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB103_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s7, 16(sp)
+; RV64I-NEXT:    ld s6, 24(sp)
+; RV64I-NEXT:    ld s5, 32(sp)
+; RV64I-NEXT:    ld s4, 40(sp)
+; RV64I-NEXT:    ld s3, 48(sp)
+; RV64I-NEXT:    ld s2, 56(sp)
+; RV64I-NEXT:    ld s1, 64(sp)
+; RV64I-NEXT:    ld ra, 72(sp)
+; RV64I-NEXT:    addi sp, sp, 80
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -4975,6 +7229,54 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB104_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s6, a1, .LBB104_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB104_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB104_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB104_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB104_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -5050,6 +7352,51 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s5, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:  .LBB105_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB105_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB105_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB105_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB105_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB105_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -5128,6 +7475,54 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB106_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s6, a1, .LBB106_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB106_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB106_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB106_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB106_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -5206,6 +7601,54 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s6, s2, s1
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB107_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s6, a1, .LBB107_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB107_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB107_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB107_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB107_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b release
   ret i16 %1
 }
@@ -5287,6 +7730,57 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -80
+; RV64I-NEXT:    sd ra, 72(sp)
+; RV64I-NEXT:    sd s1, 64(sp)
+; RV64I-NEXT:    sd s2, 56(sp)
+; RV64I-NEXT:    sd s3, 48(sp)
+; RV64I-NEXT:    sd s4, 40(sp)
+; RV64I-NEXT:    sd s5, 32(sp)
+; RV64I-NEXT:    sd s6, 24(sp)
+; RV64I-NEXT:    sd s7, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s7, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB108_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s7, a1, .LBB108_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB108_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB108_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB108_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB108_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s7, 16(sp)
+; RV64I-NEXT:    ld s6, 24(sp)
+; RV64I-NEXT:    ld s5, 32(sp)
+; RV64I-NEXT:    ld s4, 40(sp)
+; RV64I-NEXT:    ld s3, 48(sp)
+; RV64I-NEXT:    ld s2, 56(sp)
+; RV64I-NEXT:    ld s1, 64(sp)
+; RV64I-NEXT:    ld ra, 72(sp)
+; RV64I-NEXT:    addi sp, sp, 80
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -5365,6 +7859,54 @@
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB109_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s6, a1, .LBB109_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB109_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB109_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB109_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB109_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -5384,6 +7926,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5403,6 +7955,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5422,6 +7984,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5441,6 +8013,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5460,6 +8042,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5479,6 +8071,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5498,6 +8100,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5517,6 +8129,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5536,6 +8158,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5555,6 +8187,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5575,6 +8217,16 @@
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5595,6 +8247,16 @@
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5615,6 +8277,16 @@
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5635,6 +8307,16 @@
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5655,6 +8337,16 @@
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5674,6 +8366,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5693,6 +8395,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5712,6 +8424,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5731,6 +8453,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5750,6 +8482,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5776,6 +8518,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5802,6 +8554,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5828,6 +8590,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5854,6 +8626,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5880,6 +8662,16 @@
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5899,6 +8691,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5918,6 +8720,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5937,6 +8749,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5956,6 +8778,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5975,6 +8807,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5994,6 +8836,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6013,6 +8865,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6032,6 +8894,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6051,6 +8923,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6070,6 +8952,16 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6115,6 +9007,47 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB145_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB145_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB145_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB145_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB145_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB145_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6163,6 +9096,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB146_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB146_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB146_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB146_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB146_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB146_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6211,6 +9188,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB147_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB147_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB147_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB147_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB147_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB147_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6262,6 +9283,53 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB148_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB148_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB148_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB148_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB148_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB148_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6310,6 +9378,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB149_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB149_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB149_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB149_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB149_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB149_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6355,6 +9467,47 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB150_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB150_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB150_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB150_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB150_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB150_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6403,6 +9556,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB151_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB151_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB151_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB151_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB151_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB151_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6451,6 +9648,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB152_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB152_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB152_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB152_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB152_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB152_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6502,6 +9743,53 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB153_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB153_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB153_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB153_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB153_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB153_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6550,6 +9838,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB154_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB154_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB154_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB154_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB154_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB154_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6595,6 +9927,47 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB155_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB155_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB155_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB155_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB155_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB155_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6643,6 +10016,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB156_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB156_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB156_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB156_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB156_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB156_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6691,6 +10108,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB157_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB157_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB157_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB157_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB157_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB157_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6742,6 +10203,53 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB158_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB158_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB158_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB158_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB158_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB158_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6790,6 +10298,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB159_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB159_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB159_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB159_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB159_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB159_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6835,6 +10387,47 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB160_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB160_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB160_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB160_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB160_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB160_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6883,6 +10476,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB161_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB161_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB161_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB161_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB161_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB161_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6931,6 +10568,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB162_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB162_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB162_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB162_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB162_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB162_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6982,6 +10663,53 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB163_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB163_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB163_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB163_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB163_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB163_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -7030,6 +10758,50 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB164_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB164_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB164_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB164_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB164_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB164_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -7054,6 +10826,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7078,6 +10860,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7102,6 +10894,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7126,6 +10928,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7150,6 +10962,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7174,6 +10996,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7198,6 +11030,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7222,6 +11064,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7246,6 +11098,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7270,6 +11132,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7294,6 +11166,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7318,6 +11200,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7342,6 +11234,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7366,6 +11268,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7390,6 +11302,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7414,6 +11336,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7438,6 +11370,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7462,6 +11404,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7486,6 +11438,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7510,6 +11472,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7534,6 +11506,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7558,6 +11540,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7582,6 +11574,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7606,6 +11608,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7630,6 +11642,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7654,6 +11676,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7678,6 +11710,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7702,6 +11744,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7726,6 +11778,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7750,6 +11812,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7774,6 +11846,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7798,6 +11880,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7822,6 +11914,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7846,6 +11948,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7870,6 +11982,16 @@
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7990,6 +12112,42 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB200_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB200_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB200_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB200_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -8116,6 +12274,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB201_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB201_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB201_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB201_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -8242,6 +12439,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB202_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB202_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB202_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB202_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b release
   ret i64 %1
 }
@@ -8374,6 +12610,48 @@
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB203_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB203_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB203_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB203_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -8500,6 +12778,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB204_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB204_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB204_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB204_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -8622,6 +12939,42 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB205_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB205_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB205_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB205_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -8750,6 +13103,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB206_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB206_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB206_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB206_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -8878,6 +13270,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB207_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB207_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB207_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB207_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b release
   ret i64 %1
 }
@@ -9012,6 +13443,48 @@
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB208_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB208_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB208_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB208_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -9140,6 +13613,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB209_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB209_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB209_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB209_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -9260,6 +13772,42 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB210_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB210_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB210_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -9386,6 +13934,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB211_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB211_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB211_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -9512,6 +14099,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB212_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB212_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB212_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b release
   ret i64 %1
 }
@@ -9644,6 +14270,48 @@
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB213_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB213_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB213_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -9770,6 +14438,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB214_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB214_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB214_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -9892,6 +14599,42 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB215_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB215_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB215_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -10020,6 +14763,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB216_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB216_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB216_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -10148,6 +14930,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB217_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB217_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB217_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b release
   ret i64 %1
 }
@@ -10282,6 +15103,48 @@
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB218_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB218_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB218_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -10410,6 +15273,45 @@
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB219_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB219_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB219_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b seq_cst
   ret i64 %1
 }
diff --git a/test/CodeGen/RISCV/calling-conv.ll b/test/CodeGen/RISCV/calling-conv.ll
index 5368593..56f1de6 100644
--- a/test/CodeGen/RISCV/calling-conv.ll
+++ b/test/CodeGen/RISCV/calling-conv.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-FPELIM %s
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -disable-fp-elim < %s \
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -frame-pointer=all < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-WITHFP %s
 
 ; As well as calling convention details, we check that ra and fp are
diff --git a/test/CodeGen/RISCV/div.ll b/test/CodeGen/RISCV/div.ll
index da9df17..1fd0084 100644
--- a/test/CodeGen/RISCV/div.ll
+++ b/test/CodeGen/RISCV/div.ll
@@ -3,6 +3,10 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IM %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IM %s
 
 define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: udiv:
@@ -18,6 +22,24 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    divu a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = udiv i32 %a, %b
   ret i32 %1
 }
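
In the RV64I udiv body above, the slli/srli-by-32 pairs are how RV64I zero-extends an i32 argument before the 64-bit libcall, since the base ISA has no single zero-extend-word instruction; __udivdi3 is the compiler-rt/libgcc 64-bit unsigned division routine the test already calls. A small sketch of the equivalence (function names here are illustrative):

```c
#include <stdint.h>

/* (x << 32) >> 32, as an unsigned 64-bit operation, clears the upper
   32 bits -- the same effect as a zext from i32 to i64. */
uint64_t zext_w(uint64_t x) { return (x << 32) >> 32; }

/* What the RV64I lowering computes: zero-extend both operands, then do
   a full 64-bit unsigned division (via __udivdi3 when M is unavailable). */
uint32_t udiv32(uint32_t a, uint32_t b) {
  return (uint32_t)(zext_w(a) / zext_w(b));
}
```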
@@ -40,6 +62,34 @@
 ; RV32IM-NEXT:    mulhu a0, a0, a1
 ; RV32IM-NEXT:    srli a0, a0, 2
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    lui a1, 1035469
+; RV64IM-NEXT:    addiw a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    mulhu a0, a0, a1
+; RV64IM-NEXT:    srli a0, a0, 2
+; RV64IM-NEXT:    ret
   %1 = udiv i32 %a, 5
   ret i32 %1
 }
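
The lui/addiw/slli/addi chain in the RV64IM body materializes the 64-bit magic constant for division by 5; mulhu then takes the high 64 bits of the product and the trailing srli finishes the shift. The underlying identity is standard multiplicative-reciprocal (Granlund-Montgomery) division:

$$
m = \left\lceil \frac{2^{66}}{5} \right\rceil = \texttt{0xCCCCCCCCCCCCCCCD},
\qquad
\left\lfloor \frac{x}{5} \right\rfloor = \left\lfloor \frac{x \cdot m}{2^{66}} \right\rfloor
\quad \text{for } 0 \le x < 2^{64}.
$$

mulhu yields bits [127:64] of the product (a shift by 64) and `srli a0, a0, 2` supplies the remaining shift by 2. Since the identity holds over the full unsigned 64-bit range, udiv64_constant later in this file reuses the identical sequence with no zero-extension.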
@@ -54,6 +104,16 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    srli a0, a0, 3
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv_pow2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 3
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv_pow2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    srliw a0, a0, 3
+; RV64IM-NEXT:    ret
   %1 = udiv i32 %a, 8
   ret i32 %1
 }
@@ -76,6 +136,20 @@
 ; RV32IM-NEXT:    lw ra, 12(sp)
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv64:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divu a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = udiv i64 %a, %b
   ret i64 %1
 }
@@ -102,6 +176,30 @@
 ; RV32IM-NEXT:    lw ra, 12(sp)
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv64_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv64_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    lui a1, 1035469
+; RV64IM-NEXT:    addiw a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    mulhu a0, a0, a1
+; RV64IM-NEXT:    srli a0, a0, 2
+; RV64IM-NEXT:    ret
   %1 = udiv i64 %a, 5
   ret i64 %1
 }
@@ -120,6 +218,22 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    div a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = sdiv i32 %a, %b
   ret i32 %1
 }
@@ -144,6 +258,34 @@
 ; RV32IM-NEXT:    srai a0, a0, 1
 ; RV32IM-NEXT:    add a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a0, a0
+; RV64IM-NEXT:    lui a1, 13107
+; RV64IM-NEXT:    addiw a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 13
+; RV64IM-NEXT:    addi a1, a1, 1639
+; RV64IM-NEXT:    mulh a0, a0, a1
+; RV64IM-NEXT:    srli a1, a0, 63
+; RV64IM-NEXT:    srai a0, a0, 1
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = sdiv i32 %a, 5
   ret i32 %1
 }
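
The signed sdiv_constant expansion uses a different reciprocal and a rounding fix. Here mulh takes the signed high half of the product (a shift by 64), srai 1 completes the shift by 65, and adding the extracted sign bit (`srli a1, a0, 63`) converts the floor that an arithmetic shift computes into the truncation toward zero that sdiv requires:

$$
m = \left\lceil \frac{2^{65}}{5} \right\rceil = \texttt{0x6666666666666667},
\qquad
\operatorname{trunc}\!\left(\frac{x}{5}\right)
= \left\lfloor \frac{x \cdot m}{2^{65}} \right\rfloor + [\,x < 0\,].
$$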
@@ -164,6 +306,24 @@
 ; RV32IM-NEXT:    add a0, a0, a1
 ; RV32IM-NEXT:    srai a0, a0, 3
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv_pow2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    srli a1, a1, 60
+; RV64I-NEXT:    andi a1, a1, 7
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    sraiw a0, a0, 3
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv_pow2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a1, a0
+; RV64IM-NEXT:    srli a1, a1, 60
+; RV64IM-NEXT:    andi a1, a1, 7
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    sraiw a0, a0, 3
+; RV64IM-NEXT:    ret
   %1 = sdiv i32 %a, 8
   ret i32 %1
 }
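
The sdiv_pow2 body is the standard bias trick for signed division by a power of two: an arithmetic shift rounds toward minus infinity, while sdiv truncates toward zero, so the lowering adds divisor-minus-one (7) exactly when the value is negative; `srli a1, a1, 60` followed by `andi a1, a1, 7` extracts that bias from the sign extension, and sraiw completes the shift. A short C sketch, assuming the usual arithmetic right shift for signed values:

```c
#include <stdint.h>

/* Truncating signed division by 8 without a divide instruction: bias a
   negative value by 7 before the arithmetic shift, as the sdiv_pow2
   lowering does with its srli/andi/add sequence. */
int32_t sdiv_by_8(int32_t a) {
  int32_t bias = (a >> 31) & 7;  /* 7 if a < 0, else 0 */
  return (a + bias) >> 3;        /* arithmetic shift after biasing */
}
```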
@@ -186,6 +346,20 @@
 ; RV32IM-NEXT:    lw ra, 12(sp)
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv64:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    div a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = sdiv i64 %a, %b
   ret i64 %1
 }
@@ -212,6 +386,83 @@
 ; RV32IM-NEXT:    lw ra, 12(sp)
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv64_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv64_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    lui a1, 13107
+; RV64IM-NEXT:    addiw a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 13
+; RV64IM-NEXT:    addi a1, a1, 1639
+; RV64IM-NEXT:    mulh a0, a0, a1
+; RV64IM-NEXT:    srli a1, a0, 63
+; RV64IM-NEXT:    srai a0, a0, 1
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = sdiv i64 %a, 5
   ret i64 %1
 }
+
+; Although this sdiv has two sexti32 operands, it shouldn't compile to divw on
+; RV64M as that wouldn't produce the correct result for e.g. INT_MIN/-1.
+
+define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sdiv64_sext_operands:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    srai a3, a2, 31
+; RV32I-NEXT:    call __divdi3
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: sdiv64_sext_operands:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    addi sp, sp, -16
+; RV32IM-NEXT:    sw ra, 12(sp)
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    srai a1, a0, 31
+; RV32IM-NEXT:    srai a3, a2, 31
+; RV32IM-NEXT:    call __divdi3
+; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    addi sp, sp, 16
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv64_sext_operands:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv64_sext_operands:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    sext.w a0, a0
+; RV64IM-NEXT:    div a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sext i32 %a to i64
+  %2 = sext i32 %b to i64
+  %3 = sdiv i64 %1, %2
+  ret i64 %3
+}
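
A concrete illustration of the hazard the new test's comment describes, hedged as a sketch (the C function mirrors the IR; the specific values are the spec-mandated RISC-V overflow behavior): divw divides the low 32-bit words and, for the overflow case INT_MIN / -1, returns the dividend, i.e. INT_MIN again, whereas the i64 division of the sign-extended operands is well defined.

```c
#include <stdint.h>
#include <limits.h>

/* Same computation as sdiv64_sext_operands above. For a = INT_MIN and
   b = -1, the i64 division yields 2147483648 (representable in i64),
   while a 32-bit divw of the same operands overflows and, per the
   RISC-V spec, produces INT_MIN -- hence div, not divw, must be used. */
int64_t sdiv64_sext_operands(int32_t a, int32_t b) {
  return (int64_t)a / (int64_t)b;
}
```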
diff --git a/test/CodeGen/RISCV/frame.ll b/test/CodeGen/RISCV/frame.ll
index a1f5809..e8a833f 100644
--- a/test/CodeGen/RISCV/frame.ll
+++ b/test/CodeGen/RISCV/frame.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-FPELIM %s
-; RUN: llc -mtriple=riscv32 -disable-fp-elim -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -frame-pointer=all -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-WITHFP %s
 
 %struct.key_t = type { i32, [16 x i8] }
diff --git a/test/CodeGen/RISCV/large-stack.ll b/test/CodeGen/RISCV/large-stack.ll
index 51130fc..3e86871 100644
--- a/test/CodeGen/RISCV/large-stack.ll
+++ b/test/CodeGen/RISCV/large-stack.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-FPELIM %s
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -disable-fp-elim < %s \
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -frame-pointer=all < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-WITHFP %s
 
 ; TODO: the quality of the generated code is poor
diff --git a/test/CodeGen/RISCV/mul.ll b/test/CodeGen/RISCV/mul.ll
index 444a75f..9bf95be 100644
--- a/test/CodeGen/RISCV/mul.ll
+++ b/test/CodeGen/RISCV/mul.ll
@@ -3,8 +3,12 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IM %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IM %s
 
-define i32 @square(i32 %a) nounwind {
+define signext i32 @square(i32 %a) nounwind {
 ; RV32I-LABEL: square:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -19,11 +23,27 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    mul a0, a0, a0
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: square:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: square:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a0
+; RV64IM-NEXT:    ret
   %1 = mul i32 %a, %a
   ret i32 %1
 }
 
-define i32 @mul(i32 %a, i32 %b) nounwind {
+define signext i32 @mul(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: mul:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -37,11 +57,26 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mul:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mul:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = mul i32 %a, %b
   ret i32 %1
 }
 
-define i32 @mul_constant(i32 %a) nounwind {
+define signext i32 @mul_constant(i32 %a) nounwind {
 ; RV32I-LABEL: mul_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -57,6 +92,23 @@
 ; RV32IM-NEXT:    addi a1, zero, 5
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mul_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mul_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    addi a1, zero, 5
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 5
   ret i32 %1
 }
@@ -71,6 +123,16 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a0, a0, 3
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mul_pow2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 3
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mul_pow2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 3
+; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 8
   ret i32 %1
 }
@@ -94,6 +156,20 @@
 ; RV32IM-NEXT:    add a1, a3, a1
 ; RV32IM-NEXT:    mul a0, a0, a2
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mul64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mul64:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = mul i64 %a, %b
   ret i64 %1
 }
@@ -118,6 +194,22 @@
 ; RV32IM-NEXT:    add a1, a3, a1
 ; RV32IM-NEXT:    mul a0, a0, a2
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mul64_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mul64_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    addi a1, zero, 5
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = mul i64 %a, 5
   ret i64 %1
 }
@@ -140,6 +232,26 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    mulh a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mulhs:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mulhs:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    sext.w a0, a0
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
   %1 = sext i32 %a to i64
   %2 = sext i32 %b to i64
   %3 = mul i64 %1, %2
@@ -148,7 +260,7 @@
   ret i32 %5
 }
 
-define i32 @mulhu(i32 %a, i32 %b) nounwind {
+define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV32I-LABEL: mulhu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -166,6 +278,22 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    mulhu a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mulhu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mulhu:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
   %1 = zext i32 %a to i64
   %2 = zext i32 %b to i64
   %3 = mul i64 %1, %2
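
The mulhu test pins down the usual widening idiom: the high 32 bits of a 32-by-32 unsigned product are computed as a 64-bit multiply followed by a right shift (the IR's lshr/trunc tail falls outside this hunk's context), which RV64IM lowers to a single mul plus `srli a0, a0, 32`. A one-function C sketch of the same computation (function name illustrative):

```c
#include <stdint.h>

/* High half of an unsigned 32x32 product: widen, multiply, shift. */
uint32_t mulhu32(uint32_t a, uint32_t b) {
  return (uint32_t)(((uint64_t)a * b) >> 32);
}
```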
diff --git a/test/CodeGen/RISCV/rem.ll b/test/CodeGen/RISCV/rem.ll
index f37931f..505a351 100644
--- a/test/CodeGen/RISCV/rem.ll
+++ b/test/CodeGen/RISCV/rem.ll
@@ -3,6 +3,10 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IM %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IM %s
 
 define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: urem:
@@ -18,6 +22,24 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    remu a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: urem:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: urem:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = urem i32 %a, %b
   ret i32 %1
 }
@@ -36,6 +58,22 @@
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    rem a0, a0, a1
 ; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: srem:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: srem:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
   %1 = srem i32 %a, %b
   ret i32 %1
 }
diff --git a/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll b/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
index 52a59c0..b7f513d 100644
--- a/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
+++ b/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
@@ -546,7 +546,7 @@
 define i32 @aext_sllw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_sllw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -555,7 +555,7 @@
 define i32 @aext_sllw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_sllw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -564,7 +564,7 @@
 define i32 @aext_sllw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_sllw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -573,7 +573,7 @@
 define i32 @aext_sllw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_sllw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -582,7 +582,7 @@
 define i32 @aext_sllw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_sllw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -591,7 +591,7 @@
 define i32 @aext_sllw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_sllw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -600,7 +600,7 @@
 define i32 @aext_sllw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_sllw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -609,7 +609,7 @@
 define i32 @aext_sllw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_sllw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -618,19 +618,16 @@
 define i32 @aext_sllw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_sllw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
 }
 
-; TODO: Select sllw for all cases with a signext result.
-
 define signext i32 @sext_sllw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_sllw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
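
The rewritten checks throughout this file rely on the semantics of the RV64 W-form shifts: sllw/srlw/sraw read only bits [4:0] of the shift amount, operate on the low 32 bits of rs1, and sign-extend the 32-bit result to 64 bits. That is why the diff can drop the sext.w and slli+srli shims regardless of how either operand was extended. A C sketch of those semantics as specified by the RISC-V ISA (function names illustrative; signed right shift assumed arithmetic, as it is on all relevant compilers):

```c
#include <stdint.h>

/* Models of the RV64 W-form shifts: only rs2[4:0] is consumed, the
   operation is 32-bit, and the result is sign-extended to 64 bits. */
int64_t sllw(int64_t rs1, int64_t rs2) {
  return (int64_t)(int32_t)((uint32_t)rs1 << (rs2 & 31));
}
int64_t srlw(int64_t rs1, int64_t rs2) {
  return (int64_t)(int32_t)((uint32_t)rs1 >> (rs2 & 31));
}
int64_t sraw(int64_t rs1, int64_t rs2) {
  return (int64_t)((int32_t)rs1 >> (rs2 & 31));
}
```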
@@ -639,8 +636,7 @@
 define signext i32 @sext_sllw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_sllw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -649,8 +645,7 @@
 define signext i32 @sext_sllw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_sllw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -659,8 +654,7 @@
 define signext i32 @sext_sllw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_sllw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -669,8 +663,7 @@
 define signext i32 @sext_sllw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_sllw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -679,8 +672,7 @@
 define signext i32 @sext_sllw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_sllw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -689,8 +681,7 @@
 define signext i32 @sext_sllw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_sllw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -699,8 +690,7 @@
 define signext i32 @sext_sllw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_sllw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -709,8 +699,7 @@
 define signext i32 @sext_sllw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_sllw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -721,7 +710,7 @@
 define zeroext i32 @zext_sllw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_sllw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -732,7 +721,7 @@
 define zeroext i32 @zext_sllw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_sllw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -743,7 +732,7 @@
 define zeroext i32 @zext_sllw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_sllw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -754,7 +743,7 @@
 define zeroext i32 @zext_sllw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_sllw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -765,7 +754,7 @@
 define zeroext i32 @zext_sllw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_sllw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -776,7 +765,7 @@
 define zeroext i32 @zext_sllw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_sllw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -787,7 +776,7 @@
 define zeroext i32 @zext_sllw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_sllw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -798,7 +787,7 @@
 define zeroext i32 @zext_sllw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_sllw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -809,7 +798,7 @@
 define zeroext i32 @zext_sllw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_sllw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -817,14 +806,10 @@
   ret i32 %1
 }
 
-; TODO: srlw should be selected for 32-bit lshr with variable arguments.
-
 define i32 @aext_srlw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_srlw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -833,9 +818,7 @@
 define i32 @aext_srlw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_srlw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -844,9 +827,7 @@
 define i32 @aext_srlw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_srlw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -855,9 +836,7 @@
 define i32 @aext_srlw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_srlw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -866,9 +845,7 @@
 define i32 @aext_srlw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_srlw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -877,9 +854,7 @@
 define i32 @aext_srlw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_srlw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -888,7 +863,7 @@
 define i32 @aext_srlw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_srlw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -897,7 +872,7 @@
 define i32 @aext_srlw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_srlw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -906,7 +881,7 @@
 define i32 @aext_srlw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_srlw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -915,10 +890,7 @@
 define signext i32 @sext_srlw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_srlw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -927,10 +899,7 @@
 define signext i32 @sext_srlw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_srlw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -939,10 +908,7 @@
 define signext i32 @sext_srlw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_srlw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -951,10 +917,7 @@
 define signext i32 @sext_srlw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_srlw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -963,10 +926,7 @@
 define signext i32 @sext_srlw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_srlw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -975,10 +935,7 @@
 define signext i32 @sext_srlw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_srlw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -987,8 +944,7 @@
 define signext i32 @sext_srlw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_srlw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -997,8 +953,7 @@
 define signext i32 @sext_srlw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_srlw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -1007,8 +962,7 @@
 define signext i32 @sext_srlw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_srlw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -1017,9 +971,7 @@
 define zeroext i32 @zext_srlw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_srlw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1030,9 +982,7 @@
 define zeroext i32 @zext_srlw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_srlw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1043,9 +993,7 @@
 define zeroext i32 @zext_srlw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_srlw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1056,9 +1004,7 @@
 define zeroext i32 @zext_srlw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_srlw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1069,9 +1015,7 @@
 define zeroext i32 @zext_srlw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_srlw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1082,9 +1026,7 @@
 define zeroext i32 @zext_srlw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_srlw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1095,7 +1037,7 @@
 define zeroext i32 @zext_srlw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_srlw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1106,7 +1048,7 @@
 define zeroext i32 @zext_srlw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_srlw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1117,7 +1059,7 @@
 define zeroext i32 @zext_srlw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_srlw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1125,14 +1067,10 @@
   ret i32 %1
 }
 
-; TODO: sraw should be selected if the first operand is not sign-extended. If the
-; first operand is sign-extended, sra is equivalent for the test cases below.
-
 define i32 @aext_sraw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_sraw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1141,8 +1079,7 @@
 define i32 @aext_sraw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_sraw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1151,8 +1088,7 @@
 define i32 @aext_sraw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_sraw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1161,7 +1097,7 @@
 define i32 @aext_sraw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_sraw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1170,7 +1106,7 @@
 define i32 @aext_sraw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_sraw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1179,7 +1115,7 @@
 define i32 @aext_sraw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_sraw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1188,8 +1124,7 @@
 define i32 @aext_sraw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: aext_sraw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1198,8 +1133,7 @@
 define i32 @aext_sraw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: aext_sraw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1208,8 +1142,7 @@
 define i32 @aext_sraw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: aext_sraw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1218,8 +1151,7 @@
 define signext i32 @sext_sraw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_sraw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1228,8 +1160,7 @@
 define signext i32 @sext_sraw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_sraw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1238,8 +1169,7 @@
 define signext i32 @sext_sraw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_sraw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1248,7 +1178,7 @@
 define signext i32 @sext_sraw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_sraw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1257,7 +1187,7 @@
 define signext i32 @sext_sraw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_sraw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1266,7 +1196,7 @@
 define signext i32 @sext_sraw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_sraw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1275,8 +1205,7 @@
 define signext i32 @sext_sraw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: sext_sraw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1285,8 +1214,7 @@
 define signext i32 @sext_sraw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sext_sraw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1295,8 +1223,7 @@
 define signext i32 @sext_sraw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: sext_sraw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
@@ -1305,8 +1232,7 @@
 define zeroext i32 @zext_sraw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_sraw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1317,8 +1243,7 @@
 define zeroext i32 @zext_sraw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_sraw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1329,8 +1254,7 @@
 define zeroext i32 @zext_sraw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_sraw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1341,7 +1265,7 @@
 define zeroext i32 @zext_sraw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_sraw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1352,7 +1276,7 @@
 define zeroext i32 @zext_sraw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_sraw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1363,7 +1287,7 @@
 define zeroext i32 @zext_sraw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_sraw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1374,8 +1298,7 @@
 define zeroext i32 @zext_sraw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_sraw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1386,8 +1309,7 @@
 define zeroext i32 @zext_sraw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_sraw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1398,8 +1320,7 @@
 define zeroext i32 @zext_sraw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_sraw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
diff --git a/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll b/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
new file mode 100644
index 0000000..f3e877a
--- /dev/null
+++ b/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
@@ -0,0 +1,1308 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IM
+
+; The patterns for the 'W' suffixed RV64M instructions have the potential to
+; miss cases. This file checks all the variants of
+; sign-extended/zero-extended/any-extended inputs and outputs.
+
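+; The functions below are named <result-ext>_<op>_<lhs-ext>_<rhs-ext>; for
+; example, zext_mulw_sext_aext returns a zeroext i32 computed from a signext
+; LHS and an any-extended (plain i32) RHS.
+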
+define i32 @aext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_mulw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_mulw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_mulw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_mulw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_mulw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_mulw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_mulw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_mulw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_mulw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_mulw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_mulw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_mulw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_mulw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_mulw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_mulw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_mulw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_mulw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_mulw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_mulw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_mulw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_mulw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_mulw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_mulw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_mulw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_mulw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_mulw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_mulw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
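+; Note: in the zext_mulw_* cases above, mul and mulw are interchangeable in
+; front of the slli/srli pair, since only the low 32 bits of the product
+; survive the zero-extension; selecting the plain mul is equally correct.
+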
+define i32 @aext_divuw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_divuw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_divuw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_divuw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_divuw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_divuw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_divuw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_divuw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_divuw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divuw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_divuw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_divuw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_divuw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_divuw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_divuw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_divuw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_divuw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_divuw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_divuw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divuw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_divuw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_divuw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_divuw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_divuw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    divu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_divuw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_divuw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_divuw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    divu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_divuw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 32
+; RV64IM-NEXT:    srli a1, a1, 32
+; RV64IM-NEXT:    divu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_divuw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 32
+; RV64IM-NEXT:    srli a1, a1, 32
+; RV64IM-NEXT:    divu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divuw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_divuw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
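+; Note: once both operands are known zero-extended, a plain divu already
+; yields a zero-extended result: the quotient of two values below 2^32 is
+; itself below 2^32 (udiv by zero is undefined in IR, so that case does not
+; constrain the lowering). Neither divuw nor a trailing slli/srli is needed.
+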
+define i32 @aext_divw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_divw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_divw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_divw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_divw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_divw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_divw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_divw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_divw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_divw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_divw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_divw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_divw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_divw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_divw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_divw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_divw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_divw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_divw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_divw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_divw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_divw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_divw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_divw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_divw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_divw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_divw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_divw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_divw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_divw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_divw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
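+; Note: unlike the unsigned case, zext_divw_zext_zext cannot collapse to a
+; plain 64-bit div: a zero-extended i32 with bit 31 set is not sign-extended,
+; so divw (which interprets the low 32 bits of each operand as signed) is
+; still required, followed by zero-extension of its result.
+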
+define i32 @aext_remw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_remw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_remw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_remw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_remw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_remw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_remw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_remw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_remw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_remw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_remw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_remw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_remw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_remw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_remw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_remw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_remw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_remw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_remw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_remw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_remw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_remw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_remw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_remw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_remw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_remw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_remw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_remw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_remuw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_remuw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_remuw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_remuw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_remuw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_remuw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: aext_remuw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: aext_remuw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_remuw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: aext_remuw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_remuw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_remuw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_remuw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_remuw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_remuw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_remuw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: sext_remuw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: sext_remuw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_remuw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: sext_remuw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_remuw_aext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_remuw_aext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_remuw_aext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    remu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_remuw_sext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_remuw_sext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_remuw_sext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 32
+; RV64IM-NEXT:    srli a0, a0, 32
+; RV64IM-NEXT:    remu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64IM-LABEL: zext_remuw_zext_aext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 32
+; RV64IM-NEXT:    srli a1, a1, 32
+; RV64IM-NEXT:    remu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64IM-LABEL: zext_remuw_zext_sext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 32
+; RV64IM-NEXT:    srli a1, a1, 32
+; RV64IM-NEXT:    remu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_remuw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64IM-LABEL: zext_remuw_zext_zext:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
diff --git a/test/CodeGen/RISCV/vararg.ll b/test/CodeGen/RISCV/vararg.ll
index 77f8f30..4dd73dc 100644
--- a/test/CodeGen/RISCV/vararg.ll
+++ b/test/CodeGen/RISCV/vararg.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-FPELIM %s
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -disable-fp-elim < %s \
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -frame-pointer=all < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I-WITHFP %s
 
 declare void @llvm.va_start(i8*)
diff --git a/test/CodeGen/SPARC/empty-functions.ll b/test/CodeGen/SPARC/empty-functions.ll
index 974df23..797bbda 100644
--- a/test/CodeGen/SPARC/empty-functions.ll
+++ b/test/CodeGen/SPARC/empty-functions.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -mtriple=sparc-linux-gnu | FileCheck -check-prefix=LINUX-NO-FP %s
-; RUN: llc < %s -mtriple=sparc-linux-gnu -disable-fp-elim | FileCheck -check-prefix=LINUX-FP %s
+; RUN: llc < %s -mtriple=sparc-linux-gnu -frame-pointer=all | FileCheck -check-prefix=LINUX-FP %s
 
 define void @func() {
 entry:
diff --git a/test/CodeGen/SystemZ/Large/branch-01.ll b/test/CodeGen/SystemZ/Large/branch-01.ll
index 17d9c49..ed50547 100644
--- a/test/CodeGen/SystemZ/Large/branch-01.ll
+++ b/test/CodeGen/SystemZ/Large/branch-01.ll
@@ -11889,7 +11889,7 @@
 
 !llvm.ident = !{!0}
 
-!0 = !{!"clang version 7.0.0 (http://llvm.org/git/clang.git a73a299e6b81e72cb50aa8ec5e8b04de7f4e1f81) (http://llvm.org/git/llvm.git 94cb0130bcb7244300ebde720d4e03b6910a1f1f)"}
+!0 = !{!"clang version 7.0.0"}
 !1 = !{!2, !4}
 !2 = distinct !{!2, !3, !"func_62: %agg.result"}
 !3 = distinct !{!3, !"func_62"}
diff --git a/test/CodeGen/SystemZ/Large/branch-range-01.py b/test/CodeGen/SystemZ/Large/branch-range-01.py
index 344d261..49ce623 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-01.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-01.py
@@ -67,42 +67,44 @@
 # CHECK: c %r4, 136(%r3)
 # CHECK: jge [[LABEL]]
 
+from __future__ import print_function
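+# print_function makes the print() calls below valid under Python 2 as well,
+# so this generator runs under either interpreter.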
+
 branch_blocks = 10
 main_size = 0xffd8
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i32 *%stop, i32 %limit) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i32 *%stop, i32 %limit) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i)
-    print '  %%bcur%d = load i32 , i32 *%%bstop%d' % (i, i)
-    print '  %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i))
+    print('  %%bcur%d = load i32 , i32 *%%bstop%d' % (i, i))
+    print('  %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i + 25)
-    print '  %%acur%d = load i32 , i32 *%%astop%d' % (i, i)
-    print '  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i + 25))
+    print('  %%acur%d = load i32 , i32 *%%astop%d' % (i, i))
+    print('  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-02.py b/test/CodeGen/SystemZ/Large/branch-range-02.py
index 7f7b099..155f742 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-02.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-02.py
@@ -56,27 +56,29 @@
 # CHECK: c %r4, 1036(%r3)
 # CHECK: jge [[LABEL]]
 
+from __future__ import print_function
+
 blocks = 256 + 4
 
-print 'define void @f1(i8 *%base, i32 *%stop, i32 %limit) {'
-print 'entry:'
-print '  br label %b0'
-print ''
+print('define void @f1(i8 *%base, i32 *%stop, i32 %limit) {')
+print('entry:')
+print('  br label %b0')
+print('')
 
 a, b = 1, 1
-for i in xrange(blocks):
+for i in range(blocks):
     a, b = b, a + b
     value = a % 256
     next = 'b%d' % (i + 1) if i + 1 < blocks else 'end'
     other = 'end' if 2 * i < blocks else 'b0'
-    print 'b%d:' % i
-    print '  store volatile i8 %d, i8 *%%base' % value
-    print '  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i)
-    print '  %%acur%d = load i32 , i32 *%%astop%d' % (i, i)
-    print '  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
-    print '  br i1 %%atest%d, label %%%s, label %%%s' % (i, other, next)
+    print('b%d:' % i)
+    print('  store volatile i8 %d, i8 *%%base' % value)
+    print('  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i))
+    print('  %%acur%d = load i32 , i32 *%%astop%d' % (i, i))
+    print('  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i))
+    print('  br i1 %%atest%d, label %%%s, label %%%s' % (i, other, next))
 
-print ''
-print '%s:' % next
-print '  ret void'
-print '}'
+print('')
+print('%s:' % next)
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-03.py b/test/CodeGen/SystemZ/Large/branch-range-03.py
index 75c9ea4..f58bedf 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-03.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-03.py
@@ -67,44 +67,46 @@
 # CHECK: cr %r4, [[REG]]
 # CHECK: jge [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffcc
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i8 *%stop, i32 %limit) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i8 *%stop, i32 %limit) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
-    print '  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
-    print '  %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
-    print '  %%btest%d = icmp eq i32 %%limit, %%bext%d' % (i, i)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
+    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
+    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
+    print('  %%btest%d = icmp eq i32 %%limit, %%bext%d' % (i, i))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
-    print '  %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
-    print '  %%aext%d = sext i8 %%acur%d to i32' % (i, i)
-    print '  %%atest%d = icmp eq i32 %%limit, %%aext%d' % (i, i)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
+    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
+    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
+    print('  %%atest%d = icmp eq i32 %%limit, %%aext%d' % (i, i))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-04.py b/test/CodeGen/SystemZ/Large/branch-range-04.py
index d475c95..6aca0a2 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-04.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-04.py
@@ -71,44 +71,46 @@
 # CHECK: cgr %r4, [[REG]]
 # CHECK: jge [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffcc
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i8 *%stop, i64 %limit) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i8 *%stop, i64 %limit) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
-    print '  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
-    print '  %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
-    print '  %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
+    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
+    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
+    print('  %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
-    print '  %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
-    print '  %%aext%d = sext i8 %%acur%d to i64' % (i, i)
-    print '  %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
+    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
+    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
+    print('  %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-05.py b/test/CodeGen/SystemZ/Large/branch-range-05.py
index 0a56eff..326fe8b 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-05.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-05.py
@@ -71,42 +71,44 @@
 # CHECK: chi [[REG]], 107
 # CHECK: jgl [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffcc
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i8 *%stop) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i8 *%stop) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bcur%d = load i8 , i8 *%%stop' % i
-    print '  %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
-    print '  %%btest%d = icmp slt i32 %%bext%d, %d' % (i, i, i + 50)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bcur%d = load i8 , i8 *%%stop' % i)
+    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
+    print('  %%btest%d = icmp slt i32 %%bext%d, %d' % (i, i, i + 50))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%acur%d = load i8 , i8 *%%stop' % i
-    print '  %%aext%d = sext i8 %%acur%d to i32' % (i, i)
-    print '  %%atest%d = icmp slt i32 %%aext%d, %d' % (i, i, i + 100)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%acur%d = load i8 , i8 *%%stop' % i)
+    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
+    print('  %%atest%d = icmp slt i32 %%aext%d, %d' % (i, i, i + 100))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-06.py b/test/CodeGen/SystemZ/Large/branch-range-06.py
index 5b05434..44cc5c6 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-06.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-06.py
@@ -71,42 +71,44 @@
 # CHECK: cghi [[REG]], 107
 # CHECK: jgl [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffcc
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i8 *%stop) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i8 *%stop) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bcur%d = load i8 , i8 *%%stop' % i
-    print '  %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
-    print '  %%btest%d = icmp slt i64 %%bext%d, %d' % (i, i, i + 50)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bcur%d = load i8 , i8 *%%stop' % i)
+    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
+    print('  %%btest%d = icmp slt i64 %%bext%d, %d' % (i, i, i + 50))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%acur%d = load i8 , i8 *%%stop' % i
-    print '  %%aext%d = sext i8 %%acur%d to i64' % (i, i)
-    print '  %%atest%d = icmp slt i64 %%aext%d, %d' % (i, i, i + 100)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%acur%d = load i8 , i8 *%%stop' % i)
+    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
+    print('  %%atest%d = icmp slt i64 %%aext%d, %d' % (i, i, i + 100))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-07.py b/test/CodeGen/SystemZ/Large/branch-range-07.py
index c5fef10..71e9588 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-07.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-07.py
@@ -32,37 +32,39 @@
 # CHECK: ahi {{%r[0-9]+}}, -1
 # CHECK: jglh
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffd8
 
-print 'define void @f1(i8 *%base, i32 *%counts) {'
-print 'entry:'
+print('define void @f1(i8 *%base, i32 *%counts) {')
+print('entry:')
 
-for i in xrange(branch_blocks - 1, -1, -1):
-    print '  %%countptr%d = getelementptr i32, i32 *%%counts, i64 %d' % (i, i)
-    print '  %%initcount%d = load i32 , i32 *%%countptr%d' % (i, i)
-    print '  br label %%loop%d' % i
+for i in range(branch_blocks - 1, -1, -1):
+    print('  %%countptr%d = getelementptr i32, i32 *%%counts, i64 %d' % (i, i))
+    print('  %%initcount%d = load i32 , i32 *%%countptr%d' % (i, i))
+    print('  br label %%loop%d' % i)
     
-    print 'loop%d:' % i
+    print('loop%d:' % i)
     block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
     block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
-    print ('  %%count%d = phi i32 [ %%initcount%d, %%%s ],'
-           ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2))
+    print('  %%count%d = phi i32 [ %%initcount%d, %%%s ],'
+          ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2))
 
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%nextcount%d = add i32 %%count%d, -1' % (i, i)
-    print '  %%test%d = icmp ne i32 %%nextcount%d, 0' % (i, i)
-    print '  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%nextcount%d = add i32 %%count%d, -1' % (i, i))
+    print('  %%test%d = icmp ne i32 %%nextcount%d, 0' % (i, i))
+    print('  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  ret void'
-print '}'
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-08.py b/test/CodeGen/SystemZ/Large/branch-range-08.py
index 8b6b673..2ef6859 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-08.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-08.py
@@ -33,37 +33,39 @@
 # CHECK: aghi {{%r[0-9]+}}, -1
 # CHECK: jglh
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffd8
 
-print 'define void @f1(i8 *%base, i64 *%counts) {'
-print 'entry:'
+print('define void @f1(i8 *%base, i64 *%counts) {')
+print('entry:')
 
-for i in xrange(branch_blocks - 1, -1, -1):
-    print '  %%countptr%d = getelementptr i64, i64 *%%counts, i64 %d' % (i, i)
-    print '  %%initcount%d = load i64 , i64 *%%countptr%d' % (i, i)
-    print '  br label %%loop%d' % i
+for i in range(branch_blocks - 1, -1, -1):
+    print('  %%countptr%d = getelementptr i64, i64 *%%counts, i64 %d' % (i, i))
+    print('  %%initcount%d = load i64 , i64 *%%countptr%d' % (i, i))
+    print('  br label %%loop%d' % i)
     
-    print 'loop%d:' % i
+    print('loop%d:' % i)
     block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
     block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
-    print ('  %%count%d = phi i64 [ %%initcount%d, %%%s ],'
-           ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2))
+    print('  %%count%d = phi i64 [ %%initcount%d, %%%s ],'
+          ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2))
 
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%nextcount%d = add i64 %%count%d, -1' % (i, i)
-    print '  %%test%d = icmp ne i64 %%nextcount%d, 0' % (i, i)
-    print '  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%nextcount%d = add i64 %%count%d, -1' % (i, i))
+    print('  %%test%d = icmp ne i64 %%nextcount%d, 0' % (i, i))
+    print('  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  ret void'
-print '}'
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-09.py b/test/CodeGen/SystemZ/Large/branch-range-09.py
index 6b568a6..09d5d69 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-09.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-09.py
@@ -67,44 +67,46 @@
 # CHECK: clr %r4, [[REG]]
 # CHECK: jgl [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffcc
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i8 *%stop, i32 %limit) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i8 *%stop, i32 %limit) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
-    print '  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
-    print '  %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
-    print '  %%btest%d = icmp ult i32 %%limit, %%bext%d' % (i, i)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
+    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
+    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
+    print('  %%btest%d = icmp ult i32 %%limit, %%bext%d' % (i, i))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
-    print '  %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
-    print '  %%aext%d = sext i8 %%acur%d to i32' % (i, i)
-    print '  %%atest%d = icmp ult i32 %%limit, %%aext%d' % (i, i)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
+    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
+    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
+    print('  %%atest%d = icmp ult i32 %%limit, %%aext%d' % (i, i))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-10.py b/test/CodeGen/SystemZ/Large/branch-range-10.py
index c6f8945..258989b 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-10.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-10.py
@@ -71,44 +71,46 @@
 # CHECK: clgr %r4, [[REG]]
 # CHECK: jgl [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffcc
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i8 *%stop, i64 %limit) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i8 *%stop, i64 %limit) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
-    print '  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
-    print '  %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
-    print '  %%btest%d = icmp ult i64 %%limit, %%bext%d' % (i, i)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
+    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
+    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
+    print('  %%btest%d = icmp ult i64 %%limit, %%bext%d' % (i, i))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
-    print '  %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
-    print '  %%aext%d = sext i8 %%acur%d to i64' % (i, i)
-    print '  %%atest%d = icmp ult i64 %%limit, %%aext%d' % (i, i)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
+    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
+    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
+    print('  %%atest%d = icmp ult i64 %%limit, %%aext%d' % (i, i))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-11.py b/test/CodeGen/SystemZ/Large/branch-range-11.py
index 10466df..22776aa 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-11.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-11.py
@@ -87,44 +87,46 @@
 # CHECK: clfi [[REG]], 107
 # CHECK: jgl [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffc6
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bcur%da = load i32 , i32 *%%stopa' % i
-    print '  %%bcur%db = load i32 , i32 *%%stopb' % i
-    print '  %%bsub%d = sub i32 %%bcur%da, %%bcur%db' % (i, i, i)
-    print '  %%btest%d = icmp ult i32 %%bsub%d, %d' % (i, i, i + 50)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bcur%da = load i32 , i32 *%%stopa' % i)
+    print('  %%bcur%db = load i32 , i32 *%%stopb' % i)
+    print('  %%bsub%d = sub i32 %%bcur%da, %%bcur%db' % (i, i, i))
+    print('  %%btest%d = icmp ult i32 %%bsub%d, %d' % (i, i, i + 50))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%acur%da = load i32 , i32 *%%stopa' % i
-    print '  %%acur%db = load i32 , i32 *%%stopb' % i
-    print '  %%asub%d = sub i32 %%acur%da, %%acur%db' % (i, i, i)
-    print '  %%atest%d = icmp ult i32 %%asub%d, %d' % (i, i, i + 100)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%acur%da = load i32 , i32 *%%stopa' % i)
+    print('  %%acur%db = load i32 , i32 *%%stopb' % i)
+    print('  %%asub%d = sub i32 %%acur%da, %%acur%db' % (i, i, i))
+    print('  %%atest%d = icmp ult i32 %%asub%d, %d' % (i, i, i + 100))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/branch-range-12.py b/test/CodeGen/SystemZ/Large/branch-range-12.py
index 809483a..0911953 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-12.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-12.py
@@ -87,44 +87,46 @@
 # CHECK: clgfi [[REG]], 107
 # CHECK: jgl [[LABEL]]
 
+from __future__ import print_function
+
 branch_blocks = 8
 main_size = 0xffb4
 
-print '@global = global i32 0'
+print('@global = global i32 0')
 
-print 'define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {'
-print 'entry:'
-print '  br label %before0'
-print ''
+print('define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {')
+print('entry:')
+print('  br label %before0')
+print('')
 
-for i in xrange(branch_blocks):
+for i in range(branch_blocks):
     next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print 'before%d:' % i
-    print '  %%bcur%da = load i64 , i64 *%%stopa' % i
-    print '  %%bcur%db = load i64 , i64 *%%stopb' % i
-    print '  %%bsub%d = sub i64 %%bcur%da, %%bcur%db' % (i, i, i)
-    print '  %%btest%d = icmp ult i64 %%bsub%d, %d' % (i, i, i + 50)
-    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
-    print ''
+    print('before%d:' % i)
+    print('  %%bcur%da = load i64 , i64 *%%stopa' % i)
+    print('  %%bcur%db = load i64 , i64 *%%stopb' % i)
+    print('  %%bsub%d = sub i64 %%bcur%da, %%bcur%db' % (i, i, i))
+    print('  %%btest%d = icmp ult i64 %%bsub%d, %d' % (i, i, i + 50))
+    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
+    print('')
 
-print '%s:' % next
+print('%s:' % next)
 a, b = 1, 1
-for i in xrange(0, main_size, 6):
+for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
-    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)
+    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
+    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
 
-for i in xrange(branch_blocks):
-    print '  %%acur%da = load i64 , i64 *%%stopa' % i
-    print '  %%acur%db = load i64 , i64 *%%stopb' % i
-    print '  %%asub%d = sub i64 %%acur%da, %%acur%db' % (i, i, i)
-    print '  %%atest%d = icmp ult i64 %%asub%d, %d' % (i, i, i + 100)
-    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
-    print ''
-    print 'after%d:' % i
+for i in range(branch_blocks):
+    print('  %%acur%da = load i64 , i64 *%%stopa' % i)
+    print('  %%acur%db = load i64 , i64 *%%stopb' % i)
+    print('  %%asub%d = sub i64 %%acur%da, %%acur%db' % (i, i, i))
+    print('  %%atest%d = icmp ult i64 %%asub%d, %d' % (i, i, i + 100))
+    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
+    print('')
+    print('after%d:' % i)
 
-print '  %dummy = load volatile i32, i32 *@global'
-print '  ret void'
-print '}'
+print('  %dummy = load volatile i32, i32 *@global')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/spill-01.py b/test/CodeGen/SystemZ/Large/spill-01.py
index f59f607..2831804 100644
--- a/test/CodeGen/SystemZ/Large/spill-01.py
+++ b/test/CodeGen/SystemZ/Large/spill-01.py
@@ -18,23 +18,26 @@
 # CHECK: lay [[REG:%r[0-5]]], 4096(%r15)
 # CHECK: mvc {{[0-9]+}}(8,{{%r[0-9]+}}), 8([[REG]])
 # CHECK: br %r14
+
+from __future__ import print_function
+
 count = 500
 
-print 'declare void @foo()'
-print ''
-print 'define void @f1(i64 *%base0, i64 *%base1) {'
+print('declare void @foo()')
+print('')
+print('define void @f1(i64 *%base0, i64 *%base1) {')
 
 for i in range(count):
-    print '  %%ptr%d = getelementptr i64, i64 *%%base%d, i64 %d' % (i, i % 2, i / 2)
-    print '  %%val%d = load i64 , i64 *%%ptr%d' % (i, i)
-    print ''
+    print('  %%ptr%d = getelementptr i64, i64 *%%base%d, i64 %d' % (i, i % 2, i // 2))
+    print('  %%val%d = load i64 , i64 *%%ptr%d' % (i, i))
+    print('')
 
-print '  call void @foo()'
-print ''
+print('  call void @foo()')
+print('')
 
 for i in range(count):
-    print '  store i64 %%val%d, i64 *%%ptr%d' % (i, i)
+    print('  store i64 %%val%d, i64 *%%ptr%d' % (i, i))
 
-print ''
-print '  ret void'
-print '}'
+print('')
+print('  ret void')
+print('}')
diff --git a/test/CodeGen/SystemZ/Large/spill-02.py b/test/CodeGen/SystemZ/Large/spill-02.py
index 4ccfa11..f835993 100644
--- a/test/CodeGen/SystemZ/Large/spill-02.py
+++ b/test/CodeGen/SystemZ/Large/spill-02.py
@@ -19,55 +19,58 @@
 # the first 8168 bytes to be used for the call.  160 of these bytes are
 # allocated for the ABI frame.  There are also 5 argument registers, one of
 # which is used as a base pointer.
+
+from __future__ import print_function
+
 args = (8168 - 160) // 8 + (5 - 1)
 
-print 'declare i64 *@foo(i64 *%s)' % (', i64' * args)
-print 'declare void @bar(i64 *)'
-print ''
-print 'define i64 @f1(i64 %foo) {'
-print 'entry:'
+print('declare i64 *@foo(i64 *%s)' % (', i64' * args))
+print('declare void @bar(i64 *)')
+print('')
+print('define i64 @f1(i64 %foo) {')
+print('entry:')
 
 # Make the allocation big, so that it goes at the top of the frame.
-print '  %array = alloca [1000 x i64]'
-print '  %area = getelementptr [1000 x i64], [1000 x i64] *%array, i64 0, i64 0'
-print '  %%base = call i64 *@foo(i64 *%%area%s)' % (', i64 0' * args)
-print ''
+print('  %array = alloca [1000 x i64]')
+print('  %area = getelementptr [1000 x i64], [1000 x i64] *%array, i64 0, i64 0')
+print('  %%base = call i64 *@foo(i64 *%%area%s)' % (', i64 0' * args))
+print('')
 
 # Make sure all GPRs are used.  One is needed for the stack pointer and
 # another for %base, so we need 14 live values.
 count = 14
 for i in range(count):
-    print '  %%ptr%d = getelementptr i64, i64 *%%base, i64 %d' % (i, i / 2)
-    print '  %%val%d = load volatile i64 , i64 *%%ptr%d' % (i, i)
-    print ''
+    print('  %%ptr%d = getelementptr i64, i64 *%%base, i64 %d' % (i, i // 2))
+    print('  %%val%d = load volatile i64 , i64 *%%ptr%d' % (i, i))
+    print('')
 
 # Encourage the register allocator to give preference to these %vals
 # by using them several times.
 for j in range(4):
     for i in range(count):
-        print '  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i)
-    print ''
+        print('  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i))
+    print('')
 
 # Copy the incoming argument, which we expect to be spilled, to the frame
 # index for the alloca area.  Also throw in a volatile store, so that this
 # block cannot be reordered with the surrounding code.
-print '  %cond = icmp eq i64 %val0, %val1'
-print '  br i1 %cond, label %skip, label %fallthru'
-print ''
-print 'fallthru:'
-print '  store i64 %foo, i64 *%area'
-print '  store volatile i64 %val0, i64 *%ptr0'
-print '  br label %skip'
-print ''
-print 'skip:'
+print('  %cond = icmp eq i64 %val0, %val1')
+print('  br i1 %cond, label %skip, label %fallthru')
+print('')
+print('fallthru:')
+print('  store i64 %foo, i64 *%area')
+print('  store volatile i64 %val0, i64 *%ptr0')
+print('  br label %skip')
+print('')
+print('skip:')
 
 # Use each %val a few more times to emphasise the point, and to make sure
 # that they are live across the store of %foo.
 for j in range(4):
     for i in range(count):
-        print '  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i)
-    print ''
+        print('  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i))
+    print('')
 
-print '  call void @bar(i64 *%area)'
-print '  ret i64 0'
-print '}'
+print('  call void @bar(i64 *%area)')
+print('  ret i64 0')
+print('}')
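One wrinkle the `print_function` import does not cover: `/` between integers is integer division on Python 2 but true division on Python 3. In the `%d` format arguments above a float would merely be truncated, but `', i64' * args` in spill-02.py raises TypeError when `args` is a float, so the divisions above are written with `//`, which floors on both versions. A short demonstration of the pitfall (editorial, not part of the patch):

```python
from __future__ import print_function

args = (8168 - 160) / 8 + (5 - 1)     # Python 2: 1005 (int); Python 3: 1005.0 (float)
try:
    print(len(', i64' * args))        # sequence repetition requires an int count
except TypeError as e:
    print('Python 3 rejects the float count:', e)

args = (8168 - 160) // 8 + (5 - 1)    # floor division yields an int on both versions
print((', i64' * args).count('i64'))  # 1005 everywhere
```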
diff --git a/test/CodeGen/SystemZ/cond-move-05.mir b/test/CodeGen/SystemZ/cond-move-05.mir
index 95507c9..de33f49 100644
--- a/test/CodeGen/SystemZ/cond-move-05.mir
+++ b/test/CodeGen/SystemZ/cond-move-05.mir
@@ -38,7 +38,7 @@
   
   !llvm.ident = !{!0}
   
-  !0 = !{!"clang version 6.0.0 (http://llvm.org/git/clang.git d80246686d6ad2a749d11470afbbd1bbe4d1b561) (http://llvm.org/git/llvm.git 1693ef38604a3ad9c3da656d2b58a77312207b01)"}
+  !0 = !{!"clang version 6.0.0"}
 
 ...
 
diff --git a/test/CodeGen/SystemZ/frame-07.ll b/test/CodeGen/SystemZ/frame-07.ll
index b042d9a..c5c36a6 100644
--- a/test/CodeGen/SystemZ/frame-07.ll
+++ b/test/CodeGen/SystemZ/frame-07.ll
@@ -1,7 +1,7 @@
 ; Test the saving and restoring of FPRs in large frames.
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck -check-prefix=CHECK-NOFP %s
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -frame-pointer=all | FileCheck -check-prefix=CHECK-FP %s
 
 ; Test a frame size that requires some FPRs to be saved and loaded using
 ; the 20-bit STDY and LDY while others can use the 12-bit STD and LD.
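The RUN-line change above recurs through the rest of this patch: the old `-disable-fp-elim` flag is replaced by the `-frame-pointer` option, with `-disable-fp-elim` becoming `-frame-pointer=all` and `-disable-fp-elim=false` becoming `-frame-pointer=none` (see the frame-access.ll hunk below). A sketch of the rewrite expressed as a script, assuming only the two mappings visible in the hunks; the file-handling scaffolding is illustrative:

```python
from __future__ import print_function

import re
import sys

# Order matters: the '=false' form must be rewritten before the bare flag.
REWRITES = [
    (re.compile(r'-disable-fp-elim=false\b'), '-frame-pointer=none'),
    (re.compile(r'-disable-fp-elim\b'), '-frame-pointer=all'),
]

def migrate(text):
    for pattern, replacement in REWRITES:
        text = pattern.sub(replacement, text)
    return text

if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path) as f:
            src = f.read()
        with open(path, 'w') as f:
            f.write(migrate(src))
```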
diff --git a/test/CodeGen/SystemZ/frame-09.ll b/test/CodeGen/SystemZ/frame-09.ll
index d6ebc0d..8dbd47a 100644
--- a/test/CodeGen/SystemZ/frame-09.ll
+++ b/test/CodeGen/SystemZ/frame-09.ll
@@ -1,6 +1,6 @@
 ; Test the handling of the frame pointer (%r11).
 ;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -frame-pointer=all | FileCheck %s
 
 ; We should always initialise %r11 when FP elimination is disabled.
 ; We don't need to allocate any more than the caller-provided 160-byte
diff --git a/test/CodeGen/SystemZ/frame-13.ll b/test/CodeGen/SystemZ/frame-13.ll
index 15503fd..1fb840b 100644
--- a/test/CodeGen/SystemZ/frame-13.ll
+++ b/test/CodeGen/SystemZ/frame-13.ll
@@ -4,7 +4,7 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | \
 ; RUN:   FileCheck -check-prefix=CHECK-NOFP %s
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -disable-fp-elim | \
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -frame-pointer=all | \
 ; RUN:   FileCheck -check-prefix=CHECK-FP %s
 
 ; This file tests what happens when a displacement is converted from
diff --git a/test/CodeGen/SystemZ/frame-14.ll b/test/CodeGen/SystemZ/frame-14.ll
index d6235d1..e707312 100644
--- a/test/CodeGen/SystemZ/frame-14.ll
+++ b/test/CodeGen/SystemZ/frame-14.ll
@@ -5,7 +5,7 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | \
 ; RUN:   FileCheck -check-prefix=CHECK-NOFP %s
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -disable-fp-elim | \
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -frame-pointer=all | \
 ; RUN:   FileCheck -check-prefix=CHECK-FP %s
 ;
 ; This file tests what happens when a displacement is converted from
diff --git a/test/CodeGen/SystemZ/frame-15.ll b/test/CodeGen/SystemZ/frame-15.ll
index e86ef7b..0595b54 100644
--- a/test/CodeGen/SystemZ/frame-15.ll
+++ b/test/CodeGen/SystemZ/frame-15.ll
@@ -4,7 +4,7 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | \
 ; RUN:   FileCheck -check-prefix=CHECK-NOFP %s
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -disable-fp-elim | \
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -frame-pointer=all | \
 ; RUN:   FileCheck -check-prefix=CHECK-FP %s
 
 declare void @foo(float *%ptr1, float *%ptr2)
diff --git a/test/CodeGen/SystemZ/frame-16.ll b/test/CodeGen/SystemZ/frame-16.ll
index c99c2a3..ae8a041 100644
--- a/test/CodeGen/SystemZ/frame-16.ll
+++ b/test/CodeGen/SystemZ/frame-16.ll
@@ -5,7 +5,7 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | \
 ; RUN:   FileCheck -check-prefix=CHECK-NOFP %s
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -disable-fp-elim | \
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -frame-pointer=all | \
 ; RUN:   FileCheck -check-prefix=CHECK-FP %s
 
 ; This file tests what happens when a displacement is converted from
diff --git a/test/CodeGen/SystemZ/knownbits.ll b/test/CodeGen/SystemZ/knownbits.ll
index f0f8465..f23ffc5 100644
--- a/test/CodeGen/SystemZ/knownbits.ll
+++ b/test/CodeGen/SystemZ/knownbits.ll
@@ -35,16 +35,16 @@
 ; CHECK-LABEL: f1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    clhhsi 0, 0
-; CHECK-NEXT:    lhi %r1, 0
-; CHECK-NEXT:    lochie %r1, 1
-; CHECK-NEXT:    lghi %r2, 1
-; CHECK-NEXT:    vlvgp %v0, %r1, %r2
+; CHECK-NEXT:    lhi %r0, 0
+; CHECK-NEXT:    lochie %r0, 1
+; CHECK-NEXT:    lghi %r1, 1
+; CHECK-NEXT:    vlvgp %v0, %r0, %r1
 ; CHECK-NEXT:    vrepig %v1, 1
 ; CHECK-NEXT:    vx %v0, %v0, %v1
-; CHECK-NEXT:    vlgvf %r1, %v0, 1
-; CHECK-NEXT:    lhi %r0, 0
-; CHECK-NEXT:    cijlh %r1, 0, .LBB1_3
+; CHECK-NEXT:    vlgvf %r0, %v0, 1
+; CHECK-NEXT:    cijlh %r0, 0, .LBB1_3
 ; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vlgvf %r0, %v0, 3
 ; CHECK-NEXT:    cijlh %r0, 0, .LBB1_3
 ; CHECK-NEXT:  # %bb.2:
 ; CHECK-NEXT:  .LBB1_3:
@@ -54,8 +54,9 @@
   %4 = insertelement <2 x i1> %3, i1 true, i32 1
   %5 = xor <2 x i1> %4, <i1 true, i1 true>
   %6 = extractelement <2 x i1> %5, i32 0
-  %7 = or i1 %6, undef
-  br i1 %7, label %9, label %8
+  %7 = extractelement <2 x i1> %5, i32 1
+  %8 = or i1 %6, %7
+  br i1 %8, label %10, label %9
 
 ; <label>:9:                                      ; preds = %0
   unreachable
diff --git a/test/CodeGen/SystemZ/subregliveness-01.ll b/test/CodeGen/SystemZ/subregliveness-01.ll
index 8863e22..5813fd3 100644
--- a/test/CodeGen/SystemZ/subregliveness-01.ll
+++ b/test/CodeGen/SystemZ/subregliveness-01.ll
@@ -36,7 +36,7 @@
 
 !llvm.ident = !{!0}
 
-!0 = !{!"clang version 8.0.0 (http://llvm.org/git/clang.git c0a5e830f198cf42d29f72f1ec06fbf4c5210e2c) (http://llvm.org/git/llvm.git ffc8c538b70b678031b8617f61f83ee120bcb884)"}
+!0 = !{!"clang version 8.0.0"}
 !1 = !{!2}
 !2 = distinct !{!2, !3, !"func_1: %agg.result"}
 !3 = distinct !{!3, !"func_1"}
diff --git a/test/CodeGen/SystemZ/subregliveness-06.mir b/test/CodeGen/SystemZ/subregliveness-06.mir
index fcb1d34..01e3341 100644
--- a/test/CodeGen/SystemZ/subregliveness-06.mir
+++ b/test/CodeGen/SystemZ/subregliveness-06.mir
@@ -128,7 +128,7 @@
   
   !llvm.ident = !{!0}
   
-  !0 = !{!"clang version 8.0.0 (http://llvm.org/git/clang.git c0a5e830f198cf42d29f72f1ec06fbf4c5210e2c) (http://llvm.org/git/llvm.git ffc8c538b70b678031b8617f61f83ee120bcb884)"}
+  !0 = !{!"clang version 8.0.0"}
   !1 = !{!2, !2, i64 0}
   !2 = !{!"omnipotent char", !3, i64 0}
   !3 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/SystemZ/vec-conv-02.ll b/test/CodeGen/SystemZ/vec-conv-02.ll
index ab84389..d4c0f72 100644
--- a/test/CodeGen/SystemZ/vec-conv-02.ll
+++ b/test/CodeGen/SystemZ/vec-conv-02.ll
@@ -15,19 +15,30 @@
 ; Test conversion of an f64 in a vector register to an f32.
 define float @f2(<2 x double> %vec) {
 ; CHECK-LABEL: f2:
-; CHECK: wledb %f0, %v24
+; CHECK: wledb %f0, %v24, 0, 0
 ; CHECK: br %r14
   %scalar = extractelement <2 x double> %vec, i32 0
   %ret = fptrunc double %scalar to float
   ret float %ret
 }
 
-; Test conversion of an f32 in a vector register to an f64.
-define double @f3(<4 x float> %vec) {
+; Test cases where even elements of a v4f32 are converted to f64s.
+define <2 x double> @f3(<4 x float> %vec) {
 ; CHECK-LABEL: f3:
+; CHECK: vldeb %v24, {{%v[0-9]+}}
+; CHECK: br %r14
+  %shuffle = shufflevector <4 x float> %vec, <4 x float> undef, <2 x i32> <i32 0, i32 2>
+  %res = fpext <2 x float> %shuffle to <2 x double>
+  ret <2 x double> %res
+}
+
+; Test conversion of an f32 in a vector register to an f64.
+define double @f4(<4 x float> %vec) {
+; CHECK-LABEL: f4:
 ; CHECK: wldeb %f0, %v24
 ; CHECK: br %r14
   %scalar = extractelement <4 x float> %vec, i32 0
   %ret = fpext float %scalar to double
   ret double %ret
 }
+
diff --git a/test/CodeGen/SystemZ/vec-move-08.ll b/test/CodeGen/SystemZ/vec-move-08.ll
index 5396a1e..7c4a16c 100644
--- a/test/CodeGen/SystemZ/vec-move-08.ll
+++ b/test/CodeGen/SystemZ/vec-move-08.ll
@@ -442,3 +442,35 @@
   %ret = insertelement <2 x double> %val, double %element, i32 1
   ret <2 x double> %ret
 }
+
+; Test a v4i32 gather where the load is chained.
+define void @f40(<4 x i32> %val, <4 x i32> %index, i64 %base, <4 x i32> *%res) {
+; CHECK-LABEL: f40:
+; CHECK: vgef %v24, 0(%v26,%r2), 1
+; CHECK: vst %v24, 0(%r3)
+; CHECK: br %r14
+  %elem = extractelement <4 x i32> %index, i32 1
+  %ext = zext i32 %elem to i64
+  %add = add i64 %base, %ext
+  %ptr = inttoptr i64 %add to i32 *
+  %element = load i32, i32 *%ptr
+  %ret = insertelement <4 x i32> %val, i32 %element, i32 1
+  store <4 x i32> %ret, <4 x i32> *%res
+  ret void
+}
+
+; Test a v2i64 gather where the load is chained.
+define void @f41(<2 x i64> %val, <2 x i64> %index, i64 %base, <2 x i64> *%res) {
+; CHECK-LABEL: f41:
+; CHECK: vgeg %v24, 0(%v26,%r2), 1
+; CHECK: vst %v24, 0(%r3)
+; CHECK: br %r14
+  %elem = extractelement <2 x i64> %index, i32 1
+  %add = add i64 %base, %elem
+  %ptr = inttoptr i64 %add to i64 *
+  %element = load i64, i64 *%ptr
+  %ret = insertelement <2 x i64> %val, i64 %element, i32 1
+  store <2 x i64> %ret, <2 x i64> *%res
+  ret void
+}
+
diff --git a/test/CodeGen/SystemZ/vec-move-14.ll b/test/CodeGen/SystemZ/vec-move-14.ll
index e41eb9d..e6415e8 100644
--- a/test/CodeGen/SystemZ/vec-move-14.ll
+++ b/test/CodeGen/SystemZ/vec-move-14.ll
@@ -94,3 +94,45 @@
   %ret = insertelement <2 x double> zeroinitializer, double %val, i32 0
   ret <2 x double> %ret
 }
+
+; Test VLLEZF with a float when the result is stored to memory.
+define void @f10(float *%ptr, <4 x float> *%res) {
+; CHECK-LABEL: f10:
+; CHECK: vllezf [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: vst [[REG]], 0(%r3)
+; CHECK: br %r14
+  %val = load float, float *%ptr
+  %ret = insertelement <4 x float> zeroinitializer, float %val, i32 1
+  store <4 x float> %ret, <4 x float> *%res
+  ret void
+}
+
+; Test VLLEZG with a double when the result is stored to memory.
+define void @f11(double *%ptr, <2 x double> *%res) {
+; CHECK-LABEL: f11:
+; CHECK: vllezg [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: vst [[REG]], 0(%r3)
+; CHECK: br %r14
+  %val = load double, double *%ptr
+  %ret = insertelement <2 x double> zeroinitializer, double %val, i32 0
+  store <2 x double> %ret, <2 x double> *%res
+  ret void
+}
+
+; Test VLLEZG when the zeroinitializer is shared.
+define void @f12(i64 *%ptr, <2 x i64> *%res) {
+; CHECK-LABEL: f12:
+; CHECK: vllezg [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: vst [[REG]], 0(%r3)
+; CHECK: vllezg [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK: vst [[REG1]], 0(%r3)
+; CHECK: br %r14
+  %val = load volatile i64, i64 *%ptr
+  %ret = insertelement <2 x i64> zeroinitializer, i64 %val, i32 0
+  store volatile <2 x i64> %ret, <2 x i64> *%res
+  %val1 = load volatile i64, i64 *%ptr
+  %ret1 = insertelement <2 x i64> zeroinitializer, i64 %val1, i32 0
+  store volatile <2 x i64> %ret1, <2 x i64> *%res
+  ret void
+}
+
diff --git a/test/CodeGen/SystemZ/vec-move-18.ll b/test/CodeGen/SystemZ/vec-move-18.ll
index 5d3d09d..9bb6193 100644
--- a/test/CodeGen/SystemZ/vec-move-18.ll
+++ b/test/CodeGen/SystemZ/vec-move-18.ll
@@ -22,3 +22,15 @@
   ret <4 x float> %ret
 }
 
+; Test VLLEZLF with a float when the result is stored to memory.
+define void @f3(float *%ptr, <4 x float> *%res) {
+; CHECK-LABEL: f3:
+; CHECK: vllezlf [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: vst [[REG]], 0(%r3)
+; CHECK: br %r14
+  %val = load float, float *%ptr
+  %ret = insertelement <4 x float> zeroinitializer, float %val, i32 0
+  store <4 x float> %ret, <4 x float> *%res
+  ret void
+}
+
diff --git a/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll b/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll
index aaca3a7..f0de64d 100644
--- a/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll
+++ b/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=thumbv6-apple-darwin -relocation-model=pic -frame-pointer=all
 
 	%struct.LinkList = type { i32, %struct.LinkList* }
 	%struct.List = type { i32, i32* }
diff --git a/test/CodeGen/Thumb/2009-08-20-ISelBug.ll b/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
index 86d7023..5c4b34d 100644
--- a/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
+++ b/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin -relocation-model=pic -disable-fp-elim -mattr=+v6 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv6-apple-darwin -relocation-model=pic -frame-pointer=all -mattr=+v6 -verify-machineinstrs | FileCheck %s
 ; rdar://7157006
 
 %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
diff --git a/test/CodeGen/Thumb/frame-access.ll b/test/CodeGen/Thumb/frame-access.ll
index 7fae447..9cbed5e 100644
--- a/test/CodeGen/Thumb/frame-access.ll
+++ b/test/CodeGen/Thumb/frame-access.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv6m-eabi -disable-fp-elim=false %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-eabi -frame-pointer=none %s -o - | FileCheck %s
 
 ; struct S { int x[128]; } s;
 ; int f(int *, int, int, int, struct S);
diff --git a/test/CodeGen/Thumb/frame_thumb.ll b/test/CodeGen/Thumb/frame_thumb.ll
index 6cc4dd1..3cff8e2 100644
--- a/test/CodeGen/Thumb/frame_thumb.ll
+++ b/test/CodeGen/Thumb/frame_thumb.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=thumb-apple-darwin \
-; RUN:     -disable-fp-elim | not grep "r11"
+; RUN:     -frame-pointer=all | not grep "r11"
 ; RUN: llc < %s -mtriple=thumb-linux-gnueabi \
-; RUN:     -disable-fp-elim | not grep "r11"
+; RUN:     -frame-pointer=all | not grep "r11"
 
 define i32 @f() {
 entry:
diff --git a/test/CodeGen/Thumb/push.ll b/test/CodeGen/Thumb/push.ll
index 4f4ffed..74629ef 100644
--- a/test/CodeGen/Thumb/push.ll
+++ b/test/CodeGen/Thumb/push.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumb-apple-darwin -frame-pointer=all | FileCheck %s
 ; rdar://7268481
 
 define void @t() nounwind {
diff --git a/test/CodeGen/Thumb/shift-and.ll b/test/CodeGen/Thumb/shift-and.ll
index 01f6b84..3981d2c 100644
--- a/test/CodeGen/Thumb/shift-and.ll
+++ b/test/CodeGen/Thumb/shift-and.ll
@@ -45,9 +45,8 @@
 define i32 @test4(i32 %x) {
 ; CHECK-LABEL: test4:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    lsls r0, r0, #4
-; CHECK-NEXT:    movs r1, #112
-; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    lsrs r0, r0, #3
+; CHECK-NEXT:    lsls r0, r0, #7
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = shl i32 %x, 4
@@ -84,9 +83,8 @@
 define i32 @test7(i32 %x) {
 ; CHECK-LABEL: test7:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    lsrs r1, r0, #29
-; CHECK-NEXT:    movs r0, #4
-; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    lsrs r0, r0, #31
+; CHECK-NEXT:    lsls r0, r0, #2
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = lshr i32 %x, 29
@@ -110,9 +108,8 @@
 define i32 @test9(i32 %x) {
 ; CHECK-LABEL: test9:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    lsrs r0, r0, #2
-; CHECK-NEXT:    movs r1, #1
-; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    lsrs r0, r0, #3
+; CHECK-NEXT:    lsls r0, r0, #1
 ; CHECK-NEXT:    bx lr
 entry:
   %and = lshr i32 %x, 2
@@ -131,3 +128,63 @@
   %shr = and i32 %0, 255
   ret i32 %shr
 }
+
+define i32 @test11(i32 %x) {
+; CHECK-LABEL: test11:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsrs r0, r0, #24
+; CHECK-NEXT:    lsls r0, r0, #2
+; CHECK-NEXT:    bx lr
+entry:
+  %shl = lshr i32 %x, 22
+  %and = and i32 %shl, 1020
+  ret i32 %and
+}
+
+define i32 @test12(i32 %x) {
+; CHECK-LABEL: test12:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsrs r0, r0, #3
+; CHECK-NEXT:    lsls r0, r0, #4
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shl i32 %x, 1
+  %shr = and i32 %0, -16
+  ret i32 %shr
+}
+
+define i32 @test13(i32 %x) {
+; CHECK-LABEL: test13:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsrs r0, r0, #3
+; CHECK-NEXT:    lsls r0, r0, #4
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = lshr i32 %x, 3
+  %shl = shl i32 %shr, 4
+  ret i32 %shl
+}
+
+define i32 @test14(i32 %x) {
+; CHECK-LABEL: test14:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsrs r0, r0, #6
+; CHECK-NEXT:    lsls r0, r0, #10
+; CHECK-NEXT:    bx lr
+entry:
+  %shl = shl i32 %x, 4
+  %and = and i32 %shl, -1024
+  ret i32 %and
+}
+
+define i32 @test15(i32 %x) {
+; CHECK-LABEL: test15:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsrs r0, r0, #4
+; CHECK-NEXT:    lsls r0, r0, #3
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = lshr i32 %x, 4
+  %shl = shl i32 %shr, 3
+  ret i32 %shl
+}
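The new CHECK lines in shift-and.ll assert a two-shift lowering in place of the old shift-plus-bic/ands sequences. For test4, the equivalence of the two forms can be checked directly; the mask is taken from the old assembly (`movs r1, #112` / `bics`), and the script is an editorial sanity check, not part of the patch:

```python
from __future__ import print_function

import random

MASK = 0xffffffff

def old_form(x):   # lsls r0, #4; movs r1, #112; bics r0, r1
    return ((x << 4) & MASK) & ~112 & MASK

def new_form(x):   # lsrs r0, #3; lsls r0, #7
    return (((x & MASK) >> 3) << 7) & MASK

for x in [0, 1, 0x7, 0x70, 0xffffffff] + [random.getrandbits(32) for _ in range(10000)]:
    assert old_form(x) == new_form(x)
print('equivalent')
```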
diff --git a/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll b/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
index 77d2991..1b4dbb3 100644
--- a/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
+++ b/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all
 
 	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 	%struct.JHUFF_TBL = type { [17 x i8], [256 x i8], i32 }
diff --git a/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll b/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
index 4a99e28..1eafa5b 100644
--- a/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
+++ b/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all -arm-atomic-cfg-tidy=0 | FileCheck %s
 
 @csize = external global [100 x [20 x [4 x i8]]]		; <[100 x [20 x [4 x i8]]]*> [#uses=1]
 @vsize = external global [100 x [20 x [4 x i8]]]		; <[100 x [20 x [4 x i8]]]*> [#uses=1]
diff --git a/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll b/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
index 55b0921..85d8d5c5 100644
--- a/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
+++ b/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all
 
 	%0 = type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16)*, i32 }		; type %0
 	%1 = type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16*)*, i32 }		; type %1
diff --git a/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll b/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
index ccec979..1be2c96 100644
--- a/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
+++ b/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all
 
 	%0 = type { %struct.GAP }		; type %0
 	%1 = type { i16, i8, i8 }		; type %1
diff --git a/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll b/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
index 89f47d9..5656d78 100644
--- a/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
+++ b/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim -O3
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all -O3
 
 	%0 = type { i16, i8, i8 }		; type %0
 	%1 = type { [2 x i32], [2 x i32] }		; type %1
diff --git a/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll b/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll
index 04dcb9d..ff6375d 100644
--- a/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll
+++ b/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
+; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -frame-pointer=all -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
 
 ; ModuleID = '<stdin>'
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll b/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
index 5b71076..ee1f03e 100644
--- a/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
+++ b/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
@@ -1,4 +1,4 @@
-; RUN: llc -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 < %s | FileCheck %s
+; RUN: llc -asm-verbose=false -O3 -relocation-model=pic -frame-pointer=all -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 < %s | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
 target triple = "thumbv7-apple-darwin9"
diff --git a/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll b/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll
index 4588018..7363a4a 100644
--- a/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll
+++ b/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8
 
 define void @get_initial_mb16x16_cost() nounwind {
 entry:
diff --git a/test/CodeGen/Thumb2/2010-02-24-BigStack.ll b/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
index 2b53747..86e434a 100644
--- a/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
+++ b/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -mattr=+vfp2
+; RUN: llc < %s -O0 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 -mattr=+vfp2
 ; This test creates a big stack frame without spilling any callee-saved registers.
 ; Make sure the whole stack frame is addressable without scavenger crashes.
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
diff --git a/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll b/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
index eba2e58..4203916 100644
--- a/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
+++ b/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8
 ; rdar://8110842
 
 declare arm_apcscc i32 @__maskrune(i32, i32)
diff --git a/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll b/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
index 7c8802d..854b425 100644
--- a/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
+++ b/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8
 ; RUN: llc < %s -mtriple=thumbv8-none-linux-gnueabi
 
 %struct.LIST_NODE.0.16 = type { %struct.LIST_NODE.0.16*, i8* }
diff --git a/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll b/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll
index e5be8df..99daf7e 100644
--- a/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll
+++ b/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 | FileCheck %s
 ; rdar://10676853
 
 %struct.Dict_node_struct = type { i8*, %struct.Word_file_struct*, %struct.Exp_struct*, %struct.Dict_node_struct*, %struct.Dict_node_struct* }
diff --git a/test/CodeGen/Thumb2/frameless.ll b/test/CodeGen/Thumb2/frameless.ll
index fa8d5d8..01e0414 100644
--- a/test/CodeGen/Thumb2/frameless.ll
+++ b/test/CodeGen/Thumb2/frameless.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -disable-fp-elim | not grep mov
-; RUN: llc < %s -mtriple=thumbv7-linux -disable-fp-elim | not grep mov
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -frame-pointer=all | not grep mov
+; RUN: llc < %s -mtriple=thumbv7-linux -frame-pointer=all | not grep mov
 
 define void @t() nounwind readnone {
   ret void
diff --git a/test/CodeGen/Thumb2/frameless2.ll b/test/CodeGen/Thumb2/frameless2.ll
index 3743354..c8aa82f 100644
--- a/test/CodeGen/Thumb2/frameless2.ll
+++ b/test/CodeGen/Thumb2/frameless2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -disable-fp-elim | not grep r7
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -frame-pointer=all | not grep r7
 
 %struct.noise3 = type { [3 x [17 x i32]] }
 %struct.noiseguard = type { i32, i32, i32 }
diff --git a/test/CodeGen/Thumb2/ldr-str-imm12.ll b/test/CodeGen/Thumb2/ldr-str-imm12.ll
index c6d00d4..c1f7de3 100644
--- a/test/CodeGen/Thumb2/ldr-str-imm12.ll
+++ b/test/CodeGen/Thumb2/ldr-str-imm12.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all | FileCheck %s
 ; rdar://7352504
 ; Make sure we use "str r9, [sp, #+28]" instead of "sub.w r4, r7, #256" followed by "str r9, [r4, #-32]".
 
diff --git a/test/CodeGen/Thumb2/machine-licm.ll b/test/CodeGen/Thumb2/machine-licm.ll
index 1f63875..d2438ac 100644
--- a/test/CodeGen/Thumb2/machine-licm.ll
+++ b/test/CodeGen/Thumb2/machine-licm.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=dynamic-no-pic -disable-fp-elim | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s --check-prefix=PIC
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=dynamic-no-pic -frame-pointer=all | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all | FileCheck %s --check-prefix=PIC
 ; rdar://7353541
 ; rdar://7354376
 
diff --git a/test/CodeGen/Thumb2/t2-teq-reduce.mir b/test/CodeGen/Thumb2/t2-teq-reduce.mir
new file mode 100644
index 0000000..d85d233a
--- /dev/null
+++ b/test/CodeGen/Thumb2/t2-teq-reduce.mir
@@ -0,0 +1,267 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=t2-reduce-size %s -o - | FileCheck %s
+
+--- |
+  target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+  target triple = "thumbv8m.main"
+
+  %list_head = type { %list_head*, %list_data* }
+  %list_data = type { i16, i16 }
+
+  define %list_head* @reg_reg_it_block(%list_head* %a, i16 zeroext %b) {
+  entry:
+    br label %while.begin
+
+  while.begin:                                      ; preds = %while.body.end, %entry
+    %list.addr.i = phi %list_head* [ %ld.5, %while.body.end ], [ %a, %entry ]
+    %info.i = getelementptr inbounds %list_head, %list_head* %list.addr.i, i32 0, i32 1
+    %ld.0 = load %list_data*, %list_data** %info.i, align 4
+    %data16.i1 = bitcast %list_data* %ld.0 to i16*
+    %ld.1 = load i16, i16* %data16.i1, align 2
+    %xor.1 = xor i16 %ld.1, %b
+    %cmp.i = icmp eq i16 %xor.1, 0
+    br i1 %cmp.i, label %exit, label %while.body.a
+
+  while.body.a:                                     ; preds = %while.begin
+    %next.i2 = bitcast %list_head* %list.addr.i to %list_head**
+    %ld.2 = load %list_head*, %list_head** %next.i2, align 4
+    %cmp.i.1 = icmp eq %list_head* %ld.2, null
+    br i1 %cmp.i.1, label %exit, label %it.block
+
+  it.block:                                         ; preds = %while.body.a
+    %info.i.1 = getelementptr inbounds %list_head, %list_head* %ld.2, i32 0, i32 1
+    %ld.3 = load %list_data*, %list_data** %info.i.1, align 4
+    %data16.i.13 = bitcast %list_data* %ld.3 to i16*
+    %ld.4 = load i16, i16* %data16.i.13, align 2
+    %xor.2 = xor i16 %ld.4, %b
+    %cmp.i.2 = icmp eq i16 %xor.2, 0
+    br i1 %cmp.i.2, label %exit, label %while.body.end
+
+  while.body.end:                                   ; preds = %it.block
+    %next.i.14 = bitcast %list_head* %ld.2 to %list_head**
+    %ld.5 = load %list_head*, %list_head** %next.i.14, align 4
+    %cmp.i.3 = icmp eq %list_head* %ld.5, null
+    br i1 %cmp.i.3, label %exit, label %while.begin
+
+  exit:                                             ; preds = %while.body.end, %it.block, %while.body.a, %while.begin
+    %res = phi %list_head* [ %list.addr.i, %while.begin ], [ %ld.2, %while.body.a ], [ %ld.2, %it.block ], [ %ld.5, %while.body.end ]
+    ret %list_head* %res
+  }
+
+  define i16 @op_not_killed(%list_head* %a, i16 zeroext %b) {
+  entry:
+    br label %while.begin
+
+  while.begin:                                      ; preds = %while.body.end, %entry
+    %list.addr.i = phi %list_head* [ %ld.5, %while.body.end ], [ %a, %entry ]
+    %info.i = getelementptr inbounds %list_head, %list_head* %list.addr.i, i32 0, i32 1
+    %ld.0 = load %list_data*, %list_data** %info.i, align 4
+    %data16.i1 = bitcast %list_data* %ld.0 to i16*
+    %ld.1 = load i16, i16* %data16.i1, align 2
+    %xor.1 = xor i16 %ld.1, %b
+    %cmp.i = icmp eq i16 %xor.1, 0
+    br i1 %cmp.i, label %exit, label %while.body.a
+
+  while.body.a:                                     ; preds = %while.begin
+    %next.i2 = bitcast %list_head* %list.addr.i to %list_head**
+    %ld.2 = load %list_head*, %list_head** %next.i2, align 4
+    %cmp.i.1 = icmp eq %list_head* %ld.2, null
+    br i1 %cmp.i.1, label %exit, label %it.block
+
+  it.block:                                         ; preds = %while.body.a
+    %info.i.1 = getelementptr inbounds %list_head, %list_head* %ld.2, i32 0, i32 1
+    %ld.3 = load %list_data*, %list_data** %info.i.1, align 4
+    %data16.i.13 = bitcast %list_data* %ld.3 to i16*
+    %ld.4 = load i16, i16* %data16.i.13, align 2
+    %xor.2 = xor i16 %ld.4, %b
+    %cmp.i.2 = icmp eq i16 %xor.2, 0
+    br i1 %cmp.i.2, label %exit, label %while.body.end
+
+  while.body.end:                                   ; preds = %it.block
+    %next.i.14 = bitcast %list_head* %ld.2 to %list_head**
+    %ld.5 = load %list_head*, %list_head** %next.i.14, align 4
+    %cmp.i.3 = icmp eq %list_head* %ld.5, null
+    br i1 %cmp.i.3, label %exit, label %while.begin
+
+  exit:                                             ; preds = %while.body.end, %it.block, %while.body.a, %while.begin
+    %res = phi i16 [ %ld.1, %while.begin ], [ %ld.1, %while.body.a ], [ %ld.4, %it.block ], [ %ld.4, %while.body.end ]
+    ret i16 %res
+  }
+
+...
+---
+name:            reg_reg_it_block
+tracksRegLiveness: true
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+body:             |
+  ; CHECK-LABEL: name: reg_reg_it_block
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.2(0x80000000)
+  ; CHECK:   liveins: $r0, $r1
+  ; CHECK:   t2B %bb.2, 14, $noreg
+  ; CHECK: bb.1.while.body.end:
+  ; CHECK:   successors: %bb.2(0x80000000)
+  ; CHECK:   liveins: $r0, $r1
+  ; CHECK:   renamable $r0 = tLDRi killed renamable $r0, 0, 14, $noreg :: (load 4 from %ir.next.i.14)
+  ; CHECK:   tCMPi8 renamable $r0, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   BUNDLE implicit-def dead $itstate, implicit killed $cpsr, implicit $r0 {
+  ; CHECK:     t2IT 0, 8, implicit-def $itstate
+  ; CHECK:     tBX_RET 0, killed $cpsr, implicit $r0, implicit internal killed $itstate
+  ; CHECK:   }
+  ; CHECK: bb.2.while.begin:
+  ; CHECK:   successors: %bb.4(0x04000000), %bb.3(0x7c000000)
+  ; CHECK:   liveins: $r0, $r1
+  ; CHECK:   renamable $r2 = tLDRi renamable $r0, 1, 14, $noreg :: (load 4 from %ir.info.i)
+  ; CHECK:   renamable $r2 = tLDRHi killed renamable $r2, 0, 14, $noreg :: (load 2 from %ir.data16.i1)
+  ; CHECK:   dead renamable $r2, $cpsr = tEOR killed renamable $r2, renamable $r1, 14, $noreg
+  ; CHECK:   t2Bcc %bb.4, 0, killed $cpsr
+  ; CHECK: bb.3.while.body.a:
+  ; CHECK:   successors: %bb.4(0x4207fef8), %bb.1(0x3df80108)
+  ; CHECK:   liveins: $r0, $r1
+  ; CHECK:   renamable $r0 = tLDRi killed renamable $r0, 0, 14, $noreg :: (load 4 from %ir.next.i2)
+  ; CHECK:   tCMPi8 renamable $r0, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   BUNDLE implicit-def dead $itstate, implicit-def dead $r2, implicit-def $cpsr, implicit $r0, implicit killed $cpsr, implicit $r1 {
+  ; CHECK:     t2IT 1, 30, implicit-def $itstate
+  ; CHECK:     renamable $r2 = tLDRi renamable $r0, 1, 1, $cpsr, implicit internal $itstate :: (load 4 from %ir.info.i.1)
+  ; CHECK:     renamable $r2 = tLDRHi internal killed renamable $r2, 0, 1, $cpsr, implicit internal killed $r2, implicit internal $itstate :: (load 2 from %ir.data16.i.13)
+  ; CHECK:     t2TEQrr internal killed renamable $r2, renamable $r1, 1, killed $cpsr, implicit-def $cpsr, implicit internal killed $itstate
+  ; CHECK:   }
+  ; CHECK:   t2Bcc %bb.1, 1, killed $cpsr
+  ; CHECK: bb.4.exit:
+  ; CHECK:   liveins: $r0
+  ; CHECK:   tBX_RET 14, $noreg, implicit killed $r0
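+  ; The CHECK lines above expect the loads and the t2TEQrr of while.body.a to
+  ; be folded into a single t2IT bundle; note that the teq result is dead and
+  ; $r0 is killed on return, which presumably is what makes the bundling legal.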
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1
+
+    t2B %bb.1, 14, $noreg
+
+  bb.3.while.body.end:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1
+
+    renamable $r0 = tLDRi killed renamable $r0, 0, 14, $noreg :: (load 4 from %ir.next.i.14)
+    tCMPi8 renamable $r0, 0, 14, $noreg, implicit-def $cpsr
+    BUNDLE implicit-def dead $itstate, implicit killed $cpsr, implicit $r0 {
+      t2IT 0, 8, implicit-def $itstate
+      tBX_RET 0, killed $cpsr, implicit $r0, implicit internal killed $itstate
+    }
+
+  bb.1.while.begin:
+    successors: %bb.4(0x04000000), %bb.2(0x7c000000)
+    liveins: $r0, $r1
+
+    renamable $r2 = tLDRi renamable $r0, 1, 14, $noreg :: (load 4 from %ir.info.i)
+    renamable $r2 = tLDRHi killed renamable $r2, 0, 14, $noreg :: (load 2 from %ir.data16.i1)
+    dead renamable $r2, $cpsr = tEOR killed renamable $r2, renamable $r1, 14, $noreg
+    t2Bcc %bb.4, 0, killed $cpsr
+
+  bb.2.while.body.a:
+    successors: %bb.4(0x80000000), %bb.3(0x78200000)
+    liveins: $r0, $r1
+
+    renamable $r0 = tLDRi killed renamable $r0, 0, 14, $noreg :: (load 4 from %ir.next.i2)
+    tCMPi8 renamable $r0, 0, 14, $noreg, implicit-def $cpsr
+    BUNDLE implicit-def dead $itstate, implicit-def dead $r2, implicit-def $cpsr, implicit $r0, implicit killed $cpsr, implicit $r1 {
+      t2IT 1, 30, implicit-def $itstate
+      renamable $r2 = tLDRi renamable $r0, 1, 1, $cpsr, implicit internal $itstate :: (load 4 from %ir.info.i.1)
+      renamable $r2 = tLDRHi internal killed renamable $r2, 0, 1, $cpsr, implicit internal killed $r2, implicit internal $itstate :: (load 2 from %ir.data16.i.13)
+      t2TEQrr internal killed renamable $r2, renamable $r1, 1, killed $cpsr, implicit-def $cpsr, implicit internal killed $itstate
+    }
+    t2Bcc %bb.3, 1, killed $cpsr
+
+  bb.4.exit:
+    liveins: $r0
+
+    tBX_RET 14, $noreg, implicit killed $r0
+
+...
+---
+name:            op_not_killed
+tracksRegLiveness: true
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+body:             |
+  ; CHECK-LABEL: name: op_not_killed
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $r0, $r1
+  ; CHECK:   $r2 = tMOVr $r0, 14, $noreg
+  ; CHECK: bb.1.while.begin:
+  ; CHECK:   successors: %bb.5(0x04000000), %bb.2(0x7c000000)
+  ; CHECK:   liveins: $r1, $r2
+  ; CHECK:   renamable $r0 = tLDRi renamable $r2, 1, 14, $noreg :: (load 4 from %ir.info.i)
+  ; CHECK:   renamable $r0 = tLDRHi killed renamable $r0, 0, 14, $noreg :: (load 2 from %ir.data16.i1)
+  ; CHECK:   t2TEQrr renamable $r0, renamable $r1, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2Bcc %bb.5, 0, killed $cpsr
+  ; CHECK: bb.2.while.body.a:
+  ; CHECK:   successors: %bb.5(0x04000000), %bb.3(0x7c000000)
+  ; CHECK:   liveins: $r0, $r1, $r2
+  ; CHECK:   renamable $r2 = tLDRi killed renamable $r2, 0, 14, $noreg :: (load 4 from %ir.next.i2)
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2Bcc %bb.5, 0, killed $cpsr
+  ; CHECK: bb.3.it.block:
+  ; CHECK:   successors: %bb.5(0x04000000), %bb.4(0x7c000000)
+  ; CHECK:   liveins: $r1, $r2
+  ; CHECK:   renamable $r0 = tLDRi renamable $r2, 1, 14, $noreg :: (load 4 from %ir.info.i.1)
+  ; CHECK:   renamable $r0 = tLDRHi killed renamable $r0, 0, 14, $noreg :: (load 2 from %ir.data16.i.13)
+  ; CHECK:   t2TEQrr renamable $r0, renamable $r1, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2Bcc %bb.5, 0, killed $cpsr
+  ; CHECK: bb.4.while.body.end:
+  ; CHECK:   successors: %bb.5(0x04000000), %bb.1(0x7c000000)
+  ; CHECK:   liveins: $r0, $r1, $r2
+  ; CHECK:   renamable $r2 = tLDRi killed renamable $r2, 0, 14, $noreg :: (load 4 from %ir.next.i.14)
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2Bcc %bb.1, 1, killed $cpsr
+  ; CHECK: bb.5.exit:
+  ; CHECK:   liveins: $r0
+  ; CHECK:   tBX_RET 14, $noreg, implicit $r0
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1
+
+    $r2 = tMOVr $r0, 14, $noreg
+
+  bb.1.while.begin:
+    successors: %bb.5(0x04000000), %bb.2(0x7c000000)
+    liveins: $r1, $r2
+
+    renamable $r0 = t2LDRi12 renamable $r2, 4, 14, $noreg :: (load 4 from %ir.info.i)
+    renamable $r0 = t2LDRHi12 killed renamable $r0, 0, 14, $noreg :: (load 2 from %ir.data16.i1)
+    t2TEQrr renamable $r0, renamable $r1, 14, $noreg, implicit-def $cpsr
+    t2Bcc %bb.5, 0, killed $cpsr
+
+  bb.2.while.body.a:
+    successors: %bb.5(0x04000000), %bb.3(0x7c000000)
+    liveins: $r0, $r1, $r2
+
+    renamable $r2 = t2LDRi12 killed renamable $r2, 0, 14, $noreg :: (load 4 from %ir.next.i2)
+    t2CMPri renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2Bcc %bb.5, 0, killed $cpsr
+
+  bb.3.it.block:
+    successors: %bb.5(0x04000000), %bb.4(0x7c000000)
+    liveins: $r1, $r2
+
+    renamable $r0 = t2LDRi12 renamable $r2, 4, 14, $noreg :: (load 4 from %ir.info.i.1)
+    renamable $r0 = t2LDRHi12 killed renamable $r0, 0, 14, $noreg :: (load 2 from %ir.data16.i.13)
+    t2TEQrr renamable $r0, renamable $r1, 14, $noreg, implicit-def $cpsr
+    t2Bcc %bb.5, 0, killed $cpsr
+
+  bb.4.while.body.end:
+    successors: %bb.5(0x04000000), %bb.1(0x7c000000)
+    liveins: $r0, $r1, $r2
+
+    renamable $r2 = t2LDRi12 killed renamable $r2, 0, 14, $noreg :: (load 4 from %ir.next.i.14)
+    t2CMPri renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2Bcc %bb.1, 1, killed $cpsr
+
+  bb.5.exit:
+    liveins: $r0
+
+    tBX_RET 14, $noreg, implicit $r0
+
+...
diff --git a/test/CodeGen/Thumb2/thumb2-teq2.ll b/test/CodeGen/Thumb2/thumb2-teq2.ll
index 22bde87..1f37dde 100644
--- a/test/CodeGen/Thumb2/thumb2-teq2.ll
+++ b/test/CodeGen/Thumb2/thumb2-teq2.ll
@@ -5,7 +5,7 @@
 
 define i32 @f2(i32 %a, i32 %b) {
 ; CHECK: f2
-; CHECK: teq.w {{.*}}, r1
+; CHECK: eors {{.*}}, r1
     %tmp = xor i32 %a, %b
     %tmp1 = icmp eq i32 %tmp, 0
     %ret = select i1 %tmp1, i32 42, i32 24
@@ -14,7 +14,7 @@
 
 define i32 @f4(i32 %a, i32 %b) {
 ; CHECK: f4
-; CHECK: teq.w  {{.*}}, r1
+; CHECK: eors  {{.*}}, r1
     %tmp = xor i32 %a, %b
     %tmp1 = icmp eq i32 0, %tmp
     %ret = select i1 %tmp1, i32 42, i32 24
diff --git a/test/CodeGen/WebAssembly/PR40172.ll b/test/CodeGen/WebAssembly/PR40172.ll
new file mode 100644
index 0000000..e0ad5be
--- /dev/null
+++ b/test/CodeGen/WebAssembly/PR40172.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -O0 -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s
+
+; Regression test for bug 40172. The problem was that FastISel assumed
+; that CmpInst results did not need to be zero extended because
+; WebAssembly's compare instructions always return 0 or 1. But in this
+; test case FastISel falls back to DAG ISel, which combines away the
+; comparison, invalidating FastISel's assumption.
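+; The CHECK lines below reflect the fix: the i1 compare result is explicitly
+; masked with 1 (i32.and) before the i8 store rather than stored unmasked.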
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK:  i32.sub $[[BASE:[0-9]+]]=,
+; CHECK:  local.copy $[[ARG:[0-9]+]]=, $0{{$}}
+; CHECK:  i32.const $[[A0:[0-9]+]]=, 1{{$}}
+; CHECK:  i32.and $[[A1:[0-9]+]]=, $[[ARG]], $[[A0]]{{$}}
+; CHECK:  i32.store8 8($[[BASE]]), $[[A1]]{{$}}
+
+define void @test(i8 %byte) {
+  %t = alloca { i8, i8 }, align 1
+  %x4 = and i8 %byte, 1
+  %x5 = icmp eq i8 %x4, 1
+  %x6 = and i8 %byte, 2
+  %x7 = icmp eq i8 %x6, 2
+  %x8 = bitcast { i8, i8 }* %t to i8*
+  %x9 = zext i1 %x5 to i8
+  store i8 %x9, i8* %x8, align 1
+  %x10 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %t, i32 0, i32 1
+  %x11 = zext i1 %x7 to i8
+  store i8 %x11, i8* %x10, align 1
+  ret void
+}
diff --git a/test/CodeGen/WebAssembly/atomic-rmw.ll b/test/CodeGen/WebAssembly/atomic-rmw.ll
index d4e6680..27b3be0 100644
--- a/test/CodeGen/WebAssembly/atomic-rmw.ll
+++ b/test/CodeGen/WebAssembly/atomic-rmw.ll
@@ -276,7 +276,7 @@
 
 ; CHECK-LABEL: add_sext_i8_i32:
 ; CHECK-NEXT: .functype add_sext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @add_sext_i8_i32(i8* %p, i32 %v) {
@@ -288,7 +288,7 @@
 
 ; CHECK-LABEL: add_sext_i16_i32:
 ; CHECK-NEXT: .functype add_sext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @add_sext_i16_i32(i16* %p, i32 %v) {
@@ -300,7 +300,7 @@
 
 ; CHECK-LABEL: add_sext_i8_i64:
 ; CHECK-NEXT: .functype add_sext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @add_sext_i8_i64(i8* %p, i64 %v) {
@@ -312,7 +312,7 @@
 
 ; CHECK-LABEL: add_sext_i16_i64:
 ; CHECK-NEXT: .functype add_sext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @add_sext_i16_i64(i16* %p, i64 %v) {
@@ -322,12 +322,12 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.add, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.add, i64.extend_i32_s
 ; CHECK-LABEL: add_sext_i32_i64:
 ; CHECK-NEXT: .functype add_sext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i32.wrap/i64 $push0=, $1{{$}}
+; CHECK: i32.wrap_i64 $push0=, $1{{$}}
 ; CHECK: i32.atomic.rmw.add $push1=, 0($0), $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push2=, $pop1{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
 define i64 @add_sext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -340,7 +340,7 @@
 
 ; CHECK-LABEL: sub_sext_i8_i32:
 ; CHECK-NEXT: .functype sub_sext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @sub_sext_i8_i32(i8* %p, i32 %v) {
@@ -352,7 +352,7 @@
 
 ; CHECK-LABEL: sub_sext_i16_i32:
 ; CHECK-NEXT: .functype sub_sext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @sub_sext_i16_i32(i16* %p, i32 %v) {
@@ -364,7 +364,7 @@
 
 ; CHECK-LABEL: sub_sext_i8_i64:
 ; CHECK-NEXT: .functype sub_sext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @sub_sext_i8_i64(i8* %p, i64 %v) {
@@ -376,7 +376,7 @@
 
 ; CHECK-LABEL: sub_sext_i16_i64:
 ; CHECK-NEXT: .functype sub_sext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @sub_sext_i16_i64(i16* %p, i64 %v) {
@@ -386,12 +386,12 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.sub, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.sub, i64.extend_i32_s
 ; CHECK-LABEL: sub_sext_i32_i64:
 ; CHECK-NEXT: .functype sub_sext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i32.wrap/i64 $push0=, $1
+; CHECK: i32.wrap_i64 $push0=, $1
 ; CHECK: i32.atomic.rmw.sub $push1=, 0($0), $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push2=, $pop1{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
 define i64 @sub_sext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -404,7 +404,7 @@
 
 ; CHECK-LABEL: and_sext_i8_i32:
 ; CHECK-NEXT: .functype and_sext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @and_sext_i8_i32(i8* %p, i32 %v) {
@@ -416,7 +416,7 @@
 
 ; CHECK-LABEL: and_sext_i16_i32:
 ; CHECK-NEXT: .functype and_sext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @and_sext_i16_i32(i16* %p, i32 %v) {
@@ -428,7 +428,7 @@
 
 ; CHECK-LABEL: and_sext_i8_i64:
 ; CHECK-NEXT: .functype and_sext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @and_sext_i8_i64(i8* %p, i64 %v) {
@@ -440,7 +440,7 @@
 
 ; CHECK-LABEL: and_sext_i16_i64:
 ; CHECK-NEXT: .functype and_sext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @and_sext_i16_i64(i16* %p, i64 %v) {
@@ -450,12 +450,12 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.and, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.and, i64.extend_i32_s
 ; CHECK-LABEL: and_sext_i32_i64:
 ; CHECK-NEXT: .functype and_sext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i32.wrap/i64 $push0=, $1{{$}}
+; CHECK: i32.wrap_i64 $push0=, $1{{$}}
 ; CHECK: i32.atomic.rmw.and $push1=, 0($0), $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push2=, $pop1{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
 define i64 @and_sext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -468,7 +468,7 @@
 
 ; CHECK-LABEL: or_sext_i8_i32:
 ; CHECK-NEXT: .functype or_sext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @or_sext_i8_i32(i8* %p, i32 %v) {
@@ -480,7 +480,7 @@
 
 ; CHECK-LABEL: or_sext_i16_i32:
 ; CHECK-NEXT: .functype or_sext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @or_sext_i16_i32(i16* %p, i32 %v) {
@@ -492,7 +492,7 @@
 
 ; CHECK-LABEL: or_sext_i8_i64:
 ; CHECK-NEXT: .functype or_sext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @or_sext_i8_i64(i8* %p, i64 %v) {
@@ -504,7 +504,7 @@
 
 ; CHECK-LABEL: or_sext_i16_i64:
 ; CHECK-NEXT: .functype or_sext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @or_sext_i16_i64(i16* %p, i64 %v) {
@@ -514,12 +514,12 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.or, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.or, i64.extend_i32_s
 ; CHECK-LABEL: or_sext_i32_i64:
 ; CHECK-NEXT: .functype or_sext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i32.wrap/i64 $push0=, $1{{$}}
+; CHECK: i32.wrap_i64 $push0=, $1{{$}}
 ; CHECK: i32.atomic.rmw.or $push1=, 0($0), $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push2=, $pop1{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
 define i64 @or_sext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -532,7 +532,7 @@
 
 ; CHECK-LABEL: xor_sext_i8_i32:
 ; CHECK-NEXT: .functype xor_sext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @xor_sext_i8_i32(i8* %p, i32 %v) {
@@ -544,7 +544,7 @@
 
 ; CHECK-LABEL: xor_sext_i16_i32:
 ; CHECK-NEXT: .functype xor_sext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @xor_sext_i16_i32(i16* %p, i32 %v) {
@@ -556,7 +556,7 @@
 
 ; CHECK-LABEL: xor_sext_i8_i64:
 ; CHECK-NEXT: .functype xor_sext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @xor_sext_i8_i64(i8* %p, i64 %v) {
@@ -568,7 +568,7 @@
 
 ; CHECK-LABEL: xor_sext_i16_i64:
 ; CHECK-NEXT: .functype xor_sext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @xor_sext_i16_i64(i16* %p, i64 %v) {
@@ -578,12 +578,12 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.xor, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.xor, i64.extend_i32_s
 ; CHECK-LABEL: xor_sext_i32_i64:
 ; CHECK-NEXT: .functype xor_sext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i32.wrap/i64 $push0=, $1{{$}}
+; CHECK: i32.wrap_i64 $push0=, $1{{$}}
 ; CHECK: i32.atomic.rmw.xor $push1=, 0($0), $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push2=, $pop1{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
 define i64 @xor_sext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -596,7 +596,7 @@
 
 ; CHECK-LABEL: xchg_sext_i8_i32:
 ; CHECK-NEXT: .functype xchg_sext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @xchg_sext_i8_i32(i8* %p, i32 %v) {
@@ -608,7 +608,7 @@
 
 ; CHECK-LABEL: xchg_sext_i16_i32:
 ; CHECK-NEXT: .functype xchg_sext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @xchg_sext_i16_i32(i16* %p, i32 %v) {
@@ -620,7 +620,7 @@
 
 ; CHECK-LABEL: xchg_sext_i8_i64:
 ; CHECK-NEXT: .functype xchg_sext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @xchg_sext_i8_i64(i8* %p, i64 %v) {
@@ -632,7 +632,7 @@
 
 ; CHECK-LABEL: xchg_sext_i16_i64:
 ; CHECK-NEXT: .functype xchg_sext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @xchg_sext_i16_i64(i16* %p, i64 %v) {
@@ -642,12 +642,12 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.xchg, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.xchg, i64.extend_i32_s
 ; CHECK-LABEL: xchg_sext_i32_i64:
 ; CHECK-NEXT: .functype xchg_sext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i32.wrap/i64 $push0=, $1{{$}}
+; CHECK: i32.wrap_i64 $push0=, $1{{$}}
 ; CHECK: i32.atomic.rmw.xchg $push1=, 0($0), $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push2=, $pop1{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
 define i64 @xchg_sext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -660,7 +660,7 @@
 
 ; CHECK-LABEL: cmpxchg_sext_i8_i32:
 ; CHECK-NEXT: .functype cmpxchg_sext_i8_i32 (i32, i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @cmpxchg_sext_i8_i32(i8* %p, i32 %exp, i32 %new) {
@@ -674,7 +674,7 @@
 
 ; CHECK-LABEL: cmpxchg_sext_i16_i32:
 ; CHECK-NEXT: .functype cmpxchg_sext_i16_i32 (i32, i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @cmpxchg_sext_i16_i32(i16* %p, i32 %exp, i32 %new) {
@@ -688,7 +688,7 @@
 
 ; CHECK-LABEL: cmpxchg_sext_i8_i64:
 ; CHECK-NEXT: .functype cmpxchg_sext_i8_i64 (i32, i64, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @cmpxchg_sext_i8_i64(i8* %p, i64 %exp, i64 %new) {
@@ -702,7 +702,7 @@
 
 ; CHECK-LABEL: cmpxchg_sext_i16_i64:
 ; CHECK-NEXT: .functype cmpxchg_sext_i16_i64 (i32, i64, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @cmpxchg_sext_i16_i64(i16* %p, i64 %exp, i64 %new) {
@@ -714,13 +714,13 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.cmpxchg, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.cmpxchg, i64.extend_i32_s
 ; CHECK-LABEL: cmpxchg_sext_i32_i64:
 ; CHECK-NEXT: .functype cmpxchg_sext_i32_i64 (i32, i64, i64) -> (i64){{$}}
-; CHECK: i32.wrap/i64 $push1=, $1{{$}}
-; CHECK-NEXT: i32.wrap/i64 $push0=, $2{{$}}
+; CHECK: i32.wrap_i64 $push1=, $1{{$}}
+; CHECK-NEXT: i32.wrap_i64 $push0=, $2{{$}}
 ; CHECK-NEXT: i32.atomic.rmw.cmpxchg $push2=, 0($0), $pop1, $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push3=, $pop2{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push3=, $pop2{{$}}
 ; CHECK-NEXT: return $pop3{{$}}
 define i64 @cmpxchg_sext_i32_i64(i32* %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i32
@@ -739,7 +739,7 @@
 ; CHECK-LABEL: nand_sext_i8_i32:
 ; CHECK-NEXT: .functype nand_sext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i32.atomic.rmw8.cmpxchg_u
 ; CHECK: i32.extend8_s
 define i32 @nand_sext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -751,7 +751,7 @@
 ; CHECK-LABEL: nand_sext_i16_i32:
 ; CHECK-NEXT: .functype nand_sext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i32.atomic.rmw16.cmpxchg_u
 ; CHECK: i32.extend16_s
 define i32 @nand_sext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
@@ -760,12 +760,12 @@
   ret i32 %e
 }
 
-; FIXME Currently this cannot make use of i64.atomic.rmw8_u.cmpxchg
+; FIXME Currently this cannot make use of i64.atomic.rmw8.cmpxchg_u
 ; CHECK-LABEL: nand_sext_i8_i64:
 ; CHECK-NEXT: .functype nand_sext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw8_u.cmpxchg
-; CHECK: i64.extend_u/i32
+; CHECK: i32.atomic.rmw8.cmpxchg_u
+; CHECK: i64.extend_i32_u
 ; CHECK: i64.extend8_s
 define i64 @nand_sext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
@@ -774,12 +774,12 @@
   ret i64 %e
 }
 
-; FIXME Currently this cannot make use of i64.atomic.rmw16_u.cmpxchg
+; FIXME Currently this cannot make use of i64.atomic.rmw16.cmpxchg_u
 ; CHECK-LABEL: nand_sext_i16_i64:
 ; CHECK-NEXT: .functype nand_sext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw16_u.cmpxchg
-; CHECK: i64.extend_u/i32
+; CHECK: i32.atomic.rmw16.cmpxchg_u
+; CHECK: i64.extend_i32_u
 ; CHECK: i64.extend16_s
 define i64 @nand_sext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
@@ -788,12 +788,12 @@
   ret i64 %e
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.nand, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.nand, i64.extend_i32_s
 ; CHECK-LABEL: nand_sext_i32_i64:
 ; CHECK-NEXT: .functype nand_sext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw.cmpxchg
-; CHECK: i64.extend_s/i32
+; CHECK: i64.extend_i32_s
 define i64 @nand_sext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
   %old = atomicrmw nand i32* %p, i32 %t seq_cst
@@ -809,7 +809,7 @@
 
 ; CHECK-LABEL: add_zext_i8_i32:
 ; CHECK-NEXT: .functype add_zext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @add_zext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -820,7 +820,7 @@
 
 ; CHECK-LABEL: add_zext_i16_i32:
 ; CHECK-NEXT: .functype add_zext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @add_zext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
@@ -831,7 +831,7 @@
 
 ; CHECK-LABEL: add_zext_i8_i64:
 ; CHECK-NEXT: .functype add_zext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @add_zext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
@@ -842,7 +842,7 @@
 
 ; CHECK-LABEL: add_zext_i16_i64:
 ; CHECK-NEXT: .functype add_zext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @add_zext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
@@ -853,7 +853,7 @@
 
 ; CHECK-LABEL: add_zext_i32_i64:
 ; CHECK-NEXT: .functype add_zext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw32_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw32.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @add_zext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -866,7 +866,7 @@
 
 ; CHECK-LABEL: sub_zext_i8_i32:
 ; CHECK-NEXT: .functype sub_zext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @sub_zext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -877,7 +877,7 @@
 
 ; CHECK-LABEL: sub_zext_i16_i32:
 ; CHECK-NEXT: .functype sub_zext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @sub_zext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
@@ -888,7 +888,7 @@
 
 ; CHECK-LABEL: sub_zext_i8_i64:
 ; CHECK-NEXT: .functype sub_zext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @sub_zext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
@@ -899,7 +899,7 @@
 
 ; CHECK-LABEL: sub_zext_i16_i64:
 ; CHECK-NEXT: .functype sub_zext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @sub_zext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
@@ -910,7 +910,7 @@
 
 ; CHECK-LABEL: sub_zext_i32_i64:
 ; CHECK-NEXT: .functype sub_zext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw32_u.sub $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw32.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @sub_zext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -923,7 +923,7 @@
 
 ; CHECK-LABEL: and_zext_i8_i32:
 ; CHECK-NEXT: .functype and_zext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @and_zext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -934,7 +934,7 @@
 
 ; CHECK-LABEL: and_zext_i16_i32:
 ; CHECK-NEXT: .functype and_zext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @and_zext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
@@ -945,7 +945,7 @@
 
 ; CHECK-LABEL: and_zext_i8_i64:
 ; CHECK-NEXT: .functype and_zext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @and_zext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
@@ -956,7 +956,7 @@
 
 ; CHECK-LABEL: and_zext_i16_i64:
 ; CHECK-NEXT: .functype and_zext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @and_zext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
@@ -967,7 +967,7 @@
 
 ; CHECK-LABEL: and_zext_i32_i64:
 ; CHECK-NEXT: .functype and_zext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw32_u.and $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw32.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @and_zext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -980,7 +980,7 @@
 
 ; CHECK-LABEL: or_zext_i8_i32:
 ; CHECK-NEXT: .functype or_zext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @or_zext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -991,7 +991,7 @@
 
 ; CHECK-LABEL: or_zext_i16_i32:
 ; CHECK-NEXT: .functype or_zext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @or_zext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
@@ -1002,7 +1002,7 @@
 
 ; CHECK-LABEL: or_zext_i8_i64:
 ; CHECK-NEXT: .functype or_zext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @or_zext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
@@ -1013,7 +1013,7 @@
 
 ; CHECK-LABEL: or_zext_i16_i64:
 ; CHECK-NEXT: .functype or_zext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @or_zext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
@@ -1024,7 +1024,7 @@
 
 ; CHECK-LABEL: or_zext_i32_i64:
 ; CHECK-NEXT: .functype or_zext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw32_u.or $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw32.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @or_zext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -1037,7 +1037,7 @@
 
 ; CHECK-LABEL: xor_zext_i8_i32:
 ; CHECK-NEXT: .functype xor_zext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @xor_zext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -1048,7 +1048,7 @@
 
 ; CHECK-LABEL: xor_zext_i16_i32:
 ; CHECK-NEXT: .functype xor_zext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @xor_zext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
@@ -1059,7 +1059,7 @@
 
 ; CHECK-LABEL: xor_zext_i8_i64:
 ; CHECK-NEXT: .functype xor_zext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @xor_zext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
@@ -1070,7 +1070,7 @@
 
 ; CHECK-LABEL: xor_zext_i16_i64:
 ; CHECK-NEXT: .functype xor_zext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @xor_zext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
@@ -1081,7 +1081,7 @@
 
 ; CHECK-LABEL: xor_zext_i32_i64:
 ; CHECK-NEXT: .functype xor_zext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw32_u.xor $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw32.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @xor_zext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -1094,7 +1094,7 @@
 
 ; CHECK-LABEL: xchg_zext_i8_i32:
 ; CHECK-NEXT: .functype xchg_zext_i8_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @xchg_zext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -1105,7 +1105,7 @@
 
 ; CHECK-LABEL: xchg_zext_i16_i32:
 ; CHECK-NEXT: .functype xchg_zext_i16_i32 (i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @xchg_zext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
@@ -1116,7 +1116,7 @@
 
 ; CHECK-LABEL: xchg_zext_i8_i64:
 ; CHECK-NEXT: .functype xchg_zext_i8_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @xchg_zext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
@@ -1127,7 +1127,7 @@
 
 ; CHECK-LABEL: xchg_zext_i16_i64:
 ; CHECK-NEXT: .functype xchg_zext_i16_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @xchg_zext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
@@ -1138,7 +1138,7 @@
 
 ; CHECK-LABEL: xchg_zext_i32_i64:
 ; CHECK-NEXT: .functype xchg_zext_i32_i64 (i32, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw32_u.xchg $push0=, 0($0), $1{{$}}
+; CHECK: i64.atomic.rmw32.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @xchg_zext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
@@ -1151,7 +1151,7 @@
 
 ; CHECK-LABEL: cmpxchg_zext_i8_i32:
 ; CHECK-NEXT: .functype cmpxchg_zext_i8_i32 (i32, i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @cmpxchg_zext_i8_i32(i8* %p, i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i8
@@ -1164,7 +1164,7 @@
 
 ; CHECK-LABEL: cmpxchg_zext_i16_i32:
 ; CHECK-NEXT: .functype cmpxchg_zext_i16_i32 (i32, i32, i32) -> (i32){{$}}
-; CHECK: i32.atomic.rmw16_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @cmpxchg_zext_i16_i32(i16* %p, i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i16
@@ -1177,7 +1177,7 @@
 
 ; CHECK-LABEL: cmpxchg_zext_i8_i64:
 ; CHECK-NEXT: .functype cmpxchg_zext_i8_i64 (i32, i64, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw8_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @cmpxchg_zext_i8_i64(i8* %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i8
@@ -1190,7 +1190,7 @@
 
 ; CHECK-LABEL: cmpxchg_zext_i16_i64:
 ; CHECK-NEXT: .functype cmpxchg_zext_i16_i64 (i32, i64, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw16_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @cmpxchg_zext_i16_i64(i16* %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i16
@@ -1203,7 +1203,7 @@
 
 ; CHECK-LABEL: cmpxchg_zext_i32_i64:
 ; CHECK-NEXT: .functype cmpxchg_zext_i32_i64 (i32, i64, i64) -> (i64){{$}}
-; CHECK: i64.atomic.rmw32_u.cmpxchg $push0=, 0($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw32.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @cmpxchg_zext_i32_i64(i32* %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i32
@@ -1222,7 +1222,7 @@
 ; CHECK-LABEL: nand_zext_i8_i32:
 ; CHECK-NEXT: .functype nand_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i32.atomic.rmw8.cmpxchg_u
 define i32 @nand_zext_i8_i32(i8* %p, i32 %v) {
   %t = trunc i32 %v to i8
   %old = atomicrmw nand i8* %p, i8 %t seq_cst
@@ -1233,7 +1233,7 @@
 ; CHECK-LABEL: nand_zext_i16_i32:
 ; CHECK-NEXT: .functype nand_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i32.atomic.rmw16.cmpxchg_u
 define i32 @nand_zext_i16_i32(i16* %p, i32 %v) {
   %t = trunc i32 %v to i16
   %old = atomicrmw nand i16* %p, i16 %t seq_cst
@@ -1241,12 +1241,12 @@
   ret i32 %e
 }
 
-; FIXME Currently this cannot make use of i64.atomic.rmw8_u.cmpxchg
+; FIXME Currently this cannot make use of i64.atomic.rmw8.cmpxchg_u
 ; CHECK-LABEL: nand_zext_i8_i64:
 ; CHECK-NEXT: .functype nand_zext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw8_u.cmpxchg
-; CHECK: i64.extend_u/i32
+; CHECK: i32.atomic.rmw8.cmpxchg_u
+; CHECK: i64.extend_i32_u
 define i64 @nand_zext_i8_i64(i8* %p, i64 %v) {
   %t = trunc i64 %v to i8
   %old = atomicrmw nand i8* %p, i8 %t seq_cst
@@ -1254,12 +1254,12 @@
   ret i64 %e
 }
 
-; FIXME Currently this cannot make use of i64.atomic.rmw16_u.cmpxchg
+; FIXME Currently this cannot make use of i64.atomic.rmw16.cmpxchg_u
 ; CHECK-LABEL: nand_zext_i16_i64:
 ; CHECK-NEXT: .functype nand_zext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: loop
-; CHECK: i32.atomic.rmw16_u.cmpxchg
-; CHECK: i64.extend_u/i32
+; CHECK: i32.atomic.rmw16.cmpxchg_u
+; CHECK: i64.extend_i32_u
 define i64 @nand_zext_i16_i64(i16* %p, i64 %v) {
   %t = trunc i64 %v to i16
   %old = atomicrmw nand i16* %p, i16 %t seq_cst
@@ -1267,12 +1267,12 @@
   ret i64 %e
 }
 
-; FIXME Currently this cannot make use of i64.atomic.rmw32_u.cmpxchg
+; FIXME Currently this cannot make use of i64.atomic.rmw32.cmpxchg_u
 ; CHECK-LABEL: nand_zext_i32_i64:
 ; CHECK-NEXT: .functype nand_zext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw.cmpxchg
-; CHECK: i64.extend_u/i32
+; CHECK: i64.extend_i32_u
 define i64 @nand_zext_i32_i64(i32* %p, i64 %v) {
   %t = trunc i64 %v to i32
   %old = atomicrmw nand i32* %p, i32 %t seq_cst
diff --git a/test/CodeGen/WebAssembly/byval.ll b/test/CodeGen/WebAssembly/byval.ll
index 0caa720..68f86e4 100644
--- a/test/CodeGen/WebAssembly/byval.ll
+++ b/test/CodeGen/WebAssembly/byval.ll
@@ -21,12 +21,12 @@
 define void @byval_arg(%SmallStruct* %ptr) {
  ; CHECK: .functype byval_arg (i32) -> ()
  ; Subtract 16 from SP (SP is 16-byte aligned)
- ; CHECK-NEXT: get_global $push[[L2:.+]]=, __stack_pointer@GLOBAL
+ ; CHECK-NEXT: global.get $push[[L2:.+]]=, __stack_pointer@GLOBAL
  ; CHECK-NEXT: i32.const $push[[L3:.+]]=, 16
  ; CHECK-NEXT: i32.sub $push[[L11:.+]]=, $pop[[L2]], $pop[[L3]]
  ; Ensure SP is stored back before the call
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
+ ; CHECK-NEXT: local.tee $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
  ; Copy the SmallStruct argument to the stack (SP+12, original SP-4)
  ; CHECK-NEXT: i32.load $push[[L0:.+]]=, 0($0)
  ; CHECK-NEXT: i32.store 12($[[SP]]), $pop[[L0]]
@@ -38,7 +38,7 @@
  ; Restore the stack
  ; CHECK-NEXT: i32.const $push[[L6:.+]]=, 16
  ; CHECK-NEXT: i32.add $push[[L8:.+]]=, $[[SP]], $pop[[L6]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L8]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L8]]
  ; CHECK-NEXT: return
  ret void
 }
@@ -49,8 +49,8 @@
  ; Don't check the entire SP sequence, just enough to get the alignment.
  ; CHECK: i32.const $push[[L1:.+]]=, 16
  ; CHECK-NEXT: i32.sub $push[[L11:.+]]=, {{.+}}, $pop[[L1]]
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
+ ; CHECK-NEXT: local.tee $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
  ; Copy the SmallStruct argument to the stack (SP+8, original SP-8)
  ; CHECK-NEXT: i32.load $push[[L0:.+]]=, 0($0){{$}}
  ; CHECK-NEXT: i32.store 8($[[SP]]), $pop[[L0]]{{$}}
@@ -68,8 +68,8 @@
  ; Subtract 16 from SP (SP is 16-byte aligned)
  ; CHECK: i32.const $push[[L1:.+]]=, 16
  ; CHECK-NEXT: i32.sub $push[[L14:.+]]=, {{.+}}, $pop[[L1]]
- ; CHECK-NEXT: tee_local $push[[L13:.+]]=, $[[SP:.+]]=, $pop[[L14]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L13]]
+ ; CHECK-NEXT: local.tee $push[[L13:.+]]=, $[[SP:.+]]=, $pop[[L14]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L13]]
  ; Copy the AlignedStruct argument to the stack (SP+0, original SP-16)
  ; Just check the last load/store pair of the memcpy
  ; CHECK: i64.load $push[[L4:.+]]=, 0($0)
@@ -107,14 +107,14 @@
 
 ; Call memcpy for "big" byvals.
 ; CHECK-LABEL: big_byval:
-; CHECK:      get_global $push[[L2:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK:      global.get $push[[L2:.+]]=, __stack_pointer@GLOBAL{{$}}
 ; CHECK-NEXT: i32.const $push[[L3:.+]]=, 131072
 ; CHECK-NEXT: i32.sub $push[[L11:.+]]=, $pop[[L2]], $pop[[L3]]
-; CHECK-NEXT: tee_local $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
+; CHECK-NEXT: local.tee $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
 ; CHECK-NEXT: i32.const $push[[L0:.+]]=, 131072
 ; CHECK-NEXT: i32.call       $push[[L11:.+]]=, memcpy@FUNCTION, $[[SP]], ${{.+}}, $pop{{.+}}
-; CHECK-NEXT: tee_local      $push[[L9:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
+; CHECK-NEXT: local.tee      $push[[L9:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
 ; CHECK-NEXT: call           big_byval_callee@FUNCTION,
 %big = type [131072 x i8]
 declare void @big_byval_callee(%big* byval align 1)
diff --git a/test/CodeGen/WebAssembly/call.ll b/test/CodeGen/WebAssembly/call.ll
index 3a84973..db666a6 100644
--- a/test/CodeGen/WebAssembly/call.ll
+++ b/test/CodeGen/WebAssembly/call.ll
@@ -71,7 +71,7 @@
 
 ; CHECK-LABEL: call_i32_unary:
 ; CHECK-NEXT: .functype call_i32_unary (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_unary@FUNCTION, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @call_i32_unary(i32 %a) {
@@ -81,8 +81,8 @@
 
 ; CHECK-LABEL: call_i32_binary:
 ; CHECK-NEXT: .functype call_i32_binary (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_binary@FUNCTION, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @call_i32_binary(i32 %a, i32 %b) {
@@ -92,7 +92,7 @@
 
 ; CHECK-LABEL: call_indirect_void:
 ; CHECK-NEXT: .functype call_indirect_void (i32) -> (){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $pop[[L0]]{{$}}
 ; CHECK-NEXT: return{{$}}
 define void @call_indirect_void(void ()* %callee) {
@@ -102,7 +102,7 @@
 
 ; CHECK-LABEL: call_indirect_i32:
 ; CHECK-NEXT: .functype call_indirect_i32 (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} i32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @call_indirect_i32(i32 ()* %callee) {
@@ -112,7 +112,7 @@
 
 ; CHECK-LABEL: call_indirect_i64:
 ; CHECK-NEXT: .functype call_indirect_i64 (i32) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} i64.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @call_indirect_i64(i64 ()* %callee) {
@@ -122,7 +122,7 @@
 
 ; CHECK-LABEL: call_indirect_float:
 ; CHECK-NEXT: .functype call_indirect_float (i32) -> (f32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} f32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define float @call_indirect_float(float ()* %callee) {
@@ -132,7 +132,7 @@
 
 ; CHECK-LABEL: call_indirect_double:
 ; CHECK-NEXT: .functype call_indirect_double (i32) -> (f64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} f64.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define double @call_indirect_double(double ()* %callee) {
@@ -142,7 +142,7 @@
 
 ; CHECK-LABEL: call_indirect_v128:
 ; CHECK-NEXT: .functype call_indirect_v128 (i32) -> (v128){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} v128.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define <16 x i8> @call_indirect_v128(<16 x i8> ()* %callee) {
@@ -152,8 +152,8 @@
 
 ; CHECK-LABEL: call_indirect_arg:
 ; CHECK-NEXT: .functype call_indirect_arg (i32, i32) -> (){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
 define void @call_indirect_arg(void (i32)* %callee, i32 %arg) {
@@ -163,9 +163,9 @@
 
 ; CHECK-LABEL: call_indirect_arg_2:
 ; CHECK-NEXT: .functype call_indirect_arg_2 (i32, i32, i32) -> (){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 2{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 2{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} i32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]], $pop[[L2]]{{$}}
 ; CHECK-NEXT: drop $pop[[NUM]]{{$}}
 ; CHECK-NEXT: return{{$}}
diff --git a/test/CodeGen/WebAssembly/cfg-stackify-dbg-skip.ll b/test/CodeGen/WebAssembly/cfg-stackify-dbg-skip.ll
index 9077d7a..5a72534 100644
--- a/test/CodeGen/WebAssembly/cfg-stackify-dbg-skip.ll
+++ b/test/CodeGen/WebAssembly/cfg-stackify-dbg-skip.ll
@@ -7,7 +7,7 @@
 ; CHECK: body:
 ; CHECK: BLOCK
 ;                       <-- Stackified expression starts
-; CHECK-NEXT: GET_LOCAL_I64
+; CHECK-NEXT: LOCAL_GET_I64
 ; CHECK-NEXT: I32_WRAP_I64
 ; CHECK-NEXT: DBG_VALUE
 ;                       <-- BLOCK should NOT be placed here!
diff --git a/test/CodeGen/WebAssembly/cfg-stackify-eh.mir b/test/CodeGen/WebAssembly/cfg-stackify-eh.mir
index b675790..11d9aaf 100644
--- a/test/CodeGen/WebAssembly/cfg-stackify-eh.mir
+++ b/test/CodeGen/WebAssembly/cfg-stackify-eh.mir
@@ -98,14 +98,14 @@
     successors: %bb.2, %bb.7
 
     %30:i32 = CATCH_I32 0, implicit-def dead $arguments
-    SET_LOCAL_I32 0, %30:i32, implicit-def $arguments
+    LOCAL_SET_I32 0, %30:i32, implicit-def $arguments
     %16:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     %27:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     STORE_I32 2, @__wasm_lpad_context + 4, %16:i32, %27:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (store 4 into `i8** getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 1)`)
     %26:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     %25:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     STORE_I32 2, @__wasm_lpad_context, %26:i32, %25:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (store 4 into `i32* getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 0)`)
-    %32:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %32:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %31:i32 = CALL_I32 @_Unwind_CallPersonality, %32:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
     DROP_I32 killed %31:i32, implicit-def $arguments
     %24:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
@@ -118,7 +118,7 @@
   ; predecessors: %bb.1
     successors: %bb.8, %bb.3, %bb.6
 
-    %34:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %34:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %33:i32 = CALL_I32 @__cxa_begin_catch, %34:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
     DROP_I32 killed %33:i32, implicit-def $arguments
     CALL_VOID @may_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64
@@ -134,11 +134,11 @@
     successors: %bb.4, %bb.5
 
     %35:i32 = CATCH_I32 0, implicit-def dead $arguments
-    SET_LOCAL_I32 0, %35:i32, implicit-def $arguments
+    LOCAL_SET_I32 0, %35:i32, implicit-def $arguments
     %21:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     %20:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     STORE_I32 2, @__wasm_lpad_context, %21:i32, %20:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (store 4 into `i32* getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 0)`)
-    %37:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %37:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %36:i32 = CALL_I32 @_Unwind_CallPersonality, %37:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
     DROP_I32 killed %36:i32, implicit-def $arguments
     %29:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
@@ -151,7 +151,7 @@
   ; predecessors: %bb.3
     successors: %bb.8
 
-    %39:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %39:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %38:i32 = CALL_I32 @__cxa_begin_catch, %39:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
     DROP_I32 killed %38:i32, implicit-def $arguments
     CALL_VOID @__cxa_end_catch, implicit-def dead $arguments, implicit $sp32, implicit $sp64
@@ -212,9 +212,9 @@
     successors: %bb.1, %bb.4
 
     %18:i32 = CONST_I32 0, implicit-def dead $arguments
-    SET_LOCAL_I32 1, %18:i32, implicit-def $arguments
+    LOCAL_SET_I32 1, %18:i32, implicit-def $arguments
     %14:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
-    %19:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %19:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %9:i32 = GE_S_I32 %14:i32, %19:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     BR_IF %bb.4, %9:i32, implicit-def $arguments
 
@@ -243,11 +243,11 @@
   ; predecessors: %bb.1
     successors: %bb.1, %bb.4
 
-    %20:i32 = GET_LOCAL_I32 1, implicit-def $arguments
+    %20:i32 = LOCAL_GET_I32 1, implicit-def $arguments
     %17:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     %16:i32 = ADD_I32 %20:i32, %17:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
-    %15:i32 = TEE_LOCAL_I32 1, %16:i32, implicit-def $arguments
-    %21:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %15:i32 = LOCAL_TEE_I32 1, %16:i32, implicit-def $arguments
+    %21:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %10:i32 = GE_S_I32 %15:i32, %21:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     BR_UNLESS %bb.1, %10:i32, implicit-def $arguments
   ; CHECK-LABEL: bb.3:
@@ -289,9 +289,9 @@
     %18:i32 = CALL_I32 @__cxa_begin_catch, %9:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64, implicit-def $value_stack, implicit $value_stack
     DROP_I32 killed %18:i32, implicit-def $arguments
     %19:i32 = CONST_I32 0, implicit-def dead $arguments
-    SET_LOCAL_I32 1, %19:i32, implicit-def $arguments
+    LOCAL_SET_I32 1, %19:i32, implicit-def $arguments
     %14:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
-    %20:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %20:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %10:i32 = GE_S_I32 %14:i32, %20:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     BR_IF %bb.3, %10:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack
 
@@ -300,11 +300,11 @@
     successors: %bb.2, %bb.3
 
     CALL_VOID @dont_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64
-    %21:i32 = GET_LOCAL_I32 1, implicit-def $arguments
+    %21:i32 = LOCAL_GET_I32 1, implicit-def $arguments
     %17:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     %16:i32 = ADD_I32 %21:i32, %17:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
-    %15:i32 = TEE_LOCAL_I32 1, %16:i32, implicit-def $arguments
-    %22:i32 = GET_LOCAL_I32 0, implicit-def $arguments
+    %15:i32 = LOCAL_TEE_I32 1, %16:i32, implicit-def $arguments
+    %22:i32 = LOCAL_GET_I32 0, implicit-def $arguments
     %11:i32 = GE_S_I32 %15:i32, %22:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack
     BR_UNLESS %bb.2, %11:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack
 
diff --git a/test/CodeGen/WebAssembly/comparisons-f32.ll b/test/CodeGen/WebAssembly/comparisons-f32.ll
index 7ccfe5e..e4753c4 100644
--- a/test/CodeGen/WebAssembly/comparisons-f32.ll
+++ b/test/CodeGen/WebAssembly/comparisons-f32.ll
@@ -8,11 +8,11 @@
 
 ; CHECK-LABEL: ord_f32:
 ; CHECK-NEXT: .functype ord_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f32.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
 ; CHECK-NEXT: i32.and $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM2]]{{$}}
@@ -24,11 +24,11 @@
 
 ; CHECK-LABEL: uno_f32:
 ; CHECK-NEXT: .functype uno_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f32.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
 ; CHECK-NEXT: i32.or $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM2]]{{$}}
@@ -40,8 +40,8 @@
 
 ; CHECK-LABEL: oeq_f32:
 ; CHECK-NEXT: .functype oeq_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @oeq_f32(float %x, float %y) {
@@ -100,14 +100,14 @@
 
 ; CHECK-LABEL: ueq_f32:
 ; CHECK-NEXT: .functype ueq_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
-; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L5:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
 ; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
@@ -120,14 +120,14 @@
 
 ; CHECK-LABEL: one_f32:
 ; CHECK-NEXT: .functype one_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f32.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
-; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L5:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.eq $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
 ; CHECK-NEXT: i32.and $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: i32.and $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
@@ -140,8 +140,8 @@
 
 ; CHECK-LABEL: ult_f32:
 ; CHECK-NEXT: .functype ult_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.ge $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
@@ -154,8 +154,8 @@
 
 ; CHECK-LABEL: ule_f32:
 ; CHECK-NEXT: .functype ule_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.gt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
@@ -168,8 +168,8 @@
 
 ; CHECK-LABEL: ugt_f32:
 ; CHECK-NEXT: .functype ugt_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.le $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
@@ -182,8 +182,8 @@
 
 ; CHECK-LABEL: uge_f32:
 ; CHECK-NEXT: .functype uge_f32 (f32, f32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.lt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
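Note on the unordered comparisons above (ult/ule/ugt/uge): they are checked as the negated ordered comparison plus an i32.xor with 1, because fcmp ult is true exactly when fcmp oge is false (NaN operands make the unordered form true and the ordered form false, and wasm's f32.ge returns 0 for NaN). A minimal sketch of that equivalence in IR (hypothetical function, not part of the patch):

define i32 @ult_via_not_oge(float %x, float %y) {
  ; "unordered or <" is the logical negation of "ordered and >=",
  ; which is what the f32.ge + i32.xor 1 sequence in the CHECK lines encodes
  %oge = fcmp oge float %x, %y
  %ult = xor i1 %oge, true
  %r = zext i1 %ult to i32
  ret i32 %r
}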
diff --git a/test/CodeGen/WebAssembly/comparisons-f64.ll b/test/CodeGen/WebAssembly/comparisons-f64.ll
index 46b24db..37150e8 100644
--- a/test/CodeGen/WebAssembly/comparisons-f64.ll
+++ b/test/CodeGen/WebAssembly/comparisons-f64.ll
@@ -8,11 +8,11 @@
 
 ; CHECK-LABEL: ord_f64:
 ; CHECK-NEXT: .functype ord_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f64.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
 ; CHECK-NEXT: i32.and $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM2]]{{$}}
@@ -24,11 +24,11 @@
 
 ; CHECK-LABEL: uno_f64:
 ; CHECK-NEXT: .functype uno_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f64.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
 ; CHECK-NEXT: i32.or $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM2]]{{$}}
@@ -40,8 +40,8 @@
 
 ; CHECK-LABEL: oeq_f64:
 ; CHECK-NEXT: .functype oeq_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @oeq_f64(double %x, double %y) {
@@ -99,14 +99,14 @@
 
 ; CHECK-LABEL: ueq_f64:
 ; CHECK-NEXT: .functype ueq_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
-; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L5:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
 ; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
@@ -119,14 +119,14 @@
 
 ; CHECK-LABEL: one_f64:
 ; CHECK-NEXT: .functype one_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L3:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f64.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
-; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L5:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.eq $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
 ; CHECK-NEXT: i32.and $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: i32.and $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
@@ -139,8 +139,8 @@
 
 ; CHECK-LABEL: ult_f64:
 ; CHECK-NEXT: .functype ult_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.ge $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
@@ -153,8 +153,8 @@
 
 ; CHECK-LABEL: ule_f64:
 ; CHECK-NEXT: .functype ule_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.gt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
@@ -167,8 +167,8 @@
 
 ; CHECK-LABEL: ugt_f64:
 ; CHECK-NEXT: .functype ugt_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.le $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
@@ -181,8 +181,8 @@
 
 ; CHECK-LABEL: uge_f64:
 ; CHECK-NEXT: .functype uge_f64 (f64, f64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.lt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.const $push[[C0:[0-9]+]]=, 1
 ; CHECK-NEXT: i32.xor $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[C0]]{{$}}
diff --git a/test/CodeGen/WebAssembly/comparisons-i32.ll b/test/CodeGen/WebAssembly/comparisons-i32.ll
index e7d4e1b..ed884e7 100644
--- a/test/CodeGen/WebAssembly/comparisons-i32.ll
+++ b/test/CodeGen/WebAssembly/comparisons-i32.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck %s --check-prefixes CHECK,SLOW
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes CHECK,FAST
 
 ; Test that basic 32-bit integer comparison operations assemble as expected.
 
@@ -8,10 +8,13 @@
 
 ; CHECK-LABEL: eq_i32:
 ; CHECK-NEXT: .functype eq_i32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: i32.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.eq $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SLOW-NEXT: return $pop[[L2]]{{$}}
+; FAST-NEXT:  i32.const $push[[L3:[0-9]+]]=, 1{{$}}
+; FAST-NEXT:  i32.and $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; FAST-NEXT:  return $pop[[L4]]{{$}}
 define i32 @eq_i32(i32 %x, i32 %y) {
   %a = icmp eq i32 %x, %y
   %b = zext i1 %a to i32
@@ -19,8 +22,11 @@
 }
 
 ; CHECK-LABEL: ne_i32:
-; CHECK: i32.ne $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.ne $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ne_i32(i32 %x, i32 %y) {
   %a = icmp ne i32 %x, %y
   %b = zext i1 %a to i32
@@ -28,8 +34,11 @@
 }
 
 ; CHECK-LABEL: slt_i32:
-; CHECK: i32.lt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.lt_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @slt_i32(i32 %x, i32 %y) {
   %a = icmp slt i32 %x, %y
   %b = zext i1 %a to i32
@@ -37,8 +46,11 @@
 }
 
 ; CHECK-LABEL: sle_i32:
-; CHECK: i32.le_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.le_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @sle_i32(i32 %x, i32 %y) {
   %a = icmp sle i32 %x, %y
   %b = zext i1 %a to i32
@@ -46,8 +58,11 @@
 }
 
 ; CHECK-LABEL: ult_i32:
-; CHECK: i32.lt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.lt_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ult_i32(i32 %x, i32 %y) {
   %a = icmp ult i32 %x, %y
   %b = zext i1 %a to i32
@@ -55,8 +70,11 @@
 }
 
 ; CHECK-LABEL: ule_i32:
-; CHECK: i32.le_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.le_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ule_i32(i32 %x, i32 %y) {
   %a = icmp ule i32 %x, %y
   %b = zext i1 %a to i32
@@ -64,8 +82,11 @@
 }
 
 ; CHECK-LABEL: sgt_i32:
-; CHECK: i32.gt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.gt_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @sgt_i32(i32 %x, i32 %y) {
   %a = icmp sgt i32 %x, %y
   %b = zext i1 %a to i32
@@ -73,8 +94,11 @@
 }
 
 ; CHECK-LABEL: sge_i32:
-; CHECK: i32.ge_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.ge_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @sge_i32(i32 %x, i32 %y) {
   %a = icmp sge i32 %x, %y
   %b = zext i1 %a to i32
@@ -82,8 +106,11 @@
 }
 
 ; CHECK-LABEL: ugt_i32:
-; CHECK: i32.gt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.gt_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ugt_i32(i32 %x, i32 %y) {
   %a = icmp ugt i32 %x, %y
   %b = zext i1 %a to i32
@@ -91,8 +118,11 @@
 }
 
 ; CHECK-LABEL: uge_i32:
-; CHECK: i32.ge_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i32.ge_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @uge_i32(i32 %x, i32 %y) {
   %a = icmp uge i32 %x, %y
   %b = zext i1 %a to i32
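The SLOW/FAST prefixes introduced in the RUN lines above capture how the two instruction selectors materialize the boolean differently: the SelectionDAG path (SLOW) knows i32.eq and friends already produce 0 or 1 and returns the result directly, while fast-isel (FAST) conservatively re-masks the zext'd i1 with an i32.and of 1. A minimal sketch of the prefix mechanics (hypothetical test header, assuming the same llc flags as above):

; RUN: llc < %s ... | FileCheck %s --check-prefixes CHECK,SLOW
; RUN: llc < %s ... -fast-isel | FileCheck %s --check-prefixes CHECK,FAST
; CHECK lines must match in both runs; SLOW-/FAST- lines only in their own run,
; so one file can express the shared comparison plus each selector's tail.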
diff --git a/test/CodeGen/WebAssembly/comparisons-i64.ll b/test/CodeGen/WebAssembly/comparisons-i64.ll
index ffd460f..899cc06 100644
--- a/test/CodeGen/WebAssembly/comparisons-i64.ll
+++ b/test/CodeGen/WebAssembly/comparisons-i64.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck %s --check-prefixes CHECK,SLOW
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes CHECK,FAST
 
 ; Test that basic 64-bit integer comparison operations assemble as expected.
 
@@ -8,10 +8,13 @@
 
 ; CHECK-LABEL: eq_i64:
 ; CHECK-NEXT: .functype eq_i64 (i64, i64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
-; CHECK-NEXT: i64.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.eq $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SLOW-NEXT:  return $pop[[L2]]{{$}}
+; FAST-NEXT:  i32.const $push[[L3:[0-9]+]]=, 1{{$}}
+; FAST-NEXT:  i32.and $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; FAST-NEXT:  return $pop[[L4]]{{$}}
 define i32 @eq_i64(i64 %x, i64 %y) {
   %a = icmp eq i64 %x, %y
   %b = zext i1 %a to i32
@@ -19,8 +22,11 @@
 }
 
 ; CHECK-LABEL: ne_i64:
-; CHECK: i64.ne $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.ne $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ne_i64(i64 %x, i64 %y) {
   %a = icmp ne i64 %x, %y
   %b = zext i1 %a to i32
@@ -28,8 +34,11 @@
 }
 
 ; CHECK-LABEL: slt_i64:
-; CHECK: i64.lt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.lt_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @slt_i64(i64 %x, i64 %y) {
   %a = icmp slt i64 %x, %y
   %b = zext i1 %a to i32
@@ -37,8 +46,11 @@
 }
 
 ; CHECK-LABEL: sle_i64:
-; CHECK: i64.le_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.le_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @sle_i64(i64 %x, i64 %y) {
   %a = icmp sle i64 %x, %y
   %b = zext i1 %a to i32
@@ -46,8 +58,11 @@
 }
 
 ; CHECK-LABEL: ult_i64:
-; CHECK: i64.lt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.lt_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ult_i64(i64 %x, i64 %y) {
   %a = icmp ult i64 %x, %y
   %b = zext i1 %a to i32
@@ -55,8 +70,11 @@
 }
 
 ; CHECK-LABEL: ule_i64:
-; CHECK: i64.le_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.le_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ule_i64(i64 %x, i64 %y) {
   %a = icmp ule i64 %x, %y
   %b = zext i1 %a to i32
@@ -64,8 +82,11 @@
 }
 
 ; CHECK-LABEL: sgt_i64:
-; CHECK: i64.gt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.gt_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @sgt_i64(i64 %x, i64 %y) {
   %a = icmp sgt i64 %x, %y
   %b = zext i1 %a to i32
@@ -73,8 +94,11 @@
 }
 
 ; CHECK-LABEL: sge_i64:
-; CHECK: i64.ge_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.ge_s $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @sge_i64(i64 %x, i64 %y) {
   %a = icmp sge i64 %x, %y
   %b = zext i1 %a to i32
@@ -82,8 +106,11 @@
 }
 
 ; CHECK-LABEL: ugt_i64:
-; CHECK: i64.gt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.gt_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @ugt_i64(i64 %x, i64 %y) {
   %a = icmp ugt i64 %x, %y
   %b = zext i1 %a to i32
@@ -91,8 +118,11 @@
 }
 
 ; CHECK-LABEL: uge_i64:
-; CHECK: i64.ge_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
+; CHECK: i64.ge_u $push[[L0:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; SLOW-NEXT: return $pop[[L0]]{{$}}
+; FAST-NEXT: i32.const $push[[L1:[0-9]+]]=, 1{{$}}
+; FAST-NEXT: i32.and $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; FAST-NEXT: return $pop[[L2]]{{$}}
 define i32 @uge_i64(i64 %x, i64 %y) {
   %a = icmp uge i64 %x, %y
   %b = zext i1 %a to i32
diff --git a/test/CodeGen/WebAssembly/conv-trap.ll b/test/CodeGen/WebAssembly/conv-trap.ll
index ca87f6b..aa589de 100644
--- a/test/CodeGen/WebAssembly/conv-trap.ll
+++ b/test/CodeGen/WebAssembly/conv-trap.ll
@@ -17,7 +17,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i32.trunc_s/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_f32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_s_f32(float %x) {
   %a = fptosi float %x to i32
@@ -37,7 +37,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i32.trunc_u/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_f32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_u_f32(float %x) {
   %a = fptoui float %x to i32
@@ -55,7 +55,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i32.trunc_s/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_f64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_s_f64(double %x) {
   %a = fptosi double %x to i32
@@ -75,7 +75,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i32.trunc_u/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_f64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_u_f64(double %x) {
   %a = fptoui double %x to i32
@@ -93,7 +93,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i64.trunc_s/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_f32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_s_f32(float %x) {
   %a = fptosi float %x to i64
@@ -113,7 +113,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i64.trunc_u/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_f32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_u_f32(float %x) {
   %a = fptoui float %x to i64
@@ -131,7 +131,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i64.trunc_s/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_f64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_s_f64(double %x) {
   %a = fptosi double %x to i64
@@ -151,7 +151,7 @@
 ; CHECK-NEXT: return $pop[[ALT]]{{$}}
 ; CHECK-NEXT: BB
 ; CHECK-NEXT: end_block
-; CHECK-NEXT: i64.trunc_u/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_f64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_u_f64(double %x) {
   %a = fptoui double %x to i64
diff --git a/test/CodeGen/WebAssembly/conv.ll b/test/CodeGen/WebAssembly/conv.ll
index 8b729a6..68f94154 100644
--- a/test/CodeGen/WebAssembly/conv.ll
+++ b/test/CodeGen/WebAssembly/conv.ll
@@ -7,7 +7,7 @@
 
 ; CHECK-LABEL: i32_wrap_i64:
 ; CHECK-NEXT: .functype i32_wrap_i64 (i64) -> (i32){{$}}
-; CHECK-NEXT: i32.wrap/i64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.wrap_i64 $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_wrap_i64(i64 %x) {
   %a = trunc i64 %x to i32
@@ -16,7 +16,7 @@
 
 ; CHECK-LABEL: i64_extend_s_i32:
 ; CHECK-NEXT: .functype i64_extend_s_i32 (i32) -> (i64){{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_extend_s_i32(i32 %x) {
   %a = sext i32 %x to i64
@@ -25,7 +25,7 @@
 
 ; CHECK-LABEL: i64_extend_u_i32:
 ; CHECK-NEXT: .functype i64_extend_u_i32 (i32) -> (i64){{$}}
-; CHECK-NEXT: i64.extend_u/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.extend_i32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_extend_u_i32(i32 %x) {
   %a = zext i32 %x to i64
@@ -34,7 +34,7 @@
 
 ; CHECK-LABEL: i32_trunc_s_f32:
 ; CHECK-NEXT: .functype i32_trunc_s_f32 (f32) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_s:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_s_f32(float %x) {
   %a = fptosi float %x to i32
@@ -43,7 +43,7 @@
 
 ; CHECK-LABEL: i32_trunc_sat_s_f32:
 ; CHECK-NEXT: .functype i32_trunc_sat_s_f32 (f32) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_s:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i32 @llvm.wasm.trunc.saturate.signed.i32.f32(float)
 define i32 @i32_trunc_sat_s_f32(float %x) {
@@ -53,7 +53,7 @@
 
 ; CHECK-LABEL: i32_trunc_u_f32:
 ; CHECK-NEXT: .functype i32_trunc_u_f32 (f32) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_u:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_u_f32(float %x) {
   %a = fptoui float %x to i32
@@ -62,7 +62,7 @@
 
 ; CHECK-LABEL: i32_trunc_sat_u_f32:
 ; CHECK-NEXT: .functype i32_trunc_sat_u_f32 (f32) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_u:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i32 @llvm.wasm.trunc.saturate.unsigned.i32.f32(float)
 define i32 @i32_trunc_sat_u_f32(float %x) {
@@ -72,7 +72,7 @@
 
 ; CHECK-LABEL: i32_trunc_s_f64:
 ; CHECK-NEXT: .functype i32_trunc_s_f64 (f64) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_s:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_s_f64(double %x) {
   %a = fptosi double %x to i32
@@ -81,7 +81,7 @@
 
 ; CHECK-LABEL: i32_trunc_sat_s_f64:
 ; CHECK-NEXT: .functype i32_trunc_sat_s_f64 (f64) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_s:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i32 @llvm.wasm.trunc.saturate.signed.i32.f64(double)
 define i32 @i32_trunc_sat_s_f64(double %x) {
@@ -91,7 +91,7 @@
 
 ; CHECK-LABEL: i32_trunc_u_f64:
 ; CHECK-NEXT: .functype i32_trunc_u_f64 (f64) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_u:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @i32_trunc_u_f64(double %x) {
   %a = fptoui double %x to i32
@@ -100,7 +100,7 @@
 
 ; CHECK-LABEL: i32_trunc_sat_u_f64:
 ; CHECK-NEXT: .functype i32_trunc_sat_u_f64 (f64) -> (i32){{$}}
-; CHECK-NEXT: i32.trunc_u:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_sat_f64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i32 @llvm.wasm.trunc.saturate.unsigned.i32.f64(double)
 define i32 @i32_trunc_sat_u_f64(double %x) {
@@ -110,7 +110,7 @@
 
 ; CHECK-LABEL: i64_trunc_s_f32:
 ; CHECK-NEXT: .functype i64_trunc_s_f32 (f32) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_s:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_s_f32(float %x) {
   %a = fptosi float %x to i64
@@ -119,7 +119,7 @@
 
 ; CHECK-LABEL: i64_trunc_sat_s_f32:
 ; CHECK-NEXT: .functype i64_trunc_sat_s_f32 (f32) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_s:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i64 @llvm.wasm.trunc.saturate.signed.i64.f32(float)
 define i64 @i64_trunc_sat_s_f32(float %x) {
@@ -129,7 +129,7 @@
 
 ; CHECK-LABEL: i64_trunc_u_f32:
 ; CHECK-NEXT: .functype i64_trunc_u_f32 (f32) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_u:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_u_f32(float %x) {
   %a = fptoui float %x to i64
@@ -138,7 +138,7 @@
 
 ; CHECK-LABEL: i64_trunc_sat_u_f32:
 ; CHECK-NEXT: .functype i64_trunc_sat_u_f32 (f32) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_u:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i64 @llvm.wasm.trunc.saturate.unsigned.i64.f32(float)
 define i64 @i64_trunc_sat_u_f32(float %x) {
@@ -148,7 +148,7 @@
 
 ; CHECK-LABEL: i64_trunc_s_f64:
 ; CHECK-NEXT: .functype i64_trunc_s_f64 (f64) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_s:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_s_f64(double %x) {
   %a = fptosi double %x to i64
@@ -157,7 +157,7 @@
 
 ; CHECK-LABEL: i64_trunc_sat_s_f64:
 ; CHECK-NEXT: .functype i64_trunc_sat_s_f64 (f64) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_s:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i64 @llvm.wasm.trunc.saturate.signed.i64.f64(double)
 define i64 @i64_trunc_sat_s_f64(double %x) {
@@ -167,7 +167,7 @@
 
 ; CHECK-LABEL: i64_trunc_u_f64:
 ; CHECK-NEXT: .functype i64_trunc_u_f64 (f64) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_u:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_trunc_u_f64(double %x) {
   %a = fptoui double %x to i64
@@ -176,7 +176,7 @@
 
 ; CHECK-LABEL: i64_trunc_sat_u_f64:
 ; CHECK-NEXT: .functype i64_trunc_sat_u_f64 (f64) -> (i64){{$}}
-; CHECK-NEXT: i64.trunc_u:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_sat_f64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 declare i64 @llvm.wasm.trunc.saturate.unsigned.i64.f64(double)
 define i64 @i64_trunc_sat_u_f64(double %x) {
@@ -186,7 +186,7 @@
 
 ; CHECK-LABEL: f32_convert_s_i32:
 ; CHECK-NEXT: .functype f32_convert_s_i32 (i32) -> (f32){{$}}
-; CHECK-NEXT: f32.convert_s/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f32.convert_i32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define float @f32_convert_s_i32(i32 %x) {
   %a = sitofp i32 %x to float
@@ -195,7 +195,7 @@
 
 ; CHECK-LABEL: f32_convert_u_i32:
 ; CHECK-NEXT: .functype f32_convert_u_i32 (i32) -> (f32){{$}}
-; CHECK-NEXT: f32.convert_u/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f32.convert_i32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define float @f32_convert_u_i32(i32 %x) {
   %a = uitofp i32 %x to float
@@ -204,7 +204,7 @@
 
 ; CHECK-LABEL: f64_convert_s_i32:
 ; CHECK-NEXT: .functype f64_convert_s_i32 (i32) -> (f64){{$}}
-; CHECK-NEXT: f64.convert_s/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f64.convert_i32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define double @f64_convert_s_i32(i32 %x) {
   %a = sitofp i32 %x to double
@@ -213,7 +213,7 @@
 
 ; CHECK-LABEL: f64_convert_u_i32:
 ; CHECK-NEXT: .functype f64_convert_u_i32 (i32) -> (f64){{$}}
-; CHECK-NEXT: f64.convert_u/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f64.convert_i32_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define double @f64_convert_u_i32(i32 %x) {
   %a = uitofp i32 %x to double
@@ -222,7 +222,7 @@
 
 ; CHECK-LABEL: f32_convert_s_i64:
 ; CHECK-NEXT: .functype f32_convert_s_i64 (i64) -> (f32){{$}}
-; CHECK-NEXT: f32.convert_s/i64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f32.convert_i64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define float @f32_convert_s_i64(i64 %x) {
   %a = sitofp i64 %x to float
@@ -231,7 +231,7 @@
 
 ; CHECK-LABEL: f32_convert_u_i64:
 ; CHECK-NEXT: .functype f32_convert_u_i64 (i64) -> (f32){{$}}
-; CHECK-NEXT: f32.convert_u/i64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f32.convert_i64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define float @f32_convert_u_i64(i64 %x) {
   %a = uitofp i64 %x to float
@@ -240,7 +240,7 @@
 
 ; CHECK-LABEL: f64_convert_s_i64:
 ; CHECK-NEXT: .functype f64_convert_s_i64 (i64) -> (f64){{$}}
-; CHECK-NEXT: f64.convert_s/i64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f64.convert_i64_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define double @f64_convert_s_i64(i64 %x) {
   %a = sitofp i64 %x to double
@@ -249,7 +249,7 @@
 
 ; CHECK-LABEL: f64_convert_u_i64:
 ; CHECK-NEXT: .functype f64_convert_u_i64 (i64) -> (f64){{$}}
-; CHECK-NEXT: f64.convert_u/i64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f64.convert_i64_u $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define double @f64_convert_u_i64(i64 %x) {
   %a = uitofp i64 %x to double
@@ -258,7 +258,7 @@
 
 ; CHECK-LABEL: f64_promote_f32:
 ; CHECK-NEXT: .functype f64_promote_f32 (f32) -> (f64){{$}}
-; CHECK-NEXT: f64.promote/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f64.promote_f32 $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define double @f64_promote_f32(float %x) {
   %a = fpext float %x to double
@@ -267,7 +267,7 @@
 
 ; CHECK-LABEL: f32_demote_f64:
 ; CHECK-NEXT: .functype f32_demote_f64 (f64) -> (f32){{$}}
-; CHECK-NEXT: f32.demote/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f32.demote_f64 $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define float @f32_demote_f64(double %x) {
   %a = fptrunc double %x to float
@@ -278,7 +278,7 @@
 ; we need to pattern-match back to a specific instruction.

 
 ; CHECK-LABEL: anyext:
-; CHECK: i64.extend_u/i32 $push0=, $0{{$}}
+; CHECK: i64.extend_i32_u $push0=, $0{{$}}
 define i64 @anyext(i32 %x) {
     %y = sext i32 %x to i64
     %w = shl i64 %y, 32
@@ -286,28 +286,28 @@
 }
 
 ; CHECK-LABEL: bitcast_i32_to_float:
-; CHECK: f32.reinterpret/i32   $push0=, $0{{$}}
+; CHECK: f32.reinterpret_i32   $push0=, $0{{$}}
 define float @bitcast_i32_to_float(i32 %a) {
   %t = bitcast i32 %a to float
   ret float %t
 }
 
 ; CHECK-LABEL: bitcast_float_to_i32:
-; CHECK: i32.reinterpret/f32   $push0=, $0{{$}}
+; CHECK: i32.reinterpret_f32   $push0=, $0{{$}}
 define i32 @bitcast_float_to_i32(float %a) {
   %t = bitcast float %a to i32
   ret i32 %t
 }
 
 ; CHECK-LABEL: bitcast_i64_to_double:
-; CHECK: f64.reinterpret/i64   $push0=, $0{{$}}
+; CHECK: f64.reinterpret_i64   $push0=, $0{{$}}
 define double @bitcast_i64_to_double(i64 %a) {
   %t = bitcast i64 %a to double
   ret double %t
 }
 
 ; CHECK-LABEL: bitcast_double_to_i64:
-; CHECK: i64.reinterpret/f64   $push0=, $0{{$}}
+; CHECK: i64.reinterpret_f64   $push0=, $0{{$}}
 define i64 @bitcast_double_to_i64(double %a) {
   %t = bitcast double %a to i64
   ret i64 %t
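Note the contrast with conv-trap.ll earlier in this patch: there, plain fptosi/fptoui are guarded by explicit range checks around the trapping i32.trunc_f32_s form, while here the same IR is checked against the saturating i32.trunc_sat_f32_s form, presumably because this test enables the nontrapping-fptoint feature (its RUN line is not shown in this excerpt). A minimal sketch of the two lowerings (hypothetical function, annotations only):

define i32 @trunc_example(float %x) {
  ; with nontrapping-fptoint: i32.trunc_sat_f32_s $push0=, $0
  ; without it: a block/br_if range guard around i32.trunc_f32_s,
  ; as in the conv-trap.ll CHECK lines above
  %a = fptosi float %x to i32
  ret i32 %a
}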
diff --git a/test/CodeGen/WebAssembly/copysign-casts.ll b/test/CodeGen/WebAssembly/copysign-casts.ll
index 381d880..51aeb86 100644
--- a/test/CodeGen/WebAssembly/copysign-casts.ll
+++ b/test/CodeGen/WebAssembly/copysign-casts.ll
@@ -10,7 +10,7 @@
 declare float @copysignf(float, float) nounwind readnone
 
 ; CHECK-LABEL: fold_promote:
-; CHECK: f64.promote/f32 $push0=, $pop{{[0-9]+}}{{$}}
+; CHECK: f64.promote_f32 $push0=, $pop{{[0-9]+}}{{$}}
 ; CHECK: f64.copysign    $push1=, $pop{{[0-9]+}}, $pop0{{$}}
 define double @fold_promote(double %a, float %b) {
   %c = fpext float %b to double
@@ -19,7 +19,7 @@
 }
 
 ; CHECK-LABEL: fold_demote:{{$}}
-; CHECK: f32.demote/f64  $push0=, $pop{{[0-9]+}}{{$}}
+; CHECK: f32.demote_f64  $push0=, $pop{{[0-9]+}}{{$}}
 ; CHECK: f32.copysign    $push1=, $pop{{[0-9]+}}, $pop0{{$}}
 define float @fold_demote(float %a, double %b) {
   %c = fptrunc double %b to float
diff --git a/test/CodeGen/WebAssembly/exception.ll b/test/CodeGen/WebAssembly/exception.ll
index b0365fc..de2d061 100644
--- a/test/CodeGen/WebAssembly/exception.ll
+++ b/test/CodeGen/WebAssembly/exception.ll
@@ -12,7 +12,7 @@
 declare void @llvm.wasm.throw(i32, i8*)
 
 ; CHECK-LABEL: test_throw:
-; CHECK:      get_local $push0=, 0
+; CHECK:      local.get $push0=, 0
 ; CHECK-NEXT: throw __cpp_exception@EVENT, $pop0
 ; CHECK-NOT:  unreachable
 define void @test_throw(i8* %p) {
@@ -21,11 +21,11 @@
 }
 
 ; CHECK-LABEL: test_catch_rethrow:
-; CHECK:   get_global  $push{{.+}}=, __stack_pointer@GLOBAL
+; CHECK:   global.get  $push{{.+}}=, __stack_pointer@GLOBAL
 ; CHECK:   try
 ; CHECK:   call      foo@FUNCTION
 ; CHECK:   i32.catch     $push{{.+}}=, 0
-; CHECK:   set_global  __stack_pointer@GLOBAL
+; CHECK:   global.set  __stack_pointer@GLOBAL
 ; CHECK-DAG:   i32.store  __wasm_lpad_context
 ; CHECK-DAG:   i32.store  __wasm_lpad_context+4
 ; CHECK:   i32.call  $push{{.+}}=, _Unwind_CallPersonality@FUNCTION
@@ -67,7 +67,7 @@
 ; CHECK:   try
 ; CHECK:   call      foo@FUNCTION
 ; CHECK:   catch_all
-; CHECK:   set_global  __stack_pointer@GLOBAL
+; CHECK:   global.set  __stack_pointer@GLOBAL
 ; CHECK:   i32.call  $push{{.+}}=, _ZN7CleanupD1Ev@FUNCTION
 ; CHECK:   rethrow
 ; CHECK:   end_try
@@ -165,17 +165,17 @@
 ; CHECK:  try
 ; CHECK:  call      foo@FUNCTION
 ; CHECK:  i32.catch
-; CHECK-NOT:  get_global  $push{{.+}}=, __stack_pointer@GLOBAL
-; CHECK:  set_global  __stack_pointer@GLOBAL
+; CHECK-NOT:  global.get  $push{{.+}}=, __stack_pointer@GLOBAL
+; CHECK:  global.set  __stack_pointer@GLOBAL
 ; CHECK:  try
 ; CHECK:  call      foo@FUNCTION
 ; CHECK:  catch_all
-; CHECK-NOT:  get_global  $push{{.+}}=, __stack_pointer@GLOBAL
-; CHECK:  set_global  __stack_pointer@GLOBAL
+; CHECK-NOT:  global.get  $push{{.+}}=, __stack_pointer@GLOBAL
+; CHECK:  global.set  __stack_pointer@GLOBAL
 ; CHECK:  call      __cxa_end_catch@FUNCTION
-; CHECK-NOT:  set_global  __stack_pointer@GLOBAL, $pop{{.+}}
+; CHECK-NOT:  global.set  __stack_pointer@GLOBAL, $pop{{.+}}
 ; CHECK:  end_try
-; CHECK-NOT:  set_global  __stack_pointer@GLOBAL, $pop{{.+}}
+; CHECK-NOT:  global.set  __stack_pointer@GLOBAL, $pop{{.+}}
 ; CHECK:  end_try
 define void @test_no_prolog_epilog_in_ehpad() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
 entry:
@@ -226,7 +226,7 @@
 ; CHECK:  try
 ; CHECK:  call foo@FUNCTION
 ; CHECK:  end_try
-; CHECK-NOT:  set_global  __stack_pointer@GLOBAL
+; CHECK-NOT:  global.set  __stack_pointer@GLOBAL
 ; CHECK:  return
 define void @no_sp_writeback() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
 entry:
diff --git a/test/CodeGen/WebAssembly/f16.ll b/test/CodeGen/WebAssembly/f16.ll
index 6ffedd3..645a09a 100644
--- a/test/CodeGen/WebAssembly/f16.ll
+++ b/test/CodeGen/WebAssembly/f16.ll
@@ -8,7 +8,7 @@
 
 ; CHECK-LABEL: demote:
 ; CHECK-NEXT: .functype demote (f32) -> (f32){{$}}
-; CHECK-NEXT: get_local	$push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get	$push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.call	$push[[L1:[0-9]+]]=, __gnu_f2h_ieee@FUNCTION, $pop[[L0]]{{$}}
 ; CHECK-NEXT: f32.call	$push[[L2:[0-9]+]]=, __gnu_h2f_ieee@FUNCTION, $pop[[L1]]{{$}}
 ; CHECK-NEXT: return  	$pop[[L2]]{{$}}
@@ -19,7 +19,7 @@
 
 ; CHECK-LABEL: promote:
 ; CHECK-NEXT: .functype promote (f32) -> (f32){{$}}
-; CHECK-NEXT: get_local	$push0=, 0{{$}}
+; CHECK-NEXT: local.get	$push0=, 0{{$}}
 ; CHECK-NEXT: return  	$pop0{{$}}
 define float @promote(half %f) {
     %t = fpext half %f to float
diff --git a/test/CodeGen/WebAssembly/f32.ll b/test/CodeGen/WebAssembly/f32.ll
index fd89261..4591291 100644
--- a/test/CodeGen/WebAssembly/f32.ll
+++ b/test/CodeGen/WebAssembly/f32.ll
@@ -17,8 +17,8 @@
 
 ; CHECK-LABEL: fadd32:
 ; CHECK-NEXT: .functype fadd32 (f32, f32) -> (f32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.add $push[[LR:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop[[LR]]{{$}}
 define float @fadd32(float %x, float %y) {
diff --git a/test/CodeGen/WebAssembly/f64.ll b/test/CodeGen/WebAssembly/f64.ll
index 98a23b1..4536e8d 100644
--- a/test/CodeGen/WebAssembly/f64.ll
+++ b/test/CodeGen/WebAssembly/f64.ll
@@ -17,8 +17,8 @@
 
 ; CHECK-LABEL: fadd64:
 ; CHECK-NEXT: .functype fadd64 (f64, f64) -> (f64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.add $push[[LR:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop[[LR]]{{$}}
 define double @fadd64(double %x, double %y) {
diff --git a/test/CodeGen/WebAssembly/fast-isel-i24.ll b/test/CodeGen/WebAssembly/fast-isel-i24.ll
index b0ca276..dddf2c4 100644
--- a/test/CodeGen/WebAssembly/fast-isel-i24.ll
+++ b/test/CodeGen/WebAssembly/fast-isel-i24.ll
@@ -9,8 +9,8 @@
 
 ; CHECK-LABEL: add:
 ; CHECK-NEXT: .functype add (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local	$push2=, 0{{$}}
-; CHECK-NEXT: get_local	$push1=, 1{{$}}
+; CHECK-NEXT: local.get	$push2=, 0{{$}}
+; CHECK-NEXT: local.get	$push1=, 1{{$}}
 ; CHECK-NEXT: i32.add 	$push0=, $pop2, $pop1{{$}}
 ; CHECK-NEXT: end_function
 define i24 @add(i24 %x, i24 %y) {
diff --git a/test/CodeGen/WebAssembly/fast-isel-noreg.ll b/test/CodeGen/WebAssembly/fast-isel-noreg.ll
index d4c0af6..ce36fd0 100644
--- a/test/CodeGen/WebAssembly/fast-isel-noreg.ll
+++ b/test/CodeGen/WebAssembly/fast-isel-noreg.ll
@@ -38,7 +38,11 @@
 ; CHECK: i32.const {{.*}}, 24
 ; CHECK: i32.shr_s
 ; CHECK: i32.const {{.*}}, 64
-; CHECK: br_if 0, $pop0
+; CHECK: i32.lt_s
+; CHECK: i32.const {{.*}}, 1
+; CHECK: i32.and
+; CHECK: i32.eqz
+; CHECK: br_if 0, $pop{{[0-9]+}}
 define hidden i32 @d() #0 {
 entry:
   %t = icmp slt i8 ptrtoint (void ()* @addr to i8), 64
@@ -53,7 +57,11 @@
 ; CHECK: i32.const {{.*}}, 255
 ; CHECK: i32.and
 ; CHECK: i32.const {{.*}}, 64
-; CHECK: br_if 0, $pop0
+; CHECK: i32.lt_u
+; CHECK: i32.const {{.*}}, 1
+; CHECK: i32.and
+; CHECK: i32.eqz
+; CHECK: br_if 0, $pop{{[0-9]+}}
 define hidden i32 @e() #0 {
 entry:
   %t = icmp ult i8 ptrtoint (void ()* @addr to i8), 64
diff --git a/test/CodeGen/WebAssembly/fast-isel.ll b/test/CodeGen/WebAssembly/fast-isel.ll
index b03abc5..fa9bf24 100644
--- a/test/CodeGen/WebAssembly/fast-isel.ll
+++ b/test/CodeGen/WebAssembly/fast-isel.ll
@@ -21,28 +21,28 @@
 }
 
 ; CHECK-LABEL: bitcast_i32_f32:
-; CHECK: i32.reinterpret/f32 $push{{[0-9]+}}=, $0{{$}}
+; CHECK: i32.reinterpret_f32 $push{{[0-9]+}}=, $0{{$}}
 define i32 @bitcast_i32_f32(float %x) {
   %y = bitcast float %x to i32
   ret i32 %y
 }
 
 ; CHECK-LABEL: bitcast_f32_i32:
-; CHECK: f32.reinterpret/i32 $push{{[0-9]+}}=, $0{{$}}
+; CHECK: f32.reinterpret_i32 $push{{[0-9]+}}=, $0{{$}}
 define float @bitcast_f32_i32(i32 %x) {
   %y = bitcast i32 %x to float
   ret float %y
 }
 
 ; CHECK-LABEL: bitcast_i64_f64:
-; CHECK: i64.reinterpret/f64 $push{{[0-9]+}}=, $0{{$}}
+; CHECK: i64.reinterpret_f64 $push{{[0-9]+}}=, $0{{$}}
 define i64 @bitcast_i64_f64(double %x) {
   %y = bitcast double %x to i64
   ret i64 %y
 }
 
 ; CHECK-LABEL: bitcast_f64_i64:
-; CHECK: f64.reinterpret/i64 $push{{[0-9]+}}=, $0{{$}}
+; CHECK: f64.reinterpret_i64 $push{{[0-9]+}}=, $0{{$}}
 define double @bitcast_f64_i64(i64 %x) {
   %y = bitcast i64 %x to double
   ret double %y
diff --git a/test/CodeGen/WebAssembly/function-bitcasts.ll b/test/CodeGen/WebAssembly/function-bitcasts.ll
index 93c09cc..a779cbe 100644
--- a/test/CodeGen/WebAssembly/function-bitcasts.ll
+++ b/test/CodeGen/WebAssembly/function-bitcasts.ll
@@ -79,7 +79,7 @@
 }
 
 ; CHECK-LABEL: test_varargs:
-; CHECK:      set_global
+; CHECK:      global.set
 ; CHECK:      i32.const   $push[[L3:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: call        .Lvararg_bitcast@FUNCTION, $pop[[L3]]{{$}}
 ; CHECK-NEXT: i32.const   $push[[L4:[0-9]+]]=, 0{{$}}
@@ -199,5 +199,5 @@
 ; CHECK-LABEL: .Lfoo1_bitcast:
 ; CHECK-NEXT: .functype .Lfoo1_bitcast () -> (i32)
 ; CHECK-NEXT: call        foo1@FUNCTION{{$}}
-; CHECK-NEXT: copy_local  $push0=, $0
+; CHECK-NEXT: local.copy  $push0=, $0
 ; CHECK-NEXT: end_function
diff --git a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
index 4fcc425..cff7fc7 100644
--- a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
@@ -38,7 +38,7 @@
   ret i32 %v
 }
 
-; The default alignment in LLVM is the same as the defualt alignment in wasm.
+; The default alignment in LLVM is the same as the default alignment in wasm.
 
 ; CHECK-LABEL: ldi32:
 ; CHECK-NEXT: .functype ldi32 (i32) -> (i32){{$}}
@@ -142,7 +142,7 @@
   ret void
 }
 
-; The default alignment in LLVM is the same as the defualt alignment in wasm.
+; The default alignment in LLVM is the same as the default alignment in wasm.
 
 ; CHECK-LABEL: sti32:
 ; CHECK-NEXT: .functype sti32 (i32, i32) -> (){{$}}
diff --git a/test/CodeGen/WebAssembly/i32.ll b/test/CodeGen/WebAssembly/i32.ll
index 3527137..897e411 100644
--- a/test/CodeGen/WebAssembly/i32.ll
+++ b/test/CodeGen/WebAssembly/i32.ll
@@ -11,8 +11,8 @@
 
 ; CHECK-LABEL: add32:
 ; CHECK-NEXT: .functype add32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.add $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @add32(i32 %x, i32 %y) {
@@ -22,8 +22,8 @@
 
 ; CHECK-LABEL: sub32:
 ; CHECK-NEXT: .functype sub32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.sub $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @sub32(i32 %x, i32 %y) {
@@ -33,8 +33,8 @@
 
 ; CHECK-LABEL: mul32:
 ; CHECK-NEXT: .functype mul32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.mul $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @mul32(i32 %x, i32 %y) {
@@ -44,8 +44,8 @@
 
 ; CHECK-LABEL: sdiv32:
 ; CHECK-NEXT: .functype sdiv32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.div_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @sdiv32(i32 %x, i32 %y) {
@@ -55,8 +55,8 @@
 
 ; CHECK-LABEL: udiv32:
 ; CHECK-NEXT: .functype udiv32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.div_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @udiv32(i32 %x, i32 %y) {
@@ -66,8 +66,8 @@
 
 ; CHECK-LABEL: srem32:
 ; CHECK-NEXT: .functype srem32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.rem_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @srem32(i32 %x, i32 %y) {
@@ -77,8 +77,8 @@
 
 ; CHECK-LABEL: urem32:
 ; CHECK-NEXT: .functype urem32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.rem_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @urem32(i32 %x, i32 %y) {
@@ -88,8 +88,8 @@
 
 ; CHECK-LABEL: and32:
 ; CHECK-NEXT: .functype and32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.and $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @and32(i32 %x, i32 %y) {
@@ -99,8 +99,8 @@
 
 ; CHECK-LABEL: or32:
 ; CHECK-NEXT: .functype or32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.or $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @or32(i32 %x, i32 %y) {
@@ -110,8 +110,8 @@
 
 ; CHECK-LABEL: xor32:
 ; CHECK-NEXT: .functype xor32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.xor $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @xor32(i32 %x, i32 %y) {
@@ -121,8 +121,8 @@
 
 ; CHECK-LABEL: shl32:
 ; CHECK-NEXT: .functype shl32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.shl $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @shl32(i32 %x, i32 %y) {
@@ -132,8 +132,8 @@
 
 ; CHECK-LABEL: shr32:
 ; CHECK-NEXT: .functype shr32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.shr_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @shr32(i32 %x, i32 %y) {
@@ -143,8 +143,8 @@
 
 ; CHECK-LABEL: sar32:
 ; CHECK-NEXT: .functype sar32 (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.shr_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @sar32(i32 %x, i32 %y) {
@@ -154,7 +154,7 @@
 
 ; CHECK-LABEL: clz32:
 ; CHECK-NEXT: .functype clz32 (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.clz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @clz32(i32 %x) {
@@ -164,7 +164,7 @@
 
 ; CHECK-LABEL: clz32_zero_undef:
 ; CHECK-NEXT: .functype clz32_zero_undef (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.clz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @clz32_zero_undef(i32 %x) {
@@ -174,7 +174,7 @@
 
 ; CHECK-LABEL: ctz32:
 ; CHECK-NEXT: .functype ctz32 (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.ctz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @ctz32(i32 %x) {
@@ -184,7 +184,7 @@
 
 ; CHECK-LABEL: ctz32_zero_undef:
 ; CHECK-NEXT: .functype ctz32_zero_undef (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.ctz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @ctz32_zero_undef(i32 %x) {
@@ -194,7 +194,7 @@
 
 ; CHECK-LABEL: popcnt32:
 ; CHECK-NEXT: .functype popcnt32 (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.popcnt $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @popcnt32(i32 %x) {
@@ -204,7 +204,7 @@
 
 ; CHECK-LABEL: eqz32:
 ; CHECK-NEXT: .functype eqz32 (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.eqz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @eqz32(i32 %x) {
@@ -215,8 +215,8 @@
 
 ; CHECK-LABEL: rotl:
 ; CHECK-NEXT: .functype rotl (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.rotl $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @rotl(i32 %x, i32 %y) {
@@ -229,8 +229,8 @@
 
 ; CHECK-LABEL: masked_rotl:
 ; CHECK-NEXT: .functype masked_rotl (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.rotl $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @masked_rotl(i32 %x, i32 %y) {
@@ -244,8 +244,8 @@
 
 ; CHECK-LABEL: rotr:
 ; CHECK-NEXT: .functype rotr (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.rotr $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @rotr(i32 %x, i32 %y) {
@@ -258,8 +258,8 @@
 
 ; CHECK-LABEL: masked_rotr:
 ; CHECK-NEXT: .functype masked_rotr (i32, i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.rotr $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @masked_rotr(i32 %x, i32 %y) {
diff --git a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
index 32933c1..5af843b 100644
--- a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
@@ -47,7 +47,7 @@
   ret i64 %v
 }
 
-; The default alignment in LLVM is the same as the defualt alignment in wasm.
+; The default alignment in LLVM is the same as the default alignment in wasm.
 
 ; CHECK-LABEL: ldi64:
 ; CHECK-NEXT: .functype ldi64 (i32) -> (i64){{$}}
@@ -205,7 +205,7 @@
   ret void
 }
 
-; The default alignment in LLVM is the same as the defualt alignment in wasm.
+; The default alignment in LLVM is the same as the default alignment in wasm.
 
 ; CHECK-LABEL: sti64:
 ; CHECK-NEXT: .functype sti64 (i32, i64) -> (){{$}}
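; A sketch of what the default-alignment comment above means in practice
; (illustrative, following this test's CHECK conventions; not output checked
; by this hunk):
;
;   %v = load i64, i64* %p           ; ABI default alignment: 8 bytes
;     => i64.load $push0=, 0($pop0)             ; natural alignment, no hint printed
;   %v = load i64, i64* %p, align 1  ; under-aligned load
;     => i64.load $push0=, 0($pop0):p2align=0   ; explicit log2-alignment hint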
diff --git a/test/CodeGen/WebAssembly/i64.ll b/test/CodeGen/WebAssembly/i64.ll
index 72adf7a..9ac52f9 100644
--- a/test/CodeGen/WebAssembly/i64.ll
+++ b/test/CodeGen/WebAssembly/i64.ll
@@ -11,8 +11,8 @@
 
 ; CHECK-LABEL: add64:
 ; CHECK-NEXT: .functype add64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.add $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @add64(i64 %x, i64 %y) {
@@ -22,8 +22,8 @@
 
 ; CHECK-LABEL: sub64:
 ; CHECK-NEXT: .functype sub64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.sub $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @sub64(i64 %x, i64 %y) {
@@ -33,8 +33,8 @@
 
 ; CHECK-LABEL: mul64:
 ; CHECK-NEXT: .functype mul64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.mul $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @mul64(i64 %x, i64 %y) {
@@ -44,8 +44,8 @@
 
 ; CHECK-LABEL: sdiv64:
 ; CHECK-NEXT: .functype sdiv64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.div_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @sdiv64(i64 %x, i64 %y) {
@@ -55,8 +55,8 @@
 
 ; CHECK-LABEL: udiv64:
 ; CHECK-NEXT: .functype udiv64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.div_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @udiv64(i64 %x, i64 %y) {
@@ -66,8 +66,8 @@
 
 ; CHECK-LABEL: srem64:
 ; CHECK-NEXT: .functype srem64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.rem_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @srem64(i64 %x, i64 %y) {
@@ -77,8 +77,8 @@
 
 ; CHECK-LABEL: urem64:
 ; CHECK-NEXT: .functype urem64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.rem_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @urem64(i64 %x, i64 %y) {
@@ -88,8 +88,8 @@
 
 ; CHECK-LABEL: and64:
 ; CHECK-NEXT: .functype and64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.and $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @and64(i64 %x, i64 %y) {
@@ -99,8 +99,8 @@
 
 ; CHECK-LABEL: or64:
 ; CHECK-NEXT: .functype or64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.or $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @or64(i64 %x, i64 %y) {
@@ -110,8 +110,8 @@
 
 ; CHECK-LABEL: xor64:
 ; CHECK-NEXT: .functype xor64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.xor $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @xor64(i64 %x, i64 %y) {
@@ -121,8 +121,8 @@
 
 ; CHECK-LABEL: shl64:
 ; CHECK-NEXT: .functype shl64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.shl $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @shl64(i64 %x, i64 %y) {
@@ -132,8 +132,8 @@
 
 ; CHECK-LABEL: shr64:
 ; CHECK-NEXT: .functype shr64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.shr_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @shr64(i64 %x, i64 %y) {
@@ -143,8 +143,8 @@
 
 ; CHECK-LABEL: sar64:
 ; CHECK-NEXT: .functype sar64 (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.shr_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @sar64(i64 %x, i64 %y) {
@@ -154,7 +154,7 @@
 
 ; CHECK-LABEL: clz64:
 ; CHECK-NEXT: .functype clz64 (i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.clz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @clz64(i64 %x) {
@@ -164,7 +164,7 @@
 
 ; CHECK-LABEL: clz64_zero_undef:
 ; CHECK-NEXT: .functype clz64_zero_undef (i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.clz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @clz64_zero_undef(i64 %x) {
@@ -174,7 +174,7 @@
 
 ; CHECK-LABEL: ctz64:
 ; CHECK-NEXT: .functype ctz64 (i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.ctz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @ctz64(i64 %x) {
@@ -184,7 +184,7 @@
 
 ; CHECK-LABEL: ctz64_zero_undef:
 ; CHECK-NEXT: .functype ctz64_zero_undef (i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.ctz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @ctz64_zero_undef(i64 %x) {
@@ -194,7 +194,7 @@
 
 ; CHECK-LABEL: popcnt64:
 ; CHECK-NEXT: .functype popcnt64 (i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.popcnt $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @popcnt64(i64 %x) {
@@ -204,7 +204,7 @@
 
 ; CHECK-LABEL: eqz64:
 ; CHECK-NEXT: .functype eqz64 (i64) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.eqz $push0=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @eqz64(i64 %x) {
@@ -215,8 +215,8 @@
 
 ; CHECK-LABEL: rotl:
 ; CHECK-NEXT: .functype rotl (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.rotl $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @rotl(i64 %x, i64 %y) {
@@ -229,8 +229,8 @@
 
 ; CHECK-LABEL: masked_rotl:
 ; CHECK-NEXT: .functype masked_rotl (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.rotl $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @masked_rotl(i64 %x, i64 %y) {
@@ -244,8 +244,8 @@
 
 ; CHECK-LABEL: rotr:
 ; CHECK-NEXT: .functype rotr (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.rotr $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @rotr(i64 %x, i64 %y) {
@@ -258,8 +258,8 @@
 
 ; CHECK-LABEL: masked_rotr:
 ; CHECK-NEXT: .functype masked_rotr (i64, i64) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.rotr $push0=, $pop[[L0]], $pop[[L1]]
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @masked_rotr(i64 %x, i64 %y) {
diff --git a/test/CodeGen/WebAssembly/implicit-def.ll b/test/CodeGen/WebAssembly/implicit-def.ll
index 8f7dcc8..702879c 100644
--- a/test/CodeGen/WebAssembly/implicit-def.ll
+++ b/test/CodeGen/WebAssembly/implicit-def.ll
@@ -109,8 +109,8 @@
 ; CHECK-LABEL: implicit_def_v4i32:
 ; CHECK: .LBB{{[0-9]+}}_4:{{$}}
 ; CHECK-NEXT: end_block{{$}}
-; CHECK-NEXT: v128.const $push[[R:[0-9]+]]=, 0, 0, 0, 0, 0, 0, 0, 0,
-; CHECK-SAME:                                0, 0, 0, 0, 0, 0, 0, 0{{$}}
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32x4.splat $push[[R:[0-9]+]]=, $pop[[L0]]
 ; CHECK-NEXT: return $pop[[R]]{{$}}
 ; CHECK-NEXT: end_function{{$}}
 define <4 x i32> @implicit_def_v4i32() {
diff --git a/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll b/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll
index 037d26e..2f79cc8 100644
--- a/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll
+++ b/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll
@@ -9,9 +9,9 @@
 ;   int src = 1;
 ;   int dst;
 ;   asm ("i32.const\t2\n"
-;        "\tget_local\t%1\n"
+;        "\tlocal.get\t%1\n"
 ;        "\ti32.add\n"
-;        "\tset_local\t%0"
+;        "\tlocal.set\t%0"
 ;        : "=r" (dst)
 ;        : "r" (src));
 ;   return dst != 3;
@@ -24,18 +24,18 @@
 ; CHECK-NEXT:	.functype main (i32, i32) -> (i32)
 ; CHECK-NEXT:	.local  	i32
 ; CHECK-NEXT:	i32.const	1
-; CHECK-NEXT:	set_local	[[SRC:[0-9]+]]
+; CHECK-NEXT:	local.set	[[SRC:[0-9]+]]
 ; CHECK-NEXT:	i32.const	2
-; CHECK-NEXT:	get_local	[[SRC]]
+; CHECK-NEXT:	local.get	[[SRC]]
 ; CHECK-NEXT:	i32.add
-; CHECK-NEXT:	set_local	[[DST:[0-9]+]]
-; CHECK-NEXT:	get_local	[[DST]]
+; CHECK-NEXT:	local.set	[[DST:[0-9]+]]
+; CHECK-NEXT:	local.get	[[DST]]
 ; CHECK-NEXT:	i32.const	3
 ; CHECK-NEXT:	i32.ne
 
 define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 {
 entry:
-  %0 = tail call i32 asm "i32.const\092\0A\09get_local\09$1\0A\09i32.add\0A\09set_local\09$0", "=r,r"(i32 1) #1
+  %0 = tail call i32 asm "i32.const\092\0A\09local.get\09$1\0A\09i32.add\0A\09local.set\09$0", "=r,r"(i32 1) #1
   %cmp = icmp ne i32 %0, 3
   %conv = zext i1 %cmp to i32
   ret i32 %conv
diff --git a/test/CodeGen/WebAssembly/inline-asm.ll b/test/CodeGen/WebAssembly/inline-asm.ll
index 657f9a6..95c10a6 100644
--- a/test/CodeGen/WebAssembly/inline-asm.ll
+++ b/test/CodeGen/WebAssembly/inline-asm.ll
@@ -11,7 +11,7 @@
 ; CHECK-NEXT: #APP{{$}}
 ; CHECK-NEXT: # 0 = aaa(0){{$}}
 ; CHECK-NEXT: #NO_APP{{$}}
-; CHECK-NEXT: get_local $push0=, 0{{$}}
+; CHECK-NEXT: local.get $push0=, 0{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @foo(i32 %r) {
 entry:
@@ -25,7 +25,7 @@
 ; CHECK-NEXT: #APP{{$}}
 ; CHECK-NEXT: # 0 = ccc(42){{$}}
 ; CHECK-NEXT: #NO_APP{{$}}
-; CHECK-NEXT: get_local $push0=, 0{{$}}
+; CHECK-NEXT: local.get $push0=, 0{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i32 @imm() {
 entry:
@@ -38,7 +38,7 @@
 ; CHECK-NEXT: #APP{{$}}
 ; CHECK-NEXT: # 0 = aaa(0){{$}}
 ; CHECK-NEXT: #NO_APP{{$}}
-; CHECK-NEXT: get_local $push0=, 0{{$}}
+; CHECK-NEXT: local.get $push0=, 0{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i64 @foo_i64(i64 %r) {
 entry:
@@ -48,8 +48,8 @@
 
 ; CHECK-LABEL: X_i16:
 ; CHECK: foo 1{{$}}
-; CHECK: get_local $push[[S0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[S1:[0-9]+]]=, 1{{$}}
+; CHECK: local.get $push[[S0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[S1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store16 0($pop[[S0]]), $pop[[S1]]{{$}}
 define void @X_i16(i16 * %t) {
   call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* %t)
@@ -58,8 +58,8 @@
 
 ; CHECK-LABEL: X_ptr:
 ; CHECK: foo 1{{$}}
-; CHECK: get_local $push[[S0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[S1:[0-9]+]]=, 1{{$}}
+; CHECK: local.get $push[[S0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[S1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store 0($pop[[S0]]), $pop[[S1]]{{$}}
 define void @X_ptr(i16 ** %t) {
   call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16** %t)
@@ -83,11 +83,11 @@
 
 ; CHECK-LABEL: r_constraint
 ; CHECK:      i32.const $push[[S0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: set_local [[L0:[0-9]+]], $pop[[S0]]{{$}}
+; CHECK-NEXT: local.set [[L0:[0-9]+]], $pop[[S0]]{{$}}
 ; CHECK-NEXT: i32.const $push[[S1:[0-9]+]]=, 37{{$}}
-; CHECK-NEXT: set_local [[L1:[0-9]+]], $pop[[S1]]{{$}}
+; CHECK-NEXT: local.set [[L1:[0-9]+]], $pop[[S1]]{{$}}
 ; CHECK:      foo [[L2:[0-9]+]], 1, [[L0]], [[L1]]{{$}}
-; CHECK:      get_local $push{{[0-9]+}}=, [[L2]]{{$}}
+; CHECK:      local.get $push{{[0-9]+}}=, [[L2]]{{$}}
 define hidden i32 @r_constraint(i32 %a, i32 %y) {
 entry:
   %z = bitcast i32 0 to i32
@@ -96,7 +96,7 @@
 }
 
 ; CHECK-LABEL: tied_operands
-; CHECK: get_local  $push0=, 0
+; CHECK: local.get  $push0=, 0
 ; CHECK: return    $pop0
 define i32 @tied_operands(i32 %var) {
 entry:
diff --git a/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll b/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll
new file mode 100644
index 0000000..00fb676
--- /dev/null
+++ b/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll
@@ -0,0 +1,108 @@
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-block-placement -wasm-disable-explicit-locals -wasm-keep-registers -enable-emscripten-cxx-exceptions | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+declare i32 @__gxx_personality_v0(...)
+
+; Check an interesting case of complex control flow arising from CFG rewriting for exceptions.
+; There should *not* be any irreducible control flow here.
+
+; CHECK-LABEL: crashy:
+; CHECK-NOT: br_table
+
+; Function Attrs: minsize noinline optsize
+define void @crashy() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+  invoke void undef()
+          to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %entry
+  invoke void undef()
+          to label %invoke.cont4 unwind label %lpad3
+
+invoke.cont4: ; preds = %invoke.cont
+  %call.i82 = invoke i8* undef()
+          to label %invoke.cont6 unwind label %lpad3
+
+invoke.cont6: ; preds = %invoke.cont4
+  invoke void undef()
+          to label %invoke.cont13 unwind label %lpad12
+
+invoke.cont13: ; preds = %invoke.cont6
+  br label %for.cond
+
+for.cond: ; preds = %for.cond.backedge, %invoke.cont13
+  br i1 undef, label %exit2, label %land.lhs
+
+land.lhs: ; preds = %for.cond
+  %call.i.i.i.i92 = invoke i32 undef()
+          to label %exit1 unwind label %lpad16.loopexit
+
+exit1: ; preds = %land.lhs
+  br label %exit2
+
+exit2: ; preds = %exit1, %for.cond
+  %call.i.i12.i.i93 = invoke i32 undef()
+          to label %exit3 unwind label %lpad16.loopexit
+
+exit3: ; preds = %exit2
+  invoke void undef()
+          to label %invoke.cont23 unwind label %lpad22
+
+invoke.cont23: ; preds = %exit3
+  invoke void undef()
+          to label %invoke.cont25 unwind label %lpad22
+
+invoke.cont25: ; preds = %invoke.cont23
+  %call.i.i137 = invoke i32 undef()
+          to label %invoke.cont29 unwind label %lpad16.loopexit
+
+lpad: ; preds = %entry
+  %0 = landingpad { i8*, i32 }
+          cleanup
+  unreachable
+
+lpad3: ; preds = %invoke.cont4, %invoke.cont
+  %1 = landingpad { i8*, i32 }
+          cleanup
+  unreachable
+
+lpad12: ; preds = %invoke.cont6
+  %2 = landingpad { i8*, i32 }
+          cleanup
+  resume { i8*, i32 } undef
+
+lpad16.loopexit: ; preds = %if.then, %invoke.cont29, %invoke.cont25, %exit2, %land.lhs
+  %lpad.loopexit = landingpad { i8*, i32 }
+          cleanup
+  unreachable
+
+lpad22: ; preds = %invoke.cont23, %exit3
+  %3 = landingpad { i8*, i32 }
+          cleanup
+  unreachable
+
+invoke.cont29: ; preds = %invoke.cont25
+  invoke void undef()
+          to label %invoke.cont33 unwind label %lpad16.loopexit
+
+invoke.cont33: ; preds = %invoke.cont29
+  br label %for.inc
+
+for.inc: ; preds = %invoke.cont33
+  %cmp.i.i141 = icmp eq i8* undef, undef
+  br i1 %cmp.i.i141, label %if.then, label %if.end.i.i146
+
+if.then: ; preds = %for.inc
+  %call.i.i148 = invoke i32 undef()
+          to label %for.cond.backedge unwind label %lpad16.loopexit
+
+for.cond.backedge: ; preds = %if.end.i.i146, %if.then
+  br label %for.cond
+
+if.end.i.i146: ; preds = %for.inc
+  call void undef()
+  br label %for.cond.backedge
+}
+
diff --git a/test/CodeGen/WebAssembly/irreducible-cfg-nested.ll b/test/CodeGen/WebAssembly/irreducible-cfg-nested.ll
new file mode 100644
index 0000000..5677709
--- /dev/null
+++ b/test/CodeGen/WebAssembly/irreducible-cfg-nested.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-block-placement -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+
+; Test an interesting pattern of nested irreducibility.
+; Just check that we resolve all the irreducibility here (if we didn't, we would crash).
+
+; CHECK-LABEL: tre_parse:
+
+define void @tre_parse() {
+entry:
+  br label %for.cond.outer
+
+for.cond.outer:                                   ; preds = %do.body14, %entry
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.cond.backedge, %for.cond.outer
+  %nbranch.0 = phi i32* [ null, %for.cond.outer ], [ %call188, %for.cond.backedge ]
+  switch i8 undef, label %if.else [
+    i8 40, label %do.body14
+    i8 41, label %if.then63
+  ]
+
+do.body14:                                        ; preds = %for.cond
+  br label %for.cond.outer
+
+if.then63:                                        ; preds = %for.cond
+  unreachable
+
+if.else:                                          ; preds = %for.cond
+  switch i8 undef, label %if.then84 [
+    i8 92, label %if.end101
+    i8 42, label %if.end101
+  ]
+
+if.then84:                                        ; preds = %if.else
+  switch i8 undef, label %cleanup.thread [
+    i8 43, label %if.end101
+    i8 63, label %if.end101
+    i8 123, label %if.end101
+  ]
+
+if.end101:                                        ; preds = %if.then84, %if.then84, %if.then84, %if.else, %if.else
+  unreachable
+
+cleanup.thread:                                   ; preds = %if.then84
+  %call188 = tail call i32* undef(i32* %nbranch.0)
+  switch i8 undef, label %for.cond.backedge [
+    i8 92, label %land.lhs.true208
+    i8 0, label %if.else252
+  ]
+
+land.lhs.true208:                                 ; preds = %cleanup.thread
+  unreachable
+
+for.cond.backedge:                                ; preds = %cleanup.thread
+  br label %for.cond
+
+if.else252:                                       ; preds = %cleanup.thread
+  unreachable
+}
diff --git a/test/CodeGen/WebAssembly/irreducible-cfg-nested2.ll b/test/CodeGen/WebAssembly/irreducible-cfg-nested2.ll
new file mode 100644
index 0000000..3dc0cd1
--- /dev/null
+++ b/test/CodeGen/WebAssembly/irreducible-cfg-nested2.ll
@@ -0,0 +1,39 @@
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-block-placement -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; Test an interesting pattern of nested irreducibility.
+; Just check that we resolve all the irreducibility here (if we didn't, we would crash).
+
+; CHECK-LABEL: func_2:
+
+; Function Attrs: noinline nounwind optnone
+define void @func_2() {
+entry:
+  br i1 undef, label %lbl_937, label %if.else787
+
+lbl_937:                                          ; preds = %for.body978, %entry
+  br label %if.end965
+
+if.else787:                                       ; preds = %entry
+  br label %if.end965
+
+if.end965:                                        ; preds = %if.else787, %lbl_937
+  br label %for.cond967
+
+for.cond967:                                      ; preds = %for.end1035, %if.end965
+  br label %for.cond975
+
+for.cond975:                                      ; preds = %if.end984, %for.cond967
+  br i1 undef, label %for.body978, label %for.end1035
+
+for.body978:                                      ; preds = %for.cond975
+  br i1 undef, label %lbl_937, label %if.end984
+
+if.end984:                                        ; preds = %for.body978
+  br label %for.cond975
+
+for.end1035:                                      ; preds = %for.cond975
+  br label %for.cond967
+}
diff --git a/test/CodeGen/WebAssembly/irreducible-cfg.ll b/test/CodeGen/WebAssembly/irreducible-cfg.ll
index d289d9f..8eb0a6f 100644
--- a/test/CodeGen/WebAssembly/irreducible-cfg.ll
+++ b/test/CodeGen/WebAssembly/irreducible-cfg.ll
@@ -9,7 +9,7 @@
 
 ; CHECK-LABEL: test0:
 ; CHECK: f64.load
-; CHECK: i32.const $[[REG:[^,]+]]=, 0{{$}}
+; CHECK: i32.const $[[REG:[^,]+]]=
 ; CHECK: br_table  $[[REG]],
 define void @test0(double* %arg, i32 %arg1, i32 %arg2, i32 %arg3) {
 bb:
@@ -50,7 +50,7 @@
 
 ; CHECK-LABEL: test1:
 ; CHECK: f64.load
-; CHECK: i32.const $[[REG:[^,]+]]=, 0{{$}}
+; CHECK: i32.const $[[REG:[^,]+]]=
 ; CHECK: br_table  $[[REG]],
 define void @test1(double* %arg, i32 %arg1, i32 %arg2, i32 %arg3) {
 bb:
@@ -92,3 +92,128 @@
 bb19:
   ret void
 }
+
+; A simple loop with 2 blocks that are both entries.
+
+; CHECK-LABEL: test2:
+; CHECK: br_if
+; CHECK: i32.const $[[REG:[^,]+]]=
+; CHECK: br_table  $[[REG]],
+define internal i32 @test2(i32) noinline {
+entry:
+  br label %A0
+
+A0:
+  %a0a = tail call i32 @test2(i32 1)
+  %a0b = icmp eq i32 %a0a, 0
+  br i1 %a0b, label %A1, label %A2
+
+A1:
+  %a1a = tail call i32 @test2(i32 2)
+  %a1b = icmp eq i32 %a1a, 0
+  br i1 %a1b, label %A1, label %A2
+
+A2:
+  %a2a = tail call i32 @test2(i32 3)
+  %a2b = icmp eq i32 %a2a, 0
+  br i1 %a2b, label %A1, label %A2
+}
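;
; Roughly how the fix works (an informal sketch, not literal backend output):
; the extra entries are routed through a single dispatch loop that switches on
; a synthetic label local, which is what the br_table checked above implements:
;
;   loop                     ;; the one real loop header
;     local.get $label       ;; which original entry runs next?
;     br_table  0, 1, ...    ;; dispatch to the block for A1, A2, ...
;     ;; each block sets $label and branches back to the dispatch
;   end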
+
+; An interesting loop with an inner loop and an if-else structure as well.
+
+; CHECK-LABEL: test3:
+; CHECK: br_if
+define void @test3(i32 %ws) {
+entry:
+  %ws.addr = alloca i32, align 4
+  store volatile i32 %ws, i32* %ws.addr, align 4
+  %0 = load volatile i32, i32* %ws.addr, align 4
+  %tobool = icmp ne i32 %0, 0
+  br i1 %tobool, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  br label %wynn
+
+if.end:                                           ; preds = %entry
+  %1 = load volatile i32, i32* %ws.addr, align 4
+  %tobool1 = icmp ne i32 %1, 0
+  br i1 %tobool1, label %if.end9, label %if.then2
+
+if.then2:                                         ; preds = %if.end
+  br label %for.cond
+
+for.cond:                                         ; preds = %wynn, %if.then7, %if.then2
+  %2 = load volatile i32, i32* %ws.addr, align 4
+  %tobool3 = icmp ne i32 %2, 0
+  br i1 %tobool3, label %if.then4, label %if.end5
+
+if.then4:                                         ; preds = %for.cond
+  br label %if.end5
+
+if.end5:                                          ; preds = %if.then4, %for.cond
+  %3 = load volatile i32, i32* %ws.addr, align 4
+  %tobool6 = icmp ne i32 %3, 0
+  br i1 %tobool6, label %if.then7, label %if.end8
+
+if.then7:                                         ; preds = %if.end5
+  br label %for.cond
+
+if.end8:                                          ; preds = %if.end5
+  br label %wynn
+
+wynn:                                             ; preds = %if.end8, %if.then
+  br label %for.cond
+
+if.end9:                                          ; preds = %if.end
+  ret void
+}
+
+; Multi-level irreducibility: after reducing in the main scope, we must then
+; reduce in the inner loop that we just created.
+; CHECK: br_table
+; CHECK: br_table
+define void @pi_next() {
+entry:
+  br i1 undef, label %sw.bb5, label %return
+
+sw.bb5:                                           ; preds = %entry
+  br i1 undef, label %if.then.i49, label %if.else.i52
+
+if.then.i49:                                      ; preds = %sw.bb5
+  br label %for.inc197.i
+
+if.else.i52:                                      ; preds = %sw.bb5
+  br label %for.cond57.i
+
+for.cond57.i:                                     ; preds = %for.inc205.i, %if.else.i52
+  store i32 0, i32* undef, align 4
+  br label %for.cond65.i
+
+for.cond65.i:                                     ; preds = %for.inc201.i, %for.cond57.i
+  br i1 undef, label %for.body70.i, label %for.inc205.i
+
+for.body70.i:                                     ; preds = %for.cond65.i
+  br label %for.cond76.i
+
+for.cond76.i:                                     ; preds = %for.inc197.i, %for.body70.i
+  %0 = phi i32 [ %inc199.i, %for.inc197.i ], [ 0, %for.body70.i ]
+  %cmp81.i = icmp slt i32 %0, 0
+  br i1 %cmp81.i, label %for.body82.i, label %for.inc201.i
+
+for.body82.i:                                     ; preds = %for.cond76.i
+  br label %for.inc197.i
+
+for.inc197.i:                                     ; preds = %for.body82.i, %if.then.i49
+  %inc199.i = add nsw i32 undef, 1
+  br label %for.cond76.i
+
+for.inc201.i:                                     ; preds = %for.cond76.i
+  br label %for.cond65.i
+
+for.inc205.i:                                     ; preds = %for.cond65.i
+  br label %for.cond57.i
+
+return:                                           ; preds = %entry
+  ret void
+}
+
diff --git a/test/CodeGen/WebAssembly/legalize.ll b/test/CodeGen/WebAssembly/legalize.ll
index ea6dabf..24b8425 100644
--- a/test/CodeGen/WebAssembly/legalize.ll
+++ b/test/CodeGen/WebAssembly/legalize.ll
@@ -34,7 +34,7 @@
 
 ; CHECK-LABEL: fpext_f32_f64:
 ; CHECK: f32.load $push0=, 0($0){{$}}
-; CHECK: f64.promote/f32 $push1=, $pop0{{$}}
+; CHECK: f64.promote_f32 $push1=, $pop0{{$}}
 ; CHECK: return $pop1{{$}}
 define double @fpext_f32_f64(float *%p) {
   %v = load float, float* %p
@@ -44,7 +44,7 @@
 
 ; CHECK-LABEL: fpconv_f64_f32:
 ; CHECK: f64.load $push0=, 0($0){{$}}
-; CHECK: f32.demote/f64 $push1=, $pop0{{$}}
+; CHECK: f32.demote_f64 $push1=, $pop0{{$}}
 ; CHECK: return $pop1{{$}}
 define float @fpconv_f64_f32(double *%p) {
   %v = load double, double* %p
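; The renames in this file follow the updated wasm naming scheme for
; conversion opcodes, "<dst>.<op>_<src>" in place of "<dst>.<op>/<src>".
; A few instances from this patch, old => new:
;
;   f64.promote/f32   =>  f64.promote_f32
;   f32.demote/f64    =>  f32.demote_f64
;   i64.extend_s/i32  =>  i64.extend_i32_s
;   i32.wrap/i64      =>  i32.wrap_i64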
diff --git a/test/CodeGen/WebAssembly/load-ext-atomic.ll b/test/CodeGen/WebAssembly/load-ext-atomic.ll
index 891c161..a6d7d4f 100644
--- a/test/CodeGen/WebAssembly/load-ext-atomic.ll
+++ b/test/CodeGen/WebAssembly/load-ext-atomic.ll
@@ -84,7 +84,7 @@
 
 ; CHECK-LABEL: sext_i32_i64:
 ; CHECK: i32.atomic.load $push0=, 0($0){{$}}
-; CHECK: i64.extend_s/i32 $push1=, $pop0{{$}}
+; CHECK: i64.extend_i32_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i64 @sext_i32_i64(i32 *%p) {
   %v = load atomic i32, i32* %p seq_cst, align 4
diff --git a/test/CodeGen/WebAssembly/load.ll b/test/CodeGen/WebAssembly/load.ll
index 1878c1c..b06bef4 100644
--- a/test/CodeGen/WebAssembly/load.ll
+++ b/test/CodeGen/WebAssembly/load.ll
@@ -8,7 +8,7 @@
 
 ; CHECK-LABEL: ldi32:
 ; CHECK-NEXT: .functype ldi32 (i32) -> (i32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i32 @ldi32(i32 *%p) {
@@ -18,7 +18,7 @@
 
 ; CHECK-LABEL: ldi64:
 ; CHECK-NEXT: .functype ldi64 (i32) -> (i64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @ldi64(i64 *%p) {
@@ -28,7 +28,7 @@
 
 ; CHECK-LABEL: ldf32:
 ; CHECK-NEXT: .functype ldf32 (i32) -> (f32){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define float @ldf32(float *%p) {
@@ -38,7 +38,7 @@
 
 ; CHECK-LABEL: ldf64:
 ; CHECK-NEXT: .functype ldf64 (i32) -> (f64){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define double @ldf64(double *%p) {
diff --git a/test/CodeGen/WebAssembly/memory-addr32.ll b/test/CodeGen/WebAssembly/memory-addr32.ll
index 0eb886f..26e9d48 100644
--- a/test/CodeGen/WebAssembly/memory-addr32.ll
+++ b/test/CodeGen/WebAssembly/memory-addr32.ll
@@ -7,10 +7,6 @@
 
 declare i32 @llvm.wasm.memory.size.i32(i32) nounwind readonly
 declare i32 @llvm.wasm.memory.grow.i32(i32, i32) nounwind
-declare i32 @llvm.wasm.mem.size.i32(i32) nounwind readonly
-declare i32 @llvm.wasm.mem.grow.i32(i32, i32) nounwind
-declare i32 @llvm.wasm.current.memory.i32() nounwind readonly
-declare i32 @llvm.wasm.grow.memory.i32(i32) nounwind
 
 ; CHECK-LABEL: memory_size:
 ; CHECK-NEXT: .functype memory_size () -> (i32){{$}}
@@ -29,39 +25,3 @@
   %a = call i32 @llvm.wasm.memory.grow.i32(i32 0, i32 %n)
   ret i32 %a
 }
-
-; CHECK-LABEL: mem_size:
-; CHECK-NEXT: .functype mem_size () -> (i32){{$}}
-; CHECK-NEXT: mem.size $push0=, 0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
-define i32 @mem_size() {
-  %a = call i32 @llvm.wasm.mem.size.i32(i32 0)
-  ret i32 %a
-}
-
-; CHECK-LABEL: mem_grow:
-; CHECK-NEXT: .functype mem_grow (i32) -> (i32){{$}}
-; CHECK: mem.grow $push0=, 0, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
-define i32 @mem_grow(i32 %n) {
-  %a = call i32 @llvm.wasm.mem.grow.i32(i32 0, i32 %n)
-  ret i32 %a
-}
-
-; CHECK-LABEL: current_memory:
-; CHECK-NEXT: .functype current_memory () -> (i32){{$}}
-; CHECK-NEXT: current_memory $push0={{$}}
-; CHECK-NEXT: return $pop0{{$}}
-define i32 @current_memory() {
-  %a = call i32 @llvm.wasm.current.memory.i32()
-  ret i32 %a
-}
-
-; CHECK-LABEL: grow_memory:
-; CHECK-NEXT: .functype grow_memory (i32) -> (i32){{$}}
-; CHECK: grow_memory $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
-define i32 @grow_memory(i32 %n) {
-  %a = call i32 @llvm.wasm.grow.memory.i32(i32 %n)
-  ret i32 %a
-}
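;
; The intrinsics deleted here were deprecated spellings; the supported pair,
; still exercised above, is llvm.wasm.memory.size/grow. A minimal usage
; sketch (operating on memory index 0, with sizes in 64 KiB wasm pages):
;
;   declare i32 @llvm.wasm.memory.size.i32(i32)
;   declare i32 @llvm.wasm.memory.grow.i32(i32, i32)
;
;   %pages = call i32 @llvm.wasm.memory.size.i32(i32 0)
;   %old   = call i32 @llvm.wasm.memory.grow.i32(i32 0, i32 1)  ; grow by 1 page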
diff --git a/test/CodeGen/WebAssembly/negative-base-reg.ll b/test/CodeGen/WebAssembly/negative-base-reg.ll
index 0be276f..7408e14 100644
--- a/test/CodeGen/WebAssembly/negative-base-reg.ll
+++ b/test/CodeGen/WebAssembly/negative-base-reg.ll
@@ -11,7 +11,7 @@
 ; If LSR stops selecting a negative base reg value, then this test will no
 ; longer be useful as written.
 ; CHECK:      i32.const $push[[L0:[0-9]+]]=, -128
-; CHECK-NEXT: set_local 0, $pop[[L0]]
+; CHECK-NEXT: local.set 0, $pop[[L0]]
 entry:
   br label %for.body
 
diff --git a/test/CodeGen/WebAssembly/offset-atomics.ll b/test/CodeGen/WebAssembly/offset-atomics.ll
index 6406a51..6884b6a 100644
--- a/test/CodeGen/WebAssembly/offset-atomics.ll
+++ b/test/CodeGen/WebAssembly/offset-atomics.ll
@@ -363,10 +363,10 @@
   ret i32 %u
 }
 
-; 32->64 sext load gets selected as i32.atomic.load, i64_extend_s/i32
+; 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s
 ; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
 ; CHECK: i32.atomic.load $push0=, 24($0){{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push1=, $pop0{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push1=, $pop0{{$}}
 define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
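;
; Background for the comment above (a sketch): wasm's narrow atomic loads are
; zero-extending only (e.g. i64.atomic.load32_u); there is no sign-extending
; form. So a sign-extended 32->64 atomic load is selected as a plain 32-bit
; atomic load plus an explicit extend:
;
;   %v = load atomic i32, i32* %p seq_cst, align 4
;   %e = sext i32 %v to i64
;     => i32.atomic.load  $push0=, 24($0)
;        i64.extend_i32_s $push1=, $pop0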
@@ -832,7 +832,7 @@
 ; Fold an offset into a sign-extending rmw.
 
 ; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_offset:
-; CHECK: i32.atomic.rmw8_u.add $push0=, 24($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
 define i32 @rmw_add_i8_i32_s_with_folded_offset(i8* %p, i32 %v) {
   %q = ptrtoint i8* %p to i32
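;
; How the fold works (informally): the constant 24 added to the pointer is
; absorbed into the instruction's unsigned memarg offset, printed as 24($0).
; The nuw flag matters: it guarantees the add cannot wrap, so treating the
; constant as an unsigned offset from the base is sound:
;
;   %q = ptrtoint i8* %p to i32
;   %r = add nuw i32 %q, 24                          ; no unsigned wrap
;     => i32.atomic.rmw8.add_u $push0=, 24($0), $1   ; 24 folded into the memarg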
@@ -844,11 +844,11 @@
   ret i32 %u
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.add, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.add, i64.extend_i32_s
 ; CHECK-LABEL: rmw_add_i32_i64_s_with_folded_offset:
-; CHECK: i32.wrap/i64 $push0=, $1
+; CHECK: i32.wrap_i64 $push0=, $1
 ; CHECK-NEXT: i32.atomic.rmw.add $push1=, 24($0), $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push2=, $pop1{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 define i64 @rmw_add_i32_i64_s_with_folded_offset(i32* %p, i64 %v) {
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
@@ -862,7 +862,7 @@
 ; Fold a gep offset into a sign-extending rmw.
 
 ; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw8_u.add $push0=, 24($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
 define i32 @rmw_add_i8_i32_s_with_folded_gep_offset(i8* %p, i32 %v) {
   %s = getelementptr inbounds i8, i8* %p, i32 24
@@ -873,7 +873,7 @@
 }
 
 ; CHECK-LABEL: rmw_add_i16_i32_s_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw16_u.add $push0=, 48($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0
 define i32 @rmw_add_i16_i32_s_with_folded_gep_offset(i16* %p, i32 %v) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
@@ -884,7 +884,7 @@
 }
 
 ; CHECK-LABEL: rmw_add_i16_i64_s_with_folded_gep_offset:
-; CHECK: i64.atomic.rmw16_u.add $push0=, 48($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0
 define i64 @rmw_add_i16_i64_s_with_folded_gep_offset(i16* %p, i64 %v) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
@@ -898,7 +898,7 @@
 ; an 'add' if the or'ed bits are known to be zero.
 
 ; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_or_offset:
-; CHECK: i32.atomic.rmw8_u.add $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i32 @rmw_add_i8_i32_s_with_folded_or_offset(i32 %x, i32 %v) {
   %and = and i32 %x, -4
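;
; Why the fold is sound here (a worked sketch): the masked base has its low
; two bits cleared, so the or'ed-in offset cannot carry into higher bits and
; behaves exactly like an add:
;
;   %and = and i32 %x, -4     ; -4 = ...11111100, low two bits now zero
;   %t0  = or  i32 %and, 2    ; == add i32 %and, 2 (no overlapping bits)
;
; e.g. x = 13: (13 & -4) = 12, and 12 | 2 = 14 = 12 + 2.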
@@ -911,7 +911,7 @@
 }
 
 ; CHECK-LABEL: rmw_add_i8_i64_s_with_folded_or_offset:
-; CHECK: i64.atomic.rmw8_u.add $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
+; CHECK: i64.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i64 @rmw_add_i8_i64_s_with_folded_or_offset(i32 %x, i64 %v) {
   %and = and i32 %x, -4
@@ -927,7 +927,7 @@
 
 ; CHECK-LABEL: rmw_add_i16_i32_s_from_numeric_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw16_u.add $push1=, 42($pop0), $0{{$}}
+; CHECK: i32.atomic.rmw16.add_u $push1=, 42($pop0), $0{{$}}
 ; CHECK-NEXT: i32.extend16_s $push2=, $pop1
 define i32 @rmw_add_i16_i32_s_from_numeric_address(i32 %v) {
   %s = inttoptr i32 42 to i16*
@@ -939,7 +939,7 @@
 
 ; CHECK-LABEL: rmw_add_i8_i32_s_from_global_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw8_u.add $push1=, gv8($pop0), $0{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push1=, gv8($pop0), $0{{$}}
 ; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
 define i32 @rmw_add_i8_i32_s_from_global_address(i32 %v) {
   %t = trunc i32 %v to i8
@@ -955,7 +955,7 @@
 ; Fold an offset into a zero-extending rmw.
 
 ; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_offset:
-; CHECK: i32.atomic.rmw8_u.add $push0=, 24($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
 define i32 @rmw_add_i8_i32_z_with_folded_offset(i8* %p, i32 %v) {
   %q = ptrtoint i8* %p to i32
   %r = add nuw i32 %q, 24
@@ -967,7 +967,7 @@
 }
 
 ; CHECK-LABEL: rmw_add_i32_i64_z_with_folded_offset:
-; CHECK: i64.atomic.rmw32_u.add $push0=, 24($0), $1{{$}}
+; CHECK: i64.atomic.rmw32.add_u $push0=, 24($0), $1{{$}}
 define i64 @rmw_add_i32_i64_z_with_folded_offset(i32* %p, i64 %v) {
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
@@ -981,7 +981,7 @@
 ; Fold a gep offset into a zero-extending rmw.
 
 ; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw8_u.add $push0=, 24($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
 define i32 @rmw_add_i8_i32_z_with_folded_gep_offset(i8* %p, i32 %v) {
   %s = getelementptr inbounds i8, i8* %p, i32 24
   %t = trunc i32 %v to i8
@@ -991,7 +991,7 @@
 }
 
 ; CHECK-LABEL: rmw_add_i16_i32_z_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw16_u.add $push0=, 48($0), $1{{$}}
+; CHECK: i32.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
 define i32 @rmw_add_i16_i32_z_with_folded_gep_offset(i16* %p, i32 %v) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
   %t = trunc i32 %v to i16
@@ -1001,7 +1001,7 @@
 }
 
 ; CHECK-LABEL: rmw_add_i16_i64_z_with_folded_gep_offset:
-; CHECK: i64.atomic.rmw16_u.add $push0=, 48($0), $1{{$}}
+; CHECK: i64.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
 define i64 @rmw_add_i16_i64_z_with_folded_gep_offset(i16* %p, i64 %v) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
   %t = trunc i64 %v to i16
@@ -1014,7 +1014,7 @@
 ; an 'add' if the or'ed bits are known to be zero.
 
 ; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_or_offset:
-; CHECK: i32.atomic.rmw8_u.add $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
 define i32 @rmw_add_i8_i32_z_with_folded_or_offset(i32 %x, i32 %v) {
   %and = and i32 %x, -4
   %t0 = inttoptr i32 %and to i8*
@@ -1026,7 +1026,7 @@
 }
 
 ; CHECK-LABEL: rmw_add_i8_i64_z_with_folded_or_offset:
-; CHECK: i64.atomic.rmw8_u.add $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
+; CHECK: i64.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
 define i64 @rmw_add_i8_i64_z_with_folded_or_offset(i32 %x, i64 %v) {
   %and = and i32 %x, -4
   %t0 = inttoptr i32 %and to i8*
@@ -1041,7 +1041,7 @@
 
 ; CHECK-LABEL: rmw_add_i16_i32_z_from_numeric_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw16_u.add $push1=, 42($pop0), $0{{$}}
+; CHECK: i32.atomic.rmw16.add_u $push1=, 42($pop0), $0{{$}}
 define i32 @rmw_add_i16_i32_z_from_numeric_address(i32 %v) {
   %s = inttoptr i32 42 to i16*
   %t = trunc i32 %v to i16
@@ -1052,7 +1052,7 @@
 
 ; CHECK-LABEL: rmw_add_i8_i32_z_from_global_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw8_u.add $push1=, gv8($pop0), $0{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push1=, gv8($pop0), $0{{$}}
 define i32 @rmw_add_i8_i32_z_from_global_address(i32 %v) {
   %t = trunc i32 %v to i8
   %old = atomicrmw add i8* @gv8, i8 %t seq_cst
@@ -1063,7 +1063,7 @@
 ; i8 return value should test anyext RMWs
 
 ; CHECK-LABEL: rmw_add_i8_i32_retvalue:
-; CHECK: i32.atomic.rmw8_u.add $push0=, 0($0), $1{{$}}
+; CHECK: i32.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
 define i8 @rmw_add_i8_i32_retvalue(i8 *%p, i32 %v) {
   %t = trunc i32 %v to i8
@@ -1261,7 +1261,7 @@
 ; Fold an offset into a sign-extending rmw.
 
 ; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_offset:
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push0=, 24($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
 define i32 @cmpxchg_i8_i32_s_with_folded_offset(i8* %p, i32 %exp, i32 %new) {
   %q = ptrtoint i8* %p to i32
@@ -1275,12 +1275,12 @@
   ret i32 %u
 }
 
-; 32->64 sext rmw gets selected as i32.atomic.rmw.cmpxchg, i64_extend_s/i32
+; 32->64 sext rmw gets selected as i32.atomic.rmw.cmpxchg, i64.extend_i32_s
 ; CHECK-LABEL: cmpxchg_i32_i64_s_with_folded_offset:
-; CHECK: i32.wrap/i64 $push1=, $1
-; CHECK-NEXT: i32.wrap/i64 $push0=, $2
+; CHECK: i32.wrap_i64 $push1=, $1
+; CHECK-NEXT: i32.wrap_i64 $push0=, $2
 ; CHECK-NEXT: i32.atomic.rmw.cmpxchg $push2=, 24($0), $pop1, $pop0{{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push3=, $pop2{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push3=, $pop2{{$}}
 define i64 @cmpxchg_i32_i64_s_with_folded_offset(i32* %p, i64 %exp, i64 %new) {
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
@@ -1296,7 +1296,7 @@
 ; Fold a gep offset into a sign-extending rmw.
 
 ; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push0=, 24($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
 define i32 @cmpxchg_i8_i32_s_with_folded_gep_offset(i8* %p, i32 %exp, i32 %new) {
   %s = getelementptr inbounds i8, i8* %p, i32 24
@@ -1309,7 +1309,7 @@
 }
 
 ; CHECK-LABEL: cmpxchg_i16_i32_s_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw16_u.cmpxchg $push0=, 48($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0
 define i32 @cmpxchg_i16_i32_s_with_folded_gep_offset(i16* %p, i32 %exp, i32 %new) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
@@ -1322,7 +1322,7 @@
 }
 
 ; CHECK-LABEL: cmpxchg_i16_i64_s_with_folded_gep_offset:
-; CHECK: i64.atomic.rmw16_u.cmpxchg $push0=, 48($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0
 define i64 @cmpxchg_i16_i64_s_with_folded_gep_offset(i16* %p, i64 %exp, i64 %new) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
@@ -1338,7 +1338,7 @@
 ; an 'add' if the or'ed bits are known to be zero.
 
 ; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_or_offset:
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i32 @cmpxchg_i8_i32_s_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
   %and = and i32 %x, -4
@@ -1353,7 +1353,7 @@
 }
 
 ; CHECK-LABEL: cmpxchg_i8_i64_s_with_folded_or_offset:
-; CHECK: i64.atomic.rmw8_u.cmpxchg $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
+; CHECK: i64.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
 ; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i64 @cmpxchg_i8_i64_s_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
   %and = and i32 %x, -4
@@ -1371,7 +1371,7 @@
 
 ; CHECK-LABEL: cmpxchg_i16_i32_s_from_numeric_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw16_u.cmpxchg $push1=, 42($pop0), $0, $1{{$}}
+; CHECK: i32.atomic.rmw16.cmpxchg_u $push1=, 42($pop0), $0, $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push2=, $pop1
 define i32 @cmpxchg_i16_i32_s_from_numeric_address(i32 %exp, i32 %new) {
   %s = inttoptr i32 42 to i16*
@@ -1385,7 +1385,7 @@
 
 ; CHECK-LABEL: cmpxchg_i8_i32_s_from_global_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push1=, gv8($pop0), $0, $1{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push1=, gv8($pop0), $0, $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
 define i32 @cmpxchg_i8_i32_s_from_global_address(i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i8
@@ -1403,7 +1403,7 @@
 ; Fold an offset into a zero-extending rmw.
 
 ; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_offset:
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push0=, 24($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
 define i32 @cmpxchg_i8_i32_z_with_folded_offset(i8* %p, i32 %exp, i32 %new) {
   %q = ptrtoint i8* %p to i32
   %r = add nuw i32 %q, 24
@@ -1417,7 +1417,7 @@
 }
 
 ; CHECK-LABEL: cmpxchg_i32_i64_z_with_folded_offset:
-; CHECK: i64.atomic.rmw32_u.cmpxchg $push0=, 24($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw32.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
 define i64 @cmpxchg_i32_i64_z_with_folded_offset(i32* %p, i64 %exp, i64 %new) {
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
@@ -1433,7 +1433,7 @@
 ; Fold a gep offset into a zero-extending rmw.
 
 ; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push0=, 24($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
 define i32 @cmpxchg_i8_i32_z_with_folded_gep_offset(i8* %p, i32 %exp, i32 %new) {
   %s = getelementptr inbounds i8, i8* %p, i32 24
   %exp_t = trunc i32 %exp to i8
@@ -1445,7 +1445,7 @@
 }
 
 ; CHECK-LABEL: cmpxchg_i16_i32_z_with_folded_gep_offset:
-; CHECK: i32.atomic.rmw16_u.cmpxchg $push0=, 48($0), $1, $2{{$}}
+; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
 define i32 @cmpxchg_i16_i32_z_with_folded_gep_offset(i16* %p, i32 %exp, i32 %new) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
   %exp_t = trunc i32 %exp to i16
@@ -1457,7 +1457,7 @@
 }
 
 ; CHECK-LABEL: cmpxchg_i16_i64_z_with_folded_gep_offset:
-; CHECK: i64.atomic.rmw16_u.cmpxchg $push0=, 48($0), $1, $2{{$}}
+; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
 define i64 @cmpxchg_i16_i64_z_with_folded_gep_offset(i16* %p, i64 %exp, i64 %new) {
   %s = getelementptr inbounds i16, i16* %p, i32 24
   %exp_t = trunc i64 %exp to i16
@@ -1472,7 +1472,7 @@
 ; an 'add' if the or'ed bits are known to be zero.
 
 ; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_or_offset:
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
 define i32 @cmpxchg_i8_i32_z_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
   %and = and i32 %x, -4
   %t0 = inttoptr i32 %and to i8*
@@ -1486,7 +1486,7 @@
 }
 
 ; CHECK-LABEL: cmpxchg_i8_i64_z_with_folded_or_offset:
-; CHECK: i64.atomic.rmw8_u.cmpxchg $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
+; CHECK: i64.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
 define i64 @cmpxchg_i8_i64_z_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
   %and = and i32 %x, -4
   %t0 = inttoptr i32 %and to i8*
@@ -1503,7 +1503,7 @@
 
 ; CHECK-LABEL: cmpxchg_i16_i32_z_from_numeric_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw16_u.cmpxchg $push1=, 42($pop0), $0, $1{{$}}
+; CHECK: i32.atomic.rmw16.cmpxchg_u $push1=, 42($pop0), $0, $1{{$}}
 define i32 @cmpxchg_i16_i32_z_from_numeric_address(i32 %exp, i32 %new) {
   %s = inttoptr i32 42 to i16*
   %exp_t = trunc i32 %exp to i16
@@ -1516,7 +1516,7 @@
 
 ; CHECK-LABEL: cmpxchg_i8_i32_z_from_global_address
 ; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.atomic.rmw8_u.cmpxchg $push1=, gv8($pop0), $0, $1{{$}}
+; CHECK: i32.atomic.rmw8.cmpxchg_u $push1=, gv8($pop0), $0, $1{{$}}
 define i32 @cmpxchg_i8_i32_z_from_global_address(i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
diff --git a/test/CodeGen/WebAssembly/offset-fastisel.ll b/test/CodeGen/WebAssembly/offset-fastisel.ll
index c41757c..9f10a6a 100644
--- a/test/CodeGen/WebAssembly/offset-fastisel.ll
+++ b/test/CodeGen/WebAssembly/offset-fastisel.ll
@@ -16,10 +16,10 @@
 }
 
 ; CHECK-LABEL: store_i8_with_array_alloca_gep:
-; CHECK: get_global  $push[[L0:[0-9]+]]=, __stack_pointer
+; CHECK: global.get  $push[[L0:[0-9]+]]=, __stack_pointer
 ; CHECK: i32.const   $push[[L1:[0-9]+]]=, 32{{$}}
 ; CHECK: i32.sub     $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; CHECK: copy_local  $push[[L3:[0-9]+]]=, $pop[[L2]]
+; CHECK: local.copy  $push[[L3:[0-9]+]]=, $pop[[L2]]
 ; CHECK: i32.add     $push[[L4:[0-9]+]]=, $pop[[L3]], $0{{$}}
 ; CHECK: i32.const   $push[[L5:[0-9]+]]=, 0{{$}}
 ; CHECK: i32.store8  0($pop[[L4]]), $pop[[L5]]{{$}}
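;
; Context for the sequence above (an informal sketch): wasm locals are not
; addressable, so an alloca whose address is taken is placed on a "shadow
; stack" in linear memory. Its top lives in the __stack_pointer global, and
; the prologue carves out a frame by subtracting the frame size:
;
;   global.get $push0=, __stack_pointer   ; current shadow-stack top
;   i32.const  $push1=, 32                ; this function's frame size
;   i32.sub    $push2=, $pop0, $pop1      ; new frame base for the alloca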
diff --git a/test/CodeGen/WebAssembly/phi.ll b/test/CodeGen/WebAssembly/phi.ll
index 9582b25..7e47f5a 100644
--- a/test/CodeGen/WebAssembly/phi.ll
+++ b/test/CodeGen/WebAssembly/phi.ll
@@ -27,9 +27,9 @@
 
 ; CHECK-LABEL: test1:
 ; CHECK: .LBB1_1:
-; CHECK: copy_local $[[NUM0:[0-9]+]]=, $[[NUM1:[0-9]+]]{{$}}
-; CHECK: copy_local $[[NUM1]]=, $[[NUM2:[0-9]+]]{{$}}
-; CHECK: copy_local $[[NUM2]]=, $[[NUM0]]{{$}}
+; CHECK: local.copy $[[NUM0:[0-9]+]]=, $[[NUM1:[0-9]+]]{{$}}
+; CHECK: local.copy $[[NUM1]]=, $[[NUM2:[0-9]+]]{{$}}
+; CHECK: local.copy $[[NUM2]]=, $[[NUM0]]{{$}}
 define i32 @test1(i32 %n) {
 entry:
   br label %loop
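;
; A note on the three copies checked above (informal): the loop-carried phis
; form a cyclic shuffle, which cannot be lowered as naive sequential copies
; without clobbering a value that is still needed. The sequence is the classic
; swap-through-a-temporary, with the first register as the scratch slot:
;
;   local.copy $a=, $b      ; t = b
;   local.copy $b=, $c      ; b = c
;   local.copy $c=, $a      ; c = t  (the old b)
;
; ($a, $b, $c are placeholder names for the registers matched by the checks.)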
diff --git a/test/CodeGen/WebAssembly/reg-stackify.ll b/test/CodeGen/WebAssembly/reg-stackify.ll
index 53a52a5..6b4487b 100644
--- a/test/CodeGen/WebAssembly/reg-stackify.ll
+++ b/test/CodeGen/WebAssembly/reg-stackify.ll
@@ -125,17 +125,17 @@
 ; NOREGS-LABEL: stack_uses:
 ; NOREGS: .functype stack_uses (i32, i32, i32, i32) -> (i32){{$}}
 ; NOREGS-NEXT: block {{$}}
-; NOREGS-NEXT: get_local 0{{$}}
+; NOREGS-NEXT: local.get 0{{$}}
 ; NOREGS-NEXT: i32.const   1{{$}}
 ; NOREGS-NEXT: i32.lt_s
-; NOREGS-NEXT: get_local 1{{$}}
+; NOREGS-NEXT: local.get 1{{$}}
 ; NOREGS-NEXT: i32.const   2{{$}}
 ; NOREGS-NEXT: i32.lt_s
 ; NOREGS-NEXT: i32.xor {{$}}
-; NOREGS-NEXT: get_local 2{{$}}
+; NOREGS-NEXT: local.get 2{{$}}
 ; NOREGS-NEXT: i32.const   1{{$}}
 ; NOREGS-NEXT: i32.lt_s
-; NOREGS-NEXT: get_local 3{{$}}
+; NOREGS-NEXT: local.get 3{{$}}
 ; NOREGS-NEXT: i32.const   2{{$}}
 ; NOREGS-NEXT: i32.lt_s
 ; NOREGS-NEXT: i32.xor {{$}}
@@ -166,13 +166,13 @@
 }
 
 ; Test an interesting case where the load has multiple uses and cannot
-; be trivially stackified. However, it can be stackified with a tee_local.
+; be trivially stackified. However, it can be stackified with a local.tee.
 
 ; CHECK-LABEL: multiple_uses:
 ; CHECK: .functype multiple_uses (i32, i32, i32) -> (){{$}}
 ; CHECK-NEXT: block   {{$}}
 ; CHECK-NEXT: i32.load    $push[[NUM0:[0-9]+]]=, 0($2){{$}}
-; CHECK-NEXT: tee_local   $push[[NUM1:[0-9]+]]=, $3=, $pop[[NUM0]]{{$}}
+; CHECK-NEXT: local.tee   $push[[NUM1:[0-9]+]]=, $3=, $pop[[NUM0]]{{$}}
 ; CHECK-NEXT: i32.ge_u    $push[[NUM2:[0-9]+]]=, $pop[[NUM1]], $1{{$}}
 ; CHECK-NEXT: br_if       0, $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: i32.lt_u    $push[[NUM3:[0-9]+]]=, $3, $0{{$}}
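;
; local.tee, informally, is "set and keep": it writes the top of stack into a
; local and leaves the value on the operand stack, i.e. it behaves like
; local.set followed by local.get. A sketch of the stackification above:
;
;   i32.load  $push0=, 0($2)          ; the multiply-used value
;   local.tee $push1=, $3=, $pop0     ; save to $3 AND keep a stacked copy
;   ;; the first use consumes $pop1; later uses re-read local $3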
@@ -185,18 +185,18 @@
 ; NOREGS: .functype multiple_uses (i32, i32, i32) -> (){{$}}
 ; NOREGS: .local i32{{$}}
 ; NOREGS-NEXT: block {{$}}
-; NOREGS-NEXT: get_local   2{{$}}
+; NOREGS-NEXT: local.get   2{{$}}
 ; NOREGS-NEXT: i32.load    0{{$}}
-; NOREGS-NEXT: tee_local   3{{$}}
-; NOREGS-NEXT: get_local   1{{$}}
+; NOREGS-NEXT: local.tee   3{{$}}
+; NOREGS-NEXT: local.get   1{{$}}
 ; NOREGS-NEXT: i32.ge_u
 ; NOREGS-NEXT: br_if       0{{$}}
-; NOREGS-NEXT: get_local   3{{$}}
-; NOREGS-NEXT: get_local   0{{$}}
+; NOREGS-NEXT: local.get   3{{$}}
+; NOREGS-NEXT: local.get   0{{$}}
 ; NOREGS-NEXT: i32.lt_u
 ; NOREGS-NEXT: br_if       0{{$}}
-; NOREGS-NEXT: get_local   2{{$}}
-; NOREGS-NEXT: get_local   3{{$}}
+; NOREGS-NEXT: local.get   2{{$}}
+; NOREGS-NEXT: local.get   3{{$}}
 ; NOREGS-NEXT: i32.store   0{{$}}
 ; NOREGS-NEXT: .LBB8_3:
 ; NOREGS-NEXT: end_block{{$}}
@@ -270,33 +270,33 @@
 ; CHECK-NEXT: return      $pop[[L14]]{{$}}
 ; NOREGS-LABEL: div_tree:
 ; NOREGS: .functype div_tree (i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) -> (i32){{$}}
-; NOREGS-NEXT: get_local 0{{$}}
-; NOREGS-NEXT: get_local 1{{$}}
+; NOREGS-NEXT: local.get 0{{$}}
+; NOREGS-NEXT: local.get 1{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
-; NOREGS-NEXT: get_local 2{{$}}
-; NOREGS-NEXT: get_local 3{{$}}
+; NOREGS-NEXT: local.get 2{{$}}
+; NOREGS-NEXT: local.get 3{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
-; NOREGS-NEXT: get_local 4{{$}}
-; NOREGS-NEXT: get_local 5{{$}}
+; NOREGS-NEXT: local.get 4{{$}}
+; NOREGS-NEXT: local.get 5{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
-; NOREGS-NEXT: get_local 6{{$}}
-; NOREGS-NEXT: get_local 7{{$}}
+; NOREGS-NEXT: local.get 6{{$}}
+; NOREGS-NEXT: local.get 7{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
-; NOREGS-NEXT: get_local 8{{$}}
-; NOREGS-NEXT: get_local 9{{$}}
+; NOREGS-NEXT: local.get 8{{$}}
+; NOREGS-NEXT: local.get 9{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
-; NOREGS-NEXT: get_local 10{{$}}
-; NOREGS-NEXT: get_local 11{{$}}
+; NOREGS-NEXT: local.get 10{{$}}
+; NOREGS-NEXT: local.get 11{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
-; NOREGS-NEXT: get_local 12{{$}}
-; NOREGS-NEXT: get_local 13{{$}}
+; NOREGS-NEXT: local.get 12{{$}}
+; NOREGS-NEXT: local.get 13{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
-; NOREGS-NEXT: get_local 14{{$}}
-; NOREGS-NEXT: get_local 15{{$}}
+; NOREGS-NEXT: local.get 14{{$}}
+; NOREGS-NEXT: local.get 15{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
 ; NOREGS-NEXT: i32.div_s{{$}}
@@ -327,18 +327,18 @@
 ; CHECK-LABEL: simple_multiple_use:
 ; CHECK:       .functype simple_multiple_use (i32, i32) -> (){{$}}
 ; CHECK-NEXT:  i32.mul     $push[[NUM0:[0-9]+]]=, $1, $0{{$}}
-; CHECK-NEXT:  tee_local   $push[[NUM1:[0-9]+]]=, $[[NUM2:[0-9]+]]=, $pop[[NUM0]]{{$}}
+; CHECK-NEXT:  local.tee   $push[[NUM1:[0-9]+]]=, $[[NUM2:[0-9]+]]=, $pop[[NUM0]]{{$}}
 ; CHECK-NEXT:  call        use_a@FUNCTION, $pop[[NUM1]]{{$}}
 ; CHECK-NEXT:  call        use_b@FUNCTION, $[[NUM2]]{{$}}
 ; CHECK-NEXT:  return{{$}}
 ; NOREGS-LABEL: simple_multiple_use:
 ; NOREGS:       .functype simple_multiple_use (i32, i32) -> (){{$}}
-; NOREGS-NEXT:  get_local 1{{$}}
-; NOREGS-NEXT:  get_local 0{{$}}
+; NOREGS-NEXT:  local.get 1{{$}}
+; NOREGS-NEXT:  local.get 0{{$}}
 ; NOREGS-NEXT:  i32.mul
-; NOREGS-NEXT:  tee_local   1{{$}}
+; NOREGS-NEXT:  local.tee   1{{$}}
 ; NOREGS-NEXT:  call        use_a@FUNCTION{{$}}
-; NOREGS-NEXT:  get_local   1{{$}}
+; NOREGS-NEXT:  local.get   1{{$}}
 ; NOREGS-NEXT:  call        use_b@FUNCTION{{$}}
 ; NOREGS-NEXT:  return{{$}}
 declare void @use_a(i32)
@@ -355,16 +355,16 @@
 ; CHECK-LABEL: multiple_uses_in_same_insn:
 ; CHECK:       .functype multiple_uses_in_same_insn (i32, i32) -> (){{$}}
 ; CHECK-NEXT:  i32.mul     $push[[NUM0:[0-9]+]]=, $1, $0{{$}}
-; CHECK-NEXT:  tee_local   $push[[NUM1:[0-9]+]]=, $[[NUM2:[0-9]+]]=, $pop[[NUM0]]{{$}}
+; CHECK-NEXT:  local.tee   $push[[NUM1:[0-9]+]]=, $[[NUM2:[0-9]+]]=, $pop[[NUM0]]{{$}}
 ; CHECK-NEXT:  call        use_2@FUNCTION, $pop[[NUM1]], $[[NUM2]]{{$}}
 ; CHECK-NEXT:  return{{$}}
 ; NOREGS-LABEL: multiple_uses_in_same_insn:
 ; NOREGS:       .functype multiple_uses_in_same_insn (i32, i32) -> (){{$}}
-; NOREGS-NEXT:  get_local 1{{$}}
-; NOREGS-NEXT:  get_local 0{{$}}
+; NOREGS-NEXT:  local.get 1{{$}}
+; NOREGS-NEXT:  local.get 0{{$}}
 ; NOREGS-NEXT:  i32.mul
-; NOREGS-NEXT:  tee_local   1{{$}}
-; NOREGS-NEXT:  get_local   1{{$}}
+; NOREGS-NEXT:  local.tee   1{{$}}
+; NOREGS-NEXT:  local.get   1{{$}}
 ; NOREGS-NEXT:  call        use_2@FUNCTION{{$}}
 ; NOREGS-NEXT:  return{{$}}
 declare void @use_2(i32, i32)
@@ -405,7 +405,7 @@
 }
 
 ; Don't stackify a register when it would move the def of the register past
-; an implicit get_local for the register.
+; an implicit local.get for the register.
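
 ; To illustrate the hazard (a sketch; the instruction order follows the CHECK
 ; lines below): the call that defines $1 must not sink past a later implicit
 ; read of $1, or that read would observe a stale value:
 ;
 ;   i32.call  $1=, callee@FUNCTION, $0  ;; def of $1: must stay before its readers
 ;   ...
 ;   local.get 1                         ;; implicit reader of $1; moving the def
 ;                                       ;; past this point would change the value it sees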
 
 ; CHECK-LABEL: no_stackify_past_use:
 ; CHECK:      i32.call        $1=, callee@FUNCTION, $0
@@ -416,16 +416,16 @@
 ; CHECK-NEXT: i32.div_s       $push4=, $pop3, $1
 ; CHECK-NEXT: return          $pop4
 ; NOREGS-LABEL: no_stackify_past_use:
-; NOREGS:      get_local       0{{$}}
+; NOREGS:      local.get       0{{$}}
 ; NOREGS-NEXT: i32.call        callee@FUNCTION
-; NOREGS-NEXT: set_local       1{{$}}
-; NOREGS-NEXT: get_local       0{{$}}
+; NOREGS-NEXT: local.set       1{{$}}
+; NOREGS-NEXT: local.get       0{{$}}
 ; NOREGS-NEXT: i32.const       1
 ; NOREGS-NEXT: i32.add
 ; NOREGS-NEXT: i32.call        callee@FUNCTION
-; NOREGS-NEXT: get_local       1{{$}}
+; NOREGS-NEXT: local.get       1{{$}}
 ; NOREGS-NEXT: i32.sub
-; NOREGS-NEXT: get_local       1{{$}}
+; NOREGS-NEXT: local.get       1{{$}}
 ; NOREGS-NEXT: i32.div_s
 ; NOREGS-NEXT: return
 declare i32 @callee(i32)
@@ -443,7 +443,7 @@
 
 ; CHECK-LABEL: commute_to_fix_ordering:
 ; CHECK: i32.call        $push[[L0:.+]]=, callee@FUNCTION, $0
-; CHECK: tee_local       $push[[L1:.+]]=, $1=, $pop[[L0]]
+; CHECK: local.tee       $push[[L1:.+]]=, $1=, $pop[[L0]]
 ; CHECK: i32.const       $push0=, 1
 ; CHECK: i32.add         $push1=, $0, $pop0
 ; CHECK: i32.call        $push2=, callee@FUNCTION, $pop1
@@ -451,11 +451,11 @@
 ; CHECK: i32.mul         $push4=, $pop[[L1]], $pop3
 ; CHECK: return          $pop4
 ; NOREGS-LABEL: commute_to_fix_ordering:
-; NOREGS: get_local       0{{$}}
+; NOREGS: local.get       0{{$}}
 ; NOREGS: i32.call        callee@FUNCTION
-; NOREGS: tee_local       1
-; NOREGS: get_local       1{{$}}
-; NOREGS: get_local       0{{$}}
+; NOREGS: local.tee       1
+; NOREGS: local.get       1{{$}}
+; NOREGS: local.get       0{{$}}
 ; NOREGS: i32.const       1
 ; NOREGS: i32.add
 ; NOREGS: i32.call        callee@FUNCTION
@@ -475,12 +475,12 @@
 
 ; CHECK-LABEL: multiple_defs:
 ; CHECK:        f64.add         $push[[NUM0:[0-9]+]]=, ${{[0-9]+}}, $pop{{[0-9]+}}{{$}}
-; CHECK-NEXT:   tee_local       $push[[NUM1:[0-9]+]]=, $[[NUM2:[0-9]+]]=, $pop[[NUM0]]{{$}}
+; CHECK-NEXT:   local.tee       $push[[NUM1:[0-9]+]]=, $[[NUM2:[0-9]+]]=, $pop[[NUM0]]{{$}}
 ; CHECK-NEXT:   f64.select      $push{{[0-9]+}}=, $pop{{[0-9]+}}, $pop[[NUM1]], ${{[0-9]+}}{{$}}
 ; CHECK:        $[[NUM2]]=,
 ; NOREGS-LABEL: multiple_defs:
 ; NOREGS:        f64.add
-; NOREGS:        tee_local
+; NOREGS:        local.tee
 ; NOREGS:        f64.select
 define void @multiple_defs(i32 %arg, i32 %arg1, i1 %arg2, i1 %arg3, i1 %arg4) {
 bb:
@@ -602,12 +602,12 @@
 ; CHECK-LABEL: stackify_indvar:
 ; CHECK:             i32.const   $push[[L5:.+]]=, 1{{$}}
 ; CHECK-NEXT:        i32.add     $push[[L4:.+]]=, $[[R0:.+]], $pop[[L5]]{{$}}
-; CHECK-NEXT:        tee_local   $push[[L3:.+]]=, $[[R0]]=, $pop[[L4]]{{$}}
+; CHECK-NEXT:        local.tee   $push[[L3:.+]]=, $[[R0]]=, $pop[[L4]]{{$}}
 ; CHECK-NEXT:        i32.ne      $push[[L2:.+]]=, $0, $pop[[L3]]{{$}}
 ; NOREGS-LABEL: stackify_indvar:
 ; NOREGS:             i32.const   1{{$}}
 ; NOREGS-NEXT:        i32.add
-; NOREGS-NEXT:        tee_local   2{{$}}
+; NOREGS-NEXT:        local.tee   2{{$}}
 ; NOREGS-NEXT:        i32.ne
 define void @stackify_indvar(i32 %tmp, i32* %v) #0 {
 bb:
@@ -630,11 +630,11 @@
 
 ; CHECK-LABEL: stackpointer_dependency:
 ; CHECK:      call {{.+}}, stackpointer_callee@FUNCTION,
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL,
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL,
 ; NOREGS-LABEL: stackpointer_dependency:
 ; NOREGS:      call stackpointer_callee@FUNCTION
-; NOREGS:      set_global __stack_pointer
-declare i32 @stackpointer_callee(i8* readnone, i8* readnone)
+; NOREGS:      global.set __stack_pointer
+declare i32 @stackpointer_callee(i8* readnone, i8* readnone) nounwind readnone
 declare i8* @llvm.frameaddress(i32)
 define i32 @stackpointer_dependency(i8* readnone) {
   %2 = tail call i8* @llvm.frameaddress(i32 0)
@@ -646,13 +646,13 @@
 
 ; CHECK-LABEL: call_indirect_stackify:
 ; CHECK: i32.load  $push[[L4:.+]]=, 0($0)
-; CHECK-NEXT: tee_local $push[[L3:.+]]=, $0=, $pop[[L4]]
+; CHECK-NEXT: local.tee $push[[L3:.+]]=, $0=, $pop[[L4]]
 ; CHECK-NEXT: i32.load  $push[[L0:.+]]=, 0($0)
 ; CHECK-NEXT: i32.load  $push[[L1:.+]]=, 0($pop[[L0]])
 ; CHECK-NEXT: i32.call_indirect $push{{.+}}=, $pop[[L3]], $1, $pop[[L1]]
 ; NOREGS-LABEL: call_indirect_stackify:
 ; NOREGS: i32.load  0
-; NOREGS-NEXT: tee_local 0
+; NOREGS-NEXT: local.tee 0
 ; NOREGS:      i32.load  0
 ; NOREGS-NEXT: i32.load  0
 ; NOREGS-NEXT: i32.call_indirect
diff --git a/test/CodeGen/WebAssembly/return-int32.ll b/test/CodeGen/WebAssembly/return-int32.ll
index 066006e..e25ca94 100644
--- a/test/CodeGen/WebAssembly/return-int32.ll
+++ b/test/CodeGen/WebAssembly/return-int32.ll
@@ -6,7 +6,7 @@
 
 ; CHECK-LABEL: return_i32:
 ; CHECK-NEXT:  .functype return_i32 (i32) -> (i32){{$}}
-; CHECK-NEXT:  get_local  $push0=, 0
+; CHECK-NEXT:  local.get  $push0=, 0
 ; CHECK-NEXT:  end_function{{$}}
 define i32 @return_i32(i32 %p) {
   ret i32 %p
diff --git a/test/CodeGen/WebAssembly/signext-inreg.ll b/test/CodeGen/WebAssembly/signext-inreg.ll
index b6e2c76..3af90be 100644
--- a/test/CodeGen/WebAssembly/signext-inreg.ll
+++ b/test/CodeGen/WebAssembly/signext-inreg.ll
@@ -30,7 +30,7 @@
 
 ; CHECK-LABEL: i64_extend8_s:
 ; CHECK-NEXT: .functype i64_extend8_s (i32) -> (i64){{$}}
-; CHECK-NEXT: i64.extend_u/i32 $push[[NUM1:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.extend_i32_u $push[[NUM1:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: i64.extend8_s $push[[NUM2:[0-9]+]]=, $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM2]]{{$}}
 
@@ -43,7 +43,7 @@
 
 ; CHECK-LABEL: i64_extend16_s:
 ; CHECK-NEXT: .functype i64_extend16_s (i32) -> (i64){{$}}
-; CHECK-NEXT: i64.extend_u/i32 $push[[NUM1:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.extend_i32_u $push[[NUM1:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: i64.extend16_s $push[[NUM2:[0-9]+]]=, $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM2]]{{$}}
 
@@ -57,7 +57,7 @@
 ; No SIGN_EXTEND_INREG is needed for 32->64 extension.
 ; CHECK-LABEL: i64_extend32_s:
 ; CHECK-NEXT: .functype i64_extend32_s (i32) -> (i64){{$}}
-; CHECK-NEXT: i64.extend_s/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.extend_i32_s $push[[NUM:[0-9]+]]=, $0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
 define i64 @i64_extend32_s(i32 %x) {
   %a = sext i32 %x to i64
diff --git a/test/CodeGen/WebAssembly/simd-arith.ll b/test/CodeGen/WebAssembly/simd-arith.ll
index 6d3d04c..8d7f020 100644
--- a/test/CodeGen/WebAssembly/simd-arith.ll
+++ b/test/CodeGen/WebAssembly/simd-arith.ll
@@ -1,13 +1,13 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128,SIMD128-SLOW
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128,SIMD128-FAST
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=-simd128 | FileCheck %s --check-prefixes CHECK,NO-SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=-simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128,SIMD128-SLOW
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128,SIMD128-FAST
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -fast-isel | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 
 ; check that a non-test run (including explicit locals pass) at least finishes
-; RUN: llc < %s -O0 -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext
-; RUN: llc < %s -O2 -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext
+; RUN: llc < %s -O0 -mattr=+unimplemented-simd128
+; RUN: llc < %s -O2 -mattr=+unimplemented-simd128
 
 ; Test that basic SIMD128 arithmetic operations assemble as expected.
 
@@ -89,12 +89,16 @@
 ; CHECK-LABEL: shl_vec_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype shl_vec_v16i8 (v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
 ; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
 ; Skip 14 lanes
-; SIMD128:      i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}}
+; SIMD128:      i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}}
 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
 ; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
 ; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}}
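
 ; The mask above reflects WebAssembly's shift semantics: shift counts are
 ; taken modulo the lane width, so when a per-lane vector shift is scalarized
 ; the amount is first ANDed with lane_bits - 1 (7 for i8 lanes, 15 for i16
 ; lanes). As a worked example, an i8 lane shifted by 9 behaves like a shift
 ; by 9 & 7 = 1. The masking sequence, as in the CHECK lines above:
 ;
 ;   i32.const   $push0=, 7           ;; lane-width mask for i8 (15 for i16)
 ;   i8x16.splat $push1=, $pop0       ;; broadcast the mask to every lane
 ;   v128.and    $push2=, $1, $pop1   ;; clamp each shift amount to 0..7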
@@ -121,23 +125,19 @@
 ; CHECK-LABEL: shr_s_vec_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype shr_s_vec_v16i8 (v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 24{{$}}
-; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i32.const $push[[L3:[0-9]+]]=, 24{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 0{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
-; SIMD128-NEXT: i8x16.splat $push[[L7:[0-9]+]]=, $pop[[L6]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
 ; Skip 14 lanes
-; SIMD128:      i8x16.extract_lane_u $push[[L7:[0-9]+]]=, $0, 15{{$}}
-; SIMD128-NEXT: i32.const $push[[L8:[0-9]+]]=, 24{{$}}
-; SIMD128-NEXT: i32.shl $push[[L9:[0-9]+]]=, $pop[[L7]], $pop[[L8]]{{$}}
-; SIMD128-NEXT: i32.const $push[[L10:[0-9]+]]=, 24{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L11:[0-9]+]]=, $pop[[L9]], $pop[[L10]]{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L12:[0-9]+]]=, $1, 15{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L13:[0-9]+]]=, $pop[[L11]], $pop[[L12]]{{$}}
-; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L14:[0-9]+]], 15, $pop[[L13]]{{$}}
+; SIMD128:      i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 15{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 15{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 15, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <16 x i8> @shr_s_vec_v16i8(<16 x i8> %v, <16 x i8> %x) {
   %a = ashr <16 x i8> %v, %x
@@ -162,7 +162,11 @@
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype shr_u_vec_v16i8 (v128, v128) -> (v128){{$}}
 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
+; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
 ; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
 ; Skip 14 lanes
@@ -311,12 +315,16 @@
 ; CHECK-LABEL: shl_vec_v8i16:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype shl_vec_v8i16 (v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
 ; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
 ; Skip 6 lanes
-; SIMD128:      i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}}
+; SIMD128:      i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}}
 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
 ; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
 ; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}}
@@ -342,23 +350,19 @@
 ; CHECK-LABEL: shr_s_vec_v8i16:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype shr_s_vec_v8i16 (v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
-; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i32.const $push[[L3:[0-9]+]]=, 16{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 0{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[L7:[0-9]+]]=, $pop[[L6]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
 ; Skip 6 lanes
-; SIMD128:      i16x8.extract_lane_u $push[[L7:[0-9]+]]=, $0, 7{{$}}
-; SIMD128-NEXT: i32.const $push[[L8:[0-9]+]]=, 16{{$}}
-; SIMD128-NEXT: i32.shl $push[[L9:[0-9]+]]=, $pop[[L7]], $pop[[L8]]{{$}}
-; SIMD128-NEXT: i32.const $push[[L10:[0-9]+]]=, 16{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L11:[0-9]+]]=, $pop[[L9]], $pop[[L10]]{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L12:[0-9]+]]=, $1, 7{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L13:[0-9]+]]=, $pop[[L11]], $pop[[L12]]{{$}}
-; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L14:[0-9]+]], 7, $pop[[L13]]{{$}}
+; SIMD128:      i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 7{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 7{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 7, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <8 x i16> @shr_s_vec_v8i16(<8 x i16> %v, <8 x i16> %x) {
   %a = ashr <8 x i16> %v, %x
@@ -382,7 +386,11 @@
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype shr_u_vec_v8i16 (v128, v128) -> (v128){{$}}
 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
+; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
 ; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
 ; Skip 6 lanes
@@ -725,7 +733,7 @@
 ; CHECK-LABEL: shl_nozext_v2i64:
 ; NO-SIMD128-NOT: i64x2
 ; SIMD128-NEXT: .functype shl_nozext_v2i64 (v128, i64) -> (v128){{$}}
-; SIMD128-NEXT: i32.wrap/i64 $push[[L0:[0-9]+]]=, $1{{$}}
+; SIMD128-NEXT: i32.wrap_i64 $push[[L0:[0-9]+]]=, $1{{$}}
 ; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @shl_nozext_v2i64(<2 x i64> %v, i64 %x) {
@@ -779,7 +787,7 @@
 ; CHECK-LABEL: shr_s_nozext_v2i64:
 ; NO-SIMD128-NOT: i64x2
 ; SIMD128-NEXT: .functype shr_s_nozext_v2i64 (v128, i64) -> (v128){{$}}
-; SIMD128-NEXT: i32.wrap/i64 $push[[L0:[0-9]+]]=, $1{{$}}
+; SIMD128-NEXT: i32.wrap_i64 $push[[L0:[0-9]+]]=, $1{{$}}
 ; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @shr_s_nozext_v2i64(<2 x i64> %v, i64 %x) {
@@ -833,7 +841,7 @@
 ; CHECK-LABEL: shr_u_nozext_v2i64:
 ; NO-SIMD128-NOT: i64x2
 ; SIMD128-NEXT: .functype shr_u_nozext_v2i64 (v128, i64) -> (v128){{$}}
-; SIMD128-NEXT: i32.wrap/i64 $push[[L0:[0-9]+]]=, $1{{$}}
+; SIMD128-NEXT: i32.wrap_i64 $push[[L0:[0-9]+]]=, $1{{$}}
 ; SIMD128-NEXT: i64x2.shr_u $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define <2 x i64> @shr_u_nozext_v2i64(<2 x i64> %v, i64 %x) {
@@ -1087,6 +1095,7 @@
 
 ; CHECK-LABEL: div_v4f32:
 ; NO-SIMD128-NOT: f32x4
+; SIMD128-VM-NOT: f32x4.div
 ; SIMD128-NEXT: .functype div_v4f32 (v128, v128) -> (v128){{$}}
 ; SIMD128-NEXT: f32x4.div $push[[R:[0-9]+]]=, $0, $1{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
@@ -1107,6 +1116,7 @@
 
 ; CHECK-LABEL: sqrt_v4f32:
 ; NO-SIMD128-NOT: f32x4
+; SIMD128-VM-NOT: f32x4.sqrt
 ; SIMD128-NEXT: .functype sqrt_v4f32 (v128) -> (v128){{$}}
 ; SIMD128-NEXT: f32x4.sqrt $push[[R:[0-9]+]]=, $0{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
diff --git a/test/CodeGen/WebAssembly/simd-bitcasts.ll b/test/CodeGen/WebAssembly/simd-bitcasts.ll
index b36f519..5aab1c6 100644
--- a/test/CodeGen/WebAssembly/simd-bitcasts.ll
+++ b/test/CodeGen/WebAssembly/simd-bitcasts.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=-simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 
 ; Test that bitcasts between vector types are lowered to zero instructions
 
diff --git a/test/CodeGen/WebAssembly/simd-comparisons.ll b/test/CodeGen/WebAssembly/simd-comparisons.ll
index 5e4c51a..929385f 100644
--- a/test/CodeGen/WebAssembly/simd-comparisons.ll
+++ b/test/CodeGen/WebAssembly/simd-comparisons.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=-simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 
 ; Test SIMD comparison operators
 
diff --git a/test/CodeGen/WebAssembly/simd-conversions.ll b/test/CodeGen/WebAssembly/simd-conversions.ll
index 9c3f80f..5437a9a 100644
--- a/test/CodeGen/WebAssembly/simd-conversions.ll
+++ b/test/CodeGen/WebAssembly/simd-conversions.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -asm-verbose=false -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=-simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 
 ; Test that vector float-to-int and int-to-float instructions lower correctly
 
@@ -10,7 +10,7 @@
 ; CHECK-LABEL: convert_s_v4f32:
 ; NO-SIMD128-NOT: i32x4
 ; SIMD128-NEXT: .functype convert_s_v4f32 (v128) -> (v128){{$}}
-; SIMD128-NEXT: f32x4.convert_s/i32x4 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: f32x4.convert_i32x4_s $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <4 x float> @convert_s_v4f32(<4 x i32> %x) {
   %a = sitofp <4 x i32> %x to <4 x float>
@@ -20,7 +20,7 @@
 ; CHECK-LABEL: convert_u_v4f32:
 ; NO-SIMD128-NOT: i32x4
 ; SIMD128-NEXT: .functype convert_u_v4f32 (v128) -> (v128){{$}}
-; SIMD128-NEXT: f32x4.convert_u/i32x4 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: f32x4.convert_i32x4_u $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <4 x float> @convert_u_v4f32(<4 x i32> %x) {
   %a = uitofp <4 x i32> %x to <4 x float>
@@ -29,9 +29,9 @@
 
 ; CHECK-LABEL: convert_s_v2f64:
 ; NO-SIMD128-NOT: i64x2
-; SIMD128-VM-NOT: f64x2.convert_s/i64x2
+; SIMD128-VM-NOT: f64x2.convert_i64x2_s
 ; SIMD128-NEXT: .functype convert_s_v2f64 (v128) -> (v128){{$}}
-; SIMD128-NEXT: f64x2.convert_s/i64x2 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: f64x2.convert_i64x2_s $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <2 x double> @convert_s_v2f64(<2 x i64> %x) {
   %a = sitofp <2 x i64> %x to <2 x double>
@@ -40,9 +40,9 @@
 
 ; CHECK-LABEL: convert_u_v2f64:
 ; NO-SIMD128-NOT: i64x2
-; SIMD128-VM-NOT: f64x2.convert_u/i64x2
+; SIMD128-VM-NOT: f64x2.convert_i64x2_u
 ; SIMD128-NEXT: .functype convert_u_v2f64 (v128) -> (v128){{$}}
-; SIMD128-NEXT: f64x2.convert_u/i64x2 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: f64x2.convert_i64x2_u $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <2 x double> @convert_u_v2f64(<2 x i64> %x) {
   %a = uitofp <2 x i64> %x to <2 x double>
@@ -52,7 +52,7 @@
 ; CHECK-LABEL: trunc_sat_s_v4i32:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype trunc_sat_s_v4i32 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i32x4.trunc_sat_s/f32x4 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i32x4.trunc_sat_f32x4_s $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <4 x i32> @trunc_sat_s_v4i32(<4 x float> %x) {
   %a = fptosi <4 x float> %x to <4 x i32>
@@ -62,7 +62,7 @@
 ; CHECK-LABEL: trunc_sat_u_v4i32:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype trunc_sat_u_v4i32 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i32x4.trunc_sat_u/f32x4 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i32x4.trunc_sat_f32x4_u $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <4 x i32> @trunc_sat_u_v4i32(<4 x float> %x) {
   %a = fptoui <4 x float> %x to <4 x i32>
@@ -71,9 +71,9 @@
 
 ; CHECK-LABEL: trunc_sat_s_v2i64:
 ; NO-SIMD128-NOT: f64x2
-; SIMD128-VM-NOT: i64x2.trunc_sat_s/f64x2
+; SIMD128-VM-NOT: i64x2.trunc_sat_f64x2_s
 ; SIMD128-NEXT: .functype trunc_sat_s_v2i64 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i64x2.trunc_sat_s/f64x2 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i64x2.trunc_sat_f64x2_s $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <2 x i64> @trunc_sat_s_v2i64(<2 x double> %x) {
   %a = fptosi <2 x double> %x to <2 x i64>
@@ -82,9 +82,9 @@
 
 ; CHECK-LABEL: trunc_sat_u_v2i64:
 ; NO-SIMD128-NOT: f64x2
-; SIMD128-VM-NOT: i64x2.trunc_sat_u/f64x2
+; SIMD128-VM-NOT: i64x2.trunc_sat_f64x2_u
 ; SIMD128-NEXT: .functype trunc_sat_u_v2i64 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i64x2.trunc_sat_u/f64x2 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i64x2.trunc_sat_f64x2_u $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 define <2 x i64> @trunc_sat_u_v2i64(<2 x double> %x) {
   %a = fptoui <2 x double> %x to <2 x i64>
diff --git a/test/CodeGen/WebAssembly/simd-ext-load-trunc-store.ll b/test/CodeGen/WebAssembly/simd-ext-load-trunc-store.ll
index cabb96a..39bcc1d 100644
--- a/test/CodeGen/WebAssembly/simd-ext-load-trunc-store.ll
+++ b/test/CodeGen/WebAssembly/simd-ext-load-trunc-store.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 | FileCheck %s
 
 ; Check that vectors in memory with smaller lanes are loaded and stored
 ; as expected. This is a regression test for part of bug 39275.
diff --git a/test/CodeGen/WebAssembly/simd-intrinsics.ll b/test/CodeGen/WebAssembly/simd-intrinsics.ll
index 6245efd..53c98d2 100644
--- a/test/CodeGen/WebAssembly/simd-intrinsics.ll
+++ b/test/CodeGen/WebAssembly/simd-intrinsics.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128
 
 ; Test that SIMD128 intrinsics lower as expected. These intrinsics are
 ; only expected to lower successfully if the simd128 attribute is
@@ -77,12 +77,12 @@
 
 ; CHECK-LABEL: bitselect_v16i8:
 ; SIMD128-NEXT: .functype bitselect_v16i8 (v128, v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
+; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $0, $1, $2{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 declare <16 x i8> @llvm.wasm.bitselect.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
-define <16 x i8> @bitselect_v16i8(<16 x i8> %c, <16 x i8> %v1, <16 x i8> %v2) {
+define <16 x i8> @bitselect_v16i8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %c) {
   %a = call <16 x i8> @llvm.wasm.bitselect.v16i8(
-    <16 x i8> %c, <16 x i8> %v1, <16 x i8> %v2
+     <16 x i8> %v1, <16 x i8> %v2, <16 x i8> %c
   )
   ret <16 x i8> %a
 }
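
 ; For reference, v128.bitselect is a pure bitwise select:
 ; result = (v1 & c) | (v2 & ~c), where bits set in the mask c choose from v1
 ; and clear bits choose from v2. With the operand order checked above
 ; (a sketch; register numbers are illustrative):
 ;
 ;   v128.bitselect $push0=, $0, $1, $2  ;; $0 = v1, $1 = v2, $2 = mask c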
@@ -156,12 +156,12 @@
 
 ; CHECK-LABEL: bitselect_v8i16:
 ; SIMD128-NEXT: .functype bitselect_v8i16 (v128, v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
+; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $0, $1, $2{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 declare <8 x i16> @llvm.wasm.bitselect.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
-define <8 x i16> @bitselect_v8i16(<8 x i16> %c, <8 x i16> %v1, <8 x i16> %v2) {
+define <8 x i16> @bitselect_v8i16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %c) {
   %a = call <8 x i16> @llvm.wasm.bitselect.v8i16(
-    <8 x i16> %c, <8 x i16> %v1, <8 x i16> %v2
+    <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %c
   )
   ret <8 x i16> %a
 }
@@ -191,12 +191,12 @@
 
 ; CHECK-LABEL: bitselect_v4i32:
 ; SIMD128-NEXT: .functype bitselect_v4i32 (v128, v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
+; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $0, $1, $2{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 declare <4 x i32> @llvm.wasm.bitselect.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-define <4 x i32> @bitselect_v4i32(<4 x i32> %c, <4 x i32> %v1, <4 x i32> %v2) {
+define <4 x i32> @bitselect_v4i32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %c) {
   %a = call <4 x i32> @llvm.wasm.bitselect.v4i32(
-    <4 x i32> %c, <4 x i32> %v1, <4 x i32> %v2
+    <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %c
   )
   ret <4 x i32> %a
 }
@@ -204,7 +204,7 @@
 ; CHECK-LABEL: trunc_sat_s_v4i32:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype trunc_sat_s_v4i32 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i32x4.trunc_sat_s/f32x4 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i32x4.trunc_sat_f32x4_s $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 declare <4 x i32> @llvm.wasm.trunc.saturate.signed.v4i32.v4f32(<4 x float>)
 define <4 x i32> @trunc_sat_s_v4i32(<4 x float> %x) {
@@ -215,7 +215,7 @@
 ; CHECK-LABEL: trunc_sat_u_v4i32:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype trunc_sat_u_v4i32 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i32x4.trunc_sat_u/f32x4 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i32x4.trunc_sat_f32x4_u $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 declare <4 x i32> @llvm.wasm.trunc.saturate.unsigned.v4i32.v4f32(<4 x float>)
 define <4 x i32> @trunc_sat_u_v4i32(<4 x float> %x) {
@@ -248,12 +248,12 @@
 
 ; CHECK-LABEL: bitselect_v2i64:
 ; SIMD128-NEXT: .functype bitselect_v2i64 (v128, v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
+; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $0, $1, $2{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 declare <2 x i64> @llvm.wasm.bitselect.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
-define <2 x i64> @bitselect_v2i64(<2 x i64> %c, <2 x i64> %v1, <2 x i64> %v2) {
+define <2 x i64> @bitselect_v2i64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %c) {
   %a = call <2 x i64> @llvm.wasm.bitselect.v2i64(
-    <2 x i64> %c, <2 x i64> %v1, <2 x i64> %v2
+    <2 x i64> %v1, <2 x i64> %v2, <2 x i64> %c
   )
   ret <2 x i64> %a
 }
@@ -261,7 +261,7 @@
 ; CHECK-LABEL: trunc_sat_s_v2i64:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype trunc_sat_s_v2i64 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i64x2.trunc_sat_s/f64x2 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i64x2.trunc_sat_f64x2_s $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 declare <2 x i64> @llvm.wasm.trunc.saturate.signed.v2i64.v2f64(<2 x double>)
 define <2 x i64> @trunc_sat_s_v2i64(<2 x double> %x) {
@@ -272,7 +272,7 @@
 ; CHECK-LABEL: trunc_sat_u_v2i64:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype trunc_sat_u_v2i64 (v128) -> (v128){{$}}
-; SIMD128-NEXT: i64x2.trunc_sat_u/f64x2 $push[[R:[0-9]+]]=, $0
+; SIMD128-NEXT: i64x2.trunc_sat_f64x2_u $push[[R:[0-9]+]]=, $0
 ; SIMD128-NEXT: return $pop[[R]]
 declare <2 x i64> @llvm.wasm.trunc.saturate.unsigned.v2i64.v2f64(<2 x double>)
 define <2 x i64> @trunc_sat_u_v2i64(<2 x double> %x) {
@@ -285,12 +285,12 @@
 ; ==============================================================================
 ; CHECK-LABEL: bitselect_v4f32:
 ; SIMD128-NEXT: .functype bitselect_v4f32 (v128, v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
+; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $0, $1, $2{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 declare <4 x float> @llvm.wasm.bitselect.v4f32(<4 x float>, <4 x float>, <4 x float>)
-define <4 x float> @bitselect_v4f32(<4 x float> %c, <4 x float> %v1, <4 x float> %v2) {
+define <4 x float> @bitselect_v4f32(<4 x float> %v1, <4 x float> %v2, <4 x float> %c) {
   %a = call <4 x float> @llvm.wasm.bitselect.v4f32(
-    <4 x float> %c, <4 x float> %v1, <4 x float> %v2
+     <4 x float> %v1, <4 x float> %v2, <4 x float> %c
   )
   ret <4 x float> %a
 }
@@ -300,12 +300,12 @@
 ; ==============================================================================
 ; CHECK-LABEL: bitselect_v2f64:
 ; SIMD128-NEXT: .functype bitselect_v2f64 (v128, v128, v128) -> (v128){{$}}
-; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
+; SIMD128-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $0, $1, $2{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 declare <2 x double> @llvm.wasm.bitselect.v2f64(<2 x double>, <2 x double>, <2 x double>)
-define <2 x double> @bitselect_v2f64(<2 x double> %c, <2 x double> %v1, <2 x double> %v2) {
+define <2 x double> @bitselect_v2f64(<2 x double> %v1, <2 x double> %v2, <2 x double> %c) {
   %a = call <2 x double> @llvm.wasm.bitselect.v2f64(
-    <2 x double> %c, <2 x double> %v1, <2 x double> %v2
+    <2 x double> %v1, <2 x double> %v2, <2 x double> %c
   )
   ret <2 x double> %a
 }
diff --git a/test/CodeGen/WebAssembly/simd-load-store-alignment.ll b/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
index 50935b6..c19a7f6 100644
--- a/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 | FileCheck %s
 
 ; Test loads and stores with custom alignment values.
 
diff --git a/test/CodeGen/WebAssembly/simd-nested-shuffles.ll b/test/CodeGen/WebAssembly/simd-nested-shuffles.ll
index 51ba5a9..597ab58 100644
--- a/test/CodeGen/WebAssembly/simd-nested-shuffles.ll
+++ b/test/CodeGen/WebAssembly/simd-nested-shuffles.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mattr=+simd128 | FileCheck %s --check-prefixes CHECK
+; RUN: llc < %s -mattr=+simd128 -verify-machineinstrs | FileCheck %s --check-prefixes CHECK
 
 ; Check that shuffles maintain their type when being custom
 ; lowered. Regression test for bug 39275.
diff --git a/test/CodeGen/WebAssembly/simd-noopt.ll b/test/CodeGen/WebAssembly/simd-noopt.ll
new file mode 100644
index 0000000..1ec259c
--- /dev/null
+++ b/test/CodeGen/WebAssembly/simd-noopt.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -fast-isel -mattr=+simd128,+sign-ext -verify-machineinstrs
+
+;; Ensures fastisel produces valid code when storing and loading split
+;; up v2i64 values. Lowering away v2i64s is a temporary measure while
+;; V8 does not have support for i64x2.* operations, and is done when
+;; -wasm-enable-unimplemented-simd is not present. This is a
+;; regression test for a bug that crashed llc after fastisel produced
+;; machineinstrs that used registers that had never been defined.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+define i64 @foo(<2 x i64> %vec) {
+entry:
+  %vec.addr = alloca <2 x i64>, align 16
+  store <2 x i64> %vec, <2 x i64>* %vec.addr, align 16
+  %0 = load <2 x i64>, <2 x i64>* %vec.addr, align 16
+  %1 = extractelement <2 x i64> %0, i32 0
+  ret i64 %1
+}
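
;; A sketch of the split lowering the comment above describes (illustrative
;; only; offsets and register names are hypothetical): with i64x2 lowered
;; away, the 128-bit store and load are handled as two 64-bit halves, and
;; extracting lane 0 reduces to reading the low half:
;;
;;   i64.store 0($sp), $lo     ;; low 8 bytes of %vec
;;   i64.store 8($sp), $hi     ;; high 8 bytes of %vec
;;   i64.load  $push0=, 0($sp) ;; extractelement lane 0 = low half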
diff --git a/test/CodeGen/WebAssembly/simd-offset.ll b/test/CodeGen/WebAssembly/simd-offset.ll
index 6bc165f..61177ae 100644
--- a/test/CodeGen/WebAssembly/simd-offset.ll
+++ b/test/CodeGen/WebAssembly/simd-offset.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=-simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 
 ; Test SIMD loads and stores
 
diff --git a/test/CodeGen/WebAssembly/simd-select.ll b/test/CodeGen/WebAssembly/simd-select.ll
index 70a9149..c871f60 100644
--- a/test/CodeGen/WebAssembly/simd-select.ll
+++ b/test/CodeGen/WebAssembly/simd-select.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 | FileCheck %s
 
 ; Test that vector selects of various varieties lower correctly to bitselects.
 
diff --git a/test/CodeGen/WebAssembly/simd-sext-inreg.ll b/test/CodeGen/WebAssembly/simd-sext-inreg.ll
index 5fecb4d..0c37582 100644
--- a/test/CodeGen/WebAssembly/simd-sext-inreg.ll
+++ b/test/CodeGen/WebAssembly/simd-sext-inreg.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -asm-verbose=false -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=-simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 
 ; Test that vector sign extensions lower to shifts
 
diff --git a/test/CodeGen/WebAssembly/simd.ll b/test/CodeGen/WebAssembly/simd.ll
index 29b51f1..1393f62 100644
--- a/test/CodeGen/WebAssembly/simd.ll
+++ b/test/CodeGen/WebAssembly/simd.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128-VM
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=-simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128,+sign-ext | FileCheck %s --check-prefixes CHECK,SIMD128-VM
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 
 ; Test that basic SIMD128 vector manipulation operations assemble as expected.
 
@@ -12,6 +12,7 @@
 ; ==============================================================================
 ; CHECK-LABEL: const_v16i8:
 ; NO-SIMD128-NOT: i8x16
+; SIMD128-VM-NOT: v128.const
 ; SIMD128-NEXT: .functype const_v16i8 () -> (v128){{$}}
 ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=,
 ; SIMD128-SAME: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
@@ -55,10 +56,10 @@
 ; CHECK-LABEL: extract_var_v16i8_s:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype extract_var_v16i8_s (v128, i32) -> (i32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 15
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]
@@ -84,6 +85,7 @@
 
 ; CHECK-LABEL: extract_v16i8_u:
 ; NO-SIMD128-NOT: i8x16
+; SIMD128-VM-NOT: i8x16.extract_lane_u
 ; SIMD128-NEXT: .functype extract_v16i8_u (v128) -> (i32){{$}}
 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 13{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
@@ -96,10 +98,10 @@
 ; CHECK-LABEL: extract_var_v16i8_u:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype extract_var_v16i8_u (v128, i32) -> (i32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 15{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -114,6 +116,7 @@
 
 ; CHECK-LABEL: extract_undef_v16i8_u:
 ; NO-SIMD128-NOT: i8x16
+; SIMD128-VM-NOT: i8x16.extract_lane_u
 ; SIMD128-NEXT: .functype extract_undef_v16i8_u (v128) -> (i32){{$}}
 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
@@ -126,7 +129,7 @@
 ; CHECK-LABEL: extract_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype extract_v16i8 (v128) -> (i32){{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 13{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[R:[0-9]+]]=, $0, 13{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define i8 @extract_v16i8(<16 x i8> %v) {
   %elem = extractelement <16 x i8> %v, i8 13
@@ -136,10 +139,10 @@
 ; CHECK-LABEL: extract_var_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype extract_var_v16i8 (v128, i32) -> (i32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 15{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -154,7 +157,7 @@
 ; CHECK-LABEL: extract_undef_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype extract_undef_v16i8 (v128) -> (i32){{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[R:[0-9]+]]=, $0, 0{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define i8 @extract_undef_v16i8(<16 x i8> %v) {
   %elem = extractelement <16 x i8> %v, i8 undef
@@ -174,10 +177,10 @@
 ; CHECK-LABEL: replace_var_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype replace_var_v16i8 (v128, i32, i32) -> (v128){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 15{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -316,10 +319,10 @@
 ; CHECK-LABEL: extract_var_v8i16_s:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype extract_var_v8i16_s (v128, i32) -> (i32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 7{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -347,6 +350,7 @@
 
 ; CHECK-LABEL: extract_v8i16_u:
 ; NO-SIMD128-NOT: i16x8
+; SIMD128-VM-NOT: i16x8.extract_lane_u
 ; SIMD128-NEXT: .functype extract_v8i16_u (v128) -> (i32){{$}}
 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 5{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
@@ -359,10 +363,10 @@
 ; CHECK-LABEL: extract_var_v8i16_u:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype extract_var_v8i16_u (v128, i32) -> (i32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 7{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -379,6 +383,7 @@
 
 ; CHECK-LABEL: extract_undef_v8i16_u:
 ; NO-SIMD128-NOT: i16x8
+; SIMD128-VM-NOT: i16x8.extract_lane_u
 ; SIMD128-NEXT: .functype extract_undef_v8i16_u (v128) -> (i32){{$}}
 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
@@ -391,7 +396,7 @@
 ; CHECK-LABEL: extract_v8i16:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype extract_v8i16 (v128) -> (i32){{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 5{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[R:[0-9]+]]=, $0, 5{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define i16 @extract_v8i16(<8 x i16> %v) {
   %elem = extractelement <8 x i16> %v, i16 5
@@ -401,10 +406,10 @@
 ; CHECK-LABEL: extract_var_v8i16:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype extract_var_v8i16 (v128, i32) -> (i32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 7{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -421,7 +426,7 @@
 ; CHECK-LABEL: extract_undef_v8i16:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype extract_undef_v8i16 (v128) -> (i32){{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[R:[0-9]+]]=, $0, 0{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define i16 @extract_undef_v8i16(<8 x i16> %v) {
   %elem = extractelement <8 x i16> %v, i16 undef
@@ -441,10 +446,10 @@
 ; CHECK-LABEL: replace_var_v8i16:
 ; NO-SIMD128-NOT: i16x8
 ; SIMD128-NEXT: .functype replace_var_v8i16 (v128, i32, i32) -> (v128){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 7{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -562,10 +567,10 @@
 ; CHECK-LABEL: extract_var_v4i32:
 ; NO-SIMD128-NOT: i32x4
 ; SIMD128-NEXT: .functype extract_var_v4i32 (v128, i32) -> (i32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 3{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -602,10 +607,10 @@
 ; CHECK-LABEL: replace_var_v4i32:
 ; NO-SIMD128-NOT: i32x4
 ; SIMD128-NEXT: .functype replace_var_v4i32 (v128, i32, i32) -> (v128){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L4:[0-9]+]]=, 3{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L4]]{{$}}
@@ -715,10 +720,10 @@
 ; CHECK-LABEL: extract_var_v2i64:
 ; NO-SIMD128-NOT: i64x2
 ; SIMD128-NEXT: .functype extract_var_v2i64 (v128, i32) -> (i64){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L2:[0-9]+]]=, 1{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L2]]{{$}}
@@ -758,10 +763,10 @@
 ; NO-SIMD128-NOT: i64x2
 ; SIMD128-VM-NOT: i64x2
 ; SIMD128-NEXT: .functype replace_var_v2i64 (v128, i32, i64) -> (v128){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L2:[0-9]+]]=, 1{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L2]]{{$}}
@@ -868,10 +873,10 @@
 ; CHECK-LABEL: extract_var_v4f32:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype extract_var_v4f32 (v128, i32) -> (f32){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L2:[0-9]+]]=, 3{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L2]]{{$}}
@@ -908,10 +913,10 @@
 ; CHECK-LABEL: replace_var_v4f32:
 ; NO-SIMD128-NOT: f32x4
 ; SIMD128-NEXT: .functype replace_var_v4f32 (v128, i32, f32) -> (v128){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L2:[0-9]+]]=, 3{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L2]]{{$}}
@@ -1020,10 +1025,10 @@
 ; CHECK-LABEL: extract_var_v2f64:
 ; NO-SIMD128-NOT: f64x2
 ; SIMD128-NEXT: .functype extract_var_v2f64 (v128, i32) -> (f64){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $2=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L2:[0-9]+]]=, 1{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L2]]{{$}}
@@ -1063,10 +1068,10 @@
 ; NO-SIMD128-NOT: f64x2
 ; SIMD128-VM-NOT: f64x2
 ; SIMD128-NEXT: .functype replace_var_v2f64 (v128, i32, f64) -> (v128){{$}}
-; SIMD128-NEXT: get_global $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
+; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer@GLOBAL{{$}}
 ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: tee_local $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: local.tee $push[[L3:[0-9]+]]=, $3=, $pop[[L2]]{{$}}
 ; SIMD128-NEXT: v128.store 0($pop[[L3]]), $0{{$}}
 ; SIMD128-NEXT: i32.const $push[[L2:[0-9]+]]=, 1{{$}}
 ; SIMD128-NEXT: i32.and $push[[L5:[0-9]+]]=, $1, $pop[[L2]]{{$}}
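The WebAssembly churn above is almost entirely mechanical: the opcodes get_local, set_local, tee_local, get_global, and set_global were renamed to local.get, local.set, local.tee, global.get, and global.set, and lane extracts whose result is not explicitly zero-extended now select the sign-extending form (i16x8.extract_lane_s rather than extract_lane_u). A minimal sketch of a test written in the new spelling; the function is hypothetical and not part of this patch:

; RUN: llc < %s -mtriple=wasm32-unknown-unknown | FileCheck %s

; A frame-allocating function exercises the renamed opcodes: the
; prologue reads __stack_pointer, tees the decremented value into a
; local, and writes it back.
; CHECK-LABEL: frame_sketch:
; CHECK: global.get $push{{[0-9]+}}=, __stack_pointer@GLOBAL{{$}}
; CHECK: i32.sub
; CHECK: local.tee
; CHECK: global.set __stack_pointer@GLOBAL,
define void @frame_sketch() noredzone {
  %p = alloca i32
  store volatile i32 0, i32* %p
  ret void
}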
diff --git a/test/CodeGen/WebAssembly/stack-alignment.ll b/test/CodeGen/WebAssembly/stack-alignment.ll
index 2c7380a..a610db9 100644
--- a/test/CodeGen/WebAssembly/stack-alignment.ll
+++ b/test/CodeGen/WebAssembly/stack-alignment.ll
@@ -6,18 +6,18 @@
 declare void @somefunc(i32*)
 
 ; CHECK-LABEL: underalign:
-; CHECK:      get_global $push[[L1:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK:      global.get $push[[L1:.+]]=, __stack_pointer@GLOBAL{{$}}
 ; CHECK-NEXT: i32.const $push[[L2:.+]]=, 16
 ; CHECK-NEXT: i32.sub   $push[[L10:.+]]=, $pop[[L1]], $pop[[L2]]
-; CHECK-NEXT: tee_local $push{{.+}}=, [[SP:.+]], $pop[[L10]]
+; CHECK-NEXT: local.tee $push{{.+}}=, [[SP:.+]], $pop[[L10]]
 
-; CHECK:      get_local $push[[L3:.+]]=, [[SP]]{{$}}
+; CHECK:      local.get $push[[L3:.+]]=, [[SP]]{{$}}
 ; CHECK:      i32.add   $push[[underaligned:.+]]=, $pop[[L3]], $pop{{.+}}
 ; CHECK-NEXT: call      somefunc@FUNCTION, $pop[[underaligned]]
 
-; CHECK:      get_local $push[[M4:.+]]=, [[SP]]{{$}}
+; CHECK:      local.get $push[[M4:.+]]=, [[SP]]{{$}}
 ; CHECK:      i32.add   $push[[L5:.+]]=, $pop[[M4]], $pop{{.+}}
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L5]]
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L5]]
 define void @underalign() {
 entry:
   %underaligned = alloca i32, align 8
@@ -26,19 +26,19 @@
 }
 
 ; CHECK-LABEL: overalign:
-; CHECK:      get_global $push[[L10:.+]]=, __stack_pointer@GLOBAL{{$}}
-; CHECK-NEXT: tee_local  $push[[L9:.+]]=, [[BP:.+]], $pop[[L10]]
+; CHECK:      global.get $push[[L10:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK-NEXT: local.tee  $push[[L9:.+]]=, [[BP:.+]], $pop[[L10]]
 ; CHECK-NEXT: i32.const  $push[[L2:.+]]=, 32
 ; CHECK-NEXT: i32.sub    $push[[L8:.+]]=, $pop[[L9]], $pop[[L2]]
 ; CHECK-NEXT: i32.const  $push[[L3:.+]]=, -32
 ; CHECK-NEXT: i32.and    $push[[L7:.+]]=, $pop[[L8]], $pop[[L3]]
-; CHECK-NEXT: tee_local  $push{{.+}}=, [[SP:.+]], $pop[[L7]]
+; CHECK-NEXT: local.tee  $push{{.+}}=, [[SP:.+]], $pop[[L7]]
 
-; CHECK:      get_local  $push[[M5:.+]]=, [[SP]]{{$}}
+; CHECK:      local.get  $push[[M5:.+]]=, [[SP]]{{$}}
 ; CHECK:      call       somefunc@FUNCTION, $pop[[M5]]{{$}}
 
-; CHECK:      get_local  $push[[M6:.+]]=, [[BP]]{{$}}
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[M6]]
+; CHECK:      local.get  $push[[M6:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[M6]]
 define void @overalign() {
 entry:
   %overaligned = alloca i32, align 32
@@ -47,21 +47,21 @@
 }
 
 ; CHECK-LABEL: over_and_normal_align:
-; CHECK:      get_global $push[[L14:.+]]=, __stack_pointer@GLOBAL{{$}}
-; CHECK-NEXT: tee_local  $push[[L13:.+]]=, [[BP:.+]], $pop[[L14]]
+; CHECK:      global.get $push[[L14:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK-NEXT: local.tee  $push[[L13:.+]]=, [[BP:.+]], $pop[[L14]]
 ; CHECK:      i32.sub    $push[[L12:.+]]=, $pop[[L13]], $pop{{.+}}
 ; CHECK:      i32.and    $push[[L11:.+]]=, $pop[[L12]], $pop{{.+}}
-; CHECK-NEXT: tee_local  $push{{.+}}=, [[SP:.+]], $pop[[L11]]
+; CHECK-NEXT: local.tee  $push{{.+}}=, [[SP:.+]], $pop[[L11]]
 
-; CHECK:      get_local  $push[[M6:.+]]=, [[SP]]{{$}}
+; CHECK:      local.get  $push[[M6:.+]]=, [[SP]]{{$}}
 ; CHECK:      i32.add    $push[[L6:.+]]=, $pop[[M6]], $pop{{.+}}
 ; CHECK-NEXT: call       somefunc@FUNCTION, $pop[[L6]]
-; CHECK:      get_local  $push[[M7:.+]]=, [[SP]]{{$}}
+; CHECK:      local.get  $push[[M7:.+]]=, [[SP]]{{$}}
 ; CHECK:      i32.add    $push[[L8:.+]]=, $pop[[M7]], $pop{{.+}}
 ; CHECK-NEXT: call       somefunc@FUNCTION, $pop[[L8]]
 
-; CHECK:      get_local  $push[[L6:.+]]=, [[BP]]{{$}}
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L6]]
+; CHECK:      local.get  $push[[L6:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L6]]
 define void @over_and_normal_align() {
 entry:
   %over = alloca i32, align 32
@@ -72,16 +72,16 @@
 }
 
 ; CHECK-LABEL: dynamic_overalign:
-; CHECK:      get_global $push[[L18:.+]]=, __stack_pointer@GLOBAL{{$}}
-; CHECK-NEXT: tee_local  $push[[L17:.+]]=, [[SP:.+]], $pop[[L18]]
-; CHECK-NEXT: set_local  [[BP:.+]], $pop[[L17]]
-; CHECK:      tee_local  $push{{.+}}=, [[SP_2:.+]], $pop{{.+}}
+; CHECK:      global.get $push[[L18:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK-NEXT: local.tee  $push[[L17:.+]]=, [[SP:.+]], $pop[[L18]]
+; CHECK-NEXT: local.set  [[BP:.+]], $pop[[L17]]
+; CHECK:      local.tee  $push{{.+}}=, [[SP_2:.+]], $pop{{.+}}
 
-; CHECK:      get_local  $push[[M8:.+]]=, [[SP_2]]{{$}}
+; CHECK:      local.get  $push[[M8:.+]]=, [[SP_2]]{{$}}
 ; CHECK:      call       somefunc@FUNCTION, $pop[[M8]]
 
-; CHECK:      get_local  $push[[M9:.+]]=, [[BP]]{{$}}
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[M9]]
+; CHECK:      local.get  $push[[M9:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[M9]]
 define void @dynamic_overalign(i32 %num) {
 entry:
   %dynamic = alloca i32, i32 %num, align 32
@@ -90,22 +90,22 @@
 }
 
 ; CHECK-LABEL: overalign_and_dynamic:
-; CHECK:      get_global $push[[L21:.+]]=, __stack_pointer@GLOBAL{{$}}
-; CHECK-NEXT: tee_local  $push[[L20:.+]]=, [[BP:.+]], $pop[[L21]]
+; CHECK:      global.get $push[[L21:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK-NEXT: local.tee  $push[[L20:.+]]=, [[BP:.+]], $pop[[L21]]
 ; CHECK:      i32.sub    $push[[L19:.+]]=, $pop[[L20]], $pop{{.+}}
 ; CHECK:      i32.and    $push[[L18:.+]]=, $pop[[L19]], $pop{{.+}}
-; CHECK:      tee_local  $push{{.+}}=, [[FP:.+]], $pop[[L18]]
-; CHECK:      get_local  $push[[M10:.+]]=, [[FP]]{{$}}
+; CHECK:      local.tee  $push{{.+}}=, [[FP:.+]], $pop[[L18]]
+; CHECK:      local.get  $push[[M10:.+]]=, [[FP]]{{$}}
 ; CHECK:      i32.sub    $push[[L16:.+]]=, $pop[[M10]], $pop{{.+}}
-; CHECK-NEXT: tee_local  $push{{.+}}=, [[SP:.+]], $pop[[L16]]
+; CHECK-NEXT: local.tee  $push{{.+}}=, [[SP:.+]], $pop[[L16]]
 
-; CHECK:      get_local  $push[[over:.+]]=, [[FP]]
+; CHECK:      local.get  $push[[over:.+]]=, [[FP]]
 ; CHECK-NEXT: call       somefunc@FUNCTION, $pop[[over]]
-; CHECK:      get_local  $push[[another:.+]]=, [[SP]]
+; CHECK:      local.get  $push[[another:.+]]=, [[SP]]
 ; CHECK-NEXT: call       somefunc@FUNCTION, $pop[[another]]
 
-; CHECK:      get_local  $push[[M11:.+]]=, [[BP]]{{$}}
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[M11]]
+; CHECK:      local.get  $push[[M11:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[M11]]
 define void @overalign_and_dynamic(i32 %num) {
 entry:
   %over = alloca i32, align 32
@@ -116,27 +116,27 @@
 }
 
 ; CHECK-LABEL: overalign_static_and_dynamic:
-; CHECK:      get_global $push[[L26:.+]]=, __stack_pointer@GLOBAL{{$}}
-; CHECK-NEXT: tee_local  $push[[L25:.+]]=, [[BP:.+]], $pop[[L26]]
+; CHECK:      global.get $push[[L26:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK-NEXT: local.tee  $push[[L25:.+]]=, [[BP:.+]], $pop[[L26]]
 ; CHECK:      i32.sub    $push[[L24:.+]]=, $pop[[L25]], $pop{{.+}}
 ; CHECK:      i32.and    $push[[L23:.+]]=, $pop[[L24]], $pop{{.+}}
-; CHECK:      tee_local  $push{{.+}}=, [[FP:.+]], $pop[[L23]]
-; CHECK:      get_local  $push[[M12:.+]]=, [[FP]]{{$}}
+; CHECK:      local.tee  $push{{.+}}=, [[FP:.+]], $pop[[L23]]
+; CHECK:      local.get  $push[[M12:.+]]=, [[FP]]{{$}}
 ; CHECK:      i32.sub    $push[[L21:.+]]=, $pop[[M12]], $pop{{.+}}
-; CHECK-NEXT: tee_local  $push{{.+}}=, [[SP:.+]], $pop[[L21]]
+; CHECK-NEXT: local.tee  $push{{.+}}=, [[SP:.+]], $pop[[L21]]
 
-; CHECK:      get_local  $push[[L19:.+]]=, [[FP]]
-; CHECK:      tee_local  $push[[L18:.+]]=, [[FP_2:.+]], $pop[[L19]]
+; CHECK:      local.get  $push[[L19:.+]]=, [[FP]]
+; CHECK:      local.tee  $push[[L18:.+]]=, [[FP_2:.+]], $pop[[L19]]
 ; CHECK:      i32.add    $push[[over:.+]]=, $pop[[L18]], $pop{{.+}}
 ; CHECK-NEXT: call       somefunc@FUNCTION, $pop[[over]]
-; CHECK:      get_local  $push[[M12:.+]]=, [[SP]]
+; CHECK:      local.get  $push[[M12:.+]]=, [[SP]]
 ; CHECK:      call       somefunc@FUNCTION, $pop[[M12]]
-; CHECK:      get_local  $push[[M13:.+]]=, [[FP_2]]
+; CHECK:      local.get  $push[[M13:.+]]=, [[FP_2]]
 ; CHECK:      i32.add    $push[[static:.+]]=, $pop[[M13]], $pop{{.+}}
 ; CHECK-NEXT: call       somefunc@FUNCTION, $pop[[static]]
 
-; CHECK:      get_local  $push[[M14:.+]]=, [[BP]]{{$}}
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[M14]]
+; CHECK:      local.get  $push[[M14:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[M14]]
 define void @overalign_static_and_dynamic(i32 %num) {
 entry:
   %over = alloca i32, align 32
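All of the stack-alignment.ll checks pin down one lowering pattern: an alloca aligned beyond the default 16 bytes keeps a base pointer in a local, then aligns the new stack pointer by subtracting the frame size and masking with the negated alignment. A sketch of the smallest input that should produce the sub/and pair, modeled on @overalign above (hypothetical test, not part of the patch):

; RUN: llc < %s -mtriple=wasm32-unknown-unknown | FileCheck %s
; CHECK-LABEL: overalign64:
; CHECK: global.get {{.*}}__stack_pointer@GLOBAL{{$}}
; CHECK: i32.sub
; CHECK: i32.const {{.*}}-64{{$}}
; CHECK: i32.and
; CHECK: global.set __stack_pointer@GLOBAL,
define void @overalign64() {
  %p = alloca i32, align 64
  store volatile i32 0, i32* %p
  ret void
}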
diff --git a/test/CodeGen/WebAssembly/stack-insts.ll b/test/CodeGen/WebAssembly/stack-insts.ll
index 0876b4a..c4ccddd 100644
--- a/test/CodeGen/WebAssembly/stack-insts.ll
+++ b/test/CodeGen/WebAssembly/stack-insts.ll
@@ -8,8 +8,7 @@
 
 ; Tests if br_table is printed correctly with a tab.
 ; CHECK-LABEL: test0:
-; CHECK-NOT: br_table0, 1, 0, 1, 0
-; CHECK: br_table 0, 1, 0, 1, 0
+; CHECK: br_table {0, 1, 0, 1, 0}
 define void @test0(i32 %n) {
 entry:
   switch i32 %n, label %sw.epilog [
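The updated check reflects an assembler-syntax change: br_table now prints its destination list inside braces rather than as bare operands, so the old CHECK-NOT guarding against a missing tab is redundant and is dropped. A hypothetical input in the same shape as @test0; the index list in the comment is illustrative, since it depends on block layout:

; With the new printer this lowers to something like:
;   br_table {0, 1, 2, 0}
define void @three_way(i32 %n) {
entry:
  switch i32 %n, label %done [
    i32 0, label %a
    i32 1, label %b
    i32 2, label %c
  ]
a:
  br label %done
b:
  br label %done
c:
  br label %done
done:
  ret void
}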
diff --git a/test/CodeGen/WebAssembly/store.ll b/test/CodeGen/WebAssembly/store.ll
index 9e528bd..c107d9a 100644
--- a/test/CodeGen/WebAssembly/store.ll
+++ b/test/CodeGen/WebAssembly/store.ll
@@ -8,8 +8,8 @@
 
 ; CHECK-LABEL: sti32:
 ; CHECK-NEXT: .functype sti32 (i32, i32) -> (){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
 define void @sti32(i32 *%p, i32 %v) {
@@ -19,8 +19,8 @@
 
 ; CHECK-LABEL: sti64:
 ; CHECK-NEXT: .functype sti64 (i32, i64) -> (){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
 define void @sti64(i64 *%p, i64 %v) {
@@ -30,8 +30,8 @@
 
 ; CHECK-LABEL: stf32:
 ; CHECK-NEXT: .functype stf32 (i32, f32) -> (){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
 define void @stf32(float *%p, float %v) {
@@ -41,8 +41,8 @@
 
 ; CHECK-LABEL: stf64:
 ; CHECK-NEXT: .functype stf64 (i32, f64) -> (){{$}}
-; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
 define void @stf64(double *%p, double %v) {
diff --git a/test/CodeGen/WebAssembly/umulo-128-legalisation-lowering.ll b/test/CodeGen/WebAssembly/umulo-128-legalisation-lowering.ll
index 3cdb704..5ec021f 100644
--- a/test/CodeGen/WebAssembly/umulo-128-legalisation-lowering.ll
+++ b/test/CodeGen/WebAssembly/umulo-128-legalisation-lowering.ll
@@ -3,78 +3,78 @@
 
 define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
 ; WASM32-LABEL: muloti_test
-; WASM32: get_global      $push18=, __stack_pointer@GLOBAL
+; WASM32: global.get      $push18=, __stack_pointer@GLOBAL
 ; WASM32: i32.const       $push19=, 48
 ; WASM32: i32.sub         $push40=, $pop18, $pop19
-; WASM32: tee_local       $push39=, 5, $pop40
-; WASM32: set_global      __stack_pointer@GLOBAL, $pop39
-; WASM32: get_local       $push41=, 5
+; WASM32: local.tee       $push39=, 5, $pop40
+; WASM32: global.set      __stack_pointer@GLOBAL, $pop39
+; WASM32: local.get       $push41=, 5
 ; WASM32: i32.const       $push22=, 32
 ; WASM32: i32.add         $push23=, $pop41, $pop22
-; WASM32: get_local       $push43=, 1
+; WASM32: local.get       $push43=, 1
 ; WASM32: i64.const       $push0=, 0
-; WASM32: get_local       $push42=, 3
+; WASM32: local.get       $push42=, 3
 ; WASM32: i64.const       $push38=, 0
 ; WASM32: call            __multi3@FUNCTION, $pop23, $pop43, $pop0, $pop42, $pop38
-; WASM32: get_local       $push44=, 5
+; WASM32: local.get       $push44=, 5
 ; WASM32: i32.const       $push24=, 16
 ; WASM32: i32.add         $push25=, $pop44, $pop24
-; WASM32: get_local       $push46=, 4
+; WASM32: local.get       $push46=, 4
 ; WASM32: i64.const       $push37=, 0
-; WASM32: get_local       $push45=, 1
+; WASM32: local.get       $push45=, 1
 ; WASM32: i64.const       $push36=, 0
 ; WASM32: call            __multi3@FUNCTION, $pop25, $pop46, $pop37, $pop45, $pop36
-; WASM32: get_local       $push49=, 5
-; WASM32: get_local       $push48=, 2
+; WASM32: local.get       $push49=, 5
+; WASM32: local.get       $push48=, 2
 ; WASM32: i64.const       $push35=, 0
-; WASM32: get_local       $push47=, 3
+; WASM32: local.get       $push47=, 3
 ; WASM32: i64.const       $push34=, 0
 ; WASM32: call            __multi3@FUNCTION, $pop49, $pop48, $pop35, $pop47, $pop34
-; WASM32: get_local       $push51=, 0
-; WASM32: get_local       $push50=, 5
+; WASM32: local.get       $push51=, 0
+; WASM32: local.get       $push50=, 5
 ; WASM32: i64.load        $push1=, 32($pop50)
 ; WASM32: i64.store       0($pop51), $pop1
-; WASM32: get_local       $push55=, 0
-; WASM32: get_local       $push52=, 5
+; WASM32: local.get       $push55=, 0
+; WASM32: local.get       $push52=, 5
 ; WASM32: i32.const       $push5=, 40
 ; WASM32: i32.add         $push6=, $pop52, $pop5
 ; WASM32: i64.load        $push33=, 0($pop6)
-; WASM32: tee_local       $push32=, 1, $pop33
-; WASM32: get_local       $push53=, 5
+; WASM32: local.tee       $push32=, 1, $pop33
+; WASM32: local.get       $push53=, 5
 ; WASM32: i64.load        $push3=, 0($pop53)
-; WASM32: get_local       $push54=, 5
+; WASM32: local.get       $push54=, 5
 ; WASM32: i64.load        $push2=, 16($pop54)
 ; WASM32: i64.add         $push4=, $pop3, $pop2
 ; WASM32: i64.add         $push31=, $pop32, $pop4
-; WASM32: tee_local       $push30=, 3, $pop31
+; WASM32: local.tee       $push30=, 3, $pop31
 ; WASM32: i64.store       8($pop55), $pop30
-; WASM32: get_local       $push62=, 0
-; WASM32: get_local       $push56=, 2
+; WASM32: local.get       $push62=, 0
+; WASM32: local.get       $push56=, 2
 ; WASM32: i64.const       $push29=, 0
 ; WASM32: i64.ne          $push8=, $pop56, $pop29
-; WASM32: get_local       $push57=, 4
+; WASM32: local.get       $push57=, 4
 ; WASM32: i64.const       $push28=, 0
 ; WASM32: i64.ne          $push7=, $pop57, $pop28
 ; WASM32: i32.and         $push9=, $pop8, $pop7
-; WASM32: get_local       $push58=, 5
+; WASM32: local.get       $push58=, 5
 ; WASM32: i64.load        $push10=, 8($pop58)
 ; WASM32: i64.const       $push27=, 0
 ; WASM32: i64.ne          $push11=, $pop10, $pop27
 ; WASM32: i32.or          $push12=, $pop9, $pop11
-; WASM32: get_local       $push59=, 5
+; WASM32: local.get       $push59=, 5
 ; WASM32: i64.load        $push13=, 24($pop59)
 ; WASM32: i64.const       $push26=, 0
 ; WASM32: i64.ne          $push14=, $pop13, $pop26
 ; WASM32: i32.or          $push15=, $pop12, $pop14
-; WASM32: get_local       $push61=, 3
-; WASM32: get_local       $push60=, 1
+; WASM32: local.get       $push61=, 3
+; WASM32: local.get       $push60=, 1
 ; WASM32: i64.lt_u        $push16=, $pop61, $pop60
 ; WASM32: i32.or          $push17=, $pop15, $pop16
 ; WASM32: i32.store8      16($pop62), $pop17
-; WASM32: get_local       $push63=, 5
+; WASM32: local.get       $push63=, 5
 ; WASM32: i32.const       $push20=, 48
 ; WASM32: i32.add         $push21=, $pop63, $pop20
-; WASM32: set_global      __stack_pointer@GLOBAL, $pop21
+; WASM32: global.set      __stack_pointer@GLOBAL, $pop21
 
 start:
   %0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2
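The WASM32 checks above trace the 32-bit lowering of an overflow-checked 128-bit multiply: three __multi3 libcalls compute the partial products into a stack temporary, and the i1 overflow flag is reassembled from i64.ne and i64.lt_u compares or'd together. The IR side of the pattern is just one intrinsic call; a minimal self-contained sketch:

; Overflow-checked i128 multiply. On wasm32 this expands to the
; __multi3 libcalls plus manual carry checks shown above.
declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128)

define i1 @mul_overflows(i128 %l, i128 %r) {
  %res = call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r)
  %ovf = extractvalue { i128, i1 } %res, 1
  ret i1 %ovf
}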
diff --git a/test/CodeGen/WebAssembly/userstack.ll b/test/CodeGen/WebAssembly/userstack.ll
index 9b4f283..f26240d 100644
--- a/test/CodeGen/WebAssembly/userstack.ll
+++ b/test/CodeGen/WebAssembly/userstack.ll
@@ -10,37 +10,37 @@
 ; Check that there is an extra local for the stack pointer.
 ; CHECK: .local i32{{$}}
 define void @alloca32() noredzone {
- ; CHECK-NEXT: get_global $push[[L2:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK-NEXT: global.get $push[[L2:.+]]=, __stack_pointer@GLOBAL{{$}}
  ; CHECK-NEXT: i32.const $push[[L3:.+]]=, 16
  ; CHECK-NEXT: i32.sub $push[[L9:.+]]=, $pop[[L2]], $pop[[L3]]
- ; CHECK-NEXT: tee_local $push[[L8:.+]]=, [[SP:.+]], $pop[[L9]]{{$}}
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L8]]{{$}}
+ ; CHECK-NEXT: local.tee $push[[L8:.+]]=, [[SP:.+]], $pop[[L9]]{{$}}
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L8]]{{$}}
  %retval = alloca i32
- ; CHECK: get_local $push[[L4:.+]]=, [[SP]]{{$}}
+ ; CHECK: local.get $push[[L4:.+]]=, [[SP]]{{$}}
  ; CHECK: i32.const $push[[L0:.+]]=, 0
  ; CHECK: i32.store 12($pop[[L4]]), $pop[[L0]]
  store i32 0, i32* %retval
- ; CHECK: get_local $push[[L6:.+]]=, [[SP]]{{$}}
+ ; CHECK: local.get $push[[L6:.+]]=, [[SP]]{{$}}
  ; CHECK-NEXT: i32.const $push[[L5:.+]]=, 16
  ; CHECK-NEXT: i32.add $push[[L7:.+]]=, $pop[[L6]], $pop[[L5]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L7]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L7]]
  ret void
 }
 
 ; CHECK-LABEL: alloca3264:
 ; CHECK: .local i32{{$}}
 define void @alloca3264() {
- ; CHECK: get_global $push[[L3:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK: global.get $push[[L3:.+]]=, __stack_pointer@GLOBAL{{$}}
  ; CHECK-NEXT: i32.const $push[[L4:.+]]=, 16
  ; CHECK-NEXT: i32.sub $push[[L6:.+]]=, $pop[[L3]], $pop[[L4]]
- ; CHECK-NEXT: tee_local $push[[L5:.+]]=, [[SP:.+]], $pop[[L6]]
+ ; CHECK-NEXT: local.tee $push[[L5:.+]]=, [[SP:.+]], $pop[[L6]]
  %r1 = alloca i32
  %r2 = alloca double
  store i32 0, i32* %r1
  store double 0.0, double* %r2
  ; CHECK-NEXT: i64.const $push[[L1:.+]]=, 0
  ; CHECK-NEXT: i64.store 0($pop[[L5]]), $pop[[L1]]
- ; CHECK-NEXT: get_local $push[[L2:.+]]=, [[SP]]{{$}}
+ ; CHECK-NEXT: local.get $push[[L2:.+]]=, [[SP]]{{$}}
  ; CHECK-NEXT: i32.const $push[[L0:.+]]=, 0
  ; CHECK-NEXT: i32.store 12($pop[[L2]]), $pop[[L0]]
  ; CHECK-NEXT: return
@@ -50,18 +50,18 @@
 ; CHECK-LABEL: allocarray:
 ; CHECK: .local i32{{$}}
 define void @allocarray() {
- ; CHECK-NEXT: get_global $push[[L4:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK-NEXT: global.get $push[[L4:.+]]=, __stack_pointer@GLOBAL{{$}}
  ; CHECK-NEXT: i32.const $push[[L5:.+]]=, 144{{$}}
  ; CHECK-NEXT: i32.sub $push[[L12:.+]]=, $pop[[L4]], $pop[[L5]]
- ; CHECK-NEXT: tee_local $push[[L11:.+]]=, 0, $pop[[L12]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L11]]
+ ; CHECK-NEXT: local.tee $push[[L11:.+]]=, 0, $pop[[L12]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L11]]
  %r = alloca [33 x i32]
 
  ; CHECK:      i32.const $push{{.+}}=, 24
  ; CHECK-NEXT: i32.add $push[[L3:.+]]=, $pop{{.+}}, $pop{{.+}}
  ; CHECK-NEXT: i32.const $push[[L1:.+]]=, 1{{$}}
  ; CHECK-NEXT: i32.store 0($pop[[L3]]), $pop[[L1]]{{$}}
- ; CHECK-NEXT: get_local $push[[L4:.+]]=, 0{{$}}
+ ; CHECK-NEXT: local.get $push[[L4:.+]]=, 0{{$}}
  ; CHECK-NEXT: i32.const $push[[L10:.+]]=, 1{{$}}
  ; CHECK-NEXT: i32.store 12($pop[[L4]]), $pop[[L10]]{{$}}
  %p = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 0
@@ -69,10 +69,10 @@
  %p2 = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 3
  store i32 1, i32* %p2
 
- ; CHECK-NEXT: get_local $push[[L2:.+]]=, [[SP]]{{$}}
+ ; CHECK-NEXT: local.get $push[[L2:.+]]=, [[SP]]{{$}}
  ; CHECK-NEXT: i32.const $push[[L7:.+]]=, 144
  ; CHECK-NEXT: i32.add $push[[L8:.+]]=, $pop[[L2]], $pop[[L7]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L8]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L8]]
  ret void
 }
 
@@ -80,24 +80,24 @@
 define void @non_mem_use(i8** %addr) {
  ; CHECK: i32.const $push[[L2:.+]]=, 48
  ; CHECK-NEXT: i32.sub $push[[L12:.+]]=, {{.+}}, $pop[[L2]]
- ; CHECK-NEXT: tee_local $push[[L11:.+]]=, [[SP:.+]], $pop[[L12]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L11]]
+ ; CHECK-NEXT: local.tee $push[[L11:.+]]=, [[SP:.+]], $pop[[L12]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L11]]
  %buf = alloca [27 x i8], align 16
  %r = alloca i64
  %r2 = alloca i64
  ; %r is at SP+8
- ; CHECK: get_local $push[[L3:.+]]=, [[SP]]
+ ; CHECK: local.get $push[[L3:.+]]=, [[SP]]
  ; CHECK: i32.const $push[[OFF:.+]]=, 8
  ; CHECK-NEXT: i32.add $push[[ARG1:.+]]=, $pop[[L3]], $pop[[OFF]]
  ; CHECK-NEXT: call ext_func@FUNCTION, $pop[[ARG1]]
  call void @ext_func(i64* %r)
  ; %r2 is at SP+0, no add needed
- ; CHECK: get_local $push[[L4:.+]]=, [[SP]]
+ ; CHECK: local.get $push[[L4:.+]]=, [[SP]]
  ; CHECK-NEXT: call ext_func@FUNCTION, $pop[[L4]]
  call void @ext_func(i64* %r2)
  ; Use as a value, but in a store
  ; %buf is at SP+16
- ; CHECK: get_local $push[[L5:.+]]=, [[SP]]
+ ; CHECK: local.get $push[[L5:.+]]=, [[SP]]
  ; CHECK: i32.const $push[[OFF:.+]]=, 16
  ; CHECK-NEXT: i32.add $push[[VAL:.+]]=, $pop[[L5]], $pop[[OFF]]
  ; CHECK-NEXT: i32.store 0($pop{{.+}}), $pop[[VAL]]
@@ -109,11 +109,11 @@
 ; CHECK-LABEL: allocarray_inbounds:
 ; CHECK: .local i32{{$}}
 define void @allocarray_inbounds() {
- ; CHECK: get_global $push[[L3:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK: global.get $push[[L3:.+]]=, __stack_pointer@GLOBAL{{$}}
  ; CHECK-NEXT: i32.const $push[[L4:.+]]=, 32{{$}}
  ; CHECK-NEXT: i32.sub $push[[L11:.+]]=, $pop[[L3]], $pop[[L4]]
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, [[SP:.+]], $pop[[L11]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
+ ; CHECK-NEXT: local.tee $push[[L10:.+]]=, [[SP:.+]], $pop[[L11]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L10]]{{$}}
  %r = alloca [5 x i32]
  ; CHECK: i32.const $push[[L3:.+]]=, 1
  ; CHECK-DAG: i32.store 24(${{.+}}), $pop[[L3]]
@@ -127,35 +127,35 @@
  ; CHECK: call ext_func
  ; CHECK: i32.const $push[[L5:.+]]=, 32{{$}}
  ; CHECK-NEXT: i32.add $push[[L7:.+]]=, ${{.+}}, $pop[[L5]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L7]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L7]]
  ret void
 }
 
 ; CHECK-LABEL: dynamic_alloca:
 define void @dynamic_alloca(i32 %alloc) {
- ; CHECK: get_global $push[[L13:.+]]=, __stack_pointer@GLOBAL{{$}}
- ; CHECK-NEXT: tee_local $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
+ ; CHECK: global.get $push[[L13:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK-NEXT: local.tee $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
  ; Target independent codegen bumps the stack pointer.
  ; CHECK: i32.sub
  ; Check that SP is written back to memory after decrement
- ; CHECK: set_global __stack_pointer@GLOBAL,
+ ; CHECK: global.set __stack_pointer@GLOBAL,
  %r = alloca i32, i32 %alloc
  ; Target-independent codegen also calculates the store addr
  ; CHECK: call ext_func_i32@FUNCTION
  call void @ext_func_i32(i32* %r)
- ; CHECK: set_global __stack_pointer@GLOBAL, $pop{{.+}}
+ ; CHECK: global.set __stack_pointer@GLOBAL, $pop{{.+}}
  ret void
 }
 
 ; CHECK-LABEL: dynamic_alloca_redzone:
 define void @dynamic_alloca_redzone(i32 %alloc) {
- ; CHECK: get_global $push[[L13:.+]]=, __stack_pointer@GLOBAL{{$}}
- ; CHECK-NEXT: tee_local $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
+ ; CHECK: global.get $push[[L13:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK-NEXT: local.tee $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
  ; Target independent codegen bumps the stack pointer
  ; CHECK: i32.sub
  %r = alloca i32, i32 %alloc
- ; CHECK-NEXT: tee_local       $push[[L8:.+]]=, {{.+}}, $pop
- ; CHECK: get_local $push[[L7:.+]]=, 0{{$}}
+ ; CHECK-NEXT: local.tee       $push[[L8:.+]]=, {{.+}}, $pop
+ ; CHECK: local.get $push[[L7:.+]]=, 0{{$}}
  ; CHECK-NEXT: i32.const       $push[[L6:.+]]=, 0{{$}}
  ; CHECK-NEXT: i32.store       0($pop[[L7]]), $pop[[L6]]{{$}}
  store i32 0, i32* %r
@@ -166,15 +166,15 @@
 ; CHECK-LABEL: dynamic_static_alloca:
 define void @dynamic_static_alloca(i32 %alloc) noredzone {
 ; Decrement SP in the prolog by the static amount and write it back to memory.
- ; CHECK: get_global $push[[L11:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK: global.get $push[[L11:.+]]=, __stack_pointer@GLOBAL{{$}}
  ; CHECK-NEXT: i32.const $push[[L12:.+]]=, 16
  ; CHECK-NEXT: i32.sub $push[[L23:.+]]=, $pop[[L11]], $pop[[L12]]
- ; CHECK-NEXT: tee_local $push[[L22:.+]]=, [[SP:.+]], $pop[[L23]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L22]]
+ ; CHECK-NEXT: local.tee $push[[L22:.+]]=, [[SP:.+]], $pop[[L23]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L22]]
 
  ; Alloc and write to a static alloca
- ; CHECK: get_local $push[[L21:.+]]=, [[SP:.+]]
- ; CHECK-NEXT: tee_local $push[[pushedFP:.+]]=, [[FP:.+]], $pop[[L21]]
+ ; CHECK: local.get $push[[L21:.+]]=, [[SP:.+]]
+ ; CHECK-NEXT: local.tee $push[[pushedFP:.+]]=, [[FP:.+]], $pop[[L21]]
  ; CHECK-NEXT: i32.const $push[[L0:.+]]=, 101
  ; CHECK-NEXT: i32.store [[static_offset:.+]]($pop[[pushedFP]]), $pop[[L0]]
  %static = alloca i32
@@ -182,19 +182,19 @@
 
  ; Decrement SP in the body by the dynamic amount.
  ; CHECK: i32.sub
- ; CHECK: tee_local $push[[L16:.+]]=, [[dynamic_local:.+]], $pop{{.+}}
- ; CHECK: tee_local $push[[L15:.+]]=, [[other:.+]], $pop[[L16]]{{$}}
- ; CHECK: set_global __stack_pointer@GLOBAL, $pop[[L15]]{{$}}
+ ; CHECK: local.tee $push[[L16:.+]]=, [[dynamic_local:.+]], $pop{{.+}}
+ ; CHECK: local.tee $push[[L15:.+]]=, [[other:.+]], $pop[[L16]]{{$}}
+ ; CHECK: global.set __stack_pointer@GLOBAL, $pop[[L15]]{{$}}
  %dynamic = alloca i32, i32 %alloc
 
  ; Ensure we don't modify the frame pointer after assigning it.
  ; CHECK-NOT: $[[FP]]=
 
  ; Ensure the static address doesn't change after modifying the stack pointer.
- ; CHECK: get_local $push[[L17:.+]]=, [[FP]]
+ ; CHECK: local.get $push[[L17:.+]]=, [[FP]]
  ; CHECK: i32.const $push[[L7:.+]]=, 102
  ; CHECK-NEXT: i32.store [[static_offset]]($pop[[L17]]), $pop[[L7]]
- ; CHECK-NEXT: get_local $push[[L9:.+]]=, [[dynamic_local]]{{$}}
+ ; CHECK-NEXT: local.get $push[[L9:.+]]=, [[dynamic_local]]{{$}}
  ; CHECK-NEXT: i32.const $push[[L8:.+]]=, 103
  ; CHECK-NEXT: i32.store 0($pop[[L9]]), $pop[[L8]]
  store volatile i32 102, i32* %static
@@ -202,20 +202,20 @@
 
  ; Decrement SP in the body by the dynamic amount.
  ; CHECK: i32.sub
- ; CHECK: tee_local $push{{.+}}=, [[dynamic2_local:.+]], $pop{{.+}}
+ ; CHECK: local.tee $push{{.+}}=, [[dynamic2_local:.+]], $pop{{.+}}
  %dynamic.2 = alloca i32, i32 %alloc
 
  ; CHECK-NOT: $[[FP]]=
 
  ; Ensure neither the static nor dynamic address changes after the second
  ; modification of the stack pointer.
- ; CHECK: get_local $push[[L22:.+]]=, [[FP]]
+ ; CHECK: local.get $push[[L22:.+]]=, [[FP]]
  ; CHECK: i32.const $push[[L9:.+]]=, 104
  ; CHECK-NEXT: i32.store [[static_offset]]($pop[[L22]]), $pop[[L9]]
- ; CHECK-NEXT: get_local $push[[L23:.+]]=, [[dynamic_local]]
+ ; CHECK-NEXT: local.get $push[[L23:.+]]=, [[dynamic_local]]
  ; CHECK-NEXT: i32.const $push[[L10:.+]]=, 105
  ; CHECK-NEXT: i32.store 0($pop[[L23]]), $pop[[L10]]
- ; CHECK-NEXT: get_local $push[[L23:.+]]=, [[dynamic2_local]]
+ ; CHECK-NEXT: local.get $push[[L23:.+]]=, [[dynamic2_local]]
  ; CHECK-NEXT: i32.const $push[[L11:.+]]=, 106
  ; CHECK-NEXT: i32.store 0($pop[[L23]]), $pop[[L11]]
  store volatile i32 104, i32* %static
@@ -223,10 +223,10 @@
  store volatile i32 106, i32* %dynamic.2
 
  ; Writeback to memory.
- ; CHECK: get_local $push[[L24:.+]]=, [[FP]]{{$}}
+ ; CHECK: local.get $push[[L24:.+]]=, [[FP]]{{$}}
  ; CHECK: i32.const $push[[L18:.+]]=, 16
  ; CHECK-NEXT: i32.add $push[[L19:.+]]=, $pop[[L24]], $pop[[L18]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L19]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L19]]
  ret void
 }
 
@@ -235,17 +235,17 @@
 
 ; CHECK-LABEL: llvm_stack_builtins:
 define void @llvm_stack_builtins(i32 %alloc) noredzone {
- ; CHECK: get_global $push[[L11:.+]]=, __stack_pointer@GLOBAL{{$}}
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
- ; CHECK-NEXT: set_local [[STACK:.+]], $pop[[L10]]
+ ; CHECK: global.get $push[[L11:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK-NEXT: local.tee $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
+ ; CHECK-NEXT: local.set [[STACK:.+]], $pop[[L10]]
  %stack = call i8* @llvm.stacksave()
 
  ; Ensure we don't reassign the stacksave local
- ; CHECK-NOT: set_local [[STACK]],
+ ; CHECK-NOT: local.set [[STACK]],
  %dynamic = alloca i32, i32 %alloc
 
- ; CHECK: get_local $push[[L12:.+]]=, [[STACK]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L12]]
+ ; CHECK: local.get $push[[L12:.+]]=, [[STACK]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L12]]
  call void @llvm.stackrestore(i8* %stack)
 
  ret void
@@ -256,15 +256,15 @@
 ; moved after the stack pointer was updated for the dynamic alloca.
 ; CHECK-LABEL: dynamic_alloca_nouse:
 define void @dynamic_alloca_nouse(i32 %alloc) noredzone {
- ; CHECK: get_global $push[[L11:.+]]=, __stack_pointer@GLOBAL{{$}}
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
- ; CHECK-NEXT: set_local [[FP:.+]], $pop[[L10]]
+ ; CHECK: global.get $push[[L11:.+]]=, __stack_pointer@GLOBAL{{$}}
+ ; CHECK-NEXT: local.tee $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
+ ; CHECK-NEXT: local.set [[FP:.+]], $pop[[L10]]
  %dynamic = alloca i32, i32 %alloc
 
- ; CHECK-NOT: set_local [[FP]],
+ ; CHECK-NOT: local.set [[FP]],
 
- ; CHECK: get_local $push[[L12:.+]]=, [[FP]]
- ; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L12]]
+ ; CHECK: local.get $push[[L12:.+]]=, [[FP]]
+ ; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L12]]
  ret void
 }
 
@@ -278,12 +278,12 @@
  %addr = alloca i32
  ; CHECK: i32.const $push[[OFF:.+]]=, 12
  ; CHECK-NEXT: i32.add $push[[ADDR:.+]]=, $pop[[L3]], $pop[[OFF]]
- ; CHECK-NEXT: set_local [[COPY:.+]], $pop[[ADDR]]
+ ; CHECK-NEXT: local.set [[COPY:.+]], $pop[[ADDR]]
  br label %body
 body:
  %a = phi i32* [%addr, %entry], [%b, %body]
  store i32 1, i32* %a
- ; CHECK: get_local $push[[L12:.+]]=, [[COPY]]
+ ; CHECK: local.get $push[[L12:.+]]=, [[COPY]]
  ; CHECK: i32.store 0($pop[[L12]]),
  br i1 %cond, label %body, label %exit
 exit:
@@ -295,11 +295,11 @@
 
 ; Test __builtin_frame_address(0).
 ; CHECK-LABEL: frameaddress_0:
-; CHECK: get_global $push[[L3:.+]]=, __stack_pointer@GLOBAL{{$}}
-; CHECK-NEXT: tee_local $push[[L2:.+]]=, [[FP:.+]], $pop[[L3]]{{$}}
+; CHECK: global.get $push[[L3:.+]]=, __stack_pointer@GLOBAL{{$}}
+; CHECK-NEXT: local.tee $push[[L2:.+]]=, [[FP:.+]], $pop[[L3]]{{$}}
 ; CHECK-NEXT: call use_i8_star@FUNCTION, $pop[[L2]]
-; CHECK-NEXT: get_local $push[[L5:.+]]=, [[FP]]
-; CHECK-NEXT: set_global __stack_pointer@GLOBAL, $pop[[L5]]
+; CHECK-NEXT: local.get $push[[L5:.+]]=, [[FP]]
+; CHECK-NEXT: global.set __stack_pointer@GLOBAL, $pop[[L5]]
 define void @frameaddress_0() {
   %t = call i8* @llvm.frameaddress(i32 0)
   call void @use_i8_star(i8* %t)
@@ -320,7 +320,7 @@
 
 ; Test a stack address passed to an inline asm.
 ; CHECK-LABEL: inline_asm:
-; CHECK:       get_global {{.+}}, __stack_pointer@GLOBAL{{$}}
+; CHECK:       global.get {{.+}}, __stack_pointer@GLOBAL{{$}}
 ; CHECK:       #APP
 ; CHECK-NEXT:  # %{{[0-9]+}}{{$}}
 ; CHECK-NEXT:  #NO_APP
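The llvm_stack_builtins and dynamic_alloca_nouse functions above check the stack-save builtins: llvm.stacksave copies the live stack pointer into a local (local.set), llvm.stackrestore writes that local back to __stack_pointer (global.set), and the saved local is never reassigned in between. A minimal sketch of the IR shape under test:

declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)

define void @save_restore(i32 %n) {
  %saved = call i8* @llvm.stacksave()
  ; The dynamic alloca moves the stack pointer after the save.
  %p = alloca i32, i32 %n
  store volatile i32 0, i32* %p
  ; Restoring releases everything allocated since the save.
  call void @llvm.stackrestore(i8* %saved)
  ret void
}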
diff --git a/test/CodeGen/WebAssembly/varargs.ll b/test/CodeGen/WebAssembly/varargs.ll
index 0ea2976..1a73716 100644
--- a/test/CodeGen/WebAssembly/varargs.ll
+++ b/test/CodeGen/WebAssembly/varargs.ll
@@ -52,7 +52,7 @@
 ; CHECK-LABEL: arg_i8:
 ; CHECK-NEXT: .functype arg_i8 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load   $push[[NUM0:[0-9]+]]=, 0($0){{$}}
-; CHECK-NEXT: tee_local  $push[[NUM1:[0-9]+]]=, $1=, $pop[[NUM0]]{{$}}
+; CHECK-NEXT: local.tee  $push[[NUM1:[0-9]+]]=, $1=, $pop[[NUM0]]{{$}}
 ; CHECK-NEXT: i32.const  $push[[NUM2:[0-9]+]]=, 4{{$}}
 ; CHECK-NEXT: i32.add    $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: i32.store  0($0), $pop[[NUM3]]{{$}}
@@ -73,7 +73,7 @@
 ; CHECK-NEXT: i32.add    $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: i32.const  $push[[NUM3:[0-9]+]]=, -4{{$}}
 ; CHECK-NEXT: i32.and    $push[[NUM4:[0-9]+]]=, $pop[[NUM2]], $pop[[NUM3]]{{$}}
-; CHECK-NEXT: tee_local  $push[[NUM5:[0-9]+]]=, $1=, $pop[[NUM4]]{{$}}
+; CHECK-NEXT: local.tee  $push[[NUM5:[0-9]+]]=, $1=, $pop[[NUM4]]{{$}}
 ; CHECK-NEXT: i32.const  $push[[NUM6:[0-9]+]]=, 4{{$}}
 ; CHECK-NEXT: i32.add    $push[[NUM7:[0-9]+]]=, $pop[[NUM5]], $pop[[NUM6]]{{$}}
 ; CHECK-NEXT: i32.store  0($0), $pop[[NUM7]]{{$}}
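The varargs checks describe the generic va_arg lowering on this target: load the current argument pointer out of the va_list, round it up to the argument's alignment when necessary (the add 3 / and -4 pair), tee it, advance it past the slot, and store it back. On WebAssembly the va_list is a simple byte pointer, so the IR input reduces to the following (hypothetical function, expectations elided; see arg_i8 above for the full sequence):

define i32 @arg_i32_sketch(i8** %ap) {
  ; Loads *%ap, bumps it past the 4-byte slot, stores it back, and
  ; returns the value read from the old position.
  %t = va_arg i8** %ap, i32
  ret i32 %t
}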
diff --git a/test/CodeGen/WinEH/wineh-statenumbering.ll b/test/CodeGen/WinEH/wineh-statenumbering.ll
index d5c330b..4f0b553 100644
--- a/test/CodeGen/WinEH/wineh-statenumbering.ll
+++ b/test/CodeGen/WinEH/wineh-statenumbering.ll
@@ -180,7 +180,7 @@
 define internal i32 @"\01?filt$0@0@required_state_store@@"() {
 entry:
   %0 = tail call i8* @llvm.frameaddress(i32 1)
-  %1 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (void (i1)* @required_state_store to i8*), i8* %0)
+  %1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (void (i1)* @required_state_store to i8*), i8* %0)
   %2 = tail call i8* @llvm.localrecover(i8* bitcast (void (i1)* @required_state_store to i8*), i8* %1, i32 0)
   %__exception_code = bitcast i8* %2 to i32*
   %3 = getelementptr inbounds i8, i8* %0, i32 -20
@@ -203,7 +203,7 @@
 
 declare i8* @llvm.frameaddress(i32)
 
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
 
 declare i8* @llvm.localrecover(i8*, i8*, i32)
 
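This rename tracks the generalization of the x86-only llvm.x86.seh.recoverfp intrinsic into the target-independent llvm.eh.recoverfp; the signature and semantics are unchanged, the intrinsic is simply no longer x86-specific. A sketch of the usual use in an SEH filter, with hypothetical function names:

declare i8* @llvm.frameaddress(i32)
declare i8* @llvm.eh.recoverfp(i8*, i8*)
declare void @a_parent_fn()

define i8* @filter_sketch() {
  ; Frame pointer of the dynamic parent frame...
  %fp = call i8* @llvm.frameaddress(i32 1)
  ; ...mapped back to the establisher frame of @a_parent_fn. The first
  ; operand must be the parent function, bitcast to i8*.
  %parent = call i8* @llvm.eh.recoverfp(i8* bitcast (void ()* @a_parent_fn to i8*), i8* %fp)
  ret i8* %parent
}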
diff --git a/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll b/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
index 31c6b53..3997cfb 100644
--- a/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
+++ b/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -relocation-model=pic --disable-fp-elim
+; RUN: llc < %s -mtriple=i686-apple-darwin -relocation-model=pic --frame-pointer=all
 
 	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 	%struct.__sFILEX = type opaque
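From here on, most of the X86 RUN-line updates are a single mechanical substitution: the boolean -disable-fp-elim flag was replaced by the three-valued -frame-pointer option, and the old flag corresponds to -frame-pointer=all. As an .ll-comment sketch of the mapping:

; Old spelling (removed):
;   RUN: llc < %s -disable-fp-elim
; New spelling with the same meaning (keep the frame pointer in every
; function):
;   RUN: llc < %s -frame-pointer=all
; The other settings are -frame-pointer=non-leaf and -frame-pointer=none.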
diff --git a/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll b/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
index 9fb325c..00423d6 100644
--- a/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
+++ b/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=i386-pc-linux-gnu -relocation-model=pic -frame-pointer=all
 ; PR2134
 
 declare fastcc i8* @w_addchar(i8*, i32*, i32*, i8 signext ) nounwind 
diff --git a/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll b/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
index 3cc3b83..fd0b3e7 100644
--- a/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
+++ b/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -disable-fp-elim | grep add | grep 12 | not grep non_lazy_ptr
+; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -frame-pointer=all | grep add | grep 12 | not grep non_lazy_ptr
 ; Don't fold re-materialized load into a two address instruction
 
 	%"struct.Smarts::Runnable" = type { i32 (...)**, i32 }
diff --git a/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll b/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
index b526591..175f1d9 100644
--- a/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
+++ b/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -disable-fp-elim -O0 -regalloc=fast
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -frame-pointer=all -O0 -regalloc=fast
 ; PR5534
 
 	%struct.CGPoint = type { double, double }
diff --git a/test/CodeGen/X86/2008-09-29-ReMatBug.ll b/test/CodeGen/X86/2008-09-29-ReMatBug.ll
index cc481a0..90f3ffd 100644
--- a/test/CodeGen/X86/2008-09-29-ReMatBug.ll
+++ b/test/CodeGen/X86/2008-09-29-ReMatBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -frame-pointer=all
 
 	%struct..0objc_selector = type opaque
 	%struct.NSString = type opaque
diff --git a/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll b/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
index 1d03a1b..2d19803 100644
--- a/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
+++ b/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
@@ -1,5 +1,5 @@
 ; REQUIRES: asserts
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim -stats 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -frame-pointer=all -stats 2>&1 | FileCheck %s
 ; XFAIL: *
 ; 69408 removed the opportunity for this optimization to work
 
diff --git a/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll b/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
index c291fed..80d4b0b 100644
--- a/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
+++ b/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -disable-fp-elim -relocation-model=pic
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -frame-pointer=all -relocation-model=pic
 ; PR4099
 
 	%0 = type { [62 x %struct.Bitvec*] }		; type %0
diff --git a/test/CodeGen/X86/2009-06-02-RewriterBug.ll b/test/CodeGen/X86/2009-06-02-RewriterBug.ll
index 6ce7af6..f085338 100644
--- a/test/CodeGen/X86/2009-06-02-RewriterBug.ll
+++ b/test/CodeGen/X86/2009-06-02-RewriterBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-undermydesk-freebsd8.0 -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=x86_64-undermydesk-freebsd8.0 -relocation-model=pic -frame-pointer=all
 ; PR4225
 
 define void @sha256_block1(i32* nocapture %arr, i8* nocapture %in, i64 %num) nounwind {
diff --git a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
index aa88576..7d1a300 100644
--- a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
+++ b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -frame-pointer=all | FileCheck %s
 
 ; It's not legal to fold a load from 32-bit stack slot into a 64-bit
 ; instruction. If done, the instruction does a 64-bit load and that's not
diff --git a/test/CodeGen/X86/2009-10-19-EmergencySpill.ll b/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
index ec73f5a..2b0ecc3 100644
--- a/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
+++ b/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -disable-fp-elim
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -frame-pointer=all
 ; rdar://7291624
 
 %union.RtreeCoord = type { float }
diff --git a/test/CodeGen/X86/2009-10-25-RewriterBug.ll b/test/CodeGen/X86/2009-10-25-RewriterBug.ll
index be18186..ad78f96 100644
--- a/test/CodeGen/X86/2009-10-25-RewriterBug.ll
+++ b/test/CodeGen/X86/2009-10-25-RewriterBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -frame-pointer=all
 
 %struct.DecRefPicMarking_t = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_t* }
 %struct.FrameStore = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.StorablePicture*, %struct.StorablePicture*, %struct.StorablePicture* }
diff --git a/test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll b/test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll
index 5398eef..3608e09 100644
--- a/test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll
+++ b/test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -frame-pointer=all
 ; rdar://7394770
 
 %struct.JVTLib_100487 = type <{ i8 }>
diff --git a/test/CodeGen/X86/2010-01-19-OptExtBug.ll b/test/CodeGen/X86/2010-01-19-OptExtBug.ll
index def8dd3..a3e3d3d 100644
--- a/test/CodeGen/X86/2010-01-19-OptExtBug.ll
+++ b/test/CodeGen/X86/2010-01-19-OptExtBug.ll
@@ -1,5 +1,5 @@
 ; REQUIRES: asserts
-; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -relocation-model=pic -disable-fp-elim -stats 2>&1 | not grep ext-opt
+; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -relocation-model=pic -frame-pointer=all -stats 2>&1 | not grep ext-opt
 
 define fastcc i8* @S_scan_str(i8* %start, i32 %keep_quoted, i32 %keep_delims) nounwind ssp {
 entry:
diff --git a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
index 41c318b..1f2fe32 100644
--- a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
+++ b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -relocation-model=pic -disable-fp-elim -mcpu=nocona
+; RUN: llc < %s -O3 -relocation-model=pic -frame-pointer=all -mcpu=nocona
 ;
 ; This test case is reduced from Bullet. It crashes SSEDomainFix.
 ;
diff --git a/test/CodeGen/X86/2010-04-29-CoalescerCrash.ll b/test/CodeGen/X86/2010-04-29-CoalescerCrash.ll
index a22f38a..7b34489 100644
--- a/test/CodeGen/X86/2010-04-29-CoalescerCrash.ll
+++ b/test/CodeGen/X86/2010-04-29-CoalescerCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic -disable-fp-elim -verify-machineinstrs
+; RUN: llc < %s -relocation-model=pic -frame-pointer=all -verify-machineinstrs
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 
diff --git a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
index 24abb71..615a572 100644
--- a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
+++ b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -regalloc=fast -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -O0 -regalloc=fast -relocation-model=pic -frame-pointer=all | FileCheck %s
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
 target triple = "i386-apple-darwin10.0.0"
 
diff --git a/test/CodeGen/X86/2010-06-09-FastAllocRegisters.ll b/test/CodeGen/X86/2010-06-09-FastAllocRegisters.ll
index 7c7792a..1557fcb 100644
--- a/test/CodeGen/X86/2010-06-09-FastAllocRegisters.ll
+++ b/test/CodeGen/X86/2010-06-09-FastAllocRegisters.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -disable-fp-elim -relocation-model=pic
+; RUN: llc < %s -O0 -frame-pointer=all -relocation-model=pic
 ; PR7313
 ;
 ; The inline asm in this function clobbers almost all allocatable registers.
diff --git a/test/CodeGen/X86/2010-06-24-g-constraint-crash.ll b/test/CodeGen/X86/2010-06-24-g-constraint-crash.ll
index 905b34f..91e6d18 100644
--- a/test/CodeGen/X86/2010-06-24-g-constraint-crash.ll
+++ b/test/CodeGen/X86/2010-06-24-g-constraint-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -mtriple=x86_64-apple-darwin10 -disable-fp-elim -o /dev/null
+; RUN: llc %s -mtriple=x86_64-apple-darwin10 -frame-pointer=all -o /dev/null
 ; Formerly crashed, rdar://8015842
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll b/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
index dd7c3fa..19e3d71 100644
--- a/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
+++ b/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O1 -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 -relocation-model=pic -disable-fp-elim < %s | FileCheck %s
+; RUN: llc -O1 -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 -relocation-model=pic -frame-pointer=all < %s | FileCheck %s
 ; <rdar://problem/8124405>
 
 %struct.type = type { %struct.subtype*, i32, i8, i32, i8, i32, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, [256 x i32], i32, [257 x i32], [257 x i32], i32*, i16*, i8*, i32, i32, i32, i32, i32, [256 x i8], [16 x i8], [256 x i8], [4096 x i8], [16 x i32], [18002 x i8], [18002 x i8], [6 x [258 x i8]], [6 x [258 x i32]], [6 x [258 x i32]], [6 x [258 x i32]], [6 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32*, i32*, i32* }
diff --git a/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll b/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
index 8f5f083..44f6c73 100644
--- a/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
+++ b/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -mtriple=i686-pc-mingw32 -no-integrated-as
+; RUN: llc < %s -frame-pointer=all -mtriple=i686-pc-mingw32 -no-integrated-as
 
 %struct.__SEH2Frame = type {}
 
diff --git a/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll b/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
index 1285d20..c438335 100644
--- a/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
+++ b/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
@@ -1,5 +1,5 @@
 ; REQUIRES: asserts
-; RUN: llc < %s -O0 -disable-fp-elim -relocation-model=pic -stats 2>&1 | FileCheck %s
+; RUN: llc < %s -O0 -frame-pointer=all -relocation-model=pic -stats 2>&1 | FileCheck %s
 ;
 ; This test should not cause any spilling with RAFast.
 ;
diff --git a/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll b/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
index f1b1a70..93aabb9 100644
--- a/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
+++ b/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-- < %s -disable-fp-elim | FileCheck %s
+; RUN: llc -mtriple=x86_64-- < %s -frame-pointer=all | FileCheck %s
 
 ; This test is checking that we don't crash and we don't incorrectly fold
 ; a large displacement and a frame index into a single lea.
diff --git a/test/CodeGen/X86/2011-12-28-vselecti8.ll b/test/CodeGen/X86/2011-12-28-vselecti8.ll
index c916466..d564e9f 100644
--- a/test/CodeGen/X86/2011-12-28-vselecti8.ll
+++ b/test/CodeGen/X86/2011-12-28-vselecti8.ll
@@ -1,9 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin  -mcpu=corei7 | FileCheck %s
 ; ModuleID = '<stdin>'
+
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-darwin11.2.0"
 
-; During legalization, the vselect mask is 'type legalized' into a 
+; During legalization, the vselect mask is 'type legalized' into a
 ; wider BUILD_VECTOR. This causes the introduction of a new
 ; sign_extend_inreg in the DAG.
 ;
@@ -13,12 +15,16 @@
 ; Make sure that the sign_extend_inreg is simplified and that we
 ; don't generate psll, psraw and pblendvb from the vselect.
 
-; CHECK-LABEL: foo8
-; CHECK-NOT: psll
-; CHECK-NOT: psraw
-; CHECK-NOT: pblendvb
-; CHECK: ret
 define void @foo8(float* nocapture %RET) nounwind {
+; CHECK-LABEL: foo8:
+; CHECK:       ## %bb.0: ## %allocas
+; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; CHECK-NEXT:    cvtdq2ps %xmm0, %xmm0
+; CHECK-NEXT:    cvtdq2ps %xmm1, %xmm1
+; CHECK-NEXT:    movups %xmm1, 16(%rdi)
+; CHECK-NEXT:    movups %xmm0, (%rdi)
+; CHECK-NEXT:    retq
 allocas:
   %resultvec.i = select <8 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <8 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <8 x i8> <i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100>
   %uint2float = uitofp <8 x i8> %resultvec.i to <8 x float>
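This test was regenerated with the check-autogeneration script instead of being hand-edited, which is why the loose CHECK-NOT lines give way to the exact pmovzxbd/cvtdq2ps sequence. A sketch of the regeneration step; the build path is illustrative:

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Regenerate from the llvm source root with:
;   utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
;     test/CodeGen/X86/2011-12-28-vselecti8.ll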
diff --git a/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
index 97a3389..8f5d46c 100644
--- a/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
+++ b/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim
+; RUN: llc < %s -frame-pointer=all
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
 target triple = "i386-apple-macosx10.7"
 
diff --git a/test/CodeGen/X86/GlobalISel/x86_64-legalize-sitofp.mir b/test/CodeGen/X86/GlobalISel/x86_64-legalize-sitofp.mir
index a8be320..499532d 100644
--- a/test/CodeGen/X86/GlobalISel/x86_64-legalize-sitofp.mir
+++ b/test/CodeGen/X86/GlobalISel/x86_64-legalize-sitofp.mir
@@ -69,7 +69,7 @@
   !llvm.ident = !{!1}
 
   !0 = !{i32 1, !"wchar_size", i32 4}
-  !1 = !{!"clang version 7.0.0 (http://llvm.org/git/clang.git a05f37359b23be7c068e19968c8f106edf6f2b34) (http://llvm.org/git/llvm.git d693de1fee74d455e20f96006aac50317ca1da6b)"}
+  !1 = !{!"clang version 7.0.0"}
 
 ...
 ---
diff --git a/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir b/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir
index 896c2f4..eeb976c 100644
--- a/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir
+++ b/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir
@@ -41,7 +41,7 @@
   !llvm.ident = !{!1}
 
   !0 = !{i32 1, !"wchar_size", i32 4}
-  !1 = !{!"clang version 7.0.0 (http://llvm.org/git/clang.git a05f37359b23be7c068e19968c8f106edf6f2b34) (http://llvm.org/git/llvm.git d693de1fee74d455e20f96006aac50317ca1da6b)"}
+  !1 = !{!"clang version 7.0.0"}
 
 ...
 ---
diff --git a/test/CodeGen/X86/addr-of-ret-addr.ll b/test/CodeGen/X86/addr-of-ret-addr.ll
index 67ebb7f..8483689 100644
--- a/test/CodeGen/X86/addr-of-ret-addr.ll
+++ b/test/CodeGen/X86/addr-of-ret-addr.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -disable-fp-elim -mtriple=i686-- | FileCheck %s --check-prefix=CHECK-X86
-; RUN: llc < %s -disable-fp-elim -mtriple=x86_64-- | FileCheck %s --check-prefix=CHECK-X64
+; RUN: llc < %s -frame-pointer=all -mtriple=i686-- | FileCheck %s --check-prefix=CHECK-X86
+; RUN: llc < %s -frame-pointer=all -mtriple=x86_64-- | FileCheck %s --check-prefix=CHECK-X64
 
 define i8* @f() nounwind readnone optsize {
 entry:
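These RUN-line updates, here and in the surrounding tests, track a flag rename: the boolean -disable-fp-elim option was replaced by the multi-valued -frame-pointer= option, and -frame-pointer=all reproduces the old behavior of keeping a frame pointer in every function. Equivalent spellings for reference (illustrative, not part of this patch):

; old: llc < %s -disable-fp-elim
; new: llc < %s -frame-pointer=all       (keep the frame pointer in all functions)
;      llc < %s -frame-pointer=non-leaf  (keep it except in leaf functions)
;      llc < %s -frame-pointer=none      (permit frame-pointer elimination everywhere)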
diff --git a/test/CodeGen/X86/and-load-fold.ll b/test/CodeGen/X86/and-load-fold.ll
index 4c49df1..367ef2a 100644
--- a/test/CodeGen/X86/and-load-fold.ll
+++ b/test/CodeGen/X86/and-load-fold.ll
@@ -7,10 +7,8 @@
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; CHECK-NEXT:    pand {{.*}}(%rip), %xmm0
-; CHECK-NEXT:    pextrw $4, %xmm0, %eax
+; CHECK-NEXT:    pextrw $1, %xmm0, %eax
+; CHECK-NEXT:    andb $95, %al
 ; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq
   %Vp = bitcast <4 x i8>* %V to <3 x i8>*
diff --git a/test/CodeGen/X86/anyregcc.ll b/test/CodeGen/X86/anyregcc.ll
index b75774a..7cf6d71 100644
--- a/test/CodeGen/X86/anyregcc.ll
+++ b/test/CodeGen/X86/anyregcc.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin                  -disable-fp-elim | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7     -disable-fp-elim | FileCheck --check-prefix=SSE %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim | FileCheck --check-prefix=AVX %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin                  -frame-pointer=all | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7     -frame-pointer=all | FileCheck --check-prefix=SSE %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -frame-pointer=all | FileCheck --check-prefix=AVX %s
 
 
 ; Stackmap Header: no constants - 6 callsites
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 84b3b00..1e6d0e8 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -1956,12 +1956,9 @@
 define <4 x i64> @test_mm256_set1_epi64x(i64 %a0) nounwind {
 ; X86-LABEL: test_mm256_set1_epi64x:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    vmovd %ecx, %xmm0
-; X86-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; X86-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; X86-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; X86-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; X86-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
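The tightened checks reflect a better 32-bit lowering for this splat: instead of scalarizing the i64 into four vpinsrd inserts, the low 64 bits are assembled once and duplicated in-register with vpshufd before the vinsertf128. Reduced to its core, the pattern being lowered is the usual insertelement-plus-shufflevector splat (hypothetical reduction, not part of this test):

define <4 x i64> @splat_i64(i64 %v) {
  %ins = insertelement <4 x i64> undef, i64 %v, i32 0
  %splat = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
  ret <4 x i64> %splat
}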
diff --git a/test/CodeGen/X86/avx-vbroadcast.ll b/test/CodeGen/X86/avx-vbroadcast.ll
index b136c72..1572386 100644
--- a/test/CodeGen/X86/avx-vbroadcast.ll
+++ b/test/CodeGen/X86/avx-vbroadcast.ll
@@ -6,12 +6,8 @@
 ; X32-LABEL: A:
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    movl 4(%eax), %eax
-; X32-NEXT:    vmovd %ecx, %xmm0
-; X32-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
@@ -31,17 +27,19 @@
 define <4 x i64> @A2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: A2:
 ; X32:       ## %bb.0: ## %entry
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    .cfi_offset %esi, -8
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl (%ecx), %edx
-; X32-NEXT:    movl 4(%ecx), %ecx
-; X32-NEXT:    movl %ecx, 4(%eax)
+; X32-NEXT:    movl 4(%ecx), %esi
+; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movl %edx, (%eax)
-; X32-NEXT:    vmovd %edx, %xmm0
-; X32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; X32-NEXT:    movl %esi, 4(%eax)
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT:    popl %esi
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: A2:
@@ -592,12 +590,8 @@
 ; X32-LABEL: G:
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    movl 4(%eax), %eax
-; X32-NEXT:    vmovd %ecx, %xmm0
-; X32-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: G:
@@ -615,16 +609,18 @@
 define <2 x i64> @G2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: G2:
 ; X32:       ## %bb.0: ## %entry
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    .cfi_def_cfa_offset 8
+; X32-NEXT:    .cfi_offset %esi, -8
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl (%ecx), %edx
-; X32-NEXT:    movl 4(%ecx), %ecx
-; X32-NEXT:    movl %ecx, 4(%eax)
+; X32-NEXT:    movl 4(%ecx), %esi
+; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movl %edx, (%eax)
-; X32-NEXT:    vmovd %edx, %xmm0
-; X32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
-; X32-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; X32-NEXT:    movl %esi, 4(%eax)
+; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X32-NEXT:    popl %esi
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: G2:
@@ -879,11 +875,11 @@
 ; X32-NEXT:    movl %esi, (%esp)
 ; X32-NEXT:    calll _gfunc
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## 4-byte Spill
+; X32-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
 ; X32-NEXT:    movl %esi, (%esp)
 ; X32-NEXT:    calll _gfunc
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    vsubss {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 4-byte Folded Reload
+; X32-NEXT:    vsubss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 ## 4-byte Folded Reload
 ; X32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp)
 ; X32-NEXT:    flds {{[0-9]+}}(%esp)
 ; X32-NEXT:    addl $40, %esp
@@ -896,11 +892,11 @@
 ; X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdi
 ; X64-NEXT:    callq _gfunc
 ; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-NEXT:    vmovss %xmm0, {{[0-9]+}}(%rsp) ## 4-byte Spill
+; X64-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
 ; X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdi
 ; X64-NEXT:    callq _gfunc
 ; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-NEXT:    vsubss {{[0-9]+}}(%rsp), %xmm0, %xmm0 ## 4-byte Folded Reload
+; X64-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 ## 4-byte Folded Reload
 ; X64-NEXT:    addq $40, %rsp
 ; X64-NEXT:    retq
   %1 = alloca <4 x float>, align 16
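Two independent updates meet in this file: the 64-bit broadcasts now go through vmovsd plus vpermilps rather than four vpinsrd inserts (which is presumably why A2 and G2 now need %esi and its CFI bookkeeping), and the spill/reload checks are hardened against frame-layout churn. How the hardened pattern decomposes (annotation only):

;   {{[-0-9]+}}    matches any positive or negative decimal offset
;   (%e{{[sb]}}p)  matches (%esp) or (%ebp), whichever register addresses the slot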
diff --git a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
index e0c7a5e..d230ffe 100644
--- a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -98,11 +98,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
   %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
-  %res = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %arg0, <32 x i8> %arg1)
+  %res = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
   %bc = bitcast <32 x i8> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <4 x i64> @test_mm256_adds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_adds_epi16:
@@ -111,11 +111,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
   %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
-  %res = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %arg0, <16 x i16> %arg1)
+  %res = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
   %bc = bitcast <16 x i16> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <4 x i64> @test_mm256_adds_epu8(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_adds_epu8:
@@ -124,13 +124,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
   %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
-  %1 = add <32 x i8> %arg0, %arg1
-  %2 = icmp ugt <32 x i8> %arg0, %1
-  %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
-  %bc = bitcast <32 x i8> %3 to <4 x i64>
+  %res = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
+  %bc = bitcast <32 x i8> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
 
 define <4 x i64> @test_mm256_adds_epu16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_adds_epu16:
@@ -139,12 +137,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
   %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
-  %1 = add <16 x i16> %arg0, %arg1
-  %2 = icmp ugt <16 x i16> %arg0, %1
-  %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
-  %bc = bitcast <16 x i16> %3 to <4 x i64>
+  %res = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
+  %bc = bitcast <16 x i16> %res to <4 x i64>
   ret <4 x i64> %bc
 }
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
 
 define <4 x i64> @test_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_alignr_epi8:
@@ -1826,12 +1823,6 @@
 define <4 x i64> @test_mm256_mul_epi32(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_mul_epi32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsllq $32, %ymm0, %ymm2
-; CHECK-NEXT:    vpsrad $31, %ymm2, %ymm2
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; CHECK-NEXT:    vpsllq $32, %ymm1, %ymm2
-; CHECK-NEXT:    vpsrad $31, %ymm2, %ymm2
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
 ; CHECK-NEXT:    vpmuldq %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %A = shl <4 x i64> %a0, <i64 32, i64 32, i64 32, i64 32>
@@ -1846,9 +1837,6 @@
 define <4 x i64> @test_mm256_mul_epu32(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_mul_epu32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
 ; CHECK-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %A = and <4 x i64> %a0, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
@@ -2530,11 +2518,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
   %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
-  %res = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %arg0, <32 x i8> %arg1)
+  %res = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
   %bc = bitcast <32 x i8> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <4 x i64> @test_mm256_subs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_subs_epi16:
@@ -2543,42 +2531,37 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
   %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
-  %res = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %arg0, <16 x i16> %arg1)
+  %res = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
   %bc = bitcast <16 x i16> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <4 x i64> @test_mm256_subs_epu8(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_subs_epu8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpsubusb %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
   %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
-  %cmp = icmp ugt <32 x i8> %arg0, %arg1
-  %sel = select <32 x i1> %cmp, <32 x i8> %arg0, <32 x i8> %arg1
-  %sub = sub <32 x i8> %sel, %arg1
-  %bc = bitcast <32 x i8> %sub to <4 x i64>
+  %res = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
+  %bc = bitcast <32 x i8> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
 
 define <4 x i64> @test_mm256_subs_epu16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_subs_epu16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpsubusw %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
   %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
-  %cmp = icmp ugt <16 x i16> %arg0, %arg1
-  %sel = select <16 x i1> %cmp, <16 x i16> %arg0, <16 x i16> %arg1
-  %sub = sub <16 x i16> %sel, %arg1
-  %bc = bitcast <16 x i16> %sub to <4 x i64>
+  %res = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
+  %bc = bitcast <16 x i16> %res to <4 x i64>
   ret <4 x i64> %bc
 }
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
 
 define <4 x i64> @test_mm256_unpackhi_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; CHECK-LABEL: test_mm256_unpackhi_epi8:
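Throughout this file the AVX2 saturating add/subtract tests now go through the target-independent saturation intrinsics (llvm.sadd.sat, llvm.uadd.sat, llvm.ssub.sat, llvm.usub.sat) rather than llvm.x86.avx2.* calls or hand-rolled add/icmp/select idioms, and the backend matches them back to single instructions such as vpsubusb. A stand-alone sketch of the generic form (hypothetical, not part of this test):

declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)

define <16 x i16> @usub_sat(<16 x i16> %a, <16 x i16> %b) {
  ; expected to select to a single vpsubusw on AVX2-capable targets
  %r = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
  ret <16 x i16> %r
}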
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
index 81e10a5..25d7d08 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
@@ -614,6 +614,28 @@
 declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
 
 
+define <32 x i8> @test_x86_avx2_padds_b(<32 x i8> %a0, <32 x i8> %a1) {
+; CHECK-LABEL: test_x86_avx2_padds_b:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
+  ret <32 x i8> %res
+}
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
+
+
+define <16 x i16> @test_x86_avx2_padds_w(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: test_x86_avx2_padds_w:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
+  ret <16 x i16> %res
+}
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
+
+
 define <32 x i8> @test_x86_avx2_paddus_b(<32 x i8> %a0, <32 x i8> %a1) {
 ; CHECK-LABEL: test_x86_avx2_paddus_b:
 ; CHECK:       ## %bb.0:
@@ -636,6 +658,28 @@
 declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone
 
 
+define <32 x i8> @test_x86_avx2_psubs_b(<32 x i8> %a0, <32 x i8> %a1) {
+; CHECK-LABEL: test_x86_avx2_psubs_b:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
+  ret <32 x i8> %res
+}
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
+
+
+define <16 x i16> @test_x86_avx2_psubs_w(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: test_x86_avx2_psubs_w:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %res = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
+  ret <16 x i16> %res
+}
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
+
+
 define <32 x i8> @test_x86_avx2_psubus_b(<32 x i8> %a0, <32 x i8> %a1) {
 ; CHECK-LABEL: test_x86_avx2_psubus_b:
 ; CHECK:       ## %bb.0:
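The new padds/psubs coverage lands in this -upgrade file because the llvm.x86.avx2.padds.* and llvm.x86.avx2.psubs.* intrinsics no longer exist as such; bitcode that calls them is auto-upgraded to the generic saturation intrinsics on load, and this file checks that the old spellings keep working. In effect (sketch of the upgrade, not literal tool output):

;   %r = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
; is rewritten on load to
;   %r = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)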
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86.ll b/test/CodeGen/X86/avx2-intrinsics-x86.ll
index 101448e..617e198 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -1,19 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 --check-prefix=X86 --check-prefix=X86-AVX
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X86 --check-prefix=X86-AVX512VL
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 --check-prefix=X64 --check-prefix=X64-AVX
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X64 --check-prefix=X64-AVX512VL
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 --check-prefix=X86 --check-prefix=X86-AVX
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X86 --check-prefix=X86-AVX512VL
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 --check-prefix=X64 --check-prefix=X64-AVX
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL --check-prefix=X64 --check-prefix=X64-AVX512VL
 
 define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_packssdw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x6b,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x6b,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_packssdw:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
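The many remaining hunks in this file are mechanical fallout from retargeting the RUN lines at *-unknown-unknown: the autogenerated checks drop Darwin's '##' assembly comment marker for the generic '#', and the Darwin-style LCPI* constant-pool labels give way to the {{\.LCPI.*}} regex, which also keeps the checks stable across label renumbering. For instance (annotation only):

;   Darwin:  vmovaps LCPI1_0, %ymm0        ## encoding: [...]
;   generic: vmovaps {{\.LCPI.*}}, %ymm0   # encoding: [...]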
@@ -22,32 +22,32 @@
 
 define <16 x i16> @test_x86_avx2_packssdw_fold() {
 ; X86-AVX-LABEL: test_x86_avx2_packssdw_fold:
-; X86-AVX:       ## %bb.0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
-; X86-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    vmovaps LCPI1_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
-; X86-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
-; X64-AVX:       ## %bb.0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
-; X64-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
-; X64-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
   ret <16 x i16> %res
 }
@@ -55,14 +55,14 @@
 
 define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_packsswb:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x63,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x63,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_packsswb:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
@@ -71,32 +71,32 @@
 
 define <32 x i8> @test_x86_avx2_packsswb_fold() {
 ; X86-AVX-LABEL: test_x86_avx2_packsswb_fold:
-; X86-AVX:       ## %bb.0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; X86-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    vmovaps LCPI3_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; X86-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
-; X64-AVX:       ## %bb.0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; X64-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; X64-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
   ret <32 x i8> %res
 }
@@ -104,14 +104,14 @@
 
 define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_packuswb:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x67,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x67,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_packuswb:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
@@ -120,79 +120,47 @@
 
 define <32 x i8> @test_x86_avx2_packuswb_fold() {
 ; X86-AVX-LABEL: test_x86_avx2_packuswb_fold:
-; X86-AVX:       ## %bb.0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; X86-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    vmovaps LCPI5_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; X86-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
-; X64-AVX:       ## %bb.0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; X64-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; X64-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
   ret <32 x i8> %res
 }
 
 
-define <32 x i8> @test_x86_avx2_padds_b(<32 x i8> %a0, <32 x i8> %a1) {
-; AVX2-LABEL: test_x86_avx2_padds_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xec,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_padds_b:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
-  ret <32 x i8> %res
-}
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
-
-
-define <16 x i16> @test_x86_avx2_padds_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_padds_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xed,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_padds_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
-  ret <16 x i16> %res
-}
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
-
-
 define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmadd_wd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaddwd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf5,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaddwd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xf5,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmadd_wd:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaddwd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -201,14 +169,14 @@
 
 define <16 x i16> @test_x86_avx2_pmaxs_w(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmaxs_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xee,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xee,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmaxs_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -217,14 +185,14 @@
 
 define <32 x i8> @test_x86_avx2_pmaxu_b(<32 x i8> %a0, <32 x i8> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmaxu_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xde,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xde,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmaxu_b:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
@@ -233,14 +201,14 @@
 
 define <16 x i16> @test_x86_avx2_pmins_w(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmins_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xea,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xea,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmins_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpminsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpminsw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -249,14 +217,14 @@
 
 define <32 x i8> @test_x86_avx2_pminu_b(<32 x i8> %a0, <32 x i8> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pminu_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xda,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xda,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pminu_b:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpminub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpminub %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
@@ -265,10 +233,10 @@
 
 define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) {
 ; CHECK-LABEL: test_x86_avx2_pmovmskb:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
-; CHECK-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpmovmskb %ymm0, %eax # encoding: [0xc5,0xfd,0xd7,0xc0]
+; CHECK-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0) ; <i32> [#uses=1]
   ret i32 %res
 }
@@ -277,14 +245,14 @@
 
 define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmulh_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmulhw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe5,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmulhw %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xe5,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmulh_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmulhw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -293,14 +261,14 @@
 
 define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmulhu_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe4,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xe4,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmulhu_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -309,14 +277,14 @@
 
 define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psad_bw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf6,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xf6,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psad_bw:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -325,14 +293,14 @@
 
 define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psll_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpslld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf2,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpslld %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xf2,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psll_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpslld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpslld %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -341,14 +309,14 @@
 
 define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psll_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf3,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllq %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xf3,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psll_q:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllq %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -357,14 +325,14 @@
 
 define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psll_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf1,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xf1,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psll_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -373,14 +341,14 @@
 
 define <8 x i32> @test_x86_avx2_pslli_d(<8 x i32> %a0) {
 ; AVX2-LABEL: test_x86_avx2_pslli_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpslld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xf0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpslld $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x72,0xf0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pslli_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpslld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpslld $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -389,14 +357,14 @@
 
 define <4 x i64> @test_x86_avx2_pslli_q(<4 x i64> %a0) {
 ; AVX2-LABEL: test_x86_avx2_pslli_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xf0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllq $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x73,0xf0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pslli_q:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllq $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -405,14 +373,14 @@
 
 define <16 x i16> @test_x86_avx2_pslli_w(<16 x i16> %a0) {
 ; AVX2-LABEL: test_x86_avx2_pslli_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xf0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x71,0xf0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pslli_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -421,14 +389,14 @@
 
 define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psra_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrad %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe2,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrad %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xe2,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psra_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrad %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrad %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -437,14 +405,14 @@
 
 define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psra_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsraw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe1,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsraw %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xe1,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psra_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsraw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsraw %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -453,14 +421,14 @@
 
 define <8 x i32> @test_x86_avx2_psrai_d(<8 x i32> %a0) {
 ; AVX2-LABEL: test_x86_avx2_psrai_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrad $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xe0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrad $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x72,0xe0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrai_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrad $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrad $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -469,14 +437,14 @@
 
 define <16 x i16> @test_x86_avx2_psrai_w(<16 x i16> %a0) {
 ; AVX2-LABEL: test_x86_avx2_psrai_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsraw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xe0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsraw $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x71,0xe0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrai_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsraw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsraw $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -485,14 +453,14 @@
 
 define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrl_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd2,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd2,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrl_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -501,14 +469,14 @@
 
 define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrl_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd3,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd3,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrl_q:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -517,14 +485,14 @@
 
 define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrl_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd1,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrl_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -533,26 +501,26 @@
 
 define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, <8 x i16>* %p) {
 ; X86-AVX-LABEL: test_x86_avx2_psrl_w_load:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX-NEXT:    vpsrlw (%eax), %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0x00]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vpsrlw (%eax), %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd1,0x00]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrl_w_load:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512VL-NEXT:    vpsrlw (%eax), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0x00]
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vpsrlw (%eax), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0x00]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrl_w_load:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vpsrlw (%rdi), %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0x07]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpsrlw (%rdi), %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd1,0x07]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psrl_w_load:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vpsrlw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0x07]
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpsrlw (%rdi), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0x07]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %a1 = load <8 x i16>, <8 x i16>* %p
   %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
@@ -561,14 +529,14 @@
 
 define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) {
 ; AVX2-LABEL: test_x86_avx2_psrli_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xd0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x72,0xd0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrli_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -577,14 +545,14 @@
 
 define <4 x i64> @test_x86_avx2_psrli_q(<4 x i64> %a0) {
 ; AVX2-LABEL: test_x86_avx2_psrli_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xd0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x73,0xd0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrli_q:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -593,56 +561,25 @@
 
 define <16 x i16> @test_x86_avx2_psrli_w(<16 x i16> %a0) {
 ; AVX2-LABEL: test_x86_avx2_psrli_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xd0,0x07]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $7, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x71,0xd0,0x07]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrli_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone
 
 
-define <32 x i8> @test_x86_avx2_psubs_b(<32 x i8> %a0, <32 x i8> %a1) {
-; AVX2-LABEL: test_x86_avx2_psubs_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe8,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psubs_b:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
-  ret <32 x i8> %res
-}
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
-
-
-define <16 x i16> @test_x86_avx2_psubs_w(<16 x i16> %a0, <16 x i16> %a1) {
-; AVX2-LABEL: test_x86_avx2_psubs_w:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe9,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_psubs_w:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
-  ret <16 x i16> %res
-}
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
-
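
The two tests deleted above covered @llvm.x86.avx2.psubs.b and @llvm.x86.avx2.psubs.w. Around this revision those intrinsics were auto-upgraded to the target-independent saturating subtract, so their coverage presumably moves to the matching upgrade test file. A minimal sketch of the post-upgrade form, assuming @llvm.ssub.sat is the upgrade target (the function name here is illustrative):

; Post-upgrade equivalent of the removed psubs.b test: each byte lane
; computes a0 - a1 with signed saturation to [-128, 127].
define <32 x i8> @psubs_b_upgraded(<32 x i8> %a0, <32 x i8> %a1) {
  %res = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
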
 define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx2_phadd_d:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphaddd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x02,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vphaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x02,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
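
vphaddd adds adjacent element pairs within each 128-bit half, taking pairs from the first source and then the second; the phadd/phsub tests below all follow this lane layout. An equivalent expansion in plain IR, purely illustrative:

; @llvm.x86.avx2.phadd.d expanded: result is
; [a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7]
define <8 x i32> @phaddd_expanded(<8 x i32> %a, <8 x i32> %b) {
  %even = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %odd = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
  %sum = add <8 x i32> %even, %odd
  ret <8 x i32> %sum
}
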
@@ -651,9 +588,9 @@
 
 define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test_x86_avx2_phadd_sw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x03,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vphaddsw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x03,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -662,9 +599,9 @@
 
 define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test_x86_avx2_phadd_w:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphaddw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x01,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vphaddw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x01,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -673,9 +610,9 @@
 
 define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx2_phsub_d:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphsubd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x06,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vphsubd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x06,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -684,9 +621,9 @@
 
 define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test_x86_avx2_phsub_sw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x07,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vphsubsw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x07,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -695,9 +632,9 @@
 
 define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test_x86_avx2_phsub_w:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphsubw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x05,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vphsubw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x05,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -706,14 +643,14 @@
 
 define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmadd_ub_sw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -722,30 +659,30 @@
 ; Make sure we don't commute this operation.
 define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(<32 x i8>* %ptr, <32 x i8> %a1) {
 ; X86-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX-NEXT:    vmovdqa (%eax), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x08]
-; X86-AVX-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT:    vmovdqa (%eax), %ymm1 # encoding: [0xc5,0xfd,0x6f,0x08]
+; X86-AVX-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0x04,0xc0]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512VL-NEXT:    vmovdqa (%eax), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x08]
-; X86-AVX512VL-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX512VL-NEXT:    vmovdqa (%eax), %ymm1 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x08]
+; X86-AVX512VL-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovdqa (%rdi), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x0f]
-; X64-AVX-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovdqa (%rdi), %ymm1 # encoding: [0xc5,0xfd,0x6f,0x0f]
+; X64-AVX-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0x04,0xc0]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovdqa (%rdi), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x0f]
-; X64-AVX512VL-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa (%rdi), %ymm1 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x0f]
+; X64-AVX512VL-NEXT:    vpmaddubsw %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %a0 = load <32 x i8>, <32 x i8>* %ptr
   %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
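
The load_op0 variant exists because vpmaddubsw is not commutative: the first source supplies unsigned bytes and the second signed bytes, so folding the loaded operand must not swap them, which is why every run line keeps the explicit vmovdqa before the multiply. One result lane in plain IR (an illustrative scalar model, not part of the test file):

; One i16 lane of vpmaddubsw: unsigned bytes from the first source,
; signed bytes from the second, products summed with signed saturation.
define i16 @pmaddubsw_lane(i8 %u0, i8 %u1, i8 %s0, i8 %s1) {
  %a0 = zext i8 %u0 to i32
  %a1 = zext i8 %u1 to i32
  %b0 = sext i8 %s0 to i32
  %b1 = sext i8 %s1 to i32
  %m0 = mul i32 %a0, %b0
  %m1 = mul i32 %a1, %b1
  %s = add i32 %m0, %m1
  %lt = icmp slt i32 %s, -32768
  %lo = select i1 %lt, i32 -32768, i32 %s
  %gt = icmp sgt i32 %lo, 32767
  %hi = select i1 %gt, i32 32767, i32 %lo
  %res = trunc i32 %hi to i16
  ret i16 %res
}
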
@@ -753,14 +690,14 @@
 
 define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmul_hr_sw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmulhrsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmulhrsw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmul_hr_sw:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmulhrsw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
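
pmulhrsw is the one multiply in this group with non-obvious rounding: each pair of i16 elements is multiplied to 32 bits, shifted right by 14, incremented, and shifted right once more. A scalar model of one lane (illustrative):

; One vpmulhrsw lane: ((a * b >> 14) + 1) >> 1, truncated back to i16.
define i16 @pmulhrsw_lane(i16 %a, i16 %b) {
  %ax = sext i16 %a to i32
  %bx = sext i16 %b to i32
  %prod = mul i32 %ax, %bx
  %sh14 = ashr i32 %prod, 14
  %rnd = add i32 %sh14, 1
  %sh1 = ashr i32 %rnd, 1
  %res = trunc i32 %sh1 to i16
  ret i16 %res
}
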
@@ -769,14 +706,14 @@
 
 define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pshuf_b:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pshuf_b:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpshufb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpshufb %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
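
vpshufb shuffles bytes within each 128-bit half: bits 3:0 of each control byte select a source byte from the same half, and a set sign bit zeroes that lane instead. A scalar model of one byte lane within a half (illustrative):

; One vpshufb byte lane: control bit 7 set -> 0, else src[control & 15].
define i8 @pshufb_lane(<16 x i8> %src, i8 %control) {
  %neg = icmp slt i8 %control, 0
  %idx = and i8 %control, 15
  %elt = extractelement <16 x i8> %src, i8 %idx
  %res = select i1 %neg, i8 0, i8 %elt
  ret i8 %res
}
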
@@ -785,9 +722,9 @@
 
 define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) {
 ; CHECK-LABEL: test_x86_avx2_psign_b:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpsignb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x08,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsignb %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x08,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
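
The three psign tests apply the sign of the second operand to the first: a negative element selects -a, zero selects 0, and a positive element keeps a. A scalar sketch of one lane (illustrative):

; One vpsignb lane: b < 0 -> -a, b == 0 -> 0, b > 0 -> a.
define i8 @psignb_lane(i8 %a, i8 %b) {
  %isneg = icmp slt i8 %b, 0
  %iszero = icmp eq i8 %b, 0
  %nega = sub i8 0, %a
  %sel = select i1 %isneg, i8 %nega, i8 %a
  %res = select i1 %iszero, i8 0, i8 %sel
  ret i8 %res
}
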
@@ -796,9 +733,9 @@
 
 define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx2_psign_d:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpsignd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0a,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsignd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0a,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -807,9 +744,9 @@
 
 define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test_x86_avx2_psign_w:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpsignw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x09,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsignw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x09,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -818,9 +755,9 @@
 
 define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
 ; CHECK-LABEL: test_x86_avx2_mpsadbw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmpsadbw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmpsadbw $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -829,14 +766,14 @@
 
 define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_packusdw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_packusdw:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -845,32 +782,32 @@
 
 define <16 x i16> @test_x86_avx2_packusdw_fold() {
 ; X86-AVX-LABEL: test_x86_avx2_packusdw_fold:
-; X86-AVX:       ## %bb.0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
-; X86-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI51_0, kind: FK_Data_4
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    vmovaps LCPI51_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
-; X86-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI51_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
-; X64-AVX:       ## %bb.0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
-; X64-AVX-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI51_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
-; X64-AVX512VL-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI51_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
   ret <16 x i16> %res
 }
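
The folded constant checked above follows directly from packusdw's semantics: each signed i32 is clamped to [0, 65535], and the two sources interleave per 128-bit half (four words from the first source, then four from the second). With the first source all zero and the second <255, 32767, 65535, -1, -32767, -65535, 0, -256>, the clamped words from the second source are 255, 32767, 65535, 0 in the low half and 0, 0, 0, 0 in the high half, which yields the checked pattern. A scalar model of the clamp (illustrative):

; One packusdw lane: a signed i32 clamped to [0, 65535], then truncated.
define i16 @packusdw_lane(i32 %v) {
  %isneg = icmp slt i32 %v, 0
  %zlo = select i1 %isneg, i32 0, i32 %v
  %isbig = icmp sgt i32 %zlo, 65535
  %clamped = select i1 %isbig, i32 65535, i32 %zlo
  %res = trunc i32 %clamped to i16
  ret i16 %res
}
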
@@ -878,9 +815,9 @@
 
 define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) {
 ; CHECK-LABEL: test_x86_avx2_pblendvb:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
@@ -889,10 +826,10 @@
 
 define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test_x86_avx2_pblendw:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpblendw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07]
-; CHECK-NEXT:    ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpblendw $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07]
+; CHECK-NEXT:    # ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i8 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -901,14 +838,14 @@
 
 define <32 x i8> @test_x86_avx2_pmaxsb(<32 x i8> %a0, <32 x i8> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmaxsb:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmaxsb:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
@@ -917,14 +854,14 @@
 
 define <8 x i32> @test_x86_avx2_pmaxsd(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmaxsd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmaxsd:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -933,14 +870,14 @@
 
 define <8 x i32> @test_x86_avx2_pmaxud(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmaxud:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmaxud:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -949,14 +886,14 @@
 
 define <16 x i16> @test_x86_avx2_pmaxuw(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pmaxuw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pmaxuw:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -965,14 +902,14 @@
 
 define <32 x i8> @test_x86_avx2_pminsb(<32 x i8> %a0, <32 x i8> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pminsb:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pminsb:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpminsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpminsb %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
@@ -981,14 +918,14 @@
 
 define <8 x i32> @test_x86_avx2_pminsd(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pminsd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pminsd:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpminsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpminsd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -997,14 +934,14 @@
 
 define <8 x i32> @test_x86_avx2_pminud(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pminud:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pminud:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpminud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpminud %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -1013,14 +950,14 @@
 
 define <16 x i16> @test_x86_avx2_pminuw(<16 x i16> %a0, <16 x i16> %a1) {
 ; AVX2-LABEL: test_x86_avx2_pminuw:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_pminuw:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpminuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpminuw %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -1029,10 +966,10 @@
 
 define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx2_pblendd_128:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
-; CHECK-NEXT:    ## xmm0 = xmm1[0,1,2],xmm0[3]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vblendps $8, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
+; CHECK-NEXT:    # xmm0 = xmm1[0,1,2],xmm0[3]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i8 7) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1041,10 +978,10 @@
 
 define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx2_pblendd_256:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vblendps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07]
-; CHECK-NEXT:    ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vblendps $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07]
+; CHECK-NEXT:    # ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i8 7) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
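
Both blend tests pass immediate 7 (0b00000111): each set mask bit selects the corresponding lane from the second operand, so lanes 0-2 come from %a1 and the rest from %a0, exactly as the shuffle comments in the checks spell out. In plain IR the same blend is just a shufflevector (illustrative):

; @llvm.x86.avx2.pblendd.256 with immediate 7 as a plain shuffle:
; lanes 0-2 from %a1, lanes 3-7 from %a0.
define <8 x i32> @pblendd_256_imm7(<8 x i32> %a0, <8 x i32> %a1) {
  %res = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %res
}
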
@@ -1056,14 +993,14 @@
 ; the instruction.
 define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_permd:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0x16,0xc0]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_permd:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpermps %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -1075,14 +1012,14 @@
 ; the instruction.
 define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_permps:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0x16,0xc0]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_permps:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpermps %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
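
permd and permps are full lane-crossing permutes: each destination dword is chosen by the low three bits of the matching index element (higher bits are ignored), which is why the integer permd can share the floating-point vpermps lowering seen in the checks. A scalar model of one lane (illustrative):

; One vpermd/vpermps lane: dst[i] = data[index[i] & 7].
define i32 @permd_lane(<8 x i32> %data, i32 %index) {
  %sel = and i32 %index, 7
  %res = extractelement <8 x i32> %data, i32 %sel
  ret i32 %res
}
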
@@ -1091,15 +1028,15 @@
 
 define <2 x i64> @test_x86_avx2_maskload_q(i8* %a0, <2 x i64> %a1) {
 ; X86-LABEL: test_x86_avx2_maskload_q:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovq (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x00]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovq (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x8c,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskload_q:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovq (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x07]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovq (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x8c,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
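
The maskload tests check vpmaskmov loads: an element is read only where the sign bit of the corresponding mask element is set, masked-off lanes come back as zero, and masked-off addresses never fault. The same behavior expressed with the generic masked load (illustrative; the i8* operand of the x86 intrinsic is given a typed pointer here):

; @llvm.x86.avx2.maskload.q via the generic masked load: lanes whose
; mask element has the sign bit set are loaded, all others become 0.
define <2 x i64> @maskload_q_generic(<2 x i64>* %p, <2 x i64> %mask) {
  %bits = icmp slt <2 x i64> %mask, zeroinitializer
  %res = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 1, <2 x i1> %bits, <2 x i64> zeroinitializer)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
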
@@ -1108,15 +1045,15 @@
 
 define <4 x i64> @test_x86_avx2_maskload_q_256(i8* %a0, <4 x i64> %a1) {
 ; X86-LABEL: test_x86_avx2_maskload_q_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovq (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x00]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovq (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x8c,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskload_q_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovq (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x07]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovq (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x8c,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
@@ -1125,15 +1062,15 @@
 
 define <4 x i32> @test_x86_avx2_maskload_d(i8* %a0, <4 x i32> %a1) {
 ; X86-LABEL: test_x86_avx2_maskload_d:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovd (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x00]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovd (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x8c,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskload_d:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovd (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x07]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovd (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x8c,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
@@ -1142,15 +1079,15 @@
 
 define <8 x i32> @test_x86_avx2_maskload_d_256(i8* %a0, <8 x i32> %a1) {
 ; X86-LABEL: test_x86_avx2_maskload_d_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovd (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x00]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovd (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x8c,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskload_d_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovd (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x07]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovd (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x8c,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
@@ -1159,15 +1096,15 @@
 
 define void @test_x86_avx2_maskstore_q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) {
 ; X86-LABEL: test_x86_avx2_maskstore_q:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovq %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x08]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovq %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0xf9,0x8e,0x08]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskstore_q:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovq %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x0f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovq %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0xf9,0x8e,0x0f]
+; X64-NEXT:    retq # encoding: [0xc3]
   call void @llvm.x86.avx2.maskstore.q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2)
   ret void
 }
@@ -1176,17 +1113,17 @@
 
 define void @test_x86_avx2_maskstore_q_256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
 ; X86-LABEL: test_x86_avx2_maskstore_q_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
-; X86-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovq %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
+; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskstore_q_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovq %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x0f]
-; X64-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovq %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0xfd,0x8e,0x0f]
+; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-NEXT:    retq # encoding: [0xc3]
   call void @llvm.x86.avx2.maskstore.q.256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2)
   ret void
 }
@@ -1195,15 +1132,15 @@
 
 define void @test_x86_avx2_maskstore_d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) {
 ; X86-LABEL: test_x86_avx2_maskstore_d:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovd %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0x79,0x8e,0x08]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovd %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x8e,0x08]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskstore_d:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovd %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0x79,0x8e,0x0f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovd %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0x79,0x8e,0x0f]
+; X64-NEXT:    retq # encoding: [0xc3]
   call void @llvm.x86.avx2.maskstore.d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2)
   ret void
 }
@@ -1212,17 +1149,17 @@
 
 define void @test_x86_avx2_maskstore_d_256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
 ; X86-LABEL: test_x86_avx2_maskstore_d_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
-; X86-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpmaskmovd %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
+; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_maskstore_d_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpmaskmovd %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x0f]
-; X64-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpmaskmovd %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0x7d,0x8e,0x0f]
+; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-NEXT:    retq # encoding: [0xc3]
   call void @llvm.x86.avx2.maskstore.d.256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2)
   ret void
 }
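
maskstore is the mirror image: lanes are written only where the mask sign bit is set. The 256-bit variants additionally check the vzeroupper emitted before returning, since these functions dirty the upper ymm state and return no vector value. The generic-IR counterpart (illustrative, again with a typed pointer in place of the intrinsic's i8*):

; @llvm.x86.avx2.maskstore.d.256 via the generic masked store:
; only lanes whose mask element has the sign bit set are written.
define void @maskstore_d_256_generic(<8 x i32>* %p, <8 x i32> %mask, <8 x i32> %val) {
  %bits = icmp slt <8 x i32> %mask, zeroinitializer
  call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %val, <8 x i32>* %p, i32 1, <8 x i1> %bits)
  ret void
}
declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)
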
@@ -1231,182 +1168,490 @@
 
 define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psllv_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x47,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psllv_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
+
+define <4 x i32> @test_x86_avx2_psllv_d_const() {
+; X86-AVX-LABEL: test_x86_avx2_psllv_d_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
+; X86-AVX-NEXT:    # encoding: [0xc4,0xe2,0x79,0x58,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpaddd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = <4,9,0,u>
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpaddd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psllv_d_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
+; X64-AVX-NEXT:    # encoding: [0xc4,0xe2,0x79,0x58,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psllv_d_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = <4,9,0,u>
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res0 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> <i32 2, i32 9, i32 0, i32 -1>, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
+  %res1 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> <i32 1, i32 1, i32 1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 -1>)
+  %res2 = add <4 x i32> %res0, %res1
+  ret <4 x i32> %res2
+}
 declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
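
The new *_const tests added in this hunk pin down constant folding of the variable shifts, whose x86 semantics differ from plain IR shl: a per-element count of 32 or more produces 0 rather than poison. In hardware, psllv.d(<2,9,0,-1>, <1,0,33,-1>) therefore yields <4,9,0,0>; the folder emits <4,9,0,u> (see the vmovdqa comments above), where the u lane is an out-of-range shift that the hardware value 0 refines. A scalar model of one lane (illustrative):

; One vpsllvd lane: shift counts above 31 yield 0 instead of poison.
define i32 @psllvd_lane(i32 %a, i32 %count) {
  %big = icmp ugt i32 %count, 31
  %amt = select i1 %big, i32 0, i32 %count ; clamp so the shl stays defined
  %shl = shl i32 %a, %amt
  %res = select i1 %big, i32 0, i32 %shl
  ret i32 %res
}

The psrlv tests later in the hunk follow the same rule with a logical right shift.
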
 
 
 define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psllv_d_256:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psllv_d_256:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
+
+define <8 x i32> @test_x86_avx2_psllv_d_256_const() {
+; X86-AVX-LABEL: test_x86_avx2_psllv_d_256_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [8,8,8,8,8,8,8,8]
+; X86-AVX-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x58,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpaddd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_256_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = <4,9,0,u,12,7,u,0>
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psllv_d_256_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [8,8,8,8,8,8,8,8]
+; X64-AVX-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x58,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psllv_d_256_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = <4,9,0,u,12,7,u,0>
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res0 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1, i32 2, i32 0, i32 34, i32 -2>)
+  %res1 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 -1>)
+  %res2 = add <8 x i32> %res0, %res1
+  ret <8 x i32> %res2
+}
 declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
 
 
 define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psllv_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psllv_q:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
+define <2 x i64> @test_x86_avx2_psllv_q_const() {
+; X86-AVX-LABEL: test_x86_avx2_psllv_q_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,0,4294967295,4294967295]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4294967295,4294967295]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psllv_q_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movl $8, %eax # encoding: [0xb8,0x08,0x00,0x00,0x00]
+; X64-AVX-NEXT:    vmovq %rax, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc0]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    movl $8, %eax # encoding: [0xb8,0x08,0x00,0x00,0x00]
+; X64-AVX512VL-NEXT:    vmovq %rax, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc0]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> <i64 4, i64 -1>, <2 x i64> <i64 1, i64 -1>)
+  ret <2 x i64> %res
+}
 declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
 
 
 define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psllv_q_256:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psllv_q_256:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
+
+define <4 x i64> @test_x86_avx2_psllv_q_256_const() {
+; X86-AVX-LABEL: test_x86_avx2_psllv_q_256_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psllv_q_256_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [8,8,8,8]
+; X64-AVX-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x19,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [8,8,8,8]
+; X64-AVX512VL-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x19,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
+  ret <4 x i64> %res
+}
 declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
 
 
 define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrlv_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x45,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrlv_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
+
+define <4 x i32> @test_x86_avx2_psrlv_d_const() {
+; X86-AVX-LABEL: test_x86_avx2_psrlv_d_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
+; X86-AVX-NEXT:    # encoding: [0xc4,0xe2,0x79,0x58,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpaddd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = <1,9,0,u>
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpaddd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_d_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
+; X64-AVX-NEXT:    # encoding: [0xc4,0xe2,0x79,0x58,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_d_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = <1,9,0,u>
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res0 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> <i32 2, i32 9, i32 0, i32 -1>, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
+  %res1 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> <i32 4, i32 4, i32 4, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 -1>)
+  %res2 = add <4 x i32> %res0, %res1
+  ret <4 x i32> %res2
+}
 declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 
 define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
+
+define <8 x i32> @test_x86_avx2_psrlv_d_256_const() {
+; X86-AVX-LABEL: test_x86_avx2_psrlv_d_256_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
+; X86-AVX-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x58,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpaddd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = <1,9,0,u,0,7,u,0>
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_d_256_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
+; X64-AVX-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x58,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = <1,9,0,u,0,7,u,0>
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res0 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1, i32 2, i32 0, i32 34, i32 -2>)
+  %res1 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 -1>)
+  %res2 = add <8 x i32> %res0, %res1
+  ret <8 x i32> %res2
+}
 declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
 
 
 define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrlv_q:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrlv_q:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
+
+define <2 x i64> @test_x86_avx2_psrlv_q_const() {
+; X86-AVX-LABEL: test_x86_avx2_psrlv_q_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,0,4,0]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4,0]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_q_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movl $2, %eax # encoding: [0xb8,0x02,0x00,0x00,0x00]
+; X64-AVX-NEXT:    vmovq %rax, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc0]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    movl $2, %eax # encoding: [0xb8,0x02,0x00,0x00,0x00]
+; X64-AVX512VL-NEXT:    vmovq %rax, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc0]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> <i64 4, i64 4>, <2 x i64> <i64 1, i64 -1>)
+  ret <2 x i64> %res
+}
 declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
 
 
 define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
+
+define <4 x i64> @test_x86_avx2_psrlv_q_256_const() {
+; X86-AVX-LABEL: test_x86_avx2_psrlv_q_256_const:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4,0]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
+;
+; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4,0]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_avx2_psrlv_q_256_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [2,2,2,2]
+; X64-AVX-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x19,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
+;
+; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,2,2,2]
+; X64-AVX512VL-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x19,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
+  %res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 4>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
+  ret <4 x i64> %res
+}
 declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
 
 
 define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrav_d:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsravd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsravd %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrav_d:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsravd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsravd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
 
-define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
+define <4 x i32> @test_x86_avx2_psrav_d_const() {
 ; X86-AVX-LABEL: test_x86_avx2_psrav_d_const:
-; X86-AVX:       ## %bb.0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
-; X86-AVX-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI83_0, kind: FK_Data_4
-; X86-AVX-NEXT:    vpsravd LCPI83_1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 5, value: LCPI83_1, kind: FK_Data_4
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    vmovdqa LCPI83_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
-; X86-AVX512VL-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI83_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    vpsravd LCPI83_1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 5, value: LCPI83_1, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
-; X64-AVX:       ## %bb.0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
-; X64-AVX-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI83_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 5, value: LCPI83_1-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
-; X64-AVX512VL-NEXT:    ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI83_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 5, value: LCPI83_1-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
   ret <4 x i32> %res
 }
@@ -1414,54 +1659,54 @@
 
 define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_psrav_d_256:
-; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpsravd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
-; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsravd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
+; AVX2-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx2_psrav_d_256:
-; AVX512VL:       ## %bb.0:
-; AVX512VL-NEXT:    vpsravd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
-; AVX512VL-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsravd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
+; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
 
-define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1) {
+define <8 x i32> @test_x86_avx2_psrav_d_256_const() {
 ; X86-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
-; X86-AVX:       ## %bb.0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
-; X86-AVX-NEXT:    ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI85_0, kind: FK_Data_4
-; X86-AVX-NEXT:    vpsravd LCPI85_1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X86-AVX-NEXT:    ## fixup A - offset: 5, value: LCPI85_1, kind: FK_Data_4
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; X86-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    vmovdqa LCPI85_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
-; X86-AVX512VL-NEXT:    ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI85_0, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    vpsravd LCPI85_1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT:    ## fixup A - offset: 5, value: LCPI85_1, kind: FK_Data_4
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
+; X86-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
-; X64-AVX:       ## %bb.0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
-; X64-AVX-NEXT:    ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 4, value: LCPI85_0-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X64-AVX-NEXT:    ## fixup A - offset: 5, value: LCPI85_1-4, kind: reloc_riprel_4byte
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    vpsravd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; X64-AVX-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
-; X64-AVX512VL-NEXT:    ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI85_0-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT:    ## fixup A - offset: 5, value: LCPI85_1-4, kind: reloc_riprel_4byte
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
+; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
   ret <8 x i32> %res
 }
@@ -1469,15 +1714,15 @@
 
 define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_pd:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_pd:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0,
                             i8* %a1, <4 x i32> %idx, <2 x double> %mask, i8 2) ;
   ret <2 x double> %res
@@ -1487,15 +1732,15 @@
 
 define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_pd_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x92,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_pd_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x92,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0,
                             i8* %a1, <4 x i32> %idx, <4 x double> %mask, i8 2) ;
   ret <4 x double> %res
@@ -1505,15 +1750,15 @@
 
 define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, i8* %a1, <2 x i64> %idx, <2 x double> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_pd:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_pd:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0,
                             i8* %a1, <2 x i64> %idx, <2 x double> %mask, i8 2) ;
   ret <2 x double> %res
@@ -1523,15 +1768,15 @@
 
 define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, i8* %a1, <4 x i64> %idx, <4 x double> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_pd_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x93,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_pd_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x93,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0,
                             i8* %a1, <4 x i64> %idx, <4 x double> %mask, i8 2) ;
   ret <4 x double> %res
@@ -1541,15 +1786,15 @@
 
 define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_ps:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x92,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_ps:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x92,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0,
                             i8* %a1, <4 x i32> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
@@ -1559,15 +1804,15 @@
 
 define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_ps_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_ps_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
                             i8* %a1, <8 x i32> %idx, <8 x float> %mask, i8 2) ;
   ret <8 x float> %res
@@ -1577,15 +1822,15 @@
 
 define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, i8* %a1, <2 x i64> %idx, <4 x float> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_ps:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x93,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_ps:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x93,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0,
                             i8* %a1, <2 x i64> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
@@ -1595,17 +1840,17 @@
 
 define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, i8* %a1, <4 x i64> %idx, <4 x float> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_ps_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
-; X86-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 # encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
+; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_ps_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x4f]
-; X64-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0 # encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x4f]
+; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0,
                             i8* %a1, <4 x i64> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
@@ -1615,15 +1860,15 @@
 
 define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, i8* %a1, <4 x i32> %idx, <2 x i64> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_q:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_q:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0,
                             i8* %a1, <4 x i32> %idx, <2 x i64> %mask, i8 2) ;
   ret <2 x i64> %res
@@ -1633,15 +1878,15 @@
 
 define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, i8* %a1, <4 x i32> %idx, <4 x i64> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_q_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x90,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_q_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x90,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0,
                             i8* %a1, <4 x i32> %idx, <4 x i64> %mask, i8 2) ;
   ret <4 x i64> %res
@@ -1651,15 +1896,15 @@
 
 define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, i8* %a1, <2 x i64> %idx, <2 x i64> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_q:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_q:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0,
                             i8* %a1, <2 x i64> %idx, <2 x i64> %mask, i8 2) ;
   ret <2 x i64> %res
@@ -1669,15 +1914,15 @@
 
 define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, i8* %a1, <4 x i64> %idx, <4 x i64> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_q_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x91,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_q_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0xed,0x91,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0,
                             i8* %a1, <4 x i64> %idx, <4 x i64> %mask, i8 2) ;
   ret <4 x i64> %res
@@ -1687,15 +1932,15 @@
 
 define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, i8* %a1, <4 x i32> %idx, <4 x i32> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_d:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x90,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_d:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x90,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0,
                             i8* %a1, <4 x i32> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
@@ -1705,15 +1950,15 @@
 
 define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, i8* %a1, <8 x i32> %idx, <8 x i32> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_d_d_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_d_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0,
                             i8* %a1, <8 x i32> %idx, <8 x i32> %mask, i8 2) ;
   ret <8 x i32> %res
@@ -1723,15 +1968,15 @@
 
 define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, i8* %a1, <2 x i64> %idx, <4 x i32> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_d:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x48]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x91,0x04,0x48]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_d:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x4f]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # encoding: [0xc4,0xe2,0x69,0x91,0x04,0x4f]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0,
                             i8* %a1, <2 x i64> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
@@ -1741,17 +1986,17 @@
 
 define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, i8* %a1, <4 x i64> %idx, <4 x i32> %mask) {
 ; X86-LABEL: test_x86_avx2_gather_q_d_256:
-; X86:       ## %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
-; X86-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X86-NEXT:    retl ## encoding: [0xc3]
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 # encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
+; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx2_gather_q_d_256:
-; X64:       ## %bb.0:
-; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x4f]
-; X64-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; X64-NEXT:    retq ## encoding: [0xc3]
+; X64:       # %bb.0:
+; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x4f]
+; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
+; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %a0,
                             i8* %a1, <4 x i64> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
@@ -1763,36 +2008,36 @@
 define <8 x float>  @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx, <8 x float> %mask, float* nocapture %out) {
 ;; gather with mask
 ; X86-AVX-LABEL: test_gather_mask:
-; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; X86-AVX-NEXT:    vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
-; X86-AVX-NEXT:    vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
-; X86-AVX-NEXT:    vmovups %ymm2, (%eax) ## encoding: [0xc5,0xfc,0x11,0x10]
-; X86-AVX-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX-NEXT:    vmovaps %ymm2, %ymm3 # encoding: [0xc5,0xfc,0x28,0xda]
+; X86-AVX-NEXT:    vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 # encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
+; X86-AVX-NEXT:    vmovups %ymm2, (%eax) # encoding: [0xc5,0xfc,0x11,0x10]
+; X86-AVX-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_gather_mask:
-; X86-AVX512VL:       ## %bb.0:
-; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; X86-AVX512VL-NEXT:    vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; X86-AVX512VL-NEXT:    vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
-; X86-AVX512VL-NEXT:    vmovups %ymm2, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x10]
-; X86-AVX512VL-NEXT:    retl ## encoding: [0xc3]
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
+; X86-AVX512VL-NEXT:    vmovaps %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
+; X86-AVX512VL-NEXT:    vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 # encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
+; X86-AVX512VL-NEXT:    vmovups %ymm2, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x10]
+; X86-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_gather_mask:
-; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
-; X64-AVX-NEXT:    vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
-; X64-AVX-NEXT:    vmovups %ymm2, (%rsi) ## encoding: [0xc5,0xfc,0x11,0x16]
-; X64-AVX-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovaps %ymm2, %ymm3 # encoding: [0xc5,0xfc,0x28,0xda]
+; X64-AVX-NEXT:    vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 # encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
+; X64-AVX-NEXT:    vmovups %ymm2, (%rsi) # encoding: [0xc5,0xfc,0x11,0x16]
+; X64-AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_gather_mask:
-; X64-AVX512VL:       ## %bb.0:
-; X64-AVX512VL-NEXT:    vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; X64-AVX512VL-NEXT:    vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
-; X64-AVX512VL-NEXT:    vmovups %ymm2, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x16]
-; X64-AVX512VL-NEXT:    retq ## encoding: [0xc3]
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
+; X64-AVX512VL-NEXT:    vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 # encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
+; X64-AVX512VL-NEXT:    vmovups %ymm2, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x16]
+; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %a_i8 = bitcast float* %a to i8*
   %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
                            i8* %a_i8, <8 x i32> %idx, <8 x float> %mask, i8 4) ;
diff --git a/test/CodeGen/X86/avx2-logic.ll b/test/CodeGen/X86/avx2-logic.ll
index 8f2207f..f7b62ea 100644
--- a/test/CodeGen/X86/avx2-logic.ll
+++ b/test/CodeGen/X86/avx2-logic.ll
@@ -91,14 +91,12 @@
 ; X32-LABEL: vpblendvb:
 ; X32:       # %bb.0:
 ; X32-NEXT:    vpsllw $7, %ymm0, %ymm0
-; X32-NEXT:    vpand {{\.LCPI.*}}, %ymm0, %ymm0
 ; X32-NEXT:    vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: vpblendvb:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vpsllw $7, %ymm0, %ymm0
-; X64-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT:    vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
 ; X64-NEXT:    retq
   %min = select <32 x i1> %cond, <32 x i8> %x, <32 x i8> %y
diff --git a/test/CodeGen/X86/avx2-schedule.ll b/test/CodeGen/X86/avx2-schedule.ll
index 2901827..00c4fcb 100644
--- a/test/CodeGen/X86/avx2-schedule.ll
+++ b/test/CodeGen/X86/avx2-schedule.ll
@@ -1129,12 +1129,12 @@
 ; ZNVER1-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1)
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   %2 = load <32 x i8>, <32 x i8> *%a2, align 32
-  %3 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %1, <32 x i8> %2)
+  %3 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %1, <32 x i8> %2)
   ret <32 x i8> %3
 }
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @test_paddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ; GENERIC-LABEL: test_paddsw:
@@ -1172,12 +1172,12 @@
 ; ZNVER1-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1)
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   %2 = load <16 x i16>, <16 x i16> *%a2, align 32
-  %3 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %1, <16 x i16> %2)
+  %3 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %1, <16 x i16> %2)
   ret <16 x i16> %3
 }
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <32 x i8> @test_paddusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
 ; GENERIC-LABEL: test_paddusb:
@@ -1215,12 +1215,12 @@
 ; ZNVER1-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1)
+  %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   %2 = load <32 x i8>, <32 x i8> *%a2, align 32
-  %3 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %1, <32 x i8> %2)
+  %3 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %1, <32 x i8> %2)
   ret <32 x i8> %3
 }
-declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @test_paddusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ; GENERIC-LABEL: test_paddusw:
@@ -1258,12 +1258,12 @@
 ; ZNVER1-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1)
+  %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   %2 = load <16 x i16>, <16 x i16> *%a2, align 32
-  %3 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %1, <16 x i16> %2)
+  %3 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %1, <16 x i16> %2)
   ret <16 x i16> %3
 }
-declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ; GENERIC-LABEL: test_paddw:
@@ -6505,12 +6505,12 @@
 ; ZNVER1-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
+  %1 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   %2 = load <32 x i8>, <32 x i8> *%a2, align 32
-  %3 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %1, <32 x i8> %2)
+  %3 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %1, <32 x i8> %2)
   ret <32 x i8> %3
 }
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @test_psubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ; GENERIC-LABEL: test_psubsw:
@@ -6548,12 +6548,12 @@
 ; ZNVER1-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
+  %1 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   %2 = load <16 x i16>, <16 x i16> *%a2, align 32
-  %3 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %1, <16 x i16> %2)
+  %3 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %1, <16 x i16> %2)
   ret <16 x i16> %3
 }
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <32 x i8> @test_psubusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
 ; GENERIC-LABEL: test_psubusb:
@@ -6591,12 +6591,12 @@
 ; ZNVER1-NEXT:    vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
+  %1 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   %2 = load <32 x i8>, <32 x i8> *%a2, align 32
-  %3 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %1, <32 x i8> %2)
+  %3 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %1, <32 x i8> %2)
   ret <32 x i8> %3
 }
-declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @test_psubusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ; GENERIC-LABEL: test_psubusw:
@@ -6634,12 +6634,12 @@
 ; ZNVER1-NEXT:    vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
+  %1 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   %2 = load <16 x i16>, <16 x i16> *%a2, align 32
-  %3 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %1, <16 x i16> %2)
+  %3 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %1, <16 x i16> %2)
   ret <16 x i16> %3
 }
-declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ; GENERIC-LABEL: test_psubw:
diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll
index 2381180..072e3c8 100644
--- a/test/CodeGen/X86/avx512-ext.ll
+++ b/test/CodeGen/X86/avx512-ext.ll
@@ -1644,33 +1644,12 @@
 }
 
 define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
-; KNL-LABEL: trunc_16i8_to_16i1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    # kill: def $ax killed $ax killed $eax
-; KNL-NEXT:    vzeroupper
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: trunc_16i8_to_16i1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
-; SKX-NEXT:    vpmovb2m %xmm0, %k0
-; SKX-NEXT:    kmovd %k0, %eax
-; SKX-NEXT:    # kill: def $ax killed $ax killed $eax
-; SKX-NEXT:    retq
-;
-; AVX512DQNOBW-LABEL: trunc_16i8_to_16i1:
-; AVX512DQNOBW:       # %bb.0:
-; AVX512DQNOBW-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQNOBW-NEXT:    vpslld $31, %zmm0, %zmm0
-; AVX512DQNOBW-NEXT:    vpmovd2m %zmm0, %k0
-; AVX512DQNOBW-NEXT:    kmovw %k0, %eax
-; AVX512DQNOBW-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512DQNOBW-NEXT:    vzeroupper
-; AVX512DQNOBW-NEXT:    retq
+; ALL-LABEL: trunc_16i8_to_16i1:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpsllw $7, %xmm0, %xmm0
+; ALL-NEXT:    vpmovmskb %xmm0, %eax
+; ALL-NEXT:    # kill: def $ax killed $ax killed $eax
+; ALL-NEXT:    retq
   %mask_b = trunc <16 x i8>%a to <16 x i1>
   %mask = bitcast <16 x i1> %mask_b to i16
   ret i16 %mask
diff --git a/test/CodeGen/X86/avx512-extract-subvector-load-store.ll b/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
index e38ee56..df80e08 100644
--- a/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
+++ b/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
@@ -15,8 +15,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 4(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 4(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm2, %xmm2
@@ -43,8 +42,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 6(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 6(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
@@ -71,8 +69,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 8(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 8(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm2, %xmm2
@@ -99,8 +96,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v4i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 8(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 8(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm2, %xmm2
@@ -127,8 +123,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 14(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 14(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
@@ -155,8 +150,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v4i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 12(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 12(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
@@ -183,8 +177,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 16(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 16(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm2, %xmm2
@@ -211,8 +204,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v4i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 16(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 16(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm2, %xmm2
@@ -240,8 +232,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v8i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 16(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 16(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm2, %ymm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm2, %ymm2
@@ -269,8 +260,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 30(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 30(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
@@ -297,8 +287,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v4i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 28(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 28(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
@@ -327,8 +316,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v8i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 24(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 24(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm2, %ymm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7]
@@ -357,8 +345,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 32(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 32(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm2, %xmm2
@@ -385,8 +372,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v4i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 32(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 32(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm2, %xmm2
@@ -414,8 +400,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v8i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 32(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 32(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm2, %ymm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm2, %ymm2
@@ -471,8 +456,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 62(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 62(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
@@ -499,8 +483,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v4i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 60(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 60(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm2, %xmm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
@@ -529,8 +512,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v8i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 56(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 56(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm2, %ymm2 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7]
@@ -690,8 +672,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 4(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 4(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm0, %xmm0
@@ -733,8 +714,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 6(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 6(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
@@ -776,8 +756,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 8(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 8(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm0, %xmm0
@@ -802,8 +781,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v4i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 8(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 8(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm0, %xmm0
@@ -845,8 +823,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 14(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 14(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
@@ -871,8 +848,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v4i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 12(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 12(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
@@ -914,8 +890,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 16(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 16(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm0, %xmm0
@@ -940,8 +915,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v4i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 16(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 16(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm0, %xmm0
@@ -967,8 +941,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v8i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 16(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 16(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm0, %ymm0
@@ -1011,8 +984,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 30(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 30(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
@@ -1037,8 +1009,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v4i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 28(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 28(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
@@ -1065,8 +1036,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v8i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 24(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 24(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [7,7,7,7,7,7,7,7]
@@ -1110,8 +1080,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 32(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 32(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastq %xmm0, %xmm0
@@ -1136,8 +1105,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v4i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 32(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 32(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm0, %xmm0
@@ -1163,8 +1131,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v8i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 32(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 32(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd %xmm0, %ymm0
@@ -1232,8 +1199,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 62(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 62(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
@@ -1258,8 +1224,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v4i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 60(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 60(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
@@ -1286,8 +1251,7 @@
 ;
 ; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v8i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    movzbl 56(%rdi), %eax
-; AVX512NOTDQ-NEXT:    kmovd %eax, %k1
+; AVX512NOTDQ-NEXT:    kmovw 56(%rdi), %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512NOTDQ-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX512NOTDQ-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [7,7,7,7,7,7,7,7]
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll
new file mode 100644
index 0000000..9502ec9
--- /dev/null
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll
@@ -0,0 +1,875 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s
+
+declare <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float>, i8*, <16 x i32>, i16, i32)
+declare void @llvm.x86.avx512.scatter.dps.512 (i8*, i16, <16 x i32>, <16 x float>, i32)
+declare <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double>, i8*, <8 x i32>, i8, i32)
+declare void @llvm.x86.avx512.scatter.dpd.512 (i8*, i8, <8 x i32>, <8 x double>, i32)
+
+declare <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qps.512 (i8*, i8, <8 x i64>, <8 x float>, i32)
+declare <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qpd.512 (i8*, i8, <8 x i64>, <8 x double>, i32)
+
+define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_dps:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vgatherdps (%rsi,%zmm0,4), %zmm1 {%k2}
+; CHECK-NEXT:    vpaddd {{.*}}(%rip), %zmm0, %zmm0
+; CHECK-NEXT:    vscatterdps %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
+  %ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x float> %x, i32 4)
+  ret void
+}
+
+define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_dpd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
+; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
+; CHECK-NEXT:    vscatterdpd %zmm1, (%rdx,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
+  %ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x double> %x, i32 4)
+  ret void
+}
+
+define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_qps:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vgatherqps (%rsi,%zmm0,4), %ymm1 {%k2}
+; CHECK-NEXT:    vpaddq {{.*}}(%rip), %zmm0, %zmm0
+; CHECK-NEXT:    vscatterqps %ymm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
+  call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x float> %x, i32 4)
+  ret void
+}
+
+define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_qpd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k2}
+; CHECK-NEXT:    vpaddq {{.*}}(%rip), %zmm0, %zmm0
+; CHECK-NEXT:    vscatterqpd %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
+  call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x double> %x, i32 4)
+  ret void
+}
+;;
+;; Integer Gather/Scatter
+;;
+declare <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32>, i8*, <16 x i32>, i16, i32)
+declare void @llvm.x86.avx512.scatter.dpi.512 (i8*, i16, <16 x i32>, <16 x i32>, i32)
+declare <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i64>, i8*, <8 x i32>, i8, i32)
+declare void @llvm.x86.avx512.scatter.dpq.512 (i8*, i8, <8 x i32>, <8 x i64>, i32)
+
+declare <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i32>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qpi.512 (i8*, i8, <8 x i64>, <8 x i32>, i32)
+declare <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qpq.512 (i8*, i8, <8 x i64>, <8 x i64>, i32)
+
+define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_dd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpgatherdd (%rsi,%zmm0,4), %zmm1 {%k2}
+; CHECK-NEXT:    vpaddd {{.*}}(%rip), %zmm0, %zmm0
+; CHECK-NEXT:    vpscatterdd %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
+  %ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.scatter.dpi.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x i32> %x, i32 4)
+  ret void
+}
+
+define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_qd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpgatherqd (%rsi,%zmm0,4), %ymm1 {%k2}
+; CHECK-NEXT:    vpaddq {{.*}}(%rip), %zmm0, %zmm0
+; CHECK-NEXT:    vpscatterqd %ymm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i32> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
+  call void @llvm.x86.avx512.scatter.qpi.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i32> %x, i32 4)
+  ret void
+}
+
+define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_qq:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpgatherqq (%rsi,%zmm0,4), %zmm1 {%k2}
+; CHECK-NEXT:    vpaddq {{.*}}(%rip), %zmm0, %zmm0
+; CHECK-NEXT:    vpscatterqq %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
+  call void @llvm.x86.avx512.scatter.qpq.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i64> %x, i32 4)
+  ret void
+}
+
+define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_mask_dq:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpgatherdq (%rsi,%ymm0,4), %zmm1 {%k2}
+; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
+; CHECK-NEXT:    vpscatterdq %zmm1, (%rdx,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i64> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
+  %ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.scatter.dpq.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x i64> %x, i32 4)
+  ret void
+}
+
+define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
+; CHECK-LABEL: gather_mask_dpd_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k1}
+; CHECK-NEXT:    vmovapd %zmm1, (%rdx)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
+  store <8 x double> %x, <8 x double>* %stbuf
+  ret void
+}
+
+define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
+; CHECK-LABEL: gather_mask_qpd_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k1}
+; CHECK-NEXT:    vmovapd %zmm1, (%rdx)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  store <8 x double> %x, <8 x double>* %stbuf
+  ret void
+}
+
+define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
+; CHECK-LABEL: gather_mask_dps_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vgatherdps (%rsi,%zmm0,4), %zmm1 {%k1}
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
+  ret <16 x float> %res
+}
+
+define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
+; CHECK-LABEL: gather_mask_qps_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vgatherqps (%rsi,%zmm0,4), %ymm1 {%k1}
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  ret <8 x float> %res
+}
+
+define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: scatter_mask_dpd_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vmovapd (%rdi), %zmm1
+; CHECK-NEXT:    vscatterdpd %zmm1, (%rcx,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = load <8 x double>, <8 x double>* %src, align 64
+  call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind, <8 x double> %x, i32 4)
+  ret void
+}
+
+define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: scatter_mask_qpd_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vmovapd (%rdi), %zmm1
+; CHECK-NEXT:    vscatterqpd %zmm1, (%rcx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = load <8 x double>, <8 x double>* %src, align 64
+  call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x double> %x, i32 4)
+  ret void
+}
+
+define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: scatter_mask_dps_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vmovaps (%rdi), %zmm1
+; CHECK-NEXT:    vscatterdps %zmm1, (%rcx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = load <16 x float>, <16 x float>* %src, align 64
+  call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind, <16 x float> %x, i32 4)
+  ret void
+}
+
+define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: scatter_mask_qps_execdomain:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vmovaps (%rdi), %ymm1
+; CHECK-NEXT:    vscatterqps %ymm1, (%rcx,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = load <8 x float>, <8 x float>* %src, align 32
+  call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x float> %x, i32 4)
+  ret void
+}
+
+define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
+; CHECK-LABEL: gather_qps:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vgatherqps (%rdi,%zmm0,4), %ymm1 {%k2}
+; CHECK-NEXT:    vpaddq {{.*}}(%rip), %zmm0, %zmm0
+; CHECK-NEXT:    vscatterqps %ymm1, (%rsi,%zmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 -1, i32 4)
+  %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
+  call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 -1, <8 x i64>%ind2, <8 x float> %x, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, i8*, i32, i32)
+declare void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, i8*, i32, i32)
+define void @prefetch(<8 x i64> %ind, i8* %base) {
+; CHECK-LABEL: prefetch:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vgatherpf0qps (%rdi,%zmm0,4) {%k1}
+; CHECK-NEXT:    kxorw %k0, %k0, %k1
+; CHECK-NEXT:    vgatherpf1qps (%rdi,%zmm0,4) {%k1}
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vscatterpf0qps (%rdi,%zmm0,2) {%k1}
+; CHECK-NEXT:    movb $120, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vscatterpf1qps (%rdi,%zmm0,2) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %ind, i8* %base, i32 4, i32 3)
+  call void @llvm.x86.avx512.gatherpf.qps.512(i8 0, <8 x i64> %ind, i8* %base, i32 4, i32 2)
+  call void @llvm.x86.avx512.scatterpf.qps.512(i8 1, <8 x i64> %ind, i8* %base, i32 2, i32 3)
+  call void @llvm.x86.avx512.scatterpf.qps.512(i8 120, <8 x i64> %ind, i8* %base, i32 2, i32 2)
+  ret void
+}
+
+declare <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double>, i8*, <2 x i64>, i8, i32)
+
+define <2 x double> @test_int_x86_avx512_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div2_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherqpd (%rdi,%xmm1,4), %xmm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherqpd (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
+  %res1 = call <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 2)
+  %res2 = fadd <2 x double> %res, %res1
+  ret <2 x double> %res2
+}
+
+declare <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64>, i8*, <2 x i64>, i8, i32)
+
+define <2 x i64> @test_int_x86_avx512_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div2_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpgatherqq (%rdi,%xmm1,8), %xmm0 {%k1}
+; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 8)
+  %res1 = call <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 8)
+  %res2 = add <2 x i64> %res, %res1
+  ret <2 x i64> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double>, i8*, <4 x i64>, i8, i32)
+
+define <4 x double> @test_int_x86_avx512_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div4_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1,2), %ymm2 {%k1}
+; CHECK-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
+  %res1 = call <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 2)
+  %res2 = fadd <4 x double> %res, %res1
+  ret <4 x double> %res2
+}
+
+declare <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64>, i8*, <4 x i64>, i8, i32)
+
+define <4 x i64> @test_int_x86_avx512_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div4_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpgatherqq (%rdi,%ymm1,8), %ymm2 {%k1}
+; CHECK-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 8)
+  %res1 = call <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 8)
+  %res2 = add <4 x i64> %res, %res1
+  ret <4 x i64> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float>, i8*, <2 x i64>, i8, i32)
+
+define <4 x float> @test_int_x86_avx512_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div4_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherqps (%rdi,%xmm1,4), %xmm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherqps (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT:    vaddps %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
+  %res1 = call <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 2)
+  %res2 = fadd <4 x float> %res, %res1
+  ret <4 x float> %res2
+}
+
+declare <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32>, i8*, <2 x i64>, i8, i32)
+
+define <4 x i32> @test_int_x86_avx512_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div4_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpgatherqd (%rdi,%xmm1,4), %xmm2 {%k2}
+; CHECK-NEXT:    vpgatherqd (%rdi,%xmm1,4), %xmm0 {%k1}
+; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 4)
+  %res1 = call <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
+  %res2 = add <4 x i32> %res, %res1
+  ret <4 x i32> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float>, i8*, <4 x i64>, i8, i32)
+
+define <4 x float> @test_int_x86_avx512_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div8_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherqps (%rdi,%ymm1,4), %xmm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherqps (%rdi,%ymm1,2), %xmm2 {%k1}
+; CHECK-NEXT:    vaddps %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
+  %res1 = call <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 2)
+  %res2 = fadd <4 x float> %res, %res1
+  ret <4 x float> %res2
+}
+
+declare <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32>, i8*, <4 x i64>, i8, i32)
+
+define <4 x i32> @test_int_x86_avx512_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3div8_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vmovdqa %xmm0, %xmm2
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpgatherqd (%rdi,%ymm1,4), %xmm2 {%k2}
+; CHECK-NEXT:    vpgatherqd (%rdi,%ymm1,2), %xmm0 {%k1}
+; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %res = call <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
+  %res1 = call <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 2)
+  %res2 = add <4 x i32> %res, %res1
+  ret <4 x i32> %res2
+}
+
+declare <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double>, i8*, <4 x i32>, i8, i32)
+
+define <2 x double> @test_int_x86_avx512_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv2_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,4), %xmm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
+  %res1 = call <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
+  %res2 = fadd <2 x double> %res, %res1
+  ret <2 x double> %res2
+}
+
+declare <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64>, i8*, <4 x i32>, i8, i32)
+
+define <2 x i64> @test_int_x86_avx512_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv2_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
+; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
+  %res1 = call <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
+  %res2 = add <2 x i64> %res, %res1
+  ret <2 x i64> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double>, i8*, <4 x i32>, i8, i32)
+
+define <4 x double> @test_int_x86_avx512_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv4_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,2), %ymm2 {%k1}
+; CHECK-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
+  %res1 = call <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
+  %res2 = fadd <4 x double> %res, %res1
+  ret <4 x double> %res2
+}
+
+declare <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64>, i8*, <4 x i32>, i8, i32)
+
+define <4 x i64> @test_int_x86_avx512_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv4_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpgatherdq (%rdi,%xmm1,8), %ymm0 {%k1}
+; CHECK-NEXT:    vpaddq %ymm0, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
+  %res1 = call <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
+  %res2 = add <4 x i64> %res, %res1
+  ret <4 x i64> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float>, i8*, <4 x i32>, i8, i32)
+
+define <4 x float> @test_int_x86_avx512_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv4_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherdps (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT:    vaddps %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
+  %res1 = call <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
+  %res2 = fadd <4 x float> %res, %res1
+  ret <4 x float> %res2
+}
+
+declare <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32>, i8*, <4 x i32>, i8, i32)
+
+define <4 x i32> @test_int_x86_avx512_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv4_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpgatherdd (%rdi,%xmm1,4), %xmm2 {%k2}
+; CHECK-NEXT:    vpgatherdd (%rdi,%xmm1,2), %xmm0 {%k1}
+; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 4)
+  %res1 = call <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 2)
+  %res2 = add <4 x i32> %res, %res1
+  ret <4 x i32> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float>, i8*, <8 x i32>, i8, i32)
+
+define <8 x float> @test_int_x86_avx512_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv8_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherdps (%rdi,%ymm1,2), %ymm2 {%k1}
+; CHECK-NEXT:    vaddps %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3, i32 4)
+  %res1 = call <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 -1, i32 2)
+  %res2 = fadd <8 x float> %res, %res1
+  ret <8 x float> %res2
+}
+
+declare <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32>, i8*, <8 x i32>, i8, i32)
+
+define <8 x i32> @test_int_x86_avx512_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_gather3siv8_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vmovdqa %ymm0, %ymm2
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpgatherdd (%rdi,%ymm1,4), %ymm2 {%k2}
+; CHECK-NEXT:    vpgatherdd (%rdi,%ymm1,2), %ymm0 {%k1}
+; CHECK-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3, i32 4)
+  %res1 = call <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3, i32 2)
+  %res2 = add <8 x i32> %res, %res1
+  ret <8 x i32> %res2
+}
+
+declare void @llvm.x86.avx512.scatterdiv2.df(i8*, i8, <2 x i64>, <2 x double>, i32)
+
+define void @test_int_x86_avx512_scatterdiv2_df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vscatterqpd %xmm1, (%rdi,%xmm0,2) {%k2}
+; CHECK-NEXT:    vscatterqpd %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv2.df(i8* %x0, i8 -1, <2 x i64> %x2, <2 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv2.df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scatterdiv2.di(i8*, i8, <2 x i64>, <2 x i64>, i32)
+
+define void @test_int_x86_avx512_scatterdiv2_di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpscatterqq %xmm1, (%rdi,%xmm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vpscatterqq %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv2.di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv2.di(i8* %x0, i8 -1, <2 x i64> %x2, <2 x i64> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scatterdiv4.df(i8*, i8, <4 x i64>, <4 x double>, i32)
+
+define void @test_int_x86_avx512_scatterdiv4_df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vscatterqpd %ymm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vscatterqpd %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv4.df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv4.df(i8* %x0, i8 -1, <4 x i64> %x2, <4 x double> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scatterdiv4.di(i8*, i8, <4 x i64>, <4 x i64>, i32)
+
+define void @test_int_x86_avx512_scatterdiv4_di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpscatterqq %ymm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vpscatterqq %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv4.di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv4.di(i8* %x0, i8 -1, <4 x i64> %x2, <4 x i64> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scatterdiv4.sf(i8*, i8, <2 x i64>, <4 x float>, i32)
+
+define void @test_int_x86_avx512_scatterdiv4_sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%xmm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv4.sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv4.sf(i8* %x0, i8 -1, <2 x i64> %x2, <4 x float> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scatterdiv4.si(i8*, i8, <2 x i64>, <4 x i32>, i32)
+
+define void @test_int_x86_avx512_scatterdiv4_si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%xmm0,2) {%k2}
+; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv4.si(i8* %x0, i8 -1, <2 x i64> %x2, <4 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv4.si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scatterdiv8.sf(i8*, i8, <4 x i64>, <4 x float>, i32)
+
+define void @test_int_x86_avx512_scatterdiv8_sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv8.sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv8.sf(i8* %x0, i8 -1, <4 x i64> %x2, <4 x float> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scatterdiv8.si(i8*, i8, <4 x i64>, <4 x i32>, i32)
+
+define void @test_int_x86_avx512_scatterdiv8_si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scatterdiv8.si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.scatterdiv8.si(i8* %x0, i8 -1, <4 x i64> %x2, <4 x i32> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv2.df(i8*, i8, <4 x i32>, <2 x double>, i32)
+
+define void @test_int_x86_avx512_scattersiv2_df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv2_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vscatterdpd %xmm1, (%rdi,%xmm0,2) {%k2}
+; CHECK-NEXT:    vscatterdpd %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv2.df(i8* %x0, i8 -1, <4 x i32> %x2, <2 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv2.df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv2.di(i8*, i8, <4 x i32>, <2 x i64>, i32)
+
+define void @test_int_x86_avx512_scattersiv2_di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv2_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vpscatterdq %xmm1, (%rdi,%xmm0,2) {%k2}
+; CHECK-NEXT:    vpscatterdq %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv2.di(i8* %x0, i8 -1, <4 x i32> %x2, <2 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv2.di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv4.df(i8*, i8, <4 x i32>, <4 x double>, i32)
+
+define void @test_int_x86_avx512_scattersiv4_df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv4_df:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vscatterdpd %ymm1, (%rdi,%xmm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vscatterdpd %ymm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv4.df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv4.df(i8* %x0, i8 -1, <4 x i32> %x2, <4 x double> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv4.di(i8*, i8, <4 x i32>, <4 x i64>, i32)
+
+define void @test_int_x86_avx512_scattersiv4_di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv4_di:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    vpscatterdq %ymm1, (%rdi,%xmm0,2) {%k2}
+; CHECK-NEXT:    vpscatterdq %ymm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv4.di(i8* %x0, i8 -1, <4 x i32> %x2, <4 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv4.di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv4.sf(i8*, i8, <4 x i32>, <4 x float>, i32)
+
+define void @test_int_x86_avx512_scattersiv4_sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv4_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vscatterdps %xmm1, (%rdi,%xmm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vscatterdps %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv4.sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv4.sf(i8* %x0, i8 -1, <4 x i32> %x2, <4 x float> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv4.si(i8*, i8, <4 x i32>, <4 x i32>, i32)
+
+define void @test_int_x86_avx512_scattersiv4_si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv4_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpscatterdd %xmm1, (%rdi,%xmm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vpscatterdd %xmm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv4.si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv4.si(i8* %x0, i8 -1, <4 x i32> %x2, <4 x i32> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv8.sf(i8*, i8, <8 x i32>, <8 x float>, i32)
+
+define void @test_int_x86_avx512_scattersiv8_sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv8_sf:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vscatterdps %ymm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vscatterdps %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv8.sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv8.sf(i8* %x0, i8 -1, <8 x i32> %x2, <8 x float> %x3, i32 4)
+  ret void
+}
+
+declare void @llvm.x86.avx512.scattersiv8.si(i8*, i8, <8 x i32>, <8 x i32>, i32)
+
+define void @test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3) {
+; CHECK-LABEL: test_int_x86_avx512_scattersiv8_si:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1
+; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 -1, <8 x i32> %x2, <8 x i32> %x3, i32 4)
+  ret void
+}
+
+define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
+; CHECK-LABEL: scatter_mask_test:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    kxorw %k0, %k0, %k1
+; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
+; CHECK-NEXT:    movb $96, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 -1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 0, <8 x i32> %x2, <8 x i32> %x3, i32 4)
+  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 96, <8 x i32> %x2, <8 x i32> %x3, i32 4)
+  ret void
+}
+
+define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %base) {
+; CHECK-LABEL: gather_mask_test:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm2 {%k1}
+; CHECK-NEXT:    kxorw %k0, %k0, %k1
+; CHECK-NEXT:    vmovaps %zmm1, %zmm3
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
+; CHECK-NEXT:    vaddps %zmm3, %zmm2, %zmm2
+; CHECK-NEXT:    movw $1, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vmovaps %zmm1, %zmm3
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
+; CHECK-NEXT:    movw $220, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+; CHECK-NEXT:    vaddps %zmm3, %zmm1, %zmm0
+; CHECK-NEXT:    vaddps %zmm2, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, i16 -1, i32 4)
+  %res1 = call <16 x float> @llvm.x86.avx512.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, i16 0, i32 4)
+  %res2 = call <16 x float> @llvm.x86.avx512.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, i16 1, i32 4)
+  %res3 = call <16 x float> @llvm.x86.avx512.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, i16 220, i32 4)
+
+  %res4 = fadd <16 x float> %res, %res1
+  %res5 = fadd <16 x float> %res3, %res2
+  %res6 = fadd <16 x float> %res5, %res4
+  ret <16 x float> %res6
+}
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 9502ec9..b1c66ab 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -1,17 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s
 
-declare <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float>, i8*, <16 x i32>, i16, i32)
-declare void @llvm.x86.avx512.scatter.dps.512 (i8*, i16, <16 x i32>, <16 x float>, i32)
-declare <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double>, i8*, <8 x i32>, i8, i32)
-declare void @llvm.x86.avx512.scatter.dpd.512 (i8*, i8, <8 x i32>, <8 x double>, i32)
-
-declare <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float>, i8*, <8 x i64>, i8, i32)
-declare void @llvm.x86.avx512.scatter.qps.512 (i8*, i8, <8 x i64>, <8 x float>, i32)
-declare <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double>, i8*, <8 x i64>, i8, i32)
-declare void @llvm.x86.avx512.scatter.qpd.512 (i8*, i8, <8 x i64>, <8 x double>, i32)
-
-define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dps:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -21,13 +11,14 @@
 ; CHECK-NEXT:    vscatterdps %zmm1, (%rdx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
+  %1 = bitcast i16 %mask to <16 x i1>
+  %x = call <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, <16 x i1> %1, i32 4)
   %ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
-  call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x float> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.dps.512(i8* %stbuf, <16 x i1> %1, <16 x i32> %ind2, <16 x float> %x, i32 4)
   ret void
 }
 
-define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dpd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -37,13 +28,14 @@
 ; CHECK-NEXT:    vscatterdpd %zmm1, (%rdx,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x double> @llvm.x86.avx512.mask.gather.dpd.512(<8 x double> %src, i8* %base, <8 x i32> %ind, <8 x i1> %1, i32 4)
   %ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
-  call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x double> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.dpd.512(i8* %stbuf, <8 x i1> %1, <8 x i32> %ind2, <8 x double> %x, i32 4)
   ret void
 }
 
-define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qps:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -53,13 +45,14 @@
 ; CHECK-NEXT:    vscatterqps %ymm1, (%rdx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float> %src, i8* %base, <8 x i64> %ind, <8 x i1> %1, i32 4)
   %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
-  call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x float> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.qps.512(i8* %stbuf, <8 x i1> %1, <8 x i64> %ind2, <8 x float> %x, i32 4)
   ret void
 }
 
-define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qpd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -69,25 +62,17 @@
 ; CHECK-NEXT:    vscatterqpd %zmm1, (%rdx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x double> @llvm.x86.avx512.mask.gather.qpd.512(<8 x double> %src, i8* %base, <8 x i64> %ind, <8 x i1> %1, i32 4)
   %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
-  call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x double> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.qpd.512(i8* %stbuf, <8 x i1> %1, <8 x i64> %ind2, <8 x double> %x, i32 4)
   ret void
 }
 ;;
 ;; Integer Gather/Scatter
 ;;
-declare <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32>, i8*, <16 x i32>, i16, i32)
-declare void @llvm.x86.avx512.scatter.dpi.512 (i8*, i16, <16 x i32>, <16 x i32>, i32)
-declare <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i64>, i8*, <8 x i32>, i8, i32)
-declare void @llvm.x86.avx512.scatter.dpq.512 (i8*, i8, <8 x i32>, <8 x i64>, i32)
 
-declare <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i32>, i8*, <8 x i64>, i8, i32)
-declare void @llvm.x86.avx512.scatter.qpi.512 (i8*, i8, <8 x i64>, <8 x i32>, i32)
-declare <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64>, i8*, <8 x i64>, i8, i32)
-declare void @llvm.x86.avx512.scatter.qpq.512 (i8*, i8, <8 x i64>, <8 x i64>, i32)
-
-define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -97,13 +82,14 @@
 ; CHECK-NEXT:    vpscatterdd %zmm1, (%rdx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
+  %1 = bitcast i16 %mask to <16 x i1>
+  %x = call <16 x i32> @llvm.x86.avx512.mask.gather.dpi.512(<16 x i32> %src, i8* %base, <16 x i32> %ind, <16 x i1> %1, i32 4)
   %ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
-  call void @llvm.x86.avx512.scatter.dpi.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x i32> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.dpi.512(i8* %stbuf, <16 x i1> %1, <16 x i32> %ind2, <16 x i32> %x, i32 4)
   ret void
 }
 
-define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -113,13 +99,14 @@
 ; CHECK-NEXT:    vpscatterqd %ymm1, (%rdx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i32> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x i32> @llvm.x86.avx512.mask.gather.qpi.512(<8 x i32> %src, i8* %base, <8 x i64> %ind, <8 x i1> %1, i32 4)
   %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
-  call void @llvm.x86.avx512.scatter.qpi.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i32> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.qpi.512(i8* %stbuf, <8 x i1> %1, <8 x i64> %ind2, <8 x i32> %x, i32 4)
   ret void
 }
 
-define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qq:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -129,13 +116,14 @@
 ; CHECK-NEXT:    vpscatterqq %zmm1, (%rdx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x i64> @llvm.x86.avx512.mask.gather.qpq.512(<8 x i64> %src, i8* %base, <8 x i64> %ind, <8 x i1> %1, i32 4)
   %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
-  call void @llvm.x86.avx512.scatter.qpq.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i64> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.qpq.512(i8* %stbuf, <8 x i1> %1, <8 x i64> %ind2, <8 x i64> %x, i32 4)
   ret void
 }
 
-define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf)  {
+define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dq:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -145,13 +133,14 @@
 ; CHECK-NEXT:    vpscatterdq %zmm1, (%rdx,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i64> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x i64> @llvm.x86.avx512.mask.gather.dpq.512(<8 x i64> %src, i8* %base, <8 x i32> %ind, <8 x i1> %1, i32 4)
   %ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
-  call void @llvm.x86.avx512.scatter.dpq.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x i64> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.dpq.512(i8* %stbuf, <8 x i1> %1, <8 x i32> %ind2, <8 x i64> %x, i32 4)
   ret void
 }
 
-define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf)  {
+define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
 ; CHECK-LABEL: gather_mask_dpd_execdomain:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -159,12 +148,13 @@
 ; CHECK-NEXT:    vmovapd %zmm1, (%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x double> @llvm.x86.avx512.mask.gather.dpd.512(<8 x double> %src, i8* %base, <8 x i32> %ind, <8 x i1> %1, i32 4)
   store <8 x double> %x, <8 x double>* %stbuf
   ret void
 }
 
-define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf)  {
+define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
 ; CHECK-LABEL: gather_mask_qpd_execdomain:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
@@ -172,34 +162,37 @@
 ; CHECK-NEXT:    vmovapd %zmm1, (%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+  %1 = bitcast i8 %mask to <8 x i1>
+  %x = call <8 x double> @llvm.x86.avx512.mask.gather.qpd.512(<8 x double> %src, i8* %base, <8 x i64> %ind, <8 x i1> %1, i32 4)
   store <8 x double> %x, <8 x double>* %stbuf
   ret void
 }
 
-define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base)  {
+define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
 ; CHECK-LABEL: gather_mask_dps_execdomain:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vgatherdps (%rsi,%zmm0,4), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
-  ret <16 x float> %res;
+  %1 = bitcast i16 %mask to <16 x i1>
+  %res = call <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, <16 x i1> %1, i32 4)
+  ret <16 x float> %res
 }
 
-define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base)  {
+define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
 ; CHECK-LABEL: gather_mask_qps_execdomain:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vgatherqps (%rsi,%zmm0,4), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
-  ret <8 x float> %res;
+  %1 = bitcast i8 %mask to <8 x i1>
+  %res = call <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float> %src, i8* %base, <8 x i64> %ind, <8 x i1> %1, i32 4)
+  ret <8 x float> %res
 }
 
-define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf)  {
+define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: scatter_mask_dpd_execdomain:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
@@ -207,8 +200,9 @@
 ; CHECK-NEXT:    vscatterdpd %zmm1, (%rcx,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
+  %1 = bitcast i8 %mask to <8 x i1>
   %x = load <8 x double>, <8 x double>* %src, align 64
-  call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind, <8 x double> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.dpd.512(i8* %stbuf, <8 x i1> %1, <8 x i32>%ind, <8 x double> %x, i32 4)
   ret void
 }
 
@@ -220,8 +214,9 @@
 ; CHECK-NEXT:    vscatterqpd %zmm1, (%rcx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
+  %1 = bitcast i8 %mask to <8 x i1>
   %x = load <8 x double>, <8 x double>* %src, align 64
-  call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x double> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.qpd.512(i8* %stbuf, <8 x i1> %1, <8 x i64>%ind, <8 x double> %x, i32 4)
   ret void
 }
 
@@ -233,8 +228,9 @@
 ; CHECK-NEXT:    vscatterdps %zmm1, (%rcx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
+  %1 = bitcast i16 %mask to <16 x i1>
   %x = load <16 x float>, <16 x float>* %src, align 64
-  call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind, <16 x float> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.dps.512(i8* %stbuf, <16 x i1> %1, <16 x i32>%ind, <16 x float> %x, i32 4)
   ret void
 }
 
@@ -246,25 +242,26 @@
 ; CHECK-NEXT:    vscatterqps %ymm1, (%rcx,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
+  %1 = bitcast i8 %mask to <8 x i1>
   %x = load <8 x float>, <8 x float>* %src, align 32
-  call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x float> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.qps.512(i8* %stbuf, <8 x i1> %1, <8 x i64>%ind, <8 x float> %x, i32 4)
   ret void
 }
 
-define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf)  {
+define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_qps:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vgatherqps (%rdi,%zmm0,4), %ymm1 {%k2}
 ; CHECK-NEXT:    vpaddq {{.*}}(%rip), %zmm0, %zmm0
 ; CHECK-NEXT:    vscatterqps %ymm1, (%rsi,%zmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 -1, i32 4)
+  %x = call <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float> %src, i8* %base, <8 x i64> %ind, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i32 4)
   %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
-  call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 -1, <8 x i64>%ind2, <8 x float> %x, i32 4)
+  call void @llvm.x86.avx512.mask.scatter.qps.512(i8* %stbuf, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i64> %ind2, <8 x float> %x, i32 4)
   ret void
 }
 
@@ -292,10 +289,8 @@
   ret void
 }
 
-declare <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double>, i8*, <2 x i64>, i8, i32)
-
-define <2 x double>@test_int_x86_avx512_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div2_df:
+define <2 x double> @test_int_x86_avx512_mask_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div2_df:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqpd (%rdi,%xmm1,4), %xmm0 {%k1}
@@ -304,31 +299,33 @@
 ; CHECK-NEXT:    vgatherqpd (%rdi,%xmm1,2), %xmm2 {%k1}
 ; CHECK-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
-  %res1 = call <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <2 x i32> <i32 0, i32 1>
+  %res = call <2 x double> @llvm.x86.avx512.mask.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> %extract, i32 4)
+  %res1 = call <2 x double> @llvm.x86.avx512.mask.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> <i1 true, i1 true>, i32 2)
   %res2 = fadd <2 x double> %res, %res1
   ret <2 x double> %res2
 }
 
-declare <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64>, i8*, <2 x i64>, i8, i32)
-
-define <2 x i64>@test_int_x86_avx512_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div2_di:
+define <2 x i64> @test_int_x86_avx512_mask_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div2_di:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherqq (%rdi,%xmm1,8), %xmm0 {%k1}
 ; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 8)
-  %res1 = call <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 8)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %1, <8 x i1> %1, <2 x i32> <i32 0, i32 1>
+  %res = call <2 x i64> @llvm.x86.avx512.mask.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> %extract1, i32 8)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %res1 = call <2 x i64> @llvm.x86.avx512.mask.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> %extract, i32 8)
   %res2 = add <2 x i64> %res, %res1
   ret <2 x i64> %res2
 }
 
-declare <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double>, i8*, <4 x i64>, i8, i32)
-
-define <4 x double>@test_int_x86_avx512_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div4_df:
+define <4 x double> @test_int_x86_avx512_mask_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_df:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
@@ -337,16 +334,16 @@
 ; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1,2), %ymm2 {%k1}
 ; CHECK-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
-  %res1 = call <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = call <4 x double> @llvm.x86.avx512.mask.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> %extract, i32 4)
+  %res1 = call <4 x double> @llvm.x86.avx512.mask.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 2)
   %res2 = fadd <4 x double> %res, %res1
   ret <4 x double> %res2
 }
 
-declare <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64>, i8*, <4 x i64>, i8, i32)
-
-define <4 x i64>@test_int_x86_avx512_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div4_di:
+define <4 x i64> @test_int_x86_avx512_mask_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_di:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
@@ -355,16 +352,16 @@
 ; CHECK-NEXT:    vpgatherqq (%rdi,%ymm1,8), %ymm2 {%k1}
 ; CHECK-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 8)
-  %res1 = call <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 8)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = call <4 x i64> @llvm.x86.avx512.mask.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> %extract, i32 8)
+  %res1 = call <4 x i64> @llvm.x86.avx512.mask.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 8)
   %res2 = add <4 x i64> %res, %res1
   ret <4 x i64> %res2
 }
 
-declare <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float>, i8*, <2 x i64>, i8, i32)
-
-define <4 x float>@test_int_x86_avx512_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div4_sf:
+define <4 x float> @test_int_x86_avx512_mask_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_sf:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqps (%rdi,%xmm1,4), %xmm0 {%k1}
@@ -373,34 +370,34 @@
 ; CHECK-NEXT:    vgatherqps (%rdi,%xmm1,2), %xmm2 {%k1}
 ; CHECK-NEXT:    vaddps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
-  %res1 = call <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <2 x i32> <i32 0, i32 1>
+  %res = call <4 x float> @llvm.x86.avx512.mask.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> %extract, i32 4)
+  %res1 = call <4 x float> @llvm.x86.avx512.mask.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> <i1 true, i1 true>, i32 2)
   %res2 = fadd <4 x float> %res, %res1
   ret <4 x float> %res2
 }
 
-declare <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32>, i8*, <2 x i64>, i8, i32)
-
-define <4 x i32>@test_int_x86_avx512_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div4_si:
+define <4 x i32> @test_int_x86_avx512_mask_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_si:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovd %esi, %k1
-; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vpgatherqd (%rdi,%xmm1,4), %xmm2 {%k2}
+; CHECK-NEXT:    vpgatherqd (%rdi,%xmm1,4), %xmm2 {%k1}
+; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherqd (%rdi,%xmm1,4), %xmm0 {%k1}
 ; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 4)
-  %res1 = call <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
+  %res = call <4 x i32> @llvm.x86.avx512.mask.gather3div4.si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> <i1 true, i1 true>, i32 4)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <2 x i32> <i32 0, i32 1>
+  %res1 = call <4 x i32> @llvm.x86.avx512.mask.gather3div4.si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, <2 x i1> %extract, i32 4)
   %res2 = add <4 x i32> %res, %res1
   ret <4 x i32> %res2
 }
 
-declare <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float>, i8*, <4 x i64>, i8, i32)
-
-define <4 x float>@test_int_x86_avx512_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div8_sf:
+define <4 x float> @test_int_x86_avx512_mask_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div8_sf:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqps (%rdi,%ymm1,4), %xmm0 {%k1}
@@ -410,16 +407,16 @@
 ; CHECK-NEXT:    vaddps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
-  %res1 = call <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = call <4 x float> @llvm.x86.avx512.mask.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> %extract, i32 4)
+  %res1 = call <4 x float> @llvm.x86.avx512.mask.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 2)
   %res2 = fadd <4 x float> %res, %res1
   ret <4 x float> %res2
 }
 
-declare <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32>, i8*, <4 x i64>, i8, i32)
-
-define <4 x i32>@test_int_x86_avx512_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3div8_si:
+define <4 x i32> @test_int_x86_avx512_mask_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3div8_si:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovdqa %xmm0, %xmm2
@@ -429,16 +426,18 @@
 ; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %res = call <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
-  %res1 = call <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = call <4 x i32> @llvm.x86.avx512.mask.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> %extract1, i32 4)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res1 = call <4 x i32> @llvm.x86.avx512.mask.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, <4 x i1> %extract, i32 2)
   %res2 = add <4 x i32> %res, %res1
   ret <4 x i32> %res2
 }
 
-declare <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double>, i8*, <4 x i32>, i8, i32)
-
-define <2 x double>@test_int_x86_avx512_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv2_df:
+define <2 x double> @test_int_x86_avx512_mask_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv2_df:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,4), %xmm0 {%k1}
@@ -447,31 +446,33 @@
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,2), %xmm2 {%k1}
 ; CHECK-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
-  %res1 = call <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <2 x i32> <i32 0, i32 1>
+  %res = call <2 x double> @llvm.x86.avx512.mask.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, <2 x i1> %extract, i32 4)
+  %res1 = call <2 x double> @llvm.x86.avx512.mask.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, <2 x i1> <i1 true, i1 true>, i32 2)
   %res2 = fadd <2 x double> %res, %res1
   ret <2 x double> %res2
 }
 
-declare <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64>, i8*, <4 x i32>, i8, i32)
-
-define <2 x i64>@test_int_x86_avx512_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv2_di:
+define <2 x i64> @test_int_x86_avx512_mask_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv2_di:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
 ; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
-  %res1 = call <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %1, <8 x i1> %1, <2 x i32> <i32 0, i32 1>
+  %res = call <2 x i64> @llvm.x86.avx512.mask.gather3siv2.di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, <2 x i1> %extract1, i32 8)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %res1 = call <2 x i64> @llvm.x86.avx512.mask.gather3siv2.di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, <2 x i1> %extract, i32 8)
   %res2 = add <2 x i64> %res, %res1
   ret <2 x i64> %res2
 }
 
-declare <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double>, i8*, <4 x i32>, i8, i32)
-
-define <4 x double>@test_int_x86_avx512_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv4_df:
+define <4 x double> @test_int_x86_avx512_mask_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_df:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
@@ -480,31 +481,33 @@
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,2), %ymm2 {%k1}
 ; CHECK-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
-  %res1 = call <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = call <4 x double> @llvm.x86.avx512.mask.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> %extract, i32 4)
+  %res1 = call <4 x double> @llvm.x86.avx512.mask.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 2)
   %res2 = fadd <4 x double> %res, %res1
   ret <4 x double> %res2
 }
 
-declare <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64>, i8*, <4 x i32>, i8, i32)
-
-define <4 x i64>@test_int_x86_avx512_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv4_di:
+define <4 x i64> @test_int_x86_avx512_mask_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_di:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherdq (%rdi,%xmm1,8), %ymm0 {%k1}
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
-  %res1 = call <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = call <4 x i64> @llvm.x86.avx512.mask.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> %extract1, i32 8)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res1 = call <4 x i64> @llvm.x86.avx512.mask.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> %extract, i32 8)
   %res2 = add <4 x i64> %res, %res1
   ret <4 x i64> %res2
 }
 
-declare <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float>, i8*, <4 x i32>, i8, i32)
-
-define <4 x float>@test_int_x86_avx512_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv4_sf:
+define <4 x float> @test_int_x86_avx512_mask_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_sf:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
@@ -513,34 +516,34 @@
 ; CHECK-NEXT:    vgatherdps (%rdi,%xmm1,2), %xmm2 {%k1}
 ; CHECK-NEXT:    vaddps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
-  %res1 = call <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = call <4 x float> @llvm.x86.avx512.mask.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> %extract, i32 4)
+  %res1 = call <4 x float> @llvm.x86.avx512.mask.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 2)
   %res2 = fadd <4 x float> %res, %res1
   ret <4 x float> %res2
 }
 
-declare <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32>, i8*, <4 x i32>, i8, i32)
-
-define <4 x i32>@test_int_x86_avx512_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv4_si:
+define <4 x i32> @test_int_x86_avx512_mask_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_si:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovd %esi, %k1
-; CHECK-NEXT:    kxnorw %k0, %k0, %k2
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vpgatherdd (%rdi,%xmm1,4), %xmm2 {%k2}
+; CHECK-NEXT:    vpgatherdd (%rdi,%xmm1,4), %xmm2 {%k1}
+; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherdd (%rdi,%xmm1,2), %xmm0 {%k1}
 ; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 4)
-  %res1 = call <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 2)
+  %res = call <4 x i32> @llvm.x86.avx512.mask.gather3siv4.si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 4)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %1, <8 x i1> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res1 = call <4 x i32> @llvm.x86.avx512.mask.gather3siv4.si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, <4 x i1> %extract, i32 2)
   %res2 = add <4 x i32> %res, %res1
   ret <4 x i32> %res2
 }
 
-declare <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float>, i8*, <8 x i32>, i8, i32)
-
-define <8 x float>@test_int_x86_avx512_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv8_sf:
+define <8 x float> @test_int_x86_avx512_mask_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv8_sf:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
@@ -549,16 +552,15 @@
 ; CHECK-NEXT:    vgatherdps (%rdi,%ymm1,2), %ymm2 {%k1}
 ; CHECK-NEXT:    vaddps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 -1, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %res = call <8 x float> @llvm.x86.avx512.mask.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, <8 x i1> %1, i32 4)
+  %res1 = call <8 x float> @llvm.x86.avx512.mask.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i32 2)
   %res2 = fadd <8 x float> %res, %res1
   ret <8 x float> %res2
 }
 
-declare <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32>, i8*, <8 x i32>, i8, i32)
-
-define <8 x i32>@test_int_x86_avx512_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_gather3siv8_si:
+define <8 x i32> @test_int_x86_avx512_mask_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv8_si:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovdqa %ymm0, %ymm2
@@ -567,14 +569,14 @@
 ; CHECK-NEXT:    vpgatherdd (%rdi,%ymm1,2), %ymm0 {%k1}
 ; CHECK-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3, i32 2)
+  %1 = bitcast i8 %x3 to <8 x i1>
+  %res = call <8 x i32> @llvm.x86.avx512.mask.gather3siv8.si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, <8 x i1> %1, i32 4)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %res1 = call <8 x i32> @llvm.x86.avx512.mask.gather3siv8.si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, <8 x i1> %2, i32 2)
   %res2 = add <8 x i32> %res, %res1
   ret <8 x i32> %res2
 }
 
-declare void @llvm.x86.avx512.scatterdiv2.df(i8*, i8, <2 x i64>, <2 x double>, i32)
-
 define void@test_int_x86_avx512_scatterdiv2_df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_df:
 ; CHECK:       ## %bb.0:
@@ -583,13 +585,13 @@
 ; CHECK-NEXT:    vscatterqpd %xmm1, (%rdi,%xmm0,2) {%k2}
 ; CHECK-NEXT:    vscatterqpd %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv2.df(i8* %x0, i8 -1, <2 x i64> %x2, <2 x double> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv2.df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  call void @llvm.x86.avx512.mask.scatterdiv2.df(i8* %x0, <2 x i1> <i1 true, i1 true>, <2 x i64> %x2, <2 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv2.df(i8* %x0, <2 x i1> %2, <2 x i64> %x2, <2 x double> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scatterdiv2.di(i8*, i8, <2 x i64>, <2 x i64>, i32)
-
 define void@test_int_x86_avx512_scatterdiv2_di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_di:
 ; CHECK:       ## %bb.0:
@@ -598,13 +600,13 @@
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vpscatterqq %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv2.di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv2.di(i8* %x0, i8 -1, <2 x i64> %x2, <2 x i64> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  call void @llvm.x86.avx512.mask.scatterdiv2.di(i8* %x0, <2 x i1> %2, <2 x i64> %x2, <2 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv2.di(i8* %x0, <2 x i1> <i1 true, i1 true>, <2 x i64> %x2, <2 x i64> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scatterdiv4.df(i8*, i8, <4 x i64>, <4 x double>, i32)
-
 define void@test_int_x86_avx512_scatterdiv4_df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_df:
 ; CHECK:       ## %bb.0:
@@ -614,13 +616,13 @@
 ; CHECK-NEXT:    vscatterqpd %ymm1, (%rdi,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv4.df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv4.df(i8* %x0, i8 -1, <4 x i64> %x2, <4 x double> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scatterdiv4.df(i8* %x0, <4 x i1> %2, <4 x i64> %x2, <4 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv4.df(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> %x2, <4 x double> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scatterdiv4.di(i8*, i8, <4 x i64>, <4 x i64>, i32)
-
 define void@test_int_x86_avx512_scatterdiv4_di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_di:
 ; CHECK:       ## %bb.0:
@@ -630,13 +632,13 @@
 ; CHECK-NEXT:    vpscatterqq %ymm1, (%rdi,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv4.di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv4.di(i8* %x0, i8 -1, <4 x i64> %x2, <4 x i64> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scatterdiv4.di(i8* %x0, <4 x i1> %2, <4 x i64> %x2, <4 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv4.di(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> %x2, <4 x i64> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scatterdiv4.sf(i8*, i8, <2 x i64>, <4 x float>, i32)
-
 define void@test_int_x86_avx512_scatterdiv4_sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_sf:
 ; CHECK:       ## %bb.0:
@@ -645,13 +647,13 @@
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv4.sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv4.sf(i8* %x0, i8 -1, <2 x i64> %x2, <4 x float> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  call void @llvm.x86.avx512.mask.scatterdiv4.sf(i8* %x0, <2 x i1> %2, <2 x i64> %x2, <4 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv4.sf(i8* %x0, <2 x i1> <i1 true, i1 true>, <2 x i64> %x2, <4 x float> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scatterdiv4.si(i8*, i8, <2 x i64>, <4 x i32>, i32)
-
 define void@test_int_x86_avx512_scatterdiv4_si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_si:
 ; CHECK:       ## %bb.0:
@@ -660,13 +662,13 @@
 ; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%xmm0,2) {%k2}
 ; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv4.si(i8* %x0, i8 -1, <2 x i64> %x2, <4 x i32> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv4.si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  call void @llvm.x86.avx512.mask.scatterdiv4.si(i8* %x0, <2 x i1> <i1 true, i1 true>, <2 x i64> %x2, <4 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv4.si(i8* %x0, <2 x i1> %2, <2 x i64> %x2, <4 x i32> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scatterdiv8.sf(i8*, i8, <4 x i64>, <4 x float>, i32)
-
 define void@test_int_x86_avx512_scatterdiv8_sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_sf:
 ; CHECK:       ## %bb.0:
@@ -676,13 +678,13 @@
 ; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv8.sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv8.sf(i8* %x0, i8 -1, <4 x i64> %x2, <4 x float> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scatterdiv8.sf(i8* %x0, <4 x i1> %2, <4 x i64> %x2, <4 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv8.sf(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> %x2, <4 x float> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scatterdiv8.si(i8*, i8, <4 x i64>, <4 x i32>, i32)
-
 define void@test_int_x86_avx512_scatterdiv8_si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_si:
 ; CHECK:       ## %bb.0:
@@ -692,13 +694,13 @@
 ; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scatterdiv8.si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3, i32 2)
-  call void @llvm.x86.avx512.scatterdiv8.si(i8* %x0, i8 -1, <4 x i64> %x2, <4 x i32> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scatterdiv8.si(i8* %x0, <4 x i1> %2, <4 x i64> %x2, <4 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scatterdiv8.si(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> %x2, <4 x i32> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv2.df(i8*, i8, <4 x i32>, <2 x double>, i32)
-
 define void@test_int_x86_avx512_scattersiv2_df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv2_df:
 ; CHECK:       ## %bb.0:
@@ -707,13 +709,13 @@
 ; CHECK-NEXT:    vscatterdpd %xmm1, (%rdi,%xmm0,2) {%k2}
 ; CHECK-NEXT:    vscatterdpd %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv2.df(i8* %x0, i8 -1, <4 x i32> %x2, <2 x double> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv2.df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  call void @llvm.x86.avx512.mask.scattersiv2.df(i8* %x0, <2 x i1> <i1 true, i1 true>, <4 x i32> %x2, <2 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv2.df(i8* %x0, <2 x i1> %2, <4 x i32> %x2, <2 x double> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv2.di(i8*, i8, <4 x i32>, <2 x i64>, i32)
-
 define void@test_int_x86_avx512_scattersiv2_di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv2_di:
 ; CHECK:       ## %bb.0:
@@ -722,13 +724,13 @@
 ; CHECK-NEXT:    vpscatterdq %xmm1, (%rdi,%xmm0,2) {%k2}
 ; CHECK-NEXT:    vpscatterdq %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv2.di(i8* %x0, i8 -1, <4 x i32> %x2, <2 x i64> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv2.di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  call void @llvm.x86.avx512.mask.scattersiv2.di(i8* %x0, <2 x i1> <i1 true, i1 true>, <4 x i32> %x2, <2 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv2.di(i8* %x0, <2 x i1> %2, <4 x i32> %x2, <2 x i64> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv4.df(i8*, i8, <4 x i32>, <4 x double>, i32)
-
 define void@test_int_x86_avx512_scattersiv4_df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_df:
 ; CHECK:       ## %bb.0:
@@ -738,13 +740,13 @@
 ; CHECK-NEXT:    vscatterdpd %ymm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv4.df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv4.df(i8* %x0, i8 -1, <4 x i32> %x2, <4 x double> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scattersiv4.df(i8* %x0, <4 x i1> %2, <4 x i32> %x2, <4 x double> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv4.df(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> %x2, <4 x double> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv4.di(i8*, i8, <4 x i32>, <4 x i64>, i32)
-
 define void@test_int_x86_avx512_scattersiv4_di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_di:
 ; CHECK:       ## %bb.0:
@@ -754,13 +756,13 @@
 ; CHECK-NEXT:    vpscatterdq %ymm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv4.di(i8* %x0, i8 -1, <4 x i32> %x2, <4 x i64> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv4.di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scattersiv4.di(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> %x2, <4 x i64> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv4.di(i8* %x0, <4 x i1> %2, <4 x i32> %x2, <4 x i64> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv4.sf(i8*, i8, <4 x i32>, <4 x float>, i32)
-
 define void@test_int_x86_avx512_scattersiv4_sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_sf:
 ; CHECK:       ## %bb.0:
@@ -769,13 +771,13 @@
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vscatterdps %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv4.sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv4.sf(i8* %x0, i8 -1, <4 x i32> %x2, <4 x float> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scattersiv4.sf(i8* %x0, <4 x i1> %2, <4 x i32> %x2, <4 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv4.sf(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> %x2, <4 x float> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv4.si(i8*, i8, <4 x i32>, <4 x i32>, i32)
-
 define void@test_int_x86_avx512_scattersiv4_si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_si:
 ; CHECK:       ## %bb.0:
@@ -784,13 +786,13 @@
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vpscatterdd %xmm1, (%rdi,%xmm0,4) {%k1}
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv4.si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv4.si(i8* %x0, i8 -1, <4 x i32> %x2, <4 x i32> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  %2 = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @llvm.x86.avx512.mask.scattersiv4.si(i8* %x0, <4 x i1> %2, <4 x i32> %x2, <4 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv4.si(i8* %x0, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> %x2, <4 x i32> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv8.sf(i8*, i8, <8 x i32>, <8 x float>, i32)
-
 define void@test_int_x86_avx512_scattersiv8_sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv8_sf:
 ; CHECK:       ## %bb.0:
@@ -800,13 +802,12 @@
 ; CHECK-NEXT:    vscatterdps %ymm1, (%rdi,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv8.sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv8.sf(i8* %x0, i8 -1, <8 x i32> %x2, <8 x float> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  call void @llvm.x86.avx512.mask.scattersiv8.sf(i8* %x0, <8 x i1> %1, <8 x i32> %x2, <8 x float> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv8.sf(i8* %x0, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> %x2, <8 x float> %x3, i32 4)
   ret void
 }
 
-declare void @llvm.x86.avx512.scattersiv8.si(i8*, i8, <8 x i32>, <8 x i32>, i32)
-
 define void@test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv8_si:
 ; CHECK:       ## %bb.0:
@@ -816,8 +817,9 @@
 ; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 -1, <8 x i32> %x2, <8 x i32> %x3, i32 4)
+  %1 = bitcast i8 %x1 to <8 x i1>
+  call void @llvm.x86.avx512.mask.scattersiv8.si(i8* %x0, <8 x i1> %1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv8.si(i8* %x0, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> %x2, <8 x i32> %x3, i32 4)
   ret void
 }
 
@@ -836,14 +838,14 @@
 ; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 -1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 0, <8 x i32> %x2, <8 x i32> %x3, i32 4)
-  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
-  call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 96, <8 x i32> %x2, <8 x i32> %x3, i32 4)
+  call void @llvm.x86.avx512.mask.scattersiv8.si(i8* %x0, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> %x2, <8 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv8.si(i8* %x0, <8 x i1> zeroinitializer, <8 x i32> %x2, <8 x i32> %x3, i32 4)
+  call void @llvm.x86.avx512.mask.scattersiv8.si(i8* %x0, <8 x i1> bitcast (<1 x i8> <i8 1> to <8 x i1>), <8 x i32> %x2, <8 x i32> %x3, i32 2)
+  call void @llvm.x86.avx512.mask.scattersiv8.si(i8* %x0, <8 x i1> bitcast (<1 x i8> <i8 96> to <8 x i1>), <8 x i32> %x2, <8 x i32> %x3, i32 4)
   ret void
 }
 
-define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %base)  {
+define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %base) {
 ; CHECK-LABEL: gather_mask_test:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -863,13 +865,62 @@
 ; CHECK-NEXT:    vaddps %zmm3, %zmm1, %zmm0
 ; CHECK-NEXT:    vaddps %zmm2, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 -1, i32 4)
-  %res1 = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 0, i32 4)
-  %res2 = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 1, i32 4)
-  %res3 = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 220, i32 4)
-
+  %res = call <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i32 4)
+  %res1 = call <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, <16 x i1> zeroinitializer, i32 4)
+  %res2 = call <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, <16 x i1> bitcast (<1 x i16> <i16 1> to <16 x i1>), i32 4)
+  %res3 = call <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float> %src, i8* %base, <16 x i32> %ind, <16 x i1> bitcast (<1 x i16> <i16 220> to <16 x i1>), i32 4)
   %res4 = fadd <16 x float> %res, %res1
   %res5 = fadd <16 x float> %res3, %res2
   %res6 = fadd <16 x float> %res5, %res4
   ret <16 x float> %res6
 }
+
+declare <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float>, i8*, <16 x i32>, <16 x i1>, i32)
+declare <8 x double> @llvm.x86.avx512.mask.gather.dpd.512(<8 x double>, i8*, <8 x i32>, <8 x i1>, i32)
+declare <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float>, i8*, <8 x i64>, <8 x i1>, i32)
+declare <8 x double> @llvm.x86.avx512.mask.gather.qpd.512(<8 x double>, i8*, <8 x i64>, <8 x i1>, i32)
+declare <16 x i32> @llvm.x86.avx512.mask.gather.dpi.512(<16 x i32>, i8*, <16 x i32>, <16 x i1>, i32)
+declare <8 x i64> @llvm.x86.avx512.mask.gather.dpq.512(<8 x i64>, i8*, <8 x i32>, <8 x i1>, i32)
+declare <8 x i32> @llvm.x86.avx512.mask.gather.qpi.512(<8 x i32>, i8*, <8 x i64>, <8 x i1>, i32)
+declare <8 x i64> @llvm.x86.avx512.mask.gather.qpq.512(<8 x i64>, i8*, <8 x i64>, <8 x i1>, i32)
+declare <2 x double> @llvm.x86.avx512.mask.gather3div2.df(<2 x double>, i8*, <2 x i64>, <2 x i1>, i32)
+declare <2 x i64> @llvm.x86.avx512.mask.gather3div2.di(<2 x i64>, i8*, <2 x i64>, <2 x i1>, i32)
+declare <4 x double> @llvm.x86.avx512.mask.gather3div4.df(<4 x double>, i8*, <4 x i64>, <4 x i1>, i32)
+declare <4 x i64> @llvm.x86.avx512.mask.gather3div4.di(<4 x i64>, i8*, <4 x i64>, <4 x i1>, i32)
+declare <4 x float> @llvm.x86.avx512.mask.gather3div4.sf(<4 x float>, i8*, <2 x i64>, <2 x i1>, i32)
+declare <4 x i32> @llvm.x86.avx512.mask.gather3div4.si(<4 x i32>, i8*, <2 x i64>, <2 x i1>, i32)
+declare <4 x float> @llvm.x86.avx512.mask.gather3div8.sf(<4 x float>, i8*, <4 x i64>, <4 x i1>, i32)
+declare <4 x i32> @llvm.x86.avx512.mask.gather3div8.si(<4 x i32>, i8*, <4 x i64>, <4 x i1>, i32)
+declare <2 x double> @llvm.x86.avx512.mask.gather3siv2.df(<2 x double>, i8*, <4 x i32>, <2 x i1>, i32)
+declare <2 x i64> @llvm.x86.avx512.mask.gather3siv2.di(<2 x i64>, i8*, <4 x i32>, <2 x i1>, i32)
+declare <4 x double> @llvm.x86.avx512.mask.gather3siv4.df(<4 x double>, i8*, <4 x i32>, <4 x i1>, i32)
+declare <4 x i64> @llvm.x86.avx512.mask.gather3siv4.di(<4 x i64>, i8*, <4 x i32>, <4 x i1>, i32)
+declare <4 x float> @llvm.x86.avx512.mask.gather3siv4.sf(<4 x float>, i8*, <4 x i32>, <4 x i1>, i32)
+declare <4 x i32> @llvm.x86.avx512.mask.gather3siv4.si(<4 x i32>, i8*, <4 x i32>, <4 x i1>, i32)
+declare <8 x float> @llvm.x86.avx512.mask.gather3siv8.sf(<8 x float>, i8*, <8 x i32>, <8 x i1>, i32)
+declare <8 x i32> @llvm.x86.avx512.mask.gather3siv8.si(<8 x i32>, i8*, <8 x i32>, <8 x i1>, i32)
+declare void @llvm.x86.avx512.mask.scatter.dps.512(i8*, <16 x i1>, <16 x i32>, <16 x float>, i32)
+declare void @llvm.x86.avx512.mask.scatter.dpd.512(i8*, <8 x i1>, <8 x i32>, <8 x double>, i32)
+declare void @llvm.x86.avx512.mask.scatter.qps.512(i8*, <8 x i1>, <8 x i64>, <8 x float>, i32)
+declare void @llvm.x86.avx512.mask.scatter.qpd.512(i8*, <8 x i1>, <8 x i64>, <8 x double>, i32)
+declare void @llvm.x86.avx512.mask.scatter.dpi.512(i8*, <16 x i1>, <16 x i32>, <16 x i32>, i32)
+declare void @llvm.x86.avx512.mask.scatter.dpq.512(i8*, <8 x i1>, <8 x i32>, <8 x i64>, i32)
+declare void @llvm.x86.avx512.mask.scatter.qpi.512(i8*, <8 x i1>, <8 x i64>, <8 x i32>, i32)
+declare void @llvm.x86.avx512.mask.scatter.qpq.512(i8*, <8 x i1>, <8 x i64>, <8 x i64>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv2.df(i8*, <2 x i1>, <2 x i64>, <2 x double>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv2.di(i8*, <2 x i1>, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv4.df(i8*, <4 x i1>, <4 x i64>, <4 x double>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv4.di(i8*, <4 x i1>, <4 x i64>, <4 x i64>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv4.sf(i8*, <2 x i1>, <2 x i64>, <4 x float>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv4.si(i8*, <2 x i1>, <2 x i64>, <4 x i32>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv8.sf(i8*, <4 x i1>, <4 x i64>, <4 x float>, i32)
+declare void @llvm.x86.avx512.mask.scatterdiv8.si(i8*, <4 x i1>, <4 x i64>, <4 x i32>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv2.df(i8*, <2 x i1>, <4 x i32>, <2 x double>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv2.di(i8*, <2 x i1>, <4 x i32>, <2 x i64>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv4.df(i8*, <4 x i1>, <4 x i32>, <4 x double>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv4.di(i8*, <4 x i1>, <4 x i32>, <4 x i64>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv4.sf(i8*, <4 x i1>, <4 x i32>, <4 x float>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv4.si(i8*, <4 x i1>, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv8.sf(i8*, <8 x i1>, <8 x i32>, <8 x float>, i32)
+declare void @llvm.x86.avx512.mask.scattersiv8.si(i8*, <8 x i1>, <8 x i32>, <8 x i32>, i32)
+
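Note on the rewrite above: the gather/scatter tests now pass the mask as a vector of i1 rather than a scalar integer, so a GPR mask is first bitcast to <N x i1> and, when the operation uses fewer than eight lanes, narrowed with a shufflevector. A minimal self-contained sketch of that idiom (the function name is hypothetical; the intrinsic and its signature are the ones declared above):

define void @scatter_mask_sketch(i8* %base, i8 %mask, <4 x i32> %index, <4 x float> %val) {
  ; Reinterpret the 8-bit GPR mask as eight i1 lanes.
  %m8 = bitcast i8 %mask to <8 x i1>
  ; A 128-bit scatter only uses the low four lanes.
  %m4 = shufflevector <8 x i1> %m8, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  call void @llvm.x86.avx512.mask.scattersiv4.sf(i8* %base, <4 x i1> %m4, <4 x i32> %index, <4 x float> %val, i32 4)
  ret void
}

declare void @llvm.x86.avx512.mask.scattersiv4.sf(i8*, <4 x i1>, <4 x i32>, <4 x float>, i32)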
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 05542ac..3d19447 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -270,7 +270,7 @@
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    movq %rdi, %rax
 ; SKX-NEXT:    vpcmpgtq %zmm0, %zmm1, %k0
-; SKX-NEXT:    kshiftrw $4, %k0, %k0
+; SKX-NEXT:    kshiftrb $4, %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %ecx
 ; SKX-NEXT:    testb $1, %cl
 ; SKX-NEXT:    cmoveq %rsi, %rax
@@ -976,7 +976,7 @@
 ; SKX-LABEL: test_extractelement_v4i1:
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleud %xmm1, %xmm0, %k0
-; SKX-NEXT:    kshiftrw $3, %k0, %k0
+; SKX-NEXT:    kshiftrb $3, %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax
 ; SKX-NEXT:    andl $1, %eax
 ; SKX-NEXT:    retq
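Note on the expected-codegen change above: with a narrow i1-vector mask the extract can shift the byte-wide mask register directly, so the SKX runs now expect the byte form kshiftrb rather than kshiftrw. A minimal sketch of IR that exercises this lowering (hypothetical function name; the selected instruction depends on subtarget features):

define i1 @extract_mask_bit_sketch(<8 x i64> %a, <8 x i64> %b) {
  ; The compare produces an <8 x i1> mask held in a k-register.
  %cmp = icmp sgt <8 x i64> %a, %b
  ; Extracting lane 4 lowers to a k-register right shift
  ; (kshiftrb $4 on targets with AVX512DQ).
  %bit = extractelement <8 x i1> %cmp, i32 4
  ret i1 %bit
}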
diff --git a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index e04d8e3..6a6fbe3 100644
--- a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -169,7 +169,8 @@
 define <16 x float> @test_mm512_mask_shuffle_f32x4(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_mask_shuffle_f32x4:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vshuff32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
 ; X86-NEXT:    retl
 ;
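Note on the recurring X86 change in this file: instead of loading the i16 mask from the stack straight into a mask register (kmovw from memory), codegen now zero-extends it into a GPR first (movzwl) and then moves the register into %k1. The IR shape that produces this pattern is simply an i16 argument bitcast to <16 x i1>; a minimal sketch (hypothetical function name):

define <16 x float> @select_by_mask_sketch(i16 %mask, <16 x float> %a, <16 x float> %b) {
  ; On 32-bit x86 the i16 argument lives on the stack, so it is
  ; loaded with movzwl and then transferred to k1 with kmovw.
  %m = bitcast i16 %mask to <16 x i1>
  %r = select <16 x i1> %m, <16 x float> %a, <16 x float> %b
  ret <16 x float> %r
}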
@@ -188,7 +189,8 @@
 define <16 x float> @test_mm512_maskz_shuffle_f32x4(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_maskz_shuffle_f32x4:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
 ; X86-NEXT:    retl
 ;
@@ -267,7 +269,8 @@
 define <8 x i64> @test_mm512_mask_shuffle_i32x4(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X86-LABEL: test_mm512_mask_shuffle_i32x4:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vshufi32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
 ; X86-NEXT:    retl
 ;
@@ -289,7 +292,8 @@
 define <8 x i64> @test_mm512_maskz_shuffle_i32x4(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X86-LABEL: test_mm512_maskz_shuffle_i32x4:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
 ; X86-NEXT:    retl
 ;
@@ -377,7 +381,8 @@
 define zeroext i16 @test_mm512_mask_testn_epi32_mask(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_mask_testn_epi32_mask:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestnmd %zmm0, %zmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
 ; X86-NEXT:    movzwl %ax, %eax
@@ -448,7 +453,8 @@
 define zeroext i16 @test_mm512_mask_test_epi32_mask(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_mask_test_epi32_mask:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestmd %zmm0, %zmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
 ; X86-NEXT:    movzwl %ax, %eax
@@ -505,7 +511,8 @@
 ; X86-LABEL: test_mm512_mask_set1_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %ecx, %k1
 ; X86-NEXT:    vpbroadcastd %eax, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -528,7 +535,8 @@
 ; X86-LABEL: test_mm512_maskz_set1_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %ecx, %k1
 ; X86-NEXT:    vpbroadcastd %eax, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -607,7 +615,8 @@
 define <8 x i64> @test_mm512_mask_broadcastd_epi32(<8 x i64> %a0, i16 %a1, <2 x i64> %a2) {
 ; X86-LABEL: test_mm512_mask_broadcastd_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpbroadcastd %xmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -628,7 +637,8 @@
 define <8 x i64> @test_mm512_maskz_broadcastd_epi32(i16 %a0, <2 x i64> %a1) {
 ; X86-LABEL: test_mm512_maskz_broadcastd_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpbroadcastd %xmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -751,7 +761,8 @@
 define <16 x float> @test_mm512_mask_broadcastss_ps(<16 x float> %a0, i16 %a1, <4 x float> %a2) {
 ; X86-LABEL: test_mm512_mask_broadcastss_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vbroadcastss %xmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -769,7 +780,8 @@
 define <16 x float> @test_mm512_maskz_broadcastss_ps(i16 %a0, <4 x float> %a1) {
 ; X86-LABEL: test_mm512_maskz_broadcastss_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -843,7 +855,8 @@
 define <16 x float> @test_mm512_mask_movehdup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
 ; X86-LABEL: test_mm512_mask_movehdup_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X86-NEXT:    retl
 ;
@@ -861,7 +874,8 @@
 define <16 x float> @test_mm512_maskz_movehdup_ps(i16 %a0, <16 x float> %a1) {
 ; X86-LABEL: test_mm512_maskz_movehdup_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X86-NEXT:    retl
 ;
@@ -888,7 +902,8 @@
 define <16 x float> @test_mm512_mask_moveldup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
 ; X86-LABEL: test_mm512_mask_moveldup_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X86-NEXT:    retl
 ;
@@ -906,7 +921,8 @@
 define <16 x float> @test_mm512_maskz_moveldup_ps(i16 %a0, <16 x float> %a1) {
 ; X86-LABEL: test_mm512_maskz_moveldup_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X86-NEXT:    retl
 ;
@@ -980,7 +996,8 @@
 define <16 x float> @test_mm512_mask_permute_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
 ; X86-LABEL: test_mm512_mask_permute_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} = zmm1[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X86-NEXT:    retl
 ;
@@ -998,7 +1015,8 @@
 define <16 x float> @test_mm512_maskz_permute_ps(i16 %a0, <16 x float> %a1) {
 ; X86-LABEL: test_mm512_maskz_permute_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X86-NEXT:    retl
 ;
@@ -1121,7 +1139,8 @@
 define <8 x i64> @test_mm512_mask_shuffle_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2) {
 ; X86-LABEL: test_mm512_mask_shuffle_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpshufd {{.*#+}} zmm0 {%k1} = zmm1[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X86-NEXT:    retl
 ;
@@ -1142,7 +1161,8 @@
 define <8 x i64> @test_mm512_maskz_shuffle_epi32(i16 %a0, <8 x i64> %a1) {
 ; X86-LABEL: test_mm512_maskz_shuffle_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X86-NEXT:    retl
 ;
@@ -1221,7 +1241,8 @@
 define <8 x i64> @test_mm512_mask_unpackhi_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2, <8 x i64> %a3) {
 ; X86-LABEL: test_mm512_mask_unpackhi_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpunpckhdq {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
 ; X86-NEXT:    retl
 ;
@@ -1243,7 +1264,8 @@
 define <8 x i64> @test_mm512_maskz_unpackhi_epi32(i16 %a0, <8 x i64> %a1, <8 x i64> %a2) {
 ; X86-LABEL: test_mm512_maskz_unpackhi_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpunpckhdq {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X86-NEXT:    retl
 ;
@@ -1367,7 +1389,8 @@
 define <16 x float> @test_mm512_mask_unpackhi_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2, <16 x float> %a3) {
 ; X86-LABEL: test_mm512_mask_unpackhi_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
 ; X86-NEXT:    retl
 ;
@@ -1385,7 +1408,8 @@
 define <16 x float> @test_mm512_maskz_unpackhi_ps(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
 ; X86-LABEL: test_mm512_maskz_unpackhi_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X86-NEXT:    retl
 ;
@@ -1415,7 +1439,8 @@
 define <8 x i64> @test_mm512_mask_unpacklo_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2, <8 x i64> %a3) {
 ; X86-LABEL: test_mm512_mask_unpacklo_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpunpckldq {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
 ; X86-NEXT:    retl
 ;
@@ -1437,7 +1462,8 @@
 define <8 x i64> @test_mm512_maskz_unpacklo_epi32(i16 %a0, <8 x i64> %a1, <8 x i64> %a2) {
 ; X86-LABEL: test_mm512_maskz_unpacklo_epi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpunpckldq {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X86-NEXT:    retl
 ;
@@ -1561,7 +1587,8 @@
 define <16 x float> @test_mm512_mask_unpacklo_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2, <16 x float> %a3) {
 ; X86-LABEL: test_mm512_mask_unpacklo_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vunpcklps {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
 ; X86-NEXT:    retl
 ;
@@ -1579,7 +1606,8 @@
 define <16 x float> @test_mm512_maskz_unpacklo_ps(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
 ; X86-LABEL: test_mm512_maskz_unpacklo_ps:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X86-NEXT:    retl
 ;
@@ -1651,10 +1679,6 @@
 define <8 x i64> @test_mm512_mul_epi32(<8 x i64> %__A, <8 x i64> %__B) nounwind {
 ; CHECK-LABEL: test_mm512_mul_epi32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsllq $32, %zmm0, %zmm0
-; CHECK-NEXT:    vpsraq $32, %zmm0, %zmm0
-; CHECK-NEXT:    vpsllq $32, %zmm1, %zmm1
-; CHECK-NEXT:    vpsraq $32, %zmm1, %zmm1
 ; CHECK-NEXT:    vpmuldq %zmm0, %zmm1, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %tmp = shl <8 x i64> %__A, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
@@ -1718,11 +1742,6 @@
 define <8 x i64> @test_mm512_mul_epu32(<8 x i64> %__A, <8 x i64> %__B) nounwind {
 ; CHECK-LABEL: test_mm512_mul_epu32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movw $-21846, %ax # imm = 0xAAAA
-; CHECK-NEXT:    kmovw %eax, %k0
-; CHECK-NEXT:    knotw %k0, %k1
-; CHECK-NEXT:    vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vmovdqa32 %zmm1, %zmm1 {%k1} {z}
 ; CHECK-NEXT:    vpmuludq %zmm0, %zmm1, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %tmp = and <8 x i64> %__A, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
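Note on the two mul hunks above: vpmuldq and vpmuludq read only the low 32 bits of each 64-bit lane (signed and unsigned, respectively), so the explicit sign-extension (shl+ashr by 32) and the mask-based lane zeroing feeding them are redundant, and the expected assembly drops them. A per-lane sketch of the identity being relied on (hypothetical function name):

define i64 @muldq_lane_sketch(i64 %a, i64 %b) {
  ; What vpmuldq computes in each 64-bit lane: the full 64-bit
  ; product of the sign-extended low 32 bits of both operands.
  %la = trunc i64 %a to i32
  %lb = trunc i64 %b to i32
  %sa = sext i32 %la to i64
  %sb = sext i32 %lb to i64
  %r = mul i64 %sa, %sb
  ret i64 %r
}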
@@ -1987,7 +2006,8 @@
 define <2 x i64> @test_mm512_mask_cvtepi32_epi8(<2 x i64> %__O, i16 zeroext %__M, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_mask_cvtepi32_epi8:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpmovdb %zmm1, %xmm0 {%k1}
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
@@ -2009,7 +2029,8 @@
 define <2 x i64> @test_mm512_maskz_cvtepi32_epi8(i16 zeroext %__M, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_maskz_cvtepi32_epi8:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpmovdb %zmm0, %xmm0 {%k1} {z}
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
@@ -2158,7 +2179,8 @@
 define <8 x i64> @test_mm512_mask_ternarylogic_epi32(<8 x i64> %__A, i16 zeroext %__U, <8 x i64> %__B, <8 x i64> %__C) {
 ; X86-LABEL: test_mm512_mask_ternarylogic_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -2181,7 +2203,8 @@
 define <8 x i64> @test_mm512_maskz_ternarylogic_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C) {
 ; X86-LABEL: test_mm512_maskz_ternarylogic_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -2258,7 +2281,8 @@
 define <8 x i64> @test_mm512_mask2_permutex2var_epi32(<8 x i64> %__A, <8 x i64> %__I, i16 zeroext %__U, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_mask2_permutex2var_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; X86-NEXT:    retl
@@ -2310,7 +2334,8 @@
 define <16 x float> @test_mm512_mask2_permutex2var_ps(<16 x float> %__A, <8 x i64> %__I, i16 zeroext %__U, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_mask2_permutex2var_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-NEXT:    retl
@@ -2371,7 +2396,8 @@
 define <8 x i64> @test_mm512_maskz_permutex2var_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__I, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_permutex2var_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -2394,7 +2420,8 @@
 define <8 x i64> @test_mm512_mask_permutex2var_epi32(<8 x i64> %__A, i16 zeroext %__U, <8 x i64> %__I, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_mask_permutex2var_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -2478,7 +2505,8 @@
 define <16 x float> @test_mm512_mask_permutex2var_ps(<16 x float> %__A, i16 zeroext %__U, <8 x i64> %__I, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_mask_permutex2var_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -2498,7 +2526,8 @@
 define <16 x float> @test_mm512_maskz_permutex2var_ps(i16 zeroext %__U, <16 x float> %__A, <8 x i64> %__I, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_maskz_permutex2var_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -3436,7 +3465,8 @@
 define <16 x float> @test_mm512_mask_fmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -3455,7 +3485,8 @@
 define <16 x float> @test_mm512_mask3_fmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -3476,7 +3507,8 @@
 define <16 x float> @test_mm512_maskz_fmadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -3513,7 +3545,8 @@
 define <16 x float> @test_mm512_mask_fmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -3533,7 +3566,8 @@
 define <16 x float> @test_mm512_maskz_fmsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -3571,7 +3605,8 @@
 define <16 x float> @test_mm512_mask3_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fnmadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -3593,7 +3628,8 @@
 define <16 x float> @test_mm512_maskz_fnmadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fnmadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -3628,7 +3664,8 @@
 define <16 x float> @test_mm512_maskz_fnmsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fnmsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -3659,7 +3696,8 @@
 define <16 x float> @test_mm512_mask_fmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) + zmm2
 ; X86-NEXT:    retl
 ;
@@ -3678,7 +3716,8 @@
 define <16 x float> @test_mm512_mask3_fmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) + zmm2
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -3699,7 +3738,8 @@
 define <16 x float> @test_mm512_maskz_fmadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
 ; X86-NEXT:    retl
 ;
@@ -3736,7 +3776,8 @@
 define <16 x float> @test_mm512_mask_fmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) - zmm2
 ; X86-NEXT:    retl
 ;
@@ -3756,7 +3797,8 @@
 define <16 x float> @test_mm512_maskz_fmsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) - zmm2
 ; X86-NEXT:    retl
 ;
@@ -3794,7 +3836,8 @@
 define <16 x float> @test_mm512_mask3_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fnmadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmadd231ps {{.*#+}} zmm2 = -(zmm0 * zmm1) + zmm2
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -3816,7 +3859,8 @@
 define <16 x float> @test_mm512_maskz_fnmadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fnmadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmadd213ps {{.*#+}} zmm0 = -(zmm1 * zmm0) + zmm2
 ; X86-NEXT:    retl
 ;
@@ -3851,7 +3895,8 @@
 define <16 x float> @test_mm512_maskz_fnmsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fnmsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmsub213ps {{.*#+}} zmm0 = -(zmm1 * zmm0) - zmm2
 ; X86-NEXT:    retl
 ;
@@ -4161,7 +4206,8 @@
 define <16 x float> @test_mm512_mask_fmaddsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmaddsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmaddsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -4180,7 +4226,8 @@
 define <16 x float> @test_mm512_mask3_fmaddsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmaddsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmaddsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -4201,7 +4248,8 @@
 define <16 x float> @test_mm512_maskz_fmaddsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmaddsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -4238,7 +4286,8 @@
 define <16 x float> @test_mm512_mask_fmsubadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmsubadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -4258,7 +4307,8 @@
 define <16 x float> @test_mm512_maskz_fmsubadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmsubadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -4291,7 +4341,8 @@
 define <16 x float> @test_mm512_mask_fmaddsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmaddsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmaddsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
 ; X86-NEXT:    retl
 ;
@@ -4313,7 +4364,8 @@
 define <16 x float> @test_mm512_mask3_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmaddsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmaddsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -4337,7 +4389,8 @@
 define <16 x float> @test_mm512_maskz_fmaddsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmaddsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
 ; X86-NEXT:    retl
 ;
@@ -4372,7 +4425,8 @@
 define <16 x float> @test_mm512_mask_fmsubadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmsubadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsubadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
 ; X86-NEXT:    retl
 ;
@@ -4394,7 +4448,8 @@
 define <16 x float> @test_mm512_maskz_fmsubadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmsubadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsubadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
 ; X86-NEXT:    retl
 ;
@@ -4462,7 +4517,8 @@
 define <16 x float> @test_mm512_mask3_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -4484,7 +4540,8 @@
 define <16 x float> @test_mm512_mask3_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -4554,7 +4611,8 @@
 define <16 x float> @test_mm512_mask3_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsubadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -4576,7 +4634,8 @@
 define <16 x float> @test_mm512_mask3_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsubadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfmsubadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -4642,7 +4701,8 @@
 define <16 x float> @test_mm512_mask_fnmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmadd_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -4662,7 +4722,8 @@
 define <16 x float> @test_mm512_mask_fnmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmadd_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmadd132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
 ; X86-NEXT:    retl
 ;
@@ -4774,7 +4835,8 @@
 define <16 x float> @test_mm512_mask_fnmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -4795,7 +4857,8 @@
 define <16 x float> @test_mm512_mask3_fnmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fnmsub_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -4818,7 +4881,8 @@
 define <16 x float> @test_mm512_mask_fnmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmsub132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
 ; X86-NEXT:    retl
 ;
@@ -4839,7 +4903,8 @@
 define <16 x float> @test_mm512_mask3_fnmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fnmsub_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vfnmsub231ps {{.*#+}} zmm2 = -(zmm0 * zmm1) - zmm2
 ; X86-NEXT:    vmovaps %zmm2, %zmm0
 ; X86-NEXT:    retl
@@ -6261,7 +6326,8 @@
 ; X86-LABEL: test_mm512_mask_expandloadu_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %ecx, %k1
 ; X86-NEXT:    vpexpandd (%eax), %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -6283,7 +6349,8 @@
 ; X86-LABEL: test_mm512_maskz_expandloadu_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %ecx, %k1
 ; X86-NEXT:    vpexpandd (%eax), %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -6304,7 +6371,8 @@
 ; X86-LABEL: test_mm512_mask_expandloadu_ps:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %ecx, %k1
 ; X86-NEXT:    vexpandps (%eax), %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -6324,7 +6392,8 @@
 ; X86-LABEL: test_mm512_maskz_expandloadu_ps:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %ecx, %k1
 ; X86-NEXT:    vexpandps (%eax), %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -6389,9 +6458,10 @@
 define void @test_mm512_mask_compressstoreu_ps(i8* %__P, i16 zeroext %__U, <16 x float> %__A) {
 ; X86-LABEL: test_mm512_mask_compressstoreu_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vcompressps %zmm0, (%eax) {%k1}
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcompressps %zmm0, (%ecx) {%k1}
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -6411,9 +6481,10 @@
 define void @test_mm512_mask_compressstoreu_epi32(i8* %__P, i16 zeroext %__U, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_mask_compressstoreu_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vpcompressd %zmm0, (%eax) {%k1}
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpcompressd %zmm0, (%ecx) {%k1}
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -6982,7 +7053,8 @@
 define i32 @test_mm512_mask_reduce_add_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_add_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X86-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
@@ -7038,7 +7110,8 @@
 define i32 @test_mm512_mask_reduce_mul_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_mul_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpbroadcastd {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
@@ -7096,7 +7169,8 @@
 define i32 @test_mm512_mask_reduce_and_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_and_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
@@ -7150,7 +7224,8 @@
 define i32 @test_mm512_mask_reduce_or_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_or_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X86-NEXT:    vpor %ymm1, %ymm0, %ymm0
@@ -7508,7 +7583,8 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovaps %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
 ; X86-NEXT:    vaddps %ymm1, %ymm0, %ymm0
@@ -7564,7 +7640,8 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vbroadcastss {{.*#+}} zmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
 ; X86-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextractf64x4 $1, %zmm1, %ymm0
@@ -8424,7 +8501,8 @@
 define i32 @test_mm512_mask_reduce_max_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_max_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpbroadcastd {{.*#+}} zmm1 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
@@ -8486,7 +8564,8 @@
 define i32 @test_mm512_mask_reduce_max_epu32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_max_epu32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X86-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
@@ -8548,7 +8627,8 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vbroadcastss {{.*#+}} zmm1 = [-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf]
 ; X86-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextractf64x4 $1, %zmm1, %ymm0
@@ -8604,7 +8684,8 @@
 define i32 @test_mm512_mask_reduce_min_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_min_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpbroadcastd {{.*#+}} zmm1 = [2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647]
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
@@ -8666,7 +8747,8 @@
 define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86-LABEL: test_mm512_mask_reduce_min_epu32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
@@ -8730,7 +8812,8 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vbroadcastss {{.*#+}} zmm1 = [+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf]
 ; X86-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextractf64x4 $1, %zmm1, %ymm0
@@ -8826,7 +8909,8 @@
 define <16 x float> @test_mm512_mask_max_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_mask_max_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmaxps %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -8897,7 +8981,8 @@
 define <16 x float> @test_mm512_maskz_max_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_maskz_max_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -8916,7 +9001,8 @@
 define <16 x float> @test_mm512_mask_max_round_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_mask_max_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmaxps %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -8937,7 +9023,8 @@
 define <16 x float> @test_mm512_maskz_max_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_maskz_max_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9058,7 +9145,8 @@
 define <16 x float> @test_mm512_mask_min_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_mask_min_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vminps %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9077,7 +9165,8 @@
 define <16 x float> @test_mm512_maskz_min_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_maskz_min_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9096,7 +9185,8 @@
 define <16 x float> @test_mm512_mask_min_round_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_mask_min_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vminps %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9117,7 +9207,8 @@
 define <16 x float> @test_mm512_maskz_min_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X86-LABEL: test_mm512_maskz_min_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9258,7 +9349,8 @@
 define <16 x float> @test_mm512_mask_sqrt_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A) {
 ; X86-LABEL: test_mm512_mask_sqrt_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vsqrtps %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9277,7 +9369,8 @@
 define <16 x float> @test_mm512_maskz_sqrt_ps(i16 zeroext %__U, <16 x float> %__A) {
 ; X86-LABEL: test_mm512_maskz_sqrt_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vsqrtps %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9296,7 +9389,8 @@
 define <16 x float> @test_mm512_mask_sqrt_round_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A) {
 ; X86-LABEL: test_mm512_mask_sqrt_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vsqrtps {rn-sae}, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9317,7 +9411,8 @@
 define <16 x float> @test_mm512_maskz_sqrt_round_ps(i16 zeroext %__U, <16 x float> %__A) {
 ; X86-LABEL: test_mm512_maskz_sqrt_round_ps:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vsqrtps {rn-sae}, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9350,17 +9445,16 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
-  %1 = tail call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %0, i32 5)
+  %1 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <16 x i32> %1 to <8 x i64>
   ret <8 x i64> %2
 }
 
-declare <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32>, i32) #1
-
 define <8 x i64> @test_mm512_mask_rol_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_mask_rol_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprold $5, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9371,7 +9465,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
-  %1 = tail call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %0, i32 5)
+  %1 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <8 x i64> %__W to <16 x i32>
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i32> %1, <16 x i32> %2
@@ -9382,7 +9476,8 @@
 define <8 x i64> @test_mm512_maskz_rol_epi32(i16 zeroext %__U, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_maskz_rol_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprold $5, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9393,7 +9488,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
-  %1 = tail call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %0, i32 5)
+  %1 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast i16 %__U to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
   %4 = bitcast <16 x i32> %3 to <8 x i64>
@@ -9406,12 +9501,10 @@
 ; CHECK-NEXT:    vprolq $5, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %__A, i32 5)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>)
   ret <8 x i64> %0
 }
 
-declare <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64>, i32) #1
-
 define <8 x i64> @test_mm512_mask_rol_epi64(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_mask_rol_epi64:
 ; X86:       # %bb.0: # %entry
@@ -9426,7 +9519,7 @@
 ; X64-NEXT:    vprolq $5, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %__A, i32 5)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
   ret <8 x i64> %2
@@ -9446,7 +9539,7 @@
 ; X64-NEXT:    vprolq $5, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %__A, i32 5)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
   ret <8 x i64> %2
@@ -9460,7 +9553,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %0, <16 x i32> %1)
+  %2 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> %1)
   %3 = bitcast <16 x i32> %2 to <8 x i64>
   ret <8 x i64> %3
 }
@@ -9468,7 +9561,8 @@
 define <8 x i64> @test_mm512_mask_rolv_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_mask_rolv_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprolvd %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9480,7 +9574,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %0, <16 x i32> %1)
+  %2 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> %1)
   %3 = bitcast <8 x i64> %__W to <16 x i32>
   %4 = bitcast i16 %__U to <16 x i1>
   %5 = select <16 x i1> %4, <16 x i32> %2, <16 x i32> %3
@@ -9491,7 +9585,8 @@
 define <8 x i64> @test_mm512_maskz_rolv_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_rolv_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9503,7 +9598,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %0, <16 x i32> %1)
+  %2 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> %1)
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
   %5 = bitcast <16 x i32> %4 to <8 x i64>
@@ -9516,7 +9611,7 @@
 ; CHECK-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> %__B)
   ret <8 x i64> %0
 }
 
@@ -9534,7 +9629,7 @@
 ; X64-NEXT:    vprolvq %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
   ret <8 x i64> %2
@@ -9554,7 +9649,7 @@
 ; X64-NEXT:    vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
   ret <8 x i64> %2
@@ -9567,17 +9662,17 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
-  %1 = tail call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %0, i32 5)
+  %1 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <16 x i32> %1 to <8 x i64>
   ret <8 x i64> %2
 }
 
-declare <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32>, i32) #1
 
 define <8 x i64> @test_mm512_mask_ror_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_mask_ror_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprord $5, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9588,7 +9683,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
-  %1 = tail call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %0, i32 5)
+  %1 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <8 x i64> %__W to <16 x i32>
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i32> %1, <16 x i32> %2
@@ -9599,7 +9694,8 @@
 define <8 x i64> @test_mm512_maskz_ror_epi32(i16 zeroext %__U, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_maskz_ror_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprord $5, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9610,7 +9706,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
-  %1 = tail call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %0, i32 5)
+  %1 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast i16 %__U to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
   %4 = bitcast <16 x i32> %3 to <8 x i64>
@@ -9623,12 +9719,10 @@
 ; CHECK-NEXT:    vprorq $5, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %__A, i32 5)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>)
   ret <8 x i64> %0
 }
 
-declare <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64>, i32) #1
-
 define <8 x i64> @test_mm512_mask_ror_epi64(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A) {
 ; X86-LABEL: test_mm512_mask_ror_epi64:
 ; X86:       # %bb.0: # %entry
@@ -9643,7 +9737,7 @@
 ; X64-NEXT:    vprorq $5, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %__A, i32 5)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
   ret <8 x i64> %2
@@ -9663,7 +9757,7 @@
 ; X64-NEXT:    vprorq $5, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %__A, i32 5)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
   ret <8 x i64> %2
@@ -9677,7 +9771,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %0, <16 x i32> %1)
+  %2 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> %1)
   %3 = bitcast <16 x i32> %2 to <8 x i64>
   ret <8 x i64> %3
 }
@@ -9685,7 +9779,8 @@
 define <8 x i64> @test_mm512_mask_rorv_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_mask_rorv_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprorvd %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
@@ -9697,7 +9792,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %0, <16 x i32> %1)
+  %2 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> %1)
   %3 = bitcast <8 x i64> %__W to <16 x i32>
   %4 = bitcast i16 %__U to <16 x i1>
   %5 = select <16 x i1> %4, <16 x i32> %2, <16 x i32> %3
@@ -9708,7 +9803,8 @@
 define <8 x i64> @test_mm512_maskz_rorv_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_rorv_epi32:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
@@ -9720,7 +9816,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %0, <16 x i32> %1)
+  %2 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %0, <16 x i32> %0, <16 x i32> %1)
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
   %5 = bitcast <16 x i32> %4 to <8 x i64>
@@ -9733,7 +9829,7 @@
 ; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> %__B)
   ret <8 x i64> %0
 }
 
@@ -9751,7 +9847,7 @@
 ; X64-NEXT:    vprorvq %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
   ret <8 x i64> %2
@@ -9771,7 +9867,7 @@
 ; X64-NEXT:    vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__A, <8 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
   ret <8 x i64> %2
@@ -9799,10 +9895,11 @@
 declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
 declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
 declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
-declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)
-declare <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64>, <8 x i64>)
-declare <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32>, <16 x i32>)
-declare <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64>, <8 x i64>)
+
+declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
+declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
 
 !0 = !{i32 1}
 
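Note: The hunks above replace the X86-specific rotate intrinsics
(llvm.x86.avx512.prol/pror and their variable-count prolv/prorv forms) with
the target-independent funnel-shift intrinsics llvm.fshl/llvm.fshr, now
declared once at the end of the file. A funnel shift whose two data operands
are the same value is a rotate: for 32-bit lanes, fshl(x, x, n) rotates x
left by n modulo 32. Immediate rotates become a splat-constant shift vector,
which is why the old "i32 5" immediate turned into sixteen lanes of 5 above.
A minimal sketch of the variable form (illustrative function name):

; Rotate each i32 lane of %x left by the matching lane of %n, expressed as a
; funnel shift with both data operands equal.
define <16 x i32> @rotl_v16i32(<16 x i32> %x, <16 x i32> %n) {
  %r = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %n)
  ret <16 x i32> %r
}

declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
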
diff --git a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 26e8636..bfa7a58 100644
--- a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -7,10 +7,8 @@
 define i16 @unpckbw_test(i16 %a0, i16 %a1) {
 ; X86-LABEL: unpckbw_test:
 ; X86:       ## %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k0 ## encoding: [0xc5,0xf8,0x92,0xc0]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k0 ## encoding: [0xc5,0xf8,0x90,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
 ; X86-NEXT:    kunpckbw %k1, %k0, %k0 ## encoding: [0xc5,0xfd,0x4b,0xc1]
 ; X86-NEXT:    kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; X86-NEXT:    ## kill: def $ax killed $ax killed $eax
@@ -3457,6 +3455,282 @@
   ret <8 x i64> %res4
 }
 
+declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx512_prolv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_prolv_d_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprolvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x15,0xd9]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vprolvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x15,0xd1]
+; X86-NEXT:    vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x15,0xc1]
+; X86-NEXT:    vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X86-NEXT:    vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prolv_d_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprolvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x15,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprolvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x15,0xd1]
+; X64-NEXT:    vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x15,0xc1]
+; X64-NEXT:    vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X64-NEXT:    vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+  %4 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+  %5 = bitcast i16 %x3 to <16 x i1>
+  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+  %7 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+  %res3 = add <16 x i32> %3, %6
+  %res4 = add <16 x i32> %res3, %7
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64>, <8 x i64>)
+
+define <8 x i64>@test_int_x86_avx512_prolv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prolv_q_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprolvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x15,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x15,0xd1]
+; X86-NEXT:    vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x15,0xc1]
+; X86-NEXT:    vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X86-NEXT:    vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prolv_q_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprolvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x15,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprolvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x15,0xd1]
+; X64-NEXT:    vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x15,0xc1]
+; X64-NEXT:    vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X64-NEXT:    vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+  %4 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+  %7 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+  %res3 = add <8 x i64> %3, %6
+  %res4 = add <8 x i64> %res3, %7
+  ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx512_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_prorv_d_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprorvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x14,0xd9]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vprorvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x14,0xd1]
+; X86-NEXT:    vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x14,0xc1]
+; X86-NEXT:    vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X86-NEXT:    vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prorv_d_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprorvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x14,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprorvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x14,0xd1]
+; X64-NEXT:    vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x14,0xc1]
+; X64-NEXT:    vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X64-NEXT:    vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+  %4 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+  %5 = bitcast i16 %x3 to <16 x i1>
+  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+  %7 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+  %res3 = add <16 x i32> %3, %6
+  %res4 = add <16 x i32> %res3, %7
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64>, <8 x i64>)
+
+define <8 x i64>@test_int_x86_avx512_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prorv_q_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprorvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x14,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x14,0xd1]
+; X86-NEXT:    vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x14,0xc1]
+; X86-NEXT:    vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X86-NEXT:    vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prorv_q_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprorvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x14,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprorvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x14,0xd1]
+; X64-NEXT:    vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x14,0xc1]
+; X64-NEXT:    vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X64-NEXT:    vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+  %4 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+  %7 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+  %res3 = add <8 x i64> %3, %6
+  %res4 = add <8 x i64> %res3, %7
+  ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32>, i32)
+
+define <16 x i32>@test_int_x86_avx512_prol_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_prol_d_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprold $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc8,0x03]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vprold $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc8,0x03]
+; X86-NEXT:    vprold $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc8,0x03]
+; X86-NEXT:    vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X86-NEXT:    vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prol_d_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprold $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc8,0x03]
+; X64-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprold $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc8,0x03]
+; X64-NEXT:    vprold $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc8,0x03]
+; X64-NEXT:    vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X64-NEXT:    vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+  %4 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
+  %5 = bitcast i16 %x3 to <16 x i1>
+  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+  %7 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
+  %res3 = add <16 x i32> %3, %6
+  %res4 = add <16 x i32> %res3, %7
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64>, i32)
+
+define <8 x i64>@test_int_x86_avx512_prol_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prol_q_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprolq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc8,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc8,0x03]
+; X86-NEXT:    vprolq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc8,0x03]
+; X86-NEXT:    vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X86-NEXT:    vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prol_q_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprolq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc8,0x03]
+; X64-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprolq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc8,0x03]
+; X64-NEXT:    vprolq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc8,0x03]
+; X64-NEXT:    vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X64-NEXT:    vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+  %4 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+  %7 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
+  %res3 = add <8 x i64> %3, %6
+  %res4 = add <8 x i64> %res3, %7
+  ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32>, i32)
+
+define <16 x i32>@test_int_x86_avx512_pror_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_pror_d_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprord $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc0,0x03]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vprord $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc0,0x03]
+; X86-NEXT:    vprord $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc0,0x03]
+; X86-NEXT:    vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X86-NEXT:    vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_pror_d_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprord $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc0,0x03]
+; X64-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprord $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc0,0x03]
+; X64-NEXT:    vprord $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc0,0x03]
+; X64-NEXT:    vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X64-NEXT:    vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+  %4 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
+  %5 = bitcast i16 %x3 to <16 x i1>
+  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+  %7 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
+  %res3 = add <16 x i32> %3, %6
+  %res4 = add <16 x i32> %res3, %7
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64>, i32)
+
+define <8 x i64>@test_int_x86_avx512_pror_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_pror_q_512:
+; X86:       ## %bb.0:
+; X86-NEXT:    vprorq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc0,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc0,0x03]
+; X86-NEXT:    vprorq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc0,0x03]
+; X86-NEXT:    vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X86-NEXT:    vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_pror_q_512:
+; X64:       ## %bb.0:
+; X64-NEXT:    vprorq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc0,0x03]
+; X64-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprorq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc0,0x03]
+; X64-NEXT:    vprorq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc0,0x03]
+; X64-NEXT:    vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X64-NEXT:    vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+  %4 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+  %7 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
+  %res3 = add <8 x i64> %3, %6
+  %res4 = add <8 x i64> %res3, %7
+  ret <8 x i64> %res4
+}
+
 declare <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64>, i32, <8 x i64>, i8)
 
 define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
@@ -4380,14 +4654,14 @@
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15]
 ; X86-NEXT:    ## encoding: [0x62,0xf2,0x7d,0x49,0x0c,0x15,A,A,A,A]
-; X86-NEXT:    ## fixup A - offset: 6, value: LCPI203_0, kind: FK_Data_4
+; X86-NEXT:    ## fixup A - offset: 6, value: LCPI211_0, kind: FK_Data_4
 ; X86-NEXT:    vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
 ; X86-NEXT:    ## encoding: [0x62,0xf2,0x7d,0xc9,0x0c,0x0d,A,A,A,A]
-; X86-NEXT:    ## fixup A - offset: 6, value: LCPI203_1, kind: FK_Data_4
+; X86-NEXT:    ## fixup A - offset: 6, value: LCPI211_1, kind: FK_Data_4
 ; X86-NEXT:    vaddps %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6c,0x48,0x58,0xc9]
 ; X86-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
 ; X86-NEXT:    ## encoding: [0x62,0xf2,0x7d,0x48,0x0c,0x05,A,A,A,A]
-; X86-NEXT:    ## fixup A - offset: 6, value: LCPI203_2, kind: FK_Data_4
+; X86-NEXT:    ## fixup A - offset: 6, value: LCPI211_2, kind: FK_Data_4
 ; X86-NEXT:    vaddps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x58,0xc1]
 ; X86-NEXT:    retl ## encoding: [0xc3]
 ;
@@ -4396,14 +4670,14 @@
 ; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; X64-NEXT:    vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15]
 ; X64-NEXT:    ## encoding: [0x62,0xf2,0x7d,0x49,0x0c,0x15,A,A,A,A]
-; X64-NEXT:    ## fixup A - offset: 6, value: LCPI203_0-4, kind: reloc_riprel_4byte
+; X64-NEXT:    ## fixup A - offset: 6, value: LCPI211_0-4, kind: reloc_riprel_4byte
 ; X64-NEXT:    vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
 ; X64-NEXT:    ## encoding: [0x62,0xf2,0x7d,0xc9,0x0c,0x0d,A,A,A,A]
-; X64-NEXT:    ## fixup A - offset: 6, value: LCPI203_1-4, kind: reloc_riprel_4byte
+; X64-NEXT:    ## fixup A - offset: 6, value: LCPI211_1-4, kind: reloc_riprel_4byte
 ; X64-NEXT:    vaddps %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6c,0x48,0x58,0xc9]
 ; X64-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
 ; X64-NEXT:    ## encoding: [0x62,0xf2,0x7d,0x48,0x0c,0x05,A,A,A,A]
-; X64-NEXT:    ## fixup A - offset: 6, value: LCPI203_2-4, kind: reloc_riprel_4byte
+; X64-NEXT:    ## fixup A - offset: 6, value: LCPI211_2-4, kind: reloc_riprel_4byte
 ; X64-NEXT:    vaddps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x58,0xc1]
 ; X64-NEXT:    retq ## encoding: [0xc3]
   %res = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3>, <16 x float> %x2, i16 %x3)
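
Note: The additions above give the retired llvm.x86.avx512.prol/pror/prolv/prorv
intrinsics a home in the -upgrade test file, which verifies that the IR
auto-upgrader still accepts the legacy calls and that they select to the same
rotate instructions. The LCPI203 -> LCPI211 fixup renumbering at the end is a
mechanical consequence of the eight new test functions bumping the
constant-pool label counter. The rewrite the upgrader performs amounts to the
following (a hedged before/after sketch, not copied from the upgrader):

; Before upgrade: legacy immediate rotate-left by 3.
;   %r = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x, i32 3)
; After upgrade: a generic funnel shift with a splat-3 shift vector.
define <16 x i32> @upgraded_prol_d_512(<16 x i32> %x) {
  %r = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>)
  ret <16 x i32> %r
}

declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
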
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index 53a0760..a0e8393 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -990,8 +990,8 @@
 ; CHECK-LABEL: test_x86_vcvtps2ph_256:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtps2ph $2, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT:    vcvtps2ph $2, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vpaddw %ymm1, %ymm2, %ymm1
 ; CHECK-NEXT:    vcvtps2ph $2, %zmm0, (%rsi)
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -2603,11 +2603,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovqb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovqb %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovqb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovqb %zmm0, %xmm0
-; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovqb %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
@@ -2639,11 +2639,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsqb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovsqb %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovsqb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovsqb %zmm0, %xmm0
-; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovsqb %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
@@ -2675,11 +2675,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusqb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovusqb %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovusqb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovusqb %zmm0, %xmm0
-; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovusqb %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
@@ -2711,11 +2711,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovqw %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovqw %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovqw %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovqw %zmm0, %xmm0
-; CHECK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovqw %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
@@ -2747,11 +2747,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsqw %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovsqw %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovsqw %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovsqw %zmm0, %xmm0
-; CHECK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovsqw %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
@@ -2783,11 +2783,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusqw %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovusqw %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovusqw %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovusqw %zmm0, %xmm0
-; CHECK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovusqw %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
@@ -2924,11 +2924,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovdb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovdb %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovdb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovdb %zmm0, %xmm0
-; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovdb %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
@@ -2960,11 +2960,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsdb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovsdb %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovsdb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovsdb %zmm0, %xmm0
-; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovsdb %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
@@ -2996,11 +2996,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusdb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vpmovusdb %zmm0, %xmm2
 ; CHECK-NEXT:    vpmovusdb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovusdb %zmm0, %xmm0
-; CHECK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovusdb %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
@@ -3032,11 +3032,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovdw %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT:    vpmovdw %zmm0, %ymm2
 ; CHECK-NEXT:    vpmovdw %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpaddw %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vpmovdw %zmm0, %ymm0
-; CHECK-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpmovdw %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
     %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
@@ -3067,11 +3067,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsdw %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT:    vpmovsdw %zmm0, %ymm2
 ; CHECK-NEXT:    vpmovsdw %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpaddw %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vpmovsdw %zmm0, %ymm0
-; CHECK-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpmovsdw %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
     %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
@@ -3102,11 +3102,11 @@
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusdw %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT:    vpmovusdw %zmm0, %ymm2
 ; CHECK-NEXT:    vpmovusdw %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpaddw %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vpmovusdw %zmm0, %ymm0
-; CHECK-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpmovusdw %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
 ; CHECK-NEXT:    retq
     %res0 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
     %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
@@ -3849,198 +3849,6 @@
 
 declare i32 @llvm.x86.avx512.vcomi.ss(<4 x float>, <4 x float>, i32, i32)
 
-declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)
-
-define <16 x i32>@test_int_x86_avx512_mask_prolv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_prolv_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprolvd %zmm1, %zmm0, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vprolvd %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
-  %2 = bitcast i16 %x3 to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
-  %4 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
-  %5 = bitcast i16 %x3 to <16 x i1>
-  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
-  %7 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
-  %res3 = add <16 x i32> %3, %6
-  %res4 = add <16 x i32> %res3, %7
-  ret <16 x i32> %res4
-}
-
-declare <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64>, <8 x i64>)
-
-define <8 x i64>@test_int_x86_avx512_mask_prolv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_prolv_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprolvq %zmm1, %zmm0, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vprolvq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
-  %4 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
-  %7 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
-  %res3 = add <8 x i64> %3, %6
-  %res4 = add <8 x i64> %res3, %7
-  ret <8 x i64> %res4
-}
-
-declare <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32>, <16 x i32>)
-
-define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
-  %2 = bitcast i16 %x3 to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
-  %4 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
-  %5 = bitcast i16 %x3 to <16 x i1>
-  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
-  %7 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
-  %res3 = add <16 x i32> %3, %6
-  %res4 = add <16 x i32> %res3, %7
-  ret <16 x i32> %res4
-}
-
-declare <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64>, <8 x i64>)
-
-define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
-  %4 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
-  %7 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
-  %res3 = add <8 x i64> %3, %6
-  %res4 = add <8 x i64> %res3, %7
-  ret <8 x i64> %res4
-}
-
-declare <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32>, i32)
-
-define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_prol_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprold $3, %zmm0, %zmm2
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vprold $3, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vprold $3, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
-  %2 = bitcast i16 %x3 to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
-  %4 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
-  %5 = bitcast i16 %x3 to <16 x i1>
-  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
-  %7 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
-  %res3 = add <16 x i32> %3, %6
-  %res4 = add <16 x i32> %res3, %7
-  ret <16 x i32> %res4
-}
-
-declare <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64>, i32)
-
-define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_prol_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprolq $3, %zmm0, %zmm2
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vprolq $3, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vprolq $3, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddq %zmm2, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
-  %4 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
-  %7 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
-  %res3 = add <8 x i64> %3, %6
-  %res4 = add <8 x i64> %res3, %7
-  ret <8 x i64> %res4
-}
-
-declare <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32>, i32)
-
-define <16 x i32>@test_int_x86_avx512_mask_pror_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pror_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprord $3, %zmm0, %zmm2
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vprord $3, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vprord $3, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
-  %2 = bitcast i16 %x3 to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
-  %4 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
-  %5 = bitcast i16 %x3 to <16 x i1>
-  %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
-  %7 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
-  %res3 = add <16 x i32> %3, %6
-  %res4 = add <16 x i32> %res3, %7
-  ret <16 x i32> %res4
-}
-
-declare <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64>, i32)
-
-define <8 x i64>@test_int_x86_avx512_mask_pror_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pror_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vprorq $3, %zmm0, %zmm2
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vprorq $3, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vprorq $3, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddq %zmm2, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
-  %1 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
-  %4 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
-  %7 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
-  %res3 = add <8 x i64> %3, %6
-  %res4 = add <8 x i64> %res3, %7
-  ret <8 x i64> %res4
-}
-
 declare <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double>, <8 x i64>)
 
 define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
@@ -5418,6 +5226,18 @@
   ret <16 x i32> %res
 }
 
+define <16 x i32> @test_x86_avx512_psllv_d_512_const() {
+; CHECK-LABEL: test_x86_avx512_psllv_d_512_const:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <4,9,0,u,12,7,u,0,32,5,u,0,80,3,u,0>
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %res0 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
+  %res1 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 -1>)
+  %res2 = add <16 x i32> %res0, %res1
+  ret <16 x i32> %res2
+}
+
 define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psllv_d_512:
 ; CHECK:       ## %bb.0:
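
Note: The new _const tests pin down constant folding of the variable-shift
intrinsics. Reading the CHECK constants against the call arguments: in-range
lanes fold arithmetically, zero-input lanes fold to 0 regardless of the shift
amount, and out-of-range shift amounts on nonzero inputs fold to undef (the u
entries in the expected vmovdqa64 constant). A reduced sketch of the
out-of-range case, using the 8-lane q form to keep the vectors short
(illustrative values, not from the tests):

declare <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64>, <8 x i64>)

; Lane 1 shifts a nonzero input by 64, out of range for an i64 lane, so
; constant folding is expected to produce an undef lane there; the remaining
; in-range lanes fold to ordinary shifted constants.
define <8 x i64> @psllv_q_oor_sketch() {
  %r = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 2, i64 9, i64 1, i64 3, i64 7, i64 1, i64 5, i64 0>, <8 x i64> <i64 1, i64 64, i64 2, i64 2, i64 0, i64 3, i64 1, i64 2>)
  ret <8 x i64> %r
}
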
@@ -5454,6 +5274,18 @@
   ret <8 x i64> %res
 }
 
+define <8 x i64> @test_x86_avx512_psllv_q_512_const() {
+; CHECK-LABEL: test_x86_avx512_psllv_q_512_const:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <4,9,0,u,12,7,18446744056529682432,0>
+; CHECK-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %res0 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)
+  %res1 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1,  i64 1, i64 1, i64 1, i64 -1>)
+  %res2 = add <8 x i64> %res0, %res1
+  ret <8 x i64> %res2
+}
+
 define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psllv_q_512:
 ; CHECK:       ## %bb.0:
@@ -5562,6 +5394,18 @@
   ret <16 x i32> %res
 }
 
+define <16 x i32> @test_x86_avx512_psrlv_d_512_const() {
+; CHECK-LABEL: test_x86_avx512_psrlv_d_512_const:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <1,9,0,u,0,7,u,0,0,5,u,0,0,3,u,0>
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %res0 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
+  %res1 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 -1  >)
+  %res2 = add <16 x i32> %res0, %res1
+  ret <16 x i32> %res2
+}
+
 define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrlv_d_512:
 ; CHECK:       ## %bb.0:
@@ -5598,6 +5442,18 @@
   ret <8 x i64> %res
 }
 
+define <8 x i64> @test_x86_avx512_psrlv_q_512_const() {
+; CHECK-LABEL: test_x86_avx512_psrlv_q_512_const:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <1,9,0,u,0,7,1073741823,0>
+; CHECK-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %res0 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)
+  %res1 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1,  i64 1, i64 1, i64 1, i64 -1>)
+  %res2 = add <8 x i64> %res0, %res1
+  ret <8 x i64> %res2
+}
+
 define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrlv_q_512:
 ; CHECK:       ## %bb.0:
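
Note: The avx512-mask-op.ll hunks below are pure register-allocation churn:
every prefix emits the same kshift/kxor/kor sequence, but the roles of k0 and
k1 are swapped and the kshiftrw $8 result is parked in k2 instead of k0, with
no change in instruction count. Judging from the check lines (the IR body of
test18 is not part of this patch), the test builds a <8 x i1> mask from the
scalar arguments and splices a bit of %y into it, along these lines (an
assumed reconstruction, not copied from the test file):

; Assumed shape of the operation under test: treat %a as an 8-lane mask and
; overwrite lane 6 with bit 9 of %y, matching the kshiftrw $9 / kxor-at-6
; steps in the checks.
define <8 x i1> @mask_splice_sketch(i8 %a, i16 %y) {
  %m = bitcast i8 %a to <8 x i1>
  %yv = bitcast i16 %y to <16 x i1>
  %bit = extractelement <16 x i1> %yv, i32 9
  %r = insertelement <8 x i1> %m, i1 %bit, i32 6
  ret <8 x i1> %r
}
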
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index 5996de5..1110e02 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -1278,19 +1278,19 @@
 define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-LABEL: test18:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    kmovw %edi, %k1
-; KNL-NEXT:    kmovw %esi, %k2
-; KNL-NEXT:    kshiftrw $8, %k2, %k0
-; KNL-NEXT:    kshiftrw $9, %k2, %k2
-; KNL-NEXT:    kshiftrw $6, %k1, %k3
-; KNL-NEXT:    kxorw %k2, %k3, %k2
-; KNL-NEXT:    kshiftlw $15, %k2, %k2
-; KNL-NEXT:    kshiftrw $9, %k2, %k2
-; KNL-NEXT:    kxorw %k2, %k1, %k1
-; KNL-NEXT:    kshiftlw $9, %k1, %k1
+; KNL-NEXT:    kmovw %edi, %k0
+; KNL-NEXT:    kmovw %esi, %k1
+; KNL-NEXT:    kshiftrw $8, %k1, %k2
 ; KNL-NEXT:    kshiftrw $9, %k1, %k1
-; KNL-NEXT:    kshiftlw $7, %k0, %k0
-; KNL-NEXT:    korw %k0, %k1, %k1
+; KNL-NEXT:    kshiftrw $6, %k0, %k3
+; KNL-NEXT:    kxorw %k1, %k3, %k1
+; KNL-NEXT:    kshiftlw $15, %k1, %k1
+; KNL-NEXT:    kshiftrw $9, %k1, %k1
+; KNL-NEXT:    kxorw %k1, %k0, %k0
+; KNL-NEXT:    kshiftlw $9, %k0, %k0
+; KNL-NEXT:    kshiftrw $9, %k0, %k0
+; KNL-NEXT:    kshiftlw $7, %k2, %k1
+; KNL-NEXT:    korw %k1, %k0, %k1
 ; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    vpmovdw %zmm0, %ymm0
 ; KNL-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -1299,37 +1299,37 @@
 ;
 ; SKX-LABEL: test18:
 ; SKX:       ## %bb.0:
-; SKX-NEXT:    kmovd %edi, %k1
-; SKX-NEXT:    kmovd %esi, %k2
-; SKX-NEXT:    kshiftrw $8, %k2, %k0
-; SKX-NEXT:    kshiftrw $9, %k2, %k2
-; SKX-NEXT:    kshiftrb $6, %k1, %k3
-; SKX-NEXT:    kxorb %k2, %k3, %k2
-; SKX-NEXT:    kshiftlb $7, %k2, %k2
-; SKX-NEXT:    kshiftrb $1, %k2, %k2
-; SKX-NEXT:    kxorb %k2, %k1, %k1
-; SKX-NEXT:    kshiftlb $1, %k1, %k1
+; SKX-NEXT:    kmovd %edi, %k0
+; SKX-NEXT:    kmovd %esi, %k1
+; SKX-NEXT:    kshiftrw $8, %k1, %k2
+; SKX-NEXT:    kshiftrw $9, %k1, %k1
+; SKX-NEXT:    kshiftrb $6, %k0, %k3
+; SKX-NEXT:    kxorb %k1, %k3, %k1
+; SKX-NEXT:    kshiftlb $7, %k1, %k1
 ; SKX-NEXT:    kshiftrb $1, %k1, %k1
-; SKX-NEXT:    kshiftlb $7, %k0, %k0
-; SKX-NEXT:    korb %k0, %k1, %k0
+; SKX-NEXT:    kxorb %k1, %k0, %k0
+; SKX-NEXT:    kshiftlb $1, %k0, %k0
+; SKX-NEXT:    kshiftrb $1, %k0, %k0
+; SKX-NEXT:    kshiftlb $7, %k2, %k1
+; SKX-NEXT:    korb %k1, %k0, %k0
 ; SKX-NEXT:    vpmovm2w %k0, %xmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test18:
 ; AVX512BW:       ## %bb.0:
-; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    kmovd %esi, %k2
-; AVX512BW-NEXT:    kshiftrw $8, %k2, %k0
-; AVX512BW-NEXT:    kshiftrw $9, %k2, %k2
-; AVX512BW-NEXT:    kshiftrw $6, %k1, %k3
-; AVX512BW-NEXT:    kxorw %k2, %k3, %k2
-; AVX512BW-NEXT:    kshiftlw $15, %k2, %k2
-; AVX512BW-NEXT:    kshiftrw $9, %k2, %k2
-; AVX512BW-NEXT:    kxorw %k2, %k1, %k1
-; AVX512BW-NEXT:    kshiftlw $9, %k1, %k1
+; AVX512BW-NEXT:    kmovd %edi, %k0
+; AVX512BW-NEXT:    kmovd %esi, %k1
+; AVX512BW-NEXT:    kshiftrw $8, %k1, %k2
 ; AVX512BW-NEXT:    kshiftrw $9, %k1, %k1
-; AVX512BW-NEXT:    kshiftlw $7, %k0, %k0
-; AVX512BW-NEXT:    korw %k0, %k1, %k0
+; AVX512BW-NEXT:    kshiftrw $6, %k0, %k3
+; AVX512BW-NEXT:    kxorw %k1, %k3, %k1
+; AVX512BW-NEXT:    kshiftlw $15, %k1, %k1
+; AVX512BW-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512BW-NEXT:    kxorw %k1, %k0, %k0
+; AVX512BW-NEXT:    kshiftlw $9, %k0, %k0
+; AVX512BW-NEXT:    kshiftrw $9, %k0, %k0
+; AVX512BW-NEXT:    kshiftlw $7, %k2, %k1
+; AVX512BW-NEXT:    korw %k1, %k0, %k0
 ; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper
@@ -1337,19 +1337,19 @@
 ;
 ; AVX512DQ-LABEL: test18:
 ; AVX512DQ:       ## %bb.0:
-; AVX512DQ-NEXT:    kmovw %edi, %k1
-; AVX512DQ-NEXT:    kmovw %esi, %k2
-; AVX512DQ-NEXT:    kshiftrw $8, %k2, %k0
-; AVX512DQ-NEXT:    kshiftrw $9, %k2, %k2
-; AVX512DQ-NEXT:    kshiftrb $6, %k1, %k3
-; AVX512DQ-NEXT:    kxorb %k2, %k3, %k2
-; AVX512DQ-NEXT:    kshiftlb $7, %k2, %k2
-; AVX512DQ-NEXT:    kshiftrb $1, %k2, %k2
-; AVX512DQ-NEXT:    kxorb %k2, %k1, %k1
-; AVX512DQ-NEXT:    kshiftlb $1, %k1, %k1
+; AVX512DQ-NEXT:    kmovw %edi, %k0
+; AVX512DQ-NEXT:    kmovw %esi, %k1
+; AVX512DQ-NEXT:    kshiftrw $8, %k1, %k2
+; AVX512DQ-NEXT:    kshiftrw $9, %k1, %k1
+; AVX512DQ-NEXT:    kshiftrb $6, %k0, %k3
+; AVX512DQ-NEXT:    kxorb %k1, %k3, %k1
+; AVX512DQ-NEXT:    kshiftlb $7, %k1, %k1
 ; AVX512DQ-NEXT:    kshiftrb $1, %k1, %k1
-; AVX512DQ-NEXT:    kshiftlb $7, %k0, %k0
-; AVX512DQ-NEXT:    korb %k0, %k1, %k0
+; AVX512DQ-NEXT:    kxorb %k1, %k0, %k0
+; AVX512DQ-NEXT:    kshiftlb $1, %k0, %k0
+; AVX512DQ-NEXT:    kshiftrb $1, %k0, %k0
+; AVX512DQ-NEXT:    kshiftlb $7, %k2, %k1
+; AVX512DQ-NEXT:    korb %k1, %k0, %k0
 ; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512DQ-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -1360,16 +1360,16 @@
 ; X86:       ## %bb.0:
 ; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k0
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    kshiftrw $9, %k1, %k2
-; X86-NEXT:    kshiftrw $8, %k1, %k1
-; X86-NEXT:    kshiftlb $7, %k1, %k1
+; X86-NEXT:    kshiftrw $8, %k1, %k2
+; X86-NEXT:    kshiftrw $9, %k1, %k1
 ; X86-NEXT:    kshiftrb $6, %k0, %k3
-; X86-NEXT:    kxorb %k2, %k3, %k2
-; X86-NEXT:    kshiftlb $7, %k2, %k2
-; X86-NEXT:    kshiftrb $1, %k2, %k2
-; X86-NEXT:    kxorb %k2, %k0, %k0
+; X86-NEXT:    kxorb %k1, %k3, %k1
+; X86-NEXT:    kshiftlb $7, %k1, %k1
+; X86-NEXT:    kshiftrb $1, %k1, %k1
+; X86-NEXT:    kxorb %k1, %k0, %k0
 ; X86-NEXT:    kshiftlb $1, %k0, %k0
 ; X86-NEXT:    kshiftrb $1, %k0, %k0
+; X86-NEXT:    kshiftlb $7, %k2, %k1
 ; X86-NEXT:    korb %k1, %k0, %k0
 ; X86-NEXT:    vpmovm2w %k0, %xmm0
 ; X86-NEXT:    retl
@@ -2197,8 +2197,7 @@
 define <8 x i64> @load_8i1(<8 x i1>* %a) {
 ; KNL-LABEL: load_8i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    movzbl (%rdi), %eax
-; KNL-NEXT:    kmovw %eax, %k1
+; KNL-NEXT:    kmovw (%rdi), %k1
 ; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
@@ -2210,8 +2209,7 @@
 ;
 ; AVX512BW-LABEL: load_8i1:
 ; AVX512BW:       ## %bb.0:
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    kmovw (%rdi), %k1
 ; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    retq
 ;
@@ -2271,8 +2269,7 @@
 define <2 x i16> @load_2i1(<2 x i1>* %a) {
 ; KNL-LABEL: load_2i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    movzbl (%rdi), %eax
-; KNL-NEXT:    kmovw %eax, %k1
+; KNL-NEXT:    kmovw (%rdi), %k1
 ; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $zmm0
 ; KNL-NEXT:    vzeroupper
@@ -2286,8 +2283,7 @@
 ;
 ; AVX512BW-LABEL: load_2i1:
 ; AVX512BW:       ## %bb.0:
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    kmovw (%rdi), %k1
 ; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper
@@ -2315,8 +2311,7 @@
 define <4 x i16> @load_4i1(<4 x i1>* %a) {
 ; KNL-LABEL: load_4i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    movzbl (%rdi), %eax
-; KNL-NEXT:    kmovw %eax, %k1
+; KNL-NEXT:    kmovw (%rdi), %k1
 ; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $zmm0
 ; KNL-NEXT:    vzeroupper
@@ -2330,8 +2325,7 @@
 ;
 ; AVX512BW-LABEL: load_4i1:
 ; AVX512BW:       ## %bb.0:
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    kmovw (%rdi), %k1
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper
@@ -3479,3 +3473,784 @@
   ret void
 }
 declare void @llvm.masked.store.v16i32.p0v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>)
+
+define void @ktest_3(<8 x i32> %w, <8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
+; KNL-LABEL: ktest_3:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    pushq %rax
+; KNL-NEXT:    .cfi_def_cfa_offset 16
+; KNL-NEXT:    ## kill: def $ymm3 killed $ymm3 def $zmm3
+; KNL-NEXT:    ## kill: def $ymm2 killed $ymm2 def $zmm2
+; KNL-NEXT:    ## kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT:    ## kill: def $ymm0 killed $ymm0 def $zmm0
+; KNL-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; KNL-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
+; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k3
+; KNL-NEXT:    korw %k1, %k0, %k0
+; KNL-NEXT:    korw %k3, %k2, %k1
+; KNL-NEXT:    kandw %k1, %k0, %k0
+; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    testb %al, %al
+; KNL-NEXT:    je LBB71_1
+; KNL-NEXT:  ## %bb.2: ## %exit
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+; KNL-NEXT:  LBB71_1: ## %bar
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    callq _foo
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ktest_3:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    pushq %rax
+; SKX-NEXT:    .cfi_def_cfa_offset 16
+; SKX-NEXT:    vptestnmd %ymm0, %ymm0, %k0
+; SKX-NEXT:    vptestnmd %ymm1, %ymm1, %k1
+; SKX-NEXT:    korb %k1, %k0, %k0
+; SKX-NEXT:    vptestnmd %ymm2, %ymm2, %k1
+; SKX-NEXT:    vptestnmd %ymm3, %ymm3, %k2
+; SKX-NEXT:    korb %k2, %k1, %k1
+; SKX-NEXT:    ktestb %k1, %k0
+; SKX-NEXT:    je LBB71_1
+; SKX-NEXT:  ## %bb.2: ## %exit
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
+; SKX-NEXT:  LBB71_1: ## %bar
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    callq _foo
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    retq
+;
+; AVX512BW-LABEL: ktest_3:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    pushq %rax
+; AVX512BW-NEXT:    .cfi_def_cfa_offset 16
+; AVX512BW-NEXT:    ## kill: def $ymm3 killed $ymm3 def $zmm3
+; AVX512BW-NEXT:    ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    ## kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; AVX512BW-NEXT:    vptestnmd %zmm2, %zmm2, %k2
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm3, %k3
+; AVX512BW-NEXT:    korw %k1, %k0, %k0
+; AVX512BW-NEXT:    korw %k3, %k2, %k1
+; AVX512BW-NEXT:    kandw %k1, %k0, %k0
+; AVX512BW-NEXT:    kmovd %k0, %eax
+; AVX512BW-NEXT:    testb %al, %al
+; AVX512BW-NEXT:    je LBB71_1
+; AVX512BW-NEXT:  ## %bb.2: ## %exit
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+; AVX512BW-NEXT:  LBB71_1: ## %bar
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    callq _foo
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    retq
+;
+; AVX512DQ-LABEL: ktest_3:
+; AVX512DQ:       ## %bb.0:
+; AVX512DQ-NEXT:    pushq %rax
+; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT:    ## kill: def $ymm3 killed $ymm3 def $zmm3
+; AVX512DQ-NEXT:    ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512DQ-NEXT:    ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT:    ## kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512DQ-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; AVX512DQ-NEXT:    vptestnmd %zmm2, %zmm2, %k2
+; AVX512DQ-NEXT:    vptestnmd %zmm3, %zmm3, %k3
+; AVX512DQ-NEXT:    korb %k1, %k0, %k0
+; AVX512DQ-NEXT:    korb %k3, %k2, %k1
+; AVX512DQ-NEXT:    ktestb %k1, %k0
+; AVX512DQ-NEXT:    je LBB71_1
+; AVX512DQ-NEXT:  ## %bb.2: ## %exit
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    retq
+; AVX512DQ-NEXT:  LBB71_1: ## %bar
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    callq _foo
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    retq
+;
+; X86-LABEL: ktest_3:
+; X86:       ## %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    vptestnmd %ymm0, %ymm0, %k0
+; X86-NEXT:    vptestnmd %ymm1, %ymm1, %k1
+; X86-NEXT:    korb %k1, %k0, %k0
+; X86-NEXT:    vptestnmd %ymm2, %ymm2, %k1
+; X86-NEXT:    vptestnmd %ymm3, %ymm3, %k2
+; X86-NEXT:    korb %k2, %k1, %k1
+; X86-NEXT:    ktestb %k1, %k0
+; X86-NEXT:    je LBB71_1
+; X86-NEXT:  ## %bb.2: ## %exit
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
+; X86-NEXT:  LBB71_1: ## %bar
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    calll _foo
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+  %a = icmp eq <8 x i32> %w, zeroinitializer
+  %b = icmp eq <8 x i32> %x, zeroinitializer
+  %c = icmp eq <8 x i32> %y, zeroinitializer
+  %d = icmp eq <8 x i32> %z, zeroinitializer
+  %e = or <8 x i1> %a, %b
+  %f = or <8 x i1> %c, %d
+  %g = and <8 x i1> %e, %f
+  %h = bitcast <8 x i1> %g to i8
+  %i = icmp eq i8 %h, 0
+  br i1 %i, label %bar, label %exit
+
+bar:
+  call void @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+define void @ktest_4(<8 x i64> %w, <8 x i64> %x, <8 x i64> %y, <8 x i64> %z) {
+; KNL-LABEL: ktest_4:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    pushq %rax
+; KNL-NEXT:    .cfi_def_cfa_offset 16
+; KNL-NEXT:    vptestnmq %zmm0, %zmm0, %k0
+; KNL-NEXT:    vptestnmq %zmm1, %zmm1, %k1
+; KNL-NEXT:    vptestnmq %zmm2, %zmm2, %k2
+; KNL-NEXT:    vptestnmq %zmm3, %zmm3, %k3
+; KNL-NEXT:    korw %k1, %k0, %k0
+; KNL-NEXT:    korw %k3, %k2, %k1
+; KNL-NEXT:    kandw %k1, %k0, %k0
+; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    testb %al, %al
+; KNL-NEXT:    je LBB72_1
+; KNL-NEXT:  ## %bb.2: ## %exit
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+; KNL-NEXT:  LBB72_1: ## %bar
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    callq _foo
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ktest_4:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    pushq %rax
+; SKX-NEXT:    .cfi_def_cfa_offset 16
+; SKX-NEXT:    vptestnmq %zmm0, %zmm0, %k0
+; SKX-NEXT:    vptestnmq %zmm1, %zmm1, %k1
+; SKX-NEXT:    korb %k1, %k0, %k0
+; SKX-NEXT:    vptestnmq %zmm2, %zmm2, %k1
+; SKX-NEXT:    vptestnmq %zmm3, %zmm3, %k2
+; SKX-NEXT:    korb %k2, %k1, %k1
+; SKX-NEXT:    ktestb %k1, %k0
+; SKX-NEXT:    je LBB72_1
+; SKX-NEXT:  ## %bb.2: ## %exit
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
+; SKX-NEXT:  LBB72_1: ## %bar
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    callq _foo
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    retq
+;
+; AVX512BW-LABEL: ktest_4:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    pushq %rax
+; AVX512BW-NEXT:    .cfi_def_cfa_offset 16
+; AVX512BW-NEXT:    vptestnmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT:    vptestnmq %zmm1, %zmm1, %k1
+; AVX512BW-NEXT:    vptestnmq %zmm2, %zmm2, %k2
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm3, %k3
+; AVX512BW-NEXT:    korw %k1, %k0, %k0
+; AVX512BW-NEXT:    korw %k3, %k2, %k1
+; AVX512BW-NEXT:    kandw %k1, %k0, %k0
+; AVX512BW-NEXT:    kmovd %k0, %eax
+; AVX512BW-NEXT:    testb %al, %al
+; AVX512BW-NEXT:    je LBB72_1
+; AVX512BW-NEXT:  ## %bb.2: ## %exit
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+; AVX512BW-NEXT:  LBB72_1: ## %bar
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    callq _foo
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    retq
+;
+; AVX512DQ-LABEL: ktest_4:
+; AVX512DQ:       ## %bb.0:
+; AVX512DQ-NEXT:    pushq %rax
+; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT:    vptestnmq %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT:    vptestnmq %zmm1, %zmm1, %k1
+; AVX512DQ-NEXT:    korb %k1, %k0, %k0
+; AVX512DQ-NEXT:    vptestnmq %zmm2, %zmm2, %k1
+; AVX512DQ-NEXT:    vptestnmq %zmm3, %zmm3, %k2
+; AVX512DQ-NEXT:    korb %k2, %k1, %k1
+; AVX512DQ-NEXT:    ktestb %k1, %k0
+; AVX512DQ-NEXT:    je LBB72_1
+; AVX512DQ-NEXT:  ## %bb.2: ## %exit
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    retq
+; AVX512DQ-NEXT:  LBB72_1: ## %bar
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    callq _foo
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    retq
+;
+; X86-LABEL: ktest_4:
+; X86:       ## %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    vptestnmq %zmm0, %zmm0, %k0
+; X86-NEXT:    vptestnmq %zmm1, %zmm1, %k1
+; X86-NEXT:    korb %k1, %k0, %k0
+; X86-NEXT:    vptestnmq %zmm2, %zmm2, %k1
+; X86-NEXT:    vptestnmq %zmm3, %zmm3, %k2
+; X86-NEXT:    korb %k2, %k1, %k1
+; X86-NEXT:    ktestb %k1, %k0
+; X86-NEXT:    je LBB72_1
+; X86-NEXT:  ## %bb.2: ## %exit
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
+; X86-NEXT:  LBB72_1: ## %bar
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    calll _foo
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+  %a = icmp eq <8 x i64> %w, zeroinitializer
+  %b = icmp eq <8 x i64> %x, zeroinitializer
+  %c = icmp eq <8 x i64> %y, zeroinitializer
+  %d = icmp eq <8 x i64> %z, zeroinitializer
+  %e = or <8 x i1> %a, %b
+  %f = or <8 x i1> %c, %d
+  %g = and <8 x i1> %e, %f
+  %h = bitcast <8 x i1> %g to i8
+  %i = icmp eq i8 %h, 0
+  br i1 %i, label %bar, label %exit
+
+bar:
+  call void @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+define void @ktest_5(<16 x i32> %w, <16 x i32> %x, <16 x i32> %y, <16 x i32> %z) {
+; KNL-LABEL: ktest_5:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    pushq %rax
+; KNL-NEXT:    .cfi_def_cfa_offset 16
+; KNL-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; KNL-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; KNL-NEXT:    korw %k1, %k0, %k0
+; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k1
+; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k2
+; KNL-NEXT:    korw %k2, %k1, %k1
+; KNL-NEXT:    kandw %k1, %k0, %k0
+; KNL-NEXT:    kortestw %k0, %k0
+; KNL-NEXT:    je LBB73_1
+; KNL-NEXT:  ## %bb.2: ## %exit
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+; KNL-NEXT:  LBB73_1: ## %bar
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    callq _foo
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ktest_5:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    pushq %rax
+; SKX-NEXT:    .cfi_def_cfa_offset 16
+; SKX-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; SKX-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; SKX-NEXT:    korw %k1, %k0, %k0
+; SKX-NEXT:    vptestnmd %zmm2, %zmm2, %k1
+; SKX-NEXT:    vptestnmd %zmm3, %zmm3, %k2
+; SKX-NEXT:    korw %k2, %k1, %k1
+; SKX-NEXT:    ktestw %k1, %k0
+; SKX-NEXT:    je LBB73_1
+; SKX-NEXT:  ## %bb.2: ## %exit
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
+; SKX-NEXT:  LBB73_1: ## %bar
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    callq _foo
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    retq
+;
+; AVX512BW-LABEL: ktest_5:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    pushq %rax
+; AVX512BW-NEXT:    .cfi_def_cfa_offset 16
+; AVX512BW-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; AVX512BW-NEXT:    korw %k1, %k0, %k0
+; AVX512BW-NEXT:    vptestnmd %zmm2, %zmm2, %k1
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm3, %k2
+; AVX512BW-NEXT:    korw %k2, %k1, %k1
+; AVX512BW-NEXT:    kandw %k1, %k0, %k0
+; AVX512BW-NEXT:    kortestw %k0, %k0
+; AVX512BW-NEXT:    je LBB73_1
+; AVX512BW-NEXT:  ## %bb.2: ## %exit
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+; AVX512BW-NEXT:  LBB73_1: ## %bar
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    callq _foo
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    retq
+;
+; AVX512DQ-LABEL: ktest_5:
+; AVX512DQ:       ## %bb.0:
+; AVX512DQ-NEXT:    pushq %rax
+; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; AVX512DQ-NEXT:    korw %k1, %k0, %k0
+; AVX512DQ-NEXT:    vptestnmd %zmm2, %zmm2, %k1
+; AVX512DQ-NEXT:    vptestnmd %zmm3, %zmm3, %k2
+; AVX512DQ-NEXT:    korw %k2, %k1, %k1
+; AVX512DQ-NEXT:    ktestw %k1, %k0
+; AVX512DQ-NEXT:    je LBB73_1
+; AVX512DQ-NEXT:  ## %bb.2: ## %exit
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    retq
+; AVX512DQ-NEXT:  LBB73_1: ## %bar
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    callq _foo
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    retq
+;
+; X86-LABEL: ktest_5:
+; X86:       ## %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    vptestnmd %zmm0, %zmm0, %k0
+; X86-NEXT:    vptestnmd %zmm1, %zmm1, %k1
+; X86-NEXT:    korw %k1, %k0, %k0
+; X86-NEXT:    vptestnmd %zmm2, %zmm2, %k1
+; X86-NEXT:    vptestnmd %zmm3, %zmm3, %k2
+; X86-NEXT:    korw %k2, %k1, %k1
+; X86-NEXT:    ktestw %k1, %k0
+; X86-NEXT:    je LBB73_1
+; X86-NEXT:  ## %bb.2: ## %exit
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
+; X86-NEXT:  LBB73_1: ## %bar
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    calll _foo
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+  %a = icmp eq <16 x i32> %w, zeroinitializer
+  %b = icmp eq <16 x i32> %x, zeroinitializer
+  %c = icmp eq <16 x i32> %y, zeroinitializer
+  %d = icmp eq <16 x i32> %z, zeroinitializer
+  %e = or <16 x i1> %a, %b
+  %f = or <16 x i1> %c, %d
+  %g = and <16 x i1> %e, %f
+  %h = bitcast <16 x i1> %g to i16
+  %i = icmp eq i16 %h, 0
+  br i1 %i, label %bar, label %exit
+
+bar:
+  call void @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+define void @ktest_6(<32 x i16> %w, <32 x i16> %x, <32 x i16> %y, <32 x i16> %z) {
+; KNL-LABEL: ktest_6:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    pushq %rax
+; KNL-NEXT:    .cfi_def_cfa_offset 16
+; KNL-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm0, %ymm0
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm1, %ymm1
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm2, %ymm2
+; KNL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm3, %ymm2
+; KNL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm4, %ymm2
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm5, %ymm3
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm6, %ymm4
+; KNL-NEXT:    vpor %ymm4, %ymm2, %ymm2
+; KNL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; KNL-NEXT:    vpcmpeqw %ymm8, %ymm7, %ymm2
+; KNL-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; KNL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; KNL-NEXT:    vpmovsxwd %ymm0, %zmm0
+; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    vpmovsxwd %ymm1, %zmm0
+; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT:    kmovw %k0, %ecx
+; KNL-NEXT:    shll $16, %ecx
+; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    je LBB74_1
+; KNL-NEXT:  ## %bb.2: ## %exit
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+; KNL-NEXT:  LBB74_1: ## %bar
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    callq _foo
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ktest_6:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    pushq %rax
+; SKX-NEXT:    .cfi_def_cfa_offset 16
+; SKX-NEXT:    vptestnmw %zmm0, %zmm0, %k0
+; SKX-NEXT:    vptestnmw %zmm1, %zmm1, %k1
+; SKX-NEXT:    kord %k1, %k0, %k0
+; SKX-NEXT:    vptestnmw %zmm2, %zmm2, %k1
+; SKX-NEXT:    vptestnmw %zmm3, %zmm3, %k2
+; SKX-NEXT:    kord %k2, %k1, %k1
+; SKX-NEXT:    ktestd %k1, %k0
+; SKX-NEXT:    je LBB74_1
+; SKX-NEXT:  ## %bb.2: ## %exit
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
+; SKX-NEXT:  LBB74_1: ## %bar
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    callq _foo
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    retq
+;
+; AVX512BW-LABEL: ktest_6:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    pushq %rax
+; AVX512BW-NEXT:    .cfi_def_cfa_offset 16
+; AVX512BW-NEXT:    vptestnmw %zmm0, %zmm0, %k0
+; AVX512BW-NEXT:    vptestnmw %zmm1, %zmm1, %k1
+; AVX512BW-NEXT:    kord %k1, %k0, %k0
+; AVX512BW-NEXT:    vptestnmw %zmm2, %zmm2, %k1
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm3, %k2
+; AVX512BW-NEXT:    kord %k2, %k1, %k1
+; AVX512BW-NEXT:    ktestd %k1, %k0
+; AVX512BW-NEXT:    je LBB74_1
+; AVX512BW-NEXT:  ## %bb.2: ## %exit
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+; AVX512BW-NEXT:  LBB74_1: ## %bar
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    callq _foo
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    retq
+;
+; AVX512DQ-LABEL: ktest_6:
+; AVX512DQ:       ## %bb.0:
+; AVX512DQ-NEXT:    pushq %rax
+; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm3, %ymm2
+; AVX512DQ-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm4, %ymm2
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm5, %ymm3
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm6, %ymm4
+; AVX512DQ-NEXT:    vpor %ymm4, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpcmpeqw %ymm8, %ymm7, %ymm2
+; AVX512DQ-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
+; AVX512DQ-NEXT:    kmovw %k0, %eax
+; AVX512DQ-NEXT:    vpmovsxwd %ymm1, %zmm0
+; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
+; AVX512DQ-NEXT:    kmovw %k0, %ecx
+; AVX512DQ-NEXT:    shll $16, %ecx
+; AVX512DQ-NEXT:    orl %eax, %ecx
+; AVX512DQ-NEXT:    je LBB74_1
+; AVX512DQ-NEXT:  ## %bb.2: ## %exit
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    retq
+; AVX512DQ-NEXT:  LBB74_1: ## %bar
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    callq _foo
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    retq
+;
+; X86-LABEL: ktest_6:
+; X86:       ## %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    vptestnmw %zmm0, %zmm0, %k0
+; X86-NEXT:    vptestnmw %zmm1, %zmm1, %k1
+; X86-NEXT:    kord %k1, %k0, %k0
+; X86-NEXT:    vptestnmw %zmm2, %zmm2, %k1
+; X86-NEXT:    vptestnmw %zmm3, %zmm3, %k2
+; X86-NEXT:    kord %k2, %k1, %k1
+; X86-NEXT:    ktestd %k1, %k0
+; X86-NEXT:    je LBB74_1
+; X86-NEXT:  ## %bb.2: ## %exit
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
+; X86-NEXT:  LBB74_1: ## %bar
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    calll _foo
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+  %a = icmp eq <32 x i16> %w, zeroinitializer
+  %b = icmp eq <32 x i16> %x, zeroinitializer
+  %c = icmp eq <32 x i16> %y, zeroinitializer
+  %d = icmp eq <32 x i16> %z, zeroinitializer
+  %e = or <32 x i1> %a, %b
+  %f = or <32 x i1> %c, %d
+  %g = and <32 x i1> %e, %f
+  %h = bitcast <32 x i1> %g to i32
+  %i = icmp eq i32 %h, 0
+  br i1 %i, label %bar, label %exit
+
+bar:
+  call void @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+define void @ktest_7(<64 x i8> %w, <64 x i8> %x, <64 x i8> %y, <64 x i8> %z) {
+; KNL-LABEL: ktest_7:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    pushq %rax
+; KNL-NEXT:    .cfi_def_cfa_offset 16
+; KNL-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm0, %ymm9
+; KNL-NEXT:    vextracti128 $1, %ymm9, %xmm0
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm1, %ymm10
+; KNL-NEXT:    vextracti128 $1, %ymm10, %xmm1
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm2, %ymm11
+; KNL-NEXT:    vextracti128 $1, %ymm11, %xmm2
+; KNL-NEXT:    vpor %xmm2, %xmm0, %xmm13
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm3, %ymm2
+; KNL-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; KNL-NEXT:    vpor %xmm3, %xmm1, %xmm12
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm4, %ymm3
+; KNL-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm5, %ymm5
+; KNL-NEXT:    vextracti128 $1, %ymm5, %xmm1
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm6, %ymm6
+; KNL-NEXT:    vextracti128 $1, %ymm6, %xmm0
+; KNL-NEXT:    vpor %xmm0, %xmm4, %xmm0
+; KNL-NEXT:    vpand %xmm0, %xmm13, %xmm0
+; KNL-NEXT:    vpcmpeqb %ymm8, %ymm7, %ymm4
+; KNL-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; KNL-NEXT:    vpor %xmm7, %xmm1, %xmm1
+; KNL-NEXT:    vpand %xmm1, %xmm12, %xmm1
+; KNL-NEXT:    vpor %xmm2, %xmm10, %xmm2
+; KNL-NEXT:    vpor %xmm11, %xmm9, %xmm7
+; KNL-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; KNL-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; KNL-NEXT:    vpor %xmm6, %xmm3, %xmm3
+; KNL-NEXT:    vpand %xmm3, %xmm7, %xmm3
+; KNL-NEXT:    vpmovsxbd %xmm3, %zmm3
+; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0
+; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT:    kmovw %k0, %ecx
+; KNL-NEXT:    shll $16, %ecx
+; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    vpmovsxbd %xmm2, %zmm0
+; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    vpmovsxbd %xmm1, %zmm0
+; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT:    kmovw %k0, %edx
+; KNL-NEXT:    shll $16, %edx
+; KNL-NEXT:    orl %eax, %edx
+; KNL-NEXT:    shlq $32, %rdx
+; KNL-NEXT:    orq %rcx, %rdx
+; KNL-NEXT:    je LBB75_1
+; KNL-NEXT:  ## %bb.2: ## %exit
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+; KNL-NEXT:  LBB75_1: ## %bar
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    callq _foo
+; KNL-NEXT:    popq %rax
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ktest_7:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    pushq %rax
+; SKX-NEXT:    .cfi_def_cfa_offset 16
+; SKX-NEXT:    vptestnmb %zmm0, %zmm0, %k0
+; SKX-NEXT:    vptestnmb %zmm1, %zmm1, %k1
+; SKX-NEXT:    korq %k1, %k0, %k0
+; SKX-NEXT:    vptestnmb %zmm2, %zmm2, %k1
+; SKX-NEXT:    vptestnmb %zmm3, %zmm3, %k2
+; SKX-NEXT:    korq %k2, %k1, %k1
+; SKX-NEXT:    ktestq %k1, %k0
+; SKX-NEXT:    je LBB75_1
+; SKX-NEXT:  ## %bb.2: ## %exit
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
+; SKX-NEXT:  LBB75_1: ## %bar
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    callq _foo
+; SKX-NEXT:    popq %rax
+; SKX-NEXT:    retq
+;
+; AVX512BW-LABEL: ktest_7:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    pushq %rax
+; AVX512BW-NEXT:    .cfi_def_cfa_offset 16
+; AVX512BW-NEXT:    vptestnmb %zmm0, %zmm0, %k0
+; AVX512BW-NEXT:    vptestnmb %zmm1, %zmm1, %k1
+; AVX512BW-NEXT:    korq %k1, %k0, %k0
+; AVX512BW-NEXT:    vptestnmb %zmm2, %zmm2, %k1
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm3, %k2
+; AVX512BW-NEXT:    korq %k2, %k1, %k1
+; AVX512BW-NEXT:    ktestq %k1, %k0
+; AVX512BW-NEXT:    je LBB75_1
+; AVX512BW-NEXT:  ## %bb.2: ## %exit
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+; AVX512BW-NEXT:  LBB75_1: ## %bar
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    callq _foo
+; AVX512BW-NEXT:    popq %rax
+; AVX512BW-NEXT:    retq
+;
+; AVX512DQ-LABEL: ktest_7:
+; AVX512DQ:       ## %bb.0:
+; AVX512DQ-NEXT:    pushq %rax
+; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm0, %ymm9
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm9, %xmm0
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm1, %ymm10
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm10, %xmm1
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm2, %ymm11
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm11, %xmm2
+; AVX512DQ-NEXT:    vpor %xmm2, %xmm0, %xmm13
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm3, %ymm2
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-NEXT:    vpor %xmm3, %xmm1, %xmm12
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm4, %ymm3
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm5, %xmm1
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm6, %ymm6
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm6, %xmm0
+; AVX512DQ-NEXT:    vpor %xmm0, %xmm4, %xmm0
+; AVX512DQ-NEXT:    vpand %xmm0, %xmm13, %xmm0
+; AVX512DQ-NEXT:    vpcmpeqb %ymm8, %ymm7, %ymm4
+; AVX512DQ-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512DQ-NEXT:    vpor %xmm7, %xmm1, %xmm1
+; AVX512DQ-NEXT:    vpand %xmm1, %xmm12, %xmm1
+; AVX512DQ-NEXT:    vpor %xmm2, %xmm10, %xmm2
+; AVX512DQ-NEXT:    vpor %xmm11, %xmm9, %xmm7
+; AVX512DQ-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512DQ-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX512DQ-NEXT:    vpor %xmm6, %xmm3, %xmm3
+; AVX512DQ-NEXT:    vpand %xmm3, %xmm7, %xmm3
+; AVX512DQ-NEXT:    vpmovsxbd %xmm3, %zmm3
+; AVX512DQ-NEXT:    vpmovd2m %zmm3, %k0
+; AVX512DQ-NEXT:    kmovw %k0, %eax
+; AVX512DQ-NEXT:    vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
+; AVX512DQ-NEXT:    kmovw %k0, %ecx
+; AVX512DQ-NEXT:    shll $16, %ecx
+; AVX512DQ-NEXT:    orl %eax, %ecx
+; AVX512DQ-NEXT:    vpmovsxbd %xmm2, %zmm0
+; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
+; AVX512DQ-NEXT:    kmovw %k0, %eax
+; AVX512DQ-NEXT:    vpmovsxbd %xmm1, %zmm0
+; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
+; AVX512DQ-NEXT:    kmovw %k0, %edx
+; AVX512DQ-NEXT:    shll $16, %edx
+; AVX512DQ-NEXT:    orl %eax, %edx
+; AVX512DQ-NEXT:    shlq $32, %rdx
+; AVX512DQ-NEXT:    orq %rcx, %rdx
+; AVX512DQ-NEXT:    je LBB75_1
+; AVX512DQ-NEXT:  ## %bb.2: ## %exit
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    retq
+; AVX512DQ-NEXT:  LBB75_1: ## %bar
+; AVX512DQ-NEXT:    vzeroupper
+; AVX512DQ-NEXT:    callq _foo
+; AVX512DQ-NEXT:    popq %rax
+; AVX512DQ-NEXT:    retq
+;
+; X86-LABEL: ktest_7:
+; X86:       ## %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    vptestnmb %zmm0, %zmm0, %k0
+; X86-NEXT:    vptestnmb %zmm1, %zmm1, %k1
+; X86-NEXT:    korq %k1, %k0, %k0
+; X86-NEXT:    vptestnmb %zmm2, %zmm2, %k1
+; X86-NEXT:    vptestnmb %zmm3, %zmm3, %k2
+; X86-NEXT:    korq %k2, %k1, %k1
+; X86-NEXT:    kandq %k1, %k0, %k0
+; X86-NEXT:    kshiftrq $32, %k0, %k1
+; X86-NEXT:    kortestd %k1, %k0
+; X86-NEXT:    je LBB75_1
+; X86-NEXT:  ## %bb.2: ## %exit
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
+; X86-NEXT:  LBB75_1: ## %bar
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    calll _foo
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+  %a = icmp eq <64 x i8> %w, zeroinitializer
+  %b = icmp eq <64 x i8> %x, zeroinitializer
+  %c = icmp eq <64 x i8> %y, zeroinitializer
+  %d = icmp eq <64 x i8> %z, zeroinitializer
+  %e = or <64 x i1> %a, %b
+  %f = or <64 x i1> %c, %d
+  %g = and <64 x i1> %e, %f
+  %h = bitcast <64 x i1> %g to i64
+  %i = icmp eq i64 %h, 0
+  br i1 %i, label %bar, label %exit
+
+bar:
+  call void @foo()
+  br label %exit
+
+exit:
+  ret void
+}
diff --git a/test/CodeGen/X86/avx512-schedule.ll b/test/CodeGen/X86/avx512-schedule.ll
index 5c44d86..bc9e6f7 100755
--- a/test/CodeGen/X86/avx512-schedule.ll
+++ b/test/CodeGen/X86/avx512-schedule.ll
@@ -4285,16 +4285,14 @@
 ; GENERIC-LABEL: trunc_16i8_to_16i1:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
-; GENERIC-NEXT:    vpmovb2m %xmm0, %k0 # sched: [1:0.33]
-; GENERIC-NEXT:    kmovd %k0, %eax # sched: [1:0.33]
+; GENERIC-NEXT:    vpmovmskb %xmm0, %eax # sched: [2:1.00]
 ; GENERIC-NEXT:    # kill: def $ax killed $ax killed $eax
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: trunc_16i8_to_16i1:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
-; SKX-NEXT:    vpmovb2m %xmm0, %k0 # sched: [1:1.00]
-; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
+; SKX-NEXT:    vpmovmskb %xmm0, %eax # sched: [2:1.00]
 ; SKX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask_b = trunc <16 x i8>%a to <16 x i1>
@@ -7302,37 +7300,37 @@
 define <8 x i1> @vmov_test18(i8 %a, i16 %y) {
 ; GENERIC-LABEL: vmov_test18:
 ; GENERIC:       # %bb.0:
-; GENERIC-NEXT:    kmovd %edi, %k1 # sched: [1:0.33]
-; GENERIC-NEXT:    kmovd %esi, %k2 # sched: [1:0.33]
-; GENERIC-NEXT:    kshiftrw $8, %k2, %k0 # sched: [1:1.00]
-; GENERIC-NEXT:    kshiftrw $9, %k2, %k2 # sched: [1:1.00]
-; GENERIC-NEXT:    kshiftrb $6, %k1, %k3 # sched: [1:1.00]
-; GENERIC-NEXT:    kxorb %k2, %k3, %k2 # sched: [1:0.33]
-; GENERIC-NEXT:    kshiftlb $7, %k2, %k2 # sched: [1:1.00]
-; GENERIC-NEXT:    kshiftrb $1, %k2, %k2 # sched: [1:1.00]
-; GENERIC-NEXT:    kxorb %k2, %k1, %k1 # sched: [1:0.33]
-; GENERIC-NEXT:    kshiftlb $1, %k1, %k1 # sched: [1:1.00]
+; GENERIC-NEXT:    kmovd %edi, %k0 # sched: [1:0.33]
+; GENERIC-NEXT:    kmovd %esi, %k1 # sched: [1:0.33]
+; GENERIC-NEXT:    kshiftrw $8, %k1, %k2 # sched: [1:1.00]
+; GENERIC-NEXT:    kshiftrw $9, %k1, %k1 # sched: [1:1.00]
+; GENERIC-NEXT:    kshiftrb $6, %k0, %k3 # sched: [1:1.00]
+; GENERIC-NEXT:    kxorb %k1, %k3, %k1 # sched: [1:0.33]
+; GENERIC-NEXT:    kshiftlb $7, %k1, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    kshiftrb $1, %k1, %k1 # sched: [1:1.00]
-; GENERIC-NEXT:    kshiftlb $7, %k0, %k0 # sched: [1:1.00]
-; GENERIC-NEXT:    korb %k0, %k1, %k0 # sched: [1:0.33]
+; GENERIC-NEXT:    kxorb %k1, %k0, %k0 # sched: [1:0.33]
+; GENERIC-NEXT:    kshiftlb $1, %k0, %k0 # sched: [1:1.00]
+; GENERIC-NEXT:    kshiftrb $1, %k0, %k0 # sched: [1:1.00]
+; GENERIC-NEXT:    kshiftlb $7, %k2, %k1 # sched: [1:1.00]
+; GENERIC-NEXT:    korb %k1, %k0, %k0 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpmovm2w %k0, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test18:
 ; SKX:       # %bb.0:
-; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
-; SKX-NEXT:    kmovd %esi, %k2 # sched: [1:1.00]
-; SKX-NEXT:    kshiftrw $8, %k2, %k0 # sched: [3:1.00]
-; SKX-NEXT:    kshiftrw $9, %k2, %k2 # sched: [3:1.00]
-; SKX-NEXT:    kshiftrb $6, %k1, %k3 # sched: [3:1.00]
-; SKX-NEXT:    kxorb %k2, %k3, %k2 # sched: [1:1.00]
-; SKX-NEXT:    kshiftlb $7, %k2, %k2 # sched: [3:1.00]
-; SKX-NEXT:    kshiftrb $1, %k2, %k2 # sched: [3:1.00]
-; SKX-NEXT:    kxorb %k2, %k1, %k1 # sched: [1:1.00]
-; SKX-NEXT:    kshiftlb $1, %k1, %k1 # sched: [3:1.00]
+; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
+; SKX-NEXT:    kmovd %esi, %k1 # sched: [1:1.00]
+; SKX-NEXT:    kshiftrw $8, %k1, %k2 # sched: [3:1.00]
+; SKX-NEXT:    kshiftrw $9, %k1, %k1 # sched: [3:1.00]
+; SKX-NEXT:    kshiftrb $6, %k0, %k3 # sched: [3:1.00]
+; SKX-NEXT:    kxorb %k1, %k3, %k1 # sched: [1:1.00]
+; SKX-NEXT:    kshiftlb $7, %k1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    kshiftrb $1, %k1, %k1 # sched: [3:1.00]
-; SKX-NEXT:    kshiftlb $7, %k0, %k0 # sched: [3:1.00]
-; SKX-NEXT:    korb %k0, %k1, %k0 # sched: [1:1.00]
+; SKX-NEXT:    kxorb %k1, %k0, %k0 # sched: [1:1.00]
+; SKX-NEXT:    kshiftlb $1, %k0, %k0 # sched: [3:1.00]
+; SKX-NEXT:    kshiftrb $1, %k0, %k0 # sched: [3:1.00]
+; SKX-NEXT:    kshiftlb $7, %k2, %k1 # sched: [3:1.00]
+; SKX-NEXT:    korb %k1, %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovm2w %k0, %xmm0 # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = bitcast i8 %a to <8 x i1>
diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll
index 4a1aca4..a34c64a 100644
--- a/test/CodeGen/X86/avx512-select.ll
+++ b/test/CodeGen/X86/avx512-select.ll
@@ -151,10 +151,8 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl (%ecx), %ecx
-; X86-NEXT:    kmovw %ecx, %k0
-; X86-NEXT:    movzbl (%eax), %eax
-; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    kmovw (%ecx), %k0
+; X86-NEXT:    kmovw (%eax), %k1
 ; X86-NEXT:    korw %k1, %k0, %k0
 ; X86-NEXT:    kmovw %k0, %eax
 ; X86-NEXT:    # kill: def $al killed $al killed $eax
@@ -162,10 +160,8 @@
 ;
 ; X64-LABEL: select05_mem:
 ; X64:       # %bb.0:
-; X64-NEXT:    movzbl (%rsi), %eax
-; X64-NEXT:    kmovw %eax, %k0
-; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    kmovw (%rsi), %k0
+; X64-NEXT:    kmovw (%rdi), %k1
 ; X64-NEXT:    korw %k1, %k0, %k0
 ; X64-NEXT:    kmovw %k0, %eax
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
@@ -202,10 +198,8 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl (%ecx), %ecx
-; X86-NEXT:    kmovw %ecx, %k0
-; X86-NEXT:    movzbl (%eax), %eax
-; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    kmovw (%ecx), %k0
+; X86-NEXT:    kmovw (%eax), %k1
 ; X86-NEXT:    kandw %k1, %k0, %k0
 ; X86-NEXT:    kmovw %k0, %eax
 ; X86-NEXT:    # kill: def $al killed $al killed $eax
@@ -213,10 +207,8 @@
 ;
 ; X64-LABEL: select06_mem:
 ; X64:       # %bb.0:
-; X64-NEXT:    movzbl (%rsi), %eax
-; X64-NEXT:    kmovw %eax, %k0
-; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    kmovw (%rsi), %k0
+; X64-NEXT:    kmovw (%rdi), %k1
 ; X64-NEXT:    kandw %k1, %k0, %k0
 ; X64-NEXT:    kmovw %k0, %eax
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
@@ -318,27 +310,19 @@
 define <16 x i16> @pr31515(<16 x i1> %a, <16 x i1> %b, <16 x i16> %c) nounwind {
 ; X86-LABEL: pr31515:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; X86-NEXT:    vpslld $31, %zmm1, %zmm1
-; X86-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; X86-NEXT:    vpslld $31, %zmm0, %zmm0
-; X86-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X86-NEXT:    vptestmd %zmm1, %zmm1, %k1 {%k1}
-; X86-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; X86-NEXT:    vpmovdw %zmm0, %ymm0
+; X86-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; X86-NEXT:    vpsllw $15, %ymm0, %ymm0
+; X86-NEXT:    vpsraw $15, %ymm0, %ymm0
 ; X86-NEXT:    vpandn %ymm2, %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr31515:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; X64-NEXT:    vpslld $31, %zmm1, %zmm1
-; X64-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; X64-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X64-NEXT:    vptestmd %zmm1, %zmm1, %k1 {%k1}
-; X64-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; X64-NEXT:    vpmovdw %zmm0, %ymm0
+; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; X64-NEXT:    vpsllw $15, %ymm0, %ymm0
+; X64-NEXT:    vpsraw $15, %ymm0, %ymm0
 ; X64-NEXT:    vpandn %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %mask = and <16 x i1> %a, %b
diff --git a/test/CodeGen/X86/avx512-shift.ll b/test/CodeGen/X86/avx512-shift.ll
index eda27c2..8cf08b8 100644
--- a/test/CodeGen/X86/avx512-shift.ll
+++ b/test/CodeGen/X86/avx512-shift.ll
@@ -1,52 +1,93 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-;RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=KNL
-;RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
+;RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefixes=CHECK,KNL
+;RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefixes=CHECK,SKX
 
-define <16 x i32> @shift_16_i32(<16 x i32> %a) {
-; CHECK-LABEL: shift_16_i32:
+define <16 x i32> @ashr_16_i32(<16 x i32> %a) {
+; CHECK-LABEL: ashr_16_i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsrld $1, %zmm0, %zmm0
-; CHECK-NEXT:    vpslld $12, %zmm0, %zmm0
 ; CHECK-NEXT:    vpsrad $12, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
-   %b = lshr <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-   %c = shl <16 x i32> %b, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
-   %d = ashr <16 x i32> %c, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
-   ret <16 x i32> %d;
+   %b = ashr <16 x i32> %a, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
+   ret <16 x i32> %b
 }
 
-define <8 x i64> @shift_8_i64(<8 x i64> %a) {
-; CHECK-LABEL: shift_8_i64:
+define <16 x i32> @lshr_16_i32(<16 x i32> %a) {
+; CHECK-LABEL: lshr_16_i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsrlq $1, %zmm0, %zmm0
-; CHECK-NEXT:    vpsllq $12, %zmm0, %zmm0
+; CHECK-NEXT:    vpsrld $1, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+   %b = lshr <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+   ret <16 x i32> %b
+}
+
+define <16 x i32> @shl_16_i32(<16 x i32> %a) {
+; CHECK-LABEL: shl_16_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpslld $12, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+   %b = shl <16 x i32> %a, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
+   ret <16 x i32> %b
+}
+
+define <8 x i64> @ashr_8_i64(<8 x i64> %a) {
+; CHECK-LABEL: ashr_8_i64:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsraq $12, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
-   %b = lshr <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
-   %c = shl <8 x i64> %b,  <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>
-   %d = ashr <8 x i64> %c, <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>
-   ret <8 x i64> %d;
+   %b = ashr <8 x i64> %a, <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>
+   ret <8 x i64> %b
 }
 
-define <4 x i64> @shift_4_i64(<4 x i64> %a) {
-; KNL-LABEL: shift_4_i64:
+define <8 x i64> @lshr_8_i64(<8 x i64> %a) {
+; CHECK-LABEL: lshr_8_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlq $1, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+   %b = lshr <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+   ret <8 x i64> %b
+}
+
+define <8 x i64> @shl_8_i64(<8 x i64> %a) {
+; CHECK-LABEL: shl_8_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllq $12, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+   %b = shl <8 x i64> %a, <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>
+   ret <8 x i64> %b
+}
+
+define <4 x i64> @ashr_4_i64(<4 x i64> %a) {
+; KNL-LABEL: ashr_4_i64:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpsrlq $1, %ymm0, %ymm0
-; KNL-NEXT:    vpsllq $12, %ymm0, %ymm0
+; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
 ; KNL-NEXT:    vpsraq $12, %zmm0, %zmm0
 ; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; KNL-NEXT:    retq
 ;
-; SKX-LABEL: shift_4_i64:
+; SKX-LABEL: ashr_4_i64:
 ; SKX:       # %bb.0:
-; SKX-NEXT:    vpsrlq $1, %ymm0, %ymm0
-; SKX-NEXT:    vpsllq $12, %ymm0, %ymm0
 ; SKX-NEXT:    vpsraq $12, %ymm0, %ymm0
 ; SKX-NEXT:    retq
+   %b = ashr <4 x i64> %a, <i64 12, i64 12, i64 12, i64 12>
+   ret <4 x i64> %b
+}
+
+define <4 x i64> @lshr_4_i64(<4 x i64> %a) {
+; CHECK-LABEL: lshr_4_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlq $1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
    %b = lshr <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
-   %c = shl <4 x i64> %b,  <i64 12, i64 12, i64 12, i64 12>
-   %d = ashr <4 x i64> %c, <i64 12, i64 12, i64 12, i64 12>
-   ret <4 x i64> %d;
+   ret <4 x i64> %b
+}
+
+define <4 x i64> @shl_4_i64(<4 x i64> %a) {
+; CHECK-LABEL: shl_4_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllq $12, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+   %b = shl <4 x i64> %a, <i64 12, i64 12, i64 12, i64 12>
+   ret <4 x i64> %b
 }
 
 define <8 x i64> @variable_shl4(<8 x i64> %x, <8 x i64> %y) {
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll b/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll
index ba67a620..7ae02e9 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-canonical.ll
@@ -2,8 +2,292 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
 ; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F-32
 
-; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512bw-builtins.c
 
+;
+; Signed Saturation
+;
+
+define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rr_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rr_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT:    retl
+  %res = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %res
+}
+declare <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16>, <32 x i16>)
+
+define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rrk_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %edi, %k1
+; AVX512BW-NEXT:    vpaddsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rrk_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpaddsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT:    retl
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rrkz_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %edi, %k1
+; AVX512BW-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rrkz_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT:    retl
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rm_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    vpaddsw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rm_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT:    vpaddsw (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT:    retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %1
+}
+
+define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rmk_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %esi, %k1
+; AVX512BW-NEXT:    vpaddsw (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rmk_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpaddsw (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT:    retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_adds_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rmkz_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %esi, %k1
+; AVX512BW-NEXT:    vpaddsw (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rmkz_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpaddsw (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT:    retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rr_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rr_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT:    retl
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %sub
+}
+declare <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16>, <32 x i16>)
+
+define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rrk_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %edi, %k1
+; AVX512BW-NEXT:    vpsubsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rrk_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpsubsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT:    retl
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
+  ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rrkz_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %edi, %k1
+; AVX512BW-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rrkz_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT:    retl
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
+  ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rm_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    vpsubsw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rm_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT:    vpsubsw (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT:    retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %sub
+}
+
+define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rmk_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %esi, %k1
+; AVX512BW-NEXT:    vpsubsw (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rmk_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpsubsw (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT:    retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
+  ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_subs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rmkz_512:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    kmovd %esi, %k1
+; AVX512BW-NEXT:    vpsubsw (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rmkz_512:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT:    vpsubsw (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT:    retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
+  ret <32 x i16> %res
+}
+
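+; The <64 x i16> cases legalize to two 512-bit operations; on 32-bit targets
+; the second half of %b arrives on the stack, hence the 64-byte frame
+; alignment in the AVX512F-32 checks.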
+define <64 x i16> @test_mask_adds_epi16_rr_1024(<64 x i16> %a, <64 x i16> %b) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rr_1024:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    vpaddsw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpaddsw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rr_1024:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    pushl %ebp
+; AVX512F-32-NEXT:    .cfi_def_cfa_offset 8
+; AVX512F-32-NEXT:    .cfi_offset %ebp, -8
+; AVX512F-32-NEXT:    movl %esp, %ebp
+; AVX512F-32-NEXT:    .cfi_def_cfa_register %ebp
+; AVX512F-32-NEXT:    andl $-64, %esp
+; AVX512F-32-NEXT:    subl $64, %esp
+; AVX512F-32-NEXT:    vpaddsw %zmm2, %zmm0, %zmm0
+; AVX512F-32-NEXT:    vpaddsw 8(%ebp), %zmm1, %zmm1
+; AVX512F-32-NEXT:    movl %ebp, %esp
+; AVX512F-32-NEXT:    popl %ebp
+; AVX512F-32-NEXT:    .cfi_def_cfa %esp, 4
+; AVX512F-32-NEXT:    retl
+  %1 = call <64 x i16> @llvm.sadd.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
+  ret <64 x i16> %1
+}
+declare <64 x i16> @llvm.sadd.sat.v64i16(<64 x i16>, <64 x i16>)
+
+define <64 x i16> @test_mask_subs_epi16_rr_1024(<64 x i16> %a, <64 x i16> %b) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rr_1024:
+; AVX512BW:       ## %bb.0:
+; AVX512BW-NEXT:    vpsubsw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsubsw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rr_1024:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    pushl %ebp
+; AVX512F-32-NEXT:    .cfi_def_cfa_offset 8
+; AVX512F-32-NEXT:    .cfi_offset %ebp, -8
+; AVX512F-32-NEXT:    movl %esp, %ebp
+; AVX512F-32-NEXT:    .cfi_def_cfa_register %ebp
+; AVX512F-32-NEXT:    andl $-64, %esp
+; AVX512F-32-NEXT:    subl $64, %esp
+; AVX512F-32-NEXT:    vpsubsw %zmm2, %zmm0, %zmm0
+; AVX512F-32-NEXT:    vpsubsw 8(%ebp), %zmm1, %zmm1
+; AVX512F-32-NEXT:    movl %ebp, %esp
+; AVX512F-32-NEXT:    popl %ebp
+; AVX512F-32-NEXT:    .cfi_def_cfa %esp, 4
+; AVX512F-32-NEXT:    retl
+  %sub = call <64 x i16> @llvm.ssub.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
+  ret <64 x i16> %sub
+}
+declare <64 x i16> @llvm.ssub.sat.v64i16(<64 x i16>, <64 x i16>)
+
+;
+; Unsigned Saturation
+;
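+; Unsigned saturation is exercised through the generic @llvm.uadd.sat and
+; @llvm.usub.sat intrinsics; the masked variants apply the same bitcast and
+; select pattern as the signed tests above.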
 
 define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
 ; AVX512BW-LABEL: test_mask_adds_epu16_rr_512:
@@ -15,11 +299,10 @@
 ; AVX512F-32:       # %bb.0:
 ; AVX512F-32-NEXT:    vpaddusw %zmm1, %zmm0, %zmm0
 ; AVX512F-32-NEXT:    retl
-  %1 = add <32 x i16> %a, %b
-  %2 = icmp ugt <32 x i16> %a, %1
-  %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
-  ret <32 x i16> %3
+  %res = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %res
 }
+declare <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16>, <32 x i16>)
 
 define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
 ; AVX512BW-LABEL: test_mask_adds_epu16_rrk_512:
@@ -35,12 +318,10 @@
 ; AVX512F-32-NEXT:    vpaddusw %zmm1, %zmm0, %zmm2 {%k1}
 ; AVX512F-32-NEXT:    vmovdqa64 %zmm2, %zmm0
 ; AVX512F-32-NEXT:    retl
-  %1 = add <32 x i16> %a, %b
-  %2 = icmp ugt <32 x i16> %a, %1
-  %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %passThru
-  ret <32 x i16> %5
+  %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
 }
 
 define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
@@ -55,12 +336,10 @@
 ; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
 ; AVX512F-32-NEXT:    vpaddusw %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-32-NEXT:    retl
-  %1 = add <32 x i16> %a, %b
-  %2 = icmp ugt <32 x i16> %a, %1
-  %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> zeroinitializer
-  ret <32 x i16> %5
+  %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
 }
 
 define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
@@ -75,10 +354,8 @@
 ; AVX512F-32-NEXT:    vpaddusw (%eax), %zmm0, %zmm0
 ; AVX512F-32-NEXT:    retl
   %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = add <32 x i16> %a, %b
-  %2 = icmp ugt <32 x i16> %a, %1
-  %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
-  ret <32 x i16> %3
+  %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %1
 }
 
 define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
@@ -97,12 +374,10 @@
 ; AVX512F-32-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; AVX512F-32-NEXT:    retl
   %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = add <32 x i16> %a, %b
-  %2 = icmp ugt <32 x i16> %a, %1
-  %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %passThru
-  ret <32 x i16> %5
+  %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
 }
 
 define <32 x i16> @test_mask_adds_epu16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
@@ -119,12 +394,10 @@
 ; AVX512F-32-NEXT:    vpaddusw (%eax), %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-32-NEXT:    retl
   %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = add <32 x i16> %a, %b
-  %2 = icmp ugt <32 x i16> %a, %1
-  %3 = select <32 x i1> %2, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> zeroinitializer
-  ret <32 x i16> %5
+  %1 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
 }
 
 define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
@@ -137,11 +410,10 @@
 ; AVX512F-32:       # %bb.0:
 ; AVX512F-32-NEXT:    vpsubusw %zmm1, %zmm0, %zmm0
 ; AVX512F-32-NEXT:    retl
-  %cmp = icmp ugt <32 x i16> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
-  %sub = sub <32 x i16> %sel, %b
+  %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
   ret <32 x i16> %sub
 }
+declare <32 x i16> @llvm.usub.sat.v32i16(<32 x i16>, <32 x i16>)
 
 define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
 ; AVX512BW-LABEL: test_mask_subs_epu16_rrk_512:
@@ -157,9 +429,7 @@
 ; AVX512F-32-NEXT:    vpsubusw %zmm1, %zmm0, %zmm2 {%k1}
 ; AVX512F-32-NEXT:    vmovdqa64 %zmm2, %zmm0
 ; AVX512F-32-NEXT:    retl
-  %cmp = icmp ugt <32 x i16> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
-  %sub = sub <32 x i16> %sel, %b
+  %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
   ret <32 x i16> %res
@@ -177,9 +447,7 @@
 ; AVX512F-32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
 ; AVX512F-32-NEXT:    vpsubusw %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-32-NEXT:    retl
-  %cmp = icmp ugt <32 x i16> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
-  %sub = sub <32 x i16> %sel, %b
+  %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
   ret <32 x i16> %res
@@ -197,9 +465,7 @@
 ; AVX512F-32-NEXT:    vpsubusw (%eax), %zmm0, %zmm0
 ; AVX512F-32-NEXT:    retl
   %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %cmp = icmp ugt <32 x i16> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
-  %sub = sub <32 x i16> %sel, %b
+  %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
   ret <32 x i16> %sub
 }
 
@@ -219,9 +485,7 @@
 ; AVX512F-32-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; AVX512F-32-NEXT:    retl
   %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %cmp = icmp ugt <32 x i16> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
-  %sub = sub <32 x i16> %sel, %b
+  %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
   ret <32 x i16> %res
@@ -241,9 +505,7 @@
 ; AVX512F-32-NEXT:    vpsubusw (%eax), %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-32-NEXT:    retl
   %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %cmp = icmp ugt <32 x i16> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i16> %a, <32 x i16> %b
-  %sub = sub <32 x i16> %sel, %b
+  %sub = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
   ret <32 x i16> %res
@@ -272,11 +534,10 @@
 ; AVX512F-32-NEXT:    popl %ebp
 ; AVX512F-32-NEXT:    .cfi_def_cfa %esp, 4
 ; AVX512F-32-NEXT:    retl
-  %1 = add <64 x i16> %a, %b
-  %2 = icmp ugt <64 x i16> %a, %1
-  %3 = select <64 x i1> %2, <64 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <64 x i16> %1
-  ret <64 x i16> %3
+  %1 = call <64 x i16> @llvm.uadd.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
+  ret <64 x i16> %1
 }
+declare <64 x i16> @llvm.uadd.sat.v64i16(<64 x i16>, <64 x i16>)
 
 define <64 x i16> @test_mask_subs_epu16_rr_1024(<64 x i16> %a, <64 x i16> %b) {
 ; AVX512BW-LABEL: test_mask_subs_epu16_rr_1024:
@@ -300,9 +561,7 @@
 ; AVX512F-32-NEXT:    popl %ebp
 ; AVX512F-32-NEXT:    .cfi_def_cfa %esp, 4
 ; AVX512F-32-NEXT:    retl
-  %cmp = icmp ugt <64 x i16> %a, %b
-  %sel = select <64 x i1> %cmp, <64 x i16> %a, <64 x i16> %b
-  %sub = sub <64 x i16> %sel, %b
+  %sub = call <64 x i16> @llvm.usub.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
   ret <64 x i16> %sub
 }
-
+declare <64 x i16> @llvm.usub.sat.v64i16(<64 x i16>, <64 x i16>)
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index c623d8b..f62156a 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -3285,6 +3285,113 @@
 
 declare <64 x i8> @llvm.x86.avx512.mask.psubus.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
 
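+; Tests for the legacy unmasked @llvm.x86.avx512.padds.w.512 intrinsic;
+; masking is expressed separately with a bitcast of the i32 mask and a select.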
+define <32 x i16> @test_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; CHECK-LABEL: test_adds_epi16_rr_512:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xed,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %1
+}
+
+define <32 x i16> @test_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
+; X86-LABEL: test_adds_epi16_rrk_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0xd1]
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rrk_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0xd1]
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
+; X86-LABEL: test_adds_epi16_rrkz_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rrkz_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_adds_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; X86-LABEL: test_adds_epi16_rm_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpaddsw (%eax), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xed,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rm_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpaddsw (%rdi), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xed,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %1
+}
+
+define <32 x i16> @test_adds_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; X86-LABEL: test_adds_epi16_rmk_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsw (%eax), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0x08]
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rmk_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsw (%rdi), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0x0f]
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_adds_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
+; X86-LABEL: test_adds_epi16_rmkz_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsw (%eax), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rmkz_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsw (%rdi), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+declare <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16>, <32 x i16>)
+
 define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
 ; CHECK-LABEL: test_mask_adds_epi16_rr_512:
 ; CHECK:       # %bb.0:
@@ -3384,6 +3491,113 @@
 
 declare <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
 
+define <32 x i16> @test_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; CHECK-LABEL: test_subs_epi16_rr_512:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe9,0xc1]
+; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %1
+}
+
+define <32 x i16> @test_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
+; X86-LABEL: test_subs_epi16_rrk_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0xd1]
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_subs_epi16_rrk_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0xd1]
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
+; X86-LABEL: test_subs_epi16_rrkz_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_subs_epi16_rrkz_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_subs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; X86-LABEL: test_subs_epi16_rm_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpsubsw (%eax), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe9,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_subs_epi16_rm_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsubsw (%rdi), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe9,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %1
+}
+
+define <32 x i16> @test_subs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; X86-LABEL: test_subs_epi16_rmk_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsw (%eax), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0x08]
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_subs_epi16_rmk_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsw (%rdi), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0x0f]
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_subs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
+; X86-LABEL: test_subs_epi16_rmkz_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsw (%eax), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_subs_epi16_rmkz_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsw (%rdi), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+declare <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16>, <32 x i16>)
+
 define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
 ; CHECK-LABEL: test_mask_subs_epi16_rr_512:
 ; CHECK:       # %bb.0:
@@ -3680,3 +3894,93 @@
 }
 
 declare <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+
+declare <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
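+; Each masked-shift test calls the legacy intrinsic three ways (merge-masked
+; with pass-through %x2, zero-masked, and unmasked with an all-ones mask) and
+; sums the results so a single function covers all three forms.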
+define <32 x i16> @test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psrlv32hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0xd9]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x10,0xd1]
+; X86-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x10,0xc1]
+; X86-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
+; X86-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psrlv32hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x10,0xd1]
+; X64-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x10,0xc1]
+; X64-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
+; X64-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
+  %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
+  %res3 = add <32 x i16> %res, %res1
+  %res4 = add <32 x i16> %res3, %res2
+  ret <32 x i16> %res4
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <32 x i16> @test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psrav32_hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsravw %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0xd9]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsravw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x11,0xd1]
+; X86-NEXT:    vpsravw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x11,0xc1]
+; X86-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
+; X86-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psrav32_hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsravw %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsravw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x11,0xd1]
+; X64-NEXT:    vpsravw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x11,0xc1]
+; X64-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
+; X64-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
+  %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
+  %res3 = add <32 x i16> %res, %res1
+  %res4 = add <32 x i16> %res3, %res2
+  ret <32 x i16> %res4
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <32 x i16> @test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psllv32hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsllvw %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0xd9]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsllvw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x12,0xd1]
+; X86-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x12,0xc1]
+; X86-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
+; X86-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psllv32hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsllvw %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsllvw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x12,0xd1]
+; X64-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x12,0xc1]
+; X64-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
+; X64-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
+  %res2 = call <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
+  %res3 = add <32 x i16> %res, %res1
+  %res4 = add <32 x i16> %res3, %res2
+  ret <32 x i16> %res4
+}
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index cf52746..8bcdc5d 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -683,419 +683,6 @@
 
 declare <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16>, <32 x i16>)
 
-define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
-; CHECK-LABEL: test_mask_adds_epi16_rr_512:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xed,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rrk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0xd1]
-; X86-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rrk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0xd1]
-; X64-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
-  ret <32 x i16> %3
-}
-
-define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rrkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rrkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
-  ret <32 x i16> %3
-}
-
-define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
-; X86-LABEL: test_mask_adds_epi16_rm_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpaddsw (%eax), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xed,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rm_512:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddsw (%rdi), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xed,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rmk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsw (%eax), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0x08]
-; X86-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rmk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsw (%rdi), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xed,0x0f]
-; X64-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
-  ret <32 x i16> %3
-}
-
-define <32 x i16> @test_mask_adds_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rmkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsw (%eax), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rmkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsw (%rdi), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xed,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = call <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
-  ret <32 x i16> %3
-}
-
-declare <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16>, <32 x i16>)
-
-define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
-; CHECK-LABEL: test_mask_subs_epi16_rr_512:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe9,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rrk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0xd1]
-; X86-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rrk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsw %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0xd1]
-; X64-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
-  ret <32 x i16> %3
-}
-
-define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rrkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rrkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
-  ret <32 x i16> %3
-}
-
-define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
-; X86-LABEL: test_mask_subs_epi16_rm_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpsubsw (%eax), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe9,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rm_512:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsubsw (%rdi), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe9,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rmk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsw (%eax), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0x08]
-; X86-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rmk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsw (%rdi), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe9,0x0f]
-; X64-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
-  ret <32 x i16> %3
-}
-
-define <32 x i16> @test_mask_subs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rmkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsw (%eax), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rmkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsw (%rdi), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe9,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i16>, <32 x i16>* %ptr_b
-  %1 = call <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16> %a, <32 x i16> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
-  ret <32 x i16> %3
-}
-
-declare <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16>, <32 x i16>)
-
-define <64 x i8> @test_mask_adds_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) {
-; CHECK-LABEL: test_mask_adds_epi8_rr_512:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpaddsb %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xec,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
-  %res = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_adds_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rrk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xec,0xd1]
-; X86-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rrk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rdi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xec,0xd1]
-; X64-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_adds_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rrkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xec,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rrkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rdi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xec,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 %mask)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_adds_epi8_rm_512(<64 x i8> %a, <64 x i8>* %ptr_b) {
-; X86-LABEL: test_mask_adds_epi8_rm_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpaddsb (%eax), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xec,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rm_512:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddsb (%rdi), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xec,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <64 x i8>, <64 x i8>* %ptr_b
-  %res = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_adds_epi8_rmk_512(<64 x i8> %a, <64 x i8>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rmk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsb (%eax), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xec,0x08]
-; X86-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rmk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rsi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsb (%rdi), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xec,0x0f]
-; X64-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <64 x i8>, <64 x i8>* %ptr_b
-  %res = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_adds_epi8_rmkz_512(<64 x i8> %a, <64 x i8>* %ptr_b, i64 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rmkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsb (%eax), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xec,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rmkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rsi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsb (%rdi), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xec,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <64 x i8>, <64 x i8>* %ptr_b
-  %res = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 %mask)
-  ret <64 x i8> %res
-}
-
-declare <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
-
-define <64 x i8> @test_mask_subs_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) {
-; CHECK-LABEL: test_mask_subs_epi8_rr_512:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsubsb %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe8,0xc1]
-; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
-  %res = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_subs_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rrk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe8,0xd1]
-; X86-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rrk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rdi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe8,0xd1]
-; X64-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_subs_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rrkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe8,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rrkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rdi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe8,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 %mask)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_subs_epi8_rm_512(<64 x i8> %a, <64 x i8>* %ptr_b) {
-; X86-LABEL: test_mask_subs_epi8_rm_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpsubsb (%eax), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe8,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rm_512:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsubsb (%rdi), %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xe8,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <64 x i8>, <64 x i8>* %ptr_b
-  %res = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_subs_epi8_rmk_512(<64 x i8> %a, <64 x i8>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rmk_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsb (%eax), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe8,0x08]
-; X86-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rmk_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rsi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsb (%rdi), %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x49,0xe8,0x0f]
-; X64-NEXT:    vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <64 x i8>, <64 x i8>* %ptr_b
-  %res = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask)
-  ret <64 x i8> %res
-}
-
-define <64 x i8> @test_mask_subs_epi8_rmkz_512(<64 x i8> %a, <64 x i8>* %ptr_b, i64 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rmkz_512:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsb (%eax), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe8,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rmkz_512:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovq %rsi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsb (%rdi), %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xc9,0xe8,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <64 x i8>, <64 x i8>* %ptr_b
-  %res = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a, <64 x i8> %b, <64 x i8> zeroinitializer, i64 %mask)
-  ret <64 x i8> %res
-}
-
-declare <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
-
-
 define <32 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_512:
 ; X86:       # %bb.0:
@@ -1566,7 +1153,25 @@
   ret  <8 x i64> %res2
 }
 
-declare <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16>, <32 x i16>) nounwind readnone
+
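+; With all-constant operands the variable shift folds away; the expected
+; result is materialized as a single immediate broadcast.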
+define <32 x i16> @test_x86_avx512_psrlv_w_512_const() optsize {
+; X86-LABEL: test_x86_avx512_psrlv_w_512_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; X86-NEXT:    # encoding: [0x62,0xf2,0x7d,0x48,0x79,0x05,A,A,A,A]
+; X86-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx512_psrlv_w_512_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; X64-NEXT:    # encoding: [0x62,0xf2,0x7d,0x48,0x79,0x05,A,A,A,A]
+; X64-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res1 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1,  i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
+  ret <32 x i16> %res1
+}
 
 define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_psrlv32hi:
@@ -1588,15 +1193,19 @@
 ; X64-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
 ; X64-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
-  %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
-  %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
-  %res3 = add <32 x i16> %res, %res1
-  %res4 = add <32 x i16> %res3, %res2
+  %1 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %2 = bitcast i32 %x3 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x2
+  %4 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %5 = bitcast i32 %x3 to <32 x i1>
+  %6 = select <32 x i1> %5, <32 x i16> %4, <32 x i16> zeroinitializer
+  %7 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %res3 = add <32 x i16> %3, %6
+  %res4 = add <32 x i16> %res3, %7
   ret <32 x i16> %res4
 }
 
-declare <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16>, <32 x i16>)
 
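+; Masking for the arithmetic shift is likewise built from a bitcast of the
+; mask and a select around the unmasked @llvm.x86.avx512.psrav.w.512 call.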
 define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_psrav32_hi:
@@ -1618,11 +1227,15 @@
 ; X64-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
 ; X64-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
-  %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
-  %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
-  %res3 = add <32 x i16> %res, %res1
-  %res4 = add <32 x i16> %res3, %res2
+  %1 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %2 = bitcast i32 %x3 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x2
+  %4 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %5 = bitcast i32 %x3 to <32 x i1>
+  %6 = select <32 x i1> %5, <32 x i16> %4, <32 x i16> zeroinitializer
+  %7 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %res3 = add <32 x i16> %3, %6
+  %res4 = add <32 x i16> %res3, %7
   ret <32 x i16> %res4
 }
 
@@ -1644,14 +1257,10 @@
 ; X64-NEXT:    vpsravw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A]
 ; X64-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16> <i16 2, i16 9,  i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9,  i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9,  i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9,  i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51>,
-                                                          <32 x i16> <i16 1, i16 10, i16 35,  i16 52, i16 69,  i16 9,  i16 16,  i16 49, i16 1, i16 10, i16 35,  i16 52, i16 69,  i16 9,  i16 16,  i16 49, i16 1, i16 10, i16 35,  i16 52, i16 69,  i16 9,  i16 16,  i16 49, i16 1, i16 10, i16 35,  i16 52, i16 69,  i16 9,  i16 16,  i16 49>,
-                                                          <32 x i16> zeroinitializer, i32 -1)
-  ret <32 x i16> %res
+  %1 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> <i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51>, <32 x i16> <i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49>)
+  ret <32 x i16> %1
 }
 
-declare <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-
 define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_psllv32hi:
 ; X86:       # %bb.0:
@@ -1672,11 +1281,15 @@
 ; X64-NEXT:    vpaddw %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc3]
 ; X64-NEXT:    vpaddw %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
-  %res1 = call <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
-  %res2 = call <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
-  %res3 = add <32 x i16> %res, %res1
-  %res4 = add <32 x i16> %res3, %res2
+  %1 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %2 = bitcast i32 %x3 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x2
+  %4 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %5 = bitcast i32 %x3 to <32 x i1>
+  %6 = select <32 x i1> %5, <32 x i16> %4, <32 x i16> zeroinitializer
+  %7 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %x0, <32 x i16> %x1)
+  %res3 = add <32 x i16> %3, %6
+  %res4 = add <32 x i16> %res3, %7
   ret <32 x i16> %res4
 }
 
@@ -1761,6 +1374,25 @@
 declare <32 x i16> @llvm.x86.avx512.psll.w.512(<32 x i16>, <8 x i16>) nounwind readnone
 
 
+define <32 x i16> @test_x86_avx512_psllv_w_512_const() optsize {
+; X86-LABEL: test_x86_avx512_psllv_w_512_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X86-NEXT:    # encoding: [0x62,0xf2,0x7d,0x48,0x79,0x05,A,A,A,A]
+; X86-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_x86_avx512_psllv_w_512_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X64-NEXT:    # encoding: [0x62,0xf2,0x7d,0x48,0x79,0x05,A,A,A,A]
+; X64-NEXT:    # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res1 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
+  ret <32 x i16> %res1
+}
+declare <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16>, <32 x i16>) nounwind readnone
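+; test_x86_avx512_psllv_w_512_const above checks constant folding of the
+; variable shift: each in-range lane computes 4 << 1 = 8, while the trailing
+; -1 lanes use an out-of-range shift amount. The all-8 vpbroadcastw in the
+; checks suggests the fold may pick any value for that lane and chooses one
+; that keeps the constant broadcastable.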
+
 define <32 x i16> @test_x86_avx512_pslli_w_512(<32 x i16> %a0) {
 ; CHECK-LABEL: test_x86_avx512_pslli_w_512:
 ; CHECK:       # %bb.0:
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll
index 748e29d..fc99406 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-canonical.ll
@@ -1,19 +1,602 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw -mattr=+avx512vl --show-mc-encoding| FileCheck %s
 
-; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vlbw-builtins.c
 
 
+;
+; Signed Saturation
+;
+
+define <8 x i16> @test_mask_adds_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_mask_adds_epi16_rr_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %1
+}
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
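+; @llvm.sadd.sat clamps instead of wrapping, matching vpaddsw: e.g.
+; sadd.sat(i16 32760, i16 100) = 32767 and
+; sadd.sat(i16 -32760, i16 -100) = -32768.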
+
+define <8 x i16> @test_mask_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
+; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_mask_adds_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_mask_adds_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi16_rm_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %1
+}
+
+define <8 x i16> @test_mask_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0x0f]
+; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_mask_adds_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
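+;
+; The rm/rmk/rmkz variants load %b through %ptr_b, so the checks also verify
+; that the saturating operation folds its memory operand, e.g.
+; vpaddsw (%rdi), %xmm0, %xmm0.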
+
+define <16 x i16> @test_mask_adds_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: test_mask_adds_epi16_rr_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %1
+}
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @test_mask_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
+; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_mask_adds_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_mask_adds_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi16_rm_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %1
+}
+
+define <16 x i16> @test_mask_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0x0f]
+; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_mask_adds_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+define <8 x i16> @test_mask_subs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_mask_subs_epi16_rr_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %sub
+}
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
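+; @llvm.ssub.sat is the saturating counterpart for subtraction, matching
+; vpsubsw: e.g. ssub.sat(i16 -32760, i16 100) = -32768 and
+; ssub.sat(i16 32760, i16 -100) = 32767.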
+
+define <8 x i16> @test_mask_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
+; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_subs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_subs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi16_rm_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %sub
+}
+
+define <8 x i16> @test_mask_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x0f]
+; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_subs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
+  ret <8 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: test_mask_subs_epi16_rr_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %sub
+}
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @test_mask_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
+; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi16_rm_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %sub
+}
+
+define <16 x i16> @test_mask_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x0f]
+; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
+  ret <16 x i16> %res
+}
+
+define <16 x i8> @test_mask_adds_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_mask_adds_epi8_rr_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %1
+}
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_mask_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
+; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_mask_adds_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_mask_adds_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi8_rm_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @test_mask_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0x0f]
+; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_mask_adds_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: test_mask_adds_epi8_rr_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %1
+}
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @test_mask_adds_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xec,0xd1]
+; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xec,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi8_rm_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %1
+}
+
+define <32 x i8> @test_mask_adds_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xec,0x0f]
+; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xec,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
+define <16 x i8> @test_mask_subs_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_mask_subs_epi8_rr_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %sub
+}
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_mask_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
+; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_subs_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_subs_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi8_rm_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %sub
+}
+
+define <16 x i8> @test_mask_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmk_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x0f]
+; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_subs_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmkz_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
+  ret <16 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: test_mask_subs_epi8_rr_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %sub
+}
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @test_mask_subs_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe8,0xd1]
+; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi8_rm_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpsubsb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %sub
+}
+
+define <32 x i8> @test_mask_subs_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmk_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe8,0x0f]
+; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmkz_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT:    vpsubsb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0x07]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
+  ret <32 x i8> %res
+}
+
+;
+; Unsigned Saturation
+;
+
 define <8 x i16> @test_mask_adds_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_mask_adds_epu16_rr_128:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <8 x i16> %a, %b
-  %2 = icmp ugt <8 x i16> %a, %1
-  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
-  ret <8 x i16> %3
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %1
 }
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
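+; @llvm.uadd.sat expresses directly what the removed IR spelled out: add,
+; detect unsigned wraparound with "icmp ugt %a, %sum", and select all-ones
+; on overflow. A scalar sketch of the equivalence (names are illustrative):
+;
+;   %sum = add i16 %a, %b
+;   %ov  = icmp ugt i16 %a, %sum          ; result wrapped => overflow
+;   %res = select i1 %ov, i16 -1, i16 %sum
+;   ; == call i16 @llvm.uadd.sat.i16(i16 %a, i16 %b)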
 
 define <8 x i16> @test_mask_adds_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_adds_epu16_rrk_128:
@@ -22,12 +605,10 @@
 ; CHECK-NEXT:    vpaddusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdd,0xd1]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <8 x i16> %a, %b
-  %2 = icmp ugt <8 x i16> %a, %1
-  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
-  %4 = bitcast i8 %mask to <8 x i1>
-  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %passThru
-  ret <8 x i16> %5
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
 }
 
 define <8 x i16> @test_mask_adds_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
@@ -36,12 +617,10 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <8 x i16> %a, %b
-  %2 = icmp ugt <8 x i16> %a, %1
-  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
-  %4 = bitcast i8 %mask to <8 x i1>
-  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
-  ret <8 x i16> %5
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
 }
 
 define <8 x i16> @test_mask_adds_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
@@ -50,10 +629,8 @@
 ; CHECK-NEXT:    vpaddusw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %1 = add <8 x i16> %a, %b
-  %2 = icmp ugt <8 x i16> %a, %1
-  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
-  ret <8 x i16> %3
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %1
 }
 
 define <8 x i16> @test_mask_adds_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
@@ -64,12 +641,10 @@
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %1 = add <8 x i16> %a, %b
-  %2 = icmp ugt <8 x i16> %a, %1
-  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
-  %4 = bitcast i8 %mask to <8 x i1>
-  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %passThru
-  ret <8 x i16> %5
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
 }
 
 define <8 x i16> @test_mask_adds_epu16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
@@ -79,12 +654,10 @@
 ; CHECK-NEXT:    vpaddusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %1 = add <8 x i16> %a, %b
-  %2 = icmp ugt <8 x i16> %a, %1
-  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
-  %4 = bitcast i8 %mask to <8 x i1>
-  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
-  ret <8 x i16> %5
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
 }
 
 define <16 x i16> @test_mask_adds_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
@@ -92,11 +665,10 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <16 x i16> %a, %b
-  %2 = icmp ugt <16 x i16> %a, %1
-  %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
-  ret <16 x i16> %3
+  %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %1
 }
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
 
 define <16 x i16> @test_mask_adds_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_adds_epu16_rrk_256:
@@ -105,12 +677,10 @@
 ; CHECK-NEXT:    vpaddusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdd,0xd1]
 ; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <16 x i16> %a, %b
-  %2 = icmp ugt <16 x i16> %a, %1
-  %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %passThru
-  ret <16 x i16> %5
+  %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
 }
 
 define <16 x i16> @test_mask_adds_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
@@ -119,12 +689,10 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <16 x i16> %a, %b
-  %2 = icmp ugt <16 x i16> %a, %1
-  %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
-  ret <16 x i16> %5
+  %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
 }
 
 define <16 x i16> @test_mask_adds_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
@@ -133,10 +701,8 @@
 ; CHECK-NEXT:    vpaddusw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %1 = add <16 x i16> %a, %b
-  %2 = icmp ugt <16 x i16> %a, %1
-  %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
-  ret <16 x i16> %3
+  %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %1
 }
 
 define <16 x i16> @test_mask_adds_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
@@ -147,12 +713,10 @@
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %1 = add <16 x i16> %a, %b
-  %2 = icmp ugt <16 x i16> %a, %1
-  %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %passThru
-  ret <16 x i16> %5
+  %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
 }
 
 define <16 x i16> @test_mask_adds_epu16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
@@ -162,12 +726,10 @@
 ; CHECK-NEXT:    vpaddusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %1 = add <16 x i16> %a, %b
-  %2 = icmp ugt <16 x i16> %a, %1
-  %3 = select <16 x i1> %2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
-  ret <16 x i16> %5
+  %1 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
 }
 
 define <8 x i16> @test_mask_subs_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
@@ -175,11 +737,10 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <8 x i16> %a, %b
-  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
-  %sub = sub <8 x i16> %sel, %b
+  %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %sub
 }
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
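+; @llvm.usub.sat replaces the removed icmp/select/sub idiom: the old IR
+; computed "%a ugt %b ? %a : %b" and then subtracted %b, giving %a - %b when
+; no underflow is possible and 0 otherwise, which is exactly unsigned
+; saturating subtraction (vpsubusw).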
 
 define <8 x i16> @test_mask_subs_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_subs_epu16_rrk_128:
@@ -188,9 +749,7 @@
 ; CHECK-NEXT:    vpsubusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd9,0xd1]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <8 x i16> %a, %b
-  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
-  %sub = sub <8 x i16> %sel, %b
+  %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
   %bc = bitcast i8 %mask to <8 x i1>
   %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
   ret <8 x i16> %res
@@ -202,9 +761,7 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <8 x i16> %a, %b
-  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
-  %sub = sub <8 x i16> %sel, %b
+  %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
   %bc = bitcast i8 %mask to <8 x i1>
   %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
   ret <8 x i16> %res
@@ -216,9 +773,7 @@
 ; CHECK-NEXT:    vpsubusw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %cmp = icmp ugt <8 x i16> %a, %b
-  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
-  %sub = sub <8 x i16> %sel, %b
+  %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %sub
 }
 
@@ -230,9 +785,7 @@
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %cmp = icmp ugt <8 x i16> %a, %b
-  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
-  %sub = sub <8 x i16> %sel, %b
+  %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
   %bc = bitcast i8 %mask to <8 x i1>
   %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
   ret <8 x i16> %res
@@ -245,9 +798,7 @@
 ; CHECK-NEXT:    vpsubusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %cmp = icmp ugt <8 x i16> %a, %b
-  %sel = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
-  %sub = sub <8 x i16> %sel, %b
+  %sub = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
   %bc = bitcast i8 %mask to <8 x i1>
   %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
   ret <8 x i16> %res
@@ -258,11 +809,10 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <16 x i16> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
-  %sub = sub <16 x i16> %sel, %b
+  %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
   ret <16 x i16> %sub
 }
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
 
 define <16 x i16> @test_mask_subs_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_subs_epu16_rrk_256:
@@ -271,9 +821,7 @@
 ; CHECK-NEXT:    vpsubusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd9,0xd1]
 ; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <16 x i16> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
-  %sub = sub <16 x i16> %sel, %b
+  %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
   ret <16 x i16> %res
@@ -285,9 +833,7 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpsubusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <16 x i16> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
-  %sub = sub <16 x i16> %sel, %b
+  %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
   ret <16 x i16> %res
@@ -299,9 +845,7 @@
 ; CHECK-NEXT:    vpsubusw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %cmp = icmp ugt <16 x i16> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
-  %sub = sub <16 x i16> %sel, %b
+  %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
   ret <16 x i16> %sub
 }
 
@@ -313,9 +857,7 @@
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %cmp = icmp ugt <16 x i16> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
-  %sub = sub <16 x i16> %sel, %b
+  %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
   ret <16 x i16> %res
@@ -328,9 +870,7 @@
 ; CHECK-NEXT:    vpsubusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %cmp = icmp ugt <16 x i16> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
-  %sub = sub <16 x i16> %sel, %b
+  %sub = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
   ret <16 x i16> %res
@@ -341,11 +881,10 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <16 x i8> %a, %b
-  %2 = icmp ugt <16 x i8> %a, %1
-  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
-  ret <16 x i8> %3
+  %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %1
 }
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
 
 define <16 x i8> @test_mask_adds_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_adds_epu8_rrk_128:
@@ -354,12 +893,10 @@
 ; CHECK-NEXT:    vpaddusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdc,0xd1]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <16 x i8> %a, %b
-  %2 = icmp ugt <16 x i8> %a, %1
-  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> %passThru
-  ret <16 x i8> %5
+  %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
 }
 
 define <16 x i8> @test_mask_adds_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
@@ -368,12 +905,10 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <16 x i8> %a, %b
-  %2 = icmp ugt <16 x i8> %a, %1
-  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> zeroinitializer
-  ret <16 x i8> %5
+  %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
 }
 
 define <16 x i8> @test_mask_adds_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
@@ -382,10 +917,8 @@
 ; CHECK-NEXT:    vpaddusb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = add <16 x i8> %a, %b
-  %2 = icmp ugt <16 x i8> %a, %1
-  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
-  ret <16 x i8> %3
+  %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %1
 }
 
 define <16 x i8> @test_mask_adds_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
@@ -396,12 +929,10 @@
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = add <16 x i8> %a, %b
-  %2 = icmp ugt <16 x i8> %a, %1
-  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> %passThru
-  ret <16 x i8> %5
+  %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
 }
 
 define <16 x i8> @test_mask_adds_epu8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
@@ -411,12 +942,10 @@
 ; CHECK-NEXT:    vpaddusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = add <16 x i8> %a, %b
-  %2 = icmp ugt <16 x i8> %a, %1
-  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
-  %4 = bitcast i16 %mask to <16 x i1>
-  %5 = select <16 x i1> %4, <16 x i8> %3, <16 x i8> zeroinitializer
-  ret <16 x i8> %5
+  %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
 }
 
 define <32 x i8> @test_mask_adds_epu8_rr_256(<32 x i8> %a, <32 x i8> %b) {
@@ -424,11 +953,10 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <32 x i8> %a, %b
-  %2 = icmp ugt <32 x i8> %a, %1
-  %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
-  ret <32 x i8> %3
+  %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %1
 }
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
 
 define <32 x i8> @test_mask_adds_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
 ; CHECK-LABEL: test_mask_adds_epu8_rrk_256:
@@ -437,12 +965,10 @@
 ; CHECK-NEXT:    vpaddusb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdc,0xd1]
 ; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <32 x i8> %a, %b
-  %2 = icmp ugt <32 x i8> %a, %1
-  %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> %passThru
-  ret <32 x i8> %5
+  %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
 }
 
 define <32 x i8> @test_mask_adds_epu8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
@@ -451,12 +977,10 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdc,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %1 = add <32 x i8> %a, %b
-  %2 = icmp ugt <32 x i8> %a, %1
-  %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> zeroinitializer
-  ret <32 x i8> %5
+  %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
 }
 
 define <32 x i8> @test_mask_adds_epu8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
@@ -465,10 +989,8 @@
 ; CHECK-NEXT:    vpaddusb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %1 = add <32 x i8> %a, %b
-  %2 = icmp ugt <32 x i8> %a, %1
-  %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
-  ret <32 x i8> %3
+  %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %1
 }
 
 define <32 x i8> @test_mask_adds_epu8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
@@ -479,12 +1001,10 @@
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %1 = add <32 x i8> %a, %b
-  %2 = icmp ugt <32 x i8> %a, %1
-  %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> %passThru
-  ret <32 x i8> %5
+  %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
 }
 
 define <32 x i8> @test_mask_adds_epu8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
@@ -494,12 +1014,10 @@
 ; CHECK-NEXT:    vpaddusb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdc,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %1 = add <32 x i8> %a, %b
-  %2 = icmp ugt <32 x i8> %a, %1
-  %3 = select <32 x i1> %2, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %1
-  %4 = bitcast i32 %mask to <32 x i1>
-  %5 = select <32 x i1> %4, <32 x i8> %3, <32 x i8> zeroinitializer
-  ret <32 x i8> %5
+  %1 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
 }
 
 define <16 x i8> @test_mask_subs_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
@@ -507,11 +1025,10 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <16 x i8> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
-  %sub = sub <16 x i8> %sel, %b
+  %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %sub
 }
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
 
 define <16 x i8> @test_mask_subs_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_subs_epu8_rrk_128:
@@ -520,9 +1037,7 @@
 ; CHECK-NEXT:    vpsubusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd8,0xd1]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <16 x i8> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
-  %sub = sub <16 x i8> %sel, %b
+  %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
   ret <16 x i8> %res
@@ -534,9 +1049,7 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <16 x i8> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
-  %sub = sub <16 x i8> %sel, %b
+  %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
   ret <16 x i8> %res
@@ -548,9 +1061,7 @@
 ; CHECK-NEXT:    vpsubusb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %cmp = icmp ugt <16 x i8> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
-  %sub = sub <16 x i8> %sel, %b
+  %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %sub
 }
 
@@ -562,9 +1073,7 @@
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %cmp = icmp ugt <16 x i8> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
-  %sub = sub <16 x i8> %sel, %b
+  %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
   ret <16 x i8> %res
@@ -577,9 +1086,7 @@
 ; CHECK-NEXT:    vpsubusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %cmp = icmp ugt <16 x i8> %a, %b
-  %sel = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
-  %sub = sub <16 x i8> %sel, %b
+  %sub = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
   %bc = bitcast i16 %mask to <16 x i1>
   %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
   ret <16 x i8> %res
@@ -590,11 +1097,10 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <32 x i8> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
-  %sub = sub <32 x i8> %sel, %b
+  %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
   ret <32 x i8> %sub
 }
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
 
 define <32 x i8> @test_mask_subs_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
 ; CHECK-LABEL: test_mask_subs_epu8_rrk_256:
@@ -603,9 +1109,7 @@
 ; CHECK-NEXT:    vpsubusb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd8,0xd1]
 ; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <32 x i8> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
-  %sub = sub <32 x i8> %sel, %b
+  %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
   ret <32 x i8> %res
@@ -617,9 +1121,7 @@
 ; CHECK-NEXT:    kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT:    vpsubusb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd8,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
-  %cmp = icmp ugt <32 x i8> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
-  %sub = sub <32 x i8> %sel, %b
+  %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
   ret <32 x i8> %res
@@ -631,9 +1133,7 @@
 ; CHECK-NEXT:    vpsubusb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %cmp = icmp ugt <32 x i8> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
-  %sub = sub <32 x i8> %sel, %b
+  %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
   ret <32 x i8> %sub
 }
 
@@ -645,9 +1145,7 @@
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %cmp = icmp ugt <32 x i8> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
-  %sub = sub <32 x i8> %sel, %b
+  %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
   ret <32 x i8> %res
@@ -660,9 +1158,7 @@
 ; CHECK-NEXT:    vpsubusb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd8,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %cmp = icmp ugt <32 x i8> %a, %b
-  %sel = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
-  %sub = sub <32 x i8> %sel, %b
+  %sub = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
   %bc = bitcast i32 %mask to <32 x i1>
   %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
   ret <32 x i8> %res
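Note on the hunks above: they replace the hand-expanded unsigned-saturation IR with the generic saturation intrinsics. A saturating add was spelled as an add, an icmp ugt overflow check, and a select against all-ones; a saturating subtract as an icmp ugt/select computing umax(a, b) followed by sub. Both shapes and the intrinsic form lower to the same vpaddusb/vpsubusb instructions, which is why the CHECK lines are untouched. Below is a minimal standalone sketch of the equivalence, illustrative only and not part of the patch; the function names are invented:

  ; Expanded form: if a + b wraps, the sum is unsigned-less than a,
  ; so clamp the result to all-ones.
  define <16 x i8> @uadd_sat_expanded(<16 x i8> %a, <16 x i8> %b) {
    %sum = add <16 x i8> %a, %b
    %wrapped = icmp ugt <16 x i8> %a, %sum   ; true iff a + b overflowed
    %r = select <16 x i1> %wrapped, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %sum
    ret <16 x i8> %r
  }

  ; Intrinsic form used by the updated tests; codegen is identical.
  define <16 x i8> @uadd_sat_intrinsic(<16 x i8> %a, <16 x i8> %b) {
    %r = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
    ret <16 x i8> %r
  }
  declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
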
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 21231b0..bbc9799 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -7348,6 +7348,338 @@
 
 declare <16 x i16> @llvm.x86.avx512.mask.padds.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
 
+declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) #0
+
+define <8 x i16> @test_test_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
+; X86-LABEL: test_test_subs_epi16_rrk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
+; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rrk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
+; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_test_subs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
+; X86-LABEL: test_test_subs_epi16_rrkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rrkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_test_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; X86-LABEL: test_test_subs_epi16_rmk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vpsubsw (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x08]
+; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rmk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsw (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x0f]
+; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_test_subs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
+; X86-LABEL: test_test_subs_epi16_rmkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vpsubsw (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rmkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsw (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
+
+declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) #0
+
+define <16 x i16> @test_test_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
+; X86-LABEL: test_test_subs_epi16_rrk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
+; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rrk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
+; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_test_subs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
+; X86-LABEL: test_test_subs_epi16_rrkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rrkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_test_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; X86-LABEL: test_test_subs_epi16_rmk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsw (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x08]
+; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rmk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsw (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x0f]
+; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_test_subs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
+; X86-LABEL: test_test_subs_epi16_rmkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsw (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi16_rmkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsw (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) #0
+
+define <16 x i8> @test_test_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
+; X86-LABEL: test_test_subs_epi8_rrk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
+; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rrk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
+; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_test_subs_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+; X86-LABEL: test_test_subs_epi8_rrkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rrkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_test_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; X86-LABEL: test_test_subs_epi8_rmk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsb (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x08]
+; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rmk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsb (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x0f]
+; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_test_subs_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
+; X86-LABEL: test_test_subs_epi8_rmkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsb (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rmkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsb (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) #0
+
+define <32 x i8> @test_test_subs_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
+; X86-LABEL: test_test_subs_epi8_rrk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0xd1]
+; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rrk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0xd1]
+; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_test_subs_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+; X86-LABEL: test_test_subs_epi8_rrkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rrkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_test_subs_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; X86-LABEL: test_test_subs_epi8_rmk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsb (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0x08]
+; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rmk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsb (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0x0f]
+; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_test_subs_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
+; X86-LABEL: test_test_subs_epi8_rmkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpsubsb (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_test_subs_epi8_rmkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpsubsb (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
 define <8 x i16> @test_mask_subs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_mask_subs_epi16_rr_128:
 ; CHECK:       # %bb.0:
@@ -7550,6 +7882,354 @@
 
 declare <16 x i16> @llvm.x86.avx512.mask.psubs.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
 
+declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) #0
+
+define <8 x i16> @test_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
+; X86-LABEL: test_adds_epi16_rrk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpaddsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
+; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rrk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
+; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_adds_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
+; X86-LABEL: test_adds_epi16_rrkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rrkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; X86-LABEL: test_adds_epi16_rmk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vpaddsw (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0x08]
+; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rmk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsw (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0x0f]
+; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_adds_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
+; X86-LABEL: test_adds_epi16_rmkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vpaddsw (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rmkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsw (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
+
+declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) #0
+
+define <16 x i16> @test_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
+; X86-LABEL: test_adds_epi16_rrk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
+; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rrk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
+; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_adds_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
+; X86-LABEL: test_adds_epi16_rrkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rrkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; X86-LABEL: test_adds_epi16_rmk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsw (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0x08]
+; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rmk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsw (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0x0f]
+; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_adds_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
+; X86-LABEL: test_adds_epi16_rmkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsw (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi16_rmkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) #0
+
+define <16 x i8> @test_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
+; X86-LABEL: test_adds_epi8_rrk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
+; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rrk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
+; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_adds_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+; X86-LABEL: test_adds_epi8_rrkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rrkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_adds_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
+; X86-LABEL: test_adds_epi8_rm_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpaddsb (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rm_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @test_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; X86-LABEL: test_adds_epi8_rmk_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsb (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0x08]
+; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rmk_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsb (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0x0f]
+; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_adds_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
+; X86-LABEL: test_adds_epi8_rmkz_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsb (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rmkz_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) #0
+
+define <32 x i8> @test_adds_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
+; X86-LABEL: test_adds_epi8_rrk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0xd1]
+; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rrk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0xd1]
+; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_adds_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+; X86-LABEL: test_adds_epi8_rrkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rrkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_adds_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; X86-LABEL: test_adds_epi8_rmk_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsb (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0x08]
+; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rmk_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsb (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0x0f]
+; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_adds_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
+; X86-LABEL: test_adds_epi8_rmkz_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vpaddsb (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0x00]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_adds_epi8_rmkz_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
 define <16 x i8> @test_mask_adds_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: test_mask_adds_epi8_rr_128:
 ; CHECK:       # %bb.0:
@@ -7946,3 +8626,185 @@
 
 declare <32 x i8> @llvm.x86.avx512.mask.psubs.b.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
 
+declare <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <16 x i16>@test_int_x86_avx512_mask_psrav16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psrav16_hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsravw %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x11,0xd9]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsravw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x11,0xd1]
+; X86-NEXT:    vpsravw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x11,0xc1]
+; X86-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
+; X86-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psrav16_hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsravw %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x11,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsravw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x11,0xd1]
+; X64-NEXT:    vpsravw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x11,0xc1]
+; X64-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
+; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %x3)
+  %res2 = call <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
+  %res3 = add <16 x i16> %res, %res1
+  %res4 = add <16 x i16> %res3, %res2
+  ret <16 x i16> %res4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+
+define <8 x i16>@test_int_x86_avx512_mask_psrav8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psrav8_hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsravw %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x11,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpsravw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x11,0xd1]
+; X86-NEXT:    vpsravw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x11,0xc1]
+; X86-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psrav8_hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsravw %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x11,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsravw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x11,0xd1]
+; X64-NEXT:    vpsravw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x11,0xc1]
+; X64-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> zeroinitializer, i8 %x3)
+  %res2 = call <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
+  %res3 = add <8 x i16> %res, %res1
+  %res4 = add <8 x i16> %res3, %res2
+  ret <8 x i16> %res4
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <16 x i16>@test_int_x86_avx512_mask_psllv16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psllv16_hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsllvw %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0xd9]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsllvw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x12,0xd1]
+; X86-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x12,0xc1]
+; X86-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
+; X86-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psllv16_hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsllvw %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsllvw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x12,0xd1]
+; X64-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x12,0xc1]
+; X64-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
+; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %x3)
+  %res2 = call <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
+  %res3 = add <16 x i16> %res, %res1
+  %res4 = add <16 x i16> %res3, %res2
+  ret <16 x i16> %res4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+
+define <8 x i16>@test_int_x86_avx512_mask_psllv8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psllv8_hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsllvw %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpsllvw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x12,0xd1]
+; X86-NEXT:    vpsllvw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x12,0xc1]
+; X86-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psllv8_hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsllvw %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsllvw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x12,0xd1]
+; X64-NEXT:    vpsllvw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x12,0xc1]
+; X64-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> zeroinitializer, i8 %x3)
+  %res2 = call <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
+  %res3 = add <8 x i16> %res, %res1
+  %res4 = add <8 x i16> %res3, %res2
+  ret <8 x i16> %res4
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.psrlv16.hi(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <16 x i16>@test_int_x86_avx512_mask_psrlv16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psrlv16_hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0xd9]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x10,0xd1]
+; X86-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x10,0xc1]
+; X86-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
+; X86-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psrlv16_hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x10,0xd1]
+; X64-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x10,0xc1]
+; X64-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
+; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <16 x i16> @llvm.x86.avx512.mask.psrlv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.psrlv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %x3)
+  %res2 = call <16 x i16> @llvm.x86.avx512.mask.psrlv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
+  %res3 = add <16 x i16> %res, %res1
+  %res4 = add <16 x i16> %res3, %res2
+  ret <16 x i16> %res4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.psrlv8.hi(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+
+define <8 x i16>@test_int_x86_avx512_mask_psrlv8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_psrlv8_hi:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x10,0xd1]
+; X86-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x10,0xc1]
+; X86-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_psrlv8_hi:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x10,0xd1]
+; X64-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x10,0xc1]
+; X64-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <8 x i16> @llvm.x86.avx512.mask.psrlv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.psrlv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> zeroinitializer, i8 %x3)
+  %res2 = call <8 x i16> @llvm.x86.avx512.mask.psrlv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
+  %res3 = add <8 x i16> %res, %res1
+  %res4 = add <8 x i16> %res3, %res2
+  ret <8 x i16> %res4
+}
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index 568cdec..10ba0e6 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -1127,686 +1127,6 @@
 
 declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>)
 
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) #0
-
-define <8 x i16> @test_mask_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rrk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpaddsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
-; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rrk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
-; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
-  ret <8 x i16> %3
-}
-
-define <8 x i16> @test_mask_adds_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rrkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rrkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
-  ret <8 x i16> %3
-}
-
-define <8 x i16> @test_mask_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rmk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
-; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
-; X86-NEXT:    vpaddsw (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0x08]
-; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rmk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsw (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xed,0x0f]
-; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
-  ret <8 x i16> %3
-}
-
-define <8 x i16> @test_mask_adds_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rmkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
-; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
-; X86-NEXT:    vpaddsw (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rmkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsw (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xed,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
-  ret <8 x i16> %3
-}
-
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) #0
-
-define <16 x i16> @test_mask_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rrk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
-; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rrk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
-; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
-  ret <16 x i16> %3
-}
-
-define <16 x i16> @test_mask_adds_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rrkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rrkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
-  ret <16 x i16> %3
-}
-
-define <16 x i16> @test_mask_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rmk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsw (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0x08]
-; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rmk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsw (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xed,0x0f]
-; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
-  ret <16 x i16> %3
-}
-
-define <16 x i16> @test_mask_adds_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi16_rmkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsw (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi16_rmkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
-  ret <16 x i16> %3
-}
-
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) #0
-
-define <8 x i16> @test_mask_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rrk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
-; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rrk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
-; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
-  ret <8 x i16> %3
-}
-
-define <8 x i16> @test_mask_subs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rrkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rrkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
-  ret <8 x i16> %3
-}
-
-define <8 x i16> @test_mask_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rmk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
-; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
-; X86-NEXT:    vpsubsw (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x08]
-; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rmk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsw (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x0f]
-; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
-  ret <8 x i16> %3
-}
-
-define <8 x i16> @test_mask_subs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rmkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
-; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
-; X86-NEXT:    vpsubsw (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rmkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsw (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <8 x i16>, <8 x i16>* %ptr_b
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a, <8 x i16> %b)
-  %2 = bitcast i8 %mask to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
-  ret <8 x i16> %3
-}
-
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) #0
-
-define <16 x i16> @test_mask_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rrk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
-; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rrk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsw %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
-; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
-  ret <16 x i16> %3
-}
-
-define <16 x i16> @test_mask_subs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rrkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rrkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
-  ret <16 x i16> %3
-}
-
-define <16 x i16> @test_mask_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rmk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsw (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x08]
-; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rmk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsw (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x0f]
-; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
-  ret <16 x i16> %3
-}
-
-define <16 x i16> @test_mask_subs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi16_rmkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsw (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi16_rmkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsw (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i16>, <16 x i16>* %ptr_b
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a, <16 x i16> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
-  ret <16 x i16> %3
-}
-
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) #0
-
-define <16 x i8> @test_mask_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rrk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
-; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rrk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
-; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
-  ret <16 x i8> %3
-}
-
-define <16 x i8> @test_mask_adds_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rrkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rrkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
-  ret <16 x i8> %3
-}
-
-define <16 x i8> @test_mask_adds_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
-; X86-LABEL: test_mask_adds_epi8_rm_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    vpaddsb (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rm_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @test_mask_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rmk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsb (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0x08]
-; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rmk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsb (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xec,0x0f]
-; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
-  ret <16 x i8> %3
-}
-
-define <16 x i8> @test_mask_adds_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rmkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsb (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rmkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xec,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
-  ret <16 x i8> %3
-}
-
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) #0
-
-define <32 x i8> @test_mask_adds_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rrk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0xd1]
-; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rrk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0xd1]
-; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
-  ret <32 x i8> %3
-}
-
-define <32 x i8> @test_mask_adds_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rrkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rrkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
-  ret <32 x i8> %3
-}
-
-define <32 x i8> @test_mask_adds_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rmk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsb (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0x08]
-; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rmk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsb (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xec,0x0f]
-; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
-  ret <32 x i8> %3
-}
-
-define <32 x i8> @test_mask_adds_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
-; X86-LABEL: test_mask_adds_epi8_rmkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpaddsb (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_adds_epi8_rmkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xec,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
-  ret <32 x i8> %3
-}
-
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) #0
-
-define <16 x i8> @test_mask_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rrk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
-; X86-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rrk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
-; X64-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
-  ret <16 x i8> %3
-}
-
-define <16 x i8> @test_mask_subs_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rrkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rrkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
-  ret <16 x i8> %3
-}
-
-define <16 x i8> @test_mask_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rmk_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsb (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x08]
-; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rmk_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsb (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x0f]
-; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
-  ret <16 x i8> %3
-}
-
-define <16 x i8> @test_mask_subs_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rmkz_128:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsb (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rmkz_128:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsb (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <16 x i8>, <16 x i8>* %ptr_b
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a, <16 x i8> %b)
-  %2 = bitcast i16 %mask to <16 x i1>
-  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
-  ret <16 x i8> %3
-}
-
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) #0
-
-define <32 x i8> @test_mask_subs_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rrk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0xd1]
-; X86-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rrk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0xd1]
-; X64-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
-  ret <32 x i8> %3
-}
-
-define <32 x i8> @test_mask_subs_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rrkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rrkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
-  ret <32 x i8> %3
-}
-
-define <32 x i8> @test_mask_subs_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rmk_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsb (%eax), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0x08]
-; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rmk_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsb (%rdi), %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x29,0xe8,0x0f]
-; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
-  ret <32 x i8> %3
-}
-
-define <32 x i8> @test_mask_subs_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
-; X86-LABEL: test_mask_subs_epi8_rmkz_256:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    vpsubsb (%eax), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0x00]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_mask_subs_epi8_rmkz_256:
-; X64:       # %bb.0:
-; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
-; X64-NEXT:    vpsubsb (%rdi), %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0x07]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %b = load <32 x i8>, <32 x i8>* %ptr_b
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a, <32 x i8> %b)
-  %2 = bitcast i32 %mask to <32 x i1>
-  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
-  ret <32 x i8> %3
-}
-
 define <8 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_128:
 ; X86:       # %bb.0:
@@ -2134,22 +1454,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmov_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmov_wb_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovwb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x30,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpmovwb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x30,0xc2]
 ; X86-NEXT:    vpmovwb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x30,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovwb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x30,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovwb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x30,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_wb_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpmovwb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x30,0xc2]
+; X64-NEXT:    vpmovwb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x30,0xc2]
 ; X64-NEXT:    vpmovwb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x30,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovwb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x30,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovwb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x30,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.wb.128(<8 x i16> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmov.wb.128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2)
@@ -2187,22 +1508,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovs_wb_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovswb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x20,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpmovswb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x20,0xc2]
 ; X86-NEXT:    vpmovswb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x20,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovswb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x20,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovswb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x20,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_wb_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpmovswb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x20,0xc2]
+; X64-NEXT:    vpmovswb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x20,0xc2]
 ; X64-NEXT:    vpmovswb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x20,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovswb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x20,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovswb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x20,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.wb.128(<8 x i16> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.wb.128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2)
@@ -2240,22 +1562,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovus_wb_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovuswb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x10,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpmovuswb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x10,0xc2]
 ; X86-NEXT:    vpmovuswb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x10,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovuswb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x10,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovuswb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x10,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_wb_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpmovuswb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x10,0xc2]
+; X64-NEXT:    vpmovuswb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x10,0xc2]
 ; X64-NEXT:    vpmovuswb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x10,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovuswb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x10,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovuswb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x10,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.128(<8 x i16> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2)
@@ -2694,7 +2017,48 @@
   ret <8 x i16> %res4
 }
 
-declare <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <8 x i16> @test_int_x86_avx512_psrlv_w_128_const() optsize {
+; X86-LABEL: test_int_x86_avx512_psrlv_w_128_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vpbroadcastw {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,2,2,2,2,2,2,2]
+; X86-NEXT:    # encoding: [0xc4,0xe2,0x79,0x79,0x05,A,A,A,A]
+; X86-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_psrlv_w_128_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastw {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,2,2,2,2,2,2,2]
+; X64-NEXT:    # encoding: [0xc4,0xe2,0x79,0x79,0x05,A,A,A,A]
+; X64-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
+  ret <8 x i16> %res
+}
+
+declare <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16>, <8 x i16>)
+
+define <16 x i16> @test_int_x86_avx512_psrlv_w_256_const() optsize {
+; X86-LABEL: test_int_x86_avx512_psrlv_w_256_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vpbroadcastw {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; X86-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x79,0x05,A,A,A,A]
+; X86-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_psrlv_w_256_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastw {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; X64-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x79,0x05,A,A,A,A]
+; X64-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
+  ret <16 x i16> %res
+}
+
+declare <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16>, <16 x i16>)
+
+declare <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16>, <16 x i16>)
 
 define <16 x i16>@test_int_x86_avx512_mask_psrav16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_psrav16_hi:
@@ -2716,15 +2080,19 @@
 ; X64-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
 ; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
-  %res1 = call <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %x3)
-  %res2 = call <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
-  %res3 = add <16 x i16> %res, %res1
-  %res4 = add <16 x i16> %res3, %res2
+  %1 = call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %x0, <16 x i16> %x1)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x2
+  %4 = call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %x0, <16 x i16> %x1)
+  %5 = bitcast i16 %x3 to <16 x i1>
+  %6 = select <16 x i1> %5, <16 x i16> %4, <16 x i16> zeroinitializer
+  %7 = call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %x0, <16 x i16> %x1)
+  %res3 = add <16 x i16> %3, %6
+  %res4 = add <16 x i16> %res3, %7
   ret <16 x i16> %res4
 }
 
-declare <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+declare <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16>, <8 x i16>)
 
 define <8 x i16>@test_int_x86_avx512_mask_psrav8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_psrav8_hi:
@@ -2747,16 +2115,18 @@
 ; X64-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
 ; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
-  %res1 = call <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> zeroinitializer, i8 %x3)
-  %res2 = call <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
-  %res3 = add <8 x i16> %res, %res1
-  %res4 = add <8 x i16> %res3, %res2
+  %1 = call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %x0, <8 x i16> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x2
+  %4 = call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %x0, <8 x i16> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i16> %4, <8 x i16> zeroinitializer
+  %7 = call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %x0, <8 x i16> %x1)
+  %res3 = add <8 x i16> %3, %6
+  %res4 = add <8 x i16> %res3, %7
   ret <8 x i16> %res4
 }
 
-declare <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-
 define <16 x i16>@test_int_x86_avx512_mask_psllv16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_psllv16_hi:
 ; X86:       # %bb.0:
@@ -2777,16 +2147,18 @@
 ; X64-NEXT:    vpaddw %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
 ; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
-  %res1 = call <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %x3)
-  %res2 = call <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
-  %res3 = add <16 x i16> %res, %res1
-  %res4 = add <16 x i16> %res3, %res2
+  %1 = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %x0, <16 x i16> %x1)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x2
+  %4 = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %x0, <16 x i16> %x1)
+  %5 = bitcast i16 %x3 to <16 x i1>
+  %6 = select <16 x i1> %5, <16 x i16> %4, <16 x i16> zeroinitializer
+  %7 = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %x0, <16 x i16> %x1)
+  %res3 = add <16 x i16> %3, %6
+  %res4 = add <16 x i16> %res3, %7
   ret <16 x i16> %res4
 }
 
-declare <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-
 define <8 x i16>@test_int_x86_avx512_mask_psllv8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_psllv8_hi:
 ; X86:       # %bb.0:
@@ -2808,14 +2180,61 @@
 ; X64-NEXT:    vpaddw %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
 ; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
-  %res1 = call <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> zeroinitializer, i8 %x3)
-  %res2 = call <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
-  %res3 = add <8 x i16> %res, %res1
-  %res4 = add <8 x i16> %res3, %res2
+  %1 = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %x0, <8 x i16> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x2
+  %4 = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %x0, <8 x i16> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i16> %4, <8 x i16> zeroinitializer
+  %7 = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %x0, <8 x i16> %x1)
+  %res3 = add <8 x i16> %3, %6
+  %res4 = add <8 x i16> %res3, %7
   ret <8 x i16> %res4
 }
 
+define <8 x i16> @test_int_x86_avx512_psllv_w_128_const() optsize {
+; X86-LABEL: test_int_x86_avx512_psllv_w_128_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vpbroadcastw {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [8,8,8,8,8,8,8,8]
+; X86-NEXT:    # encoding: [0xc4,0xe2,0x79,0x79,0x05,A,A,A,A]
+; X86-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_psllv_w_128_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastw {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [8,8,8,8,8,8,8,8]
+; X64-NEXT:    # encoding: [0xc4,0xe2,0x79,0x79,0x05,A,A,A,A]
+; X64-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
+  ret <8 x i16> %res
+}
+
+declare <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16>, <8 x i16>)
+
+
+define <16 x i16> @test_int_x86_avx512_psllv_w_256_const() optsize {
+; X86-LABEL: test_int_x86_avx512_psllv_w_256_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vpbroadcastw {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X86-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x79,0x05,A,A,A,A]
+; X86-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_psllv_w_256_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastw {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X64-NEXT:    # encoding: [0xc4,0xe2,0x7d,0x79,0x05,A,A,A,A]
+; X64-NEXT:    # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
+  ret <16 x i16> %res
+}
+
+declare <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16>, <16 x i16>)
+
+
+
 declare <8 x i16> @llvm.x86.avx512.permvar.hi.128(<8 x i16>, <8 x i16>)
 
 define <8 x i16>@test_int_x86_avx512_mask_permvar_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
diff --git a/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll
index 495f612..8eb9a7c 100644
--- a/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512vbmi-intrinsics-fast-isel.ll
@@ -96,3 +96,67 @@
 }
 
 declare <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>)
+
+define <8 x i64> @test_mm512_mask_multishift_epi64_epi8(<8 x i64> %__W, i64 %__M, <8 x i64> %__X, <8 x i64> %__Y) {
+; X86-LABEL: test_mm512_mask_multishift_epi64_epi8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    kunpckdq %k1, %k0, %k1
+; X86-NEXT:    vpmultishiftqb %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_multishift_epi64_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpmultishiftqb %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__X to <64 x i8>
+  %1 = bitcast <8 x i64> %__Y to <64 x i8>
+  %2 = tail call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %0, <64 x i8> %1)
+  %3 = bitcast <8 x i64> %__W to <64 x i8>
+  %4 = bitcast i64 %__M to <64 x i1>
+  %5 = select <64 x i1> %4, <64 x i8> %2, <64 x i8> %3
+  %6 = bitcast <64 x i8> %5 to <8 x i64>
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_maskz_multishift_epi64_epi8(i64 %__M, <8 x i64> %__X, <8 x i64> %__Y) {
+; X86-LABEL: test_mm512_maskz_multishift_epi64_epi8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k0
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    kunpckdq %k1, %k0, %k1
+; X86-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_multishift_epi64_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovq %rdi, %k1
+; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <8 x i64> %__X to <64 x i8>
+  %1 = bitcast <8 x i64> %__Y to <64 x i8>
+  %2 = tail call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %0, <64 x i8> %1)
+  %3 = bitcast i64 %__M to <64 x i1>
+  %4 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> zeroinitializer
+  %5 = bitcast <64 x i8> %4 to <8 x i64>
+  ret <8 x i64> %5
+}
+
+define <8 x i64> @test_mm512_multishift_epi64_epi8(<8 x i64> %__X, <8 x i64> %__Y) {
+; CHECK-LABEL: test_mm512_multishift_epi64_epi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
+entry:
+  %0 = bitcast <8 x i64> %__X to <64 x i8>
+  %1 = bitcast <8 x i64> %__Y to <64 x i8>
+  %2 = tail call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %0, <64 x i8> %1)
+  %3 = bitcast <64 x i8> %2 to <8 x i64>
+  ret <8 x i64> %3
+}
+
+declare <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8>, <64 x i8>)
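For reference, the masking idiom these new fast-isel tests exercise is the generic unmasked-op-plus-select pattern; a minimal sketch using the same intrinsic (the function name, %mask, and %passthru are illustrative, not taken from the test file):

declare <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8>, <64 x i8>)

define <64 x i8> @sketch_masked_multishift(<64 x i8> %passthru, i64 %mask, <64 x i8> %x, <64 x i8> %y) {
  ; unmasked operation first...
  %op = call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %x, <64 x i8> %y)
  ; ...then one predicate bit per byte lane selects result vs. passthru;
  ; with a zeroinitializer passthru this becomes the {z} zero-masking form
  %m = bitcast i64 %mask to <64 x i1>
  %res = select <64 x i1> %m, <64 x i8> %op, <64 x i8> %passthru
  ret <64 x i8> %res
}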
diff --git a/test/CodeGen/X86/avx512vbmi-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vbmi-intrinsics-upgrade.ll
index a09cf09..8c6b982 100644
--- a/test/CodeGen/X86/avx512vbmi-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vbmi-intrinsics-upgrade.ll
@@ -32,6 +32,36 @@
  ret <64 x i8> %res4
 }
 
+declare <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+
+define <64 x i8>@test_int_x86_avx512_mask_pmultishift_qb_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pmultishift_qb_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x83,0xd9]
+; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x83,0xd1]
+; X86-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x83,0xc1]
+; X86-NEXT:    vpaddb %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfc,0xc3]
+; X86-NEXT:    vpaddb %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pmultishift_qb_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x83,0xd9]
+; X64-NEXT:    kmovq %rdi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
+; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x83,0xd1]
+; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x83,0xc1]
+; X64-NEXT:    vpaddb %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfc,0xc3]
+; X64-NEXT:    vpaddb %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3)
+  %res1 = call <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> zeroinitializer, i64 %x3)
+  %res2 = call <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1)
+  %res3 = add <64 x i8> %res, %res1
+  %res4 = add <64 x i8> %res3, %res2
+  ret <64 x i8> %res4
+}
+
 declare <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
 
 define <64 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
diff --git a/test/CodeGen/X86/avx512vbmi-intrinsics.ll b/test/CodeGen/X86/avx512vbmi-intrinsics.ll
index ffce664..23a7e2a 100644
--- a/test/CodeGen/X86/avx512vbmi-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmi-intrinsics.ll
@@ -36,15 +36,13 @@
   ret <64 x i8> %res4
 }
 
-declare <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+declare <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8>, <64 x i8>)
 
 define <64 x i8>@test_int_x86_avx512_mask_pmultishift_qb_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmultishift_qb_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x83,0xd9]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k0 # encoding: [0xc4,0xe1,0xf9,0x90,0x44,0x24,0x04]
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
-; X86-NEXT:    kunpckdq %k0, %k1, %k1 # encoding: [0xc4,0xe1,0xf4,0x4b,0xc8]
+; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf8,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x83,0xd1]
 ; X86-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x83,0xc1]
 ; X86-NEXT:    vpaddb %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfc,0xc3]
@@ -53,18 +51,22 @@
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmultishift_qb_512:
 ; X64:       # %bb.0:
+; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf2,0xfd,0x48,0x83,0xd9]
 ; X64-NEXT:    kmovq %rdi, %k1 # encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
 ; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x83,0xd1]
-; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x83,0xd9]
-; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x83,0xc1]
-; X64-NEXT:    vpaddb %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfc,0xc0]
+; X64-NEXT:    vpmultishiftqb %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x83,0xc1]
+; X64-NEXT:    vpaddb %zmm3, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfc,0xc3]
 ; X64-NEXT:    vpaddb %zmm0, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3)
-  %res1 = call <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> zeroinitializer, i64 %x3)
-  %res2 = call <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1)
-  %res3 = add <64 x i8> %res, %res1
-  %res4 = add <64 x i8> %res3, %res2
+  %1 = call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1)
+  %2 = bitcast i64 %x3 to <64 x i1>
+  %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> %x2
+  %4 = call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1)
+  %5 = bitcast i64 %x3 to <64 x i1>
+  %6 = select <64 x i1> %5, <64 x i8> %4, <64 x i8> zeroinitializer
+  %7 = call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %x0, <64 x i8> %x1)
+  %res3 = add <64 x i8> %3, %6
+  %res4 = add <64 x i8> %res3, %7
   ret <64 x i8> %res4
 }
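The 32-bit codegen improvement in the hunk above (one kmovq load replacing two kmovd loads plus a kunpckdq) falls out of the same select-based masking: once the i64 mask only feeds a select, it can be loaded in one piece, and an all-ones mask lets the select fold away entirely. A minimal sketch of the folding case (names are illustrative):

declare <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8>, <64 x i8>)

define <64 x i8> @sketch_allones_mask(<64 x i8> %x, <64 x i8> %y, <64 x i8> %passthru) {
  %op = call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %x, <64 x i8> %y)
  ; i64 -1 bitcasts to an all-true predicate, so the select is a no-op
  ; and the plain vpmultishiftqb with no {%k1} qualifier is emitted
  %all = bitcast i64 -1 to <64 x i1>
  %res = select <64 x i1> %all, <64 x i8> %op, <64 x i8> %passthru
  ret <64 x i8> %res
}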
 
diff --git a/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll
index 0a0837d..0ba766c 100644
--- a/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512vbmi2-intrinsics-fast-isel.ll
@@ -308,22 +308,22 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpshldq $47, %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shldi_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpshldq $47, %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 127)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> <i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__S
   ret <8 x i64> %2
 }
 
-declare <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64>, <8 x i64>, i32)
+declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
 
 define <8 x i64> @test_mm512_maskz_shldi_epi64(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_shldi_epi64:
@@ -339,7 +339,7 @@
 ; X64-NEXT:    vpshldq $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 63)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> <i64 63, i64 63, i64 63, i64 63, i64 63, i64 63, i64 63, i64 63>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
   ret <8 x i64> %2
@@ -351,7 +351,7 @@
 ; CHECK-NEXT:    vpshldq $31, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 31)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>)
   ret <8 x i64> %0
 }
 
@@ -359,18 +359,18 @@
 ; X86-LABEL: test_mm512_mask_shldi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshldd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpshldd $7, %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shldi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpshldd $7, %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32> %0, <16 x i32> %1, i32 127)
+  %2 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %1, <16 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>)
   %3 = bitcast <8 x i64> %__S to <16 x i32>
   %4 = bitcast i16 %__U to <16 x i1>
   %5 = select <16 x i1> %4, <16 x i32> %2, <16 x i32> %3
@@ -378,24 +378,24 @@
   ret <8 x i64> %6
 }
 
-declare <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32>, <16 x i32>, i32)
+declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
 
 define <8 x i64> @test_mm512_maskz_shldi_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_shldi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshldd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    vpshldd $15, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shldi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    vpshldd $15, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32> %0, <16 x i32> %1, i32 63)
+  %2 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %1, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>)
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
   %5 = bitcast <16 x i32> %4 to <8 x i64>
@@ -410,7 +410,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32> %0, <16 x i32> %1, i32 31)
+  %2 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %1, <16 x i32> <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>)
   %3 = bitcast <16 x i32> %2 to <8 x i64>
   ret <8 x i64> %3
 }
@@ -419,18 +419,18 @@
 ; X86-LABEL: test_mm512_mask_shldi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshldw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpshldw $3, %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shldi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpshldw $3, %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <32 x i16>
   %1 = bitcast <8 x i64> %__B to <32 x i16>
-  %2 = tail call <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16> %0, <32 x i16> %1, i32 127)
+  %2 = tail call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %0, <32 x i16> %1, <32 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
   %3 = bitcast <8 x i64> %__S to <32 x i16>
   %4 = bitcast i32 %__U to <32 x i1>
   %5 = select <32 x i1> %4, <32 x i16> %2, <32 x i16> %3
@@ -438,24 +438,24 @@
   ret <8 x i64> %6
 }
 
-declare <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
 
 define <8 x i64> @test_mm512_maskz_shldi_epi16(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_shldi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshldw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    vpshldw $7, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shldi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    vpshldw $7, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <32 x i16>
   %1 = bitcast <8 x i64> %__B to <32 x i16>
-  %2 = tail call <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16> %0, <32 x i16> %1, i32 63)
+  %2 = tail call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %0, <32 x i16> %1, <32 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
   %3 = bitcast i32 %__U to <32 x i1>
   %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> zeroinitializer
   %5 = bitcast <32 x i16> %4 to <8 x i64>
@@ -465,12 +465,12 @@
 define <8 x i64> @test_mm512_shldi_epi16(<8 x i64> %__A, <8 x i64> %__B) {
 ; CHECK-LABEL: test_mm512_shldi_epi16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpshldw $31, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vpshldw $15, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <8 x i64> %__A to <32 x i16>
   %1 = bitcast <8 x i64> %__B to <32 x i16>
-  %2 = tail call <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16> %0, <32 x i16> %1, i32 31)
+  %2 = tail call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %0, <32 x i16> %1, <32 x i16> <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>)
   %3 = bitcast <32 x i16> %2 to <8 x i64>
   ret <8 x i64> %3
 }
@@ -480,22 +480,22 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpshrdq $47, %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shrdi_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdq $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpshrdq $47, %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 127)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__B, <8 x i64> %__A, <8 x i64> <i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__S
   ret <8 x i64> %2
 }
 
-declare <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64>, <8 x i64>, i32)
+declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
 
 define <8 x i64> @test_mm512_maskz_shrdi_epi64(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_shrdi_epi64:
@@ -511,7 +511,7 @@
 ; X64-NEXT:    vpshrdq $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 63)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__B, <8 x i64> %__A, <8 x i64> <i64 63, i64 63, i64 63, i64 63, i64 63, i64 63, i64 63, i64 63>)
   %1 = bitcast i8 %__U to <8 x i1>
   %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
   ret <8 x i64> %2
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    vpshrdq $31, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64> %__A, <8 x i64> %__B, i32 31)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__B, <8 x i64> %__A, <8 x i64> <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>)
   ret <8 x i64> %0
 }
 
@@ -531,18 +531,18 @@
 ; X86-LABEL: test_mm512_mask_shrdi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshrdd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpshrdd $7, %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shrdi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdd $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpshrdd $7, %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32> %0, <16 x i32> %1, i32 127)
+  %2 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %1, <16 x i32> %0, <16 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>)
   %3 = bitcast <8 x i64> %__S to <16 x i32>
   %4 = bitcast i16 %__U to <16 x i1>
   %5 = select <16 x i1> %4, <16 x i32> %2, <16 x i32> %3
@@ -550,24 +550,24 @@
   ret <8 x i64> %6
 }
 
-declare <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32>, <16 x i32>, i32)
+declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
 
 define <8 x i64> @test_mm512_maskz_shrdi_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_shrdi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshrdd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    vpshrdd $15, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shrdi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdd $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    vpshrdd $15, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32> %0, <16 x i32> %1, i32 63)
+  %2 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %1, <16 x i32> %0, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>)
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
   %5 = bitcast <16 x i32> %4 to <8 x i64>
@@ -582,7 +582,7 @@
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
   %1 = bitcast <8 x i64> %__B to <16 x i32>
-  %2 = tail call <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32> %0, <16 x i32> %1, i32 31)
+  %2 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %1, <16 x i32> %0, <16 x i32> <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>)
   %3 = bitcast <16 x i32> %2 to <8 x i64>
   ret <8 x i64> %3
 }
@@ -591,18 +591,18 @@
 ; X86-LABEL: test_mm512_mask_shrdi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshrdw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpshrdw $3, %zmm2, %zmm1, %zmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shrdi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdw $127, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpshrdw $3, %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <32 x i16>
   %1 = bitcast <8 x i64> %__B to <32 x i16>
-  %2 = tail call <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16> %0, <32 x i16> %1, i32 127)
+  %2 = tail call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %1, <32 x i16> %0, <32 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
   %3 = bitcast <8 x i64> %__S to <32 x i16>
   %4 = bitcast i32 %__U to <32 x i1>
   %5 = select <32 x i1> %4, <32 x i16> %2, <32 x i16> %3
@@ -610,24 +610,24 @@
   ret <8 x i64> %6
 }
 
-declare <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
 
 define <8 x i64> @test_mm512_maskz_shrdi_epi16(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X86-LABEL: test_mm512_maskz_shrdi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshrdw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    vpshrdw $15, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shrdi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdw $63, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    vpshrdw $15, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <32 x i16>
   %1 = bitcast <8 x i64> %__B to <32 x i16>
-  %2 = tail call <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16> %0, <32 x i16> %1, i32 63)
+  %2 = tail call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %1, <32 x i16> %0, <32 x i16> <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>)
   %3 = bitcast i32 %__U to <32 x i1>
   %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> zeroinitializer
   %5 = bitcast <32 x i16> %4 to <8 x i64>
@@ -637,12 +637,12 @@
 define <8 x i64> @test_mm512_shrdi_epi16(<8 x i64> %__A, <8 x i64> %__B) {
 ; CHECK-LABEL: test_mm512_shrdi_epi16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpshrdw $31, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vpshrdw $15, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <8 x i64> %__A to <32 x i16>
   %1 = bitcast <8 x i64> %__B to <32 x i16>
-  %2 = tail call <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16> %0, <32 x i16> %1, i32 31)
+  %2 = tail call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %1, <32 x i16> %0, <32 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
   %3 = bitcast <32 x i16> %2 to <8 x i64>
   ret <8 x i64> %3
 }
@@ -661,8 +661,10 @@
 ; X64-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
-  ret <8 x i64> %0
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__S
+  ret <8 x i64> %2
 }
 
 define <8 x i64> @test_mm512_maskz_shldv_epi64(i8 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -679,8 +681,10 @@
 ; X64-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
-  ret <8 x i64> %0
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
+  ret <8 x i64> %2
 }
 
 define <8 x i64> @test_mm512_shldv_epi64(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -689,7 +693,7 @@
 ; CHECK-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 -1)
+  %0 = tail call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B)
   ret <8 x i64> %0
 }
 
@@ -709,9 +713,11 @@
   %0 = bitcast <8 x i64> %__S to <16 x i32>
   %1 = bitcast <8 x i64> %__A to <16 x i32>
   %2 = bitcast <8 x i64> %__B to <16 x i32>
-  %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
-  %4 = bitcast <16 x i32> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i32> %3, <16 x i32> %0
+  %6 = bitcast <16 x i32> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_maskz_shldv_epi32(i16 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -730,9 +736,11 @@
   %0 = bitcast <8 x i64> %__S to <16 x i32>
   %1 = bitcast <8 x i64> %__A to <16 x i32>
   %2 = bitcast <8 x i64> %__B to <16 x i32>
-  %3 = tail call <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
-  %4 = bitcast <16 x i32> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i32> %3, <16 x i32> zeroinitializer
+  %6 = bitcast <16 x i32> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_shldv_epi32(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -744,7 +752,7 @@
   %0 = bitcast <8 x i64> %__S to <16 x i32>
   %1 = bitcast <8 x i64> %__A to <16 x i32>
   %2 = bitcast <8 x i64> %__B to <16 x i32>
-  %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 -1)
+  %3 = tail call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2)
   %4 = bitcast <16 x i32> %3 to <8 x i64>
   ret <8 x i64> %4
 }
@@ -765,9 +773,11 @@
   %0 = bitcast <8 x i64> %__S to <32 x i16>
   %1 = bitcast <8 x i64> %__A to <32 x i16>
   %2 = bitcast <8 x i64> %__B to <32 x i16>
-  %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
-  %4 = bitcast <32 x i16> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %0
+  %6 = bitcast <32 x i16> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_maskz_shldv_epi16(i32 %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -786,9 +796,11 @@
   %0 = bitcast <8 x i64> %__S to <32 x i16>
   %1 = bitcast <8 x i64> %__A to <32 x i16>
   %2 = bitcast <8 x i64> %__B to <32 x i16>
-  %3 = tail call <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
-  %4 = bitcast <32 x i16> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> zeroinitializer
+  %6 = bitcast <32 x i16> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_shldv_epi16(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -800,7 +812,7 @@
   %0 = bitcast <8 x i64> %__S to <32 x i16>
   %1 = bitcast <8 x i64> %__A to <32 x i16>
   %2 = bitcast <8 x i64> %__B to <32 x i16>
-  %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 -1)
+  %3 = tail call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2)
   %4 = bitcast <32 x i16> %3 to <8 x i64>
   ret <8 x i64> %4
 }
@@ -819,8 +831,10 @@
 ; X64-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
-  ret <8 x i64> %0
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__S, <8 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__S
+  ret <8 x i64> %2
 }
 
 define <8 x i64> @test_mm512_maskz_shrdv_epi64(i8 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -837,8 +851,10 @@
 ; X64-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 %__U)
-  ret <8 x i64> %0
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__S, <8 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
+  ret <8 x i64> %2
 }
 
 define <8 x i64> @test_mm512_shrdv_epi64(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -847,7 +863,7 @@
 ; CHECK-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B, i8 -1)
+  %0 = tail call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %__A, <8 x i64> %__S, <8 x i64> %__B)
   ret <8 x i64> %0
 }
 
@@ -867,9 +883,11 @@
   %0 = bitcast <8 x i64> %__S to <16 x i32>
   %1 = bitcast <8 x i64> %__A to <16 x i32>
   %2 = bitcast <8 x i64> %__B to <16 x i32>
-  %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
-  %4 = bitcast <16 x i32> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %1, <16 x i32> %0, <16 x i32> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i32> %3, <16 x i32> %0
+  %6 = bitcast <16 x i32> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_maskz_shrdv_epi32(i16 zeroext %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -888,9 +906,11 @@
   %0 = bitcast <8 x i64> %__S to <16 x i32>
   %1 = bitcast <8 x i64> %__A to <16 x i32>
   %2 = bitcast <8 x i64> %__B to <16 x i32>
-  %3 = tail call <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 %__U)
-  %4 = bitcast <16 x i32> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %1, <16 x i32> %0, <16 x i32> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i32> %3, <16 x i32> zeroinitializer
+  %6 = bitcast <16 x i32> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_shrdv_epi32(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -902,7 +922,7 @@
   %0 = bitcast <8 x i64> %__S to <16 x i32>
   %1 = bitcast <8 x i64> %__A to <16 x i32>
   %2 = bitcast <8 x i64> %__B to <16 x i32>
-  %3 = tail call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2, i16 -1)
+  %3 = tail call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %1, <16 x i32> %0, <16 x i32> %2)
   %4 = bitcast <16 x i32> %3 to <8 x i64>
   ret <8 x i64> %4
 }
@@ -923,9 +943,11 @@
   %0 = bitcast <8 x i64> %__S to <32 x i16>
   %1 = bitcast <8 x i64> %__A to <32 x i16>
   %2 = bitcast <8 x i64> %__B to <32 x i16>
-  %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
-  %4 = bitcast <32 x i16> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %1, <32 x i16> %0, <32 x i16> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> %0
+  %6 = bitcast <32 x i16> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_maskz_shrdv_epi16(i32 %__U, <8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -944,9 +966,11 @@
   %0 = bitcast <8 x i64> %__S to <32 x i16>
   %1 = bitcast <8 x i64> %__A to <32 x i16>
   %2 = bitcast <8 x i64> %__B to <32 x i16>
-  %3 = tail call <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 %__U)
-  %4 = bitcast <32 x i16> %3 to <8 x i64>
-  ret <8 x i64> %4
+  %3 = tail call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %1, <32 x i16> %0, <32 x i16> %2)
+  %4 = bitcast i32 %__U to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i16> %3, <32 x i16> zeroinitializer
+  %6 = bitcast <32 x i16> %5 to <8 x i64>
+  ret <8 x i64> %6
 }
 
 define <8 x i64> @test_mm512_shrdv_epi16(<8 x i64> %__S, <8 x i64> %__A, <8 x i64> %__B) {
@@ -958,7 +982,7 @@
   %0 = bitcast <8 x i64> %__S to <32 x i16>
   %1 = bitcast <8 x i64> %__A to <32 x i16>
   %2 = bitcast <8 x i64> %__B to <32 x i16>
-  %3 = tail call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %0, <32 x i16> %1, <32 x i16> %2, i32 -1)
+  %3 = tail call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %1, <32 x i16> %0, <32 x i16> %2)
   %4 = bitcast <32 x i16> %3 to <8 x i64>
   ret <8 x i64> %4
 }
@@ -971,15 +995,3 @@
 declare <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8>, <64 x i8>, i64)
 declare <32 x i16> @llvm.masked.expandload.v32i16(i16*, <32 x i1>, <32 x i16>)
 declare <64 x i8> @llvm.masked.expandload.v64i8(i8*, <64 x i1>, <64 x i8>)
-declare <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-declare <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-declare <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-declare <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-declare <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-declare <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-declare <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-declare <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-declare <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-declare <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-declare <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-declare <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
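The rewrites throughout this file follow one substitution: the target-specific VBMI2 concat-shift intrinsics are expressed with the generic funnel-shift intrinsics, where llvm.fshl(%a, %b, %c) returns the high half of the concatenation a:b shifted left by %c per lane, and the right-shift forms swap the first two operands. A sketch of the correspondence (function and value names are illustrative):

declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)

define <8 x i64> @sketch_funnel(<8 x i64> %a, <8 x i64> %b) {
  ; vpshldq $47, %b, %a: per-lane high 64 bits of (a:b) << 47
  %l = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> <i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47>)
  ; vpshrdq $47, %b, %a: same concatenation viewed from the right,
  ; hence fshr with %b and %a swapped relative to fshl
  %r = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %b, <8 x i64> %a, <8 x i64> <i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47, i64 47>)
  %s = add <8 x i64> %l, %r
  ret <8 x i64> %s
}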
diff --git a/test/CodeGen/X86/avx512vbmi2-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vbmi2-intrinsics-upgrade.ll
index 319008f..5446a8e 100644
--- a/test/CodeGen/X86/avx512vbmi2-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vbmi2-intrinsics-upgrade.ll
@@ -330,21 +330,21 @@
 define <32 x i16>@test_int_x86_avx512_mask_vpshld_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_512:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x16]
+; X86-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x06]
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x16]
+; X86-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x06]
 ; X86-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_512:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x16]
+; X64-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x16]
+; X64-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x06]
 ; X64-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 %x4)
-  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 -1)
+  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6, <32 x i16> %x3, i32 %x4)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6, <32 x i16> %x3, i32 -1)
   %res2 = add <32 x i16> %res, %res1
   ret <32 x i16> %res2
 }
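In the vpshld_w/vpshrd_w hunks above, the immediate drops from 22 to 6, presumably because the generic funnel-shift intrinsics take the shift amount modulo the element width, so for 16-bit lanes only counts below 16 are meaningful (22 mod 16 = 6) and the checked immediates were brought into range. A sketch of the equivalence on the generic intrinsic (names are illustrative):

declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)

define <32 x i16> @sketch_modulo_count(<32 x i16> %a, <32 x i16> %b) {
  ; the shift amount is taken modulo the element width,
  ; so a splat of 6 behaves the same as a splat of 22 would
  %r = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
  ret <32 x i16> %r
}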
@@ -400,22 +400,398 @@
 define <32 x i16>@test_int_x86_avx512_mask_vpshrd_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_512:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x16]
+; X86-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x06]
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x16]
+; X86-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x06]
 ; X86-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_512:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x16]
+; X64-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x16]
+; X64-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x06]
 ; X64-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 %x4)
-  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 -1)
+  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6, <32 x i16> %x3, i32 %x4)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6, <32 x i16> %x3, i32 -1)
   %res2 = add <32 x i16> %res, %res1
   ret <32 x i16> %res2
 }
 declare <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16>, <32 x i16>, i32, <32 x i16>, i32)
+
+define <16 x i32>@test_int_x86_avx512_mask_vpshld_d_512_2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_d_512_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldd $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0x7d,0x48,0x71,0xd9,0x16]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpshldd $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x71,0xd1,0x16]
+; X86-NEXT:    vpaddd %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_d_512_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldd $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0x7d,0x48,0x71,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldd $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x71,0xd1,0x16]
+; X64-NEXT:    vpaddd %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %2 = bitcast i16 %x4 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x3
+  %4 = call <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %res2 = add <16 x i32> %3, %4
+  ret <16 x i32> %res2
+}
+declare <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32>, <16 x i32>, i32)
+
+define <8 x i64>@test_int_x86_avx512_mask_vpshld_q_512_2(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_q_512_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldq $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x71,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshldq $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x71,0xd1,0x16]
+; X86-NEXT:    vpaddq %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_q_512_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldq $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x71,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldq $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x71,0xd1,0x16]
+; X64-NEXT:    vpaddq %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x3
+  %4 = call <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %res2 = add <8 x i64> %3, %4
+  ret <8 x i64> %res2
+}
+declare <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64>, <8 x i64>, i32)
+
+define <32 x i16>@test_int_x86_avx512_mask_vpshld_w_512_2(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_512_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x06]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x06]
+; X86-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_512_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x06]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x06]
+; X64-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6)
+  %2 = bitcast i32 %x4 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x3
+  %4 = call <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6)
+  %res2 = add <32 x i16> %3, %4
+  ret <32 x i16> %res2
+}
+declare <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16>, <32 x i16>, i32)
+
+define <16 x i32>@test_int_x86_avx512_mask_vpshrd_d_512_2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_d_512_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdd $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0x7d,0x48,0x73,0xd9,0x16]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpshrdd $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x73,0xd1,0x16]
+; X86-NEXT:    vpaddd %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_d_512_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdd $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0x7d,0x48,0x73,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdd $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x73,0xd1,0x16]
+; X64-NEXT:    vpaddd %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %2 = bitcast i16 %x4 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x3
+  %4 = call <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %res2 = add <16 x i32> %3, %4
+  ret <16 x i32> %res2
+}
+declare <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32>, <16 x i32>, i32)
+
+define <8 x i64>@test_int_x86_avx512_mask_vpshrd_q_512_2(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_q_512_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdq $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x73,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshrdq $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x73,0xd1,0x16]
+; X86-NEXT:    vpaddq %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_q_512_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdq $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x73,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdq $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x73,0xd1,0x16]
+; X64-NEXT:    vpaddq %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x3
+  %4 = call <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %res2 = add <8 x i64> %3, %4
+  ret <8 x i64> %res2
+}
+declare <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64>, <8 x i64>, i32)
+
+define <32 x i16>@test_int_x86_avx512_mask_vpshrd_w_512_2(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_512_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x06]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x06]
+; X86-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_512_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x06]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x06]
+; X64-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6)
+  %2 = bitcast i32 %x4 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x3
+  %4 = call <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 6)
+  %res2 = add <32 x i16> %3, %4
+  ret <32 x i16> %res2
+}
+declare <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16>, <32 x i16>, i32)
+
+declare <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
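+; The vpshrdv/vpshldv tests below all follow the same upgrade-test pattern:
+; one masked call, one call with an all-ones mask (which should fold to the
+; plain unmasked instruction), and one zero-masked call, with the results
+; summed so a single return value checks all three encodings.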
+define <16 x i32>@test_int_x86_avx512_mask_vpshrdv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X86-NEXT:    vpshrdvd (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x49,0x73,0x18]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X86-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0x75,0x48,0x73,0xe2]
+; X86-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x73,0xc2]
+; X86-NEXT:    vpaddd %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfe,0xc0]
+; X86-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X64-NEXT:    vpshrdvd (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x49,0x73,0x1f]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X64-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0x75,0x48,0x73,0xe2]
+; X64-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x73,0xc2]
+; X64-NEXT:    vpaddd %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfe,0xc0]
+; X64-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <16 x i32>, <16 x i32>* %x2p
+  %res = call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
+  %res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 -1)
+  %res2 = call <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 %x3)
+  %res3 = add <16 x i32> %res, %res1
+  %res4 = add <16 x i32> %res2, %res3
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_vpshrdv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X86-NEXT:    vpshrdvq (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x73,0x18]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X86-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x73,0xe2]
+; X86-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x73,0xc2]
+; X86-NEXT:    vpaddq %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0xdd,0x48,0xd4,0xc0]
+; X86-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X64-NEXT:    vpshrdvq (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x73,0x1f]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X64-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x73,0xe2]
+; X64-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x73,0xc2]
+; X64-NEXT:    vpaddq %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0xdd,0x48,0xd4,0xc0]
+; X64-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <8 x i64>, <8 x i64>* %x2p
+  %res = call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+  %res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 -1)
+  %res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 %x3)
+  %res3 = add <8 x i64> %res, %res1
+  %res4 = add <8 x i64> %res2, %res3
+  ret <8 x i64> %res4
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <32 x i16>@test_int_x86_avx512_mask_vpshrdv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_w_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X86-NEXT:    vpshrdvw (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x72,0x18]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X86-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x72,0xe2]
+; X86-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x72,0xc2]
+; X86-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
+; X86-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_w_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X64-NEXT:    vpshrdvw (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x72,0x1f]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X64-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x72,0xe2]
+; X64-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x72,0xc2]
+; X64-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
+; X64-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <32 x i16>, <32 x i16>* %x2p
+  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 -1)
+  %res2 = call <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 %x3)
+  %res3 = add <32 x i16> %res, %res1
+  %res4 = add <32 x i16> %res2, %res3
+  ret <32 x i16> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <16 x i32> @test_int_x86_avx512_mask_vpshldv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X86-NEXT:    vpshldvd (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x49,0x71,0x18]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X86-NEXT:    vpshldvd %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0x75,0x48,0x71,0xe2]
+; X86-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x71,0xc2]
+; X86-NEXT:    vpaddd %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfe,0xc0]
+; X86-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X64-NEXT:    vpshldvd (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x49,0x71,0x1f]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X64-NEXT:    vpshldvd %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0x75,0x48,0x71,0xe2]
+; X64-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x71,0xc2]
+; X64-NEXT:    vpaddd %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfe,0xc0]
+; X64-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <16 x i32>, <16 x i32>* %x2p
+  %res = call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
+  %res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 -1)
+  %res2 = call <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 %x3)
+  %res3 = add <16 x i32> %res, %res1
+  %res4 = add <16 x i32> %res2, %res3
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64> @test_int_x86_avx512_mask_vpshldv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X86-NEXT:    vpshldvq (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x71,0x18]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X86-NEXT:    vpshldvq %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x71,0xe2]
+; X86-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x71,0xc2]
+; X86-NEXT:    vpaddq %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0xdd,0x48,0xd4,0xc0]
+; X86-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X64-NEXT:    vpshldvq (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x71,0x1f]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X64-NEXT:    vpshldvq %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x71,0xe2]
+; X64-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x71,0xc2]
+; X64-NEXT:    vpaddq %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0xdd,0x48,0xd4,0xc0]
+; X64-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <8 x i64>, <8 x i64>* %x2p
+  %res = call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+  %res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 -1)
+  %res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 %x3)
+  %res3 = add <8 x i64> %res, %res1
+  %res4 = add <8 x i64> %res2, %res3
+  ret <8 x i64> %res4
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <32 x i16> @test_int_x86_avx512_mask_vpshldv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_w_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X86-NEXT:    vpshldvw (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x70,0x18]
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X86-NEXT:    vpshldvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x70,0xe2]
+; X86-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x70,0xc2]
+; X86-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
+; X86-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_w_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
+; X64-NEXT:    vpshldvw (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x70,0x1f]
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
+; X64-NEXT:    vpshldvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x70,0xe2]
+; X64-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x70,0xc2]
+; X64-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
+; X64-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <32 x i16>, <32 x i16>* %x2p
+  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 -1)
+  %res2 = call <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 %x3)
+  %res3 = add <32 x i16> %res, %res1
+  %res4 = add <32 x i16> %res2, %res3
+  ret <32 x i16> %res4
+}
diff --git a/test/CodeGen/X86/avx512vbmi2-intrinsics.ll b/test/CodeGen/X86/avx512vbmi2-intrinsics.ll
index fd98c16..962eac9 100644
--- a/test/CodeGen/X86/avx512vbmi2-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmi2-intrinsics.ll
@@ -94,7 +94,7 @@
 
 declare <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8> %data, <64 x i8> %src0, i64 %mask)
 
-define <16 x i32>@test_int_x86_avx512_mask_vpshld_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
+define <16 x i32> @test_int_x86_avx512_mask_vpshld_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_d_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshldd $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0x7d,0x48,0x71,0xd9,0x16]
@@ -110,16 +110,15 @@
 ; X64-NEXT:    vpshldd $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x71,0xd1,0x16]
 ; X64-NEXT:    vpaddd %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %1 = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %2 = bitcast i16 %x4 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x3
-  %4 = call <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %4 = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %res2 = add <16 x i32> %3, %4
   ret <16 x i32> %res2
 }
-declare <16 x i32> @llvm.x86.avx512.vpshld.d.512(<16 x i32>, <16 x i32>, i32)
 
-define <8 x i64>@test_int_x86_avx512_mask_vpshld_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
+define <8 x i64> @test_int_x86_avx512_mask_vpshld_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_q_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshldq $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x71,0xd9,0x16]
@@ -136,41 +135,39 @@
 ; X64-NEXT:    vpshldq $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x71,0xd1,0x16]
 ; X64-NEXT:    vpaddq %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %1 = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> <i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x3
-  %4 = call <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %4 = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> <i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22>)
   %res2 = add <8 x i64> %3, %4
   ret <8 x i64> %res2
 }
-declare <8 x i64> @llvm.x86.avx512.vpshld.q.512(<8 x i64>, <8 x i64>, i32)
 
-define <32 x i16>@test_int_x86_avx512_mask_vpshld_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
+define <32 x i16> @test_int_x86_avx512_mask_vpshld_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_512:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x16]
+; X86-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x06]
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x16]
+; X86-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x06]
 ; X86-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_512:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x16]
+; X64-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x70,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshldw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x16]
+; X64-NEXT:    vpshldw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x70,0xd1,0x06]
 ; X64-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22)
+  %1 = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %2 = bitcast i32 %x4 to <32 x i1>
   %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x3
-  %4 = call <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22)
+  %4 = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %res2 = add <32 x i16> %3, %4
   ret <32 x i16> %res2
 }
-declare <32 x i16> @llvm.x86.avx512.vpshld.w.512(<32 x i16>, <32 x i16>, i32)
 
-define <16 x i32>@test_int_x86_avx512_mask_vpshrd_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
+define <16 x i32> @test_int_x86_avx512_mask_vpshrd_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_d_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshrdd $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0x7d,0x48,0x73,0xd9,0x16]
@@ -186,16 +183,15 @@
 ; X64-NEXT:    vpshrdd $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x73,0xd1,0x16]
 ; X64-NEXT:    vpaddd %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %1 = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %2 = bitcast i16 %x4 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x3
-  %4 = call <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22)
+  %4 = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %res2 = add <16 x i32> %3, %4
   ret <16 x i32> %res2
 }
-declare <16 x i32> @llvm.x86.avx512.vpshrd.d.512(<16 x i32>, <16 x i32>, i32)
 
-define <8 x i64>@test_int_x86_avx512_mask_vpshrd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
+define <8 x i64> @test_int_x86_avx512_mask_vpshrd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_q_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshrdq $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x73,0xd9,0x16]
@@ -212,44 +208,39 @@
 ; X64-NEXT:    vpshrdq $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x73,0xd1,0x16]
 ; X64-NEXT:    vpaddq %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %1 = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> <i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x3
-  %4 = call <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22)
+  %4 = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> <i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22, i64 22>)
   %res2 = add <8 x i64> %3, %4
   ret <8 x i64> %res2
 }
-declare <8 x i64> @llvm.x86.avx512.vpshrd.q.512(<8 x i64>, <8 x i64>, i32)
 
-define <32 x i16>@test_int_x86_avx512_mask_vpshrd_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
+define <32 x i16> @test_int_x86_avx512_mask_vpshrd_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_512:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x16]
+; X86-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x06]
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x16]
+; X86-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x06]
 ; X86-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_512:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x16]
+; X64-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm3 # encoding: [0x62,0xf3,0xfd,0x48,0x72,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshrdw $22, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x16]
+; X64-NEXT:    vpshrdw $6, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x72,0xd1,0x06]
 ; X64-NEXT:    vpaddw %zmm3, %zmm2, %zmm0 # encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22)
+  %1 = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x1, <32 x i16> %x0, <32 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %2 = bitcast i32 %x4 to <32 x i1>
   %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x3
-  %4 = call <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22)
+  %4 = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x1, <32 x i16> %x0, <32 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %res2 = add <32 x i16> %3, %4
   ret <32 x i16> %res2
 }
-declare <32 x i16> @llvm.x86.avx512.vpshrd.w.512(<32 x i16>, <32 x i16>, i32)
 
-declare <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-declare <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-
-define <16 x i32>@test_int_x86_avx512_mask_vpshrdv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
+define <16 x i32> @test_int_x86_avx512_mask_vpshrdv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_d_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -257,9 +248,9 @@
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvd (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x49,0x73,0x18]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X86-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x73,0xe2]
-; X86-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x75,0x48,0x73,0xc2]
-; X86-NEXT:    vpaddd %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc4]
+; X86-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0x75,0x48,0x73,0xe2]
+; X86-NEXT:    vpshrdvd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x73,0xc2]
+; X86-NEXT:    vpaddd %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfe,0xc0]
 ; X86-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -275,29 +266,30 @@
 ; X64-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <16 x i32>, <16 x i32>* %x2p
-  %res = call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
-  %res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 -1)
-  %res2 = call <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16  %x3)
-  %res3 = add <16 x i32> %res, %res1
-  %res4 = add <16 x i32> %res2, %res3
+  %1 = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x0
+  %4 = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x4)
+  %5 = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x4)
+  %6 = bitcast i16 %x3 to <16 x i1>
+  %7 = select <16 x i1> %6, <16 x i32> %5, <16 x i32> zeroinitializer
+  %res3 = add <16 x i32> %3, %4
+  %res4 = add <16 x i32> %7, %res3
   ret <16 x i32> %res4
 }
 
-declare <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-declare <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-
-define <8 x i64>@test_int_x86_avx512_mask_vpshrdv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
+define <8 x i64> @test_int_x86_avx512_mask_vpshrdv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_q_512:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvq (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x73,0x18]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X86-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x73,0xe2]
-; X86-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0xf5,0x48,0x73,0xc2]
-; X86-NEXT:    vpaddq %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc4]
+; X86-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x73,0xe2]
+; X86-NEXT:    vpshrdvq %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x73,0xc2]
+; X86-NEXT:    vpaddq %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0xdd,0x48,0xd4,0xc0]
 ; X86-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -313,18 +305,19 @@
 ; X64-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <8 x i64>, <8 x i64>* %x2p
-  %res = call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
-  %res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 -1)
-  %res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8  %x3)
-  %res3 = add <8 x i64> %res, %res1
-  %res4 = add <8 x i64> %res2, %res3
+  %1 = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x0
+  %4 = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> %x4)
+  %5 = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %7 = select <8 x i1> %6, <8 x i64> %5, <8 x i64> zeroinitializer
+  %res3 = add <8 x i64> %3, %4
+  %res4 = add <8 x i64> %7, %res3
   ret <8 x i64> %res4
 }
 
-declare <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-declare <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-
-define <32 x i16>@test_int_x86_avx512_mask_vpshrdv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
+define <32 x i16> @test_int_x86_avx512_mask_vpshrdv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_w_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -332,9 +325,9 @@
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvw (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x72,0x18]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X86-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x72,0xe2]
-; X86-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0xf5,0x48,0x72,0xc2]
-; X86-NEXT:    vpaddw %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc4]
+; X86-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x72,0xe2]
+; X86-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x72,0xc2]
+; X86-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
 ; X86-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -344,24 +337,25 @@
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X64-NEXT:    vpshrdvw (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x72,0x1f]
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X64-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x72,0xe2]
-; X64-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0xf5,0x48,0x72,0xc2]
-; X64-NEXT:    vpaddw %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc4]
+; X64-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x72,0xe2]
+; X64-NEXT:    vpshrdvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x72,0xc2]
+; X64-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
 ; X64-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <32 x i16>, <32 x i16>* %x2p
-  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
-  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 -1)
-  %res2 = call <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32  %x3)
-  %res3 = add <32 x i16> %res, %res1
-  %res4 = add <32 x i16> %res2, %res3
+  %1 = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x1, <32 x i16> %x0, <32 x i16> %x2)
+  %2 = bitcast i32 %x3 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x0
+  %4 = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x1, <32 x i16> %x0, <32 x i16> %x4)
+  %5 = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x1, <32 x i16> %x0, <32 x i16> %x4)
+  %6 = bitcast i32 %x3 to <32 x i1>
+  %7 = select <32 x i1> %6, <32 x i16> %5, <32 x i16> zeroinitializer
+  %res3 = add <32 x i16> %3, %4
+  %res4 = add <32 x i16> %7, %res3
   ret <32 x i16> %res4
 }
 
-declare <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-declare <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-
-define <16 x i32>@test_int_x86_avx512_mask_vpshldv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
+define <16 x i32> @test_int_x86_avx512_mask_vpshldv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_d_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -369,9 +363,9 @@
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X86-NEXT:    vpshldvd (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x49,0x71,0x18]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X86-NEXT:    vpshldvd %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x71,0xe2]
-; X86-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x75,0x48,0x71,0xc2]
-; X86-NEXT:    vpaddd %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc4]
+; X86-NEXT:    vpshldvd %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0x75,0x48,0x71,0xe2]
+; X86-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0x71,0xc2]
+; X86-NEXT:    vpaddd %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfe,0xc0]
 ; X86-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -387,29 +381,30 @@
 ; X64-NEXT:    vpaddd %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <16 x i32>, <16 x i32>* %x2p
-  %res = call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
-  %res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 -1)
-  %res2 = call <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16  %x3)
-  %res3 = add <16 x i32> %res, %res1
-  %res4 = add <16 x i32> %res2, %res3
+  %1 = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x0
+  %4 = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4)
+  %5 = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4)
+  %6 = bitcast i16 %x3 to <16 x i1>
+  %7 = select <16 x i1> %6, <16 x i32> %5, <16 x i32> zeroinitializer
+  %res3 = add <16 x i32> %3, %4
+  %res4 = add <16 x i32> %7, %res3
   ret <16 x i32> %res4
 }
 
-declare <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-declare <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-
-define <8 x i64>@test_int_x86_avx512_mask_vpshldv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
+define <8 x i64> @test_int_x86_avx512_mask_vpshldv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_q_512:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X86-NEXT:    vpshldvq (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x71,0x18]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X86-NEXT:    vpshldvq %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x71,0xe2]
-; X86-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0xf5,0x48,0x71,0xc2]
-; X86-NEXT:    vpaddq %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc4]
+; X86-NEXT:    vpshldvq %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x71,0xe2]
+; X86-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x71,0xc2]
+; X86-NEXT:    vpaddq %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0xdd,0x48,0xd4,0xc0]
 ; X86-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -425,18 +420,19 @@
 ; X64-NEXT:    vpaddq %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0xd4,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <8 x i64>, <8 x i64>* %x2p
-  %res = call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
-  %res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 -1)
-  %res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8  %x3)
-  %res3 = add <8 x i64> %res, %res1
-  %res4 = add <8 x i64> %res2, %res3
+  %1 = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x0
+  %4 = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4)
+  %5 = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %7 = select <8 x i1> %6, <8 x i64> %5, <8 x i64> zeroinitializer
+  %res3 = add <8 x i64> %3, %4
+  %res4 = add <8 x i64> %7, %res3
   ret <8 x i64> %res4
 }
 
-declare <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-declare <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-
-define <32 x i16>@test_int_x86_avx512_mask_vpshldv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
+define <32 x i16> @test_int_x86_avx512_mask_vpshldv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_w_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -444,9 +440,9 @@
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X86-NEXT:    vpshldvw (%eax), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x70,0x18]
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X86-NEXT:    vpshldvw %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x70,0xe2]
-; X86-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0xf5,0x48,0x70,0xc2]
-; X86-NEXT:    vpaddw %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc4]
+; X86-NEXT:    vpshldvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x70,0xe2]
+; X86-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x70,0xc2]
+; X86-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
 ; X86-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -456,17 +452,27 @@
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xd8]
 ; X64-NEXT:    vpshldvw (%rdi), %zmm1, %zmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x49,0x70,0x1f]
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm4 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xe0]
-; X64-NEXT:    vpshldvw %zmm2, %zmm1, %zmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x70,0xe2]
-; X64-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0xf5,0x48,0x70,0xc2]
-; X64-NEXT:    vpaddw %zmm4, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc4]
+; X64-NEXT:    vpshldvw %zmm2, %zmm1, %zmm4 # encoding: [0x62,0xf2,0xf5,0x48,0x70,0xe2]
+; X64-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0x70,0xc2]
+; X64-NEXT:    vpaddw %zmm0, %zmm4, %zmm0 # encoding: [0x62,0xf1,0x5d,0x48,0xfd,0xc0]
 ; X64-NEXT:    vpaddw %zmm0, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x65,0x48,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <32 x i16>, <32 x i16>* %x2p
-  %res = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
-  %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 -1)
-  %res2 = call <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32  %x3)
-  %res3 = add <32 x i16> %res, %res1
-  %res4 = add <32 x i16> %res2, %res3
+  %1 = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2)
+  %2 = bitcast i32 %x3 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %x0
+  %4 = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4)
+  %5 = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4)
+  %6 = bitcast i32 %x3 to <32 x i1>
+  %7 = select <32 x i1> %6, <32 x i16> %5, <32 x i16> zeroinitializer
+  %res3 = add <32 x i16> %3, %4
+  %res4 = add <32 x i16> %7, %res3
   ret <32 x i16> %res4
 }
 
+declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
+declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
+declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
+declare <32 x i16> @llvm.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
diff --git a/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
index 392ea0b..03594df 100644
--- a/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
@@ -582,23 +582,23 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpshldq $47, %ymm2, %ymm1, %ymm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_shldi_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpshldq $47, %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 127)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> <i64 47, i64 47, i64 47, i64 47>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__S
   ret <4 x i64> %2
 }
 
-declare <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64>, <4 x i64>, i32)
+declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
 
 define <4 x i64> @test_mm256_maskz_shldi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
 ; X86-LABEL: test_mm256_maskz_shldi_epi64:
@@ -614,7 +614,7 @@
 ; X64-NEXT:    vpshldq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> <i64 63, i64 63, i64 63, i64 63>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -627,7 +627,7 @@
 ; CHECK-NEXT:    vpshldq $31, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> <i64 31, i64 31, i64 31, i64 31>)
   ret <4 x i64> %0
 }
 
@@ -636,23 +636,23 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpshldq $47, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_shldi_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpshldq $47, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 127)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> <i64 47, i64 47>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__S
   ret <2 x i64> %2
 }
 
-declare <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64>, <2 x i64>, i32) #3
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
 
 define <2 x i64> @test_mm_maskz_shldi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
 ; X86-LABEL: test_mm_maskz_shldi_epi64:
@@ -668,7 +668,7 @@
 ; X64-NEXT:    vpshldq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> <i64 63, i64 63>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -681,7 +681,7 @@
 ; CHECK-NEXT:    vpshldq $31, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> <i64 31, i64 31>)
   ret <2 x i64> %0
 }
 
@@ -690,18 +690,18 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpshldd $7, %ymm2, %ymm1, %ymm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_shldi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpshldd $7, %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 127)
+  %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>)
   %3 = bitcast <4 x i64> %__S to <8 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
@@ -709,25 +709,25 @@
   ret <4 x i64> %6
 }
 
-declare <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32>, <8 x i32>, i32)
+declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
 
 define <4 x i64> @test_mm256_maskz_shldi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
 ; X86-LABEL: test_mm256_maskz_shldi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpshldd $15, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskz_shldi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpshldd $15, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 63)
+  %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>)
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
   %5 = bitcast <8 x i32> %4 to <4 x i64>
@@ -742,7 +742,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 31)
+  %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>)
   %3 = bitcast <8 x i32> %2 to <4 x i64>
   ret <4 x i64> %3
 }
@@ -752,18 +752,18 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpshldd $7, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_shldi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpshldd $7, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 127)
+  %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> <i32 7, i32 7, i32 7, i32 7>)
   %3 = bitcast <2 x i64> %__S to <4 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -772,25 +772,25 @@
   ret <2 x i64> %6
 }
 
-declare <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
 
 define <2 x i64> @test_mm_maskz_shldi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
 ; X86-LABEL: test_mm_maskz_shldi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpshldd $15, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_shldi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpshldd $15, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 63)
+  %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> <i32 15, i32 15, i32 15, i32 15>)
   %3 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %4 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> zeroinitializer
@@ -806,7 +806,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 31)
+  %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
   %3 = bitcast <4 x i32> %2 to <2 x i64>
   ret <2 x i64> %3
 }
@@ -815,18 +815,18 @@
 ; X86-LABEL: test_mm256_mask_shldi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshldw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpshldw $3, %ymm2, %ymm1, %ymm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_shldi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpshldw $3, %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <16 x i16>
   %1 = bitcast <4 x i64> %__B to <16 x i16>
-  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 127)
+  %2 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
   %3 = bitcast <4 x i64> %__S to <16 x i16>
   %4 = bitcast i16 %__U to <16 x i1>
   %5 = select <16 x i1> %4, <16 x i16> %2, <16 x i16> %3
@@ -834,24 +834,24 @@
   ret <4 x i64> %6
 }
 
-declare <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16>, <16 x i16>, i32)
+declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
 
 define <4 x i64> @test_mm256_maskz_shldi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
 ; X86-LABEL: test_mm256_maskz_shldi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshldw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpshldw $7, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskz_shldi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpshldw $7, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <16 x i16>
   %1 = bitcast <4 x i64> %__B to <16 x i16>
-  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 63)
+  %2 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
   %5 = bitcast <16 x i16> %4 to <4 x i64>
@@ -861,12 +861,12 @@
 define <4 x i64> @test_mm256_shldi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
 ; CHECK-LABEL: test_mm256_shldi_epi16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpshldw $31, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpshldw $15, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <4 x i64> %__A to <16 x i16>
   %1 = bitcast <4 x i64> %__B to <16 x i16>
-  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 31)
+  %2 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
   %3 = bitcast <16 x i16> %2 to <4 x i64>
   ret <4 x i64> %3
 }
@@ -876,18 +876,18 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpshldw $3, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_shldi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpshldw $3, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <8 x i16>
   %1 = bitcast <2 x i64> %__B to <8 x i16>
-  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 127)
+  %2 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
   %3 = bitcast <2 x i64> %__S to <8 x i16>
   %4 = bitcast i8 %__U to <8 x i1>
   %5 = select <8 x i1> %4, <8 x i16> %2, <8 x i16> %3
@@ -895,25 +895,25 @@
   ret <2 x i64> %6
 }
 
-declare <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
 
 define <2 x i64> @test_mm_maskz_shldi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
 ; X86-LABEL: test_mm_maskz_shldi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshldw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpshldw $7, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_shldi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshldw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpshldw $7, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <8 x i16>
   %1 = bitcast <2 x i64> %__B to <8 x i16>
-  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 63)
+  %2 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
   %5 = bitcast <8 x i16> %4 to <2 x i64>
@@ -923,12 +923,12 @@
 define <2 x i64> @test_mm_shldi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK-LABEL: test_mm_shldi_epi16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpshldw $31, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpshldw $15, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <2 x i64> %__A to <8 x i16>
   %1 = bitcast <2 x i64> %__B to <8 x i16>
-  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 31)
+  %2 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
   %3 = bitcast <8 x i16> %2 to <2 x i64>
   ret <2 x i64> %3
 }
@@ -938,23 +938,23 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpshrdq $47, %ymm2, %ymm1, %ymm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_shrdi_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdq $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpshrdq $47, %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 127)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__B, <4 x i64> %__A, <4 x i64> <i64 47, i64 47, i64 47, i64 47>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__S
   ret <4 x i64> %2
 }
 
-declare <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64>, <4 x i64>, i32)
+declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
 
 define <4 x i64> @test_mm256_maskz_shrdi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
 ; X86-LABEL: test_mm256_maskz_shrdi_epi64:
@@ -970,7 +970,7 @@
 ; X64-NEXT:    vpshrdq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__B, <4 x i64> %__A, <4 x i64> <i64 63, i64 63, i64 63, i64 63>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -983,7 +983,7 @@
 ; CHECK-NEXT:    vpshrdq $31, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__B, <4 x i64> %__A, <4 x i64> <i64 31, i64 31, i64 31, i64 31>)
   ret <4 x i64> %0
 }
 
@@ -992,23 +992,23 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpshrdq $47, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_shrdi_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdq $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpshrdq $47, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 127)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__B, <2 x i64> %__A, <2 x i64> <i64 47, i64 47>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__S
   ret <2 x i64> %2
 }
 
-declare <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
 
 define <2 x i64> @test_mm_maskz_shrdi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
 ; X86-LABEL: test_mm_maskz_shrdi_epi64:
@@ -1024,7 +1024,7 @@
 ; X64-NEXT:    vpshrdq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__B, <2 x i64> %__A, <2 x i64> <i64 63, i64 63>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -1037,7 +1037,7 @@
 ; CHECK-NEXT:    vpshrdq $31, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__B, <2 x i64> %__A, <2 x i64> <i64 31, i64 31>)
   ret <2 x i64> %0
 }
 
@@ -1046,18 +1046,18 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpshrdd $7, %ymm2, %ymm1, %ymm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_shrdi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdd $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpshrdd $7, %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 127)
+  %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>)
   %3 = bitcast <4 x i64> %__S to <8 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
@@ -1065,25 +1065,25 @@
   ret <4 x i64> %6
 }
 
-declare <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32>, <8 x i32>, i32)
+declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
 
 define <4 x i64> @test_mm256_maskz_shrdi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
 ; X86-LABEL: test_mm256_maskz_shrdi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpshrdd $15, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskz_shrdi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdd $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpshrdd $15, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 63)
+  %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>)
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
   %5 = bitcast <8 x i32> %4 to <4 x i64>
@@ -1098,7 +1098,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 31)
+  %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>)
   %3 = bitcast <8 x i32> %2 to <4 x i64>
   ret <4 x i64> %3
 }
@@ -1108,18 +1108,18 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpshrdd $7, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_shrdi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdd $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpshrdd $7, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 127)
+  %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> <i32 7, i32 7, i32 7, i32 7>)
   %3 = bitcast <2 x i64> %__S to <4 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1128,25 +1128,25 @@
   ret <2 x i64> %6
 }
 
-declare <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
 
 define <2 x i64> @test_mm_maskz_shrdi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
 ; X86-LABEL: test_mm_maskz_shrdi_epi32:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpshrdd $15, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_shrdi_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdd $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpshrdd $15, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 63)
+  %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> <i32 15, i32 15, i32 15, i32 15>)
   %3 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %4 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> zeroinitializer
@@ -1162,7 +1162,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 31)
+  %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
   %3 = bitcast <4 x i32> %2 to <2 x i64>
   ret <2 x i64> %3
 }
@@ -1171,18 +1171,18 @@
 ; X86-LABEL: test_mm256_mask_shrdi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshrdw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpshrdw $3, %ymm2, %ymm1, %ymm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_shrdi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdw $127, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpshrdw $3, %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <16 x i16>
   %1 = bitcast <4 x i64> %__B to <16 x i16>
-  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 127)
+  %2 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
   %3 = bitcast <4 x i64> %__S to <16 x i16>
   %4 = bitcast i16 %__U to <16 x i1>
   %5 = select <16 x i1> %4, <16 x i16> %2, <16 x i16> %3
@@ -1190,24 +1190,24 @@
   ret <4 x i64> %6
 }
 
-declare <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16>, <16 x i16>, i32)
+declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
 
 define <4 x i64> @test_mm256_maskz_shrdi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
 ; X86-LABEL: test_mm256_maskz_shrdi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpshrdw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpshrdw $7, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskz_shrdi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdw $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpshrdw $7, %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <16 x i16>
   %1 = bitcast <4 x i64> %__B to <16 x i16>
-  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 63)
+  %2 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
   %3 = bitcast i16 %__U to <16 x i1>
   %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
   %5 = bitcast <16 x i16> %4 to <4 x i64>
@@ -1217,12 +1217,12 @@
 define <4 x i64> @test_mm256_shrdi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
 ; CHECK-LABEL: test_mm256_shrdi_epi16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpshrdw $31, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpshrdw $15, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <4 x i64> %__A to <16 x i16>
   %1 = bitcast <4 x i64> %__B to <16 x i16>
-  %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 31)
+  %2 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
   %3 = bitcast <16 x i16> %2 to <4 x i64>
   ret <4 x i64> %3
 }
@@ -1232,18 +1232,18 @@
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpshrdw $3, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_shrdi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdw $127, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpshrdw $3, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <8 x i16>
   %1 = bitcast <2 x i64> %__B to <8 x i16>
-  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 127)
+  %2 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
   %3 = bitcast <2 x i64> %__S to <8 x i16>
   %4 = bitcast i8 %__U to <8 x i1>
   %5 = select <8 x i1> %4, <8 x i16> %2, <8 x i16> %3
@@ -1251,25 +1251,25 @@
   ret <2 x i64> %6
 }
 
-declare <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
 
 define <2 x i64> @test_mm_maskz_shrdi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
 ; X86-LABEL: test_mm_maskz_shrdi_epi16:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    kmovd %eax, %k1
-; X86-NEXT:    vpshrdw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpshrdw $7, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_shrdi_epi16:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpshrdw $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpshrdw $7, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <8 x i16>
   %1 = bitcast <2 x i64> %__B to <8 x i16>
-  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 63)
+  %2 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
   %5 = bitcast <8 x i16> %4 to <2 x i64>
@@ -1279,12 +1279,12 @@
 define <2 x i64> @test_mm_shrdi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK-LABEL: test_mm_shrdi_epi16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpshrdw $31, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpshrdw $15, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <2 x i64> %__A to <8 x i16>
   %1 = bitcast <2 x i64> %__B to <8 x i16>
-  %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 31)
+  %2 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
   %3 = bitcast <8 x i16> %2 to <2 x i64>
   ret <2 x i64> %3
 }
@@ -1303,8 +1303,11 @@
 ; X64-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
-  ret <4 x i64> %0
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__S
+  ret <4 x i64> %2
 }
 
 define <4 x i64> @test_mm256_maskz_shldv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1321,8 +1324,11 @@
 ; X64-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
-  ret <4 x i64> %0
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
+  ret <4 x i64> %2
 }
 
 define <4 x i64> @test_mm256_shldv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1331,7 +1337,7 @@
 ; CHECK-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B)
   ret <4 x i64> %0
 }
 
@@ -1349,8 +1355,11 @@
 ; X64-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
-  ret <2 x i64> %0
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__S
+  ret <2 x i64> %2
 }
 
 define <2 x i64> @test_mm_maskz_shldv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1367,8 +1376,11 @@
 ; X64-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
-  ret <2 x i64> %0
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
+  ret <2 x i64> %2
 }
 
 define <2 x i64> @test_mm_shldv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1377,7 +1389,7 @@
 ; CHECK-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B)
   ret <2 x i64> %0
 }
 
@@ -1398,9 +1410,11 @@
   %0 = bitcast <4 x i64> %__S to <8 x i32>
   %1 = bitcast <4 x i64> %__A to <8 x i32>
   %2 = bitcast <4 x i64> %__B to <8 x i32>
-  %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
-  %4 = bitcast <8 x i32> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> %0
+  %6 = bitcast <8 x i32> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_maskz_shldv_epi32(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1420,9 +1434,11 @@
   %0 = bitcast <4 x i64> %__S to <8 x i32>
   %1 = bitcast <4 x i64> %__A to <8 x i32>
   %2 = bitcast <4 x i64> %__B to <8 x i32>
-  %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
-  %4 = bitcast <8 x i32> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> zeroinitializer
+  %6 = bitcast <8 x i32> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_shldv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1434,7 +1450,7 @@
   %0 = bitcast <4 x i64> %__S to <8 x i32>
   %1 = bitcast <4 x i64> %__A to <8 x i32>
   %2 = bitcast <4 x i64> %__B to <8 x i32>
-  %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1)
+  %3 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
   %4 = bitcast <8 x i32> %3 to <4 x i64>
   ret <4 x i64> %4
 }
@@ -1456,9 +1472,12 @@
   %0 = bitcast <2 x i64> %__S to <4 x i32>
   %1 = bitcast <2 x i64> %__A to <4 x i32>
   %2 = bitcast <2 x i64> %__B to <4 x i32>
-  %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
-  %4 = bitcast <4 x i32> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> %0
+  %6 = bitcast <4 x i32> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_maskz_shldv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1478,9 +1497,12 @@
   %0 = bitcast <2 x i64> %__S to <4 x i32>
   %1 = bitcast <2 x i64> %__A to <4 x i32>
   %2 = bitcast <2 x i64> %__B to <4 x i32>
-  %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
-  %4 = bitcast <4 x i32> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> zeroinitializer
+  %6 = bitcast <4 x i32> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_shldv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1492,7 +1514,7 @@
   %0 = bitcast <2 x i64> %__S to <4 x i32>
   %1 = bitcast <2 x i64> %__A to <4 x i32>
   %2 = bitcast <2 x i64> %__B to <4 x i32>
-  %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1)
+  %3 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
   %4 = bitcast <4 x i32> %3 to <2 x i64>
   ret <2 x i64> %4
 }
@@ -1513,9 +1535,11 @@
   %0 = bitcast <4 x i64> %__S to <16 x i16>
   %1 = bitcast <4 x i64> %__A to <16 x i16>
   %2 = bitcast <4 x i64> %__B to <16 x i16>
-  %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
-  %4 = bitcast <16 x i16> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %0
+  %6 = bitcast <16 x i16> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_maskz_shldv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1534,9 +1558,11 @@
   %0 = bitcast <4 x i64> %__S to <16 x i16>
   %1 = bitcast <4 x i64> %__A to <16 x i16>
   %2 = bitcast <4 x i64> %__B to <16 x i16>
-  %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
-  %4 = bitcast <16 x i16> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
+  %6 = bitcast <16 x i16> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_shldv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1548,7 +1574,7 @@
   %0 = bitcast <4 x i64> %__S to <16 x i16>
   %1 = bitcast <4 x i64> %__A to <16 x i16>
   %2 = bitcast <4 x i64> %__B to <16 x i16>
-  %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1)
+  %3 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
   %4 = bitcast <16 x i16> %3 to <4 x i64>
   ret <4 x i64> %4
 }
@@ -1570,9 +1596,11 @@
   %0 = bitcast <2 x i64> %__S to <8 x i16>
   %1 = bitcast <2 x i64> %__A to <8 x i16>
   %2 = bitcast <2 x i64> %__B to <8 x i16>
-  %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
-  %4 = bitcast <8 x i16> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %0
+  %6 = bitcast <8 x i16> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_maskz_shldv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1592,9 +1620,11 @@
   %0 = bitcast <2 x i64> %__S to <8 x i16>
   %1 = bitcast <2 x i64> %__A to <8 x i16>
   %2 = bitcast <2 x i64> %__B to <8 x i16>
-  %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
-  %4 = bitcast <8 x i16> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
+  %6 = bitcast <8 x i16> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_shldv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1606,7 +1636,7 @@
   %0 = bitcast <2 x i64> %__S to <8 x i16>
   %1 = bitcast <2 x i64> %__A to <8 x i16>
   %2 = bitcast <2 x i64> %__B to <8 x i16>
-  %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1)
+  %3 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
   %4 = bitcast <8 x i16> %3 to <2 x i64>
   ret <2 x i64> %4
 }
@@ -1625,8 +1655,11 @@
 ; X64-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
-  ret <4 x i64> %0
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__S, <4 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__S
+  ret <4 x i64> %2
 }
 
 define <4 x i64> @test_mm256_maskz_shrdv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1643,8 +1676,11 @@
 ; X64-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
-  ret <4 x i64> %0
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__S, <4 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
+  ret <4 x i64> %2
 }
 
 define <4 x i64> @test_mm256_shrdv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1653,7 +1689,7 @@
 ; CHECK-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__S, <4 x i64> %__B)
   ret <4 x i64> %0
 }
 
@@ -1671,8 +1707,11 @@
 ; X64-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
-  ret <2 x i64> %0
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__S, <2 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__S
+  ret <2 x i64> %2
 }
 
 define <2 x i64> @test_mm_maskz_shrdv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1689,8 +1728,11 @@
 ; X64-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
-  ret <2 x i64> %0
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__S, <2 x i64> %__B)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
+  ret <2 x i64> %2
 }
 
 define <2 x i64> @test_mm_shrdv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1699,7 +1741,7 @@
 ; CHECK-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__S, <2 x i64> %__B)
   ret <2 x i64> %0
 }
 
@@ -1720,9 +1762,11 @@
   %0 = bitcast <4 x i64> %__S to <8 x i32>
   %1 = bitcast <4 x i64> %__A to <8 x i32>
   %2 = bitcast <4 x i64> %__B to <8 x i32>
-  %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
-  %4 = bitcast <8 x i32> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> %0
+  %6 = bitcast <8 x i32> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_maskz_shrdv_epi32(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1742,9 +1786,11 @@
   %0 = bitcast <4 x i64> %__S to <8 x i32>
   %1 = bitcast <4 x i64> %__A to <8 x i32>
   %2 = bitcast <4 x i64> %__B to <8 x i32>
-  %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
-  %4 = bitcast <8 x i32> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> zeroinitializer
+  %6 = bitcast <8 x i32> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_shrdv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1756,7 +1802,7 @@
   %0 = bitcast <4 x i64> %__S to <8 x i32>
   %1 = bitcast <4 x i64> %__A to <8 x i32>
   %2 = bitcast <4 x i64> %__B to <8 x i32>
-  %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1)
+  %3 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> %2)
   %4 = bitcast <8 x i32> %3 to <4 x i64>
   ret <4 x i64> %4
 }
@@ -1778,9 +1824,12 @@
   %0 = bitcast <2 x i64> %__S to <4 x i32>
   %1 = bitcast <2 x i64> %__A to <4 x i32>
   %2 = bitcast <2 x i64> %__B to <4 x i32>
-  %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
-  %4 = bitcast <4 x i32> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> %0
+  %6 = bitcast <4 x i32> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_maskz_shrdv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1800,9 +1849,12 @@
   %0 = bitcast <2 x i64> %__S to <4 x i32>
   %1 = bitcast <2 x i64> %__A to <4 x i32>
   %2 = bitcast <2 x i64> %__B to <4 x i32>
-  %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
-  %4 = bitcast <4 x i32> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> zeroinitializer
+  %6 = bitcast <4 x i32> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_shrdv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1814,7 +1866,7 @@
   %0 = bitcast <2 x i64> %__S to <4 x i32>
   %1 = bitcast <2 x i64> %__A to <4 x i32>
   %2 = bitcast <2 x i64> %__B to <4 x i32>
-  %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1)
+  %3 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> %2)
   %4 = bitcast <4 x i32> %3 to <2 x i64>
   ret <2 x i64> %4
 }
@@ -1835,9 +1887,11 @@
   %0 = bitcast <4 x i64> %__S to <16 x i16>
   %1 = bitcast <4 x i64> %__A to <16 x i16>
   %2 = bitcast <4 x i64> %__B to <16 x i16>
-  %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
-  %4 = bitcast <16 x i16> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %0
+  %6 = bitcast <16 x i16> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_maskz_shrdv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1856,9 +1910,11 @@
   %0 = bitcast <4 x i64> %__S to <16 x i16>
   %1 = bitcast <4 x i64> %__A to <16 x i16>
   %2 = bitcast <4 x i64> %__B to <16 x i16>
-  %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
-  %4 = bitcast <16 x i16> %3 to <4 x i64>
-  ret <4 x i64> %4
+  %3 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> %2)
+  %4 = bitcast i16 %__U to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
+  %6 = bitcast <16 x i16> %5 to <4 x i64>
+  ret <4 x i64> %6
 }
 
 define <4 x i64> @test_mm256_shrdv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1870,7 +1926,7 @@
   %0 = bitcast <4 x i64> %__S to <16 x i16>
   %1 = bitcast <4 x i64> %__A to <16 x i16>
   %2 = bitcast <4 x i64> %__B to <16 x i16>
-  %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1)
+  %3 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> %2)
   %4 = bitcast <16 x i16> %3 to <4 x i64>
   ret <4 x i64> %4
 }
@@ -1892,9 +1948,11 @@
   %0 = bitcast <2 x i64> %__S to <8 x i16>
   %1 = bitcast <2 x i64> %__A to <8 x i16>
   %2 = bitcast <2 x i64> %__B to <8 x i16>
-  %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
-  %4 = bitcast <8 x i16> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %0
+  %6 = bitcast <8 x i16> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_maskz_shrdv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1914,9 +1972,11 @@
   %0 = bitcast <2 x i64> %__S to <8 x i16>
   %1 = bitcast <2 x i64> %__A to <8 x i16>
   %2 = bitcast <2 x i64> %__B to <8 x i16>
-  %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
-  %4 = bitcast <8 x i16> %3 to <2 x i64>
-  ret <2 x i64> %4
+  %3 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> %2)
+  %4 = bitcast i8 %__U to <8 x i1>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
+  %6 = bitcast <8 x i16> %5 to <2 x i64>
+  ret <2 x i64> %6
 }
 
 define <2 x i64> @test_mm_shrdv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1928,7 +1988,7 @@
   %0 = bitcast <2 x i64> %__S to <8 x i16>
   %1 = bitcast <2 x i64> %__A to <8 x i16>
   %2 = bitcast <2 x i64> %__B to <8 x i16>
-  %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1)
+  %3 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> %2)
   %4 = bitcast <8 x i16> %3 to <2 x i64>
   ret <2 x i64> %4
 }
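The vpshldv/vpshrdv hunks above follow a second pattern: the old mask./maskz. intrinsics carried the write mask as a trailing i8/i16 argument, while the upgraded IR calls the unmasked funnel shift and applies the mask explicitly, bitcasting the integer mask to <N x i1> (narrowing it with a shufflevector when the vector has fewer lanes than the mask has bits) and selecting against either the passthrough operand or zeroinitializer. A minimal sketch of the masked variable-shift pattern, with hypothetical names, mirroring the selects emitted above:

declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)

define <4 x i64> @mask_shldv_sketch(<4 x i64> %S, <4 x i64> %A, <4 x i64> %B, i8 %U) {
entry:
  ; per 64-bit lane: high half of concat(S, A) shifted left by B modulo 64
  %shift = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %S, <4 x i64> %A, <4 x i64> %B)
  ; reinterpret the i8 mask as eight lane predicates and keep the low four
  %m8 = bitcast i8 %U to <8 x i1>
  %m = shufflevector <8 x i1> %m8, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ; masked lanes take the shifted value; the rest keep the passthrough %S
  %res = select <4 x i1> %m, <4 x i64> %shift, <4 x i64> %S
  ret <4 x i64> %res
}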
@@ -1949,27 +2009,3 @@
 declare <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8>, <32 x i8>, i32)
 declare <16 x i16> @llvm.masked.expandload.v16i16(i16*, <16 x i1>, <16 x i16>)
 declare <32 x i8> @llvm.masked.expandload.v32i8(i8*, <32 x i1>, <32 x i8>)
-declare <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
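One subtlety worth noting before the second test file: the shrd rewrites swap their first two sources, as every hunk above shows (vpshrd(%__A, %__B, ...) becomes fshr(%__B, %__A, ...)), because the bits shifted in come from the second source of the intrinsic, which corresponds to the first, most-significant operand of llvm.fshr. A small sketch of that operand order, with hypothetical names, assuming only the LangRef semantics of llvm.fshr:

declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @shrdq_sketch(<2 x i64> %A, <2 x i64> %B) {
  ; per 64-bit lane: the low half of concat(B, A) shifted right by 31,
  ; i.e. (A >> 31) | (B << 33); the incoming bits are taken from %B
  %r = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %B, <2 x i64> %A, <2 x i64> <i64 31, i64 31>)
  ret <2 x i64> %r
}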
diff --git a/test/CodeGen/X86/avx512vbmi2vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vbmi2vl-intrinsics-upgrade.ll
index 072f962..760212f 100644
--- a/test/CodeGen/X86/avx512vbmi2vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vbmi2vl-intrinsics-upgrade.ll
@@ -474,22 +474,22 @@
 define <8 x i16>@test_int_x86_avx512_mask_vpshld_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x16]
+; X86-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x06]
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x16]
+; X86-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x06]
 ; X86-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_128:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x16]
+; X64-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x16]
+; X64-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x06]
 ; X64-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 %x4)
-  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 -1)
+  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6, <8 x i16> %x3, i8 %x4)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6, <8 x i16> %x3, i8 -1)
   %res2 = add <8 x i16> %res, %res1
   ret <8 x i16> %res2
 }
@@ -498,21 +498,21 @@
 define <16 x i16>@test_int_x86_avx512_mask_vpshld_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x16]
+; X86-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x06]
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x16]
+; X86-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x06]
 ; X86-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_256:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x16]
+; X64-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x16]
+; X64-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x06]
 ; X64-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 %x4)
-  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 -1)
+  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6, <16 x i16> %x3, i16 %x4)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6, <16 x i16> %x3, i16 -1)
   %res2 = add <16 x i16> %res, %res1
   ret <16 x i16> %res2
 }
@@ -623,22 +623,22 @@
 define <8 x i16>@test_int_x86_avx512_mask_vpshrd_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x16]
+; X86-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x06]
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x16]
+; X86-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x06]
 ; X86-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_128:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x16]
+; X64-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x16]
+; X64-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x06]
 ; X64-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 %x4)
-  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 -1)
+  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6, <8 x i16> %x3, i8 %x4)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6, <8 x i16> %x3, i8 -1)
   %res2 = add <8 x i16> %res, %res1
   ret <8 x i16> %res2
 }
@@ -647,22 +647,810 @@
 define <16 x i16>@test_int_x86_avx512_mask_vpshrd_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x16]
+; X86-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x06]
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x16]
+; X86-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x06]
 ; X86-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_256:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x16]
+; X64-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x16]
+; X64-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x06]
 ; X64-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 %x4)
-  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 -1)
+  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6, <16 x i16> %x3, i16 %x4)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6, <16 x i16> %x3, i16 -1)
   %res2 = add <16 x i16> %res, %res1
   ret <16 x i16> %res2
 }
 declare <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16>, <16 x i16>, i32, <16 x i16>, i16)
+
+define <4 x i32>@test_int_x86_avx512_mask_vpshld_d_128_2(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_d_128_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldd $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0x7d,0x08,0x71,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshldd $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x09,0x71,0xd1,0x16]
+; X86-NEXT:    vpshldd $22, %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x71,0xc1,0x16]
+; X86-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_d_128_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldd $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0x7d,0x08,0x71,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldd $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x09,0x71,0xd1,0x16]
+; X64-NEXT:    vpshldd $22, %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x71,0xc1,0x16]
+; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x3
+  %4 = call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %5 = call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %6 = bitcast i8 %x4 to <8 x i1>
+  %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %7 = select <4 x i1> %extract, <4 x i32> %5, <4 x i32> zeroinitializer
+  %res3 = add <4 x i32> %3, %4
+  %res4 = add <4 x i32> %res3, %7
+  ret <4 x i32> %res4
+}
+declare <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32>, <4 x i32>, i32)
+
+define <8 x i32>@test_int_x86_avx512_mask_vpshld_d_256_2(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_d_256_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldd $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0x7d,0x28,0x71,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshldd $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x71,0xd1,0x16]
+; X86-NEXT:    vpaddd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_d_256_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldd $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0x7d,0x28,0x71,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldd $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x71,0xd1,0x16]
+; X64-NEXT:    vpaddd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x3
+  %4 = call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %res2 = add <8 x i32> %3, %4
+  ret <8 x i32> %res2
+}
+declare <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32>, <8 x i32>, i32)
+
+define <2 x i64>@test_int_x86_avx512_mask_vpshld_q_128_2(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_q_128_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldq $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x71,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshldq $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x71,0xd1,0x16]
+; X86-NEXT:    vpaddq %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_q_128_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldq $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x71,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldq $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x71,0xd1,0x16]
+; X64-NEXT:    vpaddq %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x3
+  %4 = call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %res2 = add <2 x i64> %3, %4
+  ret <2 x i64> %res2
+}
+declare <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64>, <2 x i64>, i32)
+
+define <4 x i64>@test_int_x86_avx512_mask_vpshld_q_256_2(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_q_256_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldq $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x71,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshldq $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x71,0xd1,0x16]
+; X86-NEXT:    vpaddq %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_q_256_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldq $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x71,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldq $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x71,0xd1,0x16]
+; X64-NEXT:    vpaddq %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x3
+  %4 = call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %res2 = add <4 x i64> %3, %4
+  ret <4 x i64> %res2
+}
+declare <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64>, <4 x i64>, i32)
+
+define <8 x i16>@test_int_x86_avx512_mask_vpshld_w_128_2(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_128_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x06]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x06]
+; X86-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_128_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x06]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x06]
+; X64-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x3
+  %4 = call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6)
+  %res2 = add <8 x i16> %3, %4
+  ret <8 x i16> %res2
+}
+declare <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16>, <8 x i16>, i32)
+
+define <16 x i16>@test_int_x86_avx512_mask_vpshld_w_256_2(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_256_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x06]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x06]
+; X86-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_256_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x06]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x06]
+; X64-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6)
+  %2 = bitcast i16 %x4 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x3
+  %4 = call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6)
+  %res2 = add <16 x i16> %3, %4
+  ret <16 x i16> %res2
+}
+declare <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16>, <16 x i16>, i32)
+
+define <4 x i32>@test_int_x86_avx512_mask_vpshrd_d_128_2(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_d_128_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdd $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0x7d,0x08,0x73,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshrdd $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x09,0x73,0xd1,0x16]
+; X86-NEXT:    vpshrdd $22, %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x73,0xc1,0x16]
+; X86-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_d_128_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdd $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0x7d,0x08,0x73,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdd $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x09,0x73,0xd1,0x16]
+; X64-NEXT:    vpshrdd $22, %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x73,0xc1,0x16]
+; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x3
+  %4 = call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %5 = call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %6 = bitcast i8 %x4 to <8 x i1>
+  %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %7 = select <4 x i1> %extract, <4 x i32> %5, <4 x i32> zeroinitializer
+  %res3 = add <4 x i32> %3, %4
+  %res4 = add <4 x i32> %res3, %7
+  ret <4 x i32> %res4
+}
+declare <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32>, <4 x i32>, i32)
+
+define <8 x i32> @test_int_x86_avx512_mask_vpshrd_d_256_2(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_d_256_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdd $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0x7d,0x28,0x73,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshrdd $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x73,0xd1,0x16]
+; X86-NEXT:    vpaddd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_d_256_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdd $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0x7d,0x28,0x73,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdd $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x73,0xd1,0x16]
+; X64-NEXT:    vpaddd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x3
+  %4 = call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %res2 = add <8 x i32> %3, %4
+  ret <8 x i32> %res2
+}
+declare <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32>, <8 x i32>, i32)
+
+define <2 x i64> @test_int_x86_avx512_mask_vpshrd_q_128_2(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_q_128_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdq $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x73,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshrdq $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x73,0xd1,0x16]
+; X86-NEXT:    vpaddq %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_q_128_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdq $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x73,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdq $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x73,0xd1,0x16]
+; X64-NEXT:    vpaddq %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x3
+  %4 = call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %res2 = add <2 x i64> %3, %4
+  ret <2 x i64> %res2
+}
+declare <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64>, <2 x i64>, i32)
+
+define <4 x i64> @test_int_x86_avx512_mask_vpshrd_q_256_2(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_q_256_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdq $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x73,0xd9,0x16]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshrdq $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x73,0xd1,0x16]
+; X86-NEXT:    vpaddq %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_q_256_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdq $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x73,0xd9,0x16]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdq $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x73,0xd1,0x16]
+; X64-NEXT:    vpaddq %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x3
+  %4 = call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %res2 = add <4 x i64> %3, %4
+  ret <4 x i64> %res2
+}
+declare <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64>, <4 x i64>, i32)
+
+define <8 x i16> @test_int_x86_avx512_mask_vpshrd_w_128_2(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_128_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x06]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
+; X86-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x06]
+; X86-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_128_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x06]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x06]
+; X64-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6)
+  %2 = bitcast i8 %x4 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x3
+  %4 = call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 6)
+  %res2 = add <8 x i16> %3, %4
+  ret <8 x i16> %res2
+}
+declare <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16>, <8 x i16>, i32)
+
+define <16 x i16> @test_int_x86_avx512_mask_vpshrd_w_256_2(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_256_2:
+; X86:       # %bb.0:
+; X86-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x06]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x06]
+; X86-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_256_2:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x06]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x06]
+; X64-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6)
+  %2 = bitcast i16 %x4 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x3
+  %4 = call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 6)
+  %res2 = add <16 x i16> %3, %4
+  ret <16 x i16> %res2
+}
+declare <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16>, <16 x i16>, i32)
+
+declare <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+
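+; The vpshrdv/vpshldv tests below take the third operand both from memory (so
+; the load folds into the merge-masked instruction) and from a register; each
+; test covers the merge-masked, unmasked (all-ones mask), and zero-masked
+; intrinsics and sums the three results.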
+define <8 x i32> @test_int_x86_avx512_mask_vpshrdv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_d_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X86-NEXT:    vpshrdvd (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0x75,0x29,0x73,0x18]
+; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X86-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0x75,0x28,0x73,0xe2]
+; X86-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x73,0xc2]
+; X86-NEXT:    vpaddd %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfe,0xc0]
+; X86-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_d_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X64-NEXT:    vpshrdvd (%rdi), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0x75,0x29,0x73,0x1f]
+; X64-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X64-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0x75,0x28,0x73,0xe2]
+; X64-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x73,0xc2]
+; X64-NEXT:    vpaddd %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfe,0xc0]
+; X64-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <8 x i32>, <8 x i32>* %x2p
+  %res = call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
+  %res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 -1)
+  %res2 = call <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 %x3)
+  %res3 = add <8 x i32> %res, %res1
+  %res4 = add <8 x i32> %res2, %res3
+  ret <8 x i32> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+
+define <4 x i32> @test_int_x86_avx512_mask_vpshrdv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_d_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X86-NEXT:    vpshrdvd (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x09,0x73,0x18]
+; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X86-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0x75,0x08,0x73,0xe2]
+; X86-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x73,0xc2]
+; X86-NEXT:    vpaddd %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfe,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_d_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X64-NEXT:    vpshrdvd (%rdi), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x09,0x73,0x1f]
+; X64-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X64-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0x75,0x08,0x73,0xe2]
+; X64-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x73,0xc2]
+; X64-NEXT:    vpaddd %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfe,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <4 x i32>, <4 x i32>* %x2p
+  %res = call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
+  %res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 -1)
+  %res2 = call <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 %x3)
+  %res3 = add <4 x i32> %res, %res1
+  %res4 = add <4 x i32> %res2, %res3
+  ret <4 x i32> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64> @test_int_x86_avx512_mask_vpshrdv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_q_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X86-NEXT:    vpshrdvq (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x73,0x18]
+; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x73,0xe2]
+; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x73,0xc2]
+; X86-NEXT:    vpaddq %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc0]
+; X86-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_q_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X64-NEXT:    vpshrdvq (%rdi), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x73,0x1f]
+; X64-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X64-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x73,0xe2]
+; X64-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x73,0xc2]
+; X64-NEXT:    vpaddq %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc0]
+; X64-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <4 x i64>, <4 x i64>* %x2p
+  %res = call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+  %res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 -1)
+  %res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 %x3)
+  %res3 = add <4 x i64> %res, %res1
+  %res4 = add <4 x i64> %res2, %res3
+  ret <4 x i64> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64> @test_int_x86_avx512_mask_vpshrdv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_q_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X86-NEXT:    vpshrdvq (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x73,0x18]
+; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x73,0xe2]
+; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x73,0xc2]
+; X86-NEXT:    vpaddq %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc0]
+; X86-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_q_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X64-NEXT:    vpshrdvq (%rdi), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x73,0x1f]
+; X64-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X64-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x73,0xe2]
+; X64-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x73,0xc2]
+; X64-NEXT:    vpaddq %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc0]
+; X64-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <2 x i64>, <2 x i64>* %x2p
+  %res = call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+  %res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 -1)
+  %res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 %x3)
+  %res3 = add <2 x i64> %res, %res1
+  %res4 = add <2 x i64> %res2, %res3
+  ret <2 x i64> %res4
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <16 x i16> @test_int_x86_avx512_mask_vpshrdv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_w_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X86-NEXT:    vpshrdvw (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x72,0x18]
+; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X86-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x72,0xe2]
+; X86-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x72,0xc2]
+; X86-NEXT:    vpaddw %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfd,0xc0]
+; X86-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_w_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X64-NEXT:    vpshrdvw (%rdi), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x72,0x1f]
+; X64-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X64-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x72,0xe2]
+; X64-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x72,0xc2]
+; X64-NEXT:    vpaddw %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfd,0xc0]
+; X64-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <16 x i16>, <16 x i16>* %x2p
+  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 -1)
+  %res2 = call <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 %x3)
+  %res3 = add <16 x i16> %res, %res1
+  %res4 = add <16 x i16> %res2, %res3
+  ret <16 x i16> %res4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+
+define <8 x i16> @test_int_x86_avx512_mask_vpshrdv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_w_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X86-NEXT:    vpshrdvw (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x72,0x18]
+; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X86-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x72,0xe2]
+; X86-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x72,0xc2]
+; X86-NEXT:    vpaddw %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshrdv_w_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X64-NEXT:    vpshrdvw (%rdi), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x72,0x1f]
+; X64-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X64-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x72,0xe2]
+; X64-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x72,0xc2]
+; X64-NEXT:    vpaddw %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <8 x i16>, <8 x i16>* %x2p
+  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 -1)
+  %res2 = call <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 %x3)
+  %res3 = add <8 x i16> %res, %res1
+  %res4 = add <8 x i16> %res2, %res3
+  ret <8 x i16> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+
+define <8 x i32> @test_int_x86_avx512_mask_vpshldv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_d_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X86-NEXT:    vpshldvd (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0x75,0x29,0x71,0x18]
+; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0x75,0x28,0x71,0xe2]
+; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x71,0xc2]
+; X86-NEXT:    vpaddd %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfe,0xc0]
+; X86-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_d_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X64-NEXT:    vpshldvd (%rdi), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0x75,0x29,0x71,0x1f]
+; X64-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X64-NEXT:    vpshldvd %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0x75,0x28,0x71,0xe2]
+; X64-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x71,0xc2]
+; X64-NEXT:    vpaddd %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfe,0xc0]
+; X64-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <8 x i32>, <8 x i32>* %x2p
+  %res = call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
+  %res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 -1)
+  %res2 = call <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 %x3)
+  %res3 = add <8 x i32> %res, %res1
+  %res4 = add <8 x i32> %res2, %res3
+  ret <8 x i32> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+
+define <4 x i32> @test_int_x86_avx512_mask_vpshldv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_d_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X86-NEXT:    vpshldvd (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x09,0x71,0x18]
+; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0x75,0x08,0x71,0xe2]
+; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x71,0xc2]
+; X86-NEXT:    vpaddd %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfe,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_d_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X64-NEXT:    vpshldvd (%rdi), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x09,0x71,0x1f]
+; X64-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X64-NEXT:    vpshldvd %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0x75,0x08,0x71,0xe2]
+; X64-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x71,0xc2]
+; X64-NEXT:    vpaddd %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfe,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <4 x i32>, <4 x i32>* %x2p
+  %res = call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
+  %res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 -1)
+  %res2 = call <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 %x3)
+  %res3 = add <4 x i32> %res, %res1
+  %res4 = add <4 x i32> %res2, %res3
+  ret <4 x i32> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64> @test_int_x86_avx512_mask_vpshldv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_q_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X86-NEXT:    vpshldvq (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x71,0x18]
+; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x71,0xe2]
+; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x71,0xc2]
+; X86-NEXT:    vpaddq %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc0]
+; X86-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_q_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X64-NEXT:    vpshldvq (%rdi), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x71,0x1f]
+; X64-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X64-NEXT:    vpshldvq %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x71,0xe2]
+; X64-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x71,0xc2]
+; X64-NEXT:    vpaddq %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc0]
+; X64-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <4 x i64>, <4 x i64>* %x2p
+  %res = call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+  %res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 -1)
+  %res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 %x3)
+  %res3 = add <4 x i64> %res, %res1
+  %res4 = add <4 x i64> %res2, %res3
+  ret <4 x i64> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64> @test_int_x86_avx512_mask_vpshldv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_q_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X86-NEXT:    vpshldvq (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x71,0x18]
+; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x71,0xe2]
+; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x71,0xc2]
+; X86-NEXT:    vpaddq %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc0]
+; X86-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_q_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X64-NEXT:    vpshldvq (%rdi), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x71,0x1f]
+; X64-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X64-NEXT:    vpshldvq %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x71,0xe2]
+; X64-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x71,0xc2]
+; X64-NEXT:    vpaddq %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc0]
+; X64-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <2 x i64>, <2 x i64>* %x2p
+  %res = call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+  %res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 -1)
+  %res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 %x3)
+  %res3 = add <2 x i64> %res, %res1
+  %res4 = add <2 x i64> %res2, %res3
+  ret <2 x i64> %res4
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <16 x i16> @test_int_x86_avx512_mask_vpshldv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_w_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X86-NEXT:    vpshldvw (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x70,0x18]
+; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x70,0xe2]
+; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x70,0xc2]
+; X86-NEXT:    vpaddw %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfd,0xc0]
+; X86-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_w_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
+; X64-NEXT:    vpshldvw (%rdi), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x70,0x1f]
+; X64-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
+; X64-NEXT:    vpshldvw %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x70,0xe2]
+; X64-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x70,0xc2]
+; X64-NEXT:    vpaddw %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfd,0xc0]
+; X64-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <16 x i16>, <16 x i16>* %x2p
+  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 -1)
+  %res2 = call <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 %x3)
+  %res3 = add <16 x i16> %res, %res1
+  %res4 = add <16 x i16> %res2, %res3
+  ret <16 x i16> %res4
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+
+define <8 x i16> @test_int_x86_avx512_mask_vpshldv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_vpshldv_w_128:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
+; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X86-NEXT:    vpshldvw (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x70,0x18]
+; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x70,0xe2]
+; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x70,0xc2]
+; X86-NEXT:    vpaddw %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_vpshldv_w_128:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
+; X64-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
+; X64-NEXT:    vpshldvw (%rdi), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x70,0x1f]
+; X64-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
+; X64-NEXT:    vpshldvw %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x70,0xe2]
+; X64-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x70,0xc2]
+; X64-NEXT:    vpaddw %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %x2 = load <8 x i16>, <8 x i16>* %x2p
+  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 -1)
+  %res2 = call <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 %x3)
+  %res3 = add <8 x i16> %res, %res1
+  %res4 = add <8 x i16> %res2, %res3
+  ret <8 x i16> %res4
+}
diff --git a/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll b/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll
index 341adb9..c2a49ab 100644
--- a/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll
@@ -358,7 +358,7 @@
 
 declare <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8> %data, <32 x i8> %src0, i32 %mask)
 
-define <4 x i32>@test_int_x86_avx512_mask_vpshld_d_128(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
+define <4 x i32> @test_int_x86_avx512_mask_vpshld_d_128(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_d_128:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshldd $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0x7d,0x08,0x71,0xd9,0x16]
@@ -379,12 +379,12 @@
 ; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
 ; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %1 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> <i32 22, i32 22, i32 22, i32 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x3
-  %4 = call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
-  %5 = call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %4 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> <i32 22, i32 22, i32 22, i32 22>)
+  %5 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> <i32 22, i32 22, i32 22, i32 22>)
   %6 = bitcast i8 %x4 to <8 x i1>
   %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %7 = select <4 x i1> %extract, <4 x i32> %5, <4 x i32> zeroinitializer
@@ -392,9 +392,8 @@
   %res4 = add <4 x i32> %res3, %7
   ret <4 x i32> %res4
 }
-declare <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32>, <4 x i32>, i32)
 
-define <8 x i32>@test_int_x86_avx512_mask_vpshld_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
+define <8 x i32> @test_int_x86_avx512_mask_vpshld_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_d_256:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshldd $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0x7d,0x28,0x71,0xd9,0x16]
@@ -411,16 +410,15 @@
 ; X64-NEXT:    vpshldd $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x71,0xd1,0x16]
 ; X64-NEXT:    vpaddd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %1 = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x3
-  %4 = call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %4 = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %res2 = add <8 x i32> %3, %4
   ret <8 x i32> %res2
 }
-declare <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32>, <8 x i32>, i32)
 
-define <2 x i64>@test_int_x86_avx512_mask_vpshld_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
+define <2 x i64> @test_int_x86_avx512_mask_vpshld_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_q_128:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshldq $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x71,0xd9,0x16]
@@ -437,17 +435,16 @@
 ; X64-NEXT:    vpshldq $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x71,0xd1,0x16]
 ; X64-NEXT:    vpaddq %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %1 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> <i64 22, i64 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
   %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x3
-  %4 = call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %4 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> <i64 22, i64 22>)
   %res2 = add <2 x i64> %3, %4
   ret <2 x i64> %res2
 }
-declare <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64>, <2 x i64>, i32)
 
-define <4 x i64>@test_int_x86_avx512_mask_vpshld_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
+define <4 x i64> @test_int_x86_avx512_mask_vpshld_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_q_256:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshldq $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x71,0xd9,0x16]
@@ -464,68 +461,65 @@
 ; X64-NEXT:    vpshldq $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x71,0xd1,0x16]
 ; X64-NEXT:    vpaddq %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %1 = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> <i64 22, i64 22, i64 22, i64 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x3
-  %4 = call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %4 = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> <i64 22, i64 22, i64 22, i64 22>)
   %res2 = add <4 x i64> %3, %4
   ret <4 x i64> %res2
 }
-declare <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64>, <4 x i64>, i32)
 
-define <8 x i16>@test_int_x86_avx512_mask_vpshld_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
+define <8 x i16> @test_int_x86_avx512_mask_vpshld_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x16]
+; X86-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x06]
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x16]
+; X86-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x06]
 ; X86-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_128:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x16]
+; X64-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x70,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshldw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x16]
+; X64-NEXT:    vpshldw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x70,0xd1,0x06]
 ; X64-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22)
+  %1 = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x3
-  %4 = call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22)
+  %4 = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %res2 = add <8 x i16> %3, %4
   ret <8 x i16> %res2
 }
-declare <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16>, <8 x i16>, i32)
 
-define <16 x i16>@test_int_x86_avx512_mask_vpshld_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
+define <16 x i16> @test_int_x86_avx512_mask_vpshld_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshld_w_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x16]
+; X86-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x06]
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x16]
+; X86-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x06]
 ; X86-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshld_w_256:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x16]
+; X64-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x70,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshldw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x16]
+; X64-NEXT:    vpshldw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x70,0xd1,0x06]
 ; X64-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22)
+  %1 = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %2 = bitcast i16 %x4 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x3
-  %4 = call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22)
+  %4 = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %res2 = add <16 x i16> %3, %4
   ret <16 x i16> %res2
 }
-declare <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16>, <16 x i16>, i32)
 
-define <4 x i32>@test_int_x86_avx512_mask_vpshrd_d_128(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
+define <4 x i32> @test_int_x86_avx512_mask_vpshrd_d_128(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_d_128:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshrdd $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0x7d,0x08,0x73,0xd9,0x16]
@@ -546,12 +540,12 @@
 ; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
 ; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %1 = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x1, <4 x i32> %x0, <4 x i32> <i32 22, i32 22, i32 22, i32 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x3
-  %4 = call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
-  %5 = call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22)
+  %4 = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x1, <4 x i32> %x0, <4 x i32> <i32 22, i32 22, i32 22, i32 22>)
+  %5 = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x1, <4 x i32> %x0, <4 x i32> <i32 22, i32 22, i32 22, i32 22>)
   %6 = bitcast i8 %x4 to <8 x i1>
   %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %7 = select <4 x i1> %extract, <4 x i32> %5, <4 x i32> zeroinitializer
@@ -559,9 +553,8 @@
   %res4 = add <4 x i32> %res3, %7
   ret <4 x i32> %res4
 }
-declare <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32>, <4 x i32>, i32)
 
-define <8 x i32>@test_int_x86_avx512_mask_vpshrd_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
+define <8 x i32> @test_int_x86_avx512_mask_vpshrd_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_d_256:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshrdd $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0x7d,0x28,0x73,0xd9,0x16]
@@ -578,16 +571,15 @@
 ; X64-NEXT:    vpshrdd $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x73,0xd1,0x16]
 ; X64-NEXT:    vpaddd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %1 = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x1, <8 x i32> %x0, <8 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x3
-  %4 = call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22)
+  %4 = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x1, <8 x i32> %x0, <8 x i32> <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>)
   %res2 = add <8 x i32> %3, %4
   ret <8 x i32> %res2
 }
-declare <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32>, <8 x i32>, i32)
 
-define <2 x i64>@test_int_x86_avx512_mask_vpshrd_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
+define <2 x i64> @test_int_x86_avx512_mask_vpshrd_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_q_128:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshrdq $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x73,0xd9,0x16]
@@ -604,17 +596,16 @@
 ; X64-NEXT:    vpshrdq $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x73,0xd1,0x16]
 ; X64-NEXT:    vpaddq %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %1 = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x1, <2 x i64> %x0, <2 x i64> <i64 22, i64 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
   %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x3
-  %4 = call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22)
+  %4 = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x1, <2 x i64> %x0, <2 x i64> <i64 22, i64 22>)
   %res2 = add <2 x i64> %3, %4
   ret <2 x i64> %res2
 }
-declare <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64>, <2 x i64>, i32)
 
-define <4 x i64>@test_int_x86_avx512_mask_vpshrd_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
+define <4 x i64> @test_int_x86_avx512_mask_vpshrd_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_q_256:
 ; X86:       # %bb.0:
 ; X86-NEXT:    vpshrdq $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x73,0xd9,0x16]
@@ -631,82 +622,76 @@
 ; X64-NEXT:    vpshrdq $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x73,0xd1,0x16]
 ; X64-NEXT:    vpaddq %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %1 = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x1, <4 x i64> %x0, <4 x i64> <i64 22, i64 22, i64 22, i64 22>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x3
-  %4 = call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22)
+  %4 = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x1, <4 x i64> %x0, <4 x i64> <i64 22, i64 22, i64 22, i64 22>)
   %res2 = add <4 x i64> %3, %4
   ret <4 x i64> %res2
 }
-declare <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64>, <4 x i64>, i32)
 
-define <8 x i16>@test_int_x86_avx512_mask_vpshrd_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
+define <8 x i16> @test_int_x86_avx512_mask_vpshrd_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x16]
+; X86-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x06]
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
-; X86-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x16]
+; X86-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x06]
 ; X86-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_128:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x16]
+; X64-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf3,0xfd,0x08,0x72,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshrdw $22, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x16]
+; X64-NEXT:    vpshrdw $6, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x09,0x72,0xd1,0x06]
 ; X64-NEXT:    vpaddw %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22)
+  %1 = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x1, <8 x i16> %x0, <8 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x3
-  %4 = call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22)
+  %4 = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x1, <8 x i16> %x0, <8 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %res2 = add <8 x i16> %3, %4
   ret <8 x i16> %res2
 }
-declare <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16>, <8 x i16>, i32)
 
-define <16 x i16>@test_int_x86_avx512_mask_vpshrd_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
+define <16 x i16> @test_int_x86_avx512_mask_vpshrd_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrd_w_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x16]
+; X86-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x06]
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x16]
+; X86-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x06]
 ; X86-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vpshrd_w_256:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x16]
+; X64-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf3,0xfd,0x28,0x72,0xd9,0x06]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
-; X64-NEXT:    vpshrdw $22, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x16]
+; X64-NEXT:    vpshrdw $6, %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x29,0x72,0xd1,0x06]
 ; X64-NEXT:    vpaddw %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22)
+  %1 = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x1, <16 x i16> %x0, <16 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %2 = bitcast i16 %x4 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x3
-  %4 = call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22)
+  %4 = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x1, <16 x i16> %x0, <16 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>)
   %res2 = add <16 x i16> %3, %4
   ret <16 x i16> %res2
 }
-declare <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16>, <16 x i16>, i32)
 
-declare <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-
-define <8 x i32>@test_int_x86_avx512_mask_vpshrdv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
+define <8 x i32> @test_int_x86_avx512_mask_vpshrdv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_d_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvd (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0x75,0x29,0x73,0x18]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
-; X86-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm4 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x73,0xe2]
-; X86-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x75,0x28,0x73,0xc2]
-; X86-NEXT:    vpaddd %ymm4, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc4]
+; X86-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0x75,0x28,0x73,0xe2]
+; X86-NEXT:    vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x73,0xc2]
+; X86-NEXT:    vpaddd %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfe,0xc0]
 ; X86-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -722,29 +707,30 @@
 ; X64-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <8 x i32>, <8 x i32>* %x2p
-  %res = call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
-  %res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 -1)
-  %res2 = call <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8  %x3)
-  %res3 = add <8 x i32> %res, %res1
-  %res4 = add <8 x i32> %res2, %res3
+  %1 = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x1, <8 x i32> %x0, <8 x i32> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x0
+  %4 = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x1, <8 x i32> %x0, <8 x i32> %x4)
+  %5 = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x1, <8 x i32> %x0, <8 x i32> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> zeroinitializer
+  %res3 = add <8 x i32> %3, %4
+  %res4 = add <8 x i32> %7, %res3
   ret <8 x i32> %res4
 }
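+; The masked variable-shift intrinsics (mask.vpshrdv.* / maskz.vpshrdv.*) are
+; expanded to a plain funnel shift followed by an explicit select on the
+; bitcast mask; zero-masking selects against zeroinitializer, merge-masking
+; against the accumulator operand. A minimal sketch of the merge-masked
+; 128-bit pattern (%amt and %mask are illustrative names):
+;   %r   = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x1, <4 x i32> %x0, <4 x i32> %amt)
+;   %m   = bitcast i8 %mask to <8 x i1>
+;   %lo  = shufflevector <8 x i1> %m, <8 x i1> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+;   %out = select <4 x i1> %lo, <4 x i32> %r, <4 x i32> %x0   ; %x0 is the accumulator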
 
-declare <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-
-define <4 x i32>@test_int_x86_avx512_mask_vpshrdv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
+define <4 x i32> @test_int_x86_avx512_mask_vpshrdv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_d_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvd (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x09,0x73,0x18]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
-; X86-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm4 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x73,0xe2]
-; X86-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x75,0x08,0x73,0xc2]
-; X86-NEXT:    vpaddd %xmm4, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc4]
+; X86-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0x75,0x08,0x73,0xe2]
+; X86-NEXT:    vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x73,0xc2]
+; X86-NEXT:    vpaddd %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfe,0xc0]
 ; X86-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -760,29 +746,32 @@
 ; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <4 x i32>, <4 x i32>* %x2p
-  %res = call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
-  %res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 -1)
-  %res2 = call <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8  %x3)
-  %res3 = add <4 x i32> %res, %res1
-  %res4 = add <4 x i32> %res2, %res3
+  %1 = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x1, <4 x i32> %x0, <4 x i32> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> %x0
+  %4 = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x1, <4 x i32> %x0, <4 x i32> %x4)
+  %5 = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x1, <4 x i32> %x0, <4 x i32> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %7 = select <4 x i1> %extract1, <4 x i32> %5, <4 x i32> zeroinitializer
+  %res3 = add <4 x i32> %3, %4
+  %res4 = add <4 x i32> %7, %res3
   ret <4 x i32> %res4
 }
 
-declare <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-
-define <4 x i64>@test_int_x86_avx512_mask_vpshrdv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
+define <4 x i64> @test_int_x86_avx512_mask_vpshrdv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_q_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvq (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x73,0x18]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
-; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x73,0xe2]
-; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0xf5,0x28,0x73,0xc2]
-; X86-NEXT:    vpaddq %ymm4, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc4]
+; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x73,0xe2]
+; X86-NEXT:    vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x73,0xc2]
+; X86-NEXT:    vpaddq %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc0]
 ; X86-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -798,29 +787,32 @@
 ; X64-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <4 x i64>, <4 x i64>* %x2p
-  %res = call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
-  %res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 -1)
-  %res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8  %x3)
-  %res3 = add <4 x i64> %res, %res1
-  %res4 = add <4 x i64> %res2, %res3
+  %1 = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x1, <4 x i64> %x0, <4 x i64> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x0
+  %4 = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x1, <4 x i64> %x0, <4 x i64> %x4)
+  %5 = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x1, <4 x i64> %x0, <4 x i64> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %7 = select <4 x i1> %extract1, <4 x i64> %5, <4 x i64> zeroinitializer
+  %res3 = add <4 x i64> %3, %4
+  %res4 = add <4 x i64> %7, %res3
   ret <4 x i64> %res4
 }
 
-declare <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-
-define <2 x i64>@test_int_x86_avx512_mask_vpshrdv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
+define <2 x i64> @test_int_x86_avx512_mask_vpshrdv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_q_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvq (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x73,0x18]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
-; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x73,0xe2]
-; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0xf5,0x08,0x73,0xc2]
-; X86-NEXT:    vpaddq %xmm4, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc4]
+; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x73,0xe2]
+; X86-NEXT:    vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x73,0xc2]
+; X86-NEXT:    vpaddq %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc0]
 ; X86-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -836,18 +828,21 @@
 ; X64-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <2 x i64>, <2 x i64>* %x2p
-  %res = call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
-  %res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 -1)
-  %res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8  %x3)
-  %res3 = add <2 x i64> %res, %res1
-  %res4 = add <2 x i64> %res2, %res3
+  %1 = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x1, <2 x i64> %x0, <2 x i64> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x0
+  %4 = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x1, <2 x i64> %x0, <2 x i64> %x4)
+  %5 = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x1, <2 x i64> %x0, <2 x i64> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %6, <8 x i1> %6, <2 x i32> <i32 0, i32 1>
+  %7 = select <2 x i1> %extract1, <2 x i64> %5, <2 x i64> zeroinitializer
+  %res3 = add <2 x i64> %3, %4
+  %res4 = add <2 x i64> %7, %res3
   ret <2 x i64> %res4
 }
 
-declare <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-
-define <16 x i16>@test_int_x86_avx512_mask_vpshrdv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
+define <16 x i16> @test_int_x86_avx512_mask_vpshrdv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_w_256:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -855,9 +850,9 @@
 ; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvw (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x72,0x18]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
-; X86-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x72,0xe2]
-; X86-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0xf5,0x28,0x72,0xc2]
-; X86-NEXT:    vpaddw %ymm4, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc4]
+; X86-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x72,0xe2]
+; X86-NEXT:    vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x72,0xc2]
+; X86-NEXT:    vpaddw %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfd,0xc0]
 ; X86-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -873,29 +868,30 @@
 ; X64-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <16 x i16>, <16 x i16>* %x2p
-  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
-  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 -1)
-  %res2 = call <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16  %x3)
-  %res3 = add <16 x i16> %res, %res1
-  %res4 = add <16 x i16> %res2, %res3
+  %1 = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x1, <16 x i16> %x0, <16 x i16> %x2)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x0
+  %4 = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x1, <16 x i16> %x0, <16 x i16> %x4)
+  %5 = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x1, <16 x i16> %x0, <16 x i16> %x4)
+  %6 = bitcast i16 %x3 to <16 x i1>
+  %7 = select <16 x i1> %6, <16 x i16> %5, <16 x i16> zeroinitializer
+  %res3 = add <16 x i16> %3, %4
+  %res4 = add <16 x i16> %7, %res3
   ret <16 x i16> %res4
 }
 
-declare <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-
-define <8 x i16>@test_int_x86_avx512_mask_vpshrdv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
+define <8 x i16> @test_int_x86_avx512_mask_vpshrdv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshrdv_w_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
 ; X86-NEXT:    vpshrdvw (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x72,0x18]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
-; X86-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x72,0xe2]
-; X86-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0xf5,0x08,0x72,0xc2]
-; X86-NEXT:    vpaddw %xmm4, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc4]
+; X86-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x72,0xe2]
+; X86-NEXT:    vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x72,0xc2]
+; X86-NEXT:    vpaddw %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfd,0xc0]
 ; X86-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -911,29 +907,30 @@
 ; X64-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <8 x i16>, <8 x i16>* %x2p
-  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
-  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 -1)
-  %res2 = call <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8  %x3)
-  %res3 = add <8 x i16> %res, %res1
-  %res4 = add <8 x i16> %res2, %res3
+  %1 = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x1, <8 x i16> %x0, <8 x i16> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x0
+  %4 = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x1, <8 x i16> %x0, <8 x i16> %x4)
+  %5 = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x1, <8 x i16> %x0, <8 x i16> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %7 = select <8 x i1> %6, <8 x i16> %5, <8 x i16> zeroinitializer
+  %res3 = add <8 x i16> %3, %4
+  %res4 = add <8 x i16> %7, %res3
   ret <8 x i16> %res4
 }
 
-declare <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-
-define <8 x i32>@test_int_x86_avx512_mask_vpshldv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
+define <8 x i32> @test_int_x86_avx512_mask_vpshldv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_d_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
 ; X86-NEXT:    vpshldvd (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0x75,0x29,0x71,0x18]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
-; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm4 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x71,0xe2]
-; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x75,0x28,0x71,0xc2]
-; X86-NEXT:    vpaddd %ymm4, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc4]
+; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0x75,0x28,0x71,0xe2]
+; X86-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0x71,0xc2]
+; X86-NEXT:    vpaddd %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfe,0xc0]
 ; X86-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -949,29 +946,30 @@
 ; X64-NEXT:    vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <8 x i32>, <8 x i32>* %x2p
-  %res = call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
-  %res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 -1)
-  %res2 = call <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8  %x3)
-  %res3 = add <8 x i32> %res, %res1
-  %res4 = add <8 x i32> %res2, %res3
+  %1 = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x0
+  %4 = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+  %5 = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> zeroinitializer
+  %res3 = add <8 x i32> %3, %4
+  %res4 = add <8 x i32> %7, %res3
   ret <8 x i32> %res4
 }
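+; The left-shift variants map the same way but keep the original operand
+; order: vpshldv(%x0, %x1, %amt) becomes fshl(%x0, %x1, %amt), with %x0 as
+; the high half of the concatenation and also as the merge-mask passthrough.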
 
-declare <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-
-define <4 x i32>@test_int_x86_avx512_mask_vpshldv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
+define <4 x i32> @test_int_x86_avx512_mask_vpshldv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_d_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
 ; X86-NEXT:    vpshldvd (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0x75,0x09,0x71,0x18]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
-; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm4 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x71,0xe2]
-; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x75,0x08,0x71,0xc2]
-; X86-NEXT:    vpaddd %xmm4, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc4]
+; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0x75,0x08,0x71,0xe2]
+; X86-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0x71,0xc2]
+; X86-NEXT:    vpaddd %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfe,0xc0]
 ; X86-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -987,29 +985,32 @@
 ; X64-NEXT:    vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <4 x i32>, <4 x i32>* %x2p
-  %res = call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
-  %res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 -1)
-  %res2 = call <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8  %x3)
-  %res3 = add <4 x i32> %res, %res1
-  %res4 = add <4 x i32> %res2, %res3
+  %1 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> %x0
+  %4 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+  %5 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %7 = select <4 x i1> %extract1, <4 x i32> %5, <4 x i32> zeroinitializer
+  %res3 = add <4 x i32> %3, %4
+  %res4 = add <4 x i32> %7, %res3
   ret <4 x i32> %res4
 }
 
-declare <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-
-define <4 x i64>@test_int_x86_avx512_mask_vpshldv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
+define <4 x i64> @test_int_x86_avx512_mask_vpshldv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_q_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
 ; X86-NEXT:    vpshldvq (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x71,0x18]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
-; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x71,0xe2]
-; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0xf5,0x28,0x71,0xc2]
-; X86-NEXT:    vpaddq %ymm4, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc4]
+; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x71,0xe2]
+; X86-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x71,0xc2]
+; X86-NEXT:    vpaddq %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc0]
 ; X86-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -1025,29 +1026,32 @@
 ; X64-NEXT:    vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <4 x i64>, <4 x i64>* %x2p
-  %res = call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
-  %res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 -1)
-  %res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8  %x3)
-  %res3 = add <4 x i64> %res, %res1
-  %res4 = add <4 x i64> %res2, %res3
+  %1 = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x0
+  %4 = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4)
+  %5 = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %7 = select <4 x i1> %extract1, <4 x i64> %5, <4 x i64> zeroinitializer
+  %res3 = add <4 x i64> %3, %4
+  %res4 = add <4 x i64> %7, %res3
   ret <4 x i64> %res4
 }
 
-declare <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-
-define <2 x i64>@test_int_x86_avx512_mask_vpshldv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
+define <2 x i64> @test_int_x86_avx512_mask_vpshldv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_q_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
 ; X86-NEXT:    vpshldvq (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x71,0x18]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
-; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x71,0xe2]
-; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0xf5,0x08,0x71,0xc2]
-; X86-NEXT:    vpaddq %xmm4, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc4]
+; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x71,0xe2]
+; X86-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x71,0xc2]
+; X86-NEXT:    vpaddq %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc0]
 ; X86-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -1063,18 +1067,21 @@
 ; X64-NEXT:    vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <2 x i64>, <2 x i64>* %x2p
-  %res = call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
-  %res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 -1)
-  %res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8  %x3)
-  %res3 = add <2 x i64> %res, %res1
-  %res4 = add <2 x i64> %res2, %res3
+  %1 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x0
+  %4 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4)
+  %5 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %6, <8 x i1> %6, <2 x i32> <i32 0, i32 1>
+  %7 = select <2 x i1> %extract1, <2 x i64> %5, <2 x i64> zeroinitializer
+  %res3 = add <2 x i64> %3, %4
+  %res4 = add <2 x i64> %7, %res3
   ret <2 x i64> %res4
 }
 
-declare <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-
-define <16 x i16>@test_int_x86_avx512_mask_vpshldv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
+define <16 x i16> @test_int_x86_avx512_mask_vpshldv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_w_256:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1082,9 +1089,9 @@
 ; X86-NEXT:    vmovdqa %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
 ; X86-NEXT:    vpshldvw (%eax), %ymm1, %ymm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x29,0x70,0x18]
 ; X86-NEXT:    vmovdqa %ymm0, %ymm4 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xe0]
-; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x70,0xe2]
-; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0xf5,0x28,0x70,0xc2]
-; X86-NEXT:    vpaddw %ymm4, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc4]
+; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm4 # encoding: [0x62,0xf2,0xf5,0x28,0x70,0xe2]
+; X86-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0x70,0xc2]
+; X86-NEXT:    vpaddw %ymm0, %ymm4, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xdd,0xfd,0xc0]
 ; X86-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -1100,29 +1107,30 @@
 ; X64-NEXT:    vpaddw %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <16 x i16>, <16 x i16>* %x2p
-  %res = call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
-  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 -1)
-  %res2 = call <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16  %x3)
-  %res3 = add <16 x i16> %res, %res1
-  %res4 = add <16 x i16> %res2, %res3
+  %1 = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %x0
+  %4 = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4)
+  %5 = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4)
+  %6 = bitcast i16 %x3 to <16 x i1>
+  %7 = select <16 x i1> %6, <16 x i16> %5, <16 x i16> zeroinitializer
+  %res3 = add <16 x i16> %3, %4
+  %res4 = add <16 x i16> %7, %res3
   ret <16 x i16> %res4
 }
 
-declare <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-
-define <8 x i16>@test_int_x86_avx512_mask_vpshldv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
+define <8 x i16> @test_int_x86_avx512_mask_vpshldv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_vpshldv_w_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT:    kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
 ; X86-NEXT:    vpshldvw (%eax), %xmm1, %xmm3 {%k1} # encoding: [0x62,0xf2,0xf5,0x09,0x70,0x18]
 ; X86-NEXT:    vmovdqa %xmm0, %xmm4 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xe0]
-; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm4 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x70,0xe2]
-; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0xf5,0x08,0x70,0xc2]
-; X86-NEXT:    vpaddw %xmm4, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc4]
+; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm4 # encoding: [0x62,0xf2,0xf5,0x08,0x70,0xe2]
+; X86-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0x70,0xc2]
+; X86-NEXT:    vpaddw %xmm0, %xmm4, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfd,0xc0]
 ; X86-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
@@ -1138,11 +1146,27 @@
 ; X64-NEXT:    vpaddw %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %x2 = load <8 x i16>, <8 x i16>* %x2p
-  %res = call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
-  %res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 -1)
-  %res2 = call <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8  %x3)
-  %res3 = add <8 x i16> %res, %res1
-  %res4 = add <8 x i16> %res2, %res3
+  %1 = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %x0
+  %4 = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4)
+  %5 = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4)
+  %6 = bitcast i8 %x3 to <8 x i1>
+  %7 = select <8 x i1> %6, <8 x i16> %5, <8 x i16> zeroinitializer
+  %res3 = add <8 x i16> %3, %4
+  %res4 = add <8 x i16> %7, %res3
   ret <8 x i16> %res4
 }
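+; Target-independent funnel-shift declarations shared by all of the rewritten
+; tests in this file. llvm.fshl/llvm.fshr take the shift amount as a full
+; vector operand, so the immediate-count tests above pass constant splats.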
 
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
+declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
diff --git a/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll
index 70848be..896a30a 100644
--- a/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512vbmivl-intrinsics-fast-isel.ll
@@ -174,5 +174,123 @@
   ret <4 x i64> %6
 }
 
+define <2 x i64> @test_mm_mask_multishift_epi64_epi8(<2 x i64> %__W, i16 zeroext %__M, <2 x i64> %__X, <2 x i64> %__Y) {
+; X86-LABEL: test_mm_mask_multishift_epi64_epi8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmultishiftqb %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_mm_mask_multishift_epi64_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpmultishiftqb %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__X to <16 x i8>
+  %1 = bitcast <2 x i64> %__Y to <16 x i8>
+  %2 = tail call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %0, <16 x i8> %1)
+  %3 = bitcast <2 x i64> %__W to <16 x i8>
+  %4 = bitcast i16 %__M to <16 x i1>
+  %5 = select <16 x i1> %4, <16 x i8> %2, <16 x i8> %3
+  %6 = bitcast <16 x i8> %5 to <2 x i64>
+  ret <2 x i64> %6
+}
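+; VPMULTISHIFTQB selects, for each destination byte, an unaligned 8-bit field
+; from the corresponding 64-bit lane of the second source; the matching
+; control byte of the first source gives the starting bit offset, taken
+; modulo 64 and wrapping within the qword. In these fast-isel tests the
+; masking is expressed as a select between the intrinsic result and the
+; passthrough (or zero) vector, mirroring the C-level _mm_mask_/_mm_maskz_
+; multishift intrinsics.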
+
+define <2 x i64> @test_mm_maskz_multishift_epi64_epi8(i16 zeroext %__M, <2 x i64> %__X, <2 x i64> %__Y) {
+; X86-LABEL: test_mm_maskz_multishift_epi64_epi8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_mm_maskz_multishift_epi64_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <2 x i64> %__X to <16 x i8>
+  %1 = bitcast <2 x i64> %__Y to <16 x i8>
+  %2 = tail call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %0, <16 x i8> %1)
+  %3 = bitcast i16 %__M to <16 x i1>
+  %4 = select <16 x i1> %3, <16 x i8> %2, <16 x i8> zeroinitializer
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  ret <2 x i64> %5
+}
+
+define <2 x i64> @test_mm_multishift_epi64_epi8(<2 x i64> %__X, <2 x i64> %__Y) {
+; CHECK-LABEL: test_mm_multishift_epi64_epi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
+entry:
+  %0 = bitcast <2 x i64> %__X to <16 x i8>
+  %1 = bitcast <2 x i64> %__Y to <16 x i8>
+  %2 = tail call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %0, <16 x i8> %1)
+  %3 = bitcast <16 x i8> %2 to <2 x i64>
+  ret <2 x i64> %3
+}
+
+define <4 x i64> @test_mm256_mask_multishift_epi64_epi8(<4 x i64> %__W, i32 %__M, <4 x i64> %__X, <4 x i64> %__Y) {
+; X86-LABEL: test_mm256_mask_multishift_epi64_epi8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmultishiftqb %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask_multishift_epi64_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpmultishiftqb %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__X to <32 x i8>
+  %1 = bitcast <4 x i64> %__Y to <32 x i8>
+  %2 = tail call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %0, <32 x i8> %1)
+  %3 = bitcast <4 x i64> %__W to <32 x i8>
+  %4 = bitcast i32 %__M to <32 x i1>
+  %5 = select <32 x i1> %4, <32 x i8> %2, <32 x i8> %3
+  %6 = bitcast <32 x i8> %5 to <4 x i64>
+  ret <4 x i64> %6
+}
+
+define <4 x i64> @test_mm256_maskz_multishift_epi64_epi8(i32 %__M, <4 x i64> %__X, <4 x i64> %__Y) {
+; X86-LABEL: test_mm256_maskz_multishift_epi64_epi8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_mm256_maskz_multishift_epi64_epi8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovd %edi, %k1
+; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT:    retq
+entry:
+  %0 = bitcast <4 x i64> %__X to <32 x i8>
+  %1 = bitcast <4 x i64> %__Y to <32 x i8>
+  %2 = tail call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %0, <32 x i8> %1) #3
+  %3 = bitcast i32 %__M to <32 x i1>
+  %4 = select <32 x i1> %3, <32 x i8> %2, <32 x i8> zeroinitializer
+  %5 = bitcast <32 x i8> %4 to <4 x i64>
+  ret <4 x i64> %5
+}
+
+define <4 x i64> @test_mm256_multishift_epi64_epi8(<4 x i64> %__X, <4 x i64> %__Y) {
+; CHECK-LABEL: test_mm256_multishift_epi64_epi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
+entry:
+  %0 = bitcast <4 x i64> %__X to <32 x i8>
+  %1 = bitcast <4 x i64> %__Y to <32 x i8>
+  %2 = tail call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %0, <32 x i8> %1)
+  %3 = bitcast <32 x i8> %2 to <4 x i64>
+  ret <4 x i64> %3
+}
+
 declare <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>)
 declare <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>)
+declare <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8>, <16 x i8>)
+declare <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8>, <32 x i8>)
diff --git a/test/CodeGen/X86/avx512vbmivl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vbmivl-intrinsics-upgrade.ll
index 7572e40..4b1d51d 100644
--- a/test/CodeGen/X86/avx512vbmivl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vbmivl-intrinsics-upgrade.ll
@@ -62,6 +62,66 @@
   ret <32 x i8> %res4
 }
 
+declare <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
+
+define <16 x i8>@test_int_x86_avx512_mask_pmultishift_qb_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pmultishift_qb_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x83,0xd9]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x83,0xd1]
+; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x83,0xc1]
+; X86-NEXT:    vpaddb %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc3]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pmultishift_qb_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x83,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x83,0xd1]
+; X64-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x83,0xc1]
+; X64-NEXT:    vpaddb %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc3]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
+  %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> zeroinitializer, i16 %x3)
+  %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
+  %res3 = add <16 x i8> %res, %res1
+  %res4 = add <16 x i8> %res3, %res2
+  ret <16 x i8> %res4
+}
+
+declare <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+
+define <32 x i8>@test_int_x86_avx512_mask_pmultishift_qb_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pmultishift_qb_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x83,0xd9]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x83,0xd1]
+; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x83,0xc1]
+; X86-NEXT:    vpaddb %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfc,0xc3]
+; X86-NEXT:    vpaddb %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pmultishift_qb_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x83,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x83,0xd1]
+; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x83,0xc1]
+; X64-NEXT:    vpaddb %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfc,0xc3]
+; X64-NEXT:    vpaddb %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %res = call <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3)
+  %res1 = call <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> zeroinitializer, i32 %x3)
+  %res2 = call <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
+  %res3 = add <32 x i8> %res, %res1
+  %res4 = add <32 x i8> %res3, %res2
+  ret <32 x i8> %res4
+}
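+; The declarations and calls above intentionally keep the old masked
+; intrinsic signatures: this -upgrade test checks that the auto-upgrade
+; machinery rewrites them to the new unmasked llvm.x86.avx512.pmultishift.qb.*
+; intrinsics plus an explicit select, producing the codegen checked here.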
+
 declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
 
 define <16 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
diff --git a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
index 79f3210..7c03d78 100644
--- a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
@@ -70,63 +70,71 @@
   ret <32 x i8> %res4
 }
 
-declare <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
+declare <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8>, <16 x i8>)
 
 define <16 x i8>@test_int_x86_avx512_mask_pmultishift_qb_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmultishift_qb_128:
 ; X86:       # %bb.0:
+; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x83,0xd9]
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x83,0xd1]
-; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x83,0xd9]
-; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x83,0xc1]
-; X86-NEXT:    vpaddb %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfc,0xc0]
+; X86-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x83,0xc1]
+; X86-NEXT:    vpaddb %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc3]
 ; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmultishift_qb_128:
 ; X64:       # %bb.0:
-; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
 ; X64-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x83,0xd9]
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
 ; X64-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x83,0xd1]
 ; X64-NEXT:    vpmultishiftqb %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x83,0xc1]
 ; X64-NEXT:    vpaddb %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc3]
 ; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
-  %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> zeroinitializer, i16 %x3)
-  %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
-  %res3 = add <16 x i8> %res, %res1
-  %res4 = add <16 x i8> %res3, %res2
+  %1 = call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1)
+  %2 = bitcast i16 %x3 to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %x2
+  %4 = call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1)
+  %5 = bitcast i16 %x3 to <16 x i1>
+  %6 = select <16 x i1> %5, <16 x i8> %4, <16 x i8> zeroinitializer
+  %7 = call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1)
+  %res3 = add <16 x i8> %3, %6
+  %res4 = add <16 x i8> %res3, %7
   ret <16 x i8> %res4
 }
 
-declare <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+declare <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8>, <32 x i8>)
 
 define <32 x i8>@test_int_x86_avx512_mask_pmultishift_qb_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmultishift_qb_256:
 ; X86:       # %bb.0:
+; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x83,0xd9]
 ; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x83,0xd1]
-; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x83,0xd9]
-; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x83,0xc1]
-; X86-NEXT:    vpaddb %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfc,0xc0]
+; X86-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x83,0xc1]
+; X86-NEXT:    vpaddb %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfc,0xc3]
 ; X86-NEXT:    vpaddb %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmultishift_qb_256:
 ; X64:       # %bb.0:
+; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x83,0xd9]
 ; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
 ; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x83,0xd1]
-; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x83,0xd9]
-; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x83,0xc1]
-; X64-NEXT:    vpaddb %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfc,0xc0]
+; X64-NEXT:    vpmultishiftqb %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x83,0xc1]
+; X64-NEXT:    vpaddb %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfc,0xc3]
 ; X64-NEXT:    vpaddb %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  %res = call <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3)
-  %res1 = call <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> zeroinitializer, i32 %x3)
-  %res2 = call <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
-  %res3 = add <32 x i8> %res, %res1
-  %res4 = add <32 x i8> %res3, %res2
+  %1 = call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1)
+  %2 = bitcast i32 %x3 to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %x2
+  %4 = call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1)
+  %5 = bitcast i32 %x3 to <32 x i1>
+  %6 = select <32 x i1> %5, <32 x i8> %4, <32 x i8> zeroinitializer
+  %7 = call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %x0, <32 x i8> %x1)
+  %res3 = add <32 x i8> %3, %6
+  %res4 = add <32 x i8> %res3, %7
   ret <32 x i8> %res4
 }
 
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
index 6c1cfa8..8b513f0 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
@@ -6626,13 +6626,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
-  %1 = tail call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %0, i32 5)
+  %1 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <4 x i32> %1 to <2 x i64>
   ret <2 x i64> %2
 }
 
-declare <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32>, i32)
-
 define <2 x i64> @test_mm_mask_rol_epi32(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
 ; X86-LABEL: test_mm_mask_rol_epi32:
 ; X86:       # %bb.0: # %entry
@@ -6648,7 +6646,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
-  %1 = tail call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %0, i32 5)
+  %1 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <2 x i64> %__W to <4 x i32>
   %3 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -6672,7 +6670,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
-  %1 = tail call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %0, i32 5)
+  %1 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> zeroinitializer
@@ -6687,13 +6685,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
-  %1 = tail call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %0, i32 5)
+  %1 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <8 x i32> %1 to <4 x i64>
   ret <4 x i64> %2
 }
 
-declare <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32>, i32)
-
 define <4 x i64> @test_mm256_mask_rol_epi32(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
 ; X86-LABEL: test_mm256_mask_rol_epi32:
 ; X86:       # %bb.0: # %entry
@@ -6709,7 +6705,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
-  %1 = tail call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %0, i32 5)
+  %1 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <4 x i64> %__W to <8 x i32>
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
@@ -6732,7 +6728,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
-  %1 = tail call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %0, i32 5)
+  %1 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast i8 %__U to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> zeroinitializer
   %4 = bitcast <8 x i32> %3 to <4 x i64>
@@ -6745,12 +6741,10 @@
 ; CHECK-NEXT:    vprolq $5, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %__A, i32 5)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> <i64 5, i64 5>)
   ret <2 x i64> %0
 }
 
-declare <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64>, i32)
-
 define <2 x i64> @test_mm_mask_rol_epi64(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
 ; X86-LABEL: test_mm_mask_rol_epi64:
 ; X86:       # %bb.0: # %entry
@@ -6765,7 +6759,7 @@
 ; X64-NEXT:    vprolq $5, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %__A, i32 5)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> <i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__W
@@ -6786,7 +6780,7 @@
 ; X64-NEXT:    vprolq $5, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %__A, i32 5)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> <i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -6799,12 +6793,10 @@
 ; CHECK-NEXT:    vprolq $5, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %__A, i32 5)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> <i64 5, i64 5, i64 5, i64 5>)
   ret <4 x i64> %0
 }
 
-declare <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64>, i32)
-
 define <4 x i64> @test_mm256_mask_rol_epi64(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
 ; X86-LABEL: test_mm256_mask_rol_epi64:
 ; X86:       # %bb.0: # %entry
@@ -6819,7 +6811,7 @@
 ; X64-NEXT:    vprolq $5, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %__A, i32 5)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> <i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__W
@@ -6840,7 +6832,7 @@
 ; X64-NEXT:    vprolq $5, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %__A, i32 5)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> <i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -6855,7 +6847,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %0, <4 x i32> %1)
+  %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> %1)
   %3 = bitcast <4 x i32> %2 to <2 x i64>
   ret <2 x i64> %3
 }
@@ -6876,7 +6868,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %0, <4 x i32> %1)
+  %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> %1)
   %3 = bitcast <2 x i64> %__W to <4 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -6901,7 +6893,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %0, <4 x i32> %1)
+  %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> %1)
   %3 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %4 = select <4 x i1> %extract.i, <4 x i32> %2, <4 x i32> zeroinitializer
@@ -6917,7 +6909,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %0, <8 x i32> %1)
+  %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> %1)
   %3 = bitcast <8 x i32> %2 to <4 x i64>
   ret <4 x i64> %3
 }
@@ -6938,7 +6930,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %0, <8 x i32> %1)
+  %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> %1)
   %3 = bitcast <4 x i64> %__W to <8 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
@@ -6962,7 +6954,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %0, <8 x i32> %1)
+  %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> %1)
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
   %5 = bitcast <8 x i32> %4 to <4 x i64>
@@ -6975,7 +6967,7 @@
 ; CHECK-NEXT:    vprolvq %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> %__B)
   ret <2 x i64> %0
 }
 
@@ -6993,7 +6985,7 @@
 ; X64-NEXT:    vprolvq %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__W
@@ -7014,7 +7006,7 @@
 ; X64-NEXT:    vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+  %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -7027,7 +7019,7 @@
 ; CHECK-NEXT:    vprolvq %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> %__B)
   ret <4 x i64> %0
 }
 
@@ -7045,7 +7037,7 @@
 ; X64-NEXT:    vprolvq %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__W
@@ -7066,7 +7058,7 @@
 ; X64-NEXT:    vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+  %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -7080,13 +7072,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
-  %1 = tail call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %0, i32 5)
+  %1 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <4 x i32> %1 to <2 x i64>
   ret <2 x i64> %2
 }
 
-declare <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32>, i32)
-
 define <2 x i64> @test_mm_mask_ror_epi32(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
 ; X86-LABEL: test_mm_mask_ror_epi32:
 ; X86:       # %bb.0: # %entry
@@ -7102,7 +7092,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
-  %1 = tail call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %0, i32 5)
+  %1 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <2 x i64> %__W to <4 x i32>
   %3 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -7126,7 +7116,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
-  %1 = tail call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %0, i32 5)
+  %1 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> zeroinitializer
@@ -7141,13 +7131,11 @@
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
-  %1 = tail call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %0, i32 5)
+  %1 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <8 x i32> %1 to <4 x i64>
   ret <4 x i64> %2
 }
 
-declare <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32>, i32)
-
 define <4 x i64> @test_mm256_mask_ror_epi32(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
 ; X86-LABEL: test_mm256_mask_ror_epi32:
 ; X86:       # %bb.0: # %entry
@@ -7163,7 +7151,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
-  %1 = tail call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %0, i32 5)
+  %1 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast <4 x i64> %__W to <8 x i32>
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
@@ -7186,7 +7174,7 @@
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
-  %1 = tail call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %0, i32 5)
+  %1 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
   %2 = bitcast i8 %__U to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> zeroinitializer
   %4 = bitcast <8 x i32> %3 to <4 x i64>
@@ -7199,12 +7187,10 @@
 ; CHECK-NEXT:    vprorq $5, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %__A, i32 5)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> <i64 5, i64 5>)
   ret <2 x i64> %0
 }
 
-declare <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64>, i32)
-
 define <2 x i64> @test_mm_mask_ror_epi64(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
 ; X86-LABEL: test_mm_mask_ror_epi64:
 ; X86:       # %bb.0: # %entry
@@ -7219,7 +7205,7 @@
 ; X64-NEXT:    vprorq $5, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %__A, i32 5)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> <i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__W
@@ -7240,7 +7226,7 @@
 ; X64-NEXT:    vprorq $5, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %__A, i32 5)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> <i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -7253,12 +7239,10 @@
 ; CHECK-NEXT:    vprorq $5, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %__A, i32 5)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> <i64 5, i64 5, i64 5, i64 5>)
   ret <4 x i64> %0
 }
 
-declare <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64>, i32)
-
 define <4 x i64> @test_mm256_mask_ror_epi64(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
 ; X86-LABEL: test_mm256_mask_ror_epi64:
 ; X86:       # %bb.0: # %entry
@@ -7273,7 +7257,7 @@
 ; X64-NEXT:    vprorq $5, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %__A, i32 5)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> <i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__W
@@ -7294,7 +7278,7 @@
 ; X64-NEXT:    vprorq $5, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %__A, i32 5)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> <i64 5, i64 5, i64 5, i64 5>)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -7309,7 +7293,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %0, <4 x i32> %1)
+  %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> %1)
   %3 = bitcast <4 x i32> %2 to <2 x i64>
   ret <2 x i64> %3
 }
@@ -7330,7 +7314,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %0, <4 x i32> %1)
+  %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> %1)
   %3 = bitcast <2 x i64> %__W to <4 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -7355,7 +7339,7 @@
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
   %1 = bitcast <2 x i64> %__B to <4 x i32>
-  %2 = tail call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %0, <4 x i32> %1)
+  %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %0, <4 x i32> %0, <4 x i32> %1)
   %3 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %4 = select <4 x i1> %extract.i, <4 x i32> %2, <4 x i32> zeroinitializer
@@ -7371,7 +7355,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %0, <8 x i32> %1)
+  %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> %1)
   %3 = bitcast <8 x i32> %2 to <4 x i64>
   ret <4 x i64> %3
 }
@@ -7392,7 +7376,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %0, <8 x i32> %1)
+  %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> %1)
   %3 = bitcast <4 x i64> %__W to <8 x i32>
   %4 = bitcast i8 %__U to <8 x i1>
   %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
@@ -7416,7 +7400,7 @@
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
   %1 = bitcast <4 x i64> %__B to <8 x i32>
-  %2 = tail call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %0, <8 x i32> %1)
+  %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %0, <8 x i32> %0, <8 x i32> %1)
   %3 = bitcast i8 %__U to <8 x i1>
   %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
   %5 = bitcast <8 x i32> %4 to <4 x i64>
@@ -7429,7 +7413,7 @@
 ; CHECK-NEXT:    vprorvq %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> %__B)
   ret <2 x i64> %0
 }
 
@@ -7447,7 +7431,7 @@
 ; X64-NEXT:    vprorvq %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__W
@@ -7468,7 +7452,7 @@
 ; X64-NEXT:    vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+  %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__A, <2 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
   %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -7481,7 +7465,7 @@
 ; CHECK-NEXT:    vprorvq %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> %__B)
   ret <4 x i64> %0
 }
 
@@ -7499,7 +7483,7 @@
 ; X64-NEXT:    vprorvq %ymm2, %ymm1, %ymm0 {%k1}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__W
@@ -7520,7 +7504,7 @@
 ; X64-NEXT:    vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
-  %0 = tail call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+  %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__A, <4 x i64> %__B)
   %1 = bitcast i8 %__U to <8 x i1>
   %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -7572,13 +7556,13 @@
 declare void @llvm.masked.compressstore.v8f32(<8 x float>, float*, <8 x i1>)
 declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32*, <4 x i1>)
 declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32*, <8 x i1>)
-declare <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32>, <8 x i32>)
-declare <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64>, <4 x i64>)
-declare <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32>, <8 x i32>)
-declare <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64>, <4 x i64>)
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
 
 !0 = !{i32 1}
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 6e78ab9..f8507ae 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -12592,6 +12592,590 @@
   ret <4 x i64> %res4
 }
 
+declare <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx512_prorv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prorv_d_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
+; X86-NEXT:    vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
+; X86-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prorv_d_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
+; X64-NEXT:    vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
+; X64-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+  %4 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+  %7 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+  %res3 = add <4 x i32> %3, %6
+  %res4 = add <4 x i32> %res3, %7
+  ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx512_prorv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prorv_d_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
+; X86-NEXT:    vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
+; X86-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X86-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prorv_d_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
+; X64-NEXT:    vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
+; X64-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X64-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+  %4 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+  %7 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+  %res3 = add <8 x i32> %3, %6
+  %res4 = add <8 x i32> %res3, %7
+  ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64>, <2 x i64>)
+
+define <2 x i64>@test_int_x86_avx512_prorv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prorv_q_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
+; X86-NEXT:    vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
+; X86-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X86-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prorv_q_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
+; X64-NEXT:    vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
+; X64-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X64-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+  %4 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+  %7 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+  %res3 = add <2 x i64> %3, %6
+  %res4 = add <2 x i64> %res3, %7
+  ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64>, <4 x i64>)
+
+define <4 x i64>@test_int_x86_avx512_prorv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prorv_q_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
+; X86-NEXT:    vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
+; X86-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X86-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prorv_q_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
+; X64-NEXT:    vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
+; X64-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X64-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+  %4 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+  %7 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+  %res3 = add <4 x i64> %3, %6
+  %res4 = add <4 x i64> %res3, %7
+  ret <4 x i64> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32>, i32)
+
+define <4 x i32>@test_int_x86_avx512_prol_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prol_d_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
+; X86-NEXT:    vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
+; X86-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prol_d_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
+; X64-NEXT:    vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
+; X64-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+  %4 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+  %7 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
+  %res3 = add <4 x i32> %3, %6
+  %res4 = add <4 x i32> %res3, %7
+  ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32>, i32)
+
+define <8 x i32>@test_int_x86_avx512_prol_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prol_d_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
+; X86-NEXT:    vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
+; X86-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prol_d_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
+; X64-NEXT:    vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
+; X64-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+  %4 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+  %7 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
+  %res3 = add <8 x i32> %3, %6
+  %res4 = add <8 x i32> %res3, %7
+  ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64>, i32)
+
+define <2 x i64>@test_int_x86_avx512_prol_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prol_q_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
+; X86-NEXT:    vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
+; X86-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X86-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prol_q_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
+; X64-NEXT:    vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
+; X64-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X64-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+  %4 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+  %7 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
+  %res3 = add <2 x i64> %3, %6
+  %res4 = add <2 x i64> %res3, %7
+  ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64>, i32)
+
+define <4 x i64>@test_int_x86_avx512_prol_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prol_q_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
+; X86-NEXT:    vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
+; X86-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X86-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prol_q_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
+; X64-NEXT:    vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
+; X64-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X64-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+  %4 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+  %7 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
+  %res3 = add <4 x i64> %3, %6
+  %res4 = add <4 x i64> %res3, %7
+  ret <4 x i64> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx512_prolv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prolv_d_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
+; X86-NEXT:    vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
+; X86-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prolv_d_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
+; X64-NEXT:    vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
+; X64-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+  %4 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+  %7 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+  %res3 = add <4 x i32> %3, %6
+  %res4 = add <4 x i32> %res3, %7
+  ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx512_prolv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prolv_d_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
+; X86-NEXT:    vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
+; X86-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X86-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prolv_d_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
+; X64-NEXT:    vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
+; X64-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X64-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+  %4 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+  %7 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+  %res3 = add <8 x i32> %3, %6
+  %res4 = add <8 x i32> %res3, %7
+  ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64>, <2 x i64>)
+
+define <2 x i64>@test_int_x86_avx512_prolv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prolv_q_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
+; X86-NEXT:    vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
+; X86-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X86-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prolv_q_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
+; X64-NEXT:    vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
+; X64-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X64-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+  %4 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+  %7 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+  %res3 = add <2 x i64> %3, %6
+  %res4 = add <2 x i64> %res3, %7
+  ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64>, <4 x i64>)
+
+define <4 x i64>@test_int_x86_avx512_prolv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_prolv_q_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
+; X86-NEXT:    vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
+; X86-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X86-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_prolv_q_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
+; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
+; X64-NEXT:    vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
+; X64-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X64-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+  %4 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+  %7 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+  %res3 = add <4 x i64> %3, %6
+  %res4 = add <4 x i64> %res3, %7
+  ret <4 x i64> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32>, i32)
+
+define <4 x i32>@test_int_x86_avx512_pror_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_pror_d_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
+; X86-NEXT:    vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
+; X86-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_pror_d_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
+; X64-NEXT:    vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
+; X64-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+  %4 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+  %7 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
+  %res3 = add <4 x i32> %3, %6
+  %res4 = add <4 x i32> %res3, %7
+  ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32>, i32)
+
+define <8 x i32>@test_int_x86_avx512_pror_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_pror_d_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
+; X86-NEXT:    vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
+; X86-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_pror_d_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
+; X64-NEXT:    vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
+; X64-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+  %4 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+  %7 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
+  %res3 = add <8 x i32> %3, %6
+  %res4 = add <8 x i32> %res3, %7
+  ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64>, i32)
+
+define <2 x i64>@test_int_x86_avx512_pror_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_pror_q_128:
+; X86:       # %bb.0:
+; X86-NEXT:    vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
+; X86-NEXT:    vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
+; X86-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X86-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_pror_q_128:
+; X64:       # %bb.0:
+; X64-NEXT:    vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
+; X64-NEXT:    vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
+; X64-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X64-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+  %4 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+  %7 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
+  %res3 = add <2 x i64> %3, %6
+  %res4 = add <2 x i64> %res3, %7
+  ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64>, i32)
+
+define <4 x i64>@test_int_x86_avx512_pror_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_pror_q_256:
+; X86:       # %bb.0:
+; X86-NEXT:    vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
+; X86-NEXT:    vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
+; X86-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X86-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_pror_q_256:
+; X64:       # %bb.0:
+; X64-NEXT:    vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
+; X64-NEXT:    vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
+; X64-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X64-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
+  %2 = bitcast i8 %x3 to <8 x i1>
+  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+  %4 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
+  %5 = bitcast i8 %x3 to <8 x i1>
+  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+  %7 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
+  %res3 = add <4 x i64> %3, %6
+  %res4 = add <4 x i64> %res3, %7
+  ret <4 x i64> %res4
+}
+
 declare <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
 
 define <8 x float> @test_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index f4ec74b..912715b 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -1617,22 +1617,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmov_qb_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovqb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x32,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovqb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x32,0xc2]
 ; X86-NEXT:    vpmovqb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x32,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovqb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x32,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovqb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x32,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_qb_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovqb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x32,0xc2]
+; X64-NEXT:    vpmovqb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x32,0xc2]
 ; X64-NEXT:    vpmovqb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x32,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovqb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x32,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovqb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x32,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.128(<2 x i64> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2)
@@ -1670,22 +1671,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovs_qb_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovsqb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x22,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsqb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x22,0xc2]
 ; X86-NEXT:    vpmovsqb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x22,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovsqb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x22,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovsqb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x22,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_qb_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsqb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x22,0xc2]
+; X64-NEXT:    vpmovsqb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x22,0xc2]
 ; X64-NEXT:    vpmovsqb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x22,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovsqb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x22,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovsqb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x22,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.128(<2 x i64> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2)
@@ -1723,22 +1725,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovus_qb_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovusqb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x12,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusqb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x12,0xc2]
 ; X86-NEXT:    vpmovusqb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x12,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovusqb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x12,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovusqb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x12,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_qb_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusqb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x12,0xc2]
+; X64-NEXT:    vpmovusqb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x12,0xc2]
 ; X64-NEXT:    vpmovusqb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x12,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovusqb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x12,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovusqb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x12,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.128(<2 x i64> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2)
@@ -1776,23 +1779,24 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_256(<4 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmov_qb_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovqb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x32,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovqb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x32,0xc2]
 ; X86-NEXT:    vpmovqb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x32,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovqb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x32,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovqb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x32,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_qb_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovqb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x32,0xc2]
+; X64-NEXT:    vpmovqb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x32,0xc2]
 ; X64-NEXT:    vpmovqb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x32,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovqb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x32,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovqb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x32,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.256(<4 x i64> %x0, <16 x i8> %x1, i8 -1)
@@ -1833,23 +1837,24 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_256(<4 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovs_qb_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovsqb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x22,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsqb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x22,0xc2]
 ; X86-NEXT:    vpmovsqb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x22,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovsqb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x22,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovsqb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x22,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_qb_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsqb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x22,0xc2]
+; X64-NEXT:    vpmovsqb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x22,0xc2]
 ; X64-NEXT:    vpmovsqb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x22,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovsqb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x22,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovsqb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x22,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.256(<4 x i64> %x0, <16 x i8> %x1, i8 -1)
@@ -1890,23 +1895,24 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_256(<4 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovus_qb_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovusqb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x12,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusqb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x12,0xc2]
 ; X86-NEXT:    vpmovusqb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x12,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovusqb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x12,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovusqb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x12,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_qb_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusqb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x12,0xc2]
+; X64-NEXT:    vpmovusqb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x12,0xc2]
 ; X64-NEXT:    vpmovusqb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x12,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovusqb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x12,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovusqb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x12,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.256(<4 x i64> %x0, <16 x i8> %x1, i8 -1)
@@ -1949,21 +1955,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovqw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x34,0xc2]
 ; X86-NEXT:    vpmovqw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x34,0xc1]
-; X86-NEXT:    vpmovqw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x34,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovqw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x34,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovqw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x34,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_qw_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovqw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x34,0xc2]
+; X64-NEXT:    vpmovqw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x34,0xc2]
 ; X64-NEXT:    vpmovqw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x34,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovqw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x34,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovqw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x34,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.128(<2 x i64> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.128(<2 x i64> %x0, <8 x i16> %x1, i8 %x2)
@@ -2003,21 +2009,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsqw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x24,0xc2]
 ; X86-NEXT:    vpmovsqw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x24,0xc1]
-; X86-NEXT:    vpmovsqw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x24,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovsqw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x24,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovsqw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x24,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_qw_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsqw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x24,0xc2]
+; X64-NEXT:    vpmovsqw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x24,0xc2]
 ; X64-NEXT:    vpmovsqw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x24,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovsqw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x24,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovsqw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x24,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.128(<2 x i64> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.128(<2 x i64> %x0, <8 x i16> %x1, i8 %x2)
@@ -2057,21 +2063,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusqw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x14,0xc2]
 ; X86-NEXT:    vpmovusqw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x14,0xc1]
-; X86-NEXT:    vpmovusqw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x14,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovusqw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x14,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovusqw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x14,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_qw_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusqw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x14,0xc2]
+; X64-NEXT:    vpmovusqw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x14,0xc2]
 ; X64-NEXT:    vpmovusqw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x14,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovusqw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x14,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovusqw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x14,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.128(<2 x i64> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.128(<2 x i64> %x0, <8 x i16> %x1, i8 %x2)
@@ -2111,22 +2117,22 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovqw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x34,0xc2]
 ; X86-NEXT:    vpmovqw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x34,0xc1]
-; X86-NEXT:    vpmovqw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x34,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovqw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x34,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovqw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x34,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_qw_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovqw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x34,0xc2]
+; X64-NEXT:    vpmovqw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x34,0xc2]
 ; X64-NEXT:    vpmovqw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x34,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovqw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x34,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovqw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x34,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.256(<4 x i64> %x0, <8 x i16> %x1, i8 -1)
@@ -2169,22 +2175,22 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsqw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x24,0xc2]
 ; X86-NEXT:    vpmovsqw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x24,0xc1]
-; X86-NEXT:    vpmovsqw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x24,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovsqw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x24,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovsqw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x24,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_qw_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsqw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x24,0xc2]
+; X64-NEXT:    vpmovsqw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x24,0xc2]
 ; X64-NEXT:    vpmovsqw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x24,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovsqw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x24,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovsqw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x24,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.256(<4 x i64> %x0, <8 x i16> %x1, i8 -1)
@@ -2227,22 +2233,22 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusqw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x14,0xc2]
 ; X86-NEXT:    vpmovusqw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x14,0xc1]
-; X86-NEXT:    vpmovusqw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x14,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovusqw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x14,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovusqw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x14,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_qw_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusqw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x14,0xc2]
+; X64-NEXT:    vpmovusqw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x14,0xc2]
 ; X64-NEXT:    vpmovusqw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x14,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovusqw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x14,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovusqw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x14,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.256(<4 x i64> %x0, <8 x i16> %x1, i8 -1)
@@ -2285,21 +2291,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovqd %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x35,0xc2]
 ; X86-NEXT:    vpmovqd %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x35,0xc1]
-; X86-NEXT:    vpmovqd %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x35,0xc2]
-; X86-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
-; X86-NEXT:    vpmovqd %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x35,0xc0]
-; X86-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
+; X86-NEXT:    vpmovqd %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x35,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_qd_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovqd %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x35,0xc2]
+; X64-NEXT:    vpmovqd %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x35,0xc2]
 ; X64-NEXT:    vpmovqd %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x35,0xc1]
-; X64-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
-; X64-NEXT:    vpmovqd %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x35,0xc0]
-; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
+; X64-NEXT:    vpmovqd %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x35,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <4 x i32> @llvm.x86.avx512.mask.pmov.qd.128(<2 x i64> %x0, <4 x i32> %x1, i8 -1)
     %res1 = call <4 x i32> @llvm.x86.avx512.mask.pmov.qd.128(<2 x i64> %x0, <4 x i32> %x1, i8 %x2)
@@ -2339,20 +2345,20 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vpmovsqd %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x25,0xc1]
-; X86-NEXT:    vpmovsqd %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x25,0xc2]
-; X86-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
 ; X86-NEXT:    vpmovsqd %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x25,0xc0]
+; X86-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0x6f,0xc8]
+; X86-NEXT:    vmovdqa32 %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x6f,0xd0]
+; X86-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
 ; X86-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_qd_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsqd %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x25,0xc2]
-; X64-NEXT:    vpmovsqd %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x25,0xc1]
-; X64-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
 ; X64-NEXT:    vpmovsqd %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x25,0xc0]
+; X64-NEXT:    vmovdqa32 %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x6f,0xd0]
+; X64-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x7d,0x09,0x6f,0xc8]
+; X64-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
 ; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <4 x i32> @llvm.x86.avx512.mask.pmovs.qd.128(<2 x i64> %x0, <4 x i32> %x1, i8 -1)
@@ -2393,21 +2399,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusqd %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x15,0xc2]
 ; X86-NEXT:    vpmovusqd %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x15,0xc1]
-; X86-NEXT:    vpmovusqd %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x15,0xc2]
-; X86-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
-; X86-NEXT:    vpmovusqd %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x15,0xc0]
-; X86-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
+; X86-NEXT:    vpmovusqd %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x15,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_qd_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusqd %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x15,0xc2]
+; X64-NEXT:    vpmovusqd %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x15,0xc2]
 ; X64-NEXT:    vpmovusqd %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x15,0xc1]
-; X64-NEXT:    vpaddd %xmm2, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
-; X64-NEXT:    vpmovusqd %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x15,0xc0]
-; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
+; X64-NEXT:    vpmovusqd %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x15,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.128(<2 x i64> %x0, <4 x i32> %x1, i8 -1)
     %res1 = call <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.128(<2 x i64> %x0, <4 x i32> %x1, i8 %x2)
@@ -2619,22 +2625,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmov_db_128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmov_db_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovdb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x31,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovdb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x31,0xc2]
 ; X86-NEXT:    vpmovdb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x31,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovdb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x31,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovdb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x31,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_db_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovdb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x31,0xc2]
+; X64-NEXT:    vpmovdb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x31,0xc2]
 ; X64-NEXT:    vpmovdb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x31,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovdb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x31,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovdb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x31,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.128(<4 x i32> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2)
@@ -2672,22 +2679,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovs_db_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovsdb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x21,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsdb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x21,0xc2]
 ; X86-NEXT:    vpmovsdb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x21,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovsdb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x21,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovsdb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x21,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_db_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsdb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x21,0xc2]
+; X64-NEXT:    vpmovsdb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x21,0xc2]
 ; X64-NEXT:    vpmovsdb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x21,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovsdb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x21,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovsdb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x21,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.128(<4 x i32> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2)
@@ -2725,22 +2733,23 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovus_db_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovusdb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x11,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusdb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x11,0xc2]
 ; X86-NEXT:    vpmovusdb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x11,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovusdb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x11,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovusdb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x11,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_db_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusdb %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x11,0xc2]
+; X64-NEXT:    vpmovusdb %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x11,0xc2]
 ; X64-NEXT:    vpmovusdb %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x11,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovusdb %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x11,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovusdb %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x11,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.128(<4 x i32> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2)
@@ -2778,23 +2787,24 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmov_db_256(<8 x i32> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmov_db_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovdb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x31,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovdb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x31,0xc2]
 ; X86-NEXT:    vpmovdb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x31,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovdb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x31,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovdb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x31,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_db_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovdb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x31,0xc2]
+; X64-NEXT:    vpmovdb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x31,0xc2]
 ; X64-NEXT:    vpmovdb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x31,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovdb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x31,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovdb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x31,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.256(<8 x i32> %x0, <16 x i8> %x1, i8 -1)
@@ -2835,23 +2845,24 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_256(<8 x i32> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovs_db_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovsdb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x21,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsdb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x21,0xc2]
 ; X86-NEXT:    vpmovsdb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x21,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovsdb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x21,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovsdb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x21,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_db_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsdb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x21,0xc2]
+; X64-NEXT:    vpmovsdb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x21,0xc2]
 ; X64-NEXT:    vpmovsdb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x21,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovsdb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x21,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovsdb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x21,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.256(<8 x i32> %x0, <16 x i8> %x1, i8 -1)
@@ -2892,23 +2903,24 @@
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_256(<8 x i32> %x0, <16 x i8> %x1, i8 %x2) {
 ; X86-LABEL: test_int_x86_avx512_mask_pmovus_db_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vpmovusdb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x11,0xc2]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusdb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x11,0xc2]
 ; X86-NEXT:    vpmovusdb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x11,0xc1]
-; X86-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X86-NEXT:    vpmovusdb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x11,0xc0]
-; X86-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X86-NEXT:    vpmovusdb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x11,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_db_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusdb %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x11,0xc2]
+; X64-NEXT:    vpmovusdb %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x11,0xc2]
 ; X64-NEXT:    vpmovusdb %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x11,0xc1]
-; X64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfc,0xca]
-; X64-NEXT:    vpmovusdb %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x11,0xc0]
-; X64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
+; X64-NEXT:    vpmovusdb %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x11,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfc,0xc0]
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfc,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.256(<8 x i32> %x0, <16 x i8> %x1, i8 -1)
@@ -2951,21 +2963,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovdw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x33,0xc2]
 ; X86-NEXT:    vpmovdw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x33,0xc1]
-; X86-NEXT:    vpmovdw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x33,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovdw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x33,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovdw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x33,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_dw_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovdw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x33,0xc2]
+; X64-NEXT:    vpmovdw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x33,0xc2]
 ; X64-NEXT:    vpmovdw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x33,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovdw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x33,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovdw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x33,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmov.dw.128(<4 x i32> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmov.dw.128(<4 x i32> %x0, <8 x i16> %x1, i8 %x2)
@@ -3005,21 +3017,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsdw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x23,0xc2]
 ; X86-NEXT:    vpmovsdw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x23,0xc1]
-; X86-NEXT:    vpmovsdw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x23,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovsdw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x23,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovsdw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x23,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_dw_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsdw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x23,0xc2]
+; X64-NEXT:    vpmovsdw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x23,0xc2]
 ; X64-NEXT:    vpmovsdw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x23,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovsdw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x23,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovsdw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x23,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.dw.128(<4 x i32> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.dw.128(<4 x i32> %x0, <8 x i16> %x1, i8 %x2)
@@ -3059,21 +3071,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusdw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x13,0xc2]
 ; X86-NEXT:    vpmovusdw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x13,0xc1]
-; X86-NEXT:    vpmovusdw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x13,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovusdw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x13,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovusdw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x13,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_dw_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusdw %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x13,0xc2]
+; X64-NEXT:    vpmovusdw %xmm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x08,0x13,0xc2]
 ; X64-NEXT:    vpmovusdw %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x09,0x13,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovusdw %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x13,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovusdw %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0x89,0x13,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.128(<4 x i32> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.128(<4 x i32> %x0, <8 x i16> %x1, i8 %x2)
@@ -3113,22 +3125,22 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovdw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x33,0xc2]
 ; X86-NEXT:    vpmovdw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x33,0xc1]
-; X86-NEXT:    vpmovdw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x33,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovdw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x33,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovdw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x33,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmov_dw_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovdw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x33,0xc2]
+; X64-NEXT:    vpmovdw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x33,0xc2]
 ; X64-NEXT:    vpmovdw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x33,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovdw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x33,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovdw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x33,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmov.dw.256(<8 x i32> %x0, <8 x i16> %x1, i8 -1)
@@ -3171,22 +3183,22 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovsdw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x23,0xc2]
 ; X86-NEXT:    vpmovsdw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x23,0xc1]
-; X86-NEXT:    vpmovsdw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x23,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovsdw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x23,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovsdw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x23,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovs_dw_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovsdw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x23,0xc2]
+; X64-NEXT:    vpmovsdw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x23,0xc2]
 ; X64-NEXT:    vpmovsdw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x23,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovsdw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x23,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovsdw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x23,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.dw.256(<8 x i32> %x0, <8 x i16> %x1, i8 -1)
@@ -3229,22 +3241,22 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmovusdw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x13,0xc2]
 ; X86-NEXT:    vpmovusdw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x13,0xc1]
-; X86-NEXT:    vpmovusdw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x13,0xc2]
-; X86-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X86-NEXT:    vpmovusdw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x13,0xc0]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpmovusdw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x13,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_pmovus_dw_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vpmovusdw %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x13,0xc2]
+; X64-NEXT:    vpmovusdw %ymm0, %xmm2 # encoding: [0x62,0xf2,0x7e,0x28,0x13,0xc2]
 ; X64-NEXT:    vpmovusdw %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0x7e,0x29,0x13,0xc1]
-; X64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xfd,0xca]
-; X64-NEXT:    vpmovusdw %ymm0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x28,0x13,0xc0]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpmovusdw %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7e,0xa9,0x13,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xfd,0xc0]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.256(<8 x i32> %x0, <8 x i16> %x1, i8 -1)
@@ -4294,21 +4306,21 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vcvtps2ph $2, %xmm0, %xmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc2,0x02]
+; X86-NEXT:    vcvtps2ph $2, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x1d,0xc3,0x02]
 ; X86-NEXT:    vcvtps2ph $2, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf3,0x7d,0x09,0x1d,0xc1,0x02]
-; X86-NEXT:    vcvtps2ph $2, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x1d,0xc2,0x02]
-; X86-NEXT:    vpaddw %xmm1, %xmm2, %xmm1 # encoding: [0xc5,0xe9,0xfd,0xc9]
-; X86-NEXT:    vcvtps2ph $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x02]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpaddw %xmm1, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfd,0xc1]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_128:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vcvtps2ph $2, %xmm0, %xmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc2,0x02]
+; X64-NEXT:    vcvtps2ph $2, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x1d,0xc3,0x02]
 ; X64-NEXT:    vcvtps2ph $2, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf3,0x7d,0x09,0x1d,0xc1,0x02]
-; X64-NEXT:    vcvtps2ph $2, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x89,0x1d,0xc2,0x02]
-; X64-NEXT:    vpaddw %xmm1, %xmm2, %xmm1 # encoding: [0xc5,0xe9,0xfd,0xc9]
-; X64-NEXT:    vcvtps2ph $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x02]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpaddw %xmm1, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfd,0xc1]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res1 = call <8 x i16> @llvm.x86.avx512.mask.vcvtps2ph.128(<4 x float> %a0, i32 2, <8 x i16> zeroinitializer, i8 -1)
   %res2 = call <8 x i16> @llvm.x86.avx512.mask.vcvtps2ph.128(<4 x float> %a0, i32 2, <8 x i16> zeroinitializer, i8 %mask)
@@ -4325,22 +4337,22 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vcvtps2ph $2, %ymm0, %xmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc2,0x02]
+; X86-NEXT:    vcvtps2ph $2, %ymm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0xa9,0x1d,0xc3,0x02]
 ; X86-NEXT:    vcvtps2ph $2, %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x1d,0xc1,0x02]
-; X86-NEXT:    vcvtps2ph $2, %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0xa9,0x1d,0xc2,0x02]
-; X86-NEXT:    vpaddw %xmm1, %xmm2, %xmm1 # encoding: [0xc5,0xe9,0xfd,0xc9]
-; X86-NEXT:    vcvtps2ph $2, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x02]
-; X86-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X86-NEXT:    vpaddw %xmm1, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfd,0xc1]
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT:    vcvtps2ph $2, %ymm0, %xmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc2,0x02]
+; X64-NEXT:    vcvtps2ph $2, %ymm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0xa9,0x1d,0xc3,0x02]
 ; X64-NEXT:    vcvtps2ph $2, %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf3,0x7d,0x29,0x1d,0xc1,0x02]
-; X64-NEXT:    vcvtps2ph $2, %ymm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0xa9,0x1d,0xc2,0x02]
-; X64-NEXT:    vpaddw %xmm1, %xmm2, %xmm1 # encoding: [0xc5,0xe9,0xfd,0xc9]
-; X64-NEXT:    vcvtps2ph $2, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x02]
-; X64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
+; X64-NEXT:    vpaddw %xmm1, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfd,0xc1]
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0 # encoding: [0xc5,0xe9,0xfd,0xc0]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res1 = call <8 x i16> @llvm.x86.avx512.mask.vcvtps2ph.256(<8 x float> %a0, i32 2, <8 x i16> zeroinitializer, i8 -1)
@@ -4725,590 +4737,6 @@
 declare <4 x double> @llvm.x86.avx512.rcp14.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
 declare <2 x double> @llvm.x86.avx512.rcp14.pd.128(<2 x double>, <2 x double>, i8) nounwind readnone
 
-declare <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32>, <4 x i32>)
-
-define <4 x i32>@test_int_x86_avx512_mask_prorv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prorv_d_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
-; X86-NEXT:    vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
-; X86-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
-; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prorv_d_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
-; X64-NEXT:    vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
-; X64-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
-; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
-  %4 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
-  %7 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
-  %res3 = add <4 x i32> %3, %6
-  %res4 = add <4 x i32> %res3, %7
-  ret <4 x i32> %res4
-}
-
-declare <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32>, <8 x i32>)
-
-define <8 x i32>@test_int_x86_avx512_mask_prorv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prorv_d_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
-; X86-NEXT:    vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
-; X86-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
-; X86-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prorv_d_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
-; X64-NEXT:    vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
-; X64-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
-; X64-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
-  %4 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
-  %7 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
-  %res3 = add <8 x i32> %3, %6
-  %res4 = add <8 x i32> %res3, %7
-  ret <8 x i32> %res4
-}
-
-declare <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64>, <2 x i64>)
-
-define <2 x i64>@test_int_x86_avx512_mask_prorv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prorv_q_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
-; X86-NEXT:    vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
-; X86-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
-; X86-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prorv_q_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
-; X64-NEXT:    vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
-; X64-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
-; X64-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
-  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
-  %4 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
-  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
-  %7 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
-  %res3 = add <2 x i64> %3, %6
-  %res4 = add <2 x i64> %res3, %7
-  ret <2 x i64> %res4
-}
-
-declare <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64>, <4 x i64>)
-
-define <4 x i64>@test_int_x86_avx512_mask_prorv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prorv_q_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
-; X86-NEXT:    vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
-; X86-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
-; X86-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prorv_q_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
-; X64-NEXT:    vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
-; X64-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
-; X64-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
-  %4 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
-  %7 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
-  %res3 = add <4 x i64> %3, %6
-  %res4 = add <4 x i64> %res3, %7
-  ret <4 x i64> %res4
-}
-
-declare <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32>, i32)
-
-define <4 x i32>@test_int_x86_avx512_mask_prol_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prol_d_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
-; X86-NEXT:    vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
-; X86-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
-; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prol_d_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
-; X64-NEXT:    vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
-; X64-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
-; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
-  %4 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
-  %7 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
-  %res3 = add <4 x i32> %3, %6
-  %res4 = add <4 x i32> %res3, %7
-  ret <4 x i32> %res4
-}
-
-declare <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32>, i32)
-
-define <8 x i32>@test_int_x86_avx512_mask_prol_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prol_d_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
-; X86-NEXT:    vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
-; X86-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
-; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prol_d_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
-; X64-NEXT:    vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
-; X64-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
-; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
-  %4 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
-  %7 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
-  %res3 = add <8 x i32> %3, %6
-  %res4 = add <8 x i32> %res3, %7
-  ret <8 x i32> %res4
-}
-
-declare <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64>, i32)
-
-define <2 x i64>@test_int_x86_avx512_mask_prol_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prol_q_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
-; X86-NEXT:    vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
-; X86-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
-; X86-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prol_q_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
-; X64-NEXT:    vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
-; X64-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
-; X64-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
-  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
-  %4 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
-  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
-  %7 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
-  %res3 = add <2 x i64> %3, %6
-  %res4 = add <2 x i64> %res3, %7
-  ret <2 x i64> %res4
-}
-
-declare <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64>, i32)
-
-define <4 x i64>@test_int_x86_avx512_mask_prol_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prol_q_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
-; X86-NEXT:    vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
-; X86-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
-; X86-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prol_q_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
-; X64-NEXT:    vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
-; X64-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
-; X64-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
-  %4 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
-  %7 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
-  %res3 = add <4 x i64> %3, %6
-  %res4 = add <4 x i64> %res3, %7
-  ret <4 x i64> %res4
-}
-
-declare <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32>, <4 x i32>)
-
-define <4 x i32>@test_int_x86_avx512_mask_prolv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prolv_d_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
-; X86-NEXT:    vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
-; X86-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
-; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prolv_d_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
-; X64-NEXT:    vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
-; X64-NEXT:    vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
-; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
-  %4 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
-  %7 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
-  %res3 = add <4 x i32> %3, %6
-  %res4 = add <4 x i32> %res3, %7
-  ret <4 x i32> %res4
-}
-
-declare <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32>, <8 x i32>)
-
-define <8 x i32>@test_int_x86_avx512_mask_prolv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prolv_d_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
-; X86-NEXT:    vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
-; X86-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
-; X86-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prolv_d_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
-; X64-NEXT:    vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
-; X64-NEXT:    vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
-; X64-NEXT:    vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
-  %4 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
-  %7 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
-  %res3 = add <8 x i32> %3, %6
-  %res4 = add <8 x i32> %res3, %7
-  ret <8 x i32> %res4
-}
-
-declare <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64>, <2 x i64>)
-
-define <2 x i64>@test_int_x86_avx512_mask_prolv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prolv_q_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
-; X86-NEXT:    vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
-; X86-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
-; X86-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prolv_q_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
-; X64-NEXT:    vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
-; X64-NEXT:    vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
-; X64-NEXT:    vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
-  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
-  %4 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
-  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
-  %7 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
-  %res3 = add <2 x i64> %3, %6
-  %res4 = add <2 x i64> %res3, %7
-  ret <2 x i64> %res4
-}
-
-declare <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64>, <4 x i64>)
-
-define <4 x i64>@test_int_x86_avx512_mask_prolv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_prolv_q_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
-; X86-NEXT:    vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
-; X86-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
-; X86-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_prolv_q_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
-; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
-; X64-NEXT:    vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
-; X64-NEXT:    vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
-; X64-NEXT:    vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
-  %4 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
-  %7 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
-  %res3 = add <4 x i64> %3, %6
-  %res4 = add <4 x i64> %res3, %7
-  ret <4 x i64> %res4
-}
-
-declare <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32>, i32)
-
-define <4 x i32>@test_int_x86_avx512_mask_pror_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_pror_d_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
-; X86-NEXT:    vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
-; X86-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
-; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_pror_d_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
-; X64-NEXT:    vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
-; X64-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
-; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
-  %4 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
-  %7 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
-  %res3 = add <4 x i32> %3, %6
-  %res4 = add <4 x i32> %res3, %7
-  ret <4 x i32> %res4
-}
-
-declare <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32>, i32)
-
-define <8 x i32>@test_int_x86_avx512_mask_pror_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_pror_d_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
-; X86-NEXT:    vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
-; X86-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
-; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_pror_d_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
-; X64-NEXT:    vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
-; X64-NEXT:    vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
-; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
-  %4 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
-  %7 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
-  %res3 = add <8 x i32> %3, %6
-  %res4 = add <8 x i32> %res3, %7
-  ret <8 x i32> %res4
-}
-
-declare <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64>, i32)
-
-define <2 x i64>@test_int_x86_avx512_mask_pror_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_pror_q_128:
-; X86:       # %bb.0:
-; X86-NEXT:    vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
-; X86-NEXT:    vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
-; X86-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
-; X86-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_pror_q_128:
-; X64:       # %bb.0:
-; X64-NEXT:    vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
-; X64-NEXT:    vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
-; X64-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
-; X64-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
-  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
-  %4 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
-  %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
-  %7 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
-  %res3 = add <2 x i64> %3, %6
-  %res4 = add <2 x i64> %res3, %7
-  ret <2 x i64> %res4
-}
-
-declare <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64>, i32)
-
-define <4 x i64>@test_int_x86_avx512_mask_pror_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_pror_q_256:
-; X86:       # %bb.0:
-; X86-NEXT:    vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
-; X86-NEXT:    vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
-; X86-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
-; X86-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
-; X86-NEXT:    retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_pror_q_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
-; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT:    vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
-; X64-NEXT:    vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
-; X64-NEXT:    vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
-; X64-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
-; X64-NEXT:    retq # encoding: [0xc3]
-  %1 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
-  %2 = bitcast i8 %x3 to <8 x i1>
-  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
-  %4 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
-  %5 = bitcast i8 %x3 to <8 x i1>
-  %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
-  %7 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
-  %res3 = add <4 x i64> %3, %6
-  %res4 = add <4 x i64> %res3, %7
-  ret <4 x i64> %res4
-}
-
 declare <4 x double> @llvm.x86.avx512.permvar.df.256(<4 x double>, <4 x i64>)
 
 define <4 x double>@test_int_x86_avx512_mask_permvar_df_256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 %x3) {
diff --git a/test/CodeGen/X86/barrier.ll b/test/CodeGen/X86/barrier.ll
index 9031a0e..f85c0ae 100644
--- a/test/CodeGen/X86/barrier.ll
+++ b/test/CodeGen/X86/barrier.ll
@@ -1,7 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-- -mattr=-sse2 | FileCheck %s
 
 define void @test() {
-; CHECK: lock
+; CHECK-LABEL: test:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lock orl $0, (%esp)
+; CHECK-NEXT:    retl
 	fence seq_cst
 	ret void
 }
diff --git a/test/CodeGen/X86/bitcast-and-setcc-128.ll b/test/CodeGen/X86/bitcast-and-setcc-128.ll
index 364e4ce..289ddcb 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-128.ll
@@ -30,11 +30,10 @@
 ; AVX512F-LABEL: v8i16:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm1
+; AVX512F-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX512F-NEXT:    vptestmd %ymm0, %ymm0, %k1
-; AVX512F-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm0
-; AVX512F-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX512F-NEXT:    vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512F-NEXT:    vptestmd %ymm0, %ymm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512F-NEXT:    vzeroupper
@@ -158,14 +157,10 @@
 ; AVX512F-LABEL: v16i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; AVX512F-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
-; AVX512F-NEXT:    kmovw %k0, %eax
+; AVX512F-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm1
+; AVX512F-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX512F-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: v16i8:
@@ -217,25 +212,21 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm5
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm5, %xmm1
 ; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm3
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm2
 ; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    movmskpd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm3
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    movmskpd %xmm3, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -370,25 +361,21 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm5
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm5, %xmm1
 ; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm3
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm2
 ; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    movmskpd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm3
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    movmskpd %xmm3, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -515,25 +502,21 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm3
 ; SSE2-SSSE3-NEXT:    movdqa %xmm3, %xmm5
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm0, %xmm5
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm0, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm5, %xmm0
+; SSE2-SSSE3-NEXT:    por %xmm3, %xmm0
 ; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm2
 ; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm4
-; SSE2-SSSE3-NEXT:    movdqa %xmm4, %xmm0
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm4
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-SSSE3-NEXT:    movdqa %xmm4, %xmm1
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm1
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[0,0,2,2]
 ; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    movmskpd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    por %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    movmskpd %xmm2, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -624,25 +607,21 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm5
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm5, %xmm1
 ; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm3
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm2
 ; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    movmskpd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm3
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    movmskpd %xmm3, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -913,10 +892,9 @@
 ; AVX512F-NEXT:    vpsllw $8, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX512F-NEXT:    vptestmd %ymm0, %ymm0, %k1
-; AVX512F-NEXT:    vpmovsxwd %xmm2, %ymm0
-; AVX512F-NEXT:    vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512F-NEXT:    vptestmd %ymm0, %ymm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512F-NEXT:    vzeroupper
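
The bitcast-and-setcc tests updated above all share one IR shape: two vector compares whose i1 results are combined with and, then bitcast to a scalar bitmask. A minimal sketch of that shape, with a hypothetical function name (the real tests repeat it across many element types and widths):

define i4 @v4i32_sketch(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
  %x0 = icmp sgt <4 x i32> %a, %b    ; first compare
  %x1 = icmp sgt <4 x i32> %c, %d    ; second compare
  %y = and <4 x i1> %x0, %x1         ; combine while still in the vector domain
  %res = bitcast <4 x i1> %y to i4   ; extract one bit per lane
  ret i4 %res
}

The codegen improvement being pinned down is that the and is now performed on the vector compare results first, so only one mask extraction (movmsk/vptestmd) is needed instead of two.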
diff --git a/test/CodeGen/X86/bitcast-and-setcc-256.ll b/test/CodeGen/X86/bitcast-and-setcc-256.ll
index f2a6c66..426cabe 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -14,20 +14,18 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm1
 ; SSE2-SSSE3-NEXT:    movdqa %xmm1, %xmm9
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm9
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
 ; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm10, %xmm1
+; SSE2-SSSE3-NEXT:    pand %xmm9, %xmm1
 ; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm9[1,1,3,3]
 ; SSE2-SSSE3-NEXT:    por %xmm1, %xmm3
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm2
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm9 = xmm1[0,0,2,2]
 ; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm9, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm2
 ; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
 ; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
@@ -35,22 +33,20 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm5
 ; SSE2-SSSE3-NEXT:    movdqa %xmm5, %xmm1
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm7, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE2-SSSE3-NEXT:    pcmpeqd %xmm7, %xmm5
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm3
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm2
 ; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm3, %xmm1
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm6
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm4
 ; SSE2-SSSE3-NEXT:    movdqa %xmm4, %xmm2
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm6, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE2-SSSE3-NEXT:    pcmpeqd %xmm6, %xmm4
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm3
 ; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    por %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
 ; SSE2-SSSE3-NEXT:    andps %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT:    movmskps %xmm2, %eax
@@ -59,17 +55,17 @@
 ;
 ; AVX1-LABEL: v4i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpackssdw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovmskps %xmm0, %eax
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
@@ -78,12 +74,10 @@
 ; AVX2-LABEL: v4i64:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm1
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovmskps %xmm0, %eax
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
@@ -130,12 +124,10 @@
 ; AVX12-LABEL: v4f64:
 ; AVX12:       # %bb.0:
 ; AVX12-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX12-NEXT:    vcmpltpd %ymm2, %ymm3, %ymm1
+; AVX12-NEXT:    vandpd %ymm1, %ymm0, %ymm0
 ; AVX12-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX12-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX12-NEXT:    vcmpltpd %ymm2, %ymm3, %ymm1
-; AVX12-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX12-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT:    vmovmskps %xmm0, %eax
 ; AVX12-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX12-NEXT:    vzeroupper
@@ -181,17 +173,17 @@
 ;
 ; AVX1-LABEL: v16i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
@@ -200,12 +192,10 @@
 ; AVX2-LABEL: v16i16:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpgtw %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpgtw %ymm3, %ymm2, %ymm1
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
@@ -214,11 +204,10 @@
 ; AVX512F-LABEL: v16i16:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm2, %ymm1
+; AVX512F-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm2, %ymm0
-; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
+; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512F-NEXT:    vzeroupper
@@ -256,17 +245,17 @@
 ;
 ; AVX1-LABEL: v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtd %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpackssdw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
@@ -276,12 +265,10 @@
 ; AVX2-LABEL: v8i32:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm2, %ymm1
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
@@ -330,12 +317,10 @@
 ; AVX12-LABEL: v8f32:
 ; AVX12:       # %bb.0:
 ; AVX12-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
+; AVX12-NEXT:    vcmpltps %ymm2, %ymm3, %ymm1
+; AVX12-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX12-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX12-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX12-NEXT:    vcmpltps %ymm2, %ymm3, %ymm1
-; AVX12-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX12-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
 ; AVX12-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX12-NEXT:    # kill: def $al killed $al killed $eax
@@ -412,21 +397,9 @@
 ; AVX512F-LABEL: v32i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k1
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k2
-; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm2, %ymm0
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k2}
-; AVX512F-NEXT:    kmovw %k0, %ecx
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k0 {%k1}
-; AVX512F-NEXT:    kmovw %k0, %eax
-; AVX512F-NEXT:    shll $16, %eax
-; AVX512F-NEXT:    orl %ecx, %eax
+; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm2, %ymm1
+; AVX512F-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovmskb %ymm0, %eax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/bitcast-and-setcc-512.ll b/test/CodeGen/X86/bitcast-and-setcc-512.ll
index 183e32a..bab37c3 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -304,18 +304,16 @@
 ; AVX512F-LABEL: v32i16:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k1
 ; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpcmpgtw %ymm7, %ymm5, %ymm2
+; AVX512F-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpcmpgtw %ymm6, %ymm4, %ymm2
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k2
-; AVX512F-NEXT:    vpcmpgtw %ymm7, %ymm5, %ymm0
-; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
-; AVX512F-NEXT:    vpcmpgtw %ymm6, %ymm4, %ymm1
-; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k0 {%k2}
+; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %ecx
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k1}
+; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm0
+; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    shll $16, %eax
 ; AVX512F-NEXT:    orl %ecx, %eax
@@ -615,33 +613,29 @@
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-NEXT:    vpmovsxbd %xmm3, %zmm3
-; AVX512F-NEXT:    vptestmd %zmm3, %zmm3, %k1
-; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k2
 ; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k3
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT:    vpcmpgtb %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm7
+; AVX512F-NEXT:    vpand %xmm7, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpgtb %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX512F-NEXT:    vpand %xmm4, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k4
-; AVX512F-NEXT:    vpcmpgtb %ymm7, %ymm5, %ymm0
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT:    vpmovsxbd %xmm1, %zmm1
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vpcmpgtb %ymm6, %ymm4, %ymm2
-; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-NEXT:    vpmovsxbd %xmm3, %zmm3
-; AVX512F-NEXT:    vpmovsxbd %xmm2, %zmm2
-; AVX512F-NEXT:    vptestmd %zmm2, %zmm2, %k0 {%k4}
+; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
-; AVX512F-NEXT:    vptestmd %zmm3, %zmm3, %k0 {%k3}
+; AVX512F-NEXT:    vpmovsxbd %xmm2, %zmm0
+; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %ecx
 ; AVX512F-NEXT:    shll $16, %ecx
 ; AVX512F-NEXT:    orl %eax, %ecx
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k2}
+; AVX512F-NEXT:    vpand %xmm5, %xmm1, %xmm0
+; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %edx
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k0 {%k1}
+; AVX512F-NEXT:    vpmovsxbd %xmm3, %zmm0
+; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    shll $16, %eax
 ; AVX512F-NEXT:    orl %edx, %eax
diff --git a/test/CodeGen/X86/bitcast-setcc-128.ll b/test/CodeGen/X86/bitcast-setcc-128.ll
index a1330b0..fb58597 100644
--- a/test/CodeGen/X86/bitcast-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -128,11 +128,8 @@
 ; AVX512F-LABEL: v16i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; AVX512F-NEXT:    kmovw %k0, %eax
+; AVX512F-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX512F-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: v16i8:
@@ -167,12 +164,10 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT:    movmskpd %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
@@ -259,12 +254,10 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT:    movmskpd %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
@@ -347,14 +340,12 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm2
 ; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
-; SSE2-SSSE3-NEXT:    movmskpd %xmm1, %eax
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    movmskpd %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -417,12 +408,10 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT:    movmskpd %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
diff --git a/test/CodeGen/X86/bitcast-setcc-256.ll b/test/CodeGen/X86/bitcast-setcc-256.ll
index 1e7a632..b0af971 100644
--- a/test/CodeGen/X86/bitcast-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -184,15 +184,7 @@
 ; AVX512F-LABEL: v32i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; AVX512F-NEXT:    kmovw %k0, %ecx
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; AVX512F-NEXT:    kmovw %k0, %eax
-; AVX512F-NEXT:    shll $16, %eax
-; AVX512F-NEXT:    orl %ecx, %eax
+; AVX512F-NEXT:    vpmovmskb %ymm0, %eax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -214,25 +206,21 @@
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm3
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT:    movdqa %xmm1, %xmm5
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm5
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm1
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm5, %xmm3
 ; SSE2-SSSE3-NEXT:    por %xmm1, %xmm3
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm2
 ; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
-; SSE2-SSSE3-NEXT:    packssdw %xmm3, %xmm1
-; SSE2-SSSE3-NEXT:    movmskps %xmm1, %eax
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm1
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT:    por %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    packssdw %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    movmskps %xmm2, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -477,24 +465,21 @@
 ; SSE2-SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm3
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm3
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm1
+; SSE2-SSSE3-NEXT:    por %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
-; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm0
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
-; SSE2-SSSE3-NEXT:    packssdw %xmm3, %xmm1
-; SSE2-SSSE3-NEXT:    movmskps %xmm1, %eax
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    movmskps %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, (%rdi)
 ; SSE2-SSSE3-NEXT:    retq
 ;
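
The bitcast-setcc variants exercise the same mask extraction without the intervening and: a single compare bitcast directly to a scalar. A sketch of the v32i8 case that now lowers to one vpmovmskb under AVX512F (hypothetical function name):

define i32 @v32i8_sketch(<32 x i8> %a, <32 x i8> %b) {
  %x = icmp sgt <32 x i8> %a, %b      ; 32-lane byte compare
  %res = bitcast <32 x i1> %x to i32  ; one bit per lane
  ret i32 %res
}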
diff --git a/test/CodeGen/X86/bitcast-setcc-512.ll b/test/CodeGen/X86/bitcast-setcc-512.ll
index 1911aed..340c7ab 100644
--- a/test/CodeGen/X86/bitcast-setcc-512.ll
+++ b/test/CodeGen/X86/bitcast-setcc-512.ll
@@ -256,26 +256,10 @@
 ;
 ; AVX512F-LABEL: v64i8:
 ; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm2
-; AVX512F-NEXT:    vptestmd %zmm2, %zmm2, %k0
-; AVX512F-NEXT:    kmovw %k0, %eax
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; AVX512F-NEXT:    kmovw %k0, %ecx
-; AVX512F-NEXT:    shll $16, %ecx
-; AVX512F-NEXT:    orl %eax, %ecx
-; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm1
-; AVX512F-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; AVX512F-NEXT:    kmovw %k0, %edx
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; AVX512F-NEXT:    kmovw %k0, %eax
-; AVX512F-NEXT:    shll $16, %eax
-; AVX512F-NEXT:    orl %edx, %eax
+; AVX512F-NEXT:    vpmovmskb %ymm0, %ecx
+; AVX512F-NEXT:    vpmovmskb %ymm1, %eax
 ; AVX512F-NEXT:    shlq $32, %rax
 ; AVX512F-NEXT:    orq %rcx, %rax
 ; AVX512F-NEXT:    vzeroupper
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index be6f193..8605c41 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefixes=CHECK,X86,X86-SLOW-BEXTR
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi,+bmi2 | FileCheck %s --check-prefixes=CHECK,X86,X86-SLOW-BEXTR
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov,+bmi | FileCheck %s --check-prefixes=CHECK,X86,X86-SLOW-BEXTR
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov,+bmi,+bmi2 | FileCheck %s --check-prefixes=CHECK,X86,X86-SLOW-BEXTR
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefixes=CHECK,X64,X64-SLOW-BEXTR
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi,+bmi2 | FileCheck %s --check-prefixes=CHECK,X64,X64-SLOW-BEXTR
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi,+fast-bextr | FileCheck %s --check-prefixes=CHECK,X86,X86-FAST-BEXTR
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov,+bmi,+fast-bextr | FileCheck %s --check-prefixes=CHECK,X86,X86-FAST-BEXTR
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi,+fast-bextr | FileCheck %s --check-prefixes=CHECK,X64,X64-FAST-BEXTR
 
 define i32 @andn32(i32 %x, i32 %y)   {
@@ -157,15 +157,15 @@
 ; X86-LABEL: and_cmp_const:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    notl %eax
 ; X86-NEXT:    andl $43, %eax
+; X86-NEXT:    cmpl $43, %eax
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: and_cmp_const:
 ; X64:       # %bb.0:
-; X64-NEXT:    notl %edi
 ; X64-NEXT:    andl $43, %edi
+; X64-NEXT:    cmpl $43, %edi
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %and = and i32 %x, 43
@@ -494,6 +494,51 @@
   ret i32 %tmp2
 }
 
+define i32 @blsi32_z(i32 %a, i32 %b) nounwind {
+; X86-LABEL: blsi32_z:
+; X86:       # %bb.0:
+; X86-NEXT:    blsil {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jne .LBB24_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB24_2:
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsi32_z:
+; X64:       # %bb.0:
+; X64-NEXT:    blsil %edi, %eax
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 0, %a
+  %t1 = and i32 %t0, %a
+  %t2 = icmp eq i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %t1
+  ret i32 %t3
+}
+
+define i32 @blsi32_z2(i32 %a, i32 %b, i32 %c) nounwind {
+; X86-LABEL: blsi32_z2:
+; X86:       # %bb.0:
+; X86-NEXT:    blsil {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsi32_z2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    blsil %edi, %ecx
+; X64-NEXT:    cmovnel %edx, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 0, %a
+  %t1 = and i32 %t0, %a
+  %t2 = icmp eq i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @blsi64(i64 %x)   {
 ; X86-LABEL: blsi64:
 ; X86:       # %bb.0:
@@ -521,6 +566,74 @@
   ret i64 %tmp2
 }
 
+define i64 @blsi64_z(i64 %a, i64 %b) nounwind {
+; X86-LABEL: blsi64_z:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    negl %eax
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    andl %esi, %edx
+; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    jne .LBB27_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB27_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsi64_z:
+; X64:       # %bb.0:
+; X64-NEXT:    blsiq %rdi, %rax
+; X64-NEXT:    cmoveq %rsi, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 0, %a
+  %t1 = and i64 %t0, %a
+  %t2 = icmp eq i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %t1
+  ret i64 %t3
+}
+
+define i64 @blsi64_z2(i64 %a, i64 %b, i64 %c) nounwind {
+; X86-LABEL: blsi64_z2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    negl %esi
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    andl %ecx, %edx
+; X86-NEXT:    andl %eax, %esi
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    movl 4(%ecx), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsi64_z2:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    blsiq %rdi, %rcx
+; X64-NEXT:    cmovneq %rdx, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 0, %a
+  %t1 = and i64 %t0, %a
+  %t2 = icmp eq i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @blsmsk32(i32 %x)   {
 ; X86-LABEL: blsmsk32:
 ; X86:       # %bb.0:
@@ -553,6 +666,51 @@
   ret i32 %tmp2
 }
 
+define i32 @blsmsk32_z(i32 %a, i32 %b) nounwind {
+; X86-LABEL: blsmsk32_z:
+; X86:       # %bb.0:
+; X86-NEXT:    blsmskl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jne .LBB31_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB31_2:
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsmsk32_z:
+; X64:       # %bb.0:
+; X64-NEXT:    blsmskl %edi, %eax
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 %a, 1
+  %t1 = xor i32 %t0, %a
+  %t2 = icmp eq i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %t1
+  ret i32 %t3
+}
+
+define i32 @blsmsk32_z2(i32 %a, i32 %b, i32 %c) nounwind {
+; X86-LABEL: blsmsk32_z2:
+; X86:       # %bb.0:
+; X86-NEXT:    blsmskl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsmsk32_z2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    blsmskl %edi, %ecx
+; X64-NEXT:    cmovnel %edx, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 %a, 1
+  %t1 = xor i32 %t0, %a
+  %t2 = icmp eq i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @blsmsk64(i64 %x)   {
 ; X86-LABEL: blsmsk64:
 ; X86:       # %bb.0:
@@ -580,6 +738,74 @@
   ret i64 %tmp2
 }
 
+define i64 @blsmsk64_z(i64 %a, i64 %b) nounwind {
+; X86-LABEL: blsmsk64_z:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    addl $-1, %eax
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    adcl $-1, %edx
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    xorl %esi, %edx
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    jne .LBB34_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:  .LBB34_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsmsk64_z:
+; X64:       # %bb.0:
+; X64-NEXT:    blsmskq %rdi, %rax
+; X64-NEXT:    cmoveq %rsi, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 %a, 1
+  %t1 = xor i64 %t0, %a
+  %t2 = icmp eq i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %t1
+  ret i64 %t3
+}
+
+define i64 @blsmsk64_z2(i64 %a, i64 %b, i64 %c) nounwind {
+; X86-LABEL: blsmsk64_z2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    addl $-1, %edx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    adcl $-1, %esi
+; X86-NEXT:    xorl %eax, %edx
+; X86-NEXT:    xorl %ecx, %esi
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    movl 4(%ecx), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsmsk64_z2:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    blsmskq %rdi, %rcx
+; X64-NEXT:    cmovneq %rdx, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 %a, 1
+  %t1 = xor i64 %t0, %a
+  %t2 = icmp eq i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @blsr32(i32 %x)   {
 ; X86-LABEL: blsr32:
 ; X86:       # %bb.0:
@@ -612,6 +838,51 @@
   ret i32 %tmp2
 }
 
+define i32 @blsr32_z(i32 %a, i32 %b) nounwind {
+; X86-LABEL: blsr32_z:
+; X86:       # %bb.0:
+; X86-NEXT:    blsrl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jne .LBB38_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB38_2:
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsr32_z:
+; X64:       # %bb.0:
+; X64-NEXT:    blsrl %edi, %eax
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 %a, 1
+  %t1 = and i32 %t0, %a
+  %t2 = icmp eq i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %t1
+  ret i32 %t3
+}
+
+define i32 @blsr32_z2(i32 %a, i32 %b, i32 %c) nounwind {
+; X86-LABEL: blsr32_z2:
+; X86:       # %bb.0:
+; X86-NEXT:    blsrl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsr32_z2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    blsrl %edi, %ecx
+; X64-NEXT:    cmovnel %edx, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 %a, 1
+  %t1 = and i32 %t0, %a
+  %t2 = icmp eq i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @blsr64(i64 %x)   {
 ; X86-LABEL: blsr64:
 ; X86:       # %bb.0:
@@ -639,6 +910,74 @@
   ret i64 %tmp2
 }
 
+define i64 @blsr64_z(i64 %a, i64 %b) nounwind {
+; X86-LABEL: blsr64_z:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    addl $-1, %eax
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    adcl $-1, %edx
+; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    andl %esi, %edx
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    jne .LBB41_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:  .LBB41_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsr64_z:
+; X64:       # %bb.0:
+; X64-NEXT:    blsrq %rdi, %rax
+; X64-NEXT:    cmoveq %rsi, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 %a, 1
+  %t1 = and i64 %t0, %a
+  %t2 = icmp eq i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %t1
+  ret i64 %t3
+}
+
+define i64 @blsr64_z2(i64 %a, i64 %b, i64 %c) nounwind {
+; X86-LABEL: blsr64_z2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    addl $-1, %edx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    adcl $-1, %esi
+; X86-NEXT:    andl %eax, %edx
+; X86-NEXT:    andl %ecx, %esi
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    movl 4(%ecx), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsr64_z2:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    blsrq %rdi, %rcx
+; X64-NEXT:    cmovneq %rdx, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 %a, 1
+  %t1 = and i64 %t0, %a
+  %t2 = icmp eq i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 ; PR35792 - https://bugs.llvm.org/show_bug.cgi?id=35792
 
 define i64 @blsr_disguised_constant(i64 %x) {
@@ -681,3 +1020,37 @@
   %c = and i64 %b, %a
   ret i64 %c
 }
+
+; FIXME: We should not be using the S flag from BEXTR.
+define void @pr40060(i32, i32) {
+; X86-LABEL: pr40060:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    js .LBB45_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    jmp bar # TAILCALL
+; X86-NEXT:  .LBB45_1:
+; X86-NEXT:    retl
+;
+; X64-LABEL: pr40060:
+; X64:       # %bb.0:
+; X64-NEXT:    bextrl %esi, %edi, %eax
+; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    js .LBB45_1
+; X64-NEXT:  # %bb.2:
+; X64-NEXT:    jmp bar # TAILCALL
+; X64-NEXT:  .LBB45_1:
+; X64-NEXT:    retq
+  %3 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %0, i32 %1)
+  %4 = icmp sgt i32 %3, -1
+  br i1 %4, label %5, label %6
+
+  tail call void @bar()
+  br label %6
+
+  ret void
+}
+
+declare void @bar()
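
The new *_z and *_z2 tests above verify that the ZF produced by BLSI/BLSMSK/BLSR feeds a branch or cmov directly, with no extra test instruction. As a reminder of the bit identities these instructions implement, a worked example with the hypothetical value x = 0xB4 = 0b10110100:

;   blsi(x)   = x & -x       = 0b00000100   (isolate lowest set bit)
;   blsmsk(x) = x ^ (x - 1)  = 0b00000111   (mask up through lowest set bit)
;   blsr(x)   = x & (x - 1)  = 0b10110000   (clear lowest set bit)

The pr40060 test pins down the opposite case: flags that must not be reused. BEXTR leaves SF undefined (only ZF is defined), so the sign check requires the explicit testl seen in the CHECK lines.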
diff --git a/test/CodeGen/X86/broadcastm-lowering.ll b/test/CodeGen/X86/broadcastm-lowering.ll
index f8d6701..986d313 100644
--- a/test/CodeGen/X86/broadcastm-lowering.ll
+++ b/test/CodeGen/X86/broadcastm-lowering.ll
@@ -43,15 +43,9 @@
 ; AVX512CD-LABEL: test_mm_epi32:
 ; AVX512CD:       # %bb.0: # %entry
 ; AVX512CD-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512CD-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; AVX512CD-NEXT:    kmovw %k0, %eax
-; AVX512CD-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vzeroupper
+; AVX512CD-NEXT:    vpmovmskb %xmm0, %eax
+; AVX512CD-NEXT:    vmovd %eax, %xmm0
+; AVX512CD-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VLCDBW-LABEL: test_mm_epi32:
@@ -75,23 +69,11 @@
 }
 
 define <16 x i32> @test_mm512_epi32(<16 x i32> %a, <16 x i32> %b) {
-; AVX512CD-LABEL: test_mm512_epi32:
-; AVX512CD:       # %bb.0: # %entry
-; AVX512CD-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; AVX512CD-NEXT:    vpbroadcastmw2d %k0, %zmm0
-; AVX512CD-NEXT:    retq
-;
-; AVX512VLCDBW-LABEL: test_mm512_epi32:
-; AVX512VLCDBW:       # %bb.0: # %entry
-; AVX512VLCDBW-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; AVX512VLCDBW-NEXT:    vpbroadcastmw2d %k0, %zmm0
-; AVX512VLCDBW-NEXT:    retq
-;
-; X86-AVX512VLCDBW-LABEL: test_mm512_epi32:
-; X86-AVX512VLCDBW:       # %bb.0: # %entry
-; X86-AVX512VLCDBW-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; X86-AVX512VLCDBW-NEXT:    vpbroadcastmw2d %k0, %zmm0
-; X86-AVX512VLCDBW-NEXT:    retl
+; ALL-LABEL: test_mm512_epi32:
+; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; ALL-NEXT:    vpbroadcastmw2d %k0, %zmm0
+; ALL-NEXT:    ret{{[l|q]}}
 entry:
   %0 = icmp eq <16 x i32> %a, %b
   %1 = bitcast <16 x i1> %0 to i16
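
For reference, the broadcastm pattern matched by these tests is: compare to produce a mask, bitcast the mask to an integer, and splat that integer to every lane, which AVX512CD folds into a single vpbroadcastmw2d. A sketch of the likely IR, extrapolated from the entry block visible above (the zext/insert/shuffle tail is an assumption):

define <16 x i32> @broadcastm_sketch(<16 x i32> %a, <16 x i32> %b) {
entry:
  %0 = icmp eq <16 x i32> %a, %b      ; 16-bit compare mask
  %1 = bitcast <16 x i1> %0 to i16
  %conv = zext i16 %1 to i32
  %vec = insertelement <16 x i32> undef, i32 %conv, i32 0
  %bc = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> zeroinitializer
  ret <16 x i32> %bc                  ; mask value broadcast to all lanes
}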
diff --git a/test/CodeGen/X86/build-vector-128.ll b/test/CodeGen/X86/build-vector-128.ll
index 6c0c2d3..b80f6fa 100644
--- a/test/CodeGen/X86/build-vector-128.ll
+++ b/test/CodeGen/X86/build-vector-128.ll
@@ -507,3 +507,54 @@
   %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %splat
 }
+
+; PR37502 - https://bugs.llvm.org/show_bug.cgi?id=37502
+; Don't use a series of insertps when movddup will do.
+
+define <4 x float> @PR37502(float %x, float %y) {
+; SSE2-32-LABEL: PR37502:
+; SSE2-32:       # %bb.0:
+; SSE2-32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE2-32-NEXT:    retl
+;
+; SSE2-64-LABEL: PR37502:
+; SSE2-64:       # %bb.0:
+; SSE2-64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE2-64-NEXT:    retq
+;
+; SSE41-32-LABEL: PR37502:
+; SSE41-32:       # %bb.0:
+; SSE41-32-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
+; SSE41-32-NEXT:    retl
+;
+; SSE41-64-LABEL: PR37502:
+; SSE41-64:       # %bb.0:
+; SSE41-64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; SSE41-64-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-64-NEXT:    retq
+;
+; AVX-32-LABEL: PR37502:
+; AVX-32:       # %bb.0:
+; AVX-32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-32-NEXT:    retl
+;
+; AVX1-64-LABEL: PR37502:
+; AVX1-64:       # %bb.0:
+; AVX1-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX1-64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-64-NEXT:    retq
+;
+; AVX2-64-LABEL: PR37502:
+; AVX2-64:       # %bb.0:
+; AVX2-64-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2-64-NEXT:    retq
+  %i0 = insertelement <4 x float> undef, float %x, i32 0
+  %i1 = insertelement <4 x float> %i0, float %y, i32 1
+  %i2 = insertelement <4 x float> %i1, float %x, i32 2
+  %i3 = insertelement <4 x float> %i2, float %y, i32 3
+  ret <4 x float> %i3
+}
+
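
The PR37502 improvement works because movddup duplicates the low 64 bits of its source, so once x and y occupy lanes 0 and 1, one instruction fills lanes 2 and 3. Illustrative lane diagram (values hypothetical):

;   xmm0 = [ x, y, _, _ ]            after unpcklps/insertps
;   movddup xmm0 = [ x, y, x, y ]    low qword duplicated into the high qword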
diff --git a/test/CodeGen/X86/buildvec-extract.ll b/test/CodeGen/X86/buildvec-extract.ll
new file mode 100644
index 0000000..b304580
--- /dev/null
+++ b/test/CodeGen/X86/buildvec-extract.ll
@@ -0,0 +1,706 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2   | FileCheck %s --check-prefixes=ANY,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ANY,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx    | FileCheck %s --check-prefixes=ANY,AVX
+
+define <2 x i64> @extract0_i32_zext_insert0_i64_undef(<4 x i32> %x) {
+; SSE2-LABEL: extract0_i32_zext_insert0_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract0_i32_zext_insert0_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract0_i32_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract0_i32_zext_insert0_i64_zero(<4 x i32> %x) {
+; SSE-LABEL: extract0_i32_zext_insert0_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract0_i32_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i32_zext_insert0_i64_undef(<4 x i32> %x) {
+; SSE-LABEL: extract1_i32_zext_insert0_i64_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $32, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract1_i32_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 1
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i32_zext_insert0_i64_zero(<4 x i32> %x) {
+; SSE2-LABEL: extract1_i32_zext_insert0_i64_zero:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movq %rax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract1_i32_zext_insert0_i64_zero:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $1, %xmm0, %eax
+; SSE41-NEXT:    movq %rax, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract1_i32_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $1, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 1
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i32_zext_insert0_i64_undef(<4 x i32> %x) {
+; SSE-LABEL: extract2_i32_zext_insert0_i64_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract2_i32_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 2
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i32_zext_insert0_i64_zero(<4 x i32> %x) {
+; SSE2-LABEL: extract2_i32_zext_insert0_i64_zero:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movq %rax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract2_i32_zext_insert0_i64_zero:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $2, %xmm0, %eax
+; SSE41-NEXT:    movq %rax, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract2_i32_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $2, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 2
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i32_zext_insert0_i64_undef(<4 x i32> %x) {
+; SSE-LABEL: extract3_i32_zext_insert0_i64_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract3_i32_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i32_zext_insert0_i64_zero(<4 x i32> %x) {
+; SSE2-LABEL: extract3_i32_zext_insert0_i64_zero:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movq %rax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract3_i32_zext_insert0_i64_zero:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $3, %xmm0, %eax
+; SSE41-NEXT:    movq %rax, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract3_i32_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract0_i32_zext_insert1_i64_undef(<4 x i32> %x) {
+; SSE2-LABEL: extract0_i32_zext_insert1_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract0_i32_zext_insert1_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract0_i32_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract0_i32_zext_insert1_i64_zero(<4 x i32> %x) {
+; SSE-LABEL: extract0_i32_zext_insert1_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract0_i32_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i32_zext_insert1_i64_undef(<4 x i32> %x) {
+; SSE2-LABEL: extract1_i32_zext_insert1_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract1_i32_zext_insert1_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract1_i32_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 1
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i32_zext_insert1_i64_zero(<4 x i32> %x) {
+; SSE2-LABEL: extract1_i32_zext_insert1_i64_zero:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movq %rax, %xmm0
+; SSE2-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract1_i32_zext_insert1_i64_zero:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $1, %xmm0, %eax
+; SSE41-NEXT:    movq %rax, %xmm0
+; SSE41-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract1_i32_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $1, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 1
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i32_zext_insert1_i64_undef(<4 x i32> %x) {
+; SSE2-LABEL: extract2_i32_zext_insert1_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract2_i32_zext_insert1_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    xorps %xmm1, %xmm1
+; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract2_i32_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 2
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i32_zext_insert1_i64_zero(<4 x i32> %x) {
+; SSE2-LABEL: extract2_i32_zext_insert1_i64_zero:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movq %rax, %xmm0
+; SSE2-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract2_i32_zext_insert1_i64_zero:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $2, %xmm0, %eax
+; SSE41-NEXT:    movq %rax, %xmm0
+; SSE41-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract2_i32_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $2, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 2
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i32_zext_insert1_i64_undef(<4 x i32> %x) {
+; SSE-LABEL: extract3_i32_zext_insert1_i64_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $32, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract3_i32_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i32_zext_insert1_i64_zero(<4 x i32> %x) {
+; SSE2-LABEL: extract3_i32_zext_insert1_i64_zero:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movq %rax, %xmm0
+; SSE2-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract3_i32_zext_insert1_i64_zero:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $3, %xmm0, %eax
+; SSE41-NEXT:    movq %rax, %xmm0
+; SSE41-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract3_i32_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %z = zext i32 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
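+; Note on the insert-into-element-1 cases above: with a zeroinitializer
+; base the value is extracted to a GPR, moved back with movq, and
+; pslldq $8 shifts it into the upper i64 lane while zeroing the lower
+; one; with an undef base, cheaper in-register tricks suffice, e.g.
+; psrlq $32 leaves zext(x[3]) in the upper lane and don't-care bits in
+; the (undef) lower lane.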
+
+define <2 x i64> @extract0_i16_zext_insert0_i64_undef(<8 x i16> %x) {
+; SSE2-LABEL: extract0_i16_zext_insert0_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract0_i16_zext_insert0_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract0_i16_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 0
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract0_i16_zext_insert0_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract0_i16_zext_insert0_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $0, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract0_i16_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $0, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 0
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i16_zext_insert0_i64_undef(<8 x i16> %x) {
+; SSE2-LABEL: extract1_i16_zext_insert0_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psrld $16, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract1_i16_zext_insert0_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    psrld $16, %xmm0
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract1_i16_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
+; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 1
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i16_zext_insert0_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract1_i16_zext_insert0_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $1, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract1_i16_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 1
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i16_zext_insert0_i64_undef(<8 x i16> %x) {
+; SSE2-LABEL: extract2_i16_zext_insert0_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract2_i16_zext_insert0_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract2_i16_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 2
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i16_zext_insert0_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract2_i16_zext_insert0_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $2, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract2_i16_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 2
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i16_zext_insert0_i64_undef(<8 x i16> %x) {
+; SSE-LABEL: extract3_i16_zext_insert0_i64_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $48, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract3_i16_zext_insert0_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 3
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i16_zext_insert0_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract3_i16_zext_insert0_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $3, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract3_i16_zext_insert0_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 3
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 0
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract0_i16_zext_insert1_i64_undef(<8 x i16> %x) {
+; SSE2-LABEL: extract0_i16_zext_insert1_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract0_i16_zext_insert1_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract0_i16_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 0
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract0_i16_zext_insert1_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract0_i16_zext_insert1_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $0, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract0_i16_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $0, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 0
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i16_zext_insert1_i64_undef(<8 x i16> %x) {
+; SSE2-LABEL: extract1_i16_zext_insert1_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract1_i16_zext_insert1_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract1_i16_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 1
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract1_i16_zext_insert1_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract1_i16_zext_insert1_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $1, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract1_i16_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 1
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i16_zext_insert1_i64_undef(<8 x i16> %x) {
+; SSE2-LABEL: extract2_i16_zext_insert1_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract2_i16_zext_insert1_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract2_i16_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 2
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract2_i16_zext_insert1_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract2_i16_zext_insert1_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $2, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract2_i16_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 2
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i16_zext_insert1_i64_undef(<8 x i16> %x) {
+; SSE2-LABEL: extract3_i16_zext_insert1_i64_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract3_i16_zext_insert1_i64_undef:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract3_i16_zext_insert1_i64_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 3
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> undef, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @extract3_i16_zext_insert1_i64_zero(<8 x i16> %x) {
+; SSE-LABEL: extract3_i16_zext_insert1_i64_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrw $3, %xmm0, %eax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract3_i16_zext_insert1_i64_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+  %e = extractelement <8 x i16> %x, i32 3
+  %z = zext i16 %e to i64
+  %r = insertelement <2 x i64> zeroinitializer, i64 %z, i32 1
+  ret <2 x i64> %r
+}
+
diff --git a/test/CodeGen/X86/bypass-slow-division-tune.ll b/test/CodeGen/X86/bypass-slow-division-tune.ll
index 2439f46..a387d68 100644
--- a/test/CodeGen/X86/bypass-slow-division-tune.ll
+++ b/test/CodeGen/X86/bypass-slow-division-tune.ll
@@ -1,32 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; Check that a division is bypassed only when appropriate.
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mcpu=atom       < %s | FileCheck -check-prefixes=ATOM,CHECK %s
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mcpu=silvermont < %s | FileCheck -check-prefixes=REST,CHECK %s
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake    < %s | FileCheck -check-prefixes=REST,CHECK %s
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mcpu=atom       < %s | FileCheck -check-prefixes=CHECK,ATOM %s
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mcpu=silvermont < %s | FileCheck -check-prefixes=CHECK,REST,SLM %s
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake    < %s | FileCheck -check-prefixes=CHECK,REST,SKL %s
 ; RUN: llc -profile-summary-huge-working-set-size-threshold=1 -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake    < %s | FileCheck -check-prefixes=HUGEWS %s
 
 ; Verify that div32 is bypassed only for Atoms.
 define i32 @div32(i32 %a, i32 %b) {
-entry:
 ; ATOM-LABEL: div32:
-; ATOM: orl   %{{.*}}, [[REG:%[a-z]+]]
-; ATOM: testl $-256, [[REG]]
-; ATOM: divb
+; ATOM:       # %bb.0: # %entry
+; ATOM-NEXT:    movl %edi, %eax
+; ATOM-NEXT:    orl %esi, %eax
+; ATOM-NEXT:    testl $-256, %eax
+; ATOM-NEXT:    je .LBB0_1
+; ATOM-NEXT:  # %bb.2:
+; ATOM-NEXT:    movl %edi, %eax
+; ATOM-NEXT:    cltd
+; ATOM-NEXT:    idivl %esi
+; ATOM-NEXT:    retq
+; ATOM-NEXT:  .LBB0_1:
+; ATOM-NEXT:    movzbl %dil, %eax
+; ATOM-NEXT:    # kill: def $eax killed $eax def $ax
+; ATOM-NEXT:    divb %sil
+; ATOM-NEXT:    movzbl %al, %eax
+; ATOM-NEXT:    retq
 ;
 ; REST-LABEL: div32:
-; REST-NOT: divb
+; REST:       # %bb.0: # %entry
+; REST-NEXT:    movl %edi, %eax
+; REST-NEXT:    cltd
+; REST-NEXT:    idivl %esi
+; REST-NEXT:    retq
 ;
+; HUGEWS-LABEL: div32:
+; HUGEWS:       # %bb.0: # %entry
+; HUGEWS-NEXT:    movl %edi, %eax
+; HUGEWS-NEXT:    cltd
+; HUGEWS-NEXT:    idivl %esi
+; HUGEWS-NEXT:    retq
+entry:
   %div = sdiv i32 %a, %b
   ret i32 %div
 }
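+; The guard computes (a | b) and tests it against $-256 (0xFFFFFF00): if
+; neither operand has bits set above bit 7, both lie in [0, 255], the
+; signed divide equals the unsigned one, and Atom takes the fast 8-bit
+; divb path; otherwise it falls through to the full idivl.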
 
 ; Verify that div64 is always bypassed.
 define i64 @div64(i64 %a, i64 %b) {
-entry:
-; CHECK-LABEL: div64:
-; CHECK:     orq     %{{.*}}, [[REG:%[a-z]+]]
-; CHECK:     shrq    $32, [[REG]]
-; CHECK:     divl
+; ATOM-LABEL: div64:
+; ATOM:       # %bb.0: # %entry
+; ATOM-NEXT:    movq %rdi, %rcx
+; ATOM-NEXT:    movq %rdi, %rax
+; ATOM-NEXT:    orq %rsi, %rcx
+; ATOM-NEXT:    shrq $32, %rcx
+; ATOM-NEXT:    je .LBB1_1
+; ATOM-NEXT:  # %bb.2:
+; ATOM-NEXT:    cqto
+; ATOM-NEXT:    idivq %rsi
+; ATOM-NEXT:    retq
+; ATOM-NEXT:  .LBB1_1:
+; ATOM-NEXT:    # kill: def $eax killed $eax killed $rax
+; ATOM-NEXT:    xorl %edx, %edx
+; ATOM-NEXT:    divl %esi
+; ATOM-NEXT:    # kill: def $eax killed $eax def $rax
+; ATOM-NEXT:    retq
 ;
+; SLM-LABEL: div64:
+; SLM:       # %bb.0: # %entry
+; SLM-NEXT:    movq %rdi, %rcx
+; SLM-NEXT:    movq %rdi, %rax
+; SLM-NEXT:    orq %rsi, %rcx
+; SLM-NEXT:    shrq $32, %rcx
+; SLM-NEXT:    je .LBB1_1
+; SLM-NEXT:  # %bb.2:
+; SLM-NEXT:    cqto
+; SLM-NEXT:    idivq %rsi
+; SLM-NEXT:    retq
+; SLM-NEXT:  .LBB1_1:
+; SLM-NEXT:    xorl %edx, %edx
+; SLM-NEXT:    # kill: def $eax killed $eax killed $rax
+; SLM-NEXT:    divl %esi
+; SLM-NEXT:    # kill: def $eax killed $eax def $rax
+; SLM-NEXT:    retq
+;
+; SKL-LABEL: div64:
+; SKL:       # %bb.0: # %entry
+; SKL-NEXT:    movq %rdi, %rax
+; SKL-NEXT:    movq %rdi, %rcx
+; SKL-NEXT:    orq %rsi, %rcx
+; SKL-NEXT:    shrq $32, %rcx
+; SKL-NEXT:    je .LBB1_1
+; SKL-NEXT:  # %bb.2:
+; SKL-NEXT:    cqto
+; SKL-NEXT:    idivq %rsi
+; SKL-NEXT:    retq
+; SKL-NEXT:  .LBB1_1:
+; SKL-NEXT:    # kill: def $eax killed $eax killed $rax
+; SKL-NEXT:    xorl %edx, %edx
+; SKL-NEXT:    divl %esi
+; SKL-NEXT:    # kill: def $eax killed $eax def $rax
+; SKL-NEXT:    retq
+;
+; HUGEWS-LABEL: div64:
+; HUGEWS:       # %bb.0: # %entry
+; HUGEWS-NEXT:    movq %rdi, %rax
+; HUGEWS-NEXT:    cqto
+; HUGEWS-NEXT:    idivq %rsi
+; HUGEWS-NEXT:    retq
+entry:
   %div = sdiv i64 %a, %b
   ret i64 %div
 }
@@ -36,32 +115,119 @@
 
 define i64 @div64_optsize(i64 %a, i64 %b) optsize {
 ; CHECK-LABEL: div64_optsize:
-; CHECK-NOT: divl
-; CHECK: ret
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    cqto
+; CHECK-NEXT:    idivq %rsi
+; CHECK-NEXT:    retq
+;
+; HUGEWS-LABEL: div64_optsize:
+; HUGEWS:       # %bb.0:
+; HUGEWS-NEXT:    movq %rdi, %rax
+; HUGEWS-NEXT:    cqto
+; HUGEWS-NEXT:    idivq %rsi
+; HUGEWS-NEXT:    retq
   %div = sdiv i64 %a, %b
   ret i64 %div
 }
 
 define i64 @div64_hugews(i64 %a, i64 %b) {
+; ATOM-LABEL: div64_hugews:
+; ATOM:       # %bb.0:
+; ATOM-NEXT:    movq %rdi, %rcx
+; ATOM-NEXT:    movq %rdi, %rax
+; ATOM-NEXT:    orq %rsi, %rcx
+; ATOM-NEXT:    shrq $32, %rcx
+; ATOM-NEXT:    je .LBB3_1
+; ATOM-NEXT:  # %bb.2:
+; ATOM-NEXT:    cqto
+; ATOM-NEXT:    idivq %rsi
+; ATOM-NEXT:    retq
+; ATOM-NEXT:  .LBB3_1:
+; ATOM-NEXT:    # kill: def $eax killed $eax killed $rax
+; ATOM-NEXT:    xorl %edx, %edx
+; ATOM-NEXT:    divl %esi
+; ATOM-NEXT:    # kill: def $eax killed $eax def $rax
+; ATOM-NEXT:    retq
+;
+; SLM-LABEL: div64_hugews:
+; SLM:       # %bb.0:
+; SLM-NEXT:    movq %rdi, %rcx
+; SLM-NEXT:    movq %rdi, %rax
+; SLM-NEXT:    orq %rsi, %rcx
+; SLM-NEXT:    shrq $32, %rcx
+; SLM-NEXT:    je .LBB3_1
+; SLM-NEXT:  # %bb.2:
+; SLM-NEXT:    cqto
+; SLM-NEXT:    idivq %rsi
+; SLM-NEXT:    retq
+; SLM-NEXT:  .LBB3_1:
+; SLM-NEXT:    xorl %edx, %edx
+; SLM-NEXT:    # kill: def $eax killed $eax killed $rax
+; SLM-NEXT:    divl %esi
+; SLM-NEXT:    # kill: def $eax killed $eax def $rax
+; SLM-NEXT:    retq
+;
+; SKL-LABEL: div64_hugews:
+; SKL:       # %bb.0:
+; SKL-NEXT:    movq %rdi, %rax
+; SKL-NEXT:    movq %rdi, %rcx
+; SKL-NEXT:    orq %rsi, %rcx
+; SKL-NEXT:    shrq $32, %rcx
+; SKL-NEXT:    je .LBB3_1
+; SKL-NEXT:  # %bb.2:
+; SKL-NEXT:    cqto
+; SKL-NEXT:    idivq %rsi
+; SKL-NEXT:    retq
+; SKL-NEXT:  .LBB3_1:
+; SKL-NEXT:    # kill: def $eax killed $eax killed $rax
+; SKL-NEXT:    xorl %edx, %edx
+; SKL-NEXT:    divl %esi
+; SKL-NEXT:    # kill: def $eax killed $eax def $rax
+; SKL-NEXT:    retq
+;
 ; HUGEWS-LABEL: div64_hugews:
-; HUGEWS-NOT: divl
-; HUGEWS: ret
+; HUGEWS:       # %bb.0:
+; HUGEWS-NEXT:    movq %rdi, %rax
+; HUGEWS-NEXT:    cqto
+; HUGEWS-NEXT:    idivq %rsi
+; HUGEWS-NEXT:    retq
   %div = sdiv i64 %a, %b
   ret i64 %div
 }
 
 define i32 @div32_optsize(i32 %a, i32 %b) optsize {
 ; CHECK-LABEL: div32_optsize:
-; CHECK-NOT: divb
-; CHECK: ret
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    cltd
+; CHECK-NEXT:    idivl %esi
+; CHECK-NEXT:    retq
+;
+; HUGEWS-LABEL: div32_optsize:
+; HUGEWS:       # %bb.0:
+; HUGEWS-NEXT:    movl %edi, %eax
+; HUGEWS-NEXT:    cltd
+; HUGEWS-NEXT:    idivl %esi
+; HUGEWS-NEXT:    retq
   %div = sdiv i32 %a, %b
   ret i32 %div
 }
 
 define i32 @div32_minsize(i32 %a, i32 %b) minsize {
 ; CHECK-LABEL: div32_minsize:
-; CHECK-NOT: divb
-; CHECK: ret
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    cltd
+; CHECK-NEXT:    idivl %esi
+; CHECK-NEXT:    retq
+;
+; HUGEWS-LABEL: div32_minsize:
+; HUGEWS:       # %bb.0:
+; HUGEWS-NEXT:    movl %edi, %eax
+; HUGEWS-NEXT:    cltd
+; HUGEWS-NEXT:    idivl %esi
+; HUGEWS-NEXT:    retq
   %div = sdiv i32 %a, %b
   ret i32 %div
 }
diff --git a/test/CodeGen/X86/call-push.ll b/test/CodeGen/X86/call-push.ll
index e8afa1e..aee7793 100644
--- a/test/CodeGen/X86/call-push.ll
+++ b/test/CodeGen/X86/call-push.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -disable-fp-elim -no-x86-call-frame-opt | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin -frame-pointer=all -no-x86-call-frame-opt | FileCheck %s
 
         %struct.decode_t = type { i8, i8, i8, i8, i16, i8, i8, %struct.range_t** }
         %struct.range_t = type { float, float, i32, i32, i32, [0 x i8] }
diff --git a/test/CodeGen/X86/cmp.ll b/test/CodeGen/X86/cmp.ll
index b6ecda0..2747eca 100644
--- a/test/CodeGen/X86/cmp.ll
+++ b/test/CodeGen/X86/cmp.ll
@@ -465,3 +465,21 @@
   ret i32 %ret
 
 }
+
+define { i64, i64 } @pr39968(i64, i64, i32) {
+; CHECK-LABEL: pr39968:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT:    testb $64, %dl # encoding: [0xf6,0xc2,0x40]
+; CHECK-NEXT:    cmovneq %rdi, %rsi # encoding: [0x48,0x0f,0x45,0xf7]
+; CHECK-NEXT:    cmovneq %rdi, %rax # encoding: [0x48,0x0f,0x45,0xc7]
+; CHECK-NEXT:    movq %rsi, %rdx # encoding: [0x48,0x89,0xf2]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %4 = and i32 %2, 64
+  %5 = icmp ne i32 %4, 0
+  %6 = select i1 %5, i64 %0, i64 %1
+  %7 = select i1 %5, i64 %0, i64 0
+  %8 = insertvalue { i64, i64 } undef, i64 %7, 0
+  %9 = insertvalue { i64, i64 } %8, i64 %6, 1
+  ret { i64, i64 } %9
+}
diff --git a/test/CodeGen/X86/coalescer-dce.ll b/test/CodeGen/X86/coalescer-dce.ll
index 90a0772..7685526 100644
--- a/test/CodeGen/X86/coalescer-dce.ll
+++ b/test/CodeGen/X86/coalescer-dce.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -verify-machineinstrs -disable-fp-elim -disable-machine-dce -verify-coalescing
+; RUN: llc < %s -verify-machineinstrs -frame-pointer=all -disable-machine-dce -verify-coalescing
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-macosx10.7.0"
 
diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll
index 30bba8d..95a2b7e 100644
--- a/test/CodeGen/X86/combine-abs.ll
+++ b/test/CodeGen/X86/combine-abs.ll
@@ -67,12 +67,8 @@
 ; AVX2-LABEL: combine_v4i64_abs_abs:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
-; AVX2-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
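+; (The new sequence computes 0 - x into %ymm1 and uses x itself as the
+; vblendvpd control: the blend selects -x exactly where the sign bit of
+; x is set, i.e. abs(x), replacing the old compare/add/xor expansion.)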
 ;
 ; AVX512F-LABEL: combine_v4i64_abs_abs:
diff --git a/test/CodeGen/X86/combine-add-ssat.ll b/test/CodeGen/X86/combine-add-ssat.ll
new file mode 100644
index 0000000..c261253
--- /dev/null
+++ b/test/CodeGen/X86/combine-add-ssat.ll
@@ -0,0 +1,162 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW
+
+declare i32 @llvm.sadd.sat.i32(i32, i32)
+declare i64 @llvm.sadd.sat.i64(i64, i64)
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+
+; fold (sadd_sat x, undef) -> -1
+define i32 @combine_undef_i32(i32 %a0) {
+; CHECK-LABEL: combine_undef_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $-1, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 undef)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
+  ret <8 x i16> %res
+}
+
+; fold (sadd_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.sadd.sat.i32(i32 2147483647, i32 100)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1,0,256,65534,0,65280,32768,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1,0,256,65534,0,65280,32768,0]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
+  ret <8 x i16> %res
+}
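+; Worked lane from the fold above: sadd_sat(i16 -32760, i16 -10) = -32770,
+; which saturates to the i16 minimum -32768 (0x8000, printed as 32768 in
+; the CHECK vector); sadd_sat(-1, -1) = -2 prints as 65534.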
+
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535,65535,65534,0,65280,32768,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65534,0,65280,32768,0]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
+  ret <8 x i16> %res
+}
+
+; fold (sadd_sat c, x) -> (sadd_sat x, c)
+define i32 @combine_constant_i32(i32 %a0) {
+; CHECK-LABEL: combine_constant_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movl %edi, %ecx
+; CHECK-NEXT:    incl %ecx
+; CHECK-NEXT:    setns %al
+; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    incl %edi
+; CHECK-NEXT:    cmovnol %edi, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.sadd.sat.i32(i32 1, i32 %a0)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_constant_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constant_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddsw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
+  ret <8 x i16> %res
+}
+
+; fold (sadd_sat x, 0) -> x
+define i32 @combine_zero_i32(i32 %a0) {
+; CHECK-LABEL: combine_zero_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
+; CHECK-LABEL: combine_zero_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
+  ret <8 x i16> %1
+}
+
+; fold (sadd_sat x, y) -> (add x, y) iff no overflow
+define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
+; CHECK-LABEL: combine_no_overflow_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sarl $16, %edi
+; CHECK-NEXT:    shrl $16, %esi
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movl %edi, %ecx
+; CHECK-NEXT:    addl %esi, %ecx
+; CHECK-NEXT:    setns %al
+; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    addl %edi, %esi
+; CHECK-NEXT:    cmovnol %esi, %eax
+; CHECK-NEXT:    retq
+  %1 = ashr i32 %a0, 16
+  %2 = lshr i32 %a1, 16
+  %3 = call i32 @llvm.sadd.sat.i32(i32 %1, i32 %2)
+  ret i32 %3
+}
+
+define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_no_overflow_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psraw $10, %xmm0
+; SSE-NEXT:    psrlw $10, %xmm1
+; SSE-NEXT:    paddsw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_no_overflow_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsraw $10, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
+; AVX-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = ashr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %3 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
+  ret <8 x i16> %3
+}
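+; Range check for the no-overflow tests: psraw $10 yields [-32, 31] and
+; psrlw $10 yields [0, 63], so the i16 sums can never saturate; the CHECK
+; lines show the signed case still emits saturating adds (paddsw), so the
+; (add x, y) fold is not applied here yet.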
diff --git a/test/CodeGen/X86/combine-add-usat.ll b/test/CodeGen/X86/combine-add-usat.ll
new file mode 100644
index 0000000..7565f06
--- /dev/null
+++ b/test/CodeGen/X86/combine-add-usat.ll
@@ -0,0 +1,154 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW
+
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+
+; fold (uadd_sat x, undef) -> -1
+define i32 @combine_undef_i32(i32 %a0) {
+; CHECK-LABEL: combine_undef_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $-1, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 undef)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
+  ret <8 x i16> %res
+}
+
+; fold (uadd_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $-1, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.uadd.sat.i32(i32 4294967295, i32 100)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
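+; Worked lane from the fold above: i16 -65535 wraps to 1, so
+; uadd_sat(-65535, 1) = 2; uadd_sat(255, 1) = 256; any lane whose true sum
+; exceeds 65535 clamps to 65535.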
+
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
+
+; fold (uadd_sat c, x) -> (uadd_sat x, c)
+define i32 @combine_constant_i32(i32 %a0) {
+; CHECK-LABEL: combine_constant_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addl $1, %edi
+; CHECK-NEXT:    movl $-1, %eax
+; CHECK-NEXT:    cmovael %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_constant_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constant_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
+  ret <8 x i16> %1
+}
+
+; fold (uadd_sat x, 0) -> x
+define i32 @combine_zero_i32(i32 %a0) {
+; CHECK-LABEL: combine_zero_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
+; CHECK-LABEL: combine_zero_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
+  ret <8 x i16> %1
+}
+
+; fold (uadd_sat x, y) -> (add x, y) iff no overflow
+define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
+; CHECK-LABEL: combine_no_overflow_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT:    shrl $16, %edi
+; CHECK-NEXT:    shrl $16, %esi
+; CHECK-NEXT:    leal (%rsi,%rdi), %eax
+; CHECK-NEXT:    retq
+  %1 = lshr i32 %a0, 16
+  %2 = lshr i32 %a1, 16
+  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2)
+  ret i32 %3
+}
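+; Here the fold does fire: each operand is lshr'd by 16, so both are at
+; most 65535 and the 32-bit sum cannot wrap; codegen emits a plain add
+; (the leal above) with no saturation check.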
+
+define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_no_overflow_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $10, %xmm0
+; SSE-NEXT:    psrlw $10, %xmm1
+; SSE-NEXT:    paddw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_no_overflow_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $10, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
+; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
+  ret <8 x i16> %3
+}
diff --git a/test/CodeGen/X86/combine-bitselect.ll b/test/CodeGen/X86/combine-bitselect.ll
new file mode 100644
index 0000000..25d35a8
--- /dev/null
+++ b/test/CodeGen/X86/combine-bitselect.ll
@@ -0,0 +1,592 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop  | FileCheck %s --check-prefixes=XOP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
+
+;
+; 128-bit vectors
+;
+
+define <2 x i64> @bitselect_v2i64_rr(<2 x i64>, <2 x i64>) {
+; SSE-LABEL: bitselect_v2i64_rr:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v2i64_rr:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v2i64_rr:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %3 = and <2 x i64> %0, <i64 4294967296, i64 12884901890>
+  %4 = and <2 x i64> %1, <i64 -4294967297, i64 -12884901891>
+  %5 = or <2 x i64> %4, %3
+  ret <2 x i64> %5
+}
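+; All of these tests compute bitselect(x, y, M) = (x & M) | (y & ~M): each
+; result bit comes from %0 where the mask bit is set and from %1 where it
+; is clear. XOP's vpcmov expresses this in one instruction, though this
+; revision still emits the and/and/or sequence.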
+
+define <2 x i64> @bitselect_v2i64_rm(<2 x i64>, <2 x i64>* nocapture readonly) {
+; SSE-LABEL: bitselect_v2i64_rm:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps (%rdi), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v2i64_rm:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovaps (%rdi), %xmm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v2i64_rm:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %xmm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %3 = load <2 x i64>, <2 x i64>* %1
+  %4 = and <2 x i64> %0, <i64 8589934593, i64 3>
+  %5 = and <2 x i64> %3, <i64 -8589934594, i64 -4>
+  %6 = or <2 x i64> %5, %4
+  ret <2 x i64> %6
+}
+
+define <2 x i64> @bitselect_v2i64_mr(<2 x i64>* nocapture readonly, <2 x i64>) {
+; SSE-LABEL: bitselect_v2i64_mr:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps (%rdi), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v2i64_mr:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovaps (%rdi), %xmm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v2i64_mr:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %xmm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %3 = load <2 x i64>, <2 x i64>* %0
+  %4 = and <2 x i64> %3, <i64 12884901890, i64 4294967296>
+  %5 = and <2 x i64> %1, <i64 -12884901891, i64 -4294967297>
+  %6 = or <2 x i64> %4, %5
+  ret <2 x i64> %6
+}
+
+define <2 x i64> @bitselect_v2i64_mm(<2 x i64>* nocapture readonly, <2 x i64>* nocapture readonly) {
+; SSE-LABEL: bitselect_v2i64_mm:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps (%rdi), %xmm1
+; SSE-NEXT:    movaps (%rsi), %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v2i64_mm:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovaps (%rdi), %xmm0
+; XOP-NEXT:    vmovaps (%rsi), %xmm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v2i64_mm:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %xmm0
+; AVX-NEXT:    vmovaps (%rsi), %xmm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %3 = load <2 x i64>, <2 x i64>* %0
+  %4 = load <2 x i64>, <2 x i64>* %1
+  %5 = and <2 x i64> %3, <i64 3, i64 8589934593>
+  %6 = and <2 x i64> %4, <i64 -4, i64 -8589934594>
+  %7 = or <2 x i64> %6, %5
+  ret <2 x i64> %7
+}
+
+;
+; 256-bit vectors
+;
+
+define <4 x i64> @bitselect_v4i64_rr(<4 x i64>, <4 x i64>) {
+; SSE-LABEL: bitselect_v4i64_rr:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm3
+; SSE-NEXT:    orps %xmm3, %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm2
+; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v4i64_rr:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; XOP-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v4i64_rr:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %3 = and <4 x i64> %0, <i64 4294967296, i64 12884901890, i64 12884901890, i64 12884901890>
+  %4 = and <4 x i64> %1, <i64 -4294967297, i64 -12884901891, i64 -12884901891, i64 -12884901891>
+  %5 = or <4 x i64> %4, %3
+  ret <4 x i64> %5
+}
+
+define <4 x i64> @bitselect_v4i64_rm(<4 x i64>, <4 x i64>* nocapture readonly) {
+; SSE-LABEL: bitselect_v4i64_rm:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [8589934593,3]
+; SSE-NEXT:    andps %xmm2, %xmm1
+; SSE-NEXT:    andps %xmm2, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [18446744065119617022,18446744073709551612]
+; SSE-NEXT:    movaps 16(%rdi), %xmm3
+; SSE-NEXT:    andps %xmm2, %xmm3
+; SSE-NEXT:    orps %xmm3, %xmm1
+; SSE-NEXT:    andps (%rdi), %xmm2
+; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v4i64_rm:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovaps (%rdi), %ymm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; XOP-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v4i64_rm:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %ymm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %3 = load <4 x i64>, <4 x i64>* %1
+  %4 = and <4 x i64> %0, <i64 8589934593, i64 3, i64 8589934593, i64 3>
+  %5 = and <4 x i64> %3, <i64 -8589934594, i64 -4, i64 -8589934594, i64 -4>
+  %6 = or <4 x i64> %5, %4
+  ret <4 x i64> %6
+}
+
+define <4 x i64> @bitselect_v4i64_mr(<4 x i64>* nocapture readonly, <4 x i64>) {
+; SSE-LABEL: bitselect_v4i64_mr:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [12884901890,4294967296]
+; SSE-NEXT:    movaps 16(%rdi), %xmm3
+; SSE-NEXT:    andps %xmm2, %xmm3
+; SSE-NEXT:    andps (%rdi), %xmm2
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [18446744060824649725,18446744069414584319]
+; SSE-NEXT:    andps %xmm4, %xmm1
+; SSE-NEXT:    orps %xmm3, %xmm1
+; SSE-NEXT:    andps %xmm4, %xmm0
+; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v4i64_mr:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovaps (%rdi), %ymm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; XOP-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v4i64_mr:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %ymm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %3 = load <4 x i64>, <4 x i64>* %0
+  %4 = and <4 x i64> %3, <i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296>
+  %5 = and <4 x i64> %1, <i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297>
+  %6 = or <4 x i64> %4, %5
+  ret <4 x i64> %6
+}
+
+define <4 x i64> @bitselect_v4i64_mm(<4 x i64>* nocapture readonly, <4 x i64>* nocapture readonly) {
+; SSE-LABEL: bitselect_v4i64_mm:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [3,8589934593]
+; SSE-NEXT:    movaps 16(%rdi), %xmm3
+; SSE-NEXT:    andps %xmm2, %xmm3
+; SSE-NEXT:    andps (%rdi), %xmm2
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [18446744073709551612,18446744065119617022]
+; SSE-NEXT:    movaps 16(%rsi), %xmm1
+; SSE-NEXT:    andps %xmm0, %xmm1
+; SSE-NEXT:    orps %xmm3, %xmm1
+; SSE-NEXT:    andps (%rsi), %xmm0
+; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v4i64_mm:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovaps (%rdi), %ymm0
+; XOP-NEXT:    vmovaps (%rsi), %ymm1
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; XOP-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; XOP-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX-LABEL: bitselect_v4i64_mm:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %ymm0
+; AVX-NEXT:    vmovaps (%rsi), %ymm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %3 = load <4 x i64>, <4 x i64>* %0
+  %4 = load <4 x i64>, <4 x i64>* %1
+  %5 = and <4 x i64> %3, <i64 3, i64 8589934593, i64 3, i64 8589934593>
+  %6 = and <4 x i64> %4, <i64 -4, i64 -8589934594, i64 -4, i64 -8589934594>
+  %7 = or <4 x i64> %6, %5
+  ret <4 x i64> %7
+}
+
+;
+; 512-bit vectors
+;
+
+define <8 x i64> @bitselect_v8i64_rr(<8 x i64>, <8 x i64>) {
+; SSE-LABEL: bitselect_v8i64_rr:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm8 = [12884901890,12884901890]
+; SSE-NEXT:    andps %xmm8, %xmm3
+; SSE-NEXT:    movaps {{.*#+}} xmm9 = [4294967296,12884901890]
+; SSE-NEXT:    andps %xmm9, %xmm2
+; SSE-NEXT:    andps %xmm8, %xmm1
+; SSE-NEXT:    andps %xmm9, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm8 = [18446744060824649725,18446744060824649725]
+; SSE-NEXT:    andps %xmm8, %xmm7
+; SSE-NEXT:    orps %xmm7, %xmm3
+; SSE-NEXT:    movaps {{.*#+}} xmm7 = [18446744069414584319,18446744060824649725]
+; SSE-NEXT:    andps %xmm7, %xmm6
+; SSE-NEXT:    orps %xmm6, %xmm2
+; SSE-NEXT:    andps %xmm5, %xmm8
+; SSE-NEXT:    orps %xmm8, %xmm1
+; SSE-NEXT:    andps %xmm4, %xmm7
+; SSE-NEXT:    orps %xmm7, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v8i64_rr:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
+; XOP-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; XOP-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; XOP-NEXT:    vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
+; XOP-NEXT:    vandps %ymm4, %ymm3, %ymm3
+; XOP-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOP-NEXT:    vandps %ymm4, %ymm2, %ymm2
+; XOP-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX1-LABEL: bitselect_v8i64_rr:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
+; AVX1-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
+; AVX1-NEXT:    vandps %ymm4, %ymm3, %ymm3
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vandps %ymm4, %ymm2, %ymm2
+; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: bitselect_v8i64_rr:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
+; AVX2-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
+; AVX2-NEXT:    vandps %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vandps %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: bitselect_v8i64_rr:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+  %3 = and <8 x i64> %0, <i64 4294967296, i64 12884901890, i64 12884901890, i64 12884901890, i64 4294967296, i64 12884901890, i64 12884901890, i64 12884901890>
+  %4 = and <8 x i64> %1, <i64 -4294967297, i64 -12884901891, i64 -12884901891, i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -12884901891, i64 -12884901891>
+  %5 = or <8 x i64> %4, %3
+  ret <8 x i64> %5
+}
+
+define <8 x i64> @bitselect_v8i64_rm(<8 x i64>, <8 x i64>* nocapture readonly) {
+; SSE-LABEL: bitselect_v8i64_rm:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [8589934593,3]
+; SSE-NEXT:    andps %xmm4, %xmm3
+; SSE-NEXT:    andps %xmm4, %xmm2
+; SSE-NEXT:    andps %xmm4, %xmm1
+; SSE-NEXT:    andps %xmm4, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [18446744065119617022,18446744073709551612]
+; SSE-NEXT:    movaps 48(%rdi), %xmm5
+; SSE-NEXT:    andps %xmm4, %xmm5
+; SSE-NEXT:    orps %xmm5, %xmm3
+; SSE-NEXT:    movaps 32(%rdi), %xmm5
+; SSE-NEXT:    andps %xmm4, %xmm5
+; SSE-NEXT:    orps %xmm5, %xmm2
+; SSE-NEXT:    movaps 16(%rdi), %xmm5
+; SSE-NEXT:    andps %xmm4, %xmm5
+; SSE-NEXT:    orps %xmm5, %xmm1
+; SSE-NEXT:    andps (%rdi), %xmm4
+; SSE-NEXT:    orps %xmm4, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v8i64_rm:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [8589934593,3,8589934593,3]
+; XOP-NEXT:    # ymm2 = mem[0,1,0,1]
+; XOP-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; XOP-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744065119617022,18446744073709551612,18446744065119617022,18446744073709551612]
+; XOP-NEXT:    # ymm2 = mem[0,1,0,1]
+; XOP-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
+; XOP-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOP-NEXT:    vandps (%rdi), %ymm2, %ymm2
+; XOP-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX1-LABEL: bitselect_v8i64_rm:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [8589934593,3,8589934593,3]
+; AVX1-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744065119617022,18446744073709551612,18446744065119617022,18446744073709551612]
+; AVX1-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vandps (%rdi), %ymm2, %ymm2
+; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: bitselect_v8i64_rm:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [8589934593,3,8589934593,3]
+; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744065119617022,18446744073709551612,18446744065119617022,18446744073709551612]
+; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
+; AVX2-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vandps (%rdi), %ymm2, %ymm2
+; AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: bitselect_v8i64_rm:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+  %3 = load <8 x i64>, <8 x i64>* %1
+  %4 = and <8 x i64> %0, <i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3>
+  %5 = and <8 x i64> %3, <i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4>
+  %6 = or <8 x i64> %5, %4
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @bitselect_v8i64_mr(<8 x i64>* nocapture readonly, <8 x i64>) {
+; SSE-LABEL: bitselect_v8i64_mr:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [12884901890,4294967296]
+; SSE-NEXT:    movaps 48(%rdi), %xmm5
+; SSE-NEXT:    andps %xmm4, %xmm5
+; SSE-NEXT:    movaps 32(%rdi), %xmm6
+; SSE-NEXT:    andps %xmm4, %xmm6
+; SSE-NEXT:    movaps 16(%rdi), %xmm7
+; SSE-NEXT:    andps %xmm4, %xmm7
+; SSE-NEXT:    andps (%rdi), %xmm4
+; SSE-NEXT:    movaps {{.*#+}} xmm8 = [18446744060824649725,18446744069414584319]
+; SSE-NEXT:    andps %xmm8, %xmm3
+; SSE-NEXT:    orps %xmm5, %xmm3
+; SSE-NEXT:    andps %xmm8, %xmm2
+; SSE-NEXT:    orps %xmm6, %xmm2
+; SSE-NEXT:    andps %xmm8, %xmm1
+; SSE-NEXT:    orps %xmm7, %xmm1
+; SSE-NEXT:    andps %xmm8, %xmm0
+; SSE-NEXT:    orps %xmm4, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v8i64_mr:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [12884901890,4294967296,12884901890,4294967296]
+; XOP-NEXT:    # ymm2 = mem[0,1,0,1]
+; XOP-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
+; XOP-NEXT:    vandps (%rdi), %ymm2, %ymm2
+; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [18446744060824649725,18446744069414584319,18446744060824649725,18446744069414584319]
+; XOP-NEXT:    # ymm4 = mem[0,1,0,1]
+; XOP-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; XOP-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOP-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; XOP-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX1-LABEL: bitselect_v8i64_mr:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [12884901890,4294967296,12884901890,4294967296]
+; AVX1-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
+; AVX1-NEXT:    vandps (%rdi), %ymm2, %ymm2
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [18446744060824649725,18446744069414584319,18446744060824649725,18446744069414584319]
+; AVX1-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: bitselect_v8i64_mr:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [12884901890,4294967296,12884901890,4294967296]
+; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
+; AVX2-NEXT:    vandps (%rdi), %ymm2, %ymm2
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [18446744060824649725,18446744069414584319,18446744060824649725,18446744069414584319]
+; AVX2-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: bitselect_v8i64_mr:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+  %3 = load <8 x i64>, <8 x i64>* %0
+  %4 = and <8 x i64> %3, <i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296>
+  %5 = and <8 x i64> %1, <i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297>
+  %6 = or <8 x i64> %4, %5
+  ret <8 x i64> %6
+}
+
+define <8 x i64> @bitselect_v8i64_mm(<8 x i64>* nocapture readonly, <8 x i64>* nocapture readonly) {
+; SSE-LABEL: bitselect_v8i64_mm:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [3,8589934593]
+; SSE-NEXT:    movaps 48(%rdi), %xmm1
+; SSE-NEXT:    andps %xmm4, %xmm1
+; SSE-NEXT:    movaps 32(%rdi), %xmm5
+; SSE-NEXT:    andps %xmm4, %xmm5
+; SSE-NEXT:    movaps 16(%rdi), %xmm6
+; SSE-NEXT:    andps %xmm4, %xmm6
+; SSE-NEXT:    andps (%rdi), %xmm4
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [18446744073709551612,18446744065119617022]
+; SSE-NEXT:    movaps 48(%rsi), %xmm3
+; SSE-NEXT:    andps %xmm0, %xmm3
+; SSE-NEXT:    orps %xmm1, %xmm3
+; SSE-NEXT:    movaps 32(%rsi), %xmm2
+; SSE-NEXT:    andps %xmm0, %xmm2
+; SSE-NEXT:    orps %xmm5, %xmm2
+; SSE-NEXT:    movaps 16(%rsi), %xmm1
+; SSE-NEXT:    andps %xmm0, %xmm1
+; SSE-NEXT:    orps %xmm6, %xmm1
+; SSE-NEXT:    andps (%rsi), %xmm0
+; SSE-NEXT:    orps %xmm4, %xmm0
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: bitselect_v8i64_mm:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [3,8589934593,3,8589934593]
+; XOP-NEXT:    # ymm0 = mem[0,1,0,1]
+; XOP-NEXT:    vandps 32(%rdi), %ymm0, %ymm1
+; XOP-NEXT:    vandps (%rdi), %ymm0, %ymm0
+; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
+; XOP-NEXT:    # ymm2 = mem[0,1,0,1]
+; XOP-NEXT:    vandps 32(%rsi), %ymm2, %ymm3
+; XOP-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOP-NEXT:    vandps (%rsi), %ymm2, %ymm2
+; XOP-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; XOP-NEXT:    retq
+;
+; AVX1-LABEL: bitselect_v8i64_mm:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [3,8589934593,3,8589934593]
+; AVX1-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps 32(%rdi), %ymm0, %ymm1
+; AVX1-NEXT:    vandps (%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
+; AVX1-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps 32(%rsi), %ymm2, %ymm3
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vandps (%rsi), %ymm2, %ymm2
+; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: bitselect_v8i64_mm:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [3,8589934593,3,8589934593]
+; AVX2-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX2-NEXT:    vandps 32(%rdi), %ymm0, %ymm1
+; AVX2-NEXT:    vandps (%rdi), %ymm0, %ymm0
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
+; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-NEXT:    vandps 32(%rsi), %ymm2, %ymm3
+; AVX2-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vandps (%rsi), %ymm2, %ymm2
+; AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: bitselect_v8i64_mm:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm1
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+  %3 = load <8 x i64>, <8 x i64>* %0
+  %4 = load <8 x i64>, <8 x i64>* %1
+  %5 = and <8 x i64> %3, <i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593>
+  %6 = and <8 x i64> %4, <i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594>
+  %7 = or <8 x i64> %6, %5
+  ret <8 x i64> %7
+}
diff --git a/test/CodeGen/X86/combine-sdiv.ll b/test/CodeGen/X86/combine-sdiv.ll
index 5d3bb5a..6fff298 100644
--- a/test/CodeGen/X86/combine-sdiv.ll
+++ b/test/CodeGen/X86/combine-sdiv.ll
@@ -1531,15 +1531,13 @@
 define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) {
 ; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    psrad $31, %xmm2
-; SSE2-NEXT:    psrlq $62, %xmm2
-; SSE2-NEXT:    paddq %xmm0, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    psrlq $62, %xmm1
+; SSE2-NEXT:    paddq %xmm0, %xmm1
 ; SSE2-NEXT:    psrlq $2, %xmm1
-; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; SSE2-NEXT:    movapd {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
-; SSE2-NEXT:    xorpd %xmm2, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
+; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    psubq %xmm2, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
@@ -1547,13 +1545,11 @@
 ;
 ; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrad $31, %xmm2
-; SSE41-NEXT:    psrlq $62, %xmm2
-; SSE41-NEXT:    paddq %xmm0, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrad $31, %xmm1
+; SSE41-NEXT:    psrlq $62, %xmm1
+; SSE41-NEXT:    paddq %xmm0, %xmm1
 ; SSE41-NEXT:    psrlq $2, %xmm1
-; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
 ; SSE41-NEXT:    pxor %xmm2, %xmm1
 ; SSE41-NEXT:    psubq %xmm2, %xmm1
@@ -1567,8 +1563,7 @@
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrlq $62, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpsrlq $2, %xmm1, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrlq $2, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
@@ -1635,66 +1630,60 @@
 define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) {
 ; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    movdqa %xmm0, %xmm3
-; SSE2-NEXT:    psrad $31, %xmm3
-; SSE2-NEXT:    psrlq $62, %xmm3
-; SSE2-NEXT:    paddq %xmm0, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm0
-; SSE2-NEXT:    psrlq $2, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
-; SSE2-NEXT:    movapd {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
-; SSE2-NEXT:    xorpd %xmm3, %xmm0
-; SSE2-NEXT:    psubq %xmm3, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
-; SSE2-NEXT:    psrad $31, %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    psrlq $61, %xmm3
-; SSE2-NEXT:    psrlq $60, %xmm2
-; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
-; SSE2-NEXT:    paddq %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    psrlq $3, %xmm1
-; SSE2-NEXT:    psrlq $4, %xmm2
-; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
-; SSE2-NEXT:    movapd {{.*#+}} xmm1 = [1152921504606846976,576460752303423488]
-; SSE2-NEXT:    xorpd %xmm1, %xmm2
-; SSE2-NEXT:    psubq %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    psrlq $60, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
+; SSE2-NEXT:    paddq %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrlq $3, %xmm2
+; SSE2-NEXT:    psrlq $4, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT:    movapd {{.*#+}} xmm2 = [1152921504606846976,576460752303423488]
+; SSE2-NEXT:    xorpd %xmm2, %xmm1
+; SSE2-NEXT:    psubq %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    psrad $31, %xmm2
+; SSE2-NEXT:    psrlq $62, %xmm2
+; SSE2-NEXT:    paddq %xmm0, %xmm2
+; SSE2-NEXT:    psrlq $2, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
+; SSE2-NEXT:    pxor %xmm3, %xmm2
+; SSE2-NEXT:    psubq %xmm3, %xmm2
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT:    movapd %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    movdqa %xmm0, %xmm3
-; SSE41-NEXT:    psrad $31, %xmm3
-; SSE41-NEXT:    psrlq $62, %xmm3
-; SSE41-NEXT:    paddq %xmm0, %xmm3
-; SSE41-NEXT:    movdqa %xmm3, %xmm0
-; SSE41-NEXT:    psrlq $2, %xmm0
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
-; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    psubq %xmm3, %xmm0
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    psrad $31, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    psrad $31, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
 ; SSE41-NEXT:    psrlq $60, %xmm3
-; SSE41-NEXT:    psrlq $61, %xmm2
-; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT:    paddq %xmm1, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm1
-; SSE41-NEXT:    psrlq $4, %xmm1
-; SSE41-NEXT:    psrlq $3, %xmm2
-; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [1152921504606846976,576460752303423488]
-; SSE41-NEXT:    pxor %xmm1, %xmm2
-; SSE41-NEXT:    psubq %xmm1, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    psrlq $61, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT:    paddq %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrlq $4, %xmm2
+; SSE41-NEXT:    psrlq $3, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [1152921504606846976,576460752303423488]
+; SSE41-NEXT:    pxor %xmm2, %xmm1
+; SSE41-NEXT:    psubq %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psrad $31, %xmm2
+; SSE41-NEXT:    psrlq $62, %xmm2
+; SSE41-NEXT:    paddq %xmm0, %xmm2
+; SSE41-NEXT:    psrlq $2, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
+; SSE41-NEXT:    pxor %xmm3, %xmm2
+; SSE41-NEXT:    psubq %xmm3, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
@@ -1782,118 +1771,108 @@
 define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) {
 ; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm2, %xmm4
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrlq $61, %xmm5
+; SSE2-NEXT:    psrlq $60, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm5[0],xmm1[1]
+; SSE2-NEXT:    paddq %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrlq $3, %xmm3
+; SSE2-NEXT:    psrlq $4, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
+; SSE2-NEXT:    movapd {{.*#+}} xmm5 = [1152921504606846976,576460752303423488]
+; SSE2-NEXT:    xorpd %xmm5, %xmm1
+; SSE2-NEXT:    psubq %xmm5, %xmm1
+; SSE2-NEXT:    movdqa %xmm4, %xmm3
+; SSE2-NEXT:    psrad $31, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT:    movdqa %xmm3, %xmm6
+; SSE2-NEXT:    psrlq $61, %xmm6
+; SSE2-NEXT:    psrlq $60, %xmm3
+; SSE2-NEXT:    movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1]
+; SSE2-NEXT:    paddq %xmm4, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrlq $3, %xmm4
+; SSE2-NEXT:    psrlq $4, %xmm3
+; SSE2-NEXT:    movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
+; SSE2-NEXT:    xorpd %xmm5, %xmm3
+; SSE2-NEXT:    psubq %xmm5, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    psrlq $62, %xmm4
+; SSE2-NEXT:    paddq %xmm0, %xmm4
+; SSE2-NEXT:    psrlq $2, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [9223372036854775808,2305843009213693952]
+; SSE2-NEXT:    pxor %xmm6, %xmm4
+; SSE2-NEXT:    psubq %xmm6, %xmm4
+; SSE2-NEXT:    movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
 ; SSE2-NEXT:    psrad $31, %xmm5
 ; SSE2-NEXT:    psrlq $62, %xmm5
-; SSE2-NEXT:    paddq %xmm0, %xmm5
-; SSE2-NEXT:    movdqa %xmm5, %xmm0
-; SSE2-NEXT:    psrlq $2, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
-; SSE2-NEXT:    movapd {{.*#+}} xmm5 = [9223372036854775808,2305843009213693952]
-; SSE2-NEXT:    xorpd %xmm5, %xmm0
-; SSE2-NEXT:    psubq %xmm5, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
-; SSE2-NEXT:    movdqa %xmm4, %xmm6
-; SSE2-NEXT:    psrad $31, %xmm6
-; SSE2-NEXT:    psrlq $62, %xmm6
-; SSE2-NEXT:    paddq %xmm4, %xmm6
-; SSE2-NEXT:    movdqa %xmm6, %xmm2
-; SSE2-NEXT:    psrlq $2, %xmm2
-; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm6[0],xmm2[1]
-; SSE2-NEXT:    xorpd %xmm5, %xmm2
-; SSE2-NEXT:    psubq %xmm5, %xmm2
-; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1]
-; SSE2-NEXT:    movdqa %xmm1, %xmm4
-; SSE2-NEXT:    psrad $31, %xmm4
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE2-NEXT:    movdqa %xmm4, %xmm5
-; SSE2-NEXT:    psrlq $61, %xmm5
-; SSE2-NEXT:    psrlq $60, %xmm4
-; SSE2-NEXT:    movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1]
-; SSE2-NEXT:    paddq %xmm1, %xmm4
-; SSE2-NEXT:    movdqa %xmm4, %xmm1
-; SSE2-NEXT:    psrlq $3, %xmm1
-; SSE2-NEXT:    psrlq $4, %xmm4
-; SSE2-NEXT:    movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
-; SSE2-NEXT:    movapd {{.*#+}} xmm1 = [1152921504606846976,576460752303423488]
-; SSE2-NEXT:    xorpd %xmm1, %xmm4
-; SSE2-NEXT:    psubq %xmm1, %xmm4
-; SSE2-NEXT:    movdqa %xmm3, %xmm5
-; SSE2-NEXT:    psrad $31, %xmm5
-; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE2-NEXT:    movdqa %xmm5, %xmm6
-; SSE2-NEXT:    psrlq $61, %xmm6
-; SSE2-NEXT:    psrlq $60, %xmm5
-; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
-; SSE2-NEXT:    paddq %xmm3, %xmm5
-; SSE2-NEXT:    movdqa %xmm5, %xmm3
-; SSE2-NEXT:    psrlq $3, %xmm3
-; SSE2-NEXT:    psrlq $4, %xmm5
-; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
-; SSE2-NEXT:    xorpd %xmm1, %xmm5
-; SSE2-NEXT:    psubq %xmm1, %xmm5
-; SSE2-NEXT:    movdqa %xmm4, %xmm1
-; SSE2-NEXT:    movdqa %xmm5, %xmm3
+; SSE2-NEXT:    paddq %xmm2, %xmm5
+; SSE2-NEXT:    psrlq $2, %xmm5
+; SSE2-NEXT:    pxor %xmm6, %xmm5
+; SSE2-NEXT:    psubq %xmm6, %xmm5
+; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1]
+; SSE2-NEXT:    movapd %xmm4, %xmm0
+; SSE2-NEXT:    movapd %xmm5, %xmm2
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm2, %xmm4
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    psrad $31, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlq $60, %xmm5
+; SSE41-NEXT:    psrlq $61, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT:    paddq %xmm3, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    psrlq $4, %xmm3
+; SSE41-NEXT:    psrlq $3, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [1152921504606846976,576460752303423488]
+; SSE41-NEXT:    pxor %xmm5, %xmm1
+; SSE41-NEXT:    psubq %xmm5, %xmm1
+; SSE41-NEXT:    movdqa %xmm4, %xmm3
+; SSE41-NEXT:    psrad $31, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm3, %xmm6
+; SSE41-NEXT:    psrlq $60, %xmm6
+; SSE41-NEXT:    psrlq $61, %xmm3
+; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT:    paddq %xmm4, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    psrlq $4, %xmm4
+; SSE41-NEXT:    psrlq $3, %xmm3
+; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT:    pxor %xmm5, %xmm3
+; SSE41-NEXT:    psubq %xmm5, %xmm3
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    psrad $31, %xmm4
+; SSE41-NEXT:    psrlq $62, %xmm4
+; SSE41-NEXT:    paddq %xmm0, %xmm4
+; SSE41-NEXT:    psrlq $2, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [9223372036854775808,2305843009213693952]
+; SSE41-NEXT:    pxor %xmm6, %xmm4
+; SSE41-NEXT:    psubq %xmm6, %xmm4
+; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm2, %xmm5
 ; SSE41-NEXT:    psrad $31, %xmm5
 ; SSE41-NEXT:    psrlq $62, %xmm5
-; SSE41-NEXT:    paddq %xmm0, %xmm5
-; SSE41-NEXT:    movdqa %xmm5, %xmm0
-; SSE41-NEXT:    psrlq $2, %xmm0
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm5[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [9223372036854775808,2305843009213693952]
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    psubq %xmm5, %xmm0
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
-; SSE41-NEXT:    psrad $31, %xmm6
-; SSE41-NEXT:    psrlq $62, %xmm6
-; SSE41-NEXT:    paddq %xmm4, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm2
-; SSE41-NEXT:    psrlq $2, %xmm2
-; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT:    pxor %xmm5, %xmm2
-; SSE41-NEXT:    psubq %xmm5, %xmm2
-; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT:    movdqa %xmm1, %xmm4
-; SSE41-NEXT:    psrad $31, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    movdqa %xmm4, %xmm5
-; SSE41-NEXT:    psrlq $60, %xmm5
-; SSE41-NEXT:    psrlq $61, %xmm4
-; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4,5,6,7]
-; SSE41-NEXT:    paddq %xmm1, %xmm4
-; SSE41-NEXT:    movdqa %xmm4, %xmm1
-; SSE41-NEXT:    psrlq $4, %xmm1
-; SSE41-NEXT:    psrlq $3, %xmm4
-; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [1152921504606846976,576460752303423488]
-; SSE41-NEXT:    pxor %xmm1, %xmm4
-; SSE41-NEXT:    psubq %xmm1, %xmm4
-; SSE41-NEXT:    movdqa %xmm3, %xmm5
-; SSE41-NEXT:    psrad $31, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    movdqa %xmm5, %xmm6
-; SSE41-NEXT:    psrlq $60, %xmm6
-; SSE41-NEXT:    psrlq $61, %xmm5
-; SSE41-NEXT:    pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
-; SSE41-NEXT:    paddq %xmm3, %xmm5
-; SSE41-NEXT:    movdqa %xmm5, %xmm3
-; SSE41-NEXT:    psrlq $4, %xmm3
-; SSE41-NEXT:    psrlq $3, %xmm5
-; SSE41-NEXT:    pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT:    pxor %xmm1, %xmm5
-; SSE41-NEXT:    psubq %xmm1, %xmm5
-; SSE41-NEXT:    movdqa %xmm4, %xmm1
-; SSE41-NEXT:    movdqa %xmm5, %xmm3
+; SSE41-NEXT:    paddq %xmm2, %xmm5
+; SSE41-NEXT:    psrlq $2, %xmm5
+; SSE41-NEXT:    pxor %xmm6, %xmm5
+; SSE41-NEXT:    psubq %xmm6, %xmm5
+; SSE41-NEXT:    pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    movdqa %xmm5, %xmm2
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
@@ -3007,55 +2986,47 @@
 ; SSE2-NEXT:    psraw $8, %xmm2
 ; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm2
 ; SSE2-NEXT:    psrlw $8, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    packuswb %xmm2, %xmm1
 ; SSE2-NEXT:    paddb %xmm0, %xmm1
 ; SSE2-NEXT:    movdqa %xmm1, %xmm0
-; SSE2-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE2-NEXT:    psraw $8, %xmm0
-; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE2-NEXT:    psrlw $8, %xmm0
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE2-NEXT:    psraw $8, %xmm2
-; SSE2-NEXT:    psllw $8, %xmm2
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm2
 ; SSE2-NEXT:    psrlw $8, %xmm2
-; SSE2-NEXT:    packuswb %xmm0, %xmm2
+; SSE2-NEXT:    packuswb %xmm2, %xmm0
 ; SSE2-NEXT:    psrlw $7, %xmm1
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT:    paddb %xmm2, %xmm1
+; SSE2-NEXT:    paddb %xmm0, %xmm1
 ; SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: pr38658:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT:    pmovsxbw %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT:    pmovsxbw %xmm1, %xmm2
 ; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm2
 ; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pxor %xmm0, %xmm0
-; SSE41-NEXT:    packuswb %xmm2, %xmm0
-; SSE41-NEXT:    paddb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm1
-; SSE41-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE41-NEXT:    psraw $8, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    psllw $6, %xmm2
-; SSE41-NEXT:    psllw $8, %xmm1
-; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
-; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    packuswb %xmm2, %xmm1
+; SSE41-NEXT:    paddb %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE41-NEXT:    psraw $8, %xmm0
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE41-NEXT:    psraw $8, %xmm2
-; SSE41-NEXT:    psllw $8, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    packuswb %xmm1, %xmm2
-; SSE41-NEXT:    psrlw $7, %xmm0
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    paddb %xmm2, %xmm0
+; SSE41-NEXT:    psllw $6, %xmm2
+; SSE41-NEXT:    psllw $8, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT:    packuswb %xmm0, %xmm2
+; SSE41-NEXT:    psrlw $7, %xmm1
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    paddb %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: pr38658:
@@ -3073,14 +3044,10 @@
 ; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpsraw $8, %xmm2, %xmm2
-; AVX1-NEXT:    vpsllw $8, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm0
 ; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
 ;
@@ -3099,7 +3066,6 @@
 ; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsrlw $7, %xmm0, %xmm0
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -3114,7 +3080,6 @@
 ; AVX512F-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    vpsrlw $7, %xmm0, %xmm1
 ; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512F-NEXT:    vpsravd {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -3131,7 +3096,6 @@
 ; AVX512BW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
 ; AVX512BW-NEXT:    vpsrlw $7, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; AVX512BW-NEXT:    vpsravw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %ymm0, %xmm0
diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll
index 8e3b03e..64f9f10 100644
--- a/test/CodeGen/X86/combine-shl.ll
+++ b/test/CodeGen/X86/combine-shl.ll
@@ -45,6 +45,15 @@
   ret <4 x i32> %2
 }
 
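+; shl by a constant amount of 32 or more is undef for i32 elements, so the
+; whole shift folds away.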
+define <4 x i32> @combine_vec_shl_outofrange3(<4 x i32> %a0) {
+; CHECK-LABEL: combine_vec_shl_outofrange3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %1 = shl <4 x i32> %a0, <i32 33, i32 34, i32 35, i32 undef>
+  ret <4 x i32> %1
+}
+
 ; fold (shl x, 0) -> x
 define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
 ; CHECK-LABEL: combine_vec_shl_by_zero:
@@ -261,33 +269,25 @@
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    psrad $16, %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    pslld $31, %xmm2
-; SSE2-NEXT:    pslld $30, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pslld $29, %xmm2
 ; SSE2-NEXT:    pslld $28, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT:    pslld $30, %xmm0
+; SSE2-NEXT:    xorpd %xmm2, %xmm2
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_vec_shl_ext_shl1:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT:    pmovsxwd %xmm1, %xmm1
 ; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    pslld $30, %xmm2
-; SSE41-NEXT:    pslld $31, %xmm0
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    pslld $28, %xmm2
-; SSE41-NEXT:    pslld $29, %xmm1
-; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    pslld $30, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ext_shl1:
diff --git a/test/CodeGen/X86/combine-sra.ll b/test/CodeGen/X86/combine-sra.ll
index 9bce1a7..4a14991 100644
--- a/test/CodeGen/X86/combine-sra.ll
+++ b/test/CodeGen/X86/combine-sra.ll
@@ -50,6 +50,15 @@
   ret <4 x i32> %1
 }
 
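+; ashr by a constant amount of 32 or more is undef for i32 elements, so the
+; whole shift folds away.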
+define <4 x i32> @combine_vec_ashr_outofrange2(<4 x i32> %x) {
+; CHECK-LABEL: combine_vec_ashr_outofrange2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
+  ret <4 x i32> %1
+}
+
 ; fold (sra x, 0) -> x
 define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
 ; CHECK-LABEL: combine_vec_ashr_by_zero:
diff --git a/test/CodeGen/X86/combine-srem.ll b/test/CodeGen/X86/combine-srem.ll
index 36a151e..4878d70 100644
--- a/test/CodeGen/X86/combine-srem.ll
+++ b/test/CodeGen/X86/combine-srem.ll
@@ -226,7 +226,7 @@
 ; SSE-NEXT:    psrad $31, %xmm1
 ; SSE-NEXT:    psrld $30, %xmm1
 ; SSE-NEXT:    paddd %xmm0, %xmm1
-; SSE-NEXT:    psrad $2, %xmm1
+; SSE-NEXT:    psrld $2, %xmm1
 ; SSE-NEXT:    pxor %xmm2, %xmm2
 ; SSE-NEXT:    psubd %xmm1, %xmm2
 ; SSE-NEXT:    pslld $2, %xmm2
@@ -238,7 +238,7 @@
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm1
 ; AVX-NEXT:    vpsrld $30, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vpsrad $2, %xmm1, %xmm1
+; AVX-NEXT:    vpsrld $2, %xmm1, %xmm1
 ; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vpslld $2, %xmm1, %xmm1
@@ -252,35 +252,35 @@
 ; SSE-LABEL: combine_vec_srem_by_pow2b:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrld $31, %xmm1
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrad $31, %xmm2
-; SSE-NEXT:    movdqa %xmm2, %xmm3
-; SSE-NEXT:    psrld $29, %xmm3
-; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT:    psrld $30, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE-NEXT:    paddd %xmm0, %xmm2
-; SSE-NEXT:    movdqa %xmm2, %xmm1
-; SSE-NEXT:    psrad $3, %xmm1
-; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psrld $29, %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    psrld $31, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    psrld $30, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT:    paddd %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psrad $3, %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
 ; SSE-NEXT:    psrad $1, %xmm3
-; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    psrad $2, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm2
-; SSE-NEXT:    psubd %xmm2, %xmm0
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    psrad $2, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm1
+; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_srem_by_pow2b:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
-; AVX1-NEXT:    vpsrld $29, %xmm2, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vpsrld $30, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrld $29, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrld $30, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrad $3, %xmm1, %xmm2
 ; AVX1-NEXT:    vpsrad $1, %xmm1, %xmm3
@@ -317,10 +317,10 @@
 ; SSE-NEXT:    movdqa %xmm1, %xmm3
 ; SSE-NEXT:    psrld $30, %xmm3
 ; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrld $31, %xmm2
-; SSE-NEXT:    psrld $29, %xmm1
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psrld $29, %xmm2
+; SSE-NEXT:    psrld $31, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
 ; SSE-NEXT:    paddd %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
@@ -344,9 +344,9 @@
 ; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm2
 ; AVX1-NEXT:    vpsrld $30, %xmm1, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm3
-; AVX1-NEXT:    vpsrld $29, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpsrld $29, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrad $4, %xmm1, %xmm2
diff --git a/test/CodeGen/X86/combine-srl.ll b/test/CodeGen/X86/combine-srl.ll
index 1cecc68..960aa07 100644
--- a/test/CodeGen/X86/combine-srl.ll
+++ b/test/CodeGen/X86/combine-srl.ll
@@ -35,6 +35,15 @@
   ret <4 x i32> %1
 }
 
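+; lshr by a constant amount of 32 or more is undef for i32 elements, so the
+; whole shift folds away.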
+define <4 x i32> @combine_vec_lshr_outofrange2(<4 x i32> %x) {
+; CHECK-LABEL: combine_vec_lshr_outofrange2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
+  ret <4 x i32> %1
+}
+
 ; fold (srl x, 0) -> x
 define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
 ; CHECK-LABEL: combine_vec_lshr_by_zero:
diff --git a/test/CodeGen/X86/combine-sub-ssat.ll b/test/CodeGen/X86/combine-sub-ssat.ll
new file mode 100644
index 0000000..4cce355
--- /dev/null
+++ b/test/CodeGen/X86/combine-sub-ssat.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW
+
+declare  i32 @llvm.ssub.sat.i32  (i32, i32)
+declare  i64 @llvm.ssub.sat.i64  (i64, i64)
+declare  <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+
+; fold (ssub_sat x, undef) -> 0
+define i32 @combine_undef_i32(i32 %a0) {
+; CHECK-LABEL: combine_undef_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.ssub.sat.i32(i32 %a0, i32 undef)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
+  ret <8 x i16> %res
+}
+
+; fold (ssub_sat c1, c2) -> c3
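+; e.g. 100 - 2147483647 = -2147483547, which is still >= INT32_MIN, so no
+; saturation occurs (printed below as the unsigned immediate 0x80000065).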
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $-2147483547, %eax # imm = 0x80000065
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.ssub.sat.i32(i32 100, i32 2147483647)
+  ret i32 %res
+}
+
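+; Per signed-i16 lane the differences are -1, 2, 254, 0, -2, -254, -32750
+; and 2 (65535 reads as -1); none saturate, and the CHECK constants are the
+; same values printed as unsigned i16.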
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,2,254,0,65534,65282,32786,2]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65535,2,254,0,65534,65282,32786,2]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
+  ret <8 x i16> %res
+}
+
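+; Lanes with an undef operand constant-fold to 0; the fully-defined lanes
+; match the test above.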
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65534,65282,32786,2]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65534,65282,32786,2]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
+  ret <8 x i16> %res
+}
+
+; fold (ssub_sat x, 0) -> x
+define i32 @combine_zero_i32(i32 %a0) {
+; CHECK-LABEL: combine_zero_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.ssub.sat.i32(i32 %a0, i32 0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
+; CHECK-LABEL: combine_zero_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
+  ret <8 x i16> %1
+}
+
+; fold (ssub_sat x, x) -> 0
+define i32 @combine_self_i32(i32 %a0) {
+; CHECK-LABEL: combine_self_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.ssub.sat.i32(i32 %a0, i32 %a0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_self_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_self_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_self_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a0)
+  ret <8 x i16> %1
+}
diff --git a/test/CodeGen/X86/combine-sub-usat.ll b/test/CodeGen/X86/combine-sub-usat.ll
new file mode 100644
index 0000000..e26d0d6
--- /dev/null
+++ b/test/CodeGen/X86/combine-sub-usat.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW
+
+declare  i32 @llvm.usub.sat.i32  (i32, i32)
+declare  i64 @llvm.usub.sat.i64  (i64, i64)
+declare  <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
+
+; fold (usub_sat x, undef) -> 0
+define i32 @combine_undef_i32(i32 %a0) {
+; CHECK-LABEL: combine_undef_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.usub.sat.i32(i32 %a0, i32 undef)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
+  ret <8 x i16> %res
+}
+
+; fold (usub_sat c1, c2) -> c3
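+; e.g. 100 - 4294967295 underflows, so the unsigned result saturates to 0.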
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.usub.sat.i32(i32 100, i32 4294967295)
+  ret i32 %res
+}
+
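+; Per unsigned-i16 lane only 255-1=254 and 65535-1=65534 are nonzero; the
+; remaining lanes subtract to 0 or underflow and saturate to 0.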
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,254,0,65534,0,0,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,254,0,65534,0,0,0]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
+
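+; Lanes with an undef operand fold to 0; only the 65535-1=65534 lane is nonzero.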
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65534,0,0,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65534,0,0,0]
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
+
+; fold (usub_sat x, 0) -> x
+define i32 @combine_zero_i32(i32 %a0) {
+; CHECK-LABEL: combine_zero_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.usub.sat.i32(i32 %a0, i32 0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
+; CHECK-LABEL: combine_zero_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
+  ret <8 x i16> %1
+}
+
+; fold (usub_sat x, x) -> 0
+define i32 @combine_self_i32(i32 %a0) {
+; CHECK-LABEL: combine_self_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.usub.sat.i32(i32 %a0, i32 %a0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_self_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_self_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_self_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a0)
+  ret <8 x i16> %1
+}
diff --git a/test/CodeGen/X86/compact-unwind.ll b/test/CodeGen/X86/compact-unwind.ll
index f8266a1..ab85f59 100644
--- a/test/CodeGen/X86/compact-unwind.ll
+++ b/test/CodeGen/X86/compact-unwind.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -disable-fp-elim -mtriple x86_64-apple-darwin11 -mcpu corei7 | FileCheck -check-prefix=ASM %s
-; RUN: llc < %s -disable-fp-elim -mtriple x86_64-apple-darwin11 -mcpu corei7 -filetype=obj -o - \
+; RUN: llc < %s -frame-pointer=all -mtriple x86_64-apple-darwin11 -mcpu corei7 | FileCheck -check-prefix=ASM %s
+; RUN: llc < %s -frame-pointer=all -mtriple x86_64-apple-darwin11 -mcpu corei7 -filetype=obj -o - \
 ; RUN:  | llvm-objdump -triple x86_64-apple-darwin11 -unwind-info - \
 ; RUN:  | FileCheck -check-prefix=CU %s
-; RUN: llc < %s -disable-fp-elim -mtriple x86_64-apple-darwin11 -mcpu corei7 \
+; RUN: llc < %s -frame-pointer=all -mtriple x86_64-apple-darwin11 -mcpu corei7 \
 ; RUN:  | llvm-mc -triple x86_64-apple-darwin11 -filetype=obj -o - \
 ; RUN:  | llvm-objdump -triple x86_64-apple-darwin11 -unwind-info - \
 ; RUN:  | FileCheck -check-prefix=FROM-ASM %s
diff --git a/test/CodeGen/X86/copysign-constant-magnitude.ll b/test/CodeGen/X86/copysign-constant-magnitude.ll
index 8784efd..aea7b35 100644
--- a/test/CodeGen/X86/copysign-constant-magnitude.ll
+++ b/test/CodeGen/X86/copysign-constant-magnitude.ll
@@ -33,18 +33,17 @@
   ret double %y
 }
 
+; CHECK:        [[ONE3:L.+]]:
+; CHECK-NEXT:   .quad 4607182418800017408     ## double 1
 ; CHECK:        [[SIGNMASK3:L.+]]:
 ; CHECK-NEXT:   .quad -9223372036854775808    ## double -0
 ; CHECK-NEXT:   .quad -9223372036854775808    ## double -0
-; CHECK:        [[ONE3:L.+]]:
-; CHECK-NEXT:   .quad 4607182418800017408     ## double 1
 
 define double @mag_pos1_double(double %x) nounwind {
 ; CHECK-LABEL: mag_pos1_double:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    andps [[SIGNMASK3]](%rip), %xmm0
 ; CHECK-NEXT:    movsd [[ONE3]](%rip), %xmm1
-; CHECK-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0,0]
+; CHECK-NEXT:    andps [[SIGNMASK3]](%rip), %xmm0
 ; CHECK-NEXT:    orps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
@@ -100,20 +99,19 @@
   ret float %y
 }
 
+; CHECK:        [[ONE7:L.+]]:
+; CHECK-NEXT:  .long 1065353216              ## float 1
 ; CHECK:       [[SIGNMASK7:L.+]]:
 ; CHECK-NEXT:  .long 2147483648              ## float -0
 ; CHECK-NEXT:  .long 2147483648              ## float -0
 ; CHECK-NEXT:  .long 2147483648              ## float -0
 ; CHECK-NEXT:  .long 2147483648              ## float -0
-; CHECK:        [[ONE7:L.+]]:
-; CHECK-NEXT:  .long 1065353216              ## float 1
 
 define float @mag_pos1_float(float %x) nounwind {
 ; CHECK-LABEL: mag_pos1_float:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    andps [[SIGNMASK7]](%rip), %xmm0
 ; CHECK-NEXT:    movss [[ONE7]](%rip), %xmm1
-; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; CHECK-NEXT:    andps [[SIGNMASK7]](%rip), %xmm0
 ; CHECK-NEXT:    orps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/crash-O0.ll b/test/CodeGen/X86/crash-O0.ll
index 1a234d4..d23a996 100644
--- a/test/CodeGen/X86/crash-O0.ll
+++ b/test/CodeGen/X86/crash-O0.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 -relocation-model=pic -disable-fp-elim < %s | FileCheck %s
+; RUN: llc -O0 -relocation-model=pic -frame-pointer=all < %s | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10"
 
diff --git a/test/CodeGen/X86/debug-loclists.ll b/test/CodeGen/X86/debug-loclists.ll
index 874cdc1..20bc0c4 100644
--- a/test/CodeGen/X86/debug-loclists.ll
+++ b/test/CodeGen/X86/debug-loclists.ll
@@ -5,7 +5,7 @@
 ; CHECK-NEXT:               DW_AT_location [DW_FORM_sec_offset]   (0x0000000c
 ; CHECK-NEXT:                  [0x0000000000000000, 0x0000000000000004): DW_OP_breg5 RDI+0
 ; CHECK-NEXT:                  [0x0000000000000004, 0x0000000000000012): DW_OP_breg3 RBX+0)
-; CHECK-NEXT:               DW_AT_name [DW_FORM_strx1]    ( indexed (0000000e) string = "a")
+; CHECK-NEXT:               DW_AT_name [DW_FORM_strx1]    (indexed (0000000e) string = "a")
 ; CHECK-NEXT:               DW_AT_decl_file [DW_FORM_data1]       ("/home/folder{{\\|\/}}test.cc")
 ; CHECK-NEXT:               DW_AT_decl_line [DW_FORM_data1]       (6)
 ; CHECK-NEXT:               DW_AT_type [DW_FORM_ref4]     (cu + 0x0040 => {0x00000040} "A")
diff --git a/test/CodeGen/X86/divide-by-constant.ll b/test/CodeGen/X86/divide-by-constant.ll
index 9fbef11..23a3d1e 100644
--- a/test/CodeGen/X86/divide-by-constant.ll
+++ b/test/CodeGen/X86/divide-by-constant.ll
@@ -441,12 +441,12 @@
 ; X64-NEXT:    movabsq $1237940039285380275, %rcx # imm = 0x112E0BE826D694B3
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    imulq %rcx
+; X64-NEXT:    movq %rdx, %rax
 ; X64-NEXT:    movq %rdx, %rcx
 ; X64-NEXT:    shrq $63, %rcx
-; X64-NEXT:    sarq $28, %rdx
-; X64-NEXT:    leaq (%rdx,%rcx), %rax
-; X64-NEXT:    addl %ecx, %edx
-; X64-NEXT:    imull $-294967296, %edx, %ecx # imm = 0xEE6B2800
+; X64-NEXT:    sarq $28, %rax
+; X64-NEXT:    addq %rcx, %rax
+; X64-NEXT:    imull $-294967296, %eax, %ecx # imm = 0xEE6B2800
 ; X64-NEXT:    subl %ecx, %edi
 ; X64-NEXT:    movl %edi, %edx
 ; X64-NEXT:    retq
diff --git a/test/CodeGen/X86/empty-functions.ll b/test/CodeGen/X86/empty-functions.ll
index a310fb3..faded65 100644
--- a/test/CodeGen/X86/empty-functions.ll
+++ b/test/CodeGen/X86/empty-functions.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck -check-prefix=CHECK-NO-FP %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -frame-pointer=all | FileCheck -check-prefix=CHECK-FP %s
 ; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck -check-prefix=LINUX-NO-FP %s
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -disable-fp-elim | FileCheck -check-prefix=LINUX-FP %s
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -frame-pointer=all | FileCheck -check-prefix=LINUX-FP %s
 
 define void @func() {
 entry:
diff --git a/test/CodeGen/X86/extract-bits.ll b/test/CodeGen/X86/extract-bits.ll
index b69c0c1..13458a1 100644
--- a/test/CodeGen/X86/extract-bits.ll
+++ b/test/CodeGen/X86/extract-bits.ll
@@ -1614,6 +1614,492 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
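+; i.e. res = trunc((val >> numskipbits) & ((1 << numlowbits) - 1)) to i32.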
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_a0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    movl %edi, %esi
+; X86-NOBMI-NEXT:    shrl %cl, %esi
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB14_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %esi
+; X86-NOBMI-NEXT:  .LBB14_2:
+; X86-NOBMI-NEXT:    movl $1, %edi
+; X86-NOBMI-NEXT:    movl %edx, %ecx
+; X86-NOBMI-NEXT:    shll %cl, %edi
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:    testb $32, %dl
+; X86-NOBMI-NEXT:    jne .LBB14_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %edi, %eax
+; X86-NOBMI-NEXT:  .LBB14_4:
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl %esi, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_a0:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %esi
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %eax
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB14_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %eax, %esi
+; X86-BMI1NOTBM-NEXT:  .LBB14_2:
+; X86-BMI1NOTBM-NEXT:    movl $1, %edi
+; X86-BMI1NOTBM-NEXT:    movl %edx, %ecx
+; X86-BMI1NOTBM-NEXT:    shll %cl, %edi
+; X86-BMI1NOTBM-NEXT:    xorl %eax, %eax
+; X86-BMI1NOTBM-NEXT:    testb $32, %dl
+; X86-BMI1NOTBM-NEXT:    jne .LBB14_4
+; X86-BMI1NOTBM-NEXT:  # %bb.3:
+; X86-BMI1NOTBM-NEXT:    movl %edi, %eax
+; X86-BMI1NOTBM-NEXT:  .LBB14_4:
+; X86-BMI1NOTBM-NEXT:    decl %eax
+; X86-BMI1NOTBM-NEXT:    andl %esi, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_a0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %ebx
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %bl
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB14_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edx
+; X86-BMI1BMI2-NEXT:  .LBB14_2:
+; X86-BMI1BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %bl
+; X86-BMI1BMI2-NEXT:    jne .LBB14_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    movl $1, %eax
+; X86-BMI1BMI2-NEXT:    shlxl %ebx, %eax, %eax
+; X86-BMI1BMI2-NEXT:  .LBB14_4:
+; X86-BMI1BMI2-NEXT:    decl %eax
+; X86-BMI1BMI2-NEXT:    andl %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %ebx
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_a0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_a0:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    movq %rsi, %rcx
+; X64-BMI1NOTBM-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-BMI1NOTBM-NEXT:    shrq %cl, %rdi
+; X64-BMI1NOTBM-NEXT:    movl $1, %eax
+; X64-BMI1NOTBM-NEXT:    movl %edx, %ecx
+; X64-BMI1NOTBM-NEXT:    shlq %cl, %rax
+; X64-BMI1NOTBM-NEXT:    decl %eax
+; X64-BMI1NOTBM-NEXT:    andl %edi, %eax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_a0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    movl $1, %ecx
+; X64-BMI1BMI2-NEXT:    shlxq %rdx, %rcx, %rcx
+; X64-BMI1BMI2-NEXT:    decl %ecx
+; X64-BMI1BMI2-NEXT:    andl %ecx, %eax
+; X64-BMI1BMI2-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
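+; With BMI2 this lowers to a 64-bit shrxq followed by a 32-bit bzhil on the
+; truncated result (see the X64-BMI1BMI2 checks below).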
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_a1:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    movl %edi, %esi
+; X86-NOBMI-NEXT:    shrl %cl, %esi
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB15_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %esi
+; X86-NOBMI-NEXT:  .LBB15_2:
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    movl %edx, %ecx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl %esi, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_a1:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB15_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB15_2:
+; X86-BMI1NOTBM-NEXT:    shll $8, %eax
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_a1:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB15_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB15_2:
+; X86-BMI1BMI2-NEXT:    bzhil %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_a1:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_a1:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_a1:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhil %edx, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %truncshifted
+  ret i32 %masked
+}
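+; Editorial note: @bextr64_32_a1 performs the same extraction, but builds the
+; mask in i32 after the truncation; with the whole tail narrowed to 32 bits,
+; the X64-BMI1BMI2 checks above show it folding to just shrxq + bzhil.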
+
+; Shifting happens in 64-bit, then truncation (with extra use).
+; Masking is 32-bit.
+define i32 @bextr64_32_a1_trunc_extrause(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_a1_trunc_extrause:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %ebx
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    pushl %eax
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %bl
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl %edx, %esi
+; X86-NOBMI-NEXT:    shrl %cl, %esi
+; X86-NOBMI-NEXT:    shrdl %cl, %edx, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB16_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %esi
+; X86-NOBMI-NEXT:  .LBB16_2:
+; X86-NOBMI-NEXT:    movl %esi, (%esp)
+; X86-NOBMI-NEXT:    calll use32
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    movl %ebx, %ecx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl %esi, %eax
+; X86-NOBMI-NEXT:    addl $4, %esp
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %ebx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_a1_trunc_extrause:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %ebx
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    pushl %eax
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %bl
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1NOTBM-NEXT:    movl %edx, %esi
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %esi
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB16_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %eax, %esi
+; X86-BMI1NOTBM-NEXT:  .LBB16_2:
+; X86-BMI1NOTBM-NEXT:    movl %esi, (%esp)
+; X86-BMI1NOTBM-NEXT:    calll use32
+; X86-BMI1NOTBM-NEXT:    shll $8, %ebx
+; X86-BMI1NOTBM-NEXT:    bextrl %ebx, %esi, %eax
+; X86-BMI1NOTBM-NEXT:    addl $4, %esp
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %ebx
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_a1_trunc_extrause:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %ebx
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    pushl %eax
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %bl
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %esi
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB16_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %esi
+; X86-BMI1BMI2-NEXT:  .LBB16_2:
+; X86-BMI1BMI2-NEXT:    movl %esi, (%esp)
+; X86-BMI1BMI2-NEXT:    calll use32
+; X86-BMI1BMI2-NEXT:    bzhil %ebx, %esi, %eax
+; X86-BMI1BMI2-NEXT:    addl $4, %esp
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    popl %ebx
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_a1_trunc_extrause:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    pushq %rbp
+; X64-NOBMI-NEXT:    pushq %rbx
+; X64-NOBMI-NEXT:    pushq %rax
+; X64-NOBMI-NEXT:    movl %edx, %ebp
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    movq %rdi, %rbx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rbx
+; X64-NOBMI-NEXT:    movl %ebx, %edi
+; X64-NOBMI-NEXT:    callq use32
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %ebp, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl %ebx, %eax
+; X64-NOBMI-NEXT:    addq $8, %rsp
+; X64-NOBMI-NEXT:    popq %rbx
+; X64-NOBMI-NEXT:    popq %rbp
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_a1_trunc_extrause:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    pushq %rbp
+; X64-BMI1NOTBM-NEXT:    pushq %rbx
+; X64-BMI1NOTBM-NEXT:    pushq %rax
+; X64-BMI1NOTBM-NEXT:    movl %edx, %ebp
+; X64-BMI1NOTBM-NEXT:    movq %rsi, %rcx
+; X64-BMI1NOTBM-NEXT:    movq %rdi, %rbx
+; X64-BMI1NOTBM-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-BMI1NOTBM-NEXT:    shrq %cl, %rbx
+; X64-BMI1NOTBM-NEXT:    movl %ebx, %edi
+; X64-BMI1NOTBM-NEXT:    callq use32
+; X64-BMI1NOTBM-NEXT:    shll $8, %ebp
+; X64-BMI1NOTBM-NEXT:    bextrl %ebp, %ebx, %eax
+; X64-BMI1NOTBM-NEXT:    addq $8, %rsp
+; X64-BMI1NOTBM-NEXT:    popq %rbx
+; X64-BMI1NOTBM-NEXT:    popq %rbp
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_a1_trunc_extrause:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    pushq %rbp
+; X64-BMI1BMI2-NEXT:    pushq %rbx
+; X64-BMI1BMI2-NEXT:    pushq %rax
+; X64-BMI1BMI2-NEXT:    movl %edx, %ebp
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rbx
+; X64-BMI1BMI2-NEXT:    movl %ebx, %edi
+; X64-BMI1BMI2-NEXT:    callq use32
+; X64-BMI1BMI2-NEXT:    bzhil %ebp, %ebx, %eax
+; X64-BMI1BMI2-NEXT:    addq $8, %rsp
+; X64-BMI1BMI2-NEXT:    popq %rbx
+; X64-BMI1BMI2-NEXT:    popq %rbp
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  call void @use32(i32 %truncshifted)
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %truncshifted
+  ret i32 %masked
+}
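+; Editorial note: the call to @use32 keeps the truncated value live across the
+; masking, so the checks above park it in a callee-saved register (%rbx on
+; X64, %esi on X86) and apply the mask only after the call returns.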
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_a2:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    movl %edi, %esi
+; X86-NOBMI-NEXT:    shrl %cl, %esi
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB17_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %esi
+; X86-NOBMI-NEXT:  .LBB17_2:
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    movl %edx, %ecx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl %esi, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_a2:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB17_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB17_2:
+; X86-BMI1NOTBM-NEXT:    shll $8, %eax
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_a2:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB17_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB17_2:
+; X86-BMI1BMI2-NEXT:    bzhil %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_a2:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_a2:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_a2:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhil %edx, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %zextmask = zext i32 %mask to i64
+  %masked = and i64 %zextmask, %shifted
+  %truncmasked = trunc i64 %masked to i32
+  ret i32 %truncmasked
+}
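+; Editorial note: @bextr64_32_a2 zero-extends the i32 mask back to i64 before
+; the 'and'; since the high 32 bits of the mask are zero, this is equivalent
+; to masking after the truncation, and the checked lowerings match
+; @bextr64_32_a1 (e.g. shrxq + bzhil on X64-BMI1BMI2).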
+
 ; ---------------------------------------------------------------------------- ;
 ; Pattern b. 32-bit
 ; ---------------------------------------------------------------------------- ;
@@ -2075,22 +2558,22 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB20_2
+; X86-NOBMI-NEXT:    je .LBB24_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB20_2:
+; X86-NOBMI-NEXT:  .LBB24_2:
 ; X86-NOBMI-NEXT:    movl $-1, %edx
 ; X86-NOBMI-NEXT:    movl $-1, %eax
 ; X86-NOBMI-NEXT:    movb %ch, %cl
 ; X86-NOBMI-NEXT:    shll %cl, %eax
 ; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
 ; X86-NOBMI-NEXT:    testb $32, %ch
-; X86-NOBMI-NEXT:    je .LBB20_4
+; X86-NOBMI-NEXT:    je .LBB24_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %edx
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB20_4:
+; X86-NOBMI-NEXT:  .LBB24_4:
 ; X86-NOBMI-NEXT:    notl %edx
 ; X86-NOBMI-NEXT:    andl %edi, %edx
 ; X86-NOBMI-NEXT:    notl %eax
@@ -2112,22 +2595,22 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB20_2
+; X86-BMI1NOTBM-NEXT:    je .LBB24_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edx, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB20_2:
+; X86-BMI1NOTBM-NEXT:  .LBB24_2:
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %edi
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebx
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ecx
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %al
-; X86-BMI1NOTBM-NEXT:    je .LBB20_4
+; X86-BMI1NOTBM-NEXT:    je .LBB24_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB20_4:
+; X86-BMI1NOTBM-NEXT:  .LBB24_4:
 ; X86-BMI1NOTBM-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1NOTBM-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    popl %esi
@@ -2147,21 +2630,21 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %esi
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %edx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB20_2
+; X86-BMI1BMI2-NEXT:    je .LBB24_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edx, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI1BMI2-NEXT:  .LBB20_2:
+; X86-BMI1BMI2-NEXT:  .LBB24_2:
 ; X86-BMI1BMI2-NEXT:    movl $-1, %edi
 ; X86-BMI1BMI2-NEXT:    shlxl %eax, %edi, %ebx
 ; X86-BMI1BMI2-NEXT:    movl %eax, %ecx
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %al
-; X86-BMI1BMI2-NEXT:    je .LBB20_4
+; X86-BMI1BMI2-NEXT:    je .LBB24_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %edi
 ; X86-BMI1BMI2-NEXT:    xorl %ebx, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB20_4:
+; X86-BMI1BMI2-NEXT:  .LBB24_4:
 ; X86-BMI1BMI2-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1BMI2-NEXT:    popl %esi
@@ -2214,22 +2697,22 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB21_2
+; X86-NOBMI-NEXT:    je .LBB25_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB21_2:
+; X86-NOBMI-NEXT:  .LBB25_2:
 ; X86-NOBMI-NEXT:    movl $-1, %edx
 ; X86-NOBMI-NEXT:    movl $-1, %eax
 ; X86-NOBMI-NEXT:    movb %ch, %cl
 ; X86-NOBMI-NEXT:    shll %cl, %eax
 ; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
 ; X86-NOBMI-NEXT:    testb $32, %ch
-; X86-NOBMI-NEXT:    je .LBB21_4
+; X86-NOBMI-NEXT:    je .LBB25_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %edx
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB21_4:
+; X86-NOBMI-NEXT:  .LBB25_4:
 ; X86-NOBMI-NEXT:    notl %edx
 ; X86-NOBMI-NEXT:    andl %edi, %edx
 ; X86-NOBMI-NEXT:    notl %eax
@@ -2251,22 +2734,22 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB21_2
+; X86-BMI1NOTBM-NEXT:    je .LBB25_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edx, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB21_2:
+; X86-BMI1NOTBM-NEXT:  .LBB25_2:
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %edi
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebx
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ecx
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %al
-; X86-BMI1NOTBM-NEXT:    je .LBB21_4
+; X86-BMI1NOTBM-NEXT:    je .LBB25_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB21_4:
+; X86-BMI1NOTBM-NEXT:  .LBB25_4:
 ; X86-BMI1NOTBM-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1NOTBM-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    popl %esi
@@ -2286,21 +2769,21 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %esi
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %edx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB21_2
+; X86-BMI1BMI2-NEXT:    je .LBB25_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edx, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI1BMI2-NEXT:  .LBB21_2:
+; X86-BMI1BMI2-NEXT:  .LBB25_2:
 ; X86-BMI1BMI2-NEXT:    movl $-1, %edi
 ; X86-BMI1BMI2-NEXT:    shlxl %eax, %edi, %ebx
 ; X86-BMI1BMI2-NEXT:    movl %eax, %ecx
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %al
-; X86-BMI1BMI2-NEXT:    je .LBB21_4
+; X86-BMI1BMI2-NEXT:    je .LBB25_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %edi
 ; X86-BMI1BMI2-NEXT:    xorl %ebx, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB21_4:
+; X86-BMI1BMI2-NEXT:  .LBB25_4:
 ; X86-BMI1BMI2-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1BMI2-NEXT:    popl %esi
@@ -2359,22 +2842,22 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB22_2
+; X86-NOBMI-NEXT:    je .LBB26_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB22_2:
+; X86-NOBMI-NEXT:  .LBB26_2:
 ; X86-NOBMI-NEXT:    movl $-1, %edx
 ; X86-NOBMI-NEXT:    movl $-1, %eax
 ; X86-NOBMI-NEXT:    movb %ch, %cl
 ; X86-NOBMI-NEXT:    shll %cl, %eax
 ; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
 ; X86-NOBMI-NEXT:    testb $32, %ch
-; X86-NOBMI-NEXT:    je .LBB22_4
+; X86-NOBMI-NEXT:    je .LBB26_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %edx
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB22_4:
+; X86-NOBMI-NEXT:  .LBB26_4:
 ; X86-NOBMI-NEXT:    notl %edx
 ; X86-NOBMI-NEXT:    andl %edi, %edx
 ; X86-NOBMI-NEXT:    notl %eax
@@ -2397,22 +2880,22 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB22_2
+; X86-BMI1NOTBM-NEXT:    je .LBB26_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edx, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB22_2:
+; X86-BMI1NOTBM-NEXT:  .LBB26_2:
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %edi
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebx
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ecx
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %al
-; X86-BMI1NOTBM-NEXT:    je .LBB22_4
+; X86-BMI1NOTBM-NEXT:    je .LBB26_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB22_4:
+; X86-BMI1NOTBM-NEXT:  .LBB26_4:
 ; X86-BMI1NOTBM-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1NOTBM-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    popl %esi
@@ -2433,21 +2916,21 @@
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edi, %esi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB22_2
+; X86-BMI1BMI2-NEXT:    je .LBB26_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edx, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI1BMI2-NEXT:  .LBB22_2:
+; X86-BMI1BMI2-NEXT:  .LBB26_2:
 ; X86-BMI1BMI2-NEXT:    movl $-1, %edi
 ; X86-BMI1BMI2-NEXT:    shlxl %eax, %edi, %ebx
 ; X86-BMI1BMI2-NEXT:    movl %eax, %ecx
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %al
-; X86-BMI1BMI2-NEXT:    je .LBB22_4
+; X86-BMI1BMI2-NEXT:    je .LBB26_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %edi
 ; X86-BMI1BMI2-NEXT:    xorl %ebx, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB22_4:
+; X86-BMI1BMI2-NEXT:  .LBB26_4:
 ; X86-BMI1BMI2-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1BMI2-NEXT:    popl %esi
@@ -2503,22 +2986,22 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB23_2
+; X86-NOBMI-NEXT:    je .LBB27_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB23_2:
+; X86-NOBMI-NEXT:  .LBB27_2:
 ; X86-NOBMI-NEXT:    movl $-1, %edx
 ; X86-NOBMI-NEXT:    movl $-1, %eax
 ; X86-NOBMI-NEXT:    movb %ch, %cl
 ; X86-NOBMI-NEXT:    shll %cl, %eax
 ; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
 ; X86-NOBMI-NEXT:    testb $32, %ch
-; X86-NOBMI-NEXT:    je .LBB23_4
+; X86-NOBMI-NEXT:    je .LBB27_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %edx
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB23_4:
+; X86-NOBMI-NEXT:  .LBB27_4:
 ; X86-NOBMI-NEXT:    notl %edx
 ; X86-NOBMI-NEXT:    andl %edi, %edx
 ; X86-NOBMI-NEXT:    notl %eax
@@ -2541,22 +3024,22 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB23_2
+; X86-BMI1NOTBM-NEXT:    je .LBB27_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edx, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB23_2:
+; X86-BMI1NOTBM-NEXT:  .LBB27_2:
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %edi
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebx
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ecx
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %al
-; X86-BMI1NOTBM-NEXT:    je .LBB23_4
+; X86-BMI1NOTBM-NEXT:    je .LBB27_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB23_4:
+; X86-BMI1NOTBM-NEXT:  .LBB27_4:
 ; X86-BMI1NOTBM-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1NOTBM-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    popl %esi
@@ -2577,21 +3060,21 @@
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edi, %esi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB23_2
+; X86-BMI1BMI2-NEXT:    je .LBB27_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edx, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI1BMI2-NEXT:  .LBB23_2:
+; X86-BMI1BMI2-NEXT:  .LBB27_2:
 ; X86-BMI1BMI2-NEXT:    movl $-1, %edi
 ; X86-BMI1BMI2-NEXT:    shlxl %eax, %edi, %ebx
 ; X86-BMI1BMI2-NEXT:    movl %eax, %ecx
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %al
-; X86-BMI1BMI2-NEXT:    je .LBB23_4
+; X86-BMI1BMI2-NEXT:    je .LBB27_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %edi
 ; X86-BMI1BMI2-NEXT:    xorl %ebx, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB23_4:
+; X86-BMI1BMI2-NEXT:  .LBB27_4:
 ; X86-BMI1BMI2-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1BMI2-NEXT:    popl %esi
@@ -2651,22 +3134,22 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edx
 ; X86-NOBMI-NEXT:    shrdl %cl, %esi, %eax
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB24_2
+; X86-NOBMI-NEXT:    je .LBB28_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edx, %eax
 ; X86-NOBMI-NEXT:    xorl %edx, %edx
-; X86-NOBMI-NEXT:  .LBB24_2:
+; X86-NOBMI-NEXT:  .LBB28_2:
 ; X86-NOBMI-NEXT:    movl $-1, %edi
 ; X86-NOBMI-NEXT:    movl $-1, %esi
 ; X86-NOBMI-NEXT:    movb %ch, %cl
 ; X86-NOBMI-NEXT:    shll %cl, %esi
 ; X86-NOBMI-NEXT:    shldl %cl, %edi, %edi
 ; X86-NOBMI-NEXT:    testb $32, %ch
-; X86-NOBMI-NEXT:    je .LBB24_4
+; X86-NOBMI-NEXT:    je .LBB28_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %esi, %edi
 ; X86-NOBMI-NEXT:    xorl %esi, %esi
-; X86-NOBMI-NEXT:  .LBB24_4:
+; X86-NOBMI-NEXT:  .LBB28_4:
 ; X86-NOBMI-NEXT:    notl %edi
 ; X86-NOBMI-NEXT:    andl %edi, %edx
 ; X86-NOBMI-NEXT:    notl %esi
@@ -2688,22 +3171,22 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB24_2
+; X86-BMI1NOTBM-NEXT:    je .LBB28_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edx, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB24_2:
+; X86-BMI1NOTBM-NEXT:  .LBB28_2:
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %edi
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebx
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ecx
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %al
-; X86-BMI1NOTBM-NEXT:    je .LBB24_4
+; X86-BMI1NOTBM-NEXT:    je .LBB28_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB24_4:
+; X86-BMI1NOTBM-NEXT:  .LBB28_4:
 ; X86-BMI1NOTBM-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1NOTBM-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    popl %esi
@@ -2723,21 +3206,21 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %esi
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %edx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB24_2
+; X86-BMI1BMI2-NEXT:    je .LBB28_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edx, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI1BMI2-NEXT:  .LBB24_2:
+; X86-BMI1BMI2-NEXT:  .LBB28_2:
 ; X86-BMI1BMI2-NEXT:    movl $-1, %edi
 ; X86-BMI1BMI2-NEXT:    shlxl %eax, %edi, %ebx
 ; X86-BMI1BMI2-NEXT:    movl %eax, %ecx
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %edi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %al
-; X86-BMI1BMI2-NEXT:    je .LBB24_4
+; X86-BMI1BMI2-NEXT:    je .LBB28_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %edi
 ; X86-BMI1BMI2-NEXT:    xorl %ebx, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB24_4:
+; X86-BMI1BMI2-NEXT:  .LBB28_4:
 ; X86-BMI1BMI2-NEXT:    andnl %edx, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    andnl %esi, %ebx, %eax
 ; X86-BMI1BMI2-NEXT:    popl %esi
@@ -2794,22 +3277,22 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %ebp
 ; X86-NOBMI-NEXT:    shrdl %cl, %esi, %ebx
 ; X86-NOBMI-NEXT:    testb $32, %al
-; X86-NOBMI-NEXT:    je .LBB25_2
+; X86-NOBMI-NEXT:    je .LBB29_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %ebp, %ebx
 ; X86-NOBMI-NEXT:    xorl %ebp, %ebp
-; X86-NOBMI-NEXT:  .LBB25_2:
+; X86-NOBMI-NEXT:  .LBB29_2:
 ; X86-NOBMI-NEXT:    movl $-1, %esi
 ; X86-NOBMI-NEXT:    movl $-1, %edi
 ; X86-NOBMI-NEXT:    movl %edx, %ecx
 ; X86-NOBMI-NEXT:    shll %cl, %edi
 ; X86-NOBMI-NEXT:    shldl %cl, %esi, %esi
 ; X86-NOBMI-NEXT:    testb $32, %dl
-; X86-NOBMI-NEXT:    je .LBB25_4
+; X86-NOBMI-NEXT:    je .LBB29_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB25_4:
+; X86-NOBMI-NEXT:  .LBB29_4:
 ; X86-NOBMI-NEXT:    notl %esi
 ; X86-NOBMI-NEXT:    andl %ebp, %esi
 ; X86-NOBMI-NEXT:    notl %edi
@@ -2844,22 +3327,22 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %esi
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebx, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %al
-; X86-BMI1NOTBM-NEXT:    je .LBB25_2
+; X86-BMI1NOTBM-NEXT:    je .LBB29_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %esi, %esi
-; X86-BMI1NOTBM-NEXT:  .LBB25_2:
+; X86-BMI1NOTBM-NEXT:  .LBB29_2:
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebx
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebp
 ; X86-BMI1NOTBM-NEXT:    movl %edx, %ecx
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %ebp
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %ebx, %ebx
 ; X86-BMI1NOTBM-NEXT:    testb $32, %dl
-; X86-BMI1NOTBM-NEXT:    je .LBB25_4
+; X86-BMI1NOTBM-NEXT:    je .LBB29_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebp, %ebx
 ; X86-BMI1NOTBM-NEXT:    xorl %ebp, %ebp
-; X86-BMI1NOTBM-NEXT:  .LBB25_4:
+; X86-BMI1NOTBM-NEXT:  .LBB29_4:
 ; X86-BMI1NOTBM-NEXT:    andnl %esi, %ebx, %esi
 ; X86-BMI1NOTBM-NEXT:    andnl %edi, %ebp, %edi
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
@@ -2891,21 +3374,21 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
 ; X86-BMI1BMI2-NEXT:    shrxl %eax, %esi, %esi
 ; X86-BMI1BMI2-NEXT:    testb $32, %al
-; X86-BMI1BMI2-NEXT:    je .LBB25_2
+; X86-BMI1BMI2-NEXT:    je .LBB29_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %esi, %edi
 ; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
-; X86-BMI1BMI2-NEXT:  .LBB25_2:
+; X86-BMI1BMI2-NEXT:  .LBB29_2:
 ; X86-BMI1BMI2-NEXT:    movl $-1, %ebp
 ; X86-BMI1BMI2-NEXT:    shlxl %edx, %ebp, %ebx
 ; X86-BMI1BMI2-NEXT:    movl %edx, %ecx
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %ebp, %ebp
 ; X86-BMI1BMI2-NEXT:    testb $32, %dl
-; X86-BMI1BMI2-NEXT:    je .LBB25_4
+; X86-BMI1BMI2-NEXT:    je .LBB29_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %ebp
 ; X86-BMI1BMI2-NEXT:    xorl %ebx, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB25_4:
+; X86-BMI1BMI2-NEXT:  .LBB29_4:
 ; X86-BMI1BMI2-NEXT:    andnl %esi, %ebp, %esi
 ; X86-BMI1BMI2-NEXT:    andnl %edi, %ebx, %edi
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
@@ -2969,6 +3452,342 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_b0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    movl %edi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %esi
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB30_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %esi, %eax
+; X86-NOBMI-NEXT:  .LBB30_2:
+; X86-NOBMI-NEXT:    movl $-1, %esi
+; X86-NOBMI-NEXT:    movl %edx, %ecx
+; X86-NOBMI-NEXT:    shll %cl, %esi
+; X86-NOBMI-NEXT:    xorl %ecx, %ecx
+; X86-NOBMI-NEXT:    testb $32, %dl
+; X86-NOBMI-NEXT:    jne .LBB30_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %esi, %ecx
+; X86-NOBMI-NEXT:  .LBB30_4:
+; X86-NOBMI-NEXT:    notl %ecx
+; X86-NOBMI-NEXT:    andl %ecx, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_b0:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB30_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB30_2:
+; X86-BMI1NOTBM-NEXT:    movl $-1, %esi
+; X86-BMI1NOTBM-NEXT:    movl %eax, %ecx
+; X86-BMI1NOTBM-NEXT:    shll %cl, %esi
+; X86-BMI1NOTBM-NEXT:    xorl %ecx, %ecx
+; X86-BMI1NOTBM-NEXT:    testb $32, %al
+; X86-BMI1NOTBM-NEXT:    jne .LBB30_4
+; X86-BMI1NOTBM-NEXT:  # %bb.3:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %ecx
+; X86-BMI1NOTBM-NEXT:  .LBB30_4:
+; X86-BMI1NOTBM-NEXT:    andnl %edx, %ecx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_b0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB30_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB30_2:
+; X86-BMI1BMI2-NEXT:    xorl %ecx, %ecx
+; X86-BMI1BMI2-NEXT:    testb $32, %al
+; X86-BMI1BMI2-NEXT:    jne .LBB30_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    movl $-1, %ecx
+; X86-BMI1BMI2-NEXT:    shlxl %eax, %ecx, %ecx
+; X86-BMI1BMI2-NEXT:  .LBB30_4:
+; X86-BMI1BMI2-NEXT:    andnl %edx, %ecx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_b0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_b0:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    movq %rsi, %rcx
+; X64-BMI1NOTBM-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-BMI1NOTBM-NEXT:    shrq %cl, %rdi
+; X64-BMI1NOTBM-NEXT:    movq $-1, %rax
+; X64-BMI1NOTBM-NEXT:    movl %edx, %ecx
+; X64-BMI1NOTBM-NEXT:    shlq %cl, %rax
+; X64-BMI1NOTBM-NEXT:    andnl %edi, %eax, %eax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_b0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    movq $-1, %rcx
+; X64-BMI1BMI2-NEXT:    shlxq %rdx, %rcx, %rcx
+; X64-BMI1BMI2-NEXT:    andnl %eax, %ecx, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shiftedval = lshr i64 %val, %numskipbits
+  %widenumlowbits = zext i8 %numlowbits to i64
+  %notmask = shl nsw i64 -1, %widenumlowbits
+  %mask = xor i64 %notmask, -1
+  %wideres = and i64 %shiftedval, %mask
+  %res = trunc i64 %wideres to i32
+  ret i32 %res
+}
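+; Editorial note: pattern b forms the mask as ~(-1 << %numlowbits) rather than
+; (1 << %numlowbits) - 1; for 0 <= %numlowbits < 64 both set exactly the low
+; %numlowbits bits. The BMI1 checks above fold the trailing not+and into a
+; single andnl.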
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_b1:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    movl %edi, %esi
+; X86-NOBMI-NEXT:    shrl %cl, %esi
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB31_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %esi
+; X86-NOBMI-NEXT:  .LBB31_2:
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    movl %edx, %ecx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl %esi, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_b1:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB31_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB31_2:
+; X86-BMI1NOTBM-NEXT:    shll $8, %eax
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_b1:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB31_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB31_2:
+; X86-BMI1BMI2-NEXT:    bzhil %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_b1:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_b1:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_b1:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhil %edx, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shiftedval = lshr i64 %val, %numskipbits
+  %truncshiftedval = trunc i64 %shiftedval to i32
+  %widenumlowbits = zext i8 %numlowbits to i32
+  %notmask = shl nsw i32 -1, %widenumlowbits
+  %mask = xor i32 %notmask, -1
+  %res = and i32 %truncshiftedval, %mask
+  ret i32 %res
+}
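+; Editorial note: in @bextr64_32_b1 the not-mask is built in i32 after the
+; truncation, so the BMI2 lowering never materializes it at all: the
+; X64-BMI1BMI2 checks above reduce the whole pattern to shrxq + bzhil.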
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_b2:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    movl %edi, %esi
+; X86-NOBMI-NEXT:    shrl %cl, %esi
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB32_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %esi
+; X86-NOBMI-NEXT:  .LBB32_2:
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    movl %edx, %ecx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl %esi, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_b2:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB32_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB32_2:
+; X86-BMI1NOTBM-NEXT:    shll $8, %eax
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_b2:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB32_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB32_2:
+; X86-BMI1BMI2-NEXT:    bzhil %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_b2:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_b2:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_b2:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhil %edx, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shiftedval = lshr i64 %val, %numskipbits
+  %widenumlowbits = zext i8 %numlowbits to i32
+  %notmask = shl nsw i32 -1, %widenumlowbits
+  %mask = xor i32 %notmask, -1
+  %zextmask = zext i32 %mask to i64
+  %wideres = and i64 %shiftedval, %zextmask
+  %res = trunc i64 %wideres to i32
+  ret i32 %res
+}
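+; Editorial note: @bextr64_32_b2 zero-extends the i32 not-mask to i64 before
+; the 'and'; as with @bextr64_32_a2, the zero upper half makes this equivalent
+; to masking after truncation, and the checked output is identical to
+; @bextr64_32_b1.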
+
 ; ---------------------------------------------------------------------------- ;
 ; Pattern c. 32-bit
 ; ---------------------------------------------------------------------------- ;
@@ -3830,11 +4649,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB32_2
+; X86-NOBMI-NEXT:    je .LBB39_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB32_2:
+; X86-NOBMI-NEXT:  .LBB39_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    movl $-1, %ebp
@@ -3842,11 +4661,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %ebx
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB32_4
+; X86-NOBMI-NEXT:    je .LBB39_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %ebx, %ebp
 ; X86-NOBMI-NEXT:    xorl %ebx, %ebx
-; X86-NOBMI-NEXT:  .LBB32_4:
+; X86-NOBMI-NEXT:  .LBB39_4:
 ; X86-NOBMI-NEXT:    subl $8, %esp
 ; X86-NOBMI-NEXT:    pushl %ebx
 ; X86-NOBMI-NEXT:    pushl %ebp
@@ -3877,11 +4696,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB32_2
+; X86-BMI1NOTBM-NEXT:    je .LBB39_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edi, %edi
-; X86-BMI1NOTBM-NEXT:  .LBB32_2:
+; X86-BMI1NOTBM-NEXT:  .LBB39_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebp
@@ -3889,11 +4708,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB32_4
+; X86-BMI1NOTBM-NEXT:    je .LBB39_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %ebp
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB32_4:
+; X86-BMI1NOTBM-NEXT:  .LBB39_4:
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
 ; X86-BMI1NOTBM-NEXT:    pushl %ebx
 ; X86-BMI1NOTBM-NEXT:    pushl %ebp
@@ -3923,22 +4742,22 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB32_2
+; X86-BMI1BMI2-NEXT:    je .LBB39_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edi, %edi
-; X86-BMI1BMI2-NEXT:  .LBB32_2:
+; X86-BMI1BMI2-NEXT:  .LBB39_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    movl $-1, %ebx
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %ebx, %ebp
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %ebx, %ebx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB32_4
+; X86-BMI1BMI2-NEXT:    je .LBB39_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebp, %ebx
 ; X86-BMI1BMI2-NEXT:    xorl %ebp, %ebp
-; X86-BMI1BMI2-NEXT:  .LBB32_4:
+; X86-BMI1BMI2-NEXT:  .LBB39_4:
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
 ; X86-BMI1BMI2-NEXT:    pushl %ebp
 ; X86-BMI1BMI2-NEXT:    pushl %ebx
@@ -4039,11 +4858,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB33_2
+; X86-NOBMI-NEXT:    je .LBB40_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB33_2:
+; X86-NOBMI-NEXT:  .LBB40_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    movl $-1, %ebp
@@ -4051,11 +4870,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %ebx
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB33_4
+; X86-NOBMI-NEXT:    je .LBB40_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %ebx, %ebp
 ; X86-NOBMI-NEXT:    xorl %ebx, %ebx
-; X86-NOBMI-NEXT:  .LBB33_4:
+; X86-NOBMI-NEXT:  .LBB40_4:
 ; X86-NOBMI-NEXT:    subl $8, %esp
 ; X86-NOBMI-NEXT:    pushl %ebx
 ; X86-NOBMI-NEXT:    pushl %ebp
@@ -4086,11 +4905,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB33_2
+; X86-BMI1NOTBM-NEXT:    je .LBB40_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edi, %edi
-; X86-BMI1NOTBM-NEXT:  .LBB33_2:
+; X86-BMI1NOTBM-NEXT:  .LBB40_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebp
@@ -4098,11 +4917,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB33_4
+; X86-BMI1NOTBM-NEXT:    je .LBB40_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %ebp
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB33_4:
+; X86-BMI1NOTBM-NEXT:  .LBB40_4:
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
 ; X86-BMI1NOTBM-NEXT:    pushl %ebx
 ; X86-BMI1NOTBM-NEXT:    pushl %ebp
@@ -4132,22 +4951,22 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB33_2
+; X86-BMI1BMI2-NEXT:    je .LBB40_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edi, %edi
-; X86-BMI1BMI2-NEXT:  .LBB33_2:
+; X86-BMI1BMI2-NEXT:  .LBB40_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    movl $-1, %ebx
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %ebx, %ebp
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %ebx, %ebx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB33_4
+; X86-BMI1BMI2-NEXT:    je .LBB40_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebp, %ebx
 ; X86-BMI1BMI2-NEXT:    xorl %ebp, %ebp
-; X86-BMI1BMI2-NEXT:  .LBB33_4:
+; X86-BMI1BMI2-NEXT:  .LBB40_4:
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
 ; X86-BMI1BMI2-NEXT:    pushl %ebp
 ; X86-BMI1BMI2-NEXT:    pushl %ebx
@@ -4252,11 +5071,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB34_2
+; X86-NOBMI-NEXT:    je .LBB41_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB34_2:
+; X86-NOBMI-NEXT:  .LBB41_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    movl $-1, %ebp
@@ -4264,11 +5083,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %ebx
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB34_4
+; X86-NOBMI-NEXT:    je .LBB41_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %ebx, %ebp
 ; X86-NOBMI-NEXT:    xorl %ebx, %ebx
-; X86-NOBMI-NEXT:  .LBB34_4:
+; X86-NOBMI-NEXT:  .LBB41_4:
 ; X86-NOBMI-NEXT:    subl $8, %esp
 ; X86-NOBMI-NEXT:    pushl %ebx
 ; X86-NOBMI-NEXT:    pushl %ebp
@@ -4300,11 +5119,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB34_2
+; X86-BMI1NOTBM-NEXT:    je .LBB41_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edi, %edi
-; X86-BMI1NOTBM-NEXT:  .LBB34_2:
+; X86-BMI1NOTBM-NEXT:  .LBB41_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebp
@@ -4312,11 +5131,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB34_4
+; X86-BMI1NOTBM-NEXT:    je .LBB41_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %ebp
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB34_4:
+; X86-BMI1NOTBM-NEXT:  .LBB41_4:
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
 ; X86-BMI1NOTBM-NEXT:    pushl %ebx
 ; X86-BMI1NOTBM-NEXT:    pushl %ebp
@@ -4347,22 +5166,22 @@
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB34_2
+; X86-BMI1BMI2-NEXT:    je .LBB41_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edi, %edi
-; X86-BMI1BMI2-NEXT:  .LBB34_2:
+; X86-BMI1BMI2-NEXT:  .LBB41_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    movl $-1, %ebx
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %ebx, %ebp
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %ebx, %ebx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB34_4
+; X86-BMI1BMI2-NEXT:    je .LBB41_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebp, %ebx
 ; X86-BMI1BMI2-NEXT:    xorl %ebp, %ebp
-; X86-BMI1BMI2-NEXT:  .LBB34_4:
+; X86-BMI1BMI2-NEXT:  .LBB41_4:
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
 ; X86-BMI1BMI2-NEXT:    pushl %ebp
 ; X86-BMI1BMI2-NEXT:    pushl %ebx
@@ -4465,11 +5284,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB35_2
+; X86-NOBMI-NEXT:    je .LBB42_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB35_2:
+; X86-NOBMI-NEXT:  .LBB42_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    movl $-1, %ebp
@@ -4477,11 +5296,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %ebx
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB35_4
+; X86-NOBMI-NEXT:    je .LBB42_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %ebx, %ebp
 ; X86-NOBMI-NEXT:    xorl %ebx, %ebx
-; X86-NOBMI-NEXT:  .LBB35_4:
+; X86-NOBMI-NEXT:  .LBB42_4:
 ; X86-NOBMI-NEXT:    subl $8, %esp
 ; X86-NOBMI-NEXT:    pushl %ebx
 ; X86-NOBMI-NEXT:    pushl %ebp
@@ -4513,11 +5332,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB35_2
+; X86-BMI1NOTBM-NEXT:    je .LBB42_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edi, %edi
-; X86-BMI1NOTBM-NEXT:  .LBB35_2:
+; X86-BMI1NOTBM-NEXT:  .LBB42_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebp
@@ -4525,11 +5344,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB35_4
+; X86-BMI1NOTBM-NEXT:    je .LBB42_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %ebp
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB35_4:
+; X86-BMI1NOTBM-NEXT:  .LBB42_4:
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
 ; X86-BMI1NOTBM-NEXT:    pushl %ebx
 ; X86-BMI1NOTBM-NEXT:    pushl %ebp
@@ -4560,22 +5379,22 @@
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB35_2
+; X86-BMI1BMI2-NEXT:    je .LBB42_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edi, %edi
-; X86-BMI1BMI2-NEXT:  .LBB35_2:
+; X86-BMI1BMI2-NEXT:  .LBB42_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    movl $-1, %ebx
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %ebx, %ebp
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %ebx, %ebx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB35_4
+; X86-BMI1BMI2-NEXT:    je .LBB42_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebp, %ebx
 ; X86-BMI1BMI2-NEXT:    xorl %ebp, %ebp
-; X86-BMI1BMI2-NEXT:  .LBB35_4:
+; X86-BMI1BMI2-NEXT:  .LBB42_4:
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
 ; X86-BMI1BMI2-NEXT:    pushl %ebp
 ; X86-BMI1BMI2-NEXT:    pushl %ebx
@@ -4680,11 +5499,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB36_2
+; X86-NOBMI-NEXT:    je .LBB43_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB36_2:
+; X86-NOBMI-NEXT:  .LBB43_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    movl $-1, %ebp
@@ -4692,11 +5511,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %ebx
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB36_4
+; X86-NOBMI-NEXT:    je .LBB43_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %ebx, %ebp
 ; X86-NOBMI-NEXT:    xorl %ebx, %ebx
-; X86-NOBMI-NEXT:  .LBB36_4:
+; X86-NOBMI-NEXT:  .LBB43_4:
 ; X86-NOBMI-NEXT:    subl $8, %esp
 ; X86-NOBMI-NEXT:    pushl %ebx
 ; X86-NOBMI-NEXT:    pushl %ebp
@@ -4727,11 +5546,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB36_2
+; X86-BMI1NOTBM-NEXT:    je .LBB43_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edi, %edi
-; X86-BMI1NOTBM-NEXT:  .LBB36_2:
+; X86-BMI1NOTBM-NEXT:  .LBB43_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebp
@@ -4739,11 +5558,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB36_4
+; X86-BMI1NOTBM-NEXT:    je .LBB43_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %ebp
 ; X86-BMI1NOTBM-NEXT:    xorl %ebx, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB36_4:
+; X86-BMI1NOTBM-NEXT:  .LBB43_4:
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
 ; X86-BMI1NOTBM-NEXT:    pushl %ebx
 ; X86-BMI1NOTBM-NEXT:    pushl %ebp
@@ -4773,22 +5592,22 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB36_2
+; X86-BMI1BMI2-NEXT:    je .LBB43_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edi, %edi
-; X86-BMI1BMI2-NEXT:  .LBB36_2:
+; X86-BMI1BMI2-NEXT:  .LBB43_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    movl $-1, %ebx
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %ebx, %ebp
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %ebx, %ebx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB36_4
+; X86-BMI1BMI2-NEXT:    je .LBB43_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebp, %ebx
 ; X86-BMI1BMI2-NEXT:    xorl %ebp, %ebp
-; X86-BMI1BMI2-NEXT:  .LBB36_4:
+; X86-BMI1BMI2-NEXT:  .LBB43_4:
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
 ; X86-BMI1BMI2-NEXT:    pushl %ebp
 ; X86-BMI1BMI2-NEXT:    pushl %ebx
@@ -4889,11 +5708,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %edi
 ; X86-NOBMI-NEXT:    shrdl %cl, %eax, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB37_2
+; X86-NOBMI-NEXT:    je .LBB44_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    xorl %edi, %edi
-; X86-NOBMI-NEXT:  .LBB37_2:
+; X86-NOBMI-NEXT:  .LBB44_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    movl $-1, %ebx
@@ -4901,11 +5720,11 @@
 ; X86-NOBMI-NEXT:    shrl %cl, %ebp
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebx, %ebx
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB37_4
+; X86-NOBMI-NEXT:    je .LBB44_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %ebp, %ebx
 ; X86-NOBMI-NEXT:    xorl %ebp, %ebp
-; X86-NOBMI-NEXT:  .LBB37_4:
+; X86-NOBMI-NEXT:  .LBB44_4:
 ; X86-NOBMI-NEXT:    subl $8, %esp
 ; X86-NOBMI-NEXT:    pushl %ebp
 ; X86-NOBMI-NEXT:    pushl %ebx
@@ -4941,11 +5760,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB37_2
+; X86-BMI1NOTBM-NEXT:    je .LBB44_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    xorl %edi, %edi
-; X86-BMI1NOTBM-NEXT:  .LBB37_2:
+; X86-BMI1NOTBM-NEXT:  .LBB44_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    movl $-1, %ebx
@@ -4953,11 +5772,11 @@
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %ebp
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebx, %ebx
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB37_4
+; X86-BMI1NOTBM-NEXT:    je .LBB44_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %ebp, %ebx
 ; X86-BMI1NOTBM-NEXT:    xorl %ebp, %ebp
-; X86-BMI1NOTBM-NEXT:  .LBB37_4:
+; X86-BMI1NOTBM-NEXT:  .LBB44_4:
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
 ; X86-BMI1NOTBM-NEXT:    pushl %ebp
 ; X86-BMI1NOTBM-NEXT:    pushl %ebx
@@ -4992,22 +5811,22 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB37_2
+; X86-BMI1BMI2-NEXT:    je .LBB44_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edi, %edi
-; X86-BMI1BMI2-NEXT:  .LBB37_2:
+; X86-BMI1BMI2-NEXT:  .LBB44_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    movl $-1, %ebp
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %ebp, %ebx
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %ebp, %ebp
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB37_4
+; X86-BMI1BMI2-NEXT:    je .LBB44_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %ebp
 ; X86-BMI1BMI2-NEXT:    xorl %ebx, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB37_4:
+; X86-BMI1BMI2-NEXT:  .LBB44_4:
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
 ; X86-BMI1BMI2-NEXT:    pushl %ebx
 ; X86-BMI1BMI2-NEXT:    pushl %ebp
@@ -5107,6 +5926,337 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_c0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl %esi, %edx
+; X86-NOBMI-NEXT:    shrl %cl, %edx
+; X86-NOBMI-NEXT:    shrdl %cl, %esi, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB45_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:  .LBB45_2:
+; X86-NOBMI-NEXT:    movb $64, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %esi
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    shrdl %cl, %esi, %esi
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB45_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %esi, %eax
+; X86-NOBMI-NEXT:  .LBB45_4:
+; X86-NOBMI-NEXT:    andl %edx, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_c0:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %esi, %eax
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB45_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %eax, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB45_2:
+; X86-BMI1NOTBM-NEXT:    movb $64, %cl
+; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl $-1, %esi
+; X86-BMI1NOTBM-NEXT:    movl $-1, %eax
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %eax
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %esi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB45_4
+; X86-BMI1NOTBM-NEXT:  # %bb.3:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %eax
+; X86-BMI1NOTBM-NEXT:  .LBB45_4:
+; X86-BMI1NOTBM-NEXT:    andl %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_c0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB45_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edx
+; X86-BMI1BMI2-NEXT:  .LBB45_2:
+; X86-BMI1BMI2-NEXT:    movb $64, %cl
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %esi
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB45_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
+; X86-BMI1BMI2-NEXT:  .LBB45_4:
+; X86-BMI1BMI2-NEXT:    andl %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_c0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    negb %dl
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_c0:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    movq %rsi, %rcx
+; X64-BMI1NOTBM-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-BMI1NOTBM-NEXT:    shrq %cl, %rdi
+; X64-BMI1NOTBM-NEXT:    negb %dl
+; X64-BMI1NOTBM-NEXT:    movq $-1, %rax
+; X64-BMI1NOTBM-NEXT:    movl %edx, %ecx
+; X64-BMI1NOTBM-NEXT:    shrq %cl, %rax
+; X64-BMI1NOTBM-NEXT:    andl %edi, %eax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_c0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rcx
+; X64-BMI1BMI2-NEXT:    negb %dl
+; X64-BMI1BMI2-NEXT:    movq $-1, %rax
+; X64-BMI1BMI2-NEXT:    shrxq %rdx, %rax, %rax
+; X64-BMI1BMI2-NEXT:    andl %ecx, %eax
+; X64-BMI1BMI2-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %shifted
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
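+
+; Illustrative walkthrough of the pattern above (not part of the checks):
+; with %val = 0xFF00FF00FF00FF00, %numskipbits = 8, %numlowbits = 16:
+;   %shifted = 0x00FF00FF00FF00FF   (val >> 8)
+;   %mask    = 0x000000000000FFFF   (-1 >> (64 - 16))
+;   %masked  = 0x00FF, truncated to i32 -> 255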
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_c1:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl %esi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    shrdl %cl, %esi, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB46_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:  .LBB46_2:
+; X86-NOBMI-NEXT:    xorl %ecx, %ecx
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_c1:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB46_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB46_2:
+; X86-BMI1NOTBM-NEXT:    shll $8, %eax
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_c1:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB46_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB46_2:
+; X86-BMI1BMI2-NEXT:    bzhil %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_c1:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    negb %dl
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_c1:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_c1:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhil %edx, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %truncshifted
+  ret i32 %masked
+}
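+
+; Illustrative, same inputs as above (not part of the checks):
+; %truncshifted = 0x00FF00FF, %mask = 0x0000FFFF (-1 >> (32 - 16)),
+; %masked = 0x00FF -> 255; for %numlowbits up to 32 this matches c0.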
+
+; Shifting happens in 64-bit. The mask is 32-bit, but zero-extended to
+; 64-bit, so masking is done in 64-bit; truncation happens last.
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_c2:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl %esi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    shrdl %cl, %esi, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB47_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:  .LBB47_2:
+; X86-NOBMI-NEXT:    xorl %ecx, %ecx
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_c2:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB47_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB47_2:
+; X86-BMI1NOTBM-NEXT:    shll $8, %eax
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_c2:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB47_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB47_2:
+; X86-BMI1BMI2-NEXT:    bzhil %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_c2:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    negb %dl
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_c2:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_c2:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhil %edx, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %zextmask = zext i32 %mask to i64
+  %masked = and i64 %zextmask, %shifted
+  %truncmasked = trunc i64 %masked to i32
+  ret i32 %truncmasked
+}
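+
+; Illustrative, same inputs (not part of the checks): %mask = 0x0000FFFF is
+; zero-extended to i64 before the 64-bit and, so %masked = 0x00FF and
+; %truncmasked = 255, as in c0.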
+
 ; ---------------------------------------------------------------------------- ;
 ; Pattern d. 32-bit.
 ; ---------------------------------------------------------------------------- ;
@@ -5487,36 +6637,36 @@
 ; X86-NOBMI-NEXT:    shrdl %cl, %edx, %edi
 ; X86-NOBMI-NEXT:    xorl %esi, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB43_2
+; X86-NOBMI-NEXT:    je .LBB53_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %eax, %edi
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB43_2:
+; X86-NOBMI-NEXT:  .LBB53_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    shldl %cl, %edi, %eax
 ; X86-NOBMI-NEXT:    shll %cl, %edi
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl %edi, %ebx
-; X86-NOBMI-NEXT:    jne .LBB43_4
+; X86-NOBMI-NEXT:    jne .LBB53_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %ebx
-; X86-NOBMI-NEXT:  .LBB43_4:
+; X86-NOBMI-NEXT:  .LBB53_4:
 ; X86-NOBMI-NEXT:    movl %ebx, %eax
 ; X86-NOBMI-NEXT:    shrl %cl, %eax
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl $0, %edx
-; X86-NOBMI-NEXT:    jne .LBB43_6
+; X86-NOBMI-NEXT:    jne .LBB53_6
 ; X86-NOBMI-NEXT:  # %bb.5:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    movl %eax, %edx
-; X86-NOBMI-NEXT:  .LBB43_6:
+; X86-NOBMI-NEXT:  .LBB53_6:
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    jne .LBB43_8
+; X86-NOBMI-NEXT:    jne .LBB53_8
 ; X86-NOBMI-NEXT:  # %bb.7:
 ; X86-NOBMI-NEXT:    movl %esi, %eax
-; X86-NOBMI-NEXT:  .LBB43_8:
+; X86-NOBMI-NEXT:  .LBB53_8:
 ; X86-NOBMI-NEXT:    popl %esi
 ; X86-NOBMI-NEXT:    popl %edi
 ; X86-NOBMI-NEXT:    popl %ebx
@@ -5535,36 +6685,36 @@
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %esi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB43_2
+; X86-BMI1NOTBM-NEXT:    je .LBB53_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %eax, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB43_2:
+; X86-BMI1NOTBM-NEXT:  .LBB53_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %eax
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %ebx
-; X86-BMI1NOTBM-NEXT:    jne .LBB43_4
+; X86-BMI1NOTBM-NEXT:    jne .LBB53_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB43_4:
+; X86-BMI1NOTBM-NEXT:  .LBB53_4:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %eax
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl $0, %edx
-; X86-BMI1NOTBM-NEXT:    jne .LBB43_6
+; X86-BMI1NOTBM-NEXT:    jne .LBB53_6
 ; X86-BMI1NOTBM-NEXT:  # %bb.5:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB43_6:
+; X86-BMI1NOTBM-NEXT:  .LBB53_6:
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    jne .LBB43_8
+; X86-BMI1NOTBM-NEXT:    jne .LBB53_8
 ; X86-BMI1NOTBM-NEXT:  # %bb.7:
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB43_8:
+; X86-BMI1NOTBM-NEXT:  .LBB53_8:
 ; X86-BMI1NOTBM-NEXT:    popl %esi
 ; X86-BMI1NOTBM-NEXT:    popl %edi
 ; X86-BMI1NOTBM-NEXT:    popl %ebx
@@ -5581,32 +6731,32 @@
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB43_2
+; X86-BMI1BMI2-NEXT:    je .LBB53_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %esi, %eax
 ; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
-; X86-BMI1BMI2-NEXT:  .LBB43_2:
+; X86-BMI1BMI2-NEXT:  .LBB53_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB43_4
+; X86-BMI1BMI2-NEXT:    je .LBB53_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    movl $0, %edi
-; X86-BMI1BMI2-NEXT:  .LBB43_4:
+; X86-BMI1BMI2-NEXT:  .LBB53_4:
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
-; X86-BMI1BMI2-NEXT:    jne .LBB43_6
+; X86-BMI1BMI2-NEXT:    jne .LBB53_6
 ; X86-BMI1BMI2-NEXT:  # %bb.5:
 ; X86-BMI1BMI2-NEXT:    movl %eax, %edx
-; X86-BMI1BMI2-NEXT:  .LBB43_6:
+; X86-BMI1BMI2-NEXT:  .LBB53_6:
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    jne .LBB43_8
+; X86-BMI1BMI2-NEXT:    jne .LBB53_8
 ; X86-BMI1BMI2-NEXT:  # %bb.7:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %eax
-; X86-BMI1BMI2-NEXT:  .LBB43_8:
+; X86-BMI1BMI2-NEXT:  .LBB53_8:
 ; X86-BMI1BMI2-NEXT:    popl %esi
 ; X86-BMI1BMI2-NEXT:    popl %edi
 ; X86-BMI1BMI2-NEXT:    retl
@@ -5657,36 +6807,36 @@
 ; X86-NOBMI-NEXT:    shrdl %cl, %edx, %edi
 ; X86-NOBMI-NEXT:    xorl %esi, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB44_2
+; X86-NOBMI-NEXT:    je .LBB54_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %eax, %edi
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB44_2:
+; X86-NOBMI-NEXT:  .LBB54_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    shldl %cl, %edi, %eax
 ; X86-NOBMI-NEXT:    shll %cl, %edi
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl %edi, %ebx
-; X86-NOBMI-NEXT:    jne .LBB44_4
+; X86-NOBMI-NEXT:    jne .LBB54_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %ebx
-; X86-NOBMI-NEXT:  .LBB44_4:
+; X86-NOBMI-NEXT:  .LBB54_4:
 ; X86-NOBMI-NEXT:    movl %ebx, %eax
 ; X86-NOBMI-NEXT:    shrl %cl, %eax
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl $0, %edx
-; X86-NOBMI-NEXT:    jne .LBB44_6
+; X86-NOBMI-NEXT:    jne .LBB54_6
 ; X86-NOBMI-NEXT:  # %bb.5:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    movl %eax, %edx
-; X86-NOBMI-NEXT:  .LBB44_6:
+; X86-NOBMI-NEXT:  .LBB54_6:
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    jne .LBB44_8
+; X86-NOBMI-NEXT:    jne .LBB54_8
 ; X86-NOBMI-NEXT:  # %bb.7:
 ; X86-NOBMI-NEXT:    movl %esi, %eax
-; X86-NOBMI-NEXT:  .LBB44_8:
+; X86-NOBMI-NEXT:  .LBB54_8:
 ; X86-NOBMI-NEXT:    popl %esi
 ; X86-NOBMI-NEXT:    popl %edi
 ; X86-NOBMI-NEXT:    popl %ebx
@@ -5705,36 +6855,36 @@
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %esi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB44_2
+; X86-BMI1NOTBM-NEXT:    je .LBB54_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %eax, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB44_2:
+; X86-BMI1NOTBM-NEXT:  .LBB54_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %eax
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %ebx
-; X86-BMI1NOTBM-NEXT:    jne .LBB44_4
+; X86-BMI1NOTBM-NEXT:    jne .LBB54_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB44_4:
+; X86-BMI1NOTBM-NEXT:  .LBB54_4:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %eax
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl $0, %edx
-; X86-BMI1NOTBM-NEXT:    jne .LBB44_6
+; X86-BMI1NOTBM-NEXT:    jne .LBB54_6
 ; X86-BMI1NOTBM-NEXT:  # %bb.5:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB44_6:
+; X86-BMI1NOTBM-NEXT:  .LBB54_6:
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    jne .LBB44_8
+; X86-BMI1NOTBM-NEXT:    jne .LBB54_8
 ; X86-BMI1NOTBM-NEXT:  # %bb.7:
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB44_8:
+; X86-BMI1NOTBM-NEXT:  .LBB54_8:
 ; X86-BMI1NOTBM-NEXT:    popl %esi
 ; X86-BMI1NOTBM-NEXT:    popl %edi
 ; X86-BMI1NOTBM-NEXT:    popl %ebx
@@ -5751,32 +6901,32 @@
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %esi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB44_2
+; X86-BMI1BMI2-NEXT:    je .LBB54_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %esi, %eax
 ; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
-; X86-BMI1BMI2-NEXT:  .LBB44_2:
+; X86-BMI1BMI2-NEXT:  .LBB54_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB44_4
+; X86-BMI1BMI2-NEXT:    je .LBB54_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    movl $0, %edi
-; X86-BMI1BMI2-NEXT:  .LBB44_4:
+; X86-BMI1BMI2-NEXT:  .LBB54_4:
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
-; X86-BMI1BMI2-NEXT:    jne .LBB44_6
+; X86-BMI1BMI2-NEXT:    jne .LBB54_6
 ; X86-BMI1BMI2-NEXT:  # %bb.5:
 ; X86-BMI1BMI2-NEXT:    movl %eax, %edx
-; X86-BMI1BMI2-NEXT:  .LBB44_6:
+; X86-BMI1BMI2-NEXT:  .LBB54_6:
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    jne .LBB44_8
+; X86-BMI1BMI2-NEXT:    jne .LBB54_8
 ; X86-BMI1BMI2-NEXT:  # %bb.7:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %eax
-; X86-BMI1BMI2-NEXT:  .LBB44_8:
+; X86-BMI1BMI2-NEXT:  .LBB54_8:
 ; X86-BMI1BMI2-NEXT:    popl %esi
 ; X86-BMI1BMI2-NEXT:    popl %edi
 ; X86-BMI1BMI2-NEXT:    retl
@@ -5833,36 +6983,36 @@
 ; X86-NOBMI-NEXT:    shrdl %cl, %edx, %edi
 ; X86-NOBMI-NEXT:    xorl %esi, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB45_2
+; X86-NOBMI-NEXT:    je .LBB55_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %eax, %edi
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB45_2:
+; X86-NOBMI-NEXT:  .LBB55_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    shldl %cl, %edi, %eax
 ; X86-NOBMI-NEXT:    shll %cl, %edi
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl %edi, %ebx
-; X86-NOBMI-NEXT:    jne .LBB45_4
+; X86-NOBMI-NEXT:    jne .LBB55_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %ebx
-; X86-NOBMI-NEXT:  .LBB45_4:
+; X86-NOBMI-NEXT:  .LBB55_4:
 ; X86-NOBMI-NEXT:    movl %ebx, %eax
 ; X86-NOBMI-NEXT:    shrl %cl, %eax
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl $0, %edx
-; X86-NOBMI-NEXT:    jne .LBB45_6
+; X86-NOBMI-NEXT:    jne .LBB55_6
 ; X86-NOBMI-NEXT:  # %bb.5:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    movl %eax, %edx
-; X86-NOBMI-NEXT:  .LBB45_6:
+; X86-NOBMI-NEXT:  .LBB55_6:
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    jne .LBB45_8
+; X86-NOBMI-NEXT:    jne .LBB55_8
 ; X86-NOBMI-NEXT:  # %bb.7:
 ; X86-NOBMI-NEXT:    movl %esi, %eax
-; X86-NOBMI-NEXT:  .LBB45_8:
+; X86-NOBMI-NEXT:  .LBB55_8:
 ; X86-NOBMI-NEXT:    popl %esi
 ; X86-NOBMI-NEXT:    popl %edi
 ; X86-NOBMI-NEXT:    popl %ebx
@@ -5882,36 +7032,36 @@
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %esi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB45_2
+; X86-BMI1NOTBM-NEXT:    je .LBB55_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %eax, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB45_2:
+; X86-BMI1NOTBM-NEXT:  .LBB55_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %eax
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %ebx
-; X86-BMI1NOTBM-NEXT:    jne .LBB45_4
+; X86-BMI1NOTBM-NEXT:    jne .LBB55_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB45_4:
+; X86-BMI1NOTBM-NEXT:  .LBB55_4:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %eax
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl $0, %edx
-; X86-BMI1NOTBM-NEXT:    jne .LBB45_6
+; X86-BMI1NOTBM-NEXT:    jne .LBB55_6
 ; X86-BMI1NOTBM-NEXT:  # %bb.5:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB45_6:
+; X86-BMI1NOTBM-NEXT:  .LBB55_6:
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    jne .LBB45_8
+; X86-BMI1NOTBM-NEXT:    jne .LBB55_8
 ; X86-BMI1NOTBM-NEXT:  # %bb.7:
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB45_8:
+; X86-BMI1NOTBM-NEXT:  .LBB55_8:
 ; X86-BMI1NOTBM-NEXT:    popl %esi
 ; X86-BMI1NOTBM-NEXT:    popl %edi
 ; X86-BMI1NOTBM-NEXT:    popl %ebx
@@ -5929,32 +7079,32 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %eax
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB45_2
+; X86-BMI1BMI2-NEXT:    je .LBB55_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %esi, %eax
 ; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
-; X86-BMI1BMI2-NEXT:  .LBB45_2:
+; X86-BMI1BMI2-NEXT:  .LBB55_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB45_4
+; X86-BMI1BMI2-NEXT:    je .LBB55_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    movl $0, %edi
-; X86-BMI1BMI2-NEXT:  .LBB45_4:
+; X86-BMI1BMI2-NEXT:  .LBB55_4:
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
-; X86-BMI1BMI2-NEXT:    jne .LBB45_6
+; X86-BMI1BMI2-NEXT:    jne .LBB55_6
 ; X86-BMI1BMI2-NEXT:  # %bb.5:
 ; X86-BMI1BMI2-NEXT:    movl %eax, %edx
-; X86-BMI1BMI2-NEXT:  .LBB45_6:
+; X86-BMI1BMI2-NEXT:  .LBB55_6:
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    jne .LBB45_8
+; X86-BMI1BMI2-NEXT:    jne .LBB55_8
 ; X86-BMI1BMI2-NEXT:  # %bb.7:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %eax
-; X86-BMI1BMI2-NEXT:  .LBB45_8:
+; X86-BMI1BMI2-NEXT:  .LBB55_8:
 ; X86-BMI1BMI2-NEXT:    popl %esi
 ; X86-BMI1BMI2-NEXT:    popl %edi
 ; X86-BMI1BMI2-NEXT:    retl
@@ -6007,36 +7157,36 @@
 ; X86-NOBMI-NEXT:    shrdl %cl, %edx, %edi
 ; X86-NOBMI-NEXT:    xorl %esi, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    je .LBB46_2
+; X86-NOBMI-NEXT:    je .LBB56_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %eax, %edi
 ; X86-NOBMI-NEXT:    xorl %eax, %eax
-; X86-NOBMI-NEXT:  .LBB46_2:
+; X86-NOBMI-NEXT:  .LBB56_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    shldl %cl, %edi, %eax
 ; X86-NOBMI-NEXT:    shll %cl, %edi
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl %edi, %ebx
-; X86-NOBMI-NEXT:    jne .LBB46_4
+; X86-NOBMI-NEXT:    jne .LBB56_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %eax, %ebx
-; X86-NOBMI-NEXT:  .LBB46_4:
+; X86-NOBMI-NEXT:  .LBB56_4:
 ; X86-NOBMI-NEXT:    movl %ebx, %eax
 ; X86-NOBMI-NEXT:    shrl %cl, %eax
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl $0, %edx
-; X86-NOBMI-NEXT:    jne .LBB46_6
+; X86-NOBMI-NEXT:    jne .LBB56_6
 ; X86-NOBMI-NEXT:  # %bb.5:
 ; X86-NOBMI-NEXT:    movl %edi, %esi
 ; X86-NOBMI-NEXT:    movl %eax, %edx
-; X86-NOBMI-NEXT:  .LBB46_6:
+; X86-NOBMI-NEXT:  .LBB56_6:
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
-; X86-NOBMI-NEXT:    jne .LBB46_8
+; X86-NOBMI-NEXT:    jne .LBB56_8
 ; X86-NOBMI-NEXT:  # %bb.7:
 ; X86-NOBMI-NEXT:    movl %esi, %eax
-; X86-NOBMI-NEXT:  .LBB46_8:
+; X86-NOBMI-NEXT:  .LBB56_8:
 ; X86-NOBMI-NEXT:    popl %esi
 ; X86-NOBMI-NEXT:    popl %edi
 ; X86-NOBMI-NEXT:    popl %ebx
@@ -6056,36 +7206,36 @@
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edx, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %esi, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    je .LBB46_2
+; X86-BMI1NOTBM-NEXT:    je .LBB56_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edi
 ; X86-BMI1NOTBM-NEXT:    xorl %eax, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB46_2:
+; X86-BMI1NOTBM-NEXT:  .LBB56_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %edi, %eax
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %edi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %ebx
-; X86-BMI1NOTBM-NEXT:    jne .LBB46_4
+; X86-BMI1NOTBM-NEXT:    jne .LBB56_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %ebx
-; X86-BMI1NOTBM-NEXT:  .LBB46_4:
+; X86-BMI1NOTBM-NEXT:  .LBB56_4:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %eax
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %eax
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl $0, %edx
-; X86-BMI1NOTBM-NEXT:    jne .LBB46_6
+; X86-BMI1NOTBM-NEXT:    jne .LBB56_6
 ; X86-BMI1NOTBM-NEXT:  # %bb.5:
 ; X86-BMI1NOTBM-NEXT:    movl %edi, %esi
 ; X86-BMI1NOTBM-NEXT:    movl %eax, %edx
-; X86-BMI1NOTBM-NEXT:  .LBB46_6:
+; X86-BMI1NOTBM-NEXT:  .LBB56_6:
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebx, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
-; X86-BMI1NOTBM-NEXT:    jne .LBB46_8
+; X86-BMI1NOTBM-NEXT:    jne .LBB56_8
 ; X86-BMI1NOTBM-NEXT:  # %bb.7:
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %eax
-; X86-BMI1NOTBM-NEXT:  .LBB46_8:
+; X86-BMI1NOTBM-NEXT:  .LBB56_8:
 ; X86-BMI1NOTBM-NEXT:    popl %esi
 ; X86-BMI1NOTBM-NEXT:    popl %edi
 ; X86-BMI1NOTBM-NEXT:    popl %ebx
@@ -6103,32 +7253,32 @@
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %eax
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB46_2
+; X86-BMI1BMI2-NEXT:    je .LBB56_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %esi, %eax
 ; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
-; X86-BMI1BMI2-NEXT:  .LBB46_2:
+; X86-BMI1BMI2-NEXT:  .LBB56_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %esi
 ; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB46_4
+; X86-BMI1BMI2-NEXT:    je .LBB56_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
 ; X86-BMI1BMI2-NEXT:    movl $0, %edi
-; X86-BMI1BMI2-NEXT:  .LBB46_4:
+; X86-BMI1BMI2-NEXT:  .LBB56_4:
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
-; X86-BMI1BMI2-NEXT:    jne .LBB46_6
+; X86-BMI1BMI2-NEXT:    jne .LBB56_6
 ; X86-BMI1BMI2-NEXT:  # %bb.5:
 ; X86-BMI1BMI2-NEXT:    movl %eax, %edx
-; X86-BMI1BMI2-NEXT:  .LBB46_6:
+; X86-BMI1BMI2-NEXT:  .LBB56_6:
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    jne .LBB46_8
+; X86-BMI1BMI2-NEXT:    jne .LBB56_8
 ; X86-BMI1BMI2-NEXT:  # %bb.7:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %eax
-; X86-BMI1BMI2-NEXT:  .LBB46_8:
+; X86-BMI1BMI2-NEXT:  .LBB56_8:
 ; X86-BMI1BMI2-NEXT:    popl %esi
 ; X86-BMI1BMI2-NEXT:    popl %edi
 ; X86-BMI1BMI2-NEXT:    retl
@@ -6188,37 +7338,37 @@
 ; X86-NOBMI-NEXT:    shrdl %cl, %edx, %ebx
 ; X86-NOBMI-NEXT:    xorl %edx, %edx
 ; X86-NOBMI-NEXT:    testb $32, %al
-; X86-NOBMI-NEXT:    je .LBB47_2
+; X86-NOBMI-NEXT:    je .LBB57_2
 ; X86-NOBMI-NEXT:  # %bb.1:
 ; X86-NOBMI-NEXT:    movl %esi, %ebx
 ; X86-NOBMI-NEXT:    xorl %esi, %esi
-; X86-NOBMI-NEXT:  .LBB47_2:
+; X86-NOBMI-NEXT:  .LBB57_2:
 ; X86-NOBMI-NEXT:    movb $64, %cl
 ; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-NOBMI-NEXT:    shldl %cl, %ebx, %esi
 ; X86-NOBMI-NEXT:    shll %cl, %ebx
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl %ebx, %ebp
-; X86-NOBMI-NEXT:    jne .LBB47_4
+; X86-NOBMI-NEXT:    jne .LBB57_4
 ; X86-NOBMI-NEXT:  # %bb.3:
 ; X86-NOBMI-NEXT:    movl %esi, %ebp
-; X86-NOBMI-NEXT:  .LBB47_4:
+; X86-NOBMI-NEXT:  .LBB57_4:
 ; X86-NOBMI-NEXT:    movl %ebp, %esi
 ; X86-NOBMI-NEXT:    shrl %cl, %esi
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl $0, %edi
-; X86-NOBMI-NEXT:    jne .LBB47_6
+; X86-NOBMI-NEXT:    jne .LBB57_6
 ; X86-NOBMI-NEXT:  # %bb.5:
 ; X86-NOBMI-NEXT:    movl %ebx, %edx
 ; X86-NOBMI-NEXT:    movl %esi, %edi
-; X86-NOBMI-NEXT:  .LBB47_6:
+; X86-NOBMI-NEXT:  .LBB57_6:
 ; X86-NOBMI-NEXT:    shrdl %cl, %ebp, %edx
 ; X86-NOBMI-NEXT:    testb $32, %cl
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NOBMI-NEXT:    jne .LBB47_8
+; X86-NOBMI-NEXT:    jne .LBB57_8
 ; X86-NOBMI-NEXT:  # %bb.7:
 ; X86-NOBMI-NEXT:    movl %edx, %esi
-; X86-NOBMI-NEXT:  .LBB47_8:
+; X86-NOBMI-NEXT:  .LBB57_8:
 ; X86-NOBMI-NEXT:    subl $8, %esp
 ; X86-NOBMI-NEXT:    pushl %ecx
 ; X86-NOBMI-NEXT:    pushl %eax
@@ -6249,37 +7399,37 @@
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edx, %ebx
 ; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
 ; X86-BMI1NOTBM-NEXT:    testb $32, %al
-; X86-BMI1NOTBM-NEXT:    je .LBB47_2
+; X86-BMI1NOTBM-NEXT:    je .LBB57_2
 ; X86-BMI1NOTBM-NEXT:  # %bb.1:
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %ebx
 ; X86-BMI1NOTBM-NEXT:    xorl %esi, %esi
-; X86-BMI1NOTBM-NEXT:  .LBB47_2:
+; X86-BMI1NOTBM-NEXT:  .LBB57_2:
 ; X86-BMI1NOTBM-NEXT:    movb $64, %cl
 ; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1NOTBM-NEXT:    shldl %cl, %ebx, %esi
 ; X86-BMI1NOTBM-NEXT:    shll %cl, %ebx
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %ebp
-; X86-BMI1NOTBM-NEXT:    jne .LBB47_4
+; X86-BMI1NOTBM-NEXT:    jne .LBB57_4
 ; X86-BMI1NOTBM-NEXT:  # %bb.3:
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %ebp
-; X86-BMI1NOTBM-NEXT:  .LBB47_4:
+; X86-BMI1NOTBM-NEXT:  .LBB57_4:
 ; X86-BMI1NOTBM-NEXT:    movl %ebp, %esi
 ; X86-BMI1NOTBM-NEXT:    shrl %cl, %esi
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl $0, %edi
-; X86-BMI1NOTBM-NEXT:    jne .LBB47_6
+; X86-BMI1NOTBM-NEXT:    jne .LBB57_6
 ; X86-BMI1NOTBM-NEXT:  # %bb.5:
 ; X86-BMI1NOTBM-NEXT:    movl %ebx, %edx
 ; X86-BMI1NOTBM-NEXT:    movl %esi, %edi
-; X86-BMI1NOTBM-NEXT:  .LBB47_6:
+; X86-BMI1NOTBM-NEXT:  .LBB57_6:
 ; X86-BMI1NOTBM-NEXT:    shrdl %cl, %ebp, %edx
 ; X86-BMI1NOTBM-NEXT:    testb $32, %cl
 ; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1NOTBM-NEXT:    jne .LBB47_8
+; X86-BMI1NOTBM-NEXT:    jne .LBB57_8
 ; X86-BMI1NOTBM-NEXT:  # %bb.7:
 ; X86-BMI1NOTBM-NEXT:    movl %edx, %esi
-; X86-BMI1NOTBM-NEXT:  .LBB47_8:
+; X86-BMI1NOTBM-NEXT:  .LBB57_8:
 ; X86-BMI1NOTBM-NEXT:    subl $8, %esp
 ; X86-BMI1NOTBM-NEXT:    pushl %ecx
 ; X86-BMI1NOTBM-NEXT:    pushl %eax
@@ -6307,33 +7457,33 @@
 ; X86-BMI1BMI2-NEXT:    shrxl %eax, %edx, %edx
 ; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
 ; X86-BMI1BMI2-NEXT:    testb $32, %al
-; X86-BMI1BMI2-NEXT:    je .LBB47_2
+; X86-BMI1BMI2-NEXT:    je .LBB57_2
 ; X86-BMI1BMI2-NEXT:  # %bb.1:
 ; X86-BMI1BMI2-NEXT:    movl %edx, %edi
 ; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI1BMI2-NEXT:  .LBB47_2:
+; X86-BMI1BMI2-NEXT:  .LBB57_2:
 ; X86-BMI1BMI2-NEXT:    movb $64, %cl
 ; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1BMI2-NEXT:    shldl %cl, %edi, %edx
 ; X86-BMI1BMI2-NEXT:    shlxl %ecx, %edi, %ebx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
-; X86-BMI1BMI2-NEXT:    je .LBB47_4
+; X86-BMI1BMI2-NEXT:    je .LBB57_4
 ; X86-BMI1BMI2-NEXT:  # %bb.3:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %edx
 ; X86-BMI1BMI2-NEXT:    movl $0, %ebx
-; X86-BMI1BMI2-NEXT:  .LBB47_4:
+; X86-BMI1BMI2-NEXT:  .LBB57_4:
 ; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %edi
-; X86-BMI1BMI2-NEXT:    jne .LBB47_6
+; X86-BMI1BMI2-NEXT:    jne .LBB57_6
 ; X86-BMI1BMI2-NEXT:  # %bb.5:
 ; X86-BMI1BMI2-NEXT:    movl %edi, %esi
-; X86-BMI1BMI2-NEXT:  .LBB47_6:
+; X86-BMI1BMI2-NEXT:  .LBB57_6:
 ; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %ebx
 ; X86-BMI1BMI2-NEXT:    testb $32, %cl
 ; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1BMI2-NEXT:    jne .LBB47_8
+; X86-BMI1BMI2-NEXT:    jne .LBB57_8
 ; X86-BMI1BMI2-NEXT:  # %bb.7:
 ; X86-BMI1BMI2-NEXT:    movl %ebx, %edi
-; X86-BMI1BMI2-NEXT:  .LBB47_8:
+; X86-BMI1BMI2-NEXT:  .LBB57_8:
 ; X86-BMI1BMI2-NEXT:    subl $8, %esp
 ; X86-BMI1BMI2-NEXT:    pushl %ecx
 ; X86-BMI1BMI2-NEXT:    pushl %eax
@@ -6393,12 +7543,252 @@
   ret i64 %masked
 }
 
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_d0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl %esi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    shrdl %cl, %esi, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB58_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB58_2:
+; X86-NOBMI-NEXT:    movb $64, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %eax
+; X86-NOBMI-NEXT:    shll %cl, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB58_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:  .LBB58_4:
+; X86-NOBMI-NEXT:    shrdl %cl, %eax, %edx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB58_6
+; X86-NOBMI-NEXT:  # %bb.5:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:  .LBB58_6:
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_d0:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl %esi, %eax
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %eax
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    je .LBB58_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %eax, %edx
+; X86-BMI1NOTBM-NEXT:    xorl %eax, %eax
+; X86-BMI1NOTBM-NEXT:  .LBB58_2:
+; X86-BMI1NOTBM-NEXT:    movb $64, %cl
+; X86-BMI1NOTBM-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    shldl %cl, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    shll %cl, %edx
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    je .LBB58_4
+; X86-BMI1NOTBM-NEXT:  # %bb.3:
+; X86-BMI1NOTBM-NEXT:    movl %edx, %eax
+; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB58_4:
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %eax, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %eax
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB58_6
+; X86-BMI1NOTBM-NEXT:  # %bb.5:
+; X86-BMI1NOTBM-NEXT:    movl %edx, %eax
+; X86-BMI1NOTBM-NEXT:  .LBB58_6:
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_d0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %eax
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB58_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edx, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:  .LBB58_2:
+; X86-BMI1BMI2-NEXT:    movb $64, %cl
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB58_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI1BMI2-NEXT:  .LBB58_4:
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %edx, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB58_6
+; X86-BMI1BMI2-NEXT:  # %bb.5:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %edx, %eax
+; X86-BMI1BMI2-NEXT:  .LBB58_6:
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_d0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    negb %dl
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_d0:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_d0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhiq %rdx, %rax, %rax
+; X64-BMI1BMI2-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %shifted, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
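+
+; Illustrative walkthrough (not part of the checks): the shl/lshr pair
+; replaces the and-mask of pattern c. With %val = 0xFF00FF00FF00FF00,
+; %numskipbits = 8, %numlowbits = 16:
+;   %shifted         = 0x00FF00FF00FF00FF  (val >> 8)
+;   %highbitscleared = 0x00FF000000000000  (<< 48)
+;   %masked          = 0x00FF (>> 48), truncated to i32 -> 255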
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bextr64_32_d1:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl %esi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    shrdl %cl, %esi, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB59_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:  .LBB59_2:
+; X86-NOBMI-NEXT:    xorl %ecx, %ecx
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: bextr64_32_d1:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    pushl %edi
+; X86-BMI1NOTBM-NEXT:    pushl %esi
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1NOTBM-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1NOTBM-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1NOTBM-NEXT:    movl %edi, %edx
+; X86-BMI1NOTBM-NEXT:    shrl %cl, %edx
+; X86-BMI1NOTBM-NEXT:    shrdl %cl, %edi, %esi
+; X86-BMI1NOTBM-NEXT:    testb $32, %cl
+; X86-BMI1NOTBM-NEXT:    jne .LBB59_2
+; X86-BMI1NOTBM-NEXT:  # %bb.1:
+; X86-BMI1NOTBM-NEXT:    movl %esi, %edx
+; X86-BMI1NOTBM-NEXT:  .LBB59_2:
+; X86-BMI1NOTBM-NEXT:    shll $8, %eax
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, %edx, %eax
+; X86-BMI1NOTBM-NEXT:    popl %esi
+; X86-BMI1NOTBM-NEXT:    popl %edi
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bextr64_32_d1:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB59_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %edx
+; X86-BMI1BMI2-NEXT:  .LBB59_2:
+; X86-BMI1BMI2-NEXT:    bzhil %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bextr64_32_d1:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    negb %dl
+; X64-NOBMI-NEXT:    movl %edx, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: bextr64_32_d1:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-BMI1NOTBM-NEXT:    shlq $8, %rdx
+; X64-BMI1NOTBM-NEXT:    movzbl %sil, %eax
+; X64-BMI1NOTBM-NEXT:    orq %rdx, %rax
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bextr64_32_d1:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    bzhil %edx, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %truncshifted, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
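+
+; Illustrative, same inputs (not part of the checks): %truncshifted =
+; 0x00FF00FF, shifted left then right by %numhighbits = 16 gives
+; %masked = 0x00FF -> 255, matching d0.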
+
 ; ---------------------------------------------------------------------------- ;
 ; Constant
 ; ---------------------------------------------------------------------------- ;
 
 ; https://bugs.llvm.org/show_bug.cgi?id=38938
-define void @pr38938(i32* %a0, i64* %a1) {
+define void @pr38938(i32* %a0, i64* %a1) nounwind {
 ; X86-NOBMI-LABEL: pr38938:
 ; X86-NOBMI:       # %bb.0:
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6473,7 +7863,7 @@
 }
 
 ; The most canonical variant
-define i32 @c0_i32(i32 %arg) {
+define i32 @c0_i32(i32 %arg) nounwind {
 ; X86-NOBMI-LABEL: c0_i32:
 ; X86-NOBMI:       # %bb.0:
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6527,7 +7917,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define i32 @c1_i32(i32 %arg) {
+define i32 @c1_i32(i32 %arg) nounwind {
 ; X86-LABEL: c1_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6547,7 +7937,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define i32 @c2_i32(i32 %arg) {
+define i32 @c2_i32(i32 %arg) nounwind {
 ; X86-LABEL: c2_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6568,7 +7958,7 @@
 }
 
 ; The mask covers newly shifted-in bit
-define i32 @c4_i32_bad(i32 %arg) {
+define i32 @c4_i32_bad(i32 %arg) nounwind {
 ; X86-LABEL: c4_i32_bad:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6590,7 +7980,7 @@
 ; i64
 
 ; The most canonical variant
-define i64 @c0_i64(i64 %arg) {
+define i64 @c0_i64(i64 %arg) nounwind {
 ; X86-NOBMI-LABEL: c0_i64:
 ; X86-NOBMI:       # %bb.0:
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6648,7 +8038,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define i64 @c1_i64(i64 %arg) {
+define i64 @c1_i64(i64 %arg) nounwind {
 ; X86-LABEL: c1_i64:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6669,7 +8059,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define i64 @c2_i64(i64 %arg) {
+define i64 @c2_i64(i64 %arg) nounwind {
 ; X86-LABEL: c2_i64:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6691,7 +8081,7 @@
 }
 
 ; The mask covers newly shifted-in bit
-define i64 @c4_i64_bad(i64 %arg) {
+define i64 @c4_i64_bad(i64 %arg) nounwind {
 ; X86-LABEL: c4_i64_bad:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6718,7 +8108,7 @@
 ; i32
 
 ; The most canonical variant
-define void @c5_i32(i32 %arg, i32* %ptr) {
+define void @c5_i32(i32 %arg, i32* %ptr) nounwind {
 ; X86-NOBMI-LABEL: c5_i32:
 ; X86-NOBMI:       # %bb.0:
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6784,7 +8174,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define void @c6_i32(i32 %arg, i32* %ptr) {
+define void @c6_i32(i32 %arg, i32* %ptr) nounwind {
 ; X86-NOBMI-LABEL: c6_i32:
 ; X86-NOBMI:       # %bb.0:
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6850,7 +8240,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define void @c7_i32(i32 %arg, i32* %ptr) {
+define void @c7_i32(i32 %arg, i32* %ptr) nounwind {
 ; X86-LABEL: c7_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6876,7 +8266,7 @@
 ; i64
 
 ; The most canonical variant
-define void @c5_i64(i64 %arg, i64* %ptr) {
+define void @c5_i64(i64 %arg, i64* %ptr) nounwind {
 ; X86-NOBMI-LABEL: c5_i64:
 ; X86-NOBMI:       # %bb.0:
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -6946,7 +8336,7 @@
 }
 
 ; Should be still fine, but the mask is shifted
-define void @c6_i64(i64 %arg, i64* %ptr) {
+define void @c6_i64(i64 %arg, i64* %ptr) nounwind {
 ; X86-NOBMI-LABEL: c6_i64:
 ; X86-NOBMI:       # %bb.0:
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -7016,7 +8406,7 @@
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define void @c7_i64(i64 %arg, i64* %ptr) {
+define void @c7_i64(i64 %arg, i64* %ptr) nounwind {
 ; X86-LABEL: c7_i64:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
diff --git a/test/CodeGen/X86/extract-fp.ll b/test/CodeGen/X86/extract-fp.ll
index f72764c..06ba30b 100644
--- a/test/CodeGen/X86/extract-fp.ll
+++ b/test/CodeGen/X86/extract-fp.ll
@@ -4,8 +4,8 @@
 define float @ext_fadd_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: ext_fadd_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; CHECK-NEXT:    addss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %bo = fadd <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
   %ext = extractelement <4 x float> %bo, i32 2
@@ -15,9 +15,9 @@
 define float @ext_fsub_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: ext_fsub_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movaps {{.*#+}} xmm1 = <u,2.0E+0,u,u>
-; CHECK-NEXT:    subps %xmm0, %xmm1
-; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    subss %xmm0, %xmm1
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %bo = fsub <4 x float> <float 1.0, float 2.0, float 3.0, float 42.0>, %x
@@ -28,19 +28,20 @@
 define float @ext_fmul_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: ext_fmul_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %bo = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
   %ext = extractelement <4 x float> %bo, i32 3
   ret float %ext
 }
 
+; TODO: X / 1.0 --> X
+
 define float @ext_fdiv_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: ext_fdiv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT:    divps %xmm1, %xmm0
+; CHECK-NEXT:    divss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %bo = fdiv <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
   %ext = extractelement <4 x float> %bo, i32 0
@@ -50,9 +51,9 @@
 define float @ext_fdiv_v4f32_constant_op0(<4 x float> %x) {
 ; CHECK-LABEL: ext_fdiv_v4f32_constant_op0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movaps {{.*#+}} xmm1 = <u,2.0E+0,u,u>
-; CHECK-NEXT:    divps %xmm0, %xmm1
-; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    divss %xmm0, %xmm1
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %bo = fdiv <4 x float> <float 1.0, float 2.0, float 3.0, float 42.0>, %x
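All of the extract-fp.ll hunks above exercise one combine: a vector FP binop whose only use is a single extracted lane is scalarized, so a full-width addps/subps/divps against a constant-pool vector becomes an optional lane shuffle plus a single addss/subss/divss. A reduced sketch of the post-combine IR shape (constant and lane index are illustrative):

define float @extract_then_fadd(<4 x float> %x) {
  ; Extract first, then operate on one scalar lane; this is what the
  ; combine produces from fadd-of-vector followed by extractelement.
  %lane = extractelement <4 x float> %x, i32 2
  %ext = fadd float %lane, 3.000000e+00
  ret float %ext
}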
diff --git a/test/CodeGen/X86/fold-load.ll b/test/CodeGen/X86/fold-load.ll
index 5ae46e2..263ad72 100644
--- a/test/CodeGen/X86/fold-load.ll
+++ b/test/CodeGen/X86/fold-load.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mcpu=generic -mtriple=i686-- | FileCheck %s
 	%struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
 	%struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (...)*, void (...)*, i8*, i8 }
@@ -5,6 +6,17 @@
 
 ; This should just not crash.
 define void @test1() nounwind {
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    testb $1, stmt_obstack+40
+; CHECK-NEXT:    jne .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %cond_false30.i
+; CHECK-NEXT:    pushl $0
+; CHECK-NEXT:    calll 0
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    retl
+; CHECK-NEXT:  .LBB0_1: # %cond_true23.i
+; CHECK-NEXT:    retl
 entry:
 	br i1 true, label %cond_true, label %cond_next
 
@@ -30,6 +42,14 @@
 
 
 define i32 @test2(i16* %P, i16* %Q) nounwind {
+; CHECK-LABEL: test2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movzwl (%eax), %edx
+; CHECK-NEXT:    movzbl %dl, %eax
+; CHECK-NEXT:    movw %dx, (%ecx)
+; CHECK-NEXT:    retl
   %A = load i16, i16* %P, align 4                      ; <i16> [#uses=11]
   %C = zext i16 %A to i32                         ; <i32> [#uses=1]
   %D = and i32 %C, 255                            ; <i32> [#uses=1]
@@ -39,9 +59,6 @@
   store i16 %A, i16* %Q
   ret i32 %D
 
-; CHECK-LABEL: test2:
-; CHECK: 	movl	4(%esp), %eax
-; CHECK-NEXT:	movzwl	(%eax), %e{{..}}
 
 }
 
@@ -49,10 +66,22 @@
 ; xor in exit block will be CSE'd and the load will be folded into the xor in entry.
 define i1 @test3(i32* %P, i32* %Q) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK: movl 8(%esp), %e
-; CHECK: movl 4(%esp), %e
-; CHECK: xorl (%e
-; CHECK: j
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    movl (%eax), %eax
+; CHECK-NEXT:    xorl (%ecx), %eax
+; CHECK-NEXT:    testl $89947, %eax # imm = 0x15F5B
+; CHECK-NEXT:    je .LBB2_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
+; CHECK-NEXT:    retl
+; CHECK-NEXT:  .LBB2_2: # %exit
+; CHECK-NEXT:    testl $-838178173, %eax # imm = 0xCE0A6A83
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
+; CHECK-NEXT:    retl
 entry:
   %0 = load i32, i32* %P, align 4
   %1 = load i32, i32* %Q, align 4
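fold-load.ll moves from hand-written partial CHECK lines (e.g. the old "movl 8(%esp), %e" fragments in test3) to complete autogenerated assertions, which is what the NOTE line at the top records; the test3 comment still documents the intent, namely that the two loads feed a single xor after CSE. Assuming current practice, the checks are refreshed like this (invocation and paths illustrative):

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=generic -mtriple=i686-- | FileCheck %s
;
; Regenerate the CHECK blocks after a codegen change with:
;   utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
;       test/CodeGen/X86/fold-load.ll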
diff --git a/test/CodeGen/X86/fold-vector-sext-zext.ll b/test/CodeGen/X86/fold-vector-sext-zext.ll
index a8e78cc..465c7ce 100644
--- a/test/CodeGen/X86/fold-vector-sext-zext.ll
+++ b/test/CodeGen/X86/fold-vector-sext-zext.ll
@@ -261,12 +261,12 @@
 define <4 x i16> @test_zext_4i8_4i16_undef() {
 ; X32-LABEL: test_zext_4i8_4i16_undef:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253>
+; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,0,253]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i16_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253>
+; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,0,253]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 undef, i32 0
   %2 = insertelement <4 x i8> %1, i8 -1, i32 1
@@ -279,12 +279,12 @@
 define <4 x i32> @test_zext_4i8_4i32_undef() {
 ; X32-LABEL: test_zext_4i8_4i32_undef:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <0,u,2,u>
+; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,2,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i32_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <0,u,2,u>
+; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,2,0]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
   %2 = insertelement <4 x i8> %1, i8 undef, i32 1
@@ -297,12 +297,12 @@
 define <4 x i64> @test_zext_4i8_4i64_undef() {
 ; X32-LABEL: test_zext_4i8_4i64_undef:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <u,u,255,0,2,0,u,u>
+; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,255,0,2,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i64_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <u,255,2,u>
+; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,2,0]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 undef, i32 0
   %2 = insertelement <4 x i8> %1, i8 -1, i32 1
@@ -359,12 +359,12 @@
 define <8 x i16> @test_zext_8i8_8i16_undef() {
 ; X32-LABEL: test_zext_8i8_8i16_undef:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253,u,251,u,249>
+; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,0,253,0,251,0,249]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i16_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253,u,251,u,249>
+; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,0,253,0,251,0,249]
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 undef, i32 0
   %2 = insertelement <8 x i8> %1, i8 -1, i32 1
@@ -381,12 +381,12 @@
 define <8 x i32> @test_zext_8i8_8i32_undef() {
 ; X32-LABEL: test_zext_8i8_8i32_undef:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,253,4,u,6,u>
+; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,2,253,4,0,6,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i32_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,253,4,u,6,u>
+; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,2,253,4,0,6,0]
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
   %2 = insertelement <8 x i8> %1, i8 undef, i32 1
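The fold-vector-sext-zext.ll churn above is a printing-level consequence of a folding change: zext of a vector with undef lanes used to keep those lanes undef (the "u" entries in the "<...>" constant rendering), but the fold now commits them to defined zeros, so the constant prints as a plain "[...]" vector. A reduced sketch of the input shape being constant-folded:

define <4 x i16> @zext_undef_lane() {
  ; Lane 0 is undef and lane 1 is -1; once the zext is folded, lane 0
  ; becomes a defined 0 rather than staying undef, hence [0,255,...]
  ; instead of <u,255,...> in the CHECK lines.
  %v1 = insertelement <4 x i8> undef, i8 undef, i32 0
  %v2 = insertelement <4 x i8> %v1, i8 -1, i32 1
  %z = zext <4 x i8> %v2 to <4 x i16>
  ret <4 x i16> %z
}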
diff --git a/test/CodeGen/X86/fp-elim.ll b/test/CodeGen/X86/fp-elim.ll
index 625c16e..65764d1 100644
--- a/test/CodeGen/X86/fp-elim.ll
+++ b/test/CodeGen/X86/fp-elim.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -mtriple=i686-- -asm-verbose=false                           | FileCheck %s -check-prefix=FP-ELIM
-; RUN: llc < %s -mtriple=i686-- -asm-verbose=false -disable-fp-elim          | FileCheck %s -check-prefix=NO-ELIM
+; RUN: llc < %s -mtriple=i686-- -asm-verbose=false -frame-pointer=all          | FileCheck %s -check-prefix=NO-ELIM
 
 ; Implement -momit-leaf-frame-pointer
 ; rdar://7886181
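The fp-elim.ll RUN-line change tracks an llc option rename: the boolean -disable-fp-elim is replaced by the multi-valued -frame-pointer. Assuming the usual three values, the mapping is (RUN line illustrative):

; -frame-pointer=none      frame pointer may be eliminated everywhere
; -frame-pointer=non-leaf  kept, except in leaf functions
; -frame-pointer=all       kept everywhere (the old -disable-fp-elim)
; RUN: llc < %s -mtriple=i686-- -asm-verbose=false -frame-pointer=all | FileCheck %s -check-prefix=NO-ELIM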
diff --git a/test/CodeGen/X86/fp128-cast.ll b/test/CodeGen/X86/fp128-cast.ll
index 0e76da7..71b9c3f 100644
--- a/test/CodeGen/X86/fp128-cast.ll
+++ b/test/CodeGen/X86/fp128-cast.ll
@@ -845,7 +845,6 @@
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    callq __trunctfdf2
 ; X64-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0,0]
 ; X64-NEXT:    andps {{.*}}(%rip), %xmm0
 ; X64-NEXT:    orps %xmm1, %xmm0
 ; X64-NEXT:    callq __extenddftf2
@@ -867,7 +866,6 @@
 ; X64_NO_MMX-NEXT:    callq __trunctfdf2
 ; X64_NO_MMX-NEXT:    andps {{.*}}(%rip), %xmm0
 ; X64_NO_MMX-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X64_NO_MMX-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0,0]
 ; X64_NO_MMX-NEXT:    orps %xmm1, %xmm0
 ; X64_NO_MMX-NEXT:    callq __extenddftf2
 ; X64_NO_MMX-NEXT:    addq $8, %rsp
diff --git a/test/CodeGen/X86/fpstack-debuginstr-kill.ll b/test/CodeGen/X86/fpstack-debuginstr-kill.ll
index 93d2dfd..3bf363b 100644
--- a/test/CodeGen/X86/fpstack-debuginstr-kill.ll
+++ b/test/CodeGen/X86/fpstack-debuginstr-kill.ll
@@ -59,7 +59,7 @@
 !7 = !DIGlobalVariableExpression(var: !8, expr: !DIExpression())
 !8 = !DIGlobalVariable(name: "g2", scope: null, file: !2, line: 6, type: !9, isLocal: false, isDefinition: true)
 !9 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
-!10 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !11, producer: "clang version 3.6.0 (http://llvm.org/git/clang 8444ae7cfeaefae031f8fedf0d1435ca3b14d90b) (http://llvm.org/git/llvm 886f0101a7d176543b831f5efb74c03427244a55)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !12, retainedTypes: !12, globals: !13, imports: !12)
+!10 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !11, producer: "clang version 3.6.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !12, retainedTypes: !12, globals: !13, imports: !12)
 !11 = !DIFile(filename: "fpu_ieee.cpp", directory: "x87stackifier")
 !12 = !{}
 !13 = !{!0, !7}
diff --git a/test/CodeGen/X86/fshl.ll b/test/CodeGen/X86/fshl.ll
index b161763..ccf451e 100644
--- a/test/CodeGen/X86/fshl.ll
+++ b/test/CodeGen/X86/fshl.ll
@@ -178,6 +178,26 @@
   ret i32 %tmp
 }
 
+define i32 @var_shift_i32_optsize(i32 %x, i32 %y, i32 %z) nounwind optsize {
+; X86-LABEL: var_shift_i32_optsize:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shldl %cl, %edx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: var_shift_i32_optsize:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shldl %cl, %esi, %eax
+; X64-NEXT:    retq
+  %tmp = tail call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %z)
+  ret i32 %tmp
+}
+
 define i64 @var_shift_i64(i64 %x, i64 %y, i64 %z) nounwind {
 ; X86-FAST-LABEL: var_shift_i64:
 ; X86-FAST:       # %bb.0:
@@ -198,36 +218,36 @@
 ; X86-FAST-NEXT:    shll %cl, %edi
 ; X86-FAST-NEXT:    shldl %cl, %eax, %ebp
 ; X86-FAST-NEXT:    testb $32, %bl
-; X86-FAST-NEXT:    je .LBB3_2
+; X86-FAST-NEXT:    je .LBB4_2
 ; X86-FAST-NEXT:  # %bb.1:
 ; X86-FAST-NEXT:    movl %edi, %ebp
 ; X86-FAST-NEXT:    xorl %edi, %edi
-; X86-FAST-NEXT:  .LBB3_2:
+; X86-FAST-NEXT:  .LBB4_2:
 ; X86-FAST-NEXT:    movb $64, %cl
 ; X86-FAST-NEXT:    subb %bl, %cl
 ; X86-FAST-NEXT:    movl %edx, %esi
 ; X86-FAST-NEXT:    shrl %cl, %esi
 ; X86-FAST-NEXT:    shrdl %cl, %edx, (%esp) # 4-byte Folded Spill
 ; X86-FAST-NEXT:    testb $32, %cl
-; X86-FAST-NEXT:    jne .LBB3_3
+; X86-FAST-NEXT:    jne .LBB4_3
 ; X86-FAST-NEXT:  # %bb.4:
 ; X86-FAST-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-FAST-NEXT:    movl (%esp), %ecx # 4-byte Reload
 ; X86-FAST-NEXT:    testl %ebx, %ebx
-; X86-FAST-NEXT:    jne .LBB3_6
-; X86-FAST-NEXT:    jmp .LBB3_7
-; X86-FAST-NEXT:  .LBB3_3:
+; X86-FAST-NEXT:    jne .LBB4_6
+; X86-FAST-NEXT:    jmp .LBB4_7
+; X86-FAST-NEXT:  .LBB4_3:
 ; X86-FAST-NEXT:    movl %esi, %ecx
 ; X86-FAST-NEXT:    xorl %esi, %esi
 ; X86-FAST-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-FAST-NEXT:    testl %ebx, %ebx
-; X86-FAST-NEXT:    je .LBB3_7
-; X86-FAST-NEXT:  .LBB3_6:
+; X86-FAST-NEXT:    je .LBB4_7
+; X86-FAST-NEXT:  .LBB4_6:
 ; X86-FAST-NEXT:    orl %esi, %ebp
 ; X86-FAST-NEXT:    orl %ecx, %edi
 ; X86-FAST-NEXT:    movl %edi, %eax
 ; X86-FAST-NEXT:    movl %ebp, %edx
-; X86-FAST-NEXT:  .LBB3_7:
+; X86-FAST-NEXT:  .LBB4_7:
 ; X86-FAST-NEXT:    addl $4, %esp
 ; X86-FAST-NEXT:    popl %esi
 ; X86-FAST-NEXT:    popl %edi
@@ -261,11 +281,11 @@
 ; X86-SLOW-NEXT:    testb %dl, %dl
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLOW-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT:    je .LBB3_2
+; X86-SLOW-NEXT:    je .LBB4_2
 ; X86-SLOW-NEXT:  # %bb.1:
 ; X86-SLOW-NEXT:    orl %eax, %ebp
 ; X86-SLOW-NEXT:    movl %ebp, (%esp) # 4-byte Spill
-; X86-SLOW-NEXT:  .LBB3_2:
+; X86-SLOW-NEXT:  .LBB4_2:
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %ebp
 ; X86-SLOW-NEXT:    movl %ebp, %eax
 ; X86-SLOW-NEXT:    movl %ebx, %ecx
@@ -276,41 +296,41 @@
 ; X86-SLOW-NEXT:    negb %cl
 ; X86-SLOW-NEXT:    shrl %cl, %edi
 ; X86-SLOW-NEXT:    testb %ch, %ch
-; X86-SLOW-NEXT:    je .LBB3_4
+; X86-SLOW-NEXT:    je .LBB4_4
 ; X86-SLOW-NEXT:  # %bb.3:
 ; X86-SLOW-NEXT:    orl %edi, %eax
 ; X86-SLOW-NEXT:    movl %eax, %ebp
-; X86-SLOW-NEXT:  .LBB3_4:
+; X86-SLOW-NEXT:  .LBB4_4:
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SLOW-NEXT:    movl %eax, %edi
 ; X86-SLOW-NEXT:    movl %ebx, %ecx
 ; X86-SLOW-NEXT:    shll %cl, %edi
 ; X86-SLOW-NEXT:    testb $32, %bl
-; X86-SLOW-NEXT:    je .LBB3_6
+; X86-SLOW-NEXT:    je .LBB4_6
 ; X86-SLOW-NEXT:  # %bb.5:
 ; X86-SLOW-NEXT:    movl %edi, %ebp
 ; X86-SLOW-NEXT:    xorl %edi, %edi
-; X86-SLOW-NEXT:  .LBB3_6:
+; X86-SLOW-NEXT:  .LBB4_6:
 ; X86-SLOW-NEXT:    movb %dh, %cl
 ; X86-SLOW-NEXT:    shrl %cl, %esi
 ; X86-SLOW-NEXT:    testb $32, %dh
-; X86-SLOW-NEXT:    jne .LBB3_7
+; X86-SLOW-NEXT:    jne .LBB4_7
 ; X86-SLOW-NEXT:  # %bb.8:
 ; X86-SLOW-NEXT:    movl (%esp), %ecx # 4-byte Reload
 ; X86-SLOW-NEXT:    testl %ebx, %ebx
-; X86-SLOW-NEXT:    jne .LBB3_10
-; X86-SLOW-NEXT:    jmp .LBB3_11
-; X86-SLOW-NEXT:  .LBB3_7:
+; X86-SLOW-NEXT:    jne .LBB4_10
+; X86-SLOW-NEXT:    jmp .LBB4_11
+; X86-SLOW-NEXT:  .LBB4_7:
 ; X86-SLOW-NEXT:    movl %esi, %ecx
 ; X86-SLOW-NEXT:    xorl %esi, %esi
 ; X86-SLOW-NEXT:    testl %ebx, %ebx
-; X86-SLOW-NEXT:    je .LBB3_11
-; X86-SLOW-NEXT:  .LBB3_10:
+; X86-SLOW-NEXT:    je .LBB4_11
+; X86-SLOW-NEXT:  .LBB4_10:
 ; X86-SLOW-NEXT:    orl %esi, %ebp
 ; X86-SLOW-NEXT:    orl %ecx, %edi
 ; X86-SLOW-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-SLOW-NEXT:    movl %edi, %eax
-; X86-SLOW-NEXT:  .LBB3_11:
+; X86-SLOW-NEXT:  .LBB4_11:
 ; X86-SLOW-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
 ; X86-SLOW-NEXT:    addl $8, %esp
 ; X86-SLOW-NEXT:    popl %esi
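fshl.ll above and fshr.ll below gain the same new test: an optsize variant of the i32 funnel shift. When optimizing for size the lowering goes straight to a single shldl/shrdl instead of a longer shift-and-or expansion, and every later .LBB3_* label in the i64 tests becomes .LBB4_* simply because a function was inserted ahead of them. The shape of the added test, reduced and made self-contained:

declare i32 @llvm.fshl.i32(i32, i32, i32)

define i32 @var_shift_i32_optsize(i32 %x, i32 %y, i32 %z) nounwind optsize {
  ; fshl concatenates x:y, shifts left by z mod 32, and returns the high
  ; 32 bits; at optsize x86 lowers this to one shldl.
  %f = tail call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %z)
  ret i32 %f
}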
diff --git a/test/CodeGen/X86/fshr.ll b/test/CodeGen/X86/fshr.ll
index 5bd31dd..09d63b6 100644
--- a/test/CodeGen/X86/fshr.ll
+++ b/test/CodeGen/X86/fshr.ll
@@ -177,6 +177,26 @@
   ret i32 %tmp
 }
 
+define i32 @var_shift_i32_optsize(i32 %x, i32 %y, i32 %z) nounwind optsize {
+; X86-LABEL: var_shift_i32_optsize:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrdl %cl, %edx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: var_shift_i32_optsize:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrdl %cl, %edi, %eax
+; X64-NEXT:    retq
+  %tmp = tail call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z)
+  ret i32 %tmp
+}
+
 define i64 @var_shift_i64(i64 %x, i64 %y, i64 %z) nounwind {
 ; X86-FAST-LABEL: var_shift_i64:
 ; X86-FAST:       # %bb.0:
@@ -198,30 +218,30 @@
 ; X86-FAST-NEXT:    shll %cl, %edi
 ; X86-FAST-NEXT:    shldl %cl, %eax, %esi
 ; X86-FAST-NEXT:    testb $32, %cl
-; X86-FAST-NEXT:    je .LBB3_2
+; X86-FAST-NEXT:    je .LBB4_2
 ; X86-FAST-NEXT:  # %bb.1:
 ; X86-FAST-NEXT:    movl %edi, %esi
 ; X86-FAST-NEXT:    xorl %edi, %edi
-; X86-FAST-NEXT:  .LBB3_2:
+; X86-FAST-NEXT:  .LBB4_2:
 ; X86-FAST-NEXT:    movl %edx, %ebp
 ; X86-FAST-NEXT:    movl %ebx, %ecx
 ; X86-FAST-NEXT:    shrl %cl, %ebp
 ; X86-FAST-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-FAST-NEXT:    shrdl %cl, %edx, %eax
 ; X86-FAST-NEXT:    testb $32, %bl
-; X86-FAST-NEXT:    je .LBB3_4
+; X86-FAST-NEXT:    je .LBB4_4
 ; X86-FAST-NEXT:  # %bb.3:
 ; X86-FAST-NEXT:    movl %ebp, %eax
 ; X86-FAST-NEXT:    xorl %ebp, %ebp
-; X86-FAST-NEXT:  .LBB3_4:
+; X86-FAST-NEXT:  .LBB4_4:
 ; X86-FAST-NEXT:    testl %ebx, %ebx
-; X86-FAST-NEXT:    je .LBB3_6
+; X86-FAST-NEXT:    je .LBB4_6
 ; X86-FAST-NEXT:  # %bb.5:
 ; X86-FAST-NEXT:    orl %ebp, %esi
 ; X86-FAST-NEXT:    orl %eax, %edi
 ; X86-FAST-NEXT:    movl %edi, (%esp) # 4-byte Spill
 ; X86-FAST-NEXT:    movl %esi, %edx
-; X86-FAST-NEXT:  .LBB3_6:
+; X86-FAST-NEXT:  .LBB4_6:
 ; X86-FAST-NEXT:    movl (%esp), %eax # 4-byte Reload
 ; X86-FAST-NEXT:    addl $4, %esp
 ; X86-FAST-NEXT:    popl %esi
@@ -256,11 +276,11 @@
 ; X86-SLOW-NEXT:    shrl %cl, %edi
 ; X86-SLOW-NEXT:    testb %ch, %ch
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; X86-SLOW-NEXT:    je .LBB3_2
+; X86-SLOW-NEXT:    je .LBB4_2
 ; X86-SLOW-NEXT:  # %bb.1:
 ; X86-SLOW-NEXT:    orl %edi, %edx
 ; X86-SLOW-NEXT:    movl %edx, (%esp) # 4-byte Spill
-; X86-SLOW-NEXT:  .LBB3_2:
+; X86-SLOW-NEXT:  .LBB4_2:
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLOW-NEXT:    movl %ecx, %edx
 ; X86-SLOW-NEXT:    movl %ebx, %ecx
@@ -273,41 +293,41 @@
 ; X86-SLOW-NEXT:    shll %cl, %edi
 ; X86-SLOW-NEXT:    testb %ah, %ah
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; X86-SLOW-NEXT:    je .LBB3_4
+; X86-SLOW-NEXT:    je .LBB4_4
 ; X86-SLOW-NEXT:  # %bb.3:
 ; X86-SLOW-NEXT:    orl %edx, %edi
 ; X86-SLOW-NEXT:    movl %edi, %ebp
-; X86-SLOW-NEXT:  .LBB3_4:
+; X86-SLOW-NEXT:  .LBB4_4:
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-SLOW-NEXT:    movl %ebx, %ecx
 ; X86-SLOW-NEXT:    shrl %cl, %edi
 ; X86-SLOW-NEXT:    testb $32, %bl
-; X86-SLOW-NEXT:    je .LBB3_6
+; X86-SLOW-NEXT:    je .LBB4_6
 ; X86-SLOW-NEXT:  # %bb.5:
 ; X86-SLOW-NEXT:    movl %edi, %ebp
 ; X86-SLOW-NEXT:    xorl %edi, %edi
-; X86-SLOW-NEXT:  .LBB3_6:
+; X86-SLOW-NEXT:  .LBB4_6:
 ; X86-SLOW-NEXT:    movl %eax, %ecx
 ; X86-SLOW-NEXT:    shll %cl, %esi
 ; X86-SLOW-NEXT:    testb $32, %al
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-SLOW-NEXT:    jne .LBB3_7
+; X86-SLOW-NEXT:    jne .LBB4_7
 ; X86-SLOW-NEXT:  # %bb.8:
 ; X86-SLOW-NEXT:    movl (%esp), %eax # 4-byte Reload
 ; X86-SLOW-NEXT:    testl %ebx, %ebx
-; X86-SLOW-NEXT:    jne .LBB3_10
-; X86-SLOW-NEXT:    jmp .LBB3_11
-; X86-SLOW-NEXT:  .LBB3_7:
+; X86-SLOW-NEXT:    jne .LBB4_10
+; X86-SLOW-NEXT:    jmp .LBB4_11
+; X86-SLOW-NEXT:  .LBB4_7:
 ; X86-SLOW-NEXT:    movl %esi, %eax
 ; X86-SLOW-NEXT:    xorl %esi, %esi
 ; X86-SLOW-NEXT:    testl %ebx, %ebx
-; X86-SLOW-NEXT:    je .LBB3_11
-; X86-SLOW-NEXT:  .LBB3_10:
+; X86-SLOW-NEXT:    je .LBB4_11
+; X86-SLOW-NEXT:  .LBB4_10:
 ; X86-SLOW-NEXT:    orl %ebp, %esi
 ; X86-SLOW-NEXT:    orl %edi, %eax
 ; X86-SLOW-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-SLOW-NEXT:    movl %eax, %edx
-; X86-SLOW-NEXT:  .LBB3_11:
+; X86-SLOW-NEXT:  .LBB4_11:
 ; X86-SLOW-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-SLOW-NEXT:    addl $8, %esp
 ; X86-SLOW-NEXT:    popl %esi
diff --git a/test/CodeGen/X86/funnel-shift-rot.ll b/test/CodeGen/X86/funnel-shift-rot.ll
index e07d34f..fa16799 100644
--- a/test/CodeGen/X86/funnel-shift-rot.ll
+++ b/test/CodeGen/X86/funnel-shift-rot.ll
@@ -96,51 +96,32 @@
 define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
 ; X32-SSE2-LABEL: rotl_v4i32:
 ; X32-SSE2:       # %bb.0:
-; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE2-NEXT:    psubd %xmm1, %xmm3
-; X32-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [31,31,31,31]
-; X32-SSE2-NEXT:    pand %xmm4, %xmm3
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm2, %xmm5
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm3[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm2
-; X32-SSE2-NEXT:    psrld %xmm6, %xmm2
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm3[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm6
-; X32-SSE2-NEXT:    psrld %xmm5, %xmm6
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm3, %xmm5
-; X32-SSE2-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; X32-SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm5[0,3]
-; X32-SSE2-NEXT:    pand %xmm4, %xmm1
+; X32-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm1
 ; X32-SSE2-NEXT:    pslld $23, %xmm1
 ; X32-SSE2-NEXT:    paddd {{\.LCPI.*}}, %xmm1
 ; X32-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X32-SSE2-NEXT:    pmuludq %xmm1, %xmm0
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; X32-SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT:    orps %xmm0, %xmm2
-; X32-SSE2-NEXT:    movaps %xmm2, %xmm0
+; X32-SSE2-NEXT:    por %xmm3, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: rotl_v4i32:
 ; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; X64-AVX2-NEXT:    vpsllvd %xmm3, %xmm0, %xmm3
-; X64-AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm4, %xmm1
 ; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
 ; X64-AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; X64-AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; X64-AVX2-NEXT:    retq
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f
@@ -273,52 +254,36 @@
 define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
 ; X32-SSE2-LABEL: rotr_v4i32:
 ; X32-SSE2:       # %bb.0:
-; X32-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [31,31,31,31]
-; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE2-NEXT:    psubd %xmm1, %xmm3
-; X32-SSE2-NEXT:    movdqa %xmm1, %xmm4
-; X32-SSE2-NEXT:    pand %xmm2, %xmm4
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm4[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm1, %xmm5
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm1
-; X32-SSE2-NEXT:    psrld %xmm6, %xmm1
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm4[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm6
-; X32-SSE2-NEXT:    psrld %xmm5, %xmm6
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm4, %xmm5
-; X32-SSE2-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; X32-SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm5[0,3]
-; X32-SSE2-NEXT:    pand %xmm2, %xmm3
-; X32-SSE2-NEXT:    pslld $23, %xmm3
-; X32-SSE2-NEXT:    paddd {{\.LCPI.*}}, %xmm3
-; X32-SSE2-NEXT:    cvttps2dq %xmm3, %xmm2
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; X32-SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    psubd %xmm1, %xmm2
+; X32-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE2-NEXT:    pslld $23, %xmm2
+; X32-SSE2-NEXT:    paddd {{\.LCPI.*}}, %xmm2
+; X32-SSE2-NEXT:    cvttps2dq %xmm2, %xmm1
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; X32-SSE2-NEXT:    pmuludq %xmm3, %xmm2
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT:    orps %xmm0, %xmm1
-; X32-SSE2-NEXT:    movaps %xmm1, %xmm0
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE2-NEXT:    por %xmm3, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: rotr_v4i32:
 ; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
 ; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; X64-AVX2-NEXT:    vpsrlvd %xmm3, %xmm0, %xmm3
-; X64-AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm4, %xmm1
 ; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; X64-AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; X64-AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; X64-AVX2-NEXT:    retq
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f
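The funnel-shift-rot.ll deltas show the new rotate expansion: rather than negating the rotate amount and running two independently masked variable shifts, the amount is masked once (amt = z & 31) and the complementary count is formed as 32 - amt. AVX2 variable shifts (vpsllvd/vpsrlvd) return zero for out-of-range counts, so when amt is 0 the right shift by 32 contributes nothing and the or still yields x. The tested IR pattern is just a same-operand funnel shift:

declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
  ; fshl(x, x, z) is rotl(x, z); on AVX2 the backend emits
  ; (x << (z & 31)) | (x >> (32 - (z & 31))).
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}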
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
index a75fe12..c4d470a 100644
--- a/test/CodeGen/X86/haddsub-2.ll
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -620,27 +620,15 @@
   ret <8 x i32> %vecinit29
 }
 
-define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
+define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; SSE3-LABEL: avx2_vphadd_w_test:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pushq %rbp
-; SSE3-NEXT:    .cfi_def_cfa_offset 16
 ; SSE3-NEXT:    pushq %r15
-; SSE3-NEXT:    .cfi_def_cfa_offset 24
 ; SSE3-NEXT:    pushq %r14
-; SSE3-NEXT:    .cfi_def_cfa_offset 32
 ; SSE3-NEXT:    pushq %r13
-; SSE3-NEXT:    .cfi_def_cfa_offset 40
 ; SSE3-NEXT:    pushq %r12
-; SSE3-NEXT:    .cfi_def_cfa_offset 48
 ; SSE3-NEXT:    pushq %rbx
-; SSE3-NEXT:    .cfi_def_cfa_offset 56
-; SSE3-NEXT:    .cfi_offset %rbx, -56
-; SSE3-NEXT:    .cfi_offset %r12, -48
-; SSE3-NEXT:    .cfi_offset %r13, -40
-; SSE3-NEXT:    .cfi_offset %r14, -32
-; SSE3-NEXT:    .cfi_offset %r15, -24
-; SSE3-NEXT:    .cfi_offset %rbp, -16
 ; SSE3-NEXT:    movd %xmm0, %eax
 ; SSE3-NEXT:    pextrw $1, %xmm0, %ecx
 ; SSE3-NEXT:    addl %eax, %ecx
@@ -724,17 +712,11 @@
 ; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
 ; SSE3-NEXT:    popq %rbx
-; SSE3-NEXT:    .cfi_def_cfa_offset 48
 ; SSE3-NEXT:    popq %r12
-; SSE3-NEXT:    .cfi_def_cfa_offset 40
 ; SSE3-NEXT:    popq %r13
-; SSE3-NEXT:    .cfi_def_cfa_offset 32
 ; SSE3-NEXT:    popq %r14
-; SSE3-NEXT:    .cfi_def_cfa_offset 24
 ; SSE3-NEXT:    popq %r15
-; SSE3-NEXT:    .cfi_def_cfa_offset 16
 ; SSE3-NEXT:    popq %rbp
-; SSE3-NEXT:    .cfi_def_cfa_offset 8
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: avx2_vphadd_w_test:
@@ -1253,27 +1235,15 @@
   ret <8 x i32> %vecinit29
 }
 
-define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
+define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; SSE3-LABEL: avx2_hadd_w:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pushq %rbp
-; SSE3-NEXT:    .cfi_def_cfa_offset 16
 ; SSE3-NEXT:    pushq %r15
-; SSE3-NEXT:    .cfi_def_cfa_offset 24
 ; SSE3-NEXT:    pushq %r14
-; SSE3-NEXT:    .cfi_def_cfa_offset 32
 ; SSE3-NEXT:    pushq %r13
-; SSE3-NEXT:    .cfi_def_cfa_offset 40
 ; SSE3-NEXT:    pushq %r12
-; SSE3-NEXT:    .cfi_def_cfa_offset 48
 ; SSE3-NEXT:    pushq %rbx
-; SSE3-NEXT:    .cfi_def_cfa_offset 56
-; SSE3-NEXT:    .cfi_offset %rbx, -56
-; SSE3-NEXT:    .cfi_offset %r12, -48
-; SSE3-NEXT:    .cfi_offset %r13, -40
-; SSE3-NEXT:    .cfi_offset %r14, -32
-; SSE3-NEXT:    .cfi_offset %r15, -24
-; SSE3-NEXT:    .cfi_offset %rbp, -16
 ; SSE3-NEXT:    movd %xmm0, %eax
 ; SSE3-NEXT:    pextrw $1, %xmm0, %r10d
 ; SSE3-NEXT:    addl %eax, %r10d
@@ -1357,17 +1327,11 @@
 ; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
 ; SSE3-NEXT:    popq %rbx
-; SSE3-NEXT:    .cfi_def_cfa_offset 48
 ; SSE3-NEXT:    popq %r12
-; SSE3-NEXT:    .cfi_def_cfa_offset 40
 ; SSE3-NEXT:    popq %r13
-; SSE3-NEXT:    .cfi_def_cfa_offset 32
 ; SSE3-NEXT:    popq %r14
-; SSE3-NEXT:    .cfi_def_cfa_offset 24
 ; SSE3-NEXT:    popq %r15
-; SSE3-NEXT:    .cfi_def_cfa_offset 16
 ; SSE3-NEXT:    popq %rbp
-; SSE3-NEXT:    .cfi_def_cfa_offset 8
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: avx2_hadd_w:
diff --git a/test/CodeGen/X86/haddsub-undef.ll b/test/CodeGen/X86/haddsub-undef.ll
index 166528b..c27be13 100644
--- a/test/CodeGen/X86/haddsub-undef.ll
+++ b/test/CodeGen/X86/haddsub-undef.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3           | FileCheck %s --check-prefixes=SSE,SSE-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,fast-hops | FileCheck %s --check-prefixes=SSE,SSE-FAST
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx             | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops   | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-FAST
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2            | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,fast-hops  | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3               | FileCheck %s --check-prefixes=SSE,SSE-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops     | FileCheck %s --check-prefixes=SSE,SSE-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx                | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops      | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f            | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops  | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512,AVX512-FAST
 
 ; Verify that we correctly fold horizontal binop even in the presence of UNDEFs.
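The RUN-line rewrite above drives most of this file's churn: ssse3 drops to sse3 (haddps/haddpd only require SSE3; the SSSE3-only integer phadd tests test14-test17 are removed from this file below), the AVX2 runs are replaced by AVX512 ones, and the shared AVX-SLOW/AVX-FAST prefixes absorb the cases where AVX1 and AVX512 now codegen identically. Every remaining test probes the same fold, sketched here:

define <4 x float> @hadd_undef_sketch(<4 x float> %a) {
  ; Two scalar adds of adjacent lanes, reinserted into a vector whose
  ; other lanes stay undef; with fast-hops this folds to one haddps.
  %e0 = extractelement <4 x float> %a, i32 0
  %e1 = extractelement <4 x float> %a, i32 1
  %s0 = fadd float %e0, %e1
  %e2 = extractelement <4 x float> %a, i32 2
  %e3 = extractelement <4 x float> %a, i32 3
  %s1 = fadd float %e2, %e3
  %v0 = insertelement <4 x float> undef, float %s0, i32 0
  %v1 = insertelement <4 x float> %v0, float %s1, i32 1
  ret <4 x float> %v1
}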
 
@@ -84,17 +84,27 @@
 }
 
 define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test4_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT:    addss %xmm1, %xmm0
-; SSE-NEXT:    retq
+; SSE-SLOW-LABEL: test4_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: test4_undef:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE-FAST-LABEL: test4_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: test4_undef:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: test4_undef:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -103,19 +113,29 @@
 }
 
 define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
-; SSE-LABEL: test5_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movapd %xmm0, %xmm1
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE-NEXT:    addsd %xmm0, %xmm1
-; SSE-NEXT:    movapd %xmm1, %xmm0
-; SSE-NEXT:    retq
+; SSE-SLOW-LABEL: test5_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: test5_undef:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE-FAST-LABEL: test5_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: test5_undef:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: test5_undef:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %vecext = extractelement <2 x double> %a, i32 0
   %vecext1 = extractelement <2 x double> %a, i32 1
   %add = fadd double %vecext, %vecext1
@@ -241,18 +261,25 @@
 }
 
 define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
-; SSE-LABEL: test11_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT:    addss %xmm1, %xmm0
-; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE-NEXT:    addss %xmm3, %xmm1
-; SSE-NEXT:    movddup {{.*#+}} xmm1 = xmm1[0,0]
-; SSE-NEXT:    retq
+; SSE-SLOW-LABEL: test11_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE-SLOW-NEXT:    addss %xmm3, %xmm1
+; SSE-SLOW-NEXT:    movddup {{.*#+}} xmm1 = xmm1[0,0]
+; SSE-SLOW-NEXT:    retq
+;
+; SSE-FAST-LABEL: test11_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE-FAST-NEXT:    haddps %xmm3, %xmm3
+; SSE-FAST-NEXT:    movddup {{.*#+}} xmm1 = xmm3[0,0]
+; SSE-FAST-NEXT:    retq
 ;
 ; AVX-LABEL: test11_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
   %vecext1 = extractelement <8 x float> %a, i32 1
@@ -316,141 +343,59 @@
   ret <8 x float> %vecinit4
 }
 
-define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
-; SSE-LABEL: test14_undef:
+define <16 x float> @test13_v16f32_undef(<16 x float> %a, <16 x float> %b) {
+; SSE-LABEL: test13_v16f32_undef:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    phaddd %xmm2, %xmm0
+; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: test14_undef:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    retq
+; AVX1-SLOW-LABEL: test13_v16f32_undef:
+; AVX1-SLOW:       # %bb.0:
+; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-SLOW-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT:    retq
 ;
-; AVX2-LABEL: test14_undef:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    retq
-  %vecext = extractelement <8 x i32> %a, i32 0
-  %vecext1 = extractelement <8 x i32> %a, i32 1
-  %add = add i32 %vecext, %vecext1
-  %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
-  %vecext2 = extractelement <8 x i32> %b, i32 2
-  %vecext3 = extractelement <8 x i32> %b, i32 3
-  %add4 = add i32 %vecext2, %vecext3
-  %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 3
-  ret <8 x i32> %vecinit5
+; AVX1-FAST-LABEL: test13_v16f32_undef:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-FAST-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    retq
+;
+; AVX512-LABEL: test13_v16f32_undef:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-NEXT:    vaddss %xmm3, %xmm2, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vaddss %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-NEXT:    vaddss %xmm0, %xmm2, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT:    retq
+  %vecext = extractelement <16 x float> %a, i32 0
+  %vecext1 = extractelement <16 x float> %a, i32 1
+  %add1 = fadd float %vecext, %vecext1
+  %vecinit1 = insertelement <16 x float> undef, float %add1, i32 0
+  %vecext2 = extractelement <16 x float> %a, i32 2
+  %vecext3 = extractelement <16 x float> %a, i32 3
+  %add2 = fadd float %vecext2, %vecext3
+  %vecinit2 = insertelement <16 x float> %vecinit1, float %add2, i32 1
+  %vecext4 = extractelement <16 x float> %a, i32 4
+  %vecext5 = extractelement <16 x float> %a, i32 5
+  %add3 = fadd float %vecext4, %vecext5
+  %vecinit3 = insertelement <16 x float> %vecinit2, float %add3, i32 2
+  %vecext6 = extractelement <16 x float> %a, i32 6
+  %vecext7 = extractelement <16 x float> %a, i32 7
+  %add4 = fadd float %vecext6, %vecext7
+  %vecinit4 = insertelement <16 x float> %vecinit3, float %add4, i32 3
+  ret <16 x float> %vecinit4
 }
-
-; integer horizontal adds instead of two scalar adds followed by vector inserts.
-define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
-; SSE-LABEL: test15_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movd %xmm0, %eax
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT:    movd %xmm0, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    movd %xmm3, %eax
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
-; SSE-NEXT:    movd %xmm0, %edx
-; SSE-NEXT:    addl %eax, %edx
-; SSE-NEXT:    movd %ecx, %xmm0
-; SSE-NEXT:    movd %edx, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; SSE-NEXT:    retq
-;
-; AVX1-LABEL: test15_undef:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    vpextrd $1, %xmm0, %edx
-; AVX1-NEXT:    addl %eax, %edx
-; AVX1-NEXT:    vmovd %ecx, %xmm0
-; AVX1-NEXT:    vmovd %edx, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: test15_undef:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
-; AVX2-NEXT:    retq
-  %vecext = extractelement <8 x i32> %a, i32 0
-  %vecext1 = extractelement <8 x i32> %a, i32 1
-  %add = add i32 %vecext, %vecext1
-  %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
-  %vecext2 = extractelement <8 x i32> %b, i32 4
-  %vecext3 = extractelement <8 x i32> %b, i32 5
-  %add4 = add i32 %vecext2, %vecext3
-  %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 6
-  ret <8 x i32> %vecinit5
-}
-
-define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
-; SSE-LABEL: test16_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    phaddd %xmm0, %xmm0
-; SSE-NEXT:    retq
-;
-; AVX1-LABEL: test16_undef:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: test16_undef:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
-; AVX2-NEXT:    retq
-  %vecext = extractelement <8 x i32> %a, i32 0
-  %vecext1 = extractelement <8 x i32> %a, i32 1
-  %add = add i32 %vecext, %vecext1
-  %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
-  %vecext2 = extractelement <8 x i32> %a, i32 2
-  %vecext3 = extractelement <8 x i32> %a, i32 3
-  %add4 = add i32 %vecext2, %vecext3
-  %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
-  ret <8 x i32> %vecinit5
-}
-
-define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
-; SSE-LABEL: test17_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    phaddd %xmm1, %xmm0
-; SSE-NEXT:    retq
-;
-; AVX1-LABEL: test17_undef:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: test17_undef:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    retq
-  %vecext = extractelement <8 x i32> %a, i32 0
-  %vecext1 = extractelement <8 x i32> %a, i32 1
-  %add1 = add i32 %vecext, %vecext1
-  %vecinit1 = insertelement <8 x i32> undef, i32 %add1, i32 0
-  %vecext2 = extractelement <8 x i32> %a, i32 2
-  %vecext3 = extractelement <8 x i32> %a, i32 3
-  %add2 = add i32 %vecext2, %vecext3
-  %vecinit2 = insertelement <8 x i32> %vecinit1, i32 %add2, i32 1
-  %vecext4 = extractelement <8 x i32> %a, i32 4
-  %vecext5 = extractelement <8 x i32> %a, i32 5
-  %add3 = add i32 %vecext4, %vecext5
-  %vecinit3 = insertelement <8 x i32> %vecinit2, i32 %add3, i32 2
-  %vecext6 = extractelement <8 x i32> %a, i32 6
-  %vecext7 = extractelement <8 x i32> %a, i32 7
-  %add4 = add i32 %vecext6, %vecext7
-  %vecinit4 = insertelement <8 x i32> %vecinit3, i32 %add4, i32 3
-  ret <8 x i32> %vecinit4
-}
-
 define <2 x double> @add_pd_003(<2 x double> %x) {
 ; SSE-SLOW-LABEL: add_pd_003:
 ; SSE-SLOW:       # %bb.0:
@@ -463,27 +408,16 @@
 ; SSE-FAST-NEXT:    haddpd %xmm0, %xmm0
 ; SSE-FAST-NEXT:    retq
 ;
-; AVX1-SLOW-LABEL: add_pd_003:
-; AVX1-SLOW:       # %bb.0:
-; AVX1-SLOW-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX1-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    retq
+; AVX-SLOW-LABEL: add_pd_003:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_pd_003:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: add_pd_003:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX2-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_pd_003:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_pd_003:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %l = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 undef, i32 0>
   %add = fadd <2 x double> %l, %x
   ret <2 x double> %add
@@ -505,27 +439,16 @@
 ; SSE-FAST-NEXT:    haddpd %xmm0, %xmm0
 ; SSE-FAST-NEXT:    retq
 ;
-; AVX1-SLOW-LABEL: add_pd_003_2:
-; AVX1-SLOW:       # %bb.0:
-; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX1-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    retq
+; AVX-SLOW-LABEL: add_pd_003_2:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_pd_003_2:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: add_pd_003_2:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX2-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_pd_003_2:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_pd_003_2:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %l = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 0>
   %add = fadd <2 x double> %l, %x
   ret <2 x double> %add
@@ -545,31 +468,18 @@
 ; SSE-FAST-NEXT:    haddpd %xmm0, %xmm0
 ; SSE-FAST-NEXT:    retq
 ;
-; AVX1-SLOW-LABEL: add_pd_010:
-; AVX1-SLOW:       # %bb.0:
-; AVX1-SLOW-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX1-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-SLOW-NEXT:    retq
+; AVX-SLOW-LABEL: add_pd_010:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_pd_010:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: add_pd_010:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX2-SLOW-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_pd_010:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_pd_010:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-FAST-NEXT:    retq
   %l = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 undef, i32 0>
   %add = fadd <2 x double> %l, %x
   %shuffle2 = shufflevector <2 x double> %add, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
@@ -590,29 +500,17 @@
 ; SSE-FAST-NEXT:    haddps %xmm0, %xmm0
 ; SSE-FAST-NEXT:    retq
 ;
-; AVX1-SLOW-LABEL: add_ps_007:
-; AVX1-SLOW:       # %bb.0:
-; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX1-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    retq
+; AVX-SLOW-LABEL: add_ps_007:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_ps_007:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: add_ps_007:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX2-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_ps_007:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_ps_007:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 2>
   %r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
   %add = fadd <4 x float> %l, %r
@@ -635,33 +533,19 @@
 ; SSE-FAST-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,2,3]
 ; SSE-FAST-NEXT:    retq
 ;
-; AVX1-SLOW-LABEL: add_ps_030:
-; AVX1-SLOW:       # %bb.0:
-; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX1-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,2,3]
-; AVX1-SLOW-NEXT:    retq
+; AVX-SLOW-LABEL: add_ps_030:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,2,3]
+; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_ps_030:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,2,3]
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: add_ps_030:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX2-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,2,3]
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_ps_030:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,2,3]
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_ps_030:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,2,3]
+; AVX-FAST-NEXT:    retq
   %l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 2>
   %r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
   %add = fadd <4 x float> %l, %r
@@ -689,22 +573,17 @@
 ; AVX1-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
 ; AVX1-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_ps_007_2:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_ps_007_2:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
 ;
-; AVX2-SLOW-LABEL: add_ps_007_2:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vbroadcastss %xmm0, %xmm1
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX2-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_ps_007_2:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    retq
+; AVX512-SLOW-LABEL: add_ps_007_2:
+; AVX512-SLOW:       # %bb.0:
+; AVX512-SLOW-NEXT:    vbroadcastss %xmm0, %xmm1
+; AVX512-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX512-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX512-SLOW-NEXT:    retq
   %l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
   %r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
   %add = fadd <4 x float> %l, %r
@@ -723,27 +602,16 @@
 ; SSE-FAST-NEXT:    haddps %xmm0, %xmm0
 ; SSE-FAST-NEXT:    retq
 ;
-; AVX1-SLOW-LABEL: add_ps_008:
-; AVX1-SLOW:       # %bb.0:
-; AVX1-SLOW-NEXT:    vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; AVX1-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    retq
+; AVX-SLOW-LABEL: add_ps_008:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; AVX-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_ps_008:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: add_ps_008:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; AVX2-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_ps_008:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_ps_008:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
   %add = fadd <4 x float> %l, %x
   ret <4 x float> %add
@@ -764,31 +632,18 @@
 ; SSE-FAST-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE-FAST-NEXT:    retq
 ;
-; AVX1-SLOW-LABEL: add_ps_017:
-; AVX1-SLOW:       # %bb.0:
-; AVX1-SLOW-NEXT:    vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; AVX1-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX1-SLOW-NEXT:    retq
+; AVX-SLOW-LABEL: add_ps_017:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; AVX-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_ps_017:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: add_ps_017:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; AVX2-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_ps_017:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_ps_017:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-FAST-NEXT:    retq
   %l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
   %add = fadd <4 x float> %l, %x
   %shuffle2 = shufflevector <4 x float> %add, <4 x float> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef>
@@ -818,25 +673,19 @@
 ; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX1-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: add_ps_018:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-FAST-NEXT:    retq
+; AVX-FAST-LABEL: add_ps_018:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-FAST-NEXT:    retq
 ;
-; AVX2-SLOW-LABEL: add_ps_018:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vbroadcastss %xmm0, %xmm1
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX2-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-LABEL: add_ps_018:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-FAST-NEXT:    retq
+; AVX512-SLOW-LABEL: add_ps_018:
+; AVX512-SLOW:       # %bb.0:
+; AVX512-SLOW-NEXT:    vbroadcastss %xmm0, %xmm1
+; AVX512-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX512-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX512-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512-SLOW-NEXT:    retq
   %l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
   %r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
   %add = fadd <4 x float> %l, %r
@@ -844,3 +693,177 @@
   ret <4 x float> %shuffle2
 }
 
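+; The tests below feed sources wider than the v4f32 result into the add; only
+; the low 128 bits of each input are used, so an xmm vhaddps (followed by
+; vzeroupper for the AVX runs) is the expected lowering.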
+define <4 x float> @v8f32_inputs_v4f32_output_0101(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: v8f32_inputs_v4f32_output_0101:
+; SSE:       # %bb.0:
+; SSE-NEXT:    haddps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_inputs_v4f32_output_0101:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %a0 = extractelement <8 x float> %a, i32 0
+  %a1 = extractelement <8 x float> %a, i32 1
+  %b0 = extractelement <8 x float> %b, i32 0
+  %b1 = extractelement <8 x float> %b, i32 1
+  %add0 = fadd float %a0, %a1
+  %add2 = fadd float %b0, %b1
+  %r0 = insertelement <4 x float> undef, float %add0, i32 0
+  %r = insertelement <4 x float> %r0, float %add2, i32 2
+  ret <4 x float> %r
+}
+
+define <4 x float> @v8f32_input0_v4f32_output_0123(<8 x float> %a, <4 x float> %b) {
+; SSE-LABEL: v8f32_input0_v4f32_output_0123:
+; SSE:       # %bb.0:
+; SSE-NEXT:    haddps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_input0_v4f32_output_0123:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %a0 = extractelement <8 x float> %a, i32 0
+  %a1 = extractelement <8 x float> %a, i32 1
+  %b2 = extractelement <4 x float> %b, i32 2
+  %b3 = extractelement <4 x float> %b, i32 3
+  %add0 = fadd float %a0, %a1
+  %add3 = fadd float %b2, %b3
+  %r0 = insertelement <4 x float> undef, float %add0, i32 0
+  %r = insertelement <4 x float> %r0, float %add3, i32 3
+  ret <4 x float> %r
+}
+
+define <4 x float> @v8f32_input1_v4f32_output_2301(<4 x float> %a, <8 x float> %b) {
+; SSE-LABEL: v8f32_input1_v4f32_output_2301:
+; SSE:       # %bb.0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_input1_v4f32_output_2301:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %a2 = extractelement <4 x float> %a, i32 2
+  %a3 = extractelement <4 x float> %a, i32 3
+  %b0 = extractelement <8 x float> %b, i32 0
+  %b1 = extractelement <8 x float> %b, i32 1
+  %add1 = fadd float %a2, %a3
+  %add2 = fadd float %b0, %b1
+  %r1 = insertelement <4 x float> undef, float %add1, i32 1
+  %r = insertelement <4 x float> %r1, float %add2, i32 2
+  ret <4 x float> %r
+}
+
+define <4 x float> @v8f32_inputs_v4f32_output_2323(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: v8f32_inputs_v4f32_output_2323:
+; SSE:       # %bb.0:
+; SSE-NEXT:    haddps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_inputs_v4f32_output_2323:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %a2 = extractelement <8 x float> %a, i32 2
+  %a3 = extractelement <8 x float> %a, i32 3
+  %b2 = extractelement <8 x float> %b, i32 2
+  %b3 = extractelement <8 x float> %b, i32 3
+  %add1 = fadd float %a2, %a3
+  %add3 = fadd float %b2, %b3
+  %r1 = insertelement <4 x float> undef, float %add1, i32 1
+  %r = insertelement <4 x float> %r1, float %add3, i32 3
+  ret <4 x float> %r
+}
+
+define <4 x float> @v16f32_inputs_v4f32_output_0123(<16 x float> %a, <16 x float> %b) {
+; SSE-LABEL: v16f32_inputs_v4f32_output_0123:
+; SSE:       # %bb.0:
+; SSE-NEXT:    haddps %xmm4, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-SLOW-LABEL: v16f32_inputs_v4f32_output_0123:
+; AVX1-SLOW:       # %bb.0:
+; AVX1-SLOW-NEXT:    vhaddps %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT:    vzeroupper
+; AVX1-SLOW-NEXT:    retq
+;
+; AVX1-FAST-LABEL: v16f32_inputs_v4f32_output_0123:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vhaddps %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vzeroupper
+; AVX1-FAST-NEXT:    retq
+;
+; AVX512-LABEL: v16f32_inputs_v4f32_output_0123:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %a0 = extractelement <16 x float> %a, i32 0
+  %a1 = extractelement <16 x float> %a, i32 1
+  %b2 = extractelement <16 x float> %b, i32 2
+  %b3 = extractelement <16 x float> %b, i32 3
+  %add0 = fadd float %a0, %a1
+  %add3 = fadd float %b2, %b3
+  %r0 = insertelement <4 x float> undef, float %add0, i32 0
+  %r = insertelement <4 x float> %r0, float %add3, i32 3
+  ret <4 x float> %r
+}
+
+define <8 x float> @v16f32_inputs_v8f32_output_4567(<16 x float> %a, <16 x float> %b) {
+; SSE-LABEL: v16f32_inputs_v8f32_output_4567:
+; SSE:       # %bb.0:
+; SSE-NEXT:    haddps %xmm5, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-SLOW-LABEL: v16f32_inputs_v8f32_output_4567:
+; AVX1-SLOW:       # %bb.0:
+; AVX1-SLOW-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
+; AVX1-SLOW-NEXT:    retq
+;
+; AVX1-FAST-LABEL: v16f32_inputs_v8f32_output_4567:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
+; AVX1-FAST-NEXT:    retq
+;
+; AVX512-LABEL: v16f32_inputs_v8f32_output_4567:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %a4 = extractelement <16 x float> %a, i32 4
+  %a5 = extractelement <16 x float> %a, i32 5
+  %b6 = extractelement <16 x float> %b, i32 6
+  %b7 = extractelement <16 x float> %b, i32 7
+  %add4 = fadd float %a4, %a5
+  %add7 = fadd float %b6, %b7
+  %r4 = insertelement <8 x float> undef, float %add4, i32 4
+  %r = insertelement <8 x float> %r4, float %add7, i32 7
+  ret <8 x float> %r
+}
+
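+; PR40243: the extracted lanes live in the upper 128-bit halves of the
+; sources, so the AVX fold is expected to use the full-width 256-bit vhaddps.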
+define <8 x float> @PR40243(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: PR40243:
+; SSE:       # %bb.0:
+; SSE-NEXT:    haddps %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: PR40243:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %a4 = extractelement <8 x float> %a, i32 4
+  %a5 = extractelement <8 x float> %a, i32 5
+  %add4 = fadd float %a4, %a5
+  %b6 = extractelement <8 x float> %b, i32 6
+  %b7 = extractelement <8 x float> %b, i32 7
+  %add7 = fadd float %b6, %b7
+  %r4 = insertelement <8 x float> undef, float %add4, i32 4
+  %r = insertelement <8 x float> %r4, float %add7, i32 7
+  ret <8 x float> %r
+}
+
diff --git a/test/CodeGen/X86/haddsub.ll b/test/CodeGen/X86/haddsub.ll
index 6221d4e..aa92f03 100644
--- a/test/CodeGen/X86/haddsub.ll
+++ b/test/CodeGen/X86/haddsub.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3           | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx            | FileCheck %s --check-prefixes=AVX,AVX-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops  | FileCheck %s --check-prefixes=AVX,AVX-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3               | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops     | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx                | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops      | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f            | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops  | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512-FAST
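+; The shared AVX prefix covers checks common to the AVX1 and AVX512 runs; the
+; -SLOW/-FAST and AVX1/AVX512 prefixes split where the lowering differs.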
 
 define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: haddpd1:
@@ -583,3 +585,770 @@
   ret <2 x float> %res1
 }
 
+; 128-bit vectors, float/double, fadd/fsub
+
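+; Each pair below extracts lanes 0 and 1 of a single source and combines them
+; with one scalar op; with fast-hops (the -FAST runs) this is expected to
+; fold to a single horizontal instruction, while the -SLOW runs keep the
+; shuffle-plus-scalar form.
+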
+define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fadd float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fadd double %x0, %x1
+  ret double %x01
+}
+
+define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fadd double %x1, %x0
+  ret double %x01
+}
+
+define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fsub float %x0, %x1
+  ret float %x01
+}
+
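+; The commuted subtraction (x1 - x0) does not match hsubps, which always
+; computes elt0 - elt1, so no horizontal op forms even with fast-hops.
+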
+define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
+; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    subss %xmm0, %xmm1
+; SSE3-NEXT:    movaps %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fsub float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fsub double %x0, %x1
+  ret double %x01
+}
+
+define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
+; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    subsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fsub double %x1, %x0
+  ret double %x01
+}
+
+; 256-bit vectors, float/double, fadd/fsub
+
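+; With 256-bit sources the extracts still only touch the low xmm, so the same
+; 128-bit horizontal ops are expected; the AVX runs add vzeroupper on return.
+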
+define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x float> %x, i32 0
+  %x1 = extractelement <8 x float> %x, i32 1
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x float> %x, i32 0
+  %x1 = extractelement <8 x float> %x, i32 1
+  %x01 = fadd float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fadd double %x0, %x1
+  ret double %x01
+}
+
+define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fadd double %x1, %x0
+  ret double %x01
+}
+
+define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x float> %x, i32 0
+  %x1 = extractelement <8 x float> %x, i32 1
+  %x01 = fsub float %x0, %x1
+  ret float %x01
+}
+
+; Negative test...or should this become a horizontal op plus a negate?
+
+define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
+; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    subss %xmm0, %xmm1
+; SSE3-NEXT:    movaps %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v8f32_fsub_f32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <8 x float> %x, i32 0
+  %x1 = extractelement <8 x float> %x, i32 1
+  %x01 = fsub float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fsub double %x0, %x1
+  ret double %x01
+}
+
+; Negative test...or should this become a horizontal op plus a negate?
+
+define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
+; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    subsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f64_fsub_f64_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fsub double %x1, %x0
+  ret double %x01
+}
+
+; 512-bit vectors, float/double, fadd/fsub
+
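+; Same again with 512-bit sources: the checks still expect the 128-bit
+; horizontal ops on the low xmm half.
+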
+define float @extract_extract_v16f32_fadd_f32(<16 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v16f32_fadd_f32_commute(<16 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fadd float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v8f64_fadd_f64(<8 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
+  %x01 = fadd double %x0, %x1
+  ret double %x01
+}
+
+define double @extract_extract_v8f64_fadd_f64_commute(<8 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
+  %x01 = fadd double %x1, %x0
+  ret double %x01
+}
+
+define float @extract_extract_v16f32_fsub_f32(<16 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fsub float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v16f32_fsub_f32_commute(<16 x float> %x) {
+; SSE3-LABEL: extract_extract_v16f32_fsub_f32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    subss %xmm0, %xmm1
+; SSE3-NEXT:    movaps %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v16f32_fsub_f32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fsub float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v8f64_fsub_f64(<8 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
+  %x01 = fsub double %x0, %x1
+  ret double %x01
+}
+
+define double @extract_extract_v8f64_fsub_f64_commute(<8 x double> %x) {
+; SSE3-LABEL: extract_extract_v8f64_fsub_f64_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    subsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v8f64_fsub_f64_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
+  %x01 = fsub double %x1, %x0
+  ret double %x01
+}
+
+; Check output when one or both extracts have extra uses.
+
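+; With a single extra use the fast-hops runs can still form the horizontal op
+; (uses1/uses2); when both extracted values are stored (uses3), the fold is
+; skipped and the plain scalar add remains on all runs.
+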
+define float @extract_extract_v4f32_fadd_f32_uses1(<4 x float> %x, float* %p) {
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movss %xmm0, (%rdi)
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    movss %xmm0, (%rdi)
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  store float %x0, float* %p
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v4f32_fadd_f32_uses2(<4 x float> %x, float* %p) {
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    movss %xmm1, (%rdi)
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    movss %xmm1, (%rdi)
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vmovss %xmm1, (%rdi)
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vextractps $1, %xmm0, (%rdi)
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  store float %x1, float* %p
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v4f32_fadd_f32_uses3(<4 x float> %x, float* %p1, float* %p2) {
+; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses3:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movss %xmm0, (%rdi)
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    movss %xmm1, (%rsi)
+; SSE3-NEXT:    addss %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses3:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vmovss %xmm1, (%rsi)
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  store float %x0, float* %p1
+  %x1 = extractelement <4 x float> %x, i32 1
+  store float %x1, float* %p2
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
diff --git a/test/CodeGen/X86/half.ll b/test/CodeGen/X86/half.ll
index 2bdd537..f8688de 100644
--- a/test/CodeGen/X86/half.ll
+++ b/test/CodeGen/X86/half.ll
@@ -938,4 +938,67 @@
   ret float %tmp3
 }
 
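+; PR40273: uitofp of (fcmp une half %0, 0.0) back to half; all three
+; configurations are expected to compare against zero and select between
+; 1.0 and 0.0.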
+define half @PR40273(half) #0 {
+; CHECK-LIBCALL-LABEL: PR40273:
+; CHECK-LIBCALL:       # %bb.0:
+; CHECK-LIBCALL-NEXT:    pushq %rax
+; CHECK-LIBCALL-NEXT:    callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT:    movzwl %ax, %edi
+; CHECK-LIBCALL-NEXT:    callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT:    xorps %xmm1, %xmm1
+; CHECK-LIBCALL-NEXT:    ucomiss %xmm1, %xmm0
+; CHECK-LIBCALL-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-LIBCALL-NEXT:    jne .LBB17_3
+; CHECK-LIBCALL-NEXT:  # %bb.1:
+; CHECK-LIBCALL-NEXT:    jp .LBB17_3
+; CHECK-LIBCALL-NEXT:  # %bb.2:
+; CHECK-LIBCALL-NEXT:    xorps %xmm0, %xmm0
+; CHECK-LIBCALL-NEXT:  .LBB17_3:
+; CHECK-LIBCALL-NEXT:    popq %rax
+; CHECK-LIBCALL-NEXT:    retq
+;
+; BWON-F16C-LABEL: PR40273:
+; BWON-F16C:       # %bb.0:
+; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; BWON-F16C-NEXT:    vucomiss %xmm1, %xmm0
+; BWON-F16C-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; BWON-F16C-NEXT:    jne .LBB17_3
+; BWON-F16C-NEXT:  # %bb.1:
+; BWON-F16C-NEXT:    jp .LBB17_3
+; BWON-F16C-NEXT:  # %bb.2:
+; BWON-F16C-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; BWON-F16C-NEXT:  .LBB17_3:
+; BWON-F16C-NEXT:    retq
+;
+; CHECK-I686-LABEL: PR40273:
+; CHECK-I686:       # %bb.0:
+; CHECK-I686-NEXT:    subl $12, %esp
+; CHECK-I686-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT:    movss %xmm0, (%esp)
+; CHECK-I686-NEXT:    calll __gnu_f2h_ieee
+; CHECK-I686-NEXT:    movzwl %ax, %eax
+; CHECK-I686-NEXT:    movl %eax, (%esp)
+; CHECK-I686-NEXT:    calll __gnu_h2f_ieee
+; CHECK-I686-NEXT:    fstps {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT:    xorps %xmm1, %xmm1
+; CHECK-I686-NEXT:    ucomiss %xmm1, %xmm0
+; CHECK-I686-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT:    jne .LBB17_3
+; CHECK-I686-NEXT:  # %bb.1:
+; CHECK-I686-NEXT:    jp .LBB17_3
+; CHECK-I686-NEXT:  # %bb.2:
+; CHECK-I686-NEXT:    xorps %xmm0, %xmm0
+; CHECK-I686-NEXT:  .LBB17_3:
+; CHECK-I686-NEXT:    movss %xmm0, {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-I686-NEXT:    addl $12, %esp
+; CHECK-I686-NEXT:    retl
+  %2 = fcmp une half %0, 0xH0000
+  %3 = uitofp i1 %2 to half
+  ret half %3
+}
+
 attributes #0 = { nounwind }
diff --git a/test/CodeGen/X86/hidden-vis-pic.ll b/test/CodeGen/X86/hidden-vis-pic.ll
index 23bdb84..6cc41b8 100644
--- a/test/CodeGen/X86/hidden-vis-pic.ll
+++ b/test/CodeGen/X86/hidden-vis-pic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin9 -relocation-model=pic -frame-pointer=all | FileCheck %s
 
 
 
diff --git a/test/CodeGen/X86/horizontal-reduce-smax.ll b/test/CodeGen/X86/horizontal-reduce-smax.ll
index 98fbd36..55bac98 100644
--- a/test/CodeGen/X86/horizontal-reduce-smax.ll
+++ b/test/CodeGen/X86/horizontal-reduce-smax.ll
@@ -211,21 +211,19 @@
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
 ; X86-SSE42:       ## %bb.0:
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI2_0, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v8i16:
 ; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpxor LCPI2_0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovd %xmm0, %eax
+; X86-AVX-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-AVX-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX-NEXT:    retl
 ;
@@ -244,21 +242,19 @@
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
 ; X64-SSE42:       ## %bb.0:
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v8i16:
 ; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vmovd %xmm0, %eax
+; X64-AVX-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-AVX-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX-NEXT:    retq
   %1  = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -309,26 +305,24 @@
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i8:
 ; X86-SSE42:       ## %bb.0:
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI3_0, %xmm0
+; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE42-NEXT:    psrlw $8, %xmm1
+; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    xorb $127, %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v16i8:
 ; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpxor LCPI3_0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX-NEXT:    xorb $127, %al
 ; X86-AVX-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX-NEXT:    retl
 ;
@@ -366,26 +360,24 @@
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i8:
 ; X64-SSE42:       ## %bb.0:
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
+; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE42-NEXT:    psrlw $8, %xmm1
+; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    xorb $127, %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v16i8:
 ; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX-NEXT:    xorb $127, %al
 ; X64-AVX-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX-NEXT:    retq
   %1  = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -736,11 +728,10 @@
 ; X86-SSE42-LABEL: test_reduce_v16i16:
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI6_0, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -748,11 +739,10 @@
 ; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI6_0, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
+; X86-AVX1-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -761,11 +751,10 @@
 ; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI6_0, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
+; X86-AVX2-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -787,11 +776,10 @@
 ; X64-SSE42-LABEL: test_reduce_v16i16:
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -799,11 +787,10 @@
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovd %xmm0, %eax
+; X64-AVX1-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -812,11 +799,10 @@
 ; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
+; X64-AVX2-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -825,11 +811,10 @@
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -890,14 +875,13 @@
 ; X86-SSE42-LABEL: test_reduce_v32i8:
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI7_0, %xmm0
+; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE42-NEXT:    psrlw $8, %xmm1
+; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    xorb $127, %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -905,13 +889,12 @@
 ; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI7_0, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX1-NEXT:    xorb $127, %al
 ; X86-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -920,13 +903,12 @@
 ; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI7_0, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX2-NEXT:    xorb $127, %al
 ; X86-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -971,14 +953,13 @@
 ; X64-SSE42-LABEL: test_reduce_v32i8:
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
+; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE42-NEXT:    psrlw $8, %xmm1
+; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    xorb $127, %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -986,13 +967,12 @@
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX1-NEXT:    xorb $127, %al
 ; X64-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1001,13 +981,12 @@
 ; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX2-NEXT:    xorb $127, %al
 ; X64-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1016,13 +995,12 @@
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX512-NEXT:    xorb $127, %al
 ; X64-AVX512-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -1513,11 +1491,10 @@
 ; X86-SSE42-NEXT:    pmaxsw %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxsw %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI10_0, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -1528,11 +1505,10 @@
 ; X86-AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
 ; X86-AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI10_0, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
+; X86-AVX1-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -1542,11 +1518,10 @@
 ; X86-AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI10_0, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
+; X86-AVX2-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X86-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -1572,11 +1547,10 @@
 ; X64-SSE42-NEXT:    pmaxsw %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxsw %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -1587,11 +1561,10 @@
 ; X64-AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
 ; X64-AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovd %xmm0, %eax
+; X64-AVX1-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1601,11 +1574,10 @@
 ; X64-AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
+; X64-AVX2-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1616,11 +1588,10 @@
 ; X64-AVX512-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    xorl $32767, %eax ## imm = 0x7FFF
 ; X64-AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -1696,14 +1667,13 @@
 ; X86-SSE42-NEXT:    pmaxsb %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxsb %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI11_0, %xmm0
+; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE42-NEXT:    psrlw $8, %xmm1
+; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    xorb $127, %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -1714,13 +1684,12 @@
 ; X86-AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
 ; X86-AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI11_0, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX1-NEXT:    xorb $127, %al
 ; X86-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -1730,13 +1699,12 @@
 ; X86-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI11_0, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX2-NEXT:    xorb $127, %al
 ; X86-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -1793,14 +1761,13 @@
 ; X64-SSE42-NEXT:    pmaxsb %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxsb %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
+; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE42-NEXT:    psrlw $8, %xmm1
+; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    xorb $127, %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -1811,13 +1778,12 @@
 ; X64-AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
 ; X64-AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX1-NEXT:    xorb $127, %al
 ; X64-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1827,13 +1793,12 @@
 ; X64-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX2-NEXT:    xorb $127, %al
 ; X64-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1844,13 +1809,12 @@
 ; X64-AVX512-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX512-NEXT:    xorb $127, %al
 ; X64-AVX512-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
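
The signed-max check-line updates above all reflect one codegen change: the vector is still XORed with a splat of 32767 (127 for the byte tests, which first fold odd bytes into even ones via `psrlw $8` + `pminub` since there is no byte `phminposuw`) so that `phminposuw`, which computes an unsigned horizontal minimum, can stand in for a signed maximum — but the second vector XOR that used to undo the bias after the reduction is now folded into a single scalar `xorl $32767` / `xorb $127` on the extracted element, saving a vector constant and a register. A minimal C++ sketch of the underlying identity, assuming a nonempty input (`smax_via_umin` is an illustrative name, not LLVM code):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// x ^ 0x7FFF is an involutive, order-reversing map from signed
// 16-bit order onto unsigned order, so
//   smax(v) == umin(v ^ 0x7FFF) ^ 0x7FFF.
// The trailing XOR can be applied to the extracted scalar (the new
// "xorl $32767, %eax" lines) instead of to the whole vector.
int16_t smax_via_umin(const std::vector<int16_t> &v) {
  uint16_t m = 0xFFFF; // identity element for unsigned min
  for (int16_t x : v)
    m = std::min<uint16_t>(m, static_cast<uint16_t>(x) ^ 0x7FFFu);
  return static_cast<int16_t>(m ^ 0x7FFFu);
}

int main() {
  std::vector<int16_t> v = {-5, 12000, -32768, 32767, 3};
  assert(smax_via_umin(v) == *std::max_element(v.begin(), v.end()));
  return 0;
}
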
diff --git a/test/CodeGen/X86/horizontal-reduce-smin.ll b/test/CodeGen/X86/horizontal-reduce-smin.ll
index 3ac8614..7036d93 100644
--- a/test/CodeGen/X86/horizontal-reduce-smin.ll
+++ b/test/CodeGen/X86/horizontal-reduce-smin.ll
@@ -213,21 +213,19 @@
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
 ; X86-SSE42:       ## %bb.0:
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI2_0, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v8i16:
 ; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpxor LCPI2_0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovd %xmm0, %eax
+; X86-AVX-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-AVX-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX-NEXT:    retl
 ;
@@ -246,21 +244,19 @@
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
 ; X64-SSE42:       ## %bb.0:
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v8i16:
 ; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vmovd %xmm0, %eax
+; X64-AVX-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-AVX-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX-NEXT:    retq
   %1  = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -311,26 +307,24 @@
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i8:
 ; X86-SSE42:       ## %bb.0:
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI3_0, %xmm0
+; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE42-NEXT:    psrlw $8, %xmm1
+; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    xorb $-128, %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v16i8:
 ; X86-AVX:       ## %bb.0:
-; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpxor LCPI3_0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX-NEXT:    xorb $-128, %al
 ; X86-AVX-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX-NEXT:    retl
 ;
@@ -368,26 +362,24 @@
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i8:
 ; X64-SSE42:       ## %bb.0:
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
+; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE42-NEXT:    psrlw $8, %xmm1
+; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    xorb $-128, %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v16i8:
 ; X64-AVX:       ## %bb.0:
-; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX-NEXT:    xorb $-128, %al
 ; X64-AVX-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX-NEXT:    retq
   %1  = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -740,11 +732,10 @@
 ; X86-SSE42-LABEL: test_reduce_v16i16:
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsw %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI6_0, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -752,11 +743,10 @@
 ; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI6_0, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
+; X86-AVX1-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -765,11 +755,10 @@
 ; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI6_0, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
+; X86-AVX2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -791,11 +780,10 @@
 ; X64-SSE42-LABEL: test_reduce_v16i16:
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsw %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -803,11 +791,10 @@
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovd %xmm0, %eax
+; X64-AVX1-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -816,11 +803,10 @@
 ; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
+; X64-AVX2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -829,11 +815,10 @@
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -894,14 +879,13 @@
 ; X86-SSE42-LABEL: test_reduce_v32i8:
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsb %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI7_0, %xmm0
+; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE42-NEXT:    psrlw $8, %xmm1
+; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    xorb $-128, %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -909,13 +893,12 @@
 ; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI7_0, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX1-NEXT:    xorb $-128, %al
 ; X86-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -924,13 +907,12 @@
 ; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI7_0, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX2-NEXT:    xorb $-128, %al
 ; X86-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -975,14 +957,13 @@
 ; X64-SSE42-LABEL: test_reduce_v32i8:
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsb %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
+; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE42-NEXT:    psrlw $8, %xmm1
+; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    xorb $-128, %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -990,13 +971,12 @@
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX1-NEXT:    xorb $-128, %al
 ; X64-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1005,13 +985,12 @@
 ; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX2-NEXT:    xorb $-128, %al
 ; X64-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1020,13 +999,12 @@
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX512-NEXT:    xorb $-128, %al
 ; X64-AVX512-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -1517,11 +1495,10 @@
 ; X86-SSE42-NEXT:    pminsw %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminsw %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminsw %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI10_0, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -1532,11 +1509,10 @@
 ; X86-AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
 ; X86-AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI10_0, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
+; X86-AVX1-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -1546,11 +1522,10 @@
 ; X86-AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI10_0, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
+; X86-AVX2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -1576,11 +1551,10 @@
 ; X64-SSE42-NEXT:    pminsw %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminsw %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminsw %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -1591,11 +1565,10 @@
 ; X64-AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
 ; X64-AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovd %xmm0, %eax
+; X64-AVX1-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1605,11 +1578,10 @@
 ; X64-AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
+; X64-AVX2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1620,11 +1592,10 @@
 ; X64-AVX512-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -1700,14 +1671,13 @@
 ; X86-SSE42-NEXT:    pminsb %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminsb %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminsb %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor LCPI11_0, %xmm0
+; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X86-SSE42-NEXT:    psrlw $8, %xmm1
+; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    xorb $-128, %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -1718,13 +1688,12 @@
 ; X86-AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
 ; X86-AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpxor LCPI11_0, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX1-NEXT:    xorb $-128, %al
 ; X86-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -1734,13 +1703,12 @@
 ; X86-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpxor LCPI11_0, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX2-NEXT:    xorb $-128, %al
 ; X86-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -1797,14 +1765,13 @@
 ; X64-SSE42-NEXT:    pminsb %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminsb %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminsb %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor {{.*}}(%rip), %xmm0
+; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
+; X64-SSE42-NEXT:    psrlw $8, %xmm1
+; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    xorb $-128, %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -1815,13 +1782,12 @@
 ; X64-AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
 ; X64-AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX1-NEXT:    xorb $-128, %al
 ; X64-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1831,13 +1797,12 @@
 ; X64-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX2-NEXT:    xorb $-128, %al
 ; X64-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1848,13 +1813,12 @@
 ; X64-AVX512-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX512-NEXT:    xorb $-128, %al
 ; X64-AVX512-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
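
The signed-min updates mirror the smax case with the opposite bias: XORing each lane with 0x8000 (0x80 for bytes) flips the sign bit, making signed order coincide with unsigned order, so `phminposuw` computes the signed minimum directly and the un-bias step becomes a scalar `xorl $32768` / `xorb $-128` after the extract. A sketch of that identity under the same nonempty-input assumption (illustrative code, not from the patch):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// x ^ 0x8000 flips the sign bit, turning signed 16-bit order into
// unsigned order (and it is its own inverse), so
//   smin(v) == umin(v ^ 0x8000) ^ 0x8000.
// As with smax, the trailing XOR moves onto the extracted scalar
// ("xorl $32768, %eax" / "xorb $-128, %al" in the new check lines).
int16_t smin_via_umin(const std::vector<int16_t> &v) {
  uint16_t m = 0xFFFF; // identity element for unsigned min
  for (int16_t x : v)
    m = std::min<uint16_t>(m, static_cast<uint16_t>(x) ^ 0x8000u);
  return static_cast<int16_t>(m ^ 0x8000u);
}

int main() {
  std::vector<int16_t> v = {-5, 12000, -32768, 32767, 3};
  assert(smin_via_umin(v) == *std::min_element(v.begin(), v.end()));
  return 0;
}
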
diff --git a/test/CodeGen/X86/horizontal-reduce-umax.ll b/test/CodeGen/X86/horizontal-reduce-umax.ll
index 88f6b01..2bc7215 100644
--- a/test/CodeGen/X86/horizontal-reduce-umax.ll
+++ b/test/CodeGen/X86/horizontal-reduce-umax.ll
@@ -240,18 +240,18 @@
 ; X86-SSE2-NEXT:    psrld $16, %xmm1
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    movd %xmm1, %eax
+; X86-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    notl %eax
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -260,8 +260,8 @@
 ; X86-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovd %xmm0, %eax
+; X86-AVX-NEXT:    notl %eax
 ; X86-AVX-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX-NEXT:    retl
 ;
@@ -282,18 +282,18 @@
 ; X64-SSE2-NEXT:    psrld $16, %xmm1
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    movd %xmm1, %eax
+; X64-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    notl %eax
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -302,8 +302,8 @@
 ; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovd %xmm0, %eax
+; X64-AVX1-NEXT:    notl %eax
 ; X64-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT:    retq
 ;
@@ -312,8 +312,8 @@
 ; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
+; X64-AVX2-NEXT:    notl %eax
 ; X64-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT:    retq
 ;
@@ -321,8 +321,8 @@
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    notl %eax
 ; X64-AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT:    retq
   %1  = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -358,13 +358,13 @@
 ; X86-SSE42-LABEL: test_reduce_v16i8:
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X86-SSE42-NEXT:    movdqa %xmm1, %xmm0
+; X86-SSE42-NEXT:    psrlw $8, %xmm0
+; X86-SSE42-NEXT:    pminub %xmm1, %xmm0
+; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    notb %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -372,11 +372,11 @@
 ; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX-NEXT:    notb %al
 ; X86-AVX-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX-NEXT:    retl
 ;
@@ -399,13 +399,13 @@
 ; X64-SSE42-LABEL: test_reduce_v16i8:
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X64-SSE42-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE42-NEXT:    psrlw $8, %xmm0
+; X64-SSE42-NEXT:    pminub %xmm1, %xmm0
+; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    notb %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -413,11 +413,11 @@
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX1-NEXT:    notb %al
 ; X64-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT:    retq
 ;
@@ -425,11 +425,11 @@
 ; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX2-NEXT:    notb %al
 ; X64-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT:    retq
 ;
@@ -439,8 +439,8 @@
 ; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; X64-AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX512-NEXT:    notb %al
 ; X64-AVX512-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT:    retq
   %1  = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -845,8 +845,8 @@
 ; X86-SSE2-NEXT:    psrld $16, %xmm1
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    movd %xmm1, %eax
+; X86-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT:    retl
 ;
@@ -854,10 +854,10 @@
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    notl %eax
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -868,8 +868,8 @@
 ; X86-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
+; X86-AVX1-NEXT:    notl %eax
 ; X86-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -881,8 +881,8 @@
 ; X86-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
+; X86-AVX2-NEXT:    notl %eax
 ; X86-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -908,8 +908,8 @@
 ; X64-SSE2-NEXT:    psrld $16, %xmm1
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    movd %xmm1, %eax
+; X64-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT:    retq
 ;
@@ -917,10 +917,10 @@
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    notl %eax
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -931,8 +931,8 @@
 ; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovd %xmm0, %eax
+; X64-AVX1-NEXT:    notl %eax
 ; X64-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -944,8 +944,8 @@
 ; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
+; X64-AVX2-NEXT:    notl %eax
 ; X64-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -956,8 +956,8 @@
 ; X64-AVX512-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    notl %eax
 ; X64-AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -999,13 +999,13 @@
 ; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxub %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X86-SSE42-NEXT:    movdqa %xmm1, %xmm0
+; X86-SSE42-NEXT:    psrlw $8, %xmm0
+; X86-SSE42-NEXT:    pminub %xmm1, %xmm0
+; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    notb %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -1015,11 +1015,11 @@
 ; X86-AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX1-NEXT:    notb %al
 ; X86-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -1030,11 +1030,11 @@
 ; X86-AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX2-NEXT:    notb %al
 ; X86-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -1060,13 +1060,13 @@
 ; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxub %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X64-SSE42-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE42-NEXT:    psrlw $8, %xmm0
+; X64-SSE42-NEXT:    pminub %xmm1, %xmm0
+; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    notb %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -1076,11 +1076,11 @@
 ; X64-AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX1-NEXT:    notb %al
 ; X64-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1091,11 +1091,11 @@
 ; X64-AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX2-NEXT:    notb %al
 ; X64-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1108,8 +1108,8 @@
 ; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; X64-AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX512-NEXT:    notb %al
 ; X64-AVX512-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -1688,8 +1688,8 @@
 ; X86-SSE2-NEXT:    psrld $16, %xmm1
 ; X86-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; X86-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT:    movd %xmm1, %eax
+; X86-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT:    retl
 ;
@@ -1699,10 +1699,10 @@
 ; X86-SSE42-NEXT:    pmaxuw %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X86-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
+; X86-SSE42-NEXT:    notl %eax
 ; X86-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -1716,8 +1716,8 @@
 ; X86-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
+; X86-AVX1-NEXT:    notl %eax
 ; X86-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -1730,8 +1730,8 @@
 ; X86-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
+; X86-AVX2-NEXT:    notl %eax
 ; X86-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -1761,8 +1761,8 @@
 ; X64-SSE2-NEXT:    psrld $16, %xmm1
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; X64-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT:    movd %xmm1, %eax
+; X64-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT:    retq
 ;
@@ -1772,10 +1772,10 @@
 ; X64-SSE42-NEXT:    pmaxuw %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X64-SSE42-NEXT:    phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
+; X64-SSE42-NEXT:    notl %eax
 ; X64-SSE42-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -1789,8 +1789,8 @@
 ; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovd %xmm0, %eax
+; X64-AVX1-NEXT:    notl %eax
 ; X64-AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1803,8 +1803,8 @@
 ; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
+; X64-AVX2-NEXT:    notl %eax
 ; X64-AVX2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1817,8 +1817,8 @@
 ; X64-AVX512-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    notl %eax
 ; X64-AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
@@ -1867,13 +1867,13 @@
 ; X86-SSE42-NEXT:    pmaxub %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxub %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X86-SSE42-NEXT:    psrlw $8, %xmm2
-; X86-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X86-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X86-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X86-SSE42-NEXT:    movdqa %xmm1, %xmm0
+; X86-SSE42-NEXT:    psrlw $8, %xmm0
+; X86-SSE42-NEXT:    pminub %xmm1, %xmm0
+; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X86-SSE42-NEXT:    notb %al
 ; X86-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT:    retl
 ;
@@ -1886,11 +1886,11 @@
 ; X86-AVX1-NEXT:    vpmaxub %xmm2, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX1-NEXT:    notb %al
 ; X86-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
@@ -1902,11 +1902,11 @@
 ; X86-AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X86-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X86-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X86-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X86-AVX2-NEXT:    notb %al
 ; X86-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT:    vzeroupper
 ; X86-AVX2-NEXT:    retl
@@ -1936,13 +1936,13 @@
 ; X64-SSE42-NEXT:    pmaxub %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxub %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
-; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
-; X64-SSE42-NEXT:    psrlw $8, %xmm2
-; X64-SSE42-NEXT:    pminub %xmm0, %xmm2
-; X64-SSE42-NEXT:    phminposuw %xmm2, %xmm0
-; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
+; X64-SSE42-NEXT:    pxor %xmm0, %xmm1
+; X64-SSE42-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE42-NEXT:    psrlw $8, %xmm0
+; X64-SSE42-NEXT:    pminub %xmm1, %xmm0
+; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; X64-SSE42-NEXT:    notb %al
 ; X64-SSE42-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT:    retq
 ;
@@ -1955,11 +1955,11 @@
 ; X64-AVX1-NEXT:    vpmaxub %xmm2, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX1-NEXT:    notb %al
 ; X64-AVX1-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1971,11 +1971,11 @@
 ; X64-AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; X64-AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; X64-AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX2-NEXT:    notb %al
 ; X64-AVX2-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
@@ -1990,8 +1990,8 @@
 ; X64-AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; X64-AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; X64-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; X64-AVX512-NEXT:    notb %al
 ; X64-AVX512-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT:    vzeroupper
 ; X64-AVX512-NEXT:    retq
diff --git a/test/CodeGen/X86/horizontal-reduce-umin.ll b/test/CodeGen/X86/horizontal-reduce-umin.ll
index 482d082..e4b9223 100644
--- a/test/CodeGen/X86/horizontal-reduce-umin.ll
+++ b/test/CodeGen/X86/horizontal-reduce-umin.ll
@@ -242,8 +242,8 @@
 ; X86-SSE2-NEXT:    psrld $16, %xmm1
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    pminsw %xmm0, %xmm1
-; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    movd %xmm1, %eax
+; X86-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT:    retl
 ;
@@ -278,8 +278,8 @@
 ; X64-SSE2-NEXT:    psrld $16, %xmm1
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    pminsw %xmm0, %xmm1
-; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    movd %xmm1, %eax
+; X64-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT:    retq
 ;
@@ -785,8 +785,8 @@
 ; X86-SSE2-NEXT:    psrld $16, %xmm1
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    pminsw %xmm0, %xmm1
-; X86-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT:    movd %xmm1, %eax
+; X86-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT:    retl
 ;
@@ -839,8 +839,8 @@
 ; X64-SSE2-NEXT:    psrld $16, %xmm1
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    pminsw %xmm0, %xmm1
-; X64-SSE2-NEXT:    pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT:    movd %xmm1, %eax
+; X64-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT:    retq
 ;
@@ -1592,8 +1592,8 @@
 ; X86-SSE2-NEXT:    psrld $16, %xmm1
 ; X86-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT:    pminsw %xmm0, %xmm1
-; X86-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT:    movd %xmm1, %eax
+; X86-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X86-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT:    retl
 ;
@@ -1656,8 +1656,8 @@
 ; X64-SSE2-NEXT:    psrld $16, %xmm1
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT:    pminsw %xmm0, %xmm1
-; X64-SSE2-NEXT:    pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT:    movd %xmm1, %eax
+; X64-SSE2-NEXT:    xorl $32768, %eax ## imm = 0x8000
 ; X64-SSE2-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/known-bits-vector.ll b/test/CodeGen/X86/known-bits-vector.ll
index 1107baa..df31b22 100644
--- a/test/CodeGen/X86/known-bits-vector.ll
+++ b/test/CodeGen/X86/known-bits-vector.ll
@@ -5,18 +5,14 @@
 define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_extract_sext:
 ; X32:       # %bb.0:
-; X32-NEXT:    movl $15, %eax
-; X32-NEXT:    vmovd %eax, %xmm1
-; X32-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vpextrw $0, %xmm0, %eax
+; X32-NEXT:    vmovd %xmm0, %eax
+; X32-NEXT:    andl $15, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_extract_sext:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl $15, %eax
-; X64-NEXT:    vmovd %eax, %xmm1
-; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpextrw $0, %xmm0, %eax
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    andl $15, %eax
 ; X64-NEXT:    retq
   %1 = and <8 x i16> %a0, <i16 15, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %2 = extractelement <8 x i16> %1, i32 0
@@ -38,8 +34,8 @@
 ;
 ; X64-LABEL: knownbits_mask_extract_uitofp:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; X64-NEXT:    vmovq %xmm0, %rax
+; X64-NEXT:    movzwl %ax, %eax
 ; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = and <2 x i64> %a0, <i64 65535, i64 -1>
@@ -493,14 +489,12 @@
 define <4 x i32> @knownbits_umax_shuffle_ashr(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_umax_shuffle_ashr:
 ; X32:       # %bb.0:
-; X32-NEXT:    vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; X32-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_umax_shuffle_ashr:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpmaxud {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; X64-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> <i32 65535, i32 -1, i32 -1, i32 262143>)
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 2>
diff --git a/test/CodeGen/X86/known-bits.ll b/test/CodeGen/X86/known-bits.ll
index 2ff0939..8f3b983 100644
--- a/test/CodeGen/X86/known-bits.ll
+++ b/test/CodeGen/X86/known-bits.ll
@@ -298,3 +298,36 @@
 declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
 declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
 declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+
+define i32 @knownbits_fshl(i32 %a0) nounwind {
+; X32-LABEL: knownbits_fshl:
+; X32:       # %bb.0:
+; X32-NEXT:    movl $3, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_fshl:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $3, %eax
+; X64-NEXT:    retq
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 -1, i32 5)
+  %2 = and i32 %1, 3
+  ret i32 %2
+}
+
+define i32 @knownbits_fshr(i32 %a0) nounwind {
+; X32-LABEL: knownbits_fshr:
+; X32:       # %bb.0:
+; X32-NEXT:    movl $3, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_fshr:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $3, %eax
+; X64-NEXT:    retq
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 -1, i32 5)
+  %2 = and i32 %1, 3
+  ret i32 %2
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32) nounwind readnone
+declare i32 @llvm.fshr.i32(i32, i32, i32) nounwind readnone
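+
+; Why these fold to a constant (per the LangRef funnel-shift semantics): for
+; i32, fshl(a, b, 5) == (a << 5) | (b >> 27) and fshr(a, b, 5) == (a << 27) |
+; (b >> 5). With b == -1, the low result bits are all ones in both cases, so
+; known-bits analysis proves '%1 & 3' is the constant 3, matching the
+; 'movl $3' checks above.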
diff --git a/test/CodeGen/X86/known-signbits-vector.ll b/test/CodeGen/X86/known-signbits-vector.ll
index 06ad258..3defc21 100644
--- a/test/CodeGen/X86/known-signbits-vector.ll
+++ b/test/CodeGen/X86/known-signbits-vector.ll
@@ -74,8 +74,8 @@
 ;
 ; X64-LABEL: signbits_ashr_extract_sitofp_0:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpsrlq $32, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
+; X64-NEXT:    shrq $32, %rax
 ; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 32, i64 32>
@@ -88,9 +88,7 @@
 ; X32-LABEL: signbits_ashr_extract_sitofp_1:
 ; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:    vpsrlq $63, %xmm0, %xmm1
 ; X32-NEXT:    vpsrlq $32, %xmm0, %xmm0
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,32768,0,0,1,0,0,0]
 ; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
@@ -103,14 +101,9 @@
 ;
 ; X64-LABEL: signbits_ashr_extract_sitofp_1:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpsrlq $63, %xmm0, %xmm1
-; X64-NEXT:    vpsrlq $32, %xmm0, %xmm0
-; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [2147483648,1]
-; X64-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
+; X64-NEXT:    shrq $32, %rax
+; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 32, i64 63>
   %2 = extractelement <2 x i64> %1, i32 0
@@ -122,9 +115,7 @@
 ; X32-LABEL: signbits_ashr_shl_extract_sitofp:
 ; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:    vpsrlq $60, %xmm0, %xmm1
 ; X32-NEXT:    vpsrlq $61, %xmm0, %xmm0
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,0,0,0,8,0,0,0]
 ; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
@@ -138,15 +129,10 @@
 ;
 ; X64-LABEL: signbits_ashr_shl_extract_sitofp:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpsrlq $60, %xmm0, %xmm1
-; X64-NEXT:    vpsrlq $61, %xmm0, %xmm0
-; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,8]
-; X64-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpsllq $20, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
+; X64-NEXT:    sarq $61, %rax
+; X64-NEXT:    shll $20, %eax
+; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = shl <2 x i64> %1, <i64 20, i64 16>
@@ -176,10 +162,8 @@
 ; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
 ; X64:       # %bb.0:
 ; X64-NEXT:    sarq $30, %rdi
-; X64-NEXT:    vmovq %rdi, %xmm0
-; X64-NEXT:    vpsrlq $3, %xmm0, %xmm0
-; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; X64-NEXT:    shrq $3, %rdi
+; X64-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr i64 %a0, 30
   %2 = insertelement <2 x i64> undef, i64 %1, i32 0
@@ -249,9 +233,7 @@
 ; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
 ; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
-; X32-NEXT:    vpsrlq $60, %xmm0, %xmm1
 ; X32-NEXT:    vpsrlq $61, %xmm0, %xmm0
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,0,0,0,8,0,0,0]
 ; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
@@ -266,9 +248,7 @@
 ;
 ; X64-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpsrlq $60, %xmm0, %xmm1
 ; X64-NEXT:    vpsrlq $61, %xmm0, %xmm0
-; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,8]
 ; X64-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
@@ -346,15 +326,11 @@
 ; X32-NEXT:    vpmovsxdq 16(%ebp), %xmm3
 ; X32-NEXT:    vpmovsxdq 8(%ebp), %xmm4
 ; X32-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; X32-NEXT:    vpsrlq $63, %xmm5, %xmm6
 ; X32-NEXT:    vpsrlq $33, %xmm5, %xmm5
-; X32-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,16384,0,0,1,0,0,0]
 ; X32-NEXT:    vpxor %xmm6, %xmm5, %xmm5
 ; X32-NEXT:    vpsubq %xmm6, %xmm5, %xmm5
-; X32-NEXT:    vpsrlq $63, %xmm2, %xmm7
 ; X32-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; X32-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4,5,6,7]
 ; X32-NEXT:    vpxor %xmm6, %xmm2, %xmm2
 ; X32-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; X32-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
@@ -377,15 +353,11 @@
 ; X64-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; X64-NEXT:    vpsrlq $63, %xmm4, %xmm5
 ; X64-NEXT:    vpsrlq $33, %xmm4, %xmm4
-; X64-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4,5,6,7]
 ; X64-NEXT:    vmovdqa {{.*#+}} xmm5 = [1073741824,1]
 ; X64-NEXT:    vpxor %xmm5, %xmm4, %xmm4
 ; X64-NEXT:    vpsubq %xmm5, %xmm4, %xmm4
-; X64-NEXT:    vpsrlq $63, %xmm2, %xmm6
 ; X64-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; X64-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
 ; X64-NEXT:    vpxor %xmm5, %xmm2, %xmm2
 ; X64-NEXT:    vpsubq %xmm5, %xmm2, %xmm2
 ; X64-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
@@ -413,3 +385,51 @@
   %6 = sitofp <4 x i64> %5 to <4 x float>
   ret <4 x float> %6
 }
+
+; Make sure we can propagate sign bit information into the second basic block
+; so we avoid having to shift bit 0 into bit 7 for each element due to
+; v32i1->v32i8 promotion and the splitting of v32i8 into 2xv16i8. This requires
+; ComputeNumSignBits handling for insert_subvector.
+define void @cross_bb_signbits_insert_subvec(<32 x i8>* %ptr, <32 x i8> %x, <32 x i8> %z) {
+; X32-LABEL: cross_bb_signbits_insert_subvec:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; X32-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; X32-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
+; X32-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-NEXT:    vandnps %ymm1, %ymm0, %ymm1
+; X32-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; X32-NEXT:    vmovaps %ymm0, (%eax)
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
+;
+; X64-LABEL: cross_bb_signbits_insert_subvec:
+; X64:       # %bb.0:
+; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; X64-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; X64-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
+; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-NEXT:    vandnps %ymm1, %ymm0, %ymm1
+; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; X64-NEXT:    vmovaps %ymm0, (%rdi)
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+  %a = icmp eq <32 x i8> %x, zeroinitializer
+  %b = icmp eq <32 x i8> %x, zeroinitializer
+  %c = and <32 x i1> %a, %b
+  br label %block
+
+block:
+  %d = select <32 x i1> %c, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> %z
+  store <32 x i8> %d, <32 x i8>* %ptr, align 32
+  br label %exit
+
+exit:
+  ret void
+}
+
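+; After the pcmpeqb compares, every byte lane is already all-ones or all-zero,
+; so the select in the second block lowers to a plain vandnps/vandps/vorps
+; blend; without the insert_subvector handling described above, each lane's
+; bit 0 would first have to be shifted back into bit 7 to rebuild the mask.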
diff --git a/test/CodeGen/X86/late-remat-update-2.mir b/test/CodeGen/X86/late-remat-update-2.mir
new file mode 100644
index 0000000..c6052df
--- /dev/null
+++ b/test/CodeGen/X86/late-remat-update-2.mir
@@ -0,0 +1,63 @@
+# RUN: llc -mtriple=x86_64-- -run-pass=simple-register-coalescing -run-pass=regallocbasic -run-pass=virtregrewriter -late-remat-update-threshold=0 %s -o - | FileCheck %s
+#
+# PR40061: %t2 = %t1 is rematerialized, and %t1 is added to the toBeUpdated
+# set to postpone its live interval update. After the rematerialization, the
+# live interval of %t1 is larger than necessary. Then %t1 is merged into %t3
+# and removed. After the merge, %t3 has a live interval that is larger than
+# necessary. Because %t3 is not in the toBeUpdated set, its live interval is
+# not updated after register coalescing, which breaks assumptions made by
+# regalloc. This test checks that the live interval is up-to-date after
+# register coalescing.
+#
+# To keep the test effective even in builds without assertions, we achieve
+# the test goal without dumping the regalloc trace. We add a strong hint to
+# allocate both %t1 and %t2 to the $rax register. If %t1's live interval is
+# not shrunk properly after register coalescing, %t1 and %t2 cannot both be
+# allocated to $rax because of interference, and we use that fact to achieve
+# the test goal. Note that this assumption only holds when we use
+# regallocbasic instead of greedy, because greedy can update live intervals
+# in the process of splitting.
+#
+# CHECK-LABEL: name: foo
+# CHECK: bb.0.entry:
+# CHECK: $rax = MOV64ri32 -11
+# CHECK: bb.1:
+# CHECK: $rax = MOV64ri32 -11
+# CHECK: $rax = ADD64ri8 killed renamable $rax, 5
+# CHECK: CMP64ri8 renamable $rax
+# CHECK: RET 0, $rax
+# CHECK: bb.2:
+# CHECK: $rax = ADD64ri8 killed renamable $rax, 10
+# CHECK: bb.3:
+# CHECK: RET 0, $rax
+---
+name:            foo
+body:             |
+  bb.0.entry:
+    successors: %bb.1(0x15555555), %bb.2(0x6aaaaaab)
+
+    %t1:gr64 = MOV64ri32 -11
+    CMP64ri8 %t1, 1, implicit-def $eflags
+    JE_1 %bb.2, implicit killed $eflags
+    JMP_1 %bb.1
+
+  bb.1:
+    successors: %bb.1(0x80000000)
+
+    %t2:gr64 = COPY %t1
+    %t2:gr64 = ADD64ri8 %t2, 5, implicit-def $eflags
+    $rax = COPY %t2
+    CMP64ri8 %t2, 1, implicit-def $eflags
+    JE_1 %bb.1, implicit killed $eflags
+    RET 0, $rax
+
+  bb.2:
+    successors: %bb.3(0x80000000)
+    %t3:gr64 = COPY %t1
+    %t3:gr64 = ADD64ri8 %t3, 10, implicit-def $eflags
+
+  bb.3:
+    $rax = COPY %t3
+    RET 0, $rax
+
+...
diff --git a/test/CodeGen/X86/load-combine.ll b/test/CodeGen/X86/load-combine.ll
index 3934bf5..8c69dba 100644
--- a/test/CodeGen/X86/load-combine.ll
+++ b/test/CodeGen/X86/load-combine.ll
@@ -915,7 +915,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movl 12(%ecx,%eax), %eax
+; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_base_offset_index:
@@ -960,7 +960,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movl 13(%ecx,%eax), %eax
+; CHECK-NEXT:    movl 13(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_base_offset_index_2:
@@ -1016,7 +1016,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movl 12(%ecx,%eax), %eax
+; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_zaext_loads:
@@ -1072,7 +1072,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movl 12(%ecx,%eax), %eax
+; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_zsext_loads:
diff --git a/test/CodeGen/X86/madd.ll b/test/CodeGen/X86/madd.ll
index 89dae52..369108e 100644
--- a/test/CodeGen/X86/madd.ll
+++ b/test/CodeGen/X86/madd.ll
@@ -427,8 +427,7 @@
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vpaddd %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm5, %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm2, %xmm5, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm2, %xmm4, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
@@ -582,16 +581,14 @@
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    psrad $24, %xmm1
 ; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    psrad $24, %xmm2
+; SSE2-NEXT:    psraw $8, %xmm1
+; SSE2-NEXT:    psraw $8, %xmm2
 ; SSE2-NEXT:    pmullw %xmm1, %xmm2
-; SSE2-NEXT:    pslld $16, %xmm2
-; SSE2-NEXT:    psrad $16, %xmm2
-; SSE2-NEXT:    paddd %xmm2, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    paddd %xmm1, %xmm0
 ; SSE2-NEXT:    addq $16, %rcx
 ; SSE2-NEXT:    cmpq %rcx, %rax
 ; SSE2-NEXT:    jne .LBB4_1
@@ -667,9 +664,9 @@
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm2
 ; SSE2-NEXT:    movq {{.*#+}} xmm3 = mem[0],zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psraw $8, %xmm2
 ; SSE2-NEXT:    psraw $8, %xmm3
 ; SSE2-NEXT:    pmaddwd %xmm2, %xmm3
 ; SSE2-NEXT:    paddd %xmm3, %xmm1
@@ -780,22 +777,20 @@
 ; SSE2-NEXT:    .p2align 4, 0x90
 ; SSE2-NEXT:  .LBB6_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT:    movq {{.*#+}} xmm3 = mem[0],zero
+; SSE2-NEXT:    movdqu (%rdi,%rcx), %xmm3
+; SSE2-NEXT:    movdqu (%rsi,%rcx), %xmm4
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
+; SSE2-NEXT:    psraw $8, %xmm5
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE2-NEXT:    psraw $8, %xmm6
+; SSE2-NEXT:    pmaddwd %xmm5, %xmm6
+; SSE2-NEXT:    paddd %xmm6, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    psraw $8, %xmm3
-; SSE2-NEXT:    movq {{.*#+}} xmm4 = mem[0],zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    psraw $8, %xmm4
-; SSE2-NEXT:    movq {{.*#+}} xmm5 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm5
-; SSE2-NEXT:    pmaddwd %xmm3, %xmm5
-; SSE2-NEXT:    paddd %xmm5, %xmm2
-; SSE2-NEXT:    movq {{.*#+}} xmm3 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm3
-; SSE2-NEXT:    pmaddwd %xmm4, %xmm3
-; SSE2-NEXT:    paddd %xmm3, %xmm1
+; SSE2-NEXT:    pmaddwd %xmm3, %xmm4
+; SSE2-NEXT:    paddd %xmm4, %xmm2
 ; SSE2-NEXT:    addq $16, %rcx
 ; SSE2-NEXT:    cmpq %rcx, %rax
 ; SSE2-NEXT:    jne .LBB6_1
@@ -941,45 +936,41 @@
 ; SSE2-NEXT:    movl %edx, %eax
 ; SSE2-NEXT:    pxor %xmm8, %xmm8
 ; SSE2-NEXT:    xorl %ecx, %ecx
-; SSE2-NEXT:    pxor %xmm9, %xmm9
+; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    .p2align 4, 0x90
 ; SSE2-NEXT:  .LBB7_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT:    movq {{.*#+}} xmm5 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    movdqu (%rdi,%rcx), %xmm10
+; SSE2-NEXT:    movdqu 16(%rdi,%rcx), %xmm7
+; SSE2-NEXT:    movdqu (%rsi,%rcx), %xmm9
+; SSE2-NEXT:    movdqu 16(%rsi,%rcx), %xmm0
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
 ; SSE2-NEXT:    psraw $8, %xmm5
-; SSE2-NEXT:    movq {{.*#+}} xmm6 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
 ; SSE2-NEXT:    psraw $8, %xmm6
-; SSE2-NEXT:    movq {{.*#+}} xmm7 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm7
-; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    pmaddwd %xmm5, %xmm6
+; SSE2-NEXT:    paddd %xmm6, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; SSE2-NEXT:    psraw $8, %xmm5
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    psraw $8, %xmm0
-; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm2
-; SSE2-NEXT:    pmaddwd %xmm5, %xmm2
-; SSE2-NEXT:    paddd %xmm2, %xmm9
-; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm2
-; SSE2-NEXT:    pmaddwd %xmm6, %xmm2
-; SSE2-NEXT:    paddd %xmm2, %xmm4
-; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm2
-; SSE2-NEXT:    pmaddwd %xmm7, %xmm2
-; SSE2-NEXT:    paddd %xmm2, %xmm1
-; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    psraw $8, %xmm2
-; SSE2-NEXT:    pmaddwd %xmm0, %xmm2
-; SSE2-NEXT:    paddd %xmm2, %xmm3
+; SSE2-NEXT:    pmaddwd %xmm5, %xmm0
+; SSE2-NEXT:    paddd %xmm0, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15]
+; SSE2-NEXT:    psraw $8, %xmm0
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm9[8],xmm5[9],xmm9[9],xmm5[10],xmm9[10],xmm5[11],xmm9[11],xmm5[12],xmm9[12],xmm5[13],xmm9[13],xmm5[14],xmm9[14],xmm5[15],xmm9[15]
+; SSE2-NEXT:    psraw $8, %xmm5
+; SSE2-NEXT:    pmaddwd %xmm0, %xmm5
+; SSE2-NEXT:    paddd %xmm5, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; SSE2-NEXT:    psraw $8, %xmm0
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; SSE2-NEXT:    psraw $8, %xmm5
+; SSE2-NEXT:    pmaddwd %xmm0, %xmm5
+; SSE2-NEXT:    paddd %xmm5, %xmm2
 ; SSE2-NEXT:    addq $32, %rcx
 ; SSE2-NEXT:    cmpq %rcx, %rax
 ; SSE2-NEXT:    jne .LBB7_1
@@ -987,10 +978,10 @@
 ; SSE2-NEXT:    paddd %xmm8, %xmm4
 ; SSE2-NEXT:    paddd %xmm8, %xmm3
 ; SSE2-NEXT:    paddd %xmm4, %xmm3
-; SSE2-NEXT:    paddd %xmm8, %xmm9
+; SSE2-NEXT:    paddd %xmm8, %xmm2
 ; SSE2-NEXT:    paddd %xmm8, %xmm1
 ; SSE2-NEXT:    paddd %xmm3, %xmm1
-; SSE2-NEXT:    paddd %xmm9, %xmm1
+; SSE2-NEXT:    paddd %xmm2, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    paddd %xmm1, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -1036,8 +1027,7 @@
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vpaddd %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm5, %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm2, %xmm5, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm2, %xmm4, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll
index 8212053..5e6d88a 100644
--- a/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/test/CodeGen/X86/masked_gather_scatter.ll
@@ -1743,7 +1743,7 @@
 ; SKX-NEXT:    vmovq %xmm0, %rax
 ; SKX-NEXT:    vpinsrd $0, (%rax), %xmm3, %xmm3
 ; SKX-NEXT:  .LBB31_2: # %else
-; SKX-NEXT:    kshiftrw $1, %k0, %k1
+; SKX-NEXT:    kshiftrb $1, %k0, %k1
 ; SKX-NEXT:    kmovw %k1, %eax
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    je .LBB31_4
@@ -1751,7 +1751,7 @@
 ; SKX-NEXT:    vpextrq $1, %xmm0, %rax
 ; SKX-NEXT:    vpinsrd $1, (%rax), %xmm3, %xmm3
 ; SKX-NEXT:  .LBB31_4: # %else2
-; SKX-NEXT:    kshiftrw $2, %k0, %k0
+; SKX-NEXT:    kshiftrb $2, %k0, %k0
 ; SKX-NEXT:    kmovw %k0, %eax
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    je .LBB31_6
@@ -1781,7 +1781,7 @@
 ; SKX_32-NEXT:    vmovd %xmm1, %eax
 ; SKX_32-NEXT:    vpinsrd $0, (%eax), %xmm0, %xmm0
 ; SKX_32-NEXT:  .LBB31_2: # %else
-; SKX_32-NEXT:    kshiftrw $1, %k0, %k1
+; SKX_32-NEXT:    kshiftrb $1, %k0, %k1
 ; SKX_32-NEXT:    kmovw %k1, %eax
 ; SKX_32-NEXT:    testb $1, %al
 ; SKX_32-NEXT:    je .LBB31_4
@@ -1789,7 +1789,7 @@
 ; SKX_32-NEXT:    vpextrd $1, %xmm1, %eax
 ; SKX_32-NEXT:    vpinsrd $1, (%eax), %xmm0, %xmm0
 ; SKX_32-NEXT:  .LBB31_4: # %else2
-; SKX_32-NEXT:    kshiftrw $2, %k0, %k0
+; SKX_32-NEXT:    kshiftrb $2, %k0, %k0
 ; SKX_32-NEXT:    kmovw %k0, %eax
 ; SKX_32-NEXT:    testb $1, %al
 ; SKX_32-NEXT:    je .LBB31_6
diff --git a/test/CodeGen/X86/memcmp-optsize.ll b/test/CodeGen/X86/memcmp-optsize.ll
index 7683d1a..d2b390f 100644
--- a/test/CodeGen/X86/memcmp-optsize.ll
+++ b/test/CodeGen/X86/memcmp-optsize.ll
@@ -639,17 +639,33 @@
 }
 
 define i1 @length24_eq(i8* %x, i8* %y) nounwind optsize {
-; X86-LABEL: length24_eq:
-; X86:       # %bb.0:
-; X86-NEXT:    pushl $0
-; X86-NEXT:    pushl $24
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    calll memcmp
-; X86-NEXT:    addl $16, %esp
-; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    sete %al
-; X86-NEXT:    retl
+; X86-NOSSE-LABEL: length24_eq:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    pushl $24
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    calll memcmp
+; X86-NOSSE-NEXT:    addl $16, %esp
+; X86-NOSSE-NEXT:    testl %eax, %eax
+; X86-NOSSE-NEXT:    sete %al
+; X86-NOSSE-NEXT:    retl
+;
+; X86-SSE2-LABEL: length24_eq:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT:    movdqu 8(%ecx), %xmm1
+; X86-SSE2-NEXT:    movdqu (%eax), %xmm2
+; X86-SSE2-NEXT:    pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT:    movdqu 8(%eax), %xmm0
+; X86-SSE2-NEXT:    pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT:    pand %xmm2, %xmm0
+; X86-SSE2-NEXT:    pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT:    sete %al
+; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq:
 ; X64-SSE2:       # %bb.0:
@@ -683,17 +699,30 @@
 }
 
 define i1 @length24_eq_const(i8* %X) nounwind optsize {
-; X86-LABEL: length24_eq_const:
-; X86:       # %bb.0:
-; X86-NEXT:    pushl $0
-; X86-NEXT:    pushl $24
-; X86-NEXT:    pushl $.L.str
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    calll memcmp
-; X86-NEXT:    addl $16, %esp
-; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    setne %al
-; X86-NEXT:    retl
+; X86-NOSSE-LABEL: length24_eq_const:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    pushl $24
+; X86-NOSSE-NEXT:    pushl $.L.str
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    calll memcmp
+; X86-NOSSE-NEXT:    addl $16, %esp
+; X86-NOSSE-NEXT:    testl %eax, %eax
+; X86-NOSSE-NEXT:    setne %al
+; X86-NOSSE-NEXT:    retl
+;
+; X86-SSE2-LABEL: length24_eq_const:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
+; X86-SSE2-NEXT:    movdqu 8(%eax), %xmm1
+; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT:    pand %xmm1, %xmm0
+; X86-SSE2-NEXT:    pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT:    setne %al
+; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq_const:
 ; X64-SSE2:       # %bb.0:
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 0bb46ee..371c168 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -362,24 +362,24 @@
 define i1 @length7_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length7_eq:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl $0
-; X86-NEXT:    pushl $7
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    calll memcmp
-; X86-NEXT:    addl $16, %esp
-; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %edx
+; X86-NEXT:    movl 3(%ecx), %ecx
+; X86-NEXT:    xorl (%eax), %edx
+; X86-NEXT:    xorl 3(%eax), %ecx
+; X86-NEXT:    orl %edx, %ecx
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length7_eq:
 ; X64:       # %bb.0:
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    movl $7, %edx
-; X64-NEXT:    callq memcmp
-; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    movl 3(%rdi), %ecx
+; X64-NEXT:    xorl (%rsi), %eax
+; X64-NEXT:    xorl 3(%rsi), %ecx
+; X64-NEXT:    orl %eax, %ecx
 ; X64-NEXT:    setne %al
-; X64-NEXT:    popq %rcx
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 7) nounwind
   %c = icmp ne i32 %m, 0
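+
+; The 7-byte compare above is expanded using the overlapping-loads trick: the
+; dword at offset 0 and the dword at offset 3 together cover all 7 bytes
+; (overlapping at byte 3), so XORing each pair of loads and ORing the results
+; yields zero exactly when the buffers are equal.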
@@ -548,12 +548,12 @@
 ;
 ; X64-LABEL: length11_eq:
 ; X64:       # %bb.0:
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    movl $11, %edx
-; X64-NEXT:    callq memcmp
-; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    movq 3(%rdi), %rcx
+; X64-NEXT:    xorq (%rsi), %rax
+; X64-NEXT:    xorq 3(%rsi), %rcx
+; X64-NEXT:    orq %rax, %rcx
 ; X64-NEXT:    sete %al
-; X64-NEXT:    popq %rcx
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 11) nounwind
   %c = icmp eq i32 %m, 0
@@ -640,12 +640,12 @@
 ;
 ; X64-LABEL: length13_eq:
 ; X64:       # %bb.0:
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    movl $13, %edx
-; X64-NEXT:    callq memcmp
-; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    movq 5(%rdi), %rcx
+; X64-NEXT:    xorq (%rsi), %rax
+; X64-NEXT:    xorq 5(%rsi), %rcx
+; X64-NEXT:    orq %rax, %rcx
 ; X64-NEXT:    sete %al
-; X64-NEXT:    popq %rcx
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 13) nounwind
   %c = icmp eq i32 %m, 0
@@ -667,12 +667,12 @@
 ;
 ; X64-LABEL: length14_eq:
 ; X64:       # %bb.0:
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    movl $14, %edx
-; X64-NEXT:    callq memcmp
-; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    movq 6(%rdi), %rcx
+; X64-NEXT:    xorq (%rsi), %rax
+; X64-NEXT:    xorq 6(%rsi), %rcx
+; X64-NEXT:    orq %rax, %rcx
 ; X64-NEXT:    sete %al
-; X64-NEXT:    popq %rcx
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 14) nounwind
   %c = icmp eq i32 %m, 0
@@ -694,12 +694,12 @@
 ;
 ; X64-LABEL: length15_eq:
 ; X64:       # %bb.0:
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    movl $15, %edx
-; X64-NEXT:    callq memcmp
-; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    movq 7(%rdi), %rcx
+; X64-NEXT:    xorq (%rsi), %rax
+; X64-NEXT:    xorq 7(%rsi), %rcx
+; X64-NEXT:    orq %rax, %rcx
 ; X64-NEXT:    sete %al
-; X64-NEXT:    popq %rcx
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 15) nounwind
   %c = icmp eq i32 %m, 0
@@ -885,17 +885,45 @@
 }
 
 define i1 @length24_eq(i8* %x, i8* %y) nounwind {
-; X86-LABEL: length24_eq:
-; X86:       # %bb.0:
-; X86-NEXT:    pushl $0
-; X86-NEXT:    pushl $24
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    calll memcmp
-; X86-NEXT:    addl $16, %esp
-; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    sete %al
-; X86-NEXT:    retl
+; X86-NOSSE-LABEL: length24_eq:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    pushl $24
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    calll memcmp
+; X86-NOSSE-NEXT:    addl $16, %esp
+; X86-NOSSE-NEXT:    testl %eax, %eax
+; X86-NOSSE-NEXT:    sete %al
+; X86-NOSSE-NEXT:    retl
+;
+; X86-SSE1-LABEL: length24_eq:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl $0
+; X86-SSE1-NEXT:    pushl $24
+; X86-SSE1-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT:    calll memcmp
+; X86-SSE1-NEXT:    addl $16, %esp
+; X86-SSE1-NEXT:    testl %eax, %eax
+; X86-SSE1-NEXT:    sete %al
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE2-LABEL: length24_eq:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT:    movdqu 8(%ecx), %xmm1
+; X86-SSE2-NEXT:    movdqu (%eax), %xmm2
+; X86-SSE2-NEXT:    pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT:    movdqu 8(%eax), %xmm0
+; X86-SSE2-NEXT:    pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT:    pand %xmm2, %xmm0
+; X86-SSE2-NEXT:    pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT:    sete %al
+; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq:
 ; X64-SSE2:       # %bb.0:
@@ -929,17 +957,42 @@
 }
 
 define i1 @length24_eq_const(i8* %X) nounwind {
-; X86-LABEL: length24_eq_const:
-; X86:       # %bb.0:
-; X86-NEXT:    pushl $0
-; X86-NEXT:    pushl $24
-; X86-NEXT:    pushl $.L.str
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    calll memcmp
-; X86-NEXT:    addl $16, %esp
-; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    setne %al
-; X86-NEXT:    retl
+; X86-NOSSE-LABEL: length24_eq_const:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    pushl $24
+; X86-NOSSE-NEXT:    pushl $.L.str
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    calll memcmp
+; X86-NOSSE-NEXT:    addl $16, %esp
+; X86-NOSSE-NEXT:    testl %eax, %eax
+; X86-NOSSE-NEXT:    setne %al
+; X86-NOSSE-NEXT:    retl
+;
+; X86-SSE1-LABEL: length24_eq_const:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl $0
+; X86-SSE1-NEXT:    pushl $24
+; X86-SSE1-NEXT:    pushl $.L.str
+; X86-SSE1-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT:    calll memcmp
+; X86-SSE1-NEXT:    addl $16, %esp
+; X86-SSE1-NEXT:    testl %eax, %eax
+; X86-SSE1-NEXT:    setne %al
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE2-LABEL: length24_eq_const:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
+; X86-SSE2-NEXT:    movdqu 8(%eax), %xmm1
+; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT:    pand %xmm1, %xmm0
+; X86-SSE2-NEXT:    pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT:    setne %al
+; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq_const:
 ; X64-SSE2:       # %bb.0:
diff --git a/test/CodeGen/X86/movddup-load-fold.ll b/test/CodeGen/X86/movddup-load-fold.ll
new file mode 100644
index 0000000..f1af6e9
--- /dev/null
+++ b/test/CodeGen/X86/movddup-load-fold.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse4.1   | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx      | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx2     | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+
+; Test an isel pattern for a splatted VZLOAD.
+
+define <4 x float> @movddup_load_fold(float %x, float %y) {
+; SSE-LABEL: movddup_load_fold:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
+; SSE-NEXT:    retl
+;
+; AVX-LABEL: movddup_load_fold:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT:    retl
+  %i0 = insertelement <4 x float> zeroinitializer, float %x, i32 0
+  %i1 = insertelement <4 x float> %i0, float %y, i32 1
+  %dup = shufflevector <4 x float> %i1, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+  ret <4 x float> %dup
+}
+
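+; Note: on i686 the two float arguments arrive in adjacent stack slots, so
+; the <0,1,0,1> splat of the inserted lanes can be matched as one 64-bit
+; broadcast load from that memory (the 'mem[0,0]' movddup forms above) rather
+; than being assembled in registers.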
diff --git a/test/CodeGen/X86/movmsk-cmp.ll b/test/CodeGen/X86/movmsk-cmp.ll
index 452676e..bc16d8c 100644
--- a/test/CodeGen/X86/movmsk-cmp.ll
+++ b/test/CodeGen/X86/movmsk-cmp.ll
@@ -22,13 +22,9 @@
 ;
 ; KNL-LABEL: allones_v16i8_sign:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kortestw %k0, %k0
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    vpmovmskb %xmm0, %eax
+; KNL-NEXT:    cmpw $-1, %ax
+; KNL-NEXT:    sete %al
 ; KNL-NEXT:    retq
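+; (pmovmskb packs each byte lane's sign bit into a scalar mask, so comparing
+; the mask against -1, or testing it for zero, answers the all-ones or
+; all-zeros question directly, replacing the old widen-to-zmm, vptestmd and
+; kortest sequence.)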
 ;
 ; SKX-LABEL: allones_v16i8_sign:
@@ -60,13 +56,9 @@
 ;
 ; KNL-LABEL: allzeros_v16i8_sign:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kortestw %k0, %k0
+; KNL-NEXT:    vpmovmskb %xmm0, %eax
+; KNL-NEXT:    testw %ax, %ax
 ; KNL-NEXT:    sete %al
-; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: allzeros_v16i8_sign:
@@ -117,18 +109,8 @@
 ;
 ; KNL-LABEL: allones_v32i8_sign:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    cmpl $-1, %ecx
+; KNL-NEXT:    vpmovmskb %ymm0, %eax
+; KNL-NEXT:    cmpl $-1, %eax
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -180,17 +162,8 @@
 ;
 ; KNL-LABEL: allzeros_v32i8_sign:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    vpmovmskb %ymm0, %eax
+; KNL-NEXT:    testl %eax, %eax
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -262,30 +235,11 @@
 ;
 ; KNL-LABEL: allones_v64i8_sign:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm3
-; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    vpcmpgtb %ymm1, %ymm2, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %edx
-; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %eax, %edx
-; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rcx, %rdx
-; KNL-NEXT:    cmpq $-1, %rdx
+; KNL-NEXT:    vpmovmskb %ymm1, %eax
+; KNL-NEXT:    shlq $32, %rax
+; KNL-NEXT:    vpmovmskb %ymm0, %ecx
+; KNL-NEXT:    orq %rax, %rcx
+; KNL-NEXT:    cmpq $-1, %rcx
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -354,29 +308,10 @@
 ;
 ; KNL-LABEL: allzeros_v64i8_sign:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm3
-; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    vpcmpgtb %ymm1, %ymm2, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %edx
-; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %eax, %edx
-; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rcx, %rdx
+; KNL-NEXT:    vpmovmskb %ymm1, %eax
+; KNL-NEXT:    shlq $32, %rax
+; KNL-NEXT:    vpmovmskb %ymm0, %ecx
+; KNL-NEXT:    orq %rax, %rcx
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -1101,24 +1036,21 @@
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movdqa %xmm2, %xmm3
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm2, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pand %xmm4, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT:    por %xmm1, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    por %xmm4, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm0
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT:    pand %xmm4, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT:    por %xmm0, %xmm1
-; SSE2-NEXT:    packssdw %xmm3, %xmm1
-; SSE2-NEXT:    movmskps %xmm1, %eax
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-NEXT:    movmskps %xmm0, %eax
 ; SSE2-NEXT:    cmpb $15, %al
 ; SSE2-NEXT:    sete %al
 ; SSE2-NEXT:    retq
@@ -1164,24 +1096,21 @@
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movdqa %xmm2, %xmm3
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm2, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pand %xmm4, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT:    por %xmm1, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    por %xmm4, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm0
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT:    pand %xmm4, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT:    por %xmm0, %xmm1
-; SSE2-NEXT:    packssdw %xmm3, %xmm1
-; SSE2-NEXT:    movmskps %xmm1, %eax
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-NEXT:    movmskps %xmm0, %eax
 ; SSE2-NEXT:    testb %al, %al
 ; SSE2-NEXT:    sete %al
 ; SSE2-NEXT:    retq
@@ -1444,14 +1373,10 @@
 ;
 ; KNL-LABEL: allones_v16i8_and1:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; KNL-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kortestw %k0, %k0
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    vpsllw $7, %xmm0, %xmm0
+; KNL-NEXT:    vpmovmskb %xmm0, %eax
+; KNL-NEXT:    cmpw $-1, %ax
+; KNL-NEXT:    sete %al
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: allones_v16i8_and1:
@@ -1486,14 +1411,10 @@
 ;
 ; KNL-LABEL: allzeros_v16i8_and1:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; KNL-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kortestw %k0, %k0
+; KNL-NEXT:    vpsllw $7, %xmm0, %xmm0
+; KNL-NEXT:    vpmovmskb %xmm0, %eax
+; KNL-NEXT:    testw %ax, %ax
 ; KNL-NEXT:    sete %al
-; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: allzeros_v16i8_and1:
@@ -1552,19 +1473,9 @@
 ;
 ; KNL-LABEL: allones_v32i8_and1:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; KNL-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    cmpl $-1, %ecx
+; KNL-NEXT:    vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT:    vpmovmskb %ymm0, %eax
+; KNL-NEXT:    cmpl $-1, %eax
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -1624,18 +1535,9 @@
 ;
 ; KNL-LABEL: allzeros_v32i8_and1:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; KNL-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT:    vpmovmskb %ymm0, %eax
+; KNL-NEXT:    testl %eax, %eax
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -1722,32 +1624,13 @@
 ;
 ; KNL-LABEL: allones_v64i8_and1:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; KNL-NEXT:    vpand %ymm2, %ymm1, %ymm1
-; KNL-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm3
-; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm1, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %edx
-; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %eax, %edx
-; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rcx, %rdx
-; KNL-NEXT:    cmpq $-1, %rdx
+; KNL-NEXT:    vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT:    vpsllw $7, %ymm1, %ymm1
+; KNL-NEXT:    vpmovmskb %ymm1, %eax
+; KNL-NEXT:    shlq $32, %rax
+; KNL-NEXT:    vpmovmskb %ymm0, %ecx
+; KNL-NEXT:    orq %rax, %rcx
+; KNL-NEXT:    cmpq $-1, %rcx
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -1831,31 +1714,12 @@
 ;
 ; KNL-LABEL: allzeros_v64i8_and1:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; KNL-NEXT:    vpand %ymm2, %ymm1, %ymm1
-; KNL-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm3
-; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm1, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %edx
-; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %eax, %edx
-; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rcx, %rdx
+; KNL-NEXT:    vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT:    vpsllw $7, %ymm1, %ymm1
+; KNL-NEXT:    vpmovmskb %ymm1, %eax
+; KNL-NEXT:    shlq $32, %rax
+; KNL-NEXT:    vpmovmskb %ymm0, %ecx
+; KNL-NEXT:    orq %rax, %rcx
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -3108,14 +2972,10 @@
 ;
 ; KNL-LABEL: allones_v16i8_and4:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
-; KNL-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kortestw %k0, %k0
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    vpsllw $5, %xmm0, %xmm0
+; KNL-NEXT:    vpmovmskb %xmm0, %eax
+; KNL-NEXT:    cmpw $-1, %ax
+; KNL-NEXT:    sete %al
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: allones_v16i8_and4:
@@ -3150,14 +3010,10 @@
 ;
 ; KNL-LABEL: allzeros_v16i8_and4:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
-; KNL-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kortestw %k0, %k0
+; KNL-NEXT:    vpsllw $5, %xmm0, %xmm0
+; KNL-NEXT:    vpmovmskb %xmm0, %eax
+; KNL-NEXT:    testw %ax, %ax
 ; KNL-NEXT:    sete %al
-; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: allzeros_v16i8_and4:
@@ -3216,19 +3072,9 @@
 ;
 ; KNL-LABEL: allones_v32i8_and4:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
-; KNL-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    cmpl $-1, %ecx
+; KNL-NEXT:    vpsllw $5, %ymm0, %ymm0
+; KNL-NEXT:    vpmovmskb %ymm0, %eax
+; KNL-NEXT:    cmpl $-1, %eax
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -3288,18 +3134,9 @@
 ;
 ; KNL-LABEL: allzeros_v32i8_and4:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
-; KNL-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    vpsllw $5, %ymm0, %ymm0
+; KNL-NEXT:    vpmovmskb %ymm0, %eax
+; KNL-NEXT:    testl %eax, %eax
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -3386,32 +3223,13 @@
 ;
 ; KNL-LABEL: allones_v64i8_and4:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
-; KNL-NEXT:    vpand %ymm2, %ymm1, %ymm1
-; KNL-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm3
-; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm1, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %edx
-; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %eax, %edx
-; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rcx, %rdx
-; KNL-NEXT:    cmpq $-1, %rdx
+; KNL-NEXT:    vpsllw $5, %ymm0, %ymm0
+; KNL-NEXT:    vpsllw $5, %ymm1, %ymm1
+; KNL-NEXT:    vpmovmskb %ymm1, %eax
+; KNL-NEXT:    shlq $32, %rax
+; KNL-NEXT:    vpmovmskb %ymm0, %ecx
+; KNL-NEXT:    orq %rax, %rcx
+; KNL-NEXT:    cmpq $-1, %rcx
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -3495,31 +3313,12 @@
 ;
 ; KNL-LABEL: allzeros_v64i8_and4:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
-; KNL-NEXT:    vpand %ymm2, %ymm1, %ymm1
-; KNL-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm0, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm3
-; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
-; KNL-NEXT:    vpcmpeqb %ymm2, %ymm1, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %edx
-; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %eax, %edx
-; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rcx, %rdx
+; KNL-NEXT:    vpsllw $5, %ymm0, %ymm0
+; KNL-NEXT:    vpsllw $5, %ymm1, %ymm1
+; KNL-NEXT:    vpmovmskb %ymm1, %eax
+; KNL-NEXT:    shlq $32, %rax
+; KNL-NEXT:    vpmovmskb %ymm0, %ecx
+; KNL-NEXT:    orq %rax, %rcx
 ; KNL-NEXT:    sete %al
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -4830,24 +4629,21 @@
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movdqa %xmm2, %xmm3
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm2, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pand %xmm4, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT:    por %xmm1, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    por %xmm4, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm0
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT:    pand %xmm4, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT:    por %xmm0, %xmm1
-; SSE2-NEXT:    packssdw %xmm3, %xmm1
-; SSE2-NEXT:    movmskps %xmm1, %eax
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-NEXT:    movmskps %xmm0, %eax
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: movmskpd256:
@@ -4935,12 +4731,7 @@
 ;
 ; KNL-LABEL: movmskb:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    vpmovmskb %xmm0, %eax
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: movmskb:
@@ -4984,17 +4775,7 @@
 ;
 ; KNL-LABEL: movmskb256:
 ; KNL:       # %bb.0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm1
-; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
-; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %ecx, %eax
+; KNL-NEXT:    vpmovmskb %ymm0, %eax
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/movpc32-check.ll b/test/CodeGen/X86/movpc32-check.ll
index 3a8c404..7545b52 100644
--- a/test/CodeGen/X86/movpc32-check.ll
+++ b/test/CodeGen/X86/movpc32-check.ll
@@ -19,7 +19,7 @@
 !llvm.module.flags = !{!7, !8, !9}
 !llvm.ident = !{!10}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0 (http://llvm.org/git/clang.git 3490ab8630d5643f71f1f04e46984f05b27b8d67) (http://llvm.org/git/llvm.git d2643e2ff955ed234944fe3c6b4ffc1250085843)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "test.c", directory: "movpc-test")
 !2 = !{}
 !4 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 2, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: false, unit: !0, retainedNodes: !2)
@@ -28,7 +28,7 @@
 !7 = !{i32 2, !"Dwarf Version", i32 4}
 !8 = !{i32 2, !"Debug Info Version", i32 3}
 !9 = !{i32 1, !"PIC Level", i32 2}
-!10 = !{!"clang version 3.8.0 (http://llvm.org/git/clang.git 3490ab8630d5643f71f1f04e46984f05b27b8d67) (http://llvm.org/git/llvm.git d2643e2ff955ed234944fe3c6b4ffc1250085843)"}
+!10 = !{!"clang version 3.8.0"}
 !11 = !DILocation(line: 4, column: 3, scope: !4)
 !12 = !DILocation(line: 5, column: 1, scope: !4)
 
diff --git a/test/CodeGen/X86/mulvi32.ll b/test/CodeGen/X86/mulvi32.ll
index 6c6737a..fc185d1 100644
--- a/test/CodeGen/X86/mulvi32.ll
+++ b/test/CodeGen/X86/mulvi32.ll
@@ -131,29 +131,24 @@
 define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
 ; SSE2-LABEL: _mul4xi32toi64a:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    movdqa %xmm0, %xmm4
-; SSE2-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT:    movdqa %xmm1, %xmm2
-; SSE2-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT:    pmuludq %xmm4, %xmm2
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT:    pmuludq %xmm1, %xmm0
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _mul4xi32toi64a:
 ; SSE42:       # %bb.0:
-; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
-; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
 ; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
 ; SSE42-NEXT:    pmuludq %xmm3, %xmm2
-; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE42-NEXT:    pmuludq %xmm4, %xmm0
-; SSE42-NEXT:    movdqa %xmm2, %xmm1
+; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; SSE42-NEXT:    pmuludq %xmm3, %xmm1
+; SSE42-NEXT:    movdqa %xmm2, %xmm0
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _mul4xi32toi64a:
diff --git a/test/CodeGen/X86/note-cet-property.ll b/test/CodeGen/X86/note-cet-property.ll
index 2b7dbbe..f3cc32b 100644
--- a/test/CodeGen/X86/note-cet-property.ll
+++ b/test/CodeGen/X86/note-cet-property.ll
@@ -22,8 +22,8 @@
 ; X86_64-NEXT: .long    5
 ; X86_64-NEXT: .asciz   "GNU"
 ; X86_64-NEXT: .long    3221225474
-; X86_64-NEXT: .long    8
-; X86_64-NEXT: .quad    3
+; X86_64-NEXT: .long    4
+; X86_64-NEXT: .long    3
 ; X86_64-NEXT: .p2align 3
 
 !llvm.module.flags = !{!0, !1}
diff --git a/test/CodeGen/X86/objc-arc.ll b/test/CodeGen/X86/objc-arc.ll
deleted file mode 100644
index 76f2fd5..0000000
--- a/test/CodeGen/X86/objc-arc.ll
+++ /dev/null
@@ -1,230 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
-
-; Make sure calls to the objc intrinsics are translated to calls into the
-; runtime
-
-define i8* @test_objc_autorelease(i8* %arg0) {
-; CHECK-LABEL: test_objc_autorelease
-; CHECK: callq _objc_autorelease
-entry:
-    %0 = call i8* @llvm.objc.autorelease(i8* %arg0)
-	ret i8* %0
-}
-
-define void @test_objc_autoreleasePoolPop(i8* %arg0) {
-; CHECK-LABEL: test_objc_autoreleasePoolPop
-; CHECK: callq _objc_autoreleasePoolPop
-entry:
-    call void @llvm.objc.autoreleasePoolPop(i8* %arg0)
-    ret void
-}
-
-define i8* @test_objc_autoreleasePoolPush() {
-; CHECK-LABEL: test_objc_autoreleasePoolPush
-; CHECK: callq _objc_autoreleasePoolPush
-entry:
-    %0 = call i8* @llvm.objc.autoreleasePoolPush()
-	ret i8* %0
-}
-
-define i8* @test_objc_autoreleaseReturnValue(i8* %arg0) {
-; CHECK-LABEL: test_objc_autoreleaseReturnValue
-; CHECK: callq _objc_autoreleaseReturnValue
-entry:
-    %0 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %arg0)
-	ret i8* %0
-}
-
-define void @test_objc_copyWeak(i8** %arg0, i8** %arg1) {
-; CHECK-LABEL: test_objc_copyWeak
-; CHECK: callq _objc_copyWeak
-entry:
-    call void @llvm.objc.copyWeak(i8** %arg0, i8** %arg1)
-    ret void
-}
-
-define void @test_objc_destroyWeak(i8** %arg0) {
-; CHECK-LABEL: test_objc_destroyWeak
-; CHECK: callq _objc_destroyWeak
-entry:
-    call void @llvm.objc.destroyWeak(i8** %arg0)
-    ret void
-}
-
-define i8* @test_objc_initWeak(i8** %arg0, i8* %arg1) {
-; CHECK-LABEL: test_objc_initWeak
-; CHECK: callq _objc_initWeak
-entry:
-    %0 = call i8* @llvm.objc.initWeak(i8** %arg0, i8* %arg1)
-	ret i8* %0
-}
-
-define i8* @test_objc_loadWeak(i8** %arg0) {
-; CHECK-LABEL: test_objc_loadWeak
-; CHECK: callq _objc_loadWeak
-entry:
-    %0 = call i8* @llvm.objc.loadWeak(i8** %arg0)
-	ret i8* %0
-}
-
-define i8* @test_objc_loadWeakRetained(i8** %arg0) {
-; CHECK-LABEL: test_objc_loadWeakRetained
-; CHECK: callq _objc_loadWeakRetained
-entry:
-    %0 = call i8* @llvm.objc.loadWeakRetained(i8** %arg0)
-	ret i8* %0
-}
-
-define void @test_objc_moveWeak(i8** %arg0, i8** %arg1) {
-; CHECK-LABEL: test_objc_moveWeak
-; CHECK: callq _objc_moveWeak
-entry:
-    call void @llvm.objc.moveWeak(i8** %arg0, i8** %arg1)
-    ret void
-}
-
-define void @test_objc_release(i8* %arg0) {
-; CHECK-LABEL: test_objc_release
-; CHECK: callq _objc_release
-entry:
-    call void @llvm.objc.release(i8* %arg0)
-    ret void
-}
-
-define i8* @test_objc_retain(i8* %arg0) {
-; CHECK-LABEL: test_objc_retain
-; CHECK: callq _objc_retain
-entry:
-    %0 = call i8* @llvm.objc.retain(i8* %arg0)
-	ret i8* %0
-}
-
-define i8* @test_objc_retainAutorelease(i8* %arg0) {
-; CHECK-LABEL: test_objc_retainAutorelease
-; CHECK: callq _objc_retainAutorelease
-entry:
-    %0 = call i8* @llvm.objc.retainAutorelease(i8* %arg0)
-	ret i8* %0
-}
-
-define i8* @test_objc_retainAutoreleaseReturnValue(i8* %arg0) {
-; CHECK-LABEL: test_objc_retainAutoreleaseReturnValue
-; CHECK: callq _objc_retainAutoreleaseReturnValue
-entry:
-    %0 = call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %arg0)
-	ret i8* %0
-}
-
-define i8* @test_objc_retainAutoreleasedReturnValue(i8* %arg0) {
-; CHECK-LABEL: test_objc_retainAutoreleasedReturnValue
-; CHECK: callq _objc_retainAutoreleasedReturnValue
-entry:
-    %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %arg0)
-	ret i8* %0
-}
-
-define i8* @test_objc_retainBlock(i8* %arg0) {
-; CHECK-LABEL: test_objc_retainBlock
-; CHECK: callq _objc_retainBlock
-entry:
-    %0 = call i8* @llvm.objc.retainBlock(i8* %arg0)
-	ret i8* %0
-}
-
-define void @test_objc_storeStrong(i8** %arg0, i8* %arg1) {
-; CHECK-LABEL: test_objc_storeStrong
-; CHECK: callq _objc_storeStrong
-entry:
-    call void @llvm.objc.storeStrong(i8** %arg0, i8* %arg1)
-	ret void
-}
-
-define i8* @test_objc_storeWeak(i8** %arg0, i8* %arg1) {
-; CHECK-LABEL: test_objc_storeWeak
-; CHECK: callq _objc_storeWeak
-entry:
-    %0 = call i8* @llvm.objc.storeWeak(i8** %arg0, i8* %arg1)
-	ret i8* %0
-}
-
-define i8* @test_objc_objc_unsafeClaimAutoreleasedReturnValue(i8* %arg0) {
-; CHECK-LABEL: test_objc_objc_unsafeClaimAutoreleasedReturnValue
-; CHECK: callq _objc_unsafeClaimAutoreleasedReturnValue
-entry:
-    %0 = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %arg0)
-    ret i8* %0
-}
-
-define i8* @test_objc_objc_retainedObject(i8* %arg0) {
-; CHECK-LABEL: test_objc_objc_retainedObject
-; CHECK: callq _objc_retainedObject
-entry:
-    %0 = call i8* @llvm.objc.retainedObject(i8* %arg0)
-    ret i8* %0
-}
-
-define i8* @test_objc_objc_unretainedObject(i8* %arg0) {
-; CHECK-LABEL: test_objc_objc_unretainedObject
-; CHECK: callq _objc_unretainedObject
-entry:
-    %0 = call i8* @llvm.objc.unretainedObject(i8* %arg0)
-    ret i8* %0
-}
-
-define i8* @test_objc_objc_unretainedPointer(i8* %arg0) {
-; CHECK-LABEL: test_objc_objc_unretainedPointer
-; CHECK: callq _objc_unretainedPointer
-entry:
-    %0 = call i8* @llvm.objc.unretainedPointer(i8* %arg0)
-    ret i8* %0
-}
-
-define i8* @test_objc_objc_retain_autorelease(i8* %arg0) {
-; CHECK-LABEL: test_objc_objc_retain_autorelease
-; CHECK: callq _objc_retain_autorelease
-entry:
-    %0 = call i8* @llvm.objc.retain.autorelease(i8* %arg0)
-    ret i8* %0
-}
-
-define i32 @test_objc_objc_sync_enter(i8* %arg0) {
-; CHECK-LABEL: test_objc_objc_sync_enter
-; CHECK: callq _objc_sync_enter
-entry:
-    %0 = call i32 @llvm.objc.sync.enter(i8* %arg0)
-    ret i32 %0
-}
-
-define i32 @test_objc_objc_sync_exit(i8* %arg0) {
-; CHECK-LABEL: test_objc_objc_sync_exit
-; CHECK: callq _objc_sync_exit
-entry:
-    %0 = call i32 @llvm.objc.sync.exit(i8* %arg0)
-    ret i32 %0
-}
-
-declare i8* @llvm.objc.autorelease(i8*)
-declare void @llvm.objc.autoreleasePoolPop(i8*)
-declare i8* @llvm.objc.autoreleasePoolPush()
-declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
-declare void @llvm.objc.copyWeak(i8**, i8**)
-declare void @llvm.objc.destroyWeak(i8**)
-declare i8* @llvm.objc.initWeak(i8**, i8*)
-declare i8* @llvm.objc.loadWeak(i8**)
-declare i8* @llvm.objc.loadWeakRetained(i8**)
-declare void @llvm.objc.moveWeak(i8**, i8**)
-declare void @llvm.objc.release(i8*)
-declare i8* @llvm.objc.retain(i8*)
-declare i8* @llvm.objc.retainAutorelease(i8*)
-declare i8* @llvm.objc.retainAutoreleaseReturnValue(i8*)
-declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
-declare i8* @llvm.objc.retainBlock(i8*)
-declare void @llvm.objc.storeStrong(i8**, i8*)
-declare i8* @llvm.objc.storeWeak(i8**, i8*)
-declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
-declare i8* @llvm.objc.retainedObject(i8*)
-declare i8* @llvm.objc.unretainedObject(i8*)
-declare i8* @llvm.objc.unretainedPointer(i8*)
-declare i8* @llvm.objc.retain.autorelease(i8*)
-declare i32 @llvm.objc.sync.enter(i8*)
-declare i32 @llvm.objc.sync.exit(i8*)
diff --git a/test/CodeGen/X86/opt_phis2.mir b/test/CodeGen/X86/opt_phis2.mir
new file mode 100644
index 0000000..55523f9
--- /dev/null
+++ b/test/CodeGen/X86/opt_phis2.mir
@@ -0,0 +1,72 @@
+# RUN: llc -run-pass opt-phis -march=x86-64 -o - %s | FileCheck %s
+# All PHIs should be removed since they can be safely replaced
+# by the %8 register.
+# CHECK-NOT: PHI
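+# %0..%3 are plain COPYs of %8, and the PHIs %4..%6 only ever merge those
+# copies (or each other), so every PHI value equals %8 on all paths; the
+# pass is therefore expected to rewrite the VEXTRACTF128/COPY users to
+# read %8 directly, which is what the CHECK-NOT above pins down.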
+--- |
+  define void @test() {
+    ret void
+  }
+...
+---
+name:            test
+alignment:       4
+tracksRegLiveness: true
+jumpTable:
+  kind:            block-address
+  entries:
+    - id:              0
+      blocks:          [ '%bb.3', '%bb.2', '%bb.1', '%bb.4' ]
+body:             |
+  bb.0:
+    liveins: $edi, $ymm0, $rsi
+
+    %9:gr64 = COPY $rsi
+    %8:vr256 = COPY $ymm0
+    %7:gr32 = COPY $edi
+    %11:gr32 = SAR32ri %7, 31, implicit-def dead $eflags
+    %12:gr32 = SHR32ri %11, 30, implicit-def dead $eflags
+    %13:gr32 = ADD32rr %7, killed %12, implicit-def dead $eflags
+    %14:gr32 = AND32ri8 %13, -4, implicit-def dead $eflags
+    %15:gr32 = SUB32rr %7, %14, implicit-def dead $eflags
+    %10:gr64_nosp = SUBREG_TO_REG 0, %15, %subreg.sub_32bit
+    %16:gr32 = SUB32ri8 %15, 3, implicit-def $eflags
+    JA_1 %bb.8, implicit $eflags
+
+  bb.9:
+    JMP64m $noreg, 8, %10, %jump-table.0, $noreg :: (load 8 from jump-table)
+
+  bb.1:
+    %0:vr256 = COPY %8
+    JMP_1 %bb.5
+
+  bb.2:
+    %1:vr256 = COPY %8
+    JMP_1 %bb.6
+
+  bb.3:
+    %2:vr256 = COPY %8
+    JMP_1 %bb.7
+
+  bb.4:
+    %3:vr256 = COPY %8
+    %17:vr128 = VEXTRACTF128rr %8, 1
+    VPEXTRDmr %9, 1, $noreg, 12, $noreg, killed %17, 2
+
+  bb.5:
+    %4:vr256 = PHI %0, %bb.1, %3, %bb.4
+    %18:vr128 = VEXTRACTF128rr %4, 1
+    VPEXTRDmr %9, 1, $noreg, 8, $noreg, killed %18, 1
+
+  bb.6:
+    %5:vr256 = PHI %1, %bb.2, %4, %bb.5
+    %19:vr128 = VEXTRACTF128rr %5, 1
+    VMOVPDI2DImr %9, 1, $noreg, 4, $noreg, killed %19
+
+  bb.7:
+    %6:vr256 = PHI %2, %bb.3, %5, %bb.6
+    %20:vr128 = COPY %6.sub_xmm
+    VPEXTRDmr %9, 1, $noreg, 0, $noreg, killed %20, 3
+
+  bb.8:
+    RET 0
+...
diff --git a/test/CodeGen/X86/packss.ll b/test/CodeGen/X86/packss.ll
index 85c053f..eecfab2 100644
--- a/test/CodeGen/X86/packss.ll
+++ b/test/CodeGen/X86/packss.ll
@@ -156,19 +156,15 @@
 define <8 x i16> @trunc_ashr_v4i64_demandedelts(<4 x i64> %a0) {
 ; X86-SSE-LABEL: trunc_ashr_v4i64_demandedelts:
 ; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    movdqa %xmm1, %xmm2
-; X86-SSE-NEXT:    psllq $63, %xmm2
-; X86-SSE-NEXT:    movdqa %xmm0, %xmm3
-; X86-SSE-NEXT:    psllq $63, %xmm3
-; X86-SSE-NEXT:    psrlq $63, %xmm3
-; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
-; X86-SSE-NEXT:    movapd {{.*#+}} xmm3 = [4.9406564584124654E-324,-0.0E+0]
-; X86-SSE-NEXT:    xorpd %xmm3, %xmm0
-; X86-SSE-NEXT:    psubq %xmm3, %xmm0
-; X86-SSE-NEXT:    psrlq $63, %xmm2
-; X86-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; X86-SSE-NEXT:    xorpd %xmm3, %xmm1
-; X86-SSE-NEXT:    psubq %xmm3, %xmm1
+; X86-SSE-NEXT:    psllq $63, %xmm1
+; X86-SSE-NEXT:    psllq $63, %xmm0
+; X86-SSE-NEXT:    psrlq $63, %xmm0
+; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4.9406564584124654E-324,-0.0E+0]
+; X86-SSE-NEXT:    pxor %xmm2, %xmm0
+; X86-SSE-NEXT:    psubq %xmm2, %xmm0
+; X86-SSE-NEXT:    psrlq $63, %xmm1
+; X86-SSE-NEXT:    pxor %xmm2, %xmm1
+; X86-SSE-NEXT:    psubq %xmm2, %xmm1
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X86-SSE-NEXT:    packssdw %xmm1, %xmm0
@@ -212,19 +208,15 @@
 ;
 ; X64-SSE-LABEL: trunc_ashr_v4i64_demandedelts:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa %xmm1, %xmm2
-; X64-SSE-NEXT:    psllq $63, %xmm2
-; X64-SSE-NEXT:    movdqa %xmm0, %xmm3
-; X64-SSE-NEXT:    psllq $63, %xmm3
-; X64-SSE-NEXT:    psrlq $63, %xmm3
-; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
-; X64-SSE-NEXT:    movapd {{.*#+}} xmm3 = [1,9223372036854775808]
-; X64-SSE-NEXT:    xorpd %xmm3, %xmm0
-; X64-SSE-NEXT:    psubq %xmm3, %xmm0
-; X64-SSE-NEXT:    psrlq $63, %xmm2
-; X64-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; X64-SSE-NEXT:    xorpd %xmm3, %xmm1
-; X64-SSE-NEXT:    psubq %xmm3, %xmm1
+; X64-SSE-NEXT:    psllq $63, %xmm1
+; X64-SSE-NEXT:    psllq $63, %xmm0
+; X64-SSE-NEXT:    psrlq $63, %xmm0
+; X64-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,9223372036854775808]
+; X64-SSE-NEXT:    pxor %xmm2, %xmm0
+; X64-SSE-NEXT:    psubq %xmm2, %xmm0
+; X64-SSE-NEXT:    psrlq $63, %xmm1
+; X64-SSE-NEXT:    pxor %xmm2, %xmm1
+; X64-SSE-NEXT:    psubq %xmm2, %xmm1
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
@@ -253,7 +245,8 @@
 ;
 ; X64-AVX2-LABEL: trunc_ashr_v4i64_demandedelts:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; X64-AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [1,9223372036854775808,1,9223372036854775808]
 ; X64-AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
 ; X64-AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/phaddsub-extract.ll b/test/CodeGen/X86/phaddsub-extract.ll
new file mode 100644
index 0000000..57978e4
--- /dev/null
+++ b/test/CodeGen/X86/phaddsub-extract.ll
@@ -0,0 +1,904 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3              | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,fast-hops    | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx                | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops      | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2               | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX2-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,fast-hops     | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl           | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512-FAST
+
+; 128-bit vectors, 16/32-bit, add/sub
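+; Each pair of extracts below pulls elements 0 and 1 out of the source
+; vector and combines them with a scalar add/sub. With the fast-hops
+; attribute this pattern should collapse to a single horizontal op
+; (phaddd/phaddw/phsubd/phsubw); without it, the scalar extract+add
+; sequence is expected to be kept, as the SLOW check lines show.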
+
+define i32 @extract_extract_v4i32_add_i32(<4 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 0
+  %x1 = extractelement <4 x i32> %x, i32 1
+  %x01 = add i32 %x0, %x1
+  ret i32 %x01
+}
+
+define i32 @extract_extract_v4i32_add_i32_commute(<4 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 0
+  %x1 = extractelement <4 x i32> %x, i32 1
+  %x01 = add i32 %x1, %x0
+  ret i32 %x01
+}
+
+define i16 @extract_extract_v8i16_add_i16(<8 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8i16_add_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i16_add_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i16_add_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i16_add_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x i16> %x, i32 0
+  %x1 = extractelement <8 x i16> %x, i32 1
+  %x01 = add i16 %x0, %x1
+  ret i16 %x01
+}
+
+define i16 @extract_extract_v8i16_add_i16_commute(<8 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8i16_add_i16_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i16_add_i16_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i16_add_i16_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i16_add_i16_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x i16> %x, i32 0
+  %x1 = extractelement <8 x i16> %x, i32 1
+  %x01 = add i16 %x1, %x0
+  ret i16 %x01
+}
+
+define i32 @extract_extract_v4i32_sub_i32(<4 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v4i32_sub_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_sub_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_sub_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_sub_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 0
+  %x1 = extractelement <4 x i32> %x, i32 1
+  %x01 = sub i32 %x0, %x1
+  ret i32 %x01
+}
+
+define i32 @extract_extract_v4i32_sub_i32_commute(<4 x i32> %x) {
+; SSE3-LABEL: extract_extract_v4i32_sub_i32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4i32_sub_i32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 0
+  %x1 = extractelement <4 x i32> %x, i32 1
+  %x01 = sub i32 %x1, %x0
+  ret i32 %x01
+}
+
+define i16 @extract_extract_v8i16_sub_i16(<8 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8i16_sub_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i16_sub_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i16_sub_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i16_sub_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x i16> %x, i32 0
+  %x1 = extractelement <8 x i16> %x, i32 1
+  %x01 = sub i16 %x0, %x1
+  ret i16 %x01
+}
+
+define i16 @extract_extract_v8i16_sub_i16_commute(<8 x i16> %x) {
+; SSE3-LABEL: extract_extract_v8i16_sub_i16_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v8i16_sub_i16_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-NEXT:    retq
+  %x0 = extractelement <8 x i16> %x, i32 0
+  %x1 = extractelement <8 x i16> %x, i32 1
+  %x01 = sub i16 %x1, %x0
+  ret i16 %x01
+}
+
+; 256-bit vectors, i32/i16, add/sub
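+; Elements 0 and 1 of a 256-bit vector live in the low 128-bit subvector,
+; so the same xmm-width horizontal op should suffice here; the only extra
+; work expected on the AVX targets is the vzeroupper before returning.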
+
+define i32 @extract_extract_v8i32_add_i32(<8 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8i32_add_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i32_add_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i32_add_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i32_add_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x i32> %x, i32 0
+  %x1 = extractelement <8 x i32> %x, i32 1
+  %x01 = add i32 %x0, %x1
+  ret i32 %x01
+}
+
+define i32 @extract_extract_v8i32_add_i32_commute(<8 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8i32_add_i32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i32_add_i32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i32_add_i32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i32_add_i32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x i32> %x, i32 0
+  %x1 = extractelement <8 x i32> %x, i32 1
+  %x01 = add i32 %x1, %x0
+  ret i32 %x01
+}
+
+define i16 @extract_extract_v16i16_add_i16(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16i16_add_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i16_add_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i16_add_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i16_add_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x i16> %x, i32 0
+  %x1 = extractelement <16 x i16> %x, i32 1
+  %x01 = add i16 %x0, %x1
+  ret i16 %x01
+}
+
+define i16 @extract_extract_v16i16_add_i16_commute(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16i16_add_i16_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i16_add_i16_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i16_add_i16_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i16_add_i16_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x i16> %x, i32 0
+  %x1 = extractelement <16 x i16> %x, i32 1
+  %x01 = add i16 %x1, %x0
+  ret i16 %x01
+}
+
+define i32 @extract_extract_v8i32_sub_i32(<8 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v8i32_sub_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i32_sub_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i32_sub_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i32_sub_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <8 x i32> %x, i32 0
+  %x1 = extractelement <8 x i32> %x, i32 1
+  %x01 = sub i32 %x0, %x1
+  ret i32 %x01
+}
+
+; Negative test...or get hoppy and negate?
+
+define i32 @extract_extract_v8i32_sub_i32_commute(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract_v8i32_sub_i32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v8i32_sub_i32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <8 x i32> %x, i32 0
+  %x1 = extractelement <8 x i32> %x, i32 1
+  %x01 = sub i32 %x1, %x0
+  ret i32 %x01
+}
+
+define i16 @extract_extract_v16i16_sub_i16(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16i16_sub_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i16_sub_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i16_sub_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i16_sub_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x i16> %x, i32 0
+  %x1 = extractelement <16 x i16> %x, i32 1
+  %x01 = sub i16 %x0, %x1
+  ret i16 %x01
+}
+
+; Negative test...or get hoppy and negate?
+
+define i16 @extract_extract_v16i16_sub_i16_commute(<16 x i16> %x) {
+; SSE3-LABEL: extract_extract_v16i16_sub_i16_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v16i16_sub_i16_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <16 x i16> %x, i32 0
+  %x1 = extractelement <16 x i16> %x, i32 1
+  %x01 = sub i16 %x1, %x0
+  ret i16 %x01
+}
+
+; 512-bit vectors, i32/i16, add/sub
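+; As in the 256-bit cases, only the low 128 bits are demanded, so even the
+; 512-bit inputs should reduce to a single xmm horizontal op (or to the
+; scalar extract+add sequence on the SLOW configurations).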
+
+define i32 @extract_extract_v16i32_add_i32(<16 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16i32_add_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i32_add_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i32_add_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i32_add_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x i32> %x, i32 0
+  %x1 = extractelement <16 x i32> %x, i32 1
+  %x01 = add i32 %x0, %x1
+  ret i32 %x01
+}
+
+define i32 @extract_extract_v16i32_add_i32_commute(<16 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16i32_add_i32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i32_add_i32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i32_add_i32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i32_add_i32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x i32> %x, i32 0
+  %x1 = extractelement <16 x i32> %x, i32 1
+  %x01 = add i32 %x1, %x0
+  ret i32 %x01
+}
+
+define i16 @extract_extract_v32i16_add_i16(<32 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v32i16_add_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v32i16_add_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v32i16_add_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v32i16_add_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <32 x i16> %x, i32 0
+  %x1 = extractelement <32 x i16> %x, i32 1
+  %x01 = add i16 %x0, %x1
+  ret i16 %x01
+}
+
+define i16 @extract_extract_v32i16_add_i16_commute(<32 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v32i16_add_i16_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v32i16_add_i16_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v32i16_add_i16_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v32i16_add_i16_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <32 x i16> %x, i32 0
+  %x1 = extractelement <32 x i16> %x, i32 1
+  %x01 = add i16 %x1, %x0
+  ret i16 %x01
+}
+
+define i32 @extract_extract_v16i32_sub_i32(<16 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v16i32_sub_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i32_sub_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i32_sub_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i32_sub_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <16 x i32> %x, i32 0
+  %x1 = extractelement <16 x i32> %x, i32 1
+  %x01 = sub i32 %x0, %x1
+  ret i32 %x01
+}
+
+define i32 @extract_extract_v16i32_sub_i32_commute(<16 x i32> %x) {
+; SSE3-LABEL: extract_extract_v16i32_sub_i32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v16i32_sub_i32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <16 x i32> %x, i32 0
+  %x1 = extractelement <16 x i32> %x, i32 1
+  %x01 = sub i32 %x1, %x0
+  ret i32 %x01
+}
+
+define i16 @extract_extract_v32i16_sub_i16(<32 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract_v32i16_sub_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    pextrw $1, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v32i16_sub_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v32i16_sub_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX-SLOW-NEXT:    vpextrw $1, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v32i16_sub_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <32 x i16> %x, i32 0
+  %x1 = extractelement <32 x i16> %x, i32 1
+  %x01 = sub i16 %x0, %x1
+  ret i16 %x01
+}
+
+define i16 @extract_extract_v32i16_sub_i16_commute(<32 x i16> %x) {
+; SSE3-LABEL: extract_extract_v32i16_sub_i16_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v32i16_sub_i16_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <32 x i16> %x, i32 0
+  %x1 = extractelement <32 x i16> %x, i32 1
+  %x01 = sub i16 %x1, %x0
+  ret i16 %x01
+}
+
+; Check output when 1 or both extracts have extra uses.
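+; The extracted scalars are stored as well, so they must be materialized
+; regardless. With one extra use, the FAST configurations are still
+; expected to form the horizontal add; when both extracts have extra uses
+; (uses3), the fold should not fire and all targets use scalar math.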
+
+define i32 @extract_extract_v4i32_add_i32_uses1(<4 x i32> %x, i32* %p) {
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_uses1:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    movd %xmm0, (%rdi)
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_uses1:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    movd %xmm0, (%rdi)
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_uses1:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vmovd %xmm0, (%rdi)
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_uses1:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vmovd %xmm0, (%rdi)
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 0
+  store i32 %x0, i32* %p
+  %x1 = extractelement <4 x i32> %x, i32 1
+  %x01 = add i32 %x0, %x1
+  ret i32 %x01
+}
+
+define i32 @extract_extract_v4i32_add_i32_uses2(<4 x i32> %x, i32* %p) {
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_uses2:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    movd %xmm0, (%rdi)
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_uses2:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm1, (%rdi)
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_uses2:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vpextrd $1, %xmm0, (%rdi)
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_uses2:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vpextrd $1, %xmm0, (%rdi)
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovd %xmm0, %eax
+; AVX-FAST-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 0
+  %x1 = extractelement <4 x i32> %x, i32 1
+  store i32 %x1, i32* %p
+  %x01 = add i32 %x0, %x1
+  ret i32 %x01
+}
+
+define i32 @extract_extract_v4i32_add_i32_uses3(<4 x i32> %x, i32* %p1, i32* %p2) {
+; SSE3-LABEL: extract_extract_v4i32_add_i32_uses3:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    movd %xmm0, (%rdi)
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    addl %ecx, %eax
+; SSE3-NEXT:    movd %xmm0, (%rsi)
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4i32_add_i32_uses3:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    vmovd %xmm0, (%rdi)
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    addl %ecx, %eax
+; AVX-NEXT:    vpextrd $1, %xmm0, (%rsi)
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 0
+  store i32 %x0, i32* %p1
+  %x1 = extractelement <4 x i32> %x, i32 1
+  store i32 %x1, i32* %p2
+  %x01 = add i32 %x0, %x1
+  ret i32 %x01
+}
+
diff --git a/test/CodeGen/X86/phaddsub-undef.ll b/test/CodeGen/X86/phaddsub-undef.ll
new file mode 100644
index 0000000..6fffbef
--- /dev/null
+++ b/test/CodeGen/X86/phaddsub-undef.ll
@@ -0,0 +1,291 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3              | FileCheck %s --check-prefixes=SSE,SSE-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,fast-hops    | FileCheck %s --check-prefixes=SSE,SSE-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx                | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1,AVX1-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops      | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1,AVX1-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2               | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX2,AVX2-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,fast-hops     | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX2,AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl           | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512,AVX512-FAST
+
+; Verify that we correctly fold horizontal binop even in the presence of UNDEFs.
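+; Only some lanes of each result are demanded (the rest are undef), so a
+; single narrow horizontal op on the relevant source halves should be
+; enough; the checks below make sure the fold neither widens the op nor
+; drops the demanded lanes.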
+
+define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: test14_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    phaddd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test14_undef:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test14_undef:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test14_undef:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %vecext = extractelement <8 x i32> %a, i32 0
+  %vecext1 = extractelement <8 x i32> %a, i32 1
+  %add = add i32 %vecext, %vecext1
+  %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+  %vecext2 = extractelement <8 x i32> %b, i32 2
+  %vecext3 = extractelement <8 x i32> %b, i32 3
+  %add4 = add i32 %vecext2, %vecext3
+  %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 3
+  ret <8 x i32> %vecinit5
+}
+
+; Verify that we generate integer horizontal adds instead of two scalar adds followed by vector inserts.
+define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-SLOW-LABEL: test15_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movd %xmm0, %eax
+; SSE-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE-SLOW-NEXT:    addl %eax, %ecx
+; SSE-SLOW-NEXT:    movd %xmm3, %eax
+; SSE-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE-SLOW-NEXT:    movd %xmm0, %edx
+; SSE-SLOW-NEXT:    addl %eax, %edx
+; SSE-SLOW-NEXT:    movd %ecx, %xmm0
+; SSE-SLOW-NEXT:    movd %edx, %xmm1
+; SSE-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE-SLOW-NEXT:    retq
+;
+; SSE-FAST-LABEL: test15_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE-FAST-NEXT:    phaddd %xmm3, %xmm3
+; SSE-FAST-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,1,0,1]
+; SSE-FAST-NEXT:    retq
+;
+; AVX1-SLOW-LABEL: test15_undef:
+; AVX1-SLOW:       # %bb.0:
+; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX1-SLOW-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX1-SLOW-NEXT:    addl %eax, %ecx
+; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
+; AVX1-SLOW-NEXT:    vpextrd $1, %xmm0, %edx
+; AVX1-SLOW-NEXT:    addl %eax, %edx
+; AVX1-SLOW-NEXT:    vmovd %ecx, %xmm0
+; AVX1-SLOW-NEXT:    vmovd %edx, %xmm1
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-SLOW-NEXT:    retq
+;
+; AVX1-FAST-LABEL: test15_undef:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm1, %xmm1
+; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FAST-NEXT:    retq
+;
+; AVX2-LABEL: test15_undef:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test15_undef:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %vecext = extractelement <8 x i32> %a, i32 0
+  %vecext1 = extractelement <8 x i32> %a, i32 1
+  %add = add i32 %vecext, %vecext1
+  %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+  %vecext2 = extractelement <8 x i32> %b, i32 4
+  %vecext3 = extractelement <8 x i32> %b, i32 5
+  %add4 = add i32 %vecext2, %vecext3
+  %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 6
+  ret <8 x i32> %vecinit5
+}
+
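+; PR40243 variant: the source pairs and both defined result lanes all live in
+; the upper 128-bit halves, so on AVX1 the fold only needs the extracted high
+; subvectors.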
+define <8 x i32> @PR40243_alt(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: PR40243_alt:
+; SSE:       # %bb.0:
+; SSE-NEXT:    phaddd %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: PR40243_alt:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: PR40243_alt:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: PR40243_alt:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %a4 = extractelement <8 x i32> %a, i32 4
+  %a5 = extractelement <8 x i32> %a, i32 5
+  %add4 = add i32 %a4, %a5
+  %b6 = extractelement <8 x i32> %b, i32 6
+  %b7 = extractelement <8 x i32> %b, i32 7
+  %add7 = add i32 %b6, %b7
+  %r4 = insertelement <8 x i32> undef, i32 %add4, i32 4
+  %r = insertelement <8 x i32> %r4, i32 %add7, i32 7
+  ret <8 x i32> %r
+}
+
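+; Only result lanes 0 and 1 are defined, so a 128-bit phaddd of the low half
+; of %a with itself is sufficient; the remaining lanes stay undef.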
+define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: test16_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    phaddd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test16_undef:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test16_undef:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test16_undef:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %vecext = extractelement <8 x i32> %a, i32 0
+  %vecext1 = extractelement <8 x i32> %a, i32 1
+  %add = add i32 %vecext, %vecext1
+  %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+  %vecext2 = extractelement <8 x i32> %a, i32 2
+  %vecext3 = extractelement <8 x i32> %a, i32 3
+  %add4 = add i32 %vecext2, %vecext3
+  %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+  ret <8 x i32> %vecinit5
+}
+
+define <16 x i32> @test16_v16i32_undef(<16 x i32> %a, <16 x i32> %b) {
+; SSE-LABEL: test16_v16i32_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    phaddd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test16_v16i32_undef:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test16_v16i32_undef:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test16_v16i32_undef:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %vecext = extractelement <16 x i32> %a, i32 0
+  %vecext1 = extractelement <16 x i32> %a, i32 1
+  %add = add i32 %vecext, %vecext1
+  %vecinit = insertelement <16 x i32> undef, i32 %add, i32 0
+  %vecext2 = extractelement <16 x i32> %a, i32 2
+  %vecext3 = extractelement <16 x i32> %a, i32 3
+  %add4 = add i32 %vecext2, %vecext3
+  %vecinit5 = insertelement <16 x i32> %vecinit, i32 %add4, i32 1
+  ret <16 x i32> %vecinit5
+}
+
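+; All four defined result lanes are pairwise sums within %a, so the two
+; 128-bit halves of %a feed a single phaddd.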
+define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: test17_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    phaddd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test17_undef:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test17_undef:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test17_undef:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %vecext = extractelement <8 x i32> %a, i32 0
+  %vecext1 = extractelement <8 x i32> %a, i32 1
+  %add1 = add i32 %vecext, %vecext1
+  %vecinit1 = insertelement <8 x i32> undef, i32 %add1, i32 0
+  %vecext2 = extractelement <8 x i32> %a, i32 2
+  %vecext3 = extractelement <8 x i32> %a, i32 3
+  %add2 = add i32 %vecext2, %vecext3
+  %vecinit2 = insertelement <8 x i32> %vecinit1, i32 %add2, i32 1
+  %vecext4 = extractelement <8 x i32> %a, i32 4
+  %vecext5 = extractelement <8 x i32> %a, i32 5
+  %add3 = add i32 %vecext4, %vecext5
+  %vecinit3 = insertelement <8 x i32> %vecinit2, i32 %add3, i32 2
+  %vecext6 = extractelement <8 x i32> %a, i32 6
+  %vecext7 = extractelement <8 x i32> %a, i32 7
+  %add4 = add i32 %vecext6, %vecext7
+  %vecinit4 = insertelement <8 x i32> %vecinit3, i32 %add4, i32 3
+  ret <8 x i32> %vecinit4
+}
+
+define <16 x i32> @test17_v16i32_undef(<16 x i32> %a, <16 x i32> %b) {
+; SSE-LABEL: test17_v16i32_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    phaddd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test17_v16i32_undef:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test17_v16i32_undef:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test17_v16i32_undef:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %vecext = extractelement <16 x i32> %a, i32 0
+  %vecext1 = extractelement <16 x i32> %a, i32 1
+  %add1 = add i32 %vecext, %vecext1
+  %vecinit1 = insertelement <16 x i32> undef, i32 %add1, i32 0
+  %vecext2 = extractelement <16 x i32> %a, i32 2
+  %vecext3 = extractelement <16 x i32> %a, i32 3
+  %add2 = add i32 %vecext2, %vecext3
+  %vecinit2 = insertelement <16 x i32> %vecinit1, i32 %add2, i32 1
+  %vecext4 = extractelement <16 x i32> %a, i32 4
+  %vecext5 = extractelement <16 x i32> %a, i32 5
+  %add3 = add i32 %vecext4, %vecext5
+  %vecinit3 = insertelement <16 x i32> %vecinit2, i32 %add3, i32 2
+  %vecext6 = extractelement <16 x i32> %a, i32 6
+  %vecext7 = extractelement <16 x i32> %a, i32 7
+  %add4 = add i32 %vecext6, %vecext7
+  %vecinit4 = insertelement <16 x i32> %vecinit3, i32 %add4, i32 3
+  ret <16 x i32> %vecinit4
+}
+
diff --git a/test/CodeGen/X86/pic-load-remat.ll b/test/CodeGen/X86/pic-load-remat.ll
index 7729752..61849c8 100644
--- a/test/CodeGen/X86/pic-load-remat.ll
+++ b/test/CodeGen/X86/pic-load-remat.ll
@@ -5,30 +5,30 @@
 	br label %bb
 
 bb:		; preds = %bb, %entry
-	%tmp4403 = tail call <8 x i16> @llvm.x86.sse2.psubs.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=2]
-	%tmp4443 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4403 = tail call <8 x i16> @llvm.ssub.sat.v8i16( <8 x i16> zeroinitializer, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=2]
+	%tmp4443 = tail call <8 x i16> @llvm.sadd.sat.v8i16( <8 x i16> zeroinitializer, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4609 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 3, i32 5, i32 6, i32 9 > to <8 x i16>) )		; <<8 x i16>> [#uses=1]
 	%tmp4651 = add <8 x i16> %tmp4609, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 >		; <<8 x i16>> [#uses=1]
 	%tmp4658 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp4651, <8 x i16> bitcast (<4 x i32> < i32 4, i32 1, i32 2, i32 3 > to <8 x i16>) )		; <<8 x i16>> [#uses=1]
 	%tmp4669 = tail call <8 x i16> @llvm.x86.sse2.pavg.w( <8 x i16> < i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170 >, <8 x i16> %tmp4443 ) nounwind readnone 		; <<8 x i16>> [#uses=2]
-	%tmp4679 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4669, <8 x i16> %tmp4669 ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4679 = tail call <8 x i16> @llvm.sadd.sat.v8i16( <8 x i16> %tmp4669, <8 x i16> %tmp4669 ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4689 = add <8 x i16> %tmp4679, %tmp4658		; <<8 x i16>> [#uses=1]
-	%tmp4700 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4689, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4700 = tail call <8 x i16> @llvm.sadd.sat.v8i16( <8 x i16> %tmp4689, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4708 = bitcast <8 x i16> %tmp4700 to <2 x i64>		; <<2 x i64>> [#uses=1]
 	%tmp4772 = add <8 x i16> zeroinitializer, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 >		; <<8 x i16>> [#uses=1]
 	%tmp4779 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp4772, <8 x i16> bitcast (<4 x i32> < i32 3, i32 5, i32 undef, i32 7 > to <8 x i16>) )		; <<8 x i16>> [#uses=1]
 	%tmp4810 = add <8 x i16> zeroinitializer, %tmp4779		; <<8 x i16>> [#uses=1]
-	%tmp4821 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4810, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4821 = tail call <8 x i16> @llvm.sadd.sat.v8i16( <8 x i16> %tmp4810, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4829 = bitcast <8 x i16> %tmp4821 to <2 x i64>		; <<2 x i64>> [#uses=1]
 	%tmp4900 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 1, i32 1, i32 2, i32 2 > to <8 x i16>) )		; <<8 x i16>> [#uses=1]
 	%tmp4911 = tail call <8 x i16> @llvm.x86.sse2.pavg.w( <8 x i16> < i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170 >, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=2]
-	%tmp4921 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4911, <8 x i16> %tmp4911 ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4921 = tail call <8 x i16> @llvm.sadd.sat.v8i16( <8 x i16> %tmp4911, <8 x i16> %tmp4911 ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4931 = add <8 x i16> %tmp4921, %tmp4900		; <<8 x i16>> [#uses=1]
-	%tmp4942 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4931, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4942 = tail call <8 x i16> @llvm.sadd.sat.v8i16( <8 x i16> %tmp4931, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4950 = bitcast <8 x i16> %tmp4942 to <2 x i64>		; <<2 x i64>> [#uses=1]
-	%tmp4957 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4403, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4957 = tail call <8 x i16> @llvm.sadd.sat.v8i16( <8 x i16> %tmp4403, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4958 = bitcast <8 x i16> %tmp4957 to <2 x i64>		; <<2 x i64>> [#uses=1]
-	%tmp4967 = tail call <8 x i16> @llvm.x86.sse2.psubs.w( <8 x i16> %tmp4403, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
+	%tmp4967 = tail call <8 x i16> @llvm.ssub.sat.v8i16( <8 x i16> %tmp4403, <8 x i16> zeroinitializer ) nounwind readnone 		; <<8 x i16>> [#uses=1]
 	%tmp4968 = bitcast <8 x i16> %tmp4967 to <2 x i64>		; <<2 x i64>> [#uses=1]
 	store <2 x i64> %tmp4829, <2 x i64>* null, align 16
 	store <2 x i64> %tmp4958, <2 x i64>* null, align 16
@@ -42,6 +42,6 @@
 
 declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone 
 
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone 
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone 
 
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone 
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone 
diff --git a/test/CodeGen/X86/pmovsx-inreg.ll b/test/CodeGen/X86/pmovsx-inreg.ll
index ea9e296..9ab6917 100644
--- a/test/CodeGen/X86/pmovsx-inreg.ll
+++ b/test/CodeGen/X86/pmovsx-inreg.ll
@@ -53,10 +53,8 @@
 ;
 ; AVX1-LABEL: test2:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxbq 2(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxbq (%rdi), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovdqu %ymm1, (%rax)
@@ -136,10 +134,8 @@
 ;
 ; AVX1-LABEL: test4:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpmovsxbw (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxbd 4(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovdqu %ymm1, (%rax)
@@ -300,10 +296,8 @@
 ;
 ; AVX1-LABEL: test8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpmovsxwd (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxwq 4(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxwq (%rdi), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovdqu %ymm1, (%rax)
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 2d09877..011ca12 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -1251,76 +1251,70 @@
 define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
 ; SSE2-LABEL: mul_v8i64_sext:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm1, %xmm4
-; SSE2-NEXT:    movdqa %xmm0, %xmm5
-; SSE2-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm12
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
+; SSE2-NEXT:    psrad $16, %xmm10
 ; SSE2-NEXT:    pxor %xmm8, %xmm8
+; SSE2-NEXT:    pxor %xmm13, %xmm13
+; SSE2-NEXT:    pcmpgtd %xmm10, %xmm13
+; SSE2-NEXT:    movdqa %xmm10, %xmm9
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm13[2],xmm9[3],xmm13[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE2-NEXT:    psrad $16, %xmm0
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT:    pxor %xmm15, %xmm15
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm15
+; SSE2-NEXT:    movdqa %xmm0, %xmm11
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm15[2],xmm11[3],xmm15[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
+; SSE2-NEXT:    pxor %xmm14, %xmm14
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm14
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1]
+; SSE2-NEXT:    pxor %xmm5, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm5
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    pxor %xmm6, %xmm6
-; SSE2-NEXT:    pcmpgtd %xmm4, %xmm6
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
-; SSE2-NEXT:    pmuludq %xmm4, %xmm3
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
-; SSE2-NEXT:    pmuludq %xmm0, %xmm6
-; SSE2-NEXT:    paddq %xmm3, %xmm6
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pmuludq %xmm4, %xmm0
-; SSE2-NEXT:    pxor %xmm4, %xmm4
-; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT:    psrad $16, %xmm3
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE2-NEXT:    psllq $32, %xmm6
-; SSE2-NEXT:    paddq %xmm6, %xmm0
-; SSE2-NEXT:    pxor %xmm6, %xmm6
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm6
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
+; SSE2-NEXT:    pxor %xmm7, %xmm7
+; SSE2-NEXT:    pcmpgtd %xmm12, %xmm7
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm12 = xmm12[0],xmm7[0],xmm12[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm15, %xmm4
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
-; SSE2-NEXT:    pmuludq %xmm1, %xmm4
+; SSE2-NEXT:    pmuludq %xmm12, %xmm4
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE2-NEXT:    pmuludq %xmm0, %xmm7
+; SSE2-NEXT:    paddq %xmm4, %xmm7
+; SSE2-NEXT:    psllq $32, %xmm7
+; SSE2-NEXT:    pmuludq %xmm12, %xmm0
+; SSE2-NEXT:    paddq %xmm7, %xmm0
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm8[2],xmm15[3],xmm8[3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm15
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
-; SSE2-NEXT:    pmuludq %xmm3, %xmm6
-; SSE2-NEXT:    paddq %xmm4, %xmm6
-; SSE2-NEXT:    pxor %xmm4, %xmm4
-; SSE2-NEXT:    pcmpgtd %xmm7, %xmm4
-; SSE2-NEXT:    psrad $16, %xmm7
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
-; SSE2-NEXT:    pmuludq %xmm3, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
+; SSE2-NEXT:    pmuludq %xmm11, %xmm6
+; SSE2-NEXT:    paddq %xmm15, %xmm6
 ; SSE2-NEXT:    psllq $32, %xmm6
+; SSE2-NEXT:    pmuludq %xmm11, %xmm1
 ; SSE2-NEXT:    paddq %xmm6, %xmm1
-; SSE2-NEXT:    pxor %xmm6, %xmm6
-; SSE2-NEXT:    pcmpgtd %xmm2, %xmm6
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE2-NEXT:    movdqa %xmm13, %xmm4
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
 ; SSE2-NEXT:    pmuludq %xmm2, %xmm4
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
-; SSE2-NEXT:    pmuludq %xmm7, %xmm6
-; SSE2-NEXT:    paddq %xmm4, %xmm6
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pmuludq %xmm7, %xmm2
-; SSE2-NEXT:    pxor %xmm5, %xmm5
-; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
-; SSE2-NEXT:    psrad $16, %xmm4
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE2-NEXT:    psllq $32, %xmm6
-; SSE2-NEXT:    paddq %xmm6, %xmm2
-; SSE2-NEXT:    pxor %xmm6, %xmm6
-; SSE2-NEXT:    pcmpgtd %xmm3, %xmm6
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
-; SSE2-NEXT:    pmuludq %xmm3, %xmm5
-; SSE2-NEXT:    pmuludq %xmm4, %xmm6
-; SSE2-NEXT:    paddq %xmm5, %xmm6
-; SSE2-NEXT:    pmuludq %xmm4, %xmm3
-; SSE2-NEXT:    psllq $32, %xmm6
-; SSE2-NEXT:    paddq %xmm6, %xmm3
+; SSE2-NEXT:    pmuludq %xmm10, %xmm5
+; SSE2-NEXT:    paddq %xmm4, %xmm5
+; SSE2-NEXT:    psllq $32, %xmm5
+; SSE2-NEXT:    pmuludq %xmm10, %xmm2
+; SSE2-NEXT:    paddq %xmm5, %xmm2
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm13 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm13
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm14 = xmm14[0],xmm8[0],xmm14[1],xmm8[1]
+; SSE2-NEXT:    pmuludq %xmm9, %xmm14
+; SSE2-NEXT:    paddq %xmm13, %xmm14
+; SSE2-NEXT:    psllq $32, %xmm14
+; SSE2-NEXT:    pmuludq %xmm9, %xmm3
+; SSE2-NEXT:    paddq %xmm14, %xmm3
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: mul_v8i64_sext:
@@ -1369,3 +1363,49 @@
   %3 = mul <8 x i64> %1, %2
   ret <8 x i64> %3
 }
+
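+; The shl-by-32 / ashr-exact-32 pair sign-extends the low 32 bits of each
+; 64-bit lane, which is exactly the operand form pmuldq consumes, so squaring
+; it folds to a single pmuldq (and the masked variant below to pmuludq).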
+define <2 x i64> @pmuldq_square(<2 x i64> %x) {
+; SSE2-LABEL: pmuldq_square:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE2-NEXT:    psrad $31, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    psrlq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    paddq %xmm0, %xmm0
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm1, %xmm1
+; SSE2-NEXT:    paddq %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: pmuldq_square:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmuldq %xmm0, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: pmuldq_square:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmuldq %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = shl <2 x i64> %x, <i64 32, i64 32>
+  %2 = ashr exact <2 x i64> %1, <i64 32, i64 32>
+  %3 = mul nsw <2 x i64> %2, %2
+  ret <2 x i64> %3
+}
+
+define <2 x i64> @pmuludq_square(<2 x i64> %x) {
+; SSE-LABEL: pmuludq_square:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmuludq %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: pmuludq_square:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmuludq %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = and <2 x i64> %x, <i64 4294967295, i64 4294967295>
+  %2 = mul nuw <2 x i64> %1, %1
+  ret <2 x i64> %2
+}
diff --git a/test/CodeGen/X86/postra-licm.ll b/test/CodeGen/X86/postra-licm.ll
index 329184a..72018b1 100644
--- a/test/CodeGen/X86/postra-licm.ll
+++ b/test/CodeGen/X86/postra-licm.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s -check-prefix=X86-32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s -check-prefix=X86-64
+; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -frame-pointer=all | FileCheck %s -check-prefix=X86-32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -frame-pointer=all | FileCheck %s -check-prefix=X86-64
 
 ; MachineLICM should be able to hoist a loop-invariant reload out of the loop.
 ; Only linear scan needs this, -regalloc=greedy sinks the spill instead.
diff --git a/test/CodeGen/X86/pr1489.ll b/test/CodeGen/X86/pr1489.ll
index 13ced2a3..36d30e8 100644
--- a/test/CodeGen/X86/pr1489.ll
+++ b/test/CodeGen/X86/pr1489.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -disable-fp-elim -O0 -mcpu=i486 | grep 1082126238 | count 3
-; RUN: llc < %s -disable-fp-elim -O0 -mcpu=i486 | grep -- -1236950581 | count 1
+; RUN: llc < %s -frame-pointer=all -O0 -mcpu=i486 | grep 1082126238 | count 3
+; RUN: llc < %s -frame-pointer=all -O0 -mcpu=i486 | grep -- -1236950581 | count 1
 ;; magic constants are 3.999f and half of 3.999
 ; ModuleID = '1489.c'
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/X86/pr30511.ll b/test/CodeGen/X86/pr30511.ll
index 69d0a94..6764656 100644
--- a/test/CodeGen/X86/pr30511.ll
+++ b/test/CodeGen/X86/pr30511.ll
@@ -9,7 +9,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    cvtdq2pd %xmm0, %xmm0
-; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    mulsd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    movq %xmm0, %rax
 ; CHECK-NEXT:    retq
   %1 = fadd <2 x double> %a, <double 0x4338000000000000, double 0x4338000000000000>
diff --git a/test/CodeGen/X86/pr3154.ll b/test/CodeGen/X86/pr3154.ll
index 5da8840..fa788b2 100644
--- a/test/CodeGen/X86/pr3154.ll
+++ b/test/CodeGen/X86/pr3154.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mattr=+sse2
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mattr=+sse2 -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mattr=+sse2 -relocation-model=pic -frame-pointer=all
 ; PR3154
 
 define void @ff_flac_compute_autocorr_sse2(i32* %data, i32 %len, i32 %lag, double* %autoc) nounwind {
diff --git a/test/CodeGen/X86/pr32329.ll b/test/CodeGen/X86/pr32329.ll
index 7ccd559..2110946 100644
--- a/test/CodeGen/X86/pr32329.ll
+++ b/test/CodeGen/X86/pr32329.ll
@@ -41,7 +41,7 @@
 ; X86-NEXT:    movl %ebx, %edi
 ; X86-NEXT:    subl %esi, %edi
 ; X86-NEXT:    imull %edi, %ecx
-; X86-NEXT:    addl $-1437483407, %ecx # imm = 0xAA51BE71
+; X86-NEXT:    addb $113, %cl
 ; X86-NEXT:    movl $9, %esi
 ; X86-NEXT:    xorl %ebp, %ebp
 ; X86-NEXT:    shldl %cl, %esi, %ebp
@@ -80,7 +80,7 @@
 ; X64-NEXT:    movl %edi, %esi
 ; X64-NEXT:    subl %r8d, %esi
 ; X64-NEXT:    imull %esi, %ecx
-; X64-NEXT:    addl $-1437483407, %ecx # imm = 0xAA51BE71
+; X64-NEXT:    addb $113, %cl
 ; X64-NEXT:    movl $9, %edx
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shlq %cl, %rdx
diff --git a/test/CodeGen/X86/pr32345.ll b/test/CodeGen/X86/pr32345.ll
index 65fcf05..cec3a69 100644
--- a/test/CodeGen/X86/pr32345.ll
+++ b/test/CodeGen/X86/pr32345.ll
@@ -69,8 +69,8 @@
 ; 6860-NEXT:    xorl %ecx, %esi
 ; 6860-NEXT:    movw %si, %ax
 ; 6860-NEXT:    movzwl %ax, %esi
-; 6860-NEXT:    addl $-16610, %ecx # imm = 0xBF1E
 ; 6860-NEXT:    movb %cl, %bl
+; 6860-NEXT:    addb $30, %bl
 ; 6860-NEXT:    xorl %ecx, %ecx
 ; 6860-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; 6860-NEXT:    movb %bl, %cl
@@ -98,14 +98,13 @@
 ;
 ; X64-LABEL: foo:
 ; X64:       # %bb.0: # %bb
-; X64-NEXT:    movzwl {{.*}}(%rip), %eax
 ; X64-NEXT:    movzwl {{.*}}(%rip), %ecx
-; X64-NEXT:    movl %ecx, %edx
-; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    xorl %eax, %edx
-; X64-NEXT:    movzwl %dx, %eax
+; X64-NEXT:    movzwl {{.*}}(%rip), %eax
+; X64-NEXT:    xorw %cx, %ax
+; X64-NEXT:    xorl %ecx, %eax
+; X64-NEXT:    movzwl %ax, %eax
 ; X64-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NEXT:    addl $-16610, %ecx # imm = 0xBF1E
+; X64-NEXT:    addb $30, %cl
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shrq %cl, %rax
 ; X64-NEXT:    movb %al, (%rax)
@@ -120,15 +119,14 @@
 ; 686-NEXT:    .cfi_def_cfa_register %ebp
 ; 686-NEXT:    andl $-8, %esp
 ; 686-NEXT:    subl $8, %esp
-; 686-NEXT:    movzwl var_22, %eax
 ; 686-NEXT:    movzwl var_27, %ecx
-; 686-NEXT:    movl %ecx, %edx
-; 686-NEXT:    xorl %ecx, %edx
-; 686-NEXT:    xorl %eax, %edx
-; 686-NEXT:    movzwl %dx, %eax
+; 686-NEXT:    movzwl var_22, %eax
+; 686-NEXT:    xorw %cx, %ax
+; 686-NEXT:    xorl %ecx, %eax
+; 686-NEXT:    movzwl %ax, %eax
 ; 686-NEXT:    movl %eax, (%esp)
 ; 686-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; 686-NEXT:    addl $-16610, %ecx # imm = 0xBF1E
+; 686-NEXT:    addb $30, %cl
 ; 686-NEXT:    xorl %edx, %edx
 ; 686-NEXT:    shrdl %cl, %edx, %eax
 ; 686-NEXT:    testb $32, %cl
diff --git a/test/CodeGen/X86/pr33290.ll b/test/CodeGen/X86/pr33290.ll
index b5d9754..44b7dca 100644
--- a/test/CodeGen/X86/pr33290.ll
+++ b/test/CodeGen/X86/pr33290.ll
@@ -14,8 +14,8 @@
 ; X86-NEXT:  .LBB0_1: # %for.cond
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movzbl c, %ecx
-; X86-NEXT:    leal a+2(%ecx), %ecx
 ; X86-NEXT:    movb $0, c
+; X86-NEXT:    leal a+2(%ecx), %ecx
 ; X86-NEXT:    movl %ecx, (%eax)
 ; X86-NEXT:    jmp .LBB0_1
 ;
diff --git a/test/CodeGen/X86/pr33349.ll b/test/CodeGen/X86/pr33349.ll
index ec9f238..63edae0 100644
--- a/test/CodeGen/X86/pr33349.ll
+++ b/test/CodeGen/X86/pr33349.ll
@@ -12,32 +12,32 @@
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kshiftrw $1, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %eax
+; KNL-NEXT:    kshiftrw $2, %k0, %k1
+; KNL-NEXT:    kshiftrw $1, %k1, %k2
+; KNL-NEXT:    kmovw %k1, %ecx
 ; KNL-NEXT:    testb $1, %al
 ; KNL-NEXT:    fld1
 ; KNL-NEXT:    fldz
 ; KNL-NEXT:    fld %st(0)
 ; KNL-NEXT:    fcmovne %st(2), %st(0)
-; KNL-NEXT:    kshiftrw $2, %k0, %k1
-; KNL-NEXT:    kshiftrw $1, %k1, %k2
-; KNL-NEXT:    kmovw %k2, %eax
-; KNL-NEXT:    testb $1, %al
+; KNL-NEXT:    testb $1, %cl
 ; KNL-NEXT:    fld %st(1)
 ; KNL-NEXT:    fcmovne %st(3), %st(0)
-; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    kmovw %k2, %eax
 ; KNL-NEXT:    testb $1, %al
 ; KNL-NEXT:    fld %st(2)
 ; KNL-NEXT:    fcmovne %st(4), %st(0)
-; KNL-NEXT:    kmovw %k1, %eax
+; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    testb $1, %al
 ; KNL-NEXT:    fxch %st(3)
 ; KNL-NEXT:    fcmovne %st(4), %st(0)
 ; KNL-NEXT:    fstp %st(4)
 ; KNL-NEXT:    fxch %st(3)
-; KNL-NEXT:    fstpt 20(%rdi)
-; KNL-NEXT:    fxch %st(1)
 ; KNL-NEXT:    fstpt (%rdi)
 ; KNL-NEXT:    fxch %st(1)
 ; KNL-NEXT:    fstpt 30(%rdi)
+; KNL-NEXT:    fxch %st(1)
+; KNL-NEXT:    fstpt 20(%rdi)
 ; KNL-NEXT:    fstpt 10(%rdi)
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -46,20 +46,20 @@
 ; SKX:       # %bb.0: # %bb
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovd2m %xmm0, %k0
+; SKX-NEXT:    kshiftrb $1, %k0, %k1
+; SKX-NEXT:    kmovd %k1, %eax
 ; SKX-NEXT:    kshiftrb $2, %k0, %k1
-; SKX-NEXT:    kshiftrw $1, %k1, %k2
-; SKX-NEXT:    kmovd %k2, %eax
+; SKX-NEXT:    kshiftrb $1, %k1, %k2
+; SKX-NEXT:    kmovd %k1, %ecx
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    fld1
 ; SKX-NEXT:    fldz
 ; SKX-NEXT:    fld %st(0)
 ; SKX-NEXT:    fcmovne %st(2), %st(0)
-; SKX-NEXT:    kmovd %k1, %eax
-; SKX-NEXT:    testb $1, %al
+; SKX-NEXT:    testb $1, %cl
 ; SKX-NEXT:    fld %st(1)
 ; SKX-NEXT:    fcmovne %st(3), %st(0)
-; SKX-NEXT:    kshiftrw $1, %k0, %k1
-; SKX-NEXT:    kmovd %k1, %eax
+; SKX-NEXT:    kmovd %k2, %eax
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    fld %st(2)
 ; SKX-NEXT:    fcmovne %st(4), %st(0)
@@ -71,10 +71,10 @@
 ; SKX-NEXT:    fxch %st(3)
 ; SKX-NEXT:    fstpt (%rdi)
 ; SKX-NEXT:    fxch %st(1)
-; SKX-NEXT:    fstpt 10(%rdi)
+; SKX-NEXT:    fstpt 30(%rdi)
 ; SKX-NEXT:    fxch %st(1)
 ; SKX-NEXT:    fstpt 20(%rdi)
-; SKX-NEXT:    fstpt 30(%rdi)
+; SKX-NEXT:    fstpt 10(%rdi)
 ; SKX-NEXT:    retq
  bb:
    %tmp = select <4 x i1> %m, <4 x x86_fp80> <x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80             0xK3FFF8000000000000000>, <4 x x86_fp80> zeroinitializer
diff --git a/test/CodeGen/X86/pr34137.ll b/test/CodeGen/X86/pr34137.ll
index 4e81cb8..1a85a66 100644
--- a/test/CodeGen/X86/pr34137.ll
+++ b/test/CodeGen/X86/pr34137.ll
@@ -11,11 +11,12 @@
 ; CHECK-NEXT:    movzwl {{.*}}(%rip), %eax
 ; CHECK-NEXT:    movzwl {{.*}}(%rip), %ecx
 ; CHECK-NEXT:    andl %eax, %ecx
-; CHECK-NEXT:    andl %eax, %ecx
-; CHECK-NEXT:    movzwl %cx, %ecx
-; CHECK-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movl %eax, %edx
+; CHECK-NEXT:    andl %ecx, %edx
+; CHECK-NEXT:    movzwl %dx, %edx
+; CHECK-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    testw %cx, %cx
+; CHECK-NEXT:    testw %cx, %ax
 ; CHECK-NEXT:    sete %dl
 ; CHECK-NEXT:    andl %eax, %edx
 ; CHECK-NEXT:    movq %rdx, {{.*}}(%rip)
diff --git a/test/CodeGen/X86/pr34381.ll b/test/CodeGen/X86/pr34381.ll
index 3053ddd..831b1d2 100644
--- a/test/CodeGen/X86/pr34381.ll
+++ b/test/CodeGen/X86/pr34381.ll
@@ -13,11 +13,9 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movsbl {{.*}}(%rip), %eax
 ; CHECK-NEXT:    negl %eax
-; CHECK-NEXT:    cmpl %eax, {{.*}}(%rip)
-; CHECK-NEXT:    setb %al
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    addb $-1, %al
-; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    cmpl %eax, {{.*}}(%rip)
+; CHECK-NEXT:    setb %cl
 ; CHECK-NEXT:    movl %ecx, {{.*}}(%rip)
 ; CHECK-NEXT:    movb {{.*}}(%rip), %al
 ; CHECK-NEXT:    movb %al, {{.*}}(%rip)
diff --git a/test/CodeGen/X86/pr35765.ll b/test/CodeGen/X86/pr35765.ll
index 6ff504d..1c6035f 100644
--- a/test/CodeGen/X86/pr35765.ll
+++ b/test/CodeGen/X86/pr35765.ll
@@ -9,10 +9,9 @@
 define void @PR35765() {
 ; CHECK-LABEL: PR35765:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movzwl {{.*}}(%rip), %ecx
-; CHECK-NEXT:    addl $-1398, %ecx # imm = 0xFA8A
+; CHECK-NEXT:    movb {{.*}}(%rip), %cl
+; CHECK-NEXT:    addb $-118, %cl
 ; CHECK-NEXT:    movl $4, %eax
-; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; CHECK-NEXT:    shll %cl, %eax
 ; CHECK-NEXT:    movzwl {{.*}}(%rip), %ecx
 ; CHECK-NEXT:    movzwl {{.*}}(%rip), %edx
diff --git a/test/CodeGen/X86/pr37499.ll b/test/CodeGen/X86/pr37499.ll
index 3fd2ca6..2995017 100644
--- a/test/CodeGen/X86/pr37499.ll
+++ b/test/CodeGen/X86/pr37499.ll
@@ -4,7 +4,11 @@
 define <2 x i64> @undef_tval() {
 ; CHECK-LABEL: undef_tval:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vpmovqw %zmm0, %xmm0 {%k1}
+; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %1 = tail call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> undef, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, i8 1) #3
   %2 = bitcast <8 x i16> %1 to <2 x i64>
diff --git a/test/CodeGen/X86/pr38217.ll b/test/CodeGen/X86/pr38217.ll
new file mode 100644
index 0000000..951d464
--- /dev/null
+++ b/test/CodeGen/X86/pr38217.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+@_ZL11DIGIT_TABLE = constant [201 x i8] c"00010203040506070809101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899\00", align 16
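+; DIGIT_TABLE stores "00".."99" as adjacent ASCII pairs, so indexing at 2*n
+; and doing one 16-bit load fetches both digits of n at once (the scheme used
+; by Ryu-style d2s printers, which this function is taken from).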
+
+define void @_Z12d2s_bufferedmPc(i64, i8* nocapture) {
+; CHECK-LABEL: _Z12d2s_bufferedmPc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cmpq $10000, %rdi # imm = 0x2710
+; CHECK-NEXT:    jb .LBB0_3
+; CHECK-NEXT:  # %bb.1: # %.preheader
+; CHECK-NEXT:    movq %rdi, %r9
+; CHECK-NEXT:    xorl %r10d, %r10d
+; CHECK-NEXT:    movabsq $3777893186295716171, %r8 # imm = 0x346DC5D63886594B
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movq %r9, %rax
+; CHECK-NEXT:    mulq %r8
+; CHECK-NEXT:    shrq $11, %rdx
+; CHECK-NEXT:    imulq $10000, %rdx, %rax # imm = 0x2710
+; CHECK-NEXT:    movq %r9, %rdi
+; CHECK-NEXT:    subq %rax, %rdi
+; CHECK-NEXT:    imulq $1374389535, %rdi, %rax # imm = 0x51EB851F
+; CHECK-NEXT:    shrq $37, %rax
+; CHECK-NEXT:    imull $100, %eax, %ecx
+; CHECK-NEXT:    subl %ecx, %edi
+; CHECK-NEXT:    movl %r10d, %r11d
+; CHECK-NEXT:    movq %rsi, %rcx
+; CHECK-NEXT:    subq %r11, %rcx
+; CHECK-NEXT:    movzwl _ZL11DIGIT_TABLE(%rdi,%rdi), %edi
+; CHECK-NEXT:    movw %di, -1(%rcx)
+; CHECK-NEXT:    movzwl _ZL11DIGIT_TABLE(%rax,%rax), %eax
+; CHECK-NEXT:    movw %ax, -3(%rcx)
+; CHECK-NEXT:    addl $4, %r10d
+; CHECK-NEXT:    cmpq $99999999, %r9 # imm = 0x5F5E0FF
+; CHECK-NEXT:    movq %rdx, %r9
+; CHECK-NEXT:    ja .LBB0_2
+; CHECK-NEXT:  .LBB0_3:
+; CHECK-NEXT:    retq
+  %3 = icmp ugt i64 %0, 9999
+  br i1 %3, label %4, label %31
+
+; <label>:4:                                      ; preds = %2, %4
+  %5 = phi i64 [ %9, %4 ], [ %0, %2 ]
+  %6 = phi i32 [ %29, %4 ], [ 0, %2 ]
+  %7 = urem i64 %5, 10000
+  %8 = trunc i64 %7 to i32
+  %9 = udiv i64 %5, 10000
+  %10 = urem i32 %8, 100
+  %11 = shl nuw nsw i32 %10, 1
+  %12 = udiv i32 %8, 100
+  %13 = shl nuw nsw i32 %12, 1
+  %14 = zext i32 %6 to i64
+  %15 = sub nsw i64 0, %14
+  %16 = getelementptr inbounds i8, i8* %1, i64 %15
+  %17 = getelementptr inbounds i8, i8* %16, i64 -1
+  %18 = zext i32 %11 to i64
+  %19 = getelementptr inbounds [201 x i8], [201 x i8]* @_ZL11DIGIT_TABLE, i64 0, i64 %18
+  %20 = bitcast i8* %19 to i16*
+  %21 = bitcast i8* %17 to i16*
+  %22 = load i16, i16* %20, align 2
+  store i16 %22, i16* %21, align 1
+  %23 = getelementptr inbounds i8, i8* %16, i64 -3
+  %24 = zext i32 %13 to i64
+  %25 = getelementptr inbounds [201 x i8], [201 x i8]* @_ZL11DIGIT_TABLE, i64 0, i64 %24
+  %26 = bitcast i8* %25 to i16*
+  %27 = bitcast i8* %23 to i16*
+  %28 = load i16, i16* %26, align 2
+  store i16 %28, i16* %27, align 1
+  %29 = add i32 %6, 4
+  %30 = icmp ugt i64 %5, 99999999
+  br i1 %30, label %4, label %31
+
+; <label>:31:                                     ; preds = %4, %2
+  ret void
+}
diff --git a/test/CodeGen/X86/pr38743.ll b/test/CodeGen/X86/pr38743.ll
new file mode 100644
index 0000000..ac5d48e
--- /dev/null
+++ b/test/CodeGen/X86/pr38743.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+%0 = type { %1 }
+%1 = type { %2 }
+%2 = type { %3 }
+%3 = type { %4 }
+%4 = type { %5 }
+%5 = type { i64, i64, i8* }
+%6 = type { %7, [23 x i8] }
+%7 = type { i8 }
+
+@.str.16 = external dso_local unnamed_addr constant [16 x i8], align 1
+@.str.17 = external dso_local unnamed_addr constant [12 x i8], align 1
+@.str.18 = external dso_local unnamed_addr constant [15 x i8], align 1
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #0
+
+define void @pr38743() #1 align 2 {
+; CHECK-LABEL: pr38743:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    cmpl $3, %eax
+; CHECK-NEXT:    je .LBB0_4
+; CHECK-NEXT:  # %bb.1: # %bb
+; CHECK-NEXT:    cmpl $1, %eax
+; CHECK-NEXT:    je .LBB0_2
+; CHECK-NEXT:  # %bb.3: # %bb5
+; CHECK-NEXT:    movzwl .str.17+{{.*}}(%rip), %eax
+; CHECK-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq {{.*}}(%rip), %rax
+; CHECK-NEXT:    jmp .LBB0_5
+; CHECK-NEXT:  .LBB0_4: # %bb8
+; CHECK-NEXT:    movq .str.18+{{.*}}(%rip), %rax
+; CHECK-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq {{.*}}(%rip), %rax
+; CHECK-NEXT:    jmp .LBB0_5
+; CHECK-NEXT:  .LBB0_2: # %bb2
+; CHECK-NEXT:    movq .str.16+{{.*}}(%rip), %rax
+; CHECK-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq {{.*}}(%rip), %rax
+; CHECK-NEXT:  .LBB0_5: # %bb12
+; CHECK-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    movq %rax, (%rax)
+; CHECK-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT:    movzwl -{{[0-9]+}}(%rsp), %edx
+; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT:    movb -{{[0-9]+}}(%rsp), %dil
+; CHECK-NEXT:    movb %al, (%rax)
+; CHECK-NEXT:    movq %rcx, 1(%rax)
+; CHECK-NEXT:    movw %dx, 9(%rax)
+; CHECK-NEXT:    movl %esi, 11(%rax)
+; CHECK-NEXT:    movb %dil, 15(%rax)
+; CHECK-NEXT:    retq
+bb:
+  %tmp = alloca %0, align 16
+  %tmp1 = bitcast %0* %tmp to i8*
+  switch i32 undef, label %bb11 [
+    i32 1, label %bb2
+    i32 4, label %bb5
+    i32 2, label %bb5
+    i32 3, label %bb8
+  ]
+
+bb2:                                              ; preds = %bb
+  %tmp3 = bitcast %0* %tmp to %6*
+  %tmp4 = getelementptr inbounds %6, %6* %tmp3, i64 0, i32 1, i64 0
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 1 %tmp4, i8* align 1 getelementptr inbounds ([16 x i8], [16 x i8]* @.str.16, i64 0, i64 0), i64 15, i1 false)
+  br label %bb12
+
+bb5:                                              ; preds = %bb, %bb
+  %tmp6 = bitcast %0* %tmp to %6*
+  %tmp7 = getelementptr inbounds %6, %6* %tmp6, i64 0, i32 1, i64 0
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 1 %tmp7, i8* align 1 getelementptr inbounds ([12 x i8], [12 x i8]* @.str.17, i64 0, i64 0), i64 10, i1 false)
+  br label %bb12
+
+bb8:                                              ; preds = %bb
+  %tmp9 = bitcast %0* %tmp to %6*
+  %tmp10 = getelementptr inbounds %6, %6* %tmp9, i64 0, i32 1, i64 0
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 1 %tmp10, i8* align 1 getelementptr inbounds ([15 x i8], [15 x i8]* @.str.18, i64 0, i64 0), i64 14, i1 false)
+  br label %bb12
+
+bb11:                                             ; preds = %bb
+  unreachable
+
+bb12:                                             ; preds = %bb8, %bb5, %bb2
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 8 undef, i8* nonnull align 16 %tmp1, i64 24, i1 false) #2
+  ret void
+}
+
+attributes #0 = { argmemonly nounwind }
+attributes #1 = { "target-features"="+sse,+sse2,+sse3,+sse4.2" }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/X86/pr39187-g.ll b/test/CodeGen/X86/pr39187-g.ll
index 46da641..2c9dcc5 100644
--- a/test/CodeGen/X86/pr39187-g.ll
+++ b/test/CodeGen/X86/pr39187-g.ll
@@ -7,22 +7,8 @@
 ; location keep references to its basic block, causing the debug information
 ; to become ambiguous. It causes the debugger to display unreached lines.
 
-; Change the debug location associated with the hoisted instruction, to
-; the debug location from the insertion point in the 'if' block.
-
-; The insertion point is the previous non-debug instruction before the
-; terminator in the parent basic block of the hoisted instruction.
-
-; IR with '-g':
-;
-;  [...]
-;  %frombool = zext i1 %cmp to i8, !dbg !26
-;  call void @llvm.dbg.value(metadata i8 %frombool, metadata !15, metadata !DIExpression()), !dbg !26
-;  call void @llvm.dbg.value(metadata i32 0, metadata !17, metadata !DIExpression()), !dbg !27
-;  br i1 %cmp, label %if.then, label %if.else
-;  [...]
-;
-; Insertion point is: %frombool = zext i1 %cmp to i8, !dbg !26
+; Check that hoisted instructions get unknown-location line numbers -- there
+; is no correct line number for code that has been common'd in this way.
 
 ; IR generated with:
 ; clang -S -g -gno-column-info -O2 -emit-llvm pr39187.cpp -o pr39187-g.ll -mllvm -opt-bisect-limit=10
@@ -54,7 +40,8 @@
 ; CHECK:  %frombool = zext i1 %cmp to i8, !dbg !16
 ; CHECK:  call void @llvm.dbg.value(metadata i8 %frombool, metadata !13, metadata !DIExpression()), !dbg !16
 ; CHECK:  call void @llvm.dbg.value(metadata i32 0, metadata !15, metadata !DIExpression()), !dbg !17
-; CHECK:  %. = select i1 %cmp, i32 8, i32 4, !dbg !16
+; CHECK:  %. = select i1 %cmp, i32 8, i32 4, !dbg ![[MERGEDLOC:[0-9]+]]
+; CHECK:  ![[MERGEDLOC]] = !DILocation(line: 0, scope: !7)
 
 ; ModuleID = 'pr39187.cpp'
 source_filename = "pr39187.cpp"
@@ -77,11 +64,11 @@
 
 if.then:                                          ; preds = %entry
   call void @llvm.dbg.value(metadata i32 8, metadata !14, metadata !DIExpression()), !dbg !25
-  br label %if.end
+  br label %if.end, !dbg !25
 
 if.else:                                          ; preds = %entry
-  call void @llvm.dbg.value(metadata i32 4, metadata !14, metadata !DIExpression()), !dbg !25
-  br label %if.end
+  call void @llvm.dbg.value(metadata i32 4, metadata !14, metadata !DIExpression()), !dbg !27
+  br label %if.end, !dbg !27
 
 if.end:                                           ; preds = %if.else, %if.then
   %beards.0 = phi i32 [ 8, %if.then ], [ 4, %if.else ]
diff --git a/test/CodeGen/X86/pr40090.ll b/test/CodeGen/X86/pr40090.ll
new file mode 100644
index 0000000..d1c38e4
--- /dev/null
+++ b/test/CodeGen/X86/pr40090.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
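+; The select on %y == 0 below lowers to bsr (which sets ZF for a zero source)
+; followed by cmove, avoiding a compare-and-branch.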
+define i64 @foo(i64 %x, i64 %y) {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    bsrq %rdi, %rax
+; CHECK-NEXT:    xorq $64, %rax
+; CHECK-NEXT:    bsrq %rsi, %rcx
+; CHECK-NEXT:    cmoveq %rax, %rcx
+; CHECK-NEXT:    movl $63, %eax
+; CHECK-NEXT:    subq %rcx, %rax
+; CHECK-NEXT:    retq
+  %1 = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
+  %2 = xor i64 %1, 127
+  %3 = tail call i64 @llvm.ctlz.i64(i64 %y, i1 true)
+  %4 = xor i64 %3, 63
+  %5 = icmp eq i64 %y, 0
+  %6 = select i1 %5, i64 %2, i64 %4
+  %7 = sub nsw i64 63, %6
+  ret i64 %7
+}
+
+declare i64 @llvm.ctlz.i64(i64, i1)
diff --git a/test/CodeGen/X86/pr40289-64bit.ll b/test/CodeGen/X86/pr40289-64bit.ll
new file mode 100644
index 0000000..fd7e2d5
--- /dev/null
+++ b/test/CodeGen/X86/pr40289-64bit.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s
+
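+; cc 92 is x86 regcall; a <9 x i64> return value ties up the GPRs, so the
+; epilogue must restore the stack with addq $8, %rsp rather than a pop that
+; would clobber %rsp (PR40289).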
+define cc 92 < 9 x i64 > @clobber() {
+  %1 = alloca i64
+  %2 = load volatile i64, i64* %1
+  ret < 9 x i64 > undef
+  ; CHECK-LABEL: clobber:
+  ; CHECK-NOT: popq %rsp
+  ; CHECK: addq $8, %rsp
+}
diff --git a/test/CodeGen/X86/pr40289.ll b/test/CodeGen/X86/pr40289.ll
new file mode 100644
index 0000000..abcb5fa
--- /dev/null
+++ b/test/CodeGen/X86/pr40289.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s
+
+define < 3 x i32 > @clobber() {
+  %1 = alloca i32
+  %2 = load volatile i32, i32* %1
+  ret < 3 x i32 > undef
+  ; CHECK-LABEL: clobber:
+  ; CHECK-NOT: popl %esp
+  ; CHECK: addl $4, %esp
+}
diff --git a/test/CodeGen/X86/pr9743.ll b/test/CodeGen/X86/pr9743.ll
index ac3d457..976b749 100644
--- a/test/CodeGen/X86/pr9743.ll
+++ b/test/CodeGen/X86/pr9743.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -disable-fp-elim -asm-verbose=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -frame-pointer=all -asm-verbose=0 | FileCheck %s
 
 define void @f() {
   ret void
diff --git a/test/CodeGen/X86/prefer-avx256-mask-extend.ll b/test/CodeGen/X86/prefer-avx256-mask-extend.ll
index b4f8e5b..b4d452f 100644
--- a/test/CodeGen/X86/prefer-avx256-mask-extend.ll
+++ b/test/CodeGen/X86/prefer-avx256-mask-extend.ll
@@ -48,11 +48,9 @@
 ; AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
 ; AVX256-NEXT:    vpmovdw %ymm1, %xmm1
-; AVX256-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX256-NEXT:    vpsrlw $8, %xmm0, %xmm0
-; AVX256-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX256-NEXT:    vzeroupper
 ; AVX256-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll b/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
index 92f6e27..7f4480c 100644
--- a/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
+++ b/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
@@ -34,11 +34,9 @@
 ; AVX256VL-NEXT:    kshiftrw $8, %k0, %k2
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm1, %xmm1
-; AVX256VL-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX256VL-NEXT:    vpsrlw $8, %xmm0, %xmm0
-; AVX256VL-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX256VL-NEXT:    vzeroupper
 ; AVX256VL-NEXT:    retq
 ;
@@ -169,11 +167,9 @@
 ; AVX256VL-NEXT:    kshiftrw $8, %k0, %k2
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm1, %xmm1
-; AVX256VL-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX256VL-NEXT:    vpsrlw $8, %xmm0, %xmm0
-; AVX256VL-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX256VL-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX256VL-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/promote-vec3.ll b/test/CodeGen/X86/promote-vec3.ll
index 29832a2..db337b2 100644
--- a/test/CodeGen/X86/promote-vec3.ll
+++ b/test/CodeGen/X86/promote-vec3.ll
@@ -78,11 +78,9 @@
 ; SSE3-NEXT:    pinsrw $2, %eax, %xmm0
 ; SSE3-NEXT:    psllw $8, %xmm0
 ; SSE3-NEXT:    psraw $8, %xmm0
-; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE3-NEXT:    psrad $16, %xmm0
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    pextrw $2, %xmm0, %edx
-; SSE3-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE3-NEXT:    pextrw $0, %xmm0, %eax
+; SSE3-NEXT:    pextrw $1, %xmm0, %edx
+; SSE3-NEXT:    pextrw $2, %xmm0, %ecx
 ; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE3-NEXT:    # kill: def $dx killed $dx killed $edx
 ; SSE3-NEXT:    # kill: def $cx killed $cx killed $ecx
diff --git a/test/CodeGen/X86/psubus.ll b/test/CodeGen/X86/psubus.ll
index 53d4ccc..2fc7029 100644
--- a/test/CodeGen/X86/psubus.ll
+++ b/test/CodeGen/X86/psubus.ll
@@ -936,19 +936,16 @@
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpminud %xmm3, %xmm2, %xmm4
 ; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm2, %xmm4
-; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm0, %xmm6
-; AVX1-NEXT:    vpxor %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm0, %xmm5
 ; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpandn %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpsubd %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpandn %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT:    vpand %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -957,14 +954,11 @@
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; AVX2-NEXT:    vpxor %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -1067,22 +1061,19 @@
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpmaxud %xmm0, %xmm1, %xmm6
-; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm1, %xmm6
-; AVX1-NEXT:    vpxor %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpmaxud %xmm0, %xmm1, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpmaxud %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm4, %xmm5
 ; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsubd %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpandn %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpsubd %xmm4, %xmm2, %xmm1
+; AVX1-NEXT:    vpandn %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT:    vpand %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -1091,14 +1082,11 @@
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpmaxud %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm1, %ymm2
-; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
-; AVX2-NEXT:    vpxor %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -1688,49 +1676,45 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002324991,9223372039002324991]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm7
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    movapd {{.*#+}} xmm7 = [65535,65535]
-; SSE41-NEXT:    movapd %xmm7, %xmm11
-; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm11
+; SSE41-NEXT:    movapd %xmm7, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm5
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm10, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
-; SSE41-NEXT:    packusdw %xmm11, %xmm4
+; SSE41-NEXT:    packusdw %xmm5, %xmm4
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pxor %xmm1, %xmm6
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm7
 ; SSE41-NEXT:    packusdw %xmm3, %xmm7
 ; SSE41-NEXT:    packusdw %xmm4, %xmm7
@@ -2427,3 +2411,317 @@
   ret void
 }
 
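+; Each icmp-ugt/select/add triple below computes umax(x, C) - C, which equals
+; usubsat(x, C), so it should fold to a single psubus{b,w} with a constant
+; operand.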
+define <16 x i8> @test19(<16 x i8> %x) {
+; SSE-LABEL: test19:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test19:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vpsubusb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+entry:
+  %0 = icmp ugt <16 x i8> %x, <i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70>
+  %1 = select <16 x i1> %0, <16 x i8> %x, <16 x i8> <i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70>
+  %2 = add <16 x i8> %1, <i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70>
+  ret <16 x i8> %2
+}
+
+define <16 x i8> @test20(<16 x i8> %x) {
+; SSE-LABEL: test20:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test20:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vpsubusb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+entry:
+  %0 = icmp ugt <16 x i8> %x, <i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70>
+  %1 = select <16 x i1> %0, <16 x i8> %x, <16 x i8> <i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70>
+  %2 = add <16 x i8> %1, <i8 -1, i8 22, i8 50, i8 114, i8 77, i8 70, i8 -123, i8 -98, i8 -63, i8 -19, i8 22, i8 -100, i8 -25, i8 -34, i8 -55, i8 -70>
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test21(<8 x i16> %x) {
+; SSE-LABEL: test21:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test21:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+entry:
+  %0 = icmp ugt <8 x i16> %x, <i16 700, i16 700, i16 700, i16 700, i16 700, i16 700, i16 700, i16 700>
+  %1 = select <8 x i1> %0, <8 x i16> %x, <8 x i16> <i16 700, i16 700, i16 700, i16 700, i16 700, i16 700, i16 700, i16 700>
+  %2 = add <8 x i16> %1, <i16 -700, i16 -700, i16 -700, i16 -700, i16 -700, i16 -700, i16 -700, i16 -700>
+  ret <8 x i16> %2
+}
+
+define <8 x i16> @test22(<8 x i16> %x) {
+; SSE-LABEL: test22:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test22:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+entry:
+  %0 = icmp ugt <8 x i16> %x, <i16 1, i16 -22000, i16 -770, i16 98, i16 19, i16 1000, i16 3456, i16 70>
+  %1 = select <8 x i1> %0, <8 x i16> %x, <8 x i16> <i16 1, i16 -22000, i16 -770, i16 98, i16 19, i16 1000, i16 3456, i16 70>
+  %2 = add <8 x i16> %1, <i16 -1, i16 22000, i16 770, i16 -98, i16 -19, i16 -1000, i16 -3456, i16 -70>
+  ret <8 x i16> %2
+}
+
+define <32 x i8> @test23(<32 x i8> %x) {
+; SSE-LABEL: test23:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70]
+; SSE-NEXT:    psubusb %xmm2, %xmm0
+; SSE-NEXT:    psubusb %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test23:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70]
+; AVX1-NEXT:    vpsubusb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubusb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test23:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vpsubusb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test23:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpsubusb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = icmp ugt <32 x i8> %x, <i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70>
+  %1 = select <32 x i1> %0, <32 x i8> %x, <32 x i8> <i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70>
+  %2 = add <32 x i8> %1, <i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70, i8 -70>
+  ret <32 x i8> %2
+}
+
+define <32 x i8> @test24(<32 x i8> %x) {
+; SSE-LABEL: test24:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm0
+; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test24:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vpsubusb {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsubusb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test24:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vpsubusb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test24:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpsubusb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = icmp ugt <32 x i8> %x, <i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70, i8 2, i8 -23, i8 -49, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 110, i8 25, i8 34, i8 55, i8 70>
+  %1 = select <32 x i1> %0, <32 x i8> %x, <32 x i8> <i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70, i8 2, i8 -23, i8 -49, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 110, i8 25, i8 34, i8 55, i8 70>
+  %2 = add <32 x i8> %1, <i8 -1, i8 22, i8 50, i8 114, i8 77, i8 70, i8 -123, i8 -98, i8 -63, i8 -19, i8 22, i8 -100, i8 -25, i8 -34, i8 -55, i8 -70, i8 -2, i8 23, i8 49, i8 114, i8 77, i8 70, i8 -123, i8 -98, i8 -63, i8 -19, i8 22, i8 -110, i8 -25, i8 -34, i8 -55, i8 -70>
+  ret <32 x i8> %2
+}
+
+define <16 x i16> @test25(<16 x i16> %x) {
+; SSE-LABEL: test25:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [5000,5000,5000,5000,5000,5000,5000,5000]
+; SSE-NEXT:    psubusw %xmm2, %xmm0
+; SSE-NEXT:    psubusw %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test25:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [5000,5000,5000,5000,5000,5000,5000,5000]
+; AVX1-NEXT:    vpsubusw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubusw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test25:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vpsubusw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test25:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpsubusw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = icmp ugt <16 x i16> %x, <i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000>
+  %1 = select <16 x i1> %0, <16 x i16> %x, <16 x i16> <i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000, i16 5000>
+  %2 = add <16 x i16> %1, <i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000, i16 -5000>
+  ret <16 x i16> %2
+}
+
+define <16 x i16> @test26(<16 x i16> %x) {
+; SSE-LABEL: test26:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test26:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test26:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vpsubusw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test26:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpsubusw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = icmp ugt <16 x i16> %x, <i16 1, i16 -2200, i16 -50, i16 -114, i16 -77, i16 -70, i16 123, i16 9800, i16 635, i16 19567, i16 -22, i16 100, i16 2534, i16 34, i16 55, i16 70>
+  %1 = select <16 x i1> %0, <16 x i16> %x, <16 x i16> <i16 1, i16 -2200, i16 -50, i16 -114, i16 -77, i16 -70, i16 123, i16 9800, i16 635, i16 19567, i16 -22, i16 100, i16 2534, i16 34, i16 55, i16 70>
+  %2 = add <16 x i16> %1, <i16 -1, i16 2200, i16 50, i16 114, i16 77, i16 70, i16 -123, i16 -9800, i16 -635, i16 -19567, i16 22, i16 -100, i16 -2534, i16 -34, i16 -55, i16 -70>
+  ret <16 x i16> %2
+}
+
+define <64 x i8> @test27(<64 x i8> %x) {
+; SSE-LABEL: test27:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154]
+; SSE-NEXT:    psubusb %xmm4, %xmm0
+; SSE-NEXT:    psubusb %xmm4, %xmm1
+; SSE-NEXT:    psubusb %xmm4, %xmm2
+; SSE-NEXT:    psubusb %xmm4, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test27:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154]
+; AVX1-NEXT:    vpsubusb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubusb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsubusb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubusb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test27:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154]
+; AVX2-NEXT:    vpsubusb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubusb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test27:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpsubusb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = icmp ugt <64 x i8> %x, <i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154>
+  %1 = select <64 x i1> %0, <64 x i8> %x, <64 x i8> <i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154, i8 154>
+  %2 = add <64 x i8> %1, <i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154, i8 -154>
+  ret <64 x i8> %2
+}
+
+define <64 x i8> @test28(<64 x i8> %x) {
+; SSE-LABEL: test28:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,234,206,142,179,186,123,98,63,19,234,100,25,34,55,70]
+; SSE-NEXT:    psubusb %xmm4, %xmm0
+; SSE-NEXT:    psubusb %xmm4, %xmm2
+; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm1
+; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test28:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,234,206,142,179,186,123,98,63,19,234,100,25,34,55,70]
+; AVX1-NEXT:    vpsubusb %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsubusb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT:    vpsubusb %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsubusb {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test28:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vpsubusb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpsubusb {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test28:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpsubusb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = icmp ugt <64 x i8> %x, <i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70, i8 2, i8 -23, i8 -49, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 110, i8 25, i8 34, i8 55, i8 70, i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70, i8 2, i8 -23, i8 -49, i8 -116, i8 -77, i8 -70, i8 123, i8 98, i8 67, i8 19, i8 -22, i8 110, i8 25, i8 34, i8 55, i8 70>
+  %1 = select <64 x i1> %0, <64 x i8> %x, <64 x i8> <i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70, i8 2, i8 -23, i8 -49, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 110, i8 25, i8 34, i8 55, i8 70, i8 1, i8 -22, i8 -50, i8 -114, i8 -77, i8 -70, i8 123, i8 98, i8 63, i8 19, i8 -22, i8 100, i8 25, i8 34, i8 55, i8 70, i8 2, i8 -23, i8 -49, i8 -116, i8 -77, i8 -70, i8 123, i8 98, i8 67, i8 19, i8 -22, i8 110, i8 25, i8 34, i8 55, i8 70>
+  %2 = add <64 x i8> %1, <i8 -1, i8 22, i8 50, i8 114, i8 77, i8 70, i8 -123, i8 -98, i8 -63, i8 -19, i8 22, i8 -100, i8 -25, i8 -34, i8 -55, i8 -70, i8 -2, i8 23, i8 49, i8 114, i8 77, i8 70, i8 -123, i8 -98, i8 -63, i8 -19, i8 22, i8 -110, i8 -25, i8 -34, i8 -55, i8 -70, i8 -1, i8 22, i8 50, i8 114, i8 77, i8 70, i8 -123, i8 -98, i8 -63, i8 -19, i8 22, i8 -100, i8 -25, i8 -34, i8 -55, i8 -70, i8 -2, i8 23, i8 49, i8 116, i8 77, i8 70, i8 -123, i8 -98, i8 -67, i8 -19, i8 22, i8 -110, i8 -25, i8 -34, i8 -55, i8 -70>
+  ret <64 x i8> %2
+}
+
+define <32 x i16> @test29(<32 x i16> %x) {
+; SSE-LABEL: test29:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm1
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm2
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test29:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vpsubusw {{.*}}(%rip), %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsubusw {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test29:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vpsubusw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpsubusw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test29:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpsubusw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = icmp ugt <32 x i16> %x, <i16 1, i16 -2200, i16 -50, i16 -114, i16 -77, i16 -70, i16 123, i16 9800, i16 635, i16 19567, i16 -22, i16 100, i16 2534, i16 34, i16 55, i16 70, i16 1, i16 -2200, i16 -50, i16 -114, i16 -77, i16 -70, i16 123, i16 9805, i16 635, i16 19567, i16 -22, i16 100, i16 2534, i16 346, i16 55, i16 70>
+  %1 = select <32 x i1> %0, <32 x i16> %x, <32 x i16> <i16 1, i16 -2200, i16 -50, i16 -114, i16 -77, i16 -70, i16 123, i16 9800, i16 635, i16 19567, i16 -22, i16 100, i16 2534, i16 34, i16 55, i16 70, i16 1, i16 -2200, i16 -50, i16 -114, i16 -77, i16 -70, i16 123, i16 9805, i16 635, i16 19567, i16 -22, i16 100, i16 2534, i16 346, i16 55, i16 70>
+  %2 = add <32 x i16> %1, <i16 -1, i16 2200, i16 50, i16 114, i16 77, i16 70, i16 -123, i16 -9800, i16 -635, i16 -19567, i16 22, i16 -100, i16 -2534, i16 -34, i16 -55, i16 -70, i16 -1, i16 2200, i16 50, i16 114, i16 77, i16 70, i16 -123, i16 -9805, i16 -635, i16 -19567, i16 22, i16 -100, i16 -2534, i16 -346, i16 -55, i16 -70>
+  ret <32 x i16> %2
+}
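
The tests added above (test19 through test29) all encode the same idiom: an icmp ugt/select pair computing umax(%x, C), followed by an add of -C. Per lane that is max(x, C) - C, which is x - C when x > C and 0 otherwise -- exactly unsigned saturating subtraction, so the backend can select psubusb/psubusw directly, as the CHECK lines verify. A minimal sketch of the equivalent intrinsic form (assuming the @llvm.usub.sat overloads are available in this tree; the tests themselves deliberately spell out the icmp/select/add pattern instead):

define <16 x i8> @usub_sat_form(<16 x i8> %x) {
  ; umax(%x, 70) - 70 per lane, written as a saturating subtract
  %r = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> <i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70, i8 70>)
  ret <16 x i8> %r
}
declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
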
diff --git a/test/CodeGen/X86/remat-fold-load.ll b/test/CodeGen/X86/remat-fold-load.ll
index e640974..bd28951 100644
--- a/test/CodeGen/X86/remat-fold-load.ll
+++ b/test/CodeGen/X86/remat-fold-load.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -verify-coalescing
+; RUN: llc < %s -frame-pointer=all -verify-coalescing
 ; PR13414
 ;
 ; During coalescing, remat triggers DCE which deletes the penultimate use of a
diff --git a/test/CodeGen/X86/ret-addr.ll b/test/CodeGen/X86/ret-addr.ll
index cf164cc..d90f639 100644
--- a/test/CodeGen/X86/ret-addr.ll
+++ b/test/CodeGen/X86/ret-addr.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -disable-fp-elim -mtriple=i686-- | not grep xor
-; RUN: llc < %s -disable-fp-elim -mtriple=x86_64-- | not grep xor
+; RUN: llc < %s -frame-pointer=all -mtriple=i686-- | not grep xor
+; RUN: llc < %s -frame-pointer=all -mtriple=x86_64-- | not grep xor
 
 define i8* @h() nounwind readnone optsize {
 entry:
diff --git a/test/CodeGen/X86/sad.ll b/test/CodeGen/X86/sad.ll
index 3c8c6d3..f643643 100644
--- a/test/CodeGen/X86/sad.ll
+++ b/test/CodeGen/X86/sad.ll
@@ -307,9 +307,7 @@
 ; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vpaddd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddd %xmm4, %xmm4, %xmm5
 ; AVX1-NEXT:    vpaddd %xmm5, %xmm4, %xmm4
 ; AVX1-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/sadd_sat.ll b/test/CodeGen/X86/sadd_sat.ll
index 39788e8..3f91b66 100644
--- a/test/CodeGen/X86/sadd_sat.ll
+++ b/test/CodeGen/X86/sadd_sat.ll
@@ -1,267 +1,240 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
-; RUN: llc < %s -mcpu=generic -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefixes=CHECK,X64
 
 declare  i4  @llvm.sadd.sat.i4   (i4,  i4)
 declare  i32 @llvm.sadd.sat.i32  (i32, i32)
 declare  i64 @llvm.sadd.sat.i64  (i64, i64)
 declare  <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
 
-define i32 @func(i32 %x, i32 %y) {
-; CHECK-LABEL: func:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    addl %esi, %ecx
-; CHECK-NEXT:    setns %al
-; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK-NEXT:    addl %esi, %edi
-; CHECK-NEXT:    cmovnol %edi, %eax
-; CHECK-NEXT:    retq
+define i32 @func(i32 %x, i32 %y) nounwind {
+; X86-LABEL: func:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    addl %edx, %esi
+; X86-NEXT:    setns %cl
+; X86-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
+; X86-NEXT:    addl %edx, %eax
+; X86-NEXT:    cmovol %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    xorl %ecx, %ecx
-; CHECK32-NEXT:    movl %eax, %esi
-; CHECK32-NEXT:    addl %edx, %esi
-; CHECK32-NEXT:    setns %cl
-; CHECK32-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    addl %edx, %eax
-; CHECK32-NEXT:    cmovol %ecx, %eax
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
+; X64-LABEL: func:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    addl %esi, %ecx
+; X64-NEXT:    setns %al
+; X64-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    addl %esi, %edi
+; X64-NEXT:    cmovnol %edi, %eax
+; X64-NEXT:    retq
   %tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y);
   ret i32 %tmp;
 }
 
-define i64 @func2(i64 %x, i64 %y) {
-; CHECK-LABEL: func2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    addq %rsi, %rax
-; CHECK-NEXT:    setns %cl
-; CHECK-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    addq %rsi, %rdi
-; CHECK-NEXT:    cmovnoq %rdi, %rax
-; CHECK-NEXT:    retq
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; X86-LABEL: func2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:    adcl %esi, %ebp
+; X86-NEXT:    movl %ebp, %eax
+; X86-NEXT:    sarl $31, %eax
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    testl %ebp, %ebp
+; X86-NEXT:    setns %cl
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; X86-NEXT:    testl %ebx, %ebx
+; X86-NEXT:    setns %bl
+; X86-NEXT:    cmpb %cl, %bl
+; X86-NEXT:    setne %cl
+; X86-NEXT:    testl %esi, %esi
+; X86-NEXT:    setns %ch
+; X86-NEXT:    cmpb %ch, %bl
+; X86-NEXT:    sete %ch
+; X86-NEXT:    testb %cl, %ch
+; X86-NEXT:    cmovel %ebp, %edx
+; X86-NEXT:    cmovel %edi, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func2:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 20
-; CHECK32-NEXT:    .cfi_offset %esi, -20
-; CHECK32-NEXT:    .cfi_offset %edi, -16
-; CHECK32-NEXT:    .cfi_offset %ebx, -12
-; CHECK32-NEXT:    .cfi_offset %ebp, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; CHECK32-NEXT:    addl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl %ebx, %ebp
-; CHECK32-NEXT:    adcl %esi, %ebp
-; CHECK32-NEXT:    movl %ebp, %eax
-; CHECK32-NEXT:    sarl $31, %eax
-; CHECK32-NEXT:    xorl %ecx, %ecx
-; CHECK32-NEXT:    testl %ebp, %ebp
-; CHECK32-NEXT:    setns %cl
-; CHECK32-NEXT:    movl %ecx, %edx
-; CHECK32-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    testl %ebx, %ebx
-; CHECK32-NEXT:    setns %bl
-; CHECK32-NEXT:    cmpb %cl, %bl
-; CHECK32-NEXT:    setne %cl
-; CHECK32-NEXT:    testl %esi, %esi
-; CHECK32-NEXT:    setns %ch
-; CHECK32-NEXT:    cmpb %ch, %bl
-; CHECK32-NEXT:    sete %ch
-; CHECK32-NEXT:    testb %cl, %ch
-; CHECK32-NEXT:    cmovel %ebp, %edx
-; CHECK32-NEXT:    cmovel %edi, %eax
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
+; X64-LABEL: func2:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    addq %rsi, %rax
+; X64-NEXT:    setns %cl
+; X64-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
+; X64-NEXT:    addq %rcx, %rax
+; X64-NEXT:    addq %rsi, %rdi
+; X64-NEXT:    cmovnoq %rdi, %rax
+; X64-NEXT:    retq
   %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;
 }
 
-define i4 @func3(i4 %x, i4 %y) {
-; CHECK-LABEL: func3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    shlb $4, %sil
-; CHECK-NEXT:    shlb $4, %al
-; CHECK-NEXT:    movl %eax, %ecx
-; CHECK-NEXT:    addb %sil, %cl
-; CHECK-NEXT:    setns %cl
-; CHECK-NEXT:    addb %sil, %al
-; CHECK-NEXT:    jno .LBB2_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    addb $127, %cl
-; CHECK-NEXT:    movl %ecx, %eax
-; CHECK-NEXT:  .LBB2_2:
-; CHECK-NEXT:    sarb $4, %al
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+define i4 @func3(i4 %x, i4 %y) nounwind {
+; X86-LABEL: func3:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    shlb $4, %dl
+; X86-NEXT:    shlb $4, %al
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    addb %dl, %cl
+; X86-NEXT:    setns %cl
+; X86-NEXT:    addb %dl, %al
+; X86-NEXT:    jno .LBB2_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    addb $127, %cl
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB2_2:
+; X86-NEXT:    sarb $4, %al
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func3:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %dl
-; CHECK32-NEXT:    shlb $4, %dl
-; CHECK32-NEXT:    shlb $4, %al
-; CHECK32-NEXT:    movl %eax, %ecx
-; CHECK32-NEXT:    addb %dl, %cl
-; CHECK32-NEXT:    setns %cl
-; CHECK32-NEXT:    addb %dl, %al
-; CHECK32-NEXT:    jno .LBB2_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    addb $127, %cl
-; CHECK32-NEXT:    movl %ecx, %eax
-; CHECK32-NEXT:  .LBB2_2:
-; CHECK32-NEXT:    sarb $4, %al
-; CHECK32-NEXT:    retl
+; X64-LABEL: func3:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shlb $4, %sil
+; X64-NEXT:    shlb $4, %al
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    addb %sil, %cl
+; X64-NEXT:    setns %cl
+; X64-NEXT:    addb %sil, %al
+; X64-NEXT:    jno .LBB2_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    addb $127, %cl
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:  .LBB2_2:
+; X64-NEXT:    sarb $4, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %y);
   ret i4 %tmp;
 }
 
-define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: vec:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %ecx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %r8d
-; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    movl %r8d, %esi
-; CHECK-NEXT:    addl %ecx, %esi
-; CHECK-NEXT:    setns %dl
-; CHECK-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
-; CHECK-NEXT:    addl %ecx, %r8d
-; CHECK-NEXT:    cmovol %edx, %r8d
-; CHECK-NEXT:    movd %xmm1, %edx
-; CHECK-NEXT:    movd %xmm0, %ecx
-; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    movl %ecx, %edi
-; CHECK-NEXT:    addl %edx, %edi
-; CHECK-NEXT:    setns %sil
-; CHECK-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
-; CHECK-NEXT:    addl %edx, %ecx
-; CHECK-NEXT:    cmovol %esi, %ecx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; CHECK-NEXT:    movd %xmm2, %edx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; CHECK-NEXT:    movd %xmm2, %eax
-; CHECK-NEXT:    xorl %edi, %edi
-; CHECK-NEXT:    movl %eax, %esi
-; CHECK-NEXT:    addl %edx, %esi
-; CHECK-NEXT:    setns %dil
-; CHECK-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
-; CHECK-NEXT:    addl %edx, %eax
-; CHECK-NEXT:    cmovol %edi, %eax
-; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; CHECK-NEXT:    movd %xmm1, %r9d
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %edx
-; CHECK-NEXT:    xorl %edi, %edi
-; CHECK-NEXT:    movl %edx, %esi
-; CHECK-NEXT:    addl %r9d, %esi
-; CHECK-NEXT:    setns %dil
-; CHECK-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
-; CHECK-NEXT:    addl %r9d, %edx
-; CHECK-NEXT:    cmovol %edi, %edx
-; CHECK-NEXT:    movd %edx, %xmm0
-; CHECK-NEXT:    movd %eax, %xmm1
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT:    movd %ecx, %xmm0
-; CHECK-NEXT:    movd %r8d, %xmm2
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT:    retq
+define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X86-LABEL: vec:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    addl %edx, %esi
+; X86-NEXT:    setns %al
+; X86-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    addl %edx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmovol %eax, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:    addl %esi, %edi
+; X86-NEXT:    setns %al
+; X86-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    addl %esi, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    cmovol %eax, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    addl %edi, %ebx
+; X86-NEXT:    setns %al
+; X86-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    addl %edi, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmovol %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    movl %edi, %ebp
+; X86-NEXT:    addl %eax, %ebp
+; X86-NEXT:    setns %bl
+; X86-NEXT:    addl $2147483647, %ebx # imm = 0x7FFFFFFF
+; X86-NEXT:    addl %eax, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmovol %ebx, %edi
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
 ;
-; CHECK32-LABEL: vec:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 20
-; CHECK32-NEXT:    .cfi_offset %esi, -20
-; CHECK32-NEXT:    .cfi_offset %edi, -16
-; CHECK32-NEXT:    .cfi_offset %ebx, -12
-; CHECK32-NEXT:    .cfi_offset %ebp, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    xorl %eax, %eax
-; CHECK32-NEXT:    movl %ecx, %esi
-; CHECK32-NEXT:    addl %edx, %esi
-; CHECK32-NEXT:    setns %al
-; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    addl %edx, %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    cmovol %eax, %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    xorl %eax, %eax
-; CHECK32-NEXT:    movl %edx, %edi
-; CHECK32-NEXT:    addl %esi, %edi
-; CHECK32-NEXT:    setns %al
-; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    addl %esi, %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    cmovol %eax, %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    xorl %eax, %eax
-; CHECK32-NEXT:    movl %esi, %ebx
-; CHECK32-NEXT:    addl %edi, %ebx
-; CHECK32-NEXT:    setns %al
-; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    addl %edi, %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    cmovol %eax, %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    xorl %ebx, %ebx
-; CHECK32-NEXT:    movl %edi, %ebp
-; CHECK32-NEXT:    addl %eax, %ebp
-; CHECK32-NEXT:    setns %bl
-; CHECK32-NEXT:    addl $2147483647, %ebx # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    addl %eax, %edi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    cmovol %ebx, %edi
-; CHECK32-NEXT:    movl %ecx, 12(%eax)
-; CHECK32-NEXT:    movl %edx, 8(%eax)
-; CHECK32-NEXT:    movl %esi, 4(%eax)
-; CHECK32-NEXT:    movl %edi, (%eax)
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl $4
+; X64-LABEL: vec:
+; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; X64-NEXT:    movd %xmm2, %r8d
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    movl %r8d, %esi
+; X64-NEXT:    addl %ecx, %esi
+; X64-NEXT:    setns %dl
+; X64-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; X64-NEXT:    addl %ecx, %r8d
+; X64-NEXT:    cmovol %edx, %r8d
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    movd %xmm0, %ecx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    movl %ecx, %edi
+; X64-NEXT:    addl %edx, %edi
+; X64-NEXT:    setns %sil
+; X64-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; X64-NEXT:    addl %edx, %ecx
+; X64-NEXT:    cmovol %esi, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    movl %eax, %esi
+; X64-NEXT:    addl %edx, %esi
+; X64-NEXT:    setns %dil
+; X64-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; X64-NEXT:    addl %edx, %eax
+; X64-NEXT:    cmovol %edi, %eax
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; X64-NEXT:    movd %xmm1, %r9d
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    movl %edx, %esi
+; X64-NEXT:    addl %r9d, %esi
+; X64-NEXT:    setns %dil
+; X64-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; X64-NEXT:    addl %r9d, %edx
+; X64-NEXT:    cmovol %edi, %edx
+; X64-NEXT:    movd %edx, %xmm0
+; X64-NEXT:    movd %eax, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-NEXT:    movd %ecx, %xmm0
+; X64-NEXT:    movd %r8d, %xmm2
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    retq
   %tmp = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
   ret <4 x i32> %tmp;
 }
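
The prefix rework above only regroups the check lines (CHECK32 becomes X86, the old bare CHECK becomes X64); the lowering they test is unchanged. With no scalar saturating-add instruction, llc expands @llvm.sadd.sat as: perform the wrapping add, detect signed overflow, and on overflow substitute INT_MAX or INT_MIN, choosing between them from the sign of the wrapped sum (which is inverted whenever overflow occurred) -- hence the setns / addl $2147483647 / cmovol sequence in the checks. A reference expansion in IR, as a sketch of the logic rather than the exact DAG the backend builds:

define i32 @sadd_sat_ref(i32 %x, i32 %y) {
  %p = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
  %s = extractvalue { i32, i1 } %p, 0   ; wrapping sum
  %o = extractvalue { i32, i1 } %p, 1   ; signed-overflow flag
  %ss = ashr i32 %s, 31                 ; sign mask of the wrapped sum
  %sat = xor i32 %ss, -2147483648       ; sum >= 0 ? INT_MIN : INT_MAX
  %r = select i1 %o, i32 %sat, i32 %s
  ret i32 %r
}
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
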
diff --git a/test/CodeGen/X86/sadd_sat_vec.ll b/test/CodeGen/X86/sadd_sat_vec.ll
new file mode 100644
index 0000000..421dc6c
--- /dev/null
+++ b/test/CodeGen/X86/sadd_sat_vec.ll
@@ -0,0 +1,1380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+
+declare <1 x i8> @llvm.sadd.sat.v1i8(<1 x i8>, <1 x i8>)
+declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>)
+declare <12 x i8> @llvm.sadd.sat.v12i8(<12 x i8>, <12 x i8>)
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8>, <64 x i8>)
+
+declare <1 x i16> @llvm.sadd.sat.v1i16(<1 x i16>, <1 x i16>)
+declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <12 x i16> @llvm.sadd.sat.v12i16(<12 x i16>, <12 x i16>)
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare <16 x i1> @llvm.sadd.sat.v16i1(<16 x i1>, <16 x i1>)
+declare <16 x i4> @llvm.sadd.sat.v16i4(<16 x i4>, <16 x i4>)
+
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i24> @llvm.sadd.sat.v4i24(<4 x i24>, <4 x i24>)
+declare <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128>, <2 x i128>)
+
+; Legal types, depending on architecture.
+
+define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE-LABEL: v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+  ret <16 x i8> %z
+}
+
+define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; SSE-LABEL: v32i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsb %xmm2, %xmm0
+; SSE-NEXT:    paddsb %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpaddsb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %x, <32 x i8> %y)
+  ret <32 x i8> %z
+}
+
+define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; SSE-LABEL: v64i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsb %xmm4, %xmm0
+; SSE-NEXT:    paddsb %xmm5, %xmm1
+; SSE-NEXT:    paddsb %xmm6, %xmm2
+; SSE-NEXT:    paddsb %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v64i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpaddsb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpaddsb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpaddsb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpaddsb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v64i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddsb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddsb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v64i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddsb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %x, <64 x i8> %y)
+  ret <64 x i8> %z
+}
+
+define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE-LABEL: v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+  ret <8 x i16> %z
+}
+
+define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; SSE-LABEL: v16i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsw %xmm2, %xmm0
+; SSE-NEXT:    paddsw %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpaddsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %x, <16 x i16> %y)
+  ret <16 x i16> %z
+}
+
+define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; SSE-LABEL: v32i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsw %xmm4, %xmm0
+; SSE-NEXT:    paddsw %xmm5, %xmm1
+; SSE-NEXT:    paddsw %xmm6, %xmm2
+; SSE-NEXT:    paddsw %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpaddsw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpaddsw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpaddsw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpaddsw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddsw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddsw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddsw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %x, <32 x i16> %y)
+  ret <32 x i16> %z
+}
+
+; Too narrow vectors, legalized by widening.
+
+define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+; SSE-LABEL: v8i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    paddsb %xmm0, %xmm1
+; SSE-NEXT:    movq %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v8i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v8i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vpmovwb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <8 x i8>, <8 x i8>* %px
+  %y = load <8 x i8>, <8 x i8>* %py
+  %z = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
+  store <8 x i8> %z, <8 x i8>* %pz
+  ret void
+}
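
; Note on the widened cases above (annotation, not test-file content):
; paddsb/paddsw are lane-wise, the movq/movd loads zero the unused upper
; lanes, and only the original low lanes are stored back, so widening a
; <8 x i8> or <4 x i16> op to the full 128-bit register is safe.
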
+
+define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+; SSE-LABEL: v4i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    paddsb %xmm0, %xmm1
+; SSE-NEXT:    movd %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT:    vpmovdb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i8>, <4 x i8>* %px
+  %y = load <4 x i8>, <4 x i8>* %py
+  %z = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
+  store <4 x i8> %z, <4 x i8>* %pz
+  ret void
+}
+
+define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+; SSE2-LABEL: v2i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movzwl (%rdi), %eax
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movzwl (%rsi), %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    paddsb %xmm0, %xmm1
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    movw %ax, (%rdx)
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i8:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movzwl (%rdi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movzwl (%rsi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    paddsb %xmm0, %xmm1
+; SSSE3-NEXT:    movd %xmm1, %eax
+; SSSE3-NEXT:    movw %ax, (%rdx)
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movzwl (%rdi), %eax
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    movzwl (%rsi), %eax
+; SSE41-NEXT:    movd %eax, %xmm1
+; SSE41-NEXT:    paddsb %xmm0, %xmm1
+; SSE41-NEXT:    pextrw $0, %xmm1, (%rdx)
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movzwl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    movzwl (%rsi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movzwl (%rdi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    movzwl (%rsi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    movzwl (%rsi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpmovqb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i8>, <2 x i8>* %px
+  %y = load <2 x i8>, <2 x i8>* %py
+  %z = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
+  store <2 x i8> %z, <2 x i8>* %pz
+  ret void
+}
+
+define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+; SSE-LABEL: v4i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    paddsw %xmm0, %xmm1
+; SSE-NEXT:    movq %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vpmovdw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i16>, <4 x i16>* %px
+  %y = load <4 x i16>, <4 x i16>* %py
+  %z = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
+  store <4 x i16> %z, <4 x i16>* %pz
+  ret void
+}
+
+define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+; SSE-LABEL: v2i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    paddsw %xmm0, %xmm1
+; SSE-NEXT:    movd %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v2i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpmovqw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i16>, <2 x i16>* %px
+  %y = load <2 x i16>, <2 x i16>* %py
+  %z = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
+  store <2 x i16> %z, <2 x i16>* %pz
+  ret void
+}
+
+define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
+; SSE-LABEL: v12i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddsb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v12i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <12 x i8> @llvm.sadd.sat.v12i8(<12 x i8> %x, <12 x i8> %y)
+  ret <12 x i8> %z
+}
+
+define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+; SSE-LABEL: v12i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa (%rdi), %xmm0
+; SSE-NEXT:    movdqa 16(%rdi), %xmm1
+; SSE-NEXT:    paddsw (%rsi), %xmm0
+; SSE-NEXT:    paddsw 16(%rsi), %xmm1
+; SSE-NEXT:    movq %xmm1, 16(%rdx)
+; SSE-NEXT:    movdqa %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v12i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT:    vpaddsw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT:    vpaddsw 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v12i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vpaddsw (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v12i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512-NEXT:    vpaddsw (%rsi), %ymm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = load <12 x i16>, <12 x i16>* %px
+  %y = load <12 x i16>, <12 x i16>* %py
+  %z = call <12 x i16> @llvm.sadd.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
+  store <12 x i16> %z, <12 x i16>* %pz
+  ret void
+}
+
+; Scalarization
+
+define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+; SSE-LABEL: v1i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movb (%rdi), %cl
+; SSE-NEXT:    movb (%rsi), %dil
+; SSE-NEXT:    movl %ecx, %eax
+; SSE-NEXT:    addb %dil, %al
+; SSE-NEXT:    setns %sil
+; SSE-NEXT:    addb %dil, %cl
+; SSE-NEXT:    jno .LBB13_2
+; SSE-NEXT:  # %bb.1:
+; SSE-NEXT:    addb $127, %sil
+; SSE-NEXT:    movl %esi, %ecx
+; SSE-NEXT:  .LBB13_2:
+; SSE-NEXT:    movb %cl, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movb (%rdi), %cl
+; AVX-NEXT:    movb (%rsi), %dil
+; AVX-NEXT:    movl %ecx, %eax
+; AVX-NEXT:    addb %dil, %al
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addb %dil, %cl
+; AVX-NEXT:    jno .LBB13_2
+; AVX-NEXT:  # %bb.1:
+; AVX-NEXT:    addb $127, %sil
+; AVX-NEXT:    movl %esi, %ecx
+; AVX-NEXT:  .LBB13_2:
+; AVX-NEXT:    movb %cl, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i8>, <1 x i8>* %px
+  %y = load <1 x i8>, <1 x i8>* %py
+  %z = call <1 x i8> @llvm.sadd.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
+  store <1 x i8> %z, <1 x i8>* %pz
+  ret void
+}
+
+define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+; SSE-LABEL: v1i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movzwl (%rdi), %eax
+; SSE-NEXT:    movzwl (%rsi), %ecx
+; SSE-NEXT:    xorl %esi, %esi
+; SSE-NEXT:    movl %eax, %edi
+; SSE-NEXT:    addw %cx, %di
+; SSE-NEXT:    setns %sil
+; SSE-NEXT:    addl $32767, %esi # imm = 0x7FFF
+; SSE-NEXT:    addw %cx, %ax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    movw %ax, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movzwl (%rdi), %eax
+; AVX-NEXT:    movzwl (%rsi), %ecx
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %eax, %edi
+; AVX-NEXT:    addw %cx, %di
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $32767, %esi # imm = 0x7FFF
+; AVX-NEXT:    addw %cx, %ax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    movw %ax, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i16>, <1 x i16>* %px
+  %y = load <1 x i16>, <1 x i16>* %py
+  %z = call <1 x i16> @llvm.sadd.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
+  store <1 x i16> %z, <1 x i16>* %pz
+  ret void
+}
+
+; Promotion
+
+define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) nounwind {
+; SSE-LABEL: v16i4:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $4, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    paddsb %xmm1, %xmm0
+; SSE-NEXT:    psrlw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE-NEXT:    pxor %xmm1, %xmm0
+; SSE-NEXT:    psubb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllw $4, %xmm1, %xmm1
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i4> @llvm.sadd.sat.v16i4(<16 x i4> %x, <16 x i4> %y)
+  ret <16 x i4> %z
+}
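
; A scalar sketch of the promotion trick the v16i4 checks above rely on
; (illustration only, not part of the new test file): the nibble is placed
; in the high half of an i8 lane so that byte saturation coincides with
; nibble saturation, and the result is arithmetically shifted back down.
; In the vector code that shift back is emulated by psrlw/pand plus the
; pxor/psubb sign-extension tail, since SSE has no arithmetic byte shift.
define i4 @sadd_sat_i4_via_i8(i4 %a, i4 %b) {
  %az = zext i4 %a to i8
  %bz = zext i4 %b to i8
  %ah = shl i8 %az, 4                      ; nibble into bits 7..4
  %bh = shl i8 %bz, 4
  %s = call i8 @llvm.sadd.sat.i8(i8 %ah, i8 %bh)
  %lo = ashr i8 %s, 4                      ; arithmetic shift restores sign
  %r = trunc i8 %lo to i4
  ret i4 %r
}
declare i8 @llvm.sadd.sat.i8(i8, i8)
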
+
+define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) nounwind {
+; SSE-LABEL: v16i1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $7, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $7, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    paddsb %xmm1, %xmm0
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    pcmpgtb %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i1:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i1:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i1:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX512-NEXT:    vpmovb2m %xmm1, %k0
+; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovb2m %xmm0, %k1
+; AVX512-NEXT:    korw %k0, %k1, %k0
+; AVX512-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i1> @llvm.sadd.sat.v16i1(<16 x i1> %x, <16 x i1> %y)
+  ret <16 x i1> %z
+}
+
+; Expanded
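+; (These types have no legal saturating vector instruction, so each element
+; is extracted, saturated with the scalar SETNS/CMOVO sequence, and
+; reinserted.)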
+
+define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE2-LABEL: v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %r8d
+; SSE2-NEXT:    xorl %edx, %edx
+; SSE2-NEXT:    movl %r8d, %esi
+; SSE2-NEXT:    addl %ecx, %esi
+; SSE2-NEXT:    setns %dl
+; SSE2-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %ecx, %r8d
+; SSE2-NEXT:    cmovol %edx, %r8d
+; SSE2-NEXT:    movd %xmm1, %edx
+; SSE2-NEXT:    movd %xmm0, %ecx
+; SSE2-NEXT:    xorl %esi, %esi
+; SSE2-NEXT:    movl %ecx, %edi
+; SSE2-NEXT:    addl %edx, %edi
+; SSE2-NEXT:    setns %sil
+; SSE2-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %edx, %ecx
+; SSE2-NEXT:    cmovol %esi, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %edx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %eax, %esi
+; SSE2-NEXT:    addl %edx, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %edx, %eax
+; SSE2-NEXT:    cmovol %edi, %eax
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %r9d
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %edx, %esi
+; SSE2-NEXT:    addl %r9d, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %r9d, %edx
+; SSE2-NEXT:    cmovol %edi, %edx
+; SSE2-NEXT:    movd %edx, %xmm0
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movd %r8d, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %r8d
+; SSSE3-NEXT:    xorl %edx, %edx
+; SSSE3-NEXT:    movl %r8d, %esi
+; SSSE3-NEXT:    addl %ecx, %esi
+; SSSE3-NEXT:    setns %dl
+; SSSE3-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %ecx, %r8d
+; SSSE3-NEXT:    cmovol %edx, %r8d
+; SSSE3-NEXT:    movd %xmm1, %edx
+; SSSE3-NEXT:    movd %xmm0, %ecx
+; SSSE3-NEXT:    xorl %esi, %esi
+; SSSE3-NEXT:    movl %ecx, %edi
+; SSSE3-NEXT:    addl %edx, %edi
+; SSSE3-NEXT:    setns %sil
+; SSSE3-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %edx, %ecx
+; SSSE3-NEXT:    cmovol %esi, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %edx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %eax
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %eax, %esi
+; SSSE3-NEXT:    addl %edx, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %edx, %eax
+; SSSE3-NEXT:    cmovol %edi, %eax
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %r9d
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %edx, %esi
+; SSSE3-NEXT:    addl %r9d, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %r9d, %edx
+; SSSE3-NEXT:    cmovol %edi, %edx
+; SSSE3-NEXT:    movd %edx, %xmm0
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movd %r8d, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pextrd $3, %xmm1, %ecx
+; SSE41-NEXT:    pextrd $3, %xmm0, %r8d
+; SSE41-NEXT:    xorl %edx, %edx
+; SSE41-NEXT:    movl %r8d, %esi
+; SSE41-NEXT:    addl %ecx, %esi
+; SSE41-NEXT:    setns %dl
+; SSE41-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %ecx, %r8d
+; SSE41-NEXT:    cmovol %edx, %r8d
+; SSE41-NEXT:    pextrd $2, %xmm1, %edx
+; SSE41-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE41-NEXT:    xorl %esi, %esi
+; SSE41-NEXT:    movl %ecx, %edi
+; SSE41-NEXT:    addl %edx, %edi
+; SSE41-NEXT:    setns %sil
+; SSE41-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %edx, %ecx
+; SSE41-NEXT:    cmovol %esi, %ecx
+; SSE41-NEXT:    movd %xmm1, %edx
+; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %eax, %esi
+; SSE41-NEXT:    addl %edx, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %edx, %eax
+; SSE41-NEXT:    cmovol %edi, %eax
+; SSE41-NEXT:    pextrd $1, %xmm1, %r9d
+; SSE41-NEXT:    pextrd $1, %xmm0, %edx
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %edx, %esi
+; SSE41-NEXT:    addl %r9d, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %r9d, %edx
+; SSE41-NEXT:    cmovol %edi, %edx
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    pinsrd $1, %edx, %xmm0
+; SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
+; SSE41-NEXT:    pinsrd $3, %r8d, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX-NEXT:    vpextrd $3, %xmm0, %r9d
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    movl %r9d, %esi
+; AVX-NEXT:    addl %ecx, %esi
+; AVX-NEXT:    setns %dl
+; AVX-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %ecx, %r9d
+; AVX-NEXT:    cmovol %edx, %r9d
+; AVX-NEXT:    vpextrd $2, %xmm1, %edx
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %ecx, %edi
+; AVX-NEXT:    addl %edx, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %edx, %ecx
+; AVX-NEXT:    cmovol %esi, %ecx
+; AVX-NEXT:    vmovd %xmm1, %r8d
+; AVX-NEXT:    vmovd %xmm0, %edx
+; AVX-NEXT:    xorl %edi, %edi
+; AVX-NEXT:    movl %edx, %esi
+; AVX-NEXT:    addl %r8d, %esi
+; AVX-NEXT:    setns %dil
+; AVX-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %r8d, %edx
+; AVX-NEXT:    cmovol %edi, %edx
+; AVX-NEXT:    vpextrd $1, %xmm1, %r8d
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %eax, %edi
+; AVX-NEXT:    addl %r8d, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %r8d, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    vmovd %edx, %xmm0
+; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i32> %z
+}
+
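+; v2i32 is handled in v2i64 lanes: both inputs are shifted left by 32 so that
+; i32 overflow becomes i64 overflow, saturated as i64, then shifted back down
+; with an arithmetic right shift.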
+define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
+; SSE2-LABEL: v2i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movq %xmm2, %rax
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movq %xmm2, %rcx
+; SSE2-NEXT:    xorl %edx, %edx
+; SSE2-NEXT:    movq %rcx, %rsi
+; SSE2-NEXT:    addq %rax, %rsi
+; SSE2-NEXT:    setns %dl
+; SSE2-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; SSE2-NEXT:    addq %r8, %rdx
+; SSE2-NEXT:    addq %rax, %rcx
+; SSE2-NEXT:    cmovoq %rdx, %rcx
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    movq %xmm0, %rsi
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movq %rsi, %rdx
+; SSE2-NEXT:    addq %rax, %rdx
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addq %r8, %rdi
+; SSE2-NEXT:    addq %rax, %rsi
+; SSE2-NEXT:    cmovoq %rdi, %rsi
+; SSE2-NEXT:    movq %rsi, %xmm1
+; SSE2-NEXT:    movq %rcx, %xmm0
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    psllq $32, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm2, %rax
+; SSSE3-NEXT:    psllq $32, %xmm0
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm2, %rcx
+; SSSE3-NEXT:    xorl %edx, %edx
+; SSSE3-NEXT:    movq %rcx, %rsi
+; SSSE3-NEXT:    addq %rax, %rsi
+; SSSE3-NEXT:    setns %dl
+; SSSE3-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; SSSE3-NEXT:    addq %r8, %rdx
+; SSSE3-NEXT:    addq %rax, %rcx
+; SSSE3-NEXT:    cmovoq %rdx, %rcx
+; SSSE3-NEXT:    movq %xmm1, %rax
+; SSSE3-NEXT:    movq %xmm0, %rsi
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movq %rsi, %rdx
+; SSSE3-NEXT:    addq %rax, %rdx
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addq %r8, %rdi
+; SSSE3-NEXT:    addq %rax, %rsi
+; SSSE3-NEXT:    cmovoq %rdi, %rsi
+; SSSE3-NEXT:    movq %rsi, %xmm1
+; SSSE3-NEXT:    movq %rcx, %xmm0
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSSE3-NEXT:    psrad $31, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    psllq $32, %xmm1
+; SSE41-NEXT:    movq %xmm1, %rax
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    movq %xmm0, %rcx
+; SSE41-NEXT:    xorl %edx, %edx
+; SSE41-NEXT:    movq %rcx, %rsi
+; SSE41-NEXT:    addq %rax, %rsi
+; SSE41-NEXT:    setns %dl
+; SSE41-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; SSE41-NEXT:    addq %r8, %rdx
+; SSE41-NEXT:    addq %rax, %rcx
+; SSE41-NEXT:    cmovoq %rdx, %rcx
+; SSE41-NEXT:    pextrq $1, %xmm1, %rax
+; SSE41-NEXT:    pextrq $1, %xmm0, %rsi
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movq %rsi, %rdx
+; SSE41-NEXT:    addq %rax, %rdx
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addq %r8, %rdi
+; SSE41-NEXT:    addq %rax, %rsi
+; SSE41-NEXT:    cmovoq %rdi, %rsi
+; SSE41-NEXT:    movq %rsi, %xmm1
+; SSE41-NEXT:    movq %rcx, %xmm0
+; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    psrad $31, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vmovq %xmm1, %rax
+; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, %rcx
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    movq %rcx, %rsi
+; AVX1-NEXT:    addq %rax, %rsi
+; AVX1-NEXT:    setns %dl
+; AVX1-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX1-NEXT:    addq %r8, %rdx
+; AVX1-NEXT:    addq %rax, %rcx
+; AVX1-NEXT:    cmovoq %rdx, %rcx
+; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX1-NEXT:    xorl %edi, %edi
+; AVX1-NEXT:    movq %rsi, %rdx
+; AVX1-NEXT:    addq %rax, %rdx
+; AVX1-NEXT:    setns %dil
+; AVX1-NEXT:    addq %r8, %rdi
+; AVX1-NEXT:    addq %rax, %rsi
+; AVX1-NEXT:    cmovoq %rdi, %rsi
+; AVX1-NEXT:    vmovq %rsi, %xmm0
+; AVX1-NEXT:    vmovq %rcx, %xmm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, %rax
+; AVX2-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, %rcx
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    movq %rcx, %rsi
+; AVX2-NEXT:    addq %rax, %rsi
+; AVX2-NEXT:    setns %dl
+; AVX2-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX2-NEXT:    addq %r8, %rdx
+; AVX2-NEXT:    addq %rax, %rcx
+; AVX2-NEXT:    cmovoq %rdx, %rcx
+; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX2-NEXT:    xorl %edi, %edi
+; AVX2-NEXT:    movq %rsi, %rdx
+; AVX2-NEXT:    addq %rax, %rdx
+; AVX2-NEXT:    setns %dil
+; AVX2-NEXT:    addq %r8, %rdi
+; AVX2-NEXT:    addq %rax, %rsi
+; AVX2-NEXT:    cmovoq %rdi, %rsi
+; AVX2-NEXT:    vmovq %rsi, %xmm0
+; AVX2-NEXT:    vmovq %rcx, %xmm1
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rcx
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    movq %rcx, %rsi
+; AVX512-NEXT:    addq %rax, %rsi
+; AVX512-NEXT:    setns %dl
+; AVX512-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX512-NEXT:    addq %r8, %rdx
+; AVX512-NEXT:    addq %rax, %rcx
+; AVX512-NEXT:    cmovoq %rdx, %rcx
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX512-NEXT:    xorl %edi, %edi
+; AVX512-NEXT:    movq %rsi, %rdx
+; AVX512-NEXT:    addq %rax, %rdx
+; AVX512-NEXT:    setns %dil
+; AVX512-NEXT:    addq %r8, %rdi
+; AVX512-NEXT:    addq %rax, %rsi
+; AVX512-NEXT:    cmovoq %rdi, %rsi
+; AVX512-NEXT:    vmovq %rsi, %xmm0
+; AVX512-NEXT:    vmovq %rcx, %xmm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT:    vpsraq $32, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
+  ret <2 x i32> %z
+}
+
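+; v4i24 uses the same trick one level up: shift left by 8 so i24 overflow
+; becomes i32 overflow, saturate as i32, then arithmetic-shift right by 8.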
+define <4 x i24> @v4i24(<4 x i24> %x, <4 x i24> %y) nounwind {
+; SSE2-LABEL: v4i24:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pslld $8, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %ecx
+; SSE2-NEXT:    pslld $8, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %r8d
+; SSE2-NEXT:    xorl %edx, %edx
+; SSE2-NEXT:    movl %r8d, %esi
+; SSE2-NEXT:    addl %ecx, %esi
+; SSE2-NEXT:    setns %dl
+; SSE2-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %ecx, %r8d
+; SSE2-NEXT:    cmovol %edx, %r8d
+; SSE2-NEXT:    movd %xmm1, %edx
+; SSE2-NEXT:    movd %xmm0, %ecx
+; SSE2-NEXT:    xorl %esi, %esi
+; SSE2-NEXT:    movl %ecx, %edi
+; SSE2-NEXT:    addl %edx, %edi
+; SSE2-NEXT:    setns %sil
+; SSE2-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %edx, %ecx
+; SSE2-NEXT:    cmovol %esi, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %edx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %eax, %esi
+; SSE2-NEXT:    addl %edx, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %edx, %eax
+; SSE2-NEXT:    cmovol %edi, %eax
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %r9d
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %edx, %esi
+; SSE2-NEXT:    addl %r9d, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    addl %r9d, %edx
+; SSE2-NEXT:    cmovol %edi, %edx
+; SSE2-NEXT:    movd %edx, %xmm0
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movd %r8d, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    psrad $8, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i24:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pslld $8, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %ecx
+; SSSE3-NEXT:    pslld $8, %xmm0
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %r8d
+; SSSE3-NEXT:    xorl %edx, %edx
+; SSSE3-NEXT:    movl %r8d, %esi
+; SSSE3-NEXT:    addl %ecx, %esi
+; SSSE3-NEXT:    setns %dl
+; SSSE3-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %ecx, %r8d
+; SSSE3-NEXT:    cmovol %edx, %r8d
+; SSSE3-NEXT:    movd %xmm1, %edx
+; SSSE3-NEXT:    movd %xmm0, %ecx
+; SSSE3-NEXT:    xorl %esi, %esi
+; SSSE3-NEXT:    movl %ecx, %edi
+; SSSE3-NEXT:    addl %edx, %edi
+; SSSE3-NEXT:    setns %sil
+; SSSE3-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %edx, %ecx
+; SSSE3-NEXT:    cmovol %esi, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %edx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %eax
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %eax, %esi
+; SSSE3-NEXT:    addl %edx, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %edx, %eax
+; SSSE3-NEXT:    cmovol %edi, %eax
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %r9d
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %edx, %esi
+; SSSE3-NEXT:    addl %r9d, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    addl %r9d, %edx
+; SSSE3-NEXT:    cmovol %edi, %edx
+; SSSE3-NEXT:    movd %edx, %xmm0
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movd %r8d, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    psrad $8, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i24:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pslld $8, %xmm1
+; SSE41-NEXT:    pextrd $3, %xmm1, %ecx
+; SSE41-NEXT:    pslld $8, %xmm0
+; SSE41-NEXT:    pextrd $3, %xmm0, %r8d
+; SSE41-NEXT:    xorl %edx, %edx
+; SSE41-NEXT:    movl %r8d, %esi
+; SSE41-NEXT:    addl %ecx, %esi
+; SSE41-NEXT:    setns %dl
+; SSE41-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %ecx, %r8d
+; SSE41-NEXT:    cmovol %edx, %r8d
+; SSE41-NEXT:    pextrd $2, %xmm1, %edx
+; SSE41-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE41-NEXT:    xorl %esi, %esi
+; SSE41-NEXT:    movl %ecx, %edi
+; SSE41-NEXT:    addl %edx, %edi
+; SSE41-NEXT:    setns %sil
+; SSE41-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %edx, %ecx
+; SSE41-NEXT:    cmovol %esi, %ecx
+; SSE41-NEXT:    movd %xmm1, %edx
+; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %eax, %esi
+; SSE41-NEXT:    addl %edx, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %edx, %eax
+; SSE41-NEXT:    cmovol %edi, %eax
+; SSE41-NEXT:    pextrd $1, %xmm1, %r9d
+; SSE41-NEXT:    pextrd $1, %xmm0, %edx
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %edx, %esi
+; SSE41-NEXT:    addl %r9d, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    addl %r9d, %edx
+; SSE41-NEXT:    cmovol %edi, %edx
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    pinsrd $1, %edx, %xmm0
+; SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
+; SSE41-NEXT:    pinsrd $3, %r8d, %xmm0
+; SSE41-NEXT:    psrad $8, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v4i24:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $8, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX-NEXT:    vpslld $8, %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $3, %xmm0, %r9d
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    movl %r9d, %esi
+; AVX-NEXT:    addl %ecx, %esi
+; AVX-NEXT:    setns %dl
+; AVX-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %ecx, %r9d
+; AVX-NEXT:    cmovol %edx, %r9d
+; AVX-NEXT:    vpextrd $2, %xmm1, %edx
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %ecx, %edi
+; AVX-NEXT:    addl %edx, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %edx, %ecx
+; AVX-NEXT:    cmovol %esi, %ecx
+; AVX-NEXT:    vmovd %xmm1, %r8d
+; AVX-NEXT:    vmovd %xmm0, %edx
+; AVX-NEXT:    xorl %edi, %edi
+; AVX-NEXT:    movl %edx, %esi
+; AVX-NEXT:    addl %r8d, %esi
+; AVX-NEXT:    setns %dil
+; AVX-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %r8d, %edx
+; AVX-NEXT:    cmovol %edi, %edx
+; AVX-NEXT:    vpextrd $1, %xmm1, %r8d
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %eax, %edi
+; AVX-NEXT:    addl %r8d, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    addl %r8d, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    vmovd %edx, %xmm0
+; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $8, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <4 x i24> @llvm.sadd.sat.v4i24(<4 x i24> %x, <4 x i24> %y)
+  ret <4 x i24> %z
+}
+
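+; i128 lanes are split into add/adc pairs; signed overflow is detected by
+; checking that the operands' high words agree in sign while the result's
+; sign differs, and the saturation value is built from the result's sign
+; (SAR for the low word, 0x7FFF... plus the sign bit for the high word).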
+define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
+; SSE-LABEL: v2i128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pushq %r15
+; SSE-NEXT:    pushq %r14
+; SSE-NEXT:    pushq %r13
+; SSE-NEXT:    pushq %r12
+; SSE-NEXT:    pushq %rbx
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %r14
+; SSE-NEXT:    addq {{[0-9]+}}(%rsp), %rcx
+; SSE-NEXT:    movq %r8, %r13
+; SSE-NEXT:    adcq %r14, %r13
+; SSE-NEXT:    movq %r13, %r10
+; SSE-NEXT:    sarq $63, %r10
+; SSE-NEXT:    xorl %edi, %edi
+; SSE-NEXT:    testq %r13, %r13
+; SSE-NEXT:    setns %dil
+; SSE-NEXT:    movabsq $9223372036854775807, %r12 # imm = 0x7FFFFFFFFFFFFFFF
+; SSE-NEXT:    leaq (%rdi,%r12), %r15
+; SSE-NEXT:    testq %r8, %r8
+; SSE-NEXT:    setns %r8b
+; SSE-NEXT:    cmpb %dil, %r8b
+; SSE-NEXT:    setne %dil
+; SSE-NEXT:    testq %r14, %r14
+; SSE-NEXT:    setns %bl
+; SSE-NEXT:    cmpb %bl, %r8b
+; SSE-NEXT:    sete %bl
+; SSE-NEXT:    testb %dil, %bl
+; SSE-NEXT:    cmoveq %r13, %r15
+; SSE-NEXT:    cmoveq %rcx, %r10
+; SSE-NEXT:    addq %r9, %rsi
+; SSE-NEXT:    movq %rdx, %rdi
+; SSE-NEXT:    adcq %r11, %rdi
+; SSE-NEXT:    setns %bl
+; SSE-NEXT:    movzbl %bl, %ebx
+; SSE-NEXT:    addq %rbx, %r12
+; SSE-NEXT:    movq %rdi, %rcx
+; SSE-NEXT:    sarq $63, %rcx
+; SSE-NEXT:    testq %r11, %r11
+; SSE-NEXT:    setns %r8b
+; SSE-NEXT:    testq %rdx, %rdx
+; SSE-NEXT:    setns %dl
+; SSE-NEXT:    cmpb %r8b, %dl
+; SSE-NEXT:    sete %r8b
+; SSE-NEXT:    cmpb %bl, %dl
+; SSE-NEXT:    setne %dl
+; SSE-NEXT:    testb %dl, %r8b
+; SSE-NEXT:    cmoveq %rsi, %rcx
+; SSE-NEXT:    cmoveq %rdi, %r12
+; SSE-NEXT:    movq %r15, 24(%rax)
+; SSE-NEXT:    movq %r10, 16(%rax)
+; SSE-NEXT:    movq %r12, 8(%rax)
+; SSE-NEXT:    movq %rcx, (%rax)
+; SSE-NEXT:    popq %rbx
+; SSE-NEXT:    popq %r12
+; SSE-NEXT:    popq %r13
+; SSE-NEXT:    popq %r14
+; SSE-NEXT:    popq %r15
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r14
+; AVX-NEXT:    addq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    movq %r8, %r13
+; AVX-NEXT:    adcq %r14, %r13
+; AVX-NEXT:    movq %r13, %r10
+; AVX-NEXT:    sarq $63, %r10
+; AVX-NEXT:    xorl %edi, %edi
+; AVX-NEXT:    testq %r13, %r13
+; AVX-NEXT:    setns %dil
+; AVX-NEXT:    movabsq $9223372036854775807, %r12 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX-NEXT:    leaq (%rdi,%r12), %r15
+; AVX-NEXT:    testq %r8, %r8
+; AVX-NEXT:    setns %r8b
+; AVX-NEXT:    cmpb %dil, %r8b
+; AVX-NEXT:    setne %dil
+; AVX-NEXT:    testq %r14, %r14
+; AVX-NEXT:    setns %bl
+; AVX-NEXT:    cmpb %bl, %r8b
+; AVX-NEXT:    sete %bl
+; AVX-NEXT:    testb %dil, %bl
+; AVX-NEXT:    cmoveq %r13, %r15
+; AVX-NEXT:    cmoveq %rcx, %r10
+; AVX-NEXT:    addq %r9, %rsi
+; AVX-NEXT:    movq %rdx, %rdi
+; AVX-NEXT:    adcq %r11, %rdi
+; AVX-NEXT:    setns %bl
+; AVX-NEXT:    movzbl %bl, %ebx
+; AVX-NEXT:    addq %rbx, %r12
+; AVX-NEXT:    movq %rdi, %rcx
+; AVX-NEXT:    sarq $63, %rcx
+; AVX-NEXT:    testq %r11, %r11
+; AVX-NEXT:    setns %r8b
+; AVX-NEXT:    testq %rdx, %rdx
+; AVX-NEXT:    setns %dl
+; AVX-NEXT:    cmpb %r8b, %dl
+; AVX-NEXT:    sete %r8b
+; AVX-NEXT:    cmpb %bl, %dl
+; AVX-NEXT:    setne %dl
+; AVX-NEXT:    testb %dl, %r8b
+; AVX-NEXT:    cmoveq %rsi, %rcx
+; AVX-NEXT:    cmoveq %rdi, %r12
+; AVX-NEXT:    movq %r15, 24(%rax)
+; AVX-NEXT:    movq %r10, 16(%rax)
+; AVX-NEXT:    movq %r12, 8(%rax)
+; AVX-NEXT:    movq %rcx, (%rax)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    retq
+  %z = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %x, <2 x i128> %y)
+  ret <2 x i128> %z
+}
diff --git a/test/CodeGen/X86/sat-add.ll b/test/CodeGen/X86/sat-add.ll
index f0989e8..e09c241 100644
--- a/test/CodeGen/X86/sat-add.ll
+++ b/test/CodeGen/X86/sat-add.ll
@@ -526,11 +526,9 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [42,42,42,42]
 ; SSE41-NEXT:    paddd %xmm0, %xmm1
-; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [4294967253,4294967253,4294967253,4294967253]
-; SSE41-NEXT:    pminud %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [4294967254,4294967254,4294967254,4294967254]
+; SSE41-NEXT:    pmaxud %xmm0, %xmm2
 ; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE41-NEXT:    pxor %xmm2, %xmm0
 ; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
@@ -570,10 +568,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    paddq {{.*}}(%rip), %xmm2
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -867,10 +864,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    paddq %xmm1, %xmm3
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
diff --git a/test/CodeGen/X86/scheduler-backtracking.ll b/test/CodeGen/X86/scheduler-backtracking.ll
index cb8571b..87c36ad 100644
--- a/test/CodeGen/X86/scheduler-backtracking.ll
+++ b/test/CodeGen/X86/scheduler-backtracking.ll
@@ -16,8 +16,8 @@
 ; ILP-NEXT:    pushq %rbx
 ; ILP-NEXT:    movq %rdi, %rax
 ; ILP-NEXT:    xorl %r8d, %r8d
-; ILP-NEXT:    incl %esi
 ; ILP-NEXT:    addb %sil, %sil
+; ILP-NEXT:    addb $2, %sil
 ; ILP-NEXT:    orb $1, %sil
 ; ILP-NEXT:    movl $1, %r10d
 ; ILP-NEXT:    xorl %r14d, %r14d
@@ -25,35 +25,35 @@
 ; ILP-NEXT:    shldq %cl, %r10, %r14
 ; ILP-NEXT:    movl $1, %edx
 ; ILP-NEXT:    shlq %cl, %rdx
-; ILP-NEXT:    leal -128(%rsi), %r9d
 ; ILP-NEXT:    movb $-128, %r11b
-; ILP-NEXT:    xorl %ebx, %ebx
+; ILP-NEXT:    subb %sil, %r11b
+; ILP-NEXT:    leal -128(%rsi), %r9d
+; ILP-NEXT:    xorl %edi, %edi
 ; ILP-NEXT:    movl %r9d, %ecx
-; ILP-NEXT:    shldq %cl, %r10, %rbx
+; ILP-NEXT:    shldq %cl, %r10, %rdi
+; ILP-NEXT:    movl $1, %ebx
+; ILP-NEXT:    shlq %cl, %rbx
+; ILP-NEXT:    movl %r11d, %ecx
+; ILP-NEXT:    shrdq %cl, %r8, %r10
 ; ILP-NEXT:    testb $64, %sil
 ; ILP-NEXT:    cmovneq %rdx, %r14
 ; ILP-NEXT:    cmovneq %r8, %rdx
-; ILP-NEXT:    movl $1, %edi
-; ILP-NEXT:    shlq %cl, %rdi
-; ILP-NEXT:    subb %sil, %r11b
-; ILP-NEXT:    movl %r11d, %ecx
-; ILP-NEXT:    shrdq %cl, %r8, %r10
 ; ILP-NEXT:    testb $64, %r11b
 ; ILP-NEXT:    cmovneq %r8, %r10
 ; ILP-NEXT:    testb $64, %r9b
-; ILP-NEXT:    cmovneq %rdi, %rbx
-; ILP-NEXT:    cmovneq %r8, %rdi
+; ILP-NEXT:    cmovneq %rbx, %rdi
+; ILP-NEXT:    cmovneq %r8, %rbx
 ; ILP-NEXT:    testb %sil, %sil
 ; ILP-NEXT:    cmovsq %r8, %r14
 ; ILP-NEXT:    cmovsq %r8, %rdx
 ; ILP-NEXT:    movq %r14, 8(%rax)
 ; ILP-NEXT:    movq %rdx, (%rax)
-; ILP-NEXT:    cmovnsq %r8, %rbx
-; ILP-NEXT:    cmoveq %r8, %rbx
-; ILP-NEXT:    movq %rbx, 24(%rax)
-; ILP-NEXT:    cmovnsq %r10, %rdi
+; ILP-NEXT:    cmovnsq %r8, %rdi
 ; ILP-NEXT:    cmoveq %r8, %rdi
-; ILP-NEXT:    movq %rdi, 16(%rax)
+; ILP-NEXT:    movq %rdi, 24(%rax)
+; ILP-NEXT:    cmovnsq %r10, %rbx
+; ILP-NEXT:    cmoveq %r8, %rbx
+; ILP-NEXT:    movq %rbx, 16(%rax)
 ; ILP-NEXT:    popq %rbx
 ; ILP-NEXT:    popq %r14
 ; ILP-NEXT:    retq
@@ -61,8 +61,8 @@
 ; HYBRID-LABEL: test1:
 ; HYBRID:       # %bb.0:
 ; HYBRID-NEXT:    movq %rdi, %rax
-; HYBRID-NEXT:    incl %esi
 ; HYBRID-NEXT:    addb %sil, %sil
+; HYBRID-NEXT:    addb $2, %sil
 ; HYBRID-NEXT:    orb $1, %sil
 ; HYBRID-NEXT:    movb $-128, %cl
 ; HYBRID-NEXT:    subb %sil, %cl
@@ -104,8 +104,8 @@
 ; BURR-LABEL: test1:
 ; BURR:       # %bb.0:
 ; BURR-NEXT:    movq %rdi, %rax
-; BURR-NEXT:    incl %esi
 ; BURR-NEXT:    addb %sil, %sil
+; BURR-NEXT:    addb $2, %sil
 ; BURR-NEXT:    orb $1, %sil
 ; BURR-NEXT:    movb $-128, %cl
 ; BURR-NEXT:    subb %sil, %cl
@@ -148,8 +148,8 @@
 ; SRC:       # %bb.0:
 ; SRC-NEXT:    pushq %rbx
 ; SRC-NEXT:    movq %rdi, %rax
-; SRC-NEXT:    incl %esi
 ; SRC-NEXT:    addb %sil, %sil
+; SRC-NEXT:    addb $2, %sil
 ; SRC-NEXT:    orb $1, %sil
 ; SRC-NEXT:    movb $-128, %cl
 ; SRC-NEXT:    subb %sil, %cl
@@ -195,8 +195,8 @@
 ; LIN-NEXT:    movq %rdi, %rax
 ; LIN-NEXT:    xorl %r9d, %r9d
 ; LIN-NEXT:    movl $1, %r8d
-; LIN-NEXT:    incl %esi
 ; LIN-NEXT:    addb %sil, %sil
+; LIN-NEXT:    addb $2, %sil
 ; LIN-NEXT:    orb $1, %sil
 ; LIN-NEXT:    movl $1, %edx
 ; LIN-NEXT:    movl %esi, %ecx
diff --git a/test/CodeGen/X86/seh-catch-all-win32.ll b/test/CodeGen/X86/seh-catch-all-win32.ll
index 315790a..d90c9e4 100644
--- a/test/CodeGen/X86/seh-catch-all-win32.ll
+++ b/test/CodeGen/X86/seh-catch-all-win32.ll
@@ -12,7 +12,7 @@
 declare i8* @llvm.frameaddress(i32)
 declare i8* @llvm.localrecover(i8*, i8*, i32)
 declare void @llvm.localescape(...)
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
 
 define i32 @main() personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) {
 entry:
@@ -37,7 +37,7 @@
 define internal i32 @"filt$main"() {
 entry:
   %ebp = tail call i8* @llvm.frameaddress(i32 1)
-  %parentfp = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %ebp)
+  %parentfp = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %ebp)
   %code.i8 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %parentfp, i32 0)
   %__exceptioncode = bitcast i8* %code.i8 to i32*
   %info.addr = getelementptr inbounds i8, i8* %ebp, i32 -20
diff --git a/test/CodeGen/X86/seh-filter-no-personality.ll b/test/CodeGen/X86/seh-filter-no-personality.ll
index 87bc9c9..638969a 100644
--- a/test/CodeGen/X86/seh-filter-no-personality.ll
+++ b/test/CodeGen/X86/seh-filter-no-personality.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -mtriple=i686-windows-msvc < %s | FileCheck %s
 
-; Mostly make sure that llvm.x86.seh.recoverfp doesn't crash if the parent
+; Mostly make sure that llvm.eh.recoverfp doesn't crash if the parent
 ; function lacks a personality.
 
 declare i8* @llvm.frameaddress(i32)
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
 
 define i32 @main() {
 entry:
@@ -14,7 +14,7 @@
 define internal i32 @"filt$main"() {
 entry:
   %ebp = tail call i8* @llvm.frameaddress(i32 1)
-  %parentfp = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %ebp)
+  %parentfp = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %ebp)
   %info.addr = getelementptr inbounds i8, i8* %ebp, i32 -20
   %0 = bitcast i8* %info.addr to i32***
   %1 = load i32**, i32*** %0, align 4
diff --git a/test/CodeGen/X86/seh-no-invokes.ll b/test/CodeGen/X86/seh-no-invokes.ll
index 4e64aa2..f32ab5c 100644
--- a/test/CodeGen/X86/seh-no-invokes.ll
+++ b/test/CodeGen/X86/seh-no-invokes.ll
@@ -38,7 +38,7 @@
 define internal i32 @"\01?filt$0@0@f@@"() #1 {
 entry:
   %0 = tail call i8* @llvm.frameaddress(i32 1)
-  %1 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (void ()* @f to i8*), i8* %0)
+  %1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (void ()* @f to i8*), i8* %0)
   %2 = tail call i8* @llvm.localrecover(i8* bitcast (void ()* @f to i8*), i8* %1, i32 0)
   %__exception_code = bitcast i8* %2 to i32*
   %3 = getelementptr inbounds i8, i8* %0, i32 -20
@@ -55,7 +55,7 @@
 declare i8* @llvm.frameaddress(i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*) #2
+declare i8* @llvm.eh.recoverfp(i8*, i8*) #2
 
 ; Function Attrs: nounwind readnone
 declare i8* @llvm.localrecover(i8*, i8*, i32) #2
diff --git a/test/CodeGen/X86/seh-stack-realign.ll b/test/CodeGen/X86/seh-stack-realign.ll
index 75a005c..0301ae8 100644
--- a/test/CodeGen/X86/seh-stack-realign.ll
+++ b/test/CodeGen/X86/seh-stack-realign.ll
@@ -12,7 +12,7 @@
 declare i8* @llvm.frameaddress(i32)
 declare i8* @llvm.localrecover(i8*, i8*, i32)
 declare void @llvm.localescape(...)
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
 
 define i32 @main() personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) {
 entry:
@@ -38,7 +38,7 @@
 define internal i32 @"filt$main"() {
 entry:
   %ebp = tail call i8* @llvm.frameaddress(i32 1)
-  %parentfp = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %ebp)
+  %parentfp = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %ebp)
   %code.i8 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %parentfp, i32 0)
   %__exceptioncode = bitcast i8* %code.i8 to i32*
   %info.addr = getelementptr inbounds i8, i8* %ebp, i32 -20
diff --git a/test/CodeGen/X86/setcc-combine.ll b/test/CodeGen/X86/setcc-combine.ll
index 56cff4a..9d2753c 100644
--- a/test/CodeGen/X86/setcc-combine.ll
+++ b/test/CodeGen/X86/setcc-combine.ll
@@ -1,15 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=CHECK,SSE41
 
 define i32 @test_eq_1(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_eq_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
-; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
-; CHECK-NEXT:    pxor %xmm1, %xmm0
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_eq_1:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    notl %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_eq_1:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT:    pextrd $1, %xmm1, %eax
+; SSE41-NEXT:    notl %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %A, %B
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp eq <4 x i32> %sext, zeroinitializer
@@ -19,12 +26,18 @@
 }
 
 define i32 @test_ne_1(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_ne_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_ne_1:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_ne_1:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT:    pextrd $1, %xmm1, %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %A, %B
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp ne <4 x i32> %sext, zeroinitializer
@@ -47,14 +60,20 @@
 }
 
 define i32 @test_ge_1(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_ge_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
-; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
-; CHECK-NEXT:    pxor %xmm1, %xmm0
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_ge_1:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    notl %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_ge_1:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT:    pextrd $1, %xmm1, %eax
+; SSE41-NEXT:    notl %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %A, %B
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp sge <4 x i32> %sext, zeroinitializer
@@ -64,12 +83,18 @@
 }
 
 define i32 @test_lt_1(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_lt_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_lt_1:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_lt_1:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT:    pextrd $1, %xmm1, %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %A, %B
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp slt <4 x i32> %sext, zeroinitializer
@@ -92,14 +117,20 @@
 }
 
 define i32 @test_eq_2(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_eq_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
-; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-NEXT:    pxor %xmm0, %xmm1
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_eq_2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    notl %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_eq_2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pextrd $1, %xmm0, %eax
+; SSE41-NEXT:    notl %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %B, %A
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp eq <4 x i32> %sext, zeroinitializer
@@ -109,12 +140,18 @@
 }
 
 define i32 @test_ne_2(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_ne_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_ne_2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_ne_2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pextrd $1, %xmm0, %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %B, %A
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp ne <4 x i32> %sext, zeroinitializer
@@ -124,14 +161,20 @@
 }
 
 define i32 @test_le_2(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_le_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
-; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-NEXT:    pxor %xmm0, %xmm1
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_le_2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    notl %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_le_2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pextrd $1, %xmm0, %eax
+; SSE41-NEXT:    notl %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %B, %A
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp sle <4 x i32> zeroinitializer, %sext
@@ -154,12 +197,18 @@
 }
 
 define i32 @test_lt_2(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_lt_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_lt_2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_lt_2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pextrd $1, %xmm0, %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %B, %A
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp slt <4 x i32> zeroinitializer, %sext
@@ -169,12 +218,18 @@
 }
 
 define i32 @test_gt_2(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test_gt_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    retq
+; SSE2-LABEL: test_gt_2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: test_gt_2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pextrd $1, %xmm0, %eax
+; SSE41-NEXT:    retq
   %cmp = icmp slt <4 x i32> %B, %A
   %sext = sext <4 x i1> %cmp to <4 x i32>
   %cmp1 = icmp sgt <4 x i32> zeroinitializer, %sext
diff --git a/test/CodeGen/X86/shift-double.ll b/test/CodeGen/X86/shift-double.ll
index b1b5f1c..fd555c4 100644
--- a/test/CodeGen/X86/shift-double.ll
+++ b/test/CodeGen/X86/shift-double.ll
@@ -454,3 +454,55 @@
   %sh = or i32 %sh_lo, %sh_hi
   ret i32 %sh
 }
+
+; PR34641 - Masked Shift Counts
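+; Masking both the count and its negation to bitwidth-1 lets the
+; shl/lshr/or pattern below fold to a single SHLD/SHRD.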
+
+define i32 @shld_safe_i32(i32, i32, i32) {
+; X86-LABEL: shld_safe_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shldl %cl, %edx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: shld_safe_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shldl %cl, %esi, %eax
+; X64-NEXT:    retq
+  %4 = and i32 %2, 31
+  %5 = shl i32 %0, %4
+  %6 = sub i32 0, %2
+  %7 = and i32 %6, 31
+  %8 = lshr i32 %1, %7
+  %9 = or i32 %5, %8
+  ret i32 %9
+}
+
+define i32 @shrd_safe_i32(i32, i32, i32) {
+; X86-LABEL: shrd_safe_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrdl %cl, %edx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: shrd_safe_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrdl %cl, %esi, %eax
+; X64-NEXT:    retq
+  %4 = and i32 %2, 31
+  %5 = lshr i32 %0, %4
+  %6 = sub i32 0, %2
+  %7 = and i32 %6, 31
+  %8 = shl i32 %1, %7
+  %9 = or i32 %5, %8
+  ret i32 %9
+}
diff --git a/test/CodeGen/X86/shrink_vmul-widen.ll b/test/CodeGen/X86/shrink_vmul-widen.ll
index 3c047da..d0fad23 100644
--- a/test/CodeGen/X86/shrink_vmul-widen.ll
+++ b/test/CodeGen/X86/shrink_vmul-widen.ll
@@ -2096,7 +2096,7 @@
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE-NEXT:    movdqa (%eax), %xmm5
 ; X86-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE-NEXT:    movdqa (%ecx), %xmm3
+; X86-SSE-NEXT:    movdqa (%ecx), %xmm2
 ; X86-SSE-NEXT:    movdqa 16(%ecx), %xmm6
 ; X86-SSE-NEXT:    pxor %xmm0, %xmm0
 ; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2110,10 +2110,10 @@
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm0
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[2,3,0,1]
-; X86-SSE-NEXT:    movd %xmm2, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[2,3,0,1]
-; X86-SSE-NEXT:    movd %xmm2, %esi
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1]
+; X86-SSE-NEXT:    movd %xmm3, %eax
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[2,3,0,1]
+; X86-SSE-NEXT:    movd %xmm3, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm7
@@ -2122,7 +2122,7 @@
 ; X86-SSE-NEXT:    movd %xmm6, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm2
+; X86-SSE-NEXT:    movd %edx, %xmm3
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm5, %eax
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
@@ -2130,60 +2130,57 @@
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm5
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[3,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm6, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[3,1,2,3]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm6, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm6
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
 ; X86-SSE-NEXT:    movd %xmm7, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
 ; X86-SSE-NEXT:    movd %xmm7, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm7
 ; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
 ; X86-SSE-NEXT:    movd %xmm4, %eax
-; X86-SSE-NEXT:    movd %xmm3, %esi
+; X86-SSE-NEXT:    movd %xmm2, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm4, %eax
 ; X86-SSE-NEXT:    movd %edx, %xmm4
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; X86-SSE-NEXT:    movd %xmm3, %esi
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X86-SSE-NEXT:    movd %xmm2, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm3
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; X86-SSE-NEXT:    movd %edx, %xmm2
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
 ; X86-SSE-NEXT:    movd %xmm1, %eax
-; X86-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,0],xmm6[0,0]
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
 ; X86-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
 ; X86-SSE-NEXT:    pmuludq %xmm1, %xmm4
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm3
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; X86-SSE-NEXT:    pmuludq %xmm1, %xmm2
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; X86-SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,0],xmm0[0,0]
+; X86-SSE-NEXT:    pmuludq %xmm1, %xmm3
+; X86-SSE-NEXT:    pmuludq %xmm1, %xmm5
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl 32(%ecx)
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm2
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm5
-; X86-SSE-NEXT:    movd %edx, %xmm0
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[0,2,2,3]
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X86-SSE-NEXT:    movl $8199, %eax # imm = 0x2007
-; X86-SSE-NEXT:    movd %eax, %xmm2
-; X86-SSE-NEXT:    pmuludq %xmm0, %xmm2
-; X86-SSE-NEXT:    movd %xmm2, (%eax)
-; X86-SSE-NEXT:    movdqa %xmm1, (%eax)
+; X86-SSE-NEXT:    movdqa %xmm0, (%eax)
 ; X86-SSE-NEXT:    movdqa %xmm4, (%eax)
+; X86-SSE-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X86-SSE-NEXT:    movl %eax, (%eax)
 ; X86-SSE-NEXT:    popl %esi
 ; X86-SSE-NEXT:    retl
 ;
@@ -2252,16 +2249,13 @@
 ; X86-AVX1-NEXT:    vpinsrd $1, (%esp), %xmm1, %xmm1 # 4-byte Folded Reload
 ; X86-AVX1-NEXT:    vpinsrd $2, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
 ; X86-AVX1-NEXT:    vpinsrd $3, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
-; X86-AVX1-NEXT:    vmovd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 4-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm2 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    movl $8199, %eax # imm = 0x2007
-; X86-AVX1-NEXT:    vmovd %eax, %xmm3
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [8199,8199,8199,8199]
-; X86-AVX1-NEXT:    vpmulld %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpmulld %xmm4, %xmm1, %xmm1
+; X86-AVX1-NEXT:    imull $8199, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-AVX1-NEXT:    # imm = 0x2007
+; X86-AVX1-NEXT:    movl %eax, (%eax)
+; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
+; X86-AVX1-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm1
-; X86-AVX1-NEXT:    vmovd %xmm1, (%eax)
 ; X86-AVX1-NEXT:    vmovaps %ymm0, (%eax)
 ; X86-AVX1-NEXT:    addl $16, %esp
 ; X86-AVX1-NEXT:    popl %esi
@@ -2328,14 +2322,11 @@
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX2-NEXT:    xorl %edx, %edx
 ; X86-AVX2-NEXT:    divl 32(%esi)
-; X86-AVX2-NEXT:    vmovd %edx, %xmm0
-; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [8199,8199,8199,8199,8199,8199,8199,8199]
-; X86-AVX2-NEXT:    vpmulld %ymm2, %ymm1, %ymm1
-; X86-AVX2-NEXT:    movl $8199, %eax # imm = 0x2007
-; X86-AVX2-NEXT:    vmovd %eax, %xmm2
-; X86-AVX2-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovd %xmm0, (%eax)
-; X86-AVX2-NEXT:    vmovdqa %ymm1, (%eax)
+; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [8199,8199,8199,8199,8199,8199,8199,8199]
+; X86-AVX2-NEXT:    vpmulld %ymm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X86-AVX2-NEXT:    movl %eax, (%eax)
+; X86-AVX2-NEXT:    vmovdqa %ymm0, (%eax)
 ; X86-AVX2-NEXT:    popl %esi
 ; X86-AVX2-NEXT:    popl %edi
 ; X86-AVX2-NEXT:    vzeroupper
@@ -2349,8 +2340,8 @@
 ; X64-SSE-NEXT:    movdqa 16(%rsi), %xmm6
 ; X64-SSE-NEXT:    pxor %xmm0, %xmm0
 ; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-SSE-NEXT:    movdqa %xmm5, %xmm4
-; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X64-SSE-NEXT:    movdqa %xmm5, %xmm3
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
 ; X64-SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm0, %eax
@@ -2359,10 +2350,10 @@
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm8
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1]
-; X64-SSE-NEXT:    movd %xmm3, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[2,3,0,1]
-; X64-SSE-NEXT:    movd %xmm3, %ecx
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
+; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[2,3,0,1]
+; X64-SSE-NEXT:    movd %xmm4, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm7
@@ -2371,7 +2362,7 @@
 ; X64-SSE-NEXT:    movd %xmm6, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm3
+; X64-SSE-NEXT:    movd %edx, %xmm4
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm5, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
@@ -2379,16 +2370,16 @@
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm5
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[3,1,2,3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[3,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm6, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm6, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm6
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1]
 ; X64-SSE-NEXT:    movd %xmm7, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
 ; X64-SSE-NEXT:    movd %xmm7, %ecx
@@ -2396,13 +2387,13 @@
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm7
 ; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    movd %xmm3, %eax
 ; X64-SSE-NEXT:    movd %xmm2, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm0
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
-; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; X64-SSE-NEXT:    movd %xmm3, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm2, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
@@ -2413,24 +2404,21 @@
 ; X64-SSE-NEXT:    movd %xmm1, %eax
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl 32(%rsi)
-; X64-SSE-NEXT:    movd %edx, %xmm1
-; X64-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [8199,8199,8199,8199]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm0
+; X64-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm0
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm2
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm2
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
 ; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm3
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm4
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,0],xmm8[0,0]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm5
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[0,2,2,3]
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X64-SSE-NEXT:    movl $8199, %eax # imm = 0x2007
-; X64-SSE-NEXT:    movd %eax, %xmm3
-; X64-SSE-NEXT:    pmuludq %xmm1, %xmm3
-; X64-SSE-NEXT:    movd %xmm3, (%rax)
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm5
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X64-SSE-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X64-SSE-NEXT:    movl %eax, (%rax)
 ; X64-SSE-NEXT:    movdqa %xmm2, (%rax)
 ; X64-SSE-NEXT:    movdqa %xmm0, (%rax)
 ; X64-SSE-NEXT:    retq
@@ -2499,11 +2487,8 @@
 ; X64-AVX1-NEXT:    vpinsrd $3, %r9d, %xmm2, %xmm2
 ; X64-AVX1-NEXT:    vpmulld %xmm1, %xmm2, %xmm1
 ; X64-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vmovd %r8d, %xmm1
-; X64-AVX1-NEXT:    movl $8199, %eax # imm = 0x2007
-; X64-AVX1-NEXT:    vmovd %eax, %xmm2
-; X64-AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vmovd %xmm1, (%rax)
+; X64-AVX1-NEXT:    imull $8199, %r8d, %eax # imm = 0x2007
+; X64-AVX1-NEXT:    movl %eax, (%rax)
 ; X64-AVX1-NEXT:    vmovaps %ymm0, (%rax)
 ; X64-AVX1-NEXT:    popq %rbx
 ; X64-AVX1-NEXT:    popq %rbp
@@ -2563,14 +2548,11 @@
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
 ; X64-AVX2-NEXT:    xorl %edx, %edx
 ; X64-AVX2-NEXT:    divl 32(%rsi)
-; X64-AVX2-NEXT:    vmovd %edx, %xmm0
-; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [8199,8199,8199,8199,8199,8199,8199,8199]
-; X64-AVX2-NEXT:    vpmulld %ymm2, %ymm1, %ymm1
-; X64-AVX2-NEXT:    movl $8199, %eax # imm = 0x2007
-; X64-AVX2-NEXT:    vmovd %eax, %xmm2
-; X64-AVX2-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovd %xmm0, (%rax)
-; X64-AVX2-NEXT:    vmovdqa %ymm1, (%rax)
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [8199,8199,8199,8199,8199,8199,8199,8199]
+; X64-AVX2-NEXT:    vpmulld %ymm0, %ymm1, %ymm0
+; X64-AVX2-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X64-AVX2-NEXT:    movl %eax, (%rax)
+; X64-AVX2-NEXT:    vmovdqa %ymm0, (%rax)
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %a0 = load <9 x i16>, <9 x i16>* %p0, align 64
diff --git a/test/CodeGen/X86/shrink_vmul.ll b/test/CodeGen/X86/shrink_vmul.ll
index 6e9dc60..8a8a396 100644
--- a/test/CodeGen/X86/shrink_vmul.ll
+++ b/test/CodeGen/X86/shrink_vmul.ll
@@ -44,7 +44,7 @@
 ; X86-AVX-NEXT:    movl c, %esi
 ; X86-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; X86-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X86-AVX-NEXT:    vpmaddwd %xmm0, %xmm1, %xmm0
+; X86-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%esi,%ecx,4)
 ; X86-AVX-NEXT:    popl %esi
@@ -70,7 +70,7 @@
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; X64-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X64-AVX-NEXT:    vpmaddwd %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rdx,4)
 ; X64-AVX-NEXT:    retq
@@ -916,7 +916,7 @@
 ; X86-AVX-NEXT:    movl c, %esi
 ; X86-AVX-NEXT:    vpmovsxbq (%edx,%ecx), %xmm0
 ; X86-AVX-NEXT:    vpmovsxbq (%eax,%ecx), %xmm1
-; X86-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
+; X86-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%esi,%ecx,4)
 ; X86-AVX-NEXT:    popl %esi
@@ -944,7 +944,7 @@
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovsxbq (%rdi,%rdx), %xmm0
 ; X64-AVX-NEXT:    vpmovsxbq (%rsi,%rdx), %xmm1
-; X64-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rdx,4)
 ; X64-AVX-NEXT:    retq
@@ -1004,7 +1004,7 @@
 ; X86-AVX-NEXT:    movl c, %esi
 ; X86-AVX-NEXT:    vpmovsxbq (%edx,%ecx), %xmm0
 ; X86-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X86-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
+; X86-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%esi,%ecx,4)
 ; X86-AVX-NEXT:    popl %esi
@@ -1033,7 +1033,7 @@
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovsxbq (%rdi,%rdx), %xmm0
 ; X64-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X64-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rdx,4)
 ; X64-AVX-NEXT:    retq
@@ -1087,7 +1087,7 @@
 ; X86-AVX-NEXT:    movl c, %esi
 ; X86-AVX-NEXT:    vpmovsxwq (%edx,%ecx), %xmm0
 ; X86-AVX-NEXT:    vpmovsxwq (%eax,%ecx), %xmm1
-; X86-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
+; X86-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%esi,%ecx,4)
 ; X86-AVX-NEXT:    popl %esi
@@ -1110,7 +1110,7 @@
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovsxwq (%rdi,%rdx), %xmm0
 ; X64-AVX-NEXT:    vpmovsxwq (%rsi,%rdx), %xmm1
-; X64-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm0, %xmm1, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rdx,4)
 ; X64-AVX-NEXT:    retq
@@ -1169,9 +1169,8 @@
 ; X86-AVX-NEXT:    vpmovsxwq (%edx,%ecx), %xmm0
 ; X86-AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X86-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; X86-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; X86-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
 ; X86-AVX-NEXT:    vmovq %xmm0, (%esi,%ecx,4)
 ; X86-AVX-NEXT:    popl %esi
 ; X86-AVX-NEXT:    retl
@@ -1198,9 +1197,8 @@
 ; X64-AVX-NEXT:    vpmovsxwq (%rdi,%rdx), %xmm0
 ; X64-AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X64-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; X64-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; X64-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rdx,4)
 ; X64-AVX-NEXT:    retq
 entry:
@@ -1406,7 +1404,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -1430,7 +1428,7 @@
 ; X64-AVX-NEXT:    movl $255, %ecx
 ; X64-AVX-NEXT:    vmovq %rcx, %xmm1
 ; X64-AVX-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
-; X64-AVX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -1474,7 +1472,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovsxbq (%ecx,%eax), %xmm0
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -1496,7 +1494,7 @@
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovsxbq (%rdi,%rsi), %xmm0
-; X64-AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -1542,7 +1540,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -1569,7 +1567,7 @@
 ; X64-AVX-NEXT:    movl $256, %ecx # imm = 0x100
 ; X64-AVX-NEXT:    vmovq %rcx, %xmm1
 ; X64-AVX-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
-; X64-AVX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -1615,7 +1613,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -1639,7 +1637,7 @@
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X64-AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -1685,7 +1683,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovsxbq (%ecx,%eax), %xmm0
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -1709,7 +1707,7 @@
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovsxbq (%rdi,%rsi), %xmm0
-; X64-AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -1755,7 +1753,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovsxbq (%ecx,%eax), %xmm0
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -1779,7 +1777,7 @@
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovsxbq (%rdi,%rsi), %xmm0
-; X64-AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -1823,9 +1821,7 @@
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X86-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
 ;
@@ -1846,12 +1842,11 @@
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X64-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; X64-AVX-NEXT:    movl $65535, %ecx # imm = 0xFFFF
 ; X64-AVX-NEXT:    vmovq %rcx, %xmm1
 ; X64-AVX-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X64-AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
 entry:
@@ -1893,7 +1888,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovsxwq (%ecx,%eax), %xmm0
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -1914,7 +1909,7 @@
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vpmovsxwq (%rdi,%rsi), %xmm0
-; X64-AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -1958,9 +1953,7 @@
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X86-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
 ;
@@ -1984,12 +1977,11 @@
 ; X64-AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X64-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; X64-AVX-NEXT:    movl $65536, %ecx # imm = 0x10000
 ; X64-AVX-NEXT:    vmovq %rcx, %xmm1
 ; X64-AVX-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X64-AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
 entry:
@@ -2031,7 +2023,7 @@
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl c, %edx
 ; X86-AVX-NEXT:    vpmovsxwq (%ecx,%eax), %xmm0
-; X86-AVX-NEXT:    vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpmuludq {{\.LCPI.*}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT:    retl
@@ -2058,7 +2050,7 @@
 ; X64-AVX-NEXT:    movl $32768, %ecx # imm = 0x8000
 ; X64-AVX-NEXT:    vmovq %rcx, %xmm1
 ; X64-AVX-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
-; X64-AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT:    retq
@@ -2087,7 +2079,7 @@
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE-NEXT:    movdqa (%eax), %xmm5
 ; X86-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE-NEXT:    movdqa (%ecx), %xmm3
+; X86-SSE-NEXT:    movdqa (%ecx), %xmm2
 ; X86-SSE-NEXT:    movdqa 16(%ecx), %xmm6
 ; X86-SSE-NEXT:    pxor %xmm0, %xmm0
 ; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2101,10 +2093,10 @@
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm0
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[2,3,0,1]
-; X86-SSE-NEXT:    movd %xmm2, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[2,3,0,1]
-; X86-SSE-NEXT:    movd %xmm2, %esi
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1]
+; X86-SSE-NEXT:    movd %xmm3, %eax
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[2,3,0,1]
+; X86-SSE-NEXT:    movd %xmm3, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm7
@@ -2113,7 +2105,7 @@
 ; X86-SSE-NEXT:    movd %xmm6, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm2
+; X86-SSE-NEXT:    movd %edx, %xmm3
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm5, %eax
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
@@ -2121,60 +2113,57 @@
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm5
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[3,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm6, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[3,1,2,3]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm6, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm6
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
 ; X86-SSE-NEXT:    movd %xmm7, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
 ; X86-SSE-NEXT:    movd %xmm7, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    movd %edx, %xmm7
 ; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
 ; X86-SSE-NEXT:    movd %xmm4, %eax
-; X86-SSE-NEXT:    movd %xmm3, %esi
+; X86-SSE-NEXT:    movd %xmm2, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
 ; X86-SSE-NEXT:    movd %xmm4, %eax
 ; X86-SSE-NEXT:    movd %edx, %xmm4
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; X86-SSE-NEXT:    movd %xmm3, %esi
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X86-SSE-NEXT:    movd %xmm2, %esi
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm3
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; X86-SSE-NEXT:    movd %edx, %xmm2
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
 ; X86-SSE-NEXT:    movd %xmm1, %eax
-; X86-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,0],xmm6[0,0]
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
 ; X86-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
 ; X86-SSE-NEXT:    pmuludq %xmm1, %xmm4
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm3
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; X86-SSE-NEXT:    pmuludq %xmm1, %xmm2
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; X86-SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,0],xmm0[0,0]
+; X86-SSE-NEXT:    pmuludq %xmm1, %xmm3
+; X86-SSE-NEXT:    pmuludq %xmm1, %xmm5
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-SSE-NEXT:    xorl %edx, %edx
 ; X86-SSE-NEXT:    divl 32(%ecx)
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm2
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm5
-; X86-SSE-NEXT:    movd %edx, %xmm0
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[0,2,2,3]
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X86-SSE-NEXT:    movl $8199, %eax # imm = 0x2007
-; X86-SSE-NEXT:    movd %eax, %xmm2
-; X86-SSE-NEXT:    pmuludq %xmm0, %xmm2
-; X86-SSE-NEXT:    movd %xmm2, (%eax)
-; X86-SSE-NEXT:    movdqa %xmm1, (%eax)
+; X86-SSE-NEXT:    movdqa %xmm0, (%eax)
 ; X86-SSE-NEXT:    movdqa %xmm4, (%eax)
+; X86-SSE-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X86-SSE-NEXT:    movl %eax, (%eax)
 ; X86-SSE-NEXT:    popl %esi
 ; X86-SSE-NEXT:    retl
 ;
@@ -2243,16 +2232,13 @@
 ; X86-AVX1-NEXT:    vpinsrd $1, (%esp), %xmm1, %xmm1 # 4-byte Folded Reload
 ; X86-AVX1-NEXT:    vpinsrd $2, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
 ; X86-AVX1-NEXT:    vpinsrd $3, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
-; X86-AVX1-NEXT:    vmovd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 4-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm2 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    movl $8199, %eax # imm = 0x2007
-; X86-AVX1-NEXT:    vmovd %eax, %xmm3
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [8199,8199,8199,8199]
-; X86-AVX1-NEXT:    vpmulld %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpmulld %xmm4, %xmm1, %xmm1
+; X86-AVX1-NEXT:    imull $8199, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-AVX1-NEXT:    # imm = 0x2007
+; X86-AVX1-NEXT:    movl %eax, (%eax)
+; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
+; X86-AVX1-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm1
-; X86-AVX1-NEXT:    vmovd %xmm1, (%eax)
 ; X86-AVX1-NEXT:    vmovaps %ymm0, (%eax)
 ; X86-AVX1-NEXT:    addl $16, %esp
 ; X86-AVX1-NEXT:    popl %esi
@@ -2319,14 +2305,11 @@
 ; X86-AVX2-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX2-NEXT:    xorl %edx, %edx
 ; X86-AVX2-NEXT:    divl 32(%esi)
-; X86-AVX2-NEXT:    vmovd %edx, %xmm0
-; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [8199,8199,8199,8199,8199,8199,8199,8199]
-; X86-AVX2-NEXT:    vpmulld %ymm2, %ymm1, %ymm1
-; X86-AVX2-NEXT:    movl $8199, %eax # imm = 0x2007
-; X86-AVX2-NEXT:    vmovd %eax, %xmm2
-; X86-AVX2-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
-; X86-AVX2-NEXT:    vmovd %xmm0, (%eax)
-; X86-AVX2-NEXT:    vmovdqa %ymm1, (%eax)
+; X86-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [8199,8199,8199,8199,8199,8199,8199,8199]
+; X86-AVX2-NEXT:    vpmulld %ymm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X86-AVX2-NEXT:    movl %eax, (%eax)
+; X86-AVX2-NEXT:    vmovdqa %ymm0, (%eax)
 ; X86-AVX2-NEXT:    popl %esi
 ; X86-AVX2-NEXT:    popl %edi
 ; X86-AVX2-NEXT:    vzeroupper
@@ -2340,8 +2323,8 @@
 ; X64-SSE-NEXT:    movdqa 16(%rsi), %xmm6
 ; X64-SSE-NEXT:    pxor %xmm0, %xmm0
 ; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-SSE-NEXT:    movdqa %xmm5, %xmm4
-; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X64-SSE-NEXT:    movdqa %xmm5, %xmm3
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
 ; X64-SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm0, %eax
@@ -2350,10 +2333,10 @@
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm8
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1]
-; X64-SSE-NEXT:    movd %xmm3, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[2,3,0,1]
-; X64-SSE-NEXT:    movd %xmm3, %ecx
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
+; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[2,3,0,1]
+; X64-SSE-NEXT:    movd %xmm4, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm7
@@ -2362,7 +2345,7 @@
 ; X64-SSE-NEXT:    movd %xmm6, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm3
+; X64-SSE-NEXT:    movd %edx, %xmm4
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm5, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
@@ -2370,16 +2353,16 @@
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm5
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[3,1,2,3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[3,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm6, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm6, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm6
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1]
 ; X64-SSE-NEXT:    movd %xmm7, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
 ; X64-SSE-NEXT:    movd %xmm7, %ecx
@@ -2387,13 +2370,13 @@
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm7
 ; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    movd %xmm3, %eax
 ; X64-SSE-NEXT:    movd %xmm2, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
 ; X64-SSE-NEXT:    movd %edx, %xmm0
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
-; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; X64-SSE-NEXT:    movd %xmm3, %eax
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
 ; X64-SSE-NEXT:    movd %xmm2, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
@@ -2404,24 +2387,21 @@
 ; X64-SSE-NEXT:    movd %xmm1, %eax
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl 32(%rsi)
-; X64-SSE-NEXT:    movd %edx, %xmm1
-; X64-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [8199,8199,8199,8199]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm0
+; X64-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm0
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm2
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm2
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
 ; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm3
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm4
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,0],xmm8[0,0]
-; X64-SSE-NEXT:    pmuludq %xmm4, %xmm5
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[0,2,2,3]
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X64-SSE-NEXT:    movl $8199, %eax # imm = 0x2007
-; X64-SSE-NEXT:    movd %eax, %xmm3
-; X64-SSE-NEXT:    pmuludq %xmm1, %xmm3
-; X64-SSE-NEXT:    movd %xmm3, (%rax)
+; X64-SSE-NEXT:    pmuludq %xmm1, %xmm5
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X64-SSE-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X64-SSE-NEXT:    movl %eax, (%rax)
 ; X64-SSE-NEXT:    movdqa %xmm2, (%rax)
 ; X64-SSE-NEXT:    movdqa %xmm0, (%rax)
 ; X64-SSE-NEXT:    retq
@@ -2490,11 +2470,8 @@
 ; X64-AVX1-NEXT:    vpinsrd $3, %r9d, %xmm2, %xmm2
 ; X64-AVX1-NEXT:    vpmulld %xmm1, %xmm2, %xmm1
 ; X64-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vmovd %r8d, %xmm1
-; X64-AVX1-NEXT:    movl $8199, %eax # imm = 0x2007
-; X64-AVX1-NEXT:    vmovd %eax, %xmm2
-; X64-AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vmovd %xmm1, (%rax)
+; X64-AVX1-NEXT:    imull $8199, %r8d, %eax # imm = 0x2007
+; X64-AVX1-NEXT:    movl %eax, (%rax)
 ; X64-AVX1-NEXT:    vmovaps %ymm0, (%rax)
 ; X64-AVX1-NEXT:    popq %rbx
 ; X64-AVX1-NEXT:    popq %rbp
@@ -2554,14 +2531,11 @@
 ; X64-AVX2-NEXT:    vmovd %xmm0, %eax
 ; X64-AVX2-NEXT:    xorl %edx, %edx
 ; X64-AVX2-NEXT:    divl 32(%rsi)
-; X64-AVX2-NEXT:    vmovd %edx, %xmm0
-; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [8199,8199,8199,8199,8199,8199,8199,8199]
-; X64-AVX2-NEXT:    vpmulld %ymm2, %ymm1, %ymm1
-; X64-AVX2-NEXT:    movl $8199, %eax # imm = 0x2007
-; X64-AVX2-NEXT:    vmovd %eax, %xmm2
-; X64-AVX2-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vmovd %xmm0, (%rax)
-; X64-AVX2-NEXT:    vmovdqa %ymm1, (%rax)
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [8199,8199,8199,8199,8199,8199,8199,8199]
+; X64-AVX2-NEXT:    vpmulld %ymm0, %ymm1, %ymm0
+; X64-AVX2-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
+; X64-AVX2-NEXT:    movl %eax, (%rax)
+; X64-AVX2-NEXT:    vmovdqa %ymm0, (%rax)
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %a0 = load <9 x i16>, <9 x i16>* %p0, align 64
diff --git a/test/CodeGen/X86/sibcall-2.ll b/test/CodeGen/X86/sibcall-2.ll
index 6ed7b5a..d2b78aa 100644
--- a/test/CodeGen/X86/sibcall-2.ll
+++ b/test/CodeGen/X86/sibcall-2.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=i386-apple-darwin   -disable-fp-elim | FileCheck %s -check-prefix=32
-; RUN: llc -verify-machineinstrs < %s -mtriple=x86_64-apple-darwin -disable-fp-elim | FileCheck %s -check-prefix=64
+; RUN: llc -verify-machineinstrs < %s -mtriple=i386-apple-darwin   -frame-pointer=all | FileCheck %s -check-prefix=32
+; RUN: llc -verify-machineinstrs < %s -mtriple=x86_64-apple-darwin -frame-pointer=all | FileCheck %s -check-prefix=64
 
 ; Tail call should not use ebp / rbp after it's popped. Use esp / rsp.
 
diff --git a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
index 23d0d66..7fd3dc5 100644
--- a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -151,11 +151,11 @@
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
-  %res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %arg0, <16 x i8> %arg1)
+  %res = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
   %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_adds_epi16:
@@ -174,11 +174,11 @@
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
-  %res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %arg0, <8 x i16> %arg1)
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_adds_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_adds_epu8:
@@ -197,12 +197,11 @@
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
-  %1 = add <16 x i8> %arg0, %arg1
-  %2 = icmp ugt <16 x i8> %arg0, %1
-  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
-  %bc = bitcast <16 x i8> %3 to <2 x i64>
+  %res = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
+  %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
 
 define <2 x i64> @test_mm_adds_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_adds_epu16:
@@ -221,12 +220,11 @@
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
-  %1 = add <8 x i16> %arg0, %arg1
-  %2 = icmp ugt <8 x i16> %arg0, %1
-  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
-  %bc = bitcast <8 x i16> %3 to <2 x i64>
+  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
+  %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
 
 define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; SSE-LABEL: test_mm_and_pd:
@@ -2760,23 +2758,13 @@
 declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_mul_epu32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X86-SSE-LABEL: test_mm_mul_epu32:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
-; X86-SSE-NEXT:    # encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
-; X86-SSE-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-SSE-NEXT:    pand %xmm2, %xmm0 # encoding: [0x66,0x0f,0xdb,0xc2]
-; X86-SSE-NEXT:    pand %xmm2, %xmm1 # encoding: [0x66,0x0f,0xdb,0xca]
-; X86-SSE-NEXT:    pmuludq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf4,0xc1]
-; X86-SSE-NEXT:    retl # encoding: [0xc3]
+; SSE-LABEL: test_mm_mul_epu32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pmuludq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf4,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX1-LABEL: test_mm_mul_epu32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # encoding: [0xc5,0xe9,0xef,0xd2]
-; AVX1-NEXT:    vpblendw $204, %xmm2, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0e,0xc2,0xcc]
-; AVX1-NEXT:    # xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpblendw $204, %xmm2, %xmm1, %xmm1 # encoding: [0xc4,0xe3,0x71,0x0e,0xca,0xcc]
-; AVX1-NEXT:    # xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf4,0xc1]
 ; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
@@ -2789,16 +2777,6 @@
 ; AVX512-NEXT:    # xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; AVX512-NEXT:    vpmullq %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x40,0xc1]
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
-;
-; X64-SSE-LABEL: test_mm_mul_epu32:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
-; X64-SSE-NEXT:    # encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
-; X64-SSE-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
-; X64-SSE-NEXT:    pand %xmm2, %xmm0 # encoding: [0x66,0x0f,0xdb,0xc2]
-; X64-SSE-NEXT:    pand %xmm2, %xmm1 # encoding: [0x66,0x0f,0xdb,0xca]
-; X64-SSE-NEXT:    pmuludq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf4,0xc1]
-; X64-SSE-NEXT:    retq # encoding: [0xc3]
   %A = and <2 x i64> %a0, <i64 4294967295, i64 4294967295>
   %B = and <2 x i64> %a1, <i64 4294967295, i64 4294967295>
   %res = mul nuw <2 x i64> %A, %B
@@ -3981,12 +3959,11 @@
 ;
 ; X86-AVX1-LABEL: test_mm_set1_epi64x:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
-; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
-; X86-AVX1-NEXT:    vmovd %ecx, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc1]
-; X86-AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc0,0x01]
-; X86-AVX1-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x02]
-; X86-AVX1-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc0,0x03]
+; X86-AVX1-NEXT:    vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
+; X86-AVX1-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
+; X86-AVX1-NEXT:    vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0,1,0,1]
 ; X86-AVX1-NEXT:    retl # encoding: [0xc3]
 ;
 ; X86-AVX512-LABEL: test_mm_set1_epi64x:
@@ -6176,11 +6153,11 @@
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
-  %res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %arg0, <16 x i8> %arg1)
+  %res = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
   %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_subs_epi16:
@@ -6199,85 +6176,57 @@
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
-  %res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %arg0, <8 x i16> %arg1)
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_subs_epu8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmaxub %xmm1, %xmm0 # encoding: [0x66,0x0f,0xde,0xc1]
-; SSE-NEXT:    psubb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf8,0xc1]
+; SSE-NEXT:    psubusb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd8,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX1-LABEL: test_mm_subs_epu8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xde,0xc1]
-; AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc1]
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd8,0xc1]
 ; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_mm_subs_epu8:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
-; AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
+; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
-  %cmp = icmp ugt <16 x i8> %arg0, %arg1
-  %sel = select <16 x i1> %cmp, <16 x i8> %arg0, <16 x i8> %arg1
-  %sub = sub <16 x i8> %sel, %arg1
-  %bc = bitcast <16 x i8> %sub to <2 x i64>
+  %res = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
+  %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
 
 define <2 x i64> @test_mm_subs_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; X86-SSE-LABEL: test_mm_subs_epu16:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-SSE-NEXT:    # encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
-; X86-SSE-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-SSE-NEXT:    movdqa %xmm1, %xmm3 # encoding: [0x66,0x0f,0x6f,0xd9]
-; X86-SSE-NEXT:    pxor %xmm2, %xmm3 # encoding: [0x66,0x0f,0xef,0xda]
-; X86-SSE-NEXT:    pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X86-SSE-NEXT:    pmaxsw %xmm3, %xmm0 # encoding: [0x66,0x0f,0xee,0xc3]
-; X86-SSE-NEXT:    pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X86-SSE-NEXT:    psubw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf9,0xc1]
-; X86-SSE-NEXT:    retl # encoding: [0xc3]
+; SSE-LABEL: test_mm_subs_epu16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd9,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX1-LABEL: test_mm_subs_epu16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf9,0xc1]
+; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd9,0xc1]
 ; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_mm_subs_epu16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX512-NEXT:    vpsubw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf9,0xc1]
+; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
 ; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
-;
-; X64-SSE-LABEL: test_mm_subs_epu16:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-SSE-NEXT:    # encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
-; X64-SSE-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
-; X64-SSE-NEXT:    movdqa %xmm1, %xmm3 # encoding: [0x66,0x0f,0x6f,0xd9]
-; X64-SSE-NEXT:    pxor %xmm2, %xmm3 # encoding: [0x66,0x0f,0xef,0xda]
-; X64-SSE-NEXT:    pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X64-SSE-NEXT:    pmaxsw %xmm3, %xmm0 # encoding: [0x66,0x0f,0xee,0xc3]
-; X64-SSE-NEXT:    pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
-; X64-SSE-NEXT:    psubw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf9,0xc1]
-; X64-SSE-NEXT:    retq # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
-  %cmp = icmp ugt <8 x i16> %arg0, %arg1
-  %sel = select <8 x i1> %cmp, <8 x i16> %arg0, <8 x i16> %arg1
-  %sub = sub <8 x i16> %sel, %arg1
-  %bc = bitcast <8 x i16> %sub to <2 x i64>
+  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
+  %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
 
 define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; SSE-LABEL: test_mm_ucomieq_sd:
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
index 7216f19..7e4703f 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
@@ -884,6 +884,48 @@
 declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
 
 
+define <16 x i8> @test_x86_sse2_padds_b(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_x86_sse2_padds_b:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    paddsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xec,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_padds_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_padds_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+  %res = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
+  ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+
+
+define <8 x i16> @test_x86_sse2_padds_w(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_x86_sse2_padds_w:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    paddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xed,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_padds_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_padds_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
+  ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+
+
 define <16 x i8> @test_x86_sse2_paddus_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_paddus_b:
 ; SSE:       ## %bb.0:
@@ -926,6 +968,48 @@
 declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 
+define <16 x i8> @test_x86_sse2_psubs_b(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_x86_sse2_psubs_b:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    psubsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe8,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_psubs_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubs_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+  %res = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
+  ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+
+
+define <8 x i16> @test_x86_sse2_psubs_w(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_x86_sse2_psubs_w:
+; SSE:       ## %bb.0:
+; SSE-NEXT:    psubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe9,0xc1]
+; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse2_psubs_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubs_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
+  ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+
+
 define <16 x i8> @test_x86_sse2_psubus_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_psubus_b:
 ; SSE:       ## %bb.0:
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86.ll b/test/CodeGen/X86/sse2-intrinsics-x86.ll
index 8dedce5..f8a9074 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -919,48 +919,6 @@
 }
 
 
-define <16 x i8> @test_x86_sse2_padds_b(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE-LABEL: test_x86_sse2_padds_b:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    paddsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xec,0xc1]
-; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX1-LABEL: test_x86_sse2_padds_b:
-; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
-; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512-LABEL: test_x86_sse2_padds_b:
-; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
-; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
-  ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_padds_w(<8 x i16> %a0, <8 x i16> %a1) {
-; SSE-LABEL: test_x86_sse2_padds_w:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    paddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xed,0xc1]
-; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX1-LABEL: test_x86_sse2_padds_w:
-; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
-; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512-LABEL: test_x86_sse2_padds_w:
-; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
-; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
-  ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
 define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmadd_wd:
 ; SSE:       ## %bb.0:
@@ -1520,48 +1478,6 @@
 declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
 
 
-define <16 x i8> @test_x86_sse2_psubs_b(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE-LABEL: test_x86_sse2_psubs_b:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    psubsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe8,0xc1]
-; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX1-LABEL: test_x86_sse2_psubs_b:
-; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
-; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512-LABEL: test_x86_sse2_psubs_b:
-; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
-; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
-  ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psubs_w(<8 x i16> %a0, <8 x i16> %a1) {
-; SSE-LABEL: test_x86_sse2_psubs_w:
-; SSE:       ## %bb.0:
-; SSE-NEXT:    psubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe9,0xc1]
-; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX1-LABEL: test_x86_sse2_psubs_w:
-; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
-; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
-; AVX512-LABEL: test_x86_sse2_psubs_w:
-; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
-; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-  %res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
-  ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
 define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_ucomieq_sd:
 ; SSE:       ## %bb.0:
diff --git a/test/CodeGen/X86/sse2-schedule.ll b/test/CodeGen/X86/sse2-schedule.ll
index 660ba8e..ca014b6 100644
--- a/test/CodeGen/X86/sse2-schedule.ll
+++ b/test/CodeGen/X86/sse2-schedule.ll
@@ -7934,12 +7934,12 @@
 ; ZNVER1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
-  %3 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %1, <16 x i8> %2)
+  %3 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %1, <16 x i8> %2)
   ret <16 x i8> %3
 }
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @test_paddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
 ; GENERIC-LABEL: test_paddsw:
@@ -8059,12 +8059,12 @@
 ; ZNVER1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
-  %3 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %1, <8 x i16> %2)
+  %3 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i8> @test_paddusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
 ; GENERIC-LABEL: test_paddusb:
@@ -8184,12 +8184,12 @@
 ; ZNVER1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %1 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
-  %3 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %1, <16 x i8> %2)
+  %3 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %1, <16 x i8> %2)
   ret <16 x i8> %3
 }
-declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @test_paddusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
 ; GENERIC-LABEL: test_paddusw:
@@ -8309,12 +8309,12 @@
 ; ZNVER1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
-  %3 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %1, <8 x i16> %2)
+  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
-declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @test_paddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
 ; GENERIC-LABEL: test_paddw:
@@ -13870,12 +13870,12 @@
 ; ZNVER1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+  %1 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
-  %3 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %1, <16 x i8> %2)
+  %3 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %1, <16 x i8> %2)
   ret <16 x i8> %3
 }
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @test_psubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
 ; GENERIC-LABEL: test_psubsw:
@@ -13995,12 +13995,12 @@
 ; ZNVER1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
-  %3 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %1, <8 x i16> %2)
+  %3 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i8> @test_psubusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
 ; GENERIC-LABEL: test_psubusb:
@@ -14120,12 +14120,12 @@
 ; ZNVER1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %1 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
-  %3 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %1, <16 x i8> %2)
+  %3 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %1, <16 x i8> %2)
   ret <16 x i8> %3
 }
-declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @test_psubusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
 ; GENERIC-LABEL: test_psubusw:
@@ -14245,12 +14245,12 @@
 ; ZNVER1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
-  %1 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
-  %3 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %1, <8 x i16> %2)
+  %3 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
-declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @test_psubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
 ; GENERIC-LABEL: test_psubw:
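
Across sse2-schedule.ll the renaming is one-to-one, and the scheduling checks
themselves are untouched, confirming that the generic intrinsics select to the
same instructions as the retired SSE2-specific ones. Sketched on the byte
forms (the v8i16 word forms in the hunks above follow the same pattern):

; @llvm.x86.sse2.padds.b   ->  @llvm.sadd.sat.v16i8   (paddsb / vpaddsb)
; @llvm.x86.sse2.paddus.b  ->  @llvm.uadd.sat.v16i8   (paddusb / vpaddusb)
; @llvm.x86.sse2.psubs.b   ->  @llvm.ssub.sat.v16i8   (psubsb / vpsubsb)
; @llvm.x86.sse2.psubus.b  ->  @llvm.usub.sat.v16i8   (psubusb / vpsubusb)
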
diff --git a/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
index dd82bef..9990ac0 100644
--- a/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
@@ -832,26 +832,11 @@
 define <2 x i64> @test_mm_mul_epi32(<2 x i64> %a0, <2 x i64> %a1) {
 ; SSE-LABEL: test_mm_mul_epi32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psllq $32, %xmm2
-; SSE-NEXT:    psrad $31, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    psllq $32, %xmm0
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
-; SSE-NEXT:    pmuldq %xmm0, %xmm2
-; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    pmuldq %xmm1, %xmm0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
 ; AVX1-LABEL: test_mm_mul_epi32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm2
-; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm2
-; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    ret{{[l|q]}}
 ;
diff --git a/test/CodeGen/X86/ssp-data-layout.ll b/test/CodeGen/X86/ssp-data-layout.ll
index e954d9c..409dd7c 100644
--- a/test/CodeGen/X86/ssp-data-layout.ll
+++ b/test/CodeGen/X86/ssp-data-layout.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -stack-symbol-ordering=0 -disable-fp-elim -mtriple=x86_64-pc-linux-gnu -mcpu=corei7 -o - | FileCheck %s
+; RUN: llc < %s -stack-symbol-ordering=0 -frame-pointer=all -mtriple=x86_64-pc-linux-gnu -mcpu=corei7 -o - | FileCheck %s
 ;  This test is fairly fragile.  The goal is to ensure that "large" stack
 ;  objects are allocated closest to the stack protector (i.e., farthest away 
 ;  from the Stack Pointer.)  In standard SSP mode this means that large (>=
@@ -11,7 +11,7 @@
 ;  and that the groups have the correct relative stack offset.  The ordering
 ;  within a group is not relevant to this test.  Unfortunately, there is not
 ;  an elegant way to do this, so just match the offset for each object.
-; RUN: llc < %s -disable-fp-elim -mtriple=x86_64-unknown-unknown -O0 -mcpu=corei7 -o - \
+; RUN: llc < %s -frame-pointer=all -mtriple=x86_64-unknown-unknown -O0 -mcpu=corei7 -o - \
 ; RUN:   | FileCheck --check-prefix=FAST-NON-LIN %s
 ; FastISel was not setting the StackProtectorIndex when lowering
 ; Intrinsic::stackprotector and as a result the stack re-arrangement code was
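
The RUN-line updates in ssp-data-layout.ll track an llc option rename only:
-disable-fp-elim is now spelled -frame-pointer=all (always keep a frame
pointer; as of this rename the other accepted values are non-leaf and none).
The test behavior is unchanged. Equivalent invocations, old and new spelling:

; RUN: llc < %s -disable-fp-elim ...
; RUN: llc < %s -frame-pointer=all ...
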
diff --git a/test/CodeGen/X86/ssub_sat.ll b/test/CodeGen/X86/ssub_sat.ll
index 6d9a534..c78e44e 100644
--- a/test/CodeGen/X86/ssub_sat.ll
+++ b/test/CodeGen/X86/ssub_sat.ll
@@ -1,267 +1,240 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
-; RUN: llc < %s -mcpu=generic -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefixes=CHECK,X64
 
 declare  i4  @llvm.ssub.sat.i4   (i4,  i4)
 declare  i32 @llvm.ssub.sat.i32  (i32, i32)
 declare  i64 @llvm.ssub.sat.i64  (i64, i64)
 declare  <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
 
-define i32 @func(i32 %x, i32 %y) {
-; CHECK-LABEL: func:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    subl %esi, %ecx
-; CHECK-NEXT:    setns %al
-; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK-NEXT:    subl %esi, %edi
-; CHECK-NEXT:    cmovnol %edi, %eax
-; CHECK-NEXT:    retq
+define i32 @func(i32 %x, i32 %y) nounwind {
+; X86-LABEL: func:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    subl %edx, %esi
+; X86-NEXT:    setns %cl
+; X86-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
+; X86-NEXT:    subl %edx, %eax
+; X86-NEXT:    cmovol %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    xorl %ecx, %ecx
-; CHECK32-NEXT:    movl %eax, %esi
-; CHECK32-NEXT:    subl %edx, %esi
-; CHECK32-NEXT:    setns %cl
-; CHECK32-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    subl %edx, %eax
-; CHECK32-NEXT:    cmovol %ecx, %eax
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
+; X64-LABEL: func:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    subl %esi, %ecx
+; X64-NEXT:    setns %al
+; X64-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    subl %esi, %edi
+; X64-NEXT:    cmovnol %edi, %eax
+; X64-NEXT:    retq
   %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y);
   ret i32 %tmp;
 }
 
-define i64 @func2(i64 %x, i64 %y) {
-; CHECK-LABEL: func2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    subq %rsi, %rax
-; CHECK-NEXT:    setns %cl
-; CHECK-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    subq %rsi, %rdi
-; CHECK-NEXT:    cmovnoq %rdi, %rax
-; CHECK-NEXT:    retq
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; X86-LABEL: func2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:    sbbl %esi, %ebp
+; X86-NEXT:    movl %ebp, %eax
+; X86-NEXT:    sarl $31, %eax
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    testl %ebp, %ebp
+; X86-NEXT:    setns %cl
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; X86-NEXT:    testl %ebx, %ebx
+; X86-NEXT:    setns %bl
+; X86-NEXT:    cmpb %cl, %bl
+; X86-NEXT:    setne %cl
+; X86-NEXT:    testl %esi, %esi
+; X86-NEXT:    setns %ch
+; X86-NEXT:    cmpb %ch, %bl
+; X86-NEXT:    setne %ch
+; X86-NEXT:    testb %cl, %ch
+; X86-NEXT:    cmovel %ebp, %edx
+; X86-NEXT:    cmovel %edi, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func2:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 20
-; CHECK32-NEXT:    .cfi_offset %esi, -20
-; CHECK32-NEXT:    .cfi_offset %edi, -16
-; CHECK32-NEXT:    .cfi_offset %ebx, -12
-; CHECK32-NEXT:    .cfi_offset %ebp, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl %ebx, %ebp
-; CHECK32-NEXT:    sbbl %esi, %ebp
-; CHECK32-NEXT:    movl %ebp, %eax
-; CHECK32-NEXT:    sarl $31, %eax
-; CHECK32-NEXT:    xorl %ecx, %ecx
-; CHECK32-NEXT:    testl %ebp, %ebp
-; CHECK32-NEXT:    setns %cl
-; CHECK32-NEXT:    movl %ecx, %edx
-; CHECK32-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    testl %ebx, %ebx
-; CHECK32-NEXT:    setns %bl
-; CHECK32-NEXT:    cmpb %cl, %bl
-; CHECK32-NEXT:    setne %cl
-; CHECK32-NEXT:    testl %esi, %esi
-; CHECK32-NEXT:    setns %ch
-; CHECK32-NEXT:    cmpb %ch, %bl
-; CHECK32-NEXT:    setne %ch
-; CHECK32-NEXT:    testb %cl, %ch
-; CHECK32-NEXT:    cmovel %ebp, %edx
-; CHECK32-NEXT:    cmovel %edi, %eax
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
+; X64-LABEL: func2:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    subq %rsi, %rax
+; X64-NEXT:    setns %cl
+; X64-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
+; X64-NEXT:    addq %rcx, %rax
+; X64-NEXT:    subq %rsi, %rdi
+; X64-NEXT:    cmovnoq %rdi, %rax
+; X64-NEXT:    retq
   %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;
 }
 
-define i4 @func3(i4 %x, i4 %y) {
-; CHECK-LABEL: func3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    shlb $4, %sil
-; CHECK-NEXT:    shlb $4, %al
-; CHECK-NEXT:    movl %eax, %ecx
-; CHECK-NEXT:    subb %sil, %cl
-; CHECK-NEXT:    setns %cl
-; CHECK-NEXT:    subb %sil, %al
-; CHECK-NEXT:    jno .LBB2_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    addb $127, %cl
-; CHECK-NEXT:    movl %ecx, %eax
-; CHECK-NEXT:  .LBB2_2:
-; CHECK-NEXT:    sarb $4, %al
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+define i4 @func3(i4 %x, i4 %y) nounwind {
+; X86-LABEL: func3:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    shlb $4, %dl
+; X86-NEXT:    shlb $4, %al
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    subb %dl, %cl
+; X86-NEXT:    setns %cl
+; X86-NEXT:    subb %dl, %al
+; X86-NEXT:    jno .LBB2_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    addb $127, %cl
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB2_2:
+; X86-NEXT:    sarb $4, %al
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func3:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %dl
-; CHECK32-NEXT:    shlb $4, %dl
-; CHECK32-NEXT:    shlb $4, %al
-; CHECK32-NEXT:    movl %eax, %ecx
-; CHECK32-NEXT:    subb %dl, %cl
-; CHECK32-NEXT:    setns %cl
-; CHECK32-NEXT:    subb %dl, %al
-; CHECK32-NEXT:    jno .LBB2_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    addb $127, %cl
-; CHECK32-NEXT:    movl %ecx, %eax
-; CHECK32-NEXT:  .LBB2_2:
-; CHECK32-NEXT:    sarb $4, %al
-; CHECK32-NEXT:    retl
+; X64-LABEL: func3:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shlb $4, %sil
+; X64-NEXT:    shlb $4, %al
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    subb %sil, %cl
+; X64-NEXT:    setns %cl
+; X64-NEXT:    subb %sil, %al
+; X64-NEXT:    jno .LBB2_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    addb $127, %cl
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:  .LBB2_2:
+; X64-NEXT:    sarb $4, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y);
   ret i4 %tmp;
 }
 
-define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: vec:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %ecx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %r8d
-; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    movl %r8d, %esi
-; CHECK-NEXT:    subl %ecx, %esi
-; CHECK-NEXT:    setns %dl
-; CHECK-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
-; CHECK-NEXT:    subl %ecx, %r8d
-; CHECK-NEXT:    cmovol %edx, %r8d
-; CHECK-NEXT:    movd %xmm1, %edx
-; CHECK-NEXT:    movd %xmm0, %ecx
-; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    movl %ecx, %edi
-; CHECK-NEXT:    subl %edx, %edi
-; CHECK-NEXT:    setns %sil
-; CHECK-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
-; CHECK-NEXT:    subl %edx, %ecx
-; CHECK-NEXT:    cmovol %esi, %ecx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; CHECK-NEXT:    movd %xmm2, %edx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; CHECK-NEXT:    movd %xmm2, %eax
-; CHECK-NEXT:    xorl %edi, %edi
-; CHECK-NEXT:    movl %eax, %esi
-; CHECK-NEXT:    subl %edx, %esi
-; CHECK-NEXT:    setns %dil
-; CHECK-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
-; CHECK-NEXT:    subl %edx, %eax
-; CHECK-NEXT:    cmovol %edi, %eax
-; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; CHECK-NEXT:    movd %xmm1, %r9d
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %edx
-; CHECK-NEXT:    xorl %edi, %edi
-; CHECK-NEXT:    movl %edx, %esi
-; CHECK-NEXT:    subl %r9d, %esi
-; CHECK-NEXT:    setns %dil
-; CHECK-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
-; CHECK-NEXT:    subl %r9d, %edx
-; CHECK-NEXT:    cmovol %edi, %edx
-; CHECK-NEXT:    movd %edx, %xmm0
-; CHECK-NEXT:    movd %eax, %xmm1
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT:    movd %ecx, %xmm0
-; CHECK-NEXT:    movd %r8d, %xmm2
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT:    retq
+define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X86-LABEL: vec:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    subl %edx, %esi
+; X86-NEXT:    setns %al
+; X86-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    subl %edx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmovol %eax, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:    subl %esi, %edi
+; X86-NEXT:    setns %al
+; X86-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    subl %esi, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    cmovol %eax, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    subl %edi, %ebx
+; X86-NEXT:    setns %al
+; X86-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    subl %edi, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmovol %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    movl %edi, %ebp
+; X86-NEXT:    subl %eax, %ebp
+; X86-NEXT:    setns %bl
+; X86-NEXT:    addl $2147483647, %ebx # imm = 0x7FFFFFFF
+; X86-NEXT:    subl %eax, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmovol %ebx, %edi
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
 ;
-; CHECK32-LABEL: vec:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 20
-; CHECK32-NEXT:    .cfi_offset %esi, -20
-; CHECK32-NEXT:    .cfi_offset %edi, -16
-; CHECK32-NEXT:    .cfi_offset %ebx, -12
-; CHECK32-NEXT:    .cfi_offset %ebp, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    xorl %eax, %eax
-; CHECK32-NEXT:    movl %ecx, %esi
-; CHECK32-NEXT:    subl %edx, %esi
-; CHECK32-NEXT:    setns %al
-; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    subl %edx, %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    cmovol %eax, %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    xorl %eax, %eax
-; CHECK32-NEXT:    movl %edx, %edi
-; CHECK32-NEXT:    subl %esi, %edi
-; CHECK32-NEXT:    setns %al
-; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    subl %esi, %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    cmovol %eax, %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    xorl %eax, %eax
-; CHECK32-NEXT:    movl %esi, %ebx
-; CHECK32-NEXT:    subl %edi, %ebx
-; CHECK32-NEXT:    setns %al
-; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    subl %edi, %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    cmovol %eax, %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    xorl %ebx, %ebx
-; CHECK32-NEXT:    movl %edi, %ebp
-; CHECK32-NEXT:    subl %eax, %ebp
-; CHECK32-NEXT:    setns %bl
-; CHECK32-NEXT:    addl $2147483647, %ebx # imm = 0x7FFFFFFF
-; CHECK32-NEXT:    subl %eax, %edi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    cmovol %ebx, %edi
-; CHECK32-NEXT:    movl %ecx, 12(%eax)
-; CHECK32-NEXT:    movl %edx, 8(%eax)
-; CHECK32-NEXT:    movl %esi, 4(%eax)
-; CHECK32-NEXT:    movl %edi, (%eax)
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %ebp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl $4
+; X64-LABEL: vec:
+; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; X64-NEXT:    movd %xmm2, %r8d
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    movl %r8d, %esi
+; X64-NEXT:    subl %ecx, %esi
+; X64-NEXT:    setns %dl
+; X64-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; X64-NEXT:    subl %ecx, %r8d
+; X64-NEXT:    cmovol %edx, %r8d
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    movd %xmm0, %ecx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    movl %ecx, %edi
+; X64-NEXT:    subl %edx, %edi
+; X64-NEXT:    setns %sil
+; X64-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; X64-NEXT:    subl %edx, %ecx
+; X64-NEXT:    cmovol %esi, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    movl %eax, %esi
+; X64-NEXT:    subl %edx, %esi
+; X64-NEXT:    setns %dil
+; X64-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; X64-NEXT:    subl %edx, %eax
+; X64-NEXT:    cmovol %edi, %eax
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; X64-NEXT:    movd %xmm1, %r9d
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    movl %edx, %esi
+; X64-NEXT:    subl %r9d, %esi
+; X64-NEXT:    setns %dil
+; X64-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; X64-NEXT:    subl %r9d, %edx
+; X64-NEXT:    cmovol %edi, %edx
+; X64-NEXT:    movd %edx, %xmm0
+; X64-NEXT:    movd %eax, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-NEXT:    movd %ecx, %xmm0
+; X64-NEXT:    movd %r8d, %xmm2
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    retq
   %tmp = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
   ret <4 x i32> %tmp;
 }
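
The ssub_sat.ll rewrite above is mechanical: the check prefixes move to the
X86/X64 convention with the 32-bit RUN line first, and the functions gain
nounwind so the autogenerated checks no longer carry .cfi_* directives. The
scalar expansion visible in the checks is branchless: saturate to INT_MAX when
the wrapped difference is negative (the true result overflowed upward) and to
INT_MIN when it is non-negative, selected with cmov. A sketch of that
expansion in IR, roughly what legalization produces for a type with no native
saturating subtract (illustrative function name, not from this patch):

define i32 @ssub_sat_expansion(i32 %x, i32 %y) {
  %pair = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %x, i32 %y)
  %diff = extractvalue { i32, i1 } %pair, 0
  %ov   = extractvalue { i32, i1 } %pair, 1
  ; Pick the saturation constant from the sign of the wrapped difference:
  ; 0x7FFFFFFF + (diff >= 0) yields INT_MIN for a non-negative wrap and
  ; INT_MAX for a negative one, matching the setns/addl sequence above.
  %isneg = icmp slt i32 %diff, 0
  %sat   = select i1 %isneg, i32 2147483647, i32 -2147483648
  %res   = select i1 %ov, i32 %sat, i32 %diff
  ret i32 %res
}
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
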
diff --git a/test/CodeGen/X86/ssub_sat_vec.ll b/test/CodeGen/X86/ssub_sat_vec.ll
new file mode 100644
index 0000000..28a8ee2
--- /dev/null
+++ b/test/CodeGen/X86/ssub_sat_vec.ll
@@ -0,0 +1,1380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+
+declare <1 x i8> @llvm.ssub.sat.v1i8(<1 x i8>, <1 x i8>)
+declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>)
+declare <12 x i8> @llvm.ssub.sat.v12i8(<12 x i8>, <12 x i8>)
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8>, <64 x i8>)
+
+declare <1 x i16> @llvm.ssub.sat.v1i16(<1 x i16>, <1 x i16>)
+declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <12 x i16> @llvm.ssub.sat.v12i16(<12 x i16>, <12 x i16>)
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare <16 x i1> @llvm.ssub.sat.v16i1(<16 x i1>, <16 x i1>)
+declare <16 x i4> @llvm.ssub.sat.v16i4(<16 x i4>, <16 x i4>)
+
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i24> @llvm.ssub.sat.v4i24(<4 x i24>, <4 x i24>)
+declare <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128>, <2 x i128>)
+
+; Legal types, depending on architecture.
+
+define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE-LABEL: v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+  ret <16 x i8> %z
+}
+
+define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; SSE-LABEL: v32i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsb %xmm2, %xmm0
+; SSE-NEXT:    psubsb %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpsubsb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %x, <32 x i8> %y)
+  ret <32 x i8> %z
+}
+
+define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; SSE-LABEL: v64i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsb %xmm4, %xmm0
+; SSE-NEXT:    psubsb %xmm5, %xmm1
+; SSE-NEXT:    psubsb %xmm6, %xmm2
+; SSE-NEXT:    psubsb %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v64i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpsubsb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsubsb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsubsb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsubsb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v64i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubsb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubsb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v64i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubsb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %x, <64 x i8> %y)
+  ret <64 x i8> %z
+}
+
+define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE-LABEL: v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+  ret <8 x i16> %z
+}
+
+define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; SSE-LABEL: v16i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsw %xmm2, %xmm0
+; SSE-NEXT:    psubsw %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpsubsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %x, <16 x i16> %y)
+  ret <16 x i16> %z
+}
+
+define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; SSE-LABEL: v32i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsw %xmm4, %xmm0
+; SSE-NEXT:    psubsw %xmm5, %xmm1
+; SSE-NEXT:    psubsw %xmm6, %xmm2
+; SSE-NEXT:    psubsw %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpsubsw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsubsw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsubsw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsubsw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubsw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubsw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %x, <32 x i16> %y)
+  ret <32 x i16> %z
+}
+
+; Too narrow vectors, legalized by widening.
+
+define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+; SSE-LABEL: v8i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    psubsb %xmm1, %xmm0
+; SSE-NEXT:    movq %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v8i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v8i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vpmovwb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <8 x i8>, <8 x i8>* %px
+  %y = load <8 x i8>, <8 x i8>* %py
+  %z = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
+  store <8 x i8> %z, <8 x i8>* %pz
+  ret void
+}
+
+define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+; SSE-LABEL: v4i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    psubsb %xmm1, %xmm0
+; SSE-NEXT:    movd %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT:    vpmovdb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i8>, <4 x i8>* %px
+  %y = load <4 x i8>, <4 x i8>* %py
+  %z = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
+  store <4 x i8> %z, <4 x i8>* %pz
+  ret void
+}
+
+define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+; SSE2-LABEL: v2i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movzwl (%rdi), %eax
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movzwl (%rsi), %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    psubsb %xmm1, %xmm0
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movw %ax, (%rdx)
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i8:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movzwl (%rdi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movzwl (%rsi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    psubsb %xmm1, %xmm0
+; SSSE3-NEXT:    movd %xmm0, %eax
+; SSSE3-NEXT:    movw %ax, (%rdx)
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movzwl (%rdi), %eax
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    movzwl (%rsi), %eax
+; SSE41-NEXT:    movd %eax, %xmm1
+; SSE41-NEXT:    psubsb %xmm1, %xmm0
+; SSE41-NEXT:    pextrw $0, %xmm0, (%rdx)
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movzwl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    movzwl (%rsi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movzwl (%rdi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    movzwl (%rsi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    movzwl (%rsi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpmovqb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i8>, <2 x i8>* %px
+  %y = load <2 x i8>, <2 x i8>* %py
+  %z = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
+  store <2 x i8> %z, <2 x i8>* %pz
+  ret void
+}
+
+define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+; SSE-LABEL: v4i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    psubsw %xmm1, %xmm0
+; SSE-NEXT:    movq %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vpmovdw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i16>, <4 x i16>* %px
+  %y = load <4 x i16>, <4 x i16>* %py
+  %z = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
+  store <4 x i16> %z, <4 x i16>* %pz
+  ret void
+}
+
+define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+; SSE-LABEL: v2i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    psubsw %xmm1, %xmm0
+; SSE-NEXT:    movd %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v2i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpmovqw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i16>, <2 x i16>* %px
+  %y = load <2 x i16>, <2 x i16>* %py
+  %z = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
+  store <2 x i16> %z, <2 x i16>* %pz
+  ret void
+}
+
+define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
+; SSE-LABEL: v12i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v12i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <12 x i8> @llvm.ssub.sat.v12i8(<12 x i8> %x, <12 x i8> %y)
+  ret <12 x i8> %z
+}
+
+define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+; SSE-LABEL: v12i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa (%rdi), %xmm0
+; SSE-NEXT:    movdqa 16(%rdi), %xmm1
+; SSE-NEXT:    psubsw (%rsi), %xmm0
+; SSE-NEXT:    psubsw 16(%rsi), %xmm1
+; SSE-NEXT:    movq %xmm1, 16(%rdx)
+; SSE-NEXT:    movdqa %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v12i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT:    vpsubsw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT:    vpsubsw 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v12i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vpsubsw (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v12i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512-NEXT:    vpsubsw (%rsi), %ymm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = load <12 x i16>, <12 x i16>* %px
+  %y = load <12 x i16>, <12 x i16>* %py
+  %z = call <12 x i16> @llvm.ssub.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
+  store <12 x i16> %z, <12 x i16>* %pz
+  ret void
+}
+
+; Scalarization
+
+define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+; SSE-LABEL: v1i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movb (%rdi), %cl
+; SSE-NEXT:    movb (%rsi), %dil
+; SSE-NEXT:    movl %ecx, %eax
+; SSE-NEXT:    subb %dil, %al
+; SSE-NEXT:    setns %sil
+; SSE-NEXT:    subb %dil, %cl
+; SSE-NEXT:    jno .LBB13_2
+; SSE-NEXT:  # %bb.1:
+; SSE-NEXT:    addb $127, %sil
+; SSE-NEXT:    movl %esi, %ecx
+; SSE-NEXT:  .LBB13_2:
+; SSE-NEXT:    movb %cl, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movb (%rdi), %cl
+; AVX-NEXT:    movb (%rsi), %dil
+; AVX-NEXT:    movl %ecx, %eax
+; AVX-NEXT:    subb %dil, %al
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    subb %dil, %cl
+; AVX-NEXT:    jno .LBB13_2
+; AVX-NEXT:  # %bb.1:
+; AVX-NEXT:    addb $127, %sil
+; AVX-NEXT:    movl %esi, %ecx
+; AVX-NEXT:  .LBB13_2:
+; AVX-NEXT:    movb %cl, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i8>, <1 x i8>* %px
+  %y = load <1 x i8>, <1 x i8>* %py
+  %z = call <1 x i8> @llvm.ssub.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
+  store <1 x i8> %z, <1 x i8>* %pz
+  ret void
+}
+
+define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+; SSE-LABEL: v1i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movzwl (%rdi), %eax
+; SSE-NEXT:    movzwl (%rsi), %ecx
+; SSE-NEXT:    xorl %esi, %esi
+; SSE-NEXT:    movl %eax, %edi
+; SSE-NEXT:    subw %cx, %di
+; SSE-NEXT:    setns %sil
+; SSE-NEXT:    addl $32767, %esi # imm = 0x7FFF
+; SSE-NEXT:    subw %cx, %ax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    movw %ax, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movzwl (%rdi), %eax
+; AVX-NEXT:    movzwl (%rsi), %ecx
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %eax, %edi
+; AVX-NEXT:    subw %cx, %di
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $32767, %esi # imm = 0x7FFF
+; AVX-NEXT:    subw %cx, %ax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    movw %ax, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i16>, <1 x i16>* %px
+  %y = load <1 x i16>, <1 x i16>* %py
+  %z = call <1 x i16> @llvm.ssub.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
+  store <1 x i16> %z, <1 x i16>* %pz
+  ret void
+}
+
+; Promotion
+
+define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) nounwind {
+; SSE-LABEL: v16i4:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $4, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    psubsb %xmm1, %xmm0
+; SSE-NEXT:    psrlw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE-NEXT:    pxor %xmm1, %xmm0
+; SSE-NEXT:    psubb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllw $4, %xmm1, %xmm1
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i4> @llvm.ssub.sat.v16i4(<16 x i4> %x, <16 x i4> %y)
+  ret <16 x i4> %z
+}
+
+define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) nounwind {
+; SSE-LABEL: v16i1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $7, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $7, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    psubsb %xmm1, %xmm0
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    pcmpgtb %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i1:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i1:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i1:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovb2m %xmm0, %k0
+; AVX512-NEXT:    vpsllw $7, %xmm1, %xmm0
+; AVX512-NEXT:    vpmovb2m %xmm0, %k1
+; AVX512-NEXT:    kandnw %k0, %k1, %k0
+; AVX512-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i1> @llvm.ssub.sat.v16i1(<16 x i1> %x, <16 x i1> %y)
+  ret <16 x i1> %z
+}
+
+; Expanded
+
+define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE2-LABEL: v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %r8d
+; SSE2-NEXT:    xorl %edx, %edx
+; SSE2-NEXT:    movl %r8d, %esi
+; SSE2-NEXT:    subl %ecx, %esi
+; SSE2-NEXT:    setns %dl
+; SSE2-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %ecx, %r8d
+; SSE2-NEXT:    cmovol %edx, %r8d
+; SSE2-NEXT:    movd %xmm1, %edx
+; SSE2-NEXT:    movd %xmm0, %ecx
+; SSE2-NEXT:    xorl %esi, %esi
+; SSE2-NEXT:    movl %ecx, %edi
+; SSE2-NEXT:    subl %edx, %edi
+; SSE2-NEXT:    setns %sil
+; SSE2-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %edx, %ecx
+; SSE2-NEXT:    cmovol %esi, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %edx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %eax, %esi
+; SSE2-NEXT:    subl %edx, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %edx, %eax
+; SSE2-NEXT:    cmovol %edi, %eax
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %r9d
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %edx, %esi
+; SSE2-NEXT:    subl %r9d, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %r9d, %edx
+; SSE2-NEXT:    cmovol %edi, %edx
+; SSE2-NEXT:    movd %edx, %xmm0
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movd %r8d, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %r8d
+; SSSE3-NEXT:    xorl %edx, %edx
+; SSSE3-NEXT:    movl %r8d, %esi
+; SSSE3-NEXT:    subl %ecx, %esi
+; SSSE3-NEXT:    setns %dl
+; SSSE3-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %ecx, %r8d
+; SSSE3-NEXT:    cmovol %edx, %r8d
+; SSSE3-NEXT:    movd %xmm1, %edx
+; SSSE3-NEXT:    movd %xmm0, %ecx
+; SSSE3-NEXT:    xorl %esi, %esi
+; SSSE3-NEXT:    movl %ecx, %edi
+; SSSE3-NEXT:    subl %edx, %edi
+; SSSE3-NEXT:    setns %sil
+; SSSE3-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %edx, %ecx
+; SSSE3-NEXT:    cmovol %esi, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %edx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %eax
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %eax, %esi
+; SSSE3-NEXT:    subl %edx, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %edx, %eax
+; SSSE3-NEXT:    cmovol %edi, %eax
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %r9d
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %edx, %esi
+; SSSE3-NEXT:    subl %r9d, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %r9d, %edx
+; SSSE3-NEXT:    cmovol %edi, %edx
+; SSSE3-NEXT:    movd %edx, %xmm0
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movd %r8d, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pextrd $3, %xmm1, %ecx
+; SSE41-NEXT:    pextrd $3, %xmm0, %r8d
+; SSE41-NEXT:    xorl %edx, %edx
+; SSE41-NEXT:    movl %r8d, %esi
+; SSE41-NEXT:    subl %ecx, %esi
+; SSE41-NEXT:    setns %dl
+; SSE41-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %ecx, %r8d
+; SSE41-NEXT:    cmovol %edx, %r8d
+; SSE41-NEXT:    pextrd $2, %xmm1, %edx
+; SSE41-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE41-NEXT:    xorl %esi, %esi
+; SSE41-NEXT:    movl %ecx, %edi
+; SSE41-NEXT:    subl %edx, %edi
+; SSE41-NEXT:    setns %sil
+; SSE41-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %edx, %ecx
+; SSE41-NEXT:    cmovol %esi, %ecx
+; SSE41-NEXT:    movd %xmm1, %edx
+; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %eax, %esi
+; SSE41-NEXT:    subl %edx, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %edx, %eax
+; SSE41-NEXT:    cmovol %edi, %eax
+; SSE41-NEXT:    pextrd $1, %xmm1, %r9d
+; SSE41-NEXT:    pextrd $1, %xmm0, %edx
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %edx, %esi
+; SSE41-NEXT:    subl %r9d, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %r9d, %edx
+; SSE41-NEXT:    cmovol %edi, %edx
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    pinsrd $1, %edx, %xmm0
+; SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
+; SSE41-NEXT:    pinsrd $3, %r8d, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX-NEXT:    vpextrd $3, %xmm0, %r9d
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    movl %r9d, %esi
+; AVX-NEXT:    subl %ecx, %esi
+; AVX-NEXT:    setns %dl
+; AVX-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %ecx, %r9d
+; AVX-NEXT:    cmovol %edx, %r9d
+; AVX-NEXT:    vpextrd $2, %xmm1, %edx
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %ecx, %edi
+; AVX-NEXT:    subl %edx, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %edx, %ecx
+; AVX-NEXT:    cmovol %esi, %ecx
+; AVX-NEXT:    vmovd %xmm1, %r8d
+; AVX-NEXT:    vmovd %xmm0, %edx
+; AVX-NEXT:    xorl %edi, %edi
+; AVX-NEXT:    movl %edx, %esi
+; AVX-NEXT:    subl %r8d, %esi
+; AVX-NEXT:    setns %dil
+; AVX-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %r8d, %edx
+; AVX-NEXT:    cmovol %edi, %edx
+; AVX-NEXT:    vpextrd $1, %xmm1, %r8d
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %eax, %edi
+; AVX-NEXT:    subl %r8d, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %r8d, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    vmovd %edx, %xmm0
+; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i32> %z
+}
+
+define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
+; SSE2-LABEL: v2i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movq %xmm2, %rax
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movq %xmm2, %rcx
+; SSE2-NEXT:    xorl %edx, %edx
+; SSE2-NEXT:    movq %rcx, %rsi
+; SSE2-NEXT:    subq %rax, %rsi
+; SSE2-NEXT:    setns %dl
+; SSE2-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; SSE2-NEXT:    addq %r8, %rdx
+; SSE2-NEXT:    subq %rax, %rcx
+; SSE2-NEXT:    cmovoq %rdx, %rcx
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    movq %xmm0, %rsi
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movq %rsi, %rdx
+; SSE2-NEXT:    subq %rax, %rdx
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addq %r8, %rdi
+; SSE2-NEXT:    subq %rax, %rsi
+; SSE2-NEXT:    cmovoq %rdi, %rsi
+; SSE2-NEXT:    movq %rsi, %xmm1
+; SSE2-NEXT:    movq %rcx, %xmm0
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    psllq $32, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm2, %rax
+; SSSE3-NEXT:    psllq $32, %xmm0
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm2, %rcx
+; SSSE3-NEXT:    xorl %edx, %edx
+; SSSE3-NEXT:    movq %rcx, %rsi
+; SSSE3-NEXT:    subq %rax, %rsi
+; SSSE3-NEXT:    setns %dl
+; SSSE3-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; SSSE3-NEXT:    addq %r8, %rdx
+; SSSE3-NEXT:    subq %rax, %rcx
+; SSSE3-NEXT:    cmovoq %rdx, %rcx
+; SSSE3-NEXT:    movq %xmm1, %rax
+; SSSE3-NEXT:    movq %xmm0, %rsi
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movq %rsi, %rdx
+; SSSE3-NEXT:    subq %rax, %rdx
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addq %r8, %rdi
+; SSSE3-NEXT:    subq %rax, %rsi
+; SSSE3-NEXT:    cmovoq %rdi, %rsi
+; SSSE3-NEXT:    movq %rsi, %xmm1
+; SSSE3-NEXT:    movq %rcx, %xmm0
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSSE3-NEXT:    psrad $31, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    psllq $32, %xmm1
+; SSE41-NEXT:    movq %xmm1, %rax
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    movq %xmm0, %rcx
+; SSE41-NEXT:    xorl %edx, %edx
+; SSE41-NEXT:    movq %rcx, %rsi
+; SSE41-NEXT:    subq %rax, %rsi
+; SSE41-NEXT:    setns %dl
+; SSE41-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; SSE41-NEXT:    addq %r8, %rdx
+; SSE41-NEXT:    subq %rax, %rcx
+; SSE41-NEXT:    cmovoq %rdx, %rcx
+; SSE41-NEXT:    pextrq $1, %xmm1, %rax
+; SSE41-NEXT:    pextrq $1, %xmm0, %rsi
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movq %rsi, %rdx
+; SSE41-NEXT:    subq %rax, %rdx
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addq %r8, %rdi
+; SSE41-NEXT:    subq %rax, %rsi
+; SSE41-NEXT:    cmovoq %rdi, %rsi
+; SSE41-NEXT:    movq %rsi, %xmm1
+; SSE41-NEXT:    movq %rcx, %xmm0
+; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    psrad $31, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vmovq %xmm1, %rax
+; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, %rcx
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    movq %rcx, %rsi
+; AVX1-NEXT:    subq %rax, %rsi
+; AVX1-NEXT:    setns %dl
+; AVX1-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX1-NEXT:    addq %r8, %rdx
+; AVX1-NEXT:    subq %rax, %rcx
+; AVX1-NEXT:    cmovoq %rdx, %rcx
+; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX1-NEXT:    xorl %edi, %edi
+; AVX1-NEXT:    movq %rsi, %rdx
+; AVX1-NEXT:    subq %rax, %rdx
+; AVX1-NEXT:    setns %dil
+; AVX1-NEXT:    addq %r8, %rdi
+; AVX1-NEXT:    subq %rax, %rsi
+; AVX1-NEXT:    cmovoq %rdi, %rsi
+; AVX1-NEXT:    vmovq %rsi, %xmm0
+; AVX1-NEXT:    vmovq %rcx, %xmm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, %rax
+; AVX2-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, %rcx
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    movq %rcx, %rsi
+; AVX2-NEXT:    subq %rax, %rsi
+; AVX2-NEXT:    setns %dl
+; AVX2-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX2-NEXT:    addq %r8, %rdx
+; AVX2-NEXT:    subq %rax, %rcx
+; AVX2-NEXT:    cmovoq %rdx, %rcx
+; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX2-NEXT:    xorl %edi, %edi
+; AVX2-NEXT:    movq %rsi, %rdx
+; AVX2-NEXT:    subq %rax, %rdx
+; AVX2-NEXT:    setns %dil
+; AVX2-NEXT:    addq %r8, %rdi
+; AVX2-NEXT:    subq %rax, %rsi
+; AVX2-NEXT:    cmovoq %rdi, %rsi
+; AVX2-NEXT:    vmovq %rsi, %xmm0
+; AVX2-NEXT:    vmovq %rcx, %xmm1
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rcx
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    movq %rcx, %rsi
+; AVX512-NEXT:    subq %rax, %rsi
+; AVX512-NEXT:    setns %dl
+; AVX512-NEXT:    movabsq $9223372036854775807, %r8 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX512-NEXT:    addq %r8, %rdx
+; AVX512-NEXT:    subq %rax, %rcx
+; AVX512-NEXT:    cmovoq %rdx, %rcx
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX512-NEXT:    xorl %edi, %edi
+; AVX512-NEXT:    movq %rsi, %rdx
+; AVX512-NEXT:    subq %rax, %rdx
+; AVX512-NEXT:    setns %dil
+; AVX512-NEXT:    addq %r8, %rdi
+; AVX512-NEXT:    subq %rax, %rsi
+; AVX512-NEXT:    cmovoq %rdi, %rsi
+; AVX512-NEXT:    vmovq %rsi, %xmm0
+; AVX512-NEXT:    vmovq %rcx, %xmm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT:    vpsraq $32, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
+  ret <2 x i32> %z
+}
+
+define <4 x i24> @v4i24(<4 x i24> %x, <4 x i24> %y) nounwind {
+; SSE2-LABEL: v4i24:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pslld $8, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %ecx
+; SSE2-NEXT:    pslld $8, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %r8d
+; SSE2-NEXT:    xorl %edx, %edx
+; SSE2-NEXT:    movl %r8d, %esi
+; SSE2-NEXT:    subl %ecx, %esi
+; SSE2-NEXT:    setns %dl
+; SSE2-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %ecx, %r8d
+; SSE2-NEXT:    cmovol %edx, %r8d
+; SSE2-NEXT:    movd %xmm1, %edx
+; SSE2-NEXT:    movd %xmm0, %ecx
+; SSE2-NEXT:    xorl %esi, %esi
+; SSE2-NEXT:    movl %ecx, %edi
+; SSE2-NEXT:    subl %edx, %edi
+; SSE2-NEXT:    setns %sil
+; SSE2-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %edx, %ecx
+; SSE2-NEXT:    cmovol %esi, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %edx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %eax, %esi
+; SSE2-NEXT:    subl %edx, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %edx, %eax
+; SSE2-NEXT:    cmovol %edi, %eax
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %r9d
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    xorl %edi, %edi
+; SSE2-NEXT:    movl %edx, %esi
+; SSE2-NEXT:    subl %r9d, %esi
+; SSE2-NEXT:    setns %dil
+; SSE2-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE2-NEXT:    subl %r9d, %edx
+; SSE2-NEXT:    cmovol %edi, %edx
+; SSE2-NEXT:    movd %edx, %xmm0
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movd %r8d, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    psrad $8, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i24:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pslld $8, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %ecx
+; SSSE3-NEXT:    pslld $8, %xmm0
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %r8d
+; SSSE3-NEXT:    xorl %edx, %edx
+; SSSE3-NEXT:    movl %r8d, %esi
+; SSSE3-NEXT:    subl %ecx, %esi
+; SSSE3-NEXT:    setns %dl
+; SSSE3-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %ecx, %r8d
+; SSSE3-NEXT:    cmovol %edx, %r8d
+; SSSE3-NEXT:    movd %xmm1, %edx
+; SSSE3-NEXT:    movd %xmm0, %ecx
+; SSSE3-NEXT:    xorl %esi, %esi
+; SSSE3-NEXT:    movl %ecx, %edi
+; SSSE3-NEXT:    subl %edx, %edi
+; SSSE3-NEXT:    setns %sil
+; SSSE3-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %edx, %ecx
+; SSSE3-NEXT:    cmovol %esi, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %edx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm2, %eax
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %eax, %esi
+; SSSE3-NEXT:    subl %edx, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %edx, %eax
+; SSSE3-NEXT:    cmovol %edi, %eax
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %r9d
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    xorl %edi, %edi
+; SSSE3-NEXT:    movl %edx, %esi
+; SSSE3-NEXT:    subl %r9d, %esi
+; SSSE3-NEXT:    setns %dil
+; SSSE3-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSSE3-NEXT:    subl %r9d, %edx
+; SSSE3-NEXT:    cmovol %edi, %edx
+; SSSE3-NEXT:    movd %edx, %xmm0
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movd %r8d, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    psrad $8, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i24:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pslld $8, %xmm1
+; SSE41-NEXT:    pextrd $3, %xmm1, %ecx
+; SSE41-NEXT:    pslld $8, %xmm0
+; SSE41-NEXT:    pextrd $3, %xmm0, %r8d
+; SSE41-NEXT:    xorl %edx, %edx
+; SSE41-NEXT:    movl %r8d, %esi
+; SSE41-NEXT:    subl %ecx, %esi
+; SSE41-NEXT:    setns %dl
+; SSE41-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %ecx, %r8d
+; SSE41-NEXT:    cmovol %edx, %r8d
+; SSE41-NEXT:    pextrd $2, %xmm1, %edx
+; SSE41-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE41-NEXT:    xorl %esi, %esi
+; SSE41-NEXT:    movl %ecx, %edi
+; SSE41-NEXT:    subl %edx, %edi
+; SSE41-NEXT:    setns %sil
+; SSE41-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %edx, %ecx
+; SSE41-NEXT:    cmovol %esi, %ecx
+; SSE41-NEXT:    movd %xmm1, %edx
+; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %eax, %esi
+; SSE41-NEXT:    subl %edx, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %edx, %eax
+; SSE41-NEXT:    cmovol %edi, %eax
+; SSE41-NEXT:    pextrd $1, %xmm1, %r9d
+; SSE41-NEXT:    pextrd $1, %xmm0, %edx
+; SSE41-NEXT:    xorl %edi, %edi
+; SSE41-NEXT:    movl %edx, %esi
+; SSE41-NEXT:    subl %r9d, %esi
+; SSE41-NEXT:    setns %dil
+; SSE41-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; SSE41-NEXT:    subl %r9d, %edx
+; SSE41-NEXT:    cmovol %edi, %edx
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    pinsrd $1, %edx, %xmm0
+; SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
+; SSE41-NEXT:    pinsrd $3, %r8d, %xmm0
+; SSE41-NEXT:    psrad $8, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v4i24:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $8, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX-NEXT:    vpslld $8, %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $3, %xmm0, %r9d
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    movl %r9d, %esi
+; AVX-NEXT:    subl %ecx, %esi
+; AVX-NEXT:    setns %dl
+; AVX-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %ecx, %r9d
+; AVX-NEXT:    cmovol %edx, %r9d
+; AVX-NEXT:    vpextrd $2, %xmm1, %edx
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %ecx, %edi
+; AVX-NEXT:    subl %edx, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %edx, %ecx
+; AVX-NEXT:    cmovol %esi, %ecx
+; AVX-NEXT:    vmovd %xmm1, %r8d
+; AVX-NEXT:    vmovd %xmm0, %edx
+; AVX-NEXT:    xorl %edi, %edi
+; AVX-NEXT:    movl %edx, %esi
+; AVX-NEXT:    subl %r8d, %esi
+; AVX-NEXT:    setns %dil
+; AVX-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %r8d, %edx
+; AVX-NEXT:    cmovol %edi, %edx
+; AVX-NEXT:    vpextrd $1, %xmm1, %r8d
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    movl %eax, %edi
+; AVX-NEXT:    subl %r8d, %edi
+; AVX-NEXT:    setns %sil
+; AVX-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; AVX-NEXT:    subl %r8d, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    vmovd %edx, %xmm0
+; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $8, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <4 x i24> @llvm.ssub.sat.v4i24(<4 x i24> %x, <4 x i24> %y)
+  ret <4 x i24> %z
+}
+
+define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
+; SSE-LABEL: v2i128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pushq %r15
+; SSE-NEXT:    pushq %r14
+; SSE-NEXT:    pushq %r13
+; SSE-NEXT:    pushq %r12
+; SSE-NEXT:    pushq %rbx
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %r14
+; SSE-NEXT:    subq {{[0-9]+}}(%rsp), %rcx
+; SSE-NEXT:    movq %r8, %r13
+; SSE-NEXT:    sbbq %r14, %r13
+; SSE-NEXT:    movq %r13, %r10
+; SSE-NEXT:    sarq $63, %r10
+; SSE-NEXT:    xorl %edi, %edi
+; SSE-NEXT:    testq %r13, %r13
+; SSE-NEXT:    setns %dil
+; SSE-NEXT:    movabsq $9223372036854775807, %r12 # imm = 0x7FFFFFFFFFFFFFFF
+; SSE-NEXT:    leaq (%rdi,%r12), %r15
+; SSE-NEXT:    testq %r8, %r8
+; SSE-NEXT:    setns %r8b
+; SSE-NEXT:    cmpb %dil, %r8b
+; SSE-NEXT:    setne %dil
+; SSE-NEXT:    testq %r14, %r14
+; SSE-NEXT:    setns %bl
+; SSE-NEXT:    cmpb %bl, %r8b
+; SSE-NEXT:    setne %bl
+; SSE-NEXT:    testb %dil, %bl
+; SSE-NEXT:    cmoveq %r13, %r15
+; SSE-NEXT:    cmoveq %rcx, %r10
+; SSE-NEXT:    subq %r9, %rsi
+; SSE-NEXT:    movq %rdx, %rdi
+; SSE-NEXT:    sbbq %r11, %rdi
+; SSE-NEXT:    setns %bl
+; SSE-NEXT:    movzbl %bl, %ebx
+; SSE-NEXT:    addq %rbx, %r12
+; SSE-NEXT:    movq %rdi, %rcx
+; SSE-NEXT:    sarq $63, %rcx
+; SSE-NEXT:    testq %r11, %r11
+; SSE-NEXT:    setns %r8b
+; SSE-NEXT:    testq %rdx, %rdx
+; SSE-NEXT:    setns %dl
+; SSE-NEXT:    cmpb %r8b, %dl
+; SSE-NEXT:    setne %r8b
+; SSE-NEXT:    cmpb %bl, %dl
+; SSE-NEXT:    setne %dl
+; SSE-NEXT:    testb %dl, %r8b
+; SSE-NEXT:    cmoveq %rsi, %rcx
+; SSE-NEXT:    cmoveq %rdi, %r12
+; SSE-NEXT:    movq %r15, 24(%rax)
+; SSE-NEXT:    movq %r10, 16(%rax)
+; SSE-NEXT:    movq %r12, 8(%rax)
+; SSE-NEXT:    movq %rcx, (%rax)
+; SSE-NEXT:    popq %rbx
+; SSE-NEXT:    popq %r12
+; SSE-NEXT:    popq %r13
+; SSE-NEXT:    popq %r14
+; SSE-NEXT:    popq %r15
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r14
+; AVX-NEXT:    subq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    movq %r8, %r13
+; AVX-NEXT:    sbbq %r14, %r13
+; AVX-NEXT:    movq %r13, %r10
+; AVX-NEXT:    sarq $63, %r10
+; AVX-NEXT:    xorl %edi, %edi
+; AVX-NEXT:    testq %r13, %r13
+; AVX-NEXT:    setns %dil
+; AVX-NEXT:    movabsq $9223372036854775807, %r12 # imm = 0x7FFFFFFFFFFFFFFF
+; AVX-NEXT:    leaq (%rdi,%r12), %r15
+; AVX-NEXT:    testq %r8, %r8
+; AVX-NEXT:    setns %r8b
+; AVX-NEXT:    cmpb %dil, %r8b
+; AVX-NEXT:    setne %dil
+; AVX-NEXT:    testq %r14, %r14
+; AVX-NEXT:    setns %bl
+; AVX-NEXT:    cmpb %bl, %r8b
+; AVX-NEXT:    setne %bl
+; AVX-NEXT:    testb %dil, %bl
+; AVX-NEXT:    cmoveq %r13, %r15
+; AVX-NEXT:    cmoveq %rcx, %r10
+; AVX-NEXT:    subq %r9, %rsi
+; AVX-NEXT:    movq %rdx, %rdi
+; AVX-NEXT:    sbbq %r11, %rdi
+; AVX-NEXT:    setns %bl
+; AVX-NEXT:    movzbl %bl, %ebx
+; AVX-NEXT:    addq %rbx, %r12
+; AVX-NEXT:    movq %rdi, %rcx
+; AVX-NEXT:    sarq $63, %rcx
+; AVX-NEXT:    testq %r11, %r11
+; AVX-NEXT:    setns %r8b
+; AVX-NEXT:    testq %rdx, %rdx
+; AVX-NEXT:    setns %dl
+; AVX-NEXT:    cmpb %r8b, %dl
+; AVX-NEXT:    setne %r8b
+; AVX-NEXT:    cmpb %bl, %dl
+; AVX-NEXT:    setne %dl
+; AVX-NEXT:    testb %dl, %r8b
+; AVX-NEXT:    cmoveq %rsi, %rcx
+; AVX-NEXT:    cmoveq %rdi, %r12
+; AVX-NEXT:    movq %r15, 24(%rax)
+; AVX-NEXT:    movq %r10, 16(%rax)
+; AVX-NEXT:    movq %r12, 8(%rax)
+; AVX-NEXT:    movq %rcx, (%rax)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    retq
+  %z = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %x, <2 x i128> %y)
+  ret <2 x i128> %z
+}
diff --git a/test/CodeGen/X86/stack-folding-int-avx1.ll b/test/CodeGen/X86/stack-folding-int-avx1.ll
index 91bf9e2..c694fc1 100644
--- a/test/CodeGen/X86/stack-folding-int-avx1.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx1.ll
@@ -205,37 +205,37 @@
   ;CHECK-LABEL: stack_fold_paddsb
   ;CHECK:       vpaddsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddsw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddsw
   ;CHECK:       vpaddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i8> @stack_fold_paddusb(<16 x i8> %a0, <16 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_paddusb
   ;CHECK:       vpaddusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddusw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddusw
   ;CHECK:       vpaddusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddw
@@ -930,37 +930,37 @@
   ;CHECK-LABEL: stack_fold_psubsb
   ;CHECK:       vpsubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubsw
   ;CHECK:       vpsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_psubusb
   ;CHECK:       vpsubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubusw
   ;CHECK:       vpsubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubw
diff --git a/test/CodeGen/X86/stack-folding-int-avx2.ll b/test/CodeGen/X86/stack-folding-int-avx2.ll
index 9335acb..a961798 100644
--- a/test/CodeGen/X86/stack-folding-int-avx2.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx2.ll
@@ -161,37 +161,37 @@
   ;CHECK-LABEL: stack_fold_paddsb
   ;CHECK:       vpaddsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @stack_fold_paddsw(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddsw
   ;CHECK:       vpaddsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <32 x i8> @stack_fold_paddusb(<32 x i8> %a0, <32 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_paddusb
   ;CHECK:       vpaddusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @stack_fold_paddusw(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddusw
   ;CHECK:       vpaddusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <16 x i16> @stack_fold_paddw(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddw
@@ -1109,37 +1109,37 @@
   ;CHECK-LABEL: stack_fold_psubsb
   ;CHECK:       vpsubsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @stack_fold_psubsw(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubsw
   ;CHECK:       vpsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <32 x i8> @stack_fold_psubusb(<32 x i8> %a0, <32 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_psubusb
   ;CHECK:       vpsubusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @stack_fold_psubusw(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubusw
   ;CHECK:       vpsubusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <16 x i16> @stack_fold_psubw(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubw
diff --git a/test/CodeGen/X86/stack-folding-int-avx512vl.ll b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
index 8d8676f..f01bc61 100644
--- a/test/CodeGen/X86/stack-folding-int-avx512vl.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
@@ -421,73 +421,73 @@
   ;CHECK-LABEL: stack_fold_paddsb
   ;CHECK:       vpaddsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <32 x i8> @stack_fold_paddsb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_paddsb_ymm
   ;CHECK:       vpaddsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddsw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddsw
   ;CHECK:       vpaddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i16> @stack_fold_paddsw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddsw_ymm
   ;CHECK:       vpaddsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <16 x i8> @stack_fold_paddusb(<16 x i8> %a0, <16 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_paddusb
   ;CHECK:       vpaddusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <32 x i8> @stack_fold_paddusb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_paddusb_ymm
   ;CHECK:       vpaddusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddusw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddusw
   ;CHECK:       vpaddusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i16> @stack_fold_paddusw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddusw_ymm
   ;CHECK:       vpaddusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddw
@@ -2355,73 +2355,73 @@
   ;CHECK-LABEL: stack_fold_psubsb
   ;CHECK:       vpsubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <32 x i8> @stack_fold_psubsb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_psubsb_ymm
   ;CHECK:       vpsubsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubsw
   ;CHECK:       vpsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i16> @stack_fold_psubsw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubsw_ymm
   ;CHECK:       vpsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_psubusb
   ;CHECK:       vpsubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <32 x i8> @stack_fold_psubusb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_psubusb_ymm
   ;CHECK:       vpsubusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
+  %2 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
   ret <32 x i8> %2
 }
-declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubusw
   ;CHECK:       vpsubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i16> @stack_fold_psubusw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubusw_ymm
   ;CHECK:       vpsubusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
   ret <16 x i16> %2
 }
-declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubw
diff --git a/test/CodeGen/X86/stack-folding-int-sse42.ll b/test/CodeGen/X86/stack-folding-int-sse42.ll
index 3c5aeb7..c498e2b 100644
--- a/test/CodeGen/X86/stack-folding-int-sse42.ll
+++ b/test/CodeGen/X86/stack-folding-int-sse42.ll
@@ -241,37 +241,37 @@
   ;CHECK-LABEL: stack_fold_paddsb
   ;CHECK:       paddsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddsw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddsw
   ;CHECK:       paddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i8> @stack_fold_paddusb(<16 x i8> %a0, <16 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_paddusb
   ;CHECK:       paddusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddusw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddusw
   ;CHECK:       paddusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @stack_fold_paddw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_paddw
@@ -1084,37 +1084,37 @@
   ;CHECK-LABEL: stack_fold_psubsb
   ;CHECK:       psubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubsw
   ;CHECK:       psubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_psubusb
   ;CHECK:       psubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+  %2 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
   ret <16 x i8> %2
 }
-declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubusw
   ;CHECK:       psubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
   ret <8 x i16> %2
 }
-declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
   ;CHECK-LABEL: stack_fold_psubw
diff --git a/test/CodeGen/X86/sub-with-overflow.ll b/test/CodeGen/X86/sub-with-overflow.ll
index fa00d6f..0bcf2d8 100644
--- a/test/CodeGen/X86/sub-with-overflow.ll
+++ b/test/CodeGen/X86/sub-with-overflow.ll
@@ -1,9 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-linux | FileCheck %s
 
 @ok = internal constant [4 x i8] c"%d\0A\00"
 @no = internal constant [4 x i8] c"no\0A\00"
 
 define i1 @func1(i32 %v1, i32 %v2) nounwind {
+; CHECK-LABEL: func1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subl $12, %esp
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    subl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    jno .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %overflow
+; CHECK-NEXT:    movl $no, (%esp)
+; CHECK-NEXT:    calll printf
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    addl $12, %esp
+; CHECK-NEXT:    retl
+; CHECK-NEXT:  .LBB0_1: # %normal
+; CHECK-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl $ok, (%esp)
+; CHECK-NEXT:    calll printf
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    addl $12, %esp
+; CHECK-NEXT:    retl
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %sum = extractvalue {i32, i1} %t, 0
@@ -18,12 +38,28 @@
   %t2 = tail call i32 (i8*, ...) @printf( i8* getelementptr ([4 x i8], [4 x i8]* @no, i32 0, i32 0) ) nounwind
   ret i1 false
 
-; CHECK-LABEL: func1:
-; CHECK: subl 20(%esp)
-; CHECK-NEXT: jno
 }
 
 define i1 @func2(i32 %v1, i32 %v2) nounwind {
+; CHECK-LABEL: func2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subl $12, %esp
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    subl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    jae .LBB1_1
+; CHECK-NEXT:  # %bb.2: # %carry
+; CHECK-NEXT:    movl $no, (%esp)
+; CHECK-NEXT:    calll printf
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    addl $12, %esp
+; CHECK-NEXT:    retl
+; CHECK-NEXT:  .LBB1_1: # %normal
+; CHECK-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl $ok, (%esp)
+; CHECK-NEXT:    calll printf
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    addl $12, %esp
+; CHECK-NEXT:    retl
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %sum = extractvalue {i32, i1} %t, 0
@@ -38,9 +74,6 @@
   %t2 = tail call i32 (i8*, ...) @printf( i8* getelementptr ([4 x i8], [4 x i8]* @no, i32 0, i32 0) ) nounwind
   ret i1 false
 
-; CHECK-LABEL: func2:
-; CHECK: subl 20(%esp)
-; CHECK-NEXT: jae
 }
 
 declare i32 @printf(i8*, ...) nounwind
@@ -48,12 +81,14 @@
 declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32)
 
 define i1 @func3(i32 %x) nounwind {
+; CHECK-LABEL: func3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cmpl $1, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    seto %al
+; CHECK-NEXT:    retl
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %x, i32 1)
   %obit = extractvalue {i32, i1} %t, 1
   ret i1 %obit
 
-; CHECK-LABEL: func3:
-; CHECK: decl
-; CHECK-NEXT: seto
 }
diff --git a/test/CodeGen/X86/tbm_patterns.ll b/test/CodeGen/X86/tbm_patterns.ll
index 2b335ea..f7bfb5b 100644
--- a/test/CodeGen/X86/tbm_patterns.ll
+++ b/test/CodeGen/X86/tbm_patterns.ll
@@ -150,9 +150,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcfill_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT:    leal 1(%rdi), %ecx
-; CHECK-NEXT:    testl %edi, %ecx
+; CHECK-NEXT:    blcfilll %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = add i32 %a, 1
@@ -189,8 +187,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcfill_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    leaq 1(%rdi), %rcx
-; CHECK-NEXT:    testq %rdi, %rcx
+; CHECK-NEXT:    blcfillq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = add i64 %a, 1
@@ -229,10 +226,7 @@
 ; CHECK-LABEL: test_x86_tbm_blci_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT:    leal 1(%rdi), %ecx
-; CHECK-NEXT:    notl %ecx
-; CHECK-NEXT:    orl %edi, %ecx
+; CHECK-NEXT:    blcil %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = add i32 1, %a
@@ -272,9 +266,7 @@
 ; CHECK-LABEL: test_x86_tbm_blci_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    leaq 1(%rdi), %rcx
-; CHECK-NEXT:    notq %rcx
-; CHECK-NEXT:    orq %rdi, %rcx
+; CHECK-NEXT:    blciq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = add i64 1, %a
@@ -334,10 +326,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcic_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    notl %ecx
-; CHECK-NEXT:    incl %edi
-; CHECK-NEXT:    testl %ecx, %edi
+; CHECK-NEXT:    blcicl %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = xor i32 %a, -1
@@ -377,10 +366,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcic_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    movq %rdi, %rcx
-; CHECK-NEXT:    notq %rcx
-; CHECK-NEXT:    incq %rdi
-; CHECK-NEXT:    testq %rcx, %rdi
+; CHECK-NEXT:    blcicq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = xor i64 %a, -1
@@ -418,9 +404,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT:    leal 1(%rdi), %ecx
-; CHECK-NEXT:    xorl %edi, %ecx
+; CHECK-NEXT:    blcmskl %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = add i32 %a, 1
@@ -457,8 +441,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcmsk_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    leaq 1(%rdi), %rcx
-; CHECK-NEXT:    xorq %rdi, %rcx
+; CHECK-NEXT:    blcmskq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = add i64 %a, 1
@@ -495,9 +478,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcs_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT:    leal 1(%rdi), %ecx
-; CHECK-NEXT:    orl %edi, %ecx
+; CHECK-NEXT:    blcsl %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = add i32 %a, 1
@@ -534,8 +515,7 @@
 ; CHECK-LABEL: test_x86_tbm_blcs_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    leaq 1(%rdi), %rcx
-; CHECK-NEXT:    orq %rdi, %rcx
+; CHECK-NEXT:    blcsq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = add i64 %a, 1
@@ -572,9 +552,7 @@
 ; CHECK-LABEL: test_x86_tbm_blsfill_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT:    leal -1(%rdi), %ecx
-; CHECK-NEXT:    orl %edi, %ecx
+; CHECK-NEXT:    blsfilll %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = add i32 %a, -1
@@ -611,8 +589,7 @@
 ; CHECK-LABEL: test_x86_tbm_blsfill_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    leaq -1(%rdi), %rcx
-; CHECK-NEXT:    orq %rdi, %rcx
+; CHECK-NEXT:    blsfillq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = add i64 %a, -1
@@ -651,10 +628,7 @@
 ; CHECK-LABEL: test_x86_tbm_blsic_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    notl %ecx
-; CHECK-NEXT:    decl %edi
-; CHECK-NEXT:    orl %ecx, %edi
+; CHECK-NEXT:    blsicl %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = xor i32 %a, -1
@@ -694,10 +668,7 @@
 ; CHECK-LABEL: test_x86_tbm_blsic_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    movq %rdi, %rcx
-; CHECK-NEXT:    notq %rcx
-; CHECK-NEXT:    decq %rdi
-; CHECK-NEXT:    orq %rcx, %rdi
+; CHECK-NEXT:    blsicq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = xor i64 %a, -1
@@ -723,7 +694,6 @@
 ; CHECK-LABEL: test_x86_tbm_t1mskc_u32_z:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    t1mskcl %edi, %eax
-; CHECK-NEXT:    testl %eax, %eax
 ; CHECK-NEXT:    cmovel %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = xor i32 %a, -1
@@ -738,10 +708,7 @@
 ; CHECK-LABEL: test_x86_tbm_t1mskc_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    notl %ecx
-; CHECK-NEXT:    incl %edi
-; CHECK-NEXT:    orl %ecx, %edi
+; CHECK-NEXT:    t1mskcl %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = xor i32 %a, -1
@@ -767,7 +734,6 @@
 ; CHECK-LABEL: test_x86_tbm_t1mskc_u64_z:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    t1mskcq %rdi, %rax
-; CHECK-NEXT:    testq %rax, %rax
 ; CHECK-NEXT:    cmoveq %rsi, %rax
 ; CHECK-NEXT:    retq
   %t0 = xor i64 %a, -1
@@ -782,10 +748,7 @@
 ; CHECK-LABEL: test_x86_tbm_t1mskc_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    movq %rdi, %rcx
-; CHECK-NEXT:    notq %rcx
-; CHECK-NEXT:    incq %rdi
-; CHECK-NEXT:    orq %rcx, %rdi
+; CHECK-NEXT:    t1mskcq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = xor i64 %a, -1
@@ -811,7 +774,6 @@
 ; CHECK-LABEL: test_x86_tbm_tzmsk_u32_z:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    tzmskl %edi, %eax
-; CHECK-NEXT:    testl %eax, %eax
 ; CHECK-NEXT:    cmovel %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = xor i32 %a, -1
@@ -826,10 +788,7 @@
 ; CHECK-LABEL: test_x86_tbm_tzmsk_u32_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    notl %ecx
-; CHECK-NEXT:    decl %edi
-; CHECK-NEXT:    testl %edi, %ecx
+; CHECK-NEXT:    tzmskl %edi, %ecx
 ; CHECK-NEXT:    cmovnel %edx, %eax
 ; CHECK-NEXT:    retq
   %t0 = xor i32 %a, -1
@@ -855,7 +814,6 @@
 ; CHECK-LABEL: test_x86_tbm_tzmsk_u64_z:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    tzmskq %rdi, %rax
-; CHECK-NEXT:    testq %rax, %rax
 ; CHECK-NEXT:    cmoveq %rsi, %rax
 ; CHECK-NEXT:    retq
   %t0 = xor i64 %a, -1
@@ -870,10 +828,7 @@
 ; CHECK-LABEL: test_x86_tbm_tzmsk_u64_z2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
-; CHECK-NEXT:    movq %rdi, %rcx
-; CHECK-NEXT:    notq %rcx
-; CHECK-NEXT:    decq %rdi
-; CHECK-NEXT:    testq %rdi, %rcx
+; CHECK-NEXT:    tzmskq %rdi, %rcx
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    retq
   %t0 = xor i64 %a, -1
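
The hunks above all follow one pattern: a multi-instruction expansion (lea/not/inc/dec feeding or/xor/and/test) is folded into the single AMD TBM instruction that computes it, and the now-redundant testl/testq before the cmov disappears because the flags set by the TBM instruction are consumed directly. For reference, a minimal C sketch of the bit identities these tests match; the function names and the clang -mtbm flag are illustrative, not taken from the test:

#include <stdint.h>

/* Bit identities behind the TBM instructions exercised above.
 * Built with clang -O2 -mtbm, each body can lower to one instruction. */
static inline uint32_t blci_u32(uint32_t x)    { return x | ~(x + 1); } /* blcil    */
static inline uint32_t blcic_u32(uint32_t x)   { return ~x & (x + 1); } /* blcicl   */
static inline uint32_t blcmsk_u32(uint32_t x)  { return x ^ (x + 1); }  /* blcmskl  */
static inline uint32_t blcs_u32(uint32_t x)    { return x | (x + 1); }  /* blcsl    */
static inline uint32_t blsfill_u32(uint32_t x) { return x | (x - 1); }  /* blsfilll */
static inline uint32_t blsic_u32(uint32_t x)   { return ~x | (x - 1); } /* blsicl   */
static inline uint32_t t1mskc_u32(uint32_t x)  { return ~x | (x + 1); } /* t1mskcl  */
static inline uint32_t tzmsk_u32(uint32_t x)   { return ~x & (x - 1); } /* tzmskl   */
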
diff --git a/test/CodeGen/X86/test-shrink-bug.ll b/test/CodeGen/X86/test-shrink-bug.ll
index 4fba792..ca2316c 100644
--- a/test/CodeGen/X86/test-shrink-bug.ll
+++ b/test/CodeGen/X86/test-shrink-bug.ll
@@ -68,7 +68,7 @@
 ; CHECK-X64:       # %bb.0:
 ; CHECK-X64-NEXT:    pushq %rax
 ; CHECK-X64-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-X64-NEXT:    testw $263, %di # imm = 0x107
+; CHECK-X64-NEXT:    testl $263, %edi # imm = 0x107
 ; CHECK-X64-NEXT:    je .LBB1_2
 ; CHECK-X64-NEXT:  # %bb.1:
 ; CHECK-X64-NEXT:    pand {{.*}}(%rip), %xmm0
diff --git a/test/CodeGen/X86/tls-addr-non-leaf-function.ll b/test/CodeGen/X86/tls-addr-non-leaf-function.ll
index b9cab65..6ebced1 100644
--- a/test/CodeGen/X86/tls-addr-non-leaf-function.ll
+++ b/test/CodeGen/X86/tls-addr-non-leaf-function.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic -O2 -disable-fp-elim -o - | FileCheck %s
+; RUN: llc < %s -relocation-model=pic -O2 -frame-pointer=all -o - | FileCheck %s
 ; RUN: llc < %s -relocation-model=pic -O2 -o - | FileCheck %s
 
 ; This test runs twice with different options regarding the frame pointer:
diff --git a/test/CodeGen/X86/twoaddr-dbg-value.mir b/test/CodeGen/X86/twoaddr-dbg-value.mir
new file mode 100644
index 0000000..f2d6c5d
--- /dev/null
+++ b/test/CodeGen/X86/twoaddr-dbg-value.mir
@@ -0,0 +1,27 @@
+# RUN: llc -run-pass=livevars,twoaddressinstruction -mtriple=x86_64-- -o - %s | FileCheck %s
+---
+name:            foo
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $edi
+
+    %0:gr32 = COPY killed $edi
+    %1:gr32 = COPY killed %0
+    %4:gr32 = XOR32ri8 %1, 1, implicit-def dead $eflags
+    DBG_VALUE %4
+    %5:gr32 = COPY %4
+    PUSH32r killed %1, implicit-def $esp, implicit $esp
+    $eax = COPY killed %5
+    RETQ implicit killed $eax
+
+...
+
+# Verify that the DBG_VALUE instruction does not inhibit the
+# TwoAddressInstructionPass::rescheduleMIBelowKill optimization.
+
+# CHECK: PUSH32r %1, implicit-def $esp, implicit $esp
+# CHECK-NEXT: %2:gr32 = COPY killed %1
+# CHECK-NEXT: %2:gr32 = XOR32ri8 %2, 1, implicit-def dead $eflags
+# CHECK-NEXT: DBG_VALUE %2
+# CHECK-NEXT: %3:gr32 = COPY killed %2
diff --git a/test/CodeGen/X86/uadd_sat.ll b/test/CodeGen/X86/uadd_sat.ll
index f72d036..3552fcd 100644
--- a/test/CodeGen/X86/uadd_sat.ll
+++ b/test/CodeGen/X86/uadd_sat.ll
@@ -1,157 +1,148 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
-; RUN: llc < %s -mcpu=generic -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefixes=CHECK,X64
 
 declare  i4  @llvm.uadd.sat.i4   (i4,  i4)
 declare  i32 @llvm.uadd.sat.i32  (i32, i32)
 declare  i64 @llvm.uadd.sat.i64  (i64, i64)
 declare  <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
 
-define i32 @func(i32 %x, i32 %y) {
-; CHECK-LABEL: func:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addl %esi, %edi
-; CHECK-NEXT:    movl $-1, %eax
-; CHECK-NEXT:    cmovael %edi, %eax
-; CHECK-NEXT:    retq
+define i32 @func(i32 %x, i32 %y) nounwind {
+; X86-LABEL: func:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    cmovael %ecx, %eax
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl $-1, %eax
-; CHECK32-NEXT:    cmovael %ecx, %eax
-; CHECK32-NEXT:    retl
+; X64-LABEL: func:
+; X64:       # %bb.0:
+; X64-NEXT:    addl %esi, %edi
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovael %edi, %eax
+; X64-NEXT:    retq
   %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y);
   ret i32 %tmp;
 }
 
-define i64 @func2(i64 %x, i64 %y) {
-; CHECK-LABEL: func2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addq %rsi, %rdi
-; CHECK-NEXT:    movq $-1, %rax
-; CHECK-NEXT:    cmovaeq %rdi, %rax
-; CHECK-NEXT:    retq
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; X86-LABEL: func2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $-1, %ecx
+; X86-NEXT:    cmovbl %ecx, %edx
+; X86-NEXT:    cmovbl %ecx, %eax
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func2:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl $-1, %ecx
-; CHECK32-NEXT:    cmovbl %ecx, %edx
-; CHECK32-NEXT:    cmovbl %ecx, %eax
-; CHECK32-NEXT:    retl
+; X64-LABEL: func2:
+; X64:       # %bb.0:
+; X64-NEXT:    addq %rsi, %rdi
+; X64-NEXT:    movq $-1, %rax
+; X64-NEXT:    cmovaeq %rdi, %rax
+; X64-NEXT:    retq
   %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;
 }
 
-define i4 @func3(i4 %x, i4 %y) {
-; CHECK-LABEL: func3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    shlb $4, %sil
-; CHECK-NEXT:    shlb $4, %dil
-; CHECK-NEXT:    addb %sil, %dil
-; CHECK-NEXT:    movb $-1, %al
-; CHECK-NEXT:    jb .LBB2_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:  .LBB2_2:
-; CHECK-NEXT:    shrb $4, %al
-; CHECK-NEXT:    retq
+define i4 @func3(i4 %x, i4 %y) nounwind {
+; X86-LABEL: func3:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    shlb $4, %al
+; X86-NEXT:    shlb $4, %cl
+; X86-NEXT:    addb %al, %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB2_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB2_2:
+; X86-NEXT:    shrb $4, %al
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func3:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    shlb $4, %al
-; CHECK32-NEXT:    shlb $4, %cl
-; CHECK32-NEXT:    addb %al, %cl
-; CHECK32-NEXT:    movb $-1, %al
-; CHECK32-NEXT:    jb .LBB2_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    movl %ecx, %eax
-; CHECK32-NEXT:  .LBB2_2:
-; CHECK32-NEXT:    shrb $4, %al
-; CHECK32-NEXT:    retl
+; X64-LABEL: func3:
+; X64:       # %bb.0:
+; X64-NEXT:    shlb $4, %sil
+; X64-NEXT:    shlb $4, %dil
+; X64-NEXT:    addb %sil, %dil
+; X64-NEXT:    movb $-1, %al
+; X64-NEXT:    jb .LBB2_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:  .LBB2_2:
+; X64-NEXT:    shrb $4, %al
+; X64-NEXT:    retq
   %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y);
   ret i4 %tmp;
 }
 
-define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: vec:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %eax
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %ecx
-; CHECK-NEXT:    addl %eax, %ecx
-; CHECK-NEXT:    movl $-1, %eax
-; CHECK-NEXT:    cmovbl %eax, %ecx
-; CHECK-NEXT:    movd %ecx, %xmm2
-; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; CHECK-NEXT:    movd %xmm3, %ecx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; CHECK-NEXT:    movd %xmm3, %edx
-; CHECK-NEXT:    addl %ecx, %edx
-; CHECK-NEXT:    cmovbl %eax, %edx
-; CHECK-NEXT:    movd %edx, %xmm3
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; CHECK-NEXT:    movd %xmm1, %ecx
-; CHECK-NEXT:    movd %xmm0, %edx
-; CHECK-NEXT:    addl %ecx, %edx
-; CHECK-NEXT:    cmovbl %eax, %edx
-; CHECK-NEXT:    movd %edx, %xmm2
-; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm1, %ecx
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %edx
-; CHECK-NEXT:    addl %ecx, %edx
-; CHECK-NEXT:    cmovbl %eax, %edx
-; CHECK-NEXT:    movd %edx, %xmm0
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; CHECK-NEXT:    movdqa %xmm2, %xmm0
-; CHECK-NEXT:    retq
+define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X86-LABEL: vec:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    cmovbl %ebx, %edi
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    cmovbl %ebx, %esi
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmovbl %ebx, %edx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovbl %ebx, %ecx
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
 ;
-; CHECK32-LABEL: vec:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    .cfi_offset %esi, -16
-; CHECK32-NEXT:    .cfi_offset %edi, -12
-; CHECK32-NEXT:    .cfi_offset %ebx, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    addl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl $-1, %ebx
-; CHECK32-NEXT:    cmovbl %ebx, %edi
-; CHECK32-NEXT:    addl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    cmovbl %ebx, %esi
-; CHECK32-NEXT:    addl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    cmovbl %ebx, %edx
-; CHECK32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    cmovbl %ebx, %ecx
-; CHECK32-NEXT:    movl %ecx, 12(%eax)
-; CHECK32-NEXT:    movl %edx, 8(%eax)
-; CHECK32-NEXT:    movl %esi, 4(%eax)
-; CHECK32-NEXT:    movl %edi, (%eax)
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl $4
+; X64-LABEL: vec:
+; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    addl %eax, %ecx
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovbl %eax, %ecx
+; X64-NEXT:    movd %ecx, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; X64-NEXT:    movd %xmm3, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    addl %ecx, %edx
+; X64-NEXT:    cmovbl %eax, %edx
+; X64-NEXT:    movd %edx, %xmm3
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    addl %ecx, %edx
+; X64-NEXT:    cmovbl %eax, %edx
+; X64-NEXT:    movd %edx, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    addl %ecx, %edx
+; X64-NEXT:    cmovbl %eax, %edx
+; X64-NEXT:    movd %edx, %xmm0
+; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X64-NEXT:    movdqa %xmm2, %xmm0
+; X64-NEXT:    retq
   %tmp = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
   ret <4 x i32> %tmp;
 }
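
The regenerated checks encode the scalar lowering of llvm.uadd.sat: perform the add, materialize the all-ones clamp, and cmov on the carry flag. A branch-free C reference for the i32 case, mirroring the X64 sequence (addl + movl $-1 + cmovael); the function name is illustrative, not part of the test:

#include <limits.h>

/* Reference semantics for @llvm.uadd.sat.i32: the sum, clamped to
 * UINT_MAX on unsigned overflow.  s < x holds iff the addition wrapped. */
unsigned uadd_sat_u32(unsigned x, unsigned y) {
  unsigned s = x + y;
  return s < x ? UINT_MAX : s;
}
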
diff --git a/test/CodeGen/X86/uadd_sat_vec.ll b/test/CodeGen/X86/uadd_sat_vec.ll
new file mode 100644
index 0000000..98f6d0f
--- /dev/null
+++ b/test/CodeGen/X86/uadd_sat_vec.ll
@@ -0,0 +1,1015 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+
+declare <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8>, <1 x i8>)
+declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>)
+declare <12 x i8> @llvm.uadd.sat.v12i8(<12 x i8>, <12 x i8>)
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8>, <64 x i8>)
+
+declare <1 x i16> @llvm.uadd.sat.v1i16(<1 x i16>, <1 x i16>)
+declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <12 x i16> @llvm.uadd.sat.v12i16(<12 x i16>, <12 x i16>)
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare <16 x i1> @llvm.uadd.sat.v16i1(<16 x i1>, <16 x i1>)
+declare <16 x i4> @llvm.uadd.sat.v16i4(<16 x i4>, <16 x i4>)
+
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i24> @llvm.uadd.sat.v4i24(<4 x i24>, <4 x i24>)
+declare <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128>, <2 x i128>)
+
+; Legal types, depending on architecture.
+
+define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE-LABEL: v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+  ret <16 x i8> %z
+}
+
+define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; SSE-LABEL: v32i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb %xmm2, %xmm0
+; SSE-NEXT:    paddusb %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpaddusb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %x, <32 x i8> %y)
+  ret <32 x i8> %z
+}
+
+define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; SSE-LABEL: v64i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb %xmm4, %xmm0
+; SSE-NEXT:    paddusb %xmm5, %xmm1
+; SSE-NEXT:    paddusb %xmm6, %xmm2
+; SSE-NEXT:    paddusb %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v64i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpaddusb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpaddusb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpaddusb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpaddusb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v64i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddusb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v64i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddusb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %x, <64 x i8> %y)
+  ret <64 x i8> %z
+}
+
+define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE-LABEL: v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+  ret <8 x i16> %z
+}
+
+define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; SSE-LABEL: v16i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw %xmm2, %xmm0
+; SSE-NEXT:    paddusw %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpaddusw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %x, <16 x i16> %y)
+  ret <16 x i16> %z
+}
+
+define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; SSE-LABEL: v32i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw %xmm4, %xmm0
+; SSE-NEXT:    paddusw %xmm5, %xmm1
+; SSE-NEXT:    paddusw %xmm6, %xmm2
+; SSE-NEXT:    paddusw %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpaddusw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpaddusw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpaddusw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpaddusw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddusw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddusw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %x, <32 x i16> %y)
+  ret <32 x i16> %z
+}
+
+; Vectors too narrow for a legal type, legalized by widening.
+
+define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+; SSE-LABEL: v8i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    paddusb %xmm0, %xmm1
+; SSE-NEXT:    movq %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v8i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v8i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vpmovwb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <8 x i8>, <8 x i8>* %px
+  %y = load <8 x i8>, <8 x i8>* %py
+  %z = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
+  store <8 x i8> %z, <8 x i8>* %pz
+  ret void
+}
+
+define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+; SSE-LABEL: v4i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    paddusb %xmm0, %xmm1
+; SSE-NEXT:    movd %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT:    vpmovdb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i8>, <4 x i8>* %px
+  %y = load <4 x i8>, <4 x i8>* %py
+  %z = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
+  store <4 x i8> %z, <4 x i8>* %pz
+  ret void
+}
+
+define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+; SSE2-LABEL: v2i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movzwl (%rdi), %eax
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movzwl (%rsi), %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    paddusb %xmm0, %xmm1
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    movw %ax, (%rdx)
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i8:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movzwl (%rdi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movzwl (%rsi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    paddusb %xmm0, %xmm1
+; SSSE3-NEXT:    movd %xmm1, %eax
+; SSSE3-NEXT:    movw %ax, (%rdx)
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movzwl (%rdi), %eax
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    movzwl (%rsi), %eax
+; SSE41-NEXT:    movd %eax, %xmm1
+; SSE41-NEXT:    paddusb %xmm0, %xmm1
+; SSE41-NEXT:    pextrw $0, %xmm1, (%rdx)
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movzwl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    movzwl (%rsi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movzwl (%rdi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    movzwl (%rsi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    movzwl (%rsi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpmovqb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i8>, <2 x i8>* %px
+  %y = load <2 x i8>, <2 x i8>* %py
+  %z = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
+  store <2 x i8> %z, <2 x i8>* %pz
+  ret void
+}
+
+define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+; SSE-LABEL: v4i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    paddusw %xmm0, %xmm1
+; SSE-NEXT:    movq %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vpmovdw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i16>, <4 x i16>* %px
+  %y = load <4 x i16>, <4 x i16>* %py
+  %z = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
+  store <4 x i16> %z, <4 x i16>* %pz
+  ret void
+}
+
+define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+; SSE-LABEL: v2i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    paddusw %xmm0, %xmm1
+; SSE-NEXT:    movd %xmm1, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v2i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpmovqw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i16>, <2 x i16>* %px
+  %y = load <2 x i16>, <2 x i16>* %py
+  %z = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
+  store <2 x i16> %z, <2 x i16>* %pz
+  ret void
+}
+
+define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
+; SSE-LABEL: v12i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v12i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <12 x i8> @llvm.uadd.sat.v12i8(<12 x i8> %x, <12 x i8> %y)
+  ret <12 x i8> %z
+}
+
+define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+; SSE-LABEL: v12i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa (%rdi), %xmm0
+; SSE-NEXT:    movdqa 16(%rdi), %xmm1
+; SSE-NEXT:    paddusw (%rsi), %xmm0
+; SSE-NEXT:    paddusw 16(%rsi), %xmm1
+; SSE-NEXT:    movq %xmm1, 16(%rdx)
+; SSE-NEXT:    movdqa %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v12i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT:    vpaddusw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT:    vpaddusw 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v12i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vpaddusw (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v12i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512-NEXT:    vpaddusw (%rsi), %ymm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = load <12 x i16>, <12 x i16>* %px
+  %y = load <12 x i16>, <12 x i16>* %py
+  %z = call <12 x i16> @llvm.uadd.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
+  store <12 x i16> %z, <12 x i16>* %pz
+  ret void
+}
+
+; Scalarization
+
+define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+; SSE-LABEL: v1i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movb (%rdi), %al
+; SSE-NEXT:    addb (%rsi), %al
+; SSE-NEXT:    movb $-1, %cl
+; SSE-NEXT:    jb .LBB13_2
+; SSE-NEXT:  # %bb.1:
+; SSE-NEXT:    movl %eax, %ecx
+; SSE-NEXT:  .LBB13_2:
+; SSE-NEXT:    movb %cl, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movb (%rdi), %al
+; AVX-NEXT:    addb (%rsi), %al
+; AVX-NEXT:    movb $-1, %cl
+; AVX-NEXT:    jb .LBB13_2
+; AVX-NEXT:  # %bb.1:
+; AVX-NEXT:    movl %eax, %ecx
+; AVX-NEXT:  .LBB13_2:
+; AVX-NEXT:    movb %cl, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i8>, <1 x i8>* %px
+  %y = load <1 x i8>, <1 x i8>* %py
+  %z = call <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
+  store <1 x i8> %z, <1 x i8>* %pz
+  ret void
+}
+
+define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+; SSE-LABEL: v1i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movzwl (%rdi), %eax
+; SSE-NEXT:    addw (%rsi), %ax
+; SSE-NEXT:    movl $65535, %ecx # imm = 0xFFFF
+; SSE-NEXT:    cmovael %eax, %ecx
+; SSE-NEXT:    movw %cx, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movzwl (%rdi), %eax
+; AVX-NEXT:    addw (%rsi), %ax
+; AVX-NEXT:    movl $65535, %ecx # imm = 0xFFFF
+; AVX-NEXT:    cmovael %eax, %ecx
+; AVX-NEXT:    movw %cx, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i16>, <1 x i16>* %px
+  %y = load <1 x i16>, <1 x i16>* %py
+  %z = call <1 x i16> @llvm.uadd.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
+  store <1 x i16> %z, <1 x i16>* %pz
+  ret void
+}
+
+; Promotion
+
+define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) nounwind {
+; SSE-LABEL: v16i4:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $4, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    paddusb %xmm1, %xmm0
+; SSE-NEXT:    psrlw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllw $4, %xmm1, %xmm1
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i4> @llvm.uadd.sat.v16i4(<16 x i4> %x, <16 x i4> %y)
+  ret <16 x i4> %z
+}
+
+define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) nounwind {
+; SSE-LABEL: v16i1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $7, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $7, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    paddusb %xmm1, %xmm0
+; SSE-NEXT:    psrlw $7, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i1:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i1:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $7, %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i1:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX512-NEXT:    vpmovb2m %xmm1, %k0
+; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovb2m %xmm0, %k1
+; AVX512-NEXT:    korw %k0, %k1, %k0
+; AVX512-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i1> @llvm.uadd.sat.v16i1(<16 x i1> %x, <16 x i1> %y)
+  ret <16 x i1> %z
+}
+
+; Expanded
+
+define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE2-LABEL: v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
+; SSE2-NEXT:    movl $-1, %eax
+; SSE2-NEXT:    cmovbl %eax, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movd %xmm3, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm3, %edx
+; SSE2-NEXT:    addl %ecx, %edx
+; SSE2-NEXT:    cmovbl %eax, %edx
+; SSE2-NEXT:    movd %edx, %xmm3
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    movd %xmm1, %ecx
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    addl %ecx, %edx
+; SSE2-NEXT:    cmovbl %eax, %edx
+; SSE2-NEXT:    movd %edx, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    addl %ecx, %edx
+; SSE2-NEXT:    cmovbl %eax, %edx
+; SSE2-NEXT:    movd %edx, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %eax
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %ecx
+; SSSE3-NEXT:    addl %eax, %ecx
+; SSSE3-NEXT:    movl $-1, %eax
+; SSSE3-NEXT:    cmovbl %eax, %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm3, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm3, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
+; SSSE3-NEXT:    cmovbl %eax, %edx
+; SSSE3-NEXT:    movd %edx, %xmm3
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSSE3-NEXT:    movd %xmm1, %ecx
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
+; SSSE3-NEXT:    cmovbl %eax, %edx
+; SSSE3-NEXT:    movd %edx, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
+; SSSE3-NEXT:    cmovbl %eax, %edx
+; SSSE3-NEXT:    movd %edx, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pextrd $1, %xmm1, %eax
+; SSE41-NEXT:    pextrd $1, %xmm0, %ecx
+; SSE41-NEXT:    addl %eax, %ecx
+; SSE41-NEXT:    movl $-1, %eax
+; SSE41-NEXT:    cmovbl %eax, %ecx
+; SSE41-NEXT:    movd %xmm1, %edx
+; SSE41-NEXT:    movd %xmm0, %esi
+; SSE41-NEXT:    addl %edx, %esi
+; SSE41-NEXT:    cmovbl %eax, %esi
+; SSE41-NEXT:    movd %esi, %xmm2
+; SSE41-NEXT:    pinsrd $1, %ecx, %xmm2
+; SSE41-NEXT:    pextrd $2, %xmm1, %ecx
+; SSE41-NEXT:    pextrd $2, %xmm0, %edx
+; SSE41-NEXT:    addl %ecx, %edx
+; SSE41-NEXT:    cmovbl %eax, %edx
+; SSE41-NEXT:    pinsrd $2, %edx, %xmm2
+; SSE41-NEXT:    pextrd $3, %xmm1, %ecx
+; SSE41-NEXT:    pextrd $3, %xmm0, %edx
+; SSE41-NEXT:    addl %ecx, %edx
+; SSE41-NEXT:    cmovbl %eax, %edx
+; SSE41-NEXT:    pinsrd $3, %edx, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrd $1, %xmm1, %eax
+; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-NEXT:    addl %eax, %ecx
+; AVX-NEXT:    movl $-1, %eax
+; AVX-NEXT:    cmovbl %eax, %ecx
+; AVX-NEXT:    vmovd %xmm1, %edx
+; AVX-NEXT:    vmovd %xmm0, %esi
+; AVX-NEXT:    addl %edx, %esi
+; AVX-NEXT:    cmovbl %eax, %esi
+; AVX-NEXT:    vmovd %esi, %xmm2
+; AVX-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX-NEXT:    vpextrd $2, %xmm0, %edx
+; AVX-NEXT:    addl %ecx, %edx
+; AVX-NEXT:    cmovbl %eax, %edx
+; AVX-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX-NEXT:    vpextrd $3, %xmm0, %edx
+; AVX-NEXT:    addl %ecx, %edx
+; AVX-NEXT:    cmovbl %eax, %edx
+; AVX-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %z = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i32> %z
+}
+
+define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
+; SSE2-LABEL: v2i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    movq %xmm0, %rcx
+; SSE2-NEXT:    addq %rax, %rcx
+; SSE2-NEXT:    movq $-1, %rax
+; SSE2-NEXT:    cmovbq %rax, %rcx
+; SSE2-NEXT:    movq %rcx, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movq %xmm1, %rcx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movq %xmm0, %rdx
+; SSE2-NEXT:    addq %rcx, %rdx
+; SSE2-NEXT:    cmovbq %rax, %rdx
+; SSE2-NEXT:    movq %rdx, %xmm0
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE2-NEXT:    psrlq $32, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    psllq $32, %xmm1
+; SSSE3-NEXT:    movq %xmm1, %rax
+; SSSE3-NEXT:    psllq $32, %xmm0
+; SSSE3-NEXT:    movq %xmm0, %rcx
+; SSSE3-NEXT:    addq %rax, %rcx
+; SSSE3-NEXT:    movq $-1, %rax
+; SSSE3-NEXT:    cmovbq %rax, %rcx
+; SSSE3-NEXT:    movq %rcx, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm1, %rcx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm0, %rdx
+; SSSE3-NEXT:    addq %rcx, %rdx
+; SSSE3-NEXT:    cmovbq %rax, %rdx
+; SSSE3-NEXT:    movq %rdx, %xmm0
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSSE3-NEXT:    psrlq $32, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    psllq $32, %xmm1
+; SSE41-NEXT:    pextrq $1, %xmm1, %rax
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    pextrq $1, %xmm0, %rcx
+; SSE41-NEXT:    addq %rax, %rcx
+; SSE41-NEXT:    movq $-1, %rax
+; SSE41-NEXT:    cmovbq %rax, %rcx
+; SSE41-NEXT:    movq %rcx, %xmm2
+; SSE41-NEXT:    movq %xmm1, %rcx
+; SSE41-NEXT:    movq %xmm0, %rdx
+; SSE41-NEXT:    addq %rcx, %rdx
+; SSE41-NEXT:    cmovbq %rax, %rdx
+; SSE41-NEXT:    movq %rdx, %xmm0
+; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v2i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX-NEXT:    vpextrq $1, %xmm0, %rcx
+; AVX-NEXT:    addq %rax, %rcx
+; AVX-NEXT:    movq $-1, %rax
+; AVX-NEXT:    cmovbq %rax, %rcx
+; AVX-NEXT:    vmovq %rcx, %xmm2
+; AVX-NEXT:    vmovq %xmm1, %rcx
+; AVX-NEXT:    vmovq %xmm0, %rdx
+; AVX-NEXT:    addq %rcx, %rdx
+; AVX-NEXT:    cmovbq %rax, %rdx
+; AVX-NEXT:    vmovq %rdx, %xmm0
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
+  ret <2 x i32> %z
+}
+
+define <4 x i24> @v4i24(<4 x i24> %x, <4 x i24> %y) nounwind {
+; SSE2-LABEL: v4i24:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pslld $8, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    pslld $8, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm2, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
+; SSE2-NEXT:    movl $-1, %eax
+; SSE2-NEXT:    cmovbl %eax, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movd %xmm3, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm3, %edx
+; SSE2-NEXT:    addl %ecx, %edx
+; SSE2-NEXT:    cmovbl %eax, %edx
+; SSE2-NEXT:    movd %edx, %xmm3
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    movd %xmm1, %ecx
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    addl %ecx, %edx
+; SSE2-NEXT:    cmovbl %eax, %edx
+; SSE2-NEXT:    movd %edx, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %ecx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %edx
+; SSE2-NEXT:    addl %ecx, %edx
+; SSE2-NEXT:    cmovbl %eax, %edx
+; SSE2-NEXT:    movd %edx, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT:    psrld $8, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i24:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pslld $8, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %eax
+; SSSE3-NEXT:    pslld $8, %xmm0
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm2, %ecx
+; SSSE3-NEXT:    addl %eax, %ecx
+; SSSE3-NEXT:    movl $-1, %eax
+; SSSE3-NEXT:    cmovbl %eax, %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm3, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm3, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
+; SSSE3-NEXT:    cmovbl %eax, %edx
+; SSSE3-NEXT:    movd %edx, %xmm3
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSSE3-NEXT:    movd %xmm1, %ecx
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
+; SSSE3-NEXT:    cmovbl %eax, %edx
+; SSSE3-NEXT:    movd %edx, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %ecx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
+; SSSE3-NEXT:    cmovbl %eax, %edx
+; SSSE3-NEXT:    movd %edx, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSSE3-NEXT:    psrld $8, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i24:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pslld $8, %xmm1
+; SSE41-NEXT:    pextrd $1, %xmm1, %eax
+; SSE41-NEXT:    pslld $8, %xmm0
+; SSE41-NEXT:    pextrd $1, %xmm0, %ecx
+; SSE41-NEXT:    addl %eax, %ecx
+; SSE41-NEXT:    movl $-1, %eax
+; SSE41-NEXT:    cmovbl %eax, %ecx
+; SSE41-NEXT:    movd %xmm1, %edx
+; SSE41-NEXT:    movd %xmm0, %esi
+; SSE41-NEXT:    addl %edx, %esi
+; SSE41-NEXT:    cmovbl %eax, %esi
+; SSE41-NEXT:    movd %esi, %xmm2
+; SSE41-NEXT:    pinsrd $1, %ecx, %xmm2
+; SSE41-NEXT:    pextrd $2, %xmm1, %ecx
+; SSE41-NEXT:    pextrd $2, %xmm0, %edx
+; SSE41-NEXT:    addl %ecx, %edx
+; SSE41-NEXT:    cmovbl %eax, %edx
+; SSE41-NEXT:    pinsrd $2, %edx, %xmm2
+; SSE41-NEXT:    pextrd $3, %xmm1, %ecx
+; SSE41-NEXT:    pextrd $3, %xmm0, %edx
+; SSE41-NEXT:    addl %ecx, %edx
+; SSE41-NEXT:    cmovbl %eax, %edx
+; SSE41-NEXT:    pinsrd $3, %edx, %xmm2
+; SSE41-NEXT:    psrld $8, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v4i24:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $8, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $1, %xmm1, %eax
+; AVX-NEXT:    vpslld $8, %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-NEXT:    addl %eax, %ecx
+; AVX-NEXT:    movl $-1, %eax
+; AVX-NEXT:    cmovbl %eax, %ecx
+; AVX-NEXT:    vmovd %xmm1, %edx
+; AVX-NEXT:    vmovd %xmm0, %esi
+; AVX-NEXT:    addl %edx, %esi
+; AVX-NEXT:    cmovbl %eax, %esi
+; AVX-NEXT:    vmovd %esi, %xmm2
+; AVX-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX-NEXT:    vpextrd $2, %xmm0, %edx
+; AVX-NEXT:    addl %ecx, %edx
+; AVX-NEXT:    cmovbl %eax, %edx
+; AVX-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX-NEXT:    vpextrd $3, %xmm0, %edx
+; AVX-NEXT:    addl %ecx, %edx
+; AVX-NEXT:    cmovbl %eax, %edx
+; AVX-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX-NEXT:    vpsrld $8, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <4 x i24> @llvm.uadd.sat.v4i24(<4 x i24> %x, <4 x i24> %y)
+  ret <4 x i24> %z
+}
+
+define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
+; SSE-LABEL: v2i128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    addq %r9, %rsi
+; SSE-NEXT:    adcq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT:    movq $-1, %rdi
+; SSE-NEXT:    cmovbq %rdi, %rsi
+; SSE-NEXT:    cmovbq %rdi, %rdx
+; SSE-NEXT:    addq {{[0-9]+}}(%rsp), %rcx
+; SSE-NEXT:    adcq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT:    cmovbq %rdi, %r8
+; SSE-NEXT:    cmovbq %rdi, %rcx
+; SSE-NEXT:    movq %r8, 24(%rax)
+; SSE-NEXT:    movq %rcx, 16(%rax)
+; SSE-NEXT:    movq %rdx, 8(%rax)
+; SSE-NEXT:    movq %rsi, (%rax)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    addq %r9, %rsi
+; AVX-NEXT:    adcq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    movq $-1, %rdi
+; AVX-NEXT:    cmovbq %rdi, %rsi
+; AVX-NEXT:    cmovbq %rdi, %rdx
+; AVX-NEXT:    addq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    adcq {{[0-9]+}}(%rsp), %r8
+; AVX-NEXT:    cmovbq %rdi, %r8
+; AVX-NEXT:    cmovbq %rdi, %rcx
+; AVX-NEXT:    movq %r8, 24(%rax)
+; AVX-NEXT:    movq %rcx, 16(%rax)
+; AVX-NEXT:    movq %rdx, 8(%rax)
+; AVX-NEXT:    movq %rsi, (%rax)
+; AVX-NEXT:    retq
+  %z = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %x, <2 x i128> %y)
+  ret <2 x i128> %z
+}
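
The new file covers the legalization tiers: v16i8/v8i16 map directly onto paddusb/paddusw, narrower vectors are widened to those, i4/i1 elements are promoted by shifting into the high bits of a byte (so byte saturation at 255 models nibble saturation at 15 once shifted back down), and i32/i64 elements are expanded into scalar add + cmov. For the legal case, the same operation is reachable from C through the SSE2 intrinsics; a minimal sketch, not part of the test:

#include <emmintrin.h> /* SSE2 */

/* v16i8/v8i16 unsigned saturating add: exactly paddusb/paddusw. */
__m128i uadd_sat_v16i8(__m128i x, __m128i y) { return _mm_adds_epu8(x, y); }
__m128i uadd_sat_v8i16(__m128i x, __m128i y) { return _mm_adds_epu16(x, y); }
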
diff --git a/test/CodeGen/X86/umul-with-overflow.ll b/test/CodeGen/X86/umul-with-overflow.ll
index 72b1fcc..64a8933 100644
--- a/test/CodeGen/X86/umul-with-overflow.ll
+++ b/test/CodeGen/X86/umul-with-overflow.ll
@@ -7,9 +7,8 @@
 define zeroext i1 @a(i32 %x)  nounwind {
 ; X86-LABEL: a:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $3, %ecx
-; X86-NEXT:    mull %ecx
+; X86-NEXT:    movl $3, %eax
+; X86-NEXT:    mull {{[0-9]+}}(%esp)
 ; X86-NEXT:    seto %al
 ; X86-NEXT:    retl
 ;
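
The improved sequence loads the constant into %eax and multiplies directly against the stack slot, saving a register-to-register copy; seto then consumes the overflow flag that mull sets, with no separate compare. A hedged C equivalent of the IR under test, written with the Clang/GCC overflow builtin (one way to produce this pattern, not necessarily the test's origin):

#include <stdbool.h>

/* Matches @a above: does 3 * x overflow in 32 bits?
 * Lowers to mull + seto on x86. */
bool mul3_overflows(unsigned x) {
  unsigned r;
  return __builtin_umul_overflow(x, 3u, &r);
}
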
diff --git a/test/CodeGen/X86/usub_sat.ll b/test/CodeGen/X86/usub_sat.ll
index 1c9a5c5..ef822fa 100644
--- a/test/CodeGen/X86/usub_sat.ll
+++ b/test/CodeGen/X86/usub_sat.ll
@@ -1,158 +1,127 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
-; RUN: llc < %s -mcpu=generic -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefixes=CHECK,X64
 
 declare  i4  @llvm.usub.sat.i4   (i4,  i4)
 declare  i32 @llvm.usub.sat.i32  (i32, i32)
 declare  i64 @llvm.usub.sat.i64  (i64, i64)
 declare  <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
 
-define i32 @func(i32 %x, i32 %y) {
-; CHECK-LABEL: func:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    subl %esi, %edi
-; CHECK-NEXT:    cmovael %edi, %eax
-; CHECK-NEXT:    retq
+define i32 @func(i32 %x, i32 %y) nounwind {
+; X86-LABEL: func:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmovbl %ecx, %eax
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    xorl %ecx, %ecx
-; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    cmovbl %ecx, %eax
-; CHECK32-NEXT:    retl
+; X64-LABEL: func:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    subl %esi, %edi
+; X64-NEXT:    cmovael %edi, %eax
+; X64-NEXT:    retq
   %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
   ret i32 %tmp;
 }
 
-define i64 @func2(i64 %x, i64 %y) {
-; CHECK-LABEL: func2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    subq %rsi, %rdi
-; CHECK-NEXT:    cmovaeq %rdi, %rax
-; CHECK-NEXT:    retq
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; X86-LABEL: func2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmovbl %ecx, %edx
+; X86-NEXT:    cmovbl %ecx, %eax
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func2:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    xorl %ecx, %ecx
-; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    cmovbl %ecx, %edx
-; CHECK32-NEXT:    cmovbl %ecx, %eax
-; CHECK32-NEXT:    retl
+; X64-LABEL: func2:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    subq %rsi, %rdi
+; X64-NEXT:    cmovaeq %rdi, %rax
+; X64-NEXT:    retq
   %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;
 }
 
-define i4 @func3(i4 %x, i4 %y) {
-; CHECK-LABEL: func3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    shlb $4, %sil
-; CHECK-NEXT:    shlb $4, %al
-; CHECK-NEXT:    subb %sil, %al
-; CHECK-NEXT:    jae .LBB2_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:  .LBB2_2:
-; CHECK-NEXT:    shrb $4, %al
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+define i4 @func3(i4 %x, i4 %y) nounwind {
+; X86-LABEL: func3:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    shlb $4, %cl
+; X86-NEXT:    shlb $4, %al
+; X86-NEXT:    subb %cl, %al
+; X86-NEXT:    jae .LBB2_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:  .LBB2_2:
+; X86-NEXT:    shrb $4, %al
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    retl
 ;
-; CHECK32-LABEL: func3:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; CHECK32-NEXT:    shlb $4, %cl
-; CHECK32-NEXT:    shlb $4, %al
-; CHECK32-NEXT:    subb %cl, %al
-; CHECK32-NEXT:    jae .LBB2_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    xorl %eax, %eax
-; CHECK32-NEXT:  .LBB2_2:
-; CHECK32-NEXT:    shrb $4, %al
-; CHECK32-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK32-NEXT:    retl
+; X64-LABEL: func3:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shlb $4, %sil
+; X64-NEXT:    shlb $4, %al
+; X64-NEXT:    subb %sil, %al
+; X64-NEXT:    jae .LBB2_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:  .LBB2_2:
+; X64-NEXT:    shrb $4, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
   ret i4 %tmp;
 }
 
-define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: vec:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %eax
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; CHECK-NEXT:    movd %xmm2, %ecx
-; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    subl %eax, %ecx
-; CHECK-NEXT:    cmovbl %edx, %ecx
-; CHECK-NEXT:    movd %ecx, %xmm2
-; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; CHECK-NEXT:    movd %xmm3, %eax
-; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; CHECK-NEXT:    movd %xmm3, %ecx
-; CHECK-NEXT:    subl %eax, %ecx
-; CHECK-NEXT:    cmovbl %edx, %ecx
-; CHECK-NEXT:    movd %ecx, %xmm3
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; CHECK-NEXT:    movd %xmm1, %eax
-; CHECK-NEXT:    movd %xmm0, %ecx
-; CHECK-NEXT:    subl %eax, %ecx
-; CHECK-NEXT:    cmovbl %edx, %ecx
-; CHECK-NEXT:    movd %ecx, %xmm2
-; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; CHECK-NEXT:    movd %xmm1, %eax
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT:    movd %xmm0, %ecx
-; CHECK-NEXT:    subl %eax, %ecx
-; CHECK-NEXT:    cmovbl %edx, %ecx
-; CHECK-NEXT:    movd %ecx, %xmm0
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; CHECK-NEXT:    movdqa %xmm2, %xmm0
-; CHECK-NEXT:    retq
+define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X86-LABEL: vec:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmovbl %ebx, %edi
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    cmovbl %ebx, %esi
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmovbl %ebx, %edx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovbl %ebx, %ecx
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
 ;
-; CHECK32-LABEL: vec:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK32-NEXT:    .cfi_offset %esi, -16
-; CHECK32-NEXT:    .cfi_offset %edi, -12
-; CHECK32-NEXT:    .cfi_offset %ebx, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    xorl %ebx, %ebx
-; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    cmovbl %ebx, %edi
-; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    cmovbl %ebx, %esi
-; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    cmovbl %ebx, %edx
-; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    cmovbl %ebx, %ecx
-; CHECK32-NEXT:    movl %ecx, 12(%eax)
-; CHECK32-NEXT:    movl %edx, 8(%eax)
-; CHECK32-NEXT:    movl %esi, 4(%eax)
-; CHECK32-NEXT:    movl %edi, (%eax)
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %ebx
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl $4
+; X64-LABEL: vec:
+; X64:       # %bb.0:
+; X64-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; X64-NEXT:    movdqa %xmm1, %xmm3
+; X64-NEXT:    pxor %xmm2, %xmm3
+; X64-NEXT:    pxor %xmm0, %xmm2
+; X64-NEXT:    pcmpgtd %xmm3, %xmm2
+; X64-NEXT:    pand %xmm2, %xmm0
+; X64-NEXT:    pandn %xmm1, %xmm2
+; X64-NEXT:    por %xmm2, %xmm0
+; X64-NEXT:    psubd %xmm1, %xmm0
+; X64-NEXT:    retq
   %tmp = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
   ret <4 x i32> %tmp;
 }
diff --git a/test/CodeGen/X86/usub_sat_vec.ll b/test/CodeGen/X86/usub_sat_vec.ll
new file mode 100644
index 0000000..72c0c51
--- /dev/null
+++ b/test/CodeGen/X86/usub_sat_vec.ll
@@ -0,0 +1,1529 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
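+; Common codegen is checked under the shared SSE/AVX prefixes; per-subtarget
+; differences fall under the SSE2/SSSE3/SSE41 and AVX1/AVX2/AVX512 prefixes.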
+
+declare <1 x i8> @llvm.usub.sat.v1i8(<1 x i8>, <1 x i8>)
+declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>)
+declare <12 x i8> @llvm.usub.sat.v12i8(<12 x i8>, <12 x i8>)
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <64 x i8> @llvm.usub.sat.v64i8(<64 x i8>, <64 x i8>)
+
+declare <1 x i16> @llvm.usub.sat.v1i16(<1 x i16>, <1 x i16>)
+declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <12 x i16> @llvm.usub.sat.v12i16(<12 x i16>, <12 x i16>)
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.usub.sat.v32i16(<32 x i16>, <32 x i16>)
+
+declare <16 x i1> @llvm.usub.sat.v16i1(<16 x i1>, <16 x i1>)
+declare <16 x i4> @llvm.usub.sat.v16i4(<16 x i4>, <16 x i4>)
+
+declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
+declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
+
+declare <4 x i24> @llvm.usub.sat.v4i24(<4 x i24>, <4 x i24>)
+declare <2 x i128> @llvm.usub.sat.v2i128(<2 x i128>, <2 x i128>)
+
+; Legal types, depending on architecture.
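+; usub.sat(x, y) clamps to 0 on underflow, so these map directly to the x86
+; saturating unsigned subtracts: psubusb/psubusw (SSE) and vpsubusb/vpsubusw
+; (AVX). Wider-than-native vectors are split per the 128/256/512-bit ISA.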
+
+define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE-LABEL: v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+  ret <16 x i8> %z
+}
+
+define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; SSE-LABEL: v32i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusb %xmm2, %xmm0
+; SSE-NEXT:    psubusb %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpsubusb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubusb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubusb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %x, <32 x i8> %y)
+  ret <32 x i8> %z
+}
+
+define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; SSE-LABEL: v64i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusb %xmm4, %xmm0
+; SSE-NEXT:    psubusb %xmm5, %xmm1
+; SSE-NEXT:    psubusb %xmm6, %xmm2
+; SSE-NEXT:    psubusb %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v64i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpsubusb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsubusb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsubusb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsubusb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v64i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubusb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubusb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v64i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubusb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %x, <64 x i8> %y)
+  ret <64 x i8> %z
+}
+
+define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE-LABEL: v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+  ret <8 x i16> %z
+}
+
+define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; SSE-LABEL: v16i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusw %xmm2, %xmm0
+; SSE-NEXT:    psubusw %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpsubusw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubusw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubusw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %x, <16 x i16> %y)
+  ret <16 x i16> %z
+}
+
+define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; SSE-LABEL: v32i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusw %xmm4, %xmm0
+; SSE-NEXT:    psubusw %xmm5, %xmm1
+; SSE-NEXT:    psubusw %xmm6, %xmm2
+; SSE-NEXT:    psubusw %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v32i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpsubusw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsubusw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsubusw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsubusw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v32i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsubusw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubusw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsubusw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %x, <32 x i16> %y)
+  ret <32 x i16> %z
+}
+
+; Too narrow vectors, legalized by widening.
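+; Sub-128-bit vectors are widened to a full XMM register; only the loads and
+; stores use the narrow memory width, so the extra lanes are ignored.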
+
+define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+; SSE-LABEL: v8i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    psubusb %xmm1, %xmm0
+; SSE-NEXT:    movq %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v8i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v8i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vpmovwb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <8 x i8>, <8 x i8>* %px
+  %y = load <8 x i8>, <8 x i8>* %py
+  %z = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
+  store <8 x i8> %z, <8 x i8>* %pz
+  ret void
+}
+
+define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+; SSE-LABEL: v4i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    psubusb %xmm1, %xmm0
+; SSE-NEXT:    movd %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT:    vpmovdb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i8>, <4 x i8>* %px
+  %y = load <4 x i8>, <4 x i8>* %py
+  %z = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
+  store <4 x i8> %z, <4 x i8>* %pz
+  ret void
+}
+
+define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+; SSE2-LABEL: v2i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movzwl (%rdi), %eax
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movzwl (%rsi), %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    psubusb %xmm1, %xmm0
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    movw %ax, (%rdx)
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i8:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movzwl (%rdi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movzwl (%rsi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    psubusb %xmm1, %xmm0
+; SSSE3-NEXT:    movd %xmm0, %eax
+; SSSE3-NEXT:    movw %ax, (%rdx)
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movzwl (%rdi), %eax
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    movzwl (%rsi), %eax
+; SSE41-NEXT:    movd %eax, %xmm1
+; SSE41-NEXT:    psubusb %xmm1, %xmm0
+; SSE41-NEXT:    pextrw $0, %xmm0, (%rdx)
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movzwl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    movzwl (%rsi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movzwl (%rdi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    movzwl (%rsi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    movzwl (%rsi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpmovqb %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i8>, <2 x i8>* %px
+  %y = load <2 x i8>, <2 x i8>* %py
+  %z = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
+  store <2 x i8> %z, <2 x i8>* %pz
+  ret void
+}
+
+define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+; SSE-LABEL: v4i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    psubusw %xmm1, %xmm0
+; SSE-NEXT:    movq %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v4i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vpmovdw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <4 x i16>, <4 x i16>* %px
+  %y = load <4 x i16>, <4 x i16>* %py
+  %z = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
+  store <4 x i16> %z, <4 x i16>* %pz
+  ret void
+}
+
+define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+; SSE-LABEL: v2i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    psubusw %xmm1, %xmm0
+; SSE-NEXT:    movd %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v2i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpmovqw %xmm0, (%rdx)
+; AVX512-NEXT:    retq
+  %x = load <2 x i16>, <2 x i16>* %px
+  %y = load <2 x i16>, <2 x i16>* %py
+  %z = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
+  store <2 x i16> %z, <2 x i16>* %pz
+  ret void
+}
+
+define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
+; SSE-LABEL: v12i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v12i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <12 x i8> @llvm.usub.sat.v12i8(<12 x i8> %x, <12 x i8> %y)
+  ret <12 x i8> %z
+}
+
+define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+; SSE-LABEL: v12i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa (%rdi), %xmm0
+; SSE-NEXT:    movdqa 16(%rdi), %xmm1
+; SSE-NEXT:    psubusw (%rsi), %xmm0
+; SSE-NEXT:    psubusw 16(%rsi), %xmm1
+; SSE-NEXT:    movq %xmm1, 16(%rdx)
+; SSE-NEXT:    movdqa %xmm0, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v12i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT:    vpsubusw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT:    vpsubusw 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v12i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vpsubusw (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v12i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512-NEXT:    vpsubusw (%rsi), %ymm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm1, 16(%rdx)
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = load <12 x i16>, <12 x i16>* %px
+  %y = load <12 x i16>, <12 x i16>* %py
+  %z = call <12 x i16> @llvm.usub.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
+  store <12 x i16> %z, <12 x i16>* %pz
+  ret void
+}
+
+; Scalarization
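+; <1 x iN> has no vector lowering, so it is scalarized: a plain sub sets CF
+; on underflow, and the result is clamped to 0 via a branch or cmov.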
+
+define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+; SSE-LABEL: v1i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movb (%rdi), %al
+; SSE-NEXT:    subb (%rsi), %al
+; SSE-NEXT:    jae .LBB13_2
+; SSE-NEXT:  # %bb.1:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:  .LBB13_2:
+; SSE-NEXT:    movb %al, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movb (%rdi), %al
+; AVX-NEXT:    subb (%rsi), %al
+; AVX-NEXT:    jae .LBB13_2
+; AVX-NEXT:  # %bb.1:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:  .LBB13_2:
+; AVX-NEXT:    movb %al, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i8>, <1 x i8>* %px
+  %y = load <1 x i8>, <1 x i8>* %py
+  %z = call <1 x i8> @llvm.usub.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
+  store <1 x i8> %z, <1 x i8>* %pz
+  ret void
+}
+
+define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+; SSE-LABEL: v1i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movzwl (%rdi), %eax
+; SSE-NEXT:    xorl %ecx, %ecx
+; SSE-NEXT:    subw (%rsi), %ax
+; SSE-NEXT:    cmovbl %ecx, %eax
+; SSE-NEXT:    movw %ax, (%rdx)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v1i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movzwl (%rdi), %eax
+; AVX-NEXT:    xorl %ecx, %ecx
+; AVX-NEXT:    subw (%rsi), %ax
+; AVX-NEXT:    cmovbl %ecx, %eax
+; AVX-NEXT:    movw %ax, (%rdx)
+; AVX-NEXT:    retq
+  %x = load <1 x i16>, <1 x i16>* %px
+  %y = load <1 x i16>, <1 x i16>* %py
+  %z = call <1 x i16> @llvm.usub.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
+  store <1 x i16> %z, <1 x i16>* %pz
+  ret void
+}
+
+; Promotion
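+; i4/i1 elements are promoted to i8 lanes: shifting the payload into the high
+; bits first makes the byte-wide psubusb saturate at the right boundary, after
+; which the result is shifted back down. With AVX512 mask registers, v16i1
+; reduces to x & ~y via kandnw.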
+
+define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) nounwind {
+; SSE-LABEL: v16i4:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $4, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    psubusb %xmm1, %xmm0
+; SSE-NEXT:    psrlw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v16i4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllw $4, %xmm1, %xmm1
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <16 x i4> @llvm.usub.sat.v16i4(<16 x i4> %x, <16 x i4> %y)
+  ret <16 x i4> %z
+}
+
+define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) nounwind {
+; SSE-LABEL: v16i1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psllw $7, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    psllw $7, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    psubusb %xmm1, %xmm0
+; SSE-NEXT:    psrlw $7, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: v16i1:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i1:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $7, %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i1:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovb2m %xmm0, %k0
+; AVX512-NEXT:    vpsllw $7, %xmm1, %xmm0
+; AVX512-NEXT:    vpmovb2m %xmm0, %k1
+; AVX512-NEXT:    kandnw %k0, %k1, %k0
+; AVX512-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i1> @llvm.usub.sat.v16i1(<16 x i1> %x, <16 x i1> %y)
+  ret <16 x i1> %z
+}
+
+; Expanded
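+; No saturating instruction exists at these widths, so the operation is
+; expanded to umax(x, y) - y: pmaxud/pmaxuq where available, otherwise a
+; signed compare after biasing both operands by the sign bit, followed by a
+; select. <2 x i32> is first moved into the high halves of <2 x i64> lanes
+; via psllq/psrlq.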
+
+define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
+; SSE2-LABEL: v2i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    psrlq $32, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    psllq $32, %xmm1
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm3
+; SSSE3-NEXT:    pxor %xmm2, %xmm3
+; SSSE3-NEXT:    psllq $32, %xmm0
+; SSSE3-NEXT:    pxor %xmm0, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm5, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    por %xmm2, %xmm3
+; SSSE3-NEXT:    pand %xmm3, %xmm0
+; SSSE3-NEXT:    pandn %xmm1, %xmm3
+; SSSE3-NEXT:    por %xmm3, %xmm0
+; SSSE3-NEXT:    psubq %xmm1, %xmm0
+; SSSE3-NEXT:    psrlq $32, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psllq $32, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm3
+; SSE41-NEXT:    psllq $32, %xmm2
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT:    psubq %xmm1, %xmm3
+; SSE41-NEXT:    psrlq $32, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX512-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
+  ret <2 x i32> %z
+}
+
+define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE2-LABEL: v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    psubd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm3
+; SSSE3-NEXT:    pxor %xmm2, %xmm3
+; SSSE3-NEXT:    pxor %xmm0, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSSE3-NEXT:    pand %xmm2, %xmm0
+; SSSE3-NEXT:    pandn %xmm1, %xmm2
+; SSSE3-NEXT:    por %xmm2, %xmm0
+; SSSE3-NEXT:    psubd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmaxud %xmm1, %xmm0
+; SSE41-NEXT:    psubd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %z = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i32> %z
+}
+
+define <8 x i32> @v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
+; SSE2-LABEL: v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm2, %xmm6
+; SSE2-NEXT:    pxor %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pxor %xmm5, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm6, %xmm4
+; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm4
+; SSE2-NEXT:    por %xmm0, %xmm4
+; SSE2-NEXT:    psubd %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm3, %xmm0
+; SSE2-NEXT:    pxor %xmm5, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm5, %xmm1
+; SSE2-NEXT:    pandn %xmm3, %xmm5
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    psubd %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm4, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v8i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm6
+; SSSE3-NEXT:    pxor %xmm5, %xmm6
+; SSSE3-NEXT:    movdqa %xmm0, %xmm4
+; SSSE3-NEXT:    pxor %xmm5, %xmm4
+; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm4
+; SSSE3-NEXT:    pand %xmm4, %xmm0
+; SSSE3-NEXT:    pandn %xmm2, %xmm4
+; SSSE3-NEXT:    por %xmm0, %xmm4
+; SSSE3-NEXT:    psubd %xmm2, %xmm4
+; SSSE3-NEXT:    movdqa %xmm3, %xmm0
+; SSSE3-NEXT:    pxor %xmm5, %xmm0
+; SSSE3-NEXT:    pxor %xmm1, %xmm5
+; SSSE3-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSSE3-NEXT:    pand %xmm5, %xmm1
+; SSSE3-NEXT:    pandn %xmm3, %xmm5
+; SSSE3-NEXT:    por %xmm5, %xmm1
+; SSSE3-NEXT:    psubd %xmm3, %xmm1
+; SSSE3-NEXT:    movdqa %xmm4, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v8i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmaxud %xmm2, %xmm0
+; SSE41-NEXT:    psubd %xmm2, %xmm0
+; SSE41-NEXT:    pmaxud %xmm3, %xmm1
+; SSE41-NEXT:    psubd %xmm3, %xmm1
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm3
+; AVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %x, <8 x i32> %y)
+  ret <8 x i32> %z
+}
+
+define <16 x i32> @v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
+; SSE2-LABEL: v16i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm8
+; SSE2-NEXT:    movdqa %xmm0, %xmm10
+; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm4, %xmm1
+; SSE2-NEXT:    pxor %xmm9, %xmm1
+; SSE2-NEXT:    pxor %xmm9, %xmm0
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm10
+; SSE2-NEXT:    pandn %xmm4, %xmm0
+; SSE2-NEXT:    por %xmm10, %xmm0
+; SSE2-NEXT:    psubd %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm5, %xmm4
+; SSE2-NEXT:    pxor %xmm9, %xmm4
+; SSE2-NEXT:    movdqa %xmm8, %xmm1
+; SSE2-NEXT:    pxor %xmm9, %xmm1
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE2-NEXT:    pand %xmm1, %xmm8
+; SSE2-NEXT:    pandn %xmm5, %xmm1
+; SSE2-NEXT:    por %xmm8, %xmm1
+; SSE2-NEXT:    psubd %xmm5, %xmm1
+; SSE2-NEXT:    movdqa %xmm6, %xmm5
+; SSE2-NEXT:    pxor %xmm9, %xmm5
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pxor %xmm9, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm4
+; SSE2-NEXT:    pand %xmm4, %xmm2
+; SSE2-NEXT:    pandn %xmm6, %xmm4
+; SSE2-NEXT:    por %xmm2, %xmm4
+; SSE2-NEXT:    psubd %xmm6, %xmm4
+; SSE2-NEXT:    movdqa %xmm7, %xmm2
+; SSE2-NEXT:    pxor %xmm9, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm9
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm9
+; SSE2-NEXT:    pand %xmm9, %xmm3
+; SSE2-NEXT:    pandn %xmm7, %xmm9
+; SSE2-NEXT:    por %xmm9, %xmm3
+; SSE2-NEXT:    psubd %xmm7, %xmm3
+; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v16i32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa %xmm1, %xmm8
+; SSSE3-NEXT:    movdqa %xmm0, %xmm10
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT:    movdqa %xmm4, %xmm1
+; SSSE3-NEXT:    pxor %xmm9, %xmm1
+; SSSE3-NEXT:    pxor %xmm9, %xmm0
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSSE3-NEXT:    pand %xmm0, %xmm10
+; SSSE3-NEXT:    pandn %xmm4, %xmm0
+; SSSE3-NEXT:    por %xmm10, %xmm0
+; SSSE3-NEXT:    psubd %xmm4, %xmm0
+; SSSE3-NEXT:    movdqa %xmm5, %xmm4
+; SSSE3-NEXT:    pxor %xmm9, %xmm4
+; SSSE3-NEXT:    movdqa %xmm8, %xmm1
+; SSSE3-NEXT:    pxor %xmm9, %xmm1
+; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSSE3-NEXT:    pand %xmm1, %xmm8
+; SSSE3-NEXT:    pandn %xmm5, %xmm1
+; SSSE3-NEXT:    por %xmm8, %xmm1
+; SSSE3-NEXT:    psubd %xmm5, %xmm1
+; SSSE3-NEXT:    movdqa %xmm6, %xmm5
+; SSSE3-NEXT:    pxor %xmm9, %xmm5
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    pxor %xmm9, %xmm4
+; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm4
+; SSSE3-NEXT:    pand %xmm4, %xmm2
+; SSSE3-NEXT:    pandn %xmm6, %xmm4
+; SSSE3-NEXT:    por %xmm2, %xmm4
+; SSSE3-NEXT:    psubd %xmm6, %xmm4
+; SSSE3-NEXT:    movdqa %xmm7, %xmm2
+; SSSE3-NEXT:    pxor %xmm9, %xmm2
+; SSSE3-NEXT:    pxor %xmm3, %xmm9
+; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm9
+; SSSE3-NEXT:    pand %xmm9, %xmm3
+; SSSE3-NEXT:    pandn %xmm7, %xmm9
+; SSSE3-NEXT:    por %xmm9, %xmm3
+; SSSE3-NEXT:    psubd %xmm7, %xmm3
+; SSSE3-NEXT:    movdqa %xmm4, %xmm2
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v16i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmaxud %xmm4, %xmm0
+; SSE41-NEXT:    psubd %xmm4, %xmm0
+; SSE41-NEXT:    pmaxud %xmm5, %xmm1
+; SSE41-NEXT:    psubd %xmm5, %xmm1
+; SSE41-NEXT:    pmaxud %xmm6, %xmm2
+; SSE41-NEXT:    psubd %xmm6, %xmm2
+; SSE41-NEXT:    pmaxud %xmm7, %xmm3
+; SSE41-NEXT:    psubd %xmm7, %xmm3
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v16i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpmaxud %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpsubd %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpmaxud %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpmaxud %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpmaxud %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v16i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmaxud %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpmaxud %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmaxud %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpsubd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %x, <16 x i32> %y)
+  ret <16 x i32> %z
+}
+
+define <2 x i64> @v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
+; SSE2-LABEL: v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm3
+; SSSE3-NEXT:    pxor %xmm2, %xmm3
+; SSSE3-NEXT:    pxor %xmm0, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm5, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    por %xmm2, %xmm3
+; SSSE3-NEXT:    pand %xmm3, %xmm0
+; SSSE3-NEXT:    pandn %xmm1, %xmm3
+; SSSE3-NEXT:    por %xmm3, %xmm0
+; SSSE3-NEXT:    psubq %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm3
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT:    psubq %xmm1, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %z = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %z
+}
+
+define <4 x i64> @v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
+; SSE2-LABEL: v4i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    pxor %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm0, %xmm6
+; SSE2-NEXT:    pxor %xmm4, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm7
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm8, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE2-NEXT:    por %xmm5, %xmm6
+; SSE2-NEXT:    pand %xmm6, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm6
+; SSE2-NEXT:    por %xmm6, %xmm0
+; SSE2-NEXT:    psubq %xmm2, %xmm0
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    pxor %xmm4, %xmm2
+; SSE2-NEXT:    pxor %xmm1, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm4
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    pandn %xmm3, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm1
+; SSE2-NEXT:    psubq %xmm3, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm5
+; SSSE3-NEXT:    pxor %xmm4, %xmm5
+; SSSE3-NEXT:    movdqa %xmm0, %xmm6
+; SSSE3-NEXT:    pxor %xmm4, %xmm6
+; SSSE3-NEXT:    movdqa %xmm6, %xmm7
+; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm8, %xmm5
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSSE3-NEXT:    por %xmm5, %xmm6
+; SSSE3-NEXT:    pand %xmm6, %xmm0
+; SSSE3-NEXT:    pandn %xmm2, %xmm6
+; SSSE3-NEXT:    por %xmm6, %xmm0
+; SSSE3-NEXT:    psubq %xmm2, %xmm0
+; SSSE3-NEXT:    movdqa %xmm3, %xmm2
+; SSSE3-NEXT:    pxor %xmm4, %xmm2
+; SSSE3-NEXT:    pxor %xmm1, %xmm4
+; SSSE3-NEXT:    movdqa %xmm4, %xmm5
+; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm5
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm6, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT:    por %xmm2, %xmm4
+; SSSE3-NEXT:    pand %xmm4, %xmm1
+; SSSE3-NEXT:    pandn %xmm3, %xmm4
+; SSSE3-NEXT:    por %xmm4, %xmm1
+; SSSE3-NEXT:    psubq %xmm3, %xmm1
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v4i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm2, %xmm5
+; SSE41-NEXT:    pxor %xmm6, %xmm5
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm6, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm5
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm5
+; SSE41-NEXT:    psubq %xmm2, %xmm5
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm6
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm3, %xmm2
+; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; SSE41-NEXT:    psubq %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
+; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmaxuq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %z = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %x, <4 x i64> %y)
+  ret <4 x i64> %z
+}
+
+define <8 x i64> @v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
+; SSE2-LABEL: v8i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm4, %xmm9
+; SSE2-NEXT:    pxor %xmm8, %xmm9
+; SSE2-NEXT:    movdqa %xmm0, %xmm10
+; SSE2-NEXT:    pxor %xmm8, %xmm10
+; SSE2-NEXT:    movdqa %xmm10, %xmm11
+; SSE2-NEXT:    pcmpgtd %xmm9, %xmm11
+; SSE2-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm9, %xmm10
+; SSE2-NEXT:    pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
+; SSE2-NEXT:    pand %xmm12, %xmm9
+; SSE2-NEXT:    pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3]
+; SSE2-NEXT:    por %xmm9, %xmm10
+; SSE2-NEXT:    pand %xmm10, %xmm0
+; SSE2-NEXT:    pandn %xmm4, %xmm10
+; SSE2-NEXT:    por %xmm10, %xmm0
+; SSE2-NEXT:    psubq %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm5, %xmm9
+; SSE2-NEXT:    pxor %xmm8, %xmm9
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pxor %xmm8, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm10
+; SSE2-NEXT:    pcmpgtd %xmm9, %xmm10
+; SSE2-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3]
+; SSE2-NEXT:    pand %xmm11, %xmm9
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[1,1,3,3]
+; SSE2-NEXT:    por %xmm9, %xmm4
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    pandn %xmm5, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm1
+; SSE2-NEXT:    psubq %xmm5, %xmm1
+; SSE2-NEXT:    movdqa %xmm6, %xmm4
+; SSE2-NEXT:    pxor %xmm8, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    pxor %xmm8, %xmm5
+; SSE2-NEXT:    movdqa %xmm5, %xmm9
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm9
+; SSE2-NEXT:    pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT:    pand %xmm10, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm9[1,1,3,3]
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pandn %xmm6, %xmm5
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubq %xmm6, %xmm2
+; SSE2-NEXT:    movdqa %xmm7, %xmm4
+; SSE2-NEXT:    pxor %xmm8, %xmm4
+; SSE2-NEXT:    pxor %xmm3, %xmm8
+; SSE2-NEXT:    movdqa %xmm8, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm8
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm8[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    pand %xmm5, %xmm3
+; SSE2-NEXT:    pandn %xmm7, %xmm5
+; SSE2-NEXT:    por %xmm5, %xmm3
+; SSE2-NEXT:    psubq %xmm7, %xmm3
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v8i64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT:    movdqa %xmm4, %xmm9
+; SSSE3-NEXT:    pxor %xmm8, %xmm9
+; SSSE3-NEXT:    movdqa %xmm0, %xmm10
+; SSSE3-NEXT:    pxor %xmm8, %xmm10
+; SSSE3-NEXT:    movdqa %xmm10, %xmm11
+; SSSE3-NEXT:    pcmpgtd %xmm9, %xmm11
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm9, %xmm10
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm12, %xmm9
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3]
+; SSSE3-NEXT:    por %xmm9, %xmm10
+; SSSE3-NEXT:    pand %xmm10, %xmm0
+; SSSE3-NEXT:    pandn %xmm4, %xmm10
+; SSSE3-NEXT:    por %xmm10, %xmm0
+; SSSE3-NEXT:    psubq %xmm4, %xmm0
+; SSSE3-NEXT:    movdqa %xmm5, %xmm9
+; SSSE3-NEXT:    pxor %xmm8, %xmm9
+; SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSSE3-NEXT:    pxor %xmm8, %xmm4
+; SSSE3-NEXT:    movdqa %xmm4, %xmm10
+; SSSE3-NEXT:    pcmpgtd %xmm9, %xmm10
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm11, %xmm9
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[1,1,3,3]
+; SSSE3-NEXT:    por %xmm9, %xmm4
+; SSSE3-NEXT:    pand %xmm4, %xmm1
+; SSSE3-NEXT:    pandn %xmm5, %xmm4
+; SSSE3-NEXT:    por %xmm4, %xmm1
+; SSSE3-NEXT:    psubq %xmm5, %xmm1
+; SSSE3-NEXT:    movdqa %xmm6, %xmm4
+; SSSE3-NEXT:    pxor %xmm8, %xmm4
+; SSSE3-NEXT:    movdqa %xmm2, %xmm5
+; SSSE3-NEXT:    pxor %xmm8, %xmm5
+; SSSE3-NEXT:    movdqa %xmm5, %xmm9
+; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm9
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm10, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm9[1,1,3,3]
+; SSSE3-NEXT:    por %xmm4, %xmm5
+; SSSE3-NEXT:    pand %xmm5, %xmm2
+; SSSE3-NEXT:    pandn %xmm6, %xmm5
+; SSSE3-NEXT:    por %xmm5, %xmm2
+; SSSE3-NEXT:    psubq %xmm6, %xmm2
+; SSSE3-NEXT:    movdqa %xmm7, %xmm4
+; SSSE3-NEXT:    pxor %xmm8, %xmm4
+; SSSE3-NEXT:    pxor %xmm3, %xmm8
+; SSSE3-NEXT:    movdqa %xmm8, %xmm5
+; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm5
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm4, %xmm8
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm8[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm6, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT:    por %xmm4, %xmm5
+; SSSE3-NEXT:    pand %xmm5, %xmm3
+; SSSE3-NEXT:    pandn %xmm7, %xmm5
+; SSSE3-NEXT:    por %xmm5, %xmm3
+; SSSE3-NEXT:    psubq %xmm7, %xmm3
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: v8i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm1, %xmm8
+; SSE41-NEXT:    movdqa %xmm0, %xmm11
+; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm4, %xmm9
+; SSE41-NEXT:    pxor %xmm10, %xmm9
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    pxor %xmm10, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm9, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm1
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movdqa %xmm4, %xmm9
+; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm9
+; SSE41-NEXT:    psubq %xmm4, %xmm9
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    movdqa %xmm8, %xmm1
+; SSE41-NEXT:    pxor %xmm10, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movdqa %xmm5, %xmm1
+; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm1
+; SSE41-NEXT:    psubq %xmm5, %xmm1
+; SSE41-NEXT:    movdqa %xmm6, %xmm0
+; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    pxor %xmm10, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm5
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm6, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
+; SSE41-NEXT:    psubq %xmm6, %xmm4
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm3, %xmm10
+; SSE41-NEXT:    movdqa %xmm10, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm10
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm7, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm5
+; SSE41-NEXT:    psubq %xmm7, %xmm5
+; SSE41-NEXT:    movdqa %xmm9, %xmm0
+; SSE41-NEXT:    movdqa %xmm4, %xmm2
+; SSE41-NEXT:    movdqa %xmm5, %xmm3
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: v8i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpxor %xmm5, %xmm7, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm8
+; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm6, %ymm6
+; AVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpxor %xmm5, %xmm6, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm6
+; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v8i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %ymm4, %ymm2, %ymm5
+; AVX2-NEXT:    vpxor %ymm4, %ymm0, %ymm6
+; AVX2-NEXT:    vpcmpgtq %ymm5, %ymm6, %ymm5
+; AVX2-NEXT:    vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm2
+; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm4
+; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm4, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpsubq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %z = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %x, <8 x i64> %y)
+  ret <8 x i64> %z
+}
+
+define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
+; SSE-LABEL: v2i128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    xorl %edi, %edi
+; SSE-NEXT:    subq %r9, %rsi
+; SSE-NEXT:    sbbq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT:    cmovbq %rdi, %rsi
+; SSE-NEXT:    cmovbq %rdi, %rdx
+; SSE-NEXT:    subq {{[0-9]+}}(%rsp), %rcx
+; SSE-NEXT:    sbbq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT:    cmovbq %rdi, %r8
+; SSE-NEXT:    cmovbq %rdi, %rcx
+; SSE-NEXT:    movq %r8, 24(%rax)
+; SSE-NEXT:    movq %rcx, 16(%rax)
+; SSE-NEXT:    movq %rdx, 8(%rax)
+; SSE-NEXT:    movq %rsi, (%rax)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    xorl %edi, %edi
+; AVX-NEXT:    subq %r9, %rsi
+; AVX-NEXT:    sbbq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    cmovbq %rdi, %rsi
+; AVX-NEXT:    cmovbq %rdi, %rdx
+; AVX-NEXT:    subq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    sbbq {{[0-9]+}}(%rsp), %r8
+; AVX-NEXT:    cmovbq %rdi, %r8
+; AVX-NEXT:    cmovbq %rdi, %rcx
+; AVX-NEXT:    movq %r8, 24(%rax)
+; AVX-NEXT:    movq %rcx, 16(%rax)
+; AVX-NEXT:    movq %rdx, 8(%rax)
+; AVX-NEXT:    movq %rsi, (%rax)
+; AVX-NEXT:    retq
+  %z = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %x, <2 x i128> %y)
+  ret <2 x i128> %z
+}
diff --git a/test/CodeGen/X86/vec_cast.ll b/test/CodeGen/X86/vec_cast.ll
index 23870bc..6e9a167 100644
--- a/test/CodeGen/X86/vec_cast.ll
+++ b/test/CodeGen/X86/vec_cast.ll
@@ -14,11 +14,10 @@
 ;
 ; CHECK-WIN-LABEL: a:
 ; CHECK-WIN:       # %bb.0:
-; CHECK-WIN-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; CHECK-WIN-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-WIN-NEXT:    movdqa (%rcx), %xmm1
+; CHECK-WIN-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; CHECK-WIN-NEXT:    psrad $16, %xmm0
-; CHECK-WIN-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; CHECK-WIN-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; CHECK-WIN-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; CHECK-WIN-NEXT:    psrad $16, %xmm1
 ; CHECK-WIN-NEXT:    retq
   %c = sext <8 x i16> %a to <8 x i32>
diff --git a/test/CodeGen/X86/vec_cast3.ll b/test/CodeGen/X86/vec_cast3.ll
index e4e6aa5..e4ff93a 100644
--- a/test/CodeGen/X86/vec_cast3.ll
+++ b/test/CodeGen/X86/vec_cast3.ll
@@ -236,3 +236,26 @@
   %res = fptoui <2 x float> %src to <2 x i32>
   ret <2 x i32> %res
 }
+
+define <32 x i8> @PR40146(<4 x i64> %x) {
+; CHECK-LABEL: PR40146:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT:    retl
+;
+; CHECK-WIDE-LABEL: PR40146:
+; CHECK-WIDE:       ## %bb.0:
+; CHECK-WIDE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK-WIDE-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK-WIDE-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; CHECK-WIDE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-WIDE-NEXT:    retl
+  %perm = shufflevector <4 x i64> %x, <4 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
+  %t1 = bitcast <4 x i64> %perm to <32 x i8>
+  %t2 = shufflevector <32 x i8> %t1, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, <32 x i32> <i32 0, i32 32, i32 1, i32 32, i32 2, i32 32, i32 3, i32 32, i32 4, i32 32, i32 5, i32 32, i32 6, i32 32, i32 7, i32 32, i32 16, i32 48, i32 17, i32 48, i32 18, i32 48, i32 19, i32 48, i32 20, i32 48, i32 21, i32 48, i32 22, i32 48, i32 23, i32 48>
+  ret <32 x i8> %t2
+}
+
diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll
index 1ccd636..a9cb10c 100644
--- a/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/test/CodeGen/X86/vec_int_to_fp.ll
@@ -4278,13 +4278,12 @@
 define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
 ; SSE2-LABEL: sitofp_load_8i16_to_8f32:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    psrad $16, %xmm0
 ; SSE2-NEXT:    cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    cvtdq2ps %xmm1, %xmm1
 ; SSE2-NEXT:    retq
 ;
@@ -4323,15 +4322,13 @@
 define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
 ; SSE2-LABEL: sitofp_load_8i8_to_8f32:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT:    psrad $24, %xmm1
-; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    psrad $24, %xmm0
 ; SSE2-NEXT:    cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psrad $24, %xmm1
 ; SSE2-NEXT:    cvtdq2ps %xmm1, %xmm1
 ; SSE2-NEXT:    retq
 ;
@@ -4345,10 +4342,8 @@
 ;
 ; AVX1-LABEL: sitofp_load_8i8_to_8f32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpmovsxbw (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxbd 4(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX1-NEXT:    retq
diff --git a/test/CodeGen/X86/vec_minmax_match.ll b/test/CodeGen/X86/vec_minmax_match.ll
index c3652f3..4d6bb79 100644
--- a/test/CodeGen/X86/vec_minmax_match.ll
+++ b/test/CodeGen/X86/vec_minmax_match.ll
@@ -223,12 +223,11 @@
 ; CHECK-LABEL: wrong_pred_for_smin_with_not:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm2
-; CHECK-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm3
-; CHECK-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm1 = [4294967291,4294967291,4294967291,4294967291]
-; CHECK-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm1
+; CHECK-NEXT:    vpmaxud {{.*}}(%rip), %xmm0, %xmm2
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm2 = [4294967291,4294967291,4294967291,4294967291]
+; CHECK-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %not_x = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
   %cmp = icmp ugt <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
diff --git a/test/CodeGen/X86/vec_minmax_sint.ll b/test/CodeGen/X86/vec_minmax_sint.ll
index a0b620a..a6afe94 100644
--- a/test/CodeGen/X86/vec_minmax_sint.ll
+++ b/test/CodeGen/X86/vec_minmax_sint.ll
@@ -42,10 +42,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -122,17 +121,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -142,10 +140,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
@@ -435,10 +432,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -515,17 +511,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -535,10 +530,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
@@ -828,10 +822,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -916,10 +909,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -928,10 +920,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
@@ -1216,10 +1207,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -1304,10 +1294,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -1316,10 +1305,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
diff --git a/test/CodeGen/X86/vec_minmax_uint.ll b/test/CodeGen/X86/vec_minmax_uint.ll
index 3f1b8ac..4f8477a 100644
--- a/test/CodeGen/X86/vec_minmax_uint.ll
+++ b/test/CodeGen/X86/vec_minmax_uint.ll
@@ -42,10 +42,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -132,17 +131,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -152,10 +150,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
@@ -463,10 +460,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -553,17 +549,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -573,10 +568,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
@@ -884,10 +878,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -981,10 +974,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -993,10 +985,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
@@ -1304,10 +1295,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -1401,10 +1391,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -1413,10 +1402,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
diff --git a/test/CodeGen/X86/vec_setcc-2.ll b/test/CodeGen/X86/vec_setcc-2.ll
index 4c22606..946c9fc 100644
--- a/test/CodeGen/X86/vec_setcc-2.ll
+++ b/test/CodeGen/X86/vec_setcc-2.ll
@@ -32,17 +32,15 @@
 ; SSE41-NEXT:    je LBB0_3
 ; SSE41-NEXT:  ## %bb.1: ## %for.body.preheader
 ; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [26,26,26,26,26,26,26,26]
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [25,25,25,25,25,25,25,25]
 ; SSE41-NEXT:    .p2align 4, 0x90
 ; SSE41-NEXT:  LBB0_2: ## %for.body
 ; SSE41-NEXT:    ## =>This Inner Loop Header: Depth=1
-; SSE41-NEXT:    movdqa (%rdi,%rax), %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm3
-; SSE41-NEXT:    pmaxuw %xmm0, %xmm3
-; SSE41-NEXT:    pcmpeqw %xmm2, %xmm3
-; SSE41-NEXT:    pxor %xmm1, %xmm3
-; SSE41-NEXT:    movdqa %xmm3, (%rsi,%rax)
+; SSE41-NEXT:    movdqa (%rdi,%rax), %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    pminuw %xmm0, %xmm2
+; SSE41-NEXT:    pcmpeqw %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, (%rsi,%rax)
 ; SSE41-NEXT:    addq $16, %rax
 ; SSE41-NEXT:    decl %edx
 ; SSE41-NEXT:    jne LBB0_2
@@ -146,11 +144,9 @@
 define <16 x i8> @test_ult_byte(<16 x i8> %a) {
 ; CHECK-LABEL: test_ult_byte:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11]
-; CHECK-NEXT:    pmaxub %xmm0, %xmm1
+; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]
+; CHECK-NEXT:    pminub %xmm0, %xmm1
 ; CHECK-NEXT:    pcmpeqb %xmm1, %xmm0
-; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-NEXT:    pxor %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %icmp = icmp ult <16 x i8> %a, <i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11>
@@ -187,11 +183,9 @@
 define <16 x i1> @ugt_v16i8_splat(<16 x i8> %x) {
 ; CHECK-LABEL: ugt_v16i8_splat:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
-; CHECK-NEXT:    pminub %xmm0, %xmm1
+; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43]
+; CHECK-NEXT:    pmaxub %xmm0, %xmm1
 ; CHECK-NEXT:    pcmpeqb %xmm1, %xmm0
-; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-NEXT:    pxor %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %cmp = icmp ugt <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
   ret <16 x i1> %cmp
@@ -206,11 +200,9 @@
 ;
 ; SSE41-LABEL: ugt_v8i16_splat:
 ; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [242,242,242,242,242,242,242,242]
-; SSE41-NEXT:    pminuw %xmm0, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [243,243,243,243,243,243,243,243]
+; SSE41-NEXT:    pmaxuw %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpeqw %xmm1, %xmm0
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %cmp = icmp ugt <8 x i16> %x, <i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242>
   ret <8 x i1> %cmp
@@ -225,11 +217,9 @@
 ;
 ; SSE41-LABEL: ugt_v4i32_splat:
 ; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [4294967254,4294967254,4294967254,4294967254]
-; SSE41-NEXT:    pminud %xmm0, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [4294967255,4294967255,4294967255,4294967255]
+; SSE41-NEXT:    pmaxud %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %cmp = icmp ugt <4 x i32> %x, <i32 -42, i32 -42, i32 -42, i32 -42>
   ret <4 x i1> %cmp
@@ -341,11 +331,9 @@
 define <16 x i1> @ult_v16i8_splat(<16 x i8> %x) {
 ; CHECK-LABEL: ult_v16i8_splat:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
-; CHECK-NEXT:    pmaxub %xmm0, %xmm1
+; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41]
+; CHECK-NEXT:    pminub %xmm0, %xmm1
 ; CHECK-NEXT:    pcmpeqb %xmm1, %xmm0
-; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-NEXT:    pxor %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %cmp = icmp ult <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
   ret <16 x i1> %cmp
@@ -361,11 +349,9 @@
 ;
 ; SSE41-LABEL: ult_v8i16_splat:
 ; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [242,242,242,242,242,242,242,242]
-; SSE41-NEXT:    pmaxuw %xmm0, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [241,241,241,241,241,241,241,241]
+; SSE41-NEXT:    pminuw %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpeqw %xmm1, %xmm0
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %cmp = icmp ult <8 x i16> %x, <i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242>
   ret <8 x i1> %cmp
@@ -382,11 +368,9 @@
 ;
 ; SSE41-LABEL: ult_v4i32_splat:
 ; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [4294967254,4294967254,4294967254,4294967254]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [4294967253,4294967253,4294967253,4294967253]
+; SSE41-NEXT:    pminud %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %cmp = icmp ult <4 x i32> %x, <i32 -42, i32 -42, i32 -42, i32 -42>
   ret <4 x i1> %cmp
@@ -494,6 +478,30 @@
   ret <2 x i1> %cmp
 }
 
+; This should be simplified before we reach lowering, but
+; make sure that we don't get it wrong by underflowing the constant.
+
+define <4 x i1> @ult_v4i32_splat_0_simplify(<4 x i32> %x) {
+; CHECK-LABEL: ult_v4i32_splat_0_simplify:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    xorps %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %cmp = icmp ult <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %cmp
+}
+
+; This should be simplified before we reach lowering, but
+; make sure that we don't get it wrong by overflowing the constant.
+
+define <4 x i1> @ugt_v4i32_splat_maxval_simplify(<4 x i32> %x) {
+; CHECK-LABEL: ugt_v4i32_splat_maxval_simplify:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    xorps %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %cmp = icmp ugt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
+  ret <4 x i1> %cmp
+}
+
 define <4 x i1> @ugt_v4i32_nonsplat(<4 x i32> %x) {
 ; SSE2-LABEL: ugt_v4i32_nonsplat:
 ; SSE2:       ## %bb.0:
@@ -524,11 +532,9 @@
 ;
 ; SSE41-LABEL: ugt_v4i32_splat_commute:
 ; SSE41:       ## %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [4,4,4,4]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [3,3,3,3]
+; SSE41-NEXT:    pminud %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %cmp = icmp ugt <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %x
   ret <4 x i1> %cmp
@@ -549,11 +555,9 @@
 ; SSE41-LABEL: PR39859:
 ; SSE41:       ## %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [42,42,42,42,42,42,42,42]
-; SSE41-NEXT:    pminuw %xmm0, %xmm3
-; SSE41-NEXT:    pcmpeqw %xmm0, %xmm3
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm3, %xmm0
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [43,43,43,43,43,43,43,43]
+; SSE41-NEXT:    pmaxuw %xmm2, %xmm0
+; SSE41-NEXT:    pcmpeqw %xmm2, %xmm0
 ; SSE41-NEXT:    pblendvb %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-ext-logic.ll b/test/CodeGen/X86/vector-ext-logic.ll
new file mode 100644
index 0000000..01c6c1a
--- /dev/null
+++ b/test/CodeGen/X86/vector-ext-logic.ll
@@ -0,0 +1,468 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX2
+
+define <8 x i32> @zext_and_v8i32(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: zext_and_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pand %xmm1, %xmm2
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: zext_and_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i16> %x to <8 x i32>
+  %yz = zext <8 x i16> %y to <8 x i32>
+  %r = and <8 x i32> %xz, %yz
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @zext_or_v8i32(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: zext_or_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: zext_or_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i16> %x to <8 x i32>
+  %yz = zext <8 x i16> %y to <8 x i32>
+  %r = or <8 x i32> %xz, %yz
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @zext_xor_v8i32(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: zext_xor_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pxor %xmm1, %xmm2
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: zext_xor_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i16> %x to <8 x i32>
+  %yz = zext <8 x i16> %y to <8 x i32>
+  %r = xor <8 x i32> %xz, %yz
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @sext_and_v8i32(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: sext_and_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand %xmm1, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT:    psrad $16, %xmm2
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: sext_and_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i16> %x to <8 x i32>
+  %ys = sext <8 x i16> %y to <8 x i32>
+  %r = and <8 x i32> %xs, %ys
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @sext_or_v8i32(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: sext_or_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT:    psrad $16, %xmm2
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: sext_or_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i16> %x to <8 x i32>
+  %ys = sext <8 x i16> %y to <8 x i32>
+  %r = or <8 x i32> %xs, %ys
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @sext_xor_v8i32(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: sext_xor_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT:    psrad $16, %xmm2
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: sext_xor_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i16> %x to <8 x i32>
+  %ys = sext <8 x i16> %y to <8 x i32>
+  %r = xor <8 x i32> %xs, %ys
+  ret <8 x i32> %r
+}
+
+define <8 x i16> @zext_and_v8i16(<8 x i8> %x, <8 x i8> %y) {
+; SSE2-LABEL: zext_and_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    andps %xmm1, %xmm0
+; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: zext_and_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i8> %x to <8 x i16>
+  %yz = zext <8 x i8> %y to <8 x i16>
+  %r = and <8 x i16> %xz, %yz
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @zext_or_v8i16(<8 x i8> %x, <8 x i8> %y) {
+; SSE2-LABEL: zext_or_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    orps %xmm1, %xmm0
+; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: zext_or_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vorps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i8> %x to <8 x i16>
+  %yz = zext <8 x i8> %y to <8 x i16>
+  %r = or <8 x i16> %xz, %yz
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @zext_xor_v8i16(<8 x i8> %x, <8 x i8> %y) {
+; SSE2-LABEL: zext_xor_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    xorps %xmm1, %xmm0
+; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: zext_xor_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i8> %x to <8 x i16>
+  %yz = zext <8 x i8> %y to <8 x i16>
+  %r = xor <8 x i16> %xz, %yz
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @sext_and_v8i16(<8 x i8> %x, <8 x i8> %y) {
+; SSE2-LABEL: sext_and_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllw $8, %xmm0
+; SSE2-NEXT:    psraw $8, %xmm0
+; SSE2-NEXT:    psllw $8, %xmm1
+; SSE2-NEXT:    psraw $8, %xmm1
+; SSE2-NEXT:    pand %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: sext_and_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $8, %xmm0, %xmm0
+; AVX2-NEXT:    vpsraw $8, %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpsraw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i8> %x to <8 x i16>
+  %ys = sext <8 x i8> %y to <8 x i16>
+  %r = and <8 x i16> %xs, %ys
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @sext_or_v8i16(<8 x i8> %x, <8 x i8> %y) {
+; SSE2-LABEL: sext_or_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllw $8, %xmm0
+; SSE2-NEXT:    psraw $8, %xmm0
+; SSE2-NEXT:    psllw $8, %xmm1
+; SSE2-NEXT:    psraw $8, %xmm1
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: sext_or_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $8, %xmm0, %xmm0
+; AVX2-NEXT:    vpsraw $8, %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpsraw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i8> %x to <8 x i16>
+  %ys = sext <8 x i8> %y to <8 x i16>
+  %r = or <8 x i16> %xs, %ys
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @sext_xor_v8i16(<8 x i8> %x, <8 x i8> %y) {
+; SSE2-LABEL: sext_xor_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psllw $8, %xmm0
+; SSE2-NEXT:    psraw $8, %xmm0
+; SSE2-NEXT:    psllw $8, %xmm1
+; SSE2-NEXT:    psraw $8, %xmm1
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: sext_xor_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $8, %xmm0, %xmm0
+; AVX2-NEXT:    vpsraw $8, %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpsraw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i8> %x to <8 x i16>
+  %ys = sext <8 x i8> %y to <8 x i16>
+  %r = xor <8 x i16> %xs, %ys
+  ret <8 x i16> %r
+}
+
+define <8 x i32> @bool_zext_and(<8 x i1> %x, <8 x i1> %y) {
+; SSE2-LABEL: bool_zext_and:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT:    pand %xmm1, %xmm0
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: bool_zext_and:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i1> %x to <8 x i32>
+  %yz = zext <8 x i1> %y to <8 x i32>
+  %r = and <8 x i32> %xz, %yz
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @bool_zext_or(<8 x i1> %x, <8 x i1> %y) {
+; SSE2-LABEL: bool_zext_or:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: bool_zext_or:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i1> %x to <8 x i32>
+  %yz = zext <8 x i1> %y to <8 x i32>
+  %r = or <8 x i32> %xz, %yz
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @bool_zext_xor(<8 x i1> %x, <8 x i1> %y) {
+; SSE2-LABEL: bool_zext_xor:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT:    pxor %xmm4, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: bool_zext_xor:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    retq
+  %xz = zext <8 x i1> %x to <8 x i32>
+  %yz = zext <8 x i1> %y to <8 x i32>
+  %r = xor <8 x i32> %xz, %yz
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @bool_sext_and(<8 x i1> %x, <8 x i1> %y) {
+; SSE2-LABEL: bool_sext_and:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pslld $31, %xmm0
+; SSE2-NEXT:    psrad $31, %xmm0
+; SSE2-NEXT:    pslld $31, %xmm2
+; SSE2-NEXT:    psrad $31, %xmm2
+; SSE2-NEXT:    pslld $31, %xmm1
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    pslld $31, %xmm3
+; SSE2-NEXT:    psrad $31, %xmm3
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: bool_sext_and:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i1> %x to <8 x i32>
+  %ys = sext <8 x i1> %y to <8 x i32>
+  %r = and <8 x i32> %xs, %ys
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @bool_sext_or(<8 x i1> %x, <8 x i1> %y) {
+; SSE2-LABEL: bool_sext_or:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pslld $31, %xmm0
+; SSE2-NEXT:    psrad $31, %xmm0
+; SSE2-NEXT:    pslld $31, %xmm2
+; SSE2-NEXT:    psrad $31, %xmm2
+; SSE2-NEXT:    pslld $31, %xmm1
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    por %xmm0, %xmm1
+; SSE2-NEXT:    pslld $31, %xmm3
+; SSE2-NEXT:    psrad $31, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: bool_sext_or:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i1> %x to <8 x i32>
+  %ys = sext <8 x i1> %y to <8 x i32>
+  %r = or <8 x i32> %xs, %ys
+  ret <8 x i32> %r
+}
+
+define <8 x i32> @bool_sext_xor(<8 x i1> %x, <8 x i1> %y) {
+; SSE2-LABEL: bool_sext_xor:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pslld $31, %xmm0
+; SSE2-NEXT:    psrad $31, %xmm0
+; SSE2-NEXT:    pslld $31, %xmm2
+; SSE2-NEXT:    psrad $31, %xmm2
+; SSE2-NEXT:    pslld $31, %xmm1
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    pxor %xmm0, %xmm1
+; SSE2-NEXT:    pslld $31, %xmm3
+; SSE2-NEXT:    psrad $31, %xmm3
+; SSE2-NEXT:    pxor %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: bool_sext_xor:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+  %xs = sext <8 x i1> %x to <8 x i32>
+  %ys = sext <8 x i1> %y to <8 x i32>
+  %r = xor <8 x i32> %xs, %ys
+  ret <8 x i32> %r
+}
+
diff --git a/test/CodeGen/X86/vector-fshl-128.ll b/test/CodeGen/X86/vector-fshl-128.ll
new file mode 100644
index 0000000..4a51e33
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshl-128.ll
@@ -0,0 +1,3051 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+; Just one 32-bit run to make sure we do reasonable things for i64 cases.
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE2
+
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
+declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
+
+;
+; Variable Shifts
+;
+
+define <2 x i64> @var_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    psllq %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    psllq %xmm4, %xmm5
+; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [64,64]
+; SSE2-NEXT:    psubq %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psrlq %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT:    psrlq %xmm3, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm4[0],xmm1[1]
+; SSE2-NEXT:    orpd %xmm5, %xmm1
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    psllq %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    psllq %xmm5, %xmm4
+; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [64,64]
+; SSE41-NEXT:    psubq %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlq %xmm0, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT:    psrlq %xmm0, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT:    por %xmm1, %xmm4
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqq %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
+; SSE41-NEXT:    movapd %xmm4, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpsllvq %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpsllvq %xmm4, %xmm0, %xmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpsllvq %xmm4, %xmm0, %xmm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VL-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsllvq %xmm4, %xmm0, %xmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsllvq %xmm4, %xmm0, %xmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VBMI2-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsllvq %xmm4, %xmm0, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VLBW-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlq %xmm2, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpsubq {{.*}}(%rip), %xmm2, %xmm4
+; XOPAVX1-NEXT:    vpshlq %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsllvq %xmm2, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    psllq %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psllq %xmm4, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [64,0,64,0]
+; X32-SSE-NEXT:    psubq %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE-NEXT:    psrlq %xmm3, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psrlq %xmm3, %xmm1
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm4[0],xmm1[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm1
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; X32-SSE-NEXT:    pand %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    pandn %xmm1, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %amt)
+  ret <2 x i64> %res
+}
+
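+; The SSE2/SSE41 v4i32 lowerings below build 2^amt per lane with the
+; float-exponent trick: pslld $23 moves amt into the exponent field, paddd
+; with 1065353216 (the bit pattern of 1.0f) biases it, cvttps2dq then yields
+; the integer 2^amt, and a multiply performs the variable left shift.
+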
+define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32,32,32,32]
+; SSE2-NEXT:    psubd %xmm2, %xmm4
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm4[2,3,3,3,4,5,6,7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrld %xmm3, %xmm5
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[0,1,1,1,4,5,6,7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrld %xmm6, %xmm3
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm4[2,3,3,3,4,5,6,7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm6
+; SSE2-NEXT:    psrld %xmm5, %xmm6
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,1,4,5,6,7]
+; SSE2-NEXT:    psrld %xmm4, %xmm1
+; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm6[1]
+; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,3]
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm1
+; SSE2-NEXT:    pslld $23, %xmm2
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    cvttps2dq %xmm2, %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pmuludq %xmm2, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT:    por %xmm3, %xmm4
+; SSE2-NEXT:    pand %xmm1, %xmm0
+; SSE2-NEXT:    pandn %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [32,32,32,32]
+; SSE41-NEXT:    psubd %xmm2, %xmm0
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm4 = xmm0[2,3,3,3,4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrld %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[2,3,3,3,4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm7
+; SSE41-NEXT:    psrld %xmm6, %xmm7
+; SSE41-NEXT:    pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,1,4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrld %xmm0, %xmm5
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm4[0,1,1,1,4,5,6,7]
+; SSE41-NEXT:    psrld %xmm0, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE41-NEXT:    pslld $23, %xmm2
+; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    cvttps2dq %xmm2, %xmm2
+; SSE41-NEXT:    pmulld %xmm3, %xmm2
+; SSE41-NEXT:    por %xmm1, %xmm2
+; SSE41-NEXT:    blendvps %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movaps %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrld %xmm4, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrlq $32, %xmm3, %xmm5
+; AVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX1-NEXT:    vpsrld %xmm6, %xmm1, %xmm6
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT:    vpsrld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm3
+; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vcvttps2dq %xmm3, %xmm3
+; AVX1-NEXT:    vpmulld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpsllvd %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpsllvd %xmm4, %xmm0, %xmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpsllvd %xmm4, %xmm0, %xmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VL-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsllvd %xmm4, %xmm0, %xmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsllvd %xmm4, %xmm0, %xmm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsllvd %xmm4, %xmm0, %xmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VLBW-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshld %xmm2, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpsubd {{.*}}(%rip), %xmm2, %xmm4
+; XOPAVX1-NEXT:    vpshld %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; XOPAVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsllvd %xmm2, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [32,32,32,32]
+; X32-SSE-NEXT:    psubd %xmm2, %xmm4
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm4[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrld %xmm3, %xmm5
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    psrld %xmm6, %xmm3
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm4[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm6
+; X32-SSE-NEXT:    psrld %xmm5, %xmm6
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT:    psrld %xmm4, %xmm1
+; X32-SSE-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm6[1]
+; X32-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,3]
+; X32-SSE-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm1
+; X32-SSE-NEXT:    pslld $23, %xmm2
+; X32-SSE-NEXT:    paddd {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    cvttps2dq %xmm2, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
+; X32-SSE-NEXT:    pmuludq %xmm2, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm5, %xmm2
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; X32-SSE-NEXT:    por %xmm3, %xmm4
+; X32-SSE-NEXT:    pand %xmm1, %xmm0
+; X32-SSE-NEXT:    pandn %xmm4, %xmm1
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt)
+  ret <4 x i32> %res
+}
+
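+; x86 has no variable per-element 16-bit shifts before AVX512BW, so the SSE2
+; run below synthesizes the right shift bit-by-bit: psllw $12 moves the
+; amount's bits to the sign position, psraw $15 turns each into a lane mask,
+; and four masked stages shift by 8, 4, 2 and 1. The left shift reuses the
+; 2^amt multiply, computed on two unpacked dword halves and repacked.
+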
+define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    psubw %xmm2, %xmm3
+; SSE2-NEXT:    psllw $12, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm5
+; SSE2-NEXT:    psrlw $8, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    paddw %xmm3, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm5
+; SSE2-NEXT:    psrlw $4, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    paddw %xmm3, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm5
+; SSE2-NEXT:    psrlw $2, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    paddw %xmm3, %xmm3
+; SSE2-NEXT:    psraw $15, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    psrlw $1, %xmm1
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSE2-NEXT:    pslld $23, %xmm5
+; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; SSE2-NEXT:    paddd %xmm6, %xmm5
+; SSE2-NEXT:    cvttps2dq %xmm5, %xmm5
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE2-NEXT:    movdqa %xmm2, %xmm7
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; SSE2-NEXT:    pslld $23, %xmm7
+; SSE2-NEXT:    paddd %xmm6, %xmm7
+; SSE2-NEXT:    cvttps2dq %xmm7, %xmm6
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
+; SSE2-NEXT:    pmullw %xmm0, %xmm6
+; SSE2-NEXT:    por %xmm4, %xmm6
+; SSE2-NEXT:    por %xmm1, %xmm6
+; SSE2-NEXT:    pcmpeqw %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm6, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT:    psubw %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    psllw $12, %xmm4
+; SSE41-NEXT:    psllw $4, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    paddw %xmm0, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlw $8, %xmm5
+; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlw $4, %xmm5
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlw $2, %xmm5
+; SSE41-NEXT:    paddw %xmm4, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlw $1, %xmm5
+; SSE41-NEXT:    paddw %xmm4, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm1
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE41-NEXT:    pslld $23, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; SSE41-NEXT:    paddd %xmm5, %xmm4
+; SSE41-NEXT:    cvttps2dq %xmm4, %xmm6
+; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE41-NEXT:    pslld $23, %xmm4
+; SSE41-NEXT:    paddd %xmm5, %xmm4
+; SSE41-NEXT:    cvttps2dq %xmm4, %xmm4
+; SSE41-NEXT:    packusdw %xmm6, %xmm4
+; SSE41-NEXT:    pmullw %xmm3, %xmm4
+; SSE41-NEXT:    por %xmm1, %xmm4
+; SSE41-NEXT:    pcmpeqw %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm2, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $12, %xmm3, %xmm4
+; AVX1-NEXT:    vpsllw $4, %xmm3, %xmm3
+; AVX1-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddw %xmm3, %xmm3, %xmm4
+; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm3, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $2, %xmm1, %xmm3
+; AVX1-NEXT:    vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm3
+; AVX1-NEXT:    vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT:    vpslld $23, %xmm4, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddd %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpackusdw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpmullw %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT:    vpsllvd %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpsrlvd %ymm5, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512F-NEXT:    vpsllvd %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512F-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512VL-NEXT:    vpsllvd %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512VL-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpmovdw %ymm1, %xmm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsllvw %xmm4, %xmm0, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsrlvw %xmm4, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VLBW-NEXT:    vptestnmw %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpshlw %xmm2, %xmm0, %xmm3
+; XOP-NEXT:    vpsubw {{.*}}(%rip), %xmm2, %xmm4
+; XOP-NEXT:    vpshlw %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOP-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT:    psubw %xmm2, %xmm3
+; X32-SSE-NEXT:    psllw $12, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X32-SSE-NEXT:    psraw $15, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
+; X32-SSE-NEXT:    pandn %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlw $8, %xmm1
+; X32-SSE-NEXT:    pand %xmm4, %xmm1
+; X32-SSE-NEXT:    por %xmm5, %xmm1
+; X32-SSE-NEXT:    paddw %xmm3, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X32-SSE-NEXT:    psraw $15, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
+; X32-SSE-NEXT:    pandn %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlw $4, %xmm1
+; X32-SSE-NEXT:    pand %xmm4, %xmm1
+; X32-SSE-NEXT:    por %xmm5, %xmm1
+; X32-SSE-NEXT:    paddw %xmm3, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X32-SSE-NEXT:    psraw $15, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
+; X32-SSE-NEXT:    pandn %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlw $2, %xmm1
+; X32-SSE-NEXT:    pand %xmm4, %xmm1
+; X32-SSE-NEXT:    por %xmm5, %xmm1
+; X32-SSE-NEXT:    paddw %xmm3, %xmm3
+; X32-SSE-NEXT:    psraw $15, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X32-SSE-NEXT:    pandn %xmm1, %xmm4
+; X32-SSE-NEXT:    psrlw $1, %xmm1
+; X32-SSE-NEXT:    pand %xmm3, %xmm1
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm5
+; X32-SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; X32-SSE-NEXT:    pslld $23, %xmm5
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; X32-SSE-NEXT:    paddd %xmm6, %xmm5
+; X32-SSE-NEXT:    cvttps2dq %xmm5, %xmm5
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm7
+; X32-SSE-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; X32-SSE-NEXT:    pslld $23, %xmm7
+; X32-SSE-NEXT:    paddd %xmm6, %xmm7
+; X32-SSE-NEXT:    cvttps2dq %xmm7, %xmm6
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
+; X32-SSE-NEXT:    pmullw %xmm0, %xmm6
+; X32-SSE-NEXT:    por %xmm4, %xmm6
+; X32-SSE-NEXT:    por %xmm1, %xmm6
+; X32-SSE-NEXT:    pcmpeqw %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    pandn %xmm6, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
+  ret <8 x i16> %res
+}
+
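+; x86 has no byte-granularity shifts at all, so the v16i8 lowerings move the
+; amount's bits to each byte's sign position with psllw $5 and select between
+; shifted and unshifted bytes in three stages (4, 2, 1), masking away the
+; bits that a word-sized shift spills across byte boundaries.
+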
+define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    psllw $5, %xmm5
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    psllw $4, %xmm3
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pandn %xmm0, %xmm6
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    por %xmm6, %xmm3
+; SSE2-NEXT:    paddb %xmm5, %xmm5
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm7
+; SSE2-NEXT:    pandn %xmm3, %xmm7
+; SSE2-NEXT:    psllw $2, %xmm3
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    por %xmm7, %xmm3
+; SSE2-NEXT:    paddb %xmm5, %xmm5
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm8
+; SSE2-NEXT:    pandn %xmm3, %xmm8
+; SSE2-NEXT:    paddb %xmm3, %xmm3
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE2-NEXT:    psubb %xmm2, %xmm6
+; SSE2-NEXT:    psllw $5, %xmm6
+; SSE2-NEXT:    pxor %xmm7, %xmm7
+; SSE2-NEXT:    pcmpgtb %xmm6, %xmm7
+; SSE2-NEXT:    movdqa %xmm7, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm5
+; SSE2-NEXT:    psrlw $4, %xmm1
+; SSE2-NEXT:    pand %xmm7, %xmm1
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    paddb %xmm6, %xmm6
+; SSE2-NEXT:    pxor %xmm5, %xmm5
+; SSE2-NEXT:    pcmpgtb %xmm6, %xmm5
+; SSE2-NEXT:    movdqa %xmm5, %xmm7
+; SSE2-NEXT:    pandn %xmm1, %xmm7
+; SSE2-NEXT:    psrlw $2, %xmm1
+; SSE2-NEXT:    pand %xmm5, %xmm1
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    por %xmm7, %xmm1
+; SSE2-NEXT:    paddb %xmm6, %xmm6
+; SSE2-NEXT:    pcmpeqb %xmm4, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm6, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm5
+; SSE2-NEXT:    psrlw $1, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    por %xmm8, %xmm1
+; SSE2-NEXT:    por %xmm3, %xmm1
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE41-NEXT:    psubb %xmm2, %xmm4
+; SSE41-NEXT:    pxor %xmm5, %xmm5
+; SSE41-NEXT:    pcmpeqb %xmm2, %xmm5
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    psllw $5, %xmm0
+; SSE41-NEXT:    movdqa %xmm3, %xmm6
+; SSE41-NEXT:    psllw $4, %xmm6
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm6
+; SSE41-NEXT:    movdqa %xmm3, %xmm2
+; SSE41-NEXT:    pblendvb %xmm0, %xmm6, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
+; SSE41-NEXT:    psllw $2, %xmm6
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm6
+; SSE41-NEXT:    paddb %xmm0, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm6, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
+; SSE41-NEXT:    paddb %xmm2, %xmm6
+; SSE41-NEXT:    paddb %xmm0, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm6, %xmm2
+; SSE41-NEXT:    psllw $5, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    paddb %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm1, %xmm7
+; SSE41-NEXT:    psrlw $4, %xmm7
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm7
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm7, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrlw $2, %xmm4
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    movdqa %xmm6, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrlw $1, %xmm4
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    paddb %xmm6, %xmm6
+; SSE41-NEXT:    movdqa %xmm6, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm1
+; SSE41-NEXT:    por %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpsllw $5, %xmm2, %xmm3
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm4
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX-NEXT:    vpblendvb %xmm3, %xmm4, %xmm0, %xmm4
+; AVX-NEXT:    vpsllw $2, %xmm4, %xmm5
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm5, %xmm5
+; AVX-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
+; AVX-NEXT:    vpaddb %xmm4, %xmm4, %xmm5
+; AVX-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX-NEXT:    vpsllw $5, %xmm4, %xmm4
+; AVX-NEXT:    vpaddb %xmm4, %xmm4, %xmm5
+; AVX-NEXT:    vpsrlw $4, %xmm1, %xmm6
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm6, %xmm6
+; AVX-NEXT:    vpblendvb %xmm4, %xmm6, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlw $2, %xmm1, %xmm4
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm4
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm3, %zmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512VL-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm6, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm5, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v16i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm6, %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm5, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm5, %ymm6, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v16i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %ymm5, %ymm6, %ymm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %ymm4, %ymm1, %ymm1
+; AVX512VLVBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLVBMI2-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLVBMI2-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    vzeroupper
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpshlb %xmm2, %xmm0, %xmm3
+; XOP-NEXT:    vpsubb {{.*}}(%rip), %xmm2, %xmm4
+; XOP-NEXT:    vpshlb %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOP-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
+; X32-SSE-NEXT:    psllw $5, %xmm4
+; X32-SSE-NEXT:    pxor %xmm5, %xmm5
+; X32-SSE-NEXT:    pcmpgtb %xmm4, %xmm5
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    psllw $4, %xmm3
+; X32-SSE-NEXT:    pand %xmm5, %xmm3
+; X32-SSE-NEXT:    pandn %xmm0, %xmm5
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    por %xmm5, %xmm3
+; X32-SSE-NEXT:    paddb %xmm4, %xmm4
+; X32-SSE-NEXT:    pxor %xmm5, %xmm5
+; X32-SSE-NEXT:    pcmpgtb %xmm4, %xmm5
+; X32-SSE-NEXT:    movdqa %xmm5, %xmm6
+; X32-SSE-NEXT:    pandn %xmm3, %xmm6
+; X32-SSE-NEXT:    psllw $2, %xmm3
+; X32-SSE-NEXT:    pand %xmm5, %xmm3
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    por %xmm6, %xmm3
+; X32-SSE-NEXT:    paddb %xmm4, %xmm4
+; X32-SSE-NEXT:    pxor %xmm5, %xmm5
+; X32-SSE-NEXT:    pcmpgtb %xmm4, %xmm5
+; X32-SSE-NEXT:    movdqa %xmm5, %xmm4
+; X32-SSE-NEXT:    pandn %xmm3, %xmm4
+; X32-SSE-NEXT:    paddb %xmm3, %xmm3
+; X32-SSE-NEXT:    pand %xmm5, %xmm3
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X32-SSE-NEXT:    psubb %xmm2, %xmm5
+; X32-SSE-NEXT:    psllw $5, %xmm5
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm6, %xmm7
+; X32-SSE-NEXT:    pandn %xmm1, %xmm7
+; X32-SSE-NEXT:    psrlw $4, %xmm1
+; X32-SSE-NEXT:    pand %xmm6, %xmm1
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    por %xmm7, %xmm1
+; X32-SSE-NEXT:    paddb %xmm5, %xmm5
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm6, %xmm7
+; X32-SSE-NEXT:    pandn %xmm1, %xmm7
+; X32-SSE-NEXT:    psrlw $2, %xmm1
+; X32-SSE-NEXT:    pand %xmm6, %xmm1
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    por %xmm7, %xmm1
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    paddb %xmm5, %xmm5
+; X32-SSE-NEXT:    pcmpeqb %xmm6, %xmm2
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm6, %xmm5
+; X32-SSE-NEXT:    pandn %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlw $1, %xmm1
+; X32-SSE-NEXT:    pand %xmm6, %xmm1
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    por %xmm5, %xmm1
+; X32-SSE-NEXT:    por %xmm4, %xmm1
+; X32-SSE-NEXT:    por %xmm3, %xmm1
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    pandn %xmm1, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
+  ret <16 x i8> %res
+}
+
+;
+; Uniform Variable Shifts
+;
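+; With a splatted amount every lane uses the same count, so these lowerings
+; broadcast amt once and can use the scalar-count shift forms (psllq/psrlq
+; take the count from the low quadword of an xmm register), avoiding the
+; per-element expansion used above.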
+
+define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    psllq %xmm2, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [64,64]
+; SSE2-NEXT:    psubq %xmm2, %xmm4
+; SSE2-NEXT:    psrlq %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm3, %xmm1
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,1,0,1]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psllq %xmm4, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [64,64]
+; SSE41-NEXT:    psubq %xmm4, %xmm0
+; SSE41-NEXT:    psrlq %xmm0, %xmm1
+; SSE41-NEXT:    por %xmm1, %xmm2
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqq %xmm4, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movapd %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpsllq %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpsllq %xmm4, %xmm0, %xmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpsllq %xmm4, %xmm0, %xmm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VL-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsllq %xmm4, %xmm0, %xmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsllq %xmm4, %xmm0, %xmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VBMI2-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsllq %xmm4, %xmm0, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VLBW-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vpshldvq %xmm2, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsllq %xmm2, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    psllq %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psllq %xmm4, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [64,0,64,0]
+; X32-SSE-NEXT:    psubq %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE-NEXT:    psrlq %xmm3, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psrlq %xmm3, %xmm1
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm4[0],xmm1[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm1
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; X32-SSE-NEXT:    pand %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    pandn %xmm1, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <2 x i64> %amt, <2 x i64> undef, <2 x i32> zeroinitializer
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %splat)
+  ret <2 x i64> %res
+}
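+
+; Editorial note: on the AVX512VLVBMI2 target the whole splatted funnel shift
+; above collapses to a broadcast plus a single vpshldvq, since VPSHLDV
+; implements the concatenate-and-shift operation of llvm.fshl directly,
+; including the amount-is-zero case.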
+
+define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    xorps %xmm4, %xmm4
+; SSE2-NEXT:    movss {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pslld %xmm4, %xmm5
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    movl $32, %ecx
+; SSE2-NEXT:    subl %eax, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm4
+; SSE2-NEXT:    psrld %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,0,0]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
+; SSE41-NEXT:    movdqa %xmm3, %xmm2
+; SSE41-NEXT:    pslld %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [32,32,32,32]
+; SSE41-NEXT:    psubd %xmm4, %xmm0
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT:    psrld %xmm0, %xmm1
+; SSE41-NEXT:    por %xmm1, %xmm2
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    blendvps %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movaps %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vpslld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpslld %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX2-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpslld %xmm5, %xmm0, %xmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpslld %xmm5, %xmm0, %xmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VL-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpslld %xmm5, %xmm0, %xmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpslld %xmm5, %xmm0, %xmm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpslld %xmm5, %xmm0, %xmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VLBW-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vpshldvd %xmm2, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX1-NEXT:    vpslld %xmm3, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; XOPAVX1-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; XOPAVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX2-NEXT:    vpslld %xmm3, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; XOPAVX2-NEXT:    vpsrld %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    xorps %xmm4, %xmm4
+; X32-SSE-NEXT:    movss {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    pslld %xmm4, %xmm5
+; X32-SSE-NEXT:    movd %xmm2, %eax
+; X32-SSE-NEXT:    movl $32, %ecx
+; X32-SSE-NEXT:    subl %eax, %ecx
+; X32-SSE-NEXT:    movd %ecx, %xmm4
+; X32-SSE-NEXT:    psrld %xmm4, %xmm1
+; X32-SSE-NEXT:    por %xmm5, %xmm1
+; X32-SSE-NEXT:    pcmpeqd %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    pandn %xmm1, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <4 x i32> %amt, <4 x i32> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %splat)
+  ret <4 x i32> %res
+}
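+
+; Editorial note: pslld/psrld take their count from the low 64 bits of an xmm
+; register, so a single splatted i32 amount suffices above. On plain SSE2 the
+; 32 - a count for the right shift is even computed in a GPR (movd, subl,
+; movd back), while SSE4.1 and later keep it in vector registers via pmovzxdq.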
+
+define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    psubw %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    pcmpeqw %xmm2, %xmm4
+; SSE2-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    psllw %xmm2, %xmm5
+; SSE2-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlw %xmm3, %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm2[0,0,2,3,4,5,6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,0,0]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm3, %xmm2
+; SSE41-NEXT:    psllw %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT:    psubw %xmm4, %xmm0
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    psrlw %xmm0, %xmm1
+; SSE41-NEXT:    por %xmm1, %xmm2
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqw %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm5, %xmm0, %xmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsllw %xmm5, %xmm0, %xmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm5, %xmm0, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512VLBW-NEXT:    vptestnmw %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vpshldvw %xmm2, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; XOPAVX1-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT:    psubw %xmm2, %xmm3
+; X32-SSE-NEXT:    pxor %xmm4, %xmm4
+; X32-SSE-NEXT:    pcmpeqw %xmm2, %xmm4
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psllw %xmm2, %xmm5
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm1
+; X32-SSE-NEXT:    por %xmm5, %xmm1
+; X32-SSE-NEXT:    pand %xmm4, %xmm0
+; X32-SSE-NEXT:    pandn %xmm1, %xmm4
+; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %splat)
+  ret <8 x i16> %res
+}
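+
+; Editorial note: the pslldq/psrldq pair in the SSE2 path above is a
+; zero-extension idiom: shifting the vector up by 14 bytes and back down
+; leaves only word 0, giving the zero-extended 64-bit count that psllw and
+; psrlw expect. SSE4.1 and later use pmovzxwq for the same purpose.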
+
+define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE2-NEXT:    psubb %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    pcmpeqb %xmm2, %xmm4
+; SSE2-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    psllw %xmm2, %xmm5
+; SSE2-NEXT:    pcmpeqd %xmm6, %xmm6
+; SSE2-NEXT:    psllw %xmm2, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; SSE2-NEXT:    pand %xmm5, %xmm6
+; SSE2-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlw %xmm3, %xmm1
+; SSE2-NEXT:    psrlw %xmm3, %xmm2
+; SSE2-NEXT:    psrlw $8, %xmm2
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm6, %xmm2
+; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pshufb %xmm0, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm5 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    psllw %xmm5, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm7, %xmm7
+; SSE41-NEXT:    psllw %xmm5, %xmm7
+; SSE41-NEXT:    pshufb %xmm0, %xmm7
+; SSE41-NEXT:    pand %xmm7, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE41-NEXT:    psubb %xmm2, %xmm5
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,zero,zero,zero,zero,xmm5[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    psrlw %xmm5, %xmm1
+; SSE41-NEXT:    psrlw %xmm5, %xmm6
+; SSE41-NEXT:    pshufb {{.*#+}} xmm6 = xmm6[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE41-NEXT:    pand %xmm1, %xmm6
+; SSE41-NEXT:    por %xmm6, %xmm4
+; SSE41-NEXT:    pcmpeqb %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpsllw %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,zero,zero,zero,zero,xmm5[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm3, %xmm0, %xmm4
+; AVX2-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX2-NEXT:    vpsllw %xmm3, %xmm5, %xmm3
+; AVX2-NEXT:    vpbroadcastb %xmm3, %xmm3
+; AVX2-NEXT:    vpand %xmm3, %xmm4, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw %xmm4, %xmm5, %xmm4
+; AVX2-NEXT:    vpsrlw $8, %xmm4, %xmm4
+; AVX2-NEXT:    vpbroadcastb %xmm4, %xmm4
+; AVX2-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm3, %zmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512VL-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm6, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm5, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v16i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm6, %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm5, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm5, %ymm6, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v16i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %ymm5, %ymm6, %ymm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %ymm4, %ymm1, %ymm1
+; AVX512VLVBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLVBMI2-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLVBMI2-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    vzeroupper
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm2, %xmm0, %xmm4
+; XOPAVX1-NEXT:    vpsubb {{.*}}(%rip), %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpshlb %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; XOPAVX1-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpshlb %xmm2, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpsubb {{.*}}(%rip), %xmm2, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X32-SSE-NEXT:    psubb %xmm2, %xmm3
+; X32-SSE-NEXT:    pxor %xmm4, %xmm4
+; X32-SSE-NEXT:    pcmpeqb %xmm2, %xmm4
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psllw %xmm2, %xmm5
+; X32-SSE-NEXT:    pcmpeqd %xmm6, %xmm6
+; X32-SSE-NEXT:    psllw %xmm2, %xmm6
+; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm5, %xmm6
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm1
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm2
+; X32-SSE-NEXT:    psrlw $8, %xmm2
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm1, %xmm2
+; X32-SSE-NEXT:    por %xmm6, %xmm2
+; X32-SSE-NEXT:    pand %xmm4, %xmm0
+; X32-SSE-NEXT:    pandn %xmm2, %xmm4
+; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <16 x i8> %amt, <16 x i8> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %splat)
+  ret <16 x i8> %res
+}
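+
+; Editorial note: x86 has no 8-bit vector shifts, so the byte tests above
+; shift 16-bit lanes and mask away the bits that crossed byte boundaries; the
+; mask itself is built by applying the same shift to an all-ones vector and
+; re-splatting its low byte. The AVX512 variants instead widen to i16 or i32
+; elements, shift, and truncate back with vpmovwb/vpmovdb.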
+
+;
+; Constant Shifts
+;
+
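+; Editorial note: with immediate amounts (none of which are 0 mod BitWidth
+; here) the zero-amount blend disappears and both shift vectors fold to
+; constants. A worked instance for the v4i32 case below:
+;
+;   fshl(x, y, <4,5,6,7>) = (x << <4,5,6,7>) | (y >> <28,27,26,25>)
+;   x << <4,5,6,7>  ==  x * <16,32,64,128>   ; hence pmuludq/pmulld
+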
+define <2 x i64> @constant_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrlq $60, %xmm2
+; SSE2-NEXT:    psrlq $50, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    psllq $4, %xmm2
+; SSE2-NEXT:    psllq $14, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; SSE2-NEXT:    orpd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrlq $50, %xmm2
+; SSE41-NEXT:    psrlq $60, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psllq $14, %xmm2
+; SSE41-NEXT:    psllq $4, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $50, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrlq $60, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsllq $14, %xmm0, %xmm2
+; AVX1-NEXT:    vpsllq $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvq {{.*}}(%rip), %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
+; X32-SSE-NEXT:    psrlq $60, %xmm2
+; X32-SSE-NEXT:    psrlq $50, %xmm1
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    psllq $4, %xmm2
+; X32-SSE-NEXT:    psllq $14, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> <i64 4, i64 14>)
+  ret <2 x i64> %res
+}
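+
+; Editorial note: without variable 64-bit shifts, SSE2 performs the two
+; per-lane immediate shifts above separately and merges the halves
+; (movsd/pblendw); AVX2 and later instead use vpsllvq/vpsrlvq with the
+; constant amounts loaded from memory.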
+
+define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrld $25, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrld $26, %xmm3
+; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrld $27, %xmm2
+; SSE2-NEXT:    psrld $28, %xmm1
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [16,32,64,128]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrld $25, %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    psrld $27, %xmm3
+; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrld $26, %xmm2
+; SSE41-NEXT:    psrld $28, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $25, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrld $27, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrld $26, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvd {{.*}}(%rip), %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
+; X32-SSE-NEXT:    psrld $25, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    psrld $26, %xmm3
+; X32-SSE-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
+; X32-SSE-NEXT:    psrld $27, %xmm2
+; X32-SSE-NEXT:    psrld $28, %xmm1
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [16,32,64,128]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm2, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm3, %xmm2
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 4, i32 5, i32 6, i32 7>)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; SSE2-NEXT:    pmulhuw %xmm2, %xmm1
+; SSE2-NEXT:    pmullw %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT:    pand %xmm1, %xmm2
+; SSE2-NEXT:    pandn %xmm0, %xmm1
+; SSE2-NEXT:    por %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; SSE41-NEXT:    pmulhuw %xmm2, %xmm1
+; SSE41-NEXT:    pmullw %xmm0, %xmm2
+; SSE41-NEXT:    por %xmm1, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: constant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; AVX-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpmullw %xmm2, %xmm0, %xmm2
+; AVX-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; AVX512F-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmullw %xmm2, %xmm0, %xmm2
+; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; AVX512VL-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmullw %xmm2, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
+; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT:    vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512VBMI2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512VLBW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvw {{.*}}(%rip), %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpshlw {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm2
+; XOP-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; X32-SSE-NEXT:    pmulhuw %xmm2, %xmm1
+; X32-SSE-NEXT:    pmullw %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
+; X32-SSE-NEXT:    pand %xmm1, %xmm2
+; X32-SSE-NEXT:    pandn %xmm0, %xmm1
+; X32-SSE-NEXT:    por %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
+  ret <8 x i16> %res
+}
+
+define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
+; SSE2-NEXT:    pmullw %xmm4, %xmm3
+; SSE2-NEXT:    psrlw $8, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; SSE2-NEXT:    pmullw %xmm5, %xmm1
+; SSE2-NEXT:    psrlw $8, %xmm1
+; SSE2-NEXT:    packuswb %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT:    pmullw %xmm4, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm4, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT:    pmullw %xmm5, %xmm2
+; SSE2-NEXT:    pand %xmm4, %xmm2
+; SSE2-NEXT:    packuswb %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm1, %xmm2
+; SSE2-NEXT:    pandn %xmm0, %xmm1
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [1,128,64,32,16,8,4,2]
+; SSE41-NEXT:    pmullw %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; SSE41-NEXT:    pmullw %xmm4, %xmm3
+; SSE41-NEXT:    psrlw $8, %xmm3
+; SSE41-NEXT:    packuswb %xmm1, %xmm3
+; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE41-NEXT:    pmullw %xmm0, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE41-NEXT:    pmullw %xmm4, %xmm5
+; SSE41-NEXT:    pand %xmm0, %xmm5
+; SSE41-NEXT:    packuswb %xmm1, %xmm5
+; SSE41-NEXT:    por %xmm3, %xmm5
+; SSE41-NEXT:    movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpand %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,128,64,32,16,8,4,2]
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpackuswb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %zmm2, %zmm2
+; AVX512F-NEXT:    vpord %zmm1, %zmm2, %zmm1
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VL-NEXT:    vpord %zmm1, %zmm2, %zmm1
+; AVX512VL-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v16i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm2, %zmm3, %zmm2
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VBMI2-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT:    movw $257, %ax # imm = 0x101
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v16i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VLVBMI2-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLVBMI2-NEXT:    movw $257, %ax # imm = 0x101
+; AVX512VLVBMI2-NEXT:    kmovd %eax, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %xmm0, %xmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    vzeroupper
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm2
+; XOP-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; XOP-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
+; X32-SSE-NEXT:    pmullw %xmm4, %xmm3
+; X32-SSE-NEXT:    psrlw $8, %xmm3
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; X32-SSE-NEXT:    pmullw %xmm5, %xmm1
+; X32-SSE-NEXT:    psrlw $8, %xmm1
+; X32-SSE-NEXT:    packuswb %xmm3, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; X32-SSE-NEXT:    pmullw %xmm4, %xmm3
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; X32-SSE-NEXT:    pand %xmm4, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X32-SSE-NEXT:    pmullw %xmm5, %xmm2
+; X32-SSE-NEXT:    pand %xmm4, %xmm2
+; X32-SSE-NEXT:    packuswb %xmm3, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; X32-SSE-NEXT:    pand %xmm1, %xmm2
+; X32-SSE-NEXT:    pandn %xmm0, %xmm1
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <16 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
+
+define <2 x i64> @splatconstant_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $50, %xmm1
+; SSE-NEXT:    psllq $14, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; AVX-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldq $14, %xmm1, %xmm0, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; XOP-NEXT:    vpsllq $14, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrlq $50, %xmm1
+; X32-SSE-NEXT:    psllq $14, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> <i64 14, i64 14>)
+  ret <2 x i64> %res
+}
+
+define <4 x i32> @splatconstant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrld $28, %xmm1
+; SSE-NEXT:    pslld $4, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX512F-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldd $4, %xmm1, %xmm0, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrld $28, %xmm1, %xmm1
+; XOP-NEXT:    vpslld $4, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrld $28, %xmm1
+; X32-SSE-NEXT:    pslld $4, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 4, i32 4, i32 4, i32 4>)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @splatconstant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $9, %xmm1
+; SSE-NEXT:    psllw $7, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldw $7, %xmm1, %xmm0, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; XOP-NEXT:    vpsllw $7, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrlw $9, %xmm1
+; X32-SSE-NEXT:    psllw $7, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <8 x i16> %res
+}
+
+define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $4, %xmm1
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrlw $4, %xmm1
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    psllw $4, %xmm0
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <16 x i8> %res
+}
diff --git a/test/CodeGen/X86/vector-fshl-256.ll b/test/CodeGen/X86/vector-fshl-256.ll
new file mode 100644
index 0000000..8aeae70
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshl-256.ll
@@ -0,0 +1,2566 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
+declare <32 x i8> @llvm.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>)
+
+;
+; Variable Shifts
+;
+
+define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vpsllq %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm6, %xmm0, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm4, %xmm8, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm7, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vpsubq %xmm2, %xmm8, %xmm6
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm1, %xmm7
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllvq %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [64,64,64,64]
+; AVX2-NEXT:    vpsubq %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpsllvq %ymm4, %ymm0, %ymm5
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512F-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpsllvq %ymm4, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512VL-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VL-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsllvq %ymm4, %ymm0, %ymm5
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512BW-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsllvq %ymm4, %ymm0, %ymm5
+; AVX512VBMI2-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512VBMI2-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsllvq %ymm4, %ymm0, %ymm5
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512VLBW-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpshlq %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlq %xmm2, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [64,64]
+; XOPAVX1-NEXT:    vpsubq %xmm5, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpshlq %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubq %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpshlq %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpsllvq %ymm2, %ymm0, %ymm3
+; XOPAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [64,64,64,64]
+; XOPAVX2-NEXT:    vpsubq %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm3, %xmm8, %xmm6
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm6[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpsrld %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vpsrlq $32, %xmm6, %xmm5
+; AVX1-NEXT:    vpsrld %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm9, %xmm9, %xmm9
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
+; AVX1-NEXT:    vpsrld %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT:    vpsrld %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm8, %xmm5
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrld %xmm6, %xmm1, %xmm6
+; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-NEXT:    vpsrld %xmm7, %xmm1, %xmm7
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
+; AVX1-NEXT:    vpsrld %xmm7, %xmm1, %xmm7
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
+; AVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm6[2,3],xmm1[4,5],xmm6[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT:    vpslld $23, %xmm3, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpmulld %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm6
+; AVX1-NEXT:    vpaddd %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpmulld %xmm5, %xmm0, %xmm5
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vorps %ymm1, %ymm4, %ymm1
+; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllvd %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [32,32,32,32,32,32,32,32]
+; AVX2-NEXT:    vpsubd %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpsllvd %ymm4, %ymm0, %ymm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512F-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpsllvd %ymm4, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VL-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsllvd %ymm4, %ymm0, %ymm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsllvd %ymm4, %ymm0, %ymm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsllvd %ymm4, %ymm0, %ymm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpshld %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshld %xmm2, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; XOPAVX1-NEXT:    vpsubd %xmm5, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpshld %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubd %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpshld %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpsllvd %ymm2, %ymm0, %ymm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [32,32,32,32,32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm3, %xmm8, %xmm5
+; AVX1-NEXT:    vpsllw $12, %xmm5, %xmm6
+; AVX1-NEXT:    vpsllw $4, %xmm5, %xmm5
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpsrlw $8, %xmm7, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm4, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $2, %xmm4, %xmm5
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $1, %xmm4, %xmm5
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubw %xmm2, %xmm8, %xmm5
+; AVX1-NEXT:    vpsllw $12, %xmm5, %xmm6
+; AVX1-NEXT:    vpsllw $4, %xmm5, %xmm5
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm6
+; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $2, %xmm1, %xmm5
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm5
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm8
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX1-NEXT:    vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm7, %xmm7
+; AVX1-NEXT:    vpaddd %xmm6, %xmm7, %xmm7
+; AVX1-NEXT:    vcvttps2dq %xmm7, %xmm7
+; AVX1-NEXT:    vpackusdw %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpmullw %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX1-NEXT:    vpslld $23, %xmm7, %xmm7
+; AVX1-NEXT:    vpaddd %xmm6, %xmm7, %xmm7
+; AVX1-NEXT:    vcvttps2dq %xmm7, %xmm7
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm8, %ymm1, %ymm1
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
+; AVX2-NEXT:    vpsllvd %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrld $16, %ymm4, %ymm4
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX2-NEXT:    vpsllvd %ymm6, %ymm5, %ymm5
+; AVX2-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX2-NEXT:    vpackusdw %ymm4, %ymm5, %ymm4
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %ymm2, %ymm6, %ymm6
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm6[4],ymm3[4],ymm6[5],ymm3[5],ymm6[6],ymm3[6],ymm6[7],ymm3[7],ymm6[12],ymm3[12],ymm6[13],ymm3[13],ymm6[14],ymm3[14],ymm6[15],ymm3[15]
+; AVX2-NEXT:    vpsrlvd %ymm7, %ymm5, %ymm5
+; AVX2-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0],ymm3[0],ymm6[1],ymm3[1],ymm6[2],ymm3[2],ymm6[3],ymm3[3],ymm6[8],ymm3[8],ymm6[9],ymm3[9],ymm6[10],ymm3[10],ymm6[11],ymm3[11]
+; AVX2-NEXT:    vpsrlvd %ymm6, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrld $16, %ymm1, %ymm1
+; AVX2-NEXT:    vpackusdw %ymm5, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512F-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm3, %zmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512VL-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsllvw %ymm4, %ymm0, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpsrlvw %ymm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vptestnmw %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpshlw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlw %xmm2, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; XOPAVX1-NEXT:    vpsubw %xmm5, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpshlw %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubw %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpshlw %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; XOPAVX2-NEXT:    vpshlw %xmm4, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlw %xmm2, %xmm0, %xmm4
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT:    vpsubw %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; XOPAVX2-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; XOPAVX2-NEXT:    vpsubw %xmm5, %xmm6, %xmm5
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm7
+; XOPAVX2-NEXT:    vpshlw %xmm5, %xmm7, %xmm5
+; XOPAVX2-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; XOPAVX2-NEXT:    vpshlw %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm4, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm6
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm3, %xmm9, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $2, %xmm4, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX1-NEXT:    vpand %xmm10, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $1, %xmm4, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm8, %xmm5, %xmm5
+; AVX1-NEXT:    vpsubb %xmm2, %xmm9, %xmm6
+; AVX1-NEXT:    vpsllw $5, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $2, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm10, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddb %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddb %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllw $4, %xmm4, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm3, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllw $2, %xmm4, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm5
+; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm5
+; AVX1-NEXT:    vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm2, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm0, %xmm5
+; AVX1-NEXT:    vpsllw $2, %xmm5, %xmm7
+; AVX1-NEXT:    vpand %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpaddb %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm5
+; AVX1-NEXT:    vpaddb %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm8, %ymm1, %ymm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsllw $5, %ymm2, %ymm3
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm4, %ymm0, %ymm4
+; AVX2-NEXT:    vpsllw $2, %ymm4, %ymm5
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm6
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm6, %ymm6
+; AVX2-NEXT:    vpblendvb %ymm4, %ymm6, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $2, %ymm1, %ymm4
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm4
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX2-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm3
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm4, %ymm0, %ymm4
+; AVX512F-NEXT:    vpsllw $2, %ymm4, %ymm5
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm6
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $2, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpsllw $2, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm6
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm6, %ymm6
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $2, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v32i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v32i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLVBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLVBMI2-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpshlb %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm2, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX1-NEXT:    vpsubb %xmm5, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubb %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpshlb %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqb %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm2, %xmm0, %xmm4
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX2-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; XOPAVX2-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; XOPAVX2-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm7
+; XOPAVX2-NEXT:    vpshlb %xmm5, %xmm7, %xmm5
+; XOPAVX2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
+  ret <32 x i8> %res
+}
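+
+; Note: on subtargets without a native funnel-shift instruction the variable
+; tests above are expected to expand roughly like this scalar sketch (value
+; names are illustrative only, not taken from the generated code):
+;   %a  = and i16 %z, 15              ; modulo-reduce the shift amount
+;   %hi = shl i16 %x, %a
+;   %s  = sub i16 16, %a
+;   %lo = lshr i16 %y, %s             ; poison when %a == 0 (shift by 16)
+;   %or = or i16 %hi, %lo
+;   %z0 = icmp eq i16 %a, 0
+;   %r  = select i1 %z0, i16 %x, i16 %or
+; which is why each sequence above ends in a compare-with-zero and a blend,
+; except on VBMI2 targets where a single vpshldv* instruction suffices.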
+
+;
+; Uniform Variable Shifts
+;
+
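+; The splatvar tests broadcast a single shift amount to every lane, so on
+; AVX2 and later targets legalization can use the scalar-count shift forms
+; (e.g. vpsllq %xmm, %ymm, %ymm) instead of the per-element variable shifts
+; exercised above.
+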
+define <4 x i64> @splatvar_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm2 = xmm2[0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpsllq %xmm2, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpsubq %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpsrlq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq %xmm2, %ymm0, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsrlq %xmm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpsllq %xmm4, %ymm0, %ymm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsrlq %xmm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpsllq %xmm4, %ymm0, %ymm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsrlq %xmm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VL-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsllq %xmm4, %ymm0, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsrlq %xmm4, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsllq %xmm4, %ymm0, %ymm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VBMI2-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlq %xmm4, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsllq %xmm4, %ymm0, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsrlq %xmm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpshldvq %ymm2, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovddup {{.*#+}} xmm2 = xmm2[0,0]
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpsllq %xmm2, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm4
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [64,64]
+; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpsrlq %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsrlq %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; XOPAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpsllq %xmm2, %ymm0, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsrlq %xmm4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <4 x i64> %amt, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %splat)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpslld %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpslld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpsrld %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
+; AVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpslld %xmm3, %ymm0, %ymm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX2-NEXT:    vpsrld %xmm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpslld %xmm5, %ymm0, %ymm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpsrld %xmm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpslld %xmm5, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpsrld %xmm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VL-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpslld %xmm5, %ymm0, %ymm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpsrld %xmm4, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpslld %xmm5, %ymm0, %ymm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpsrld %xmm4, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpslld %xmm5, %ymm0, %ymm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpsrld %xmm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpshldvd %ymm2, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vpslld %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpslld %xmm3, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpsrld %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
+; XOPAVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX2-NEXT:    vpslld %xmm3, %ymm0, %ymm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; XOPAVX2-NEXT:    vpsrld %xmm4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <8 x i32> %amt, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %splat)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpsrlw %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm3, %ymm0, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm0, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm5, %ymm0, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsllw %xmm5, %ymm0, %ymm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm5, %ymm0, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512VLBW-NEXT:    vptestnmw %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpshldvw %ymm2, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vpsllw %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; XOPAVX1-NEXT:    vpsubw %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT:    vpsrlw %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
+; XOPAVX1-NEXT:    vpsrlw %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsllw %xmm3, %ymm0, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <16 x i16> %amt, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %splat)
+  ret <16 x i16> %res
+}
+
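+; x86 has no byte-granularity shift, so for the splatted v32i8 case below the
+; expected sequences shift whole 16-bit words and then mask away the bits that
+; crossed a byte boundary; the mask is built by applying the same shift to an
+; all-ones vector (vpcmpeqd %ymm5, %ymm5, %ymm5) and re-broadcasting one byte
+; of the result.
+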
+define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpsllw %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm9, %xmm9
+; AVX1-NEXT:    vpsllw %xmm4, %xmm9, %xmm7
+; AVX1-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX1-NEXT:    vpand %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm10 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm5, %xmm10, %xmm3
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpsrlw %xmm3, %xmm6, %xmm6
+; AVX1-NEXT:    vpsrlw %xmm3, %xmm9, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpsubb %xmm2, %xmm10, %xmm6
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,zero,zero,zero,zero,xmm6[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw %xmm6, %xmm9, %xmm6
+; AVX1-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm4, %ymm1
+; AVX1-NEXT:    vpcmpeqb %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vpcmpeqb %xmm8, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX2-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX2-NEXT:    vpsllw %xmm3, %ymm5, %ymm3
+; AVX2-NEXT:    vpbroadcastb %xmm3, %ymm3
+; AVX2-NEXT:    vpand %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw %xmm4, %ymm5, %ymm4
+; AVX2-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX2-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX2-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512F-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm5, %ymm3
+; AVX512F-NEXT:    vpbroadcastb %xmm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw %xmm4, %ymm5, %ymm4
+; AVX512F-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512F-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512F-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm5, %ymm3
+; AVX512VL-NEXT:    vpbroadcastb %xmm3, %ymm3
+; AVX512VL-NEXT:    vpand %ymm3, %ymm4, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw %xmm4, %ymm5, %ymm4
+; AVX512VL-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512VL-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v32i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v32i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %zmm5, %zmm6, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLVBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLVBMI2-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; XOPAVX1-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; XOPAVX1-NEXT:    vpshlb %xmm4, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpshlb %xmm2, %xmm0, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm4, %xmm7
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm7, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm5, %ymm1
+; XOPAVX1-NEXT:    vpcomeqb %xmm8, %xmm4, %xmm3
+; XOPAVX1-NEXT:    vpcomeqb %xmm8, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm2, %xmm0, %xmm4
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX2-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; XOPAVX2-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; XOPAVX2-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm7
+; XOPAVX2-NEXT:    vpshlb %xmm5, %xmm7, %xmm5
+; XOPAVX2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <32 x i8> %amt, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %splat)
+  ret <32 x i8> %res
+}
+
+;
+; Constant Shifts
+;
+
+define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsrlq $4, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrlq $14, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrlq $50, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrlq $60, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsllq $60, %xmm2, %xmm3
+; AVX1-NEXT:    vpsllq $50, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsllq $14, %xmm0, %xmm3
+; AVX1-NEXT:    vpsllq $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvq {{.*}}(%rip), %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> <i64 4, i64 14, i64 50, i64 60>)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsrld $21, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrld $23, %xmm2, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrld $22, %xmm2, %xmm4
+; AVX1-NEXT:    vpsrld $24, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpsrld $25, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrld $27, %xmm1, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrld $26, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvd {{.*}}(%rip), %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vpmulhuw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmulhuw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpmullw %xmm4, %xmm0, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vorps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512F-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512VL-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmullw %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512VBMI2-NEXT:    vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VBMI2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX512VBMI2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VLBW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX512VLBW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvw {{.*}}(%rip), %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm3, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; XOPAVX2-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm2
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; XOPAVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; AVX1-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vorps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpsllw $2, %ymm2, %ymm4
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm4
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm1[8],ymm3[8],ymm1[9],ymm3[9],ymm1[10],ymm3[10],ymm1[11],ymm3[11],ymm1[12],ymm3[12],ymm1[13],ymm3[13],ymm1[14],ymm3[14],ymm1[15],ymm3[15],ymm1[24],ymm3[24],ymm1[25],ymm3[25],ymm1[26],ymm3[26],ymm1[27],ymm3[27],ymm1[28],ymm3[28],ymm1[29],ymm3[29],ymm1[30],ymm3[30],ymm1[31],ymm3[31]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[16],ymm3[16],ymm1[17],ymm3[17],ymm1[18],ymm3[18],ymm1[19],ymm3[19],ymm1[20],ymm3[20],ymm1[21],ymm3[21],ymm1[22],ymm3[22],ymm1[23],ymm3[23]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpackuswb %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm1[8],ymm3[8],ymm1[9],ymm3[9],ymm1[10],ymm3[10],ymm1[11],ymm3[11],ymm1[12],ymm3[12],ymm1[13],ymm3[13],ymm1[14],ymm3[14],ymm1[15],ymm3[15],ymm1[24],ymm3[24],ymm1[25],ymm3[25],ymm1[26],ymm3[26],ymm1[27],ymm3[27],ymm1[28],ymm3[28],ymm1[29],ymm3[29],ymm1[30],ymm3[30],ymm1[31],ymm3[31]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[16],ymm3[16],ymm1[17],ymm3[17],ymm1[18],ymm3[18],ymm1[19],ymm3[19],ymm1[20],ymm3[20],ymm1[21],ymm3[21],ymm1[22],ymm3[22],ymm1[23],ymm3[23]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v32i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VBMI2-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512VBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VBMI2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT:    movl $16843009, %eax # imm = 0x1010101
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v32i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512VLVBMI2-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLVBMI2-NEXT:    movl $16843009, %eax # imm = 0x1010101
+; AVX512VLVBMI2-NEXT:    kmovd %eax, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [248,249,250,251,252,253,254,255,248,255,254,253,252,251,250,249]
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [248,249,250,251,252,253,254,255,248,255,254,253,252,251,250,249]
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <32 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
+
+define <4 x i64> @splatconstant_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $50, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpsllq $14, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq $50, %ymm1, %ymm1
+; AVX2-NEXT:    vpsllq $14, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlq $50, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllq $14, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq $50, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllq $14, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlq $50, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllq $14, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlq $50, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllq $14, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlq $50, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllq $14, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldq $14, %ymm1, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpsrlq $50, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlq $50, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpsllq $14, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpsllq $14, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlq $50, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllq $14, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> <i64 14, i64 14, i64 14, i64 14>)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @splatconstant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpslld $4, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld $28, %ymm1, %ymm1
+; AVX2-NEXT:    vpslld $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrld $28, %ymm1, %ymm1
+; AVX512F-NEXT:    vpslld $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld $28, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpslld $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrld $28, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpslld $4, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrld $28, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpslld $4, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrld $28, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpslld $4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldd $4, %ymm1, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpsrld $28, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $28, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpslld $4, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpslld $4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrld $28, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpslld $4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatconstant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlw $9, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $9, %ymm1, %ymm1
+; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $9, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $9, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $9, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlw $9, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $9, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldw $7, %ymm1, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpsrlw $9, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlw $9, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpsllw $7, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlw $9, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllw $7, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllw $4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <32 x i8> %res
+}
diff --git a/test/CodeGen/X86/vector-fshl-512.ll b/test/CodeGen/X86/vector-fshl-512.ll
new file mode 100644
index 0000000..d71ba1c
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshl-512.ll
@@ -0,0 +1,1573 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+
+declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
+declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
+declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)
+
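+; Reference semantics for the intrinsics above (from the LangRef): llvm.fshl
+; concatenates %x:%y into a double-width value, shifts it left by %amt modulo
+; the element bit width bw, and returns the top half. For s = %amt urem bw:
+;   fshl(x, y, s) == (x << s) | (y >> (bw - s))   when s != 0
+;   fshl(x, y, 0) == x
+; Worked example at bw = 8: fshl(0x12, 0x34, 4) = top byte of 0x2340 = 0x23.
+; The s == 0 case is why the non-VBMI2 lowerings below blend %x back in.
+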
+;
+; Variable Shifts
+;
+
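+; Without VBMI2, the per-element variable cases below share one pattern: mask
+; the amount (vpand with bw-1), shift %x left and %y right by the two
+; complementary counts, OR the halves, then use vptestnm* to write %x back
+; into lanes whose masked amount is zero. VBMI2 targets collapse all of this
+; into a single vpshldv* instruction.
+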
+define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpsllvq %zmm4, %zmm0, %zmm5
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512F-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512F-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpsllvq %zmm4, %zmm0, %zmm5
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512VL-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512VL-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VL-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsllvq %zmm4, %zmm0, %zmm5
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512BW-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsllvq %zmm4, %zmm0, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512VLBW-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %amt)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @var_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm0, %zmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512F-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm0, %zmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512VL-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsllvd %zmm4, %zmm0, %zmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsllvd %zmm4, %zmm0, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %amt)
+  ret <16 x i32> %res
+}
+
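+; AVX512F/AVX512VL have no variable 16-bit shifts, so the v32i16 case below is
+; widened: each ymm half is zero-extended to i32 (vpmovzxwd), shifted with
+; vpsllvd/vpsrlvd, and truncated back with vpmovdw, with the zero-amount blend
+; done via vpcmpeqw/vpblendvb. AVX512BW adds vpsllvw/vpsrlvw and operates on
+; the full zmm directly.
+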
+define <32 x i16> @var_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm7 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm8 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm7, %zmm8, %zmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm4, %ymm8, %ymm9
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm9 = ymm9[0],zero,ymm9[1],zero,ymm9[2],zero,ymm9[3],zero,ymm9[4],zero,ymm9[5],zero,ymm9[6],zero,ymm9[7],zero,ymm9[8],zero,ymm9[9],zero,ymm9[10],zero,ymm9[11],zero,ymm9[12],zero,ymm9[13],zero,ymm9[14],zero,ymm9[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm9, %zmm2, %zmm2
+; AVX512F-NEXT:    vpord %zmm2, %zmm7, %zmm2
+; AVX512F-NEXT:    vpmovdw %zmm2, %ymm2
+; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX512F-NEXT:    vpcmpeqw %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm5, %zmm4
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm8, %ymm5
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm5, %zmm3, %zmm3
+; AVX512F-NEXT:    vpord %zmm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vpmovdw %zmm3, %ymm3
+; AVX512F-NEXT:    vpcmpeqw %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm7 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm8 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm7, %zmm8, %zmm7
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm4, %ymm8, %ymm9
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm9 = ymm9[0],zero,ymm9[1],zero,ymm9[2],zero,ymm9[3],zero,ymm9[4],zero,ymm9[5],zero,ymm9[6],zero,ymm9[7],zero,ymm9[8],zero,ymm9[9],zero,ymm9[10],zero,ymm9[11],zero,ymm9[12],zero,ymm9[13],zero,ymm9[14],zero,ymm9[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm9, %zmm2, %zmm2
+; AVX512VL-NEXT:    vpord %zmm2, %zmm7, %zmm2
+; AVX512VL-NEXT:    vpmovdw %zmm2, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpcmpeqw %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm5, %zmm4
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm8, %ymm5
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm5, %zmm3, %zmm3
+; AVX512VL-NEXT:    vpord %zmm3, %zmm4, %zmm3
+; AVX512VL-NEXT:    vpmovdw %zmm3, %ymm3
+; AVX512VL-NEXT:    vpcmpeqw %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %zmm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %zmm4, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %amt)
+  ret <32 x i16> %res
+}
+
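+; No x86 target has variable 8-bit shifts, so the v64i8 case below emulates
+; them bit-serially: the amount is moved up into each byte's sign bit
+; (vpsllw $5), then conditional shift-by-4/2/1 steps are selected with
+; vpblendvb (AVX512F/VL) or with vpmovb2m masks and masked moves (AVX512BW
+; and later), masking off the bits that cross byte boundaries at each step.
+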
+define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm6, %ymm7, %ymm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm9
+; AVX512F-NEXT:    vpsllw $5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm8, %ymm0, %ymm8
+; AVX512F-NEXT:    vpsllw $2, %ymm8, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm4, %ymm11, %ymm11
+; AVX512F-NEXT:    vpaddb %ymm10, %ymm10, %ymm10
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm11, %ymm8, %ymm8
+; AVX512F-NEXT:    vpaddb %ymm8, %ymm8, %ymm11
+; AVX512F-NEXT:    vpaddb %ymm10, %ymm10, %ymm10
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm11, %ymm8, %ymm10
+; AVX512F-NEXT:    vpsrlw $4, %ymm2, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm8, %ymm11, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm12 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %ymm9, %ymm12, %ymm13
+; AVX512F-NEXT:    vpsllw $5, %ymm13, %ymm13
+; AVX512F-NEXT:    vpblendvb %ymm13, %ymm11, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $2, %ymm2, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm14 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpand %ymm14, %ymm11, %ymm11
+; AVX512F-NEXT:    vpaddb %ymm13, %ymm13, %ymm13
+; AVX512F-NEXT:    vpblendvb %ymm13, %ymm11, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm15 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm15, %ymm11, %ymm11
+; AVX512F-NEXT:    vpaddb %ymm13, %ymm13, %ymm13
+; AVX512F-NEXT:    vpblendvb %ymm13, %ymm11, %ymm2, %ymm2
+; AVX512F-NEXT:    vpor %ymm2, %ymm10, %ymm2
+; AVX512F-NEXT:    vpxor %xmm10, %xmm10, %xmm10
+; AVX512F-NEXT:    vpcmpeqb %ymm10, %ymm9, %ymm9
+; AVX512F-NEXT:    vpblendvb %ymm9, %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw $5, %ymm5, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm7
+; AVX512F-NEXT:    vpand %ymm4, %ymm7, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $4, %ymm3, %ymm4
+; AVX512F-NEXT:    vpand %ymm8, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsubb %ymm5, %ymm12, %ymm6
+; AVX512F-NEXT:    vpsllw $5, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $2, %ymm3, %ymm4
+; AVX512F-NEXT:    vpand %ymm14, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm4
+; AVX512F-NEXT:    vpand %ymm15, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpcmpeqb %ymm10, %ymm5, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %ymm8, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm6, %ymm0, %ymm6
+; AVX512VL-NEXT:    vpsllw $2, %ymm6, %ymm10
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm11, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpaddb %ymm9, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm10, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm10
+; AVX512VL-NEXT:    vpaddb %ymm9, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm10, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpsrlw $4, %ymm2, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm10 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm10, %ymm9, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm12 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %ymm4, %ymm12, %ymm13
+; AVX512VL-NEXT:    vpsllw $5, %ymm13, %ymm13
+; AVX512VL-NEXT:    vpblendvb %ymm13, %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $2, %ymm2, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm14 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT:    vpand %ymm14, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpaddb %ymm13, %ymm13, %ymm13
+; AVX512VL-NEXT:    vpblendvb %ymm13, %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm15 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-NEXT:    vpand %ymm15, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpaddb %ymm13, %ymm13, %ymm13
+; AVX512VL-NEXT:    vpblendvb %ymm13, %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpor %ymm2, %ymm6, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpand %ymm8, %ymm5, %ymm4
+; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm7
+; AVX512VL-NEXT:    vpand %ymm11, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm7
+; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $4, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpand %ymm10, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsubb %ymm4, %ymm12, %ymm7
+; AVX512VL-NEXT:    vpsllw $5, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $2, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpand %ymm14, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $1, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpand %ymm15, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %zmm4, %zmm5, %zmm5
+; AVX512BW-NEXT:    vpsllw $5, %zmm5, %zmm5
+; AVX512BW-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512BW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512BW-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k2}
+; AVX512BW-NEXT:    vpsrlw $2, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512BW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512BW-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512BW-NEXT:    vpaddb %zmm4, %zmm4, %zmm5
+; AVX512BW-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512BW-NEXT:    vpblendmb %zmm4, %zmm0, %zmm4 {%k2}
+; AVX512BW-NEXT:    vpsllw $2, %zmm4, %zmm6
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm6, %zmm6
+; AVX512BW-NEXT:    vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm5, %zmm5, %zmm5
+; AVX512BW-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512BW-NEXT:    vpaddb %zmm4, %zmm4, %zmm4 {%k1}
+; AVX512BW-NEXT:    vporq %zmm1, %zmm4, %zmm1
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %zmm4, %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpsllw $5, %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512VBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k2}
+; AVX512VBMI2-NEXT:    vpsrlw $2, %zmm1, %zmm5
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vpsrlw $1, %zmm1, %zmm5
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512VBMI2-NEXT:    vpaddb %zmm4, %zmm4, %zmm5
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512VBMI2-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VBMI2-NEXT:    vpblendmb %zmm4, %zmm0, %zmm4 {%k2}
+; AVX512VBMI2-NEXT:    vpsllw $2, %zmm4, %zmm6
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm6, %zmm6
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512VBMI2-NEXT:    vpaddb %zmm5, %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512VBMI2-NEXT:    vpaddb %zmm4, %zmm4, %zmm4 {%k1}
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm4, %zmm1
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %zmm4, %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512VLBW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k2}
+; AVX512VLBW-NEXT:    vpsrlw $2, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vpsrlw $1, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512VLBW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vpaddb %zmm4, %zmm4, %zmm5
+; AVX512VLBW-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vpblendmb %zmm4, %zmm0, %zmm4 {%k2}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm4, %zmm6
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm6, %zmm6
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm5, %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm4, %zmm4, %zmm4 {%k1}
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm4, %zmm1
+; AVX512VLBW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %zmm4, %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpsllw $5, %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512VLVBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k2}
+; AVX512VLVBMI2-NEXT:    vpsrlw $2, %zmm1, %zmm5
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vpsrlw $1, %zmm1, %zmm5
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm5, %zmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm4, %zmm4, %zmm5
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512VLVBMI2-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vpblendmb %zmm4, %zmm0, %zmm4 {%k2}
+; AVX512VLVBMI2-NEXT:    vpsllw $2, %zmm4, %zmm6
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm6, %zmm6
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm5, %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm5, %k1
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm4, %zmm4, %zmm4 {%k1}
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm4, %zmm1
+; AVX512VLVBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt)
+  ret <64 x i8> %res
+}
+
+;
+; Uniform Variable Shifts
+;
+
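+; When the amount is a splat of one element, every lane shifts by the same
+; count, so the lowerings below use the scalar-count shift forms (e.g.
+; vpsllq %xmm4, %zmm0) instead of the per-element vpsllv*/vpsrlv* variants;
+; the zero-amount select is still required. VBMI2 targets simply broadcast
+; the amount and reuse vpshldv*.
+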
+define <8 x i64> @splatvar_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpsllq %xmm4, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsrlq %xmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpsllq %xmm4, %zmm0, %zmm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsrlq %xmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VL-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsllq %xmm4, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsrlq %xmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsllq %xmm4, %zmm0, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsrlq %xmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpshldvq %zmm2, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <8 x i64> %amt, <8 x i64> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %splat)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpslld %xmm5, %zmm0, %zmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpsrld %xmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpslld %xmm5, %zmm0, %zmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpsrld %xmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512VL-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpslld %xmm5, %zmm0, %zmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpsrld %xmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpslld %xmm5, %zmm0, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpsrld %xmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpord %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpshldvd %zmm2, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <16 x i32> %amt, <16 x i32> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %splat)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm4, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm5, %ymm0, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm4, %xmm7, %xmm7
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpor %ymm2, %ymm6, %ymm2
+; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512F-NEXT:    vpcmpeqw %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm5, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsrlw %xmm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm4, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm5, %ymm0, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm7 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm4, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpor %ymm2, %ymm6, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpcmpeqw %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm5, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsrlw %xmm7, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm5, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm5, %zmm0, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpshldvw %zmm2, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <32 x i16> %amt, <32 x i16> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %splat)
+  ret <32 x i16> %res
+}
+
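+; The splatted v64i8 case below still has no byte-granular shift, so it
+; shifts whole words and masks away the bits that bled across byte
+; boundaries; the mask is built by applying the same shift to an all-ones
+; register (vpcmpeqd / vpternlogd $255) and broadcasting its surviving byte.
+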
+define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm5, %ymm0, %ymm6
+; AVX512F-NEXT:    vpcmpeqd %ymm9, %ymm9, %ymm9
+; AVX512F-NEXT:    vpsllw %xmm5, %ymm9, %ymm8
+; AVX512F-NEXT:    vpbroadcastb %xmm8, %ymm8
+; AVX512F-NEXT:    vpand %ymm8, %ymm6, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm4, %xmm7, %xmm7
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,zero,zero,zero,zero,xmm7[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw %xmm7, %ymm9, %ymm9
+; AVX512F-NEXT:    vpsrlw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpbroadcastb %xmm9, %ymm9
+; AVX512F-NEXT:    vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT:    vpor %ymm2, %ymm6, %ymm2
+; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512F-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm5, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw %xmm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm9, %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm5, %ymm0, %ymm6
+; AVX512VL-NEXT:    vpcmpeqd %ymm9, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpsllw %xmm5, %ymm9, %ymm8
+; AVX512VL-NEXT:    vpbroadcastb %xmm8, %ymm8
+; AVX512VL-NEXT:    vpand %ymm8, %ymm6, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm4, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,zero,zero,zero,zero,xmm7[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw %xmm7, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpsrlw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpbroadcastb %xmm9, %ymm9
+; AVX512VL-NEXT:    vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpor %ymm2, %ymm6, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm5, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw %xmm7, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpand %ymm9, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm5, %zmm0, %zmm6
+; AVX512BW-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512BW-NEXT:    vpsllw %xmm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512BW-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw %xmm4, %zmm7, %zmm4
+; AVX512BW-NEXT:    vpsrlw $8, %zmm4, %zmm4
+; AVX512BW-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsllw %xmm5, %zmm0, %zmm6
+; AVX512VBMI2-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512VBMI2-NEXT:    vpsllw %xmm5, %zmm7, %zmm5
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpsrlw %xmm4, %zmm7, %zmm4
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm4, %zmm4
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512VBMI2-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm5, %zmm0, %zmm6
+; AVX512VLBW-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512VLBW-NEXT:    vpsllw %xmm5, %zmm7, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512VLBW-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsrlw %xmm4, %zmm7, %zmm4
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512VLBW-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLVBMI2-NEXT:    vpsllw %xmm5, %zmm0, %zmm6
+; AVX512VLVBMI2-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512VLVBMI2-NEXT:    vpsllw %xmm5, %zmm7, %zmm5
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLVBMI2-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpsrlw %xmm4, %zmm7, %zmm4
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm5, %zmm1
+; AVX512VLVBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <64 x i8> %amt, <64 x i8> undef, <64 x i32> zeroinitializer
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %splat)
+  ret <64 x i8> %res
+}
+
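+; Note on the splatvar lowerings above: x86 has no vector byte shifts, so the
+; byte elements are shifted at word granularity with vpsllw/vpsrlw and the
+; bits that leak across byte boundaries are masked off. The mask is computed
+; by shifting an all-ones register by the same amount and broadcasting its
+; low byte; "vpternlogd $255, %zmm7, %zmm7, %zmm7" is the idiomatic way to
+; materialize that all-ones value without a constant-pool load (truth table
+; 0xFF is identically 1). The trailing vptestnmb/vmovdqu8 pair restores x in
+; lanes whose masked amount is zero.
+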
+;
+; Constant Shifts
+;
+
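+; By the llvm.fshl definition, for element width w and constant amount c the
+; result is (x << (c % w)) | (y >> (w - c % w)) when c % w != 0, and plain x
+; when c % w == 0. The latter case is why the v32i16 runs below end with a
+; kmov of 0x10001 (lanes 0 and 16 carry amount 0) blending the original x
+; back in. VBMI2 targets fold the whole expansion into a single variable
+; double-shift (vpshldvq/vpshldvd/vpshldvw).
+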
+define <8 x i64> @constant_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VL-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldvq {{.*}}(%rip), %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvq {{.*}}(%rip), %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @constant_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldvd {{.*}}(%rip), %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvd {{.*}}(%rip), %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512F-NEXT:    vpmulhuw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm0, %ymm5
+; AVX512F-NEXT:    vpor %ymm2, %ymm5, %ymm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vpmulhuw %ymm4, %ymm3, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm1, %ymm3
+; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512VL-NEXT:    vpmulhuw %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpor %ymm2, %ymm5, %ymm2
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512VL-NEXT:    vpmulhuw %ymm4, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm2
+; AVX512BW-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512BW-NEXT:    movl $65537, %eax # imm = 0x10001
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldvw {{.*}}(%rip), %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm2
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512VLBW-NEXT:    movl $65537, %eax # imm = 0x10001
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldvw {{.*}}(%rip), %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm0, %ymm4
+; AVX512F-NEXT:    vpsllw $2, %ymm4, %ymm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm8, %ymm7, %ymm7
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm9
+; AVX512F-NEXT:    vpblendvb %ymm9, %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
+; AVX512F-NEXT:    vpaddb %ymm9, %ymm9, %ymm10
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm2[8],ymm7[8],ymm2[9],ymm7[9],ymm2[10],ymm7[10],ymm2[11],ymm7[11],ymm2[12],ymm7[12],ymm2[13],ymm7[13],ymm2[14],ymm7[14],ymm2[15],ymm7[15],ymm2[24],ymm7[24],ymm2[25],ymm7[25],ymm2[26],ymm7[26],ymm2[27],ymm7[27],ymm2[28],ymm7[28],ymm2[29],ymm7[29],ymm2[30],ymm7[30],ymm2[31],ymm7[31]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
+; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm12, %ymm11, %ymm11
+; AVX512F-NEXT:    vpsrlw $8, %ymm11, %ymm11
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm7[0],ymm2[1],ymm7[1],ymm2[2],ymm7[2],ymm2[3],ymm7[3],ymm2[4],ymm7[4],ymm2[5],ymm7[5],ymm2[6],ymm7[6],ymm2[7],ymm7[7],ymm2[16],ymm7[16],ymm2[17],ymm7[17],ymm2[18],ymm7[18],ymm2[19],ymm7[19],ymm2[20],ymm7[20],ymm2[21],ymm7[21],ymm2[22],ymm7[22],ymm2[23],ymm7[23]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm13, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512F-NEXT:    vpackuswb %ymm11, %ymm2, %ymm2
+; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512F-NEXT:    vpand %ymm8, %ymm5, %ymm5
+; AVX512F-NEXT:    vpblendvb %ymm9, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm3[8],ymm7[8],ymm3[9],ymm7[9],ymm3[10],ymm7[10],ymm3[11],ymm7[11],ymm3[12],ymm7[12],ymm3[13],ymm7[13],ymm3[14],ymm7[14],ymm3[15],ymm7[15],ymm3[24],ymm7[24],ymm3[25],ymm7[25],ymm3[26],ymm7[26],ymm3[27],ymm7[27],ymm3[28],ymm7[28],ymm3[29],ymm7[29],ymm3[30],ymm7[30],ymm3[31],ymm7[31]
+; AVX512F-NEXT:    vpmullw %ymm12, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm7[0],ymm3[1],ymm7[1],ymm3[2],ymm7[2],ymm3[3],ymm7[3],ymm3[4],ymm7[4],ymm3[5],ymm7[5],ymm3[6],ymm7[6],ymm3[7],ymm7[7],ymm3[16],ymm7[16],ymm3[17],ymm7[17],ymm3[18],ymm7[18],ymm3[19],ymm7[19],ymm3[20],ymm7[20],ymm3[21],ymm7[21],ymm3[22],ymm7[22],ymm3[23],ymm7[23]
+; AVX512F-NEXT:    vpmullw %ymm13, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpackuswb %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpsllw $2, %ymm4, %ymm7
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm8, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
+; AVX512VL-NEXT:    vpaddb %ymm9, %ymm9, %ymm10
+; AVX512VL-NEXT:    vpblendvb %ymm10, %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm7, %ymm7
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
+; AVX512VL-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm11, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpsrlw $8, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX512VL-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm12, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpackuswb %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpand %ymm8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm10, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpmullw %ymm11, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpmullw %ymm12, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpackuswb %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT:    vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpsllw $2, %zmm3, %zmm4
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT:    vpaddb %zmm3, %zmm3, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vporq %zmm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512BW-NEXT:    kmovq %rax, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VBMI2-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
+; AVX512VBMI2-NEXT:    vpsllw $2, %zmm3, %zmm4
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512VBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VBMI2-NEXT:    vpaddb %zmm3, %zmm3, %zmm3 {%k1}
+; AVX512VBMI2-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm3, %zmm1
+; AVX512VBMI2-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512VBMI2-NEXT:    kmovq %rax, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm3, %zmm4
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm3, %zmm3, %zmm3 {%k1}
+; AVX512VLBW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm3, %zmm1
+; AVX512VLBW-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512VLBW-NEXT:    kmovq %rax, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLVBMI2-NEXT:    vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
+; AVX512VLVBMI2-NEXT:    vpsllw $2, %zmm3, %zmm4
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm3, %zmm3, %zmm3 {%k1}
+; AVX512VLVBMI2-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm3, %zmm1
+; AVX512VLVBMI2-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512VLVBMI2-NEXT:    kmovq %rax, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <64 x i8> %res
+}
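+; In constant_funnnel_v64i8 the amounts cycle 0,1,...,7,8,7,...,1, and both
+; 0 and 8 reduce to 0 mod 8, so the masked runs finish with a byte blend
+; under 0x0101010101010101 (one bit per eight lanes, i.e. lanes 0,8,16,...)
+; that restores x wherever the effective amount is zero.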
+
+;
+; Uniform Constant Shifts
+;
+
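+; Worked example for the first test below, with w = 64 and c = 14:
+; fshl(x, y, 14) = (x << 14) | (y >> (64 - 14)) = (x << 14) | (y >> 50),
+; which is exactly the vpsllq $14 / vpsrlq $50 / vporq sequence; VBMI2
+; targets instead issue the double-shift vpshldq $14 directly.
+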
+define <8 x i64> @splatconstant_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlq $50, %zmm1, %zmm1
+; AVX512F-NEXT:    vpsllq $14, %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq $50, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpsllq $14, %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlq $50, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllq $14, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldq $14, %zmm1, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlq $50, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllq $14, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldq $14, %zmm1, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @splatconstant_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrld $28, %zmm1, %zmm1
+; AVX512F-NEXT:    vpslld $4, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld $28, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpslld $4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrld $28, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpslld $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldd $4, %zmm1, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrld $28, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpslld $4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldd $4, %zmm1, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @splatconstant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $9, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $9, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsllw $7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $9, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $9, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsllw $7, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $9, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $7, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshldw $7, %zmm1, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $9, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $7, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshldw $7, %zmm1, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $4, %ymm3, %ymm2
+; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $4, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <64 x i8> %res
+}
diff --git a/test/CodeGen/X86/vector-fshl-rot-128.ll b/test/CodeGen/X86/vector-fshl-rot-128.ll
new file mode 100644
index 0000000..2e12eea
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshl-rot-128.ll
@@ -0,0 +1,1865 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+; Just one 32-bit run to make sure we do reasonable things for i64 cases.
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE2
+
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
+declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
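+
+; All of the tests in this file pass the same value for both fshl data
+; operands, and fshl(x, x, amt) is a rotate-left by amt % w. That is why
+; targets with native rotates collapse each test to a single instruction
+; (vprolv[dq] on AVX512VL, vprot[bwdq] on XOP) while the SSE/AVX runs
+; expand to a shift/shift/or sequence.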
+
+;
+; Variable Shifts
+;
+
+define <2 x i64> @var_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [63,63]
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    psubq %xmm1, %xmm3
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psllq %xmm1, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    psllq %xmm1, %xmm5
+; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
+; SSE2-NEXT:    pand %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psrlq %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE2-NEXT:    psrlq %xmm2, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    orpd %xmm5, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [63,63]
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    psubq %xmm1, %xmm3
+; SSE41-NEXT:    pand %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    psllq %xmm1, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    psllq %xmm1, %xmm5
+; SSE41-NEXT:    pblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT:    pand %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlq %xmm3, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE41-NEXT:    psrlq %xmm2, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpsllvq %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvq %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [63,0,63,0]
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    psubq %xmm1, %xmm3
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
+; X32-SSE-NEXT:    psllq %xmm1, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psllq %xmm1, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
+; X32-SSE-NEXT:    pand %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlq %xmm3, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psrlq %xmm2, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> %amt)
+  ret <2 x i64> %res
+}
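+; The SSE2/SSE41 expansions above need two psllq/psrlq pairs because those
+; instructions shift both lanes by the single amount held in the low
+; quadword; shuffling the amount vector and blending the two partial results
+; recovers a per-lane variable shift. The psubq/pand pair encodes the rotate
+; identity rol(x, a) = (x << (a & 63)) | (x >> (-a & 63)).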
+
+define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pslld $23, %xmm1
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pslld $23, %xmm1
+; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pmuludq %xmm2, %xmm3
+; SSE41-NEXT:    pmuludq %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    pslld $23, %xmm1
+; X32-SSE-NEXT:    paddd {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    cvttps2dq %xmm1, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm1, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %amt)
+  ret <4 x i32> %res
+}
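+; The SSE2 lowering above is a multiply trick: pslld $23 followed by
+; paddd 1065353216 (0x3F800000, float 1.0) builds 2.0^amt per lane as a
+; float, cvttps2dq converts it back to the integer 1 << amt, and each 64-bit
+; pmuludq product then holds x << amt in its low dword and x >> (32 - amt)
+; in its high dword, so shuffling the two halves together and por-ing them
+; completes the rotate.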
+
+define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT:    pslld $23, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; SSE2-NEXT:    paddd %xmm4, %xmm3
+; SSE2-NEXT:    cvttps2dq %xmm3, %xmm3
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT:    pslld $23, %xmm1
+; SSE2-NEXT:    paddd %xmm4, %xmm1
+; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pmulhuw %xmm1, %xmm2
+; SSE2-NEXT:    pmullw %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE41-NEXT:    pslld $23, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; SSE41-NEXT:    paddd %xmm3, %xmm1
+; SSE41-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE41-NEXT:    pslld $23, %xmm2
+; SSE41-NEXT:    paddd %xmm3, %xmm2
+; SSE41-NEXT:    cvttps2dq %xmm2, %xmm2
+; SSE41-NEXT:    packusdw %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    pmulhuw %xmm2, %xmm1
+; SSE41-NEXT:    pmullw %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512F-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512F-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512VL-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512VL-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllvw %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512VLBW-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X32-SSE-NEXT:    pslld $23, %xmm3
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; X32-SSE-NEXT:    paddd %xmm4, %xmm3
+; X32-SSE-NEXT:    cvttps2dq %xmm3, %xmm3
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; X32-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X32-SSE-NEXT:    pslld $23, %xmm1
+; X32-SSE-NEXT:    paddd %xmm4, %xmm1
+; X32-SSE-NEXT:    cvttps2dq %xmm1, %xmm1
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    pmulhuw %xmm1, %xmm2
+; X32-SSE-NEXT:    pmullw %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> %amt)
+  ret <8 x i16> %res
+}
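+; Same multiply idea at i16 granularity: once 1 << amt is materialized per
+; lane, pmullw yields the low half of each 32-bit product (x << amt) and
+; pmulhuw the high half (x >> (16 - amt)), so a single por finishes the
+; rotate.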
+
+define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    psllw $5, %xmm1
+; SSE2-NEXT:    pxor %xmm0, %xmm0
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psrlw $4, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    psllw $4, %xmm5
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    pand %xmm3, %xmm5
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm5, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    psrlw $6, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psllw $2, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm2, %xmm4
+; SSE2-NEXT:    paddb %xmm1, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    paddb %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psrlw $7, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm3, %xmm4
+; SSE2-NEXT:    paddb %xmm1, %xmm1
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm4
+; SSE2-NEXT:    pandn %xmm2, %xmm0
+; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $4, %xmm0
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    psllw $4, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    por %xmm0, %xmm3
+; SSE41-NEXT:    psllw $5, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $6, %xmm0
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    psllw $2, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    por %xmm0, %xmm3
+; SSE41-NEXT:    paddb %xmm2, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    paddb %xmm1, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    psrlw $7, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    por %xmm0, %xmm3
+; SSE41-NEXT:    paddb %xmm2, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm3
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $6, %xmm0, %xmm2
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpsllw $2, %xmm0, %xmm3
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vpsrlw $7, %xmm0, %xmm3
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm3, %zmm0, %zmm3
+; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm3, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm3, %zmm0, %zmm3
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm3, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm3, %ymm0, %ymm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    psllw $5, %xmm1
+; X32-SSE-NEXT:    pxor %xmm0, %xmm0
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
+; X32-SSE-NEXT:    psrlw $4, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm5
+; X32-SSE-NEXT:    psllw $4, %xmm5
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm5
+; X32-SSE-NEXT:    por %xmm4, %xmm5
+; X32-SSE-NEXT:    pand %xmm3, %xmm5
+; X32-SSE-NEXT:    pandn %xmm2, %xmm3
+; X32-SSE-NEXT:    por %xmm5, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm2
+; X32-SSE-NEXT:    psrlw $6, %xmm2
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X32-SSE-NEXT:    psllw $2, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    por %xmm2, %xmm4
+; X32-SSE-NEXT:    paddb %xmm1, %xmm1
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm4
+; X32-SSE-NEXT:    pandn %xmm3, %xmm2
+; X32-SSE-NEXT:    por %xmm4, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm3
+; X32-SSE-NEXT:    paddb %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
+; X32-SSE-NEXT:    psrlw $7, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    por %xmm3, %xmm4
+; X32-SSE-NEXT:    paddb %xmm1, %xmm1
+; X32-SSE-NEXT:    pcmpgtb %xmm1, %xmm0
+; X32-SSE-NEXT:    pand %xmm0, %xmm4
+; X32-SSE-NEXT:    pandn %xmm2, %xmm0
+; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> %amt)
+  ret <16 x i8> %res
+}
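+
+; NOTE: a sketch of the strategy visible in the SSE checks above: with a
+; variable per-byte amount and no byte-granular shifts, the rotate is built
+; from three conditional steps (rotate-by-4, rotate-by-2, rotate-by-1), each
+; selected per byte by blending on the amount's sign bit after "psllw $5"
+; moves amount bit 2 (then 1, then 0 via paddb doubling) into bit 7.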
+
+;
+; Uniform Variable Shifts
+;
+
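+; NOTE: in these tests the amount is splatted from element 0 of %amt, so
+; targets can hoist it into a single uniform shift count. For reference (a
+; sketch, not part of the autogenerated checks): fshl(x, x, s) is the rotate
+; rotl(x, s mod bitwidth), e.g. on i32, fshl(0x80000001, 0x80000001, 1)
+; yields 0x00000003.
+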
+define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind {
+; SSE-LABEL: splatvar_funnnel_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [63,63]
+; SSE-NEXT:    pxor %xmm3, %xmm3
+; SSE-NEXT:    psubq %xmm1, %xmm3
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    psllq %xmm1, %xmm4
+; SSE-NEXT:    pand %xmm2, %xmm3
+; SSE-NEXT:    psrlq %xmm3, %xmm0
+; SSE-NEXT:    por %xmm4, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpsllq %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512F-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512VL-NEXT:    vprolvq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512BW-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vprolvq %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm1, %xmm1
+; XOPAVX2-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [63,0,63,0]
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    psubq %xmm1, %xmm3
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
+; X32-SSE-NEXT:    psllq %xmm1, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psllq %xmm1, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
+; X32-SSE-NEXT:    pand %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlq %xmm3, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psrlq %xmm2, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <2 x i64> %amt, <2 x i64> undef, <2 x i32> zeroinitializer
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> %splat)
+  ret <2 x i64> %res
+}
+
+define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $31, %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pslld %xmm1, %xmm2
+; SSE2-NEXT:    movl $32, %ecx
+; SSE2-NEXT:    subl %eax, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    psrld %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pslld %xmm2, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [32,32,32,32]
+; SSE41-NEXT:    psubd %xmm1, %xmm2
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT:    psrld %xmm1, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpslld %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpslld %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512F-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512VL-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512BW-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm1, %xmm1
+; XOPAVX2-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movd %xmm1, %eax
+; X32-SSE-NEXT:    andl $31, %eax
+; X32-SSE-NEXT:    movd %eax, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    pslld %xmm1, %xmm2
+; X32-SSE-NEXT:    movl $32, %ecx
+; X32-SSE-NEXT:    subl %eax, %ecx
+; X32-SSE-NEXT:    movd %ecx, %xmm1
+; X32-SSE-NEXT:    psrld %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <4 x i32> %amt, <4 x i32> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %splat)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    psubw %xmm1, %xmm2
+; SSE2-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    psllw %xmm1, %xmm3
+; SSE2-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlw %xmm2, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    psllw %xmm2, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT:    psubw %xmm1, %xmm2
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; SSE41-NEXT:    psrlw %xmm1, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_funnnel_v8i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastw %xmm1, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsllw %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm1, %xmm1
+; XOPAVX2-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT:    psubw %xmm1, %xmm2
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    psllw %xmm1, %xmm3
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlw %xmm2, %xmm0
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> %splat)
+  ret <8 x i16> %res
+}
+
+define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE2-NEXT:    psubb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psllw %xmm3, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm5
+; SSE2-NEXT:    psllw %xmm3, %xmm5
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlw %xmm2, %xmm0
+; SSE2-NEXT:    psrlw %xmm2, %xmm4
+; SSE2-NEXT:    psrlw $8, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    pshufb %xmm3, %xmm1
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm4 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psllw %xmm4, %xmm2
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm5
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm6
+; SSE41-NEXT:    psllw %xmm4, %xmm6
+; SSE41-NEXT:    pshufb %xmm3, %xmm6
+; SSE41-NEXT:    pand %xmm6, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE41-NEXT:    psubb %xmm1, %xmm3
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm1 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    psrlw %xmm1, %xmm0
+; SSE41-NEXT:    psrlw %xmm1, %xmm5
+; SSE41-NEXT:    pshufb {{.*#+}} xmm5 = xmm5[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE41-NEXT:    pand %xmm0, %xmm5
+; SSE41-NEXT:    por %xmm5, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsllw %xmm2, %xmm4, %xmm2
+; AVX2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm3, %zmm0, %zmm3
+; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm3, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm3, %zmm0, %zmm3
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm3, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm3, %ymm0, %ymm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
+; XOPAVX2-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X32-SSE-NEXT:    psubb %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psllw %xmm3, %xmm1
+; X32-SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; X32-SSE-NEXT:    pcmpeqd %xmm5, %xmm5
+; X32-SSE-NEXT:    psllw %xmm3, %xmm5
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm3, %xmm1
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlw %xmm2, %xmm0
+; X32-SSE-NEXT:    psrlw %xmm2, %xmm4
+; X32-SSE-NEXT:    psrlw $8, %xmm4
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <16 x i8> %amt, <16 x i8> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> %splat)
+  ret <16 x i8> %res
+}
+
+;
+; Constant Shifts
+;
+
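+; NOTE: the amounts here are build-vector constants, so targets without
+; variable rotates can lower to multiplies: per lane,
+; rotl(x, c) = (x * 2^c) | (x u>> (w - c)), and pmullw/pmulhuw by
+; [1,2,4,...] compute exactly those low/high halves in the v8i16 checks
+; below before they are OR'd together.
+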
+define <2 x i64> @constant_funnnel_v2i64(<2 x i64> %x) nounwind {
+; SSE2-LABEL: constant_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psrlq $60, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    psrlq $50, %xmm2
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psllq $4, %xmm1
+; SSE2-NEXT:    psllq $14, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    orpd %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlq $50, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psrlq $60, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psllq $14, %xmm1
+; SSE41-NEXT:    psllq $4, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $50, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrlq $60, %xmm0, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpsllq $14, %xmm0, %xmm2
+; AVX1-NEXT:    vpsllq $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,14]
+; AVX512F-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,14]
+; AVX512BW-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlq $60, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    psrlq $50, %xmm2
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psllq $4, %xmm1
+; X32-SSE-NEXT:    psllq $14, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> <i64 4, i64 14>)
+  ret <2 x i64> %res
+}
+
+define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
+; SSE2-LABEL: constant_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [16,32,64,128]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [16,32,64,128]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pmuludq %xmm2, %xmm3
+; SSE41-NEXT:    pmuludq %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [16,32,64,128]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
+; AVX512F-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
+; AVX512BW-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [16,32,64,128]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm1, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 4, i32 5, i32 6, i32 7>)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x) nounwind {
+; SSE-LABEL: constant_funnnel_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pmulhuw %xmm1, %xmm2
+; SSE-NEXT:    pmullw %xmm1, %xmm0
+; SSE-NEXT:    por %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: constant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX512F-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX512VL-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [16,15,14,13,12,11,10,9]
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    pmulhuw %xmm1, %xmm2
+; X32-SSE-NEXT:    pmullw %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
+  ret <8 x i16> %res
+}
+
+define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
+; SSE2-LABEL: constant_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    psrlw $8, %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    psrlw $8, %xmm3
+; SSE2-NEXT:    packuswb %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm3, %xmm2
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; SSE41-NEXT:    pmullw %xmm1, %xmm4
+; SSE41-NEXT:    pand %xmm3, %xmm4
+; SSE41-NEXT:    packuswb %xmm2, %xmm4
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    packuswb %xmm0, %xmm1
+; SSE41-NEXT:    por %xmm4, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm3, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm3, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    psrlw $8, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    psrlw $8, %xmm3
+; X32-SSE-NEXT:    packuswb %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    packuswb %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <16 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
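+; Note: with a uniform (splat) constant amount, targets with a native rotate
+; fold the whole funnel shift to a single immediate rotate (vprolq/vprold on
+; AVX512, vprot* on XOP), while plain SSE/AVX expands to a
+; shift-left/shift-right/or sequence, as the checks below show.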
+
+define <2 x i64> @splatconstant_funnnel_v2i64(<2 x i64> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrlq $50, %xmm1
+; SSE-NEXT:    psllq $14, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $50, %xmm0, %xmm1
+; AVX-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprolq $14, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolq $14, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprolq $14, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolq $14, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotq $14, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlq $50, %xmm1
+; X32-SSE-NEXT:    psllq $14, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> <i64 14, i64 14>)
+  ret <2 x i64> %res
+}
+
+define <4 x i32> @splatconstant_funnnel_v4i32(<4 x i32> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrld $28, %xmm1
+; SSE-NEXT:    pslld $4, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $28, %xmm0, %xmm1
+; AVX-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprold $4, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprold $4, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprold $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprold $4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotd $4, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrld $28, %xmm1
+; X32-SSE-NEXT:    pslld $4, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 4, i32 4, i32 4, i32 4>)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @splatconstant_funnnel_v8i16(<8 x i16> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrlw $9, %xmm1
+; SSE-NEXT:    psllw $7, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $9, %xmm0, %xmm1
+; AVX-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v8i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $9, %xmm0, %xmm1
+; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotw $7, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlw $9, %xmm1
+; X32-SSE-NEXT:    psllw $7, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <8 x i16> %res
+}
+
+define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrlw $4, %xmm1
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm1
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %xmm0, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotb $4, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlw $4, %xmm1
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    psllw $4, %xmm0
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <16 x i8> %res
+}
diff --git a/test/CodeGen/X86/vector-fshl-rot-256.ll b/test/CodeGen/X86/vector-fshl-rot-256.ll
new file mode 100644
index 0000000..b34b03c
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -0,0 +1,1527 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
+declare <32 x i8> @llvm.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>)
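+; Note: every call below passes %x as both value operands, and
+; fshl(x, x, c) == rotl(x, c), so these tests exercise rotate lowering
+; (vprolv*/vprol* on AVX512, vprot* on XOP). Illustrative scalar identity,
+; for reference only (not one of the generated checks):
+;   %r = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)  ; same as rotl(%x, 3)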
+
+;
+; Variable Shifts
+;
+
+define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [63,63]
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm7
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm3, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [63,63,63,63]
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX2-NEXT:    vpsllvq %ymm3, %ymm0, %ymm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvq %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vprotq %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotq %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> %amt)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [31,31,31,31]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31]
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32]
+; AVX2-NEXT:    vpsubd %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvd %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vprotd %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotd %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> %amt)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX1-NEXT:    vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpmulhuw %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vpmullw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpor %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX1-NEXT:    vpslld $23, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vcvttps2dq %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX2-NEXT:    vpsllvd %ymm4, %ymm3, %ymm4
+; AVX2-NEXT:    vpsrld $16, %ymm4, %ymm4
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX2-NEXT:    vpsllvd %ymm5, %ymm0, %ymm5
+; AVX2-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX2-NEXT:    vpackusdw %ymm4, %ymm5, %ymm4
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm5 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %ymm1, %ymm5, %ymm1
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX2-NEXT:    vpsrlvd %ymm5, %ymm3, %ymm3
+; AVX2-NEXT:    vpsrld $16, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT:    vpackusdw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm2, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm2, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm2, %zmm0, %zmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm2, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512VLBW-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vprotw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> %amt)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX1-NEXT:    vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $7, %xmm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm7
+; AVX1-NEXT:    vpor %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX2-NEXT:    vpsrlw $7, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vprotb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> %amt)
+  ret <32 x i8> %res
+}
+
+;
+; Uniform Variable Shifts
+;
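+; Note: the amount is splatted from element 0 via shufflevector, so targets
+; without a variable rotate can broadcast it and use scalar-amount shift
+; pairs (e.g. vpsllq/vpsrlq with an xmm count) instead of per-element shifts.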
+
+define <4 x i64> @splatvar_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm4, %xmm7
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-NEXT:    vpsrlq %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm0, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllq %xmm1, %xmm4, %xmm3
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpsllq %xmm3, %ymm0, %ymm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512F-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512VL-NEXT:    vprolvq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512BW-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512VLBW-NEXT:    vprolvq %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; XOPAVX2-NEXT:    vprotq %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <4 x i64> %amt, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> %splat)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpslld %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpsrld %xmm1, %xmm2, %xmm2
+; AVX1-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpslld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpslld %xmm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsrld %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512F-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512VL-NEXT:    vprolvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512BW-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512VLBW-NEXT:    vprolvd %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; XOPAVX2-NEXT:    vprotd %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <8 x i32> %amt, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> %splat)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_funnnel_v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastw %xmm1, %ymm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsllw %xmm2, %ymm0, %ymm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; XOPAVX2-NEXT:    vprotw %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <16 x i16> %amt, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> %splat)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpsllw %xmm3, %xmm6, %xmm7
+; AVX1-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX2-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX512F-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; XOPAVX2-NEXT:    vprotb %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <32 x i8> %amt, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> %splat)
+  ret <32 x i8> %res
+}
+
+;
+; Constant Shifts
+;
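+; Note: with distinct per-element constant amounts, AVX2 uses the
+; variable-shift forms (vpsllvq/vpsrlvq) with constant-pool operands,
+; AVX512 folds to vprolvq with a constant vector, and XOP uses vprot*
+; with a memory operand.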
+
+define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlq $4, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrlq $14, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrlq $50, %xmm0, %xmm3
+; AVX1-NEXT:    vpsrlq $60, %xmm0, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vpsllq $60, %xmm1, %xmm3
+; AVX1-NEXT:    vpsllq $50, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsllq $14, %xmm0, %xmm3
+; AVX1-NEXT:    vpsllq $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
+; AVX512F-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
+; AVX512BW-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> <i64 4, i64 14, i64 50, i64 60>)
+  ret <4 x i64> %res
+}
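+
+; AVX512 folds per-lane constant rotates into a single vprolvq fed by the
+; amount vector [4,14,50,60]; AVX1 has no 64-bit rotate, so each amount gets
+; a vpsllq/vpsrlq pair. A scalar sketch for lane 0 (illustrative):
+;   rotl64(x, 4) == (x << 4) | (x >> 60)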
+
+define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [256,512,1024,2048]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [16,32,64,128]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
+; AVX512F-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
+; AVX512BW-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <8 x i32> %res
+}
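+
+; The AVX1 path above leans on vpmuludq: one 32x32->64 multiply by 2^c yields
+; both rotate halves at once (sketch, for the amounts 4..11 used here):
+;   low32(zext(x) * 2^c)  == x << c
+;   high32(zext(x) * 2^c) == x >> (32 - c)
+; the even/odd shuffles and blends then stitch the two halves back together.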
+
+define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmulhuw %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512F-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512VL-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <16 x i16> %res
+}
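+
+; The same multiply trick at i16 granularity: vpmullw by 2^c produces the low
+; half (x << c) and vpmulhuw by 2^c the high half (x >> (16 - c)), so one
+; vpor per half finishes the rotate (sketch; c comes from the amount vector).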
+
+define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [256,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm9, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [256,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm7
+; AVX1-NEXT:    vpsrlw $8, %xmm7, %xmm7
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
+; AVX1-NEXT:    vpmullw %xmm9, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpsrlw $8, %xmm6, %xmm6
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpmullw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllw $2, %ymm1, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; XOPAVX1-NEXT:    vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; XOPAVX2-NEXT:    vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <32 x i8> %res
+}
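+
+; x86 has no general i8 shift, so the constant byte rotates above either run
+; bit-serially (vpsllw by 4/2/1 with vpblendvb selecting per byte) or, on BW
+; targets, widen to i16 for vpsllvw/vpsrlvw and truncate back with vpmovwb;
+; only XOP offers a native per-byte rotate (vprotb).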
+
+;
+; Uniform Constant Shifts
+;
+
+define <4 x i64> @splatconstant_funnnel_v4i64(<4 x i64> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $50, %xmm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsrlq $50, %xmm2, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vpsllq $14, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllq $14, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq $50, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllq $14, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprolq $14, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprolq $14, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprolq $14, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprolq $14, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotq $14, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotq $14, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotq $14, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotq $14, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> <i64 14, i64 14, i64 14, i64 14>)
+  ret <4 x i64> %res
+}
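+
+; A uniform constant amount maps to the immediate rotate form on AVX512
+; (vprolq $14); elsewhere the split is fixed at compile time (sketch):
+;   rotl64(x, 14) == (x << 14) | (x >> 50)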
+
+define <8 x i32> @splatconstant_funnnel_v8i32(<8 x i32> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm2
+; AVX1-NEXT:    vpslld $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $28, %xmm0, %xmm2
+; AVX1-NEXT:    vpslld $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld $28, %ymm0, %ymm1
+; AVX2-NEXT:    vpslld $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprold $4, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprold $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprold $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprold $4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotd $4, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotd $4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotd $4, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotd $4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatconstant_funnnel_v16i16(<16 x i16> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlw $9, %xmm1, %xmm2
+; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $9, %xmm0, %xmm2
+; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $9, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $9, %ymm0, %ymm1
+; AVX512-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotw $7, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotw $7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotw $7, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotw $7, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %ymm0, %ymm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotb $4, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotb $4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotb $4, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotb $4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <32 x i8> %res
+}
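+
+; Rotating bytes by 4 swaps the nibbles, so both halves are 4-bit word shifts
+; masked back to byte lanes (sketch):
+;   rotl8(x, 4) == ((x << 4) & 0xF0) | ((x >> 4) & 0x0F)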
diff --git a/test/CodeGen/X86/vector-fshl-rot-512.ll b/test/CodeGen/X86/vector-fshl-rot-512.ll
new file mode 100644
index 0000000..4d6654b
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshl-rot-512.ll
@@ -0,0 +1,827 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+
+declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
+declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
+declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)
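+
+; These tests exercise the rotate special case of the funnel shift: with both
+; value operands equal, the LangRef definition reduces to (sketch, width w):
+;   fshl(x, x, c) == rotl(x, c) == (x << (c % w)) | (x >> ((w - c) % w))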
+
+;
+; Variable Shifts
+;
+
+define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
+; AVX512-LABEL: var_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %amt)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @var_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounwind {
+; AVX512-LABEL: var_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %amt)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @var_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm5, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm5, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm3, %zmm1, %zmm3
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512F-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm5, %zmm0, %zmm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm5, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm3, %zmm1, %zmm3
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512VL-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpandq %zmm2, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubw %zmm1, %zmm4, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpandq %zmm2, %zmm1, %zmm3
+; AVX512VLBW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubw %zmm1, %zmm4, %zmm1
+; AVX512VLBW-NEXT:    vpandq %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> %amt)
+  ret <32 x i16> %res
+}
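+
+; Without AVX512BW there are no 512-bit word shifts, so each ymm half is
+; zero-extended to i32 (vpmovzxwd), shifted with vpsllvd/vpsrlvd, and packed
+; back via vpmovdw; BW targets keep the full vector in vpsllvw/vpsrlvw.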
+
+define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vpor %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512F-NEXT:    vpand %ymm8, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm10, %ymm9, %ymm9
+; AVX512F-NEXT:    vpor %ymm4, %ymm9, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpand %ymm9, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm11
+; AVX512F-NEXT:    vpor %ymm4, %ymm11, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $6, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand %ymm10, %ymm4, %ymm4
+; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $7, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm4
+; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpor %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512VL-NEXT:    vpand %ymm8, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm10, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpor %ymm4, %ymm9, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT:    vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm11
+; AVX512VL-NEXT:    vpor %ymm4, %ymm11, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $6, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand %ymm10, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $7, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT:    vpsubb %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpsllw $5, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k2
+; AVX512BW-NEXT:    vpsrlw $4, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT:    vpblendmb %zmm2, %zmm0, %zmm2 {%k2}
+; AVX512BW-NEXT:    vpsrlw $2, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpsrlw $1, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vpaddb %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $5, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpmovb2m %zmm3, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k2
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512BW-NEXT:    vpsllw $2, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm3, %zmm3, %zmm1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512BW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vpsubb %zmm1, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k2
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm0, %zmm2
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpblendmb %zmm2, %zmm0, %zmm2 {%k2}
+; AVX512VLBW-NEXT:    vpsrlw $2, %zmm2, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm5, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpsrlw $1, %zmm2, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpaddb %zmm4, %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm5, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpaddb %zmm1, %zmm1, %zmm3
+; AVX512VLBW-NEXT:    vpmovb2m %zmm3, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k2
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm3, %zmm3, %zmm1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> %amt)
+  ret <64 x i8> %res
+}
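+
+; Variable byte rotates run bit-serially: vpsllw $5 moves each 3-bit amount
+; into the byte's sign bit, where vpblendvb (or vpmovb2m on BW targets) picks
+; between the current value and its rotate by 4, 2, then 1, with the mask
+; doubled (vpaddb) between steps.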
+
+;
+; Uniform Variable Shifts
+;
+
+define <8 x i64> @splatvar_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
+; AVX512-LABEL: splatvar_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq %xmm1, %zmm1
+; AVX512-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %splat = shufflevector <8 x i64> %amt, <8 x i64> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %splat)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounwind {
+; AVX512-LABEL: splatvar_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastd %xmm1, %zmm1
+; AVX512-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %splat = shufflevector <16 x i32> %amt, <16 x i32> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %splat)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm5, %xmm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm5, %xmm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastw %xmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %splat = shufflevector <32 x i16> %amt, <32 x i16> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> %splat)
+  ret <32 x i16> %res
+}
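+
+; With a splatted amount every lane shares one scalar count, so the lowering
+; materializes c and 16-c in xmm registers and uses the count-in-xmm forms of
+; vpsllw/vpsrlw, once per 256-bit half on non-BW targets.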
+
+define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512F-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm5, %ymm6
+; AVX512F-NEXT:    vpbroadcastb %xmm6, %ymm6
+; AVX512F-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm7, %xmm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512F-NEXT:    vpbroadcastb %xmm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm5, %ymm6
+; AVX512VL-NEXT:    vpbroadcastb %xmm6, %ymm6
+; AVX512VL-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm7, %xmm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpbroadcastb %xmm5, %ymm5
+; AVX512VL-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm3, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpternlogd $255, %zmm5, %zmm5, %zmm5
+; AVX512BW-NEXT:    vpsllw %xmm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vpbroadcastb %xmm3, %zmm3
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm3, %zmm0, %zmm4
+; AVX512VLBW-NEXT:    vpternlogd $255, %zmm5, %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpsllw %xmm3, %zmm5, %zmm3
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm3, %zmm3
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm4, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpsrlw %xmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512VLBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %splat = shufflevector <64 x i8> %amt, <64 x i8> undef, <64 x i32> zeroinitializer
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> %splat)
+  ret <64 x i8> %res
+}
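+
+; The byte version reuses the word shifts and repairs the lanes with masks
+; built on the fly: shifting an all-ones register (vpcmpeqd, or vpternlogd
+; $255 for zmm) by the same count and broadcasting byte 0 gives the keep-mask
+; for every lane.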
+
+;
+; Constant Shifts
+;
+
+define <8 x i64> @constant_funnnel_v8i64(<8 x i64> %x) nounwind {
+; AVX512-LABEL: constant_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprolvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @constant_funnnel_v16i32(<16 x i32> %x) nounwind {
+; AVX512-LABEL: constant_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprolvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x) nounwind {
+; AVX512F-LABEL: constant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768>
+; AVX512F-NEXT:    vpmulhuw %ymm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768>
+; AVX512VL-NEXT:    vpmulhuw %ymm2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x) nounwind {
+; AVX512F-LABEL: constant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm8
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm0[8],ymm5[8],ymm0[9],ymm5[9],ymm0[10],ymm5[10],ymm0[11],ymm5[11],ymm0[12],ymm5[12],ymm0[13],ymm5[13],ymm0[14],ymm5[14],ymm0[15],ymm5[15],ymm0[24],ymm5[24],ymm0[25],ymm5[25],ymm0[26],ymm5[26],ymm0[27],ymm5[27],ymm0[28],ymm5[28],ymm0[29],ymm5[29],ymm0[30],ymm5[30],ymm0[31],ymm5[31]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm10, %ymm9, %ymm9
+; AVX512F-NEXT:    vpsrlw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[1],ymm5[1],ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[4],ymm5[4],ymm0[5],ymm5[5],ymm0[6],ymm5[6],ymm0[7],ymm5[7],ymm0[16],ymm5[16],ymm0[17],ymm5[17],ymm0[18],ymm5[18],ymm0[19],ymm5[19],ymm0[20],ymm5[20],ymm0[21],ymm5[21],ymm0[22],ymm5[22],ymm0[23],ymm5[23]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm9, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm5[8],ymm1[9],ymm5[9],ymm1[10],ymm5[10],ymm1[11],ymm5[11],ymm1[12],ymm5[12],ymm1[13],ymm5[13],ymm1[14],ymm5[14],ymm1[15],ymm5[15],ymm1[24],ymm5[24],ymm1[25],ymm5[25],ymm1[26],ymm5[26],ymm1[27],ymm5[27],ymm1[28],ymm5[28],ymm1[29],ymm5[29],ymm1[30],ymm5[30],ymm1[31],ymm5[31]
+; AVX512F-NEXT:    vpmullw %ymm10, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[1],ymm5[1],ymm1[2],ymm5[2],ymm1[3],ymm5[3],ymm1[4],ymm5[4],ymm1[5],ymm5[5],ymm1[6],ymm5[6],ymm1[7],ymm5[7],ymm1[16],ymm5[16],ymm1[17],ymm5[17],ymm1[18],ymm5[18],ymm1[19],ymm5[19],ymm1[20],ymm5[20],ymm1[21],ymm5[21],ymm1[22],ymm5[22],ymm1[23],ymm5[23]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm8
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512VL-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm9, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512VL-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpackuswb %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpmullw %ymm9, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT:    vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpsllw $2, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm2
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm2, %zmm3
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm2, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <64 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
+
+define <8 x i64> @splatconstant_funnnel_v8i64(<8 x i64> %x) nounwind {
+; AVX512-LABEL: splatconstant_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprolq $14, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @splatconstant_funnnel_v16i32(<16 x i32> %x) nounwind {
+; AVX512-LABEL: splatconstant_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprold $4, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @splatconstant_funnnel_v32i16(<32 x i16> %x) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $9, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $9, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsllw $7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $9, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsllw $7, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $9, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsllw $7, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $9, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsllw $7, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $9, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $7, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $4, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <64 x i8> %res
+}
diff --git a/test/CodeGen/X86/vector-fshr-128.ll b/test/CodeGen/X86/vector-fshr-128.ll
new file mode 100644
index 0000000..1f70fc9
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshr-128.ll
@@ -0,0 +1,3070 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+; Just one 32-bit run to make sure we do reasonable things for i64 cases.
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE2
+
+declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
+declare <16 x i8> @llvm.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
+
+;
+; Variable Shifts
+;
+
+define <2 x i64> @var_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrlq %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrlq %xmm4, %xmm5
+; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [64,64]
+; SSE2-NEXT:    psubq %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psllq %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT:    psllq %xmm3, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE2-NEXT:    orpd %xmm5, %xmm0
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlq %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlq %xmm4, %xmm5
+; SSE41-NEXT:    pblendw {{.*#+}} xmm5 = xmm0[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [64,64]
+; SSE41-NEXT:    psubq %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    psllq %xmm0, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT:    psllq %xmm0, %xmm3
+; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT:    por %xmm5, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqq %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    movapd %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm1, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpsrlvq %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsllvq %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsllvq %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsllvq %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsllvq %xmm4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VBMI2-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsllvq %xmm4, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsrlvq %xmm4, %xmm1, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsllvq %xmm4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvq %xmm2, %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpshlq %xmm4, %xmm1, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [64,64]
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpshlq %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsrlvq %xmm2, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsllvq %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    psrlq %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlq %xmm4, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [64,0,64,0]
+; X32-SSE-NEXT:    psubq %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
+; X32-SSE-NEXT:    psllq %xmm3, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psllq %xmm3, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm0
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; X32-SSE-NEXT:    pand %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pandn %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %amt)
+  ret <2 x i64> %res
+}
+
+define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psrld %xmm3, %xmm4
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrld %xmm5, %xmm3
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm4[2,3,3,3,4,5,6,7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm6
+; SSE2-NEXT:    psrld %xmm5, %xmm6
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,1,4,5,6,7]
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrld %xmm4, %xmm5
+; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1]
+; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm5[0,3]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32,32,32,32]
+; SSE2-NEXT:    psubd %xmm2, %xmm4
+; SSE2-NEXT:    pslld $23, %xmm4
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    cvttps2dq %xmm4, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm5, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
+; SSE2-NEXT:    por %xmm3, %xmm6
+; SSE2-NEXT:    pxor %xmm0, %xmm0
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    pandn %xmm6, %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrld %xmm0, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm6
+; SSE41-NEXT:    psrld %xmm5, %xmm6
+; SSE41-NEXT:    pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrld %xmm4, %xmm5
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,1,4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrld %xmm0, %xmm4
+; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3],xmm4[4,5],xmm6[6,7]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [32,32,32,32]
+; SSE41-NEXT:    psubd %xmm2, %xmm0
+; SSE41-NEXT:    pslld $23, %xmm0
+; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    cvttps2dq %xmm0, %xmm0
+; SSE41-NEXT:    pmulld %xmm0, %xmm3
+; SSE41-NEXT:    por %xmm4, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE41-NEXT:    blendvps %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    movaps %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrld %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrlq $32, %xmm2, %xmm4
+; AVX1-NEXT:    vpsrld %xmm4, %xmm1, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vpsrld %xmm6, %xmm1, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm5, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpmulld %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpsrlvd %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsllvd %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsllvd %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsllvd %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsllvd %xmm4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsllvd %xmm4, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsrlvd %xmm4, %xmm1, %xmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsllvd %xmm4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvd %xmm2, %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpshld %xmm4, %xmm1, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpshld %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; XOPAVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsrlvd %xmm2, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsllvd %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE-NEXT:    psrld %xmm3, %xmm4
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    psrld %xmm5, %xmm3
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm4[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm6
+; X32-SSE-NEXT:    psrld %xmm5, %xmm6
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrld %xmm4, %xmm5
+; X32-SSE-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1]
+; X32-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm5[0,3]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [32,32,32,32]
+; X32-SSE-NEXT:    psubd %xmm2, %xmm4
+; X32-SSE-NEXT:    pslld $23, %xmm4
+; X32-SSE-NEXT:    paddd {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    cvttps2dq %xmm4, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm4, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm5, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
+; X32-SSE-NEXT:    por %xmm3, %xmm6
+; X32-SSE-NEXT:    pxor %xmm0, %xmm0
+; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; X32-SSE-NEXT:    pand %xmm0, %xmm1
+; X32-SSE-NEXT:    pandn %xmm6, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psllw $12, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm3
+; SSE2-NEXT:    psraw $15, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrlw $8, %xmm5
+; SSE2-NEXT:    pand %xmm3, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm5, %xmm3
+; SSE2-NEXT:    paddw %xmm4, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    psraw $15, %xmm5
+; SSE2-NEXT:    movdqa %xmm5, %xmm6
+; SSE2-NEXT:    pandn %xmm3, %xmm6
+; SSE2-NEXT:    psrlw $4, %xmm3
+; SSE2-NEXT:    pand %xmm5, %xmm3
+; SSE2-NEXT:    por %xmm6, %xmm3
+; SSE2-NEXT:    paddw %xmm4, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    psraw $15, %xmm5
+; SSE2-NEXT:    movdqa %xmm5, %xmm6
+; SSE2-NEXT:    pandn %xmm3, %xmm6
+; SSE2-NEXT:    psrlw $2, %xmm3
+; SSE2-NEXT:    pand %xmm5, %xmm3
+; SSE2-NEXT:    por %xmm6, %xmm3
+; SSE2-NEXT:    paddw %xmm4, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm3, %xmm5
+; SSE2-NEXT:    psrlw $1, %xmm3
+; SSE2-NEXT:    pand %xmm4, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    psubw %xmm2, %xmm4
+; SSE2-NEXT:    pxor %xmm8, %xmm8
+; SSE2-NEXT:    movdqa %xmm4, %xmm7
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; SSE2-NEXT:    pslld $23, %xmm7
+; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; SSE2-NEXT:    paddd %xmm6, %xmm7
+; SSE2-NEXT:    cvttps2dq %xmm7, %xmm7
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; SSE2-NEXT:    pslld $23, %xmm4
+; SSE2-NEXT:    paddd %xmm6, %xmm4
+; SSE2-NEXT:    cvttps2dq %xmm4, %xmm4
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
+; SSE2-NEXT:    pmullw %xmm0, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    por %xmm3, %xmm4
+; SSE2-NEXT:    pcmpeqw %xmm8, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm4, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm8
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT:    psubw %xmm2, %xmm5
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; SSE41-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE41-NEXT:    pcmpeqw %xmm2, %xmm4
+; SSE41-NEXT:    psllw $12, %xmm2
+; SSE41-NEXT:    psllw $4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    paddw %xmm0, %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm7
+; SSE41-NEXT:    psrlw $8, %xmm7
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pblendvb %xmm0, %xmm7, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm7
+; SSE41-NEXT:    psrlw $4, %xmm7
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm7, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm7
+; SSE41-NEXT:    psrlw $2, %xmm7
+; SSE41-NEXT:    paddw %xmm2, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm7, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm7
+; SSE41-NEXT:    psrlw $1, %xmm7
+; SSE41-NEXT:    paddw %xmm2, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm7, %xmm3
+; SSE41-NEXT:    pslld $23, %xmm5
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [1065353216,1065353216,1065353216,1065353216]
+; SSE41-NEXT:    paddd %xmm0, %xmm5
+; SSE41-NEXT:    cvttps2dq %xmm5, %xmm2
+; SSE41-NEXT:    pslld $23, %xmm6
+; SSE41-NEXT:    paddd %xmm0, %xmm6
+; SSE41-NEXT:    cvttps2dq %xmm6, %xmm0
+; SSE41-NEXT:    packusdw %xmm2, %xmm0
+; SSE41-NEXT:    pmullw %xmm0, %xmm8
+; SSE41-NEXT:    por %xmm3, %xmm8
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm1, %xmm8
+; SSE41-NEXT:    movdqa %xmm8, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $12, %xmm2, %xmm3
+; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddw %xmm3, %xmm3, %xmm4
+; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm3, %xmm5, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm3, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $2, %xmm3, %xmm5
+; AVX1-NEXT:    vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm3, %xmm5
+; AVX1-NEXT:    vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX1-NEXT:    vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vcvttps2dq %xmm6, %xmm6
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddd %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT:    vpackusdw %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmullw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpeqw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT:    vpsrlvd %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpsllvd %ymm5, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512F-NEXT:    vpsrlvd %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512VL-NEXT:    vpsrlvd %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsrlvw %xmm4, %xmm1, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsllvw %xmm4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmw %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvw %xmm2, %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOP-NEXT:    vpsubw %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpshlw %xmm4, %xmm1, %xmm4
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; XOP-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; XOP-NEXT:    vpshlw %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; XOP-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    subl $28, %esp
+; X32-SSE-NEXT:    movups %xmm0, (%esp) # 16-byte Spill
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
+; X32-SSE-NEXT:    psllw $12, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm3
+; X32-SSE-NEXT:    psraw $15, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlw $8, %xmm5
+; X32-SSE-NEXT:    pand %xmm3, %xmm5
+; X32-SSE-NEXT:    pandn %xmm1, %xmm3
+; X32-SSE-NEXT:    por %xmm5, %xmm3
+; X32-SSE-NEXT:    paddw %xmm4, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
+; X32-SSE-NEXT:    psraw $15, %xmm5
+; X32-SSE-NEXT:    movdqa %xmm5, %xmm6
+; X32-SSE-NEXT:    pandn %xmm3, %xmm6
+; X32-SSE-NEXT:    psrlw $4, %xmm3
+; X32-SSE-NEXT:    pand %xmm5, %xmm3
+; X32-SSE-NEXT:    por %xmm6, %xmm3
+; X32-SSE-NEXT:    paddw %xmm4, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
+; X32-SSE-NEXT:    psraw $15, %xmm5
+; X32-SSE-NEXT:    movdqa %xmm5, %xmm6
+; X32-SSE-NEXT:    pandn %xmm3, %xmm6
+; X32-SSE-NEXT:    psrlw $2, %xmm3
+; X32-SSE-NEXT:    pand %xmm5, %xmm3
+; X32-SSE-NEXT:    por %xmm6, %xmm3
+; X32-SSE-NEXT:    paddw %xmm4, %xmm4
+; X32-SSE-NEXT:    psraw $15, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
+; X32-SSE-NEXT:    pandn %xmm3, %xmm5
+; X32-SSE-NEXT:    psrlw $1, %xmm3
+; X32-SSE-NEXT:    pand %xmm4, %xmm3
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT:    psubw %xmm2, %xmm4
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm4, %xmm7
+; X32-SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; X32-SSE-NEXT:    pslld $23, %xmm7
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [1065353216,1065353216,1065353216,1065353216]
+; X32-SSE-NEXT:    paddd %xmm0, %xmm7
+; X32-SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; X32-SSE-NEXT:    pslld $23, %xmm4
+; X32-SSE-NEXT:    paddd %xmm0, %xmm4
+; X32-SSE-NEXT:    cvttps2dq %xmm7, %xmm0
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    cvttps2dq %xmm4, %xmm4
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
+; X32-SSE-NEXT:    movdqu (%esp), %xmm0 # 16-byte Reload
+; X32-SSE-NEXT:    pmullw %xmm0, %xmm4
+; X32-SSE-NEXT:    por %xmm5, %xmm4
+; X32-SSE-NEXT:    por %xmm3, %xmm4
+; X32-SSE-NEXT:    pcmpeqw %xmm6, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pandn %xmm4, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    addl $28, %esp
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
+  ret <8 x i16> %res
+}
+
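+; NOTE: per element, fshr(%x, %y, %amt) yields
+;   (%y lshr a) | (%x shl (bw - a))  for a = %amt urem bw, a != 0
+;   %y                               for a == 0
+; e.g. fshr(i16 0x00FF, i16 0xFF00, i16 4) = 0xFFF0. The a == 0 case is why
+; each lowering above ends by blending %y back in (pcmpeq*/pblendvb on
+; SSE/AVX/XOP, vptestnm* plus a masked move on AVX512).
+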
+define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    psllw $5, %xmm5
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psrlw $4, %xmm4
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm6
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm6, %xmm4
+; SSE2-NEXT:    paddb %xmm5, %xmm5
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm7
+; SSE2-NEXT:    pandn %xmm4, %xmm7
+; SSE2-NEXT:    psrlw $2, %xmm4
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm7, %xmm4
+; SSE2-NEXT:    paddb %xmm5, %xmm5
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm5
+; SSE2-NEXT:    pandn %xmm4, %xmm5
+; SSE2-NEXT:    psrlw $1, %xmm4
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE2-NEXT:    psubb %xmm2, %xmm5
+; SSE2-NEXT:    psllw $5, %xmm5
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm7
+; SSE2-NEXT:    pandn %xmm0, %xmm7
+; SSE2-NEXT:    psllw $4, %xmm0
+; SSE2-NEXT:    pand %xmm6, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm7, %xmm0
+; SSE2-NEXT:    paddb %xmm5, %xmm5
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm7
+; SSE2-NEXT:    pandn %xmm0, %xmm7
+; SSE2-NEXT:    psllw $2, %xmm0
+; SSE2-NEXT:    pand %xmm6, %xmm0
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm7, %xmm0
+; SSE2-NEXT:    paddb %xmm5, %xmm5
+; SSE2-NEXT:    pcmpeqb %xmm3, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm5, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm5
+; SSE2-NEXT:    pandn %xmm0, %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    paddb %xmm0, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    por %xmm5, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE41-NEXT:    psubb %xmm2, %xmm4
+; SSE41-NEXT:    pxor %xmm5, %xmm5
+; SSE41-NEXT:    pcmpeqb %xmm2, %xmm5
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    psllw $5, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrlw $4, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm6
+; SSE41-NEXT:    pblendvb %xmm0, %xmm2, %xmm6
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
+; SSE41-NEXT:    psrlw $2, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    paddb %xmm0, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm2, %xmm6
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
+; SSE41-NEXT:    psrlw $1, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    paddb %xmm0, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm2, %xmm6
+; SSE41-NEXT:    psllw $5, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm2
+; SSE41-NEXT:    paddb %xmm4, %xmm2
+; SSE41-NEXT:    movdqa %xmm3, %xmm7
+; SSE41-NEXT:    psllw $4, %xmm7
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm7
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm7, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    psllw $2, %xmm4
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    paddb %xmm3, %xmm4
+; SSE41-NEXT:    paddb %xmm2, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm3
+; SSE41-NEXT:    por %xmm6, %xmm3
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpsllw $5, %xmm2, %xmm3
+; AVX-NEXT:    vpsrlw $4, %xmm1, %xmm4
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX-NEXT:    vpblendvb %xmm3, %xmm4, %xmm1, %xmm4
+; AVX-NEXT:    vpsrlw $2, %xmm4, %xmm5
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm5, %xmm5
+; AVX-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
+; AVX-NEXT:    vpsrlw $1, %xmm4, %xmm5
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm5, %xmm5
+; AVX-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX-NEXT:    vpsllw $5, %xmm4, %xmm4
+; AVX-NEXT:    vpaddb %xmm4, %xmm4, %xmm5
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm6
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm6, %xmm6
+; AVX-NEXT:    vpblendvb %xmm4, %xmm6, %xmm0, %xmm0
+; AVX-NEXT:    vpsllw $2, %xmm0, %xmm4
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX-NEXT:    vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm4
+; AVX-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX-NEXT:    vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm3, %zmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm6, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v16i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm6, %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm5, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm5, %ymm6, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v16i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %ymm5, %ymm6, %ymm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %ymm4, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLVBMI2-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vzeroupper
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOP-NEXT:    vpsubb %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpshlb %xmm4, %xmm1, %xmm4
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOP-NEXT:    vpsubb %xmm2, %xmm5, %xmm5
+; XOP-NEXT:    vpshlb %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; XOP-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm5
+; X32-SSE-NEXT:    psllw $5, %xmm5
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE-NEXT:    psrlw $4, %xmm4
+; X32-SSE-NEXT:    pand %xmm6, %xmm4
+; X32-SSE-NEXT:    pandn %xmm1, %xmm6
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    por %xmm6, %xmm4
+; X32-SSE-NEXT:    paddb %xmm5, %xmm5
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm6, %xmm7
+; X32-SSE-NEXT:    pandn %xmm4, %xmm7
+; X32-SSE-NEXT:    psrlw $2, %xmm4
+; X32-SSE-NEXT:    pand %xmm6, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    por %xmm7, %xmm4
+; X32-SSE-NEXT:    paddb %xmm5, %xmm5
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm6, %xmm5
+; X32-SSE-NEXT:    pandn %xmm4, %xmm5
+; X32-SSE-NEXT:    psrlw $1, %xmm4
+; X32-SSE-NEXT:    pand %xmm6, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    por %xmm5, %xmm4
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X32-SSE-NEXT:    psubb %xmm2, %xmm5
+; X32-SSE-NEXT:    psllw $5, %xmm5
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm6, %xmm7
+; X32-SSE-NEXT:    pandn %xmm0, %xmm7
+; X32-SSE-NEXT:    psllw $4, %xmm0
+; X32-SSE-NEXT:    pand %xmm6, %xmm0
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    por %xmm7, %xmm0
+; X32-SSE-NEXT:    paddb %xmm5, %xmm5
+; X32-SSE-NEXT:    pxor %xmm6, %xmm6
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm6
+; X32-SSE-NEXT:    movdqa %xmm6, %xmm7
+; X32-SSE-NEXT:    pandn %xmm0, %xmm7
+; X32-SSE-NEXT:    psllw $2, %xmm0
+; X32-SSE-NEXT:    pand %xmm6, %xmm0
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    por %xmm7, %xmm0
+; X32-SSE-NEXT:    paddb %xmm5, %xmm5
+; X32-SSE-NEXT:    pcmpeqb %xmm3, %xmm2
+; X32-SSE-NEXT:    pcmpgtb %xmm5, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm5
+; X32-SSE-NEXT:    pandn %xmm0, %xmm5
+; X32-SSE-NEXT:    por %xmm4, %xmm5
+; X32-SSE-NEXT:    paddb %xmm0, %xmm0
+; X32-SSE-NEXT:    pand %xmm3, %xmm0
+; X32-SSE-NEXT:    por %xmm5, %xmm0
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pandn %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
+  ret <16 x i8> %res
+}
+
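+; NOTE: x86 has no per-byte variable shift, so the SSE2/SSE41/AVX v16i8
+; lowerings emulate one amount bit at a time: psllw $5 moves the 3-bit amount
+; into each byte's sign bit, then three rounds of sign-bit blends (pcmpgtb
+; masks or pblendvb) conditionally shift by 4, 2 and 1, masking off the bits
+; that psrlw/psllw would leak across byte boundaries.
+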
+;
+; Uniform Variable Shifts
+;
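+; These tests splat element 0 of %amt, which lets the backend use the
+; scalar-count shift forms (psrlq/psrld/psrlw with an XMM count) instead of
+; the per-element variable shifts required above.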
+
+define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrlq %xmm2, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [64,64]
+; SSE2-NEXT:    psubq %xmm2, %xmm4
+; SSE2-NEXT:    psllq %xmm4, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlq %xmm2, %xmm0
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [64,64]
+; SSE41-NEXT:    psubq %xmm2, %xmm4
+; SSE41-NEXT:    psllq %xmm4, %xmm3
+; SSE41-NEXT:    por %xmm0, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqq %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    movapd %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpsrlq %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpsrlq %xmm4, %xmm1, %xmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpsrlq %xmm4, %xmm1, %xmm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpsrlq %xmm4, %xmm1, %xmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpsrlq %xmm4, %xmm1, %xmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VBMI2-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpsrlq %xmm4, %xmm1, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmq %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vpshrdvq %xmm2, %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq %xmm2, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsrlq %xmm2, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsllq %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    psrlq %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlq %xmm4, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [64,0,64,0]
+; X32-SSE-NEXT:    psubq %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
+; X32-SSE-NEXT:    psllq %xmm3, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psllq %xmm3, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm0
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; X32-SSE-NEXT:    pand %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pandn %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <2 x i64> %amt, <2 x i64> undef, <2 x i32> zeroinitializer
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %splat)
+  ret <2 x i64> %res
+}
+
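+; NOTE: psrlq/psllq take their count from the low quadword of an XMM
+; register, so the splatted amount folds directly into the shift; the X32
+; variant shifts each half separately and recombines the results with movsd.
+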
+define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    xorps %xmm4, %xmm4
+; SSE2-NEXT:    movss {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3]
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrld %xmm4, %xmm5
+; SSE2-NEXT:    movd %xmm2, %eax
+; SSE2-NEXT:    movl $32, %ecx
+; SSE2-NEXT:    subl %eax, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm4
+; SSE2-NEXT:    pslld %xmm4, %xmm0
+; SSE2-NEXT:    por %xmm5, %xmm0
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrld %xmm0, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [32,32,32,32]
+; SSE41-NEXT:    psubd %xmm2, %xmm0
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT:    pslld %xmm0, %xmm3
+; SSE41-NEXT:    por %xmm4, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE41-NEXT:    blendvps %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    movaps %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vpsrld %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpsrld %xmm3, %xmm1, %xmm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX2-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512F-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmd %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vpshrdvd %xmm2, %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX1-NEXT:    vpsrld %xmm3, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; XOPAVX1-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
+; XOPAVX2-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX2-NEXT:    vpsrld %xmm3, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; XOPAVX2-NEXT:    vpslld %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    xorps %xmm4, %xmm4
+; X32-SSE-NEXT:    movss {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrld %xmm4, %xmm5
+; X32-SSE-NEXT:    movd %xmm2, %eax
+; X32-SSE-NEXT:    movl $32, %ecx
+; X32-SSE-NEXT:    subl %eax, %ecx
+; X32-SSE-NEXT:    movd %ecx, %xmm4
+; X32-SSE-NEXT:    pslld %xmm4, %xmm0
+; X32-SSE-NEXT:    por %xmm5, %xmm0
+; X32-SSE-NEXT:    pcmpeqd %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pandn %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <4 x i32> %amt, <4 x i32> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %splat)
+  ret <4 x i32> %res
+}
+
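+; NOTE: psrld/pslld likewise take a scalar count, so element 0 of the amount
+; is zero-extended first (movss against a zeroed register on SSE2, pmovzxdq
+; on SSE41 and later); SSE2 computes the 32-a left-shift count with scalar
+; movd/subl/movd instead of a vector psubd.
+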
+define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    psubw %xmm3, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpeqw %xmm3, %xmm2
+; SSE2-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrlw %xmm3, %xmm5
+; SSE2-NEXT:    pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm4 = xmm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psllw %xmm4, %xmm0
+; SSE2-NEXT:    por %xmm5, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm2[0,0,2,3,4,5,6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrlw %xmm0, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT:    psubw %xmm2, %xmm0
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    psllw %xmm0, %xmm3
+; SSE41-NEXT:    por %xmm4, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pcmpeqw %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm5, %xmm1, %xmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsrlw %xmm5, %xmm1, %xmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm5, %xmm1, %xmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmw %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vpshrdvw %xmm2, %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX1-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; XOPAVX1-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT:    psubw %xmm3, %xmm4
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    pcmpeqw %xmm3, %xmm2
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm5
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm4 = xmm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psllw %xmm4, %xmm0
+; X32-SSE-NEXT:    por %xmm5, %xmm0
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pandn %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %splat)
+  ret <8 x i16> %res
+}
+
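+; NOTE: for words, SSE2 zero-extends the splatted amount to a quadword with a
+; pslldq $14 / psrldq $14 byte-shift pair before psrlw/psllw; SSE41 and AVX
+; achieve the same with pmovzxwq.
+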
+define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE2-NEXT:    psubb %xmm3, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpeqb %xmm3, %xmm2
+; SSE2-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psrlw %xmm3, %xmm5
+; SSE2-NEXT:    pcmpeqd %xmm6, %xmm6
+; SSE2-NEXT:    psrlw %xmm3, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT:    psrlw $8, %xmm6
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; SSE2-NEXT:    pand %xmm5, %xmm6
+; SSE2-NEXT:    pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm4 = xmm4[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psllw %xmm4, %xmm0
+; SSE2-NEXT:    psllw %xmm4, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
+; SSE2-NEXT:    pand %xmm0, %xmm3
+; SSE2-NEXT:    por %xmm6, %xmm3
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pshufb %xmm0, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm4 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    psrlw %xmm4, %xmm5
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm7, %xmm7
+; SSE41-NEXT:    psrlw %xmm4, %xmm7
+; SSE41-NEXT:    pshufb {{.*#+}} xmm7 = xmm7[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE41-NEXT:    pand %xmm5, %xmm7
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE41-NEXT:    psubb %xmm2, %xmm4
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    psllw %xmm4, %xmm3
+; SSE41-NEXT:    psllw %xmm4, %xmm6
+; SSE41-NEXT:    pshufb %xmm0, %xmm6
+; SSE41-NEXT:    pand %xmm6, %xmm3
+; SSE41-NEXT:    por %xmm7, %xmm3
+; SSE41-NEXT:    pcmpeqb %xmm2, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpsrlw %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,zero,zero,zero,zero,xmm5[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpshufb %xmm3, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm3, %xmm1, %xmm4
+; AVX2-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX2-NEXT:    vpsrlw %xmm3, %xmm5, %xmm3
+; AVX2-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX2-NEXT:    vpbroadcastb %xmm3, %xmm3
+; AVX2-NEXT:    vpand %xmm3, %xmm4, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw %xmm4, %xmm5, %xmm4
+; AVX2-NEXT:    vpbroadcastb %xmm4, %xmm4
+; AVX2-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm3, %zmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm6, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v16i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %xmm4, %xmm2, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm6, %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm5, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmb %zmm4, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm5, %ymm6, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v16i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %ymm5, %ymm6, %ymm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %ymm4, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLVBMI2-NEXT:    vptestnmb %xmm3, %xmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vzeroupper
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpshlb %xmm4, %xmm1, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpshlb %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpsubb %xmm2, %xmm3, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm1, %xmm4
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX2-NEXT:    vpsubb %xmm2, %xmm5, %xmm5
+; XOPAVX2-NEXT:    vpshlb %xmm5, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X32-SSE-NEXT:    psubb %xmm3, %xmm4
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    pcmpeqb %xmm3, %xmm2
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm5
+; X32-SSE-NEXT:    pcmpeqd %xmm6, %xmm6
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm6
+; X32-SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; X32-SSE-NEXT:    psrlw $8, %xmm6
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm5, %xmm6
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm4 = xmm4[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psllw %xmm4, %xmm0
+; X32-SSE-NEXT:    psllw %xmm4, %xmm3
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm0, %xmm3
+; X32-SSE-NEXT:    por %xmm6, %xmm3
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pandn %xmm3, %xmm2
+; X32-SSE-NEXT:    por %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <16 x i8> %amt, <16 x i8> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %splat)
+  ret <16 x i8> %res
+}
+
+;
+; Constant Shifts
+;
+
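+; llvm.fshr concatenates %x (high half) and %y (low half) and shifts the
+; pair right by each lane's amount, so lane 0 of the v2i64 test below
+; should be (y0 lshr 4) | (x0 shl 60) and lane 1 (y1 lshr 14) | (x1 shl 50),
+; matching the psrlq/psllq immediate pairs in the SSE checks. With
+; AVX512VBMI2+VL the whole operation folds into a single vpshrdvq.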
+define <2 x i64> @constant_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrlq $4, %xmm2
+; SSE2-NEXT:    psrlq $14, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    psllq $60, %xmm2
+; SSE2-NEXT:    psllq $50, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; SSE2-NEXT:    orpd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrlq $14, %xmm2
+; SSE41-NEXT:    psrlq $4, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psllq $50, %xmm2
+; SSE41-NEXT:    psllq $60, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $14, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrlq $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsllq $50, %xmm0, %xmm2
+; AVX1-NEXT:    vpsllq $60, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvq {{.*}}(%rip), %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
+; X32-SSE-NEXT:    psrlq $4, %xmm2
+; X32-SSE-NEXT:    psrlq $14, %xmm1
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    psllq $60, %xmm2
+; X32-SSE-NEXT:    psllq $50, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> <i64 4, i64 14>)
+  ret <2 x i64> %res
+}
+
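+; For v4i32 the left-shift half is lowered as a multiply by powers of two
+; (268435456 = 1 << 28 down to 33554432 = 1 << 25), via pmuludq on SSE2 and
+; pmulld on SSE4.1, while %y takes immediate psrld shifts that are blended
+; back together.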
+define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrld $7, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    psrld $6, %xmm3
+; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrld $5, %xmm2
+; SSE2-NEXT:    psrld $4, %xmm1
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [268435456,134217728,67108864,33554432]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrld $7, %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    psrld $5, %xmm3
+; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    psrld $6, %xmm2
+; SSE41-NEXT:    psrld $4, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $7, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrld $5, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrld $6, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
+; X32-SSE-NEXT:    psrld $7, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    psrld $6, %xmm3
+; X32-SSE-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
+; X32-SSE-NEXT:    psrld $5, %xmm2
+; X32-SSE-NEXT:    psrld $4, %xmm1
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [268435456,134217728,67108864,33554432]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm2, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm3, %xmm2
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 4, i32 5, i32 6, i32 7>)
+  ret <4 x i32> %res
+}
+
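+; Lane 0 of the v8i16 amounts is zero and must yield %y unchanged, so the
+; multiplier vector leaves that lane undefined (<u,...>): pmulhuw by
+; 1 << (16 - z) computes y >> z, pmullw computes x << (16 - z), and a final
+; mask or blend reinserts %y into lane 0.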
+define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = <u,32768,16384,8192,4096,2048,1024,512>
+; SSE2-NEXT:    pmulhuw %xmm4, %xmm1
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pmullw %xmm4, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = <u,32768,16384,8192,4096,2048,1024,512>
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pmulhuw %xmm2, %xmm3
+; SSE41-NEXT:    pmullw %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: constant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,32768,16384,8192,4096,2048,1024,512>
+; AVX-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm3
+; AVX-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,32768,16384,8192,4096,2048,1024,512>
+; AVX512F-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm3
+; AVX512F-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,32768,16384,8192,4096,2048,1024,512>
+; AVX512VL-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm3
+; AVX512VL-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,15,14,13,12,11,10,9]
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,15,14,13,12,11,10,9]
+; AVX512VBMI2-NEXT:    vpsllvw %zmm3, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %xmm1, %xmm2
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpshlw {{.*}}(%rip), %xmm1, %xmm2
+; XOP-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm3
+; X32-SSE-NEXT:    pandn %xmm1, %xmm3
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = <u,32768,16384,8192,4096,2048,1024,512>
+; X32-SSE-NEXT:    pmulhuw %xmm4, %xmm1
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pmullw %xmm4, %xmm0
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
+  ret <8 x i16> %res
+}
+
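+; The v16i8 amounts include 0 and 8, both congruent to zero modulo the
+; 8-bit width, so lanes 0 and 8 must produce %y; hence the
+; [0,255,...,0,255,...] byte masks and the 0x101 writemask (movw $257) in
+; the AVX512VL checks.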
+define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE2-LABEL: constant_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    psrlw $8, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    psrlw $8, %xmm4
+; SSE2-NEXT:    packuswb %xmm3, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    packuswb %xmm2, %xmm0
+; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pand %xmm3, %xmm2
+; SSE41-NEXT:    packuswb %xmm0, %xmm2
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    psrlw $8, %xmm3
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    psrlw $8, %xmm4
+; SSE41-NEXT:    packuswb %xmm3, %xmm4
+; SSE41-NEXT:    por %xmm2, %xmm4
+; SSE41-NEXT:    movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpackuswb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT:    vpackuswb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %zmm2, %zmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v16i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm2, %zmm3, %zmm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm3, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VBMI2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VBMI2-NEXT:    vzeroupper
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    movw $257, %ax # imm = 0x101
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v16i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLVBMI2-NEXT:    movw $257, %ax # imm = 0x101
+; AVX512VLVBMI2-NEXT:    kmovd %eax, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vzeroupper
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm1, %xmm2
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; XOP-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    psrlw $8, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    psrlw $8, %xmm4
+; X32-SSE-NEXT:    packuswb %xmm3, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X32-SSE-NEXT:    pand %xmm3, %xmm2
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    pand %xmm3, %xmm0
+; X32-SSE-NEXT:    packuswb %xmm2, %xmm0
+; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    pandn %xmm1, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <16 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
+
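+; A splatted constant amount needs just one immediate shift pair plus an OR:
+; fshr by 14 on i64 lanes should give (y >> 14) | (x << 50). With
+; AVX512VBMI2+VL this folds into a single concat-shift (vpshrdq $14).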
+define <2 x i64> @splatconstant_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $14, %xmm1
+; SSE-NEXT:    psllq $50, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; AVX-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdq $14, %xmm0, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; XOP-NEXT:    vpsllq $50, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrlq $14, %xmm1
+; X32-SSE-NEXT:    psllq $50, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> <i64 14, i64 14>)
+  ret <2 x i64> %res
+}
+
+define <4 x i32> @splatconstant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrld $4, %xmm1
+; SSE-NEXT:    pslld $28, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX512F-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdd $4, %xmm0, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrld $4, %xmm1, %xmm1
+; XOP-NEXT:    vpslld $28, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrld $4, %xmm1
+; X32-SSE-NEXT:    pslld $28, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 4, i32 4, i32 4, i32 4>)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @splatconstant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $7, %xmm1
+; SSE-NEXT:    psllw $9, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; AVX512VBMI2-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v8i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdw $7, %xmm0, %xmm1, %xmm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; XOP-NEXT:    vpsllw $9, %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrlw $7, %xmm1
+; X32-SSE-NEXT:    psllw $9, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <8 x i16> %res
+}
+
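+; x86 has no byte-granularity shift instructions, so the v16i8 case below
+; uses word shifts plus pand masks that clear the bits carried across byte
+; boundaries.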
+define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $4, %xmm1
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    psrlw $4, %xmm1
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    psllw $4, %xmm0
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <16 x i8> %res
+}
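+
+; x86 has no vector byte shifts, so the v16i8 splat-constant case above shifts
+; whole 16-bit lanes and masks away the bits that crossed byte boundaries (the
+; pand constant-pool loads); XOP instead loads per-byte shift counts for
+; vpshlb.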
diff --git a/test/CodeGen/X86/vector-fshr-256.ll b/test/CodeGen/X86/vector-fshr-256.ll
new file mode 100644
index 0000000..688b6fd
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshr-256.ll
@@ -0,0 +1,2579 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
+declare <32 x i8> @llvm.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>)
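+
+; fshr concatenates the two inputs and shifts right, returning the low half:
+; fshr(x, y, z) == (y lshr z') | (x shl (bw - z')) with z' = z urem bw, and
+; plain y when z' == 0. For example, fshr(i8 0x12, i8 0x34, i8 4) == 0x23,
+; the low byte of 0x1234 >> 4.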
+
+;
+; Variable Shifts
+;
+
+define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrlq %xmm2, %xmm1, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm6, %xmm1, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm4, %xmm8, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpsllq %xmm6, %xmm7, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vpsubq %xmm2, %xmm8, %xmm6
+; AVX1-NEXT:    vpsllq %xmm6, %xmm0, %xmm7
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsrlvq %ymm2, %ymm1, %ymm3
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [64,64,64,64]
+; AVX2-NEXT:    vpsubq %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpsllvq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm5
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512F-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT:    vpsllvq %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512VL-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT:    vpsllvq %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm5
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512BW-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpsllvq %ymm4, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm5
+; AVX512VBMI2-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512VBMI2-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpsllvq %ymm4, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsrlvq %ymm4, %ymm1, %ymm5
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [64,64,64,64]
+; AVX512VLBW-NEXT:    vpsubq %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpsllvq %ymm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvq %ymm2, %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; XOPAVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; XOPAVX1-NEXT:    vpshlq %xmm5, %xmm6, %xmm5
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vpshlq %xmm6, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [64,64]
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; XOPAVX1-NEXT:    vpshlq %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vpshlq %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorpd %ymm5, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpcomeqq %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpsrlvq %ymm2, %ymm1, %ymm3
+; XOPAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [64,64,64,64]
+; XOPAVX2-NEXT:    vpsubq %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vpsllvq %ymm4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt)
+  ret <4 x i64> %res
+}
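+
+; Without VBMI2, the v4i64 expansion above computes (y lshr amt) | (x shl
+; (64 - amt)) on masked amounts, then selects y where amt == 0 (via
+; vpcmpeqq/vblendvpd or a vptestnmq mask), since the shl by 64 in the
+; expansion would be out of range. AVX512VLVBMI2 folds the whole sequence
+; into a single vpshrdvq.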
+
+define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrld %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX1-NEXT:    vpsrld %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX1-NEXT:    vpsrld %xmm7, %xmm5, %xmm7
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT:    vpsrld %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3],xmm4[4,5],xmm6[6,7]
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrld %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpsrlq $32, %xmm2, %xmm6
+; AVX1-NEXT:    vpsrld %xmm6, %xmm1, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX1-NEXT:    vpsrld %xmm6, %xmm1, %xmm6
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vpsrld %xmm7, %xmm1, %xmm7
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm3, %xmm9, %xmm6
+; AVX1-NEXT:    vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vcvttps2dq %xmm6, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpmulld %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpsubd %xmm2, %xmm9, %xmm6
+; AVX1-NEXT:    vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vcvttps2dq %xmm6, %xmm6
+; AVX1-NEXT:    vpmulld %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsrlvd %ymm2, %ymm1, %ymm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [32,32,32,32,32,32,32,32]
+; AVX2-NEXT:    vpsubd %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512F-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsrlvd %ymm4, %ymm1, %ymm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvd %ymm2, %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; XOPAVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; XOPAVX1-NEXT:    vpshld %xmm5, %xmm6, %xmm5
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vpshld %xmm6, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [32,32,32,32]
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; XOPAVX1-NEXT:    vpshld %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vpshld %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm5, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpcomeqd %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqd %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpsrlvd %ymm2, %ymm1, %ymm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [32,32,32,32,32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
+  ret <8 x i32> %res
+}
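+
+; AVX1 has no per-element 32-bit shifts, so the left-shift half above is
+; synthesized as a multiply by 2^amt: vpslld $23 moves the amount into the
+; float exponent field, vpaddd with 1065353216 (the bit pattern of 1.0f)
+; biases it, vcvttps2dq materializes 2^amt, and vpmulld applies it.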
+
+define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vpsllw $12, %xmm3, %xmm4
+; AVX1-NEXT:    vpsllw $4, %xmm3, %xmm5
+; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpaddw %xmm4, %xmm4, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpsrlw $8, %xmm6, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm7, %xmm6, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm4, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $2, %xmm4, %xmm6
+; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $1, %xmm4, %xmm6
+; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllw $12, %xmm2, %xmm5
+; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm6
+; AVX1-NEXT:    vpor %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpaddw %xmm5, %xmm5, %xmm6
+; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm7, %xmm1, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm5, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpsrlw $2, %xmm5, %xmm7
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpsrlw $1, %xmm5, %xmm7
+; AVX1-NEXT:    vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm3, %xmm9, %xmm6
+; AVX1-NEXT:    vpxor %xmm10, %xmm10, %xmm10
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm6[4],xmm10[4],xmm6[5],xmm10[5],xmm6[6],xmm10[6],xmm6[7],xmm10[7]
+; AVX1-NEXT:    vpslld $23, %xmm4, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddd %xmm5, %xmm6, %xmm6
+; AVX1-NEXT:    vcvttps2dq %xmm6, %xmm6
+; AVX1-NEXT:    vpackusdw %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpsubw %xmm2, %xmm9, %xmm6
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm10[4],xmm6[5],xmm10[5],xmm6[6],xmm10[6],xmm6[7],xmm10[7]
+; AVX1-NEXT:    vpslld $23, %xmm7, %xmm7
+; AVX1-NEXT:    vpaddd %xmm5, %xmm7, %xmm7
+; AVX1-NEXT:    vcvttps2dq %xmm7, %xmm7
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddd %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpackusdw %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm8, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqw %xmm10, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm10, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15]
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
+; AVX2-NEXT:    vpsrlvd %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrld $16, %ymm4, %ymm4
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX2-NEXT:    vpsrlvd %ymm6, %ymm5, %ymm5
+; AVX2-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX2-NEXT:    vpackusdw %ymm4, %ymm5, %ymm4
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %ymm2, %ymm6, %ymm6
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm6[4],ymm3[4],ymm6[5],ymm3[5],ymm6[6],ymm3[6],ymm6[7],ymm3[7],ymm6[12],ymm3[12],ymm6[13],ymm3[13],ymm6[14],ymm3[14],ymm6[15],ymm3[15]
+; AVX2-NEXT:    vpsllvd %ymm7, %ymm5, %ymm5
+; AVX2-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0],ymm3[0],ymm6[1],ymm3[1],ymm6[2],ymm3[2],ymm6[3],ymm3[3],ymm6[8],ymm3[8],ymm6[9],ymm3[9],ymm6[10],ymm3[10],ymm6[11],ymm3[11]
+; AVX2-NEXT:    vpsllvd %ymm6, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm3, %zmm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsrlvw %ymm4, %ymm1, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpsllvw %ymm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmw %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvw %ymm2, %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; XOPAVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsubw %xmm3, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; XOPAVX1-NEXT:    vpshlw %xmm5, %xmm6, %xmm5
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vpshlw %xmm6, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [16,16,16,16,16,16,16,16]
+; XOPAVX1-NEXT:    vpsubw %xmm3, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; XOPAVX1-NEXT:    vpshlw %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vpshlw %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm5, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpcomeqw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqw %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; XOPAVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsubw %xmm3, %xmm4, %xmm3
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; XOPAVX2-NEXT:    vpshlw %xmm3, %xmm5, %xmm3
+; XOPAVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpshlw %xmm4, %xmm1, %xmm4
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT:    vpsubw %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; XOPAVX2-NEXT:    vpshlw %xmm5, %xmm6, %xmm5
+; XOPAVX2-NEXT:    vpshlw %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt)
+  ret <16 x i16> %res
+}
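+
+; v16i16 has no variable shifts before AVX512BW: AVX1 runs a vpblendvb ladder
+; (vpsllw $12/vpsllw $4 move the amount bits up where vpblendvb can test them,
+; then shifts of 8/4/2/1 are conditionally applied), AVX512F widens to 32-bit
+; lanes for vpsrlvd/vpsllvd, and AVX512VLVBMI2 again needs only vpshrdvw.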
+
+define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllw $4, %xmm4, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm6
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm3, %xmm9, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllw $2, %xmm4, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT:    vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm6
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm5
+; AVX1-NEXT:    vpand %xmm8, %xmm5, %xmm5
+; AVX1-NEXT:    vpsubb %xmm2, %xmm9, %xmm6
+; AVX1-NEXT:    vpsllw $5, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm5
+; AVX1-NEXT:    vpand %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddb %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm5
+; AVX1-NEXT:    vpaddb %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm4, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm3, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $2, %xmm4, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX1-NEXT:    vpand %xmm10, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $1, %xmm4, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm2, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpsrlw $2, %xmm5, %xmm0
+; AVX1-NEXT:    vpand %xmm10, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm7
+; AVX1-NEXT:    vpblendvb %xmm7, %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm5
+; AVX1-NEXT:    vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm6, %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm8, %ymm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsllw $5, %ymm2, %ymm3
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm4
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm4, %ymm1, %ymm4
+; AVX2-NEXT:    vpsrlw $2, %ymm4, %ymm5
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlw $1, %ymm4, %ymm5
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm6, %ymm6
+; AVX2-NEXT:    vpblendvb %ymm4, %ymm6, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm4
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
+; AVX2-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX2-NEXT:    vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm3
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpsrlw $2, %ymm4, %ymm5
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsrlw $1, %ymm4, %ymm5
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpsrlw $2, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsrlw $1, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm6, %ymm6
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v32i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v32i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; XOPAVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm5, %xmm6, %xmm5
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm4, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vpshlb %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm5, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpcomeqb %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqb %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; XOPAVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm4, %xmm3
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm5, %xmm3
+; XOPAVX2-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm1, %xmm4
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX2-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; XOPAVX2-NEXT:    vpshlb %xmm5, %xmm6, %xmm5
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
+  ret <32 x i8> %res
+}
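+
+; No x86 subtarget except XOP (vpshlb) shifts bytes by variable amounts, so
+; v32i8 either runs the vpsllw $5 + vpblendvb ladder with byte masks (AVX2,
+; AVX512F/VL) or zero-extends to words for vpsrlvw/vpsllvw and truncates back
+; with vpmovwb (AVX512BW variants).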
+
+;
+; Uniform Variable Shifts
+;
+
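+; These tests splat the shift amount from element 0, letting targets use the
+; scalar-count vector shifts (e.g. vpsrlq %xmm, %ymm) instead of per-element
+; variable shifts.
+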
+define <4 x i64> @splatvar_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm2 = xmm2[0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpsrlq %xmm2, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlq %xmm2, %xmm1, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [64,64]
+; AVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpsllq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpsubq %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllq %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsrlq %xmm2, %ymm1, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; AVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpsllq %xmm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpsrlq %xmm4, %ymm1, %ymm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsllq %xmm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpsrlq %xmm4, %ymm1, %ymm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsllq %xmm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpsrlq %xmm4, %ymm1, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsllq %xmm4, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpsrlq %xmm4, %ymm1, %ymm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VBMI2-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpsllq %xmm4, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpsrlq %xmm4, %ymm1, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsllq %xmm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmq %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpshrdvq %ymm2, %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovddup {{.*#+}} xmm2 = xmm2[0,0]
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOPAVX1-NEXT:    vpsrlq %xmm2, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsrlq %xmm2, %xmm1, %xmm4
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [64,64]
+; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; XOPAVX1-NEXT:    vpsllq %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsllq %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm2, %ymm2
+; XOPAVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [63,63,63,63]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpsrlq %xmm2, %ymm1, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [64,64]
+; XOPAVX2-NEXT:    vpsubq %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsllq %xmm4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <4 x i64> %amt, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %splat)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsrld %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrld %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpslld %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
+; AVX1-NEXT:    vpslld %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpsrld %xmm3, %ymm1, %ymm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX2-NEXT:    vpslld %xmm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpsrld %xmm5, %ymm1, %ymm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpslld %xmm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpsrld %xmm5, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpslld %xmm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpsrld %xmm5, %ymm1, %ymm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpslld %xmm4, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpsrld %xmm5, %ymm1, %ymm5
+; AVX512VBMI2-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VBMI2-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VBMI2-NEXT:    vpslld %xmm4, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpsrld %xmm5, %ymm1, %ymm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpslld %xmm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmd %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpshrdvd %ymm2, %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; XOPAVX1-NEXT:    vpsrld %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrld %xmm3, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; XOPAVX1-NEXT:    vpslld %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
+; XOPAVX1-NEXT:    vpslld %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqd %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm2, %ymm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
+; XOPAVX2-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX2-NEXT:    vpsrld %xmm3, %ymm1, %ymm3
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
+; XOPAVX2-NEXT:    vpsubd %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; XOPAVX2-NEXT:    vpslld %xmm4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqd %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <8 x i32> %amt, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %splat)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpsrlw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpsllw %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm3, %ymm1, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm5, %ymm1, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsrlw %xmm5, %ymm1, %ymm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VBMI2-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm5, %ymm1, %ymm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmw %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vpshrdvw %ymm2, %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; XOPAVX1-NEXT:    vpsrlw %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrlw %xmm3, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; XOPAVX1-NEXT:    vpsubw %xmm4, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; XOPAVX1-NEXT:    vpsllw %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
+; XOPAVX1-NEXT:    vpsllw %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqw %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm2, %ymm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsrlw %xmm3, %ymm1, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; XOPAVX2-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqw %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <16 x i16> %amt, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %splat)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpsrlw %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm8, %xmm8
+; AVX1-NEXT:    vpsrlw %xmm4, %xmm8, %xmm7
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpsrlw %xmm4, %xmm1, %xmm4
+; AVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm5, %xmm7, %xmm6
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,zero,zero,zero,zero,xmm6[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllw %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllw %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubb %xmm2, %xmm7, %xmm6
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,zero,zero,zero,zero,xmm6[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX1-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm9, %ymm0, %ymm0
+; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm5, %xmm4
+; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm3, %ymm1, %ymm4
+; AVX2-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX2-NEXT:    vpsrlw %xmm3, %ymm5, %ymm3
+; AVX2-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX2-NEXT:    vpbroadcastb %xmm3, %ymm3
+; AVX2-NEXT:    vpand %ymm3, %ymm4, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw %xmm4, %ymm5, %ymm4
+; AVX2-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm3, %ymm1, %ymm4
+; AVX512F-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw %xmm3, %ymm5, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpbroadcastb %xmm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm4, %ymm5, %ymm4
+; AVX512F-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512F-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm3, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw %xmm3, %ymm5, %ymm3
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpbroadcastb %xmm3, %ymm3
+; AVX512VL-NEXT:    vpand %ymm3, %ymm4, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm4, %ymm5, %ymm4
+; AVX512VL-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512VL-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v32i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v32i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm5 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm6 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw %zmm5, %zmm6, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %ymm4, %ymm6, %ymm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero,ymm4[16],zero,ymm4[17],zero,ymm4[18],zero,ymm4[19],zero,ymm4[20],zero,ymm4[21],zero,ymm4[22],zero,ymm4[23],zero,ymm4[24],zero,ymm4[25],zero,ymm4[26],zero,ymm4[27],zero,ymm4[28],zero,ymm4[29],zero,ymm4[30],zero,ymm4[31],zero
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLVBMI2-NEXT:    vptestnmb %ymm3, %ymm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT:    vpsubb %xmm4, %xmm3, %xmm5
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm5, %xmm6, %xmm5
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX1-NEXT:    vpsubb %xmm4, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm8, %xmm7
+; XOPAVX1-NEXT:    vpshlb %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm5, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpcomeqb %xmm3, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpcomeqb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; XOPAVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm4, %xmm3
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm5, %xmm3
+; XOPAVX2-NEXT:    vpsubb %xmm2, %xmm4, %xmm4
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm1, %xmm4
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX2-NEXT:    vpsubb %ymm2, %ymm4, %ymm4
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; XOPAVX2-NEXT:    vpshlb %xmm5, %xmm6, %xmm5
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <32 x i8> %amt, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %splat)
+  ret <32 x i8> %res
+}
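+; For the v32i8 case above there is no native byte shift: AVX2 shifts words
+; and masks off the bits that crossed byte boundaries (the vpsrlw/vpsllw on
+; all-ones followed by vpand), while AVX512BW widens the bytes to words with
+; vpmovzxbw, uses vpsrlvw/vpsllvw, and narrows back with vpmovwb.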
+
+;
+; Constant Shifts
+;
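+;
+; With constant amounts the shifts fold to immediates or constant-pool
+; operands (vpsrlv*/vpsllv* with a memory vector, or vpmullw by powers of
+; two for the i16 left shifts), and no select is needed unless an amount is
+; zero, as in the v16i16 test where lane 0 of %y is blended back in.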
+
+define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsrlq $60, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrlq $50, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrlq $14, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrlq $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsllq $4, %xmm2, %xmm3
+; AVX1-NEXT:    vpsllq $14, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsllq $50, %xmm0, %xmm3
+; AVX1-NEXT:    vpsllq $60, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvq {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpshlq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> <i64 4, i64 14, i64 50, i64 60>)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsrld $11, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrld $9, %xmm2, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrld $10, %xmm2, %xmm4
+; AVX1-NEXT:    vpsrld $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpsrld $7, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrld $5, %xmm1, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrld $6, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvd {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [256,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,32768,16384,8192,4096,2048,1024,512>
+; AVX1-NEXT:    vpmullw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpmulhuw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulhuw %xmm4, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3,4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX2-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm3
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX512F-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX512VL-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512VL-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpsllvw %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512VBMI2-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512VBMI2-NEXT:    vpsrlvw %zmm2, %zmm1, %zmm2
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512VBMI2-NEXT:    vpsllvw %zmm3, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX512VBMI2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm1, %ymm2
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX512VLBW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLVBMI2-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm3, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpcmov {{.*}}(%rip), %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; XOPAVX2-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm3
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15]
+; XOPAVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; XOPAVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; XOPAVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <16 x i16> %res
+}
+
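+; Editor's note (not part of the autogenerated checks): per the LangRef,
+; @llvm.fshr concatenates the two operands (%x high, %y low) and shifts the
+; double-width value right by the amount taken modulo the element width, so
+; an effective amount of 0 simply returns %y. A minimal scalar sketch,
+; assuming only the LangRef definition of the intrinsic:
+;
+;   declare i8 @llvm.fshr.i8(i8, i8, i8)
+;
+;   define i8 @fshr_sketch(i8 %a, i8 %b) {
+;     %r = call i8 @llvm.fshr.i8(i8 %a, i8 %b, i8 3)
+;     ret i8 %r            ; == (%a << 5) | (%b lshr 3)
+;   }
+;
+; In the v32i8 test below, amounts 0 and 8 both reduce to 0 (mod 8), which is
+; why bytes 0, 8, 16 and 24 are blended back in from %y: the vpblendvb mask
+; has zeros at exactly those positions, and the AVX512VLBW paths use the
+; equivalent 0x1010101 kmask.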
+define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; AVX1-LABEL: constant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [256,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [256,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; AVX1-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,2,4,8,16,32,64,128>
+; AVX1-NEXT:    vpmullw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,128,64,32,16,8,4,2>
+; AVX1-NEXT:    vpmullw %xmm7, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpackuswb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v32i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VBMI2-NEXT:    vpsrlvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX512VBMI2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    movl $16843009, %eax # imm = 0x1010101
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v32i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLVBMI2-NEXT:    vpsrlvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLVBMI2-NEXT:    movl $16843009, %eax # imm = 0x1010101
+; AVX512VLVBMI2-NEXT:    kmovd %eax, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,255,254,253,252,251,250,249,0,249,250,251,252,253,254,255]
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshlb %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; XOPAVX1-NEXT:    vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,255,254,253,252,251,250,249,0,249,250,251,252,253,254,255]
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; XOPAVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <32 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
+
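+; Editor's note: when every lane shares the same constant amount c with
+; 0 < c < bitwidth, the fshr expansion needs no amount masking and no
+; zero-amount select; it is just two immediate shifts plus an OR. For the
+; v4i64 test below with c = 14 that is (%y lshr 14) | (%x shl 50), since
+; 14 + 50 == 64 -- hence the paired vpsrlq $14 / vpsllq $50 in the checks.
+; On AVX512VBMI2+VL targets the whole pattern maps to a single
+; vpshrd{w,d,q} concat-shift instruction.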
+define <4 x i64> @splatconstant_funnnel_v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $14, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpsllq $50, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq $14, %ymm1, %ymm1
+; AVX2-NEXT:    vpsllq $50, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlq $14, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllq $50, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq $14, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllq $50, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlq $14, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllq $50, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlq $14, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllq $50, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlq $14, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllq $50, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdq $14, %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpsrlq $14, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlq $14, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpsllq $50, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpsllq $50, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlq $14, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllq $50, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> <i64 14, i64 14, i64 14, i64 14>)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @splatconstant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $4, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsrld $4, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpslld $28, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld $4, %ymm1, %ymm1
+; AVX2-NEXT:    vpslld $28, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrld $4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpslld $28, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld $4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpslld $28, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrld $4, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpslld $28, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrld $4, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpslld $28, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrld $4, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpslld $28, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdd $4, %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpsrld $4, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpslld $28, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpslld $28, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrld $4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpslld $28, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatconstant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlw $7, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpsllw $9, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $7, %ymm1, %ymm1
+; AVX2-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $7, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $7, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlw $7, %ymm1, %ymm1
+; AVX512VBMI2-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $7, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v16i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdw $7, %ymm0, %ymm1, %ymm0
+; AVX512VLVBMI2-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpsrlw $7, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlw $7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; XOPAVX1-NEXT:    vpsllw $9, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpsllw $9, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlw $7, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllw $9, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpsllw $4, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <32 x i8> %res
+}
diff --git a/test/CodeGen/X86/vector-fshr-512.ll b/test/CodeGen/X86/vector-fshr-512.ll
new file mode 100644
index 0000000..a333e15
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshr-512.ll
@@ -0,0 +1,1565 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+
+declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
+declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <32 x i16> @llvm.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
+declare <64 x i8> @llvm.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)
+
+;
+; Variable Shifts
+;
+
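+; Editor's note: with per-lane variable amounts, the generic expansion that
+; the AVX512F checks below implement per lane (vpandq / vpsrlvq / vpsubq /
+; vpsllvq / vporq, then vptestnmq + a merge-masked move) is sketched here as
+; scalar IR, assuming i64 elements:
+;
+;   define i64 @fshr_expand(i64 %x, i64 %y, i64 %amt) {
+;     %m   = and i64 %amt, 63        ; amount modulo the element width
+;     %lo  = lshr i64 %y, %m
+;     %inv = sub i64 64, %m
+;     %hi  = shl i64 %x, %inv        ; poison when %m == 0, masked out below
+;     %or  = or i64 %hi, %lo
+;     %z   = icmp eq i64 %m, 0
+;     %res = select i1 %z, i64 %y, i64 %or
+;     ret i64 %res
+;   }
+;
+; On AVX512VBMI2 targets the entire sequence collapses into one vpshrdv
+; concat-shift instruction.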
+define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm5
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512F-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512F-NEXT:    vpsllvq %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm5
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512VL-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512VL-NEXT:    vpsllvq %zmm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VL-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512BW-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpsllvq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdvq %zmm2, %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsrlvq %zmm4, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} zmm6 = [64,64,64,64,64,64,64,64]
+; AVX512VLBW-NEXT:    vpsubq %zmm4, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpsllvq %zmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvq %zmm2, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %amt)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @var_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512F-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512F-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512VL-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512VL-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdvd %zmm2, %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsrlvd %zmm4, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %zmm4, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpsllvd %zmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvd %zmm2, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %amt)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @var_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm7 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm8 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm7, %zmm8, %zmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm4, %ymm8, %ymm9
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm9 = ymm9[0],zero,ymm9[1],zero,ymm9[2],zero,ymm9[3],zero,ymm9[4],zero,ymm9[5],zero,ymm9[6],zero,ymm9[7],zero,ymm9[8],zero,ymm9[9],zero,ymm9[10],zero,ymm9[11],zero,ymm9[12],zero,ymm9[13],zero,ymm9[14],zero,ymm9[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm9, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm7, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX512F-NEXT:    vpcmpeqw %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm4, %zmm5, %zmm4
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm8, %ymm5
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm5, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT:    vpcmpeqw %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm7 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm8 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm7, %zmm8, %zmm7
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm4, %ymm8, %ymm9
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm9 = ymm9[0],zero,ymm9[1],zero,ymm9[2],zero,ymm9[3],zero,ymm9[4],zero,ymm9[5],zero,ymm9[6],zero,ymm9[7],zero,ymm9[8],zero,ymm9[9],zero,ymm9[10],zero,ymm9[11],zero,ymm9[12],zero,ymm9[13],zero,ymm9[14],zero,ymm9[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm9, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm7, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpcmpeqw %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm4, %zmm5, %zmm4
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm8, %ymm5
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm5, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512VL-NEXT:    vpcmpeqw %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %zmm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdvw %zmm2, %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsrlvw %zmm4, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %zmm4, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpsllvw %zmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvw %zmm2, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %amt)
+  ret <32 x i16> %res
+}
+
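+; Editor's note: x86 has no per-byte variable shift at any AVX512 level, so
+; the v64i8 expansion below emulates one with a ladder of constant shifts
+; (by 4, then 2, then 1) selected per byte: vpsllw $5 moves each amount bit
+; into the byte's sign position so vpblendvb (AVX512F/VL) or a vpmovb2m mask
+; (AVX512BW) can pick shifted vs. unshifted bytes, with vpand clearing the
+; bits that the word-granularity vpsrlw/vpsllw let cross byte boundaries.
+; Lanes whose amount is 0 mod 8 are then blended back from %y, as in the
+; narrower tests.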
+define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm2, %ymm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm6, %ymm7, %ymm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm9
+; AVX512F-NEXT:    vpsllw $5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm8, %ymm2, %ymm8
+; AVX512F-NEXT:    vpsrlw $2, %ymm8, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpand %ymm4, %ymm11, %ymm11
+; AVX512F-NEXT:    vpaddb %ymm10, %ymm10, %ymm10
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm11, %ymm8, %ymm11
+; AVX512F-NEXT:    vpsrlw $1, %ymm11, %ymm12
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm8, %ymm12, %ymm12
+; AVX512F-NEXT:    vpaddb %ymm10, %ymm10, %ymm10
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm12, %ymm11, %ymm10
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm12 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm12, %ymm11, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm13 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %ymm9, %ymm13, %ymm14
+; AVX512F-NEXT:    vpsllw $5, %ymm14, %ymm14
+; AVX512F-NEXT:    vpblendvb %ymm14, %ymm11, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm15 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm15, %ymm11, %ymm11
+; AVX512F-NEXT:    vpaddb %ymm14, %ymm14, %ymm14
+; AVX512F-NEXT:    vpblendvb %ymm14, %ymm11, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm11
+; AVX512F-NEXT:    vpaddb %ymm14, %ymm14, %ymm14
+; AVX512F-NEXT:    vpblendvb %ymm14, %ymm11, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm10, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm10, %xmm10, %xmm10
+; AVX512F-NEXT:    vpcmpeqb %ymm10, %ymm9, %ymm9
+; AVX512F-NEXT:    vpblendvb %ymm9, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $4, %ymm3, %ymm2
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw $5, %ymm5, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsrlw $2, %ymm2, %ymm7
+; AVX512F-NEXT:    vpand %ymm4, %ymm7, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm4
+; AVX512F-NEXT:    vpand %ymm8, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand %ymm12, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsubb %ymm5, %ymm13, %ymm6
+; AVX512F-NEXT:    vpsllw $5, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand %ymm15, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpcmpeqb %ymm10, %ymm5, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm2, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %ymm8, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm6, %ymm2, %ymm6
+; AVX512VL-NEXT:    vpsrlw $2, %ymm6, %ymm10
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT:    vpand %ymm11, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpaddb %ymm9, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm10, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpsrlw $1, %ymm6, %ymm10
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-NEXT:    vpand %ymm12, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpaddb %ymm9, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm10, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm10 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm10, %ymm9, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm13 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %ymm4, %ymm13, %ymm14
+; AVX512VL-NEXT:    vpsllw $5, %ymm14, %ymm14
+; AVX512VL-NEXT:    vpblendvb %ymm14, %ymm9, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm9
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm15 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm15, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpaddb %ymm14, %ymm14, %ymm14
+; AVX512VL-NEXT:    vpblendvb %ymm14, %ymm9, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm9
+; AVX512VL-NEXT:    vpaddb %ymm14, %ymm14, %ymm14
+; AVX512VL-NEXT:    vpblendvb %ymm14, %ymm9, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm6, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $4, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpand %ymm8, %ymm5, %ymm4
+; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsrlw $2, %ymm2, %ymm7
+; AVX512VL-NEXT:    vpand %ymm11, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm7
+; AVX512VL-NEXT:    vpand %ymm12, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpand %ymm10, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsubb %ymm4, %ymm13, %ymm7
+; AVX512VL-NEXT:    vpsllw $5, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpand %ymm15, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsllw $5, %zmm4, %zmm5
+; AVX512BW-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512BW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512BW-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vpblendmb %zmm5, %zmm1, %zmm5 {%k2}
+; AVX512BW-NEXT:    vpsrlw $2, %zmm5, %zmm7
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpsrlw $1, %zmm5, %zmm7
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512BW-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512BW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %zmm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512BW-NEXT:    vpaddb %zmm4, %zmm4, %zmm6
+; AVX512BW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512BW-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k2}
+; AVX512BW-NEXT:    vpsllw $2, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512BW-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm6, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: var_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VBMI2-NEXT:    vpsllw $5, %zmm4, %zmm5
+; AVX512VBMI2-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512VBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpblendmb %zmm5, %zmm1, %zmm5 {%k2}
+; AVX512VBMI2-NEXT:    vpsrlw $2, %zmm5, %zmm7
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512VBMI2-NEXT:    vpsrlw $1, %zmm5, %zmm7
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512VBMI2-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %zmm4, %zmm6, %zmm4
+; AVX512VBMI2-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512VBMI2-NEXT:    vpaddb %zmm4, %zmm4, %zmm6
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512VBMI2-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k2}
+; AVX512VBMI2-NEXT:    vpsllw $2, %zmm0, %zmm4
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    vpaddb %zmm6, %zmm6, %zmm4
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512VBMI2-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm4, %zmm5
+; AVX512VLBW-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512VLBW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpblendmb %zmm5, %zmm1, %zmm5 {%k2}
+; AVX512VLBW-NEXT:    vpsrlw $2, %zmm5, %zmm7
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512VLBW-NEXT:    vpsrlw $1, %zmm5, %zmm7
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512VLBW-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512VLBW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %zmm4, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vpaddb %zmm4, %zmm4, %zmm6
+; AVX512VLBW-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k2}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm0, %zmm4
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm6, %zmm6, %zmm4
+; AVX512VLBW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: var_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLVBMI2-NEXT:    vpsllw $5, %zmm4, %zmm5
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm5, %zmm5, %zmm6
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm5, %k2
+; AVX512VLVBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm5
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpblendmb %zmm5, %zmm1, %zmm5 {%k2}
+; AVX512VLVBMI2-NEXT:    vpsrlw $2, %zmm5, %zmm7
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512VLVBMI2-NEXT:    vpsrlw $1, %zmm5, %zmm7
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm7, %zmm7
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm6, %zmm6, %zmm6
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k1}
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %zmm4, %zmm6, %zmm4
+; AVX512VLVBMI2-NEXT:    vpsllw $5, %zmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm4, %zmm4, %zmm6
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm6, %k1
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm4, %k2
+; AVX512VLVBMI2-NEXT:    vpsllw $4, %zmm0, %zmm4
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k2}
+; AVX512VLVBMI2-NEXT:    vpsllw $2, %zmm0, %zmm4
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm6, %zmm6, %zmm4
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt)
+  ret <64 x i8> %res
+}
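+
+; Editorial sketch (exposition only, not emitted by the test-update script):
+; for a W-bit lane with a = amt mod W, @llvm.fshr computes
+;   res = (a == 0) ? y : ((y lshr a) | (x shl (W - a)))
+; e.g. fshr(i8 0xAB, i8 0xCD, 4) = low byte of (0xABCD >> 4) = 0xBC.
+; This is why each expansion above ORs a right shift of %y with a left shift
+; of %x and then selects %y back for the lanes whose masked amount is zero
+; (the trailing vptestnm*/masked-move or vpcmpeqb/vpblendvb sequence).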
+
+;
+; Uniform Variable Shifts
+;
+
+define <8 x i64> @splatvar_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512F-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpsrlq %xmm4, %zmm1, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512F-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpsllq %xmm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512F-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpsrlq %xmm4, %zmm1, %zmm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VL-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpsllq %xmm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VL-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpsrlq %xmm4, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512BW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpsllq %xmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpshrdvq %zmm2, %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [63,63,63,63,63,63,63,63]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpsrlq %xmm4, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [64,64]
+; AVX512VLBW-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpsllq %xmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmq %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastq %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpshrdvq %zmm2, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <8 x i64> %amt, <8 x i64> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> %splat)
+  ret <8 x i64> %res
+}
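+
+; Editorial note (exposition only): with a splatted amount every lane shares
+; one count, so the expansions keep the count scalar in an xmm register and
+; use the count-in-xmm shift forms (vpsrlq/vpsllq) rather than per-lane
+; variable shifts; VBMI2 targets instead fold the whole funnel shift into a
+; single vpshrdvq on the broadcast amount.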
+
+define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512F-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpsrld %xmm5, %zmm1, %zmm5
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512F-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512F-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512F-NEXT:    vpslld %xmm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512F-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VL-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpsrld %xmm5, %zmm1, %zmm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VL-NEXT:    vpslld %xmm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512VL-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VL-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512BW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpsrld %xmm5, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512BW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512BW-NEXT:    vpslld %xmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpshrdvd %zmm2, %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} zmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512VLBW-NEXT:    vpandd %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpsrld %xmm5, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastd {{.*#+}} xmm6 = [32,32,32,32]
+; AVX512VLBW-NEXT:    vpsubd %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX512VLBW-NEXT:    vpslld %xmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpord %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmd %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastd %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpshrdvd %zmm2, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <16 x i32> %amt, <16 x i32> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> %splat)
+  ret <16 x i32> %res
+}
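+
+; Editorial note (exposition only): the narrower splatvar variants also widen
+; the scalar count with vpmovzx* first, since the count-in-xmm shift forms
+; read their count from the low 64 bits of the xmm operand.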
+
+define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm4, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm5, %ymm2, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm4, %xmm7, %xmm7
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm7, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512F-NEXT:    vpcmpeqw %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm5, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsllw %xmm7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm4, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm5, %ymm2, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm7 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm4, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm7, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm6, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpcmpeqw %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm5, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsllw %xmm7, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm5, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpshrdvw %zmm2, %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm5, %zmm1, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmw %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastw %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpshrdvw %zmm2, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <32 x i16> %amt, <32 x i16> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> %splat)
+  ret <32 x i16> %res
+}
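+
+; Editorial note (exposition only): without AVX512BW, <32 x i16> is not a
+; legal 512-bit type, so the AVX512F/AVX512VL expansions above are split into
+; two 256-bit halves (hence the ymm operands and the duplicated
+; shift/or/blend sequence per half).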
+
+define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm5, %ymm2, %ymm6
+; AVX512F-NEXT:    vpcmpeqd %ymm9, %ymm9, %ymm9
+; AVX512F-NEXT:    vpsrlw %xmm5, %ymm9, %ymm8
+; AVX512F-NEXT:    vpsrlw $8, %ymm8, %ymm8
+; AVX512F-NEXT:    vpbroadcastb %xmm8, %ymm8
+; AVX512F-NEXT:    vpand %ymm8, %ymm6, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm4, %xmm7, %xmm7
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,zero,zero,zero,zero,xmm7[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm7, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm7, %ymm9, %ymm9
+; AVX512F-NEXT:    vpbroadcastb %xmm9, %ymm9
+; AVX512F-NEXT:    vpand %ymm9, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512F-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm5, %ymm3, %ymm2
+; AVX512F-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw %xmm7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm9, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm4, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm5, %ymm2, %ymm6
+; AVX512VL-NEXT:    vpcmpeqd %ymm9, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpsrlw %xmm5, %ymm9, %ymm8
+; AVX512VL-NEXT:    vpsrlw $8, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpbroadcastb %xmm8, %ymm8
+; AVX512VL-NEXT:    vpand %ymm8, %ymm6, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm4, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,zero,zero,zero,zero,xmm7[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm7, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm7, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpbroadcastb %xmm9, %ymm9
+; AVX512VL-NEXT:    vpand %ymm9, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm6, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpcmpeqb %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm5, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw %xmm7, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm9, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm5, %zmm1, %zmm6
+; AVX512BW-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512BW-NEXT:    vpsrlw %xmm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vpsrlw $8, %zmm5, %zmm5
+; AVX512BW-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512BW-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsllw %xmm4, %zmm7, %zmm4
+; AVX512BW-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512BW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatvar_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsrlw %xmm5, %zmm1, %zmm6
+; AVX512VBMI2-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512VBMI2-NEXT:    vpsrlw %xmm5, %zmm7, %zmm5
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512VBMI2-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512VBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMI2-NEXT:    vpsllw %xmm4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpsllw %xmm4, %zmm7, %zmm4
+; AVX512VBMI2-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512VBMI2-NEXT:    vpandq %zmm4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm5, %zmm1, %zmm6
+; AVX512VLBW-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512VLBW-NEXT:    vpsrlw %xmm5, %zmm7, %zmm5
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512VLBW-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpsllw %xmm4, %zmm7, %zmm4
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512VLBW-NEXT:    vpandq %zmm4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatvar_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT:    vpandq %zmm3, %zmm2, %zmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm5 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLVBMI2-NEXT:    vpsrlw %xmm5, %zmm1, %zmm6
+; AVX512VLVBMI2-NEXT:    vpternlogd $255, %zmm7, %zmm7, %zmm7
+; AVX512VLVBMI2-NEXT:    vpsrlw %xmm5, %zmm7, %zmm5
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm5, %zmm5
+; AVX512VLVBMI2-NEXT:    vpandq %zmm5, %zmm6, %zmm5
+; AVX512VLVBMI2-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLVBMI2-NEXT:    vpsubb %xmm4, %xmm6, %xmm4
+; AVX512VLVBMI2-NEXT:    vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLVBMI2-NEXT:    vpsllw %xmm4, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vpsllw %xmm4, %zmm7, %zmm4
+; AVX512VLVBMI2-NEXT:    vpbroadcastb %xmm4, %zmm4
+; AVX512VLVBMI2-NEXT:    vpandq %zmm4, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vporq %zmm5, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vptestnmb %zmm3, %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    retq
+  %splat = shufflevector <64 x i8> %amt, <64 x i8> undef, <64 x i32> zeroinitializer
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %splat)
+  ret <64 x i8> %res
+}
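+
+; Editorial note (exposition only): x86 has no per-byte shift instructions,
+; so the byte case shifts whole words and then masks off the bits that leaked
+; across byte boundaries; the mask is built by applying the same shift to an
+; all-ones vector (materialized with vpcmpeqd or vpternlogd $255) and
+; broadcasting the resulting byte.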
+
+;
+; Constant Shifts
+;
+
+define <8 x i64> @constant_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VL-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdvq {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvq {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>)
+  ret <8 x i64> %res
+}
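+
+; Editorial note (exposition only): per-lane constant amounts turn the shifts
+; into variable-count vpsrlvq/vpsllvq whose amount vectors are loaded from
+; rip-relative constant pools, and VBMI2 targets fold both shifts and the OR
+; into one vpshrdvq with a memory operand.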
+
+define <16 x i32> @constant_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX512F-NEXT:    vpmulhuw %ymm4, %ymm2, %ymm5
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm6 = ymm2[0],ymm5[1,2,3,4,5,6,7],ymm2[8],ymm5[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vpmulhuw %ymm4, %ymm3, %ymm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0],ymm2[1,2,3,4,5,6,7],ymm3[8],ymm2[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm1[1,2,3,4,5,6,7],ymm3[8],ymm1[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX512VL-NEXT:    vpmulhuw %ymm4, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm2[0],ymm5[1,2,3,4,5,6,7],ymm2[8],ymm5[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512VL-NEXT:    vpmulhuw %ymm4, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0],ymm2[1,2,3,4,5,6,7],ymm3[8],ymm2[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm1[1,2,3,4,5,6,7],ymm3[8],ymm1[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm2
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    movl $65537, %eax # imm = 0x10001
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm1, %zmm2
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    movl $65537, %eax # imm = 0x10001
+; AVX512VLBW-NEXT:    kmovd %eax, %k1
+; AVX512VLBW-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <32 x i16> %res
+}
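+
+; Editorial note (exposition only): lacking 512-bit variable word shifts, the
+; AVX512F lowering multiplies by powers of two instead (vpmulhuw for the
+; right-shift half, vpmullw for the left); the amount-0 lane is undef in the
+; multiplier table, so the surrounding blends (or, with a kmask, the 0x10001
+; constant) restore those lanes from %y.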
+
+define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; AVX512F-LABEL: constant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm8
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm8, %ymm8, %ymm9
+; AVX512F-NEXT:    vpblendvb %ymm9, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm10, %ymm10
+; AVX512F-NEXT:    vpsrlw $8, %ymm10, %ymm10
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm13, %ymm12, %ymm12
+; AVX512F-NEXT:    vpsrlw $8, %ymm12, %ymm12
+; AVX512F-NEXT:    vpackuswb %ymm10, %ymm12, %ymm10
+; AVX512F-NEXT:    vpor %ymm10, %ymm0, %ymm0
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15],ymm3[24],ymm4[24],ymm3[25],ymm4[25],ymm3[26],ymm4[26],ymm3[27],ymm4[27],ymm3[28],ymm4[28],ymm3[29],ymm4[29],ymm3[30],ymm4[30],ymm3[31],ymm4[31]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[16],ymm4[16],ymm3[17],ymm4[17],ymm3[18],ymm4[18],ymm3[19],ymm4[19],ymm3[20],ymm4[20],ymm3[21],ymm4[21],ymm3[22],ymm4[22],ymm3[23],ymm4[23]
+; AVX512F-NEXT:    vpmullw %ymm13, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512F-NEXT:    vpackuswb %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm10, %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm8
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm8, %ymm8, %ymm9
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512VL-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm11, %ymm11
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512VL-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm12, %ymm11, %ymm11
+; AVX512VL-NEXT:    vpsrlw $8, %ymm11, %ymm11
+; AVX512VL-NEXT:    vpackuswb %ymm4, %ymm11, %ymm4
+; AVX512VL-NEXT:    vpor %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpmullw %ymm12, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpackuswb %ymm2, %ymm5, %ymm2
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpsllw $2, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512BW-NEXT:    kmovq %rax, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: constant_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VBMI2-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    vpsllw $2, %zmm0, %zmm3
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VBMI2-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VBMI2-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512VBMI2-NEXT:    vpackuswb %zmm2, %zmm3, %zmm2
+; AVX512VBMI2-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512VBMI2-NEXT:    kmovq %rax, %k1
+; AVX512VBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpackuswb %zmm2, %zmm3, %zmm2
+; AVX512VLBW-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512VLBW-NEXT:    kmovq %rax, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: constant_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536]
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vpsllw $4, %zmm0, %zmm3
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vpsllw $2, %zmm0, %zmm3
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLVBMI2-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512VLVBMI2-NEXT:    vpsllvw {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLVBMI2-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512VLVBMI2-NEXT:    vpackuswb %zmm2, %zmm3, %zmm2
+; AVX512VLVBMI2-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
+; AVX512VLVBMI2-NEXT:    kmovq %rax, %k1
+; AVX512VLVBMI2-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <64 x i8> %res
+}
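+
+; Editorial note (exposition only): the byte case widens to words with
+; vpunpcklbw/vpunpckhbw, multiplies or variable-shifts by powers of two,
+; narrows back with vpackuswb, and finally uses the 0x0101...01 byte mask to
+; restore from %y every lane whose amount is 0 modulo 8.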
+
+;
+; Uniform Constant Shifts
+;
+
+define <8 x i64> @splatconstant_funnnel_v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v8i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlq $14, %zmm1, %zmm1
+; AVX512F-NEXT:    vpsllq $50, %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlq $14, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpsllq $50, %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlq $14, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllq $50, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdq $14, %zmm0, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlq $14, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllq $50, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v8i64:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdq $14, %zmm0, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %y, <8 x i64> <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>)
+  ret <8 x i64> %res
+}
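+
+; Editorial note (exposition only): a uniform constant amount needs no
+; zero-amount select at all (14 is already nonzero mod 64), so the expansion
+; is just immediate shifts plus an OR, and VBMI2 targets emit a single
+; vpshrdq $14 double-shift instruction.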
+
+define <16 x i32> @splatconstant_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v16i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrld $4, %zmm1, %zmm1
+; AVX512F-NEXT:    vpslld $28, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrld $4, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpslld $28, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v16i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrld $4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpslld $28, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdd $4, %zmm0, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrld $4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpslld $28, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v16i32:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdd $4, %zmm0, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @splatconstant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $7, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsllw $9, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $7, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $7, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsllw $9, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $7, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $9, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpshrdw $7, %zmm0, %zmm1, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $7, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $9, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpshrdw $7, %zmm0, %zmm1, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $4, %ymm3, %ymm2
+; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $4, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VBMI2-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VBMI2:       # %bb.0:
+; AVX512VBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VBMI2-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VBMI2-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VLVBMI2:       # %bb.0:
+; AVX512VLVBMI2-NEXT:    vpsrlw $4, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLVBMI2-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <64 x i8> %res
+}
diff --git a/test/CodeGen/X86/vector-fshr-rot-128.ll b/test/CodeGen/X86/vector-fshr-rot-128.ll
new file mode 100644
index 0000000..fc915c2
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -0,0 +1,1948 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+; Just one 32-bit run to make sure we do reasonable things for i64 cases.
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE2
+
+declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
+declare <16 x i8> @llvm.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
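+
+; Note: a funnel shift right, fshr(x, y, z), concatenates x (most significant
+; half) with y (least significant half), shifts the double-width value right
+; by z modulo the element bit width, and returns the low half. Every test in
+; this file passes the same operand for x and y, which reduces fshr to a
+; rotate right; that is why rotate lowerings (vprorv*, XOP vprot*) are the
+; expected output on targets that have them.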
+
+;
+; Variable Shifts
+;
+
+define <2 x i64> @var_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [63,63]
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    psubq %xmm1, %xmm3
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psrlq %xmm1, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    psrlq %xmm1, %xmm5
+; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
+; SSE2-NEXT:    pand %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psllq %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE2-NEXT:    psllq %xmm2, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    orpd %xmm5, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [63,63]
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    psubq %xmm1, %xmm3
+; SSE41-NEXT:    pand %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    psrlq %xmm1, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    psrlq %xmm1, %xmm5
+; SSE41-NEXT:    pblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT:    pand %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psllq %xmm3, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE41-NEXT:    psllq %xmm2, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpsrlvq %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvq %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpsubq %xmm1, %xmm2, %xmm1
+; XOP-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [63,0,63,0]
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    psubq %xmm1, %xmm3
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
+; X32-SSE-NEXT:    psrlq %xmm1, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psrlq %xmm1, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
+; X32-SSE-NEXT:    pand %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psllq %xmm3, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psllq %xmm2, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> %amt)
+  ret <2 x i64> %res
+}
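+
+; Note on the lowering above: without AVX512, the i64 rotate is expanded as
+; (x >> (amt & 63)) | (x << (-amt & 63)). SSE2 psrlq/psllq only take a single
+; scalar count, so each 64-bit lane is shifted with its own count (pshufd
+; moves lane 1's count into place) and the halves are recombined, via movsd
+; on SSE2 and pblendw on SSE4.1/AVX1.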
+
+define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    psubd %xmm1, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    pslld $23, %xmm2
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    cvttps2dq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    psubd %xmm1, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pslld $23, %xmm2
+; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    cvttps2dq %xmm2, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pmuludq %xmm2, %xmm3
+; SSE41-NEXT:    pmuludq %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvd %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    psubd %xmm1, %xmm2
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    pslld $23, %xmm2
+; X32-SSE-NEXT:    paddd {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    cvttps2dq %xmm2, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm1, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %amt)
+  ret <4 x i32> %res
+}
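+
+; Note on the lowering above: SSE has no variable per-lane i32 shift, so the
+; expansion materializes 2^amt per lane by placing amt in a float's exponent
+; field (pslld $23, then paddd with 0x3F800000 = 1.0f) and converting back
+; with cvttps2dq. pmuludq by 2^amt then produces 64-bit products whose low
+; halves are x << amt and whose high halves are x >> (32 - amt); the shuffles
+; gather the two halves, and por combines them into the rotate.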
+
+define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    psubw %xmm1, %xmm3
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm1
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT:    pslld $23, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; SSE2-NEXT:    paddd %xmm4, %xmm1
+; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT:    pslld $23, %xmm3
+; SSE2-NEXT:    paddd %xmm4, %xmm3
+; SSE2-NEXT:    cvttps2dq %xmm3, %xmm2
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    pmulhuw %xmm2, %xmm1
+; SSE2-NEXT:    pmullw %xmm2, %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    psubw %xmm1, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE41-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE41-NEXT:    pslld $23, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [1065353216,1065353216,1065353216,1065353216]
+; SSE41-NEXT:    paddd %xmm2, %xmm3
+; SSE41-NEXT:    cvttps2dq %xmm3, %xmm3
+; SSE41-NEXT:    pslld $23, %xmm1
+; SSE41-NEXT:    paddd %xmm2, %xmm1
+; SSE41-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE41-NEXT:    packusdw %xmm3, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pmulhuw %xmm1, %xmm2
+; SSE41-NEXT:    pmullw %xmm1, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: var_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512F-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512VL-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsllvw %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512VLBW-NEXT:    vpsrlvw %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; XOP-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    psubw %xmm1, %xmm3
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm3, %xmm1
+; X32-SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; X32-SSE-NEXT:    pslld $23, %xmm1
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; X32-SSE-NEXT:    paddd %xmm4, %xmm1
+; X32-SSE-NEXT:    cvttps2dq %xmm1, %xmm1
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X32-SSE-NEXT:    pslld $23, %xmm3
+; X32-SSE-NEXT:    paddd %xmm4, %xmm3
+; X32-SSE-NEXT:    cvttps2dq %xmm3, %xmm2
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    pmulhuw %xmm2, %xmm1
+; X32-SSE-NEXT:    pmullw %xmm2, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> %amt)
+  ret <8 x i16> %res
+}
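+
+; Note on the lowering above: the i16 rotate is implemented as a multiply by
+; 2^amt. pmullw keeps the low 16 bits of each product (x << amt) and pmulhuw
+; keeps the high 16 bits (x >> (16 - amt)); or'ing the two gives the rotate.
+; The per-lane 2^amt operand is built with the same pslld $23 exponent trick
+; as the v4i32 case, done in two halves and repacked.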
+
+define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: var_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pxor %xmm0, %xmm0
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    psubb %xmm1, %xmm3
+; SSE2-NEXT:    psllw $5, %xmm3
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psrlw $4, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    psllw $4, %xmm5
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    pand %xmm1, %xmm5
+; SSE2-NEXT:    pandn %xmm2, %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrlw $6, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psllw $2, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm2, %xmm4
+; SSE2-NEXT:    paddb %xmm3, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    paddb %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psrlw $7, %xmm4
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm1, %xmm4
+; SSE2-NEXT:    paddb %xmm3, %xmm3
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm4
+; SSE2-NEXT:    pandn %xmm2, %xmm0
+; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: var_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psrlw $4, %xmm0
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    psllw $4, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    por %xmm0, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    psubb %xmm1, %xmm0
+; SSE41-NEXT:    psllw $5, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    psrlw $6, %xmm1
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    psllw $2, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    por %xmm1, %xmm3
+; SSE41-NEXT:    paddb %xmm0, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    paddb %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    psrlw $7, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    por %xmm1, %xmm3
+; SSE41-NEXT:    paddb %xmm0, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm3
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $6, %xmm0, %xmm2
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpsllw $2, %xmm0, %xmm3
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vpsrlw $7, %xmm0, %xmm3
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm3, %zmm0, %zmm3
+; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm3, %zmm0, %zmm3
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm3, %ymm0, %ymm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: var_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; XOP-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: var_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    pxor %xmm0, %xmm0
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    psubb %xmm1, %xmm3
+; X32-SSE-NEXT:    psllw $5, %xmm3
+; X32-SSE-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE-NEXT:    pcmpgtb %xmm3, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
+; X32-SSE-NEXT:    psrlw $4, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm5
+; X32-SSE-NEXT:    psllw $4, %xmm5
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm5
+; X32-SSE-NEXT:    por %xmm4, %xmm5
+; X32-SSE-NEXT:    pand %xmm1, %xmm5
+; X32-SSE-NEXT:    pandn %xmm2, %xmm1
+; X32-SSE-NEXT:    por %xmm5, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
+; X32-SSE-NEXT:    psrlw $6, %xmm2
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE-NEXT:    psllw $2, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    por %xmm2, %xmm4
+; X32-SSE-NEXT:    paddb %xmm3, %xmm3
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    pcmpgtb %xmm3, %xmm2
+; X32-SSE-NEXT:    pand %xmm2, %xmm4
+; X32-SSE-NEXT:    pandn %xmm1, %xmm2
+; X32-SSE-NEXT:    por %xmm4, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm1
+; X32-SSE-NEXT:    paddb %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
+; X32-SSE-NEXT:    psrlw $7, %xmm4
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT:    por %xmm1, %xmm4
+; X32-SSE-NEXT:    paddb %xmm3, %xmm3
+; X32-SSE-NEXT:    pcmpgtb %xmm3, %xmm0
+; X32-SSE-NEXT:    pand %xmm0, %xmm4
+; X32-SSE-NEXT:    pandn %xmm2, %xmm0
+; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> %amt)
+  ret <16 x i8> %res
+}
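+
+; Note on the lowering above: there are no variable per-byte shifts, so the
+; SSE/AVX expansion rotates in three steps (by 4, then 2, then 1). psllw $5
+; moves successive bits of the amount into each byte's sign-bit position, and
+; pcmpgtb (SSE2) or pblendvb (SSE4.1/AVX) selects the rotated or unrotated
+; bytes at each step. AVX512 instead widens the bytes to i32 (or i16 with BW)
+; and uses variable shifts; XOP has a native byte rotate, vprotb.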
+
+;
+; Uniform Variable Shifts
+;
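+; These splat the bottom element of the rotate-amount vector, so the amount
+; is uniform across lanes and a single scalar shift count can be used.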
+
+define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind {
+; SSE-LABEL: splatvar_funnnel_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [63,63]
+; SSE-NEXT:    pxor %xmm3, %xmm3
+; SSE-NEXT:    psubq %xmm1, %xmm3
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    psrlq %xmm1, %xmm4
+; SSE-NEXT:    pand %xmm2, %xmm3
+; SSE-NEXT:    psllq %xmm3, %xmm0
+; SSE-NEXT:    por %xmm4, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpsrlq %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512F-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512VL-NEXT:    vprorvq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512BW-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vprorvq %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v2i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubq %xmm1, %xmm2, %xmm1
+; XOPAVX2-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [63,0,63,0]
+; X32-SSE-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE-NEXT:    psubq %xmm1, %xmm3
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
+; X32-SSE-NEXT:    psrlq %xmm1, %xmm4
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
+; X32-SSE-NEXT:    psrlq %xmm1, %xmm5
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
+; X32-SSE-NEXT:    pand %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psllq %xmm3, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; X32-SSE-NEXT:    psllq %xmm2, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm5, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <2 x i64> %amt, <2 x i64> undef, <2 x i32> zeroinitializer
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> %splat)
+  ret <2 x i64> %res
+}
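+
+; Note on the lowering above: with a uniform amount, psrlq/psllq take their
+; count from the low 64 bits of the xmm operand, so one pair of shifts covers
+; both lanes; the X32 run still falls back to the per-lane sequence from the
+; variable case.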
+
+define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    negl %eax
+; SSE2-NEXT:    andl $31, %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pslld %xmm1, %xmm2
+; SSE2-NEXT:    movl $32, %ecx
+; SSE2-NEXT:    subl %eax, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    psrld %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    psubd %xmm1, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pslld %xmm1, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32,32,32,32]
+; SSE41-NEXT:    psubd %xmm2, %xmm1
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT:    psrld %xmm1, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpslld %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpslld %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512F-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512VL-NEXT:    vprorvd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512BW-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vprorvd %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; XOPAVX2-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movd %xmm1, %eax
+; X32-SSE-NEXT:    negl %eax
+; X32-SSE-NEXT:    andl $31, %eax
+; X32-SSE-NEXT:    movd %eax, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    pslld %xmm1, %xmm2
+; X32-SSE-NEXT:    movl $32, %ecx
+; X32-SSE-NEXT:    subl %eax, %ecx
+; X32-SSE-NEXT:    movd %ecx, %xmm1
+; X32-SSE-NEXT:    psrld %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <4 x i32> %amt, <4 x i32> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %splat)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v8i16:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    psubw %xmm1, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    psubw %xmm2, %xmm1
+; SSE2-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    psllw %xmm2, %xmm3
+; SSE2-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlw %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    psubw %xmm1, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    psllw %xmm1, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT:    psubw %xmm2, %xmm1
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; SSE41-NEXT:    psrlw %xmm1, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v8i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_funnnel_v8i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastw %xmm1, %xmm1
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsllw %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; XOPAVX2-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    psubw %xmm1, %xmm2
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT:    psubw %xmm2, %xmm1
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    psllw %xmm2, %xmm3
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlw %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> %splat)
+  ret <8 x i16> %res
+}
+
+define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
+; SSE2-LABEL: splatvar_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    psubb %xmm1, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE2-NEXT:    psubb %xmm2, %xmm3
+; SSE2-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psllw %xmm2, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm5
+; SSE2-NEXT:    psllw %xmm2, %xmm5
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm5[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlw %xmm3, %xmm0
+; SSE2-NEXT:    psrlw %xmm3, %xmm4
+; SSE2-NEXT:    psrlw $8, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: splatvar_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    pshufb %xmm2, %xmm1
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    psubb %xmm1, %xmm3
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm4 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psllw %xmm4, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm5
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm6
+; SSE41-NEXT:    psllw %xmm4, %xmm6
+; SSE41-NEXT:    pshufb %xmm2, %xmm6
+; SSE41-NEXT:    pand %xmm6, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE41-NEXT:    psubb %xmm3, %xmm2
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    psrlw %xmm2, %xmm0
+; SSE41-NEXT:    psrlw %xmm2, %xmm5
+; SSE41-NEXT:    pshufb {{.*#+}} xmm5 = xmm5[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE41-NEXT:    pand %xmm0, %xmm5
+; SSE41-NEXT:    por %xmm5, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: splatvar_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsllw %xmm2, %xmm4, %xmm2
+; AVX2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm3, %zmm0, %zmm3
+; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm3, %zmm0, %zmm3
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw %ymm3, %ymm0, %ymm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX2-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; X32-SSE-LABEL: splatvar_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X32-SSE-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE-NEXT:    psubb %xmm1, %xmm2
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X32-SSE-NEXT:    psubb %xmm2, %xmm3
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psllw %xmm2, %xmm1
+; X32-SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; X32-SSE-NEXT:    pcmpeqd %xmm5, %xmm5
+; X32-SSE-NEXT:    psllw %xmm2, %xmm5
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm5[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm0
+; X32-SSE-NEXT:    psrlw %xmm3, %xmm4
+; X32-SSE-NEXT:    psrlw $8, %xmm4
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %splat = shufflevector <16 x i8> %amt, <16 x i8> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> %splat)
+  ret <16 x i8> %res
+}
+
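+; A rough reading of the SSE2/X32-SSE lowering of splatvar_funnnel_v16i8 above:
+; the byte rotate amount is splatted, negated (a rotate-right by a is a
+; rotate-left by -a mod 8) and masked, and the vector is shifted with the
+; word-granular psllw/psrlw using a scalar amount moved into the low quadword.
+; Applying the same shift to an all-ones register (pcmpeqd) rebuilds the
+; per-byte keep-masks that word shifts would otherwise smear across byte
+; boundaries, and the two masked halves are OR'd together.
+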
+;
+; Constant Shifts
+;
+
+define <2 x i64> @constant_funnnel_v2i64(<2 x i64> %x) nounwind {
+; SSE2-LABEL: constant_funnnel_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psrlq $4, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    psrlq $14, %xmm2
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psllq $60, %xmm1
+; SSE2-NEXT:    psllq $50, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    orpd %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlq $14, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psrlq $4, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psllq $50, %xmm1
+; SSE41-NEXT:    psllq $60, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v2i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $14, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrlq $4, %xmm0, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpsllq $50, %xmm0, %xmm2
+; AVX1-NEXT:    vpsllq $60, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,14]
+; AVX512F-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,14]
+; AVX512BW-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlq $4, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    psrlq $14, %xmm2
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psllq $60, %xmm1
+; X32-SSE-NEXT:    psllq $50, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X32-SSE-NEXT:    orpd %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> <i64 4, i64 14>)
+  ret <2 x i64> %res
+}
+
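+; Sanity check on the shift pairs above: each i64 lane is rotated right by its
+; constant, so the paired left/right shift amounts sum to the lane width
+; (4 + 60 == 64 and 14 + 50 == 64). Without AVX512 rotate instructions the two
+; per-lane shift results are blended and OR'd; vprorvq/vprotq perform the
+; rotate in a single instruction. The splatconstant cases later in the file
+; follow the same shift-pair pattern.
+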
+define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
+; SSE2-LABEL: constant_funnnel_v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pmuludq %xmm2, %xmm3
+; SSE41-NEXT:    pmuludq %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
+; AVX512F-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
+; AVX512BW-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm1, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 4, i32 5, i32 6, i32 7>)
+  ret <4 x i32> %res
+}
+
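+; Informal check of the pmuludq constants above: a rotate-right by c on an i32
+; lane is a rotate-left by 32-c, i.e. a multiply by 2^(32-c) mod 2^32 whose
+; low and high product halves are then recombined. For c = 4,5,6,7 that gives
+; 2^28,2^27,2^26,2^25 = 268435456,134217728,67108864,33554432, matching the
+; constant vector.
+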
+define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x) nounwind {
+; SSE-LABEL: constant_funnnel_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1,32768,16384,8192,4096,2048,1024,512]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pmulhuw %xmm1, %xmm2
+; SSE-NEXT:    pmullw %xmm1, %xmm0
+; SSE-NEXT:    por %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: constant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,32768,16384,8192,4096,2048,1024,512]
+; AVX-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,32768,16384,8192,4096,2048,1024,512]
+; AVX512F-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,32768,16384,8192,4096,2048,1024,512]
+; AVX512VL-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [16,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,15,14,13,12,11,10,9]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1,32768,16384,8192,4096,2048,1024,512]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    pmulhuw %xmm1, %xmm2
+; X32-SSE-NEXT:    pmullw %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm2, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
+  ret <8 x i16> %res
+}
+
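+; The pmullw/pmulhuw pair above computes both halves of a widening multiply:
+; for a rotate-right by c the multiplier is 2^((16-c) mod 16), so for c = 0..7
+; the constant is [2^0,2^15,2^14,...,2^9] = [1,32768,16384,8192,4096,2048,
+; 1024,512]. pmullw yields the left-shifted bits, pmulhuw the bits that
+; wrapped around, and OR'ing them gives the rotate.
+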
+define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
+; SSE2-LABEL: constant_funnnel_v16i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    psrlw $8, %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    psrlw $8, %xmm3
+; SSE2-NEXT:    packuswb %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: constant_funnnel_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm3, %xmm2
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
+; SSE41-NEXT:    pmullw %xmm1, %xmm4
+; SSE41-NEXT:    pand %xmm3, %xmm4
+; SSE41-NEXT:    packuswb %xmm2, %xmm4
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    packuswb %xmm0, %xmm1
+; SSE41-NEXT:    por %xmm4, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: constant_funnnel_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm3, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm3, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: constant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: constant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    psrlw $8, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    psrlw $8, %xmm3
+; X32-SSE-NEXT:    packuswb %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    packuswb %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm3, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <16 x i8> %res
+}
+
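+; A reading of the byte lowering above: x86 has no byte-granular shifts, so
+; the bytes are widened to words (punpcklbw/punpckhbw or pmovzxbw), multiplied
+; by per-byte powers of two (e.g. [1,128,64,32,16,8,4,2] for rotate-left
+; amounts 0,7,6,5,4,3,2,1 on the low half), and the wrapped bits are recovered
+; with psrlw $8 while the shifted bits are kept with a 0x00FF word mask before
+; packuswb reassembles the vector.
+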
+;
+; Uniform Constant Shifts
+;
+
+define <2 x i64> @splatconstant_funnnel_v2i64(<2 x i64> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrlq $14, %xmm1
+; SSE-NEXT:    psllq $50, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $14, %xmm0, %xmm1
+; AVX-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v2i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprorq $14, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorq $14, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprorq $14, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v2i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorq $14, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v2i64:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotq $50, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v2i64:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlq $14, %xmm1
+; X32-SSE-NEXT:    psllq $50, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %x, <2 x i64> %x, <2 x i64> <i64 14, i64 14>)
+  ret <2 x i64> %res
+}
+
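+; fshr(x, x, 14) on i64 lanes is (x lshr 14) | (x shl 50), exactly the
+; psrlq $14 / psllq $50 pair above; AVX512 folds it into vprorq $14, and XOP
+; expresses the same rotate as a left rotate, vprotq $50.
+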
+define <4 x i32> @splatconstant_funnnel_v4i32(<4 x i32> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrld $4, %xmm1
+; SSE-NEXT:    pslld $28, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $4, %xmm0, %xmm1
+; AVX-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vprord $4, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprord $4, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT:    vprord $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprord $4, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v4i32:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotd $28, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v4i32:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrld $4, %xmm1
+; X32-SSE-NEXT:    pslld $28, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 4, i32 4, i32 4, i32 4>)
+  ret <4 x i32> %res
+}
+
+define <8 x i16> @splatconstant_funnnel_v8i16(<8 x i16> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrlw $7, %xmm1
+; SSE-NEXT:    psllw $9, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $7, %xmm0, %xmm1
+; AVX-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v8i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $7, %xmm0, %xmm1
+; AVX512-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v8i16:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotw $9, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v8i16:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlw $7, %xmm1
+; X32-SSE-NEXT:    psllw $9, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %x, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <8 x i16> %res
+}
+
+define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x) nounwind {
+; SSE-LABEL: splatconstant_funnnel_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrlw $4, %xmm1
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE-NEXT:    psllw $4, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: splatconstant_funnnel_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm1
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %xmm0, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
+; XOP-LABEL: splatconstant_funnnel_v16i8:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vprotb $4, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; X32-SSE-LABEL: splatconstant_funnnel_v16i8:
+; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlw $4, %xmm1
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    psllw $4, %xmm0
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    por %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+  %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <16 x i8> %res
+}
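+
+; For the byte case above, the word shifts spill across byte boundaries, so
+; each shift is followed by a pand with a memory constant (presumably the
+; splatted 0x0F / 0xF0 nibble masks; the autogenerated checks elide the
+; operand as {{.*}}(%rip)) before the halves are OR'd. XOP again needs only
+; vprotb $4.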
diff --git a/test/CodeGen/X86/vector-fshr-rot-256.ll b/test/CodeGen/X86/vector-fshr-rot-256.ll
new file mode 100644
index 0000000..87c8475
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -0,0 +1,1604 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
+
+declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
+declare <32 x i8> @llvm.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>)
+
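+; In all of these tests both value operands of the funnel shift are the same,
+; so fshr(x, x, c) is a rotate-right: for c' = c mod bitwidth (nonzero), the
+; result is (x lshr c') | (x shl (bitwidth - c')). A quick scalar example:
+; fshr(i8 0xB4, i8 0xB4, 3) = (0xB4 >> 3) | (0xB4 << 5) = 0x16 | 0x80 = 0x96.
+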
+;
+; Variable Shifts
+;
+
+define <4 x i64> @var_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpsubq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [63,63]
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm7
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [63,63,63,63]
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX2-NEXT:    vpsrlvq %ymm3, %ymm0, %ymm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvq %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vprotq %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubq %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotq %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> %amt)
+  ret <4 x i64> %res
+}
+
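+; A note on the AVX2 path above: the amount is masked with 63 for the right
+; shift, while the left-shift amount is obtained by negating first and then
+; masking, which works because (64 - (a & 63)) & 63 == (-a) & 63. The AVX512
+; targets collapse the whole sequence into vprorvq.
+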
+define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT:    vpsubd %xmm2, %xmm8, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [31,31,31,31]
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm1, %xmm8, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31]
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32]
+; AVX2-NEXT:    vpsubd %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvd %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vprotd %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubd %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotd %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> %amt)
+  ret <8 x i32> %res
+}
+
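+; The AVX1 sequence above builds 2^amt per lane without variable shifts:
+; vpslld $23 moves the 5-bit amount into the float exponent field, adding
+; 1065353216 (0x3F800000, the bit pattern of 1.0f) biases it, and vcvttps2dq
+; converts the result back to the integer 2^amt. The pmuludq pairs then
+; produce a 64-bit product per lane whose low half is x << amt and whose high
+; half is x >> (32 - amt); the shuffles and blends gather the two halves
+; before the final vpor.
+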
+define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpsubw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT:    vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpmulhuw %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vpmullw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpor %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX1-NEXT:    vpslld $23, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vcvttps2dq %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; AVX2-NEXT:    vpsubw %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX2-NEXT:    vpsllvd %ymm4, %ymm3, %ymm4
+; AVX2-NEXT:    vpsrld $16, %ymm4, %ymm4
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX2-NEXT:    vpsllvd %ymm5, %ymm0, %ymm5
+; AVX2-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX2-NEXT:    vpackusdw %ymm4, %ymm5, %ymm4
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm5 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %ymm1, %ymm5, %ymm1
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX2-NEXT:    vpsrlvd %ymm5, %ymm3, %ymm3
+; AVX2-NEXT:    vpsrld $16, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT:    vpackusdw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsubw %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm2, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm2, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsubw %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm2, %zmm0, %zmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm2, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT:    vpsubw %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vpsubw %ymm1, %ymm2, %ymm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
+; AVX512VLBW-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsubw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vprotw %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubw %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> %amt)
+  ret <16 x i16> %res
+}
+
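+; There is no per-element byte shift before AVX512, so the variable v32i8
+; rotate below is assembled from three blended steps: the amount is first
+; negated (a rotate right is a rotate left by the negated amount), then
+; candidate rotates by 4, 2 and 1 (paired vpsrlw/vpsllw with byte masks) are
+; selected per lane with vpblendvb, after vpsllw $5 moves each amount bit
+; into the sign bit that vpblendvb tests.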
+define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: var_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm10, %xmm10, %xmm10
+; AVX1-NEXT:    vpsubb %xmm5, %xmm10, %xmm5
+; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm11 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX1-NEXT:    vpand %xmm11, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT:    vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpor %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $7, %xmm2, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsubb %xmm1, %xmm10, %xmm1
+; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm11, %xmm3, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX2-NEXT:    vpsrlw $7, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: var_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: var_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsubb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vprotb %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: var_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubb %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> %amt)
+  ret <32 x i8> %res
+}
+
+;
+; Uniform Variable Shifts
+;
+
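+; The splatvar tests broadcast lane 0 of %amt, so every element rotates right
+; by the same runtime amount. With both value operands equal, fshr is a plain
+; rotate; a scalar sketch of the semantics being tested (one 64-bit element,
+; amount taken modulo 64) is:
+;   %a  = and i64 %amt, 63
+;   %hi = lshr i64 %x, %a
+;   %n  = sub i64 0, %a
+;   %m  = and i64 %n, 63
+;   %lo = shl i64 %x, %m
+;   %r  = or i64 %hi, %lo
+; which is why most targets below need only one shift pair fed by a single
+; scalar count.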
+define <4 x i64> @splatvar_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [63,63]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllq %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpsllq %xmm6, %xmm4, %xmm7
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsllq %xmm6, %xmm0, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm4, %xmm3
+; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpsrlq %xmm3, %ymm0, %ymm3
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllq %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512F-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512VL-NEXT:    vprorvq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512BW-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512VLBW-NEXT:    vprorvq %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastq %xmm1, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubq %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotq %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <4 x i64> %amt, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> %splat)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpslld %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
+; AVX1-NEXT:    vpsubd %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpsrld %xmm1, %xmm3, %xmm3
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpslld %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpslld %xmm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsrld %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512F-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512VL-NEXT:    vprorvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512BW-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastd %xmm1, %ymm1
+; AVX512VLBW-NEXT:    vprorvd %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubd %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotd %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <8 x i32> %amt, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> %splat)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpsllw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT:    vpsubw %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsllw %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastw %xmm1, %ymm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %ymm0, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_funnnel_v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastw %xmm1, %ymm1
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsllw %xmm2, %ymm0, %ymm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX512-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastw %xmm1, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubw %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <16 x i16> %amt, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> %splat)
+  ret <16 x i16> %res
+}
+
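+; For splatted byte amounts the whole vector can take a single word shift
+; (vpsllw/vpsrlw by a scalar count); the bits that spill across byte lanes
+; are then cleared with masks derived by applying the same shift to an
+; all-ones vector and broadcasting the surviving byte.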
+define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
+; AVX1-LABEL: splatvar_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpsllw %xmm3, %xmm6, %xmm7
+; AVX1-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatvar_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX2-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatvar_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX512F-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512BW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm3
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %ymm1, %ymm4, %ymm1
+; AVX512VLBW-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatvar_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
+; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpsubb %ymm1, %ymm2, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT:    vprotb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %splat = shufflevector <32 x i8> %amt, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> %splat)
+  ret <32 x i8> %res
+}
+
+;
+; Constant Shifts
+;
+
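+; With per-element constant amounts the rotate can be built without variable
+; shift instructions: the word lowerings use the multiply trick, where a
+; rotate left by c is vpmullw by 2^c for the low bits plus vpmulhuw by the
+; same power of two for the bits that wrap around, OR'd together (see the
+; [256,128,64,...] constant pools below); AVX512 targets fold straight to
+; vprorv* with the amount vector as an immediate pool.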
+define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlq $60, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrlq $50, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrlq $14, %xmm0, %xmm3
+; AVX1-NEXT:    vpsrlq $4, %xmm0, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vpsllq $4, %xmm1, %xmm3
+; AVX1-NEXT:    vpsllq $14, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsllq $50, %xmm0, %xmm3
+; AVX1-NEXT:    vpsllq $60, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
+; AVX512F-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
+; AVX512BW-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> <i64 4, i64 14, i64 50, i64 60>)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [16777216,8388608,4194304,2097152]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [268435456,134217728,67108864,33554432]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
+; AVX512F-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
+; AVX512BW-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [256,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmulhuw %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,32768,16384,8192,4096,2048,1024,512]
+; AVX1-NEXT:    vpmulhuw %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2]
+; AVX2-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2]
+; AVX512F-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2]
+; AVX512VL-NEXT:    vpmulhuw %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v16i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [16,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT:    vpsllvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v16i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x) nounwind {
+; AVX1-LABEL: constant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [256,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm9, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [256,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm7
+; AVX1-NEXT:    vpsrlw $8, %xmm7, %xmm7
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
+; AVX1-NEXT:    vpmullw %xmm9, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpsrlw $8, %xmm6, %xmm6
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpmullw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536]
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllw $2, %ymm1, %ymm3
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constant_funnnel_v32i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536]
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm3
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536]
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: constant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,254,253,252,251,250,249,248,249,250,251,252,253,254,255]
+; XOPAVX1-NEXT:    vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: constant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,255,254,253,252,251,250,249,248,249,250,251,252,253,254,255]
+; XOPAVX2-NEXT:    vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <32 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
+
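+; A single immediate amount needs no amount vector at all: the generic
+; lowering is one shift pair plus vpor (e.g. vpsrlq $14 / vpsllq $50 for the
+; v4i64 rotate by 14), AVX512 folds to an immediate vpror*, and XOP uses
+; vprot* with the complementary left-rotate amount.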
+define <4 x i64> @splatconstant_funnnel_v4i64(<4 x i64> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrlq $14, %xmm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsrlq $14, %xmm2, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vpsllq $50, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllq $50, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq $14, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllq $50, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v4i64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprorq $14, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprorq $14, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprorq $14, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v4i64:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprorq $14, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotq $50, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotq $50, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v4i64:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotq $50, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotq $50, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %x, <4 x i64> %x, <4 x i64> <i64 14, i64 14, i64 14, i64 14>)
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @splatconstant_funnnel_v8i32(<8 x i32> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrld $4, %xmm1, %xmm2
+; AVX1-NEXT:    vpslld $28, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $4, %xmm0, %xmm2
+; AVX1-NEXT:    vpslld $28, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld $4, %ymm0, %ymm1
+; AVX2-NEXT:    vpslld $28, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: splatconstant_funnnel_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vprord $4, %zmm0, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vprord $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT:    vprord $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v8i32:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vprord $4, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotd $28, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotd $28, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v8i32:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotd $28, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotd $28, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %x, <8 x i32> %x, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <8 x i32> %res
+}
+
+define <16 x i16> @splatconstant_funnnel_v16i16(<16 x i16> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlw $7, %xmm1, %xmm2
+; AVX1-NEXT:    vpsllw $9, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm2
+; AVX1-NEXT:    vpsllw $9, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $7, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $7, %ymm0, %ymm1
+; AVX512-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotw $9, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotw $9, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v16i16:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotw $9, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotw $9, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %x, <16 x i16> %x, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <16 x i16> %res
+}
+
+define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x) nounwind {
+; AVX1-LABEL: splatconstant_funnnel_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: splatconstant_funnnel_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_funnnel_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlw $4, %ymm0, %ymm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; XOPAVX1-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vprotb $4, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vprotb $4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vprotb $4, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT:    vprotb $4, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    retq
+  %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %x, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <32 x i8> %res
+}
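
The tests above all pass the same value for both data operands of llvm.fshr, which reduces the funnel shift to a rotate right: AVX512 targets select vprorq/vprord (or vprorvq/vprorvd for variable amounts) directly, XOP uses vprot*, and plain AVX1/AVX2 fall back to a shift/shift/or sequence. A minimal scalar sketch of that fold, assuming only the LangRef semantics of llvm.fshr:

declare i32 @llvm.fshr.i32(i32, i32, i32)

; fshr(x, x, n) concatenates x with itself and shifts the pair right by
; n (mod 32), so the result is exactly a rotate right of x by n.
define i32 @rotr32(i32 %x, i32 %n) {
  %r = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %n)
  ret i32 %r
}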
diff --git a/test/CodeGen/X86/vector-fshr-rot-512.ll b/test/CodeGen/X86/vector-fshr-rot-512.ll
new file mode 100644
index 0000000..e38420a
--- /dev/null
+++ b/test/CodeGen/X86/vector-fshr-rot-512.ll
@@ -0,0 +1,847 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
+
+declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>)
+declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <32 x i16> @llvm.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)
+declare <64 x i8> @llvm.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)
+
+;
+; Variable Shifts
+;
+
+define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
+; AVX512-LABEL: var_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %amt)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @var_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounwind {
+; AVX512-LABEL: var_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %amt)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @var_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm6 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm6, %zmm0, %zmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm7, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm6, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpsubw %ymm3, %ymm4, %ymm2
+; AVX512F-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpsllvd %zmm3, %zmm1, %zmm3
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm7, %ymm2
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512F-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm6 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm6, %zmm0, %zmm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm7 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm7, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm6, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    vpsubw %ymm3, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm3, %zmm1, %zmm3
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm7, %ymm2
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm3, %zmm1
+; AVX512VL-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpandq %zmm2, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubw %zmm1, %zmm4, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpandq %zmm2, %zmm1, %zmm3
+; AVX512VLBW-NEXT:    vpsrlvw %zmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubw %zmm1, %zmm4, %zmm1
+; AVX512VLBW-NEXT:    vpandq %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> %amt)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: var_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vpor %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512F-NEXT:    vpsubb %ymm2, %ymm6, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512F-NEXT:    vpand %ymm9, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm11, %ymm10, %ymm10
+; AVX512F-NEXT:    vpor %ymm4, %ymm10, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpand %ymm10, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm12
+; AVX512F-NEXT:    vpor %ymm4, %ymm12, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpsubb %ymm3, %ymm6, %ymm3
+; AVX512F-NEXT:    vpand %ymm8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $6, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand %ymm11, %ymm4, %ymm4
+; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $7, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm10, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm4
+; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: var_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm6
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpor %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpsubb %ymm2, %ymm6, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512VL-NEXT:    vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm10
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm11, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpor %ymm4, %ymm10, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT:    vpand %ymm10, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm12
+; AVX512VL-NEXT:    vpor %ymm4, %ymm12, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpsubb %ymm3, %ymm6, %ymm3
+; AVX512VL-NEXT:    vpand %ymm8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $6, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand %ymm11, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $7, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm10, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: var_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpandq %zmm2, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpsllw $5, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpaddb %zmm3, %zmm3, %zmm4
+; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm3, %k2
+; AVX512BW-NEXT:    vpsrlw $4, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT:    vpblendmb %zmm3, %zmm0, %zmm3 {%k2}
+; AVX512BW-NEXT:    vpsrlw $2, %zmm3, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpsrlw $1, %zmm3, %zmm5
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT:    vpaddb %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %zmm1, %zmm4, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $5, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm2
+; AVX512BW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k2
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512BW-NEXT:    vpsllw $2, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512BW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: var_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpandq %zmm2, %zmm1, %zmm3
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpaddb %zmm3, %zmm3, %zmm4
+; AVX512VLBW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm3, %k2
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpblendmb %zmm3, %zmm0, %zmm3 {%k2}
+; AVX512VLBW-NEXT:    vpsrlw $2, %zmm3, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512VLBW-NEXT:    vpsrlw $1, %zmm3, %zmm5
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpaddb %zmm4, %zmm4, %zmm4
+; AVX512VLBW-NEXT:    vpmovb2m %zmm4, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm5, %zmm3 {%k1}
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %zmm1, %zmm4, %zmm1
+; AVX512VLBW-NEXT:    vpandq %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $5, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpaddb %zmm1, %zmm1, %zmm2
+; AVX512VLBW-NEXT:    vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k2
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512VLBW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> %amt)
+  ret <64 x i8> %res
+}
+
+;
+; Uniform Variable Shifts
+;
+
+define <8 x i64> @splatvar_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
+; AVX512-LABEL: splatvar_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq %xmm1, %zmm1
+; AVX512-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %splat = shufflevector <8 x i64> %amt, <8 x i64> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %splat)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounwind {
+; AVX512-LABEL: splatvar_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastd %xmm1, %zmm1
+; AVX512-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %splat = shufflevector <16 x i32> %amt, <16 x i32> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %splat)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm3, %xmm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm5, %xmm2
+; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastw %xmm2, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT:    vpsubw %xmm2, %xmm5, %xmm2
+; AVX512VL-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastw %xmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm3, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastw %xmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm3, %zmm0, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %splat = shufflevector <32 x i16> %amt, <32 x i16> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> %splat)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
+; AVX512F-LABEL: splatvar_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm3, %xmm2
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512F-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm5, %ymm6
+; AVX512F-NEXT:    vpbroadcastb %xmm6, %ymm6
+; AVX512F-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm7, %xmm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512F-NEXT:    vpbroadcastb %xmm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatvar_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm5, %ymm6
+; AVX512VL-NEXT:    vpbroadcastb %xmm6, %ymm6
+; AVX512VL-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm7, %xmm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpbroadcastb %xmm5, %ymm5
+; AVX512VL-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsrlw %xmm3, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpternlogd $255, %zmm5, %zmm5, %zmm5
+; AVX512BW-NEXT:    vpsrlw %xmm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpbroadcastb %xmm3, %zmm3
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpsllw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsllw %xmm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatvar_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsrlw %xmm3, %zmm0, %zmm4
+; AVX512VLBW-NEXT:    vpternlogd $255, %zmm5, %zmm5, %zmm5
+; AVX512VLBW-NEXT:    vpsrlw %xmm3, %zmm5, %zmm3
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm3, %zmm3
+; AVX512VLBW-NEXT:    vpandq %zmm3, %zmm4, %zmm3
+; AVX512VLBW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBW-NEXT:    vpsubb %xmm1, %xmm4, %xmm1
+; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT:    vpsllw %xmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpsllw %xmm1, %zmm5, %zmm1
+; AVX512VLBW-NEXT:    vpbroadcastb %xmm1, %zmm1
+; AVX512VLBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %splat = shufflevector <64 x i8> %amt, <64 x i8> undef, <64 x i32> zeroinitializer
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> %splat)
+  ret <64 x i8> %res
+}
+
+;
+; Constant Shifts
+;
+
+define <8 x i64> @constant_funnnel_v8i64(<8 x i64> %x) nounwind {
+; AVX512-LABEL: constant_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprorvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @constant_funnnel_v16i32(<16 x i32> %x) nounwind {
+; AVX512-LABEL: constant_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprorvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x) nounwind {
+; AVX512F-LABEL: constant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX512F-NEXT:    vpmulhuw %ymm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX512VL-NEXT:    vpmulhuw %ymm2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2]
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512VL-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x) nounwind {
+; AVX512F-LABEL: constant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536]
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm8
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm0[8],ymm5[8],ymm0[9],ymm5[9],ymm0[10],ymm5[10],ymm0[11],ymm5[11],ymm0[12],ymm5[12],ymm0[13],ymm5[13],ymm0[14],ymm5[14],ymm0[15],ymm5[15],ymm0[24],ymm5[24],ymm0[25],ymm5[25],ymm0[26],ymm5[26],ymm0[27],ymm5[27],ymm0[28],ymm5[28],ymm0[29],ymm5[29],ymm0[30],ymm5[30],ymm0[31],ymm5[31]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm10, %ymm9, %ymm9
+; AVX512F-NEXT:    vpsrlw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[1],ymm5[1],ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[4],ymm5[4],ymm0[5],ymm5[5],ymm0[6],ymm5[6],ymm0[7],ymm5[7],ymm0[16],ymm5[16],ymm0[17],ymm5[17],ymm0[18],ymm5[18],ymm0[19],ymm5[19],ymm0[20],ymm5[20],ymm0[21],ymm5[21],ymm0[22],ymm5[22],ymm0[23],ymm5[23]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm9, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm5[8],ymm1[9],ymm5[9],ymm1[10],ymm5[10],ymm1[11],ymm5[11],ymm1[12],ymm5[12],ymm1[13],ymm5[13],ymm1[14],ymm5[14],ymm1[15],ymm5[15],ymm1[24],ymm5[24],ymm1[25],ymm5[25],ymm1[26],ymm5[26],ymm1[27],ymm5[27],ymm1[28],ymm5[28],ymm1[29],ymm5[29],ymm1[30],ymm5[30],ymm1[31],ymm5[31]
+; AVX512F-NEXT:    vpmullw %ymm10, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[1],ymm5[1],ymm1[2],ymm5[2],ymm1[3],ymm5[3],ymm1[4],ymm5[4],ymm1[5],ymm5[5],ymm1[6],ymm5[6],ymm1[7],ymm5[7],ymm1[16],ymm5[16],ymm1[17],ymm5[17],ymm1[18],ymm5[18],ymm1[19],ymm5[19],ymm1[20],ymm5[20],ymm1[21],ymm5[21],ymm1[22],ymm5[22],ymm1[23],ymm5[23]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536]
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm8
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512VL-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm9, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512VL-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpackuswb %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpmullw %ymm9, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536]
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT:    vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpsllw $2, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: constant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536]
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm2
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT:    vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpsllw $2, %zmm2, %zmm3
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT:    vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512VLBW-NEXT:    vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpsrlw $8, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm2, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
+  ret <64 x i8> %res
+}
+
+;
+; Uniform Constant Shifts
+;
+
+define <8 x i64> @splatconstant_funnnel_v8i64(<8 x i64> %x) nounwind {
+; AVX512-LABEL: splatconstant_funnnel_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprorq $14, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>)
+  ret <8 x i64> %res
+}
+
+define <16 x i32> @splatconstant_funnnel_v16i32(<16 x i32> %x) nounwind {
+; AVX512-LABEL: splatconstant_funnnel_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vprord $4, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
+  ret <16 x i32> %res
+}
+
+define <32 x i16> @splatconstant_funnnel_v32i16(<32 x i16> %x) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $7, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsllw $9, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsllw $9, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $7, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsllw $9, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $7, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsllw $9, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v32i16:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $7, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $9, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
+  ret <32 x i16> %res
+}
+
+define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x) nounwind {
+; AVX512F-LABEL: splatconstant_funnnel_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $4, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpsrlw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpsllw $4, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    retq
+  %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
+  ret <64 x i8> %res
+}
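
For the element types with no native rotate instruction here (v32i16 and v64i8 without AVX512BW, and v64i8 even with it), the lowering above expands the rotate into two complementary shifts joined by an OR, masking the amount to the element width first. A scalar sketch of that branchless expansion, with names chosen for illustration:

; rotr(x, amt) == (x >> (amt & 15)) | (x << (-amt & 15)) for i16;
; both masked shift amounts stay below 16, so neither shift is poison.
define i16 @rotr16_expanded(i16 %x, i16 %amt) {
  %k   = and i16 %amt, 15
  %hi  = lshr i16 %x, %k
  %neg = sub i16 0, %amt
  %nk  = and i16 %neg, 15
  %lo  = shl i16 %x, %nk
  %r   = or i16 %lo, %hi
  ret i16 %r
}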
diff --git a/test/CodeGen/X86/vector-gep.ll b/test/CodeGen/X86/vector-gep.ll
index 6b3e709..8f62fe5 100644
--- a/test/CodeGen/X86/vector-gep.ll
+++ b/test/CodeGen/X86/vector-gep.ll
@@ -20,9 +20,8 @@
 define i32 @AGEP1(<4 x i32*> %param) nounwind {
 ; CHECK-LABEL: AGEP1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpaddd {{\.LCPI.*}}, %xmm0, %xmm0
-; CHECK-NEXT:    vpextrd $3, %xmm0, %eax
-; CHECK-NEXT:    movl (%eax), %eax
+; CHECK-NEXT:    vextractps $3, %xmm0, %eax
+; CHECK-NEXT:    movl 16(%eax), %eax
 ; CHECK-NEXT:    retl
   %A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
   %k = extractelement <4 x i32*> %A2, i32 3
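
The vector-gep.ll change checks that the constant per-lane offset is folded into the load's addressing mode: lane 3 of the <i32 1, i32 2, i32 3, i32 4> GEP adds 4 i32 elements (16 bytes), so the new code extracts the original lane-3 pointer and loads from displacement 16 instead of materializing the vector add with vpaddd. A scalar sketch of the folded form, assuming a hypothetical helper that receives lane 3 of %param:

define i32 @AGEP1_lane3(i32* %p3) {
  ; +4 i32 elements == +16 bytes, matching the movl 16(%eax) above
  %a = getelementptr i32, i32* %p3, i32 4
  %v = load i32, i32* %a
  ret i32 %v
}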
diff --git a/test/CodeGen/X86/vector-reduce-mul-widen.ll b/test/CodeGen/X86/vector-reduce-mul-widen.ll
index 7fbd724..32bb4bf 100644
--- a/test/CodeGen/X86/vector-reduce-mul-widen.ll
+++ b/test/CodeGen/X86/vector-reduce-mul-widen.ll
@@ -3095,56 +3095,56 @@
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT:    vpmullw %zmm2, %zmm3, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BW-NEXT:    vpmullw %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX512BW-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512BW-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm3
+; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT:    vpmullw %zmm3, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmullw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
@@ -3155,56 +3155,56 @@
 ; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BWVL-NEXT:    vpmullw %zmm2, %zmm3, %zmm3
-; AVX512BWVL-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BWVL-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
+; AVX512BWVL-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BWVL-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BWVL-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm1, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpsrlw $8, %xmm0, %xmm3
+; AVX512BWVL-NEXT:    vpsrlw $8, %xmm0, %xmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BWVL-NEXT:    vpmullw %zmm3, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpmullw %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpextrb $0, %xmm0, %eax
 ; AVX512BWVL-NEXT:    # kill: def $al killed $al killed $eax
diff --git a/test/CodeGen/X86/vector-reduce-mul.ll b/test/CodeGen/X86/vector-reduce-mul.ll
index ea80a5b..ab1ebd4 100644
--- a/test/CodeGen/X86/vector-reduce-mul.ll
+++ b/test/CodeGen/X86/vector-reduce-mul.ll
@@ -790,35 +790,12 @@
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
 ;
-; AVX512BW-LABEL: test_v2i32:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-NEXT:    retq
-;
-; AVX512BWVL-LABEL: test_v2i32:
-; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT:    vmovd %xmm0, %eax
-; AVX512BWVL-NEXT:    retq
-;
-; AVX512DQ-LABEL: test_v2i32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vmovd %xmm0, %eax
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
-;
-; AVX512DQVL-LABEL: test_v2i32:
-; AVX512DQVL:       # %bb.0:
-; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT:    vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT:    vmovd %xmm0, %eax
-; AVX512DQVL-NEXT:    retq
+; AVX512-LABEL: test_v2i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    retq
   %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v2i32(<2 x i32> %a0)
   ret i32 %1
 }
@@ -1156,39 +1133,13 @@
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
 ;
-; AVX512BW-LABEL: test_v2i16:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT:    retq
-;
-; AVX512BWVL-LABEL: test_v2i16:
-; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT:    vmovd %xmm0, %eax
-; AVX512BWVL-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512BWVL-NEXT:    retq
-;
-; AVX512DQ-LABEL: test_v2i16:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vmovd %xmm0, %eax
-; AVX512DQ-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
-;
-; AVX512DQVL-LABEL: test_v2i16:
-; AVX512DQVL:       # %bb.0:
-; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT:    vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT:    vmovd %xmm0, %eax
-; AVX512DQVL-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512DQVL-NEXT:    retq
+; AVX512-LABEL: test_v2i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT:    retq
   %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v2i16(<2 x i16> %a0)
   ret i16 %1
 }
@@ -1634,39 +1585,13 @@
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
 ;
-; AVX512BW-LABEL: test_v2i8:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT:    retq
-;
-; AVX512BWVL-LABEL: test_v2i8:
-; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT:    vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT:    # kill: def $al killed $al killed $eax
-; AVX512BWVL-NEXT:    retq
-;
-; AVX512DQ-LABEL: test_v2i8:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpextrb $0, %xmm0, %eax
-; AVX512DQ-NEXT:    # kill: def $al killed $al killed $eax
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
-;
-; AVX512DQVL-LABEL: test_v2i8:
-; AVX512DQVL:       # %bb.0:
-; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT:    vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT:    vpextrb $0, %xmm0, %eax
-; AVX512DQVL-NEXT:    # kill: def $al killed $al killed $eax
-; AVX512DQVL-NEXT:    retq
+; AVX512-LABEL: test_v2i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    # kill: def $al killed $al killed $eax
+; AVX512-NEXT:    retq
   %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v2i8(<2 x i8> %a0)
   ret i8 %1
 }
@@ -3125,56 +3050,56 @@
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT:    vpmullw %zmm2, %zmm3, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BW-NEXT:    vpmullw %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX512BW-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512BW-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm3
+; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm2
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT:    vpmullw %zmm3, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmullw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
@@ -3185,56 +3110,56 @@
 ; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BWVL-NEXT:    vpmullw %zmm2, %zmm3, %zmm3
-; AVX512BWVL-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BWVL-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
+; AVX512BWVL-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BWVL-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm3, %zmm3
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm3, %zmm3
+; AVX512BWVL-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
 ; AVX512BWVL-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpackuswb %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm1, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm2 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm3, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpmullw %zmm0, %zmm2, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpsrlw $8, %xmm0, %xmm3
+; AVX512BWVL-NEXT:    vpsrlw $8, %xmm0, %xmm2
 ; AVX512BWVL-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BWVL-NEXT:    vpmullw %zmm3, %zmm0, %zmm0
-; AVX512BWVL-NEXT:    vpandq %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpmullw %zmm2, %zmm0, %zmm0
+; AVX512BWVL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
 ; AVX512BWVL-NEXT:    vpextrb $0, %xmm0, %eax
 ; AVX512BWVL-NEXT:    # kill: def $al killed $al killed $eax
diff --git a/test/CodeGen/X86/vector-reduce-smax-widen.ll b/test/CodeGen/X86/vector-reduce-smax-widen.ll
index 5376d43..7be5175 100644
--- a/test/CodeGen/X86/vector-reduce-smax-widen.ll
+++ b/test/CodeGen/X86/vector-reduce-smax-widen.ll
@@ -36,17 +36,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pxor %xmm0, %xmm3
-; SSE41-NEXT:    pxor %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pxor %xmm3, %xmm4
+; SSE41-NEXT:    pxor %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
@@ -120,17 +119,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
 ; SSE41-NEXT:    pxor %xmm3, %xmm4
-; SSE41-NEXT:    movdqa %xmm4, %xmm5
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    pxor %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
@@ -141,10 +139,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -270,17 +267,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -291,10 +287,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm3, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -304,10 +299,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -317,10 +311,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm5, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -513,10 +506,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -526,22 +518,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT:    movdqa %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm9, %xmm0
-; SSE41-NEXT:    movdqa %xmm8, %xmm1
+; SSE41-NEXT:    movdqa %xmm4, %xmm1
 ; SSE41-NEXT:    pxor %xmm9, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm8, %xmm3
+; SSE41-NEXT:    pxor %xmm9, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm6, %xmm0
@@ -552,10 +542,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -565,10 +554,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -578,10 +566,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -591,10 +578,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -604,10 +590,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -1135,31 +1120,28 @@
 ;
 ; SSE41-LABEL: test_v8i16:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i16 @llvm.experimental.vector.reduce.smax.i16.v8i16(<8 x i16> %a0)
@@ -1184,11 +1166,10 @@
 ; SSE41-LABEL: test_v16i16:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1196,11 +1177,10 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1209,11 +1189,10 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1222,11 +1201,10 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1256,11 +1234,10 @@
 ; SSE41-NEXT:    pmaxsw %xmm3, %xmm1
 ; SSE41-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1271,11 +1248,10 @@
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1285,11 +1261,10 @@
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1300,11 +1275,10 @@
 ; AVX512-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1342,11 +1316,10 @@
 ; SSE41-NEXT:    pmaxsw %xmm4, %xmm0
 ; SSE41-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1363,11 +1336,10 @@
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1379,11 +1351,10 @@
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1395,11 +1366,10 @@
 ; AVX512-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1612,38 +1582,35 @@
 ;
 ; SSE41-LABEL: test_v16i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v16i8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX-NEXT:    xorb $127, %al
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16i8:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i8 @llvm.experimental.vector.reduce.smax.i8.v16i8(<16 x i8> %a0)
@@ -1691,14 +1658,13 @@
 ; SSE41-LABEL: test_v32i8:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1706,13 +1672,12 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $127, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1721,13 +1686,12 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $127, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1736,13 +1700,12 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1803,14 +1766,13 @@
 ; SSE41-NEXT:    pmaxsb %xmm3, %xmm1
 ; SSE41-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1821,13 +1783,12 @@
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $127, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1837,13 +1798,12 @@
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $127, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1854,13 +1814,12 @@
 ; AVX512-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1945,14 +1904,13 @@
 ; SSE41-NEXT:    pmaxsb %xmm4, %xmm0
 ; SSE41-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1969,13 +1927,12 @@
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $127, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1987,13 +1944,12 @@
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $127, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2005,13 +1961,12 @@
 ; AVX512-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-reduce-smax.ll b/test/CodeGen/X86/vector-reduce-smax.ll
index b77134d..162af26 100644
--- a/test/CodeGen/X86/vector-reduce-smax.ll
+++ b/test/CodeGen/X86/vector-reduce-smax.ll
@@ -36,17 +36,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pxor %xmm0, %xmm3
-; SSE41-NEXT:    pxor %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pxor %xmm3, %xmm4
+; SSE41-NEXT:    pxor %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
@@ -120,17 +119,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
 ; SSE41-NEXT:    pxor %xmm3, %xmm4
-; SSE41-NEXT:    movdqa %xmm4, %xmm5
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    pxor %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
@@ -141,10 +139,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -270,17 +267,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -291,10 +287,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm3, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -304,10 +299,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -317,10 +311,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm5, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -513,10 +506,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -526,22 +518,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT:    movdqa %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm9, %xmm0
-; SSE41-NEXT:    movdqa %xmm8, %xmm1
+; SSE41-NEXT:    movdqa %xmm4, %xmm1
 ; SSE41-NEXT:    pxor %xmm9, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm8, %xmm3
+; SSE41-NEXT:    pxor %xmm9, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm6, %xmm0
@@ -552,10 +542,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -565,10 +554,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -578,10 +566,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -591,10 +578,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -604,10 +590,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -724,17 +709,15 @@
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
 ; SSE41-NEXT:    psrad $31, %xmm3
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm3, %xmm2
-; SSE41-NEXT:    pxor %xmm0, %xmm2
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm2, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movd %xmm3, %eax
@@ -1187,12 +1170,10 @@
 ; SSE41-NEXT:    pxor %xmm0, %xmm2
 ; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movdqa %xmm2, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movd %xmm1, %eax
@@ -1357,31 +1338,28 @@
 ;
 ; SSE41-LABEL: test_v8i16:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i16 @llvm.experimental.vector.reduce.smax.i16.v8i16(<8 x i16> %a0)
@@ -1406,11 +1384,10 @@
 ; SSE41-LABEL: test_v16i16:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1418,11 +1395,10 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1431,11 +1407,10 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1444,11 +1419,10 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1478,11 +1452,10 @@
 ; SSE41-NEXT:    pmaxsw %xmm3, %xmm1
 ; SSE41-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1493,11 +1466,10 @@
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1507,11 +1479,10 @@
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1522,11 +1493,10 @@
 ; AVX512-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1564,11 +1534,10 @@
 ; SSE41-NEXT:    pmaxsw %xmm4, %xmm0
 ; SSE41-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1585,11 +1554,10 @@
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1601,11 +1569,10 @@
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1617,11 +1584,10 @@
 ; AVX512-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1690,12 +1656,10 @@
 ; SSE41-NEXT:    pxor %xmm0, %xmm2
 ; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movdqa %xmm2, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    pextrb $0, %xmm1, %eax
@@ -1965,38 +1929,35 @@
 ;
 ; SSE41-LABEL: test_v16i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v16i8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX-NEXT:    xorb $127, %al
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16i8:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i8 @llvm.experimental.vector.reduce.smax.i8.v16i8(<16 x i8> %a0)
@@ -2044,14 +2005,13 @@
 ; SSE41-LABEL: test_v32i8:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2059,13 +2019,12 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $127, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2074,13 +2033,12 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $127, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2089,13 +2047,12 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -2156,14 +2113,13 @@
 ; SSE41-NEXT:    pmaxsb %xmm3, %xmm1
 ; SSE41-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2174,13 +2130,12 @@
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $127, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2190,13 +2145,12 @@
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $127, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2207,13 +2161,12 @@
 ; AVX512-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -2298,14 +2251,13 @@
 ; SSE41-NEXT:    pmaxsb %xmm4, %xmm0
 ; SSE41-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $127, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2322,13 +2274,12 @@
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmaxsb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $127, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2340,13 +2291,12 @@
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $127, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2358,13 +2308,12 @@
 ; AVX512-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $127, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-reduce-smin-widen.ll b/test/CodeGen/X86/vector-reduce-smin-widen.ll
index 86a9ce7..dcc522a 100644
--- a/test/CodeGen/X86/vector-reduce-smin-widen.ll
+++ b/test/CodeGen/X86/vector-reduce-smin-widen.ll
@@ -44,10 +44,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -127,10 +126,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -140,10 +138,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -277,10 +274,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -290,10 +286,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -303,10 +298,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -316,10 +310,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -512,10 +505,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -525,10 +517,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -538,10 +529,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm8
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm8[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm10, %xmm0
+; SSE41-NEXT:    por %xmm8, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -551,10 +541,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -564,10 +553,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -577,10 +565,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -590,10 +577,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -603,10 +589,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -1134,31 +1119,28 @@
 ;
 ; SSE41-LABEL: test_v8i16:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i16 @llvm.experimental.vector.reduce.smin.i16.v8i16(<8 x i16> %a0)
@@ -1183,11 +1165,10 @@
 ; SSE41-LABEL: test_v16i16:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pminsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1195,11 +1176,10 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1208,11 +1188,10 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1221,11 +1200,10 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1255,11 +1233,10 @@
 ; SSE41-NEXT:    pminsw %xmm3, %xmm1
 ; SSE41-NEXT:    pminsw %xmm2, %xmm0
 ; SSE41-NEXT:    pminsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1270,11 +1247,10 @@
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1284,11 +1260,10 @@
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1299,11 +1274,10 @@
 ; AVX512-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1341,11 +1315,10 @@
 ; SSE41-NEXT:    pminsw %xmm4, %xmm0
 ; SSE41-NEXT:    pminsw %xmm2, %xmm0
 ; SSE41-NEXT:    pminsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1362,11 +1335,10 @@
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1378,11 +1350,10 @@
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1394,11 +1365,10 @@
 ; AVX512-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1611,38 +1581,35 @@
 ;
 ; SSE41-LABEL: test_v16i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v16i8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX-NEXT:    xorb $-128, %al
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16i8:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i8 @llvm.experimental.vector.reduce.smin.i8.v16i8(<16 x i8> %a0)
@@ -1690,14 +1657,13 @@
 ; SSE41-LABEL: test_v32i8:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pminsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1705,13 +1671,12 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $-128, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1720,13 +1685,12 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $-128, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1735,13 +1699,12 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1802,14 +1765,13 @@
 ; SSE41-NEXT:    pminsb %xmm3, %xmm1
 ; SSE41-NEXT:    pminsb %xmm2, %xmm0
 ; SSE41-NEXT:    pminsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1820,13 +1782,12 @@
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $-128, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1836,13 +1797,12 @@
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $-128, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1853,13 +1813,12 @@
 ; AVX512-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1944,14 +1903,13 @@
 ; SSE41-NEXT:    pminsb %xmm4, %xmm0
 ; SSE41-NEXT:    pminsb %xmm2, %xmm0
 ; SSE41-NEXT:    pminsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1968,13 +1926,12 @@
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $-128, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1986,13 +1943,12 @@
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $-128, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2004,13 +1960,12 @@
 ; AVX512-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-reduce-smin.ll b/test/CodeGen/X86/vector-reduce-smin.ll
index b4de8f8..b27812e 100644
--- a/test/CodeGen/X86/vector-reduce-smin.ll
+++ b/test/CodeGen/X86/vector-reduce-smin.ll
@@ -44,10 +44,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -127,10 +126,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -140,10 +138,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -277,10 +274,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -290,10 +286,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -303,10 +298,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -316,10 +310,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -512,10 +505,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -525,10 +517,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -538,10 +529,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm8
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm8[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm10, %xmm0
+; SSE41-NEXT:    por %xmm8, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -551,10 +541,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -564,10 +553,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -577,10 +565,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -590,10 +577,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -603,10 +589,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -723,17 +708,15 @@
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
 ; SSE41-NEXT:    psrad $31, %xmm3
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    pxor %xmm0, %xmm2
-; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm2, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    pxor %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movd %xmm3, %eax
@@ -1181,17 +1164,15 @@
 ; SSE41-NEXT:    psrad $16, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm3, %xmm2
-; SSE41-NEXT:    pxor %xmm0, %xmm2
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm2, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movd %xmm1, %eax
@@ -1356,31 +1337,28 @@
 ;
 ; SSE41-LABEL: test_v8i16:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i16 @llvm.experimental.vector.reduce.smin.i16.v8i16(<8 x i16> %a0)
@@ -1405,11 +1383,10 @@
 ; SSE41-LABEL: test_v16i16:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pminsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1417,11 +1394,10 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1430,11 +1406,10 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1443,11 +1418,10 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1477,11 +1451,10 @@
 ; SSE41-NEXT:    pminsw %xmm3, %xmm1
 ; SSE41-NEXT:    pminsw %xmm2, %xmm0
 ; SSE41-NEXT:    pminsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1492,11 +1465,10 @@
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1506,11 +1478,10 @@
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1521,11 +1492,10 @@
 ; AVX512-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1563,11 +1533,10 @@
 ; SSE41-NEXT:    pminsw %xmm4, %xmm0
 ; SSE41-NEXT:    pminsw %xmm2, %xmm0
 ; SSE41-NEXT:    pminsw %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1584,11 +1553,10 @@
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1600,11 +1568,10 @@
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1616,11 +1583,10 @@
 ; AVX512-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1684,17 +1650,15 @@
 ; SSE41-NEXT:    psrad $24, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm3, %xmm2
-; SSE41-NEXT:    pxor %xmm0, %xmm2
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm2, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    pextrb $0, %xmm1, %eax
@@ -1964,38 +1928,35 @@
 ;
 ; SSE41-LABEL: test_v16i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v16i8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX-NEXT:    xorb $-128, %al
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16i8:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
   %1 = call i8 @llvm.experimental.vector.reduce.smin.i8.v16i8(<16 x i8> %a0)
@@ -2043,14 +2004,13 @@
 ; SSE41-LABEL: test_v32i8:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pminsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2058,13 +2018,12 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $-128, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2073,13 +2032,12 @@
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $-128, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2088,13 +2046,12 @@
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -2155,14 +2112,13 @@
 ; SSE41-NEXT:    pminsb %xmm3, %xmm1
 ; SSE41-NEXT:    pminsb %xmm2, %xmm0
 ; SSE41-NEXT:    pminsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2173,13 +2129,12 @@
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $-128, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2189,13 +2144,12 @@
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $-128, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2206,13 +2160,12 @@
 ; AVX512-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -2297,14 +2250,13 @@
 ; SSE41-NEXT:    pminsb %xmm4, %xmm0
 ; SSE41-NEXT:    pminsb %xmm2, %xmm0
 ; SSE41-NEXT:    pminsb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    pminub %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    xorb $-128, %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2321,13 +2273,12 @@
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpminsb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    xorb $-128, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2339,13 +2290,12 @@
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    xorb $-128, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2357,13 +2307,12 @@
 ; AVX512-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512-NEXT:    xorb $-128, %al
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-reduce-umax-widen.ll b/test/CodeGen/X86/vector-reduce-umax-widen.ll
index 8407da01..46c9599 100644
--- a/test/CodeGen/X86/vector-reduce-umax-widen.ll
+++ b/test/CodeGen/X86/vector-reduce-umax-widen.ll
@@ -36,17 +36,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pxor %xmm0, %xmm3
-; SSE41-NEXT:    pxor %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pxor %xmm3, %xmm4
+; SSE41-NEXT:    pxor %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
@@ -123,17 +122,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
 ; SSE41-NEXT:    pxor %xmm3, %xmm4
-; SSE41-NEXT:    movdqa %xmm4, %xmm5
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    pxor %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
@@ -144,10 +142,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -283,17 +280,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -304,10 +300,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm3, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -317,10 +312,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -330,10 +324,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm5, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -542,10 +535,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -555,22 +547,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT:    movdqa %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm9, %xmm0
-; SSE41-NEXT:    movdqa %xmm8, %xmm1
+; SSE41-NEXT:    movdqa %xmm4, %xmm1
 ; SSE41-NEXT:    pxor %xmm9, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm8, %xmm3
+; SSE41-NEXT:    pxor %xmm9, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm6, %xmm0
@@ -581,10 +571,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -594,10 +583,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -607,10 +595,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -620,10 +607,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -633,10 +619,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -1181,8 +1166,8 @@
 ; SSE2-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1227,8 +1212,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE2-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1284,18 +1269,18 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v8i16:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1304,8 +1289,8 @@
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    notl %eax
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
 ;
@@ -1314,8 +1299,8 @@
 ; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1324,8 +1309,8 @@
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    retq
   %1 = call i16 @llvm.experimental.vector.reduce.umax.i16.v8i16(<8 x i16> %a0)
@@ -1354,8 +1339,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1363,10 +1348,10 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1377,8 +1362,8 @@
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1390,8 +1375,8 @@
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    notl %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1402,8 +1387,8 @@
 ; AVX512BW-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1414,8 +1399,8 @@
 ; AVX512VL-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1449,8 +1434,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1460,10 +1445,10 @@
 ; SSE41-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1477,8 +1462,8 @@
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1491,8 +1476,8 @@
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    notl %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1505,8 +1490,8 @@
 ; AVX512BW-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1519,8 +1504,8 @@
 ; AVX512VL-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1562,8 +1547,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1577,10 +1562,10 @@
 ; SSE41-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1600,8 +1585,8 @@
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1616,8 +1601,8 @@
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    notl %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1631,8 +1616,8 @@
 ; AVX512BW-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1646,8 +1631,8 @@
 ; AVX512VL-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1821,13 +1806,13 @@
 ; SSE41-LABEL: test_v16i8:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1835,11 +1820,11 @@
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX-NEXT:    notb %al
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
 ;
@@ -1850,8 +1835,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1862,8 +1847,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    retq
   %1 = call i8 @llvm.experimental.vector.reduce.umax.i8.v16i8(<16 x i8> %a0)
@@ -1892,13 +1877,13 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1908,11 +1893,11 @@
 ; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    notb %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1923,11 +1908,11 @@
 ; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    notb %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1940,8 +1925,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1954,8 +1939,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1989,13 +1974,13 @@
 ; SSE41-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2008,11 +1993,11 @@
 ; AVX1-NEXT:    vpmaxub %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    notb %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2024,11 +2009,11 @@
 ; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    notb %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2043,8 +2028,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -2059,8 +2044,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -2102,13 +2087,13 @@
 ; SSE41-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2127,11 +2112,11 @@
 ; AVX1-NEXT:    vpmaxub %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    notb %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2145,11 +2130,11 @@
 ; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    notb %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2165,8 +2150,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -2182,8 +2167,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-reduce-umax.ll b/test/CodeGen/X86/vector-reduce-umax.ll
index 954911d..32ea3f5 100644
--- a/test/CodeGen/X86/vector-reduce-umax.ll
+++ b/test/CodeGen/X86/vector-reduce-umax.ll
@@ -36,17 +36,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pxor %xmm0, %xmm3
-; SSE41-NEXT:    pxor %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pxor %xmm3, %xmm4
+; SSE41-NEXT:    pxor %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
@@ -123,17 +122,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
 ; SSE41-NEXT:    pxor %xmm3, %xmm4
-; SSE41-NEXT:    movdqa %xmm4, %xmm5
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm5
+; SSE41-NEXT:    pxor %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
 ; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
@@ -144,10 +142,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -283,17 +280,16 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm5, %xmm0
-; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
 ; SSE41-NEXT:    pxor %xmm5, %xmm6
-; SSE41-NEXT:    movdqa %xmm6, %xmm7
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm0, %xmm7
+; SSE41-NEXT:    pxor %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm8, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
 ; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -304,10 +300,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movapd %xmm3, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -317,10 +312,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -330,10 +324,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm5, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -542,10 +535,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -555,22 +547,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT:    movdqa %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm9, %xmm0
-; SSE41-NEXT:    movdqa %xmm8, %xmm1
+; SSE41-NEXT:    movdqa %xmm4, %xmm1
 ; SSE41-NEXT:    pxor %xmm9, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    movdqa %xmm8, %xmm3
+; SSE41-NEXT:    pxor %xmm9, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    pand %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm6, %xmm0
@@ -581,10 +571,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -594,10 +583,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -607,10 +595,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -620,10 +607,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -633,10 +619,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -775,12 +760,10 @@
 ; SSE41-NEXT:    pxor %xmm0, %xmm3
 ; SSE41-NEXT:    pxor %xmm2, %xmm0
 ; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movd %xmm2, %eax
@@ -1264,12 +1247,10 @@
 ; SSE41-NEXT:    por %xmm0, %xmm3
 ; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movd %xmm2, %eax
@@ -1406,18 +1387,18 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v8i16:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1426,8 +1407,8 @@
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    notl %eax
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
 ;
@@ -1436,8 +1417,8 @@
 ; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1446,8 +1427,8 @@
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    retq
   %1 = call i16 @llvm.experimental.vector.reduce.umax.i16.v8i16(<8 x i16> %a0)
@@ -1476,8 +1457,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1485,10 +1466,10 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1499,8 +1480,8 @@
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1512,8 +1493,8 @@
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    notl %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1524,8 +1505,8 @@
 ; AVX512BW-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1536,8 +1517,8 @@
 ; AVX512VL-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1571,8 +1552,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    pmaxsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1582,10 +1563,10 @@
 ; SSE41-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1599,8 +1580,8 @@
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1613,8 +1594,8 @@
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    notl %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1627,8 +1608,8 @@
 ; AVX512BW-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1641,8 +1622,8 @@
 ; AVX512VL-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1684,8 +1665,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    pmaxsw %xmm1, %xmm0
-; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1699,10 +1680,10 @@
 ; SSE41-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    phminposuw %xmm0, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    phminposuw %xmm1, %xmm0
 ; SSE41-NEXT:    movd %xmm0, %eax
+; SSE41-NEXT:    notl %eax
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -1722,8 +1703,8 @@
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1738,8 +1719,8 @@
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    notl %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1753,8 +1734,8 @@
 ; AVX512BW-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
+; AVX512BW-NEXT:    notl %eax
 ; AVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1768,8 +1749,8 @@
 ; AVX512VL-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm0, %eax
+; AVX512VL-NEXT:    notl %eax
 ; AVX512VL-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1815,18 +1796,16 @@
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    por %xmm0, %xmm3
-; SSE41-NEXT:    por %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    por %xmm3, %xmm4
+; SSE41-NEXT:    por %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    pextrb $0, %xmm2, %eax
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
@@ -2070,13 +2049,13 @@
 ; SSE41-LABEL: test_v16i8:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2084,11 +2063,11 @@
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX-NEXT:    notb %al
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
 ;
@@ -2099,8 +2078,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -2111,8 +2090,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    retq
   %1 = call i8 @llvm.experimental.vector.reduce.umax.i8.v16i8(<16 x i8> %a0)
@@ -2141,13 +2120,13 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2157,11 +2136,11 @@
 ; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    notb %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2172,11 +2151,11 @@
 ; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    notb %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2189,8 +2168,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -2203,8 +2182,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -2238,13 +2217,13 @@
 ; SSE41-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2257,11 +2236,11 @@
 ; AVX1-NEXT:    vpmaxub %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    notb %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2273,11 +2252,11 @@
 ; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    notb %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2292,8 +2271,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -2308,8 +2287,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -2351,13 +2330,13 @@
 ; SSE41-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE41-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    psrlw $8, %xmm2
-; SSE41-NEXT:    pminub %xmm0, %xmm2
-; SSE41-NEXT:    phminposuw %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pminub %xmm1, %xmm0
+; SSE41-NEXT:    phminposuw %xmm0, %xmm0
 ; SSE41-NEXT:    pextrb $0, %xmm0, %eax
+; SSE41-NEXT:    notb %al
 ; SSE41-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE41-NEXT:    retq
 ;
@@ -2376,11 +2355,11 @@
 ; AVX1-NEXT:    vpmaxub %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX1-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    notb %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2394,11 +2373,11 @@
 ; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm2
-; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    notb %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2414,8 +2393,8 @@
 ; AVX512BW-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512BW-NEXT:    notb %al
 ; AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -2431,8 +2410,8 @@
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm1
 ; AVX512VL-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX512VL-NEXT:    notb %al
 ; AVX512VL-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-reduce-umin-widen.ll b/test/CodeGen/X86/vector-reduce-umin-widen.ll
index 3424328..9fac3bb 100644
--- a/test/CodeGen/X86/vector-reduce-umin-widen.ll
+++ b/test/CodeGen/X86/vector-reduce-umin-widen.ll
@@ -44,10 +44,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -130,10 +129,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -143,10 +141,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -290,10 +287,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -303,10 +299,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -316,10 +311,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -329,10 +323,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -541,10 +534,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -554,10 +546,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -567,10 +558,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm8
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm8[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm10, %xmm0
+; SSE41-NEXT:    por %xmm8, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -580,10 +570,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -593,10 +582,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -606,10 +594,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -619,10 +606,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -632,10 +618,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -1180,8 +1165,8 @@
 ; SSE2-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pminsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1226,8 +1211,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-NEXT:    pminsw %xmm1, %xmm0
-; SSE2-NEXT:    pxor %xmm2, %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1283,8 +1268,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pminsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1334,8 +1319,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pminsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1406,8 +1391,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    pminsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1494,8 +1479,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    pminsw %xmm1, %xmm0
-; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/vector-reduce-umin.ll b/test/CodeGen/X86/vector-reduce-umin.ll
index 949bee2..e3adb9f 100644
--- a/test/CodeGen/X86/vector-reduce-umin.ll
+++ b/test/CodeGen/X86/vector-reduce-umin.ll
@@ -44,10 +44,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -130,10 +129,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -143,10 +141,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    retq
@@ -290,10 +287,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -303,10 +299,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
@@ -316,10 +311,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -329,10 +323,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -541,10 +534,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm11
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm12, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
-; SSE41-NEXT:    por %xmm10, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm11, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -554,10 +546,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm11, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -567,10 +558,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm8
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm8[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm10, %xmm0
+; SSE41-NEXT:    por %xmm8, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
@@ -580,10 +570,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -593,10 +582,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm7
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -606,10 +594,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
@@ -619,10 +606,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -632,10 +618,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    retq
@@ -769,17 +754,15 @@
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    pxor %xmm0, %xmm3
-; SSE41-NEXT:    pxor %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm3, %xmm0
+; SSE41-NEXT:    pxor %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movd %xmm2, %eax
@@ -1258,17 +1241,15 @@
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    por %xmm0, %xmm3
-; SSE41-NEXT:    por %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    movd %xmm2, %eax
@@ -1405,8 +1386,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pminsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1456,8 +1437,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    pminsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1528,8 +1509,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    pminsw %xmm0, %xmm1
-; SSE2-NEXT:    pxor %xmm4, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1616,8 +1597,8 @@
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    pminsw %xmm1, %xmm0
-; SSE2-NEXT:    pxor %xmm8, %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorl $32768, %eax # imm = 0x8000
 ; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -1721,17 +1702,15 @@
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    por %xmm0, %xmm3
-; SSE41-NEXT:    por %xmm2, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm4, %xmm0
 ; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    pextrb $0, %xmm2, %eax
diff --git a/test/CodeGen/X86/vector-rotate-128.ll b/test/CodeGen/X86/vector-rotate-128.ll
index 82ed7c5..4c528a5 100644
--- a/test/CodeGen/X86/vector-rotate-128.ll
+++ b/test/CodeGen/X86/vector-rotate-128.ll
@@ -348,13 +348,13 @@
 ; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX512F-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
-; AVX512F-NEXT:    vpmovdw %zmm2, %ymm2
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
 ; AVX512F-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX512F-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -364,13 +364,12 @@
 ; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX512VL-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpmovdw %ymm2, %xmm2
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
 ; AVX512VL-NEXT:    vpsubw %xmm1, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX512VL-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VL-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -539,50 +538,30 @@
 ;
 ; AVX512F-LABEL: var_rotate_v16i8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpsrlw $4, %xmm0, %xmm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX512F-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT:    vpsrlw $6, %xmm0, %xmm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX512F-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: var_rotate_v16i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %xmm0, %xmm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512VL-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpsrlw $6, %xmm0, %xmm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX512VL-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm1, %zmm0, %zmm1
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm1, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: var_rotate_v16i8:
@@ -592,11 +571,11 @@
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
 ; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
 ;
@@ -607,11 +586,10 @@
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512VLBW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm1
-; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
 ; AVX512VLBW-NEXT:    vpsrlvw %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
-; AVX512VLBW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512VLBW-NEXT:    vzeroupper
 ; AVX512VLBW-NEXT:    retq
 ;
@@ -760,18 +738,16 @@
 define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE2-LABEL: splatvar_rotate_v4i32:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    xorps %xmm3, %xmm3
-; SSE2-NEXT:    movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
-; SSE2-NEXT:    movdqa %xmm0, %xmm4
-; SSE2-NEXT:    pslld %xmm3, %xmm4
-; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [32,32,32,32]
-; SSE2-NEXT:    psubd %xmm1, %xmm3
-; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
-; SSE2-NEXT:    psrld %xmm2, %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $31, %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pslld %xmm1, %xmm2
+; SSE2-NEXT:    movl $32, %ecx
+; SSE2-NEXT:    subl %eax, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    psrld %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: splatvar_rotate_v4i32:
@@ -859,18 +835,16 @@
 ;
 ; X32-SSE-LABEL: splatvar_rotate_v4i32:
 ; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT:    xorps %xmm2, %xmm2
-; X32-SSE-NEXT:    xorps %xmm3, %xmm3
-; X32-SSE-NEXT:    movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
-; X32-SSE-NEXT:    pslld %xmm3, %xmm4
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [32,32,32,32]
-; X32-SSE-NEXT:    psubd %xmm1, %xmm3
-; X32-SSE-NEXT:    movss {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
-; X32-SSE-NEXT:    psrld %xmm2, %xmm0
-; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    movd %xmm1, %eax
+; X32-SSE-NEXT:    andl $31, %eax
+; X32-SSE-NEXT:    movd %eax, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    pslld %xmm1, %xmm2
+; X32-SSE-NEXT:    movl $32, %ecx
+; X32-SSE-NEXT:    subl %eax, %ecx
+; X32-SSE-NEXT:    movd %ecx, %xmm1
+; X32-SSE-NEXT:    psrld %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm2, %xmm0
 ; X32-SSE-NEXT:    retl
   %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
   %splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
@@ -993,187 +967,131 @@
 define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE2-LABEL: splatvar_rotate_v16i8:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; SSE2-NEXT:    movdqa %xmm2, %xmm0
-; SSE2-NEXT:    psrlw $4, %xmm0
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT:    movdqa %xmm2, %xmm3
-; SSE2-NEXT:    psllw $4, %xmm3
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
-; SSE2-NEXT:    por %xmm0, %xmm3
-; SSE2-NEXT:    psllw $5, %xmm1
-; SSE2-NEXT:    pxor %xmm0, %xmm0
-; SSE2-NEXT:    pxor %xmm4, %xmm4
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
-; SSE2-NEXT:    pand %xmm4, %xmm3
-; SSE2-NEXT:    pandn %xmm2, %xmm4
-; SSE2-NEXT:    por %xmm3, %xmm4
-; SSE2-NEXT:    movdqa %xmm4, %xmm2
-; SSE2-NEXT:    psrlw $6, %xmm2
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; SSE2-NEXT:    movdqa %xmm4, %xmm3
-; SSE2-NEXT:    psllw $2, %xmm3
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
-; SSE2-NEXT:    por %xmm2, %xmm3
-; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm2
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT:    pand %xmm2, %xmm3
-; SSE2-NEXT:    pandn %xmm4, %xmm2
-; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm3
-; SSE2-NEXT:    paddb %xmm2, %xmm3
-; SSE2-NEXT:    movdqa %xmm2, %xmm4
-; SSE2-NEXT:    psrlw $7, %xmm4
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
-; SSE2-NEXT:    por %xmm3, %xmm4
-; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm0
-; SSE2-NEXT:    pand %xmm0, %xmm4
-; SSE2-NEXT:    pandn %xmm2, %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE2-NEXT:    psubb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psllw %xmm3, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm5
+; SSE2-NEXT:    psllw %xmm3, %xmm5
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlw %xmm2, %xmm0
+; SSE2-NEXT:    psrlw %xmm2, %xmm4
+; SSE2-NEXT:    psrlw $8, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: splatvar_rotate_v16i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    movdqa %xmm0, %xmm1
-; SSE41-NEXT:    pxor %xmm0, %xmm0
-; SSE41-NEXT:    pshufb %xmm0, %xmm2
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    psrlw $4, %xmm0
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    psllw $4, %xmm3
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT:    por %xmm0, %xmm3
-; SSE41-NEXT:    psllw $5, %xmm2
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    pshufb %xmm3, %xmm1
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm4 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    psllw %xmm4, %xmm2
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm5
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm6
+; SSE41-NEXT:    psllw %xmm4, %xmm6
+; SSE41-NEXT:    pshufb %xmm3, %xmm6
+; SSE41-NEXT:    pand %xmm6, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; SSE41-NEXT:    psubb %xmm1, %xmm3
+; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm1 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    psrlw %xmm1, %xmm0
+; SSE41-NEXT:    psrlw %xmm1, %xmm5
+; SSE41-NEXT:    pshufb {{.*#+}} xmm5 = xmm5[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE41-NEXT:    pand %xmm0, %xmm5
+; SSE41-NEXT:    por %xmm5, %xmm2
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    psrlw $6, %xmm0
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    psllw $2, %xmm3
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT:    por %xmm0, %xmm3
-; SSE41-NEXT:    paddb %xmm2, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    paddb %xmm1, %xmm0
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    psrlw $7, %xmm3
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT:    por %xmm0, %xmm3
-; SSE41-NEXT:    paddb %xmm2, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: splatvar_rotate_v16i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm2
-; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpsllw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: splatvar_rotate_v16i8:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT:    vpsrlw $4, %xmm0, %xmm2
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX2-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX2-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $6, %xmm0, %xmm2
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX2-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX2-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX2-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX2-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpsllw %xmm2, %xmm4, %xmm2
+; AVX2-NEXT:    vpbroadcastb %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: splatvar_rotate_v16i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpbroadcastb %xmm1, %xmm1
-; AVX512F-NEXT:    vpsrlw $4, %xmm0, %xmm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX512F-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT:    vpsrlw $6, %xmm0, %xmm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX512F-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: splatvar_rotate_v16i8:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpbroadcastb %xmm1, %xmm1
-; AVX512VL-NEXT:    vpsrlw $4, %xmm0, %xmm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512VL-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpsrlw $6, %xmm0, %xmm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX512VL-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm1, %zmm0, %zmm1
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm1, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: splatvar_rotate_v16i8:
@@ -1184,11 +1102,11 @@
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
 ; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
 ;
@@ -1200,11 +1118,10 @@
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512VLBW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm1
-; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
 ; AVX512VLBW-NEXT:    vpsrlvw %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
-; AVX512VLBW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512VLBW-NEXT:    vzeroupper
 ; AVX512VLBW-NEXT:    retq
 ;
@@ -1223,48 +1140,35 @@
 ;
 ; X32-SSE-LABEL: splatvar_rotate_v16i8:
 ; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
 ; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
-; X32-SSE-NEXT:    psrlw $4, %xmm0
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm3
-; X32-SSE-NEXT:    psllw $4, %xmm3
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
-; X32-SSE-NEXT:    por %xmm0, %xmm3
-; X32-SSE-NEXT:    psllw $5, %xmm1
-; X32-SSE-NEXT:    pxor %xmm0, %xmm0
-; X32-SSE-NEXT:    pxor %xmm4, %xmm4
-; X32-SSE-NEXT:    pcmpgtb %xmm1, %xmm4
-; X32-SSE-NEXT:    pand %xmm4, %xmm3
-; X32-SSE-NEXT:    pandn %xmm2, %xmm4
-; X32-SSE-NEXT:    por %xmm3, %xmm4
-; X32-SSE-NEXT:    movdqa %xmm4, %xmm2
-; X32-SSE-NEXT:    psrlw $6, %xmm2
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT:    movdqa %xmm4, %xmm3
-; X32-SSE-NEXT:    psllw $2, %xmm3
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
-; X32-SSE-NEXT:    por %xmm2, %xmm3
-; X32-SSE-NEXT:    paddb %xmm1, %xmm1
-; X32-SSE-NEXT:    pxor %xmm2, %xmm2
-; X32-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
-; X32-SSE-NEXT:    pand %xmm2, %xmm3
-; X32-SSE-NEXT:    pandn %xmm4, %xmm2
-; X32-SSE-NEXT:    por %xmm3, %xmm2
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm3
-; X32-SSE-NEXT:    paddb %xmm2, %xmm3
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
-; X32-SSE-NEXT:    psrlw $7, %xmm4
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
-; X32-SSE-NEXT:    por %xmm3, %xmm4
-; X32-SSE-NEXT:    paddb %xmm1, %xmm1
-; X32-SSE-NEXT:    pcmpgtb %xmm1, %xmm0
-; X32-SSE-NEXT:    pand %xmm0, %xmm4
-; X32-SSE-NEXT:    pandn %xmm2, %xmm0
-; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; X32-SSE-NEXT:    psubb %xmm1, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psllw %xmm3, %xmm1
+; X32-SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; X32-SSE-NEXT:    pcmpeqd %xmm5, %xmm5
+; X32-SSE-NEXT:    psllw %xmm3, %xmm5
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm3, %xmm1
+; X32-SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlw %xmm2, %xmm0
+; X32-SSE-NEXT:    psrlw %xmm2, %xmm4
+; X32-SSE-NEXT:    psrlw $8, %xmm4
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT:    pand %xmm0, %xmm2
+; X32-SSE-NEXT:    por %xmm2, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
   %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
   %splat8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
@@ -1557,148 +1461,104 @@
 define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: constant_rotate_v16i8:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; SSE2-NEXT:    pxor %xmm0, %xmm0
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT:    movdqa %xmm1, %xmm4
-; SSE2-NEXT:    psrlw $4, %xmm4
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
-; SSE2-NEXT:    movdqa %xmm1, %xmm5
-; SSE2-NEXT:    psllw $4, %xmm5
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm5
-; SSE2-NEXT:    por %xmm4, %xmm5
-; SSE2-NEXT:    pand %xmm3, %xmm5
-; SSE2-NEXT:    pandn %xmm1, %xmm3
-; SSE2-NEXT:    por %xmm5, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm1
-; SSE2-NEXT:    psrlw $6, %xmm1
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    psllw $2, %xmm4
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
-; SSE2-NEXT:    por %xmm1, %xmm4
-; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT:    pand %xmm1, %xmm4
-; SSE2-NEXT:    pandn %xmm3, %xmm1
-; SSE2-NEXT:    por %xmm4, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, %xmm3
-; SSE2-NEXT:    paddb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm1, %xmm4
-; SSE2-NEXT:    psrlw $7, %xmm4
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm4
-; SSE2-NEXT:    por %xmm3, %xmm4
-; SSE2-NEXT:    paddb %xmm2, %xmm2
-; SSE2-NEXT:    pcmpgtb %xmm2, %xmm0
-; SSE2-NEXT:    pand %xmm0, %xmm4
-; SSE2-NEXT:    pandn %xmm1, %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    psrlw $8, %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    psrlw $8, %xmm3
+; SSE2-NEXT:    packuswb %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: constant_rotate_v16i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm1
-; SSE41-NEXT:    psrlw $4, %xmm0
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    psllw $4, %xmm2
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT:    por %xmm0, %xmm2
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; SSE41-NEXT:    pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    psrlw $6, %xmm2
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    psllw $2, %xmm3
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT:    por %xmm2, %xmm3
-; SSE41-NEXT:    paddb %xmm0, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    paddb %xmm1, %xmm2
-; SSE41-NEXT:    movdqa %xmm1, %xmm3
-; SSE41-NEXT:    psrlw $7, %xmm3
-; SSE41-NEXT:    pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT:    por %xmm2, %xmm3
-; SSE41-NEXT:    paddb %xmm0, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm3, %xmm2
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; SSE41-NEXT:    pmullw %xmm1, %xmm4
+; SSE41-NEXT:    pand %xmm3, %xmm4
+; SSE41-NEXT:    packuswb %xmm2, %xmm4
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    psrlw $8, %xmm0
+; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    psrlw $8, %xmm1
+; SSE41-NEXT:    packuswb %xmm0, %xmm1
+; SSE41-NEXT:    por %xmm4, %xmm1
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: constant_rotate_v16i8:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm1
-; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT:    vpsllw $4, %xmm0, %xmm2
-; AVX-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlw $6, %xmm0, %xmm1
-; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm1
-; AVX-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: constant_rotate_v16i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm3, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm3, %xmm2
+; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constant_rotate_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: constant_rotate_v16i8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpsrlw $4, %xmm0, %xmm1
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT:    vpsllw $4, %xmm0, %xmm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vpsrlw $6, %xmm0, %xmm1
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX512F-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vpaddb %xmm0, %xmm0, %xmm1
-; AVX512F-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512F-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512F-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512F-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: constant_rotate_v16i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %xmm0, %xmm1
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT:    vpsllw $4, %xmm0, %xmm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpsrlw $6, %xmm0, %xmm1
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX512VL-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpaddb %xmm0, %xmm0, %xmm1
-; AVX512VL-NEXT:    vpsrlw $7, %xmm0, %xmm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX512VL-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm1, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: constant_rotate_v16i8:
@@ -1706,11 +1566,11 @@
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
 ; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
 ;
@@ -1718,10 +1578,9 @@
 ; AVX512VLBW:       # %bb.0:
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %ymm0, %ymm1
-; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
 ; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
-; AVX512VLBW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512VLBW-NEXT:    vzeroupper
 ; AVX512VLBW-NEXT:    retq
 ;
@@ -1732,45 +1591,26 @@
 ;
 ; X32-SSE-LABEL: constant_rotate_v16i8:
 ; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; X32-SSE-NEXT:    pxor %xmm0, %xmm0
-; X32-SSE-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE-NEXT:    pcmpgtb %xmm2, %xmm3
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
-; X32-SSE-NEXT:    psrlw $4, %xmm4
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm5
-; X32-SSE-NEXT:    psllw $4, %xmm5
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm5
-; X32-SSE-NEXT:    por %xmm4, %xmm5
-; X32-SSE-NEXT:    pand %xmm3, %xmm5
-; X32-SSE-NEXT:    pandn %xmm1, %xmm3
-; X32-SSE-NEXT:    por %xmm5, %xmm3
-; X32-SSE-NEXT:    movdqa %xmm3, %xmm1
-; X32-SSE-NEXT:    psrlw $6, %xmm1
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
-; X32-SSE-NEXT:    psllw $2, %xmm4
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
-; X32-SSE-NEXT:    por %xmm1, %xmm4
-; X32-SSE-NEXT:    paddb %xmm2, %xmm2
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE-NEXT:    pcmpgtb %xmm2, %xmm1
-; X32-SSE-NEXT:    pand %xmm1, %xmm4
-; X32-SSE-NEXT:    pandn %xmm3, %xmm1
-; X32-SSE-NEXT:    por %xmm4, %xmm1
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
-; X32-SSE-NEXT:    paddb %xmm1, %xmm3
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
-; X32-SSE-NEXT:    psrlw $7, %xmm4
-; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm4
-; X32-SSE-NEXT:    por %xmm3, %xmm4
-; X32-SSE-NEXT:    paddb %xmm2, %xmm2
-; X32-SSE-NEXT:    pcmpgtb %xmm2, %xmm0
-; X32-SSE-NEXT:    pand %xmm0, %xmm4
-; X32-SSE-NEXT:    pandn %xmm1, %xmm0
-; X32-SSE-NEXT:    por %xmm4, %xmm0
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT:    psrlw $8, %xmm2
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT:    psrlw $8, %xmm3
+; X32-SSE-NEXT:    packuswb %xmm2, %xmm3
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X32-SSE-NEXT:    pand %xmm2, %xmm1
+; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT:    pand %xmm2, %xmm0
+; X32-SSE-NEXT:    packuswb %xmm1, %xmm0
+; X32-SSE-NEXT:    por %xmm3, %xmm0
 ; X32-SSE-NEXT:    retl
   %shl = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
   %lshr = lshr <16 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
@@ -1963,41 +1803,14 @@
 ; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
-; AVX512F-LABEL: splatconstant_rotate_v16i8:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpsrlw $4, %xmm0, %xmm1
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT:    vpsllw $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: splatconstant_rotate_v16i8:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %xmm0, %xmm1
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT:    vpsllw $4, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT:    retq
-;
-; AVX512BW-LABEL: splatconstant_rotate_v16i8:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vpsllw $4, %xmm0, %xmm1
-; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT:    retq
-;
-; AVX512VLBW-LABEL: splatconstant_rotate_v16i8:
-; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpsllw $4, %xmm0, %xmm1
-; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VLBW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512VLBW-NEXT:    retq
+; AVX512-LABEL: splatconstant_rotate_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    retq
 ;
 ; XOP-LABEL: splatconstant_rotate_v16i8:
 ; XOP:       # %bb.0:
@@ -2221,43 +2034,14 @@
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
-; AVX512F-LABEL: splatconstant_rotate_mask_v16i8:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpsrlw $4, %xmm0, %xmm1
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT:    vpsllw $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: splatconstant_rotate_mask_v16i8:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %xmm0, %xmm1
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT:    vpsllw $4, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT:    retq
-;
-; AVX512BW-LABEL: splatconstant_rotate_mask_v16i8:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vpsllw $4, %xmm0, %xmm1
-; AVX512BW-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT:    retq
-;
-; AVX512VLBW-LABEL: splatconstant_rotate_mask_v16i8:
-; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpsllw $4, %xmm0, %xmm1
-; AVX512VLBW-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512VLBW-NEXT:    retq
+; AVX512-LABEL: splatconstant_rotate_mask_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    retq
 ;
 ; XOP-LABEL: splatconstant_rotate_mask_v16i8:
 ; XOP:       # %bb.0:
diff --git a/test/CodeGen/X86/vector-rotate-256.ll b/test/CodeGen/X86/vector-rotate-256.ll
index c3ac313..b0d9cd9 100644
--- a/test/CodeGen/X86/vector-rotate-256.ll
+++ b/test/CodeGen/X86/vector-rotate-256.ll
@@ -265,13 +265,12 @@
 ; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
 ; AVX512F-NEXT:    vpsllvd %zmm2, %zmm0, %zmm2
-; AVX512F-NEXT:    vpmovdw %zmm2, %ymm2
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
 ; AVX512F-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
 ; AVX512F-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm2, %zmm0
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: var_rotate_v16i16:
@@ -280,13 +279,12 @@
 ; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
 ; AVX512VL-NEXT:    vpsllvd %zmm2, %zmm0, %zmm2
-; AVX512VL-NEXT:    vpmovdw %zmm2, %ymm2
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
 ; AVX512VL-NEXT:    vpsubw %ymm1, %ymm3, %ymm1
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
 ; AVX512VL-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm2, %zmm0
 ; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: var_rotate_v16i16:
@@ -466,11 +464,10 @@
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
 ; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VLBW-LABEL: var_rotate_v32i8:
@@ -480,11 +477,10 @@
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512VLBW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
 ; AVX512VLBW-NEXT:    vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512VLBW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512VLBW-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: var_rotate_v32i8:
@@ -759,125 +755,91 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vpsrlw $4, %xmm2, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT:    vpand %xmm9, %xmm5, %xmm5
-; AVX1-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX1-NEXT:    vpand %xmm10, %xmm3, %xmm3
-; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT:    vpand %xmm11, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm3, %xmm7, %xmm3
-; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm7
-; AVX1-NEXT:    vpblendvb %xmm7, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrlw $7, %xmm2, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm5
-; AVX1-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT:    vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT:    vpand %xmm9, %xmm4, %xmm4
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm1
-; AVX1-NEXT:    vpand %xmm10, %xmm1, %xmm1
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX1-NEXT:    vpand %xmm11, %xmm3, %xmm3
-; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm7, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm1
-; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
-; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpsllw %xmm3, %xmm6, %xmm7
+; AVX1-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT:    vpsubb %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpsllw %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: splatvar_rotate_v32i8:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
-; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX2-NEXT:    vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrlw $6, %ymm0, %ymm2
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm3
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX2-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX2-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: splatvar_rotate_v32i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpbroadcastb %xmm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpsllw $5, %ymm1, %ymm1
-; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX512F-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX512F-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: splatvar_rotate_v32i8:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpbroadcastb %xmm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpsllw %xmm2, %ymm4, %ymm2
+; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT:    vpand %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm1, %ymm4, %ymm1
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: splatvar_rotate_v32i8:
@@ -888,11 +850,10 @@
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
 ; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
 ; AVX512BW-NEXT:    vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VLBW-LABEL: splatvar_rotate_v32i8:
@@ -903,11 +864,10 @@
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
 ; AVX512VLBW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
 ; AVX512VLBW-NEXT:    vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512VLBW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512VLBW-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: splatvar_rotate_v32i8:
@@ -1178,141 +1138,132 @@
 ; AVX1-LABEL: constant_rotate_v32i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT:    vpand %xmm8, %xmm2, %xmm2
-; AVX1-NEXT:    vpsllw $4, %xmm1, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT:    vpand %xmm9, %xmm4, %xmm4
-; AVX1-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX1-NEXT:    vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $6, %xmm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX1-NEXT:    vpand %xmm10, %xmm2, %xmm2
-; AVX1-NEXT:    vpsllw $2, %xmm1, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT:    vpand %xmm11, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm7
-; AVX1-NEXT:    vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $7, %xmm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT:    vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm6
-; AVX1-NEXT:    vpor %xmm2, %xmm6, %xmm2
-; AVX1-NEXT:    vpaddb %xmm7, %xmm7, %xmm6
-; AVX1-NEXT:    vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT:    vpand %xmm8, %xmm2, %xmm2
-; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX1-NEXT:    vpand %xmm9, %xmm3, %xmm3
-; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm2
-; AVX1-NEXT:    vpand %xmm10, %xmm2, %xmm2
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
-; AVX1-NEXT:    vpand %xmm11, %xmm3, %xmm3
-; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm2
-; AVX1-NEXT:    vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
-; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [256,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm9, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [256,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm7
+; AVX1-NEXT:    vpsrlw $8, %xmm7, %xmm7
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,128,64,32,16,8,4,2]
+; AVX1-NEXT:    vpmullw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
+; AVX1-NEXT:    vpmullw %xmm9, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpsrlw $8, %xmm6, %xmm6
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpmullw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: constant_rotate_v32i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm1
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm2
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrlw $6, %ymm0, %ymm1
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsllw $2, %ymm1, %ymm3
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm1
-; AVX2-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT:    vpor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
 ; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: constant_rotate_v32i8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm1
 ; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm2
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm1
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm3
 ; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm1
-; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
 ; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: constant_rotate_v32i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm1
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm3
 ; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm1
-; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
 ; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: constant_rotate_v32i8:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm1
-; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VLBW-LABEL: constant_rotate_v32i8:
 ; AVX512VLBW:       # %bb.0:
 ; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512VLBW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm1
-; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512VLBW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
-; AVX512VLBW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512VLBW-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: constant_rotate_v32i8:
diff --git a/test/CodeGen/X86/vector-rotate-512.ll b/test/CodeGen/X86/vector-rotate-512.ll
index 3e48d76..896d6c0 100644
--- a/test/CodeGen/X86/vector-rotate-512.ll
+++ b/test/CodeGen/X86/vector-rotate-512.ll
@@ -40,23 +40,21 @@
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; AVX512F-NEXT:    vpsllvd %zmm5, %zmm0, %zmm5
-; AVX512F-NEXT:    vpmovdw %zmm5, %ymm5
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
 ; AVX512F-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vpord %zmm0, %zmm5, %zmm0
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT:    vpor %ymm0, %ymm5, %ymm0
 ; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm2
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
 ; AVX512F-NEXT:    vpsllvd %zmm3, %zmm1, %zmm3
-; AVX512F-NEXT:    vpmovdw %zmm3, %ymm3
 ; AVX512F-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512F-NEXT:    vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512F-NEXT:    vpord %zmm1, %zmm3, %zmm1
 ; AVX512F-NEXT:    vpmovdw %zmm1, %ymm1
-; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: var_rotate_v32i16:
@@ -66,23 +64,21 @@
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm5 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; AVX512VL-NEXT:    vpsllvd %zmm5, %zmm0, %zmm5
-; AVX512VL-NEXT:    vpmovdw %zmm5, %ymm5
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
 ; AVX512VL-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpord %zmm0, %zmm5, %zmm0
 ; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512VL-NEXT:    vpor %ymm0, %ymm5, %ymm0
 ; AVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm2
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
 ; AVX512VL-NEXT:    vpsllvd %zmm3, %zmm1, %zmm3
-; AVX512VL-NEXT:    vpmovdw %zmm3, %ymm3
 ; AVX512VL-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512VL-NEXT:    vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpord %zmm1, %zmm3, %zmm1
 ; AVX512VL-NEXT:    vpmovdw %zmm1, %ymm1
-; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: var_rotate_v32i16:
@@ -382,95 +378,53 @@
 ; AVX512F-LABEL: splatvar_rotate_v64i8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpbroadcastb %xmm2, %ymm2
-; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm3
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm5
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT:    vpor %ymm3, %ymm5, %ymm3
-; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm3
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512F-NEXT:    vpand %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm7
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT:    vpand %ymm8, %ymm7, %ymm7
-; AVX512F-NEXT:    vpor %ymm3, %ymm7, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm7
-; AVX512F-NEXT:    vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT:    vpand %ymm9, %ymm3, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm10
-; AVX512F-NEXT:    vpor %ymm3, %ymm10, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm10
-; AVX512F-NEXT:    vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm3
-; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512F-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm5, %ymm6
+; AVX512F-NEXT:    vpbroadcastb %xmm6, %ymm6
 ; AVX512F-NEXT:    vpand %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $6, %ymm1, %ymm2
-; AVX512F-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm3
-; AVX512F-NEXT:    vpand %ymm8, %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $7, %ymm1, %ymm2
-; AVX512F-NEXT:    vpand %ymm9, %ymm2, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
-; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm7, %xmm2
+; AVX512F-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm5, %ymm5
+; AVX512F-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512F-NEXT:    vpbroadcastb %xmm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512F-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: splatvar_rotate_v64i8:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpbroadcastb %xmm2, %ymm2
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm3
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm5
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm5
-; AVX512VL-NEXT:    vpor %ymm3, %ymm5, %ymm3
-; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm3
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT:    vpand %ymm5, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm7
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT:    vpand %ymm8, %ymm7, %ymm7
-; AVX512VL-NEXT:    vpor %ymm3, %ymm7, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm7
-; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm3
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT:    vpand %ymm9, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm10
-; AVX512VL-NEXT:    vpor %ymm3, %ymm10, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm10
-; AVX512VL-NEXT:    vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm3
-; AVX512VL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm5, %ymm6
+; AVX512VL-NEXT:    vpbroadcastb %xmm6, %ymm6
 ; AVX512VL-NEXT:    vpand %ymm6, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $6, %ymm1, %ymm2
-; AVX512VL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm3
-; AVX512VL-NEXT:    vpand %ymm8, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $7, %ymm1, %ymm2
-; AVX512VL-NEXT:    vpand %ymm9, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
-; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT:    vpsubb %xmm2, %xmm7, %xmm2
+; AVX512VL-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpbroadcastb %xmm5, %ymm5
+; AVX512VL-NEXT:    vpand %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX512VL-NEXT:    vpsllw %xmm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: splatvar_rotate_v64i8:
@@ -591,94 +545,97 @@
 define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
 ; AVX512F-LABEL: constant_rotate_v64i8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
 ; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT:    vpand %ymm5, %ymm4, %ymm4
-; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $6, %ymm0, %ymm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm7
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT:    vpand %ymm8, %ymm7, %ymm7
-; AVX512F-NEXT:    vpor %ymm2, %ymm7, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm5
 ; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
-; AVX512F-NEXT:    vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $7, %ymm0, %ymm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT:    vpand %ymm9, %ymm2, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm10
-; AVX512F-NEXT:    vpor %ymm2, %ymm10, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm10
-; AVX512F-NEXT:    vpblendvb %ymm10, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm8
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm0[8],ymm5[8],ymm0[9],ymm5[9],ymm0[10],ymm5[10],ymm0[11],ymm5[11],ymm0[12],ymm5[12],ymm0[13],ymm5[13],ymm0[14],ymm5[14],ymm0[15],ymm5[15],ymm0[24],ymm5[24],ymm0[25],ymm5[25],ymm0[26],ymm5[26],ymm0[27],ymm5[27],ymm0[28],ymm5[28],ymm0[29],ymm5[29],ymm0[30],ymm5[30],ymm0[31],ymm5[31]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm10, %ymm9, %ymm9
+; AVX512F-NEXT:    vpsrlw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[1],ymm5[1],ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[4],ymm5[4],ymm0[5],ymm5[5],ymm0[6],ymm5[6],ymm0[7],ymm5[7],ymm0[16],ymm5[16],ymm0[17],ymm5[17],ymm0[18],ymm5[18],ymm0[19],ymm5[19],ymm0[20],ymm5[20],ymm0[21],ymm5[21],ymm0[22],ymm5[22],ymm0[23],ymm5[23]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm9, %ymm0, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm2
 ; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsllw $4, %ymm1, %ymm3
-; AVX512F-NEXT:    vpand %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $6, %ymm1, %ymm2
-; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsllw $2, %ymm1, %ymm3
-; AVX512F-NEXT:    vpand %ymm8, %ymm3, %ymm3
-; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $7, %ymm1, %ymm2
-; AVX512F-NEXT:    vpand %ymm9, %ymm2, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
-; AVX512F-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpsllw $2, %ymm2, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm5[8],ymm1[9],ymm5[9],ymm1[10],ymm5[10],ymm1[11],ymm5[11],ymm1[12],ymm5[12],ymm1[13],ymm5[13],ymm1[14],ymm5[14],ymm1[15],ymm5[15],ymm1[24],ymm5[24],ymm1[25],ymm5[25],ymm1[26],ymm5[26],ymm1[27],ymm5[27],ymm1[28],ymm5[28],ymm1[29],ymm5[29],ymm1[30],ymm5[30],ymm1[31],ymm5[31]
+; AVX512F-NEXT:    vpmullw %ymm10, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[1],ymm5[1],ymm1[2],ymm5[2],ymm1[3],ymm5[3],ymm1[4],ymm5[4],ymm1[5],ymm5[5],ymm1[6],ymm5[6],ymm1[7],ymm5[7],ymm1[16],ymm5[16],ymm1[17],ymm5[17],ymm1[18],ymm5[18],ymm1[19],ymm5[19],ymm1[20],ymm5[20],ymm1[21],ymm5[21],ymm1[22],ymm5[22],ymm1[23],ymm5[23]
+; AVX512F-NEXT:    vpmullw %ymm11, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: constant_rotate_v64i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
 ; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT:    vpand %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpor %ymm2, %ymm4, %ymm2
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm7
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT:    vpand %ymm8, %ymm7, %ymm7
-; AVX512VL-NEXT:    vpor %ymm2, %ymm7, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256]
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm7
-; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm2
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT:    vpand %ymm9, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm10
-; AVX512VL-NEXT:    vpor %ymm2, %ymm10, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm10
-; AVX512VL-NEXT:    vpblendvb %ymm10, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm8
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
+; AVX512VL-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm9, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpsrlw $8, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
+; AVX512VL-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpackuswb %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm2
 ; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsllw $4, %ymm1, %ymm3
-; AVX512VL-NEXT:    vpand %ymm5, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $6, %ymm1, %ymm2
-; AVX512VL-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsllw $2, %ymm1, %ymm3
-; AVX512VL-NEXT:    vpand %ymm8, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $7, %ymm1, %ymm2
-; AVX512VL-NEXT:    vpand %ymm9, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm3
-; AVX512VL-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsllw $2, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpmullw %ymm9, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmullw %ymm10, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: constant_rotate_v64i8:
diff --git a/test/CodeGen/X86/vector-sext-widen.ll b/test/CodeGen/X86/vector-sext-widen.ll
index 895d0e5..5cd814d 100644
--- a/test/CodeGen/X86/vector-sext-widen.ll
+++ b/test/CodeGen/X86/vector-sext-widen.ll
@@ -1388,23 +1388,13 @@
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_2i1_to_2i64:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_2i1_to_2i64:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT:    vzeroupper
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_2i1_to_2i64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_2i1_to_2i64:
 ; X32-SSE2:       # %bb.0: # %entry
@@ -1608,23 +1598,13 @@
 ; AVX2-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_4i1_to_4i32:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_4i1_to_4i32:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT:    vzeroupper
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_4i1_to_4i32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_4i1_to_4i32:
 ; X32-SSE2:       # %bb.0: # %entry
@@ -1846,21 +1826,12 @@
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_4i1_to_4i64:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_4i1_to_4i64:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_4i1_to_4i64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_4i1_to_4i64:
 ; X32-SSE2:       # %bb.0: # %entry
@@ -1921,30 +1892,28 @@
 define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_4i8_to_4i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movsbq 1(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    movsbq (%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm0
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT:    movsbq 3(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm2
-; SSE2-NEXT:    movsbq 2(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT:    psrad $24, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_4i8_to_4i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movsbq 1(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    movsbq (%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm0
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT:    movsbq 3(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm2
-; SSSE3-NEXT:    movsbq 2(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT:    psrad $24, %xmm1
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_4i8_to_4i64:
@@ -1999,20 +1968,24 @@
 define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_4i8_to_4i64_extract:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movsbq 3(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    movsbq 2(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm0
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    psrad $24, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_4i8_to_4i64_extract:
 ; SSSE3:       # %bb.0:
-; SSSE3-NEXT:    movsbq 3(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    movsbq 2(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm0
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT:    psrad $24, %xmm0
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_4i8_to_4i64_extract:
@@ -2254,8 +2227,7 @@
 ;
 ; AVX512F-LABEL: load_sext_8i1_to_8i16:
 ; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw (%rdi), %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -2264,8 +2236,7 @@
 ;
 ; AVX512BW-LABEL: load_sext_8i1_to_8i16:
 ; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k0
+; AVX512BW-NEXT:    kmovw (%rdi), %k0
 ; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper
@@ -2402,50 +2373,42 @@
 define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_8i8_to_8i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movsbq 1(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    movsbq (%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm0
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT:    movsbq 3(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm2
-; SSE2-NEXT:    movsbq 2(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT:    movsbq 5(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm3
-; SSE2-NEXT:    movsbq 4(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm2
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE2-NEXT:    movsbq 7(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm4
-; SSE2-NEXT:    movsbq 6(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm3
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT:    psrad $24, %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT:    psrad $24, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_8i8_to_8i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movsbq 1(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    movsbq (%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm0
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT:    movsbq 3(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm2
-; SSSE3-NEXT:    movsbq 2(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSSE3-NEXT:    movsbq 5(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm3
-; SSSE3-NEXT:    movsbq 4(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm2
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSSE3-NEXT:    movsbq 7(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm4
-; SSSE3-NEXT:    movsbq 6(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm3
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSSE3-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT:    psrad $24, %xmm1
+; SSSE3-NEXT:    pxor %xmm4, %xmm4
+; SSSE3-NEXT:    pxor %xmm3, %xmm3
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT:    psrad $24, %xmm3
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT:    movdqa %xmm3, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_8i8_to_8i64:
@@ -2717,21 +2680,12 @@
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_8i1_to_8i32:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_8i1_to_8i32:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_8i1_to_8i32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_8i1_to_8i32:
 ; X32-SSE2:       # %bb.0: # %entry
diff --git a/test/CodeGen/X86/vector-sext.ll b/test/CodeGen/X86/vector-sext.ll
index 42fd2d1..a5784f3 100644
--- a/test/CodeGen/X86/vector-sext.ll
+++ b/test/CodeGen/X86/vector-sext.ll
@@ -493,33 +493,25 @@
 ; SSE2-LABEL: sext_16i8_to_4i64:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT:    psrad $24, %xmm2
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSE2-NEXT:    psrad $24, %xmm1
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: sext_16i8_to_4i64:
 ; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT:    pxor %xmm3, %xmm3
-; SSSE3-NEXT:    pxor %xmm1, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSSE3-NEXT:    psrad $24, %xmm2
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSSE3-NEXT:    psrad $24, %xmm1
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: sext_16i8_to_4i64:
@@ -551,17 +543,13 @@
 ; X32-SSE2-LABEL: sext_16i8_to_4i64:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE2-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; X32-SSE2-NEXT:    psrad $24, %xmm2
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; X32-SSE2-NEXT:    psrad $24, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; X32-SSE2-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: sext_16i8_to_4i64:
@@ -580,57 +568,41 @@
 define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: sext_16i8_to_8i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT:    pxor %xmm5, %xmm5
-; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm4, %xmm1
-; SSE2-NEXT:    psrad $24, %xmm4
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT:    psrad $24, %xmm2
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pxor %xmm0, %xmm0
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSE2-NEXT:    psrad $24, %xmm1
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT:    pxor %xmm5, %xmm5
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
 ; SSE2-NEXT:    psrad $24, %xmm3
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; SSE2-NEXT:    movdqa %xmm4, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: sext_16i8_to_8i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSSE3-NEXT:    pxor %xmm5, %xmm5
-; SSSE3-NEXT:    pxor %xmm1, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm1
-; SSSE3-NEXT:    psrad $24, %xmm4
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSSE3-NEXT:    pxor %xmm1, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSSE3-NEXT:    psrad $24, %xmm2
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT:    pxor %xmm0, %xmm0
-; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSSE3-NEXT:    psrad $24, %xmm1
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSSE3-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT:    pxor %xmm5, %xmm5
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
 ; SSSE3-NEXT:    psrad $24, %xmm3
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT:    movdqa %xmm3, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; SSSE3-NEXT:    movdqa %xmm4, %xmm0
 ; SSSE3-NEXT:    retq
 ;
@@ -676,29 +648,21 @@
 ;
 ; X32-SSE2-LABEL: sext_16i8_to_8i64:
 ; X32-SSE2:       # %bb.0: # %entry
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
 ; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; X32-SSE2-NEXT:    pxor %xmm5, %xmm5
-; X32-SSE2-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE2-NEXT:    pcmpgtd %xmm4, %xmm1
-; X32-SSE2-NEXT:    psrad $24, %xmm4
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; X32-SSE2-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; X32-SSE2-NEXT:    psrad $24, %xmm2
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; X32-SSE2-NEXT:    pxor %xmm0, %xmm0
-; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; X32-SSE2-NEXT:    psrad $24, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
-; X32-SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; X32-SSE2-NEXT:    pxor %xmm5, %xmm5
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; X32-SSE2-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X32-SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
 ; X32-SSE2-NEXT:    psrad $24, %xmm3
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; X32-SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; X32-SSE2-NEXT:    movdqa %xmm3, %xmm2
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; X32-SSE2-NEXT:    movdqa %xmm4, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
@@ -974,32 +938,24 @@
 define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: sext_8i16_to_4i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT:    psrad $16, %xmm2
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: sext_8i16_to_4i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT:    pxor %xmm3, %xmm3
-; SSSE3-NEXT:    pxor %xmm1, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSSE3-NEXT:    psrad $16, %xmm2
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSSE3-NEXT:    psrad $16, %xmm1
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: sext_8i16_to_4i64:
@@ -1030,17 +986,13 @@
 ;
 ; X32-SSE2-LABEL: sext_8i16_to_4i64:
 ; X32-SSE2:       # %bb.0: # %entry
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE2-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; X32-SSE2-NEXT:    psrad $16, %xmm2
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; X32-SSE2-NEXT:    psrad $16, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; X32-SSE2-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: sext_8i16_to_4i64:
@@ -1059,53 +1011,39 @@
 define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: sext_8i16_to_8i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT:    pxor %xmm5, %xmm5
-; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm4, %xmm1
-; SSE2-NEXT:    psrad $16, %xmm4
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT:    psrad $16, %xmm2
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT:    pxor %xmm5, %xmm5
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
 ; SSE2-NEXT:    psrad $16, %xmm3
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; SSE2-NEXT:    movdqa %xmm4, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: sext_8i16_to_8i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSSE3-NEXT:    pxor %xmm5, %xmm5
-; SSSE3-NEXT:    pxor %xmm1, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm1
-; SSSE3-NEXT:    psrad $16, %xmm4
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT:    pxor %xmm1, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSSE3-NEXT:    psrad $16, %xmm2
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT:    pxor %xmm3, %xmm3
-; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSSE3-NEXT:    psrad $16, %xmm1
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSSE3-NEXT:    pshuflw {{.*#+}} xmm3 = xmm0[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT:    pxor %xmm5, %xmm5
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
 ; SSSE3-NEXT:    psrad $16, %xmm3
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT:    movdqa %xmm3, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; SSSE3-NEXT:    movdqa %xmm4, %xmm0
 ; SSSE3-NEXT:    retq
 ;
@@ -1150,27 +1088,20 @@
 ;
 ; X32-SSE2-LABEL: sext_8i16_to_8i64:
 ; X32-SSE2:       # %bb.0: # %entry
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; X32-SSE2-NEXT:    pxor %xmm5, %xmm5
-; X32-SSE2-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE2-NEXT:    pcmpgtd %xmm4, %xmm1
-; X32-SSE2-NEXT:    psrad $16, %xmm4
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; X32-SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-SSE2-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
-; X32-SSE2-NEXT:    psrad $16, %xmm2
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; X32-SSE2-NEXT:    psrad $16, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm0[0,2,2,3,4,5,6,7]
-; X32-SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; X32-SSE2-NEXT:    pxor %xmm5, %xmm5
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; X32-SSE2-NEXT:    movdqa %xmm1, %xmm4
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X32-SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
 ; X32-SSE2-NEXT:    psrad $16, %xmm3
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; X32-SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; X32-SSE2-NEXT:    movdqa %xmm3, %xmm2
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; X32-SSE2-NEXT:    movdqa %xmm4, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
@@ -1457,23 +1388,13 @@
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_2i1_to_2i64:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_2i1_to_2i64:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT:    vzeroupper
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_2i1_to_2i64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_2i1_to_2i64:
 ; X32-SSE2:       # %bb.0: # %entry
@@ -1677,23 +1598,13 @@
 ; AVX2-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_4i1_to_4i32:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_4i1_to_4i32:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT:    vzeroupper
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_4i1_to_4i32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_4i1_to_4i32:
 ; X32-SSE2:       # %bb.0: # %entry
@@ -1915,21 +1826,12 @@
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_4i1_to_4i64:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_4i1_to_4i64:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_4i1_to_4i64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_4i1_to_4i64:
 ; X32-SSE2:       # %bb.0: # %entry
@@ -1990,30 +1892,28 @@
 define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_4i8_to_4i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movsbq 1(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    movsbq (%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm0
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT:    movsbq 3(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm2
-; SSE2-NEXT:    movsbq 2(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT:    psrad $24, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_4i8_to_4i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movsbq 1(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    movsbq (%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm0
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT:    movsbq 3(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm2
-; SSSE3-NEXT:    movsbq 2(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT:    psrad $24, %xmm1
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_4i8_to_4i64:
@@ -2024,10 +1924,8 @@
 ;
 ; AVX1-LABEL: load_sext_4i8_to_4i64:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxbq 2(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxbq (%rdi), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -2044,28 +1942,15 @@
 ; X32-SSE2-LABEL: load_sext_4i8_to_4i64:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movsbl 1(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT:    movsbl (%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
+; X32-SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X32-SSE2-NEXT:    psrad $24, %xmm1
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; X32-SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT:    movsbl 3(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT:    movsbl 2(%eax), %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm1
-; X32-SSE2-NEXT:    sarl $31, %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm3
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: load_sext_4i8_to_4i64:
@@ -2083,20 +1968,24 @@
 define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_4i8_to_4i64_extract:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movsbq 3(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    movsbq 2(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm0
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    psrad $24, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_4i8_to_4i64_extract:
 ; SSSE3:       # %bb.0:
-; SSSE3-NEXT:    movsbq 3(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    movsbq 2(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm0
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT:    psrad $24, %xmm0
+; SSSE3-NEXT:    pxor %xmm1, %xmm1
+; SSSE3-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_4i8_to_4i64_extract:
@@ -2106,9 +1995,7 @@
 ;
 ; AVX1-LABEL: load_sext_4i8_to_4i64_extract:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxbq 2(%rdi), %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_sext_4i8_to_4i64_extract:
@@ -2128,17 +2015,13 @@
 ; X32-SSE2-LABEL: load_sext_4i8_to_4i64_extract:
 ; X32-SSE2:       # %bb.0:
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movsbl 3(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT:    movsbl 2(%eax), %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm0
-; X32-SSE2-NEXT:    sarl $31, %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm2
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT:    psrad $24, %xmm0
+; X32-SSE2-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: load_sext_4i8_to_4i64_extract:
@@ -2344,8 +2227,7 @@
 ;
 ; AVX512F-LABEL: load_sext_8i1_to_8i16:
 ; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    kmovw (%rdi), %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -2354,8 +2236,7 @@
 ;
 ; AVX512BW-LABEL: load_sext_8i1_to_8i16:
 ; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k0
+; AVX512BW-NEXT:    kmovw (%rdi), %k0
 ; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper
@@ -2492,50 +2373,42 @@
 define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_8i8_to_8i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movsbq 1(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    movsbq (%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm0
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT:    movsbq 3(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm2
-; SSE2-NEXT:    movsbq 2(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT:    movsbq 5(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm3
-; SSE2-NEXT:    movsbq 4(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm2
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE2-NEXT:    movsbq 7(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm4
-; SSE2-NEXT:    movsbq 6(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm3
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT:    psrad $24, %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT:    psrad $24, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_8i8_to_8i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movsbq 1(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    movsbq (%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm0
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT:    movsbq 3(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm2
-; SSSE3-NEXT:    movsbq 2(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSSE3-NEXT:    movsbq 5(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm3
-; SSSE3-NEXT:    movsbq 4(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm2
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSSE3-NEXT:    movsbq 7(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm4
-; SSSE3-NEXT:    movsbq 6(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm3
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSSE3-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT:    psrad $24, %xmm1
+; SSSE3-NEXT:    pxor %xmm4, %xmm4
+; SSSE3-NEXT:    pxor %xmm3, %xmm3
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT:    psrad $24, %xmm3
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT:    movdqa %xmm3, %xmm2
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_8i8_to_8i64:
@@ -2548,15 +2421,11 @@
 ;
 ; AVX1-LABEL: load_sext_8i8_to_8i64:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    vpmovsxbd 4(%rdi), %xmm1
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbq 6(%rdi), %xmm1
+; AVX1-NEXT:    vpmovsxbq 4(%rdi), %xmm2
+; AVX1-NEXT:    vpmovsxbq 2(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxbq (%rdi), %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT:    retq
 ;
@@ -2574,50 +2443,22 @@
 ; X32-SSE2-LABEL: load_sext_8i8_to_8i64:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movsbl 1(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT:    movsbl (%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT:    movsbl 3(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT:    movsbl 2(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm3
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X32-SSE2-NEXT:    movsbl 5(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm3
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X32-SSE2-NEXT:    movsbl 4(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm4
+; X32-SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
+; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X32-SSE2-NEXT:    psrad $24, %xmm1
+; X32-SSE2-NEXT:    pxor %xmm4, %xmm4
+; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
+; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
+; X32-SSE2-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X32-SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X32-SSE2-NEXT:    psrad $24, %xmm3
+; X32-SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; X32-SSE2-NEXT:    movdqa %xmm3, %xmm2
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; X32-SSE2-NEXT:    movsbl 7(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm4
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm3
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; X32-SSE2-NEXT:    movsbl 6(%eax), %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm3
-; X32-SSE2-NEXT:    sarl $31, %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm5
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: load_sext_8i8_to_8i64:
@@ -2839,21 +2680,12 @@
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: load_sext_8i1_to_8i32:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzbl (%rdi), %eax
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: load_sext_8i1_to_8i32:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzbl (%rdi), %eax
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: load_sext_8i1_to_8i32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    kmovw (%rdi), %k1
+; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512-NEXT:    retq
 ;
 ; X32-SSE2-LABEL: load_sext_8i1_to_8i32:
 ; X32-SSE2:       # %bb.0: # %entry
@@ -2951,25 +2783,21 @@
 define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_8i8_to_8i32:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    psrad $24, %xmm0
-; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    psrad $24, %xmm0
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    psrad $24, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_8i8_to_8i32:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT:    psrad $24, %xmm0
-; SSSE3-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT:    psrad $24, %xmm0
+; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT:    psrad $24, %xmm1
 ; SSSE3-NEXT:    retq
 ;
@@ -2981,10 +2809,8 @@
 ;
 ; AVX1-LABEL: load_sext_8i8_to_8i32:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vpmovsxbw (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxbd 4(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -3001,13 +2827,11 @@
 ; X32-SSE2-LABEL: load_sext_8i8_to_8i32:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT:    psrad $24, %xmm0
-; X32-SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X32-SSE2-NEXT:    psrad $24, %xmm0
+; X32-SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; X32-SSE2-NEXT:    psrad $24, %xmm1
 ; X32-SSE2-NEXT:    retl
 ;
@@ -5296,21 +5120,19 @@
 define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_16i8_to_16i16:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT:    psraw $8, %xmm0
-; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE2-NEXT:    psraw $8, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_16i8_to_16i16:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    movdqa (%rdi), %xmm1
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT:    psraw $8, %xmm0
-; SSSE3-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSSE3-NEXT:    psraw $8, %xmm1
 ; SSSE3-NEXT:    retq
 ;
@@ -5340,11 +5162,10 @@
 ; X32-SSE2-LABEL: load_sext_16i8_to_16i16:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT:    movdqa (%eax), %xmm1
+; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X32-SSE2-NEXT:    psraw $8, %xmm0
-; X32-SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; X32-SSE2-NEXT:    psraw $8, %xmm1
 ; X32-SSE2-NEXT:    retl
 ;
@@ -5460,30 +5281,26 @@
 define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
 ; SSE2-LABEL: load_sext_4i16_to_4i64:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movswq 2(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    movswq (%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm0
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT:    movswq 6(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm2
-; SSE2-NEXT:    movswq 4(%rdi), %rax
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_4i16_to_4i64:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movswq 2(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    movswq (%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm0
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT:    movswq 6(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm2
-; SSSE3-NEXT:    movswq 4(%rdi), %rax
-; SSSE3-NEXT:    movq %rax, %xmm1
-; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT:    psrad $16, %xmm1
+; SSSE3-NEXT:    pxor %xmm2, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    movdqa %xmm1, %xmm0
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_sext_4i16_to_4i64:
@@ -5494,10 +5311,8 @@
 ;
 ; AVX1-LABEL: load_sext_4i16_to_4i64:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vpmovsxwd (%rdi), %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxwq 4(%rdi), %xmm0
+; AVX1-NEXT:    vpmovsxwq (%rdi), %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -5514,28 +5329,14 @@
 ; X32-SSE2-LABEL: load_sext_4i16_to_4i64:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movswl 2(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT:    movswl (%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
+; X32-SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X32-SSE2-NEXT:    psrad $16, %xmm1
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; X32-SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT:    movswl 6(%eax), %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm2
-; X32-SSE2-NEXT:    sarl $31, %ecx
-; X32-SSE2-NEXT:    movd %ecx, %xmm1
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT:    movswl 4(%eax), %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm1
-; X32-SSE2-NEXT:    sarl $31, %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm3
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; X32-SSE2-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: load_sext_4i16_to_4i64:
@@ -5553,21 +5354,19 @@
 define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
 ; SSE2-LABEL: load_sext_8i16_to_8i32:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    psrad $16, %xmm0
-; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_sext_8i16_to_8i32:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT:    movdqa (%rdi), %xmm1
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    psrad $16, %xmm0
-; SSSE3-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT:    psrad $16, %xmm1
 ; SSSE3-NEXT:    retq
 ;
@@ -5597,11 +5396,10 @@
 ; X32-SSE2-LABEL: load_sext_8i16_to_8i32:
 ; X32-SSE2:       # %bb.0: # %entry
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT:    movdqa (%eax), %xmm1
+; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X32-SSE2-NEXT:    psrad $16, %xmm0
-; X32-SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; X32-SSE2-NEXT:    psrad $16, %xmm1
 ; X32-SSE2-NEXT:    retl
 ;
diff --git a/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll b/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll
index e9bccd2..e910c9c 100644
--- a/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll
@@ -1975,24 +1975,20 @@
 define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind {
 ; SSE-LABEL: constant_shift_v8i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    psllw $8, %xmm1
-; SSE-NEXT:    psrlw $8, %xmm1
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE-NEXT:    psraw $8, %xmm0
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psrlw $8, %xmm0
-; SSE-NEXT:    packuswb %xmm1, %xmm0
+; SSE-NEXT:    packuswb %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: constant_shift_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT:    vpsraw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX1-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2051,16 +2047,14 @@
 ;
 ; X32-SSE-LABEL: constant_shift_v8i8:
 ; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
-; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; X32-SSE-NEXT:    psraw $8, %xmm1
-; X32-SSE-NEXT:    psllw $8, %xmm1
-; X32-SSE-NEXT:    psrlw $8, %xmm1
+; X32-SSE-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; X32-SSE-NEXT:    psraw $8, %xmm0
 ; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    psrlw $8, %xmm0
-; X32-SSE-NEXT:    packuswb %xmm1, %xmm0
+; X32-SSE-NEXT:    packuswb %xmm2, %xmm0
 ; X32-SSE-NEXT:    retl
   %shift = ashr <8 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
   ret <8 x i8> %shift
@@ -2069,24 +2063,20 @@
 define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind {
 ; SSE-LABEL: constant_shift_v4i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    psllw $8, %xmm1
-; SSE-NEXT:    psrlw $8, %xmm1
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE-NEXT:    psraw $8, %xmm0
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psrlw $8, %xmm0
-; SSE-NEXT:    packuswb %xmm1, %xmm0
+; SSE-NEXT:    packuswb %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: constant_shift_v4i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT:    vpsraw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX1-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2145,16 +2135,14 @@
 ;
 ; X32-SSE-LABEL: constant_shift_v4i8:
 ; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
-; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; X32-SSE-NEXT:    psraw $8, %xmm1
-; X32-SSE-NEXT:    psllw $8, %xmm1
-; X32-SSE-NEXT:    psrlw $8, %xmm1
+; X32-SSE-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; X32-SSE-NEXT:    psraw $8, %xmm0
 ; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    psrlw $8, %xmm0
-; X32-SSE-NEXT:    packuswb %xmm1, %xmm0
+; X32-SSE-NEXT:    packuswb %xmm2, %xmm0
 ; X32-SSE-NEXT:    retl
   %shift = ashr <4 x i8> %a, <i8 0, i8 1, i8 2, i8 3>
   ret <4 x i8> %shift
@@ -2163,24 +2151,20 @@
 define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind {
 ; SSE-LABEL: constant_shift_v2i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    psllw $8, %xmm1
-; SSE-NEXT:    psrlw $8, %xmm1
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE-NEXT:    psraw $8, %xmm0
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psrlw $8, %xmm0
-; SSE-NEXT:    packuswb %xmm1, %xmm0
+; SSE-NEXT:    packuswb %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: constant_shift_v2i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT:    vpsraw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX1-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2239,16 +2223,14 @@
 ;
 ; X32-SSE-LABEL: constant_shift_v2i8:
 ; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
-; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; X32-SSE-NEXT:    psraw $8, %xmm1
-; X32-SSE-NEXT:    psllw $8, %xmm1
-; X32-SSE-NEXT:    psrlw $8, %xmm1
+; X32-SSE-NEXT:    pxor %xmm1, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; X32-SSE-NEXT:    psraw $8, %xmm0
 ; X32-SSE-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    psrlw $8, %xmm0
-; X32-SSE-NEXT:    packuswb %xmm1, %xmm0
+; X32-SSE-NEXT:    packuswb %xmm2, %xmm0
 ; X32-SSE-NEXT:    retl
   %shift = ashr <2 x i8> %a, <i8 2, i8 3>
   ret <2 x i8> %shift
diff --git a/test/CodeGen/X86/vector-shuffle-128-v8.ll b/test/CodeGen/X86/vector-shuffle-128-v8.ll
index 316b015..c5224bb 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -2475,6 +2475,66 @@
   ret <8 x i16> %shuffle
 }
 
+; PR40306
+define <8 x i16> @shuffle_v8i16_9zzzuuuu(<8 x i16> %x) {
+; SSE2-LABEL: shuffle_v8i16_9zzzuuuu:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psrld $16, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: shuffle_v8i16_9zzzuuuu:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: shuffle_v8i16_9zzzuuuu:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    psrld $16, %xmm0
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: shuffle_v8i16_9zzzuuuu:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
+; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    retq
+  %r = shufflevector <8 x i16> zeroinitializer, <8 x i16> %x, <8 x i32> <i32 9, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+  ret <8 x i16> %r
+}
+
+; PR40318
+define <8 x i16> @shuffle_v8i16_2zzzuuuu(<8 x i16> %x) {
+; SSE2-LABEL: shuffle_v8i16_2zzzuuuu:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: shuffle_v8i16_2zzzuuuu:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[4,5],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: shuffle_v8i16_2zzzuuuu:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: shuffle_v8i16_2zzzuuuu:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT:    retq
+  %r = shufflevector <8 x i16> %x, <8 x i16> zeroinitializer, <8 x i32> <i32 2, i32 9, i32 10, i32 11, i32 undef, i32 undef, i32 undef, i32 undef>
+  ret <8 x i16> %r
+}
+
 define <8 x i16> @mask_v8i16_012345ef(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: mask_v8i16_012345ef:
 ; SSE2:       # %bb.0:
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 03fe8c4..5651174 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX --check-prefix=X32-AVX1
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX --check-prefix=X32-AVX2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX512
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX2,X86-AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX512,X86-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX2,X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX512,X64-AVX512
 ;
 ; Combine tests involving AVX target shuffles
 
@@ -23,37 +23,28 @@
 declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
 
 define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>  %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   ret <4 x float> %2
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_movddup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_movddup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_movddup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
   ret <4 x float> %1
 }
 define <4 x float> @combine_vpermilvar_4f32_movddup_load(<4 x float> *%a0) {
-; X32-LABEL: combine_vpermilvar_4f32_movddup_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermilvar_4f32_movddup_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermilvar_4f32_movddup_load:
 ; X64:       # %bb.0:
@@ -65,119 +56,75 @@
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_movshdup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_movshdup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_movshdup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 undef, i32 1, i32 3, i32 3>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_movsldup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_movsldup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_movsldup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 undef>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_unpckh:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_unpckh:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_unpckh:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 2, i32 2, i32 3, i32 3>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_unpckl:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_unpckl:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_unpckl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 1, i32 1>)
   ret <4 x float> %1
 }
 
 define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_8f32_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_8f32_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_8f32_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 undef>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %1, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_8f32_10326u4u:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 0, i32 1, i32 2, i32 undef>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 undef>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
-; X32-AVX1-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX1:       # %bb.0:
-; X32-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X32-AVX1-NEXT:    retl
+; AVX1-LABEL: combine_vpermilvar_vperm2f128_8f32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX1-NEXT:    ret{{[l|q]}}
 ;
-; X32-AVX2-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X32-AVX2-NEXT:    retl
+; AVX2-LABEL: combine_vpermilvar_vperm2f128_8f32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX2-NEXT:    ret{{[l|q]}}
 ;
-; X32-AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX512:       # %bb.0:
-; X32-AVX512-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X32-AVX512-NEXT:    retl
-;
-; X64-AVX1-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X64-AVX1-NEXT:    retq
-;
-; X64-AVX2-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X64-AVX2-NEXT:    retq
-;
-; X64-AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X64-AVX512-NEXT:    retq
+; AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX512-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <8 x float> %1, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -185,15 +132,10 @@
 }
 
 define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X32:       # %bb.0:
-; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X64:       # %bb.0:
-; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <8 x float> %1, <8 x float> zeroinitializer, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -201,19 +143,12 @@
 }
 
 define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
-; X32-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; X32-NEXT:    vmovapd %xmm0, %xmm0
-; X32-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; X64-NEXT:    vmovapd %xmm0, %xmm0
-; X64-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; CHECK-NEXT:    vmovapd %xmm0, %xmm0
+; CHECK-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
@@ -221,24 +156,19 @@
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_8f32_movddup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_8f32_movddup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_8f32_movddup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
   ret <8 x float> %1
 }
 define <8 x float> @combine_vpermilvar_8f32_movddup_load(<8 x float> *%a0) {
-; X32-LABEL: combine_vpermilvar_8f32_movddup_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermilvar_8f32_movddup_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermilvar_8f32_movddup_load:
 ; X64:       # %bb.0:
@@ -250,97 +180,64 @@
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_8f32_movshdup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_8f32_movshdup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_8f32_movshdup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>)
   ret <8 x float> %1
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_8f32_movsldup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_8f32_movsldup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_8f32_movsldup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>)
   ret <8 x float> %1
 }
 
 define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
-; X32-LABEL: combine_vpermilvar_2f64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_2f64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_2f64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
   %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>  %1, <2 x i64> <i64 2, i64 0>)
   ret <2 x double> %2
 }
 
 define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
-; X32-LABEL: combine_vpermilvar_2f64_movddup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_2f64_movddup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_2f64_movddup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 0, i64 0>)
   ret <2 x double> %1
 }
 
 define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
-; X32-LABEL: combine_vpermilvar_4f64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>  %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   ret <4 x double> %2
 }
 
 define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
-; X32-LABEL: combine_vpermilvar_4f64_movddup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f64_movddup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f64_movddup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 0, i64 0, i64 4, i64 4>)
   ret <4 x double> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_4stage:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_4stage:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_4stage:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>  %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
   %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>  %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
@@ -349,15 +246,10 @@
 }
 
 define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_8f32_4stage:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_8f32_4stage:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_8f32_4stage:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>  %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
@@ -366,119 +258,94 @@
 }
 
 define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
-; X32-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X32:       # %bb.0:
-; X32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X64:       # %bb.0:
-; X64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_4f32_as_insertps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 1, i32 4>
   ret <4 x float> %2
 }
 
 define <2 x double> @constant_fold_vpermilvar_pd() {
-; X32-LABEL: constant_fold_vpermilvar_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [2.0E+0,1.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermilvar_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [2.0E+0,1.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermilvar_pd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [2.0E+0,1.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> <double 1.0, double 2.0>, <2 x i64> <i64 2, i64 0>)
   ret <2 x double> %1
 }
 
 define <4 x double> @constant_fold_vpermilvar_pd_256() {
-; X32-LABEL: constant_fold_vpermilvar_pd_256:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [2.0E+0,1.0E+0,3.0E+0,4.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermilvar_pd_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [2.0E+0,1.0E+0,3.0E+0,4.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermilvar_pd_256:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [2.0E+0,1.0E+0,3.0E+0,4.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
   ret <4 x double> %1
 }
 
 define <4 x float> @constant_fold_vpermilvar_ps() {
-; X32-LABEL: constant_fold_vpermilvar_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [4.0E+0,1.0E+0,3.0E+0,2.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermilvar_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [4.0E+0,1.0E+0,3.0E+0,2.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermilvar_ps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [4.0E+0,1.0E+0,3.0E+0,2.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x i32> <i32 3, i32 0, i32 2, i32 1>)
   ret <4 x float> %1
 }
 
 define <8 x float> @constant_fold_vpermilvar_ps_256() {
-; X32-LABEL: constant_fold_vpermilvar_ps_256:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [1.0E+0,1.0E+0,3.0E+0,2.0E+0,5.0E+0,6.0E+0,6.0E+0,6.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermilvar_ps_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [1.0E+0,1.0E+0,3.0E+0,2.0E+0,5.0E+0,6.0E+0,6.0E+0,6.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermilvar_ps_256:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [1.0E+0,1.0E+0,3.0E+0,2.0E+0,5.0E+0,6.0E+0,6.0E+0,6.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 1, i32 0, i32 1, i32 1, i32 1>)
   ret <8 x float> %1
 }
 
 define void @PR39483() {
-; X32-AVX1-LABEL: PR39483:
-; X32-AVX1:       # %bb.0: # %entry
-; X32-AVX1-NEXT:    vmovups 32, %ymm0
-; X32-AVX1-NEXT:    vmovups 64, %xmm1
-; X32-AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,3]
-; X32-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; X32-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; X32-AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
-; X32-AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; X32-AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; X32-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; X32-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
-; X32-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0
-; X32-AVX1-NEXT:    vaddps %ymm1, %ymm0, %ymm0
-; X32-AVX1-NEXT:    vmovups %ymm0, (%eax)
+; X86-AVX1-LABEL: PR39483:
+; X86-AVX1:       # %bb.0: # %entry
+; X86-AVX1-NEXT:    vmovups 32, %ymm0
+; X86-AVX1-NEXT:    vmovups 64, %xmm1
+; X86-AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,3]
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X86-AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
+; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
+; X86-AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; X86-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vmovups %ymm0, (%eax)
 ;
-; X32-AVX2-LABEL: PR39483:
-; X32-AVX2:       # %bb.0: # %entry
-; X32-AVX2-NEXT:    vmovups 32, %ymm0
-; X32-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; X32-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
-; X32-AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; X32-AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
-; X32-AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
-; X32-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmulps %ymm1, %ymm0, %ymm0
-; X32-AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
-; X32-AVX2-NEXT:    vmovups %ymm0, (%eax)
+; X86-AVX2-LABEL: PR39483:
+; X86-AVX2:       # %bb.0: # %entry
+; X86-AVX2-NEXT:    vmovups 32, %ymm0
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X86-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
+; X86-AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
+; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT:    vmovups %ymm0, (%eax)
 ;
-; X32-AVX512-LABEL: PR39483:
-; X32-AVX512:       # %bb.0: # %entry
-; X32-AVX512-NEXT:    vmovups 0, %zmm0
-; X32-AVX512-NEXT:    vmovups 64, %ymm1
-; X32-AVX512-NEXT:    vmovaps {{.*#+}} zmm2 = <2,5,8,11,14,17,20,23,u,u,u,u,u,u,u,u>
-; X32-AVX512-NEXT:    vpermi2ps %zmm1, %zmm0, %zmm2
-; X32-AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX512-NEXT:    vmulps %ymm0, %ymm2, %ymm1
-; X32-AVX512-NEXT:    vaddps %ymm0, %ymm1, %ymm0
-; X32-AVX512-NEXT:    vmovups %ymm0, (%eax)
+; X86-AVX512-LABEL: PR39483:
+; X86-AVX512:       # %bb.0: # %entry
+; X86-AVX512-NEXT:    vmovups 0, %zmm0
+; X86-AVX512-NEXT:    vmovups 64, %ymm1
+; X86-AVX512-NEXT:    vmovaps {{.*#+}} zmm2 = <2,5,8,11,14,17,20,23,u,u,u,u,u,u,u,u>
+; X86-AVX512-NEXT:    vpermi2ps %zmm1, %zmm0, %zmm2
+; X86-AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX512-NEXT:    vmulps %ymm0, %ymm2, %ymm1
+; X86-AVX512-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; X86-AVX512-NEXT:    vmovups %ymm0, (%eax)
 ;
 ; X64-AVX1-LABEL: PR39483:
 ; X64-AVX1:       # %bb.0: # %entry
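Note that the *_load tests above keep separate X86/X64 blocks even after the consolidation: on 32-bit the pointer argument is first loaded from the stack (movl {{[0-9]+}}(%esp), %eax) before the memory operand is folded, while on 64-bit it already arrives in a register. A standalone sketch of that split-block pattern, under the same llc/FileCheck assumption as above and with deliberately loose CHECK lines (a sketch, not an autogenerated regression test):

; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64

define <2 x double> @movddup_load_sketch(<2 x double> *%p0) {
; The broadcast-of-element-0 shuffle folds the load into vmovddup on both
; targets; only the pointer setup differs between them.
; CHECK-LABEL: movddup_load_sketch:
; X86: movl {{[0-9]+}}(%esp), %eax
; CHECK: vmovddup {{.*#+}} xmm0 = mem[0,0]
; CHECK: ret{{[l|q]}}
  %1 = load <2 x double>, <2 x double> *%p0
  %2 = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
  ret <2 x double> %2
}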
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 2ea0f1a..bce3ac3 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -611,6 +611,40 @@
 }
 declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone
 
+define <32 x i8> @combine_pshufb_as_packsswb(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; CHECK-LABEL: combine_pshufb_as_packsswb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsraw $11, %ymm0, %ymm0
+; CHECK-NEXT:    vpsraw $11, %ymm1, %ymm1
+; CHECK-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %1 = ashr <16 x i16> %a0, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %2 = ashr <16 x i16> %a1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %3 = bitcast <16 x i16> %1 to <32 x i8>
+  %4 = bitcast <16 x i16> %2 to <32 x i8>
+  %5 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %3, <32 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %6 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %4, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
+  %7 = or <32 x i8> %5, %6
+  ret <32 x i8> %7
+}
+
+define <32 x i8> @combine_pshufb_as_packuswb(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; CHECK-LABEL: combine_pshufb_as_packuswb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlw $11, %ymm0, %ymm0
+; CHECK-NEXT:    vpsrlw $11, %ymm1, %ymm1
+; CHECK-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %1 = lshr <16 x i16> %a0, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %2 = lshr <16 x i16> %a1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %3 = bitcast <16 x i16> %1 to <32 x i8>
+  %4 = bitcast <16 x i16> %2 to <32 x i8>
+  %5 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %3, <32 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %6 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %4, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
+  %7 = or <32 x i8> %5, %6
+  ret <32 x i8> %7
+}
+
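The two tests just added check that a pair of vpshufb ops whose results are OR'd together is recognized as a saturating pack: after ashr or lshr by 11, every i16 lane fits the signed [-16,15] or unsigned [0,31] range, so truncating to the low byte coincides with saturation and a single vpacksswb/vpackuswb can replace two vpshufb plus a vpor. A trimmed, standalone copy of the first test with loosened checks, convenient for reproducing the fold locally (same llc/FileCheck assumption as above):

; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s

declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)

define <32 x i8> @packsswb_sketch(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; The even-byte pshufb masks pick the truncated lanes; index -1 zeroes the
; remaining bytes, so the or merges the two halves exactly as vpacksswb would.
; CHECK-LABEL: packsswb_sketch:
; CHECK: vpacksswb %ymm1, %ymm0, %ymm0
  %1 = ashr <16 x i16> %a0, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
  %2 = ashr <16 x i16> %a1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
  %3 = bitcast <16 x i16> %1 to <32 x i8>
  %4 = bitcast <16 x i16> %2 to <32 x i8>
  %5 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %3, <32 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %6 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %4, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
  %7 = or <32 x i8> %5, %6
  ret <32 x i8> %7
}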
 define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) {
 ; X86-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
 ; X86:       # %bb.0:
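The avx512bw file below gets the same prefix treatment, and its masked variants illustrate the other recurring reason blocks stay split: on 32-bit the mask argument is reloaded from the stack (movzbl plus kmovd for i8 masks, kmovw/kmovd/kmovq for wider ones), while on 64-bit it arrives in a GPR. A minimal masked sketch under the same assumptions, with loose checks (the exact 64-bit kmov form is my expectation, not taken from this patch):

; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,X64

define <16 x float> @mask_sketch(<16 x float> %x0, i16 %m) {
; Zero out the lanes whose mask bit is clear; only the path the i16 mask
; takes into %k1 differs between the two targets.
; CHECK-LABEL: mask_sketch:
; X86: kmovw {{[0-9]+}}(%esp), %k1
; X64: kmovd %edi, %k1
; CHECK: ret{{[l|q]}}
  %b = bitcast i16 %m to <16 x i1>
  %r = select <16 x i1> %b, <16 x float> %x0, <16 x float> zeroinitializer
  ret <16 x float> %r
}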
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
index b8d3824..729863c 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,X64
 
 declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
 
@@ -26,28 +26,24 @@
 declare <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
 
 define <8 x double> @combine_permvar_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_permvar_8f64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_8f64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_8f64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>)
   %2 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %1, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>)
   ret <8 x double> %2
 }
 define <8 x double> @combine_permvar_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; X32-LABEL: combine_permvar_8f64_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovapd {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vpermpd %zmm0, %zmm2, %zmm1 {%k1}
-; X32-NEXT:    vmovapd {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT:    vpermpd %zmm1, %zmm0, %zmm1 {%k1}
-; X32-NEXT:    vmovapd %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_permvar_8f64_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovapd {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    vpermpd %zmm0, %zmm2, %zmm1 {%k1}
+; X86-NEXT:    vmovapd {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X86-NEXT:    vpermpd %zmm1, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovapd %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_permvar_8f64_identity_mask:
 ; X64:       # %bb.0:
@@ -68,28 +64,24 @@
 }
 
 define <8 x i64> @combine_permvar_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
-; X32-LABEL: combine_permvar_8i64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_8i64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_8i64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>)
   %2 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %1, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>)
   ret <8 x i64> %2
 }
 define <8 x i64> @combine_permvar_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
-; X32-LABEL: combine_permvar_8i64_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vpermq %zmm0, %zmm2, %zmm1 {%k1}
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT:    vpermq %zmm1, %zmm0, %zmm1 {%k1}
-; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_permvar_8i64_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    vpermq %zmm0, %zmm2, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X86-NEXT:    vpermq %zmm1, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_permvar_8i64_identity_mask:
 ; X64:       # %bb.0:
@@ -110,27 +102,23 @@
 }
 
 define <8 x double> @combine_vpermt2var_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_vpermt2var_8f64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_8f64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_8f64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x0, <8 x double> %x1, i8 -1)
   %res1 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 -1)
   ret <8 x double> %res1
 }
 define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; X32-LABEL: combine_vpermt2var_8f64_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovapd {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vpermi2pd %zmm0, %zmm0, %zmm1 {%k1} {z}
-; X32-NEXT:    vmovapd {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT:    vpermi2pd %zmm1, %zmm1, %zmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_8f64_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovapd {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    vpermi2pd %zmm0, %zmm0, %zmm1 {%k1} {z}
+; X86-NEXT:    vmovapd {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X86-NEXT:    vpermi2pd %zmm1, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_8f64_identity_mask:
 ; X64:       # %bb.0:
@@ -146,24 +134,19 @@
 }
 
 define <8 x double> @combine_vpermt2var_8f64_movddup(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_vpermt2var_8f64_movddup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_8f64_movddup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_8f64_movddup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2, i64 2, i64 4, i64 4, i64 undef, i64 undef>, <8 x double> %x0, <8 x double> %x1, i8 -1)
   ret <8 x double> %res0
 }
 define <8 x double> @combine_vpermt2var_8f64_movddup_load(<8 x double> *%p0, <8 x double> %x1) {
-; X32-LABEL: combine_vpermt2var_8f64_movddup_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_8f64_movddup_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_8f64_movddup_load:
 ; X64:       # %bb.0:
@@ -174,12 +157,12 @@
   ret <8 x double> %res0
 }
 define <8 x double> @combine_vpermt2var_8f64_movddup_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; X32-LABEL: combine_vpermt2var_8f64_movddup_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_8f64_movddup_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_8f64_movddup_mask:
 ; X64:       # %bb.0:
@@ -191,27 +174,23 @@
 }
 
 define <8 x i64> @combine_vpermt2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
-; X32-LABEL: combine_vpermt2var_8i64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_8i64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_8i64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x0, <8 x i64> %x1, i8 -1)
   %res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 undef, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, <8 x i64> %res0, i8 -1)
   ret <8 x i64> %res1
 }
 define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
-; X32-LABEL: combine_vpermt2var_8i64_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vpermi2q %zmm0, %zmm0, %zmm1 {%k1} {z}
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT:    vpermi2q %zmm1, %zmm1, %zmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_8i64_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    vpermi2q %zmm0, %zmm0, %zmm1 {%k1} {z}
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
+; X86-NEXT:    vpermi2q %zmm1, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_8i64_identity_mask:
 ; X64:       # %bb.0:
@@ -227,26 +206,22 @@
 }
 
 define <16 x float> @combine_vpermt2var_16f32_identity(<16 x float> %x0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16f32_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16f32_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   %res1 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x float> %res0, <16 x float> %res0, i16 -1)
   ret <16 x float> %res1
 }
 define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermi2ps %zmm0, %zmm0, %zmm1 {%k1} {z}
-; X32-NEXT:    vmovaps {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT:    vpermi2ps %zmm1, %zmm1, %zmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermi2ps %zmm0, %zmm0, %zmm1 {%k1} {z}
+; X86-NEXT:    vmovaps {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X86-NEXT:    vpermi2ps %zmm1, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_identity_mask:
 ; X64:       # %bb.0:
@@ -262,28 +237,22 @@
 }
 
 define <16 x float> @combine_vpermt2var_16f32_vmovddup(<16 x float> %x0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovddup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16f32_vmovddup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X64-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16f32_vmovddup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; CHECK-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovddup_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovaps (%eax), %zmm1
-; X32-NEXT:    vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT:    vpermi2ps %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovddup_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovaps (%eax), %zmm1
+; X86-NEXT:    vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X86-NEXT:    vpermi2ps %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_load:
 ; X64:       # %bb.0:
@@ -296,12 +265,12 @@
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
 ; X64:       # %bb.0:
@@ -313,14 +282,14 @@
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovaps (%eax), %zmm1
-; X32-NEXT:    vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermi2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovaps (%eax), %zmm1
+; X86-NEXT:    vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermi2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
 ; X64:       # %bb.0:
@@ -335,24 +304,19 @@
 }
 
 define <16 x float> @combine_vpermt2var_16f32_vmovshdup(<16 x float> %x0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovshdup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16f32_vmovshdup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16f32_vmovshdup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovshdup_load(<16 x float> *%p0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
 ; X64:       # %bb.0:
@@ -363,11 +327,11 @@
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovshdup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
 ; X64:       # %bb.0:
@@ -379,24 +343,19 @@
 }
 
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup(<16 x float> %x0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovsldup:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16f32_vmovsldup:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16f32_vmovsldup:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup_load(<16 x float> *%p0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
 ; X64:       # %bb.0:
@@ -407,11 +366,11 @@
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
 ; X64:       # %bb.0:
@@ -422,12 +381,12 @@
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
 ; X64:       # %bb.0:
@@ -440,24 +399,19 @@
 }
 
 define <16 x float> @combine_vpermt2var_16f32_vpermilps(<16 x float> %x0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vpermilps:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16f32_vpermilps:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16f32_vpermilps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>, <16 x float> %x0, <16 x float> %x1, i16 -1)
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vpermilps_load(<16 x float> *%p0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermt2var_16f32_vpermilps_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpermilps {{.*#+}} zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vpermilps_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpermilps {{.*#+}} zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vpermilps_load:
 ; X64:       # %bb.0:
@@ -468,11 +422,11 @@
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
 ; X64:       # %bb.0:
@@ -483,12 +437,12 @@
   ret <16 x float> %res0
 }
 define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
 ; X64:       # %bb.0:
@@ -501,26 +455,22 @@
 }
 
 define <16 x i32> @combine_vpermt2var_16i32_identity(<16 x i32> %x0, <16 x i32> %x1) {
-; X32-LABEL: combine_vpermt2var_16i32_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16i32_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16i32_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>, <16 x i32> %x0, <16 x i32> %x1, i16 -1)
   %res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 undef, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x i32> %res0, <16 x i32> %res0, i16 -1)
   ret <16 x i32> %res1
 }
 define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x i32> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16i32_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermi2d %zmm0, %zmm0, %zmm1 {%k1} {z}
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT:    vpermi2d %zmm1, %zmm1, %zmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16i32_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermi2d %zmm0, %zmm0, %zmm1 {%k1} {z}
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X86-NEXT:    vpermi2d %zmm1, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i32_identity_mask:
 ; X64:       # %bb.0:
@@ -536,26 +486,22 @@
 }
 
 define <32 x i16> @combine_vpermt2var_32i16_identity(<32 x i16> %x0, <32 x i16> %x1) {
-; X32-LABEL: combine_vpermt2var_32i16_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_32i16_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_32i16_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %x0, <32 x i16> %x1, i32 -1)
   %res1 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 63, i16 30, i16 61, i16 28, i16 59, i16 26, i16 57, i16 24, i16 55, i16 22, i16 53, i16 20, i16 51, i16 18, i16 49, i16 16, i16 47, i16 46, i16 13, i16 44, i16 11, i16 42, i16 9, i16 40, i16 7, i16 38, i16 5, i16 36, i16 3, i16 34, i16 1, i16 32>, <32 x i16> %res0, <32 x i16> %res0, i32 -1)
   ret <32 x i16> %res1
 }
 define <32 x i16> @combine_vpermt2var_32i16_identity_mask(<32 x i16> %x0, <32 x i16> %x1, i32 %m) {
-; X32-LABEL: combine_vpermt2var_32i16_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; X32-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermi2w %zmm0, %zmm0, %zmm1 {%k1} {z}
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
-; X32-NEXT:    vpermi2w %zmm1, %zmm1, %zmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_32i16_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermi2w %zmm0, %zmm0, %zmm1 {%k1} {z}
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
+; X86-NEXT:    vpermi2w %zmm1, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_32i16_identity_mask:
 ; X64:       # %bb.0:
@@ -571,13 +517,9 @@
 }
 
 define <64 x i8> @combine_pshufb_identity(<64 x i8> %x0) {
-; X32-LABEL: combine_pshufb_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_pshufb_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_pshufb_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %select = bitcast <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1> to <64 x i8>
   %mask = bitcast <16 x i32> <i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 undef, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051> to <64 x i8>
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %x0, <64 x i8> %mask, <64 x i8> %select, i64 -1)
@@ -585,16 +527,16 @@
   ret <64 x i8> %res1
 }
 define <64 x i8> @combine_pshufb_identity_mask(<64 x i8> %x0, i64 %m) {
-; X32-LABEL: combine_pshufb_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; X32-NEXT:    kmovq {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3
-; X32-NEXT:    vpshufb %zmm2, %zmm0, %zmm3 {%k1}
-; X32-NEXT:    vpshufb %zmm2, %zmm3, %zmm1 {%k1}
-; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_pshufb_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpternlogd $255, %zmm3, %zmm3, %zmm3
+; X86-NEXT:    vpshufb %zmm2, %zmm0, %zmm3 {%k1}
+; X86-NEXT:    vpshufb %zmm2, %zmm3, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_pshufb_identity_mask:
 ; X64:       # %bb.0:
@@ -614,68 +556,48 @@
 }
 
 define <32 x i16> @combine_permvar_as_vpbroadcastw512(<32 x i16> %x0) {
-; X32-LABEL: combine_permvar_as_vpbroadcastw512:
-; X32:       # %bb.0:
-; X32-NEXT:    vpbroadcastw %xmm0, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_as_vpbroadcastw512:
-; X64:       # %bb.0:
-; X64-NEXT:    vpbroadcastw %xmm0, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_as_vpbroadcastw512:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastw %xmm0, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> %x0, <32 x i16> zeroinitializer)
   ret <32 x i16> %1
 }
 
 define <16 x i32> @combine_permvar_as_vpbroadcastd512(<16 x i32> %x0) {
-; X32-LABEL: combine_permvar_as_vpbroadcastd512:
-; X32:       # %bb.0:
-; X32-NEXT:    vbroadcastss %xmm0, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_as_vpbroadcastd512:
-; X64:       # %bb.0:
-; X64-NEXT:    vbroadcastss %xmm0, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_as_vpbroadcastd512:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vbroadcastss %xmm0, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> %x0, <16 x i32> zeroinitializer)
   ret <16 x i32> %1
 }
 
 define <8 x i64> @combine_permvar_as_vpbroadcastq512(<8 x i64> %x0) {
-; X32-LABEL: combine_permvar_as_vpbroadcastq512:
-; X32:       # %bb.0:
-; X32-NEXT:    vbroadcastsd %xmm0, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_as_vpbroadcastq512:
-; X64:       # %bb.0:
-; X64-NEXT:    vbroadcastsd %xmm0, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_as_vpbroadcastq512:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %x0, <8 x i64> zeroinitializer)
   ret <8 x i64> %1
 }
 
 define <8 x i64> @combine_permvar_8i64_as_permq(<8 x i64> %x0, <8 x i64> %x1) {
-; X32-LABEL: combine_permvar_8i64_as_permq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_8i64_as_permq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_8i64_as_permq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>)
   ret <8 x i64> %1
 }
 define <8 x i64> @combine_permvar_8i64_as_permq_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
-; X32-LABEL: combine_permvar_8i64_as_permq_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
-; X32-NEXT:    vmovdqa64 %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_permvar_8i64_as_permq_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_permvar_8i64_as_permq_mask:
 ; X64:       # %bb.0:
@@ -690,26 +612,21 @@
 }
 
 define <8 x double> @combine_permvar_8f64_as_permpd(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_permvar_8f64_as_permpd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_8f64_as_permpd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_8f64_as_permpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>)
   ret <8 x double> %1
 }
 define <8 x double> @combine_permvar_8f64_as_permpd_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
-; X32-LABEL: combine_permvar_8f64_as_permpd_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
-; X32-NEXT:    vmovapd %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_permvar_8f64_as_permpd_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
+; X86-NEXT:    vmovapd %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_permvar_8f64_as_permpd_mask:
 ; X64:       # %bb.0:
@@ -724,39 +641,29 @@
 }
 
 define <16 x float> @combine_vpermilvar_16f32_230146759A8BCFDE(<16 x float> %x0) {
-; X32-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 0, i32 3, i32 2>, <16 x float> undef, i16 -1)
   %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %res0, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 1, i32 0, i32 2, i32 3, i32 0, i32 2, i32 1, i32 1, i32 2, i32 0, i32 3>, <16 x float> undef, i16 -1)
   ret <16 x float> %res1
 }
 
 define <64 x i8> @combine_pshufb_as_pslldq(<64 x i8> %a0) {
-; X32-LABEL: combine_pshufb_as_pslldq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_pshufb_as_pslldq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_pshufb_as_pslldq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>, <64 x i8> undef, i64 -1)
   ret <64 x i8> %res0
 }
 define <64 x i8> @combine_pshufb_as_pslldq_mask(<64 x i8> %a0, i64 %m) {
-; X32-LABEL: combine_pshufb_as_pslldq_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    kmovq {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpshufb {{.*#+}} zmm0 {%k1} {z} = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
-; X32-NEXT:    retl
+; X86-LABEL: combine_pshufb_as_pslldq_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpshufb {{.*#+}} zmm0 {%k1} {z} = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_pshufb_as_pslldq_mask:
 ; X64:       # %bb.0:
@@ -768,24 +675,19 @@
 }
 
 define <64 x i8> @combine_pshufb_as_psrldq(<64 x i8> %a0) {
-; X32-LABEL: combine_pshufb_as_psrldq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_pshufb_as_psrldq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_pshufb_as_psrldq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>, <64 x i8> undef, i64 -1)
   ret <64 x i8> %res0
 }
 define <64 x i8> @combine_pshufb_as_psrldq_mask(<64 x i8> %a0, i64 %m) {
-; X32-LABEL: combine_pshufb_as_psrldq_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    kmovq {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-NEXT:    retl
+; X86-LABEL: combine_pshufb_as_psrldq_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovq {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_pshufb_as_psrldq_mask:
 ; X64:       # %bb.0:
@@ -797,161 +699,145 @@
 }
 
 define <32 x i16> @combine_permvar_as_pshuflw(<32 x i16> %a0) {
-; X32-LABEL: combine_permvar_as_pshuflw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_permvar_as_pshuflw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_permvar_as_pshuflw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>)
   ret <32 x i16> %1
 }
 
 define <32 x i16> @combine_pshufb_as_pshufhw(<32 x i16> %a0) {
-; X32-LABEL: combine_pshufb_as_pshufhw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_pshufb_as_pshufhw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_pshufb_as_pshufhw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 5, i16 4, i16 7, i16 6, i16 8, i16 9, i16 10, i16 11, i16 13, i16 12, i16 15, i16 14, i16 16, i16 17, i16 18, i16 19, i16 21, i16 20, i16 23, i16 22, i16 24, i16 25, i16 26, i16 27, i16 29, i16 28, i16 31, i16 30>)
   ret <32 x i16> %1
 }
 
+define <64 x i8> @combine_pshufb_as_packsswb(<32 x i16> %a0, <32 x i16> %a1) nounwind {
+; CHECK-LABEL: combine_pshufb_as_packsswb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsraw $11, %zmm0, %zmm0
+; CHECK-NEXT:    vpsraw $11, %zmm1, %zmm1
+; CHECK-NEXT:    vpacksswb %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %1 = ashr <32 x i16> %a0, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %2 = ashr <32 x i16> %a1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %3 = bitcast <32 x i16> %1 to <64 x i8>
+  %4 = bitcast <32 x i16> %2 to <64 x i8>
+  %5 = tail call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %3, <64 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> undef, i64 -1)
+  %6 = tail call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %4, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>, <64 x i8> undef, i64 -1)
+  %7 = or <64 x i8> %5, %6
+  ret <64 x i8> %7
+}
+
+define <64 x i8> @combine_pshufb_as_packuswb(<32 x i16> %a0, <32 x i16> %a1) nounwind {
+; CHECK-LABEL: combine_pshufb_as_packuswb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlw $11, %zmm0, %zmm0
+; CHECK-NEXT:    vpsrlw $11, %zmm1, %zmm1
+; CHECK-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
+  %1 = lshr <32 x i16> %a0, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %2 = lshr <32 x i16> %a1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+  %3 = bitcast <32 x i16> %1 to <64 x i8>
+  %4 = bitcast <32 x i16> %2 to <64 x i8>
+  %5 = tail call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %3, <64 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> undef, i64 -1)
+  %6 = tail call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %4, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>, <64 x i8> undef, i64 -1)
+  %7 = or <64 x i8> %5, %6
+  ret <64 x i8> %7
+}
+
 define <32 x i16> @combine_vpermi2var_32i16_as_pshufb(<32 x i16> %a0) {
-; X32-LABEL: combine_vpermi2var_32i16_as_pshufb:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29,34,35,32,33,38,39,36,37,42,43,40,41,46,47,44,45,50,51,48,49,54,55,52,53,58,59,56,57,62,63,60,61]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_32i16_as_pshufb:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29,34,35,32,33,38,39,36,37,42,43,40,41,46,47,44,45,50,51,48,49,54,55,52,53,58,59,56,57,62,63,60,61]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_32i16_as_pshufb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29,34,35,32,33,38,39,36,37,42,43,40,41,46,47,44,45,50,51,48,49,54,55,52,53,58,59,56,57,62,63,60,61]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>)
   %2 = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> %1, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 5, i16 4, i16 7, i16 6, i16 8, i16 9, i16 10, i16 11, i16 13, i16 12, i16 15, i16 14, i16 16, i16 17, i16 18, i16 19, i16 21, i16 20, i16 23, i16 22, i16 24, i16 25, i16 26, i16 27, i16 29, i16 28, i16 31, i16 30>)
   ret <32 x i16> %2
 }
 
 define <8 x double> @combine_vpermi2var_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_vpermi2var_8f64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_8f64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_8f64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x1, i8 -1)
   %res1 = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, i8 -1)
   ret <8 x double> %res1
 }
 
 define <8 x double> @combine_vpermi2var_8f64_as_shufpd(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_vpermi2var_8f64_as_shufpd:
-; X32:       # %bb.0:
-; X32-NEXT:    vshufpd {{.*#+}} zmm0 = zmm0[1],zmm1[0],zmm0[2],zmm1[2],zmm0[5],zmm1[5],zmm0[6],zmm1[7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_8f64_as_shufpd:
-; X64:       # %bb.0:
-; X64-NEXT:    vshufpd {{.*#+}} zmm0 = zmm0[1],zmm1[0],zmm0[2],zmm1[2],zmm0[5],zmm1[5],zmm0[6],zmm1[7]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_8f64_as_shufpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vshufpd {{.*#+}} zmm0 = zmm0[1],zmm1[0],zmm0[2],zmm1[2],zmm0[5],zmm1[5],zmm0[6],zmm1[7]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> <i64 1, i64 8, i64 2, i64 10, i64 5, i64 13, i64 6, i64 15>, <8 x double> %x1, i8 -1)
   ret <8 x double> %1
 }
 
 define <8 x i64> @combine_vpermi2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
-; X32-LABEL: combine_vpermi2var_8i64_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_8i64_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_8i64_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x1, i8 -1)
   %res1 = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %res0, <8 x i64> <i64 undef, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, i8 -1)
   ret <8 x i64> %res1
 }
 
 define <16 x float> @combine_vpermi2var_16f32_identity(<16 x float> %x0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermi2var_16f32_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_16f32_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_16f32_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %x1, i16 -1)
   %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %res0, <16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x float> %res0, i16 -1)
   ret <16 x float> %res1
 }
 
 define <16 x i32> @combine_vpermi2var_16i32_identity(<16 x i32> %x0, <16 x i32> %x1) {
-; X32-LABEL: combine_vpermi2var_16i32_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_16i32_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_16i32_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>, <16 x i32> %x1, i16 -1)
   %res1 = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %res0, <16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 undef, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x i32> %res0, i16 -1)
   ret <16 x i32> %res1
 }
 
 define <16 x float> @combine_vpermt2var_vpermi2var_16f32_as_unpckhps(<16 x float> %a0, <16 x float> %a1) {
-; X32-LABEL: combine_vpermt2var_vpermi2var_16f32_as_unpckhps:
-; X32:       # %bb.0:
-; X32-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_vpermi2var_16f32_as_unpckhps:
-; X64:       # %bb.0:
-; X64-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_vpermi2var_16f32_as_unpckhps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %a0, <16 x i32> <i32 18, i32 2, i32 19, i32 3, i32 22, i32 6, i32 23, i32 7, i32 26, i32 10, i32 27, i32 11, i32 30, i32 14, i32 31, i32 15>, <16 x float> %a1, i16 -1)
   ret <16 x float> %res0
 }
 
 define <16 x i32> @vpermt2var_vpermi2var_16i32_as_unpckldq(<16 x i32> %a0, <16 x i32> %a1) {
-; X32-LABEL: vpermt2var_vpermi2var_16i32_as_unpckldq:
-; X32:       # %bb.0:
-; X32-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
-; X32-NEXT:    retl
-;
-; X64-LABEL: vpermt2var_vpermi2var_16i32_as_unpckldq:
-; X64:       # %bb.0:
-; X64-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
-; X64-NEXT:    retq
+; CHECK-LABEL: vpermt2var_vpermi2var_16i32_as_unpckldq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %a0, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>, <16 x i32> %a1, i16 -1)
   ret <16 x i32> %res0
 }
 
 define <32 x i16> @combine_vpermi2var_32i16_identity(<32 x i16> %x0, <32 x i16> %x1) {
-; X32-LABEL: combine_vpermi2var_32i16_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_32i16_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_32i16_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %x0, <32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %x1, i32 -1)
   %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %res0, <32 x i16> <i16 63, i16 30, i16 61, i16 28, i16 59, i16 26, i16 57, i16 24, i16 55, i16 22, i16 53, i16 20, i16 51, i16 18, i16 49, i16 16, i16 47, i16 46, i16 13, i16 44, i16 11, i16 42, i16 9, i16 40, i16 7, i16 38, i16 5, i16 36, i16 3, i16 34, i16 1, i16 32>, <32 x i16> %res0, i32 -1)
   ret <32 x i16> %res1
 }
 
 define <8 x double> @combine_vpermi2var_8f64_as_vpermpd(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_vpermi2var_8f64_as_vpermpd:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermi2var_8f64_as_vpermpd:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X86-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermi2var_8f64_as_vpermpd:
 ; X64:       # %bb.0:
@@ -964,11 +850,11 @@
 }
 
 define <8 x i64> @combine_vpermt2var_8i64_as_vpermq(<8 x i64> %x0, <8 x i64> %x1) {
-; X32-LABEL: combine_vpermt2var_8i64_as_vpermq:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_8i64_as_vpermq:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X86-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_8i64_as_vpermq:
 ; X64:       # %bb.0:
@@ -981,63 +867,45 @@
 }
 
 define <16 x float> @combine_vpermi2var_16f32_as_vpermps(<16 x float> %x0, <16 x float> %x1) {
-; X32-LABEL: combine_vpermi2var_16f32_as_vpermps:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
-; X32-NEXT:    vpermps %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_16f32_as_vpermps:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
-; X64-NEXT:    vpermps %zmm0, %zmm1, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_16f32_as_vpermps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
+; CHECK-NEXT:    vpermps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>, <16 x float> %x1, i16 -1)
   %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %res0, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>, <16 x float> %res0, i16 -1)
   ret <16 x float> %res1
 }
 
 define <16 x i32> @combine_vpermt2var_16i32_as_vpermd(<16 x i32> %x0, <16 x i32> %x1) {
-; X32-LABEL: combine_vpermt2var_16i32_as_vpermd:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
-; X32-NEXT:    vpermps %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16i32_as_vpermd:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
-; X64-NEXT:    vpermps %zmm0, %zmm1, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16i32_as_vpermd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
+; CHECK-NEXT:    vpermps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>, <16 x i32> %x0, <16 x i32> %x1, i16 -1)
   %res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>, <16 x i32> %res0, <16 x i32> %res0, i16 -1)
   ret <16 x i32> %res1
 }
 
 define <32 x i16> @combine_vpermi2var_32i16_as_permw(<32 x i16> %x0, <32 x i16> %x1) {
-; X32-LABEL: combine_vpermi2var_32i16_as_permw:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [15,16,14,17,13,18,12,19,11,20,10,21,9,22,8,23,7,24,6,25,5,26,4,27,3,28,2,29,1,30,0,31]
-; X32-NEXT:    vpermw %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_32i16_as_permw:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [15,16,14,17,13,18,12,19,11,20,10,21,9,22,8,23,7,24,6,25,5,26,4,27,3,28,2,29,1,30,0,31]
-; X64-NEXT:    vpermw %zmm0, %zmm1, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_32i16_as_permw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [15,16,14,17,13,18,12,19,11,20,10,21,9,22,8,23,7,24,6,25,5,26,4,27,3,28,2,29,1,30,0,31]
+; CHECK-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %x0, <32 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0, i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16>, <32 x i16> %x1, i32 -1)
   %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %res0, <32 x i16> <i16 0, i16 31, i16 1, i16 30, i16 2, i16 29, i16 3, i16 28, i16 4, i16 27, i16 5, i16 26, i16 6, i16 25, i16 7, i16 24, i16 8, i16 23, i16 9, i16 22, i16 10, i16 21, i16 11, i16 20, i16 12, i16 19, i16 13, i16 18, i16 14, i16 17, i16 15, i16 16>, <32 x i16> %res0, i32 -1)
   ret <32 x i16> %res1
 }
 
 define <8 x double> @combine_vpermi2var_vpermt2var_8f64_as_vperm2(<8 x double> %x0, <8 x double> %x1) {
-; X32-LABEL: combine_vpermi2var_vpermt2var_8f64_as_vperm2:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovapd {{.*#+}} zmm2 = [4,0,14,0,3,0,12,0,7,0,8,0,0,0,15,0]
-; X32-NEXT:    vpermi2pd %zmm0, %zmm1, %zmm2
-; X32-NEXT:    vmovapd %zmm2, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermi2var_vpermt2var_8f64_as_vperm2:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovapd {{.*#+}} zmm2 = [4,0,14,0,3,0,12,0,7,0,8,0,0,0,15,0]
+; X86-NEXT:    vpermi2pd %zmm0, %zmm1, %zmm2
+; X86-NEXT:    vmovapd %zmm2, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermi2var_vpermt2var_8f64_as_vperm2:
 ; X64:       # %bb.0:
@@ -1051,48 +919,35 @@
 }
 
 define <16 x i32> @combine_vpermi2var_vpermt2var_16i32_as_vpermd(<16 x i32> %x0, <16 x i32> %x1) {
-; X32-LABEL: combine_vpermi2var_vpermt2var_16i32_as_vpermd:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
-; X32-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_vpermt2var_16i32_as_vpermd:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
-; X64-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_vpermt2var_16i32_as_vpermd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
+; CHECK-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> <i32 0, i32 31, i32 2, i32 29, i32 4, i32 27, i32 6, i32 25, i32 8, i32 23, i32 10, i32 21, i32 12, i32 19, i32 14, i32 17>, <16 x i32> %x1, i16 -1)
   %res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 0, i32 17, i32 2, i32 18, i32 4, i32 19, i32 6, i32 21, i32 8, i32 23, i32 10, i32 25, i32 12, i32 27, i32 14, i32 29>, <16 x i32> %res0, <16 x i32> %res0, i16 -1)
   ret <16 x i32> %res1
 }
 
 define <32 x i16> @combine_vpermt2var_vpermi2var_32i16_as_permw(<32 x i16> %x0, <32 x i16> %x1) {
-; X32-LABEL: combine_vpermt2var_vpermi2var_32i16_as_permw:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [17,39,19,38,21,37,23,36,25,35,27,34,29,33,31,32,1,47,3,46,5,45,7,44,9,43,11,42,13,41,15,40]
-; X32-NEXT:    vpermi2w %zmm0, %zmm1, %zmm2
-; X32-NEXT:    vmovdqa64 %zmm2, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_vpermi2var_32i16_as_permw:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [17,39,19,38,21,37,23,36,25,35,27,34,29,33,31,32,1,47,3,46,5,45,7,44,9,43,11,42,13,41,15,40]
-; X64-NEXT:    vpermi2w %zmm0, %zmm1, %zmm2
-; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_vpermi2var_32i16_as_permw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [17,39,19,38,21,37,23,36,25,35,27,34,29,33,31,32,1,47,3,46,5,45,7,44,9,43,11,42,13,41,15,40]
+; CHECK-NEXT:    vpermi2w %zmm0, %zmm1, %zmm2
+; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 0, i16 63, i16 1, i16 61, i16 2, i16 59, i16 3, i16 57, i16 4, i16 55, i16 5, i16 53, i16 6, i16 51, i16 7, i16 49, i16 8, i16 47, i16 9, i16 45, i16 10, i16 43, i16 11, i16 41, i16 12, i16 39, i16 13, i16 37, i16 14, i16 35, i16 15, i16 33>, <32 x i16> %x0, <32 x i16> %x1, i32 -1)
   %res1 = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %res0, <32 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0, i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16>, <32 x i16> %res0, i32 -1)
   ret <32 x i16> %res1
 }
 
 define <8 x double> @combine_vpermi2var_vpermvar_8f64_as_vperm2_zero(<8 x double> %x0) {
-; X32-LABEL: combine_vpermi2var_vpermvar_8f64_as_vperm2_zero:
-; X32:       # %bb.0:
-; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vmovapd {{.*#+}} zmm2 = [8,0,3,0,10,0,11,0,1,0,7,0,14,0,5,0]
-; X32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermi2var_vpermvar_8f64_as_vperm2_zero:
+; X86:       # %bb.0:
+; X86-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT:    vmovapd {{.*#+}} zmm2 = [8,0,3,0,10,0,11,0,1,0,7,0,14,0,5,0]
+; X86-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermi2var_vpermvar_8f64_as_vperm2_zero:
 ; X64:       # %bb.0:
@@ -1106,29 +961,22 @@
 }
 
 define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x float> %x0) {
-; X32-LABEL: combine_vpermi2var_vpermvar_16f32_as_vperm2_zero:
-; X32:       # %bb.0:
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vmovaps {{.*#+}} zmm2 = [0,13,1,12,4,9,22,12,4,25,26,9,5,29,30,8]
-; X32-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_vpermvar_16f32_as_vperm2_zero:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vmovaps {{.*#+}} zmm2 = [0,13,1,12,4,9,22,12,4,25,26,9,5,29,30,8]
-; X64-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_vpermvar_16f32_as_vperm2_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vmovaps {{.*#+}} zmm2 = [0,13,1,12,4,9,22,12,4,25,26,9,5,29,30,8]
+; CHECK-NEXT:    vpermt2ps %zmm1, %zmm2, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = shufflevector <16 x float> %x0, <16 x float> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
   %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %res0, <16 x i32> <i32 0, i32 14, i32 2, i32 12, i32 4, i32 10, i32 3, i32 12, i32 4, i32 11, i32 5, i32 10, i32 6, i32 9, i32 7, i32 8>, <16 x float> %res0, i16 -1)
   ret <16 x float> %res1
 }
 
 define <8 x i64> @combine_vpermvar_insertion_as_broadcast_v8i64(i64 %a0) {
-; X32-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
-; X32:       # %bb.0:
-; X32-NEXT:    vbroadcastsd {{[0-9]+}}(%esp), %zmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
+; X86:       # %bb.0:
+; X86-NEXT:    vbroadcastsd {{[0-9]+}}(%esp), %zmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
 ; X64:       # %bb.0:
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
index c4ab922..fd41c9f 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
@@ -1,31 +1,27 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64
 
 declare <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
 declare <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
 
 define <16 x i16> @combine_vpermt2var_16i16_identity(<16 x i16> %x0, <16 x i16> %x1) {
-; X32-LABEL: combine_vpermt2var_16i16_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16i16_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16i16_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %x0, <16 x i16> %x1, i16 -1)
   %res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 30, i16 13, i16 28, i16 11, i16 26, i16 9, i16 24, i16 7, i16 22, i16 5, i16 20, i16 3, i16 18, i16 1, i16 16>, <16 x i16> %res0, <16 x i16> %res0, i16 -1)
   ret <16 x i16> %res1
 }
 define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x i16> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16i16_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
-; X32-NEXT:    vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT:    vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16i16_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
+; X86-NEXT:    vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X86-NEXT:    vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i16_identity_mask:
 ; X64:       # %bb.0:
@@ -41,63 +37,41 @@
 }
 
 define <16 x i16> @combine_vpermi2var_16i16_as_permw(<16 x i16> %x0, <16 x i16> %x1) {
-; X32-LABEL: combine_vpermi2var_16i16_as_permw:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
-; X32-NEXT:    vpermw %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_16i16_as_permw:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
-; X64-NEXT:    vpermw %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_16i16_as_permw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
+; CHECK-NEXT:    vpermw %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %x1, i16 -1)
   %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %res0, <16 x i16> <i16 0, i16 15, i16 1, i16 14, i16 2, i16 13, i16 3, i16 12, i16 4, i16 11, i16 5, i16 10, i16 6, i16 9, i16 7, i16 8>, <16 x i16> %res0, i16 -1)
   ret <16 x i16> %res1
 }
 
 define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_vperm2(<16 x i16> %x0, <16 x i16> %x1) {
-; X32-LABEL: combine_vpermt2var_vpermi2var_16i16_as_vperm2:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
-; X32-NEXT:    vpermt2w %ymm1, %ymm2, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_vpermi2var_16i16_as_vperm2:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
-; X64-NEXT:    vpermt2w %ymm1, %ymm2, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_vpermi2var_16i16_as_vperm2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
+; CHECK-NEXT:    vpermt2w %ymm1, %ymm2, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> <i16 0, i16 31, i16 2, i16 29, i16 4, i16 27, i16 6, i16 25, i16 8, i16 23, i16 10, i16 21, i16 12, i16 19, i16 14, i16 17>, <16 x i16> %x1, i16 -1)
   %res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 0, i16 17, i16 2, i16 18, i16 4, i16 19, i16 6, i16 21, i16 8, i16 23, i16 10, i16 25, i16 12, i16 27, i16 14, i16 29>, <16 x i16> %res0, <16 x i16> %res0, i16 -1)
   ret <16 x i16> %res1
 }
 
 define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_unpckhwd(<16 x i16> %a0, <16 x i16> %a1) {
-; X32-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpckhwd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpckhwd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpckhwd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %a0, <16 x i16> <i16 20, i16 4, i16 21, i16 5, i16 22, i16 6, i16 23, i16 7, i16 28, i16 12, i16 29, i16 13, i16 30, i16 14, i16 31, i16 15>, <16 x i16> %a1, i16 -1)
   ret <16 x i16> %res0
 }
 
 define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_unpcklwd(<16 x i16> %a0, <16 x i16> %a1) {
-; X32-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpcklwd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpcklwd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpcklwd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 0, i16 16, i16 1, i16 17, i16 2, i16 18, i16 3, i16 19, i16 8, i16 24, i16 9, i16 25, i16 10, i16 26, i16 11, i16 27>, <16 x i16> %a0, <16 x i16> %a1, i16 -1)
   ret <16 x i16> %res0
 }
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
index 5350dda..0e0dfec 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512vbmi,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vbmi,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64
 
 declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
 declare <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
@@ -19,26 +19,22 @@
 declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
 
 define <16 x i8> @combine_vpermt2var_16i8_identity(<16 x i8> %x0, <16 x i8> %x1) {
-; X32-LABEL: combine_vpermt2var_16i8_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_16i8_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_16i8_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x0, <16 x i8> %x1, i16 -1)
   %res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 30, i8 13, i8 28, i8 11, i8 26, i8 9, i8 24, i8 7, i8 22, i8 5, i8 20, i8 3, i8 18, i8 1, i8 16>, <16 x i8> %res0, <16 x i8> %res0, i16 -1)
   ret <16 x i8> %res1
 }
 define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8> %x1, i16 %m) {
-; X32-LABEL: combine_vpermt2var_16i8_identity_mask:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT:    vpermi2b %xmm0, %xmm0, %xmm1 {%k1} {z}
-; X32-NEXT:    vmovdqa {{.*#+}} xmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT:    vpermi2b %xmm1, %xmm1, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpermt2var_16i8_identity_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermi2b %xmm0, %xmm0, %xmm1 {%k1} {z}
+; X86-NEXT:    vmovdqa {{.*#+}} xmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X86-NEXT:    vpermi2b %xmm1, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i8_identity_mask:
 ; X64:       # %bb.0:
@@ -54,100 +50,63 @@
 }
 
 define <16 x i8> @combine_vpermi2var_16i8_as_vpshufb(<16 x i8> %x0, <16 x i8> %x1) {
-; X32-LABEL: combine_vpermi2var_16i8_as_vpshufb:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_16i8_as_vpshufb:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_16i8_as_vpshufb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x1, i16 -1)
   %res1 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 15, i8 1, i8 14, i8 2, i8 13, i8 3, i8 12, i8 4, i8 11, i8 5, i8 10, i8 6, i8 9, i8 7, i8 8>, <16 x i8> %res0, i16 -1)
   ret <16 x i8> %res1
 }
 define <32 x i8> @combine_vpermi2var_32i8_as_vpermb(<32 x i8> %x0, <32 x i8> %x1) {
-; X32-LABEL: combine_vpermi2var_32i8_as_vpermb:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X32-NEXT:    vpermb %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_32i8_as_vpermb:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X64-NEXT:    vpermb %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_32i8_as_vpermb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
+; CHECK-NEXT:    vpermb %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = shufflevector <32 x i8> %x0, <32 x i8> %x1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
   %res1 = call <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8> %res0, <32 x i8> <i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22>, <32 x i8> %res0, i32 -1)
   ret <32 x i8> %res1
 }
 define <64 x i8> @combine_vpermi2var_64i8_as_vpermb(<64 x i8> %x0, <64 x i8> %x1) {
-; X32-LABEL: combine_vpermi2var_64i8_as_vpermb:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X32-NEXT:    vpermb %zmm0, %zmm1, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_64i8_as_vpermb:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X64-NEXT:    vpermb %zmm0, %zmm1, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_64i8_as_vpermb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
+; CHECK-NEXT:    vpermb %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = shufflevector <64 x i8> %x0, <64 x i8> %x1, <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
   %res1 = call <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8> %res0, <64 x i8> <i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22>, <64 x i8> %res0, i64 -1)
   ret <64 x i8> %res1
 }
 
 define <16 x i8> @combine_vpermt2var_vpermi2var_16i8_as_vperm2(<16 x i8> %x0, <16 x i8> %x1) {
-; X32-LABEL: combine_vpermt2var_vpermi2var_16i8_as_vperm2:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,31,2,29,4,27,6,25,8,23,10,21,12,19,14,17]
-; X32-NEXT:    vpermi2b %xmm1, %xmm0, %xmm2
-; X32-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,17,2,18,4,19,6,21,8,23,10,25,12,27,14,29]
-; X32-NEXT:    vpermi2b %xmm2, %xmm2, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermt2var_vpermi2var_16i8_as_vperm2:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,31,2,29,4,27,6,25,8,23,10,21,12,19,14,17]
-; X64-NEXT:    vpermi2b %xmm1, %xmm0, %xmm2
-; X64-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,17,2,18,4,19,6,21,8,23,10,25,12,27,14,29]
-; X64-NEXT:    vpermi2b %xmm2, %xmm2, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermt2var_vpermi2var_16i8_as_vperm2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,31,2,29,4,27,6,25,8,23,10,21,12,19,14,17]
+; CHECK-NEXT:    vpermi2b %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,17,2,18,4,19,6,21,8,23,10,25,12,27,14,29]
+; CHECK-NEXT:    vpermi2b %xmm2, %xmm2, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> <i8 0, i8 31, i8 2, i8 29, i8 4, i8 27, i8 6, i8 25, i8 8, i8 23, i8 10, i8 21, i8 12, i8 19, i8 14, i8 17>, <16 x i8> %x1, i16 -1)
   %res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 0, i8 17, i8 2, i8 18, i8 4, i8 19, i8 6, i8 21, i8 8, i8 23, i8 10, i8 25, i8 12, i8 27, i8 14, i8 29>, <16 x i8> %res0, <16 x i8> %res0, i16 -1)
   ret <16 x i8> %res1
 }
 define <32 x i8> @combine_vpermi2var_32i8_as_vperm2(<32 x i8> %x0, <32 x i8> %x1) {
-; X32-LABEL: combine_vpermi2var_32i8_as_vperm2:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X32-NEXT:    vpermt2b %ymm1, %ymm2, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_32i8_as_vperm2:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X64-NEXT:    vpermt2b %ymm1, %ymm2, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_32i8_as_vperm2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
+; CHECK-NEXT:    vpermt2b %ymm1, %ymm2, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = shufflevector <32 x i8> %x0, <32 x i8> %x1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
   %res1 = call <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8> %res0, <32 x i8> <i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22>, <32 x i8> %x1, i32 -1)
   ret <32 x i8> %res1
 }
 define <64 x i8> @combine_vpermi2var_64i8_as_vperm2(<64 x i8> %x0, <64 x i8> %x1) {
-; X32-LABEL: combine_vpermi2var_64i8_as_vperm2:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,80,1,70,2,54,3,49,4,36,5,23,6,18,7,5,0,90,1,100,2,110,3,120,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X32-NEXT:    vpermt2b %zmm1, %zmm2, %zmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermi2var_64i8_as_vperm2:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,80,1,70,2,54,3,49,4,36,5,23,6,18,7,5,0,90,1,100,2,110,3,120,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
-; X64-NEXT:    vpermt2b %zmm1, %zmm2, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermi2var_64i8_as_vperm2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,80,1,70,2,54,3,49,4,36,5,23,6,18,7,5,0,90,1,100,2,110,3,120,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
+; CHECK-NEXT:    vpermt2b %zmm1, %zmm2, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = shufflevector <64 x i8> %x0, <64 x i8> %x1, <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
   %res1 = call <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8> %res0, <64 x i8> <i8 0, i8 80, i8 2, i8 70, i8 4, i8 60, i8 6, i8 50, i8 8, i8 40, i8 10, i8 30, i8 12, i8 20, i8 14, i8 10, i8 0, i8 90, i8 2, i8 100, i8 4, i8 110, i8 6, i8 120, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22>, <64 x i8> %x1, i64 -1)
   ret <64 x i8> %res1
diff --git a/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
index 27ccdef..60c3c86 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512F
-;
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512F
+
 ; Combine tests involving SSE41 target shuffles (BLEND,INSERTPS,MOVZX)
 
 declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
diff --git a/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll b/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
index 1b701f8..7fe6403 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
@@ -1,28 +1,28 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE42
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,+sse4a| FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,+sse4a | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,+sse4a | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2,+sse4a | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,+sse4a| FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,+sse4a | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
 ;
 ; Combine tests involving SSE4A target shuffles (EXTRQI,INSERTQI)
 
 declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
 
 define <16 x i8> @combine_extrqi_pshufb_16i8(<16 x i8> %a0) {
-; ALL-LABEL: combine_extrqi_pshufb_16i8:
-; ALL:       # %bb.0:
-; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[1,2],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_extrqi_pshufb_16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    extrq {{.*#+}} xmm0 = xmm0[1,2],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK-NEXT:    retq
   %1 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 2, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 255, i8 255, i8 255, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
   ret <16 x i8> %2
 }
 
 define <8 x i16> @combine_extrqi_pshufb_8i16(<8 x i16> %a0) {
-; ALL-LABEL: combine_extrqi_pshufb_8i16:
-; ALL:       # %bb.0:
-; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_extrqi_pshufb_8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 2, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = bitcast <8 x i16> %1 to <16 x i8>
   %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
@@ -75,10 +75,10 @@
 }
 
 define <16 x i8> @combine_pshufb_insertqi_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
-; ALL-LABEL: combine_pshufb_insertqi_pshufb:
-; ALL:       # %bb.0:
-; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0],xmm1[0,1],xmm0[3,4,5,6,7,u,u,u,u,u,u,u,u]
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshufb_insertqi_pshufb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0],xmm1[0,1],xmm0[3,4,5,6,7,u,u,u,u,u,u,u,u]
+; CHECK-NEXT:    retq
   %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
   %2 = shufflevector <16 x i8> %1, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 17, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 7, i8 1, i8 2, i8 4, i8 3, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
diff --git a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index f32f87b..a531bf6 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512F
 ;
 ; Combine tests involving SSE3/SSSE3 target shuffles (MOVDDUP, MOVSHDUP, MOVSLDUP, PSHUFB)
 
@@ -457,9 +457,9 @@
 }
 
 define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
-; ALL-LABEL: combine_pshufb_as_unpacklo_undef:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshufb_as_unpacklo_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 2, i8 3, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 6, i8 7>)
   %2 = bitcast <16 x i8> %1 to <8 x i16>
   %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -467,9 +467,9 @@
 }
 
 define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
-; ALL-LABEL: combine_pshufb_as_unpackhi_undef:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshufb_as_unpackhi_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 undef, i8 10, i8 undef, i8 11, i8 undef, i8 12, i8 undef, i8 13, i8 undef, i8 14, i8 undef, i8 15, i8 undef>)
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
   ret <16 x i8> %2
diff --git a/test/CodeGen/X86/vector-shuffle-combining-xop.ll b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
index 5fe0a2b..7da6afd 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-xop.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X32 --check-prefix=X86AVX
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=X32 --check-prefix=X86AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X64 --check-prefix=X64AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=X64 --check-prefix=X64AVX2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefixes=CHECK,X86,AVX2,X86-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefixes=CHECK,X64,AVX2,X64-AVX2
 
 declare <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double>, <2 x double>, <2 x i64>, i8) nounwind readnone
 declare <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double>, <4 x double>, <4 x i64>, i8) nounwind readnone
@@ -13,211 +13,138 @@
 declare <16 x i8> @llvm.x86.xop.vpperm(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x double> @combine_vpermil2pd_identity(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: combine_vpermil2pd_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2pd_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2pd_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a1, <2 x double> %a0, <2 x i64> <i64 2, i64 0>, i8 0)
   %res1 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %res0, <2 x double> undef, <2 x i64> <i64 2, i64 0>, i8 0)
   ret <2 x double> %res1
 }
 
 define <4 x double> @combine_vpermil2pd256_identity(<4 x double> %a0, <4 x double> %a1) {
-; X32-LABEL: combine_vpermil2pd256_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2pd256_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2pd256_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a1, <4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>, i8 0)
   %res1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %res0, <4 x double> undef, <4 x i64> <i64 2, i64 0, i64 2, i64 0>, i8 0)
   ret <4 x double> %res1
 }
 
 define <4 x double> @combine_vpermil2pd256_0z73(<4 x double> %a0, <4 x double> %a1) {
-; X32-LABEL: combine_vpermil2pd256_0z73:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermil2pd {{.*#+}} ymm0 = ymm0[0],zero,ymm1[3],ymm0[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2pd256_0z73:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermil2pd {{.*#+}} ymm0 = ymm0[0],zero,ymm1[3],ymm0[3]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2pd256_0z73:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermil2pd {{.*#+}} ymm0 = ymm0[0],zero,ymm1[3],ymm0[3]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 undef, i32 7, i32 3>
   %res1 = shufflevector <4 x double> %res0, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
   ret <4 x double> %res1
 }
 
 define <4 x float> @combine_vpermil2ps_identity(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: combine_vpermil2ps_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2ps_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2ps_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a1, <4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, i8 0)
   %res1 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %res0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, i8 0)
   ret <4 x float> %res1
 }
 
 define <4 x float> @combine_vpermil2ps_1z74(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: combine_vpermil2ps_1z74:
-; X32:       # %bb.0:
-; X32-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,0]
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2ps_1z74:
-; X64:       # %bb.0:
-; X64-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,0]
-; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2ps_1z74:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,0]
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 1, i32 1, i32 7, i32 4>, i8 0)
   %res1 = shufflevector <4 x float> %res0, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
   ret <4 x float> %res1
 }
 
 define <4 x float> @combine_vpermil2ps_02zu(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: combine_vpermil2ps_02zu:
-; X32:       # %bb.0:
-; X32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2ps_02zu:
-; X64:       # %bb.0:
-; X64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2ps_02zu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 undef>, i8 0)
   ret <4 x float> %res0
 }
 
 define <8 x float> @combine_vpermil2ps256_identity(<8 x float> %a0, <8 x float> %a1) {
-; X32-LABEL: combine_vpermil2ps256_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2ps256_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2ps256_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a1, <8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2>, i8 0)
   %res1 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %res0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2>, i8 0)
   ret <8 x float> %res1
 }
 
 define <8 x float> @combine_vpermil2ps256_08z945Az(<8 x float> %a0, <8 x float> %a1) {
-; X32-LABEL: combine_vpermil2ps256_08z945Az:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermil2ps {{.*#+}} ymm0 = ymm0[0],ymm1[0],zero,ymm1[1],ymm0[4,5],ymm1[6],zero
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2ps256_08z945Az:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermil2ps {{.*#+}} ymm0 = ymm0[0],ymm1[0],zero,ymm1[1],ymm0[4,5],ymm1[6],zero
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2ps256_08z945Az:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpermil2ps {{.*#+}} ymm0 = ymm0[0],ymm1[0],zero,ymm1[1],ymm0[4,5],ymm1[6],zero
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 0, i32 1, i32 6, i32 7>, i8 0)
   %res1 = shufflevector <8 x float> %res0, <8 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 8, i32 3, i32 4, i32 5, i32 6, i32 8>
   ret <8 x float> %res1
 }
 
 define <8 x float> @combine_vpermil2ps256_zero(<8 x float> %a0, <8 x float> %a1) {
-; X32-LABEL: combine_vpermil2ps256_zero:
-; X32:       # %bb.0:
-; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2ps256_zero:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2ps256_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a1, <8 x float> %a0, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11>, i8 2)
   ret <8 x float> %res0
 }
 
 define <4 x float> @combine_vpermil2ps_blend_with_zero(<4 x float> %a0, <4 x float> %a1) {
-; X32-LABEL: combine_vpermil2ps_blend_with_zero:
-; X32:       # %bb.0:
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2ps_blend_with_zero:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2ps_blend_with_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 8, i32 1, i32 2, i32 3>, i8 2)
   ret <4 x float> %res0
 }
 
 define <2 x double> @combine_vpermil2pd_as_shufpd(<2 x double> %a0, <2 x double> %a1) {
-; X32-LABEL: combine_vpermil2pd_as_shufpd:
-; X32:       # %bb.0:
-; X32-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2pd_as_shufpd:
-; X64:       # %bb.0:
-; X64-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2pd_as_shufpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> <i64 2, i64 4>, i8 0)
   ret <2 x double> %res0
 }
 
 define <4 x double> @combine_vpermil2pd256_as_shufpd(<4 x double> %a0, <4 x double> %a1) {
-; X32-LABEL: combine_vpermil2pd256_as_shufpd:
-; X32:       # %bb.0:
-; X32-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpermil2pd256_as_shufpd:
-; X64:       # %bb.0:
-; X64-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpermil2pd256_as_shufpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> <i64 0, i64 4, i64 2, i64 7>, i8 0)
   ret <4 x double> %res0
 }
 
 define <16 x i8> @combine_vpperm_identity(<16 x i8> %a0, <16 x i8> %a1) {
-; X32-LABEL: combine_vpperm_identity:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps %xmm1, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpperm_identity:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps %xmm1, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpperm_identity:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16>)
   %res1 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %res0, <16 x i8> undef, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
   ret <16 x i8> %res1
 }
 
 define <16 x i8> @combine_vpperm_zero(<16 x i8> %a0, <16 x i8> %a1) {
-; X32-LABEL: combine_vpperm_zero:
-; X32:       # %bb.0:
-; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpperm_zero:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpperm_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
   %res1 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %res0, <16 x i8> undef, <16 x i8> <i8 0, i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
   %res2 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %res1, <16 x i8> undef, <16 x i8> <i8 0, i8 1, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
@@ -225,10 +152,10 @@
 }
 
 define <16 x i8> @combine_vpperm_identity_bitcast(<16 x i8> %a0, <16 x i8> %a1) {
-; X32-LABEL: combine_vpperm_identity_bitcast:
-; X32:       # %bb.0:
-; X32-NEXT:    vpaddq {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: combine_vpperm_identity_bitcast:
+; X86:       # %bb.0:
+; X86-NEXT:    vpaddq {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpperm_identity_bitcast:
 ; X64:       # %bb.0:
@@ -244,73 +171,47 @@
 }
 
 define <16 x i8> @combine_vpperm_as_blend_with_zero(<16 x i8> %a0, <16 x i8> %a1) {
-; X32-LABEL: combine_vpperm_as_blend_with_zero:
-; X32:       # %bb.0:
-; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6,7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpperm_as_blend_with_zero:
-; X64:       # %bb.0:
-; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6,7]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpperm_as_blend_with_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6,7]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 0, i8 1, i8 128, i8 129, i8 4, i8 5, i8 6, i8 7, i8 130, i8 131, i8 132, i8 133, i8 134, i8 135, i8 136, i8 137>)
   ret <16 x i8> %res0
 }
 
 define <16 x i8> @combine_vpperm_as_unary_unpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
-; X32-LABEL: combine_vpperm_as_unary_unpckhbw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpperm_as_unary_unpckhbw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpperm_as_unary_unpckhbw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 25, i8 10, i8 26, i8 11, i8 27, i8 12, i8 28, i8 13, i8 29, i8 14, i8 30, i8 15, i8 31>)
   ret <16 x i8> %res0
 }
 
 define <16 x i8> @combine_vpperm_as_unpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
-; X32-LABEL: combine_vpperm_as_unpckhbw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpperm_as_unpckhbw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpperm_as_unpckhbw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 8, i8 24, i8 9, i8 25, i8 10, i8 26, i8 11, i8 27, i8 12, i8 28, i8 13, i8 29, i8 14, i8 30, i8 15, i8 31>)
   ret <16 x i8> %res0
 }
 
 define <16 x i8> @combine_vpperm_as_unpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
-; X32-LABEL: combine_vpperm_as_unpcklbw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpperm_as_unpcklbw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpperm_as_unpcklbw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 16, i8 0, i8 17, i8 1, i8 18, i8 2, i8 19, i8 3, i8 20, i8 4, i8 21, i8 5, i8 22, i8 6, i8 23, i8 7>)
   ret <16 x i8> %res0
 }
 
 define <4 x i32> @combine_vpperm_10zz32BA(<4 x i32> %a0, <4 x i32> %a1) {
-; X32-LABEL: combine_vpperm_10zz32BA:
-; X32:       # %bb.0:
-; X32-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[2,3,0,1],zero,zero,zero,zero,xmm0[6,7,4,5],xmm1[6,7,4,5]
-; X32-NEXT:    retl
-;
-; X64-LABEL: combine_vpperm_10zz32BA:
-; X64:       # %bb.0:
-; X64-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[2,3,0,1],zero,zero,zero,zero,xmm0[6,7,4,5],xmm1[6,7,4,5]
-; X64-NEXT:    retq
+; CHECK-LABEL: combine_vpperm_10zz32BA:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[2,3,0,1],zero,zero,zero,zero,xmm0[6,7,4,5],xmm1[6,7,4,5]
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %res1 = bitcast <4 x i32> %res0 to <16 x i8>
   %res2 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %res1, <16 x i8> undef, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 128, i8 128, i8 128, i8 128, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
@@ -320,35 +221,34 @@
 
 ; FIXME: Duplicated load in i686
 define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) {
-; X86AVX-LABEL: buildvector_v4f32_0404:
-; X86AVX:       # %bb.0:
-; X86AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; X86AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X86AVX-NEXT:    vmovaps %xmm0, (%eax)
-; X86AVX-NEXT:    retl
+; X86-AVX-LABEL: buildvector_v4f32_0404:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X86-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; X86-AVX-NEXT:    vmovaps %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
 ;
-; X86AVX2-LABEL: buildvector_v4f32_0404:
-; X86AVX2:       # %bb.0:
-; X86AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X86AVX2-NEXT:    vmovapd %xmm0, (%eax)
-; X86AVX2-NEXT:    retl
+; X86-AVX2-LABEL: buildvector_v4f32_0404:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86-AVX2-NEXT:    vmovapd %xmm0, (%eax)
+; X86-AVX2-NEXT:    retl
 ;
-; X64AVX-LABEL: buildvector_v4f32_0404:
-; X64AVX:       # %bb.0:
-; X64AVX-NEXT:    vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[0],xmm1[0]
-; X64AVX-NEXT:    vmovaps %xmm0, (%rdi)
-; X64AVX-NEXT:    retq
+; X64-AVX-LABEL: buildvector_v4f32_0404:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[0],xmm1[0]
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    retq
 ;
-; X64AVX2-LABEL: buildvector_v4f32_0404:
-; X64AVX2:       # %bb.0:
-; X64AVX2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64AVX2-NEXT:    vmovapd %xmm0, (%rdi)
-; X64AVX2-NEXT:    retq
+; X64-AVX2-LABEL: buildvector_v4f32_0404:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-AVX2-NEXT:    vmovapd %xmm0, (%rdi)
+; X64-AVX2-NEXT:    retq
   %v0 = insertelement <4 x float> undef, float %a, i32 0
   %v1 = insertelement <4 x float> %v0,   float %b, i32 1
   %v2 = insertelement <4 x float> %v1,   float %a, i32 2
@@ -358,13 +258,13 @@
 }
 
 define void @buildvector_v4f32_07z6(float %a, <4 x float> %b, <4 x float>* %ptr) {
-; X32-LABEL: buildvector_v4f32_07z6:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    vpermil2ps {{.*#+}} xmm0 = xmm1[0],xmm0[3],zero,xmm0[2]
-; X32-NEXT:    vmovaps %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: buildvector_v4f32_07z6:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    vpermil2ps {{.*#+}} xmm0 = xmm1[0],xmm0[3],zero,xmm0[2]
+; X86-NEXT:    vmovaps %xmm0, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: buildvector_v4f32_07z6:
 ; X64:       # %bb.0:
@@ -382,82 +282,57 @@
 }
 
 define <2 x double> @constant_fold_vpermil2pd() {
-; X32-LABEL: constant_fold_vpermil2pd:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [-2.0E+0,2.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermil2pd:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [-2.0E+0,2.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermil2pd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [-2.0E+0,2.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> <double 1.0, double 2.0>, <2 x double> <double -2.0, double -1.0>, <2 x i64> <i64 4, i64 2>, i8 2)
   ret <2 x double> %1
 }
 
 define <4 x double> @constant_fold_vpermil2pd_256() {
-; X32-LABEL: constant_fold_vpermil2pd_256:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [-4.0E+0,0.0E+0,4.0E+0,3.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermil2pd_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [-4.0E+0,0.0E+0,4.0E+0,3.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermil2pd_256:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [-4.0E+0,0.0E+0,4.0E+0,3.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x double> <double -4.0, double -3.0, double -2.0, double -1.0>, <4 x i64> <i64 4, i64 8, i64 2, i64 0>, i8 2)
   ret <4 x double> %1
 }
 
 define <4 x float> @constant_fold_vpermil2ps() {
-; X32-LABEL: constant_fold_vpermil2ps:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [-4.0E+0,1.0E+0,3.0E+0,0.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermil2ps:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [-4.0E+0,1.0E+0,3.0E+0,0.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermil2ps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [-4.0E+0,1.0E+0,3.0E+0,0.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x float> <float -4.0, float -3.0, float -2.0, float -1.0>, <4 x i32> <i32 4, i32 0, i32 2, i32 8>, i8 2)
   ret <4 x float> %1
 }
 
 define <8 x float> @constant_fold_vpermil2ps_256() {
-; X32-LABEL: constant_fold_vpermil2ps_256:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [-8.0E+0,1.0E+0,3.0E+0,0.0E+0,5.0E+0,0.0E+0,5.0E+0,7.0E+0]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpermil2ps_256:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [-8.0E+0,1.0E+0,3.0E+0,0.0E+0,5.0E+0,0.0E+0,5.0E+0,7.0E+0]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpermil2ps_256:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [-8.0E+0,1.0E+0,3.0E+0,0.0E+0,5.0E+0,0.0E+0,5.0E+0,7.0E+0]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x float> <float -8.0, float -7.0, float -6.0, float -5.0, float -4.0, float -3.0, float -2.0, float -1.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 8, i32 0, i32 8, i32 0, i32 2>, i8 2)
   ret <8 x float> %1
 }
 
 define <16 x i8> @constant_fold_vpperm() {
-; X32-LABEL: constant_fold_vpperm:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; X32-NEXT:    retl
-;
-; X64-LABEL: constant_fold_vpperm:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; X64-NEXT:    retq
+; CHECK-LABEL: constant_fold_vpperm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> <i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15>, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> <i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16>)
   ret <16 x i8> %1
 }
 
 define <4 x float> @PR31296(i8* %in) {
-; X32-LABEL: PR31296:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,mem[0]
-; X32-NEXT:    retl
+; X86-LABEL: PR31296:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,mem[0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: PR31296:
 ; X64:       # %bb.0: # %entry
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
index 199a05e..61d3fc3 100644
--- a/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-SLOW
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-FAST
+; RUN: llc < %s -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: llc < %s -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-SLOW
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-FAST
 ;
 ; Verify that the DAG combiner correctly folds bitwise operations across
 ; shuffles, nested shuffles with undef, pairs of nested shuffles, and other
@@ -18,9 +18,9 @@
 declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)
 
 define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
-; ALL-LABEL: combine_pshufd1:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshufd1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    retq
 entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
   %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
@@ -28,9 +28,9 @@
 }
 
 define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
-; ALL-LABEL: combine_pshufd2:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshufd2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    retq
 entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
   %b.cast = bitcast <4 x i32> %b to <8 x i16>
@@ -41,9 +41,9 @@
 }
 
 define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
-; ALL-LABEL: combine_pshufd3:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshufd3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    retq
 entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
   %b.cast = bitcast <4 x i32> %b to <8 x i16>
@@ -113,9 +113,9 @@
 }
 
 define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
-; ALL-LABEL: combine_pshuflw1:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshuflw1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    retq
 entry:
   %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
   %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
@@ -123,9 +123,9 @@
 }
 
 define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
-; ALL-LABEL: combine_pshuflw2:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_pshuflw2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    retq
 entry:
   %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
   %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
@@ -811,9 +811,9 @@
 
 ; The following pair of shuffles is folded into vector %A.
 define <4 x i32> @combine_nested_undef_test13(<4 x i32> %A, <4 x i32> %B) {
-; ALL-LABEL: combine_nested_undef_test13:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_nested_undef_test13:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 4, i32 2, i32 6>
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 0, i32 2, i32 4>
   ret <4 x i32> %2
@@ -1371,9 +1371,9 @@
 }
 
 define <4 x float> @combine_test11(<4 x float> %a, <4 x float> %b) {
-; ALL-LABEL: combine_test11:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_test11:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
   %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
   ret <4 x float> %2
@@ -1464,9 +1464,9 @@
 }
 
 define <4 x i32> @combine_test16(<4 x i32> %a, <4 x i32> %b) {
-; ALL-LABEL: combine_test16:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_test16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
   %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
   ret <4 x i32> %2
@@ -2161,9 +2161,9 @@
 ;  (shuffle(shuffle A, Undef, M0), A, M1) -> (shuffle A, Undef, M2)
 
 define <4 x float> @combine_undef_input_test6(<4 x float> %a) {
-; ALL-LABEL: combine_undef_input_test6:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_undef_input_test6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
   %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 5, i32 1, i32 2>
   ret <4 x float> %2
@@ -2235,9 +2235,9 @@
 }
 
 define <4 x float> @combine_undef_input_test10(<4 x float> %a) {
-; ALL-LABEL: combine_undef_input_test10:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_undef_input_test10:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
   %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 6, i32 7>
   ret <4 x float> %2
@@ -2351,9 +2351,9 @@
 ; combined into a single legal shuffle operation.
 
 define <4 x float> @combine_undef_input_test16(<4 x float> %a) {
-; ALL-LABEL: combine_undef_input_test16:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_undef_input_test16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
   %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
   ret <4 x float> %2
@@ -2425,9 +2425,9 @@
 }
 
 define <4 x float> @combine_undef_input_test20(<4 x float> %a) {
-; ALL-LABEL: combine_undef_input_test20:
-; ALL:       # %bb.0:
-; ALL-NEXT:    retq
+; CHECK-LABEL: combine_undef_input_test20:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
   %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
   ret <4 x float> %2
diff --git a/test/CodeGen/X86/vector-trunc-packus-widen.ll b/test/CodeGen/X86/vector-trunc-packus-widen.ll
index 2b456dd..232c15a 100644
--- a/test/CodeGen/X86/vector-trunc-packus-widen.ll
+++ b/test/CodeGen/X86/vector-trunc-packus-widen.ll
@@ -131,53 +131,49 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
 ; SSE41-NEXT:    movapd {{.*#+}} xmm4 = [4294967295,4294967295]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
-; SSE41-NEXT:    pxor %xmm8, %xmm0
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    pxor %xmm3, %xmm0
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa %xmm6, %xmm5
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm4, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm8, %xmm0
+; SSE41-NEXT:    pxor %xmm3, %xmm0
 ; SSE41-NEXT:    movdqa %xmm6, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    xorpd %xmm1, %xmm1
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
-; SSE41-NEXT:    xorpd %xmm8, %xmm0
+; SSE41-NEXT:    xorpd %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    xorpd %xmm8, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    xorpd %xmm3, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
 ; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
 ; SSE41-NEXT:    movaps %xmm1, %xmm0
@@ -498,12 +494,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa %xmm11, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm8
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -512,10 +507,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm9
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -524,10 +518,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -536,10 +529,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -548,10 +540,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm3
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
@@ -560,10 +551,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm1
 ; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
@@ -573,10 +563,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm3
 ; SSE41-NEXT:    movapd %xmm8, %xmm0
@@ -585,10 +574,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
 ; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
 ; SSE41-NEXT:    movaps %xmm2, %xmm0
@@ -915,51 +903,47 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147549183,2147549183]
-; SSE41-NEXT:    movdqa %xmm11, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147549183,2147549183]
+; SSE41-NEXT:    movdqa %xmm6, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm8
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm2
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT:    movdqa %xmm9, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    movdqa %xmm6, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm4
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    movdqa %xmm6, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm7
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -968,10 +952,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm5
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
@@ -980,10 +963,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm1
 ; SSE41-NEXT:    packusdw %xmm5, %xmm1
@@ -993,10 +975,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    movapd %xmm8, %xmm0
@@ -1005,10 +986,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    packusdw %xmm4, %xmm3
 ; SSE41-NEXT:    packusdw %xmm3, %xmm1
@@ -1562,104 +1542,96 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm9
 ; SSE41-NEXT:    movapd {{.*#+}} xmm8 = [255,255]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147483648,2147483648]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483903,2147483903]
 ; SSE41-NEXT:    movdqa %xmm5, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
-; SSE41-NEXT:    movapd %xmm8, %xmm11
-; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm11
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movapd %xmm8, %xmm10
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm10
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm5
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm5
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm5
 ; SSE41-NEXT:    movapd %xmm3, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
-; SSE41-NEXT:    movapd %xmm11, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    movapd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
-; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm4
 ; SSE41-NEXT:    packusdw %xmm2, %xmm4
@@ -1980,104 +1952,96 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm9
 ; SSE41-NEXT:    movapd {{.*#+}} xmm8 = [255,255]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147483648,2147483648]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483903,2147483903]
 ; SSE41-NEXT:    movdqa %xmm5, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
-; SSE41-NEXT:    movapd %xmm8, %xmm11
-; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm11
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movapd %xmm8, %xmm10
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm10
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm5
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm5
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm5
 ; SSE41-NEXT:    movapd %xmm3, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
-; SSE41-NEXT:    movapd %xmm11, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    movapd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
-; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm4
 ; SSE41-NEXT:    packusdw %xmm2, %xmm4
@@ -2596,10 +2560,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm10
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm10
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -2608,22 +2571,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm13
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm13
 ; SSE41-NEXT:    movdqa %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
 ; SSE41-NEXT:    movdqa %xmm12, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm14, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm14
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm14
 ; SSE41-NEXT:    movdqa %xmm5, %xmm0
@@ -2632,10 +2593,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm15
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm15
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -2644,10 +2604,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -2656,10 +2615,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -2668,10 +2626,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -2680,113 +2637,104 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm11
-; SSE41-NEXT:    xorpd %xmm8, %xmm8
+; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    movapd %xmm11, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
-; SSE41-NEXT:    pxor %xmm4, %xmm4
-; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm4
-; SSE41-NEXT:    movapd %xmm3, %xmm0
-; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm7, %xmm7
+; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm7
+; SSE41-NEXT:    movapd %xmm3, %xmm0
+; SSE41-NEXT:    xorpd %xmm9, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT:    packusdw %xmm4, %xmm1
+; SSE41-NEXT:    packusdw %xmm7, %xmm1
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm2, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm2
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm3
-; SSE41-NEXT:    packusdw %xmm2, %xmm3
-; SSE41-NEXT:    packusdw %xmm3, %xmm1
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm3
+; SSE41-NEXT:    movapd %xmm5, %xmm0
+; SSE41-NEXT:    xorpd %xmm9, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm4
+; SSE41-NEXT:    packusdw %xmm3, %xmm4
+; SSE41-NEXT:    packusdw %xmm4, %xmm1
 ; SSE41-NEXT:    movapd %xmm15, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT:    movapd %xmm0, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm2, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm15, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm15, %xmm4
 ; SSE41-NEXT:    movapd %xmm14, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm14, %xmm3
-; SSE41-NEXT:    packusdw %xmm2, %xmm3
+; SSE41-NEXT:    packusdw %xmm4, %xmm3
 ; SSE41-NEXT:    movapd %xmm13, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
-; SSE41-NEXT:    pxor %xmm2, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm13, %xmm2
-; SSE41-NEXT:    movapd %xmm10, %xmm0
-; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
-; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm8
-; SSE41-NEXT:    packusdw %xmm2, %xmm8
-; SSE41-NEXT:    packusdw %xmm8, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm13, %xmm4
+; SSE41-NEXT:    movapd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm9, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm5
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
+; SSE41-NEXT:    packusdw %xmm4, %xmm2
+; SSE41-NEXT:    packusdw %xmm2, %xmm3
 ; SSE41-NEXT:    packuswb %xmm3, %xmm1
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-trunc-packus.ll b/test/CodeGen/X86/vector-trunc-packus.ll
index 1931619..f49ee8f 100644
--- a/test/CodeGen/X86/vector-trunc-packus.ll
+++ b/test/CodeGen/X86/vector-trunc-packus.ll
@@ -131,53 +131,49 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
 ; SSE41-NEXT:    movapd {{.*#+}} xmm4 = [4294967295,4294967295]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
-; SSE41-NEXT:    pxor %xmm8, %xmm0
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT:    pxor %xmm3, %xmm0
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa %xmm6, %xmm5
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm4, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm8, %xmm0
+; SSE41-NEXT:    pxor %xmm3, %xmm0
 ; SSE41-NEXT:    movdqa %xmm6, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    xorpd %xmm1, %xmm1
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
-; SSE41-NEXT:    xorpd %xmm8, %xmm0
+; SSE41-NEXT:    xorpd %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    xorpd %xmm8, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    xorpd %xmm3, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
 ; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
 ; SSE41-NEXT:    movaps %xmm1, %xmm0
@@ -498,12 +494,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa %xmm11, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm8
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -512,10 +507,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm9
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -524,10 +518,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -536,10 +529,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -548,10 +540,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm3
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
@@ -560,10 +551,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm1
 ; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
@@ -573,10 +563,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm3
 ; SSE41-NEXT:    movapd %xmm8, %xmm0
@@ -585,10 +574,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
 ; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
 ; SSE41-NEXT:    movaps %xmm2, %xmm0
@@ -915,51 +903,47 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147549183,2147549183]
-; SSE41-NEXT:    movdqa %xmm11, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147549183,2147549183]
+; SSE41-NEXT:    movdqa %xmm6, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm8
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm2
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT:    movdqa %xmm9, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    movdqa %xmm6, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm4
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    movdqa %xmm6, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm7
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -968,10 +952,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm5
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
@@ -980,10 +963,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm1
 ; SSE41-NEXT:    packusdw %xmm5, %xmm1
@@ -993,10 +975,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    movapd %xmm8, %xmm0
@@ -1005,10 +986,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    packusdw %xmm4, %xmm3
 ; SSE41-NEXT:    packusdw %xmm3, %xmm1
@@ -1555,51 +1535,47 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147483903,2147483903]
-; SSE41-NEXT:    movdqa %xmm11, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147483903,2147483903]
+; SSE41-NEXT:    movdqa %xmm6, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm8
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm2
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT:    movdqa %xmm9, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    movdqa %xmm6, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm4
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
-; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    movdqa %xmm6, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm7
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -1608,10 +1584,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm5
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
@@ -1620,10 +1595,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm1
 ; SSE41-NEXT:    packusdw %xmm5, %xmm1
@@ -1633,10 +1607,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    movapd %xmm8, %xmm0
@@ -1645,10 +1618,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm10, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    packusdw %xmm4, %xmm3
 ; SSE41-NEXT:    packusdw %xmm3, %xmm1
@@ -1950,104 +1922,96 @@
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm9
 ; SSE41-NEXT:    movapd {{.*#+}} xmm8 = [255,255]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [2147483648,2147483648]
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483903,2147483903]
 ; SSE41-NEXT:    movdqa %xmm5, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
-; SSE41-NEXT:    movapd %xmm8, %xmm11
-; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm11
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movapd %xmm8, %xmm10
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm10
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pxor %xmm10, %xmm0
+; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm5, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm5, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm5
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm5
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm5
 ; SSE41-NEXT:    movapd %xmm3, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm4
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
-; SSE41-NEXT:    movapd %xmm11, %xmm0
-; SSE41-NEXT:    xorpd %xmm10, %xmm0
+; SSE41-NEXT:    movapd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
-; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm4
 ; SSE41-NEXT:    packusdw %xmm2, %xmm4
@@ -2564,10 +2528,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm10
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm10
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -2576,22 +2539,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm13
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm13
 ; SSE41-NEXT:    movdqa %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
 ; SSE41-NEXT:    movdqa %xmm12, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm14, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm14
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm14
 ; SSE41-NEXT:    movdqa %xmm5, %xmm0
@@ -2600,10 +2561,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm15
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm15
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -2612,10 +2572,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -2624,10 +2583,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -2636,10 +2594,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -2648,113 +2605,104 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm11
-; SSE41-NEXT:    xorpd %xmm8, %xmm8
+; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    movapd %xmm11, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
-; SSE41-NEXT:    pxor %xmm4, %xmm4
-; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm4
-; SSE41-NEXT:    movapd %xmm3, %xmm0
-; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    pxor %xmm7, %xmm7
+; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm7
+; SSE41-NEXT:    movapd %xmm3, %xmm0
+; SSE41-NEXT:    xorpd %xmm9, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT:    packusdw %xmm4, %xmm1
+; SSE41-NEXT:    packusdw %xmm7, %xmm1
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm2, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm2
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm3
-; SSE41-NEXT:    packusdw %xmm2, %xmm3
-; SSE41-NEXT:    packusdw %xmm3, %xmm1
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm3
+; SSE41-NEXT:    movapd %xmm5, %xmm0
+; SSE41-NEXT:    xorpd %xmm9, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm4
+; SSE41-NEXT:    packusdw %xmm3, %xmm4
+; SSE41-NEXT:    packusdw %xmm4, %xmm1
 ; SSE41-NEXT:    movapd %xmm15, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT:    movapd %xmm0, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
-; SSE41-NEXT:    pxor %xmm2, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm15, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm15, %xmm4
 ; SSE41-NEXT:    movapd %xmm14, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm14, %xmm3
-; SSE41-NEXT:    packusdw %xmm2, %xmm3
+; SSE41-NEXT:    packusdw %xmm4, %xmm3
 ; SSE41-NEXT:    movapd %xmm13, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
-; SSE41-NEXT:    pxor %xmm2, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm13, %xmm2
-; SSE41-NEXT:    movapd %xmm10, %xmm0
-; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm9, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
-; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm8
-; SSE41-NEXT:    packusdw %xmm2, %xmm8
-; SSE41-NEXT:    packusdw %xmm8, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm13, %xmm4
+; SSE41-NEXT:    movapd %xmm10, %xmm0
+; SSE41-NEXT:    xorpd %xmm9, %xmm0
+; SSE41-NEXT:    movapd %xmm0, %xmm5
+; SSE41-NEXT:    pcmpgtd %xmm9, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
+; SSE41-NEXT:    packusdw %xmm4, %xmm2
+; SSE41-NEXT:    packusdw %xmm2, %xmm3
 ; SSE41-NEXT:    packuswb %xmm3, %xmm1
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    retq
diff --git a/test/CodeGen/X86/vector-trunc-ssat-widen.ll b/test/CodeGen/X86/vector-trunc-ssat-widen.ll
index d25d525..7ba3dc8 100644
--- a/test/CodeGen/X86/vector-trunc-ssat-widen.ll
+++ b/test/CodeGen/X86/vector-trunc-ssat-widen.ll
@@ -143,53 +143,49 @@
 ; SSE41-NEXT:    movapd {{.*#+}} xmm4 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
 ; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
-; SSE41-NEXT:    movdqa %xmm8, %xmm5
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [4294967295,4294967295]
+; SSE41-NEXT:    movdqa %xmm6, %xmm5
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm4, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa %xmm8, %xmm2
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movapd {{.*#+}} xmm1 = [18446744071562067968,18446744071562067968]
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm3, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm8 = [18446744069414584320,18446744069414584320]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [18446744069414584320,18446744069414584320]
 ; SSE41-NEXT:    movapd %xmm0, %xmm6
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm2, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
-; SSE41-NEXT:    movapd %xmm1, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
-; SSE41-NEXT:    xorpd %xmm5, %xmm3
-; SSE41-NEXT:    movapd %xmm3, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    movapd %xmm1, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
+; SSE41-NEXT:    xorpd %xmm5, %xmm3
+; SSE41-NEXT:    movapd %xmm3, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm2, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
 ; SSE41-NEXT:    movaps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
@@ -517,7 +513,7 @@
 ;
 ; SSE41-LABEL: trunc_ssat_v8i64_v8i32:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm9
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movapd {{.*#+}} xmm7 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -526,22 +522,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
-; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm8
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm8
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
 ; SSE41-NEXT:    movdqa %xmm10, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm9, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm9
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -550,10 +544,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -562,61 +555,56 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm2 = [18446744071562067968,18446744071562067968]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [18446744069414584320,18446744069414584320]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [18446744069414584320,18446744069414584320]
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
-; SSE41-NEXT:    movapd %xmm2, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm2, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm6
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
 ; SSE41-NEXT:    movapd %xmm9, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
-; SSE41-NEXT:    movapd %xmm2, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm3
-; SSE41-NEXT:    xorpd %xmm8, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
+; SSE41-NEXT:    movapd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movapd %xmm2, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm4
+; SSE41-NEXT:    xorpd %xmm8, %xmm5
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
-; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
 ; SSE41-NEXT:    movaps %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
@@ -947,12 +935,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147516415,2147516415]
 ; SSE41-NEXT:    movdqa %xmm11, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm8
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -961,10 +948,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm9
 ; SSE41-NEXT:    movdqa %xmm10, %xmm0
@@ -973,10 +959,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -985,59 +970,54 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm3 = [18446744073709518848,18446744073709518848]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [18446744071562035200,18446744071562035200]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [18446744071562035200,18446744071562035200]
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
-; SSE41-NEXT:    movapd %xmm3, %xmm4
-; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm3, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm6
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT:    packssdw %xmm4, %xmm1
+; SSE41-NEXT:    packssdw %xmm6, %xmm1
 ; SSE41-NEXT:    movapd %xmm9, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm2
 ; SSE41-NEXT:    xorpd %xmm8, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    packssdw %xmm2, %xmm3
 ; SSE41-NEXT:    packssdw %xmm3, %xmm1
@@ -1463,12 +1443,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483775,2147483775]
 ; SSE41-NEXT:    movdqa %xmm10, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm9, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm9
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -1477,10 +1456,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm11
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm11
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -1489,10 +1467,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -1501,10 +1478,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm2 = [18446744073709551488,18446744073709551488]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -1514,45 +1490,41 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm7
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    movapd %xmm11, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm3
 ; SSE41-NEXT:    xorpd %xmm9, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm8, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm2
 ; SSE41-NEXT:    movapd {{.*#+}} xmm0 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE41-NEXT:    andpd %xmm0, %xmm2
@@ -1903,12 +1875,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483775,2147483775]
 ; SSE41-NEXT:    movdqa %xmm10, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm9, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm9
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -1917,10 +1888,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm11
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm11
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -1929,10 +1899,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -1941,10 +1910,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm1 = [18446744073709551488,18446744073709551488]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -1954,45 +1922,41 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm1, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm2
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm1, %xmm7
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    movapd %xmm11, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm1, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm3
 ; SSE41-NEXT:    xorpd %xmm9, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm8, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm1
 ; SSE41-NEXT:    movapd {{.*#+}} xmm0 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE41-NEXT:    andpd %xmm0, %xmm1
@@ -2548,10 +2512,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm10
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm10
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -2560,22 +2523,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm13
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm13
 ; SSE41-NEXT:    movdqa %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
 ; SSE41-NEXT:    movdqa %xmm12, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm14, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm14
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm14
 ; SSE41-NEXT:    movdqa %xmm5, %xmm0
@@ -2584,10 +2545,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm15
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm15
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -2596,10 +2556,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -2608,10 +2567,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -2620,10 +2578,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm7
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm7
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -2632,10 +2589,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm11
 ; SSE41-NEXT:    movapd {{.*#+}} xmm2 = [18446744073709551488,18446744073709551488]
 ; SSE41-NEXT:    movapd %xmm11, %xmm0
@@ -2645,35 +2601,32 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
-; SSE41-NEXT:    movapd %xmm2, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm2, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm4
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
-; SSE41-NEXT:    packssdw %xmm3, %xmm1
+; SSE41-NEXT:    packssdw %xmm4, %xmm1
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm3
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
@@ -2682,10 +2635,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT:    packssdw %xmm3, %xmm4
@@ -2696,10 +2648,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm15, %xmm3
 ; SSE41-NEXT:    movapd %xmm14, %xmm0
@@ -2708,10 +2659,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm14, %xmm4
 ; SSE41-NEXT:    packssdw %xmm3, %xmm4
@@ -2721,21 +2671,19 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm13, %xmm3
 ; SSE41-NEXT:    xorpd %xmm10, %xmm9
-; SSE41-NEXT:    movapd %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movapd %xmm9, %xmm5
+; SSE41-NEXT:    pcmpgtd %xmm8, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    packssdw %xmm3, %xmm2
 ; SSE41-NEXT:    packssdw %xmm2, %xmm4
diff --git a/test/CodeGen/X86/vector-trunc-ssat.ll b/test/CodeGen/X86/vector-trunc-ssat.ll
index b36b844..d690c0f 100644
--- a/test/CodeGen/X86/vector-trunc-ssat.ll
+++ b/test/CodeGen/X86/vector-trunc-ssat.ll
@@ -143,53 +143,49 @@
 ; SSE41-NEXT:    movapd {{.*#+}} xmm4 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
 ; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
-; SSE41-NEXT:    movdqa %xmm8, %xmm5
+; SSE41-NEXT:    movdqa {{.*#+}} xmm6 = [4294967295,4294967295]
+; SSE41-NEXT:    movdqa %xmm6, %xmm5
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm4, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm3, %xmm0
-; SSE41-NEXT:    movdqa %xmm8, %xmm2
+; SSE41-NEXT:    movdqa %xmm6, %xmm2
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movapd {{.*#+}} xmm1 = [18446744071562067968,18446744071562067968]
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm3, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm8 = [18446744069414584320,18446744069414584320]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [18446744069414584320,18446744069414584320]
 ; SSE41-NEXT:    movapd %xmm0, %xmm6
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm2, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm7, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm2, %xmm0
-; SSE41-NEXT:    movapd %xmm1, %xmm2
-; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
-; SSE41-NEXT:    xorpd %xmm5, %xmm3
-; SSE41-NEXT:    movapd %xmm3, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    movapd %xmm1, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
+; SSE41-NEXT:    xorpd %xmm5, %xmm3
+; SSE41-NEXT:    movapd %xmm3, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm2, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
 ; SSE41-NEXT:    movaps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
@@ -517,7 +513,7 @@
 ;
 ; SSE41-LABEL: trunc_ssat_v8i64_v8i32:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm9
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
 ; SSE41-NEXT:    movapd {{.*#+}} xmm7 = [2147483647,2147483647]
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
@@ -526,22 +522,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
-; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm8
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm8
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm5, %xmm0
 ; SSE41-NEXT:    movdqa %xmm10, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm9, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm9
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -550,10 +544,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -562,61 +555,56 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm2 = [18446744071562067968,18446744071562067968]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [18446744069414584320,18446744069414584320]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [18446744069414584320,18446744069414584320]
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
-; SSE41-NEXT:    movapd %xmm2, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm2, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm6
 ; SSE41-NEXT:    movapd %xmm4, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
 ; SSE41-NEXT:    movapd %xmm9, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
-; SSE41-NEXT:    movapd %xmm0, %xmm3
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
-; SSE41-NEXT:    movapd %xmm2, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm3
-; SSE41-NEXT:    xorpd %xmm8, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
+; SSE41-NEXT:    movapd %xmm0, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    movapd %xmm2, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm4
+; SSE41-NEXT:    xorpd %xmm8, %xmm5
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm3, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm3, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
-; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
 ; SSE41-NEXT:    movaps %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
@@ -947,12 +935,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147516415,2147516415]
 ; SSE41-NEXT:    movdqa %xmm11, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm8
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -961,10 +948,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm9
 ; SSE41-NEXT:    movdqa %xmm10, %xmm0
@@ -973,10 +959,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -985,59 +970,54 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm3 = [18446744073709518848,18446744073709518848]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [18446744071562035200,18446744071562035200]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [18446744071562035200,18446744071562035200]
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
-; SSE41-NEXT:    movapd %xmm3, %xmm4
-; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm3, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm6
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT:    packssdw %xmm4, %xmm1
+; SSE41-NEXT:    packssdw %xmm6, %xmm1
 ; SSE41-NEXT:    movapd %xmm9, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm2
 ; SSE41-NEXT:    xorpd %xmm8, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    packssdw %xmm2, %xmm3
 ; SSE41-NEXT:    packssdw %xmm3, %xmm1
@@ -1451,12 +1431,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm11 = [2147483775,2147483775]
 ; SSE41-NEXT:    movdqa %xmm11, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm8
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm8
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -1465,10 +1444,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm9
 ; SSE41-NEXT:    movdqa %xmm10, %xmm0
@@ -1477,10 +1455,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -1489,59 +1466,54 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm3 = [18446744073709551488,18446744073709551488]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [18446744071562067840,18446744071562067840]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [18446744071562067840,18446744071562067840]
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
-; SSE41-NEXT:    movapd %xmm3, %xmm4
-; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm3, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm6
 ; SSE41-NEXT:    movapd %xmm2, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT:    packssdw %xmm4, %xmm1
+; SSE41-NEXT:    packssdw %xmm6, %xmm1
 ; SSE41-NEXT:    movapd %xmm9, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm2
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm2
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm3, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm2
 ; SSE41-NEXT:    xorpd %xmm8, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
-; SSE41-NEXT:    pcmpeqd %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm4, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm3
 ; SSE41-NEXT:    packssdw %xmm2, %xmm3
 ; SSE41-NEXT:    packssdw %xmm3, %xmm1
@@ -1871,12 +1843,11 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483775,2147483775]
 ; SSE41-NEXT:    movdqa %xmm10, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm9, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm9
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm9
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -1885,10 +1856,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm11
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm11
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -1897,10 +1867,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm7, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -1909,10 +1878,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm7
 ; SSE41-NEXT:    movapd {{.*#+}} xmm1 = [18446744073709551488,18446744073709551488]
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
@@ -1922,45 +1890,41 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm1, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm2
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm1, %xmm7
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm7
 ; SSE41-NEXT:    movapd %xmm11, %xmm0
 ; SSE41-NEXT:    xorpd %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm1, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm3
 ; SSE41-NEXT:    xorpd %xmm9, %xmm5
-; SSE41-NEXT:    movapd %xmm5, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movapd %xmm5, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm8, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm1
 ; SSE41-NEXT:    movapd {{.*#+}} xmm0 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE41-NEXT:    andpd %xmm0, %xmm1
@@ -2514,10 +2478,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm10
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm10[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm10, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm10
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm10
 ; SSE41-NEXT:    movdqa %xmm7, %xmm0
@@ -2526,22 +2489,20 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm13
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm13
 ; SSE41-NEXT:    movdqa %xmm4, %xmm0
 ; SSE41-NEXT:    pxor %xmm9, %xmm0
 ; SSE41-NEXT:    movdqa %xmm12, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm14, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm14
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm14
 ; SSE41-NEXT:    movdqa %xmm5, %xmm0
@@ -2550,10 +2511,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm15
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm15
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -2562,10 +2522,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
@@ -2574,10 +2533,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm6
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -2586,10 +2544,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm11, %xmm7
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm7
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -2598,10 +2555,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm12, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm11
 ; SSE41-NEXT:    movapd {{.*#+}} xmm2 = [18446744073709551488,18446744073709551488]
 ; SSE41-NEXT:    movapd %xmm11, %xmm0
@@ -2611,35 +2567,32 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
-; SSE41-NEXT:    movapd %xmm2, %xmm3
-; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm2, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm4
 ; SSE41-NEXT:    movapd %xmm7, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm11 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm11, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
-; SSE41-NEXT:    packssdw %xmm3, %xmm1
+; SSE41-NEXT:    packssdw %xmm4, %xmm1
 ; SSE41-NEXT:    movapd %xmm6, %xmm0
 ; SSE41-NEXT:    xorpd %xmm9, %xmm0
 ; SSE41-NEXT:    movapd %xmm0, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm3
 ; SSE41-NEXT:    movapd %xmm5, %xmm0
@@ -2648,10 +2601,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT:    packssdw %xmm3, %xmm4
@@ -2662,10 +2614,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm4, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm15, %xmm3
 ; SSE41-NEXT:    movapd %xmm14, %xmm0
@@ -2674,10 +2625,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm4
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm14, %xmm4
 ; SSE41-NEXT:    packssdw %xmm3, %xmm4
@@ -2687,21 +2637,19 @@
 ; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm2, %xmm3
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm13, %xmm3
 ; SSE41-NEXT:    xorpd %xmm10, %xmm9
-; SSE41-NEXT:    movapd %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movapd %xmm9, %xmm5
+; SSE41-NEXT:    pcmpgtd %xmm8, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm8, %xmm9
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm9[1,1,3,3]
-; SSE41-NEXT:    pand %xmm5, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
 ; SSE41-NEXT:    packssdw %xmm3, %xmm2
 ; SSE41-NEXT:    packssdw %xmm2, %xmm4
diff --git a/test/CodeGen/X86/vector-trunc-usat-widen.ll b/test/CodeGen/X86/vector-trunc-usat-widen.ll
index 9dd97b3..6922fde 100644
--- a/test/CodeGen/X86/vector-trunc-usat-widen.ll
+++ b/test/CodeGen/X86/vector-trunc-usat-widen.ll
@@ -90,19 +90,17 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm4, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm3
+; SSE41-NEXT:    por %xmm5, %xmm3
 ; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movdqa %xmm4, %xmm5
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd {{.*#+}} xmm4 = [4294967295,4294967295]
 ; SSE41-NEXT:    movapd {{.*#+}} xmm5 = [4294967295,429496729]
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
@@ -337,24 +335,22 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002259455,9223372039002259455]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm6, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm5
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm6, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
@@ -364,21 +360,19 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm6, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    pxor %xmm8, %xmm7
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm7, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm7, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
-; SSE41-NEXT:    pand %xmm1, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm6
 ; SSE41-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm2[0,2]
 ; SSE41-NEXT:    movaps %xmm6, %xmm0
@@ -609,24 +603,22 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002324991,9223372039002324991]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm7
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm1
 ; SSE41-NEXT:    packusdw %xmm4, %xmm1
@@ -634,23 +626,21 @@
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
 ; SSE41-NEXT:    pxor %xmm2, %xmm6
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    packusdw %xmm4, %xmm5
 ; SSE41-NEXT:    packusdw %xmm5, %xmm1
@@ -1103,47 +1093,43 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm5
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pxor %xmm3, %xmm7
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm7, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm7, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm2
@@ -1372,47 +1358,43 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pxor %xmm3, %xmm7
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm7, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm7, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm2
@@ -1746,10 +1728,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm12
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm12[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm12, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm12
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm12
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -1758,10 +1739,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm13
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm13
 ; SSE41-NEXT:    packusdw %xmm12, %xmm13
@@ -1771,36 +1751,33 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm12
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm12, %xmm0
-; SSE41-NEXT:    movapd %xmm9, %xmm12
-; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm12
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm9, %xmm1
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
 ; SSE41-NEXT:    movdqa %xmm11, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
-; SSE41-NEXT:    movapd %xmm9, %xmm1
-; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT:    packusdw %xmm12, %xmm1
-; SSE41-NEXT:    packusdw %xmm1, %xmm13
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    movapd %xmm9, %xmm3
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT:    packusdw %xmm1, %xmm3
+; SSE41-NEXT:    packusdw %xmm3, %xmm13
 ; SSE41-NEXT:    movdqa %xmm5, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
 ; SSE41-NEXT:    movdqa %xmm11, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
 ; SSE41-NEXT:    movdqa %xmm4, %xmm0
@@ -1809,10 +1786,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    packusdw %xmm1, %xmm2
@@ -1822,21 +1798,19 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    pxor %xmm6, %xmm10
-; SSE41-NEXT:    movdqa %xmm11, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm9
 ; SSE41-NEXT:    packusdw %xmm1, %xmm9
 ; SSE41-NEXT:    packusdw %xmm9, %xmm2
diff --git a/test/CodeGen/X86/vector-trunc-usat.ll b/test/CodeGen/X86/vector-trunc-usat.ll
index 3f2810a..c5a0bab 100644
--- a/test/CodeGen/X86/vector-trunc-usat.ll
+++ b/test/CodeGen/X86/vector-trunc-usat.ll
@@ -90,19 +90,17 @@
 ; SSE41-NEXT:    pcmpgtd %xmm3, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm4, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm3
+; SSE41-NEXT:    por %xmm5, %xmm3
 ; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    movdqa %xmm4, %xmm5
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm4, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm6, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
 ; SSE41-NEXT:    movapd {{.*#+}} xmm4 = [4294967295,4294967295]
 ; SSE41-NEXT:    movapd {{.*#+}} xmm5 = [4294967295,429496729]
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
@@ -337,24 +335,22 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002259455,9223372039002259455]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm6, %xmm5
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm5
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    movapd %xmm6, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
 ; SSE41-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
@@ -364,21 +360,19 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm6, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT:    pxor %xmm8, %xmm7
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm7, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm7, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
-; SSE41-NEXT:    pand %xmm1, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm6
 ; SSE41-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm2[0,2]
 ; SSE41-NEXT:    movaps %xmm6, %xmm0
@@ -609,24 +603,22 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002324991,9223372039002324991]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm7
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm1
 ; SSE41-NEXT:    packusdw %xmm4, %xmm1
@@ -634,23 +626,21 @@
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
 ; SSE41-NEXT:    pxor %xmm2, %xmm6
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    packusdw %xmm4, %xmm5
 ; SSE41-NEXT:    packusdw %xmm5, %xmm1
@@ -1094,24 +1084,22 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm7
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm1
 ; SSE41-NEXT:    packusdw %xmm4, %xmm1
@@ -1119,23 +1107,21 @@
 ; SSE41-NEXT:    pxor %xmm6, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm5, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
 ; SSE41-NEXT:    pxor %xmm2, %xmm6
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm6, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm6, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
 ; SSE41-NEXT:    packusdw %xmm4, %xmm5
 ; SSE41-NEXT:    packusdw %xmm5, %xmm1
@@ -1343,47 +1329,43 @@
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
 ; SSE41-NEXT:    movdqa %xmm9, %xmm6
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm6
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm6
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm4
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm4, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm4
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm7, %xmm0
 ; SSE41-NEXT:    movdqa %xmm9, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm10, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT:    pxor %xmm3, %xmm7
-; SSE41-NEXT:    movdqa %xmm9, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm7, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm9, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm7, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm9, %xmm7
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSE41-NEXT:    pand %xmm5, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm8, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT:    andpd %xmm8, %xmm2
@@ -1715,10 +1697,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm12
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm12[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm12, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm12
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm12
 ; SSE41-NEXT:    movdqa %xmm8, %xmm0
@@ -1727,10 +1708,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm13, %xmm14
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm13
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm13
 ; SSE41-NEXT:    packusdw %xmm12, %xmm13
@@ -1740,36 +1720,33 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm12
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm12, %xmm0
-; SSE41-NEXT:    movapd %xmm9, %xmm12
-; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm12
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm9, %xmm1
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
 ; SSE41-NEXT:    movdqa %xmm11, %xmm3
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm8, %xmm1
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE41-NEXT:    por %xmm1, %xmm0
-; SSE41-NEXT:    movapd %xmm9, %xmm1
-; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT:    packusdw %xmm12, %xmm1
-; SSE41-NEXT:    packusdw %xmm1, %xmm13
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm8, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    movapd %xmm9, %xmm3
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT:    packusdw %xmm1, %xmm3
+; SSE41-NEXT:    packusdw %xmm3, %xmm13
 ; SSE41-NEXT:    movdqa %xmm5, %xmm0
 ; SSE41-NEXT:    pxor %xmm10, %xmm0
 ; SSE41-NEXT:    movdqa %xmm11, %xmm1
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm2, %xmm3
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
 ; SSE41-NEXT:    movdqa %xmm4, %xmm0
@@ -1778,10 +1755,9 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm5
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm2
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    packusdw %xmm1, %xmm2
@@ -1791,21 +1767,19 @@
 ; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    movapd %xmm9, %xmm1
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
 ; SSE41-NEXT:    pxor %xmm6, %xmm10
-; SSE41-NEXT:    movdqa %xmm11, %xmm0
-; SSE41-NEXT:    pcmpgtd %xmm10, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm10, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
 ; SSE41-NEXT:    pcmpeqd %xmm11, %xmm10
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[1,1,3,3]
-; SSE41-NEXT:    pand %xmm3, %xmm4
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT:    por %xmm4, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
+; SSE41-NEXT:    pand %xmm4, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
 ; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm9
 ; SSE41-NEXT:    packusdw %xmm1, %xmm9
 ; SSE41-NEXT:    packusdw %xmm9, %xmm2
diff --git a/test/CodeGen/X86/vector-trunc-widen.ll b/test/CodeGen/X86/vector-trunc-widen.ll
index 4973937..be44f46 100644
--- a/test/CodeGen/X86/vector-trunc-widen.ll
+++ b/test/CodeGen/X86/vector-trunc-widen.ll
@@ -468,11 +468,7 @@
 define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
 ; SSE2-LABEL: trunc8i32_8i16_lshr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    psrld $16, %xmm0
-; SSE2-NEXT:    psrld $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm1
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm0
 ; SSE2-NEXT:    psrad $16, %xmm0
 ; SSE2-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -767,18 +763,10 @@
 define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
 ; SSE2-LABEL: trunc16i32_16i16_lshr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    psrld $16, %xmm2
-; SSE2-NEXT:    psrld $16, %xmm3
-; SSE2-NEXT:    psrld $16, %xmm0
-; SSE2-NEXT:    psrld $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm1
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm0
 ; SSE2-NEXT:    psrad $16, %xmm0
 ; SSE2-NEXT:    packssdw %xmm1, %xmm0
-; SSE2-NEXT:    pslld $16, %xmm3
 ; SSE2-NEXT:    psrad $16, %xmm3
-; SSE2-NEXT:    pslld $16, %xmm2
 ; SSE2-NEXT:    psrad $16, %xmm2
 ; SSE2-NEXT:    packssdw %xmm3, %xmm2
 ; SSE2-NEXT:    movdqu %xmm2, (%rax)
@@ -787,18 +775,10 @@
 ;
 ; SSSE3-LABEL: trunc16i32_16i16_lshr:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    psrld $16, %xmm2
-; SSSE3-NEXT:    psrld $16, %xmm3
-; SSSE3-NEXT:    psrld $16, %xmm0
-; SSSE3-NEXT:    psrld $16, %xmm1
-; SSSE3-NEXT:    pslld $16, %xmm1
 ; SSSE3-NEXT:    psrad $16, %xmm1
-; SSSE3-NEXT:    pslld $16, %xmm0
 ; SSSE3-NEXT:    psrad $16, %xmm0
 ; SSSE3-NEXT:    packssdw %xmm1, %xmm0
-; SSSE3-NEXT:    pslld $16, %xmm3
 ; SSSE3-NEXT:    psrad $16, %xmm3
-; SSSE3-NEXT:    pslld $16, %xmm2
 ; SSSE3-NEXT:    psrad $16, %xmm2
 ; SSSE3-NEXT:    packssdw %xmm3, %xmm2
 ; SSSE3-NEXT:    movdqu %xmm2, (%rax)
diff --git a/test/CodeGen/X86/vector-trunc.ll b/test/CodeGen/X86/vector-trunc.ll
index c17a618..ba353fe 100644
--- a/test/CodeGen/X86/vector-trunc.ll
+++ b/test/CodeGen/X86/vector-trunc.ll
@@ -478,11 +478,7 @@
 define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
 ; SSE2-LABEL: trunc8i32_8i16_lshr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    psrld $16, %xmm0
-; SSE2-NEXT:    psrld $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm1
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm0
 ; SSE2-NEXT:    psrad $16, %xmm0
 ; SSE2-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -777,18 +773,10 @@
 define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
 ; SSE2-LABEL: trunc16i32_16i16_lshr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    psrld $16, %xmm2
-; SSE2-NEXT:    psrld $16, %xmm3
-; SSE2-NEXT:    psrld $16, %xmm0
-; SSE2-NEXT:    psrld $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm1
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    pslld $16, %xmm0
 ; SSE2-NEXT:    psrad $16, %xmm0
 ; SSE2-NEXT:    packssdw %xmm1, %xmm0
-; SSE2-NEXT:    pslld $16, %xmm3
 ; SSE2-NEXT:    psrad $16, %xmm3
-; SSE2-NEXT:    pslld $16, %xmm2
 ; SSE2-NEXT:    psrad $16, %xmm2
 ; SSE2-NEXT:    packssdw %xmm3, %xmm2
 ; SSE2-NEXT:    movdqu %xmm2, (%rax)
@@ -797,18 +785,10 @@
 ;
 ; SSSE3-LABEL: trunc16i32_16i16_lshr:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    psrld $16, %xmm2
-; SSSE3-NEXT:    psrld $16, %xmm3
-; SSSE3-NEXT:    psrld $16, %xmm0
-; SSSE3-NEXT:    psrld $16, %xmm1
-; SSSE3-NEXT:    pslld $16, %xmm1
 ; SSSE3-NEXT:    psrad $16, %xmm1
-; SSSE3-NEXT:    pslld $16, %xmm0
 ; SSSE3-NEXT:    psrad $16, %xmm0
 ; SSSE3-NEXT:    packssdw %xmm1, %xmm0
-; SSSE3-NEXT:    pslld $16, %xmm3
 ; SSSE3-NEXT:    psrad $16, %xmm3
-; SSSE3-NEXT:    pslld $16, %xmm2
 ; SSSE3-NEXT:    psrad $16, %xmm2
 ; SSSE3-NEXT:    packssdw %xmm3, %xmm2
 ; SSSE3-NEXT:    movdqu %xmm2, (%rax)
diff --git a/test/CodeGen/X86/viabs.ll b/test/CodeGen/X86/viabs.ll
index af7e796..cbb6a9a 100644
--- a/test/CodeGen/X86/viabs.ll
+++ b/test/CodeGen/X86/viabs.ll
@@ -1,10 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2     | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3    | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx      | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2     | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl --show-mc-encoding | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F --check-prefix=AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,+avx512bw --show-mc-encoding | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2     | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3    | FileCheck %s --check-prefixes=SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1   | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx      | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2     | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl --show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,+avx512bw --show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
 
 define <4 x i32> @test_abs_gt_v4i32(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_gt_v4i32:
@@ -20,6 +21,11 @@
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_gt_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsd %xmm0, %xmm0
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_gt_v4i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
@@ -54,6 +60,11 @@
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_ge_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsd %xmm0, %xmm0
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_ge_v4i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
@@ -88,6 +99,11 @@
 ; SSSE3-NEXT:    pabsw %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_gt_v8i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsw %xmm0, %xmm0
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_gt_v8i16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm0
@@ -127,6 +143,11 @@
 ; SSSE3-NEXT:    pabsb %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_lt_v16i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsb %xmm0, %xmm0
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_lt_v16i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm0
@@ -166,6 +187,11 @@
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_le_v4i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsd %xmm0, %xmm0
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_le_v4i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
@@ -205,6 +231,12 @@
 ; SSSE3-NEXT:    pabsd %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_gt_v8i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsd %xmm0, %xmm0
+; SSE41-NEXT:    pabsd %xmm1, %xmm1
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_gt_v8i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm1
@@ -247,6 +279,12 @@
 ; SSSE3-NEXT:    pabsd %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_ge_v8i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsd %xmm0, %xmm0
+; SSE41-NEXT:    pabsd %xmm1, %xmm1
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_ge_v8i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm1
@@ -289,6 +327,12 @@
 ; SSSE3-NEXT:    pabsw %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_gt_v16i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsw %xmm0, %xmm0
+; SSE41-NEXT:    pabsw %xmm1, %xmm1
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_gt_v16i16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm1
@@ -336,6 +380,12 @@
 ; SSSE3-NEXT:    pabsb %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_lt_v32i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsb %xmm0, %xmm0
+; SSE41-NEXT:    pabsb %xmm1, %xmm1
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_lt_v32i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm1
@@ -383,6 +433,12 @@
 ; SSSE3-NEXT:    pabsd %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_le_v8i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsd %xmm0, %xmm0
+; SSE41-NEXT:    pabsd %xmm1, %xmm1
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_le_v8i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm1
@@ -435,6 +491,14 @@
 ; SSSE3-NEXT:    pabsd %xmm3, %xmm3
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_le_16i32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsd %xmm0, %xmm0
+; SSE41-NEXT:    pabsd %xmm1, %xmm1
+; SSE41-NEXT:    pabsd %xmm2, %xmm2
+; SSE41-NEXT:    pabsd %xmm3, %xmm3
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_le_16i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm2
@@ -464,29 +528,43 @@
 }
 
 define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
-; SSE-LABEL: test_abs_ge_v2i64:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT:    paddq %xmm1, %xmm0
-; SSE-NEXT:    pxor %xmm1, %xmm0
-; SSE-NEXT:    retq
+; SSE2-LABEL: test_abs_ge_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm1, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test_abs_ge_v2i64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa %xmm0, %xmm1
+; SSSE3-NEXT:    psrad $31, %xmm1
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm1, %xmm0
+; SSSE3-NEXT:    pxor %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: test_abs_ge_v2i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    psubq %xmm0, %xmm1
+; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm0
+; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_ge_v2i64:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_ge_v2i64:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
-; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX2-NEXT:    vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_ge_v2i64:
@@ -500,39 +578,62 @@
 }
 
 define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
-; SSE-LABEL: test_abs_gt_v4i64:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrad $31, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT:    paddq %xmm2, %xmm0
-; SSE-NEXT:    pxor %xmm2, %xmm0
-; SSE-NEXT:    movdqa %xmm1, %xmm2
-; SSE-NEXT:    psrad $31, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT:    paddq %xmm2, %xmm1
-; SSE-NEXT:    pxor %xmm2, %xmm1
-; SSE-NEXT:    retq
+; SSE2-LABEL: test_abs_gt_v4i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    psrad $31, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm2, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrad $31, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm2, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test_abs_gt_v4i64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSSE3-NEXT:    psrad $31, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm2, %xmm0
+; SSSE3-NEXT:    pxor %xmm2, %xmm0
+; SSSE3-NEXT:    movdqa %xmm1, %xmm2
+; SSSE3-NEXT:    psrad $31, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm2, %xmm1
+; SSSE3-NEXT:    pxor %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: test_abs_gt_v4i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    psubq %xmm0, %xmm4
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
+; SSE41-NEXT:    psubq %xmm1, %xmm3
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
+; SSE41-NEXT:    movapd %xmm2, %xmm0
+; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_gt_v4i64:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm4
-; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_gt_v4i64:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_gt_v4i64:
@@ -546,60 +647,97 @@
 }
 
 define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
-; SSE-LABEL: test_abs_le_v8i64:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm0
-; SSE-NEXT:    pxor %xmm4, %xmm0
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm1
-; SSE-NEXT:    pxor %xmm4, %xmm1
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm2
-; SSE-NEXT:    pxor %xmm4, %xmm2
-; SSE-NEXT:    movdqa %xmm3, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm3
-; SSE-NEXT:    pxor %xmm4, %xmm3
-; SSE-NEXT:    retq
+; SSE2-LABEL: test_abs_le_v8i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm0
+; SSE2-NEXT:    pxor %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm2
+; SSE2-NEXT:    pxor %xmm4, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm3
+; SSE2-NEXT:    pxor %xmm4, %xmm3
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test_abs_le_v8i64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa %xmm0, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm0
+; SSSE3-NEXT:    pxor %xmm4, %xmm0
+; SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm1
+; SSSE3-NEXT:    pxor %xmm4, %xmm1
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm2
+; SSSE3-NEXT:    pxor %xmm4, %xmm2
+; SSSE3-NEXT:    movdqa %xmm3, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm3
+; SSSE3-NEXT:    pxor %xmm4, %xmm3
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: test_abs_le_v8i64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pxor %xmm5, %xmm5
+; SSE41-NEXT:    pxor %xmm6, %xmm6
+; SSE41-NEXT:    psubq %xmm0, %xmm6
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm4
+; SSE41-NEXT:    pxor %xmm6, %xmm6
+; SSE41-NEXT:    psubq %xmm1, %xmm6
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm1
+; SSE41-NEXT:    pxor %xmm6, %xmm6
+; SSE41-NEXT:    psubq %xmm2, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm2
+; SSE41-NEXT:    psubq %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm3
+; SSE41-NEXT:    movapd %xmm4, %xmm0
+; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_le_v8i64:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
-; AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddq %xmm5, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT:    vxorps %ymm6, %ymm0, %ymm0
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm5
-; AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT:    vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_le_v8i64:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm2, %ymm3
-; AVX2-NEXT:    vpaddq %ymm3, %ymm0, %ymm0
-; AVX2-NEXT:    vpxor %ymm3, %ymm0, %ymm0
-; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT:    vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT:    vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_le_v8i64:
@@ -613,55 +751,103 @@
 }
 
 define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
-; SSE-LABEL: test_abs_le_v8i64_fold:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movdqu (%rdi), %xmm0
-; SSE-NEXT:    movdqu 16(%rdi), %xmm1
-; SSE-NEXT:    movdqu 32(%rdi), %xmm2
-; SSE-NEXT:    movdqu 48(%rdi), %xmm3
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm0
-; SSE-NEXT:    pxor %xmm4, %xmm0
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm1
-; SSE-NEXT:    pxor %xmm4, %xmm1
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm2
-; SSE-NEXT:    pxor %xmm4, %xmm2
-; SSE-NEXT:    movdqa %xmm3, %xmm4
-; SSE-NEXT:    psrad $31, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT:    paddq %xmm4, %xmm3
-; SSE-NEXT:    pxor %xmm4, %xmm3
-; SSE-NEXT:    retq
+; SSE2-LABEL: test_abs_le_v8i64_fold:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqu (%rdi), %xmm0
+; SSE2-NEXT:    movdqu 16(%rdi), %xmm1
+; SSE2-NEXT:    movdqu 32(%rdi), %xmm2
+; SSE2-NEXT:    movdqu 48(%rdi), %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm0
+; SSE2-NEXT:    pxor %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm2
+; SSE2-NEXT:    pxor %xmm4, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrad $31, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    paddq %xmm4, %xmm3
+; SSE2-NEXT:    pxor %xmm4, %xmm3
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test_abs_le_v8i64_fold:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqu (%rdi), %xmm0
+; SSSE3-NEXT:    movdqu 16(%rdi), %xmm1
+; SSSE3-NEXT:    movdqu 32(%rdi), %xmm2
+; SSSE3-NEXT:    movdqu 48(%rdi), %xmm3
+; SSSE3-NEXT:    movdqa %xmm0, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm0
+; SSSE3-NEXT:    pxor %xmm4, %xmm0
+; SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm1
+; SSSE3-NEXT:    pxor %xmm4, %xmm1
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm2
+; SSSE3-NEXT:    pxor %xmm4, %xmm2
+; SSSE3-NEXT:    movdqa %xmm3, %xmm4
+; SSSE3-NEXT:    psrad $31, %xmm4
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT:    paddq %xmm4, %xmm3
+; SSSE3-NEXT:    pxor %xmm4, %xmm3
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: test_abs_le_v8i64_fold:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqu (%rdi), %xmm1
+; SSE41-NEXT:    movdqu 16(%rdi), %xmm2
+; SSE41-NEXT:    movdqu 32(%rdi), %xmm3
+; SSE41-NEXT:    movdqu 48(%rdi), %xmm4
+; SSE41-NEXT:    pxor %xmm5, %xmm5
+; SSE41-NEXT:    pxor %xmm6, %xmm6
+; SSE41-NEXT:    psubq %xmm1, %xmm6
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm1
+; SSE41-NEXT:    pxor %xmm6, %xmm6
+; SSE41-NEXT:    psubq %xmm2, %xmm6
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm2
+; SSE41-NEXT:    pxor %xmm6, %xmm6
+; SSE41-NEXT:    psubq %xmm3, %xmm6
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm6, %xmm3
+; SSE41-NEXT:    psubq %xmm4, %xmm5
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm4
+; SSE41-NEXT:    movapd %xmm1, %xmm0
+; SSE41-NEXT:    movapd %xmm2, %xmm1
+; SSE41-NEXT:    movapd %xmm3, %xmm2
+; SSE41-NEXT:    movapd %xmm4, %xmm3
+; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_le_v8i64_fold:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqu (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqu 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqu 32(%rdi), %xmm2
-; AVX1-NEXT:    vmovdqu 48(%rdi), %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
-; AVX1-NEXT:    vpaddq %xmm5, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddq %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vxorps %ymm7, %ymm0, %ymm0
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm5
-; AVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT:    vmovupd (%rdi), %ymm0
+; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq 16(%rdi), %xmm2, %xmm3
+; AVX1-NEXT:    vpsubq (%rdi), %xmm2, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
+; AVX1-NEXT:    vpsubq 48(%rdi), %xmm2, %xmm3
+; AVX1-NEXT:    vpsubq 32(%rdi), %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_le_v8i64_fold:
@@ -669,12 +855,10 @@
 ; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX2-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm2, %ymm3
-; AVX2-NEXT:    vpaddq %ymm3, %ymm0, %ymm0
-; AVX2-NEXT:    vpxor %ymm3, %ymm0, %ymm0
-; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT:    vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT:    vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_le_v8i64_fold:
@@ -717,6 +901,14 @@
 ; SSSE3-NEXT:    pabsb %xmm3, %xmm3
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_lt_v64i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsb %xmm0, %xmm0
+; SSE41-NEXT:    pabsb %xmm1, %xmm1
+; SSE41-NEXT:    pabsb %xmm2, %xmm2
+; SSE41-NEXT:    pabsb %xmm3, %xmm3
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_lt_v64i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm2
@@ -780,6 +972,14 @@
 ; SSSE3-NEXT:    pabsw %xmm3, %xmm3
 ; SSSE3-NEXT:    retq
 ;
+; SSE41-LABEL: test_abs_gt_v32i16:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pabsw %xmm0, %xmm0
+; SSE41-NEXT:    pabsw %xmm1, %xmm1
+; SSE41-NEXT:    pabsw %xmm2, %xmm2
+; SSE41-NEXT:    pabsw %xmm3, %xmm3
+; SSE41-NEXT:    retq
+;
 ; AVX1-LABEL: test_abs_gt_v32i16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm2
diff --git a/test/CodeGen/X86/vpshufbitqbm-intrinsics-upgrade.ll b/test/CodeGen/X86/vpshufbitqbm-intrinsics-upgrade.ll
new file mode 100644
index 0000000..d06d8a9
--- /dev/null
+++ b/test/CodeGen/X86/vpshufbitqbm-intrinsics-upgrade.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512bitalg,+avx512vl | FileCheck %s
+
+declare i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
+define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; CHECK-LABEL: test_vpshufbitqmb_128:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpshufbitqmb %xmm3, %xmm2, %k1
+; CHECK-NEXT:    vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT:    kmovd %k0, %eax
+; CHECK-NEXT:    ## kill: def $ax killed $ax killed $eax
+; CHECK-NEXT:    retq
+  %res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
+  %res2 = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %c, <16 x i8> %d, i16 %res)
+  ret i16 %res2
+}
+
+declare i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 %mask)
+define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
+; CHECK-LABEL: test_vpshufbitqmb_256:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpshufbitqmb %ymm3, %ymm2, %k1
+; CHECK-NEXT:    vpshufbitqmb %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT:    kmovd %k0, %eax
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 -1)
+  %res2 = call i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %c, <32 x i8> %d, i32 %res)
+  ret i32 %res2
+}
+
+declare i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
+define i64 @test_vpshufbitqmb_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
+; CHECK-LABEL: test_vpshufbitqmb_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vpshufbitqmb %zmm3, %zmm2, %k1
+; CHECK-NEXT:    vpshufbitqmb %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT:    kmovq %k0, %rax
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %res = call i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 -1)
+  %res2 = call i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %c, <64 x i8> %d, i64 %res)
+  ret i64 %res2
+}
diff --git a/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll b/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
index a80ed2f..f83f2df 100644
--- a/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
+++ b/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
@@ -1,41 +1,51 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512bitalg,+avx512vl | FileCheck %s
 
-declare i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
-define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
 ; CHECK-LABEL: test_vpshufbitqmb_128:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT:    vpshufbitqmb %xmm1, %xmm0, %k1
+; CHECK-NEXT:    vpshufbitqmb %xmm3, %xmm2, %k0 {%k1}
 ; CHECK-NEXT:    kmovd %k0, %eax
 ; CHECK-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    retq
-  %res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
-  ret i16 %res
+  %tmp = call <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b)
+  %tmp1 = call <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8> %c, <16 x i8> %d)
+  %tmp2 = and <16 x i1> %tmp, %tmp1
+  %tmp3 = bitcast <16 x i1> %tmp2 to i16
+  ret i16 %tmp3
 }
 
-declare i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 %mask)
-define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; CHECK-LABEL: test_vpshufbitqmb_256:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vpshufbitqmb %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT:    vpshufbitqmb %ymm1, %ymm0, %k1
+; CHECK-NEXT:    vpshufbitqmb %ymm3, %ymm2, %k0 {%k1}
 ; CHECK-NEXT:    kmovd %k0, %eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %res = call i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 %mask)
-  ret i32 %res
+  %tmp = call <32 x i1> @llvm.x86.avx512.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b)
+  %tmp1 = call <32 x i1> @llvm.x86.avx512.vpshufbitqmb.256(<32 x i8> %c, <32 x i8> %d)
+  %tmp2 = and <32 x i1> %tmp, %tmp1
+  %tmp3 = bitcast <32 x i1> %tmp2 to i32
+  ret i32 %tmp3
 }
 
-declare i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
-define i64 @test_vpshufbitqmb_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
+define i64 @test_vpshufbitqmb_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
 ; CHECK-LABEL: test_vpshufbitqmb_512:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovq %rdi, %k1
-; CHECK-NEXT:    vpshufbitqmb %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT:    vpshufbitqmb %zmm1, %zmm0, %k1
+; CHECK-NEXT:    vpshufbitqmb %zmm3, %zmm2, %k0 {%k1}
 ; CHECK-NEXT:    kmovq %k0, %rax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %res = call i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
-  ret i64 %res
+  %tmp = call <64 x i1> @llvm.x86.avx512.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b)
+  %tmp1 = call <64 x i1> @llvm.x86.avx512.vpshufbitqmb.512(<64 x i8> %c, <64 x i8> %d)
+  %tmp2 = and <64 x i1> %tmp, %tmp1
+  %tmp3 = bitcast <64 x i1> %tmp2 to i64
+  ret i64 %tmp3
 }
+
+declare <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8>, <16 x i8>)
+declare <32 x i1> @llvm.x86.avx512.vpshufbitqmb.256(<32 x i8>, <32 x i8>)
+declare <64 x i1> @llvm.x86.avx512.vpshufbitqmb.512(<64 x i8>, <64 x i8>)
diff --git a/test/CodeGen/X86/vsel-cmp-load.ll b/test/CodeGen/X86/vsel-cmp-load.ll
index d317377..cbbbd63 100644
--- a/test/CodeGen/X86/vsel-cmp-load.ll
+++ b/test/CodeGen/X86/vsel-cmp-load.ll
@@ -11,10 +11,9 @@
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX1-NEXT:    vpslld $24, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX1-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; AVX1-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
@@ -181,10 +180,9 @@
 ; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX1-NEXT:    vpslld $24, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX1-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; AVX1-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
@@ -215,12 +213,11 @@
 ; AVX1-LABEL: sgt_zero_fp_select:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpmovsxdq %xmm2, %xmm3
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; AVX1-NEXT:    vpmovsxdq %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
diff --git a/test/CodeGen/X86/win32-seh-catchpad.ll b/test/CodeGen/X86/win32-seh-catchpad.ll
index 995a4b9..f601953 100644
--- a/test/CodeGen/X86/win32-seh-catchpad.ll
+++ b/test/CodeGen/X86/win32-seh-catchpad.ll
@@ -53,7 +53,7 @@
 define internal i32 @try_except_filter_catchall() #0 {
 entry:
   %0 = call i8* @llvm.frameaddress(i32 1)
-  %1 = call i8* @llvm.x86.seh.recoverfp(i8* bitcast (void ()* @try_except to i8*), i8* %0)
+  %1 = call i8* @llvm.eh.recoverfp(i8* bitcast (void ()* @try_except to i8*), i8* %0)
   %2 = call i8* @llvm.localrecover(i8* bitcast (void ()* @try_except to i8*), i8* %1, i32 0)
   %__exception_code = bitcast i8* %2 to i32*
   %3 = getelementptr inbounds i8, i8* %0, i32 -20
@@ -169,7 +169,7 @@
 define internal i32 @nested_exceptions_filter_catchall() #0 {
 entry:
   %0 = call i8* @llvm.frameaddress(i32 1)
-  %1 = call i8* @llvm.x86.seh.recoverfp(i8* bitcast (void ()* @nested_exceptions to i8*), i8* %0)
+  %1 = call i8* @llvm.eh.recoverfp(i8* bitcast (void ()* @nested_exceptions to i8*), i8* %0)
   %2 = call i8* @llvm.localrecover(i8* bitcast (void ()* @nested_exceptions to i8*), i8* %1, i32 0)
   %__exception_code3 = bitcast i8* %2 to i32*
   %3 = getelementptr inbounds i8, i8* %0, i32 -20
@@ -213,7 +213,7 @@
 declare i8* @llvm.frameaddress(i32) #1
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*) #1
+declare i8* @llvm.eh.recoverfp(i8*, i8*) #1
 
 ; Function Attrs: nounwind readnone
 declare i8* @llvm.localrecover(i8*, i8*, i32) #1
diff --git a/test/CodeGen/X86/xaluo.ll b/test/CodeGen/X86/xaluo.ll
index 25aa45e..c76fa7e 100644
--- a/test/CodeGen/X86/xaluo.ll
+++ b/test/CodeGen/X86/xaluo.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG
 ; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=FAST
-; RUN: llc -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefix=KNL
+; RUN: llc -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefix=SDAG
 
 ;
 ; Get the actual value of the overflow bit.
@@ -23,13 +23,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi8:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addb %sil, %dil
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movb %dil, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
   %val = extractvalue {i8, i1} %t, 0
   %obit = extractvalue {i8, i1} %t, 1
@@ -53,13 +46,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi16:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addw %si, %di
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movw %di, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
   %val = extractvalue {i16, i1} %t, 0
   %obit = extractvalue {i16, i1} %t, 1
@@ -83,13 +69,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addl %esi, %edi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movl %edi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -113,13 +92,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addq %rsi, %rdi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rdi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -144,13 +116,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoinci8:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    incb %dil
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movb %dil, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 1)
   %val = extractvalue {i8, i1} %t, 0
   %obit = extractvalue {i8, i1} %t, 1
@@ -174,13 +139,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoinci16:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    incw %di
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movw %di, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 1)
   %val = extractvalue {i16, i1} %t, 0
   %obit = extractvalue {i16, i1} %t, 1
@@ -204,13 +162,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoinci32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    incl %edi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movl %edi, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 1)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -234,13 +185,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoinci64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    incq %rdi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rdi, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -267,14 +211,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi64imm1:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl $2, %ecx
-; KNL-NEXT:    addq %rdi, %rcx
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rcx, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 2, i64 %v1)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -299,13 +235,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi64imm2:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addq $-2147483648, %rdi ## imm = 0x80000000
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rdi, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -2147483648)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -331,14 +260,6 @@
 ; FAST-NEXT:    andb $1, %cl
 ; FAST-NEXT:    movzbl %cl, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi64imm3:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movabsq $-21474836489, %rcx ## imm = 0xFFFFFFFAFFFFFFF7
-; KNL-NEXT:    addq %rdi, %rcx
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rcx, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -21474836489)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -362,13 +283,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi64imm4:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addq $2147483647, %rdi ## imm = 0x7FFFFFFF
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rdi, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483647)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -394,14 +308,6 @@
 ; FAST-NEXT:    andb $1, %cl
 ; FAST-NEXT:    movzbl %cl, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoi64imm5:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl $2147483648, %ecx ## imm = 0x80000000
-; KNL-NEXT:    addq %rdi, %rcx
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rcx, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483648)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -426,13 +332,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoi32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addl %esi, %edi
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movl %edi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -456,13 +355,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoi64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addq %rsi, %rdi
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movq %rdi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -487,13 +379,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoinci8:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addb $1, %dil
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movb %dil, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v1, i8 1)
   %val = extractvalue {i8, i1} %t, 0
   %obit = extractvalue {i8, i1} %t, 1
@@ -517,13 +402,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoinci16:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addw $1, %di
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movw %di, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i16, i1} @llvm.uadd.with.overflow.i16(i16 %v1, i16 1)
   %val = extractvalue {i16, i1} %t, 0
   %obit = extractvalue {i16, i1} %t, 1
@@ -547,13 +425,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoinci32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addl $1, %edi
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movl %edi, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 1)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -577,13 +448,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoinci64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addq $1, %rdi
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movq %rdi, (%rsi)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 1)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -608,13 +472,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: ssuboi32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    subl %esi, %edi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movl %edi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -638,13 +495,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: ssuboi64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    subq %rsi, %rdi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rdi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -669,13 +519,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: usuboi32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    subl %esi, %edi
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movl %edi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -699,13 +542,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: usuboi64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    subq %rsi, %rdi
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    movq %rdi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -732,14 +568,6 @@
 ; FAST-NEXT:    addl %esi, %ecx
 ; FAST-NEXT:    cmovol %edi, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoselecti32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %esi, %eax
-; KNL-NEXT:    movl %edi, %ecx
-; KNL-NEXT:    addl %esi, %ecx
-; KNL-NEXT:    cmovol %edi, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -762,14 +590,6 @@
 ; FAST-NEXT:    addq %rsi, %rcx
 ; FAST-NEXT:    cmovoq %rdi, %rax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddoselecti64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rsi, %rax
-; KNL-NEXT:    movq %rdi, %rcx
-; KNL-NEXT:    addq %rsi, %rcx
-; KNL-NEXT:    cmovoq %rdi, %rax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -792,14 +612,6 @@
 ; FAST-NEXT:    addl %esi, %ecx
 ; FAST-NEXT:    cmovbl %edi, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoselecti32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %esi, %eax
-; KNL-NEXT:    movl %edi, %ecx
-; KNL-NEXT:    addl %esi, %ecx
-; KNL-NEXT:    cmovbl %edi, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -822,14 +634,6 @@
 ; FAST-NEXT:    addq %rsi, %rcx
 ; FAST-NEXT:    cmovbq %rdi, %rax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoselecti64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rsi, %rax
-; KNL-NEXT:    movq %rdi, %rcx
-; KNL-NEXT:    addq %rsi, %rcx
-; KNL-NEXT:    cmovbq %rdi, %rax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -850,13 +654,6 @@
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    cmovol %edi, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: ssuboselecti32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %esi, %eax
-; KNL-NEXT:    cmpl %esi, %edi
-; KNL-NEXT:    cmovol %edi, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -877,13 +674,6 @@
 ; FAST-NEXT:    cmpq %rsi, %rdi
 ; FAST-NEXT:    cmovoq %rdi, %rax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: ssuboselecti64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rsi, %rax
-; KNL-NEXT:    cmpq %rsi, %rdi
-; KNL-NEXT:    cmovoq %rdi, %rax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -904,13 +694,6 @@
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    cmovbl %edi, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: usuboselecti32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %esi, %eax
-; KNL-NEXT:    cmpl %esi, %edi
-; KNL-NEXT:    cmovbl %edi, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -931,13 +714,6 @@
 ; FAST-NEXT:    cmpq %rsi, %rdi
 ; FAST-NEXT:    cmovbq %rdi, %rax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: usuboselecti64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rsi, %rax
-; KNL-NEXT:    cmpq %rsi, %rdi
-; KNL-NEXT:    cmovbq %rdi, %rax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -973,17 +749,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddobri32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addl %esi, %edi
-; KNL-NEXT:    jo LBB31_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB31_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -1022,17 +787,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: saddobri64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addq %rsi, %rdi
-; KNL-NEXT:    jo LBB32_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB32_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -1071,17 +825,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddobri32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addl %esi, %edi
-; KNL-NEXT:    jb LBB33_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB33_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -1120,17 +863,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddobri64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    addq %rsi, %rdi
-; KNL-NEXT:    jb LBB34_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB34_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -1169,17 +901,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: ssubobri32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    cmpl %esi, %edi
-; KNL-NEXT:    jo LBB35_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB35_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -1218,17 +939,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: ssubobri64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    cmpq %rsi, %rdi
-; KNL-NEXT:    jo LBB36_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB36_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -1267,17 +977,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: usubobri32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    cmpl %esi, %edi
-; KNL-NEXT:    jb LBB37_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB37_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -1316,17 +1015,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: usubobri64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    cmpq %rsi, %rdi
-; KNL-NEXT:    jb LBB38_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB38_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -1355,14 +1043,6 @@
 ; FAST-NEXT:    addq %rcx, %rax
 ; FAST-NEXT:    xorl %edx, %edx
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: uaddoovf:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movzbl %dil, %ecx
-; KNL-NEXT:    movzbl %sil, %eax
-; KNL-NEXT:    addq %rcx, %rax
-; KNL-NEXT:    xorl %edx, %edx
-; KNL-NEXT:    retq
   %1 = and i64 %a, 255
   %2 = and i64 %b, 255
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %1, i64 %2)
@@ -1383,13 +1063,6 @@
 ; FAST-NEXT:    notq %rax
 ; FAST-NEXT:    xorl %edx, %edx
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: usuboovf:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rsi, %rax
-; KNL-NEXT:    notq %rax
-; KNL-NEXT:    xorl %edx, %edx
-; KNL-NEXT:    retq
   %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %a)
   %v0 = extractvalue {i64, i1} %t0, 0
   %o0 = extractvalue {i64, i1} %t0, 1
@@ -1405,6 +1078,60 @@
   ret {i64, i1} %t
 }
 
+; Make sure we select an INC for both the data use and the flag use.
+define i32 @incovfselectstore(i32 %v1, i32 %v2, i32* %x) {
+; SDAG-LABEL: incovfselectstore:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %esi, %eax
+; SDAG-NEXT:    movl %edi, %ecx
+; SDAG-NEXT:    incl %ecx
+; SDAG-NEXT:    cmovol %edi, %eax
+; SDAG-NEXT:    movl %ecx, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: incovfselectstore:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movl %esi, %eax
+; FAST-NEXT:    movl %edi, %ecx
+; FAST-NEXT:    incl %ecx
+; FAST-NEXT:    cmovol %edi, %eax
+; FAST-NEXT:    movl %ecx, (%rdx)
+; FAST-NEXT:    retq
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 1)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  %val = extractvalue {i32, i1} %t, 0
+  store i32 %val, i32* %x
+  ret i32 %ret
+}
+
+; Make sure we select a DEC for both the data use and the flag use.
+define i32 @decovfselectstore(i32 %v1, i32 %v2, i32* %x) {
+; SDAG-LABEL: decovfselectstore:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %esi, %eax
+; SDAG-NEXT:    movl %edi, %ecx
+; SDAG-NEXT:    decl %ecx
+; SDAG-NEXT:    cmovol %edi, %eax
+; SDAG-NEXT:    movl %ecx, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: decovfselectstore:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movl %esi, %eax
+; FAST-NEXT:    movl %edi, %ecx
+; FAST-NEXT:    decl %ecx
+; FAST-NEXT:    cmovol %edi, %eax
+; FAST-NEXT:    movl %ecx, (%rdx)
+; FAST-NEXT:    retq
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 1)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  %val = extractvalue {i32, i1} %t, 0
+  store i32 %val, i32* %x
+  ret i32 %ret
+}
+
 declare {i8,  i1} @llvm.sadd.with.overflow.i8 (i8,  i8 ) nounwind readnone
 declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
 declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
diff --git a/test/CodeGen/X86/xmulo.ll b/test/CodeGen/X86/xmulo.ll
index 8d2e81f..86b15cc 100644
--- a/test/CodeGen/X86/xmulo.ll
+++ b/test/CodeGen/X86/xmulo.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG
-; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=FAST
-; RUN: llc -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefix=KNL
+; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG
+; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=FAST
+; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefix=SDAG --check-prefix=KNL
 
 define {i64, i1} @t1() nounwind {
 ; SDAG-LABEL: t1:
@@ -19,14 +19,6 @@
 ; FAST-NEXT:    mulq %rcx
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: t1:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl $8, %ecx
-; KNL-NEXT:    movl $9, %eax
-; KNL-NEXT:    mulq %rcx
-; KNL-NEXT:    seto %dl
-; KNL-NEXT:    retq
   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 8)
   ret {i64, i1} %1
 }
@@ -47,14 +39,6 @@
 ; FAST-NEXT:    mulq %rcx
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: t2:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    xorl %ecx, %ecx
-; KNL-NEXT:    movl $9, %eax
-; KNL-NEXT:    mulq %rcx
-; KNL-NEXT:    seto %dl
-; KNL-NEXT:    retq
   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 0)
   ret {i64, i1} %1
 }
@@ -75,14 +59,6 @@
 ; FAST-NEXT:    mulq %rcx
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: t3:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq $-1, %rcx
-; KNL-NEXT:    movl $9, %eax
-; KNL-NEXT:    mulq %rcx
-; KNL-NEXT:    seto %dl
-; KNL-NEXT:    retq
   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 -1)
   ret {i64, i1} %1
 }
@@ -109,16 +85,6 @@
 ; FAST-NEXT:    andb $1, %cl
 ; FAST-NEXT:    movzbl %cl, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: smuloi8:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %edi, %eax
-; KNL-NEXT:    ## kill: def $al killed $al killed $eax
-; KNL-NEXT:    imulb %sil
-; KNL-NEXT:    seto %cl
-; KNL-NEXT:    movb %al, (%rdx)
-; KNL-NEXT:    movl %ecx, %eax
-; KNL-NEXT:    retq
   %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
   %val = extractvalue {i8, i1} %t, 0
   %obit = extractvalue {i8, i1} %t, 1
@@ -142,13 +108,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: smuloi16:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    imulw %si, %di
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movw %di, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
   %val = extractvalue {i16, i1} %t, 0
   %obit = extractvalue {i16, i1} %t, 1
@@ -172,13 +131,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: smuloi32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    imull %esi, %edi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movl %edi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -202,13 +154,6 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: smuloi64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    imulq %rsi, %rdi
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    movq %rdi, (%rdx)
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -238,16 +183,6 @@
 ; FAST-NEXT:    andb $1, %cl
 ; FAST-NEXT:    movzbl %cl, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umuloi8:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %edi, %eax
-; KNL-NEXT:    ## kill: def $al killed $al killed $eax
-; KNL-NEXT:    mulb %sil
-; KNL-NEXT:    seto %cl
-; KNL-NEXT:    movb %al, (%rdx)
-; KNL-NEXT:    movl %ecx, %eax
-; KNL-NEXT:    retq
   %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
   %val = extractvalue {i8, i1} %t, 0
   %obit = extractvalue {i8, i1} %t, 1
@@ -278,17 +213,6 @@
 ; FAST-NEXT:    andb $1, %dl
 ; FAST-NEXT:    movzbl %dl, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umuloi16:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rdx, %rcx
-; KNL-NEXT:    movl %edi, %eax
-; KNL-NEXT:    ## kill: def $ax killed $ax killed $eax
-; KNL-NEXT:    mulw %si
-; KNL-NEXT:    seto %dl
-; KNL-NEXT:    movw %ax, (%rcx)
-; KNL-NEXT:    movl %edx, %eax
-; KNL-NEXT:    retq
   %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
   %val = extractvalue {i16, i1} %t, 0
   %obit = extractvalue {i16, i1} %t, 1
@@ -317,16 +241,6 @@
 ; FAST-NEXT:    andb $1, %dl
 ; FAST-NEXT:    movzbl %dl, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umuloi32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rdx, %rcx
-; KNL-NEXT:    movl %edi, %eax
-; KNL-NEXT:    mull %esi
-; KNL-NEXT:    seto %dl
-; KNL-NEXT:    movl %eax, (%rcx)
-; KNL-NEXT:    movl %edx, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -355,16 +269,6 @@
 ; FAST-NEXT:    andb $1, %dl
 ; FAST-NEXT:    movzbl %dl, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umuloi64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rdx, %rcx
-; KNL-NEXT:    movq %rdi, %rax
-; KNL-NEXT:    mulq %rsi
-; KNL-NEXT:    seto %dl
-; KNL-NEXT:    movq %rax, (%rcx)
-; KNL-NEXT:    movl %edx, %eax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -391,14 +295,6 @@
 ; FAST-NEXT:    imull %esi, %ecx
 ; FAST-NEXT:    cmovol %edi, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: smuloselecti32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %esi, %eax
-; KNL-NEXT:    movl %edi, %ecx
-; KNL-NEXT:    imull %esi, %ecx
-; KNL-NEXT:    cmovol %edi, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -421,14 +317,6 @@
 ; FAST-NEXT:    imulq %rsi, %rcx
 ; FAST-NEXT:    cmovoq %rdi, %rax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: smuloselecti64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rsi, %rax
-; KNL-NEXT:    movq %rdi, %rcx
-; KNL-NEXT:    imulq %rsi, %rcx
-; KNL-NEXT:    cmovoq %rdi, %rax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -451,14 +339,6 @@
 ; FAST-NEXT:    cmovol %edi, %esi
 ; FAST-NEXT:    movl %esi, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umuloselecti32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %edi, %eax
-; KNL-NEXT:    mull %esi
-; KNL-NEXT:    cmovol %edi, %esi
-; KNL-NEXT:    movl %esi, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -481,14 +361,6 @@
 ; FAST-NEXT:    cmovoq %rdi, %rsi
 ; FAST-NEXT:    movq %rsi, %rax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umuloselecti64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rdi, %rax
-; KNL-NEXT:    mulq %rsi
-; KNL-NEXT:    cmovoq %rdi, %rsi
-; KNL-NEXT:    movq %rsi, %rax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -498,10 +370,12 @@
 ;
 ; Check the use of the overflow bit in combination with a branch instruction.
 ;
-define zeroext i1 @smulobri32(i32 %v1, i32 %v2) {
-; SDAG-LABEL: smulobri32:
+define zeroext i1 @smulobri8(i8 %v1, i8 %v2) {
+; SDAG-LABEL: smulobri8:
 ; SDAG:       ## %bb.0:
-; SDAG-NEXT:    imull %esi, %edi
+; SDAG-NEXT:    movl %edi, %eax
+; SDAG-NEXT:    ## kill: def $al killed $al killed $eax
+; SDAG-NEXT:    imulb %sil
 ; SDAG-NEXT:    jo LBB15_1
 ; SDAG-NEXT:  ## %bb.2: ## %continue
 ; SDAG-NEXT:    movb $1, %al
@@ -510,10 +384,14 @@
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
-; FAST-LABEL: smulobri32:
+; FAST-LABEL: smulobri8:
 ; FAST:       ## %bb.0:
-; FAST-NEXT:    imull %esi, %edi
-; FAST-NEXT:    jo LBB15_1
+; FAST-NEXT:    movl %edi, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
+; FAST-NEXT:    imulb %sil
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    testb $1, %al
+; FAST-NEXT:    jne LBB15_1
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
@@ -524,17 +402,84 @@
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
+  %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
+  %val = extractvalue {i8, i1} %t, 0
+  %obit = extractvalue {i8, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @smulobri16(i16 %v1, i16 %v2) {
+; SDAG-LABEL: smulobri16:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imulw %si, %di
+; SDAG-NEXT:    jo LBB16_1
+; SDAG-NEXT:  ## %bb.2: ## %continue
+; SDAG-NEXT:    movb $1, %al
+; SDAG-NEXT:    retq
+; SDAG-NEXT:  LBB16_1: ## %overflow
+; SDAG-NEXT:    xorl %eax, %eax
+; SDAG-NEXT:    retq
 ;
-; KNL-LABEL: smulobri32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    imull %esi, %edi
-; KNL-NEXT:    jo LBB15_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB15_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
+; FAST-LABEL: smulobri16:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imulw %si, %di
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    testb $1, %al
+; FAST-NEXT:    jne LBB16_1
+; FAST-NEXT:  ## %bb.2: ## %continue
+; FAST-NEXT:    movb $1, %al
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+; FAST-NEXT:  LBB16_1: ## %overflow
+; FAST-NEXT:    xorl %eax, %eax
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
+  %val = extractvalue {i16, i1} %t, 0
+  %obit = extractvalue {i16, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @smulobri32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: smulobri32:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imull %esi, %edi
+; SDAG-NEXT:    jo LBB17_1
+; SDAG-NEXT:  ## %bb.2: ## %continue
+; SDAG-NEXT:    movb $1, %al
+; SDAG-NEXT:    retq
+; SDAG-NEXT:  LBB17_1: ## %overflow
+; SDAG-NEXT:    xorl %eax, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smulobri32:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imull %esi, %edi
+; FAST-NEXT:    jo LBB17_1
+; FAST-NEXT:  ## %bb.2: ## %continue
+; FAST-NEXT:    movb $1, %al
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+; FAST-NEXT:  LBB17_1: ## %overflow
+; FAST-NEXT:    xorl %eax, %eax
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -551,39 +496,28 @@
 ; SDAG-LABEL: smulobri64:
 ; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    imulq %rsi, %rdi
-; SDAG-NEXT:    jo LBB16_1
+; SDAG-NEXT:    jo LBB18_1
 ; SDAG-NEXT:  ## %bb.2: ## %continue
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
-; SDAG-NEXT:  LBB16_1: ## %overflow
+; SDAG-NEXT:  LBB18_1: ## %overflow
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: smulobri64:
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    imulq %rsi, %rdi
-; FAST-NEXT:    jo LBB16_1
+; FAST-NEXT:    jo LBB18_1
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-; FAST-NEXT:  LBB16_1: ## %overflow
+; FAST-NEXT:  LBB18_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: smulobri64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    imulq %rsi, %rdi
-; KNL-NEXT:    jo LBB16_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB16_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -596,16 +530,104 @@
   ret i1 true
 }
 
+define zeroext i1 @umulobri8(i8 %v1, i8 %v2) {
+; SDAG-LABEL: umulobri8:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %edi, %eax
+; SDAG-NEXT:    ## kill: def $al killed $al killed $eax
+; SDAG-NEXT:    mulb %sil
+; SDAG-NEXT:    jo LBB19_1
+; SDAG-NEXT:  ## %bb.2: ## %continue
+; SDAG-NEXT:    movb $1, %al
+; SDAG-NEXT:    retq
+; SDAG-NEXT:  LBB19_1: ## %overflow
+; SDAG-NEXT:    xorl %eax, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umulobri8:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movl %edi, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
+; FAST-NEXT:    mulb %sil
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    testb $1, %al
+; FAST-NEXT:    jne LBB19_1
+; FAST-NEXT:  ## %bb.2: ## %continue
+; FAST-NEXT:    movb $1, %al
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+; FAST-NEXT:  LBB19_1: ## %overflow
+; FAST-NEXT:    xorl %eax, %eax
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
+  %val = extractvalue {i8, i1} %t, 0
+  %obit = extractvalue {i8, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @umulobri16(i16 %v1, i16 %v2) {
+; SDAG-LABEL: umulobri16:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %edi, %eax
+; SDAG-NEXT:    ## kill: def $ax killed $ax killed $eax
+; SDAG-NEXT:    mulw %si
+; SDAG-NEXT:    jo LBB20_1
+; SDAG-NEXT:  ## %bb.2: ## %continue
+; SDAG-NEXT:    movb $1, %al
+; SDAG-NEXT:    retq
+; SDAG-NEXT:  LBB20_1: ## %overflow
+; SDAG-NEXT:    xorl %eax, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umulobri16:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movl %edi, %eax
+; FAST-NEXT:    ## kill: def $ax killed $ax killed $eax
+; FAST-NEXT:    mulw %si
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    testb $1, %al
+; FAST-NEXT:    jne LBB20_1
+; FAST-NEXT:  ## %bb.2: ## %continue
+; FAST-NEXT:    movb $1, %al
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+; FAST-NEXT:  LBB20_1: ## %overflow
+; FAST-NEXT:    xorl %eax, %eax
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
+  %val = extractvalue {i16, i1} %t, 0
+  %obit = extractvalue {i16, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
 define zeroext i1 @umulobri32(i32 %v1, i32 %v2) {
 ; SDAG-LABEL: umulobri32:
 ; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movl %edi, %eax
 ; SDAG-NEXT:    mull %esi
-; SDAG-NEXT:    jo LBB17_1
+; SDAG-NEXT:    jo LBB21_1
 ; SDAG-NEXT:  ## %bb.2: ## %continue
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
-; SDAG-NEXT:  LBB17_1: ## %overflow
+; SDAG-NEXT:  LBB21_1: ## %overflow
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
@@ -613,29 +635,17 @@
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    movl %edi, %eax
 ; FAST-NEXT:    mull %esi
-; FAST-NEXT:    jo LBB17_1
+; FAST-NEXT:    jo LBB21_1
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-; FAST-NEXT:  LBB17_1: ## %overflow
+; FAST-NEXT:  LBB21_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umulobri32:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movl %edi, %eax
-; KNL-NEXT:    mull %esi
-; KNL-NEXT:    jo LBB17_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB17_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -653,11 +663,11 @@
 ; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movq %rdi, %rax
 ; SDAG-NEXT:    mulq %rsi
-; SDAG-NEXT:    jo LBB18_1
+; SDAG-NEXT:    jo LBB22_1
 ; SDAG-NEXT:  ## %bb.2: ## %continue
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
-; SDAG-NEXT:  LBB18_1: ## %overflow
+; SDAG-NEXT:  LBB22_1: ## %overflow
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
@@ -665,29 +675,17 @@
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    movq %rdi, %rax
 ; FAST-NEXT:    mulq %rsi
-; FAST-NEXT:    jo LBB18_1
+; FAST-NEXT:    jo LBB22_1
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-; FAST-NEXT:  LBB18_1: ## %overflow
+; FAST-NEXT:  LBB22_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: umulobri64:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rdi, %rax
-; KNL-NEXT:    mulq %rsi
-; KNL-NEXT:    jo LBB18_1
-; KNL-NEXT:  ## %bb.2: ## %continue
-; KNL-NEXT:    movb $1, %al
-; KNL-NEXT:    retq
-; KNL-NEXT:  LBB18_1: ## %overflow
-; KNL-NEXT:    xorl %eax, %eax
-; KNL-NEXT:    retq
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -718,21 +716,447 @@
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    orb %sil, %al
 ; FAST-NEXT:    retq
-;
-; KNL-LABEL: bug27873:
-; KNL:       ## %bb.0:
-; KNL-NEXT:    movq %rdi, %rax
-; KNL-NEXT:    movl $160, %ecx
-; KNL-NEXT:    mulq %rcx
-; KNL-NEXT:    seto %al
-; KNL-NEXT:    orb %sil, %al
-; KNL-NEXT:    retq
   %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %c1, i64 160)
   %mul.overflow = extractvalue { i64, i1 } %mul, 1
   %x1 = or i1 %c2, %mul.overflow
   ret i1 %x1
 }
 
+define zeroext i1 @smuloi8_load(i8* %ptr1, i8 %v2, i8* %res) {
+; SDAG-LABEL: smuloi8_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %esi, %eax
+; SDAG-NEXT:    ## kill: def $al killed $al killed $eax
+; SDAG-NEXT:    imulb (%rdi)
+; SDAG-NEXT:    seto %cl
+; SDAG-NEXT:    movb %al, (%rdx)
+; SDAG-NEXT:    movl %ecx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi8_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movb (%rdi), %al
+; FAST-NEXT:    imulb %sil
+; FAST-NEXT:    seto %cl
+; FAST-NEXT:    movb %al, (%rdx)
+; FAST-NEXT:    andb $1, %cl
+; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    retq
+  %v1 = load i8, i8* %ptr1
+  %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
+  %val = extractvalue {i8, i1} %t, 0
+  %obit = extractvalue {i8, i1} %t, 1
+  store i8 %val, i8* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smuloi8_load2(i8 %v1, i8* %ptr2, i8* %res) {
+; SDAG-LABEL: smuloi8_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %edi, %eax
+; SDAG-NEXT:    ## kill: def $al killed $al killed $eax
+; SDAG-NEXT:    imulb (%rsi)
+; SDAG-NEXT:    seto %cl
+; SDAG-NEXT:    movb %al, (%rdx)
+; SDAG-NEXT:    movl %ecx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi8_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movl %edi, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
+; FAST-NEXT:    imulb (%rsi)
+; FAST-NEXT:    seto %cl
+; FAST-NEXT:    movb %al, (%rdx)
+; FAST-NEXT:    andb $1, %cl
+; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    retq
+  %v2 = load i8, i8* %ptr2
+  %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
+  %val = extractvalue {i8, i1} %t, 0
+  %obit = extractvalue {i8, i1} %t, 1
+  store i8 %val, i8* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smuloi16_load(i16* %ptr1, i16 %v2, i16* %res) {
+; SDAG-LABEL: smuloi16_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imulw (%rdi), %si
+; SDAG-NEXT:    seto %al
+; SDAG-NEXT:    movw %si, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi16_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imulw (%rdi), %si
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movw %si, (%rdx)
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %v1 = load i16, i16* %ptr1
+  %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
+  %val = extractvalue {i16, i1} %t, 0
+  %obit = extractvalue {i16, i1} %t, 1
+  store i16 %val, i16* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smuloi16_load2(i16 %v1, i16* %ptr2, i16* %res) {
+; SDAG-LABEL: smuloi16_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imulw (%rsi), %di
+; SDAG-NEXT:    seto %al
+; SDAG-NEXT:    movw %di, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi16_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imulw (%rsi), %di
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movw %di, (%rdx)
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %v2 = load i16, i16* %ptr2
+  %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
+  %val = extractvalue {i16, i1} %t, 0
+  %obit = extractvalue {i16, i1} %t, 1
+  store i16 %val, i16* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smuloi32_load(i32* %ptr1, i32 %v2, i32* %res) {
+; SDAG-LABEL: smuloi32_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imull (%rdi), %esi
+; SDAG-NEXT:    seto %al
+; SDAG-NEXT:    movl %esi, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi32_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imull (%rdi), %esi
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movl %esi, (%rdx)
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %v1 = load i32, i32* %ptr1
+  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smuloi32_load2(i32 %v1, i32* %ptr2, i32* %res) {
+; SDAG-LABEL: smuloi32_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imull (%rsi), %edi
+; SDAG-NEXT:    seto %al
+; SDAG-NEXT:    movl %edi, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi32_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imull (%rsi), %edi
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movl %edi, (%rdx)
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %v2 = load i32, i32* %ptr2
+  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smuloi64_load(i64* %ptr1, i64 %v2, i64* %res) {
+; SDAG-LABEL: smuloi64_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imulq (%rdi), %rsi
+; SDAG-NEXT:    seto %al
+; SDAG-NEXT:    movq %rsi, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi64_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imulq (%rdi), %rsi
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movq %rsi, (%rdx)
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %v1 = load i64, i64* %ptr1
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smuloi64_load2(i64 %v1, i64* %ptr2, i64* %res) {
+; SDAG-LABEL: smuloi64_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    imulq (%rsi), %rdi
+; SDAG-NEXT:    seto %al
+; SDAG-NEXT:    movq %rdi, (%rdx)
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: smuloi64_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    imulq (%rsi), %rdi
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movq %rdi, (%rdx)
+; FAST-NEXT:    andb $1, %al
+; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    retq
+  %v2 = load i64, i64* %ptr2
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi8_load(i8* %ptr1, i8 %v2, i8* %res) {
+; SDAG-LABEL: umuloi8_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %esi, %eax
+; SDAG-NEXT:    ## kill: def $al killed $al killed $eax
+; SDAG-NEXT:    mulb (%rdi)
+; SDAG-NEXT:    seto %cl
+; SDAG-NEXT:    movb %al, (%rdx)
+; SDAG-NEXT:    movl %ecx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi8_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movb (%rdi), %al
+; FAST-NEXT:    mulb %sil
+; FAST-NEXT:    seto %cl
+; FAST-NEXT:    movb %al, (%rdx)
+; FAST-NEXT:    andb $1, %cl
+; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    retq
+  %v1 = load i8, i8* %ptr1
+  %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
+  %val = extractvalue {i8, i1} %t, 0
+  %obit = extractvalue {i8, i1} %t, 1
+  store i8 %val, i8* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi8_load2(i8 %v1, i8* %ptr2, i8* %res) {
+; SDAG-LABEL: umuloi8_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movl %edi, %eax
+; SDAG-NEXT:    ## kill: def $al killed $al killed $eax
+; SDAG-NEXT:    mulb (%rsi)
+; SDAG-NEXT:    seto %cl
+; SDAG-NEXT:    movb %al, (%rdx)
+; SDAG-NEXT:    movl %ecx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi8_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movl %edi, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
+; FAST-NEXT:    mulb (%rsi)
+; FAST-NEXT:    seto %cl
+; FAST-NEXT:    movb %al, (%rdx)
+; FAST-NEXT:    andb $1, %cl
+; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    retq
+  %v2 = load i8, i8* %ptr2
+  %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
+  %val = extractvalue {i8, i1} %t, 0
+  %obit = extractvalue {i8, i1} %t, 1
+  store i8 %val, i8* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi16_load(i16* %ptr1, i16 %v2, i16* %res) {
+; SDAG-LABEL: umuloi16_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movq %rdx, %rcx
+; SDAG-NEXT:    movl %esi, %eax
+; SDAG-NEXT:    ## kill: def $ax killed $ax killed $eax
+; SDAG-NEXT:    mulw (%rdi)
+; SDAG-NEXT:    seto %dl
+; SDAG-NEXT:    movw %ax, (%rcx)
+; SDAG-NEXT:    movl %edx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi16_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movq %rdx, %rcx
+; FAST-NEXT:    movzwl (%rdi), %eax
+; FAST-NEXT:    mulw %si
+; FAST-NEXT:    seto %dl
+; FAST-NEXT:    movw %ax, (%rcx)
+; FAST-NEXT:    andb $1, %dl
+; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    retq
+  %v1 = load i16, i16* %ptr1
+  %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
+  %val = extractvalue {i16, i1} %t, 0
+  %obit = extractvalue {i16, i1} %t, 1
+  store i16 %val, i16* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi16_load2(i16 %v1, i16* %ptr2, i16* %res) {
+; SDAG-LABEL: umuloi16_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movq %rdx, %rcx
+; SDAG-NEXT:    movl %edi, %eax
+; SDAG-NEXT:    ## kill: def $ax killed $ax killed $eax
+; SDAG-NEXT:    mulw (%rsi)
+; SDAG-NEXT:    seto %dl
+; SDAG-NEXT:    movw %ax, (%rcx)
+; SDAG-NEXT:    movl %edx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi16_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movq %rdx, %rcx
+; FAST-NEXT:    movl %edi, %eax
+; FAST-NEXT:    ## kill: def $ax killed $ax killed $eax
+; FAST-NEXT:    mulw (%rsi)
+; FAST-NEXT:    seto %dl
+; FAST-NEXT:    movw %ax, (%rcx)
+; FAST-NEXT:    andb $1, %dl
+; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    retq
+  %v2 = load i16, i16* %ptr2
+  %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
+  %val = extractvalue {i16, i1} %t, 0
+  %obit = extractvalue {i16, i1} %t, 1
+  store i16 %val, i16* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi32_load(i32* %ptr1, i32 %v2, i32* %res) {
+; SDAG-LABEL: umuloi32_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movq %rdx, %rcx
+; SDAG-NEXT:    movl %esi, %eax
+; SDAG-NEXT:    mull (%rdi)
+; SDAG-NEXT:    seto %dl
+; SDAG-NEXT:    movl %eax, (%rcx)
+; SDAG-NEXT:    movl %edx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi32_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movq %rdx, %rcx
+; FAST-NEXT:    movl (%rdi), %eax
+; FAST-NEXT:    mull %esi
+; FAST-NEXT:    seto %dl
+; FAST-NEXT:    movl %eax, (%rcx)
+; FAST-NEXT:    andb $1, %dl
+; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    retq
+  %v1 = load i32, i32* %ptr1
+  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi32_load2(i32 %v1, i32* %ptr2, i32* %res) {
+; SDAG-LABEL: umuloi32_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movq %rdx, %rcx
+; SDAG-NEXT:    movl %edi, %eax
+; SDAG-NEXT:    mull (%rsi)
+; SDAG-NEXT:    seto %dl
+; SDAG-NEXT:    movl %eax, (%rcx)
+; SDAG-NEXT:    movl %edx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi32_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movq %rdx, %rcx
+; FAST-NEXT:    movl %edi, %eax
+; FAST-NEXT:    mull (%rsi)
+; FAST-NEXT:    seto %dl
+; FAST-NEXT:    movl %eax, (%rcx)
+; FAST-NEXT:    andb $1, %dl
+; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    retq
+  %v2 = load i32, i32* %ptr2
+  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi64_load(i64* %ptr1, i64 %v2, i64* %res) {
+; SDAG-LABEL: umuloi64_load:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movq %rdx, %rcx
+; SDAG-NEXT:    movq %rsi, %rax
+; SDAG-NEXT:    mulq (%rdi)
+; SDAG-NEXT:    seto %dl
+; SDAG-NEXT:    movq %rax, (%rcx)
+; SDAG-NEXT:    movl %edx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi64_load:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movq %rdx, %rcx
+; FAST-NEXT:    movq (%rdi), %rax
+; FAST-NEXT:    mulq %rsi
+; FAST-NEXT:    seto %dl
+; FAST-NEXT:    movq %rax, (%rcx)
+; FAST-NEXT:    andb $1, %dl
+; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    retq
+  %v1 = load i64, i64* %ptr1
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umuloi64_load2(i64 %v1, i64* %ptr2, i64* %res) {
+; SDAG-LABEL: umuloi64_load2:
+; SDAG:       ## %bb.0:
+; SDAG-NEXT:    movq %rdx, %rcx
+; SDAG-NEXT:    movq %rdi, %rax
+; SDAG-NEXT:    mulq (%rsi)
+; SDAG-NEXT:    seto %dl
+; SDAG-NEXT:    movq %rax, (%rcx)
+; SDAG-NEXT:    movl %edx, %eax
+; SDAG-NEXT:    retq
+;
+; FAST-LABEL: umuloi64_load2:
+; FAST:       ## %bb.0:
+; FAST-NEXT:    movq %rdx, %rcx
+; FAST-NEXT:    movq %rdi, %rax
+; FAST-NEXT:    mulq (%rsi)
+; FAST-NEXT:    seto %dl
+; FAST-NEXT:    movq %rax, (%rcx)
+; FAST-NEXT:    andb $1, %dl
+; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    retq
+  %v2 = load i64, i64* %ptr2
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
 declare {i8,  i1} @llvm.smul.with.overflow.i8 (i8,  i8 ) nounwind readnone
 declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16) nounwind readnone
 declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
diff --git a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
index 911ab94..43f5970 100644
--- a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
@@ -5,15 +5,10 @@
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/xop-builtins.c
 
 define <2 x i64> @test_mm_maccs_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maccs_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacssww %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maccs_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacssww %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maccs_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacssww %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %arg2 = bitcast <2 x i64> %a2 to <8 x i16>
@@ -24,15 +19,10 @@
 declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_macc_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_macc_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_macc_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_macc_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %arg2 = bitcast <2 x i64> %a2 to <8 x i16>
@@ -43,15 +33,10 @@
 declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_maccsd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maccsd_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacsswd %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maccsd_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacsswd %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maccsd_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacsswd %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
@@ -62,15 +47,10 @@
 declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_maccd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maccd_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacswd %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maccd_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacswd %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maccd_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacswd %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
@@ -81,15 +61,10 @@
 declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_maccs_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maccs_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacssdd %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maccs_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacssdd %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maccs_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacssdd %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
@@ -100,15 +75,10 @@
 declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_macc_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_macc_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_macc_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_macc_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
@@ -119,15 +89,10 @@
 declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_maccslo_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maccslo_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacssdql %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maccslo_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacssdql %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maccslo_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacssdql %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %arg0, <4 x i32> %arg1, <2 x i64> %a2)
@@ -136,15 +101,10 @@
 declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_macclo_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_macclo_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_macclo_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_macclo_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %arg0, <4 x i32> %arg1, <2 x i64> %a2)
@@ -153,15 +113,10 @@
 declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_maccshi_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maccshi_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacssdqh %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maccshi_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacssdqh %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maccshi_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacssdqh %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %arg0, <4 x i32> %arg1, <2 x i64> %a2)
@@ -170,15 +125,10 @@
 declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_macchi_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_macchi_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_macchi_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_macchi_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %arg0, <4 x i32> %arg1, <2 x i64> %a2)
@@ -187,15 +137,10 @@
 declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_maddsd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maddsd_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmadcsswd %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maddsd_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmadcsswd %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maddsd_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmadcsswd %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
@@ -206,15 +151,10 @@
 declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_maddd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
-; X32-LABEL: test_mm_maddd_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_maddd_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_maddd_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
@@ -225,15 +165,10 @@
 declare <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_haddw_epi8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddw_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddbw %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddw_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddbw %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddw_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddbw %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call <8 x i16> @llvm.x86.xop.vphaddbw(<16 x i8> %arg0)
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -242,15 +177,10 @@
 declare <8 x i16> @llvm.x86.xop.vphaddbw(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_haddd_epi8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddd_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddbd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddd_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddbd %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddd_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddbd %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call <4 x i32> @llvm.x86.xop.vphaddbd(<16 x i8> %arg0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -259,15 +189,10 @@
 declare <4 x i32> @llvm.x86.xop.vphaddbd(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_haddq_epi8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddq_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddbq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddq_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddbq %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddq_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddbq %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8> %arg0)
   ret <2 x i64> %res
@@ -275,15 +200,10 @@
 declare <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_haddd_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddd_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddwd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddd_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddwd %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddd_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddwd %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <4 x i32> @llvm.x86.xop.vphaddwd(<8 x i16> %arg0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -292,15 +212,10 @@
 declare <4 x i32> @llvm.x86.xop.vphaddwd(<8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_haddq_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddq_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddwq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddq_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddwq %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddq_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddwq %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16> %arg0)
   ret <2 x i64> %res
@@ -308,15 +223,10 @@
 declare <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_haddq_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddq_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vphadddq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddq_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vphadddq %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddq_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphadddq %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = call <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32> %arg0)
   ret <2 x i64> %res
@@ -324,15 +234,10 @@
 declare <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_haddw_epu8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddw_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddubw %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddw_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddubw %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddw_epu8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddubw %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call <8 x i16> @llvm.x86.xop.vphaddubw(<16 x i8> %arg0)
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -341,15 +246,10 @@
 declare <8 x i16> @llvm.x86.xop.vphaddubw(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_haddd_epu8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddd_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddubd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddd_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddubd %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddd_epu8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddubd %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call <4 x i32> @llvm.x86.xop.vphaddubd(<16 x i8> %arg0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -358,15 +258,10 @@
 declare <4 x i32> @llvm.x86.xop.vphaddubd(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_haddq_epu8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddq_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddubq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddq_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddubq %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddq_epu8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddubq %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8> %arg0)
   ret <2 x i64> %res
@@ -374,15 +269,10 @@
 declare <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_haddd_epu16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddd_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    vphadduwd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddd_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    vphadduwd %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddd_epu16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphadduwd %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <4 x i32> @llvm.x86.xop.vphadduwd(<8 x i16> %arg0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -392,15 +282,10 @@
 
 
 define <2 x i64> @test_mm_haddq_epu16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddq_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    vphadduwq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddq_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    vphadduwq %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddq_epu16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphadduwq %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16> %arg0)
   ret <2 x i64> %res
@@ -408,15 +293,10 @@
 declare <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_haddq_epu32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_haddq_epu32:
-; X32:       # %bb.0:
-; X32-NEXT:    vphaddudq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_haddq_epu32:
-; X64:       # %bb.0:
-; X64-NEXT:    vphaddudq %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_haddq_epu32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphaddudq %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = call <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32> %arg0)
   ret <2 x i64> %res
@@ -424,15 +304,10 @@
 declare <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_hsubw_epi8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_hsubw_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vphsubbw %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_hsubw_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vphsubbw %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_hsubw_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphsubbw %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call <8 x i16> @llvm.x86.xop.vphsubbw(<16 x i8> %arg0)
   %bc = bitcast <8 x i16> %res to <2 x i64>
@@ -441,15 +316,10 @@
 declare <8 x i16> @llvm.x86.xop.vphsubbw(<16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_hsubd_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_hsubd_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vphsubwd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_hsubd_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vphsubwd %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_hsubd_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphsubwd %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %res = call <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16> %arg0)
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -458,15 +328,10 @@
 declare <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_hsubq_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_hsubq_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vphsubdq %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_hsubq_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vphsubdq %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_hsubq_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vphsubdq %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %res = call <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32> %arg0)
   ret <2 x i64> %res
@@ -474,63 +339,39 @@
 declare <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_cmov_si128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
-; X32-LABEL: test_mm_cmov_si128:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; X32-NEXT:    vpxor %xmm3, %xmm2, %xmm3
-; X32-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; X32-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; X32-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_cmov_si128:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; X64-NEXT:    vpxor %xmm3, %xmm2, %xmm3
-; X64-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; X64-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; X64-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_cmov_si128:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; ALL-NEXT:    vpxor %xmm3, %xmm2, %xmm3
+; ALL-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; ALL-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; ALL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
 
 define <4 x i64> @test_mm256_cmov_si256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
-; X32-LABEL: test_mm256_cmov_si256:
-; X32:       # %bb.0:
-; X32-NEXT:    vxorps %xmm3, %xmm3, %xmm3
-; X32-NEXT:    vcmptrueps %ymm3, %ymm3, %ymm3
-; X32-NEXT:    vxorps %ymm3, %ymm2, %ymm3
-; X32-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; X32-NEXT:    vandps %ymm3, %ymm1, %ymm1
-; X32-NEXT:    vorps %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm256_cmov_si256:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorps %xmm3, %xmm3, %xmm3
-; X64-NEXT:    vcmptrueps %ymm3, %ymm3, %ymm3
-; X64-NEXT:    vxorps %ymm3, %ymm2, %ymm3
-; X64-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; X64-NEXT:    vandps %ymm3, %ymm1, %ymm1
-; X64-NEXT:    vorps %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm256_cmov_si256:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; ALL-NEXT:    vcmptrueps %ymm3, %ymm3, %ymm3
+; ALL-NEXT:    vxorps %ymm3, %ymm2, %ymm3
+; ALL-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; ALL-NEXT:    vandps %ymm3, %ymm1, %ymm1
+; ALL-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2)
   ret <4 x i64> %res
 }
 declare <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64>, <4 x i64>, <4 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_perm_epi8(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
-; X32-LABEL: test_mm_perm_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpperm %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_perm_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vpperm %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_perm_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpperm %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %arg2 = bitcast <2 x i64> %a2 to <16 x i8>
@@ -541,150 +382,101 @@
 declare <16 x i8> @llvm.x86.xop.vpperm(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_rot_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_rot_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotb %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_rot_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotb %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_rot_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
-  %res = call <16 x i8> @llvm.x86.xop.vprotb(<16 x i8> %arg0, <16 x i8> %arg1)
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %arg0, <16 x i8> %arg0, <16 x i8> %arg1)
   %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <16 x i8> @llvm.x86.xop.vprotb(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_rot_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_rot_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotw %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_rot_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotw %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_rot_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
-  %res = call <8 x i16> @llvm.x86.xop.vprotw(<8 x i16> %arg0, <8 x i16> %arg1)
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %arg0, <8 x i16> %arg0, <8 x i16> %arg1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <8 x i16> @llvm.x86.xop.vprotw(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_rot_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_rot_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotd %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_rot_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_rot_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
-  %res = call <4 x i32> @llvm.x86.xop.vprotd(<4 x i32> %arg0, <4 x i32> %arg1)
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %arg0, <4 x i32> %arg0, <4 x i32> %arg1)
   %bc = bitcast <4 x i32> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <4 x i32> @llvm.x86.xop.vprotd(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_rot_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_rot_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_rot_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
-  %res = call <2 x i64> @llvm.x86.xop.vprotq(<2 x i64> %a0, <2 x i64> %a1)
+; ALL-LABEL: test_mm_rot_epi64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %a0, <2 x i64> %a0, <2 x i64> %a1)
   ret <2 x i64> %res
 }
-declare <2 x i64> @llvm.x86.xop.vprotq(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_roti_epi8(<2 x i64> %a0) {
-; X32-LABEL: test_mm_roti_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotb $1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_roti_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotb $1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_roti_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotb $1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
-  %res = call <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8> %arg0, i8 1)
+  %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %arg0, <16 x i8> %arg0, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_roti_epi16(<2 x i64> %a0) {
-; X32-LABEL: test_mm_roti_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotw $50, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_roti_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotw $50, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_roti_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotw $2, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
-  %res = call <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16> %arg0, i8 50)
+  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %arg0, <8 x i16> %arg0, <8 x i16> <i16 50, i16 50, i16 50, i16 50, i16 50, i16 50, i16 50, i16 50>)
   %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_roti_epi32(<2 x i64> %a0) {
-; X32-LABEL: test_mm_roti_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotd $226, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_roti_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotd $226, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_roti_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotd $2, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
-  %res = call <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32> %arg0, i8 -30)
+  %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %arg0, <4 x i32> %arg0, <4 x i32> <i32 -30, i32 -30, i32 -30, i32 -30>)
   %bc = bitcast <4 x i32> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_roti_epi64(<2 x i64> %a0) {
-; X32-LABEL: test_mm_roti_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    vprotq $100, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_roti_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    vprotq $100, %xmm0, %xmm0
-; X64-NEXT:    retq
-  %res = call <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64> %a0, i8 100)
+; ALL-LABEL: test_mm_roti_epi64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vprotq $36, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
+  %res = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %a0, <2 x i64> %a0, <2 x i64> <i64 100, i64 100>)
   ret <2 x i64> %res
 }
-declare <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_shl_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_shl_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shl_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_shl_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8> %arg0, <16 x i8> %arg1)
@@ -694,15 +486,10 @@
 declare <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_shl_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_shl_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shl_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_shl_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -712,15 +499,10 @@
 declare <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_shl_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_shl_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshld %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shl_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshld %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_shl_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshld %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.xop.vpshld(<4 x i32> %arg0, <4 x i32> %arg1)
@@ -730,30 +512,20 @@
 declare <4 x i32> @llvm.x86.xop.vpshld(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_shl_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_shl_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshlq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_shl_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshlq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_shl_epi64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshlq %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64> %a0, <2 x i64> %a1)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_sha_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sha_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshab %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sha_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshab %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_sha_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshab %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.xop.vpshab(<16 x i8> %arg0, <16 x i8> %arg1)
@@ -763,15 +535,10 @@
 declare <16 x i8> @llvm.x86.xop.vpshab(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_sha_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sha_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshaw %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sha_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshaw %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_sha_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshaw %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16> %arg0, <8 x i16> %arg1)
@@ -781,15 +548,10 @@
 declare <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_sha_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sha_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshad %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sha_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshad %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_sha_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshad %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.xop.vpshad(<4 x i32> %arg0, <4 x i32> %arg1)
@@ -799,30 +561,20 @@
 declare <4 x i32> @llvm.x86.xop.vpshad(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_mm_sha_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_sha_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_sha_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_sha_epi64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64> %a0, <2 x i64> %a1)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <2 x i64> @test_mm_com_epu8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epu8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltub %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epu8:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltub %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epu8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltub %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %arg0, <16 x i8> %arg1, i8 0)
@@ -832,15 +584,10 @@
 declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_com_epu16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epu16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltuw %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epu16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltuw %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epu16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltuw %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %arg0, <8 x i16> %arg1, i8 0)
@@ -850,15 +597,10 @@
 declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_com_epu32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epu32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltud %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epu32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltud %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epu32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltud %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %arg0, <4 x i32> %arg1, i8 0)
@@ -868,30 +610,20 @@
 declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_com_epu64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epu64:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epu64:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epu64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64>, <2 x i64>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_com_epi8(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epi8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltb %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epi8:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltb %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epi8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltb %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
   %res = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %arg0, <16 x i8> %arg1, i8 0)
@@ -901,15 +633,10 @@
 declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_com_epi16(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epi16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltw %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epi16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltw %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epi16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltw %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
   %res = call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %arg0, <8 x i16> %arg1, i8 0)
@@ -919,15 +646,10 @@
 declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_com_epi32(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltd %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epi32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epi32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltd %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
   %res = call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %arg0, <4 x i32> %arg1, i8 0)
@@ -937,60 +659,40 @@
 declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readnone
 
 define <2 x i64> @test_mm_com_epi64(<2 x i64> %a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_com_epi64:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcomltq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_com_epi64:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcomltq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_com_epi64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcomltq %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
   ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readnone
 
 define <2 x double> @test_mm_permute2_pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> %a2) {
-; X32-LABEL: test_mm_permute2_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermil2pd $0, %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_permute2_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermil2pd $0, %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_permute2_pd:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpermil2pd $0, %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> %a2, i8 0)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double>, <2 x double>, <2 x i64>, i8) nounwind readnone
 
 define <4 x double> @test_mm256_permute2_pd(<4 x double> %a0, <4 x double> %a1, <4 x i64> %a2) {
-; X32-LABEL: test_mm256_permute2_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermil2pd $0, %ymm2, %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm256_permute2_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermil2pd $0, %ymm2, %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm256_permute2_pd:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpermil2pd $0, %ymm2, %ymm1, %ymm0, %ymm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> %a2, i8 0)
   ret <4 x double> %res
 }
 declare <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double>, <4 x double>, <4 x i64>, i8) nounwind readnone
 
 define <4 x float> @test_mm_permute2_ps(<4 x float> %a0, <4 x float> %a1, <2 x i64> %a2) {
-; X32-LABEL: test_mm_permute2_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermil2ps $0, %xmm2, %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_permute2_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermil2ps $0, %xmm2, %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_permute2_ps:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpermil2ps $0, %xmm2, %xmm1, %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
   %res = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x i32> %arg2, i8 0)
   ret <4 x float> %res
@@ -998,15 +700,10 @@
 declare <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float>, <4 x float>, <4 x i32>, i8) nounwind readnone
 
 define <8 x float> @test_mm256_permute2_ps(<8 x float> %a0, <8 x float> %a1, <4 x i64> %a2) {
-; X32-LABEL: test_mm256_permute2_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    vpermil2ps $0, %ymm2, %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm256_permute2_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    vpermil2ps $0, %ymm2, %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm256_permute2_ps:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpermil2ps $0, %ymm2, %ymm1, %ymm0, %ymm0
+; ALL-NEXT:    ret{{[l|q]}}
   %arg2 = bitcast <4 x i64> %a2 to <8 x i32>
   %res = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x i32> %arg2, i8 0)
   ret <8 x float> %res
@@ -1014,108 +711,61 @@
 declare <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float>, <8 x float>, <8 x i32>, i8) nounwind readnone
 
 define <4 x float> @test_mm_frcz_ss(<4 x float> %a0) {
-; X32-LABEL: test_mm_frcz_ss:
-; X32:       # %bb.0:
-; X32-NEXT:    vfrczss %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_frcz_ss:
-; X64:       # %bb.0:
-; X64-NEXT:    vfrczss %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_frcz_ss:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vfrczss %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %a0)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float>) nounwind readnone
 
 define <2 x double> @test_mm_frcz_sd(<2 x double> %a0) {
-; X32-LABEL: test_mm_frcz_sd:
-; X32:       # %bb.0:
-; X32-NEXT:    vfrczsd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_frcz_sd:
-; X64:       # %bb.0:
-; X64-NEXT:    vfrczsd %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_frcz_sd:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vfrczsd %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %a0)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double>) nounwind readnone
 
 define <4 x float> @test_mm_frcz_ps(<4 x float> %a0) {
-; X32-LABEL: test_mm_frcz_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    vfrczps %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_frcz_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    vfrczps %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_frcz_ps:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vfrczps %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float> %a0)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float>) nounwind readnone
 
 define <2 x double> @test_mm_frcz_pd(<2 x double> %a0) {
-; X32-LABEL: test_mm_frcz_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    vfrczpd %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm_frcz_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    vfrczpd %xmm0, %xmm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm_frcz_pd:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vfrczpd %xmm0, %xmm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double> %a0)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double>) nounwind readnone
 
 define <8 x float> @test_mm256_frcz_ps(<8 x float> %a0) {
-; X32-LABEL: test_mm256_frcz_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    vfrczps %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm256_frcz_ps:
-; X64:       # %bb.0:
-; X64-NEXT:    vfrczps %ymm0, %ymm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm256_frcz_ps:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vfrczps %ymm0, %ymm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float> %a0)
   ret <8 x float> %res
 }
 declare <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float>) nounwind readnone
 
 define <4 x double> @test_mm256_frcz_pd(<4 x double> %a0) {
-; X32-LABEL: test_mm256_frcz_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    vfrczpd %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_mm256_frcz_pd:
-; X64:       # %bb.0:
-; X64-NEXT:    vfrczpd %ymm0, %ymm0
-; X64-NEXT:    retq
+; ALL-LABEL: test_mm256_frcz_pd:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vfrczpd %ymm0, %ymm0
+; ALL-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double> %a0)
   ret <4 x double> %res
 }
 declare <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double>) nounwind readnone
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
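Editorial note on the hunks above: the separate X32/X64 check prefixes collapse into a single ALL prefix because the two RUN configurations only diverged in the return instruction, and FileCheck's inline regex syntax absorbs that difference. A minimal sketch of the idiom (label name hypothetical): the {{...}} delimiters enclose a regex, and [l|q] is a character class matching 'l', '|', or 'q' (a common, slightly loose spelling of the tighter [lq]).

; ALL-LABEL: sketch:
; ALL:       # %bb.0:
; ALL-NEXT:    ret{{[l|q]}}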
diff --git a/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll b/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
index c549336..0f34836 100644
--- a/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
+++ b/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
@@ -764,3 +764,82 @@
 }
 declare <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64>, <4 x i64>, <4 x i64>) nounwind readnone
 
+define <16 x i8> @test_int_x86_xop_vprotb(<16 x i8> %a0, <16 x i8> %a1) {
+; CHECK-LABEL: test_int_x86_xop_vprotb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x i8> @llvm.x86.xop.vprotb(<16 x i8> %a0, <16 x i8> %a1) ;
+  ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.x86.xop.vprotb(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @test_int_x86_xop_vprotd(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: test_int_x86_xop_vprotd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i32> @llvm.x86.xop.vprotd(<4 x i32> %a0, <4 x i32> %a1) ;
+  ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.xop.vprotd(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @test_int_x86_xop_vprotq(<2 x i64> %a0, <2 x i64> %a1) {
+; CHECK-LABEL: test_int_x86_xop_vprotq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x i64> @llvm.x86.xop.vprotq(<2 x i64> %a0, <2 x i64> %a1) ;
+  ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.xop.vprotq(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @test_int_x86_xop_vprotw(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: test_int_x86_xop_vprotw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x i16> @llvm.x86.xop.vprotw(<8 x i16> %a0, <8 x i16> %a1) ;
+  ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.xop.vprotw(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @test_int_x86_xop_vprotbi(<16 x i8> %a0) {
+; CHECK-LABEL: test_int_x86_xop_vprotbi:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotb $1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8> %a0, i8 1) ;
+  ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8>, i8) nounwind readnone
+
+define <4 x i32> @test_int_x86_xop_vprotdi(<4 x i32> %a0) {
+; CHECK-LABEL: test_int_x86_xop_vprotdi:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotd $30, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32> %a0, i8 -2) ;
+  ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32>, i8) nounwind readnone
+
+define <2 x i64> @test_int_x86_xop_vprotqi(<2 x i64> %a0) {
+; CHECK-LABEL: test_int_x86_xop_vprotqi:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotq $3, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64> %a0, i8 3) ;
+  ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64>, i8) nounwind readnone
+
+define <8 x i16> @test_int_x86_xop_vprotwi(<8 x i16> %a0) {
+; CHECK-LABEL: test_int_x86_xop_vprotwi:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vprotw $12, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16> %a0, i8 -4) ;
+  ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16>, i8) nounwind readnone
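Editorial note: these tests move into the -upgrade file because the llvm.x86.xop.vprot* intrinsics are now auto-upgraded to the generic llvm.fshl funnel shift; fshl with both value operands equal is a per-lane rotate-left. The upgrade also explains the new immediates: the rotate amount is reduced modulo the element width, so vprotdi with i8 -2 (254) now checks $30 (254 mod 32) and vprotwi with i8 -4 (252) checks $12 (252 mod 16). A minimal sketch of the equivalence, assuming an XOP-capable target (function name hypothetical):

define <4 x i32> @rot_by_30(<4 x i32> %v) {
; fshl(x, x, n) is a per-lane rotate-left by n (mod 32 for i32 lanes),
; which an XOP target can lower to vprotd $30.
  %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %v, <4 x i32> %v, <4 x i32> <i32 30, i32 30, i32 30, i32 30>)
  ret <4 x i32> %r
}
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)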
diff --git a/test/CodeGen/X86/xop-intrinsics-x86_64.ll b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
index d4c5420..80a3c5b 100644
--- a/test/CodeGen/X86/xop-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
@@ -450,86 +450,6 @@
 }
 declare <16 x i8> @llvm.x86.xop.vpperm(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
 
-define <16 x i8> @test_int_x86_xop_vprotb(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_int_x86_xop_vprotb:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotb %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <16 x i8> @llvm.x86.xop.vprotb(<16 x i8> %a0, <16 x i8> %a1) ;
-  ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.xop.vprotb(<16 x i8>, <16 x i8>) nounwind readnone
-
-define <4 x i32> @test_int_x86_xop_vprotd(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_int_x86_xop_vprotd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <4 x i32> @llvm.x86.xop.vprotd(<4 x i32> %a0, <4 x i32> %a1) ;
-  ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.xop.vprotd(<4 x i32>, <4 x i32>) nounwind readnone
-
-define <2 x i64> @test_int_x86_xop_vprotq(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_int_x86_xop_vprotq:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotq %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <2 x i64> @llvm.x86.xop.vprotq(<2 x i64> %a0, <2 x i64> %a1) ;
-  ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.xop.vprotq(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i16> @test_int_x86_xop_vprotw(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_int_x86_xop_vprotw:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotw %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <8 x i16> @llvm.x86.xop.vprotw(<8 x i16> %a0, <8 x i16> %a1) ;
-  ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.xop.vprotw(<8 x i16>, <8 x i16>) nounwind readnone
-
-define <16 x i8> @test_int_x86_xop_vprotbi(<16 x i8> %a0) {
-; CHECK-LABEL: test_int_x86_xop_vprotbi:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotb $1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8> %a0, i8 1) ;
-  ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8>, i8) nounwind readnone
-
-define <4 x i32> @test_int_x86_xop_vprotdi(<4 x i32> %a0) {
-; CHECK-LABEL: test_int_x86_xop_vprotdi:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotd $254, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32> %a0, i8 -2) ;
-  ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32>, i8) nounwind readnone
-
-define <2 x i64> @test_int_x86_xop_vprotqi(<2 x i64> %a0) {
-; CHECK-LABEL: test_int_x86_xop_vprotqi:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotq $3, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64> %a0, i8 3) ;
-  ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64>, i8) nounwind readnone
-
-define <8 x i16> @test_int_x86_xop_vprotwi(<8 x i16> %a0) {
-; CHECK-LABEL: test_int_x86_xop_vprotwi:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vprotw $252, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16> %a0, i8 -4) ;
-  ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16>, i8) nounwind readnone
-
 define <16 x i8> @test_int_x86_xop_vpshab(<16 x i8> %a0, <16 x i8> %a1) {
 ; CHECK-LABEL: test_int_x86_xop_vpshab:
 ; CHECK:       # %bb.0:
diff --git a/test/CodeGen/X86/xor.ll b/test/CodeGen/X86/xor.ll
index 65e5e83..f86b581 100644
--- a/test/CodeGen/X86/xor.ll
+++ b/test/CodeGen/X86/xor.ll
@@ -420,26 +420,14 @@
 ; X64-LIN-LABEL: PR17487:
 ; X64-LIN:       # %bb.0:
 ; X64-LIN-NEXT:    movd %edi, %xmm0
-; X64-LIN-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; X64-LIN-NEXT:    pandn {{.*}}(%rip), %xmm0
-; X64-LIN-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X64-LIN-NEXT:    movq %xmm0, %rcx
-; X64-LIN-NEXT:    xorl %eax, %eax
-; X64-LIN-NEXT:    cmpq $1, %rcx
-; X64-LIN-NEXT:    setne %al
+; X64-LIN-NEXT:    pextrw $0, %xmm0, %eax
+; X64-LIN-NEXT:    andl $1, %eax
 ; X64-LIN-NEXT:    retq
 ;
 ; X64-WIN-LABEL: PR17487:
 ; X64-WIN:       # %bb.0:
+; X64-WIN-NEXT:    andb $1, %cl
 ; X64-WIN-NEXT:    movzbl %cl, %eax
-; X64-WIN-NEXT:    movd %eax, %xmm0
-; X64-WIN-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; X64-WIN-NEXT:    pandn __xmm@{{.*}}(%rip), %xmm0
-; X64-WIN-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X64-WIN-NEXT:    movq %xmm0, %rcx
-; X64-WIN-NEXT:    xorl %eax, %eax
-; X64-WIN-NEXT:    cmpq $1, %rcx
-; X64-WIN-NEXT:    setne %al
 ; X64-WIN-NEXT:    retq
   %tmp = insertelement <2 x i1> undef, i1 %tobool, i32 1
   %tmp1 = zext <2 x i1> %tmp to <2 x i64>
diff --git a/test/CodeGen/X86/zext-logicop-shift-load.ll b/test/CodeGen/X86/zext-logicop-shift-load.ll
index 26182fe..319c177 100644
--- a/test/CodeGen/X86/zext-logicop-shift-load.ll
+++ b/test/CodeGen/X86/zext-logicop-shift-load.ll
@@ -1,12 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
 
 
 define i64 @test1(i8* %data) {
 ; CHECK-LABEL: test1:
-; CHECK:       movzbl
-; CHECK-NEXT:  shlq
-; CHECK-NEXT:  andl
-; CHECK-NEXT:  retq
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movzbl (%rdi), %eax
+; CHECK-NEXT:    shlq $2, %rax
+; CHECK-NEXT:    andl $60, %eax
+; CHECK-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = shl i8 %bf.load, 2
@@ -17,10 +19,11 @@
 
 define i8* @test2(i8* %data) {
 ; CHECK-LABEL: test2:
-; CHECK:       movzbl
-; CHECK-NEXT:  andl
-; CHECK-NEXT:  leaq
-; CHECK-NEXT:  retq
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movzbl (%rdi), %eax
+; CHECK-NEXT:    andl $15, %eax
+; CHECK-NEXT:    leaq (%rdi,%rax,4), %rax
+; CHECK-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = shl i8 %bf.load, 2
@@ -33,11 +36,12 @@
 ; If the shift op is SHL, the logic op can only be AND.
 define i64 @test3(i8* %data) {
 ; CHECK-LABEL: test3:
-; CHECK:       movb
-; CHECK-NEXT:  shlb
-; CHECK-NEXT:  xorb
-; CHECK-NEXT:  movzbl
-; CHECK-NEXT:  retq
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movb (%rdi), %al
+; CHECK-NEXT:    shlb $2, %al
+; CHECK-NEXT:    xorb $60, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = shl i8 %bf.load, 2
@@ -48,10 +52,11 @@
 
 define i64 @test4(i8* %data) {
 ; CHECK-LABEL: test4:
-; CHECK:       movzbl
-; CHECK-NEXT:  shrq
-; CHECK-NEXT:  andl
-; CHECK-NEXT:  retq
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movzbl (%rdi), %eax
+; CHECK-NEXT:    shrq $2, %rax
+; CHECK-NEXT:    andl $60, %eax
+; CHECK-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = lshr i8 %bf.load, 2
@@ -62,10 +67,11 @@
 
 define i64 @test5(i8* %data) {
 ; CHECK-LABEL: test5:
-; CHECK:       movzbl
-; CHECK-NEXT:  shrq
-; CHECK-NEXT:  xorq
-; CHECK-NEXT:  retq
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movzbl (%rdi), %eax
+; CHECK-NEXT:    shrq $2, %rax
+; CHECK-NEXT:    xorq $60, %rax
+; CHECK-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = lshr i8 %bf.load, 2
@@ -76,10 +82,11 @@
 
 define i64 @test6(i8* %data) {
 ; CHECK-LABEL: test6:
-; CHECK:       movzbl
-; CHECK-NEXT:  shrq
-; CHECK-NEXT:  orq
-; CHECK-NEXT:  retq
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movzbl (%rdi), %eax
+; CHECK-NEXT:    shrq $2, %rax
+; CHECK-NEXT:    orq $60, %rax
+; CHECK-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = lshr i8 %bf.load, 2
@@ -91,10 +98,13 @@
 ; Load is folded with sext.
 define i64 @test8(i8* %data) {
 ; CHECK-LABEL: test8:
-; CHECK:       movsbl
-; CHECK-NEXT:  movzwl
-; CHECK-NEXT:  shrl
-; CHECK-NEXT:  orl
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movsbl (%rdi), %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    shrl $2, %eax
+; CHECK-NEXT:    orl $60, %eax
+; CHECK-NEXT:    movl %eax, %eax
+; CHECK-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %ext = sext i8 %bf.load to i16
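Editorial note on what these autogenerated checks pin down: hoisting the zext above the shift/logic op lets the i8 load fold into a single movzbl and keeps the arithmetic in full-width registers. The restriction stated before test3 ("if the shift op is SHL, the logic op can only be AND") follows because an i8 shl discards bits shifted past bit 7 while the widened 64-bit shift keeps them; an AND mask can still clear those extra bits, but OR/XOR would observe them, which is why test3 stays in 8-bit ops (shlb/xorb/movzbl). A minimal recap of the foldable shape, under those assumptions (function name hypothetical):

define i64 @widen_and(i8* %p) {
; The combiner can rewrite this as and(shl(zext %b, 2), 60): the load then
; folds into movzbl, and the mask clears whatever the wide shift keeps that
; the i8 shl would have discarded (cf. test1's movzbl/shlq/andl).
  %b = load i8, i8* %p
  %s = shl i8 %b, 2
  %z = zext i8 %s to i64
  %m = and i64 %z, 60
  ret i64 %m
}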
diff --git a/test/CodeGen/XCore/epilogue_prologue.ll b/test/CodeGen/XCore/epilogue_prologue.ll
index 30e1bb9..a719441 100644
--- a/test/CodeGen/XCore/epilogue_prologue.ll
+++ b/test/CodeGen/XCore/epilogue_prologue.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=xcore | FileCheck %s
-; RUN: llc < %s -march=xcore -disable-fp-elim | FileCheck %s -check-prefix=CHECKFP
+; RUN: llc < %s -march=xcore -frame-pointer=all | FileCheck %s -check-prefix=CHECKFP
 
 ; When using SP for small frames, we don't need any scratch registers (SR).
 ; When using SP for large frames, we may need two scratch registers.
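Editorial note on the RUN-line churn here and in the tests below: the boolean llc option -disable-fp-elim was replaced by the tri-state -frame-pointer={all,non-leaf,none}, with -disable-fp-elim corresponding to -frame-pointer=all (always keep the frame pointer). The mechanical rewrite, sketched in RUN-line form:

; Old: llc < %s -march=xcore -disable-fp-elim | FileCheck %s -check-prefix=CHECKFP
; New: llc < %s -march=xcore -frame-pointer=all | FileCheck %s -check-prefix=CHECKFP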
diff --git a/test/CodeGen/XCore/llvm-intrinsics.ll b/test/CodeGen/XCore/llvm-intrinsics.ll
index b7868d3..cad5656 100644
--- a/test/CodeGen/XCore/llvm-intrinsics.ll
+++ b/test/CodeGen/XCore/llvm-intrinsics.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=xcore | FileCheck %s
-; RUN: llc < %s -march=xcore -disable-fp-elim | FileCheck %s -check-prefix=CHECKFP
+; RUN: llc < %s -march=xcore -frame-pointer=all | FileCheck %s -check-prefix=CHECKFP
 
 declare i8* @llvm.frameaddress(i32) nounwind readnone
 declare i8* @llvm.returnaddress(i32) nounwind
diff --git a/test/DebugInfo/AArch64/dwarfdump.ll b/test/DebugInfo/AArch64/dwarfdump.ll
index 6ce5f02..956b10d 100644
--- a/test/DebugInfo/AArch64/dwarfdump.ll
+++ b/test/DebugInfo/AArch64/dwarfdump.ll
@@ -14,9 +14,9 @@
 
 ; A couple of ABS64s similarly:
 
-; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000 ".text")
 ; CHECK-4: DW_AT_high_pc [DW_FORM_data4] (0x00000008)
-; CHECK-3: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000008)
+; CHECK-3: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000008 ".text")
 
 define i32 @main() nounwind !dbg !3 {
   ret i32 0, !dbg !8
diff --git a/test/DebugInfo/AArch64/frameindices.ll b/test/DebugInfo/AArch64/frameindices.ll
index 18cd54a..a74e6ba 100644
--- a/test/DebugInfo/AArch64/frameindices.ll
+++ b/test/DebugInfo/AArch64/frameindices.ll
@@ -1,4 +1,4 @@
-; RUN: llc -disable-fp-elim -O0 -fast-isel -filetype=obj < %s | llvm-dwarfdump -v - | FileCheck %s
+; RUN: llc -frame-pointer=all -O0 -fast-isel -filetype=obj < %s | llvm-dwarfdump -v - | FileCheck %s
 ; Test that a variable with multiple entries in the MMI table makes it into the
 ; debug info.
 ;
diff --git a/test/DebugInfo/AArch64/prologue_end.ll b/test/DebugInfo/AArch64/prologue_end.ll
index 0187032..bafbcf7 100644
--- a/test/DebugInfo/AArch64/prologue_end.ll
+++ b/test/DebugInfo/AArch64/prologue_end.ll
@@ -1,4 +1,4 @@
-; RUN: llc -disable-fp-elim -O0 -fast-isel %s -mtriple aarch64-apple-darwin -o - | FileCheck %s
+; RUN: llc -frame-pointer=all -O0 -fast-isel %s -mtriple aarch64-apple-darwin -o - | FileCheck %s
 
 ; int func(void);
 ; void prologue_end_test() {
diff --git a/test/DebugInfo/AArch64/return-address-signing.ll b/test/DebugInfo/AArch64/return-address-signing.ll
new file mode 100644
index 0000000..c679a9d
--- /dev/null
+++ b/test/DebugInfo/AArch64/return-address-signing.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mtriple=aarch64-arm-none-eabi < %s -filetype=obj -o - \
+; RUN:    | llvm-dwarfdump -v - | FileCheck -check-prefix=CHECK %s
+
+;CHECK: CIE
+;CHECK: Augmentation:          "zR"
+define i32 @foo()  "sign-return-address"="all" {
+  ret i32 0
+}
+
+;CHECK: CIE
+;CHECK: Augmentation:          "zRB"
+
+define i32 @bar()  "sign-return-address"="all" "sign-return-address-key"="b_key" {
+  ret i32 0
+}
+
+;CHECK-NOT: CIE
+
+define i32 @baz()  "sign-return-address"="all" nounwind {
+  ret i32 0
+}
+
+;CHECK-NOT: CIE
+
+define i32 @qux()  "sign-return-address"="all" "sign-return-address-key"="b_key" nounwind {
+  ret i32 0
+}
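Editorial note on the new test above: AArch64 functions carrying the "sign-return-address" attribute get pointer-authenticated return addresses, and the unwind info advertises this through the CIE augmentation string; the 'B' character marks frames signed with the B key so an unwinder authenticates with the correct key, while A-key signing leaves the augmentation at the ordinary "zR". The nounwind functions emit no unwind tables at all, hence the CHECK-NOT lines. A minimal attribute sketch (function name hypothetical; "non-leaf" and "a_key" are the other accepted values):

define i32 @signed_nonleaf() "sign-return-address"="non-leaf" "sign-return-address-key"="a_key" {
  ret i32 0
}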
diff --git a/test/DebugInfo/AArch64/tls-at-location.ll b/test/DebugInfo/AArch64/tls-at-location.ll
index 4fbbccf..e0cbe412 100644
--- a/test/DebugInfo/AArch64/tls-at-location.ll
+++ b/test/DebugInfo/AArch64/tls-at-location.ll
@@ -21,7 +21,7 @@
 
 !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = distinct !DIGlobalVariable(name: "var", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 7.0.0 (https://github.com/llvm-mirror/clang.git 43eac1f9d7d2c985831b485d9ccc807416d1cf29) (https://github.com/llvm-mirror/llvm.git d53cdbf4cc5414ea540174a036202c555ce8fc4b)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 7.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "tls-at-location.c", directory: "/home/lliu0/llvm/tls-at-location/DebugInfo/AArch64")
 !4 = !{}
 !5 = !{!0}
@@ -29,7 +29,7 @@
 !7 = !{i32 2, !"Dwarf Version", i32 4}
 !8 = !{i32 2, !"Debug Info Version", i32 3}
 !9 = !{i32 1, !"wchar_size", i32 4}
-!10 = !{!"clang version 7.0.0 (https://github.com/llvm-mirror/clang.git 43eac1f9d7d2c985831b485d9ccc807416d1cf29) (https://github.com/llvm-mirror/llvm.git d53cdbf4cc5414ea540174a036202c555ce8fc4b)"}
+!10 = !{!"clang version 7.0.0"}
 !11 = distinct !DISubprogram(name: "foo", scope: !3, file: !3, line: 3, type: !12, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: false, unit: !2)
 !12 = !DISubroutineType(types: !13)
 !13 = !{!6}
diff --git a/test/DebugInfo/ARM/PR26163.ll b/test/DebugInfo/ARM/PR26163.ll
index 3b3ec9c..5dac6ad 100644
--- a/test/DebugInfo/ARM/PR26163.ll
+++ b/test/DebugInfo/ARM/PR26163.ll
@@ -1,16 +1,17 @@
 ; RUN: llc -filetype=obj -o - < %s | llvm-dwarfdump -v -debug-info - | FileCheck %s
 ;
-; Checks that we're creating two ranges, one that terminates immediately
-; and one that spans the rest of the function. This isn't necessarily the
-; best thing to do here (and also not necessarily correct, since the first
-; one has a bit_piece), but it is what is currently being emitted, any
-; change here needs to be intentional, so the test is very specific.
+; Checks that we're omitting the first range, as it is empty, and that we're
+; emitting one that spans the rest of the function. In this case, the first
+; range, which we omit, describes 8 bytes of the variable using DW_OP_litX,
+; whereas the second one only describes 4 bytes, so clobbering the whole 8-byte
+; fragment with the 4-byte fragment isn't necessarily the best thing to do here,
+; but it is what is currently being emitted. Any change here needs to be
+; intentional, so the test is very specific.
 ;
 ; CHECK: DW_TAG_inlined_subroutine
 ; CHECK: DW_TAG_variable
 ; CHECK:   DW_AT_location [DW_FORM_sec_offset] ({{.*}}
-; CHECK:      [0x00000004, 0x00000004): DW_OP_lit0, DW_OP_stack_value, DW_OP_piece 0x8
-; CHECK:      [0x00000004, 0x00000014): DW_OP_lit0, DW_OP_stack_value, DW_OP_piece 0x4)
+; CHECK-NEXT: [0x00000004, 0x00000014): DW_OP_lit0, DW_OP_stack_value, DW_OP_piece 0x4)
 
 ; Created from the following test case (PR26163) with
 ; clang -cc1 -triple armv4t--freebsd11.0-gnueabi -emit-obj -debug-info-kind=standalone -O2 -x c test.c
@@ -69,7 +70,7 @@
 !llvm.module.flags = !{!22, !23, !24}
 !llvm.ident = !{!25}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0 (https://github.com/llvm-mirror/clang 89dda3855cda574f355e6defa1d77bdae5053994) (llvm/trunk 257891)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "<stdin>", directory: "/home/ubuntu/bugs")
 !2 = !{}
 !4 = distinct !DISubprogram(name: "parse_config_file", scope: !5, file: !5, line: 22, type: !6, isLocal: false, isDefinition: true, scopeLine: 23, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !9)
@@ -93,7 +94,7 @@
 !22 = !{i32 2, !"Debug Info Version", i32 3}
 !23 = !{i32 1, !"wchar_size", i32 4}
 !24 = !{i32 1, !"min_enum_size", i32 4}
-!25 = !{!"clang version 3.9.0 (https://github.com/llvm-mirror/clang 89dda3855cda574f355e6defa1d77bdae5053994) (llvm/trunk 257891)"}
+!25 = !{!"clang version 3.9.0"}
 !26 = !DIExpression()
 !27 = !DILocation(line: 11, scope: !11, inlinedAt: !28)
 !28 = distinct !DILocation(line: 26, scope: !4)
diff --git a/test/DebugInfo/ARM/multiple-constant-uses-drops-dbgloc.ll b/test/DebugInfo/ARM/multiple-constant-uses-drops-dbgloc.ll
index a62bbda..a8e31b0 100644
--- a/test/DebugInfo/ARM/multiple-constant-uses-drops-dbgloc.ll
+++ b/test/DebugInfo/ARM/multiple-constant-uses-drops-dbgloc.ll
@@ -45,7 +45,7 @@
 !9 = !{i32 2, !"Debug Info Version", i32 3}
 !10 = !{i32 1, !"wchar_size", i32 4}
 !11 = !{i32 1, !"min_enum_size", i32 4}
-!12 = !{!"clang version 3.7.0 (http://llvm.org/git/clang.git 9b0abb9df531ef7928c8182120e1869affca17d5) (http://llvm.org/git/llvm.git b1e759524dd94f7ce1e24935daed8383927e96c1)"}
+!12 = !{!"clang version 3.7.0"}
 !13 = distinct !DISubprogram(name: "proc", scope: !3, file: !3, line: 4, type: !14, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: false, unit: !2, retainedNodes: !16)
 !14 = !DISubroutineType(types: !15)
 !15 = !{null}
diff --git a/test/DebugInfo/ARM/prologue_end.ll b/test/DebugInfo/ARM/prologue_end.ll
index 7a4a0c7..2c4922d 100644
--- a/test/DebugInfo/ARM/prologue_end.ll
+++ b/test/DebugInfo/ARM/prologue_end.ll
@@ -1,5 +1,5 @@
-; RUN: llc -disable-fp-elim -O0 %s -mtriple armv7-apple-darwin -o - | FileCheck %s
-; RUN: llc -disable-fp-elim -O0 %s -mtriple thumbv7-apple-darwin -o - | FileCheck %s
+; RUN: llc -frame-pointer=all -O0 %s -mtriple armv7-apple-darwin -o - | FileCheck %s
+; RUN: llc -frame-pointer=all -O0 %s -mtriple thumbv7-apple-darwin -o - | FileCheck %s
 
 ; int func(void);
 ; void prologue_end_test() {
diff --git a/test/DebugInfo/COFF/enum-co.ll b/test/DebugInfo/COFF/enum-co.ll
index 14c1955..edb3ee6 100644
--- a/test/DebugInfo/COFF/enum-co.ll
+++ b/test/DebugInfo/COFF/enum-co.ll
@@ -130,7 +130,7 @@
 !llvm.module.flags = !{!25, !26, !27, !28}
 !llvm.ident = !{!29}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0 (https://github.com/llvm-mirror/clang.git 9884fc1d0881576784e9b50da9eb61a5eb427f1c) (https://github.com/llvm-mirror/llvm.git 33b1a96b81ba4e33cfc4a129ce43b5331e16936b)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
 !1 = !DIFile(filename: "enum-co.cpp", directory: "D:\5Cupstream\5Cllvm\5Ctest\5CDebugInfo\5CCOFF", checksumkind: CSK_MD5, checksum: "2e53b90441669acca735bad28ed3a1ab")
 !2 = !{!3, !8, !13, !18}
 !3 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "Enum", file: !1, line: 4, baseType: !4, size: 32, elements: !5, identifier: ".?AW4Enum@@")
@@ -138,7 +138,7 @@
 !5 = !{!6, !7}
 !6 = !DIEnumerator(name: "ON", value: 0)
 !7 = !DIEnumerator(name: "OFF", value: 1)
-!8 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "EnumClass", file: !1, line: 7, baseType: !4, size: 32, flags: DIFlagFixedEnum, elements: !9, identifier: ".?AW4EnumClass@@")
+!8 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "EnumClass", file: !1, line: 7, baseType: !4, size: 32, flags: DIFlagEnumClass, elements: !9, identifier: ".?AW4EnumClass@@")
 !9 = !{!10, !11, !12}
 !10 = !DIEnumerator(name: "RED", value: 0)
 !11 = !DIEnumerator(name: "BLUE", value: 1)
@@ -159,7 +159,7 @@
 !26 = !{i32 2, !"Debug Info Version", i32 3}
 !27 = !{i32 1, !"wchar_size", i32 2}
 !28 = !{i32 7, !"PIC Level", i32 2}
-!29 = !{!"clang version 8.0.0 (https://github.com/llvm-mirror/clang.git 9884fc1d0881576784e9b50da9eb61a5eb427f1c) (https://github.com/llvm-mirror/llvm.git 33b1a96b81ba4e33cfc4a129ce43b5331e16936b)"}
+!29 = !{!"clang version 8.0.0"}
 !30 = distinct !DISubprogram(name: "Func_Enum", linkageName: "?Func_Enum@@YA?AW4Enum@@AEAW41@@Z", scope: !1, file: !1, line: 5, type: !31, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !17)
 !31 = !DISubroutineType(types: !32)
 !32 = !{!3, !33}
diff --git a/test/DebugInfo/COFF/frameproc-flags.ll b/test/DebugInfo/COFF/frameproc-flags.ll
index a9d0fe0..b7c0592 100644
--- a/test/DebugInfo/COFF/frameproc-flags.ll
+++ b/test/DebugInfo/COFF/frameproc-flags.ll
@@ -62,44 +62,44 @@
 ; }
 ; }
 
-; CHECK-LABEL: S_GPROC32_ID [size = 50] `use_alloca`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 52] `use_alloca`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = VFRAME, param fp reg = EBP
 ; CHECK:   flags = has alloca | secure checks | opt speed
-; CHECK-LABEL: S_GPROC32_ID [size = 51] `call_setjmp`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 52] `call_setjmp`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = NONE, param fp reg = NONE
 ; CHECK:   flags = has setjmp | opt speed
-; CHECK-LABEL: S_GPROC32_ID [size = 53] `use_inlineasm`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 56] `use_inlineasm`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = NONE, param fp reg = NONE
 ; CHECK:   flags = has inline asm | opt speed
-; CHECK-LABEL: S_GPROC32_ID [size = 46] `cpp_eh`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 48] `cpp_eh`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = EBP, param fp reg = EBP
 ; CHECK:   flags = has eh | opt speed
-; CHECK-LABEL: S_GPROC32_ID [size = 50] `use_inline`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 52] `use_inline`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = NONE, param fp reg = NONE
 ; CHECK:   flags = opt speed
 ; CHECK-LABEL: S_LPROC32_ID [size = 56] `is_marked_inline`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = NONE, param fp reg = NONE
 ; CHECK:   flags = marked inline | opt speed
-; CHECK-LABEL: S_GPROC32_ID [size = 43] `seh`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 44] `seh`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = EBP, param fp reg = EBP
 ; CHECK:   flags = has seh | opt speed
-; CHECK-LABEL: S_LPROC32_ID [size = 55] `?filt$0@0@seh@@`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_LPROC32_ID [size = 56] `?filt$0@0@seh@@`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = EBP, param fp reg = EBP
 ; CHECK:   flags = opt speed
-; CHECK-LABEL: S_GPROC32_ID [size = 49] `use_naked`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 52] `use_naked`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = NONE, param fp reg = NONE
 ; CHECK:   flags = has inline asm | naked | opt speed
-; CHECK-LABEL: S_GPROC32_ID [size = 51] `stack_guard`
-; CHECK: S_FRAMEPROC [size = 30]
+; CHECK-LABEL: S_GPROC32_ID [size = 52] `stack_guard`
+; CHECK: S_FRAMEPROC [size = 32]
 ; CHECK:   local fp reg = VFRAME, param fp reg = EBP
 ; CHECK:   flags = secure checks | opt speed
 
@@ -216,7 +216,7 @@
 define internal i32 @"?filt$0@0@seh@@"() #8 !dbg !74 {
 entry:
   %0 = tail call i8* @llvm.frameaddress(i32 1)
-  %1 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (void ()* @seh to i8*), i8* %0)
+  %1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (void ()* @seh to i8*), i8* %0)
   %2 = tail call i8* @llvm.localrecover(i8* bitcast (void ()* @seh to i8*), i8* %1, i32 0)
   %__exception_code = bitcast i8* %2 to i32*
   %3 = getelementptr inbounds i8, i8* %0, i32 -20, !dbg !76
@@ -233,7 +233,7 @@
 declare i8* @llvm.frameaddress(i32) #9
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*) #9
+declare i8* @llvm.eh.recoverfp(i8*, i8*) #9
 
 ; Function Attrs: nounwind readnone
 declare i8* @llvm.localrecover(i8*, i8*, i32) #9
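The hunk above also picks up the rename of the x86-only llvm.x86.seh.recoverfp
intrinsic to the target-neutral llvm.eh.recoverfp; the two operands (parent
function and recovered frame pointer) are unchanged. A minimal sketch of a
filter-style caller, assuming the typed-pointer IR used throughout these tests:

declare i8* @llvm.frameaddress(i32)
declare i8* @llvm.eh.recoverfp(i8*, i8*)

define void @parent() {
  ret void
}

define i8* @filter() {
entry:
  ; Frame pointer of the immediate caller (the parent establisher frame).
  %fp = tail call i8* @llvm.frameaddress(i32 1)
  ; Map it back to @parent's frame so its locals can be recovered from it.
  %parentfp = tail call i8* @llvm.eh.recoverfp(i8* bitcast (void ()* @parent to i8*), i8* %fp)
  ret i8* %parentfp
}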
diff --git a/test/DebugInfo/COFF/function-options.ll b/test/DebugInfo/COFF/function-options.ll
index 57d6447..98378f2 100644
--- a/test/DebugInfo/COFF/function-options.ll
+++ b/test/DebugInfo/COFF/function-options.ll
@@ -473,14 +473,14 @@
 !llvm.module.flags = !{!3, !4, !5, !6}
 !llvm.ident = !{!7}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0 (https://github.com/llvm-mirror/clang.git 9884fc1d0881576784e9b50da9eb61a5eb427f1c) (https://github.com/llvm-mirror/llvm.git 33b1a96b81ba4e33cfc4a129ce43b5331e16936b)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
 !1 = !DIFile(filename: "function-options.cpp", directory: "\5Ctest\5CDebugInfo\5CCOFF", checksumkind: CSK_MD5, checksum: "e73e74ea0bd81174051f0a4746343e00")
 !2 = !{}
 !3 = !{i32 2, !"CodeView", i32 1}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
 !5 = !{i32 1, !"wchar_size", i32 2}
 !6 = !{i32 7, !"PIC Level", i32 2}
-!7 = !{!"clang version 8.0.0 (https://github.com/llvm-mirror/clang.git 9884fc1d0881576784e9b50da9eb61a5eb427f1c) (https://github.com/llvm-mirror/llvm.git 33b1a96b81ba4e33cfc4a129ce43b5331e16936b)"}
+!7 = !{!"clang version 8.0.0"}
 !8 = distinct !DISubprogram(name: "Func_AClass", linkageName: "?Func_AClass@@YA?AVAClass@@AEAV1@@Z", scope: !9, file: !9, line: 6, type: !10, isLocal: false, isDefinition: true, scopeLine: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !2)
 !9 = !DIFile(filename: "function-options.cpp", directory: "D:\5Cupstream\5Cllvm\5Ctest\5CDebugInfo\5CCOFF")
 !10 = !DISubroutineType(types: !11)
diff --git a/test/DebugInfo/COFF/global_visibility.ll b/test/DebugInfo/COFF/global_visibility.ll
new file mode 100644
index 0000000..a93707f
--- /dev/null
+++ b/test/DebugInfo/COFF/global_visibility.ll
@@ -0,0 +1,276 @@
+; RUN: llc < %s -filetype=obj | llvm-readobj - -codeview | FileCheck %s
+;
+; This test verifies that global variables are emitted within the correct scope.
+;
+; -- global_visibility.cpp ----------------------------------------------------
+;  1	
+;  2	int global_int = 0;
+;  3	
+;  4	template <typename T> struct A {
+;  5	  static T comdat_int;
+;  6	  static T set(T value) { T r = comdat_int; comdat_int = value; return r; };
+;  7	};
+;  8	
+;  9	template <typename T> T A<T>::comdat_int = 42;
+; 10	
+; 11	void foo() {
+; 12	  static int local_int = 1;
+; 13	  {
+; 14	    static int nested_int = 2;
+; 15	    local_int = nested_int;
+; 16	  }
+; 17	  local_int = A<int>::set(42);
+; 18	}
+; 19	
+; 20	void bar() {
+; 21	  static int local_int = 3;
+; 22	  {
+; 23	    static int nested_int = 4;
+; 24	    local_int = nested_int;
+; 25	  }
+; 26	  local_int = A<unsigned>::set(42);
+; 27	}
+; 28	
+; -----------------------------------------------------------------------------
+;
+; $ clang -S -emit-llvm -g -gcodeview global_visibility.cpp
+;
+; NOTE: The scope for both DIGlobalVariables named "nested_int" should refer
+;       to the appropriate DILexicalBlock, not a DISubprogram.
+;
+
+; CHECK: CodeViewDebugInfo [
+; CHECK:   Section: .debug$S (9)
+
+; CHECK:   Subsection [
+; CHECK:     SubSectionType: Symbols (0xF1)
+; CHECK:     GlobalProcIdSym {
+; CHECK:       Kind: S_GPROC32_ID (0x1147)
+; CHECK:       DisplayName: foo
+; CHECK:       LinkageName: ?foo@@YAXXZ
+; CHECK:     }
+; CHECK:     DataSym {
+; CHECK:       Kind: S_LDATA32 (0x110C)
+; CHECK:       DisplayName: local_int
+; CHECK:       LinkageName: ?local_int@?1??foo@@YAXXZ@4HA
+; CHECK:     }
+; CHECK:     BlockSym {
+; CHECK:       Kind: S_BLOCK32 (0x1103)
+; CHECK:     }
+; CHECK:     DataSym {
+; CHECK:       Kind: S_LDATA32 (0x110C)
+; CHECK:       DisplayName: nested_int
+; CHECK:       LinkageName: ?nested_int@?1??foo@@YAXXZ@4HA
+; CHECK:     }
+; CHECK:     ScopeEndSym {
+; CHECK:       Kind: S_END (0x6)
+; CHECK:     }
+; CHECK:     ProcEnd {
+; CHECK:       Kind: S_PROC_ID_END (0x114F)
+; CHECK:     }
+; CHECK:   ]
+; CHECK:   Subsection [
+; CHECK:     SubSectionType: Symbols (0xF1)
+; CHECK:     GlobalProcIdSym {
+; CHECK:       Kind: S_GPROC32_ID (0x1147)
+; CHECK:       DisplayName: bar
+; CHECK:       LinkageName: ?bar@@YAXXZ
+; CHECK:     }
+; CHECK:     DataSym {
+; CHECK:       Kind: S_LDATA32 (0x110C)
+; CHECK:       DisplayName: local_int
+; CHECK:       LinkageName: ?local_int@?1??bar@@YAXXZ@4HA
+; CHECK:     }
+; CHECK:     BlockSym {
+; CHECK:       Kind: S_BLOCK32 (0x1103)
+; CHECK:     }
+; CHECK:     DataSym {
+; CHECK:       Kind: S_LDATA32 (0x110C)
+; CHECK:       DisplayName: nested_int
+; CHECK:       LinkageName: ?nested_int@?1??bar@@YAXXZ@4HA
+; CHECK:     }
+; CHECK:     ScopeEndSym {
+; CHECK:       Kind: S_END (0x6)
+; CHECK:     }
+; CHECK:     ProcEnd {
+; CHECK:       Kind: S_PROC_ID_END (0x114F)
+; CHECK:     }
+; CHECK:   ]
+; CHECK:   Subsection [
+; CHECK:     SubSectionType: Symbols (0xF1)
+; CHECK:     GlobalData {
+; CHECK:       Kind: S_GDATA32 (0x110D)
+; CHECK:       DisplayName: global_int
+; CHECK:       LinkageName: ?global_int@@3HA
+; CHECK:     }
+; CHECK:   ]
+; CHECK: ]
+; CHECK: CodeViewDebugInfo [
+; CHECK:   Section: .debug$S (16)
+; CHECK:   Subsection [
+; CHECK:     SubSectionType: Symbols (0xF1)
+; CHECK:     GlobalData {
+; CHECK:       Kind: S_GDATA32 (0x110D)
+; CHECK:       DisplayName: comdat_int
+; CHECK:       LinkageName: ?comdat_int@?$A@H@@2HA
+; CHECK:     }
+; CHECK:   ]
+; CHECK: ]
+; CHECK: CodeViewDebugInfo [
+; CHECK:   Section: .debug$S (17)
+; CHECK:   Subsection [
+; CHECK:     SubSectionType: Symbols (0xF1)
+; CHECK:     GlobalData {
+; CHECK:       Kind: S_GDATA32 (0x110D)
+; CHECK:       DisplayName: comdat_int
+; CHECK:       LinkageName: ?comdat_int@?$A@I@@2IA
+; CHECK:     }
+; CHECK:   ]
+; CHECK: ]
+;
+
+; ModuleID = 'global_visibility.cpp'
+source_filename = "global_visibility.cpp"
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.15.26730"
+
+$"?set@?$A@H@@SAHH@Z" = comdat any
+
+$"?set@?$A@I@@SAII@Z" = comdat any
+
+$"?comdat_int@?$A@H@@2HA" = comdat any
+
+$"?comdat_int@?$A@I@@2IA" = comdat any
+
+@"?global_int@@3HA" = dso_local global i32 0, align 4, !dbg !0
+@"?local_int@?1??foo@@YAXXZ@4HA" = internal global i32 1, align 4, !dbg !6
+@"?nested_int@?1??foo@@YAXXZ@4HA" = internal global i32 2, align 4, !dbg !12
+@"?local_int@?1??bar@@YAXXZ@4HA" = internal global i32 3, align 4, !dbg !14
+@"?nested_int@?1??bar@@YAXXZ@4HA" = internal global i32 4, align 4, !dbg !17
+@"?comdat_int@?$A@H@@2HA" = linkonce_odr dso_local global i32 42, comdat, align 4, !dbg !19
+@"?comdat_int@?$A@I@@2IA" = linkonce_odr dso_local global i32 42, comdat, align 4, !dbg !29
+
+; Function Attrs: noinline optnone uwtable
+define dso_local void @"?foo@@YAXXZ"() #0 !dbg !8 {
+entry:
+  %0 = load i32, i32* @"?nested_int@?1??foo@@YAXXZ@4HA", align 4, !dbg !45
+  store i32 %0, i32* @"?local_int@?1??foo@@YAXXZ@4HA", align 4, !dbg !45
+  %call = call i32 @"?set@?$A@H@@SAHH@Z"(i32 42), !dbg !47
+  store i32 %call, i32* @"?local_int@?1??foo@@YAXXZ@4HA", align 4, !dbg !47
+  ret void, !dbg !48
+}
+
+; Function Attrs: noinline nounwind optnone uwtable
+define linkonce_odr dso_local i32 @"?set@?$A@H@@SAHH@Z"(i32 %value) #1 comdat align 2 !dbg !49 {
+entry:
+  %value.addr = alloca i32, align 4
+  %r = alloca i32, align 4
+  store i32 %value, i32* %value.addr, align 4
+  call void @llvm.dbg.declare(metadata i32* %value.addr, metadata !50, metadata !DIExpression()), !dbg !51
+  call void @llvm.dbg.declare(metadata i32* %r, metadata !52, metadata !DIExpression()), !dbg !51
+  %0 = load i32, i32* @"?comdat_int@?$A@H@@2HA", align 4, !dbg !51
+  store i32 %0, i32* %r, align 4, !dbg !51
+  %1 = load i32, i32* %value.addr, align 4, !dbg !51
+  store i32 %1, i32* @"?comdat_int@?$A@H@@2HA", align 4, !dbg !51
+  %2 = load i32, i32* %r, align 4, !dbg !51
+  ret i32 %2, !dbg !51
+}
+
+; Function Attrs: noinline optnone uwtable
+define dso_local void @"?bar@@YAXXZ"() #0 !dbg !16 {
+entry:
+  %0 = load i32, i32* @"?nested_int@?1??bar@@YAXXZ@4HA", align 4, !dbg !53
+  store i32 %0, i32* @"?local_int@?1??bar@@YAXXZ@4HA", align 4, !dbg !53
+  %call = call i32 @"?set@?$A@I@@SAII@Z"(i32 42), !dbg !55
+  store i32 %call, i32* @"?local_int@?1??bar@@YAXXZ@4HA", align 4, !dbg !55
+  ret void, !dbg !56
+}
+
+; Function Attrs: noinline nounwind optnone uwtable
+define linkonce_odr dso_local i32 @"?set@?$A@I@@SAII@Z"(i32 %value) #1 comdat align 2 !dbg !57 {
+entry:
+  %value.addr = alloca i32, align 4
+  %r = alloca i32, align 4
+  store i32 %value, i32* %value.addr, align 4
+  call void @llvm.dbg.declare(metadata i32* %value.addr, metadata !58, metadata !DIExpression()), !dbg !59
+  call void @llvm.dbg.declare(metadata i32* %r, metadata !60, metadata !DIExpression()), !dbg !59
+  %0 = load i32, i32* @"?comdat_int@?$A@I@@2IA", align 4, !dbg !59
+  store i32 %0, i32* %r, align 4, !dbg !59
+  %1 = load i32, i32* %value.addr, align 4, !dbg !59
+  store i32 %1, i32* @"?comdat_int@?$A@I@@2IA", align 4, !dbg !59
+  %2 = load i32, i32* %r, align 4, !dbg !59
+  ret i32 %2, !dbg !59
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
+
+attributes #0 = { noinline optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { noinline nounwind optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!40, !41, !42, !43}
+!llvm.ident = !{!44}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "global_int", linkageName: "?global_int@@3HA", scope: !2, file: !3, line: 2, type: !11, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 8.0.0 (trunk)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "global_visibility.cpp", directory: "C:\5Cpath\5Cto\5Cdirectory", checksumkind: CSK_MD5, checksum: "f59b9e5de12391471b1a61888cb68a3e")
+!4 = !{}
+!5 = !{!0, !6, !12, !14, !17, !19, !29}
+!6 = !DIGlobalVariableExpression(var: !7, expr: !DIExpression())
+!7 = distinct !DIGlobalVariable(name: "local_int", scope: !8, file: !3, line: 12, type: !11, isLocal: true, isDefinition: true)
+!8 = distinct !DISubprogram(name: "foo", linkageName: "?foo@@YAXXZ", scope: !3, file: !3, line: 11, type: !9, scopeLine: 11, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !2, retainedNodes: !4)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null}
+!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!12 = !DIGlobalVariableExpression(var: !13, expr: !DIExpression())
+!13 = distinct !DIGlobalVariable(name: "nested_int", scope: !46, file: !3, line: 14, type: !11, isLocal: true, isDefinition: true)
+!14 = !DIGlobalVariableExpression(var: !15, expr: !DIExpression())
+!15 = distinct !DIGlobalVariable(name: "local_int", scope: !16, file: !3, line: 21, type: !11, isLocal: true, isDefinition: true)
+!16 = distinct !DISubprogram(name: "bar", linkageName: "?bar@@YAXXZ", scope: !3, file: !3, line: 20, type: !9, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !2, retainedNodes: !4)
+!17 = !DIGlobalVariableExpression(var: !18, expr: !DIExpression())
+!18 = distinct !DIGlobalVariable(name: "nested_int", scope: !54, file: !3, line: 23, type: !11, isLocal: true, isDefinition: true)
+!19 = !DIGlobalVariableExpression(var: !20, expr: !DIExpression())
+!20 = distinct !DIGlobalVariable(name: "comdat_int", linkageName: "?comdat_int@?$A@H@@2HA", scope: !2, file: !3, line: 9, type: !11, isLocal: false, isDefinition: true, declaration: !21)
+!21 = !DIDerivedType(tag: DW_TAG_member, name: "comdat_int", scope: !22, file: !3, line: 5, baseType: !11, flags: DIFlagStaticMember)
+!22 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "A<int>", file: !3, line: 4, size: 8, flags: DIFlagTypePassByValue | DIFlagTrivial, elements: !23, templateParams: !27, identifier: ".?AU?$A@H@@")
+!23 = !{!21, !24}
+!24 = !DISubprogram(name: "set", linkageName: "?set@?$A@H@@SAHH@Z", scope: !22, file: !3, line: 6, type: !25, scopeLine: 6, flags: DIFlagPrototyped | DIFlagStaticMember, spFlags: 0)
+!25 = !DISubroutineType(types: !26)
+!26 = !{!11, !11}
+!27 = !{!28}
+!28 = !DITemplateTypeParameter(name: "T", type: !11)
+!29 = !DIGlobalVariableExpression(var: !30, expr: !DIExpression())
+!30 = distinct !DIGlobalVariable(name: "comdat_int", linkageName: "?comdat_int@?$A@I@@2IA", scope: !2, file: !3, line: 9, type: !31, isLocal: false, isDefinition: true, declaration: !32)
+!31 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!32 = !DIDerivedType(tag: DW_TAG_member, name: "comdat_int", scope: !33, file: !3, line: 5, baseType: !31, flags: DIFlagStaticMember)
+!33 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "A<unsigned int>", file: !3, line: 4, size: 8, flags: DIFlagTypePassByValue | DIFlagTrivial, elements: !34, templateParams: !38, identifier: ".?AU?$A@I@@")
+!34 = !{!32, !35}
+!35 = !DISubprogram(name: "set", linkageName: "?set@?$A@I@@SAII@Z", scope: !33, file: !3, line: 6, type: !36, scopeLine: 6, flags: DIFlagPrototyped | DIFlagStaticMember, spFlags: 0)
+!36 = !DISubroutineType(types: !37)
+!37 = !{!31, !31}
+!38 = !{!39}
+!39 = !DITemplateTypeParameter(name: "T", type: !31)
+!40 = !{i32 2, !"CodeView", i32 1}
+!41 = !{i32 2, !"Debug Info Version", i32 3}
+!42 = !{i32 1, !"wchar_size", i32 2}
+!43 = !{i32 7, !"PIC Level", i32 2}
+!44 = !{!"clang version 8.0.0 (trunk)"}
+!45 = !DILocation(line: 15, scope: !46)
+!46 = distinct !DILexicalBlock(scope: !8, file: !3, line: 13)
+!47 = !DILocation(line: 17, scope: !8)
+!48 = !DILocation(line: 18, scope: !8)
+!49 = distinct !DISubprogram(name: "set", linkageName: "?set@?$A@H@@SAHH@Z", scope: !22, file: !3, line: 6, type: !25, scopeLine: 6, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !2, declaration: !24, retainedNodes: !4)
+!50 = !DILocalVariable(name: "value", arg: 1, scope: !49, file: !3, line: 6, type: !11)
+!51 = !DILocation(line: 6, scope: !49)
+!52 = !DILocalVariable(name: "r", scope: !49, file: !3, line: 6, type: !11)
+!53 = !DILocation(line: 24, scope: !54)
+!54 = distinct !DILexicalBlock(scope: !16, file: !3, line: 22)
+!55 = !DILocation(line: 26, scope: !16)
+!56 = !DILocation(line: 27, scope: !16)
+!57 = distinct !DISubprogram(name: "set", linkageName: "?set@?$A@I@@SAII@Z", scope: !33, file: !3, line: 6, type: !36, scopeLine: 6, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !2, declaration: !35, retainedNodes: !4)
+!58 = !DILocalVariable(name: "value", arg: 1, scope: !57, file: !3, line: 6, type: !31)
+!59 = !DILocation(line: 6, scope: !57)
+!60 = !DILocalVariable(name: "r", scope: !57, file: !3, line: 6, type: !31)
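The NOTE at the top of this new test comes down to the scope chain: each
nested_int's DIGlobalVariable points at a DILexicalBlock, which points at the
enclosing DISubprogram, and that chain is what lets the backend open an
S_BLOCK32 around the S_LDATA32. A reduced sketch of the chain for foo (node
numbers are illustrative):

!7 = distinct !DISubprogram(name: "foo", scope: !3, file: !3, line: 11, unit: !2)
!8 = distinct !DILexicalBlock(scope: !7, file: !3, line: 13)
!9 = distinct !DIGlobalVariable(name: "nested_int", scope: !8, file: !3, line: 14,
                                type: !11, isLocal: true, isDefinition: true)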
diff --git a/test/DebugInfo/COFF/multifunction.ll b/test/DebugInfo/COFF/multifunction.ll
index c74b332..1c19b34 100644
--- a/test/DebugInfo/COFF/multifunction.ll
+++ b/test/DebugInfo/COFF/multifunction.ll
@@ -80,6 +80,7 @@
 ; X86-NEXT: .secidx _x
 ; X86-NEXT: .byte   0
 ; X86-NEXT: .asciz "x"
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[PROC_SEGMENT_END]]:
 ; X86-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X86-NEXT: [[FPROC_BEG]]:
@@ -91,6 +92,7 @@
 ; X86-NEXT: .long   0                       # Exception handler offset
 ; X86-NEXT: .short  0                       # Exception handler section
 ; X86-NEXT: .long   0                       # Flags (defines frame register)
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[FPROC_END]]:
 ; X86-NEXT: .short  2
 ; X86-NEXT: .short  4431
@@ -117,6 +119,7 @@
 ; X86-NEXT: .secidx _y
 ; X86-NEXT: .byte   0
 ; X86-NEXT: .asciz "y"
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[PROC_SEGMENT_END]]:
 ; X86-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X86-NEXT: [[FPROC_BEG]]:
@@ -128,6 +131,7 @@
 ; X86-NEXT: .long   0                       # Exception handler offset
 ; X86-NEXT: .short  0                       # Exception handler section
 ; X86-NEXT: .long   0                       # Flags (defines frame register)
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[FPROC_END]]:
 ; X86-NEXT: .short  2
 ; X86-NEXT: .short  4431
@@ -154,6 +158,7 @@
 ; X86-NEXT: .secidx _f
 ; X86-NEXT: .byte   0
 ; X86-NEXT: .asciz "f"
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[PROC_SEGMENT_END]]:
 ; X86-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X86-NEXT: [[FPROC_BEG]]:
@@ -165,6 +170,7 @@
 ; X86-NEXT: .long   0                       # Exception handler offset
 ; X86-NEXT: .short  0                       # Exception handler section
 ; X86-NEXT: .long   0                       # Flags (defines frame register)
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[FPROC_END]]:
 ; X86-NEXT: .short  2
 ; X86-NEXT: .short  4431
@@ -386,6 +392,7 @@
 ; X64-NEXT: .secidx x
 ; X64-NEXT: .byte   0
 ; X64-NEXT: .asciz "x"
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[PROC_SEGMENT_END]]:
 ; X64-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X64-NEXT: [[FPROC_BEG]]:
@@ -397,6 +404,7 @@
 ; X64-NEXT: .long   0                       # Exception handler offset
 ; X64-NEXT: .short  0                       # Exception handler section
 ; X64-NEXT: .long   81920                       # Flags (defines frame register)
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[FPROC_END]]:
 ; X64-NEXT: .short  2
 ; X64-NEXT: .short  4431
@@ -422,6 +430,7 @@
 ; X64-NEXT: .secidx y
 ; X64-NEXT: .byte   0
 ; X64-NEXT: .asciz "y"
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[PROC_SEGMENT_END]]:
 ; X64-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X64-NEXT: [[FPROC_BEG]]:
@@ -433,6 +442,7 @@
 ; X64-NEXT: .long   0                       # Exception handler offset
 ; X64-NEXT: .short  0                       # Exception handler section
 ; X64-NEXT: .long   81920                       # Flags (defines frame register)
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[FPROC_END]]:
 ; X64-NEXT: .short  2
 ; X64-NEXT: .short  4431
@@ -458,6 +468,7 @@
 ; X64-NEXT: .secidx f
 ; X64-NEXT: .byte   0
 ; X64-NEXT: .asciz "f"
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[PROC_SEGMENT_END]]:
 ; X64-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X64-NEXT: [[FPROC_BEG]]:
@@ -469,6 +480,7 @@
 ; X64-NEXT: .long   0                       # Exception handler offset
 ; X64-NEXT: .short  0                       # Exception handler section
 ; X64-NEXT: .long   81920                       # Flags (defines frame register)
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[FPROC_END]]:
 ; X64-NEXT: .short  2
 ; X64-NEXT: .short  4431
@@ -488,16 +500,16 @@
 ; OBJ64:      Relocations [
 ; OBJ64-NEXT:   0x64 IMAGE_REL_AMD64_SECREL x
 ; OBJ64-NEXT:   0x68 IMAGE_REL_AMD64_SECTION x
-; OBJ64-NEXT:   0x98 IMAGE_REL_AMD64_SECREL x
-; OBJ64-NEXT:   0x9C IMAGE_REL_AMD64_SECTION x
-; OBJ64-NEXT:   0xFC IMAGE_REL_AMD64_SECREL y
-; OBJ64-NEXT:   0x100 IMAGE_REL_AMD64_SECTION y
-; OBJ64-NEXT:   0x130 IMAGE_REL_AMD64_SECREL y
-; OBJ64-NEXT:   0x134 IMAGE_REL_AMD64_SECTION y
-; OBJ64-NEXT:   0x194 IMAGE_REL_AMD64_SECREL f
-; OBJ64-NEXT:   0x198 IMAGE_REL_AMD64_SECTION f
-; OBJ64-NEXT:   0x1C8 IMAGE_REL_AMD64_SECREL f
-; OBJ64-NEXT:   0x1CC IMAGE_REL_AMD64_SECTION f
+; OBJ64-NEXT:   0x9C IMAGE_REL_AMD64_SECREL x
+; OBJ64-NEXT:   0xA0 IMAGE_REL_AMD64_SECTION x
+; OBJ64-NEXT:   0x100 IMAGE_REL_AMD64_SECREL y
+; OBJ64-NEXT:   0x104 IMAGE_REL_AMD64_SECTION y
+; OBJ64-NEXT:   0x138 IMAGE_REL_AMD64_SECREL y
+; OBJ64-NEXT:   0x13C IMAGE_REL_AMD64_SECTION y
+; OBJ64-NEXT:   0x19C IMAGE_REL_AMD64_SECREL f
+; OBJ64-NEXT:   0x1A0 IMAGE_REL_AMD64_SECTION f
+; OBJ64-NEXT:   0x1D4 IMAGE_REL_AMD64_SECREL f
+; OBJ64-NEXT:   0x1D8 IMAGE_REL_AMD64_SECTION f
 ; OBJ64-NEXT: ]
 ; OBJ64:      Subsection [
 ; OBJ64-NEXT:   SubSectionType: Symbols (0xF1)
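The new .p2align directives reflect CodeView symbol records now being padded
to 4-byte boundaries, with the record length measured after the padding; that
is why S_FRAMEPROC grows from 30 to 32 bytes and every later relocation offset
shifts. A schematic of one padded record (the Lbegin/Lend labels are
illustrative):

.short  Lend-Lbegin             # Record length, measured after the padding
Lbegin:
.short  4114                    # Record kind: S_FRAMEPROC (0x1012)
.long   0                       # Bytes of locals, then the remaining fixed fields
.p2align 2                      # New: pad the record to a 4-byte boundary
Lend: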
diff --git a/test/DebugInfo/COFF/simple.ll b/test/DebugInfo/COFF/simple.ll
index 3c495a3..58d0f42 100644
--- a/test/DebugInfo/COFF/simple.ll
+++ b/test/DebugInfo/COFF/simple.ll
@@ -58,6 +58,7 @@
 ; X86-NEXT: .secidx _f
 ; X86-NEXT: .byte   0
 ; X86-NEXT: .asciz "f"
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[PROC_SEGMENT_END]]:
 ; X86-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X86-NEXT: [[FPROC_BEG]]:
@@ -69,6 +70,7 @@
 ; X86-NEXT: .long   0                       # Exception handler offset
 ; X86-NEXT: .short  0                       # Exception handler section
 ; X86-NEXT: .long   0                       # Flags (defines frame register)
+; X86-NEXT: .p2align 2
 ; X86-NEXT: [[FPROC_END]]:
 ; X86-NEXT: .short  2
 ; X86-NEXT: .short  4431
@@ -89,8 +91,8 @@
 ; OBJ32-NEXT:   0x44 IMAGE_REL_I386_DIR32NB _f
 ; OBJ32-NEXT:   0x90 IMAGE_REL_I386_SECREL _f
 ; OBJ32-NEXT:   0x94 IMAGE_REL_I386_SECTION _f
-; OBJ32-NEXT:   0xC4 IMAGE_REL_I386_SECREL _f
-; OBJ32-NEXT:   0xC8 IMAGE_REL_I386_SECTION _f
+; OBJ32-NEXT:   0xC8 IMAGE_REL_I386_SECREL _f
+; OBJ32-NEXT:   0xCC IMAGE_REL_I386_SECTION _f
 ; OBJ32-NEXT: ]
 ; OBJ32:      Subsection [
 ; OBJ32-NEXT:   SubSectionType: Symbols (0xF1)
@@ -184,6 +186,7 @@
 ; X64-NEXT: .secidx f
 ; X64-NEXT: .byte   0
 ; X64-NEXT: .asciz "f"
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[PROC_SEGMENT_END]]:
 ; X64-NEXT: .short  [[FPROC_END:[^ ]*]]-[[FPROC_BEG:[^ ]*]]           # Record length
 ; X64-NEXT: [[FPROC_BEG]]:
@@ -195,6 +198,7 @@
 ; X64-NEXT: .long   0                       # Exception handler offset
 ; X64-NEXT: .short  0                       # Exception handler section
 ; X64-NEXT: .long   81920                       # Flags (defines frame register)
+; X64-NEXT: .p2align 2
 ; X64-NEXT: [[FPROC_END]]:
 ; X64-NEXT: .short  2
 ; X64-NEXT: .short  4431
@@ -214,8 +218,8 @@
 ; OBJ64:      Relocations [
 ; OBJ64-NEXT:   0x64 IMAGE_REL_AMD64_SECREL f
 ; OBJ64-NEXT:   0x68 IMAGE_REL_AMD64_SECTION f
-; OBJ64-NEXT:   0x98 IMAGE_REL_AMD64_SECREL f
-; OBJ64-NEXT:   0x9C IMAGE_REL_AMD64_SECTION f
+; OBJ64-NEXT:   0x9C IMAGE_REL_AMD64_SECREL f
+; OBJ64-NEXT:   0xA0 IMAGE_REL_AMD64_SECTION f
 ; OBJ64-NEXT: ]
 ; OBJ64:      Subsection [
 ; OBJ64-NEXT:   SubSectionType: Symbols (0xF1)
diff --git a/test/DebugInfo/COFF/thunk.ll b/test/DebugInfo/COFF/thunk.ll
index eac48ba..c8f5c5a 100644
--- a/test/DebugInfo/COFF/thunk.ll
+++ b/test/DebugInfo/COFF/thunk.ll
@@ -70,6 +70,7 @@
 ; ASM-NEXT:   .short  Lfunc_end{{.*}}-"[[NAME1]]" # Code size 
 ; ASM-NEXT:   .byte   0                       # Ordinal 
 ; ASM-NEXT:   .asciz  "[[NAME1]]"             # Function name 
+; ASM-NEXT:   .p2align 2
 ; ASM-NEXT: [[END1]]:
 ; ASM-NEXT:   .short  2                       # Record length 
 ; ASM-NEXT:   .short  4431                    # Record kind: S_PROC_ID_END 
@@ -88,6 +89,7 @@
 ; ASM-NEXT:   .short Lfunc_end{{.*}}-"[[NAME2]]" # Code size
 ; ASM-NEXT:   .byte 0                         # Ordinal
 ; ASM-NEXT:   .asciz "[[NAME2]]"              # Function name
+; ASM-NEXT:   .p2align 2
 ; ASM-NEXT: [[END2]]:
 ; ASM-NEXT:   .short 2                        # Record length
 ; ASM-NEXT:   .short 4431                     # Record kind: S_PROC_ID_END 
diff --git a/test/DebugInfo/COFF/type-quals.ll b/test/DebugInfo/COFF/type-quals.ll
index c81daa8..fdf92bd 100644
--- a/test/DebugInfo/COFF/type-quals.ll
+++ b/test/DebugInfo/COFF/type-quals.ll
@@ -209,28 +209,42 @@
 ; CHECK:     FunctionType: void (int& __restrict) (0x1011)
 ; CHECK:     Name: g
 ; CHECK:   }
-; CHECK:   ArgList (0x1013) {
+; CHECK:   Modifier (0x1013) {
+; CHECK:     TypeLeafKind: LF_MODIFIER (0x1001)
+; CHECK:     ModifiedType: char (0x70)
+; CHECK:     Modifiers [ (0x1)
+; CHECK:       Const (0x1)
+; CHECK:     ]
+; CHECK:   }
+; CHECK:   Array (0x1014) {
+; CHECK:     TypeLeafKind: LF_ARRAY (0x1503)
+; CHECK:     ElementType: const char (0x1013)
+; CHECK:     IndexType: unsigned __int64 (0x23)
+; CHECK:     SizeOf: 4
+; CHECK:     Name:
+; CHECK:   }
+; CHECK:   ArgList (0x1015) {
 ; CHECK:     TypeLeafKind: LF_ARGLIST (0x1201)
 ; CHECK:     NumArgs: 0
 ; CHECK:     Arguments [
 ; CHECK:     ]
 ; CHECK:   }
-; CHECK:   Procedure (0x1014) {
+; CHECK:   Procedure (0x1016) {
 ; CHECK:     TypeLeafKind: LF_PROCEDURE (0x1008)
 ; CHECK:     ReturnType: void (0x3)
 ; CHECK:     CallingConvention: NearC (0x0)
 ; CHECK:     FunctionOptions [ (0x0)
 ; CHECK:     ]
 ; CHECK:     NumParameters: 0
-; CHECK:     ArgListType: () (0x1013)
+; CHECK:     ArgListType: () (0x1015)
 ; CHECK:   }
-; CHECK:   FuncId (0x1015) {
+; CHECK:   FuncId (0x1017) {
 ; CHECK:     TypeLeafKind: LF_FUNC_ID (0x1601)
 ; CHECK:     ParentScope: 0x0
-; CHECK:     FunctionType: void () (0x1014)
+; CHECK:     FunctionType: void () (0x1016)
 ; CHECK:     Name: h
 ; CHECK:   }
-; CHECK:   Struct (0x1016) {
+; CHECK:   Struct (0x1018) {
 ; CHECK:     TypeLeafKind: LF_STRUCTURE (0x1505)
 ; CHECK:     MemberCount: 0
 ; CHECK:     Properties [ (0x180)
@@ -243,9 +257,9 @@
 ; CHECK:     SizeOf: 0
 ; CHECK:     Name: h::Foo
 ; CHECK:   }
-; CHECK:   Pointer (0x1017) {
+; CHECK:   Pointer (0x1019) {
 ; CHECK:     TypeLeafKind: LF_POINTER (0x1002)
-; CHECK:     PointeeType: h::Foo (0x1016)
+; CHECK:     PointeeType: h::Foo (0x1018)
 ; CHECK:     PtrType: Near64 (0xC)
 ; CHECK:     PtrMode: Pointer (0x0)
 ; CHECK:     IsFlat: 0
@@ -255,26 +269,26 @@
 ; CHECK:     IsRestrict: 0
 ; CHECK:     SizeOf: 8
 ; CHECK:   }
-; CHECK:   ArgList (0x1018) {
+; CHECK:   ArgList (0x101A) {
 ; CHECK:     TypeLeafKind: LF_ARGLIST (0x1201)
 ; CHECK:     NumArgs: 1
 ; CHECK:     Arguments [
 ; CHECK:       ArgType: int (0x74)
 ; CHECK:     ]
 ; CHECK:   }
-; CHECK:   MemberFunction (0x1019) {
+; CHECK:   MemberFunction (0x101B) {
 ; CHECK:     TypeLeafKind: LF_MFUNCTION (0x1009)
 ; CHECK:     ReturnType: int (0x74)
-; CHECK:     ClassType: h::Foo (0x1016)
-; CHECK:     ThisType: h::Foo* const (0x1017)
+; CHECK:     ClassType: h::Foo (0x1018)
+; CHECK:     ThisType: h::Foo* const (0x1019)
 ; CHECK:     CallingConvention: NearC (0x0)
 ; CHECK:     FunctionOptions [ (0x0)
 ; CHECK:     ]
 ; CHECK:     NumParameters: 1
-; CHECK:     ArgListType: (int) (0x1018)
+; CHECK:     ArgListType: (int) (0x101A)
 ; CHECK:     ThisAdjustment: 0
 ; CHECK:   }
-; CHECK:   FieldList (0x101A) {
+; CHECK:   FieldList (0x101C) {
 ; CHECK:     TypeLeafKind: LF_FIELDLIST (0x1203)
 ; CHECK:     DataMember {
 ; CHECK:       TypeLeafKind: LF_MEMBER (0x150D)
@@ -286,24 +300,24 @@
 ; CHECK:     OneMethod {
 ; CHECK:       TypeLeafKind: LF_ONEMETHOD (0x1511)
 ; CHECK:       AccessSpecifier: Public (0x3)
-; CHECK:       Type: int h::Foo::(int) (0x1019)
+; CHECK:       Type: int h::Foo::(int) (0x101B)
 ; CHECK:       Name: func
 ; CHECK:     }
 ; CHECK:   }
-; CHECK:   Struct (0x101B) {
+; CHECK:   Struct (0x101D) {
 ; CHECK:     TypeLeafKind: LF_STRUCTURE (0x1505)
 ; CHECK:     MemberCount: 2
 ; CHECK:     Properties [ (0x100)
 ; CHECK:       Scoped (0x100)
 ; CHECK:     ]
-; CHECK:     FieldList: <field list> (0x101A)
+; CHECK:     FieldList: <field list> (0x101C)
 ; CHECK:     DerivedFrom: 0x0
 ; CHECK:     VShape: 0x0
 ; CHECK:     SizeOf: 4
 ; CHECK:     Name: h::Foo
 ; CHECK:   }
 
-; CHECK:   Pointer (0x101D) {
+; CHECK:   Pointer (0x101F) {
 ; CHECK:     TypeLeafKind: LF_POINTER (0x1002)
 ; CHECK:     PointeeType: int (0x74)
 ; CHECK:     PtrType: Near64 (0xC)
@@ -314,12 +328,12 @@
 ; CHECK:     IsUnaligned: 0
 ; CHECK:     IsRestrict: 1
 ; CHECK:     SizeOf: 4
-; CHECK:     ClassType: h::Foo (0x1016)
+; CHECK:     ClassType: h::Foo (0x1018)
 ; CHECK:     Representation: SingleInheritanceData (0x1)
 ; CHECK:   }
-; CHECK:   Pointer (0x101E) {
+; CHECK:   Pointer (0x1020) {
 ; CHECK:     TypeLeafKind: LF_POINTER (0x1002)
-; CHECK:     PointeeType: int h::Foo::(int) (0x1019)
+; CHECK:     PointeeType: int h::Foo::(int) (0x101B)
 ; CHECK:     PtrType: Near64 (0xC)
 ; CHECK:     PtrMode: PointerToMemberFunction (0x3)
 ; CHECK:     IsFlat: 0
@@ -328,18 +342,18 @@
 ; CHECK:     IsUnaligned: 0
 ; CHECK:     IsRestrict: 0
 ; CHECK:     SizeOf: 8
-; CHECK:     ClassType: h::Foo (0x1016)
+; CHECK:     ClassType: h::Foo (0x1018)
 ; CHECK:     Representation: SingleInheritanceFunction (0x5)
 ; CHECK:   }
-; CHECK:   MemberFuncId (0x101F) {
+; CHECK:   MemberFuncId (0x1021) {
 ; CHECK:     TypeLeafKind: LF_MFUNC_ID (0x1602)
-; CHECK:     ClassType: h::Foo (0x1016)
-; CHECK:     FunctionType: int h::Foo::(int) (0x1019)
+; CHECK:     ClassType: h::Foo (0x1018)
+; CHECK:     FunctionType: int h::Foo::(int) (0x101B)
 ; CHECK:     Name: func
 ; CHECK:   }
-; CHECK:   Pointer (0x1020) {
+; CHECK:   Pointer (0x1022) {
 ; CHECK:     TypeLeafKind: LF_POINTER (0x1002)
-; CHECK:     PointeeType: h::Foo (0x1016)
+; CHECK:     PointeeType: h::Foo (0x1018)
 ; CHECK:     PtrType: Near64 (0xC)
 ; CHECK:     PtrMode: Pointer (0x0)
 ; CHECK:     IsFlat: 0
@@ -349,20 +363,6 @@
 ; CHECK:     IsRestrict: 0
 ; CHECK:     SizeOf: 8
 ; CHECK:   }
-; CHECK:   Modifier (0x1021) {
-; CHECK:     TypeLeafKind: LF_MODIFIER (0x1001)
-; CHECK:     ModifiedType: char (0x70)
-; CHECK:     Modifiers [ (0x1)
-; CHECK:       Const (0x1)
-; CHECK:     ]
-; CHECK:   }
-; CHECK:   Array (0x1022) {
-; CHECK:     TypeLeafKind: LF_ARRAY (0x1503)
-; CHECK:     ElementType: const char (0x1021)
-; CHECK:     IndexType: unsigned __int64 (0x23)
-; CHECK:     SizeOf: 4
-; CHECK:     Name:
-; CHECK:   }
 ; CHECK: ]
 
 ; CHECK-LABEL: CodeViewDebugInfo [
diff --git a/test/DebugInfo/COFF/types-cvarargs.ll b/test/DebugInfo/COFF/types-cvarargs.ll
index deb3e6e..5e366f0 100644
--- a/test/DebugInfo/COFF/types-cvarargs.ll
+++ b/test/DebugInfo/COFF/types-cvarargs.ll
@@ -25,7 +25,7 @@
 ; CHECK:  }
 ; CHECK:  Subsection [
 ; CHECK:    SubSectionType: Symbols (0xF1)
-; CHECK:    SubSectionSize: 0x2A
+; CHECK:    SubSectionSize:
 ; CHECK:    UDTSym {
 ; CHECK:      Kind: S_UDT (0x1108)
 ; CHECK:      Type: MemberTest::A (0x1008)
diff --git a/test/DebugInfo/COFF/types-this-not-ptr.ll b/test/DebugInfo/COFF/types-this-not-ptr.ll
new file mode 100644
index 0000000..ae47e6f
--- /dev/null
+++ b/test/DebugInfo/COFF/types-this-not-ptr.ll
@@ -0,0 +1,45 @@
+; RUN: llc -filetype=obj %s -o %t.obj
+; RUN: llvm-pdbutil dump -types %t.obj | FileCheck %s
+
+; Manually remove the "static" flag from the LLVM IR generated by compiling the
+; following C++ source:
+; struct Foo  {
+;   static void st_meth(int x, int y);
+; } f;
+
+; "this type" below should be <no type>.
+; CHECK: LF_MFUNCTION
+; CHECK-NEXT: return type = 0x0003 (void), # args = 2, param list = {{.*}}
+; CHECK-NEXT: class type = {{.*}}, this type = <no type>, this adjust = 0
+; CHECK-NEXT: calling conv = cdecl, options = None
+
+; ModuleID = 't.cpp'
+source_filename = "t.cpp"
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.14.26433"
+
+%struct.Foo = type { i8 }
+
+@"?f@@3UFoo@@A" = dso_local global %struct.Foo zeroinitializer, align 1, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!12, !13, !14, !15}
+!llvm.ident = !{!16}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "f", linkageName: "?f@@3UFoo@@A", scope: !2, file: !3, line: 3, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 8.0.0 ", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: None)
+!3 = !DIFile(filename: "t.cpp", directory: "C:\5Csrc\5Cllvm-project\5Cbuild", checksumkind: CSK_MD5, checksum: "ac580c6cde5f3f394632dcaad04873a4")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Foo", file: !3, line: 1, size: 8, flags: DIFlagTypePassByValue | DIFlagTrivial, elements: !7, identifier: ".?AUFoo@@")
+!7 = !{!8}
+!8 = !DISubprogram(name: "st_meth", linkageName: "?st_meth@Foo@@SAXHH@Z", scope: !6, file: !3, line: 2, type: !9, scopeLine: 2, flags: DIFlagPrototyped, spFlags: 0)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null, !11, !11}
+!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!12 = !{i32 2, !"CodeView", i32 1}
+!13 = !{i32 2, !"Debug Info Version", i32 3}
+!14 = !{i32 1, !"wchar_size", i32 2}
+!15 = !{i32 7, !"PIC Level", i32 2}
+!16 = !{!"clang version 8.0.0 "}
diff --git a/test/DebugInfo/COFF/udts-complete.ll b/test/DebugInfo/COFF/udts-complete.ll
index 1683cea..ace86ee 100644
--- a/test/DebugInfo/COFF/udts-complete.ll
+++ b/test/DebugInfo/COFF/udts-complete.ll
@@ -31,7 +31,7 @@
 ; CHECK:                           Symbols
 ; CHECK: ============================================================
 ; CHECK:   Mod 0000 | `.debug$S`:
-; CHECK:        0 | S_GDATA32 [size = 17] `gv`
+; CHECK:        0 | S_GDATA32 [size = 20] `gv`
 ; CHECK:            type = 0x1002 (Foo), addr = 0000:0000
 ; CHECK:        0 | S_UDT [size = 12] `Bar`
 ; CHECK:            original type = 0x1002
diff --git a/test/DebugInfo/COFF/udts.ll b/test/DebugInfo/COFF/udts.ll
index 114e11a..d17f5cd 100644
--- a/test/DebugInfo/COFF/udts.ll
+++ b/test/DebugInfo/COFF/udts.ll
@@ -79,13 +79,13 @@
 ; PDBUTIL:                           Symbols
 ; PDBUTIL-NEXT: ============================================================
 ; PDBUTIL-NOT:   S_UDT {{.*}} `A::C`
-; PDBUTIL:       S_UDT [size = 15] `f::FOO`
-; PDBUTIL:       S_UDT [size = 15] `g::pun`
-; PDBUTIL:       S_UDT [size = 10] `S`
-; PDBUTIL:       S_UDT [size = 10] `A`
-; PDBUTIL:       S_UDT [size = 13] `A::D`
-; PDBUTIL:       S_UDT [size = 10] `U`
-; PDBUTIL:       S_UDT [size = 10] `U`
+; PDBUTIL:       S_UDT [size = 16] `f::FOO`
+; PDBUTIL:       S_UDT [size = 16] `g::pun`
+; PDBUTIL:       S_UDT [size = 12] `S`
+; PDBUTIL:       S_UDT [size = 12] `A`
+; PDBUTIL:       S_UDT [size = 16] `A::D`
+; PDBUTIL:       S_UDT [size = 12] `U`
+; PDBUTIL:       S_UDT [size = 12] `U`
 
 source_filename = "test/DebugInfo/COFF/udts.ll"
 target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
@@ -190,4 +190,4 @@
 !53 = !DILocation(line: 9, column: 5, scope: !38)
 !54 = !DILocation(line: 9, column: 7, scope: !38)
 !55 = !DILocation(line: 10, column: 12, scope: !38)
-!56 = !DILocation(line: 10, column: 3, scope: !38)
\ No newline at end of file
+!56 = !DILocation(line: 10, column: 3, scope: !38)
diff --git a/test/DebugInfo/COFF/vframe-fpo.ll b/test/DebugInfo/COFF/vframe-fpo.ll
index b3b7153..27273cd 100644
--- a/test/DebugInfo/COFF/vframe-fpo.ll
+++ b/test/DebugInfo/COFF/vframe-fpo.ll
@@ -64,14 +64,14 @@
 ; CODEVIEW-NEXT:   Magic: 0x4
 ; CODEVIEW-NEXT:   Subsection [
 ; CODEVIEW-NEXT:     SubSectionType: Symbols (0xF1)
-; CODEVIEW-NEXT:     SubSectionSize: 0x2F
+; CODEVIEW-NEXT:     SubSectionSize:
 ; CODEVIEW-NEXT:     Compile3Sym {
 ; CODEVIEW-NEXT:       Kind: S_COMPILE3 (0x113C)
 ; CODEVIEW:          }
 ; CODEVIEW:        ]
 ; CODEVIEW:        Subsection [
 ; CODEVIEW-NEXT:     SubSectionType: FrameData (0xF5)
-; CODEVIEW-NEXT:     SubSectionSize: 0xA4
+; CODEVIEW-NEXT:     SubSectionSize:
 ; CODEVIEW-NEXT:     LinkageName: _main
 ; CODEVIEW:          FrameData {
 ; CODEVIEW:          }
@@ -109,7 +109,7 @@
 
 ; CODEVIEW:      Subsection [
 ; CODEVIEW-NEXT:   SubSectionType: Symbols (0xF1)
-; CODEVIEW-NEXT:   SubSectionSize: 0xA2
+; CODEVIEW-NEXT:   SubSectionSize:
 ; CODEVIEW-NEXT:   GlobalProcIdSym {
 ; CODEVIEW-NEXT:     Kind: S_GPROC32_ID (0x1147)
 ; CODEVIEW:          DisplayName: main
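Since the 4-byte record padding changes subsection byte counts, these tests
stop pinning exact SubSectionSize values; FileCheck still requires the field
to appear, but any size now matches. The relaxed pattern, sketched:

; CODEVIEW-NEXT: SubSectionType: Symbols (0xF1)
; CODEVIEW-NEXT: SubSectionSize: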
diff --git a/test/DebugInfo/Generic/2010-04-19-FramePtr.ll b/test/DebugInfo/Generic/2010-04-19-FramePtr.ll
index 11c3d68..77e4fa0 100644
--- a/test/DebugInfo/Generic/2010-04-19-FramePtr.ll
+++ b/test/DebugInfo/Generic/2010-04-19-FramePtr.ll
@@ -1,6 +1,6 @@
 ; RUN: %llc_dwarf -debugger-tune=lldb -asm-verbose -O1 -o - < %s | FileCheck %s
 ; RUN: %llc_dwarf -debugger-tune=gdb -asm-verbose -O1 -o - < %s | FileCheck %s --check-prefix=DISABLE
-; RUN: %llc_dwarf -disable-fp-elim -debugger-tune=lldb -asm-verbose -O1 -o - < %s | FileCheck %s --check-prefix=DISABLE
+; RUN: %llc_dwarf -frame-pointer=all -debugger-tune=lldb -asm-verbose -O1 -o - < %s | FileCheck %s --check-prefix=DISABLE
 
 ; CHECK: DW_AT_APPLE_omit_frame_ptr
 ; DISABLE-NOT: DW_AT_APPLE_omit_frame_ptr
diff --git a/test/DebugInfo/Generic/debug-info-enum.ll b/test/DebugInfo/Generic/debug-info-enum.ll
index 5d887cd..85e2293 100644
--- a/test/DebugInfo/Generic/debug-info-enum.ll
+++ b/test/DebugInfo/Generic/debug-info-enum.ll
@@ -28,7 +28,7 @@
 !4 = !{!5, !10, !14, !19, !23, !28, !32, !37, !41}
 
 ; Test enumeration with a fixed "signed char" underlying type.
-!5 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E0", file: !3, line: 2, baseType: !6, size: 8, flags: DIFlagFixedEnum, elements: !7, identifier: "_ZTS2E0")
+!5 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E0", file: !3, line: 2, baseType: !6, size: 8, flags: DIFlagEnumClass, elements: !7, identifier: "_ZTS2E0")
 !6 = !DIBasicType(name: "signed char", size: 8, encoding: DW_ATE_signed_char)
 !7 = !{!8, !9}
 !8 = !DIEnumerator(name: "A0", value: -128)
@@ -46,7 +46,7 @@
 ; CHECK-NEXT:      DW_AT_const_value     (127)
 
 ; Test enumeration with a fixed "unsigned char" underlying type.
-!10 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E1", file: !3, line: 12, baseType: !11, size: 8, flags: DIFlagFixedEnum, elements: !12, identifier: "_ZTS2E1")
+!10 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E1", file: !3, line: 12, baseType: !11, size: 8, flags: DIFlagEnumClass, elements: !12, identifier: "_ZTS2E1")
 !11 = !DIBasicType(name: "unsigned char", size: 8, encoding: DW_ATE_unsigned_char)
 !12 = !{!13}
 !13 = !DIEnumerator(name: "A1", value: 255, isUnsigned: true)
@@ -60,7 +60,7 @@
 ; CHECK-NEXT:      DW_AT_const_value     (255)
 
 ; Test enumeration with a fixed "short" underlying type.
-!14 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E2", file: !3, line: 18, baseType: !15, size: 16, flags: DIFlagFixedEnum, elements: !16, identifier: "_ZTS2E2")
+!14 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E2", file: !3, line: 18, baseType: !15, size: 16, flags: DIFlagEnumClass, elements: !16, identifier: "_ZTS2E2")
 !15 = !DIBasicType(name: "short", size: 16, encoding: DW_ATE_signed)
 !16 = !{!17, !18}
 !17 = !DIEnumerator(name: "A2", value: -32768)
@@ -78,7 +78,7 @@
 ; CHECK-NEXT:      DW_AT_const_value     (32767)
 
 ; Test enumeration with a fixed "unsigned short" underlying type.
-!19 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E3", file: !3, line: 28, baseType: !20, size: 16, flags: DIFlagFixedEnum, elements: !21, identifier: "_ZTS2E3")
+!19 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E3", file: !3, line: 28, baseType: !20, size: 16, flags: DIFlagEnumClass, elements: !21, identifier: "_ZTS2E3")
 !20 = !DIBasicType(name: "unsigned short", size: 16, encoding: DW_ATE_unsigned)
 !21 = !{!22}
 !22 = !DIEnumerator(name: "A3", value: 65535, isUnsigned: true)
@@ -92,7 +92,7 @@
 ; CHECK-NEXT:      DW_AT_const_value     (65535)
 
 ; Test enumeration with a fixed "int" underlying type.
-!23 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E4", file: !3, line: 34, baseType: !24, size: 32, flags: DIFlagFixedEnum, elements: !25, identifier: "_ZTS2E4")
+!23 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E4", file: !3, line: 34, baseType: !24, size: 32, flags: DIFlagEnumClass, elements: !25, identifier: "_ZTS2E4")
 !24 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
 !25 = !{!26, !27}
 !26 = !DIEnumerator(name: "A4", value: -2147483648)
@@ -110,7 +110,7 @@
 ; CHECK-NEXT:      DW_AT_const_value     (2147483647)
 
 ; Test enumeration with a fixed "unsigend int" underlying type.
-!28 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E5", file: !3, line: 41, baseType: !29, size: 32, flags: DIFlagFixedEnum, elements: !30, identifier: "_ZTS2E5")
+!28 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E5", file: !3, line: 41, baseType: !29, size: 32, flags: DIFlagEnumClass, elements: !30, identifier: "_ZTS2E5")
 !29 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
 !30 = !{!31}
 !31 = !DIEnumerator(name: "A5", value: 4294967295, isUnsigned: true)
@@ -124,7 +124,7 @@
 ; CHECK-NEXT:      DW_AT_const_value     (4294967295)
 
 ; Test enumeration with a fixed "long long" underlying type.
-!32 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E6", file: !3, line: 47, baseType: !33, size: 64, flags: DIFlagFixedEnum, elements: !34, identifier: "_ZTS2E6")
+!32 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E6", file: !3, line: 47, baseType: !33, size: 64, flags: DIFlagEnumClass, elements: !34, identifier: "_ZTS2E6")
 !33 = !DIBasicType(name: "long long int", size: 64, encoding: DW_ATE_signed)
 !34 = !{!35, !36}
 !35 = !DIEnumerator(name: "A6", value: -9223372036854775808)
@@ -142,7 +142,7 @@
 ; CHECK-NEXT:      DW_AT_const_value     (9223372036854775807)
 
 ; Test enumeration with a fixed "unsigned long long" underlying type.
-!37 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E7", file: !3, line: 57, baseType: !38, size: 64, flags: DIFlagFixedEnum, elements: !39, identifier: "_ZTS2E7")
+!37 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E7", file: !3, line: 57, baseType: !38, size: 64, flags: DIFlagEnumClass, elements: !39, identifier: "_ZTS2E7")
 !38 = !DIBasicType(name: "long long unsigned int", size: 64, encoding: DW_ATE_unsigned)
 !39 = !{!40}
 !40 = !DIEnumerator(name: "A7", value: 18446744073709551615, isUnsigned: true)
@@ -168,11 +168,11 @@
 ; CHECK-NOT:       DW_AT_enum_class
 ; CHECK:           DW_AT_name      ("E8")
 
-; Test enumeration without a fixed underlying type, but with the DIFlagFixedEnum
+; Test enumeration without a fixed underlying type, but with the DIFlagEnumClass
 ; set. The DW_AT_enum_class attribute should be absent. This behaviour is
 ; intended to keep compatibility with existing DWARF consumers, which may imply
 ; the type is present whenever DW_AT_enum_class is set.
-!63 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E9", file: !3, line: 63, size: 32, flags: DIFlagFixedEnum,  elements: !64, identifier: "_ZTS2E9")
+!63 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E9", file: !3, line: 63, size: 32, flags: DIFlagEnumClass,  elements: !64, identifier: "_ZTS2E9")
 !64 = !{!65, !66}
 !65 = !DIEnumerator(name: "A9", value: -128)
 !66 = !DIEnumerator(name: "B9", value: 127)
diff --git a/test/DebugInfo/Generic/debuginfofinder-forward-declaration.ll b/test/DebugInfo/Generic/debuginfofinder-forward-declaration.ll
index 771ac57..675230d 100644
--- a/test/DebugInfo/Generic/debuginfofinder-forward-declaration.ll
+++ b/test/DebugInfo/Generic/debuginfofinder-forward-declaration.ll
@@ -28,7 +28,7 @@
 
 !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "y", scope: !2, file: !3, line: 7, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 3.7.0 (http://llvm.org/git/clang.git 247b30a043eb8f39ea3708e7e995089da0a6b00f) (http://llvm.org/git/llvm.git 6ecc7365a89c771fd229bdd9ffcc178684ea1aa5)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4, globals: !5, imports: !4)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 3.7.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4, globals: !5, imports: !4)
 !3 = !DIFile(filename: "minimal.c", directory: "/tmp")
 !4 = !{}
 !5 = !{!0}
@@ -39,5 +39,5 @@
 !10 = !DICompositeType(tag: DW_TAG_structure_type, name: "X", file: !3, line: 1, flags: DIFlagFwdDecl)
 !11 = !{i32 2, !"Dwarf Version", i32 4}
 !12 = !{i32 2, !"Debug Info Version", i32 3}
-!13 = !{!"clang version 3.7.0 (http://llvm.org/git/clang.git 247b30a043eb8f39ea3708e7e995089da0a6b00f) (http://llvm.org/git/llvm.git 6ecc7365a89c771fd229bdd9ffcc178684ea1aa5)"}
+!13 = !{!"clang version 3.7.0"}
 
diff --git a/test/DebugInfo/Generic/debuginfofinder-imported-global-variable.ll b/test/DebugInfo/Generic/debuginfofinder-imported-global-variable.ll
index 4ec06a2..270ab7a 100644
--- a/test/DebugInfo/Generic/debuginfofinder-imported-global-variable.ll
+++ b/test/DebugInfo/Generic/debuginfofinder-imported-global-variable.ll
@@ -29,7 +29,7 @@
 !2 = !DINamespace(name: "s", scope: null)
 !3 = !DIFile(filename: "source.cpp", directory: "/somewhere")
 !4 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
-!5 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 7.0.99 (https://git.llvm.org/git/clang.git/ ec45e6c6530b8f9a1046d0a7efae467b3151783b) (https://git.llvm.org/git/llvm.git/ 9d2fcc2bf4a301d05d9b440a805847645637ab28)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !6, globals: !7, imports: !8)
+!5 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 7.0.99", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !6, globals: !7, imports: !8)
 !6 = !{}
 !7 = !{!0}
 !8 = !{!9}
@@ -38,4 +38,4 @@
 !11 = !{i32 2, !"Debug Info Version", i32 3}
 !12 = !{i32 1, !"wchar_size", i32 4}
 !13 = !{i32 7, !"PIC Level", i32 2}
-!14 = !{!"clang version 7.0.99 (https://git.llvm.org/git/clang.git/ ec45e6c6530b8f9a1046d0a7efae467b3151783b) (https://git.llvm.org/git/llvm.git/ 9d2fcc2bf4a301d05d9b440a805847645637ab28)"}
+!14 = !{!"clang version 7.0.99"}
diff --git a/test/DebugInfo/Generic/dwarf-public-names.ll b/test/DebugInfo/Generic/dwarf-public-names.ll
index 947b1f5..5ba431a 100644
--- a/test/DebugInfo/Generic/dwarf-public-names.ll
+++ b/test/DebugInfo/Generic/dwarf-public-names.ll
@@ -117,7 +117,7 @@
 !17 = !DIGlobalVariableExpression(var: !18, expr: !DIExpression())
 !18 = !DIGlobalVariable(name: "global_namespace_variable", linkageName: "_ZN2ns25global_namespace_variableE", scope: !19, file: !3, line: 27, type: !6, isLocal: false, isDefinition: true)
 !19 = !DINamespace(name: "ns", scope: null)
-!20 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 3.3 (http://llvm.org/git/clang.git a09cd8103a6a719cb2628cdf0c91682250a17bd2) (http://llvm.org/git/llvm.git 47d03cec0afca0c01ae42b82916d1d731716cd20)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !11, retainedTypes: !11, globals: !21, imports: !11) ; previously: invalid DW_TAG_base_type
+!20 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 3.3", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !11, retainedTypes: !11, globals: !21, imports: !11) ; previously: invalid DW_TAG_base_type
 !21 = !{!0, !15, !17}
 !22 = !{i32 1, !"Debug Info Version", i32 3}
 !23 = distinct !DISubprogram(name: "member_function", linkageName: "_ZN1C15member_functionEv", scope: null, file: !3, line: 9, type: !8, isLocal: false, isDefinition: true, scopeLine: 9, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !20, declaration: !7, retainedNodes: !11)
diff --git a/test/DebugInfo/Generic/string-offsets-form.ll b/test/DebugInfo/Generic/string-offsets-form.ll
index 31e5de4..7aec82f 100644
--- a/test/DebugInfo/Generic/string-offsets-form.ll
+++ b/test/DebugInfo/Generic/string-offsets-form.ll
@@ -16,7 +16,7 @@
 ;
 ; CHECK:     .debug_info contents:
 ; CHECK-NOT: DW_FORM_strx2
-; CHECK:     DW_AT_name [DW_FORM_strx2] ( indexed (00000100) string =
+; CHECK:     DW_AT_name [DW_FORM_strx2] (indexed (00000100) string =
 
 ; ModuleID = 'enum.cpp'
 source_filename = "enum.cpp"
diff --git a/test/DebugInfo/Generic/varargs.ll b/test/DebugInfo/Generic/varargs.ll
index 96ad9c0..c8193c0 100644
--- a/test/DebugInfo/Generic/varargs.ll
+++ b/test/DebugInfo/Generic/varargs.ll
@@ -95,7 +95,7 @@
 !23 = !DILocalVariable(name: "a", line: 16, scope: !14, file: !15, type: !4)
 !24 = !DILocation(line: 16, scope: !14)
 ; Manually modified to avoid dependence on pointer size
-!25 = !DILocalVariable(name: "fptr", line: 18, scope: !14, file: !15, type: !16)
+!25 = !DILocalVariable(name: "fptr", line: 18, scope: !14, file: !15, type: !26)
 !26 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !16)
 !27 = !DILocation(line: 18, scope: !14)
 !28 = !DILocation(line: 22, scope: !14)
diff --git a/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64 b/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64
index 455dd1c..e3afa0f 100755
--- a/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64
+++ b/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64
Binary files differ
diff --git a/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64.debuglink b/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64.debuglink
index 8c08037..9b51321 100755
--- a/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64.debuglink
+++ b/test/DebugInfo/Inputs/dwarfdump-test.elf-x86-64.debuglink
Binary files differ
diff --git a/test/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64 b/test/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64
index 99a448a..c952cff 100755
--- a/test/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64
+++ b/test/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64
Binary files differ
diff --git a/test/DebugInfo/Inputs/loclists-dwp-b.ll b/test/DebugInfo/Inputs/loclists-dwp-b.ll
index 77081bd..0976bf9 100644
--- a/test/DebugInfo/Inputs/loclists-dwp-b.ll
+++ b/test/DebugInfo/Inputs/loclists-dwp-b.ll
@@ -13,13 +13,13 @@
 !llvm.module.flags = !{!3, !4, !5}
 !llvm.ident = !{!6}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0 (https://git.llvm.org/git/clang.git/ 41055c6168135fe539801799e5c5636247cf0302) (https://git.llvm.org/git/llvm.git/ de0558be123ffbb5b5bd692c17dbd57a75fe684f)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
 !1 = !DIFile(filename: "b.cpp", directory: "/home/test/PRs/PR38990")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
 !5 = !{i32 1, !"wchar_size", i32 4}
-!6 = !{!"clang version 8.0.0 (https://git.llvm.org/git/clang.git/ 41055c6168135fe539801799e5c5636247cf0302) (https://git.llvm.org/git/llvm.git/ de0558be123ffbb5b5bd692c17dbd57a75fe684f)"}
+!6 = !{!"clang version 8.0.0"}
 !7 = distinct !DISubprogram(name: "b", linkageName: "_Z1bi", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !11)
 !8 = !DISubroutineType(types: !9)
 !9 = !{null, !10}
diff --git a/test/DebugInfo/Inputs/test-inline.o b/test/DebugInfo/Inputs/test-inline.o
index a650c91..93073a1 100644
--- a/test/DebugInfo/Inputs/test-inline.o
+++ b/test/DebugInfo/Inputs/test-inline.o
Binary files differ
diff --git a/test/DebugInfo/Inputs/test-parameters.o b/test/DebugInfo/Inputs/test-parameters.o
index 7f4b670..3b887c4 100644
--- a/test/DebugInfo/Inputs/test-parameters.o
+++ b/test/DebugInfo/Inputs/test-parameters.o
Binary files differ
diff --git a/test/DebugInfo/Mips/prologue_end.ll b/test/DebugInfo/Mips/prologue_end.ll
index 6bb0448..7886b9b 100644
--- a/test/DebugInfo/Mips/prologue_end.ll
+++ b/test/DebugInfo/Mips/prologue_end.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -O0 -mtriple mips-unknown-linux-gnu -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC
-; RUN: llc -O0 -mtriple mips-unknown-linux-gnu -relocation-model=static -disable-fp-elim < %s | FileCheck %s -check-prefix=STATIC-FP
+; RUN: llc -O0 -mtriple mips-unknown-linux-gnu -relocation-model=static -frame-pointer=all < %s | FileCheck %s -check-prefix=STATIC-FP
 ; RUN: llc -O0 -mtriple mips-unknown-linux-gnu -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC
-; RUN: llc -O0 -mtriple mips-unknown-linux-gnu -relocation-model=pic -disable-fp-elim < %s | FileCheck %s -check-prefix=PIC-FP
+; RUN: llc -O0 -mtriple mips-unknown-linux-gnu -relocation-model=pic -frame-pointer=all < %s | FileCheck %s -check-prefix=PIC-FP
 
 ; Generated using clang -O0 -emit-llvm -S -target mipsel-unknown-linux -g test.c -o test.ll
 ; test.c:
diff --git a/test/DebugInfo/NVPTX/cu-range-hole.ll b/test/DebugInfo/NVPTX/cu-range-hole.ll
index c8ea509..e44b43d 100644
--- a/test/DebugInfo/NVPTX/cu-range-hole.ll
+++ b/test/DebugInfo/NVPTX/cu-range-hole.ll
@@ -71,144 +71,144 @@
 }
 
 ; CHECK: // .section .debug_abbrev
-; CHECK: // {
-; CHECK: // .b8 1                                // Abbreviation Code
-; CHECK: // .b8 17                               // DW_TAG_compile_unit
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 37                               // DW_AT_producer
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 19                               // DW_AT_language
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 16                               // DW_AT_stmt_list
-; CHECK: // .b8 6                                // DW_FORM_data4
-; CHECK: // .b8 27                               // DW_AT_comp_dir
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 2                                // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 64                               // DW_AT_frame_base
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 39                               // DW_AT_prototyped
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 3                                // Abbreviation Code
-; CHECK: // .b8 5                                // DW_TAG_formal_parameter
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 4                                // Abbreviation Code
-; CHECK: // .b8 36                               // DW_TAG_base_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 62                               // DW_AT_encoding
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 0                                // EOM(3)
-; CHECK: // }
-; CHECK: // .section .debug_info
-; CHECK: // {
-; CHECK: // .b32 183                             // Length of Unit
-; CHECK: // .b8 2                                // DWARF version number
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
-; CHECK: // .b8 8                                // Address Size (in bytes)
-; CHECK: // .b8 1                                // Abbrev [1] 0xb:0xb0 DW_TAG_compile_unit
-; CHECK: // .b8 99,108,97,110,103,32,118,101,114,115,105,111,110,32,51,46,53,46,48,32,40,116,114,117,110,107,32,50,48,52,49,54,52,41,32,40,108,108,118,109 // DW_AT_producer
-; CHECK: // .b8 47,116,114,117,110,107,32,50,48,52,49,56,51,41
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_language
-; CHECK: // .b8 0
-; CHECK: // .b8 98,46,99                         // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_line                     // DW_AT_stmt_list
-; CHECK: // .b8 47,115,111,117,114,99,101        // DW_AT_comp_dir
-; CHECK: // .b8 0
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end2                      // DW_AT_high_pc
-; CHECK: // .b8 2                                // Abbrev [2] 0x65:0x27 DW_TAG_subprogram
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end0                      // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_AT_frame_base
-; CHECK: // .b8 156
-; CHECK: // .b8 98                               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 1                                // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_prototyped
-; CHECK: // .b32 179                             // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // Abbrev [3] 0x82:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 99                               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 1                                // DW_AT_decl_line
-; CHECK: // .b32 179                             // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 2                                // Abbrev [2] 0x8c:0x27 DW_TAG_subprogram
-; CHECK: // .b64 Lfunc_begin2                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end2                      // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_AT_frame_base
-; CHECK: // .b8 156
-; CHECK: // .b8 100                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_prototyped
-; CHECK: // .b32 179                             // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // Abbrev [3] 0xa9:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 101                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b32 179                             // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 4                                // Abbrev [4] 0xb3:0x7 DW_TAG_base_type
-; CHECK: // .b8 105,110,116                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_encoding
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // }
-; CHECK: // .section .debug_macinfo
-; CHECK: // {
-; CHECK: // .b8 0                                // End Of Macro List Mark
-; CHECK: // }
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 1                                // Abbreviation Code
+; CHECK-NEXT: // .b8 17                               // DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 37                               // DW_AT_producer
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 19                               // DW_AT_language
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 16                               // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 6                                // DW_FORM_data4
+; CHECK-NEXT: // .b8 27                               // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 2                                // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 64                               // DW_AT_frame_base
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 39                               // DW_AT_prototyped
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 3                                // Abbreviation Code
+; CHECK-NEXT: // .b8 5                                // DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 4                                // Abbreviation Code
+; CHECK-NEXT: // .b8 36                               // DW_TAG_base_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 62                               // DW_AT_encoding
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 0                                // EOM(3)
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_info
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b32 183                             // Length of Unit
+; CHECK-NEXT: // .b8 2                                // DWARF version number
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
+; CHECK-NEXT: // .b8 8                                // Address Size (in bytes)
+; CHECK-NEXT: // .b8 1                                // Abbrev [1] 0xb:0xb0 DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 99,108,97,110,103,32,118,101,114,115,105,111,110,32,51,46,53,46,48,32,40,116,114,117,110,107,32,50,48,52,49,54,52,41,32,40,108,108,118,109 // DW_AT_producer
+; CHECK-NEXT: // .b8 47,116,114,117,110,107,32,50,48,52,49,56,51,41
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_language
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 98,46,99                         // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_line                     // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 47,115,111,117,114,99,101        // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end2                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 2                                // Abbrev [2] 0x65:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end0                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_AT_frame_base
+; CHECK-NEXT: // .b8 156
+; CHECK-NEXT: // .b8 98                               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_prototyped
+; CHECK-NEXT: // .b32 179                             // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x82:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 99                               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 179                             // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 2                                // Abbrev [2] 0x8c:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b64 Lfunc_begin2                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end2                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_AT_frame_base
+; CHECK-NEXT: // .b8 156
+; CHECK-NEXT: // .b8 100                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_prototyped
+; CHECK-NEXT: // .b32 179                             // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xa9:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 101                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 179                             // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0xb3:0x7 DW_TAG_base_type
+; CHECK-NEXT: // .b8 105,110,116                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_macinfo
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 0                                // End Of Macro List Mark
+; CHECK:      // }
 
 attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind readnone }
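The wholesale switch from `CHECK` to `CHECK-NEXT` in these DWARF dumps tightens the test: a plain `CHECK` only requires its pattern to appear somewhere after the previous match, so an unexpected extra line between two checked bytes went unnoticed, whereas `CHECK-NEXT` must match the line immediately after the previous one. The closing `// }` keeps a plain `CHECK` so any trailing output before the brace is still tolerated. A minimal sketch of the difference against hypothetical output:

    ; Assembler output being checked:
    ;   .b8 1
    ;   .b8 99      <- unexpected extra line
    ;   .b8 17
    ;
    ; The loose form still passes:
    ; CHECK: .b8 1
    ; CHECK: .b8 17
    ;
    ; The strict form fails, since .b8 17 is not on the line
    ; immediately following the .b8 1 match:
    ; CHECK:      .b8 1
    ; CHECK-NEXT: .b8 17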
diff --git a/test/DebugInfo/NVPTX/dbg-declare-alloca.ll b/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
index 9a4beed..d6a77ca 100644
--- a/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
+++ b/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
@@ -24,168 +24,168 @@
 ; CHECK: .file 1 "test{{(/|\\\\)}}t.c"
 
 ; CHECK: // .section .debug_abbrev
-; CHECK: // {
-; CHECK: // .b8 1                                // Abbreviation Code
-; CHECK: // .b8 17                               // DW_TAG_compile_unit
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 37                               // DW_AT_producer
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 19                               // DW_AT_language
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 16                               // DW_AT_stmt_list
-; CHECK: // .b8 6                                // DW_FORM_data4
-; CHECK: // .b8 27                               // DW_AT_comp_dir
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 2                                // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 64                               // DW_AT_frame_base
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 39                               // DW_AT_prototyped
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 3                                // Abbreviation Code
-; CHECK: // .b8 52                               // DW_TAG_variable
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 2                                // DW_AT_location
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 4                                // Abbreviation Code
-; CHECK: // .b8 19                               // DW_TAG_structure_type
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 5                                // Abbreviation Code
-; CHECK: // .b8 13                               // DW_TAG_member
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 56                               // DW_AT_data_member_location
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 6                                // Abbreviation Code
-; CHECK: // .b8 36                               // DW_TAG_base_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 62                               // DW_AT_encoding
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 0                                // EOM(3)
-; CHECK: // }
-; CHECK: // .section .debug_info
-; CHECK: // {
-; CHECK: // .b32 135                             // Length of Unit
-; CHECK: // .b8 2                                // DWARF version number
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
-; CHECK: // .b8 8                                // Address Size (in bytes)
-; CHECK: // .b8 1                                // Abbrev [1] 0xb:0x80 DW_TAG_compile_unit
-; CHECK: // .b8 99,108,97,110,103                // DW_AT_producer
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_language
-; CHECK: // .b8 0
-; CHECK: // .b8 116,46,99                        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_line                     // DW_AT_stmt_list
-; CHECK: // .b8 116,101,115,116                  // DW_AT_comp_dir
-; CHECK: // .b8 0
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end0                      // DW_AT_high_pc
-; CHECK: // .b8 2                                // Abbrev [2] 0x31:0x3d DW_TAG_subprogram
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end0                      // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_AT_frame_base
-; CHECK: // .b8 156
-; CHECK: // .b8 117,115,101,95,100,98,103,95,100,101,99,108,97,114,101 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_prototyped
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // Abbrev [3] 0x58:0x15 DW_TAG_variable
-; CHECK: // .b8 11                               // DW_AT_location
-; CHECK: // .b8 3
-; CHECK: // .b64 __local_depot0
-; CHECK: // .b8 35
-; CHECK: // .b8 0
-; CHECK: // .b8 111                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 4                                // DW_AT_decl_line
-; CHECK: // .b32 110                             // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 4                                // Abbrev [4] 0x6e:0x15 DW_TAG_structure_type
-; CHECK: // .b8 70,111,111                       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 1                                // DW_AT_decl_line
-; CHECK: // .b8 5                                // Abbrev [5] 0x76:0xc DW_TAG_member
-; CHECK: // .b8 120                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 131                             // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 1                                // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 0
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 6                                // Abbrev [6] 0x83:0x7 DW_TAG_base_type
-; CHECK: // .b8 105,110,116                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_encoding
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // }
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 1                                // Abbreviation Code
+; CHECK-NEXT: // .b8 17                               // DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 37                               // DW_AT_producer
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 19                               // DW_AT_language
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 16                               // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 6                                // DW_FORM_data4
+; CHECK-NEXT: // .b8 27                               // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 2                                // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 64                               // DW_AT_frame_base
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 39                               // DW_AT_prototyped
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 3                                // Abbreviation Code
+; CHECK-NEXT: // .b8 52                               // DW_TAG_variable
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 2                                // DW_AT_location
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 4                                // Abbreviation Code
+; CHECK-NEXT: // .b8 19                               // DW_TAG_structure_type
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 5                                // Abbreviation Code
+; CHECK-NEXT: // .b8 13                               // DW_TAG_member
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 56                               // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 6                                // Abbreviation Code
+; CHECK-NEXT: // .b8 36                               // DW_TAG_base_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 62                               // DW_AT_encoding
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 0                                // EOM(3)
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_info
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b32 135                             // Length of Unit
+; CHECK-NEXT: // .b8 2                                // DWARF version number
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
+; CHECK-NEXT: // .b8 8                                // Address Size (in bytes)
+; CHECK-NEXT: // .b8 1                                // Abbrev [1] 0xb:0x80 DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 99,108,97,110,103                // DW_AT_producer
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_language
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,46,99                        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_line                     // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 116,101,115,116                  // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end0                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 2                                // Abbrev [2] 0x31:0x3d DW_TAG_subprogram
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end0                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_AT_frame_base
+; CHECK-NEXT: // .b8 156
+; CHECK-NEXT: // .b8 117,115,101,95,100,98,103,95,100,101,99,108,97,114,101 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_prototyped
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x58:0x15 DW_TAG_variable
+; CHECK-NEXT: // .b8 11                               // DW_AT_location
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b64 __local_depot0
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 110                             // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x6e:0x15 DW_TAG_structure_type
+; CHECK-NEXT: // .b8 70,111,111                       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x76:0xc DW_TAG_member
+; CHECK-NEXT: // .b8 120                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 131                             // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x83:0x7 DW_TAG_base_type
+; CHECK-NEXT: // .b8 105,110,116                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // }
 
 %struct.Foo = type { i32 }
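The DW_TAG_variable entry above doubles as a worked example of a raw DWARF location expression. Decoding the 11-byte exprloc the test checks (opcode values per the DWARF spec):

    // .b8  11               exprloc length: 1 + 8 + 1 + 1 = 11 bytes
    // .b8  3                DW_OP_addr (0x03)
    // .b64 __local_depot0   operand: 8-byte address of the local stack depot
    // .b8  35               DW_OP_plus_uconst (0x23)
    // .b8  0                operand: ULEB128 offset of +0

so the variable lives at __local_depot0 + 0.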
 
diff --git a/test/DebugInfo/NVPTX/debug-file-loc.ll b/test/DebugInfo/NVPTX/debug-file-loc.ll
index a9ea67c..be4b2ef 100644
--- a/test/DebugInfo/NVPTX/debug-file-loc.ll
+++ b/test/DebugInfo/NVPTX/debug-file-loc.ll
@@ -30,51 +30,51 @@
 ; CHECK-DAG: .file [[FOO]] "{{.*}}foo.h"
 ; CHECK-DAG: .file [[BAR]] "{{.*}}bar.cu"
 ; CHECK: // .section .debug_abbrev
-; CHECK: // {
-; CHECK: // .b8 1                                // Abbreviation Code
-; CHECK: // .b8 17                               // DW_TAG_compile_unit
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 37                               // DW_AT_producer
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 19                               // DW_AT_language
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 16                               // DW_AT_stmt_list
-; CHECK: // .b8 6                                // DW_FORM_data4
-; CHECK: // .b8 27                               // DW_AT_comp_dir
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 0                                // EOM(3)
-; CHECK: // }
-; CHECK: // .section .debug_info
-; CHECK: // {
-; CHECK: // .b32 50                              // Length of Unit
-; CHECK: // .b8 2                                // DWARF version number
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
-; CHECK: // .b8 8                                // Address Size (in bytes)
-; CHECK: // .b8 1                                // Abbrev [1] 0xb:0x2b DW_TAG_compile_unit
-; CHECK: // .b8 0                                // DW_AT_producer
-; CHECK: // .b8 4                                // DW_AT_language
-; CHECK: // .b8 0
-; CHECK: // .b8 98,97,114,46,99,117              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_line                     // DW_AT_stmt_list
-; CHECK: // .b8 47,115,111,117,114,99,101,47,100,105,114                // DW_AT_comp_dir
-; CHECK: // .b8 0
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end1                      // DW_AT_high_pc
-; CHECK: // }
-; CHECK: // .section .debug_macinfo
-; CHECK: // {
-; CHECK: // .b8 0                                // End Of Macro List Mark
-; CHECK: // }
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 1                                // Abbreviation Code
+; CHECK-NEXT: // .b8 17                               // DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 37                               // DW_AT_producer
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 19                               // DW_AT_language
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 16                               // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 6                                // DW_FORM_data4
+; CHECK-NEXT: // .b8 27                               // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 0                                // EOM(3)
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_info
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b32 50                              // Length of Unit
+; CHECK-NEXT: // .b8 2                                // DWARF version number
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
+; CHECK-NEXT: // .b8 8                                // Address Size (in bytes)
+; CHECK-NEXT: // .b8 1                                // Abbrev [1] 0xb:0x2b DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 0                                // DW_AT_producer
+; CHECK-NEXT: // .b8 4                                // DW_AT_language
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 98,97,114,46,99,117              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_line                     // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 47,115,111,117,114,99,101,47,100,105,114                // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end1                      // DW_AT_high_pc
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_macinfo
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 0                                // End Of Macro List Mark
+; CHECK:      // }
 
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!8, !9}
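One more decoding note: these abbreviations declare DW_AT_language with form DW_FORM_data2, so the value is emitted as two `.b8` directives in little-endian order. For the dumps above that gives, using the standard DWARF language codes:

    // debug-file-loc.ll:  .b8 4,  .b8 0   -> 0x0004 = DW_LANG_C_plus_plus
    // cu-range-hole.ll:   .b8 12, .b8 0   -> 0x000C = DW_LANG_C99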
diff --git a/test/DebugInfo/NVPTX/debug-info.ll b/test/DebugInfo/NVPTX/debug-info.ll
index 47101b0..aac34da 100644
--- a/test/DebugInfo/NVPTX/debug-info.ll
+++ b/test/DebugInfo/NVPTX/debug-info.ll
@@ -105,4696 +105,4698 @@
 ; CHECK-DAG: .file [[DEBUG_INFO_CU]] "{{.*}}debug-info.cu"
 ; CHECK-DAG: .file [[BUILTUIN_VARS_H]] "{{.*}}clang/include{{/|\\\\}}__clang_cuda_builtin_vars.h"
 
-; CHECK: //	.section	.debug_abbrev
-; CHECK: //	{
-; CHECK: // .b8 1                                // Abbreviation Code
-; CHECK: // .b8 17                               // DW_TAG_compile_unit
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 37                               // DW_AT_producer
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 19                               // DW_AT_language
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 16                               // DW_AT_stmt_list
-; CHECK: // .b8 6                                // DW_FORM_data4
-; CHECK: // .b8 27                               // DW_AT_comp_dir
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 2                                // Abbreviation Code
-; CHECK: // .b8 57                               // DW_TAG_namespace
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 3                                // Abbreviation Code
-; CHECK: // .b8 8                                // DW_TAG_imported_declaration
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 24                               // DW_AT_import
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 4                                // Abbreviation Code
-; CHECK: // .b8 8                                // DW_TAG_imported_declaration
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 24                               // DW_AT_import
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 5                                // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 6                                // Abbreviation Code
-; CHECK: // .b8 5                                // DW_TAG_formal_parameter
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 7                                // Abbreviation Code
-; CHECK: // .b8 36                               // DW_TAG_base_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 62                               // DW_AT_encoding
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 8                                // Abbreviation Code
-; CHECK: // .b8 15                               // DW_TAG_pointer_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 9                                // Abbreviation Code
-; CHECK: // .b8 38                               // DW_TAG_const_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 10                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 11                               // Abbreviation Code
-; CHECK: // .b8 22                               // DW_TAG_typedef
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 12                               // Abbreviation Code
-; CHECK: // .b8 19                               // DW_TAG_structure_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 13                               // Abbreviation Code
-; CHECK: // .b8 19                               // DW_TAG_structure_type
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 14                               // Abbreviation Code
-; CHECK: // .b8 13                               // DW_TAG_member
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 56                               // DW_AT_data_member_location
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 15                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 135,1                            // DW_AT_noreturn
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 16                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 17                               // Abbreviation Code
-; CHECK: // .b8 21                               // DW_TAG_subroutine_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 18                               // Abbreviation Code
-; CHECK: // .b8 15                               // DW_TAG_pointer_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 19                               // Abbreviation Code
-; CHECK: // .b8 38                               // DW_TAG_const_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 20                               // Abbreviation Code
-; CHECK: // .b8 22                               // DW_TAG_typedef
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 21                               // Abbreviation Code
-; CHECK: // .b8 21                               // DW_TAG_subroutine_type
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 22                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 135,1                            // DW_AT_noreturn
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 23                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 24                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 25                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 26                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 27                               // Abbreviation Code
-; CHECK: // .b8 19                               // DW_TAG_structure_type
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 28                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 29                               // Abbreviation Code
-; CHECK: // .b8 5                                // DW_TAG_formal_parameter
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 52                               // DW_AT_artificial
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 30                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 50                               // DW_AT_accessibility
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 31                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 50                               // DW_AT_accessibility
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 32                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 50                               // DW_AT_accessibility
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 33                               // Abbreviation Code
-; CHECK: // .b8 16                               // DW_TAG_reference_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 34                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 71                               // DW_AT_specification
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 32                               // DW_AT_inline
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 35                               // Abbreviation Code
-; CHECK: // .b8 19                               // DW_TAG_structure_type
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 36                               // Abbreviation Code
-; CHECK: // .b8 13                               // DW_TAG_member
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 56                               // DW_AT_data_member_location
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 37                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 38                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 32                               // DW_AT_inline
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 39                               // Abbreviation Code
-; CHECK: // .b8 5                                // DW_TAG_formal_parameter
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 40                               // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 64                               // DW_AT_frame_base
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 41                               // Abbreviation Code
-; CHECK: // .b8 52                               // DW_TAG_variable
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 42                               // Abbreviation Code
-; CHECK: // .b8 29                               // DW_TAG_inlined_subroutine
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 49                               // DW_AT_abstract_origin
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 88                               // DW_AT_call_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 89                               // DW_AT_call_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 43                               // Abbreviation Code
-; CHECK: // .b8 29                               // DW_TAG_inlined_subroutine
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 49                               // DW_AT_abstract_origin
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 88                               // DW_AT_call_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 89                               // DW_AT_call_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 44                               // Abbreviation Code
-; CHECK: // .b8 5                                // DW_TAG_formal_parameter
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 49                               // DW_AT_abstract_origin
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 0                                // EOM(3)
-; CHECK: // }
-; CHECK: // .section .debug_info
-; CHECK: // {
-; CHECK: // .b32 10030                           // Length of Unit
-; CHECK: // .b8 2                                // DWARF version number
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
-; CHECK: // .b8 8                                // Address Size (in bytes)
-; CHECK: // .b8 1                                // Abbrev [1] 0xb:0x2727 DW_TAG_compile_unit
-; CHECK: // .b8 0                                // DW_AT_producer
-; CHECK: // .b8 4                                // DW_AT_language
-; CHECK: // .b8 0
-; CHECK: // .b8 100,101,98,117,103,45,105,110,102,111,46,99,117 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_line                     // DW_AT_stmt_list
-; CHECK: // .b8 47,115,111,109,101,47,100,105,114,101,99,116,111,114,121 // DW_AT_comp_dir
-; CHECK: // .b8 0
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end0                      // DW_AT_high_pc
-; CHECK: // .b8 2                                // Abbrev [2] 0x41:0x588 DW_TAG_namespace
-; CHECK: // .b8 115,116,100                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 3                                // Abbrev [3] 0x46:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 202                              // DW_AT_decl_line
-; CHECK: // .b32 1481                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x4d:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 203                              // DW_AT_decl_line
-; CHECK: // .b32 1525                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x54:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 204                              // DW_AT_decl_line
-; CHECK: // .b32 1563                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x5b:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 205                              // DW_AT_decl_line
-; CHECK: // .b32 1594                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x62:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 206                              // DW_AT_decl_line
-; CHECK: // .b32 1623                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x69:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 207                              // DW_AT_decl_line
-; CHECK: // .b32 1654                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x70:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 208                              // DW_AT_decl_line
-; CHECK: // .b32 1683                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x77:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 209                              // DW_AT_decl_line
-; CHECK: // .b32 1720                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x7e:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 210                              // DW_AT_decl_line
-; CHECK: // .b32 1751                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x85:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 211                              // DW_AT_decl_line
-; CHECK: // .b32 1780                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x8c:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 212                              // DW_AT_decl_line
-; CHECK: // .b32 1809                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x93:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 213                              // DW_AT_decl_line
-; CHECK: // .b32 1852                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x9a:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 214                              // DW_AT_decl_line
-; CHECK: // .b32 1879                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xa1:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 215                              // DW_AT_decl_line
-; CHECK: // .b32 1908                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xa8:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 216                              // DW_AT_decl_line
-; CHECK: // .b32 1935                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xaf:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 217                              // DW_AT_decl_line
-; CHECK: // .b32 1964                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xb6:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 218                              // DW_AT_decl_line
-; CHECK: // .b32 1991                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xbd:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 219                              // DW_AT_decl_line
-; CHECK: // .b32 2020                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xc4:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 220                              // DW_AT_decl_line
-; CHECK: // .b32 2051                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xcb:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 221                              // DW_AT_decl_line
-; CHECK: // .b32 2080                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xd2:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 222                              // DW_AT_decl_line
-; CHECK: // .b32 2115                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xd9:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 223                              // DW_AT_decl_line
-; CHECK: // .b32 2146                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xe0:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 224                              // DW_AT_decl_line
-; CHECK: // .b32 2185                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xe7:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 225                              // DW_AT_decl_line
-; CHECK: // .b32 2220                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xee:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 226                              // DW_AT_decl_line
-; CHECK: // .b32 2255                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xf5:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 227                              // DW_AT_decl_line
-; CHECK: // .b32 2290                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0xfc:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 228                              // DW_AT_decl_line
-; CHECK: // .b32 2339                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x103:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 229                              // DW_AT_decl_line
-; CHECK: // .b32 2382                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x10a:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 230                              // DW_AT_decl_line
-; CHECK: // .b32 2419                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x111:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 231                              // DW_AT_decl_line
-; CHECK: // .b32 2450                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x118:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 232                              // DW_AT_decl_line
-; CHECK: // .b32 2495                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x11f:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 233                              // DW_AT_decl_line
-; CHECK: // .b32 2540                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x126:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 234                              // DW_AT_decl_line
-; CHECK: // .b32 2596                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x12d:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 235                              // DW_AT_decl_line
-; CHECK: // .b32 2627                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x134:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 236                              // DW_AT_decl_line
-; CHECK: // .b32 2666                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x13b:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 237                              // DW_AT_decl_line
-; CHECK: // .b32 2716                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x142:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 238                              // DW_AT_decl_line
-; CHECK: // .b32 2770                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x149:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 239                              // DW_AT_decl_line
-; CHECK: // .b32 2801                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x150:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 240                              // DW_AT_decl_line
-; CHECK: // .b32 2838                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x157:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 241                              // DW_AT_decl_line
-; CHECK: // .b32 2888                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x15e:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 242                              // DW_AT_decl_line
-; CHECK: // .b32 2929                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x165:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 243                              // DW_AT_decl_line
-; CHECK: // .b32 2966                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x16c:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 244                              // DW_AT_decl_line
-; CHECK: // .b32 2999                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x173:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 245                              // DW_AT_decl_line
-; CHECK: // .b32 3030                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x17a:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 246                              // DW_AT_decl_line
-; CHECK: // .b32 3063                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x181:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 247                              // DW_AT_decl_line
-; CHECK: // .b32 3090                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x188:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 248                              // DW_AT_decl_line
-; CHECK: // .b32 3121                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x18f:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 249                              // DW_AT_decl_line
-; CHECK: // .b32 3152                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x196:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 250                              // DW_AT_decl_line
-; CHECK: // .b32 3181                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x19d:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 251                              // DW_AT_decl_line
-; CHECK: // .b32 3210                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1a4:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 252                              // DW_AT_decl_line
-; CHECK: // .b32 3241                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1ab:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 253                              // DW_AT_decl_line
-; CHECK: // .b32 3274                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1b2:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 254                              // DW_AT_decl_line
-; CHECK: // .b32 3309                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1b9:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 255                              // DW_AT_decl_line
-; CHECK: // .b32 3350                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1c0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 0                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3407                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1c8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 1                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3438                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1d0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 2                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3477                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1d8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3522                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1e0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 4                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3555                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1e8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 5                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3600                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1f0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 6                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3646                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x1f8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 7                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3675                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x200:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 8                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3706                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x208:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 9                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3747                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x210:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 10                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3786                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x218:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3821                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x220:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 12                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3848                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x228:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 13                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3877                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x230:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 14                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3906                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x238:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 15                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3933                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x240:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 16                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3962                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x248:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 17                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 3995                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x250:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 102                              // DW_AT_decl_line
-; CHECK: // .b32 4026                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x257:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 121                              // DW_AT_decl_line
-; CHECK: // .b32 4046                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x25e:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 140                              // DW_AT_decl_line
-; CHECK: // .b32 4066                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x265:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 159                              // DW_AT_decl_line
-; CHECK: // .b32 4086                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x26c:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 180                              // DW_AT_decl_line
-; CHECK: // .b32 4112                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x273:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 199                              // DW_AT_decl_line
-; CHECK: // .b32 4132                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x27a:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 218                              // DW_AT_decl_line
-; CHECK: // .b32 4151                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x281:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 237                              // DW_AT_decl_line
-; CHECK: // .b32 4171                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x288:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 0                                // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4190                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x290:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 19                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4210                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x298:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 38                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4231                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2a0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4256                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2a8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 78                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4282                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2b0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 97                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4308                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2b8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 116                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4327                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2c0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 135                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4348                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2c8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 147                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4378                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2d0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 184                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4402                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2d8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 203                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4421                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2e0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 222                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4441                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2e8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 241                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4461                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x2f0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 3                                // DW_AT_decl_file
-; CHECK: // .b8 4                                // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 4480                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x2f8:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 118                              // DW_AT_decl_line
-; CHECK: // .b32 4500                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x2ff:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 119                              // DW_AT_decl_line
-; CHECK: // .b32 4515                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x306:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 121                              // DW_AT_decl_line
-; CHECK: // .b32 4563                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x30d:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 122                              // DW_AT_decl_line
-; CHECK: // .b32 4576                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x314:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 123                              // DW_AT_decl_line
-; CHECK: // .b32 4596                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x31b:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 129                              // DW_AT_decl_line
-; CHECK: // .b32 4625                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x322:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 130                              // DW_AT_decl_line
-; CHECK: // .b32 4645                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x329:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 131                              // DW_AT_decl_line
-; CHECK: // .b32 4666                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x330:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 132                              // DW_AT_decl_line
-; CHECK: // .b32 4687                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x337:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 133                              // DW_AT_decl_line
-; CHECK: // .b32 4815                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x33e:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 134                              // DW_AT_decl_line
-; CHECK: // .b32 4843                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x345:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 135                              // DW_AT_decl_line
-; CHECK: // .b32 4868                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x34c:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 136                              // DW_AT_decl_line
-; CHECK: // .b32 4886                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x353:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 137                              // DW_AT_decl_line
-; CHECK: // .b32 4903                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x35a:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 138                              // DW_AT_decl_line
-; CHECK: // .b32 4931                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x361:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 139                              // DW_AT_decl_line
-; CHECK: // .b32 4952                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x368:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 140                              // DW_AT_decl_line
-; CHECK: // .b32 4978                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x36f:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 142                              // DW_AT_decl_line
-; CHECK: // .b32 5001                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x376:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 143                              // DW_AT_decl_line
-; CHECK: // .b32 5028                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x37d:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 144                              // DW_AT_decl_line
-; CHECK: // .b32 5079                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x384:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 146                              // DW_AT_decl_line
-; CHECK: // .b32 5112                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x38b:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 152                              // DW_AT_decl_line
-; CHECK: // .b32 5145                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x392:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 153                              // DW_AT_decl_line
-; CHECK: // .b32 5160                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x399:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 154                              // DW_AT_decl_line
-; CHECK: // .b32 5189                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3a0:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 155                              // DW_AT_decl_line
-; CHECK: // .b32 5223                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3a7:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 156                              // DW_AT_decl_line
-; CHECK: // .b32 5255                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3ae:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 157                              // DW_AT_decl_line
-; CHECK: // .b32 5287                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3b5:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 158                              // DW_AT_decl_line
-; CHECK: // .b32 5320                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3bc:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 160                              // DW_AT_decl_line
-; CHECK: // .b32 5343                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3c3:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 161                              // DW_AT_decl_line
-; CHECK: // .b32 5388                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3ca:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 241                              // DW_AT_decl_line
-; CHECK: // .b32 5536                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3d1:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 243                              // DW_AT_decl_line
-; CHECK: // .b32 5585                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3d8:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 245                              // DW_AT_decl_line
-; CHECK: // .b32 5604                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3df:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 246                              // DW_AT_decl_line
-; CHECK: // .b32 5490                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3e6:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 247                              // DW_AT_decl_line
-; CHECK: // .b32 5626                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3ed:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 249                              // DW_AT_decl_line
-; CHECK: // .b32 5653                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3f4:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 250                              // DW_AT_decl_line
-; CHECK: // .b32 5768                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x3fb:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 251                              // DW_AT_decl_line
-; CHECK: // .b32 5675                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x402:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 252                              // DW_AT_decl_line
-; CHECK: // .b32 5708                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x409:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 253                              // DW_AT_decl_line
-; CHECK: // .b32 5795                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x410:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 149                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 5838                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x418:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 150                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 5870                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x420:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 151                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 5904                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x428:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 152                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 5936                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x430:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 153                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 5970                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x438:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 154                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6010                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x440:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 155                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6042                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x448:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 156                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6076                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x450:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 157                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6108                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x458:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 158                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6140                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x460:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 159                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6186                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x468:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 160                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6216                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x470:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 161                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6248                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x478:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 162                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6280                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x480:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 163                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6310                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x488:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 164                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6342                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x490:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 165                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6372                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x498:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 166                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6406                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4a0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 167                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6438                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4a8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 168                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6476                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4b0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 169                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6510                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4b8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 170                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6552                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4c0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 171                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6590                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4c8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 172                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6628                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4d0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 173                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6666                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4d8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 174                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6707                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4e0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 175                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6747                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4e8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 176                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6781                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4f0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 177                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6821                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x4f8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 178                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6857                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x500:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 179                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6893                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x508:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 180                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6931                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x510:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 181                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6965                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x518:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 182                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 6999                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x520:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 183                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7031                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x528:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 184                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7063                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x530:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 185                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7093                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x538:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 186                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7127                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x540:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 187                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7163                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x548:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 188                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7202                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x550:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 189                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7245                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x558:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 190                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7294                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x560:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 191                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7330                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x568:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 192                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7379                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x570:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 193                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7428                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x578:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 194                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7460                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x580:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 195                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7494                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x588:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 196                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7538                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x590:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 197                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7580                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x598:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 198                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7610                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x5a0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 199                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7642                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x5a8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 200                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7674                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x5b0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 201                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7704                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x5b8:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 202                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7736                            // DW_AT_import
-; CHECK: // .b8 4                                // Abbrev [4] 0x5c0:0x8 DW_TAG_imported_declaration
-; CHECK: // .b8 10                               // DW_AT_decl_file
-; CHECK: // .b8 203                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 7772                            // DW_AT_import
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x5c9:0x1b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,97,98,115,120        // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,98,115                        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 44                               // DW_AT_decl_line
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x5de:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x5e4:0x11 DW_TAG_base_type
-; CHECK: // .b8 108,111,110,103,32,108,111,110,103,32,105,110,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_encoding
-; CHECK: // .b8 8                                // DW_AT_byte_size
-; CHECK: // .b8 5                                // Abbrev [5] 0x5f5:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,97,99,111,115,102    // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,99,111,115                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 46                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x60c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x612:0x9 DW_TAG_base_type
-; CHECK: // .b8 102,108,111,97,116               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_encoding
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 5                                // Abbrev [5] 0x61b:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,97,99,111,115,104,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,99,111,115,104                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 48                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x634:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x63a:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,97,115,105,110,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,115,105,110                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 50                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x651:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x657:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,97,115,105,110,104,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,115,105,110,104               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 52                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x670:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x676:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,97,116,97,110,102    // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,116,97,110                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 56                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x68d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x693:0x25 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,97,116,97,110,50,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,116,97,110,50                 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 54                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x6ad:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x6b2:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x6b8:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,97,116,97,110,104,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,116,97,110,104                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 58                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x6d1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x6d7:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,99,98,114,116,102    // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,98,114,116                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 60                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x6ee:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x6f4:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,99,101,105,108,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,101,105,108                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 62                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x70b:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x711:0x2b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,56,99,111,112,121,115,105,103,110,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,111,112,121,115,105,103,110   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 64                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x731:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x736:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x73c:0x1b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,99,111,115,102       // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,111,115                       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 66                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x751:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x757:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,99,111,115,104,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,111,115,104                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 68                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x76e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x774:0x1b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,101,114,102,102      // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,114,102                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 72                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x789:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x78f:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,101,114,102,99,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,114,102,99                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 70                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x7a6:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x7ac:0x1b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,101,120,112,102      // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,120,112                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 76                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x7c1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x7c7:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,101,120,112,50,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,120,112,50                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 74                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x7de:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x7e4:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,101,120,112,109,49,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,120,112,109,49               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 78                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x7fd:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x803:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,102,97,98,115,102    // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,97,98,115                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 80                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x81a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x820:0x23 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,102,100,105,109,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,100,105,109                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 82                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x838:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x83d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x843:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,102,108,111,111,114,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,108,111,111,114              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 84                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x85c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x862:0x27 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,102,109,97,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,97                       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 86                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x879:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x87e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x883:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x889:0x23 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,102,109,97,120,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,97,120                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 88                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x8a1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x8a6:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x8ac:0x23 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,102,109,105,110,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,105,110                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 90                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x8c4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x8c9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x8cf:0x23 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,102,109,111,100,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,111,100                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 92                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x8e7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x8ec:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x8f2:0x2a DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,48,102,112,99,108,97,115,115,105,102,121,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,112,99,108,97,115,115,105,102,121 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 94                               // DW_AT_decl_line
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x916:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x91c:0x7 DW_TAG_base_type
-; CHECK: // .b8 105,110,116                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_encoding
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 5                                // Abbrev [5] 0x923:0x26 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,102,114,101,120,112,102,80,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,114,101,120,112              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 96                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x93e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x943:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2377                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x949:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 5                                // Abbrev [5] 0x94e:0x25 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,104,121,112,111,116,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 104,121,112,111,116              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 98                               // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x968:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x96d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x973:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,105,108,111,103,98,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,108,111,103,98               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 100                              // DW_AT_decl_line
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x98c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x992:0x25 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,56,105,115,102,105,110,105,116,101,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,102,105,110,105,116,101  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 102                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x9b1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x9b7:0x8 DW_TAG_base_type
-; CHECK: // .b8 98,111,111,108                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_encoding
-; CHECK: // .b8 1                                // DW_AT_byte_size
-; CHECK: // .b8 5                                // Abbrev [5] 0x9bf:0x2d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,57,105,115,103,114,101,97,116,101,114,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,103,114,101,97,116,101,114 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 106                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x9e1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x9e6:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0x9ec:0x38 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,52,105,115,103,114,101,97,116,101,114,101,113,117,97,108,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,103,114,101,97,116,101,114,101,113,117,97,108 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 105                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xa19:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xa1e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xa24:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,105,115,105,110,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,105,110,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 108                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xa3d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xa43:0x27 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,105,115,108,101,115,115,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,108,101,115,115          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 112                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xa5f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xa64:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xa6a:0x32 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,49,105,115,108,101,115,115,101,113,117,97,108,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,108,101,115,115,101,113,117,97,108 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 111                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xa91:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xa96:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xa9c:0x36 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,51,105,115,108,101,115,115,103,114,101,97,116,101,114,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,108,101,115,115,103,114,101,97,116,101,114 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 114                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xac7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xacc:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xad2:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,105,115,110,97,110,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,110,97,110               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 116                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xaeb:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xaf1:0x25 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,56,105,115,110,111,114,109,97,108,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,110,111,114,109,97,108   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 118                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xb10:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xb16:0x32 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,49,105,115,117,110,111,114,100,101,114,101,100,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,115,117,110,111,114,100,101,114,101,100 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 120                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xb3d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xb42:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xb48:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,108,97,98,115,108    // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,97,98,115                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 121                              // DW_AT_decl_line
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xb5f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0xb65:0xc DW_TAG_base_type
-; CHECK: // .b8 108,111,110,103,32,105,110,116   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_encoding
-; CHECK: // .b8 8                                // DW_AT_byte_size
-; CHECK: // .b8 5                                // Abbrev [5] 0xb71:0x25 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,108,100,101,120,112,102,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,100,101,120,112              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 123                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xb8b:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xb90:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xb96:0x21 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,108,103,97,109,109,97,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,103,97,109,109,97            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 125                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xbb1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xbb7:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,108,108,97,98,115,120 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,108,97,98,115                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 126                              // DW_AT_decl_line
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xbd0:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xbd6:0x21 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,108,108,114,105,110,116,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,108,114,105,110,116          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 128                              // DW_AT_decl_line
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xbf1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xbf7:0x1b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,108,111,103,102      // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 138                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xc0c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xc12:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,108,111,103,49,48,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,49,48                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 130                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xc2b:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xc31:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,108,111,103,49,112,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,49,112               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 132                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xc4a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xc50:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,108,111,103,50,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,50                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 134                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xc67:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xc6d:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,108,111,103,98,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,98                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 136                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xc84:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xc8a:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,108,114,105,110,116,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,114,105,110,116              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 140                              // DW_AT_decl_line
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xca3:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xca9:0x21 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,108,114,111,117,110,100,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,114,111,117,110,100          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 142                              // DW_AT_decl_line
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xcc4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xcca:0x23 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,108,108,114,111,117,110,100,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,108,114,111,117,110,100      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 143                              // DW_AT_decl_line
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xce7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xced:0x24 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,109,111,100,102,102,80,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 109,111,100,102                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 145                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xd06:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xd0b:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3345                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0xd11:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 5                                // Abbrev [5] 0xd16:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,110,97,110,80,75,99  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 110,97,110                       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 146                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xd2d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0xd33:0xa DW_TAG_base_type
-; CHECK: // .b8 100,111,117,98,108,101           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_encoding
-; CHECK: // .b8 8                                // DW_AT_byte_size
-; CHECK: // .b8 8                                // Abbrev [8] 0xd3d:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 3394                            // DW_AT_type
-; CHECK: // .b8 9                                // Abbrev [9] 0xd42:0x5 DW_TAG_const_type
-; CHECK: // .b32 3399                            // DW_AT_type
-; CHECK: // .b8 7                                // Abbrev [7] 0xd47:0x8 DW_TAG_base_type
-; CHECK: // .b8 99,104,97,114                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 8                                // DW_AT_encoding
-; CHECK: // .b8 1                                // DW_AT_byte_size
-; CHECK: // .b8 5                                // Abbrev [5] 0xd4f:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,110,97,110,102,80,75,99 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 110,97,110,102                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 147                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xd68:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xd6e:0x27 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,57,110,101,97,114,98,121,105,110,116,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 110,101,97,114,98,121,105,110,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 149                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xd8f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xd95:0x2d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,57,110,101,120,116,97,102,116,101,114,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 110,101,120,116,97,102,116,101,114 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 151                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xdb7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xdbc:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xdc2:0x21 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,112,111,119,102,105  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 112,111,119                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 155                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xdd8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xddd:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xde3:0x2d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,57,114,101,109,97,105,110,100,101,114,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,101,109,97,105,110,100,101,114 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 157                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xe05:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xe0a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xe10:0x2e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,114,101,109,113,117,111,102,102,80,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,101,109,113,117,111          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 159                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xe2e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xe33:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xe38:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2377                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xe3e:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,114,105,110,116,102  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,105,110,116                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 161                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xe55:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xe5b:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,114,111,117,110,100,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,111,117,110,100              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 163                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xe74:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xe7a:0x29 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,115,99,97,108,98,108,110,102,108 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,99,97,108,98,108,110         // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 165                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xe98:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xe9d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xea3:0x27 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,115,99,97,108,98,110,102,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,99,97,108,98,110             // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 167                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xebf:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0xec4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xeca:0x23 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,115,105,103,110,98,105,116,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,105,103,110,98,105,116       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 169                              // DW_AT_decl_line
-; CHECK: // .b32 2487                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xee7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xeed:0x1b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,115,105,110,102      // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,105,110                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 171                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xf02:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xf08:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,115,105,110,104,102  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,105,110,104                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 173                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xf1f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xf25:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,115,113,114,116,102  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,113,114,116                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 175                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xf3c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xf42:0x1b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,51,116,97,110,102       // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,97,110                       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 177                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xf57:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xf5d:0x1d DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,116,97,110,104,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,97,110,104                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 179                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xf74:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xf7a:0x21 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,116,103,97,109,109,97,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,103,97,109,109,97            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 181                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xf95:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 5                                // Abbrev [5] 0xf9b:0x1f DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,116,114,117,110,99,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,114,117,110,99               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 183                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0xfb4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0xfba:0x14 DW_TAG_subprogram
-; CHECK: // .b8 97,99,111,115                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 54                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0xfc8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0xfce:0x14 DW_TAG_subprogram
-; CHECK: // .b8 97,115,105,110                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 56                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0xfdc:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0xfe2:0x14 DW_TAG_subprogram
-; CHECK: // .b8 97,116,97,110                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 58                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0xff0:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0xff6:0x1a DW_TAG_subprogram
-; CHECK: // .b8 97,116,97,110,50                 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 60                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1005:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x100a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1010:0x14 DW_TAG_subprogram
-; CHECK: // .b8 99,101,105,108                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 178                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x101e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1024:0x13 DW_TAG_subprogram
-; CHECK: // .b8 99,111,115                       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 63                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1031:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1037:0x14 DW_TAG_subprogram
-; CHECK: // .b8 99,111,115,104                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 72                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1045:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x104b:0x13 DW_TAG_subprogram
-; CHECK: // .b8 101,120,112                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 100                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1058:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x105e:0x14 DW_TAG_subprogram
-; CHECK: // .b8 102,97,98,115                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 181                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x106c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1072:0x15 DW_TAG_subprogram
-; CHECK: // .b8 102,108,111,111,114              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 184                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1081:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1087:0x19 DW_TAG_subprogram
-; CHECK: // .b8 102,109,111,100                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 187                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1095:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x109a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x10a0:0x1a DW_TAG_subprogram
-; CHECK: // .b8 102,114,101,120,112              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 103                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x10af:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x10b4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2377                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x10ba:0x1a DW_TAG_subprogram
-; CHECK: // .b8 108,100,101,120,112              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 106                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x10c9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x10ce:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x10d4:0x13 DW_TAG_subprogram
-; CHECK: // .b8 108,111,103                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 109                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x10e1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x10e7:0x15 DW_TAG_subprogram
-; CHECK: // .b8 108,111,103,49,48                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 112                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x10f6:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x10fc:0x19 DW_TAG_subprogram
-; CHECK: // .b8 109,111,100,102                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 115                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x110a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x110f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4373                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x1115:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 10                               // Abbrev [10] 0x111a:0x18 DW_TAG_subprogram
-; CHECK: // .b8 112,111,119                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 153                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1127:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x112c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1132:0x13 DW_TAG_subprogram
-; CHECK: // .b8 115,105,110                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 65                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x113f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1145:0x14 DW_TAG_subprogram
-; CHECK: // .b8 115,105,110,104                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 74                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1153:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1159:0x14 DW_TAG_subprogram
-; CHECK: // .b8 115,113,114,116                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 156                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1167:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x116d:0x13 DW_TAG_subprogram
-; CHECK: // .b8 116,97,110                       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 67                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x117a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x1180:0x14 DW_TAG_subprogram
-; CHECK: // .b8 116,97,110,104                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 76                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x118e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 11                               // Abbrev [11] 0x1194:0xd DW_TAG_typedef
-; CHECK: // .b32 4513                            // DW_AT_type
-; CHECK: // .b8 100,105,118,95,116               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 101                              // DW_AT_decl_line
-; CHECK: // .b8 12                               // Abbrev [12] 0x11a1:0x2 DW_TAG_structure_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 11                               // Abbrev [11] 0x11a3:0xe DW_TAG_typedef
-; CHECK: // .b32 4529                            // DW_AT_type
-; CHECK: // .b8 108,100,105,118,95,116           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 109                              // DW_AT_decl_line
-; CHECK: // .b8 13                               // Abbrev [13] 0x11b1:0x22 DW_TAG_structure_type
-; CHECK: // .b8 16                               // DW_AT_byte_size
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 105                              // DW_AT_decl_line
-; CHECK: // .b8 14                               // Abbrev [14] 0x11b5:0xf DW_TAG_member
-; CHECK: // .b8 113,117,111,116                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 107                              // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 0
-; CHECK: // .b8 14                               // Abbrev [14] 0x11c4:0xe DW_TAG_member
-; CHECK: // .b8 114,101,109                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 108                              // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 8
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 15                               // Abbrev [15] 0x11d3:0xd DW_TAG_subprogram
-; CHECK: // .b8 97,98,111,114,116                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 1                                // DW_AT_noreturn
-; CHECK: // .b8 16                               // Abbrev [16] 0x11e0:0x14 DW_TAG_subprogram
-; CHECK: // .b8 97,98,115                        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 7                                // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x11ee:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x11f4:0x17 DW_TAG_subprogram
-; CHECK: // .b8 97,116,101,120,105,116           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 7                                // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1205:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4619                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x120b:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 4624                            // DW_AT_type
-; CHECK: // .b8 17                               // Abbrev [17] 0x1210:0x1 DW_TAG_subroutine_type
-; CHECK: // .b8 10                               // Abbrev [10] 0x1211:0x14 DW_TAG_subprogram
-; CHECK: // .b8 97,116,111,102                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 6                                // DW_AT_decl_file
-; CHECK: // .b8 26                               // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x121f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x1225:0x15 DW_TAG_subprogram
-; CHECK: // .b8 97,116,111,105                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 22                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1234:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x123a:0x15 DW_TAG_subprogram
-; CHECK: // .b8 97,116,111,108                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 27                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1249:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x124f:0x2b DW_TAG_subprogram
-; CHECK: // .b8 98,115,101,97,114,99,104         // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 7                                // DW_AT_decl_file
-; CHECK: // .b8 20                               // DW_AT_decl_line
-; CHECK: // .b32 4730                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1260:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4731                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1265:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4731                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x126a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x126f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1274:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4772                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 18                               // Abbrev [18] 0x127a:0x1 DW_TAG_pointer_type
-; CHECK: // .b8 8                                // Abbrev [8] 0x127b:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 4736                            // DW_AT_type
-; CHECK: // .b8 19                               // Abbrev [19] 0x1280:0x1 DW_TAG_const_type
-; CHECK: // .b8 11                               // Abbrev [11] 0x1281:0xe DW_TAG_typedef
-; CHECK: // .b32 4751                            // DW_AT_type
-; CHECK: // .b8 115,105,122,101,95,116           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 8                                // DW_AT_decl_file
-; CHECK: // .b8 62                               // DW_AT_decl_line
-; CHECK: // .b8 7                                // Abbrev [7] 0x128f:0x15 DW_TAG_base_type
-; CHECK: // .b8 108,111,110,103,32,117,110,115,105,103,110,101,100,32,105,110,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 7                                // DW_AT_encoding
-; CHECK: // .b8 8                                // DW_AT_byte_size
-; CHECK: // .b8 20                               // Abbrev [20] 0x12a4:0x16 DW_TAG_typedef
-; CHECK: // .b32 4794                            // DW_AT_type
-; CHECK: // .b8 95,95,99,111,109,112,97,114,95,102,110,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 230                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b8 8                                // Abbrev [8] 0x12ba:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 4799                            // DW_AT_type
-; CHECK: // .b8 21                               // Abbrev [21] 0x12bf:0x10 DW_TAG_subroutine_type
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x12c4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4731                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x12c9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4731                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x12cf:0x1c DW_TAG_subprogram
-; CHECK: // .b8 99,97,108,108,111,99             // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 212                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4730                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x12e0:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x12e5:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x12eb:0x19 DW_TAG_subprogram
-; CHECK: // .b8 100,105,118                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 21                               // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 4500                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x12f9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x12fe:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 22                               // Abbrev [22] 0x1304:0x12 DW_TAG_subprogram
-; CHECK: // .b8 101,120,105,116                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 31                               // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 1                                // DW_AT_noreturn
-; CHECK: // .b8 6                                // Abbrev [6] 0x1310:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 23                               // Abbrev [23] 0x1316:0x11 DW_TAG_subprogram
-; CHECK: // .b8 102,114,101,101                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 227                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1321:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4730                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x1327:0x17 DW_TAG_subprogram
-; CHECK: // .b8 103,101,116,101,110,118          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 52                               // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 4926                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1338:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x133e:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 3399                            // DW_AT_type
-; CHECK: // .b8 16                               // Abbrev [16] 0x1343:0x15 DW_TAG_subprogram
-; CHECK: // .b8 108,97,98,115                    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 8                                // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1352:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x1358:0x1a DW_TAG_subprogram
-; CHECK: // .b8 108,100,105,118                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 23                               // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 4515                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1367:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x136c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x1372:0x17 DW_TAG_subprogram
-; CHECK: // .b8 109,97,108,108,111,99            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 210                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4730                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1383:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x1389:0x1b DW_TAG_subprogram
-; CHECK: // .b8 109,98,108,101,110               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 95                               // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1399:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x139e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x13a4:0x23 DW_TAG_subprogram
-; CHECK: // .b8 109,98,115,116,111,119,99,115    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 106                              // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x13b7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5063                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x13bc:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x13c1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x13c7:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 5068                            // DW_AT_type
-; CHECK: // .b8 7                                // Abbrev [7] 0x13cc:0xb DW_TAG_base_type
-; CHECK: // .b8 119,99,104,97,114,95,116         // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_encoding
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 16                               // Abbrev [16] 0x13d7:0x21 DW_TAG_subprogram
-; CHECK: // .b8 109,98,116,111,119,99            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 98                               // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x13e8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5063                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x13ed:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x13f2:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 23                               // Abbrev [23] 0x13f8:0x21 DW_TAG_subprogram
-; CHECK: // .b8 113,115,111,114,116              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 253                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1404:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4730                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1409:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x140e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1413:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4772                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 24                               // Abbrev [24] 0x1419:0xf DW_TAG_subprogram
-; CHECK: // .b8 114,97,110,100                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 118                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 16                               // Abbrev [16] 0x1428:0x1d DW_TAG_subprogram
-; CHECK: // .b8 114,101,97,108,108,111,99        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 224                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 4730                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x143a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4730                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x143f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 23                               // Abbrev [23] 0x1445:0x12 DW_TAG_subprogram
-; CHECK: // .b8 115,114,97,110,100               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 120                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1451:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x1457:0x10 DW_TAG_base_type
-; CHECK: // .b8 117,110,115,105,103,110,101,100,32,105,110,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 7                                // DW_AT_encoding
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 10                               // Abbrev [10] 0x1467:0x1b DW_TAG_subprogram
-; CHECK: // .b8 115,116,114,116,111,100          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 164                              // DW_AT_decl_line
-; CHECK: // .b32 3379                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1477:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x147c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5250                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x1482:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 4926                            // DW_AT_type
-; CHECK: // .b8 10                               // Abbrev [10] 0x1487:0x20 DW_TAG_subprogram
-; CHECK: // .b8 115,116,114,116,111,108          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 183                              // DW_AT_decl_line
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1497:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x149c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5250                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x14a1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x14a7:0x21 DW_TAG_subprogram
-; CHECK: // .b8 115,116,114,116,111,117,108      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 187                              // DW_AT_decl_line
-; CHECK: // .b32 4751                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x14b8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x14bd:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5250                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x14c2:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x14c8:0x17 DW_TAG_subprogram
-; CHECK: // .b8 115,121,115,116,101,109          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 205                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x14d9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x14df:0x23 DW_TAG_subprogram
-; CHECK: // .b8 119,99,115,116,111,109,98,115    // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 109                              // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x14f2:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4926                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x14f7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5378                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x14fc:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4737                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x1502:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 5383                            // DW_AT_type
-; CHECK: // .b8 9                                // Abbrev [9] 0x1507:0x5 DW_TAG_const_type
-; CHECK: // .b32 5068                            // DW_AT_type
-; CHECK: // .b8 16                               // Abbrev [16] 0x150c:0x1c DW_TAG_subprogram
-; CHECK: // .b8 119,99,116,111,109,98            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 102                              // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x151d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 4926                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1522:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5068                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 2                                // Abbrev [2] 0x1528:0x78 DW_TAG_namespace
-; CHECK: // .b8 95,95,103,110,117,95,99,120,120  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 3                                // Abbrev [3] 0x1533:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 201                              // DW_AT_decl_line
-; CHECK: // .b32 5536                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x153a:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 207                              // DW_AT_decl_line
-; CHECK: // .b32 5585                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1541:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 211                              // DW_AT_decl_line
-; CHECK: // .b32 5604                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1548:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 217                              // DW_AT_decl_line
-; CHECK: // .b32 5626                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x154f:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 228                              // DW_AT_decl_line
-; CHECK: // .b32 5653                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1556:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 229                              // DW_AT_decl_line
-; CHECK: // .b32 5675                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x155d:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 230                              // DW_AT_decl_line
-; CHECK: // .b32 5708                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x1564:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 232                              // DW_AT_decl_line
-; CHECK: // .b32 5768                            // DW_AT_import
-; CHECK: // .b8 3                                // Abbrev [3] 0x156b:0x7 DW_TAG_imported_declaration
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 233                              // DW_AT_decl_line
-; CHECK: // .b32 5795                            // DW_AT_import
-; CHECK: // .b8 25                               // Abbrev [25] 0x1572:0x2d DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,57,95,95,103,110,117,95,99,120,120,51,100,105,118,69,120,120 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 100,105,118                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_decl_file
-; CHECK: // .b8 214                              // DW_AT_decl_line
-; CHECK: // .b32 5536                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1594:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1599:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 11                               // Abbrev [11] 0x15a0:0xf DW_TAG_typedef
-; CHECK: // .b32 5551                            // DW_AT_type
-; CHECK: // .b8 108,108,100,105,118,95,116       // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 121                              // DW_AT_decl_line
-; CHECK: // .b8 13                               // Abbrev [13] 0x15af:0x22 DW_TAG_structure_type
-; CHECK: // .b8 16                               // DW_AT_byte_size
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 117                              // DW_AT_decl_line
-; CHECK: // .b8 14                               // Abbrev [14] 0x15b3:0xf DW_TAG_member
-; CHECK: // .b8 113,117,111,116                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 119                              // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 0
-; CHECK: // .b8 14                               // Abbrev [14] 0x15c2:0xe DW_TAG_member
-; CHECK: // .b8 114,101,109                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 120                              // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 8
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 22                               // Abbrev [22] 0x15d1:0x13 DW_TAG_subprogram
-; CHECK: // .b8 95,69,120,105,116                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 45                               // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 1                                // DW_AT_noreturn
-; CHECK: // .b8 6                                // Abbrev [6] 0x15de:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x15e4:0x16 DW_TAG_subprogram
-; CHECK: // .b8 108,108,97,98,115                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 12                               // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x15f4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x15fa:0x1b DW_TAG_subprogram
-; CHECK: // .b8 108,108,100,105,118              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 29                               // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 5536                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x160a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x160f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 16                               // Abbrev [16] 0x1615:0x16 DW_TAG_subprogram
-; CHECK: // .b8 97,116,111,108,108               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 36                               // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1625:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x162b:0x21 DW_TAG_subprogram
-; CHECK: // .b8 115,116,114,116,111,108,108      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 209                              // DW_AT_decl_line
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x163c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1641:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5250                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1646:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x164c:0x22 DW_TAG_subprogram
-; CHECK: // .b8 115,116,114,116,111,117,108,108  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 214                              // DW_AT_decl_line
-; CHECK: // .b32 5742                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x165e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1663:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5250                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1668:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x166e:0x1a DW_TAG_base_type
-; CHECK: // .b8 108,111,110,103,32,108,111,110,103,32,117,110,115,105,103,110,101,100,32,105,110,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 7                                // DW_AT_encoding
-; CHECK: // .b8 8                                // DW_AT_byte_size
-; CHECK: // .b8 10                               // Abbrev [10] 0x1688:0x1b DW_TAG_subprogram
-; CHECK: // .b8 115,116,114,116,111,102          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 172                              // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x1698:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x169d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5250                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 10                               // Abbrev [10] 0x16a3:0x1c DW_TAG_subprogram
-; CHECK: // .b8 115,116,114,116,111,108,100      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_decl_file
-; CHECK: // .b8 175                              // DW_AT_decl_line
-; CHECK: // .b32 5823                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x16b4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3389                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x16b9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5250                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x16bf:0xf DW_TAG_base_type
-; CHECK: // .b8 108,111,110,103,32,100,111,117,98,108,101 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_encoding
-; CHECK: // .b8 8                                // DW_AT_byte_size
-; CHECK: // .b8 26                               // Abbrev [26] 0x16ce:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,97,99,111,115,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,99,111,115,102                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 62                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x16e8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x16ee:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,97,99,111,115,104,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,99,111,115,104,102            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 90                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x170a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1710:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,97,115,105,110,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,115,105,110,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 57                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x172a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1730:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,97,115,105,110,104,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,115,105,110,104,102           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 95                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x174c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1752:0x28 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,97,116,97,110,50,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,116,97,110,50,102             // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 47                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x176f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1774:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x177a:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,97,116,97,110,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,116,97,110,102                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 52                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1794:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x179a:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,97,116,97,110,104,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 97,116,97,110,104,102            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 100                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x17b6:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x17bc:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,99,98,114,116,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,98,114,116,102                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 150                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x17d6:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x17dc:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,99,101,105,108,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,101,105,108,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 155                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x17f6:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x17fc:0x2e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,57,99,111,112,121,115,105,103,110,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,111,112,121,115,105,103,110,102 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 165                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x181f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1824:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x182a:0x1e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,99,111,115,102,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,111,115,102                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 219                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1842:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1848:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,99,111,115,104,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 99,111,115,104,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 32                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1862:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1868:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,101,114,102,99,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,114,102,99,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 210                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1882:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1888:0x1e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,101,114,102,102,102  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,114,102,102                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 200                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x18a0:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x18a6:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,101,120,112,50,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,120,112,50,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 145                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x18c0:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x18c6:0x1e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,101,120,112,102,102  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,120,112,102                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 14                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x18de:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x18e4:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,101,120,112,109,49,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 101,120,112,109,49,102           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 105                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1900:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1906:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,102,97,98,115,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,97,98,115,102                // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 95                               // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1920:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1926:0x26 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,102,100,105,109,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,100,105,109,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 80                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1941:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1946:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x194c:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,102,108,111,111,114,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,108,111,111,114,102          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 85                               // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1968:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x196e:0x2a DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,102,109,97,102,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,97,102                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 32                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1988:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x198d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1992:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1998:0x26 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,102,109,97,120,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,97,120,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 110                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x19b3:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x19b8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x19be:0x26 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,102,109,105,110,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,105,110,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 105                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x19d9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x19de:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x19e4:0x26 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,102,109,111,100,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,109,111,100,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 17                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x19ff:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a04:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1a0a:0x29 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,102,114,101,120,112,102,102,80,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 102,114,101,120,112,102          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 7                                // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a28:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a2d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2377                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1a33:0x28 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,104,121,112,111,116,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 104,121,112,111,116,102          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 110                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a50:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a55:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1a5b:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,105,108,111,103,98,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 105,108,111,103,98,102           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 85                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a77:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1a7d:0x28 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,108,100,101,120,112,102,102,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,100,101,120,112,102          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 240                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a9a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1a9f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1aa5:0x24 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,108,103,97,109,109,97,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,103,97,109,109,97,102        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 235                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1ac3:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1ac9:0x24 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,108,108,114,105,110,116,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,108,114,105,110,116,102      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 125                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1ae7:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1aed:0x26 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,56,108,108,114,111,117,110,100,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,108,114,111,117,110,100,102  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 66                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1508                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1b0d:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1b13:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,108,111,103,49,48,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,49,48,102            // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 76                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1b2f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1b35:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,108,111,103,49,112,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,49,112,102           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 85                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1b51:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1b57:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,108,111,103,50,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,50,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 5                                // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1b71:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1b77:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,108,111,103,98,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,98,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 90                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1b91:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1b97:0x1e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,108,111,103,102,102  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,111,103,102                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 67                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1baf:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1bb5:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,108,114,105,110,116,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,114,105,110,116,102          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 116                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1bd1:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1bd7:0x24 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,108,114,111,117,110,100,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 108,114,111,117,110,100,102      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 71                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1bf5:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1bfb:0x27 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,109,111,100,102,102,102,80,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 109,111,100,102,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 12                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1c17:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1c1c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 3345                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1c22:0x2b DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,48,110,101,97,114,98,121,105,110,116,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 110,101,97,114,98,121,105,110,116,102 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 130                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1c47:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1c4d:0x31 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,48,110,101,120,116,97,102,116,101,114,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 110,101,120,116,97,102,116,101,114,102 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 194                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1c73:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1c78:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1c7e:0x24 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,112,111,119,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 112,111,119,102                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 47                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1c97:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1c9c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1ca2:0x31 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,49,48,114,101,109,97,105,110,100,101,114,102,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,101,109,97,105,110,100,101,114,102 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 22                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1cc8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1ccd:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1cd3:0x31 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,114,101,109,113,117,111,102,102,102,80,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,101,109,113,117,111,102      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 27                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1cf4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1cf9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1cfe:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2377                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1d04:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,114,105,110,116,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,105,110,116,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 111                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1d1e:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1d24:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,114,111,117,110,100,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,111,117,110,100,102          // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 61                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1d40:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1d46:0x2c DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,56,115,99,97,108,98,108,110,102,102,108 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,99,97,108,98,108,110,102     // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 250                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1d67:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1d6c:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2917                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1d72:0x2a DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,115,99,97,108,98,110,102,102,105 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,99,97,108,98,110,102         // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 245                              // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1d91:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x1d96:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1d9c:0x1e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,115,105,110,102,102  // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,105,110,102                  // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 210                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1db4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1dba:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,115,105,110,104,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,105,110,104,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 37                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1dd4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1dda:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,115,113,114,116,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,113,114,116,102              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 139                              // DW_AT_decl_line
-; CHECK: // .b8 3
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1df4:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1dfa:0x1e DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,52,116,97,110,102,102   // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,97,110,102                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 252                              // DW_AT_decl_line
-; CHECK: // .b8 4
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1e12:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1e18:0x20 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,53,116,97,110,104,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,97,110,104,102               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 42                               // DW_AT_decl_line
-; CHECK: // .b8 5
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1e32:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1e38:0x24 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,55,116,103,97,109,109,97,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,103,97,109,109,97,102        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 9                                // DW_AT_decl_file
-; CHECK: // .b8 56                               // DW_AT_decl_line
-; CHECK: // .b8 6
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1e56:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 26                               // Abbrev [26] 0x1e5c:0x22 DW_TAG_subprogram
-; CHECK: // .b8 95,90,76,54,116,114,117,110,99,102,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 116,114,117,110,99,102           // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 11                               // DW_AT_decl_file
-; CHECK: // .b8 150                              // DW_AT_decl_line
-; CHECK: // .b8 2
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 6                                // Abbrev [6] 0x1e78:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 27                               // Abbrev [27] 0x1e7e:0x22a DW_TAG_structure_type
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_byte_size
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 77                               // DW_AT_decl_line
-; CHECK: // .b8 28                               // Abbrev [28] 0x1e9c:0x4f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 98,117,105,108,116,105,110,95,120,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,120 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 78                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 28                               // Abbrev [28] 0x1eeb:0x4f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 98,117,105,108,116,105,110,95,121,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,121 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 79                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 28                               // Abbrev [28] 0x1f3a:0x4f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 98,117,105,108,116,105,110,95,122,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,122 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 80                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 25                               // Abbrev [25] 0x1f89:0x49 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,99,118,53,117,105,110,116,51,69 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 118
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,32,117,105,110,116,51 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 83                               // DW_AT_decl_line
-; CHECK: // .b32 8360                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 29                               // Abbrev [29] 0x1fcb:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 8407                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 30                               // Abbrev [30] 0x1fd2:0x27 DW_TAG_subprogram
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 85                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x1ff2:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 8417                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 30                               // Abbrev [30] 0x1ff9:0x2c DW_TAG_subprogram
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 85                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x2019:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 8417                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x201f:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 8422                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 31                               // Abbrev [31] 0x2025:0x43 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,97,83,69,82,75,83,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,61 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 85                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x205c:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 8407                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x2062:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 8422                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 32                               // Abbrev [32] 0x2068:0x3f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,97,100,69,118 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,38 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 85                               // DW_AT_decl_line
-; CHECK: // .b32 8427                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x20a0:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 8407                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 27                               // Abbrev [27] 0x20a8:0x2f DW_TAG_structure_type
-; CHECK: // .b8 117,105,110,116,51               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_byte_size
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 190                              // DW_AT_decl_line
-; CHECK: // .b8 14                               // Abbrev [14] 0x20b2:0xc DW_TAG_member
-; CHECK: // .b8 120                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 192                              // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 0
-; CHECK: // .b8 14                               // Abbrev [14] 0x20be:0xc DW_TAG_member
-; CHECK: // .b8 121                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 192                              // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 4
-; CHECK: // .b8 14                               // Abbrev [14] 0x20ca:0xc DW_TAG_member
-; CHECK: // .b8 122                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 192                              // DW_AT_decl_line
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 8
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x20d7:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 8412                            // DW_AT_type
-; CHECK: // .b8 9                                // Abbrev [9] 0x20dc:0x5 DW_TAG_const_type
-; CHECK: // .b32 7806                            // DW_AT_type
-; CHECK: // .b8 8                                // Abbrev [8] 0x20e1:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 7806                            // DW_AT_type
-; CHECK: // .b8 33                               // Abbrev [33] 0x20e6:0x5 DW_TAG_reference_type
-; CHECK: // .b32 8412                            // DW_AT_type
-; CHECK: // .b8 8                                // Abbrev [8] 0x20eb:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 7806                            // DW_AT_type
-; CHECK: // .b8 34                               // Abbrev [34] 0x20f0:0x6 DW_TAG_subprogram
-; CHECK: // .b32 7836                            // DW_AT_specification
-; CHECK: // .b8 1                                // DW_AT_inline
-; CHECK: // .b8 27                               // Abbrev [27] 0x20f6:0x228 DW_TAG_structure_type
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_byte_size
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 88                               // DW_AT_decl_line
-; CHECK: // .b8 28                               // Abbrev [28] 0x2114:0x4f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 98,117,105,108,116,105,110,95,120,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,120 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 89                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 28                               // Abbrev [28] 0x2163:0x4f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 98,117,105,108,116,105,110,95,121,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,121 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 90                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 28                               // Abbrev [28] 0x21b2:0x4f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 98,117,105,108,116,105,110,95,122,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,122 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 91                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 25                               // Abbrev [25] 0x2201:0x47 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,99,118,52,100,105,109,51,69,118 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,32,100,105,109,51 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 94                               // DW_AT_decl_line
-; CHECK: // .b32 8990                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 29                               // Abbrev [29] 0x2241:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9166                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 30                               // Abbrev [30] 0x2248:0x27 DW_TAG_subprogram
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 96                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x2268:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9176                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 30                               // Abbrev [30] 0x226f:0x2c DW_TAG_subprogram
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 96                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x228f:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9176                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x2295:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 9181                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 31                               // Abbrev [31] 0x229b:0x43 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,97,83,69,82,75,83,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,61 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 96                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x22d2:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9166                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x22d8:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 9181                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 32                               // Abbrev [32] 0x22de:0x3f DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,97,100,69,118 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,38 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 96                               // DW_AT_decl_line
-; CHECK: // .b32 9186                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x2316:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9166                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 35                               // Abbrev [35] 0x231e:0x9d DW_TAG_structure_type
-; CHECK: // .b8 100,105,109,51                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_byte_size
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 161                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 36                               // Abbrev [36] 0x2328:0xd DW_TAG_member
-; CHECK: // .b8 120                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 163                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 0
-; CHECK: // .b8 36                               // Abbrev [36] 0x2335:0xd DW_TAG_member
-; CHECK: // .b8 121                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 163                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 4
-; CHECK: // .b8 36                               // Abbrev [36] 0x2342:0xd DW_TAG_member
-; CHECK: // .b8 122                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 163                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 2                                // DW_AT_data_member_location
-; CHECK: // .b8 35
-; CHECK: // .b8 8
-; CHECK: // .b8 23                               // Abbrev [23] 0x234f:0x21 DW_TAG_subprogram
-; CHECK: // .b8 100,105,109,51                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 165                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 29                               // Abbrev [29] 0x235a:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9147                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x2360:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x2365:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 6                                // Abbrev [6] 0x236a:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 23                               // Abbrev [23] 0x2370:0x17 DW_TAG_subprogram
-; CHECK: // .b8 100,105,109,51                   // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 166                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 29                               // Abbrev [29] 0x237b:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9147                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x2381:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 9152                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 37                               // Abbrev [37] 0x2387:0x33 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,52,100,105,109,51,99,118,53,117,105,110,116,51,69,118 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,32,117,105,110,116,51 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 167                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b32 9152                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 29                               // Abbrev [29] 0x23b3:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9147                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x23bb:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 8990                            // DW_AT_type
-; CHECK: // .b8 20                               // Abbrev [20] 0x23c0:0xe DW_TAG_typedef
-; CHECK: // .b32 8360                            // DW_AT_type
-; CHECK: // .b8 117,105,110,116,51               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 14                               // DW_AT_decl_file
-; CHECK: // .b8 127                              // DW_AT_decl_line
-; CHECK: // .b8 1
-; CHECK: // .b8 8                                // Abbrev [8] 0x23ce:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 9171                            // DW_AT_type
-; CHECK: // .b8 9                                // Abbrev [9] 0x23d3:0x5 DW_TAG_const_type
-; CHECK: // .b32 8438                            // DW_AT_type
-; CHECK: // .b8 8                                // Abbrev [8] 0x23d8:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 8438                            // DW_AT_type
-; CHECK: // .b8 33                               // Abbrev [33] 0x23dd:0x5 DW_TAG_reference_type
-; CHECK: // .b32 9171                            // DW_AT_type
-; CHECK: // .b8 8                                // Abbrev [8] 0x23e2:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 8438                            // DW_AT_type
-; CHECK: // .b8 34                               // Abbrev [34] 0x23e7:0x6 DW_TAG_subprogram
-; CHECK: // .b32 8468                            // DW_AT_specification
-; CHECK: // .b8 1                                // DW_AT_inline
-; CHECK: // .b8 27                               // Abbrev [27] 0x23ed:0x233 DW_TAG_structure_type
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_byte_size
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 66                               // DW_AT_decl_line
-; CHECK: // .b8 28                               // Abbrev [28] 0x240c:0x50 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,49,55,95,95,102,101,116,99,104 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 95,98,117,105,108,116,105,110,95,120,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,120 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 67                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 28                               // Abbrev [28] 0x245c:0x50 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,49,55,95,95,102,101,116,99,104 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 95,98,117,105,108,116,105,110,95,121,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,121 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 68                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 28                               // Abbrev [28] 0x24ac:0x50 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,49,55,95,95,102,101,116,99,104 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 95,98,117,105,108,116,105,110,95,122,69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,122 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 69                               // DW_AT_decl_line
-; CHECK: // .b32 5207                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 25                               // Abbrev [25] 0x24fc:0x4a DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,99,118,53,117,105,110,116,51 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 69,118
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,32,117,105,110,116,51 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 72                               // DW_AT_decl_line
-; CHECK: // .b32 8360                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 29                               // Abbrev [29] 0x253f:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9760                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 30                               // Abbrev [30] 0x2546:0x28 DW_TAG_subprogram
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 74                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x2567:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9770                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 30                               // Abbrev [30] 0x256e:0x2d DW_TAG_subprogram
-; CHECK: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 74                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x258f:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9770                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x2595:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 9775                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 31                               // Abbrev [31] 0x259b:0x44 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,97,83,69,82,75,83,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,61 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 74                               // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x25d3:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9760                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 6                                // Abbrev [6] 0x25d9:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 9775                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 32                               // Abbrev [32] 0x25df:0x40 DW_TAG_subprogram
-; CHECK: // .b8 95,90,78,75,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,97,100,69,118 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 111,112,101,114,97,116,111,114,38 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 13                               // DW_AT_decl_file
-; CHECK: // .b8 74                               // DW_AT_decl_line
-; CHECK: // .b32 9780                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 3                                // DW_AT_accessibility
-; CHECK:                                         // DW_ACCESS_private
-; CHECK: // .b8 29                               // Abbrev [29] 0x2618:0x6 DW_TAG_formal_parameter
-; CHECK: // .b32 9760                            // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_artificial
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 8                                // Abbrev [8] 0x2620:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 9765                            // DW_AT_type
-; CHECK: // .b8 9                                // Abbrev [9] 0x2625:0x5 DW_TAG_const_type
-; CHECK: // .b32 9197                            // DW_AT_type
-; CHECK: // .b8 8                                // Abbrev [8] 0x262a:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 9197                            // DW_AT_type
-; CHECK: // .b8 33                               // Abbrev [33] 0x262f:0x5 DW_TAG_reference_type
-; CHECK: // .b32 9765                            // DW_AT_type
-; CHECK: // .b8 8                                // Abbrev [8] 0x2634:0x5 DW_TAG_pointer_type
-; CHECK: // .b32 9197                            // DW_AT_type
-; CHECK: // .b8 34                               // Abbrev [34] 0x2639:0x6 DW_TAG_subprogram
-; CHECK: // .b32 9228                            // DW_AT_specification
-; CHECK: // .b8 1                                // DW_AT_inline
-; CHECK: // .b8 38                               // Abbrev [38] 0x263f:0x32 DW_TAG_subprogram
-; CHECK: // .b8 95,90,51,114,101,115,102,102,80,102 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 114,101,115                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 1                                // DW_AT_inline
-; CHECK: // .b8 39                               // Abbrev [39] 0x2653:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 120                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 39                               // Abbrev [39] 0x265c:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 121                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 39                               // Abbrev [39] 0x2665:0xb DW_TAG_formal_parameter
-; CHECK: // .b8 114,101,115                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 3                                // DW_AT_decl_line
-; CHECK: // .b32 3345                            // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 40                               // Abbrev [40] 0x2671:0xc0 DW_TAG_subprogram
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end0                      // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_AT_frame_base
-; CHECK: // .b8 156
-; CHECK: // .b8 95,90,53,115,97,120,112,121,105,102,80,102,83,95 // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 115,97,120,112,121               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 5                                // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 39                               // Abbrev [39] 0x269c:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 110                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 5                                // DW_AT_decl_line
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 39                               // Abbrev [39] 0x26a5:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 97                               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 5                                // DW_AT_decl_line
-; CHECK: // .b32 1554                            // DW_AT_type
-; CHECK: // .b8 39                               // Abbrev [39] 0x26ae:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 120                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 5                                // DW_AT_decl_line
-; CHECK: // .b32 3345                            // DW_AT_type
-; CHECK: // .b8 39                               // Abbrev [39] 0x26b7:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 121                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 5                                // DW_AT_decl_line
-; CHECK: // .b32 3345                            // DW_AT_type
-; CHECK: // .b8 41                               // Abbrev [41] 0x26c0:0x9 DW_TAG_variable
-; CHECK: // .b8 105                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 12                               // DW_AT_decl_file
-; CHECK: // .b8 6                                // DW_AT_decl_line
-; CHECK: // .b32 2332                            // DW_AT_type
-; CHECK: // .b8 42                               // Abbrev [42] 0x26c9:0x17 DW_TAG_inlined_subroutine
-; CHECK: // .b32 8432                            // DW_AT_abstract_origin
-; CHECK: // .b64 Ltmp0                           // DW_AT_low_pc
-; CHECK: // .b64 Ltmp1                           // DW_AT_high_pc
-; CHECK: // .b8 12                               // DW_AT_call_file
-; CHECK: // .b8 6                                // DW_AT_call_line
-; CHECK: // .b8 42                               // Abbrev [42] 0x26e0:0x17 DW_TAG_inlined_subroutine
-; CHECK: // .b32 9191                            // DW_AT_abstract_origin
-; CHECK: // .b64 Ltmp1                           // DW_AT_low_pc
-; CHECK: // .b64 Ltmp2                           // DW_AT_high_pc
-; CHECK: // .b8 12                               // DW_AT_call_file
-; CHECK: // .b8 6                                // DW_AT_call_line
-; CHECK: // .b8 42                               // Abbrev [42] 0x26f7:0x17 DW_TAG_inlined_subroutine
-; CHECK: // .b32 9785                            // DW_AT_abstract_origin
-; CHECK: // .b64 Ltmp2                           // DW_AT_low_pc
-; CHECK: // .b64 Ltmp3                           // DW_AT_high_pc
-; CHECK: // .b8 12                               // DW_AT_call_file
-; CHECK: // .b8 6                                // DW_AT_call_line
-; CHECK: // .b8 43                               // Abbrev [43] 0x270e:0x22 DW_TAG_inlined_subroutine
-; CHECK: // .b32 9791                            // DW_AT_abstract_origin
-; CHECK: // .b64 Ltmp8                           // DW_AT_low_pc
-; CHECK: // .b64 Ltmp9                           // DW_AT_high_pc
-; CHECK: // .b8 12                               // DW_AT_call_file
-; CHECK: // .b8 8                                // DW_AT_call_line
-; CHECK: // .b8 44                               // Abbrev [44] 0x2725:0x5 DW_TAG_formal_parameter
-; CHECK: // .b32 9820                            // DW_AT_abstract_origin
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // }
-; CHECK: // .section .debug_macinfo
-; CHECK: // {
-; CHECK: // .b8 0                                // End Of Macro List Mark
-; CHECK: // }
+; CHECK: // .section .debug_abbrev
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 1                                // Abbreviation Code
+; CHECK-NEXT: // .b8 17                               // DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 37                               // DW_AT_producer
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 19                               // DW_AT_language
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 16                               // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 6                                // DW_FORM_data4
+; CHECK-NEXT: // .b8 27                               // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 2                                // Abbreviation Code
+; CHECK-NEXT: // .b8 57                               // DW_TAG_namespace
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 3                                // Abbreviation Code
+; CHECK-NEXT: // .b8 8                                // DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 24                               // DW_AT_import
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 4                                // Abbreviation Code
+; CHECK-NEXT: // .b8 8                                // DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 24                               // DW_AT_import
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 5                                // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 6                                // Abbreviation Code
+; CHECK-NEXT: // .b8 5                                // DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 7                                // Abbreviation Code
+; CHECK-NEXT: // .b8 36                               // DW_TAG_base_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 62                               // DW_AT_encoding
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 8                                // Abbreviation Code
+; CHECK-NEXT: // .b8 15                               // DW_TAG_pointer_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 9                                // Abbreviation Code
+; CHECK-NEXT: // .b8 38                               // DW_TAG_const_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 10                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 11                               // Abbreviation Code
+; CHECK-NEXT: // .b8 22                               // DW_TAG_typedef
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 12                               // Abbreviation Code
+; CHECK-NEXT: // .b8 19                               // DW_TAG_structure_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 13                               // Abbreviation Code
+; CHECK-NEXT: // .b8 19                               // DW_TAG_structure_type
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 14                               // Abbreviation Code
+; CHECK-NEXT: // .b8 13                               // DW_TAG_member
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 56                               // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 15                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 135,1                            // DW_AT_noreturn
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 16                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 17                               // Abbreviation Code
+; CHECK-NEXT: // .b8 21                               // DW_TAG_subroutine_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 18                               // Abbreviation Code
+; CHECK-NEXT: // .b8 15                               // DW_TAG_pointer_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 19                               // Abbreviation Code
+; CHECK-NEXT: // .b8 38                               // DW_TAG_const_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 20                               // Abbreviation Code
+; CHECK-NEXT: // .b8 22                               // DW_TAG_typedef
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 21                               // Abbreviation Code
+; CHECK-NEXT: // .b8 21                               // DW_TAG_subroutine_type
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 22                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 135,1                            // DW_AT_noreturn
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 23                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 24                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 25                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 26                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 27                               // Abbreviation Code
+; CHECK-NEXT: // .b8 19                               // DW_TAG_structure_type
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 28                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 29                               // Abbreviation Code
+; CHECK-NEXT: // .b8 5                                // DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 52                               // DW_AT_artificial
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 30                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 50                               // DW_AT_accessibility
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 31                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 50                               // DW_AT_accessibility
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 32                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 50                               // DW_AT_accessibility
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 33                               // Abbreviation Code
+; CHECK-NEXT: // .b8 16                               // DW_TAG_reference_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 34                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 71                               // DW_AT_specification
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 32                               // DW_AT_inline
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 35                               // Abbreviation Code
+; CHECK-NEXT: // .b8 19                               // DW_TAG_structure_type
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 36                               // Abbreviation Code
+; CHECK-NEXT: // .b8 13                               // DW_TAG_member
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 56                               // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 37                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 38                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 32                               // DW_AT_inline
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 39                               // Abbreviation Code
+; CHECK-NEXT: // .b8 5                                // DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 40                               // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 64                               // DW_AT_frame_base
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 41                               // Abbreviation Code
+; CHECK-NEXT: // .b8 52                               // DW_TAG_variable
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 42                               // Abbreviation Code
+; CHECK-NEXT: // .b8 29                               // DW_TAG_inlined_subroutine
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 49                               // DW_AT_abstract_origin
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 88                               // DW_AT_call_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 89                               // DW_AT_call_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 43                               // Abbreviation Code
+; CHECK-NEXT: // .b8 29                               // DW_TAG_inlined_subroutine
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 49                               // DW_AT_abstract_origin
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 88                               // DW_AT_call_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 89                               // DW_AT_call_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 44                               // Abbreviation Code
+; CHECK-NEXT: // .b8 5                                // DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 49                               // DW_AT_abstract_origin
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 0                                // EOM(3)
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_info
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b32 10030                           // Length of Unit
+; CHECK-NEXT: // .b8 2                                // DWARF version number
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
+; CHECK-NEXT: // .b8 8                                // Address Size (in bytes)
+; CHECK-NEXT: // .b8 1                                // Abbrev [1] 0xb:0x2727 DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 0                                // DW_AT_producer
+; CHECK-NEXT: // .b8 4                                // DW_AT_language
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 100,101,98,117,103,45,105,110,102,111,46,99,117 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_line                     // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 47,115,111,109,101,47,100,105,114,101,99,116,111,114,121 // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end0                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 2                                // Abbrev [2] 0x41:0x588 DW_TAG_namespace
+; CHECK-NEXT: // .b8 115,116,100                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x46:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 202                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1481                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x4d:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 203                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1525                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x54:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 204                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1563                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x5b:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 205                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1594                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x62:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 206                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1623                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x69:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 207                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1654                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x70:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 208                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1683                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x77:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 209                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1720                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x7e:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 210                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1751                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x85:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 211                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1780                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x8c:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 212                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1809                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x93:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 213                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1852                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x9a:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 214                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1879                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xa1:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 215                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1908                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xa8:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 216                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1935                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xaf:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 217                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1964                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xb6:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 218                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1991                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xbd:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 219                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2020                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xc4:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 220                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2051                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xcb:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 221                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2080                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xd2:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 222                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2115                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xd9:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 223                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2146                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xe0:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 224                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2185                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xe7:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 225                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2220                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xee:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 226                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2255                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xf5:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 227                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2290                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0xfc:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 228                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2339                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x103:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 229                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2382                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x10a:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 230                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2419                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x111:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 231                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2450                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x118:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 232                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2495                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x11f:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 233                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2540                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x126:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 234                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2596                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x12d:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 235                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2627                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x134:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 236                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2666                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x13b:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 237                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2716                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x142:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 238                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2770                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x149:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 239                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2801                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x150:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 240                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2838                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x157:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 241                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2888                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x15e:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 242                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2929                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x165:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 243                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2966                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x16c:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 244                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2999                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x173:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 245                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3030                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x17a:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 246                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3063                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x181:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 247                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3090                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x188:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 248                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3121                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x18f:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 249                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3152                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x196:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 250                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3181                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x19d:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 251                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3210                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1a4:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 252                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3241                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1ab:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 253                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3274                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1b2:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 254                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3309                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1b9:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 255                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3350                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1c0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 0                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3407                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1c8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3438                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1d0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3477                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1d8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3522                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1e0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3555                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1e8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3600                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1f0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 6                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3646                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x1f8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 7                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3675                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x200:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 8                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3706                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x208:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3747                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x210:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3786                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x218:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3821                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x220:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3848                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x228:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3877                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x230:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3906                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x238:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 15                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3933                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x240:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 16                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3962                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x248:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 17                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 3995                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x250:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 102                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4026                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x257:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 121                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4046                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x25e:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 140                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4066                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x265:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 159                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4086                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x26c:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 180                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4112                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x273:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 199                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4132                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x27a:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 218                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4151                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x281:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 237                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4171                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x288:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 0                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4190                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x290:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 19                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4210                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x298:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 38                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4231                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2a0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4256                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2a8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 78                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4282                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2b0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 97                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4308                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2b8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 116                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4327                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2c0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 135                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4348                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2c8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 147                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4378                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2d0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 184                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4402                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2d8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 203                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4421                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2e0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 222                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4441                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2e8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 241                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4461                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x2f0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 4480                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x2f8:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 118                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4500                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x2ff:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 119                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4515                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x306:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 121                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4563                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x30d:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 122                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4576                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x314:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 123                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4596                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x31b:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 129                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4625                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x322:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 130                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4645                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x329:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 131                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4666                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x330:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 132                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4687                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x337:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 133                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4815                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x33e:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 134                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4843                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x345:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 135                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4868                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x34c:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 136                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4886                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x353:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 137                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4903                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x35a:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 138                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4931                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x361:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 139                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4952                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x368:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 140                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4978                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x36f:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 142                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5001                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x376:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 143                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5028                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x37d:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 144                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5079                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x384:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 146                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5112                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x38b:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 152                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5145                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x392:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 153                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5160                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x399:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 154                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5189                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3a0:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 155                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5223                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3a7:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 156                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5255                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3ae:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 157                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5287                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3b5:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 158                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5320                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3bc:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 160                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5343                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3c3:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 161                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5388                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3ca:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 241                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5536                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3d1:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 243                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5585                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3d8:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 245                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5604                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3df:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 246                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5490                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3e6:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 247                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5626                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3ed:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 249                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5653                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3f4:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 250                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5768                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x3fb:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 251                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5675                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x402:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 252                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5708                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x409:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 253                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5795                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x410:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 149                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 5838                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x418:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 150                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 5870                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x420:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 151                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 5904                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x428:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 152                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 5936                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x430:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 153                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 5970                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x438:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 154                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6010                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x440:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 155                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6042                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x448:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 156                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6076                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x450:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 157                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6108                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x458:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 158                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6140                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x460:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 159                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6186                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x468:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 160                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6216                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x470:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 161                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6248                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x478:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 162                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6280                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x480:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 163                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6310                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x488:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 164                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6342                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x490:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 165                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6372                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x498:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 166                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6406                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4a0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 167                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6438                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4a8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 168                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6476                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4b0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 169                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6510                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4b8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 170                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6552                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4c0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 171                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6590                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4c8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 172                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6628                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4d0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 173                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6666                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4d8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 174                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6707                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4e0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 175                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6747                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4e8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 176                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6781                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4f0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 177                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6821                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x4f8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 178                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6857                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x500:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 179                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6893                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x508:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 180                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6931                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x510:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 181                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6965                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x518:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 182                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 6999                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x520:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 183                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7031                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x528:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 184                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7063                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x530:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 185                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7093                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x538:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 186                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7127                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x540:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 187                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7163                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x548:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 188                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7202                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x550:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 189                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7245                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x558:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 190                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7294                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x560:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 191                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7330                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x568:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 192                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7379                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x570:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 193                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7428                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x578:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 194                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7460                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x580:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 195                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7494                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x588:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 196                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7538                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x590:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 197                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7580                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x598:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 198                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7610                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x5a0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 199                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7642                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x5a8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 200                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7674                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x5b0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 201                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7704                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x5b8:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 202                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7736                            // DW_AT_import
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x5c0:0x8 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 10                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 203                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 7772                            // DW_AT_import
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x5c9:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,97,98,115,120        // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,98,115                        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 44                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x5de:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x5e4:0x11 DW_TAG_base_type
+; CHECK-NEXT: // .b8 108,111,110,103,32,108,111,110,103,32,105,110,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 8                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x5f5:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,97,99,111,115,102    // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,99,111,115                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 46                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x60c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x612:0x9 DW_TAG_base_type
+; CHECK-NEXT: // .b8 102,108,111,97,116               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x61b:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,97,99,111,115,104,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,99,111,115,104                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 48                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x634:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x63a:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,97,115,105,110,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,115,105,110                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 50                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x651:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x657:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,97,115,105,110,104,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,115,105,110,104               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 52                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x670:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x676:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,97,116,97,110,102    // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,116,97,110                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 56                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x68d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x693:0x25 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,97,116,97,110,50,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,116,97,110,50                 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 54                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x6ad:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x6b2:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x6b8:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,97,116,97,110,104,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,116,97,110,104                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x6d1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x6d7:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,99,98,114,116,102    // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,98,114,116                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 60                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x6ee:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x6f4:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,99,101,105,108,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,101,105,108                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 62                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x70b:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x711:0x2b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,56,99,111,112,121,115,105,103,110,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,111,112,121,115,105,103,110   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 64                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x731:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x736:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x73c:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,99,111,115,102       // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,111,115                       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 66                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x751:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x757:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,99,111,115,104,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,111,115,104                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 68                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x76e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x774:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,101,114,102,102      // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,114,102                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 72                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x789:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x78f:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,101,114,102,99,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,114,102,99                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 70                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x7a6:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x7ac:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,101,120,112,102      // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,120,112                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 76                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x7c1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x7c7:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,101,120,112,50,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,120,112,50                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 74                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x7de:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x7e4:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,101,120,112,109,49,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,120,112,109,49               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 78                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x7fd:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x803:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,102,97,98,115,102    // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,97,98,115                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 80                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x81a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x820:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,102,100,105,109,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,100,105,109                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 82                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x838:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x83d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x843:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,102,108,111,111,114,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,108,111,111,114              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 84                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x85c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x862:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,102,109,97,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,97                       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 86                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x879:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x87e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x883:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x889:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,102,109,97,120,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,97,120                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 88                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x8a1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x8a6:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x8ac:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,102,109,105,110,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,105,110                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 90                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x8c4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x8c9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x8cf:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,102,109,111,100,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,111,100                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 92                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x8e7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x8ec:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x8f2:0x2a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,48,102,112,99,108,97,115,115,105,102,121,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,112,99,108,97,115,115,105,102,121 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 94                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x916:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x91c:0x7 DW_TAG_base_type
+; CHECK-NEXT: // .b8 105,110,116                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x923:0x26 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,102,114,101,120,112,102,80,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,114,101,120,112              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 96                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x93e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x943:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2377                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x949:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x94e:0x25 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,104,121,112,111,116,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 104,121,112,111,116              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 98                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x968:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x96d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x973:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,105,108,111,103,98,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,108,111,103,98               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 100                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x98c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x992:0x25 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,56,105,115,102,105,110,105,116,101,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,102,105,110,105,116,101  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 102                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x9b1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x9b7:0x8 DW_TAG_base_type
+; CHECK-NEXT: // .b8 98,111,111,108                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 1                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x9bf:0x2d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,57,105,115,103,114,101,97,116,101,114,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,103,114,101,97,116,101,114 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 106                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x9e1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x9e6:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x9ec:0x38 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,52,105,115,103,114,101,97,116,101,114,101,113,117,97,108,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,103,114,101,97,116,101,114,101,113,117,97,108 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 105                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xa19:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xa1e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xa24:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,105,115,105,110,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,105,110,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 108                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xa3d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xa43:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,105,115,108,101,115,115,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,108,101,115,115          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 112                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xa5f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xa64:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xa6a:0x32 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,49,105,115,108,101,115,115,101,113,117,97,108,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,108,101,115,115,101,113,117,97,108 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 111                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xa91:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xa96:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xa9c:0x36 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,51,105,115,108,101,115,115,103,114,101,97,116,101,114,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,108,101,115,115,103,114,101,97,116,101,114 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 114                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xac7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xacc:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xad2:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,105,115,110,97,110,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,110,97,110               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 116                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xaeb:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xaf1:0x25 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,56,105,115,110,111,114,109,97,108,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,110,111,114,109,97,108   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 118                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xb10:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xb16:0x32 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,49,105,115,117,110,111,114,100,101,114,101,100,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,115,117,110,111,114,100,101,114,101,100 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 120                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xb3d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xb42:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xb48:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,108,97,98,115,108    // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,97,98,115                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 121                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xb5f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0xb65:0xc DW_TAG_base_type
+; CHECK-NEXT: // .b8 108,111,110,103,32,105,110,116   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 8                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xb71:0x25 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,108,100,101,120,112,102,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,100,101,120,112              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 123                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xb8b:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xb90:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xb96:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,108,103,97,109,109,97,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,103,97,109,109,97            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 125                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xbb1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xbb7:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,108,108,97,98,115,120 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,108,97,98,115                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 126                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xbd0:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xbd6:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,108,108,114,105,110,116,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,108,114,105,110,116          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 128                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xbf1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xbf7:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,108,111,103,102      // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 138                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xc0c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xc12:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,108,111,103,49,48,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,49,48                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 130                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xc2b:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xc31:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,108,111,103,49,112,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,49,112               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 132                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xc4a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xc50:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,108,111,103,50,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,50                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 134                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xc67:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xc6d:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,108,111,103,98,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,98                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 136                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xc84:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xc8a:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,108,114,105,110,116,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,114,105,110,116              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 140                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xca3:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xca9:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,108,114,111,117,110,100,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,114,111,117,110,100          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 142                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xcc4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xcca:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,108,108,114,111,117,110,100,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,108,114,111,117,110,100      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 143                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xce7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xced:0x24 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,109,111,100,102,102,80,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 109,111,100,102                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 145                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xd06:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xd0b:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3345                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0xd11:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xd16:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,110,97,110,80,75,99  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 110,97,110                       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 146                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xd2d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0xd33:0xa DW_TAG_base_type
+; CHECK-NEXT: // .b8 100,111,117,98,108,101           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 8                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0xd3d:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 3394                            // DW_AT_type
+; CHECK-NEXT: // .b8 9                                // Abbrev [9] 0xd42:0x5 DW_TAG_const_type
+; CHECK-NEXT: // .b32 3399                            // DW_AT_type
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0xd47:0x8 DW_TAG_base_type
+; CHECK-NEXT: // .b8 99,104,97,114                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 8                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 1                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xd4f:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,110,97,110,102,80,75,99 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 110,97,110,102                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 147                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xd68:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xd6e:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,57,110,101,97,114,98,121,105,110,116,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 110,101,97,114,98,121,105,110,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 149                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xd8f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xd95:0x2d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,57,110,101,120,116,97,102,116,101,114,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 110,101,120,116,97,102,116,101,114 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 151                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xdb7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xdbc:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xdc2:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,112,111,119,102,105  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 112,111,119                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 155                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xdd8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xddd:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xde3:0x2d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,57,114,101,109,97,105,110,100,101,114,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,101,109,97,105,110,100,101,114 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 157                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe05:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe0a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xe10:0x2e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,114,101,109,113,117,111,102,102,80,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,101,109,113,117,111          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 159                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe2e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe33:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe38:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2377                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xe3e:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,114,105,110,116,102  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,105,110,116                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 161                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe55:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xe5b:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,114,111,117,110,100,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,111,117,110,100              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 163                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe74:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xe7a:0x29 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,115,99,97,108,98,108,110,102,108 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,99,97,108,98,108,110         // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 165                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe98:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xe9d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xea3:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,115,99,97,108,98,110,102,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,99,97,108,98,110             // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 167                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xebf:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xec4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xeca:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,115,105,103,110,98,105,116,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,105,103,110,98,105,116       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 169                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2487                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xee7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xeed:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,115,105,110,102      // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,105,110                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 171                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xf02:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xf08:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,115,105,110,104,102  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,105,110,104                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 173                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xf1f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xf25:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,115,113,114,116,102  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,113,114,116                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 175                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xf3c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xf42:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,51,116,97,110,102       // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,97,110                       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 177                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xf57:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xf5d:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,116,97,110,104,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,97,110,104                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 179                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xf74:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xf7a:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,116,103,97,109,109,97,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,103,97,109,109,97            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 181                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xf95:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0xf9b:0x1f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,116,114,117,110,99,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,114,117,110,99               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 183                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xfb4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0xfba:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,99,111,115                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 54                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xfc8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0xfce:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,115,105,110                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 56                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xfdc:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0xfe2:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,116,97,110                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0xff0:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0xff6:0x1a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,116,97,110,50                 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 60                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1005:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x100a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1010:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 99,101,105,108                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 178                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x101e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1024:0x13 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 99,111,115                       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 63                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1031:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1037:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 99,111,115,104                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 72                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1045:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x104b:0x13 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 101,120,112                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 100                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1058:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x105e:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 102,97,98,115                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 181                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x106c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1072:0x15 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 102,108,111,111,114              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 184                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1081:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1087:0x19 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 102,109,111,100                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 187                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1095:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x109a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x10a0:0x1a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 102,114,101,120,112              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 103                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x10af:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x10b4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2377                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x10ba:0x1a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 108,100,101,120,112              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 106                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x10c9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x10ce:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x10d4:0x13 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 108,111,103                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 109                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x10e1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x10e7:0x15 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 108,111,103,49,48                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 112                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x10f6:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x10fc:0x19 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 109,111,100,102                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 115                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x110a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x110f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4373                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x1115:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x111a:0x18 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 112,111,119                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 153                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1127:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x112c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1132:0x13 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,105,110                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 65                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x113f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1145:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,105,110,104                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 74                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1153:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1159:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,113,114,116                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 156                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1167:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x116d:0x13 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 116,97,110                       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 67                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x117a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1180:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 116,97,110,104                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 76                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x118e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 11                               // Abbrev [11] 0x1194:0xd DW_TAG_typedef
+; CHECK-NEXT: // .b32 4513                            // DW_AT_type
+; CHECK-NEXT: // .b8 100,105,118,95,116               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 101                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 12                               // Abbrev [12] 0x11a1:0x2 DW_TAG_structure_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 11                               // Abbrev [11] 0x11a3:0xe DW_TAG_typedef
+; CHECK-NEXT: // .b32 4529                            // DW_AT_type
+; CHECK-NEXT: // .b8 108,100,105,118,95,116           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 109                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 13                               // Abbrev [13] 0x11b1:0x22 DW_TAG_structure_type
+; CHECK-NEXT: // .b8 16                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 105                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 14                               // Abbrev [14] 0x11b5:0xf DW_TAG_member
+; CHECK-NEXT: // .b8 113,117,111,116                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 107                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 14                               // Abbrev [14] 0x11c4:0xe DW_TAG_member
+; CHECK-NEXT: // .b8 114,101,109                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 108                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 8
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 15                               // Abbrev [15] 0x11d3:0xd DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,98,111,114,116                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 1                                // DW_AT_noreturn
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x11e0:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,98,115                        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 7                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x11ee:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x11f4:0x17 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,116,101,120,105,116           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 7                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1205:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4619                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x120b:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 4624                            // DW_AT_type
+; CHECK-NEXT: // .b8 17                               // Abbrev [17] 0x1210:0x1 DW_TAG_subroutine_type
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1211:0x14 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,116,111,102                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 6                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 26                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x121f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1225:0x15 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,116,111,105                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 22                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1234:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x123a:0x15 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,116,111,108                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 27                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1249:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x124f:0x2b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 98,115,101,97,114,99,104         // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 7                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 20                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4730                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1260:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4731                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1265:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4731                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x126a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x126f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1274:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4772                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 18                               // Abbrev [18] 0x127a:0x1 DW_TAG_pointer_type
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x127b:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 4736                            // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // Abbrev [19] 0x1280:0x1 DW_TAG_const_type
+; CHECK-NEXT: // .b8 11                               // Abbrev [11] 0x1281:0xe DW_TAG_typedef
+; CHECK-NEXT: // .b32 4751                            // DW_AT_type
+; CHECK-NEXT: // .b8 115,105,122,101,95,116           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 8                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 62                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x128f:0x15 DW_TAG_base_type
+; CHECK-NEXT: // .b8 108,111,110,103,32,117,110,115,105,103,110,101,100,32,105,110,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 7                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 8                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 20                               // Abbrev [20] 0x12a4:0x16 DW_TAG_typedef
+; CHECK-NEXT: // .b32 4794                            // DW_AT_type
+; CHECK-NEXT: // .b8 95,95,99,111,109,112,97,114,95,102,110,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 230                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x12ba:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 4799                            // DW_AT_type
+; CHECK-NEXT: // .b8 21                               // Abbrev [21] 0x12bf:0x10 DW_TAG_subroutine_type
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x12c4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4731                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x12c9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4731                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x12cf:0x1c DW_TAG_subprogram
+; CHECK-NEXT: // .b8 99,97,108,108,111,99             // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 212                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4730                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x12e0:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x12e5:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x12eb:0x19 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 100,105,118                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 21                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 4500                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x12f9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x12fe:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 22                               // Abbrev [22] 0x1304:0x12 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 101,120,105,116                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 31                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 1                                // DW_AT_noreturn
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1310:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 23                               // Abbrev [23] 0x1316:0x11 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 102,114,101,101                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 227                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1321:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4730                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1327:0x17 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 103,101,116,101,110,118          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 52                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 4926                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1338:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x133e:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 3399                            // DW_AT_type
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1343:0x15 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 108,97,98,115                    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 8                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1352:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1358:0x1a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 108,100,105,118                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 23                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 4515                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1367:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x136c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1372:0x17 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 109,97,108,108,111,99            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 210                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4730                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1383:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1389:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 109,98,108,101,110               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 95                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1399:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x139e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x13a4:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 109,98,115,116,111,119,99,115    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 106                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x13b7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5063                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x13bc:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x13c1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x13c7:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 5068                            // DW_AT_type
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x13cc:0xb DW_TAG_base_type
+; CHECK-NEXT: // .b8 119,99,104,97,114,95,116         // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x13d7:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 109,98,116,111,119,99            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 98                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x13e8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5063                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x13ed:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x13f2:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 23                               // Abbrev [23] 0x13f8:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 113,115,111,114,116              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 253                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1404:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4730                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1409:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x140e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1413:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4772                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 24                               // Abbrev [24] 0x1419:0xf DW_TAG_subprogram
+; CHECK-NEXT: // .b8 114,97,110,100                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 118                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1428:0x1d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 114,101,97,108,108,111,99        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 224                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 4730                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x143a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4730                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x143f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 23                               // Abbrev [23] 0x1445:0x12 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,114,97,110,100               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 120                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1451:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x1457:0x10 DW_TAG_base_type
+; CHECK-NEXT: // .b8 117,110,115,105,103,110,101,100,32,105,110,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 7                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1467:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,116,114,116,111,100          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 164                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3379                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1477:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x147c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5250                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x1482:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 4926                            // DW_AT_type
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1487:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,116,114,116,111,108          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 183                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1497:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x149c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5250                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14a1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x14a7:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,116,114,116,111,117,108      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 187                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 4751                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14b8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14bd:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5250                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14c2:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x14c8:0x17 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,121,115,116,101,109          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 205                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14d9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x14df:0x23 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 119,99,115,116,111,109,98,115    // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 109                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14f2:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4926                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14f7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5378                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x14fc:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4737                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x1502:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 5383                            // DW_AT_type
+; CHECK-NEXT: // .b8 9                                // Abbrev [9] 0x1507:0x5 DW_TAG_const_type
+; CHECK-NEXT: // .b32 5068                            // DW_AT_type
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x150c:0x1c DW_TAG_subprogram
+; CHECK-NEXT: // .b8 119,99,116,111,109,98            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 102                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x151d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 4926                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1522:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5068                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 2                                // Abbrev [2] 0x1528:0x78 DW_TAG_namespace
+; CHECK-NEXT: // .b8 95,95,103,110,117,95,99,120,120  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1533:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 201                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5536                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x153a:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 207                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5585                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1541:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 211                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5604                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1548:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 217                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5626                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x154f:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 228                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5653                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1556:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 229                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5675                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x155d:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 230                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5708                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x1564:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 232                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5768                            // DW_AT_import
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x156b:0x7 DW_TAG_imported_declaration
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 233                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5795                            // DW_AT_import
+; CHECK-NEXT: // .b8 25                               // Abbrev [25] 0x1572:0x2d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,57,95,95,103,110,117,95,99,120,120,51,100,105,118,69,120,120 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 100,105,118                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 214                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5536                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1594:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1599:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 11                               // Abbrev [11] 0x15a0:0xf DW_TAG_typedef
+; CHECK-NEXT: // .b32 5551                            // DW_AT_type
+; CHECK-NEXT: // .b8 108,108,100,105,118,95,116       // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 121                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 13                               // Abbrev [13] 0x15af:0x22 DW_TAG_structure_type
+; CHECK-NEXT: // .b8 16                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 117                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 14                               // Abbrev [14] 0x15b3:0xf DW_TAG_member
+; CHECK-NEXT: // .b8 113,117,111,116                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 119                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 14                               // Abbrev [14] 0x15c2:0xe DW_TAG_member
+; CHECK-NEXT: // .b8 114,101,109                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 120                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 8
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 22                               // Abbrev [22] 0x15d1:0x13 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,69,120,105,116                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 45                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 1                                // DW_AT_noreturn
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x15de:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x15e4:0x16 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 108,108,97,98,115                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x15f4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x15fa:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 108,108,100,105,118              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 29                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 5536                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x160a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x160f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 16                               // Abbrev [16] 0x1615:0x16 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 97,116,111,108,108               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 36                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1625:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x162b:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,116,114,116,111,108,108      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 209                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x163c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1641:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5250                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1646:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x164c:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,116,114,116,111,117,108,108  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 214                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5742                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x165e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1663:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5250                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1668:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x166e:0x1a DW_TAG_base_type
+; CHECK-NEXT: // .b8 108,111,110,103,32,108,111,110,103,32,117,110,115,105,103,110,101,100,32,105,110,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 7                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 8                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x1688:0x1b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,116,114,116,111,102          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 172                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1698:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x169d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5250                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 10                               // Abbrev [10] 0x16a3:0x1c DW_TAG_subprogram
+; CHECK-NEXT: // .b8 115,116,114,116,111,108,100      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 175                              // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5823                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x16b4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3389                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x16b9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5250                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x16bf:0xf DW_TAG_base_type
+; CHECK-NEXT: // .b8 108,111,110,103,32,100,111,117,98,108,101 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 8                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x16ce:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,97,99,111,115,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,99,111,115,102                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 62                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x16e8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x16ee:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,97,99,111,115,104,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,99,111,115,104,102            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 90                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x170a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1710:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,97,115,105,110,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,115,105,110,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 57                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x172a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1730:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,97,115,105,110,104,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,115,105,110,104,102           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 95                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x174c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1752:0x28 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,97,116,97,110,50,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,116,97,110,50,102             // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 47                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x176f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1774:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x177a:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,97,116,97,110,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,116,97,110,102                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 52                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1794:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x179a:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,97,116,97,110,104,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 97,116,97,110,104,102            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 100                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x17b6:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x17bc:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,99,98,114,116,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,98,114,116,102                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 150                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x17d6:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x17dc:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,99,101,105,108,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,101,105,108,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 155                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x17f6:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x17fc:0x2e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,57,99,111,112,121,115,105,103,110,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,111,112,121,115,105,103,110,102 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 165                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x181f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1824:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x182a:0x1e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,99,111,115,102,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,111,115,102                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 219                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1842:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1848:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,99,111,115,104,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 99,111,115,104,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 32                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1862:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1868:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,101,114,102,99,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,114,102,99,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 210                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1882:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1888:0x1e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,101,114,102,102,102  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,114,102,102                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 200                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x18a0:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x18a6:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,101,120,112,50,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,120,112,50,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 145                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x18c0:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x18c6:0x1e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,101,120,112,102,102  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,120,112,102                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x18de:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x18e4:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,101,120,112,109,49,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 101,120,112,109,49,102           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 105                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1900:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1906:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,102,97,98,115,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,97,98,115,102                // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 95                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1920:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1926:0x26 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,102,100,105,109,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,100,105,109,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 80                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1941:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1946:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x194c:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,102,108,111,111,114,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,108,111,111,114,102          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 85                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1968:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x196e:0x2a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,102,109,97,102,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,97,102                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 32                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1988:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x198d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1992:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1998:0x26 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,102,109,97,120,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,97,120,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 110                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x19b3:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x19b8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x19be:0x26 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,102,109,105,110,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,105,110,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 105                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x19d9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x19de:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x19e4:0x26 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,102,109,111,100,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,109,111,100,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 17                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x19ff:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a04:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1a0a:0x29 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,102,114,101,120,112,102,102,80,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 102,114,101,120,112,102          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 7                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a28:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a2d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2377                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1a33:0x28 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,104,121,112,111,116,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 104,121,112,111,116,102          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 110                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a50:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a55:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1a5b:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,105,108,111,103,98,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 105,108,111,103,98,102           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 85                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a77:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1a7d:0x28 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,108,100,101,120,112,102,102,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,100,101,120,112,102          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 240                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a9a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1a9f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1aa5:0x24 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,108,103,97,109,109,97,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,103,97,109,109,97,102        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 235                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1ac3:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1ac9:0x24 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,108,108,114,105,110,116,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,108,114,105,110,116,102      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 125                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1ae7:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1aed:0x26 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,56,108,108,114,111,117,110,100,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,108,114,111,117,110,100,102  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 66                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1508                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1b0d:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1b13:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,108,111,103,49,48,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,49,48,102            // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 76                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1b2f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1b35:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,108,111,103,49,112,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,49,112,102           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 85                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1b51:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1b57:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,108,111,103,50,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,50,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1b71:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1b77:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,108,111,103,98,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,98,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 90                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1b91:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1b97:0x1e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,108,111,103,102,102  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,111,103,102                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 67                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1baf:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1bb5:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,108,114,105,110,116,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,114,105,110,116,102          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 116                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1bd1:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1bd7:0x24 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,108,114,111,117,110,100,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 108,114,111,117,110,100,102      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 71                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1bf5:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1bfb:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,109,111,100,102,102,102,80,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 109,111,100,102,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1c17:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1c1c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 3345                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1c22:0x2b DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,48,110,101,97,114,98,121,105,110,116,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 110,101,97,114,98,121,105,110,116,102 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 130                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1c47:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1c4d:0x31 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,48,110,101,120,116,97,102,116,101,114,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 110,101,120,116,97,102,116,101,114,102 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 194                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1c73:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1c78:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1c7e:0x24 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,112,111,119,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 112,111,119,102                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 47                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1c97:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1c9c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1ca2:0x31 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,49,48,114,101,109,97,105,110,100,101,114,102,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,101,109,97,105,110,100,101,114,102 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 22                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1cc8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1ccd:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1cd3:0x31 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,114,101,109,113,117,111,102,102,102,80,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,101,109,113,117,111,102      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 27                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1cf4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1cf9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1cfe:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2377                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1d04:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,114,105,110,116,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,105,110,116,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 111                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1d1e:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1d24:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,114,111,117,110,100,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,111,117,110,100,102          // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 61                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1d40:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1d46:0x2c DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,56,115,99,97,108,98,108,110,102,102,108 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,99,97,108,98,108,110,102     // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 250                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1d67:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1d6c:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2917                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1d72:0x2a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,115,99,97,108,98,110,102,102,105 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,99,97,108,98,110,102         // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 245                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1d91:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1d96:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1d9c:0x1e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,115,105,110,102,102  // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,105,110,102                  // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 210                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1db4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1dba:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,115,105,110,104,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,105,110,104,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 37                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1dd4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1dda:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,115,113,114,116,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,113,114,116,102              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 139                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 3
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1df4:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1dfa:0x1e DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,52,116,97,110,102,102   // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,97,110,102                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 252                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1e12:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1e18:0x20 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,53,116,97,110,104,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,97,110,104,102               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 42                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 5
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1e32:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1e38:0x24 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,55,116,103,97,109,109,97,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,103,97,109,109,97,102        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 9                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 56                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 6
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1e56:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 26                               // Abbrev [26] 0x1e5c:0x22 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,76,54,116,114,117,110,99,102,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 116,114,117,110,99,102           // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 11                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 150                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x1e78:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 27                               // Abbrev [27] 0x1e7e:0x22a DW_TAG_structure_type
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 77                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x1e9c:0x4f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 98,117,105,108,116,105,110,95,120,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,120 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 78                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x1eeb:0x4f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 98,117,105,108,116,105,110,95,121,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,121 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 79                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x1f3a:0x4f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 98,117,105,108,116,105,110,95,122,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,122 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 80                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 25                               // Abbrev [25] 0x1f89:0x49 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,99,118,53,117,105,110,116,51,69 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,32,117,105,110,116,51 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 83                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 8360                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x1fcb:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 8407                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 30                               // Abbrev [30] 0x1fd2:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 85                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x1ff2:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 8417                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 30                               // Abbrev [30] 0x1ff9:0x2c DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 85                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x2019:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 8417                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x201f:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 8422                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 31                               // Abbrev [31] 0x2025:0x43 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,97,83,69,82,75,83,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,61 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 85                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x205c:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 8407                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x2062:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 8422                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 32                               // Abbrev [32] 0x2068:0x3f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,73,100,120,95,116,97,100,69,118 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,38 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 85                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 8427                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x20a0:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 8407                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 27                               // Abbrev [27] 0x20a8:0x2f DW_TAG_structure_type
+; CHECK-NEXT: // .b8 117,105,110,116,51               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 190                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 14                               // Abbrev [14] 0x20b2:0xc DW_TAG_member
+; CHECK-NEXT: // .b8 120                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 192                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 14                               // Abbrev [14] 0x20be:0xc DW_TAG_member
+; CHECK-NEXT: // .b8 121                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 192                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b8 14                               // Abbrev [14] 0x20ca:0xc DW_TAG_member
+; CHECK-NEXT: // .b8 122                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 192                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 8
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x20d7:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 8412                            // DW_AT_type
+; CHECK-NEXT: // .b8 9                                // Abbrev [9] 0x20dc:0x5 DW_TAG_const_type
+; CHECK-NEXT: // .b32 7806                            // DW_AT_type
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x20e1:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 7806                            // DW_AT_type
+; CHECK-NEXT: // .b8 33                               // Abbrev [33] 0x20e6:0x5 DW_TAG_reference_type
+; CHECK-NEXT: // .b32 8412                            // DW_AT_type
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x20eb:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 7806                            // DW_AT_type
+; CHECK-NEXT: // .b8 34                               // Abbrev [34] 0x20f0:0x6 DW_TAG_subprogram
+; CHECK-NEXT: // .b32 7836                            // DW_AT_specification
+; CHECK-NEXT: // .b8 1                                // DW_AT_inline
+; CHECK-NEXT: // .b8 27                               // Abbrev [27] 0x20f6:0x228 DW_TAG_structure_type
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 88                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x2114:0x4f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 98,117,105,108,116,105,110,95,120,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,120 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 89                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x2163:0x4f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 98,117,105,108,116,105,110,95,121,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,121 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 90                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x21b2:0x4f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,49,55,95,95,102,101,116,99,104,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 98,117,105,108,116,105,110,95,122,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,122 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 91                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 25                               // Abbrev [25] 0x2201:0x47 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,99,118,52,100,105,109,51,69,118 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,32,100,105,109,51 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 94                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 8990                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x2241:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9166                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 30                               // Abbrev [30] 0x2248:0x27 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 96                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x2268:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9176                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 30                               // Abbrev [30] 0x226f:0x2c DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 96                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x228f:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9176                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x2295:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9181                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 31                               // Abbrev [31] 0x229b:0x43 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,97,83,69,82,75,83,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,61 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 96                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x22d2:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9166                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x22d8:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9181                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 32                               // Abbrev [32] 0x22de:0x3f DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,53,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,98,108,111,99,107,68,105,109,95,116,97,100,69,118 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,38 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 96                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 9186                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x2316:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9166                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 35                               // Abbrev [35] 0x231e:0x9d DW_TAG_structure_type
+; CHECK-NEXT: // .b8 100,105,109,51                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 161                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 36                               // Abbrev [36] 0x2328:0xd DW_TAG_member
+; CHECK-NEXT: // .b8 120                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 163                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 36                               // Abbrev [36] 0x2335:0xd DW_TAG_member
+; CHECK-NEXT: // .b8 121                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 163                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 4
+; CHECK-NEXT: // .b8 36                               // Abbrev [36] 0x2342:0xd DW_TAG_member
+; CHECK-NEXT: // .b8 122                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 163                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 2                                // DW_AT_data_member_location
+; CHECK-NEXT: // .b8 35
+; CHECK-NEXT: // .b8 8
+; CHECK-NEXT: // .b8 23                               // Abbrev [23] 0x234f:0x21 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 100,105,109,51                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 165                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x235a:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9147                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x2360:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x2365:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x236a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 23                               // Abbrev [23] 0x2370:0x17 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 100,105,109,51                   // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 166                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x237b:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9147                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x2381:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9152                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 37                               // Abbrev [37] 0x2387:0x33 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,52,100,105,109,51,99,118,53,117,105,110,116,51,69,118 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,32,117,105,110,116,51 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 167                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b32 9152                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x23b3:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9147                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x23bb:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 8990                            // DW_AT_type
+; CHECK-NEXT: // .b8 20                               // Abbrev [20] 0x23c0:0xe DW_TAG_typedef
+; CHECK-NEXT: // .b32 8360                            // DW_AT_type
+; CHECK-NEXT: // .b8 117,105,110,116,51               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 14                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 127                              // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x23ce:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 9171                            // DW_AT_type
+; CHECK-NEXT: // .b8 9                                // Abbrev [9] 0x23d3:0x5 DW_TAG_const_type
+; CHECK-NEXT: // .b32 8438                            // DW_AT_type
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x23d8:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 8438                            // DW_AT_type
+; CHECK-NEXT: // .b8 33                               // Abbrev [33] 0x23dd:0x5 DW_TAG_reference_type
+; CHECK-NEXT: // .b32 9171                            // DW_AT_type
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x23e2:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 8438                            // DW_AT_type
+; CHECK-NEXT: // .b8 34                               // Abbrev [34] 0x23e7:0x6 DW_TAG_subprogram
+; CHECK-NEXT: // .b32 8468                            // DW_AT_specification
+; CHECK-NEXT: // .b8 1                                // DW_AT_inline
+; CHECK-NEXT: // .b8 27                               // Abbrev [27] 0x23ed:0x233 DW_TAG_structure_type
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 66                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x240c:0x50 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,49,55,95,95,102,101,116,99,104 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 95,98,117,105,108,116,105,110,95,120,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,120 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 67                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x245c:0x50 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,49,55,95,95,102,101,116,99,104 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 95,98,117,105,108,116,105,110,95,121,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,121 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 68                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 28                               // Abbrev [28] 0x24ac:0x50 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,49,55,95,95,102,101,116,99,104 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 95,98,117,105,108,116,105,110,95,122,69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 95,95,102,101,116,99,104,95,98,117,105,108,116,105,110,95,122 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 69                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 5207                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 25                               // Abbrev [25] 0x24fc:0x4a DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,99,118,53,117,105,110,116,51 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 69,118
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,32,117,105,110,116,51 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 72                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 8360                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x253f:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9760                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 30                               // Abbrev [30] 0x2546:0x28 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 74                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x2567:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9770                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 30                               // Abbrev [30] 0x256e:0x2d DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 74                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x258f:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9770                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x2595:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9775                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 31                               // Abbrev [31] 0x259b:0x44 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,97,83,69,82,75,83,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,61 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 74                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x25d3:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9760                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x25d9:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9775                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 32                               // Abbrev [32] 0x25df:0x40 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,78,75,50,54,95,95,99,117,100,97,95,98,117,105,108,116,105,110,95,116,104,114,101,97,100,73,100,120,95,116,97,100,69,118 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 111,112,101,114,97,116,111,114,38 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 13                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 74                               // DW_AT_decl_line
+; CHECK-NEXT: // .b32 9780                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 3                                // DW_AT_accessibility
+; CHECK-NEXT:                                         // DW_ACCESS_private
+; CHECK-NEXT: // .b8 29                               // Abbrev [29] 0x2618:0x6 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9760                            // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_artificial
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x2620:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 9765                            // DW_AT_type
+; CHECK-NEXT: // .b8 9                                // Abbrev [9] 0x2625:0x5 DW_TAG_const_type
+; CHECK-NEXT: // .b32 9197                            // DW_AT_type
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x262a:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 9197                            // DW_AT_type
+; CHECK-NEXT: // .b8 33                               // Abbrev [33] 0x262f:0x5 DW_TAG_reference_type
+; CHECK-NEXT: // .b32 9765                            // DW_AT_type
+; CHECK-NEXT: // .b8 8                                // Abbrev [8] 0x2634:0x5 DW_TAG_pointer_type
+; CHECK-NEXT: // .b32 9197                            // DW_AT_type
+; CHECK-NEXT: // .b8 34                               // Abbrev [34] 0x2639:0x6 DW_TAG_subprogram
+; CHECK-NEXT: // .b32 9228                            // DW_AT_specification
+; CHECK-NEXT: // .b8 1                                // DW_AT_inline
+; CHECK-NEXT: // .b8 38                               // Abbrev [38] 0x263f:0x32 DW_TAG_subprogram
+; CHECK-NEXT: // .b8 95,90,51,114,101,115,102,102,80,102 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 114,101,115                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 1                                // DW_AT_inline
+; CHECK-NEXT: // .b8 39                               // Abbrev [39] 0x2653:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 120                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 39                               // Abbrev [39] 0x265c:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 121                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 39                               // Abbrev [39] 0x2665:0xb DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 114,101,115                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 3                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3345                            // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 40                               // Abbrev [40] 0x2671:0xc0 DW_TAG_subprogram
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end0                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_AT_frame_base
+; CHECK-NEXT: // .b8 156
+; CHECK-NEXT: // .b8 95,90,53,115,97,120,112,121,105,102,80,102,83,95 // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 115,97,120,112,121               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 39                               // Abbrev [39] 0x269c:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 110                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 39                               // Abbrev [39] 0x26a5:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 97                               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 1554                            // DW_AT_type
+; CHECK-NEXT: // .b8 39                               // Abbrev [39] 0x26ae:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 120                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3345                            // DW_AT_type
+; CHECK-NEXT: // .b8 39                               // Abbrev [39] 0x26b7:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 121                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 5                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 3345                            // DW_AT_type
+; CHECK-NEXT: // .b8 41                               // Abbrev [41] 0x26c0:0x9 DW_TAG_variable
+; CHECK-NEXT: // .b8 105                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 12                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 6                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 2332                            // DW_AT_type
+; CHECK-NEXT: // .b8 42                               // Abbrev [42] 0x26c9:0x17 DW_TAG_inlined_subroutine
+; CHECK-NEXT: // .b32 8432                            // DW_AT_abstract_origin
+; CHECK-NEXT: // .b64 Ltmp0                           // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Ltmp1                           // DW_AT_high_pc
+; CHECK-NEXT: // .b8 12                               // DW_AT_call_file
+; CHECK-NEXT: // .b8 6                                // DW_AT_call_line
+; CHECK-NEXT: // .b8 42                               // Abbrev [42] 0x26e0:0x17 DW_TAG_inlined_subroutine
+; CHECK-NEXT: // .b32 9191                            // DW_AT_abstract_origin
+; CHECK-NEXT: // .b64 Ltmp1                           // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Ltmp2                           // DW_AT_high_pc
+; CHECK-NEXT: // .b8 12                               // DW_AT_call_file
+; CHECK-NEXT: // .b8 6                                // DW_AT_call_line
+; CHECK-NEXT: // .b8 42                               // Abbrev [42] 0x26f7:0x17 DW_TAG_inlined_subroutine
+; CHECK-NEXT: // .b32 9785                            // DW_AT_abstract_origin
+; CHECK-NEXT: // .b64 Ltmp2                           // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Ltmp3                           // DW_AT_high_pc
+; CHECK-NEXT: // .b8 12                               // DW_AT_call_file
+; CHECK-NEXT: // .b8 6                                // DW_AT_call_line
+; CHECK-NEXT: // .b8 43                               // Abbrev [43] 0x270e:0x22 DW_TAG_inlined_subroutine
+; CHECK-NEXT: // .b32 9791                            // DW_AT_abstract_origin
+; CHECK-NEXT: // .b64 Ltmp8                           // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Ltmp9                           // DW_AT_high_pc
+; CHECK-NEXT: // .b8 12                               // DW_AT_call_file
+; CHECK-NEXT: // .b8 8                                // DW_AT_call_line
+; CHECK-NEXT: // .b8 44                               // Abbrev [44] 0x2725:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9811                            // DW_AT_abstract_origin
+; CHECK-NEXT: // .b8 44                               // Abbrev [44] 0x272a:0x5 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b32 9820                            // DW_AT_abstract_origin
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_macinfo
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 0                                // End Of Macro List Mark
+; CHECK:      // }
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() #1
diff --git a/test/DebugInfo/NVPTX/debug-loc-offset.ll b/test/DebugInfo/NVPTX/debug-loc-offset.ll
index 9192651..df7835f 100644
--- a/test/DebugInfo/NVPTX/debug-loc-offset.ll
+++ b/test/DebugInfo/NVPTX/debug-loc-offset.ll
@@ -128,206 +128,206 @@
 !34 = !DILocation(line: 12, scope: !14)
 
 ; CHECK: // .section .debug_abbrev
-; CHECK: // {
-; CHECK: // .b8 1                                // Abbreviation Code
-; CHECK: // .b8 17                               // DW_TAG_compile_unit
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 37                               // DW_AT_producer
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 19                               // DW_AT_language
-; CHECK: // .b8 5                                // DW_FORM_data2
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 16                               // DW_AT_stmt_list
-; CHECK: // .b8 6                                // DW_FORM_data4
-; CHECK: // .b8 27                               // DW_AT_comp_dir
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 2                                // Abbreviation Code
-; CHECK: // .b8 19                               // DW_TAG_structure_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 60                               // DW_AT_declaration
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 3                                // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 64                               // DW_AT_frame_base
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 4                                // Abbreviation Code
-; CHECK: // .b8 52                               // DW_TAG_variable
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 16                               // DW_FORM_ref_addr
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 5                                // Abbreviation Code
-; CHECK: // .b8 46                               // DW_TAG_subprogram
-; CHECK: // .b8 1                                // DW_CHILDREN_yes
-; CHECK: // .b8 17                               // DW_AT_low_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 18                               // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_FORM_addr
-; CHECK: // .b8 64                               // DW_AT_frame_base
-; CHECK: // .b8 10                               // DW_FORM_block1
-; CHECK: // .b8 135,64                           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 63                               // DW_AT_external
-; CHECK: // .b8 12                               // DW_FORM_flag
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 6                                // Abbreviation Code
-; CHECK: // .b8 5                                // DW_TAG_formal_parameter
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 58                               // DW_AT_decl_file
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 59                               // DW_AT_decl_line
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 73                               // DW_AT_type
-; CHECK: // .b8 19                               // DW_FORM_ref4
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 7                                // Abbreviation Code
-; CHECK: // .b8 36                               // DW_TAG_base_type
-; CHECK: // .b8 0                                // DW_CHILDREN_no
-; CHECK: // .b8 3                                // DW_AT_name
-; CHECK: // .b8 8                                // DW_FORM_string
-; CHECK: // .b8 62                               // DW_AT_encoding
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 11                               // DW_AT_byte_size
-; CHECK: // .b8 11                               // DW_FORM_data1
-; CHECK: // .b8 0                                // EOM(1)
-; CHECK: // .b8 0                                // EOM(2)
-; CHECK: // .b8 0                                // EOM(3)
-; CHECK: // }
-; CHECK: // .section .debug_info
-; CHECK: // {
-; CHECK: // .b32 150                             // Length of Unit
-; CHECK: // .b8 2                                // DWARF version number
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
-; CHECK: // .b8 8                                // Address Size (in bytes)
-; CHECK: // .b8 1                                // Abbrev [1] 0xb:0x8f DW_TAG_compile_unit
-; CHECK: // .b8 99,108,97,110,103,32,118,101,114,115,105,111,110,32,51,46,53,46,48,32,40,50,49,48,52,55,57,41 // DW_AT_producer
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_language
-; CHECK: // .b8 0
-; CHECK: // .b8 100,101,98,117,103,45,108,111,99,45,111,102,102,115,101,116,50,46,99,99 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_line                     // DW_AT_stmt_list
-; CHECK: // .b8 47,108,108,118,109,95,99,109,97,107,101,95,103,99,99 // DW_AT_comp_dir
-; CHECK: // .b8 0
-; CHECK: // .b64 Lfunc_begin1                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end1                      // DW_AT_high_pc
-; CHECK: // .b8 2                                // Abbrev [2] 0x64:0x4 DW_TAG_structure_type
-; CHECK: // .b8 65                               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_declaration
-; CHECK: // .b8 3                                // Abbrev [3] 0x68:0x31 DW_TAG_subprogram
-; CHECK: // .b64 Lfunc_begin1                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end1                      // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_AT_frame_base
-; CHECK: // .b8 156
-; CHECK: // .b8 95,90,51,98,97,122,49,65         // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 98,97,122                        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 6                                // DW_AT_decl_line
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 4                                // Abbrev [4] 0x8b:0xd DW_TAG_variable
-; CHECK: // .b8 122                              // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 2                                // DW_AT_decl_file
-; CHECK: // .b8 7                                // DW_AT_decl_line
-; CHECK: // .b64 .debug_info+302                 // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b32 152                             // Length of Unit
-; CHECK: // .b8 2                                // DWARF version number
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
-; CHECK: // .b8 8                                // Address Size (in bytes)
-; CHECK: // .b8 1                                // Abbrev [1] 0xb:0x91 DW_TAG_compile_unit
-; CHECK: // .b8 99,108,97,110,103,32,118,101,114,115,105,111,110,32,51,46,53,46,48,32,40,50,49,48,52,55,57,41 // DW_AT_producer
-; CHECK: // .b8 0
-; CHECK: // .b8 4                                // DW_AT_language
-; CHECK: // .b8 0
-; CHECK: // .b8 100,101,98,117,103,45,108,111,99,45,111,102,102,115,101,116,49,46,99,99 // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b32 .debug_line                     // DW_AT_stmt_list
-; CHECK: // .b8 47,108,108,118,109,95,99,109,97,107,101,95,103,99,99 // DW_AT_comp_dir
-; CHECK: // .b8 0
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end0                      // DW_AT_high_pc
-; CHECK: // .b8 5                                // Abbrev [5] 0x64:0x30 DW_TAG_subprogram
-; CHECK: // .b64 Lfunc_begin0                    // DW_AT_low_pc
-; CHECK: // .b64 Lfunc_end0                      // DW_AT_high_pc
-; CHECK: // .b8 1                                // DW_AT_frame_base
-; CHECK: // .b8 156
-; CHECK: // .b8 95,90,51,98,97,114,105           // DW_AT_MIPS_linkage_name
-; CHECK: // .b8 0
-; CHECK: // .b8 98,97,114                        // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 1                                // DW_AT_decl_line
-; CHECK: // .b32 148                             // DW_AT_type
-; CHECK: // .b8 1                                // DW_AT_external
-; CHECK: // .b8 6                                // Abbrev [6] 0x8a:0x9 DW_TAG_formal_parameter
-; CHECK: // .b8 98                               // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 1                                // DW_AT_decl_file
-; CHECK: // .b8 1                                // DW_AT_decl_line
-; CHECK: // .b32 148                             // DW_AT_type
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // .b8 7                                // Abbrev [7] 0x94:0x7 DW_TAG_base_type
-; CHECK: // .b8 105,110,116                      // DW_AT_name
-; CHECK: // .b8 0
-; CHECK: // .b8 5                                // DW_AT_encoding
-; CHECK: // .b8 4                                // DW_AT_byte_size
-; CHECK: // .b8 0                                // End Of Children Mark
-; CHECK: // }
-; CHECK: // .section .debug_macinfo
-; CHECK: // {
-; CHECK: // .b8 0                                // End Of Macro List Mark
-; CHECK: // }
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 1                                // Abbreviation Code
+; CHECK-NEXT: // .b8 17                               // DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 37                               // DW_AT_producer
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 19                               // DW_AT_language
+; CHECK-NEXT: // .b8 5                                // DW_FORM_data2
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 16                               // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 6                                // DW_FORM_data4
+; CHECK-NEXT: // .b8 27                               // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 2                                // Abbreviation Code
+; CHECK-NEXT: // .b8 19                               // DW_TAG_structure_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 60                               // DW_AT_declaration
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 3                                // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 64                               // DW_AT_frame_base
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 4                                // Abbreviation Code
+; CHECK-NEXT: // .b8 52                               // DW_TAG_variable
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 16                               // DW_FORM_ref_addr
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 5                                // Abbreviation Code
+; CHECK-NEXT: // .b8 46                               // DW_TAG_subprogram
+; CHECK-NEXT: // .b8 1                                // DW_CHILDREN_yes
+; CHECK-NEXT: // .b8 17                               // DW_AT_low_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 18                               // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_FORM_addr
+; CHECK-NEXT: // .b8 64                               // DW_AT_frame_base
+; CHECK-NEXT: // .b8 10                               // DW_FORM_block1
+; CHECK-NEXT: // .b8 135,64                           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 63                               // DW_AT_external
+; CHECK-NEXT: // .b8 12                               // DW_FORM_flag
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 6                                // Abbreviation Code
+; CHECK-NEXT: // .b8 5                                // DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 58                               // DW_AT_decl_file
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 59                               // DW_AT_decl_line
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 73                               // DW_AT_type
+; CHECK-NEXT: // .b8 19                               // DW_FORM_ref4
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 7                                // Abbreviation Code
+; CHECK-NEXT: // .b8 36                               // DW_TAG_base_type
+; CHECK-NEXT: // .b8 0                                // DW_CHILDREN_no
+; CHECK-NEXT: // .b8 3                                // DW_AT_name
+; CHECK-NEXT: // .b8 8                                // DW_FORM_string
+; CHECK-NEXT: // .b8 62                               // DW_AT_encoding
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 11                               // DW_AT_byte_size
+; CHECK-NEXT: // .b8 11                               // DW_FORM_data1
+; CHECK-NEXT: // .b8 0                                // EOM(1)
+; CHECK-NEXT: // .b8 0                                // EOM(2)
+; CHECK-NEXT: // .b8 0                                // EOM(3)
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_info
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b32 150                             // Length of Unit
+; CHECK-NEXT: // .b8 2                                // DWARF version number
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
+; CHECK-NEXT: // .b8 8                                // Address Size (in bytes)
+; CHECK-NEXT: // .b8 1                                // Abbrev [1] 0xb:0x8f DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 99,108,97,110,103,32,118,101,114,115,105,111,110,32,51,46,53,46,48,32,40,50,49,48,52,55,57,41 // DW_AT_producer
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_language
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 100,101,98,117,103,45,108,111,99,45,111,102,102,115,101,116,50,46,99,99 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_line                     // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 47,108,108,118,109,95,99,109,97,107,101,95,103,99,99 // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b64 Lfunc_begin1                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end1                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 2                                // Abbrev [2] 0x64:0x4 DW_TAG_structure_type
+; CHECK-NEXT: // .b8 65                               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_declaration
+; CHECK-NEXT: // .b8 3                                // Abbrev [3] 0x68:0x31 DW_TAG_subprogram
+; CHECK-NEXT: // .b64 Lfunc_begin1                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end1                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_AT_frame_base
+; CHECK-NEXT: // .b8 156
+; CHECK-NEXT: // .b8 95,90,51,98,97,122,49,65         // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 98,97,122                        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 6                                // DW_AT_decl_line
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 4                                // Abbrev [4] 0x8b:0xd DW_TAG_variable
+; CHECK-NEXT: // .b8 122                              // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 2                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 7                                // DW_AT_decl_line
+; CHECK-NEXT: // .b64 .debug_info+302                 // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b32 152                             // Length of Unit
+; CHECK-NEXT: // .b8 2                                // DWARF version number
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_abbrev                   // Offset Into Abbrev. Section
+; CHECK-NEXT: // .b8 8                                // Address Size (in bytes)
+; CHECK-NEXT: // .b8 1                                // Abbrev [1] 0xb:0x91 DW_TAG_compile_unit
+; CHECK-NEXT: // .b8 99,108,97,110,103,32,118,101,114,115,105,111,110,32,51,46,53,46,48,32,40,50,49,48,52,55,57,41 // DW_AT_producer
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 4                                // DW_AT_language
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 100,101,98,117,103,45,108,111,99,45,111,102,102,115,101,116,49,46,99,99 // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b32 .debug_line                     // DW_AT_stmt_list
+; CHECK-NEXT: // .b8 47,108,108,118,109,95,99,109,97,107,101,95,103,99,99 // DW_AT_comp_dir
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end0                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 5                                // Abbrev [5] 0x64:0x30 DW_TAG_subprogram
+; CHECK-NEXT: // .b64 Lfunc_begin0                    // DW_AT_low_pc
+; CHECK-NEXT: // .b64 Lfunc_end0                      // DW_AT_high_pc
+; CHECK-NEXT: // .b8 1                                // DW_AT_frame_base
+; CHECK-NEXT: // .b8 156
+; CHECK-NEXT: // .b8 95,90,51,98,97,114,105           // DW_AT_MIPS_linkage_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 98,97,114                        // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 148                             // DW_AT_type
+; CHECK-NEXT: // .b8 1                                // DW_AT_external
+; CHECK-NEXT: // .b8 6                                // Abbrev [6] 0x8a:0x9 DW_TAG_formal_parameter
+; CHECK-NEXT: // .b8 98                               // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_file
+; CHECK-NEXT: // .b8 1                                // DW_AT_decl_line
+; CHECK-NEXT: // .b32 148                             // DW_AT_type
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // .b8 7                                // Abbrev [7] 0x94:0x7 DW_TAG_base_type
+; CHECK-NEXT: // .b8 105,110,116                      // DW_AT_name
+; CHECK-NEXT: // .b8 0
+; CHECK-NEXT: // .b8 5                                // DW_AT_encoding
+; CHECK-NEXT: // .b8 4                                // DW_AT_byte_size
+; CHECK-NEXT: // .b8 0                                // End Of Children Mark
+; CHECK-NEXT: // }
+; CHECK-NEXT: // .section .debug_macinfo
+; CHECK-NEXT: // {
+; CHECK-NEXT: // .b8 0                                // End Of Macro List Mark
+; CHECK:      // }
diff --git a/test/DebugInfo/Sparc/gnu-window-save.ll b/test/DebugInfo/Sparc/gnu-window-save.ll
index f137277..fb9602b 100644
--- a/test/DebugInfo/Sparc/gnu-window-save.ll
+++ b/test/DebugInfo/Sparc/gnu-window-save.ll
@@ -55,7 +55,7 @@
 !llvm.module.flags = !{!9, !10}
 !llvm.ident = !{!11}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.5 (http://llvm.org/git/clang.git 6a0714fee07fb7c4e32d3972b4fe2ce2f5678cf4) (llvm/ 672e88e934757f76d5c5e5258be41e7615094844)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !2, globals: !2, imports: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.5", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !2, globals: !2, imports: !2)
 !1 = !DIFile(filename: "hello.c", directory: "/home/venkatra/work/benchmarks/test/hello")
 !2 = !{}
 !4 = distinct !DISubprogram(name: "main", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, scopeLine: 4, file: !1, scope: !5, type: !6, retainedNodes: !2)
@@ -65,6 +65,6 @@
 !8 = !DIBasicType(tag: DW_TAG_base_type, name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
 !9 = !{i32 2, !"Dwarf Version", i32 4}
 !10 = !{i32 1, !"Debug Info Version", i32 3}
-!11 = !{!"clang version 3.5 (http://llvm.org/git/clang.git 6a0714fee07fb7c4e32d3972b4fe2ce2f5678cf4) (llvm/ 672e88e934757f76d5c5e5258be41e7615094844)"}
+!11 = !{!"clang version 3.5"}
 !12 = !DILocation(line: 5, scope: !4)
 !13 = !DILocation(line: 6, scope: !4)
diff --git a/test/DebugInfo/Sparc/prologue_end.ll b/test/DebugInfo/Sparc/prologue_end.ll
index 78bd3de..91e6db2 100644
--- a/test/DebugInfo/Sparc/prologue_end.ll
+++ b/test/DebugInfo/Sparc/prologue_end.ll
@@ -1,4 +1,4 @@
-; RUN: llc -disable-fp-elim -O0 %s -mtriple sparc -o - | FileCheck %s
+; RUN: llc -frame-pointer=all -O0 %s -mtriple sparc -o - | FileCheck %s
 
 ; int func(void);
 ; void prologue_end_test() {
diff --git a/test/DebugInfo/SystemZ/prologue_end.ll b/test/DebugInfo/SystemZ/prologue_end.ll
index 8251e82..c430b79 100644
--- a/test/DebugInfo/SystemZ/prologue_end.ll
+++ b/test/DebugInfo/SystemZ/prologue_end.ll
@@ -1,4 +1,4 @@
-; RUN: llc -disable-fp-elim -O0 %s -mtriple s390x-linux-gnu -o - | FileCheck %s
+; RUN: llc -frame-pointer=all -O0 %s -mtriple s390x-linux-gnu -o - | FileCheck %s
 
 ; int func(void);
 ; void prologue_end_test() {
diff --git a/test/DebugInfo/SystemZ/variable-loc.ll b/test/DebugInfo/SystemZ/variable-loc.ll
index a281eb6..6112a81 100644
--- a/test/DebugInfo/SystemZ/variable-loc.ll
+++ b/test/DebugInfo/SystemZ/variable-loc.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=s390x-linux-gnu -disable-fp-elim < %s | FileCheck %s
-; RUN: llc -mtriple=s390x-linux-gnu -disable-fp-elim -filetype=obj < %s \
+; RUN: llc -mtriple=s390x-linux-gnu -frame-pointer=all < %s | FileCheck %s
+; RUN: llc -mtriple=s390x-linux-gnu -frame-pointer=all -filetype=obj < %s \
 ; RUN:     | llvm-dwarfdump -v -debug-info - | FileCheck --check-prefix=DEBUG %s
 ;
 ; This is a regression test making sure the location of variables is correct in
diff --git a/test/DebugInfo/SystemZ/variable-loc.s b/test/DebugInfo/SystemZ/variable-loc.s
index 6940b1b..8e2a592 100644
--- a/test/DebugInfo/SystemZ/variable-loc.s
+++ b/test/DebugInfo/SystemZ/variable-loc.s
@@ -11,8 +11,8 @@
 #
 # A couple of R_390_64s similarly:
 #
-# CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-# CHECK: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000050)
+# CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000 ".text")
+# CHECK: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000050 ".text")
 
 
 	.file	"test/DebugInfo/SystemZ/variable-loc.ll"
diff --git a/test/DebugInfo/WebAssembly/dbg-value-live-interval.ll b/test/DebugInfo/WebAssembly/dbg-value-live-interval.ll
index 1754d2a..c43678e 100644
--- a/test/DebugInfo/WebAssembly/dbg-value-live-interval.ll
+++ b/test/DebugInfo/WebAssembly/dbg-value-live-interval.ll
@@ -4,7 +4,7 @@
 ; CHECK: bb.3.for.body.for.body_crit_edge:
 ; CHECK: [[REG:%[0-9]+]]:i32 = nsw ADD_I32 {{.*}} fib.c:7:7
 ; CHECK: DBG_VALUE [[REG]]:i32, $noreg, !"a", {{.*}} fib.c:5:13
-; CHECK: After WebAssembly Store Results:
+; CHECK: After WebAssembly Memory Intrinsic Results:
 
 ; ModuleID = 'fib.bc'
 source_filename = "fib.c"
diff --git a/test/DebugInfo/WebAssembly/dbg-value-move-clone.mir b/test/DebugInfo/WebAssembly/dbg-value-move-clone.mir
new file mode 100644
index 0000000..1e10c13
--- /dev/null
+++ b/test/DebugInfo/WebAssembly/dbg-value-move-clone.mir
@@ -0,0 +1,65 @@
+# RUN: llc < %s -run-pass=wasm-reg-stackify -x=mir 2>&1 | FileCheck %s
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: %[[REG1:[0-9]+]]:i32 = CONST_I32 0,
+# CHECK-NEXT: DBG_VALUE %[[REG1]],
+# CHECK-NEXT: CALL_VOID @foo, %[[REG1]],
+# CHECK: bb.1:
+# CHECK: %[[REG2:[0-9]+]]:i32 = CONST_I32 0,
+# CHECK-NEXT: DBG_VALUE %[[REG2]],
+# CHECK-NEXT: CALL_VOID @foo, %[[REG2]],
+# CHECK: %[[REG3:[0-9]+]]:i32 = CONST_I32 0,
+# CHECK-NEXT: DBG_VALUE %[[REG3]],
+# CHECK-NEXT: CALL_VOID @foo, %[[REG3]],
+
+--- |
+  target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+  target triple = "wasm32-unknown-unknown"
+
+  declare void @foo(i32)
+  declare i32 @bar()
+
+  define void @test(i64 %arg) {
+    unreachable
+  }
+
+  !llvm.dbg.cu = !{!0}
+  !llvm.module.flags = !{!4}
+  !0 = distinct !DICompileUnit(language: DW_LANG_Rust, file: !2, producer: "clang LLVM (rustc version 1.30.0-dev)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !1, globals: !1)
+  !1 = !{}
+  !2 = !DIFile(filename: "<unknown>", directory: "")
+  !3 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "&str", file: !2, size: 64, align: 32, elements: !{}, identifier: "111094d970b097647de579f9c509ef08")
+  !4 = !{i32 2, !"Debug Info Version", i32 3}
+  !5 = distinct !DILexicalBlock(scope: !6, file: !2, line: 357, column: 8)
+  !6 = distinct !DISubprogram(name: "testfoo", linkageName: "_testba", scope: !7, file: !2, line: 353, type: !8, isLocal: true, isDefinition: true, scopeLine: 353, flags: DIFlagPrototyped, isOptimized: true, unit: !0, templateParams: !1, retainedNodes: !9)
+  !7 = !DINamespace(name: "ptr", scope: null)
+  !8 = !DISubroutineType(types: !1)
+  !9 = !{!10}
+  !10 = !DILocalVariable(name: "val0", scope: !5, file: !2, line: 357, type: !3, align: 4)
+  !11 = !DILocalVariable(name: "val1", scope: !5, file: !2, line: 358, type: !3, align: 4)
+  !12 = !DILocalVariable(name: "val2", scope: !5, file: !2, line: 359, type: !3, align: 4)
+  !13 = !DILocation(line: 357, column: 12, scope: !5)
+  !14 = !DILocation(line: 358, column: 12, scope: !5)
+  !15 = !DILocation(line: 359, column: 12, scope: !5)
+
+---
+name: test
+liveins:
+  - { reg: '$arguments' }
+tracksRegLiveness: true
+body: |
+  bb.0:
+    successors: %bb.1
+    liveins: $arguments
+    %0:i64 = ARGUMENT_i64 0, implicit $arguments
+    %1:i32 = CONST_I32 0, implicit-def dead $arguments
+    DBG_VALUE %1:i32, $noreg, !10, !DIExpression(), debug-location !13; <unknown>:357:12 line no:357
+    CALL_VOID @foo, %1:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
+
+  bb.1:
+    CALL_VOID @foo, %1:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
+    CALL_VOID @foo, %1:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
+    RETURN_VOID implicit-def dead $arguments
+
+...
diff --git a/test/DebugInfo/WebAssembly/dbg-value-move-reg-stackify.mir b/test/DebugInfo/WebAssembly/dbg-value-move-reg-stackify.mir
new file mode 100644
index 0000000..d8ebd42
--- /dev/null
+++ b/test/DebugInfo/WebAssembly/dbg-value-move-reg-stackify.mir
@@ -0,0 +1,60 @@
+# RUN: llc < %s -run-pass=wasm-reg-stackify -x=mir 2>&1 | FileCheck %s
+
+# CHECK: body:
+# CHECK: %1:i32 = I32_WRAP_I64 %0,
+# CHECK-NEXT: DBG_VALUE %1,
+# CHECK-NEXT: %1:i32 = CALL_I32 @bar,
+# CHECK-NEXT: DBG_VALUE %1,
+# CHECK-NEXT: %[[NEWREG:.*]]:i32 = CALL_I32 @bar,
+# CHECK-NEXT: DBG_VALUE %[[NEWREG]],
+# CHECK-NEXT: CALL_VOID @foo, %[[NEWREG]],
+
+--- |
+  target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+  target triple = "wasm32-unknown-unknown"
+
+  declare void @foo(i32)
+  declare i32 @bar()
+
+  define void @test(i64 %arg) {
+    unreachable
+  }
+
+  !llvm.dbg.cu = !{!0}
+  !llvm.module.flags = !{!4}
+  !0 = distinct !DICompileUnit(language: DW_LANG_Rust, file: !2, producer: "clang LLVM (rustc version 1.30.0-dev)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !1, globals: !1)
+  !1 = !{}
+  !2 = !DIFile(filename: "<unknown>", directory: "")
+  !3 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "&str", file: !2, size: 64, align: 32, elements: !{}, identifier: "111094d970b097647de579f9c509ef08")
+  !4 = !{i32 2, !"Debug Info Version", i32 3}
+  !5 = distinct !DILexicalBlock(scope: !6, file: !2, line: 357, column: 8)
+  !6 = distinct !DISubprogram(name: "testfoo", linkageName: "_testba", scope: !7, file: !2, line: 353, type: !8, isLocal: true, isDefinition: true, scopeLine: 353, flags: DIFlagPrototyped, isOptimized: true, unit: !0, templateParams: !1, retainedNodes: !9)
+  !7 = !DINamespace(name: "ptr", scope: null)
+  !8 = !DISubroutineType(types: !1)
+  !9 = !{!10}
+  !10 = !DILocalVariable(name: "val0", scope: !5, file: !2, line: 357, type: !3, align: 4)
+  !11 = !DILocalVariable(name: "val1", scope: !5, file: !2, line: 358, type: !3, align: 4)
+  !12 = !DILocalVariable(name: "val2", scope: !5, file: !2, line: 359, type: !3, align: 4)
+  !13 = !DILocation(line: 357, column: 12, scope: !5)
+  !14 = !DILocation(line: 358, column: 12, scope: !5)
+  !15 = !DILocation(line: 359, column: 12, scope: !5)
+
+---
+name: test
+liveins:
+  - { reg: '$arguments' }
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $arguments
+    %0:i64 = ARGUMENT_i64 0, implicit $arguments
+    %1:i32 = I32_WRAP_I64 %0:i64, implicit-def dead $arguments
+    DBG_VALUE %1:i32, $noreg, !10, !DIExpression(), debug-location !13; <unknown>:357:12 line no:357
+    %1:i32 = CALL_I32 @bar, implicit-def dead $arguments, implicit $sp32, implicit $sp64
+    DBG_VALUE %1:i32, $noreg, !11, !DIExpression(), debug-location !14; <unknown>:357:12 line no:357
+    %1:i32 = CALL_I32 @bar, implicit-def dead $arguments, implicit $sp32, implicit $sp64
+    DBG_VALUE %1:i32, $noreg, !12, !DIExpression(), debug-location !15; <unknown>:357:12 line no:357
+    CALL_VOID @foo, %1:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
+    RETURN_VOID implicit-def dead $arguments
+
+...
diff --git a/test/DebugInfo/X86/PR26148.ll b/test/DebugInfo/X86/PR26148.ll
index 685f2d5..c42b8e5 100644
--- a/test/DebugInfo/X86/PR26148.ll
+++ b/test/DebugInfo/X86/PR26148.ll
@@ -16,11 +16,10 @@
 ; This is similar to the bug in test/DebugInfo/ARM/PR26163.ll, except that there is an
 ; extra non-overlapping range first. Thus, we make sure that the backend actually looks
 ; at all expressions when determining whether to merge ranges, not just the first one.
-; AS in 26163, we expect two ranges (as opposed to one), the first one being zero sized
+; As in 26163, we only expect one range, as the first one is zero-sized.
 ;
 ;
-; CHECK: [0x0000000000000004, 0x0000000000000004): DW_OP_lit3, DW_OP_piece 0x4, DW_OP_reg5 RDI, DW_OP_piece 0x2
-; CHECK: [0x0000000000000004, 0x0000000000000014): DW_OP_lit3, DW_OP_piece 0x4, DW_OP_lit0, DW_OP_piece 0x4
+; CHECK: [0x0000000000000000, 0x000000000000000f): DW_OP_lit3, DW_OP_piece 0x4, DW_OP_lit0, DW_OP_piece 0x4
 
 source_filename = "test/DebugInfo/X86/PR26148.ll"
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@@ -35,11 +34,8 @@
 
 ; Function Attrs: nounwind readnone
 declare void @llvm.dbg.value(metadata, metadata, metadata) #0
-; The attributes are here to force the zero-sized range not to be at the start of
-; the function, which has special interpretation in DWARF. The fact that this happens
-; at all is probably an LLVM bug.
 
-define void @fn1(i16 signext %p1) #1 !dbg !16 {
+define void @fn1(i16 signext %p1) !dbg !16 {
 entry:
   tail call void @llvm.dbg.value(metadata i16 %p1, metadata !20, metadata !23), !dbg !24
   tail call void @llvm.dbg.declare(metadata %struct.S0* undef, metadata !21, metadata !23), !dbg !25
@@ -60,7 +56,6 @@
 }
 
 attributes #0 = { nounwind readnone }
-attributes #1 = { "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" }
 
 !llvm.dbg.cu = !{!2}
 !llvm.module.flags = !{!12, !13, !14}
@@ -68,7 +63,7 @@
 
 !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 4, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 3.9.0 (https://github.com/llvm-mirror/clang 8f258397c5afd7a708bd95770c718e81d08fb11a) (https://github.com/llvm-mirror/llvm 18481855bdfa1b4a424f81be8525db002671348d)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 3.9.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "small.c", directory: "/Users/kfischer/Projects/clangbug")
 !4 = !{}
 !5 = !{!0}
@@ -81,7 +76,7 @@
 !12 = !{i32 2, !"Dwarf Version", i32 2}
 !13 = !{i32 2, !"Debug Info Version", i32 3}
 !14 = !{i32 1, !"PIC Level", i32 2}
-!15 = !{!"clang version 3.9.0 (https://github.com/llvm-mirror/clang 8f258397c5afd7a708bd95770c718e81d08fb11a) (https://github.com/llvm-mirror/llvm 18481855bdfa1b4a424f81be8525db002671348d)"}
+!15 = !{!"clang version 3.9.0"}
 !16 = distinct !DISubprogram(name: "fn1", scope: !3, file: !3, line: 5, type: !17, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !2, retainedNodes: !19)
 !17 = !DISubroutineType(types: !18)
 !18 = !{null, !9}
diff --git a/test/DebugInfo/X86/addr_comments.ll b/test/DebugInfo/X86/addr_comments.ll
new file mode 100644
index 0000000..780b379
--- /dev/null
+++ b/test/DebugInfo/X86/addr_comments.ll
@@ -0,0 +1,33 @@
+; RUN: llc %s -mtriple=i386-unknown-linux-gnu -filetype=asm -o - | FileCheck %s
+
+; CHECK:   .section .debug_addr
+; CHECK:   .long   8 # Length of contribution
+; CHECK:   .short  5 # DWARF version number
+; CHECK:   .byte   4 # Address size
+; CHECK:   .byte   0 # Segment selector size
+; CHECK: .Laddr_table_base0:
+; CHECK:   .long   .Lfunc_begin0
+ 
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local void @foo() #0 !dbg !7 {
+entry:
+  ret void, !dbg !10
+}
+
+attributes #0 = { noinline nounwind optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 350004) (llvm/trunk 350008)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "foo.c", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 5}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 350004) (llvm/trunk 350008)"}
+!7 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DILocation(line: 2, column: 1, scope: !7)
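The "Length of contribution" checked above can be verified by hand. A worked sketch (C++, assuming the DWARF v5 .debug_addr header layout: everything after the 4-byte length field is a 2-byte version, a 1-byte address size, a 1-byte segment selector size, then the addresses):

    // One 4-byte address (.Lfunc_begin0) on this i386 target.
    constexpr unsigned NumAddrs = 1;
    constexpr unsigned AddrSize = 4;
    constexpr unsigned ContributionLength =
        2 /*version*/ + 1 /*addr size*/ + 1 /*seg sel size*/ + NumAddrs * AddrSize;
    static_assert(ContributionLength == 8, "matches the '.long 8' CHECK line");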
diff --git a/test/DebugInfo/X86/align_c11.ll b/test/DebugInfo/X86/align_c11.ll
index ab447f2..c488128 100644
--- a/test/DebugInfo/X86/align_c11.ll
+++ b/test/DebugInfo/X86/align_c11.ll
@@ -58,7 +58,7 @@
 
 !0 = distinct !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "d", scope: !2, file: !3, line: 7, type: !6, isLocal: false, isDefinition: true, align: 16384)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 9ce5220b821054019059c2ac4a9b132c7723832d) (http://llvm.org/git/llvm.git 9a6298be89ce0359b151c0a37af2776a12c69e85)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "test.c", directory: "/tmp")
 !4 = !{}
 !5 = !{!0}
@@ -72,7 +72,7 @@
 !13 = !DISubrange(count: 2)
 !14 = !{i32 2, !"Dwarf Version", i32 4}
 !15 = !{i32 2, !"Debug Info Version", i32 3}
-!16 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 9ce5220b821054019059c2ac4a9b132c7723832d) (http://llvm.org/git/llvm.git 9a6298be89ce0359b151c0a37af2776a12c69e85)"}
+!16 = !{!"clang version 4.0.0"}
 !17 = distinct !DISubprogram(name: "foo", scope: !3, file: !3, line: 8, type: !18, isLocal: false, isDefinition: true, scopeLine: 9, flags: DIFlagPrototyped, isOptimized: false, unit: !2, retainedNodes: !4)
 !18 = !DISubroutineType(types: !19)
 !19 = !{!20}
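The align: 16384 on !1 is measured in bits, i.e. a 2048-byte alignment. A minimal source-level sketch (an assumption; the test ships only IR) of a declaration that makes clang attach an explicit align: field:

    // Hypothetical source for the metadata above; 2048 bytes == 16384 bits.
    alignas(2048) char d[2];   // C++ spelling; C11 would use _Alignas(2048)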
diff --git a/test/DebugInfo/X86/align_cpp11.ll b/test/DebugInfo/X86/align_cpp11.ll
index cc866fc..c15714c 100644
--- a/test/DebugInfo/X86/align_cpp11.ll
+++ b/test/DebugInfo/X86/align_cpp11.ll
@@ -120,7 +120,7 @@
 
 !0 = distinct !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "c0", scope: !2, file: !6, line: 19, type: !19, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 9ce5220b821054019059c2ac4a9b132c7723832d) (http://llvm.org/git/llvm.git 9a6298be89ce0359b151c0a37af2776a12c69e85)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !11)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !11)
 !3 = !DIFile(filename: "test.cpp", directory: "/tmp")
 !4 = !{!5}
 !5 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E", file: !6, line: 13, size: 32, align: 128, elements: !7, identifier: "_ZTS1E")
@@ -141,7 +141,7 @@
 !20 = !{}
 !21 = !{i32 2, !"Dwarf Version", i32 4}
 !22 = !{i32 2, !"Debug Info Version", i32 3}
-!23 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 9ce5220b821054019059c2ac4a9b132c7723832d) (http://llvm.org/git/llvm.git 9a6298be89ce0359b151c0a37af2776a12c69e85)"}
+!23 = !{!"clang version 4.0.0"}
 !24 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !6, file: !6, line: 23, type: !25, isLocal: false, isDefinition: true, scopeLine: 24, flags: DIFlagPrototyped, isOptimized: false, unit: !2, retainedNodes: !20)
 !25 = !DISubroutineType(types: !26)
 !26 = !{null}
diff --git a/test/DebugInfo/X86/align_objc.ll b/test/DebugInfo/X86/align_objc.ll
index 99726bb..1a69fd0 100644
--- a/test/DebugInfo/X86/align_objc.ll
+++ b/test/DebugInfo/X86/align_objc.ll
@@ -70,7 +70,7 @@
 
 !0 = distinct !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "s0", scope: !2, file: !3, line: 10, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !3, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 9ce5220b821054019059c2ac4a9b132c7723832d) (http://llvm.org/git/llvm.git 9a6298be89ce0359b151c0a37af2776a12c69e85)", isOptimized: false, runtimeVersion: 1, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !3, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 1, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "test.m", directory: "/tmp")
 !4 = !{}
 !5 = !{!0}
@@ -81,7 +81,7 @@
 !10 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
 !11 = !{i32 2, !"Dwarf Version", i32 4}
 !12 = !{i32 2, !"Debug Info Version", i32 3}
-!13 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 9ce5220b821054019059c2ac4a9b132c7723832d) (http://llvm.org/git/llvm.git 9a6298be89ce0359b151c0a37af2776a12c69e85)"}
+!13 = !{!"clang version 4.0.0"}
 !14 = distinct !DISubprogram(name: "f", scope: !3, file: !3, line: 12, type: !15, isLocal: false, isDefinition: true, scopeLine: 12, isOptimized: false, unit: !2, retainedNodes: !4)
 !15 = !DISubroutineType(types: !16)
 !16 = !{null}
diff --git a/test/DebugInfo/X86/atomic-c11-dwarf-4.ll b/test/DebugInfo/X86/atomic-c11-dwarf-4.ll
index efbf1a2..737ff63 100644
--- a/test/DebugInfo/X86/atomic-c11-dwarf-4.ll
+++ b/test/DebugInfo/X86/atomic-c11-dwarf-4.ll
@@ -24,7 +24,7 @@
 
 !0 = distinct !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "i", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git cd238117e3a8a57271a82d0bb03d6df6ad8f073e) (http://llvm.org/git/llvm.git 9fd063832c1541aad3907cd60ac344d36997905f)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "atomic.c", directory: "/tmp")
 !4 = !{}
 !5 = !{!0}
@@ -33,5 +33,5 @@
 !8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
 !9 = !{i32 2, !"Dwarf Version", i32 4}
 !10 = !{i32 2, !"Debug Info Version", i32 3}
-!11 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git cd238117e3a8a57271a82d0bb03d6df6ad8f073e) (http://llvm.org/git/llvm.git 9fd063832c1541aad3907cd60ac344d36997905f)"}
+!11 = !{!"clang version 4.0.0"}
 
diff --git a/test/DebugInfo/X86/atomic-c11-dwarf-5.ll b/test/DebugInfo/X86/atomic-c11-dwarf-5.ll
index 8596176..1fe755f 100644
--- a/test/DebugInfo/X86/atomic-c11-dwarf-5.ll
+++ b/test/DebugInfo/X86/atomic-c11-dwarf-5.ll
@@ -25,7 +25,7 @@
 
 !0 = distinct !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "i", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git cd238117e3a8a57271a82d0bb03d6df6ad8f073e) (http://llvm.org/git/llvm.git 9fd063832c1541aad3907cd60ac344d36997905f)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "atomic.c", directory: "/tmp")
 !4 = !{}
 !5 = !{!0}
@@ -34,5 +34,5 @@
 !8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
 !9 = !{i32 2, !"Dwarf Version", i32 5}
 !10 = !{i32 2, !"Debug Info Version", i32 3}
-!11 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git cd238117e3a8a57271a82d0bb03d6df6ad8f073e) (http://llvm.org/git/llvm.git 9fd063832c1541aad3907cd60ac344d36997905f)"}
+!11 = !{!"clang version 4.0.0"}
 
diff --git a/test/DebugInfo/X86/clone-module-2.ll b/test/DebugInfo/X86/clone-module-2.ll
index 23b1846..320557c 100644
--- a/test/DebugInfo/X86/clone-module-2.ll
+++ b/test/DebugInfo/X86/clone-module-2.ll
@@ -39,14 +39,14 @@
 !llvm.module.flags = !{!3, !4, !5, !6}
 !llvm.ident = !{!7}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0 (https://git.llvm.org/git/clang.git/ 195459d046e795f5952f7d2121e505eeb59c5574) (https://git.llvm.org/git/llvm.git/ e9dc5b5ade57869d1a443c568c6cf556ccf3b7af)", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
 !1 = !DIFile(filename: "test.c", directory: "/Volumes/Data/llvm/build/obj")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
 !5 = !{i32 1, !"wchar_size", i32 4}
 !6 = !{i32 7, !"PIC Level", i32 2}
-!7 = !{!"clang version 7.0.0 (https://git.llvm.org/git/clang.git/ 195459d046e795f5952f7d2121e505eeb59c5574) (https://git.llvm.org/git/llvm.git/ e9dc5b5ade57869d1a443c568c6cf556ccf3b7af)"}
+!7 = !{!"clang version 7.0.0"}
 !8 = !DILocation(line: 3, column: 72, scope: !9)
 !9 = distinct !DISubprogram(name: "eliminated", scope: !1, file: !1, line: 3, type: !10, isLocal: true, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !10 = !DISubroutineType(types: !2)
diff --git a/test/DebugInfo/X86/clone-module.ll b/test/DebugInfo/X86/clone-module.ll
index ae334be..8c7fd52 100644
--- a/test/DebugInfo/X86/clone-module.ll
+++ b/test/DebugInfo/X86/clone-module.ll
@@ -61,14 +61,14 @@
 !llvm.module.flags = !{!3, !4, !5, !6}
 !llvm.ident = !{!7}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0 (https://git.llvm.org/git/clang.git/ 195459d046e795f5952f7d2121e505eeb59c5574) (https://git.llvm.org/git/llvm.git/ 69ec7d5667e9928db8435bfbee0da151c85a91c9)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "clone-module.c", directory: "/somewhere")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
 !5 = !{i32 1, !"wchar_size", i32 4}
 !6 = !{i32 7, !"PIC Level", i32 2}
-!7 = !{!"clang version 7.0.0 (https://git.llvm.org/git/clang.git/ 195459d046e795f5952f7d2121e505eeb59c5574) (https://git.llvm.org/git/llvm.git/ 69ec7d5667e9928db8435bfbee0da151c85a91c9)"}
+!7 = !{!"clang version 7.0.0"}
 !8 = !DILocalVariable(name: "j", arg: 1, scope: !9, file: !1, line: 3, type: !12)
 !9 = distinct !DISubprogram(name: "inlined", scope: !1, file: !1, line: 3, type: !10, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !13)
 !10 = !DISubroutineType(types: !11)
diff --git a/test/DebugInfo/X86/debug-loc-offset.mir b/test/DebugInfo/X86/debug-loc-offset.mir
index c5f80d0..07b7972 100644
--- a/test/DebugInfo/X86/debug-loc-offset.mir
+++ b/test/DebugInfo/X86/debug-loc-offset.mir
@@ -32,7 +32,7 @@
 # Checking that we have two compile units with two sets of high/lo_pc.
 # CHECK: .debug_info contents
 # CHECK: DW_TAG_compile_unit
-# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000020)
+# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000020 ".text")
 # CHECK: DW_AT_high_pc
 #
 # CHECK: DW_TAG_subprogram
@@ -51,7 +51,7 @@
 # CHECK-NOT: DW_AT_location
 #
 # CHECK: DW_TAG_compile_unit
-# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000000)
+# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000000 ".text")
 # CHECK: DW_AT_high_pc
 #
 # CHECK: DW_TAG_subprogram
diff --git a/test/DebugInfo/X86/debug_addr.ll b/test/DebugInfo/X86/debug_addr.ll
index b50428a..e6dbe7d 100644
--- a/test/DebugInfo/X86/debug_addr.ll
+++ b/test/DebugInfo/X86/debug_addr.ll
@@ -32,7 +32,7 @@
 ; DWARF5-NOT: DW_TAG_{{.*}}
 ; DWARF5: DW_AT_GNU_dwo_name{{.*}}test.dwo
 ; DWARF5: DW_AT_addr_base{{.*}}0x00000008
-; DWARF5: DW_AT_low_pc [DW_FORM_addrx] ( indexed (00000000) address = 0x0000000000000000)
+; DWARF5: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x0000000000000000 ".text")
 ; DWARF5: .debug_addr contents:
 ; DWARF5-NEXT: 0x00000000: Addr Section: length = 0x0000000c, version = 0x0005, addr_size = 0x04, seg_size = 0x00
 ; DWARF5-NEXT: Addrs: [
diff --git a/test/DebugInfo/X86/dwarf-public-names.ll b/test/DebugInfo/X86/dwarf-public-names.ll
index a4df331..30b83df 100644
--- a/test/DebugInfo/X86/dwarf-public-names.ll
+++ b/test/DebugInfo/X86/dwarf-public-names.ll
@@ -123,7 +123,7 @@
 !17 = !DIGlobalVariableExpression(var: !18, expr: !DIExpression())
 !18 = !DIGlobalVariable(name: "global_namespace_variable", linkageName: "_ZN2ns25global_namespace_variableE", scope: !19, file: !3, line: 27, type: !6, isLocal: false, isDefinition: true)
 !19 = !DINamespace(name: "ns", scope: null)
-!20 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 3.3 (http://llvm.org/git/clang.git a09cd8103a6a719cb2628cdf0c91682250a17bd2) (http://llvm.org/git/llvm.git 47d03cec0afca0c01ae42b82916d1d731716cd20)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !11, retainedTypes: !11, globals: !21, imports: !11) ; previously: invalid DW_TAG_base_type
+!20 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 3.3", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !11, retainedTypes: !11, globals: !21, imports: !11) ; previously: invalid DW_TAG_base_type
 !21 = !{!0, !15, !17}
 !22 = !{i32 1, !"Debug Info Version", i32 3}
 !23 = distinct !DISubprogram(name: "member_function", linkageName: "_ZN1C15member_functionEv", scope: null, file: !3, line: 9, type: !8, isLocal: false, isDefinition: true, scopeLine: 9, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !20, declaration: !7, retainedNodes: !11)
diff --git a/test/DebugInfo/X86/dwarfdump-ranges-baseaddr.s b/test/DebugInfo/X86/dwarfdump-ranges-baseaddr.s
index 25ea30d..7d5a6f9 100644
--- a/test/DebugInfo/X86/dwarfdump-ranges-baseaddr.s
+++ b/test/DebugInfo/X86/dwarfdump-ranges-baseaddr.s
@@ -4,7 +4,7 @@
 
 # CHECK: .debug_info contents:
 # CHECK: 0x0000000b: DW_TAG_compile_unit [1]
-# CHECK:             DW_AT_low_pc [DW_FORM_addr]       (0x0000000000000000)
+# CHECK:             DW_AT_low_pc [DW_FORM_addr]       (0x0000000000000000 ".text")
 # CHECK-NEXT:        DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
 # CHECK-NEXT:    [0x0000000000000000, 0x0000000000000001) ".text"
 # CHECK-NEXT:    [0x0000000000000003, 0x0000000000000006) ".text"
diff --git a/test/DebugInfo/X86/dwarfdump-str-offsets-dwp.s b/test/DebugInfo/X86/dwarfdump-str-offsets-dwp.s
index fb095e3..6620bea 100644
--- a/test/DebugInfo/X86/dwarfdump-str-offsets-dwp.s
+++ b/test/DebugInfo/X86/dwarfdump-str-offsets-dwp.s
@@ -291,33 +291,33 @@
 # CHECK:      Compile Unit
 # CHECK-NOT:  NULL
 # CHECK:      DW_TAG_compile_unit
-# CHECK-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_1")
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU1")
+# CHECK-NEXT: DW_AT_producer [DW_FORM_strx] (indexed (00000000) string = "Handmade DWARF producer")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "Compile_Unit_1")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000002) string = "/home/test/CU1")
 # CHECK-NOT:  NULL
 
 # CHECK:      Compile Unit
 # CHECK-NOT:  NULL
 # CHECK:      DW_TAG_compile_unit
-# CHECK-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_2")
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU2")
+# CHECK-NEXT: DW_AT_producer [DW_FORM_strx] (indexed (00000000) string = "Handmade DWARF producer")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "Compile_Unit_2")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000002) string = "/home/test/CU2")
 # 
 # CHECK:      Type Unit
 # CHECK-NOT:  NULL
 # CHECK:      DW_TAG_type_unit
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000003) string = "Type_Unit_1")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000003) string = "Type_Unit_1")
 # CHECK-NOT:  NULL
 # CHECK:      DW_TAG_structure_type
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000004) string = "MyStruct_1")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000004) string = "MyStruct_1")
 #
 # CHECK:      Type Unit
 # CHECK-NOT:  NULL
 # CHECK:      DW_TAG_type_unit
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000003) string = "Type_Unit_2")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000003) string = "Type_Unit_2")
 # CHECK-NOT:  NULL
 # CHECK:      DW_TAG_structure_type
-# CHECK-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000004) string = "MyStruct_2")
+# CHECK-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000004) string = "MyStruct_2")
 
 # Verify the correct offsets of the compile and type unit contributions in the
 # index tables.
diff --git a/test/DebugInfo/X86/dwarfdump-str-offsets-macho.s b/test/DebugInfo/X86/dwarfdump-str-offsets-macho.s
index b2b5104..1332a94 100644
--- a/test/DebugInfo/X86/dwarfdump-str-offsets-macho.s
+++ b/test/DebugInfo/X86/dwarfdump-str-offsets-macho.s
@@ -216,37 +216,37 @@
 # COMMON:      .debug_info contents:
 # COMMON-NOT:  contents:     
 # COMMON:      DW_TAG_compile_unit
-# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_1")
+# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] (indexed (00000000) string = "Handmade DWARF producer")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "Compile_Unit_1")
 # COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
-# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU1")
+# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] (indexed (00000002) string = "/home/test/CU1")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_subprogram
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx1] ( indexed (00000003) string = "MyFunc")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx1] (indexed (00000003) string = "MyFunc")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_variable
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx2] ( indexed (00000004) string = "MyVar1")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx2] (indexed (00000004) string = "MyVar1")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_variable
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx3] ( indexed (00000005) string = "MyVar2")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx3] (indexed (00000005) string = "MyVar2")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_variable
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx4] ( indexed (00000006) string = "MyVar3")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx4] (indexed (00000006) string = "MyVar3")
 # 
 # Second compile unit (b.cpp)
 # COMMON:      DW_TAG_compile_unit
-# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_2")
+# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] (indexed (00000000) string = "Handmade DWARF producer")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "Compile_Unit_2")
 # COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000038)
-# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU2")
+# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] (indexed (00000002) string = "/home/test/CU2")
 # 
 # The type unit
 # COMMON:      .debug_types contents:
 # COMMON:      DW_TAG_type_unit
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000000) string = "Type_Unit")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000000) string = "Type_Unit")
 # COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset]       (0x00000058)
 # COMMON:      DW_TAG_structure_type
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "MyStruct")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "MyStruct")
 # 
 # The .debug_str_offsets section
 # COMMON:      .debug_str_offsets contents:
diff --git a/test/DebugInfo/X86/dwarfdump-str-offsets.s b/test/DebugInfo/X86/dwarfdump-str-offsets.s
index 2f4215a..230c660 100644
--- a/test/DebugInfo/X86/dwarfdump-str-offsets.s
+++ b/test/DebugInfo/X86/dwarfdump-str-offsets.s
@@ -334,51 +334,51 @@
 # COMMON:      .debug_info contents:
 # COMMON-NOT:  contents:     
 # COMMON:      DW_TAG_compile_unit
-# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_1")
+# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] (indexed (00000000) string = "Handmade DWARF producer")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "Compile_Unit_1")
 # COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
-# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU1")
+# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] (indexed (00000002) string = "/home/test/CU1")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_subprogram
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx1] ( indexed (00000003) string = "MyFunc")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx1] (indexed (00000003) string = "MyFunc")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_variable
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx2] ( indexed (00000004) string = "MyVar1")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx2] (indexed (00000004) string = "MyVar1")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_variable
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx3] ( indexed (00000005) string = "MyVar2")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx3] (indexed (00000005) string = "MyVar2")
 # COMMON-NOT:  NULL
 # COMMON:      DW_TAG_variable
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx4] ( indexed (00000006) string = "MyVar3")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx4] (indexed (00000006) string = "MyVar3")
 # 
 # Second compile unit (b.cpp)
 # COMMON:      DW_TAG_compile_unit
-# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade DWARF producer")
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "Compile_Unit_2")
+# COMMON-NEXT: DW_AT_producer [DW_FORM_strx] (indexed (00000000) string = "Handmade DWARF producer")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "Compile_Unit_2")
 # COMMON-NEXT: DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000038)
-# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/CU2")
+# COMMON-NEXT: DW_AT_comp_dir [DW_FORM_strx] (indexed (00000002) string = "/home/test/CU2")
 # 
 # The split CU
 # SPLIT:       .debug_info.dwo contents:
 # SPLIT-NOT:   contents:
 # SPLIT:       DW_TAG_compile_unit
-# SPLIT-NEXT:  DW_AT_producer [DW_FORM_strx] ( indexed (00000000) string = "Handmade split DWARF producer")
-# SPLIT-NEXT:  DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "V5_split_compile_unit")
-# SPLIT-NEXT:  DW_AT_comp_dir [DW_FORM_strx] ( indexed (00000002) string = "/home/test/splitCU")
+# SPLIT-NEXT:  DW_AT_producer [DW_FORM_strx] (indexed (00000000) string = "Handmade split DWARF producer")
+# SPLIT-NEXT:  DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "V5_split_compile_unit")
+# SPLIT-NEXT:  DW_AT_comp_dir [DW_FORM_strx] (indexed (00000002) string = "/home/test/splitCU")
 # 
 # The type unit
 # COMMON:      .debug_types contents:
 # COMMON:      DW_TAG_type_unit
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000000) string = "Type_Unit")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000000) string = "Type_Unit")
 # COMMON:      DW_TAG_structure_type
-# COMMON-NEXT: DW_AT_name [DW_FORM_strx] ( indexed (00000001) string = "MyStruct")
+# COMMON-NEXT: DW_AT_name [DW_FORM_strx] (indexed (00000001) string = "MyStruct")
 # 
 # The split type unit
 # SPLIT:       .debug_types.dwo contents:
 # SPLIT:       DW_TAG_type_unit
-# SPLIT-NEXT:  DW_AT_name [DW_FORM_strx] ( indexed (00000003) string = "V5_split_type_unit")
+# SPLIT-NEXT:  DW_AT_name [DW_FORM_strx] (indexed (00000003) string = "V5_split_type_unit")
 # SPLIT:       DW_TAG_structure_type
-# SPLIT-NEXT:  DW_AT_name [DW_FORM_strx] ( indexed (00000004) string = "V5_split_Mystruct")
+# SPLIT-NEXT:  DW_AT_name [DW_FORM_strx] (indexed (00000004) string = "V5_split_Mystruct")
 # 
 # The .debug_str_offsets section
 # COMMON:      .debug_str_offsets contents:
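The "indexed (NNNNNNNN) string = ..." rendering these checks match is DW_FORM_strx resolution: the attribute stores an index into the unit's .debug_str_offsets contribution (located via DW_AT_str_offsets_base), and the selected slot holds the final offset into .debug_str. A hedged C++ sketch of that two-step lookup (names and buffer handling invented, DWARF32 assumed):

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    std::string resolveStrx(uint64_t Index,
                            const std::vector<uint8_t> &StrOffsets, // .debug_str_offsets
                            uint64_t OffsetsBase,                   // DW_AT_str_offsets_base
                            const std::vector<uint8_t> &Str) {      // .debug_str
      // Step 1: the index selects a 4-byte slot in the contribution that
      // starts at DW_AT_str_offsets_base.
      uint32_t StrOffset;
      std::memcpy(&StrOffset, StrOffsets.data() + OffsetsBase + 4 * Index, 4);
      // Step 2: that slot is an offset to a NUL-terminated .debug_str entry.
      return std::string(reinterpret_cast<const char *>(Str.data()) + StrOffset);
    }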
diff --git a/test/DebugInfo/X86/enum-class.ll b/test/DebugInfo/X86/enum-class.ll
index 92398c4..19b594e 100644
--- a/test/DebugInfo/X86/enum-class.ll
+++ b/test/DebugInfo/X86/enum-class.ll
@@ -13,13 +13,13 @@
 !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = !DIGlobalVariable(name: "a", scope: null, file: !2, line: 4, type: !3, isLocal: false, isDefinition: true)
 !2 = !DIFile(filename: "foo.cpp", directory: "/Users/echristo/tmp")
-!3 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "A", file: !2, line: 1, baseType: !4, size: 32, flags: DIFlagFixedEnum, align: 32, elements: !5)
+!3 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "A", file: !2, line: 1, baseType: !4, size: 32, flags: DIFlagEnumClass, align: 32, elements: !5)
 !4 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
 !5 = !{!6}
 !6 = !DIEnumerator(name: "A1", value: 1)
 !7 = !DIGlobalVariableExpression(var: !8, expr: !DIExpression()) ; [ DW_TAG_enumerator ]
 !8 = !DIGlobalVariable(name: "b", scope: null, file: !2, line: 5, type: !9, isLocal: false, isDefinition: true)
-!9 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "B", file: !2, line: 2, baseType: !10, size: 64, flags: DIFlagFixedEnum, align: 64, elements: !11)
+!9 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "B", file: !2, line: 2, baseType: !10, size: 64, flags: DIFlagEnumClass, align: 64, elements: !11)
 !10 = !DIBasicType(name: "long unsigned int", size: 64, align: 64, encoding: DW_ATE_unsigned)
 !11 = !{!12}
 !12 = !DIEnumerator(name: "B1", value: 1) ; [ DW_TAG_enumerator ]
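DIFlagFixedEnum was renamed DIFlagEnumClass because the flag really marks C++11 scoped enums. A plausible reconstruction of foo.cpp (an assumption; only the IR is in-tree):

    // Scoped enums with fixed underlying types; clang tags both
    // DW_TAG_enumeration_type entries with DIFlagEnumClass.
    enum class A : int { A1 = 1 };            // !3: size 32
    enum class B : unsigned long { B1 = 1 };  // !9: size 64

    A a = A::A1;
    B b = B::B1;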
diff --git a/test/DebugInfo/X86/fission-cu.ll b/test/DebugInfo/X86/fission-cu.ll
index 1551bed..d5cca77 100644
--- a/test/DebugInfo/X86/fission-cu.ll
+++ b/test/DebugInfo/X86/fission-cu.ll
@@ -25,18 +25,18 @@
 ; CHECK: Abbrev table for offset: 0x00000000
 ; CHECK: [1] DW_TAG_compile_unit DW_CHILDREN_no
 ; CHECK: DW_AT_stmt_list DW_FORM_sec_offset
-; CHECK: DW_AT_GNU_dwo_name      DW_FORM_strp
 ; CHECK: DW_AT_comp_dir  DW_FORM_strp
+; CHECK: DW_AT_GNU_dwo_name      DW_FORM_strp
 ; CHECK: DW_AT_GNU_dwo_id        DW_FORM_data8
 
 ; Check that we're using the right forms.
 ; CHECK: .debug_abbrev.dwo contents:
 ; CHECK: Abbrev table for offset: 0x00000000
 ; CHECK: [1] DW_TAG_compile_unit DW_CHILDREN_yes
-; CHECK: DW_AT_GNU_dwo_name  DW_FORM_GNU_str_index
 ; CHECK: DW_AT_producer  DW_FORM_GNU_str_index
 ; CHECK: DW_AT_language  DW_FORM_data2
 ; CHECK: DW_AT_name      DW_FORM_GNU_str_index
+; CHECK: DW_AT_GNU_dwo_name  DW_FORM_GNU_str_index
 ; CHECK-NOT: DW_AT_low_pc
 ; CHECK-NOT: DW_AT_stmt_list
 ; CHECK-NOT: DW_AT_comp_dir
@@ -58,48 +58,48 @@
 ; CHECK: .debug_info contents:
 ; CHECK: DW_TAG_compile_unit
 ; CHECK-NEXT: DW_AT_stmt_list [DW_FORM_sec_offset]   (0x00000000)
-; CHECK-NEXT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x00000000] = "baz.dwo")
-; CHECK-NEXT: DW_AT_comp_dir [DW_FORM_strp]     ( .debug_str[0x00000008] = "/usr/local/google/home/echristo/tmp")
+; CHECK-NEXT: DW_AT_comp_dir [DW_FORM_strp]     ( .debug_str[0x00000000] = "/usr/local/google/home/echristo/tmp")
+; CHECK-NEXT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x00000024] = "baz.dwo")
 ; CHECK-NEXT: DW_AT_GNU_dwo_id [DW_FORM_data8]  (0x1f1f859683d49324)
 
 ; Check that the rest of the compile units have information.
 ; CHECK: .debug_info.dwo contents:
 ; CHECK: DW_TAG_compile_unit
-; CHECK: DW_AT_GNU_dwo_name [DW_FORM_GNU_str_index] ( indexed (00000000) string = "baz.dwo")
-; CHECK: DW_AT_producer [DW_FORM_GNU_str_index] ( indexed (00000001) string = "clang version 3.3 (trunk 169021) (llvm/trunk 169020)")
+; CHECK: DW_AT_producer [DW_FORM_GNU_str_index] (indexed (00000002) string = "clang version 3.3 (trunk 169021) (llvm/trunk 169020)")
 ; CHECK: DW_AT_language [DW_FORM_data2]        (DW_LANG_C99)
-; CHECK: DW_AT_name [DW_FORM_GNU_str_index]    ( indexed (00000002) string = "baz.c")
+; CHECK: DW_AT_name [DW_FORM_GNU_str_index]    (indexed (00000003) string = "baz.c")
+; CHECK: DW_AT_GNU_dwo_name [DW_FORM_GNU_str_index] (indexed (00000004) string = "baz.dwo")
 ; CHECK-NOT: DW_AT_low_pc
 ; CHECK-NOT: DW_AT_stmt_list
 ; CHECK-NOT: DW_AT_comp_dir
 ; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8]  (0x1f1f859683d49324)
 ; CHECK: DW_TAG_variable
-; CHECK: DW_AT_name [DW_FORM_GNU_str_index]     ( indexed (00000003) string = "a")
+; CHECK: DW_AT_name [DW_FORM_GNU_str_index]     (indexed (00000000) string = "a")
 ; CHECK: DW_AT_type [DW_FORM_ref4]       (cu + 0x{{[0-9a-f]*}} => {[[TYPE:0x[0-9a-f]*]]}
 ; CHECK: DW_AT_external [DW_FORM_flag_present]   (true)
 ; CHECK: DW_AT_decl_file [DW_FORM_data1] (0x01)
 ; CHECK: DW_AT_decl_line [DW_FORM_data1] (1)
 ; CHECK: DW_AT_location [DW_FORM_exprloc] (DW_OP_GNU_addr_index 0x0)
 ; CHECK: [[TYPE]]: DW_TAG_base_type
-; CHECK: DW_AT_name [DW_FORM_GNU_str_index]     ( indexed (00000004) string = "int")
+; CHECK: DW_AT_name [DW_FORM_GNU_str_index]     (indexed (00000001) string = "int")
 
 ; CHECK: .debug_str contents:
-; CHECK: 0x00000000: "baz.dwo"
-; CHECK: 0x00000008: "/usr/local/google/home/echristo/tmp"
+; CHECK: 0x00000000: "/usr/local/google/home/echristo/tmp"
+; CHECK: 0x00000024: "baz.dwo"
 
 ; CHECK: .debug_str.dwo contents:
-; CHECK: 0x00000000: "baz.dwo"
-; CHECK: 0x00000008: "clang version 3.3 (trunk 169021) (llvm/trunk 169020)"
-; CHECK: 0x0000003d: "baz.c"
-; CHECK: 0x00000043: "a"
-; CHECK: 0x00000045: "int"
+; CHECK: 0x00000000: "a"
+; CHECK: 0x00000002: "int"
+; CHECK: 0x00000006: "clang version 3.3 (trunk 169021) (llvm/trunk 169020)"
+; CHECK: 0x0000003b: "baz.c"
+; CHECK: 0x00000041: "baz.dwo"
 
 ; CHECK: .debug_str_offsets.dwo contents:
 ; CHECK: 0x00000000: 00000000
-; CHECK: 0x00000004: 00000008
-; CHECK: 0x00000008: 0000003d
-; CHECK: 0x0000000c: 00000043
-; CHECK: 0x00000010: 00000045
+; CHECK: 0x00000004: 00000002
+; CHECK: 0x00000008: 00000006
+; CHECK: 0x0000000c: 0000003b
+; CHECK: 0x00000010: 00000041
 
 ; Object file checks
 ; For x86-64-linux we should have this set of relocations for the debug info section
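The renumbered .debug_str.dwo offsets in the updated checks are internally consistent, since the strings are laid out back to back with their NUL terminators. A worked check (sizeof on a string literal counts the terminator):

    static_assert(0x00 + sizeof("a") == 0x02, "");
    static_assert(0x02 + sizeof("int") == 0x06, "");
    static_assert(0x06 + sizeof("clang version 3.3 (trunk 169021) (llvm/trunk 169020)")
                      == 0x3b, "");
    static_assert(0x3b + sizeof("baz.c") == 0x41, "");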
diff --git a/test/DebugInfo/X86/fission-local-import.ll b/test/DebugInfo/X86/fission-local-import.ll
new file mode 100644
index 0000000..4f43a22
--- /dev/null
+++ b/test/DebugInfo/X86/fission-local-import.ll
@@ -0,0 +1,33 @@
+; RUN: llc -split-dwarf-file=foo.dwo -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; CHECK: .debug_info.dwo contents:
+; CHECK: DW_TAG_compile_unit
+; CHECK:   DW_TAG_subprogram
+; CHECK:     DW_TAG_imported_module
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local void @_Z4testv() !dbg !5 {
+entry:
+  ret void, !dbg !13
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10, !11}
+!llvm.ident = !{!12}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0 (trunk 349508) (llvm/trunk 349520)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, imports: !3, nameTableKind: None)
+!1 = !DIFile(filename: "test.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!2 = !{}
+!3 = !{!4}
+!4 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !5, entity: !8, file: !1, line: 2)
+!5 = distinct !DISubprogram(name: "test", linkageName: "_Z4testv", scope: !1, file: !1, line: 2, type: !6, scopeLine: 2, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null}
+!8 = !DINamespace(name: "ns1", scope: null)
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 3}
+!11 = !{i32 1, !"wchar_size", i32 4}
+!12 = !{!"clang version 8.0.0 (trunk 349508) (llvm/trunk 349520)"}
+!13 = !DILocation(line: 2, column: 36, scope: !5)
+
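The DIImportedEntity here (!4, scoped to the subprogram !5) is what a function-local using-directive lowers to; the test checks that it lands inside the .dwo unit's DW_TAG_subprogram. A likely shape for test.cpp (an assumption reconstructed from the metadata):

    namespace ns1 {}

    void test() {
      using namespace ns1;  // becomes DW_TAG_imported_module under the subprogram
    }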
diff --git a/test/DebugInfo/X86/fission-ranges.ll b/test/DebugInfo/X86/fission-ranges.ll
index 5883d2b..1a1dc22 100644
--- a/test/DebugInfo/X86/fission-ranges.ll
+++ b/test/DebugInfo/X86/fission-ranges.ll
@@ -8,8 +8,8 @@
 ; CHECK: .debug_info contents:
 ; CHECK: DW_TAG_compile_unit
 ; CHECK-NEXT: DW_AT_stmt_list
-; CHECK-NEXT: DW_AT_GNU_dwo_name
 ; CHECK-NEXT: DW_AT_comp_dir
+; CHECK-NEXT: DW_AT_GNU_dwo_name
 ; CHECK-NEXT: DW_AT_GNU_dwo_id
 ; CHECK-NEXT: DW_AT_GNU_ranges_base
 ; CHECK-NEXT: DW_AT_GNU_addr_base [DW_FORM_sec_offset]                   (0x00000000)
diff --git a/test/DebugInfo/X86/generate-odr-hash.ll b/test/DebugInfo/X86/generate-odr-hash.ll
index f2776f3..8a46f6d 100644
--- a/test/DebugInfo/X86/generate-odr-hash.ll
+++ b/test/DebugInfo/X86/generate-odr-hash.ll
@@ -80,7 +80,7 @@
 ; CHECK-NOT: type_signature
 ; CHECK-LABEL: type_signature = 0x1d02f3be30cc5688
 ; CHECK: DW_TAG_structure_type
-; FISSION-NEXT: DW_AT_name {{.*}} ( indexed {{.*}} "bar"
+; FISSION-NEXT: DW_AT_name {{.*}} (indexed {{.*}} "bar"
 ; SINGLE-NEXT: DW_AT_name {{.*}} "bar"
 
 ; Check that we generate a hash for fluffy and the value.
diff --git a/test/DebugInfo/X86/lazy-fission-comp-dir.ll b/test/DebugInfo/X86/lazy-fission-comp-dir.ll
new file mode 100644
index 0000000..6055e7e
--- /dev/null
+++ b/test/DebugInfo/X86/lazy-fission-comp-dir.ll
@@ -0,0 +1,32 @@
+; RUN: llc -split-dwarf-file=foo.dwo -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
+; RUN: llvm-dwarfdump -debug-info -debug-line %t | FileCheck %s
+
+; CHECK: .debug_info contents:
+; CHECK: DW_AT_comp_dir ("/usr/local/google/home/blaikie/dev/scratch")
+
+; CHECK: .debug_line contents:
+; CHECK: file_names[ 1]:
+; CHECK-NEXT:      name: "main.c"
+; CHECK-NEXT: dir_index: 0
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local i32 @main() !dbg !6 {
+entry:
+  ret i32 0, !dbg !10
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 8.0.0 (trunk 349782) (llvm/trunk 349794)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: GNU)
+!1 = !DIFile(filename: "main.c", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{!"clang version 8.0.0 (trunk 349782) (llvm/trunk 349794)"}
+!6 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 1, type: !7, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!9}
+!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!10 = !DILocation(line: 2, column: 1, scope: !6)
diff --git a/test/DebugInfo/X86/length_symbol_difference.ll b/test/DebugInfo/X86/length_symbol_difference.ll
new file mode 100644
index 0000000..78684e7
--- /dev/null
+++ b/test/DebugInfo/X86/length_symbol_difference.ll
@@ -0,0 +1,30 @@
+; RUN: llc -filetype=asm -O0 -mtriple=x86_64-linux-gnu < %s | FileCheck %s
+
+; CHECK:      .long   .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+; CHECK-NEXT: .Ldebug_info_start0:
+; CHECK-NOT:  .byte   0
+; CHECK:      .byte   0                       # End Of Children Mark
+; CHECK-NEXT: .Ldebug_info_end0:
+; CHECK-NEXT: .section
+
+
+define dso_local void @_Z2f1v() !dbg !7 {
+entry:
+  ret void, !dbg !10
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0 (trunk 349394) (llvm/trunk 349377)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "foo.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{!"clang version 8.0.0 (trunk 349394) (llvm/trunk 349377)"}
+!7 = distinct !DISubprogram(name: "f1", linkageName: "_Z2f1v", scope: !1, file: !1, line: 1, type: !8, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DILocation(line: 1, column: 12, scope: !7)
diff --git a/test/DebugInfo/X86/loclists-dwp.ll b/test/DebugInfo/X86/loclists-dwp.ll
index a5ce922..cba81a8 100644
--- a/test/DebugInfo/X86/loclists-dwp.ll
+++ b/test/DebugInfo/X86/loclists-dwp.ll
@@ -42,13 +42,13 @@
 !llvm.module.flags = !{!3, !4, !5}
 !llvm.ident = !{!6}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0 (https://git.llvm.org/git/clang.git/ 41055c6168135fe539801799e5c5636247cf0302) (https://git.llvm.org/git/llvm.git/ de0558be123ffbb5b5bd692c17dbd57a75fe684f)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
 !1 = !DIFile(filename: "a.cpp", directory: "/home/test/PRs/PR38990")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
 !5 = !{i32 1, !"wchar_size", i32 4}
-!6 = !{!"clang version 8.0.0 (https://git.llvm.org/git/clang.git/ 41055c6168135fe539801799e5c5636247cf0302) (https://git.llvm.org/git/llvm.git/ de0558be123ffbb5b5bd692c17dbd57a75fe684f)"}
+!6 = !{!"clang version 8.0.0"}
 !7 = distinct !DISubprogram(name: "a", linkageName: "_Z1ai", scope: !1, file: !1, line: 2, type: !8, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !11)
 !8 = !DISubroutineType(types: !9)
 !9 = !{null, !10}
diff --git a/test/DebugInfo/X86/noreturn_c11.ll b/test/DebugInfo/X86/noreturn_c11.ll
index d044e585..8afbd9c 100644
--- a/test/DebugInfo/X86/noreturn_c11.ll
+++ b/test/DebugInfo/X86/noreturn_c11.ll
@@ -36,12 +36,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 08946d46f2add8cb241fdc09fc3731dd9dc5ecb5) (http://llvm.org/git/llvm.git d048aeecd34b8c336d1fd44e36c15b0b11c2ea4d)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "test.c", directory: "/home/del/test")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 08946d46f2add8cb241fdc09fc3731dd9dc5ecb5) (http://llvm.org/git/llvm.git d048aeecd34b8c336d1fd44e36c15b0b11c2ea4d)"}
+!5 = !{!"clang version 4.0.0"}
 !6 = distinct !DISubprogram(name: "f", scope: !7, file: !7, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagNoReturn, isOptimized: false, unit: !0, retainedNodes: !2)
 !7 = !DIFile(filename: "./test.c", directory: "/home/del/test")
 !8 = !DISubroutineType(types: !9)
diff --git a/test/DebugInfo/X86/noreturn_cpp11.ll b/test/DebugInfo/X86/noreturn_cpp11.ll
index a4902e8..d3b6104 100644
--- a/test/DebugInfo/X86/noreturn_cpp11.ll
+++ b/test/DebugInfo/X86/noreturn_cpp11.ll
@@ -42,12 +42,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 08946d46f2add8cb241fdc09fc3731dd9dc5ecb5) (http://llvm.org/git/llvm.git d048aeecd34b8c336d1fd44e36c15b0b11c2ea4d)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "test.cpp", directory: "/home/del/test")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 08946d46f2add8cb241fdc09fc3731dd9dc5ecb5) (http://llvm.org/git/llvm.git d048aeecd34b8c336d1fd44e36c15b0b11c2ea4d)"}
+!5 = !{!"clang version 4.0.0"}
 !6 = distinct !DISubprogram(name: "f", linkageName: "_Z1fv", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped | DIFlagNoReturn, isOptimized: false, unit: !0, retainedNodes: !2)
 !7 = !DISubroutineType(types: !8)
 !8 = !{null}
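DIFlagNoReturn on !6 is what becomes DW_AT_noreturn in the output. A hedged reconstruction of test.cpp:

    // [[noreturn]] (or _Noreturn in the C variant of this test) sets
    // DIFlagNoReturn on the DISubprogram.
    [[noreturn]] void f() {
      for (;;) {
      }
    }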
diff --git a/test/DebugInfo/X86/noreturn_objc.ll b/test/DebugInfo/X86/noreturn_objc.ll
index 83c77c5..1a63ab6 100644
--- a/test/DebugInfo/X86/noreturn_objc.ll
+++ b/test/DebugInfo/X86/noreturn_objc.ll
@@ -38,12 +38,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 08946d46f2add8cb241fdc09fc3731dd9dc5ecb5) (http://llvm.org/git/llvm.git d048aeecd34b8c336d1fd44e36c15b0b11c2ea4d)", isOptimized: false, runtimeVersion: 1, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !1, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 1, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "test.m", directory: "/home/del/test/noreturn/objc")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 08946d46f2add8cb241fdc09fc3731dd9dc5ecb5) (http://llvm.org/git/llvm.git d048aeecd34b8c336d1fd44e36c15b0b11c2ea4d)"}
+!5 = !{!"clang version 4.0.0"}
 !6 = distinct !DISubprogram(name: "f", scope: !7, file: !7, line: 2, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagNoReturn, isOptimized: false, unit: !0, retainedNodes: !2)
 !7 = !DIFile(filename: "./test.m", directory: "/home/del/test/noreturn/objc")
 !8 = !DISubroutineType(types: !9)
diff --git a/test/DebugInfo/X86/pieces-3.ll b/test/DebugInfo/X86/pieces-3.ll
index 0f705f1..e91bd26 100644
--- a/test/DebugInfo/X86/pieces-3.ll
+++ b/test/DebugInfo/X86/pieces-3.ll
@@ -18,12 +18,10 @@
 ; CHECK: DW_TAG_formal_parameter [3]
 ; CHECK-NEXT:   DW_AT_location [DW_FORM_data4]        (
 ; CHECK-NEXT:     [0x0000000000000000, 0x0000000000000007): DW_OP_reg5 RDI, DW_OP_piece 0x8, DW_OP_piece 0x4, DW_OP_reg4 RSI, DW_OP_piece 0x4
-; CHECK-NEXT:     [0x0000000000000007, 0x0000000000000007): DW_OP_reg5 RDI, DW_OP_piece 0x8, DW_OP_piece 0x4, DW_OP_reg0 RAX, DW_OP_piece 0x4)
 ; CHECK-NEXT:   DW_AT_name {{.*}}"outer"
 ; CHECK: DW_TAG_variable
-; CHECK-NEXT:   DW_AT_location [DW_FORM_data4]        (0x00000044
-; CHECK-NEXT:     [0x0000000000000007, 0x0000000000000007): DW_OP_reg0 RAX, DW_OP_piece 0x4)
-; CHECK-NEXT:   "i1"
+; CHECK-NEXT:   DW_AT_name {{.*}}"i1"
+; CHECK-NOT:    DW_AT_location
 
 ; ModuleID = '/Volumes/Data/llvm/test/DebugInfo/X86/sroasplit-2.ll'
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
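The surviving location for "outer" is composed from register pieces, with a bare DW_OP_piece acting as a gap for an undescribed fragment. A rough source-level analogue (an assumption; the original sroasplit source is not shown):

    // A 16-byte aggregate whose live parts end up in registers: each live
    // fragment becomes DW_OP_regN + DW_OP_piece, and the bare
    // DW_OP_piece 0x4 above is a hole with no known location.
    struct Outer {
      long a;   // DW_OP_reg5 RDI, DW_OP_piece 0x8
      int gap;  // DW_OP_piece 0x4 (no location)
      int b;    // DW_OP_reg4 RSI, DW_OP_piece 0x4
    };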
diff --git a/test/DebugInfo/X86/prologue-stack.ll b/test/DebugInfo/X86/prologue-stack.ll
index ffce403..6072543 100644
--- a/test/DebugInfo/X86/prologue-stack.ll
+++ b/test/DebugInfo/X86/prologue-stack.ll
@@ -1,4 +1,4 @@
-; RUN: llc -fast-isel-sink-local-values -disable-fp-elim -O0 %s -mtriple x86_64-unknown-linux-gnu -o - | FileCheck %s
+; RUN: llc -fast-isel-sink-local-values -frame-pointer=all -O0 %s -mtriple x86_64-unknown-linux-gnu -o - | FileCheck %s
 
 ; int callme(int);
 ; int isel_line_test2() {
diff --git a/test/DebugInfo/X86/string-offsets-multiple-cus.ll b/test/DebugInfo/X86/string-offsets-multiple-cus.ll
index 7851dbf..785bbec 100644
--- a/test/DebugInfo/X86/string-offsets-multiple-cus.ll
+++ b/test/DebugInfo/X86/string-offsets-multiple-cus.ll
@@ -43,21 +43,21 @@
 ; TYPEUNITS-NOT:  NULL
 ; TYPEUNITS:      DW_TAG_enumerator
 ; TYPEUNITS-NOT:  {{DW_TAG|NULL}}
-; TYPEUNITS:      DW_AT_name [DW_FORM_strx1] ( indexed (00000005) string = "b")
+; TYPEUNITS:      DW_AT_name [DW_FORM_strx1] (indexed (00000005) string = "b")
 ; TYPEUNITS-NOT:  contents:
 ; TYPEUNITS:      DW_TAG_type_unit
 ; TYPEUNITS-NOT:  {{DW_TAG|NULL}}
 ; TYPEUNITS:      DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x[[CU1_STROFF]])
 ; TYPEUNITS-NOT:  NULL
 ; TYPEUNITS:      DW_TAG_enumeration_type
-; TYPEUNITS:      DW_AT_name [DW_FORM_strx1] ( indexed (0000000d) string = "E2")
+; TYPEUNITS:      DW_AT_name [DW_FORM_strx1] (indexed (0000000d) string = "E2")
 ; TYPEUNITS-NOT:  contents:
 ; TYPEUNITS:      DW_TAG_type_unit
 ; TYPEUNITS-NOT:  {{DW_TAG|NULL}}
 ; TYPEUNITS:      DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x[[CU1_STROFF]])
 ; TYPEUNITS-NOT:  NULL
 ; TYPEUNITS:      DW_TAG_enumeration_type
-; TYPEUNITS:      DW_AT_name [DW_FORM_strx1] ( indexed (00000013) string = "E3")
+; TYPEUNITS:      DW_AT_name [DW_FORM_strx1] (indexed (00000013) string = "E3")
 
 ; CU 1
 ; BOTH-NOT:    .contents:
@@ -74,7 +74,7 @@
 ; BOTH-NOT:    NULL
 ; BOTH:        DW_TAG_variable
 ; BOTH-NOT:    {{DW_TAG|NULL}}
-; BOTH:        DW_AT_name [DW_FORM_strx1] ( indexed (00000009) string = "glob2")
+; BOTH:        DW_AT_name [DW_FORM_strx1] (indexed (00000009) string = "glob2")
 ;
 ; CU 3
 ; BOTH-NOT:    contents:
@@ -84,7 +84,7 @@
 ; BOTH-NOT:    NULL
 ; BOTH:        DW_TAG_variable
 ; BOTH-NOT:    {{DW_TAG|NULL}}
-; BOTH:        DW_AT_name [DW_FORM_strx1] ( indexed (0000000f) string = "glob3")
+; BOTH:        DW_AT_name [DW_FORM_strx1] (indexed (0000000f) string = "glob3")
 ;
 ; Extract the offset of a string to verify that it is referenced in the string
 ; offsets section.
diff --git a/test/DebugInfo/X86/string-offsets-table-order.ll b/test/DebugInfo/X86/string-offsets-table-order.ll
index 79c0ffc..e39f70c 100644
--- a/test/DebugInfo/X86/string-offsets-table-order.ll
+++ b/test/DebugInfo/X86/string-offsets-table-order.ll
@@ -13,11 +13,11 @@
 
 ; CHECK: .debug_info contents:
 ; CHECK:   DW_TAG_compile_unit
-; CHECK:     DW_AT_comp_dir [DW_FORM_strx1] ( indexed (00000001) string = "X3")
+; CHECK:     DW_AT_comp_dir [DW_FORM_strx1] (indexed (00000000) string = "X3")
 ; CHECK:   DW_TAG_compile_unit
-; CHECK:     DW_AT_comp_dir [DW_FORM_strx1] ( indexed (00000002) string = "X2")
+; CHECK:     DW_AT_comp_dir [DW_FORM_strx1] (indexed (00000001) string = "X2")
 ; CHECK:   DW_TAG_compile_unit
-; CHECK:     DW_AT_comp_dir [DW_FORM_strx1] ( indexed (00000003) string = "X1")
+; CHECK:     DW_AT_comp_dir [DW_FORM_strx1] (indexed (00000002) string = "X1")
 ; CHECK: .debug_info.dwo contents:
 
 ; CHECK: .debug_str contents:
@@ -27,10 +27,10 @@
 
 ; CHECK: .debug_str_offsets contents:
 ; CHECK: Format = DWARF32, Version = 5
-; CHECK-NEXT: 00000000 "foo.dwo"
 ; CHECK-NEXT: [[X3]] "X3"
 ; CHECK-NEXT: [[X2]] "X2"
 ; CHECK-NEXT: [[X1]] "X1"
+; CHECK-NEXT: "foo.dwo"
 ; CHECK-EMPTY:
 
 
diff --git a/test/DebugInfo/X86/string-offsets-table.ll b/test/DebugInfo/X86/string-offsets-table.ll
index b061510..9960fd8 100644
--- a/test/DebugInfo/X86/string-offsets-table.ll
+++ b/test/DebugInfo/X86/string-offsets-table.ll
@@ -27,11 +27,11 @@
 ; MONOLITHIC-NOT:      contents:
 ; MONOLITHIC:          DW_TAG_compile_unit
 ; MONOLITHIC-NOT:      {{DW_TAG|NULL}}
-; MONOLITHIC:          DW_AT_producer [DW_FORM_strx1] ( indexed (00000000) string = "clang{{.*}}")
+; MONOLITHIC:          DW_AT_producer [DW_FORM_strx1] (indexed (00000000) string = "clang{{.*}}")
 ; MONOLITHIC-NOT:      {{DW_TAG|NULL}}
 ; MONOLITHIC:          DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
 ; MONOLITHIC-NOT:      {{DW_TAG|NULL}}
-; MONOLITHIC:          DW_AT_comp_dir [DW_FORM_strx1] ( indexed (00000002) string = "/home/{{.*}}")
+; MONOLITHIC:          DW_AT_comp_dir [DW_FORM_strx1] (indexed (00000002) string = "/home/{{.*}}")
 
 ; Extract the string offsets from the .debug_str section so we can check that 
 ; they are referenced correctly in the .debug_str_offsets section.
@@ -59,8 +59,8 @@
 ; SPLIT:      DW_TAG_compile_unit
 ; SPLIT-NOT:  {{DW_TAG|contents:}}
 ; SPLIT:      DW_AT_str_offsets_base [DW_FORM_sec_offset] (0x00000008)
-; SPLIT:      DW_AT_GNU_dwo_name [DW_FORM_strx1] ( indexed (00000000) string = "foo.dwo")
-; SPLIT:      DW_AT_comp_dir [DW_FORM_strx1] ( indexed (00000001) string = "/home/test")
+; SPLIT:      DW_AT_comp_dir [DW_FORM_strx1] (indexed (00000000) string = "/home/test")
+; SPLIT:      DW_AT_GNU_dwo_name [DW_FORM_strx1] (indexed (00000001) string = "foo.dwo")
 
 ; Check for the split CU in .debug_info.dwo.
 ; SPLIT:      .debug_info.dwo contents:
@@ -73,18 +73,18 @@
 ; SPLIT-NOT:  contents:
 ; SPLIT:      DW_TAG_enumerator
 ; SPLIT-NOT:  {{DW_TAG|NULL}}
-; SPLIT:      DW_AT_name [DW_FORM_strx1]    ( indexed (00000004) string = "a")
+; SPLIT:      DW_AT_name [DW_FORM_strx1]    (indexed (00000001) string = "a")
 ; SPLIT-NOT:  contents:
 ; SPLIT:      DW_TAG_enumerator
 ; SPLIT-NOT:  {{DW_TAG|NULL}}
-; SPLIT:      DW_AT_name [DW_FORM_strx1]    ( indexed (00000005) string = "b")
+; SPLIT:      DW_AT_name [DW_FORM_strx1]    (indexed (00000002) string = "b")
 ;
 ; Extract the string offsets referenced in the main file by the skeleton unit.
 ; SPLIT:      .debug_str contents:
-; SPLIT-NEXT: 0x00000000: "foo.dwo"
-; SPLIT-NEXT: 0x[[STRING2SPLIT:[0-9a-f]*]]: "/home/test"
-; SPLIT-NEXT: 0x[[STRING3SPLIT:[0-9a-f]*]]: "E"
-; SPLIT-NEXT: 0x[[STRING4SPLIT:[0-9a-f]*]]: "glob"
+; SPLIT-NEXT: 0x[[STRHOMETESTSPLIT:[0-9a-f]*]]: "/home/test"
+; SPLIT-NEXT: 0x[[STRESPLIT:[0-9a-f]*]]: "E"
+; SPLIT-NEXT: 0x[[STRGLOBSPLIT:[0-9a-f]*]]: "glob"
+; SPLIT-NEXT: 0x[[STRFOODWOSPLIT:[0-9a-f]*]]: "foo.dwo"
 ;
 ; Extract the string offsets referenced in the .dwo file by the split unit.
 ; SPLIT:      .debug_str.dwo contents:
@@ -98,8 +98,8 @@
 ; referenced by the debug info.
 ; SPLIT:      .debug_str_offsets contents:
 ; SPLIT-NEXT: 0x00000000: Contribution size = 12, Format = DWARF32, Version = 5
-; SPLIT-NEXT: 0x00000008: 00000000 "foo.dwo"
-; SPLIT-NEXT: 0x0000000c: [[STRING2SPLIT]] "/home/test"
+; SPLIT-NEXT: 0x00000008: [[STRHOMETESTSPLIT]] "/home/test"
+; SPLIT-NEXT: 0x0000000c: [[STRFOODWOSPLIT]] "foo.dwo"
 ; SPLIT-EMPTY:
 
 ; SPLIT:      .debug_str_offsets.dwo contents:
diff --git a/test/DebugInfo/debuglineinfo.test b/test/DebugInfo/debuglineinfo.test
index 720ba12..651bbe5 100644
--- a/test/DebugInfo/debuglineinfo.test
+++ b/test/DebugInfo/debuglineinfo.test
@@ -1,50 +1,19 @@
 RUN: llvm-rtdyld -printline %p/Inputs/test-inline.o \
-RUN:   | FileCheck %s -check-prefix TEST_INLINE
+RUN:   | FileCheck %s
 RUN: llvm-rtdyld -printdebugline %p/Inputs/test-inline.o \
-RUN:   | FileCheck %s -check-prefix TEST_INLINE
+RUN:   | FileCheck %s
 RUN: llvm-rtdyld -printline %p/Inputs/test-parameters.o \
-RUN:   | FileCheck %s -check-prefix TEST_PARAMETERS
+RUN:   | FileCheck %s
 RUN: llvm-rtdyld -printdebugline %p/Inputs/test-parameters.o \
-RUN:   | FileCheck %s -check-prefix TEST_PARAMETERS
+RUN:   | FileCheck %s
 
 ; This test verifies that relocations are correctly applied to the
 ; .debug_line section and exercises DIContext::getLineInfoForAddressRange().
-; If relocations are not applied the first two functions will be reported as
-; both starting at address zero in the; line number table.
-TEST_INLINE:      Function: _Z15test_parametersPfPA2_dR11char_structPPitm, Size = 170
-TEST_INLINE-NEXT: Line info @ 0: test-inline.cpp, line:33
-TEST_INLINE-NEXT: Line info @ 35: test-inline.cpp, line:34
-TEST_INLINE-NEXT: Line info @ 165: test-inline.cpp, line:35
-TEST_INLINE-NEXT: Function: _Z3foov, Size = 3
-TEST_INLINE-NEXT: Line info @ 0: test-inline.cpp, line:28
-TEST_INLINE-NEXT: Line info @ 2: test-inline.cpp, line:29
-TEST_INLINE-NEXT: Function: main, Size = 146
-TEST_INLINE-NEXT: Line info @ 0: test-inline.cpp, line:39
-TEST_INLINE-NEXT: Line info @ 21: test-inline.cpp, line:41
-TEST_INLINE-NEXT: Line info @ 39: test-inline.cpp, line:42
-TEST_INLINE-NEXT: Line info @ 60: test-inline.cpp, line:44
-TEST_INLINE-NEXT: Line info @ 80: test-inline.cpp, line:48
-TEST_INLINE-NEXT: Line info @ 90: test-inline.cpp, line:45
-TEST_INLINE-NEXT: Line info @ 95: test-inline.cpp, line:46
-TEST_INLINE-NEXT: Line info @ 114: test-inline.cpp, line:48 
-TEST_INLINE-NEXT: Line info @ 141: test-inline.cpp, line:49
 
-; This test checks the case where all code is in a single section.
-TEST_PARAMETERS:      Function: _Z15test_parametersPfPA2_dR11char_structPPitm, Size = 170
-TEST_PARAMETERS-NEXT: Line info @ 0: test-parameters.cpp, line:33
-TEST_PARAMETERS-NEXT: Line info @ 35: test-parameters.cpp, line:34
-TEST_PARAMETERS-NEXT: Line info @ 165: test-parameters.cpp, line:35
-TEST_PARAMETERS-NEXT: Function: _Z3foov, Size = 3
-TEST_PARAMETERS-NEXT: Line info @ 0: test-parameters.cpp, line:28
-TEST_PARAMETERS-NEXT: Line info @ 2: test-parameters.cpp, line:29
-TEST_PARAMETERS-NEXT: Function: main, Size = 146
-TEST_PARAMETERS-NEXT: Line info @ 0: test-parameters.cpp, line:39
-TEST_PARAMETERS-NEXT: Line info @ 21: test-parameters.cpp, line:41
-TEST_PARAMETERS-NEXT: Line info @ 39: test-parameters.cpp, line:42
-TEST_PARAMETERS-NEXT: Line info @ 60: test-parameters.cpp, line:44
-TEST_PARAMETERS-NEXT: Line info @ 80: test-parameters.cpp, line:48
-TEST_PARAMETERS-NEXT: Line info @ 90: test-parameters.cpp, line:45
-TEST_PARAMETERS-NEXT: Line info @ 95: test-parameters.cpp, line:46
-TEST_PARAMETERS-NEXT: Line info @ 114: test-parameters.cpp, line:48 
-TEST_PARAMETERS-NEXT: Line info @ 141: test-parameters.cpp, line:49
-
+CHECK:      Function: _Z2f1v, Size = 6
+CHECK-NEXT:   Line info @ 0: test-inline.cpp, line:1
+CHECK-NEXT:   Line info @ 4: test-inline.cpp, line:1
+CHECK-NEXT: Function: _Z2f2v, Size = 11
+CHECK-NEXT:   Line info @ 0: test-inline.cpp, line:2
+CHECK-NEXT:   Line info @ 4: test-inline.cpp, line:2
+CHECK-NEXT:   Line info @ 9: test-inline.cpp, line:2
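
(A sketch of the behavior under test, not verbatim tool output: with relocations
applied, llvm-rtdyld -printline reports each function's line table at that
function's own address, e.g.

  Function: _Z2f1v, Size = 6
    Line info @ 0: test-inline.cpp, line:1

whereas, per the comment this patch removes, unapplied relocations would make
the first two functions both appear to start at address zero.)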
diff --git a/test/DebugInfo/llvm-symbolizer-split-dwarf-no-skel-address.test b/test/DebugInfo/llvm-symbolizer-split-dwarf-no-skel-address.test
index b6c9ccc..1b8da86 100644
--- a/test/DebugInfo/llvm-symbolizer-split-dwarf-no-skel-address.test
+++ b/test/DebugInfo/llvm-symbolizer-split-dwarf-no-skel-address.test
@@ -15,5 +15,8 @@
 high/low pc (& update the CU length field and abbrev to match) & then
 compile/objcopy to make the .o and .dwo.
 
-CHECK: _Z2f2v
-CHECK: test.cpp:2:51
+Ensure that the f2 inlined frame is not included: it would be inefficient to
+load all the debug info and search its address ranges, so a CU that lists no
+ranges is assumed to cover no addresses.
+
+CHECK-NOT: _Z2f2v
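
(Illustrative only, object name hypothetical: after this change an invocation
such as `llvm-symbolizer --obj test.o 0x0` is expected to omit the _Z2f2v
inlined frame entirely, which is what the CHECK-NOT above enforces.)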
diff --git a/test/DebugInfo/llvm-symbolizer.test b/test/DebugInfo/llvm-symbolizer.test
index a6a1549..91d9a42 100644
--- a/test/DebugInfo/llvm-symbolizer.test
+++ b/test/DebugInfo/llvm-symbolizer.test
@@ -1,10 +1,10 @@
 RUN: rm -rf %t
 RUN: mkdir -p %t
-RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400559" > %t.input
-RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64.debuglink 0x400559" >> %t.input
-RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400436" >> %t.input
-RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400528" >> %t.input
-RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400586" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x40113f" > %t.input
+RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64.debuglink 0x40113f" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x401020" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x40110e" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x401160" >> %t.input
 RUN: echo "%p/Inputs/dwarfdump-test2.elf-x86-64 0x4004e8" >> %t.input
 RUN: echo "%p/Inputs/dwarfdump-test2.elf-x86-64 0x4004f4" >> %t.input
 RUN: echo "%p/Inputs/dwarfdump-test4.elf-x86-64 0x62c" >> %t.input
@@ -181,8 +181,8 @@
 UNKNOWN-ARCH: ??
 UNKNOWN-ARCH-NOT: main
 
-RUN: echo "0x400559" > %t.input4
-RUN: echo "0x400436" >> %t.input4
+RUN: echo "0x40113f" > %t.input4
+RUN: echo "0x401020" >> %t.input4
 RUN: llvm-symbolizer --obj %p/Inputs/dwarfdump-test.elf-x86-64 < %t.input4 \
 RUN:   | FileCheck %s --check-prefix=BINARY
 
@@ -190,9 +190,9 @@
 BINARY-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:16
 BINARY:      _start
 
-RUN: echo "0x400720" > %t.input5
-RUN: echo "0x4004a0" >> %t.input5
-RUN: echo "0x4006f0" >> %t.input5
+RUN: echo "0x401140" > %t.input5
+RUN: echo "0x401020" >> %t.input5
+RUN: echo "0x401120" >> %t.input5
 RUN: llvm-symbolizer --obj %p/Inputs/llvm-symbolizer-test.elf-x86-64 < %t.input5 \
 RUN:   | FileCheck %s --check-prefix=BINARY_C
 
@@ -210,5 +210,7 @@
 RUN: echo "%p/Inputs/dwarfdump-test4.elf-x86-64 0x62c" > %t.input7
 RUN: llvm-symbolizer --functions=short --demangle=false < %t.input7 \
 RUN:    | FileCheck %s --check-prefix=SHORT_FUNCTION_NAME
+RUN: llvm-symbolizer --functions=short -C=false < %t.input7 \
+RUN:    | FileCheck %s --check-prefix=SHORT_FUNCTION_NAME
 
 SHORT_FUNCTION_NAME-NOT: _Z1cv
diff --git a/test/DebugInfo/unrolled-loop-remainder.ll b/test/DebugInfo/unrolled-loop-remainder.ll
index 96a25e2..1256063 100644
--- a/test/DebugInfo/unrolled-loop-remainder.ll
+++ b/test/DebugInfo/unrolled-loop-remainder.ll
@@ -68,7 +68,7 @@
 
 !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = distinct !DIGlobalVariable(name: "b", scope: !2, file: !3, line: 2, type: !9, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 6.0.0 (http://llvm.org/git/clang.git 044091b728654e62444a7ea10e6efb489c705bed) (http://llvm.org/git/llvm.git 1c7b55afdb1c5791e0557d9e32e2dd07c7acb2b0)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 6.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "loop.c", directory: "/work/projects/src/tests/unroll-debug-info")
 !4 = !{}
 !5 = !{!6, !0}
@@ -79,7 +79,7 @@
 !10 = !{i32 2, !"Dwarf Version", i32 4}
 !11 = !{i32 2, !"Debug Info Version", i32 3}
 !12 = !{i32 1, !"wchar_size", i32 4}
-!13 = !{!"clang version 6.0.0 (http://llvm.org/git/clang.git 044091b728654e62444a7ea10e6efb489c705bed) (http://llvm.org/git/llvm.git 1c7b55afdb1c5791e0557d9e32e2dd07c7acb2b0)"}
+!13 = !{!"clang version 6.0.0"}
 !14 = distinct !DISubprogram(name: "c", scope: !3, file: !3, line: 3, type: !15, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: true, unit: !2, retainedNodes: !4)
 !15 = !DISubroutineType(types: !16)
 !16 = !{!9}
diff --git a/test/Demangle/ms-cxx17-noexcept.test b/test/Demangle/ms-cxx17-noexcept.test
new file mode 100644
index 0000000..fc60d66
--- /dev/null
+++ b/test/Demangle/ms-cxx17-noexcept.test
@@ -0,0 +1,25 @@
+; RUN: llvm-undname < %s | FileCheck %s
+
+; CHECK-NOT: Invalid mangled name
+
+?nochange@@YAXXZ
+; CHECK: void __cdecl nochange(void)
+
+?a@@YAXP6AHXZ@Z
+; CHECK: void __cdecl a(int (__cdecl *)(void))
+?a@@YAXP6AHX_E@Z
+; CHECK: void __cdecl a(int (__cdecl *)(void) noexcept)
+
+?b@@YAXP6AHXZ@Z
+; CHECK: void __cdecl b(int (__cdecl *)(void))
+
+?c@@YAXP6AHXZ@Z
+; CHECK: void __cdecl c(int (__cdecl *)(void))
+?c@@YAXP6AHX_E@Z
+; CHECK: void __cdecl c(int (__cdecl *)(void) noexcept)
+
+?ee@?$e@$$A6AXXZ@@EEAAXXZ
+; CHECK: private: virtual void __cdecl e<void __cdecl(void)>::ee(void)
+
+?ee@?$e@$$A6AXX_E@@EEAAXXZ
+; CHECK: private: virtual void __cdecl e<void __cdecl(void) noexcept>::ee(void)
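
(A minimal sketch of the _E noexcept encoding this new test exercises;
llvm-undname reads mangled names from stdin, so, with output elided to the
demangled line:

  $ echo '?a@@YAXP6AHX_E@Z' | llvm-undname
  void __cdecl a(int (__cdecl *)(void) noexcept)

The _E suffix on the inner function type is what adds the noexcept.)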
diff --git a/test/FileCheck/defines.txt b/test/FileCheck/defines.txt
index c788124..24947b25 100644
--- a/test/FileCheck/defines.txt
+++ b/test/FileCheck/defines.txt
@@ -3,16 +3,32 @@
 ;
 ; RUN: not FileCheck -DVALUE=10 -check-prefix NOT -input-file %s %s 2>&1 | FileCheck %s -check-prefix NOT-ERRMSG
 ; RUN: FileCheck -DVALUE=20 -check-prefix NOT -input-file %s %s
+; RUN: not FileCheck -DVALUE10 -input-file %s %s 2>&1 | FileCheck %s -check-prefix ERRCLIEQ1
+; RUN: not FileCheck -D -input-file %s %s 2>&1 | FileCheck %s -check-prefix ERRCLIEQ2
+; RUN: not FileCheck -D=10 -input-file %s %s 2>&1 | FileCheck %s -check-prefix ERRCLIVAR1
+; RUN: not FileCheck -D= -input-file %s %s 2>&1 | FileCheck %s -check-prefix ERRCLIVAR2
+; RUN: FileCheck -DVALUE= -check-prefix EMPTY -input-file %s %s 2>&1
 
 Value = 10
 ; CHECK: Value = [[VALUE]]
 ; NOT-NOT: Value = [[VALUE]]
 
-; ERRMSG: defines.txt:8:10: error: CHECK: expected string not found in input
+; ERRMSG: defines.txt:[[@LINE-3]]:10: error: CHECK: expected string not found in input
 ; ERRMSG: defines.txt:1:1: note: scanning from here
 ; ERRMSG: defines.txt:1:1: note: with variable "VALUE" equal to "20"
-; ERRMSG: defines.txt:7:1: note: possible intended match here
+; ERRMSG: defines.txt:[[@LINE-7]]:1: note: possible intended match here
 
-; NOT-ERRMSG: defines.txt:9:12: error: {{NOT}}-NOT: excluded string found in input
-; NOT-ERRMSG: defines.txt:7:1: note: found here
-; NOT-ERRMSG: defines.txt:7:1: note: with variable "VALUE" equal to "10"
\ No newline at end of file
+; NOT-ERRMSG: defines.txt:[[@LINE-7]]:12: error: {{NOT}}-NOT: excluded string found in input
+; NOT-ERRMSG: defines.txt:[[@LINE-10]]:1: note: found here
+; NOT-ERRMSG: defines.txt:[[@LINE-11]]:1: note: with variable "VALUE" equal to "10"
+
+; ERRCLIEQ1: Missing equal sign in command-line definition '-DVALUE10'
+
+; ERRCLIEQ2: FileCheck{{[^:]*}}: for the -D option: requires a value!
+
+; ERRCLIVAR1: Missing pattern variable name in command-line definition '-D=10'
+
+; ERRCLIVAR2: Missing pattern variable name in command-line definition '-D='
+
+Empty value = @@
+; EMPTY: Empty value = @[[VALUE]]@
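
(For reference, a self-contained sketch of the -D mechanism these RUN lines
exercise; the file name check.txt is hypothetical:

  $ cat check.txt
  Value = 10
  ; CHECK: Value = [[VALUE]]
  $ FileCheck -DVALUE=10 -input-file check.txt check.txt

An empty definition such as -DVALUE= is legal, while -DVALUE10 (missing '='),
-D=10, and -D= (missing the variable name) are rejected with the command-line
errors checked above.)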
diff --git a/test/FileCheck/dump-input-annotations.txt b/test/FileCheck/dump-input-annotations.txt
new file mode 100644
index 0000000..eaf1c94
--- /dev/null
+++ b/test/FileCheck/dump-input-annotations.txt
@@ -0,0 +1,394 @@
+;--------------------------------------------------
+; Use -strict-whitespace to check marker and note alignment here.
+; (Also check multiline marker where start/end columns vary across lines.)
+;
+; In the remaining checks, don't use -strict-whitespace and thus check just the
+; presence, order, and lengths of markers.  That way, if we ever change padding
+; within line labels, we don't have to adjust so many tests.
+;--------------------------------------------------
+
+; RUN: echo 'hello world' > %t.in
+; RUN: echo 'goodbye' >> %t.in
+; RUN: echo 'world' >> %t.in
+; RUN: echo 'unicorn' >> %t.in
+
+; RUN: echo 'CHECK: hello' > %t.chk
+; RUN: echo 'CHECK: universe' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -strict-whitespace -match-full-lines -check-prefix=ALIGN %s
+
+; ALIGN:Full input was:
+; ALIGN-NEXT:<<<<<<
+; ALIGN-NEXT:           1: hello world
+; ALIGN-NEXT:check:1       ^~~~~
+; ALIGN-NEXT:check:2'0           X~~~~
+; ALIGN-NEXT:           2: goodbye
+; ALIGN-NEXT:check:2'0     ~~~~~~~
+; ALIGN-NEXT:           3: world
+; ALIGN-NEXT:check:2'0     ~~~~~
+; ALIGN-NEXT:           4: unicorn
+; ALIGN-NEXT:check:2'0     ~~~~~~~ error: no match found
+; ALIGN-NEXT:check:2'1     ?       possible intended match
+; ALIGN-NEXT:>>>>>>
+; ALIGN-NOT:{{.}}
+
+;--------------------------------------------------
+; CHECK (also: multi-line search range, fuzzy match)
+;--------------------------------------------------
+
+; Good match and no match.
+
+; RUN: echo 'hello'   > %t.in
+; RUN: echo 'again'   >> %t.in
+; RUN: echo 'whirled' >> %t.in
+
+; RUN: echo 'CHECK: hello' > %t.chk
+; RUN: echo 'CHECK: world' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=CHK
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=CHK,CHK-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=CHK,CHK-V
+
+; CHK:        <<<<<<
+; CHK-NEXT:              1: hello
+; CHK-V-NEXT: check:1       ^~~~~
+; CHK-NEXT:              2: again
+; CHK-NEXT:   check:2'0     X~~~~
+; CHK-NEXT:              3: whirled
+; CHK-NEXT:   check:2'0     ~~~~~~~ error: no match found
+; CHK-NEXT:   check:2'1     ?       possible intended match
+; CHK-NEXT:   >>>>>>
+; CHK-NOT:    {{.}}
+
+;--------------------------------------------------
+; CHECK-COUNT-<num>
+;--------------------------------------------------
+
+; Good match and no match.
+
+; RUN: echo 'pete'   > %t.in
+; RUN: echo 'repete' >> %t.in
+; RUN: echo 'repeat' >> %t.in
+
+; RUN: echo 'CHECK-COUNT-3: pete' > %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=CNT,CNT-Q
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=CNT,CNT-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=CNT,CNT-V
+
+; CNT:          <<<<<<
+; CNT-NEXT:                1: pete
+; CNT-V-NEXT:   count:1'0     ^~~~
+; CNT-NEXT:                2: repete
+; CNT-V-NEXT:   count:1'1       ^~~~
+; CNT-NEXT:                3: repeat
+; CNT-Q-NEXT:   count:1       X~~~~~ error: no match found
+; CNT-V-NEXT:   count:1'2     X~~~~~ error: no match found
+; CNT-NEXT:     >>>>>>
+; CNT-NOT:      {{.}}
+
+;--------------------------------------------------
+; CHECK-NEXT (also: EOF search-range, wrong-line match)
+;--------------------------------------------------
+
+; Good match and no match.
+
+; RUN: echo 'hello' > %t.in
+; RUN: echo 'again' >> %t.in
+
+; RUN: echo 'CHECK: hello' > %t.chk
+; RUN: echo 'CHECK-NEXT: again' >> %t.chk
+; RUN: echo 'CHECK-NEXT: world' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=NXT
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=NXT,NXT-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=NXT,NXT-V,NXT-VV
+
+; NXT:        <<<<<<
+; NXT-NEXT:            1: hello
+; NXT-V-NEXT: check:1     ^~~~~
+; NXT-NEXT:            2: again
+; NXT-V-NEXT: next:2      ^~~~~
+; NXT-NEXT:            3:
+; NXT-NEXT:   next:3      X error: no match found
+; NXT-NEXT:   >>>>>>
+; NXT-NOT:    {{.}}
+
+; Wrong-line match.
+
+; RUN: echo 'yonder' >> %t.in
+; RUN: echo 'world' >> %t.in
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=NXT2
+
+; NXT2:      <<<<<<
+; NXT2-NEXT:         1: hello
+; NXT2-NEXT:         2: again
+; NXT2-NEXT:         3: yonder
+; NXT2-NEXT:         4: world
+; NXT2-NEXT: next:3     !~~~~ error: match on wrong line
+; NXT2-NEXT: >>>>>>
+; NXT2-NOT:  {{.}}
+
+;--------------------------------------------------
+; CHECK-SAME (also: multiple annotations per line, single-char search range,
+; wrong-line match)
+;--------------------------------------------------
+
+; Good match and no match.
+
+; RUN: echo 'hello world!' > %t.in
+
+; RUN: echo 'CHECK: hello' > %t.chk
+; RUN: echo 'CHECK-SAME: world' >> %t.chk
+; RUN: echo 'CHECK-SAME: again' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=SAM
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=SAM,SAM-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=SAM,SAM-V,SAM-VV
+
+; SAM:        <<<<<<
+; SAM-NEXT:            1: hello world!
+; SAM-V-NEXT: check:1     ^~~~~
+; SAM-V-NEXT: same:2            ^~~~~
+; SAM-NEXT:   same:3                 X error: no match found
+; SAM-NEXT:   >>>>>>
+; SAM-NOT:    {{.}}
+
+; Wrong-line match.
+
+; RUN: echo 'again' >> %t.in
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=SAM2
+
+; SAM2:      <<<<<<
+; SAM2-NEXT:          1: hello world!
+; SAM2-NEXT: check:1     ^~~~~
+; SAM2-NEXT: same:2            ^~~~~
+; SAM2-NEXT:          2: again
+; SAM2-NEXT: same:3      !~~~~ error: match on wrong line
+; SAM2-NEXT: >>>>>>
+; SAM2-NOT:  {{.}}
+
+;--------------------------------------------------
+; CHECK-EMPTY (also: search range ends at label, single-char match, wrong-line
+; match)
+;--------------------------------------------------
+
+; Good match and no match.
+;
+; CHECK-EMPTY always seems to match an empty line at EOF (illegally when it's
+; not the next line) unless either (1) the last line is non-empty and has no
+; newline or (2) there's a CHECK-LABEL to end the search range before EOF.  We
+; choose scenario 2 to check the case of no match.
+
+; RUN: echo 'hello' > %t.in
+; RUN: echo '' >> %t.in
+; RUN: echo 'world' >> %t.in
+; RUN: echo 'label' >> %t.in
+
+; RUN: echo 'CHECK: hello' > %t.chk
+; RUN: echo 'CHECK-EMPTY:' >> %t.chk
+; RUN: echo 'CHECK-EMPTY:' >> %t.chk
+; RUN: echo 'CHECK-LABEL: label' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=EMP
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=EMP,EMP-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=EMP,EMP-V,EMP-VV
+
+; EMP:        <<<<<<
+; EMP-NEXT:            1: hello
+; EMP-V-NEXT: check:1     ^~~~~
+; EMP-NEXT:            2:
+; EMP-V-NEXT: empty:2     ^
+; EMP-NEXT:            3: world
+; EMP-NEXT:   empty:3     X~~~~
+; EMP-NEXT:            4: label
+; EMP-NEXT:   empty:3     ~~~~~ error: no match found
+; EMP-V-NEXT: label:4     ^~~~~
+; EMP-NEXT:   >>>>>>
+; EMP-NOT:    {{.}}
+
+; Wrong-line match.
+
+; RUN: echo 'hello' > %t.in
+; RUN: echo 'world' >> %t.in
+
+; RUN: echo 'CHECK: hello' > %t.chk
+; RUN: echo 'CHECK-EMPTY:' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=EMP2
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=EMP2,EMP2-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=EMP2,EMP2-V,EMP2-VV
+
+; EMP2:        <<<<<<
+; EMP2-NEXT:            1: hello
+; EMP2-V-NEXT: check:1     ^~~~~
+; EMP2-NEXT:            2: world
+; EMP2-NEXT:            3:
+; EMP2-NEXT:   empty:2     !     error: match on wrong line
+; EMP2-NEXT:   >>>>>>
+; EMP2-NOT:    {{.}}
+
+;--------------------------------------------------
+; CHECK-NOT (also: EOF pattern, and multiline range that ends before EOL)
+;--------------------------------------------------
+
+; No match (success) and unexpected match (error).
+
+; RUN: echo 'hello' > %t.in
+; RUN: echo 'world' >> %t.in
+; RUN: echo 'again' >> %t.in
+
+; RUN: echo 'CHECK-NOT: goodbye' > %t.chk
+; RUN: echo 'CHECK-NOT: world' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=NOT
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=NOT,NOT-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=NOT,NOT-V,NOT-VV
+
+; NOT:         <<<<<<
+; NOT-NEXT:           1: hello
+; NOT-VV-NEXT: not:1     X~~~~
+; NOT-NEXT:           2: world
+; NOT-VV-NEXT: not:1     ~~~~~
+; NOT-NEXT:    not:2     !~~~~ error: no match expected
+; NOT-NEXT:           3: again
+; NOT-VV-NEXT: not:1     ~~~~~
+; NOT-VV-NEXT:        4:
+; NOT-VV-NEXT: eof:2     ^
+; NOT-NEXT:    >>>>>>
+; NOT-NOT:     {{.}}
+
+; Again, but with a CHECK instead of EOF as search range end.
+
+; RUN: echo 'CHECK: ain' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefix=NOT2
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=NOT2,NOT2-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=NOT2,NOT2-V,NOT2-VV
+
+; NOT2:         <<<<<<
+; NOT2-NEXT:             1: hello
+; NOT2-VV-NEXT: not:1       X~~~~
+; NOT2-NEXT:             2: world
+; NOT2-VV-NEXT: not:1       ~~~~~
+; NOT2-NEXT:    not:2       !~~~~ error: no match expected
+; NOT2-NEXT:             3: again
+; NOT2-VV-NEXT: not:1       ~~
+; NOT2-V-NEXT:  check:3       ^~~
+; NOT2-NEXT:    >>>>>>
+; NOT2-NOT:     {{.}}
+
+;--------------------------------------------------
+; CHECK-DAG (also: matches in different order than directives, discarded match)
+;--------------------------------------------------
+
+; Good match, discarded match plus good match, and no match.
+
+; RUN: echo 'abc' > %t.in
+; RUN: echo 'def' >> %t.in
+; RUN: echo 'abc' >> %t.in
+
+; RUN: echo 'CHECK-DAG: def' > %t.chk
+; RUN: echo 'CHECK-DAG: abc' >> %t.chk
+; RUN: echo 'CHECK-DAG: abc' >> %t.chk
+; RUN: echo 'CHECK-DAG: def' >> %t.chk
+
+; Prefixes used here:
+; DAG    = quiet, -v, or -vv
+; DAG-Q  = quiet
+; DAG-V  = -v or -vv (-vv implies -v)
+; DAG-VQ = -v and not -vv
+; DAG-VV = -vv
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=DAG,DAG-Q
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=DAG,DAG-V,DAG-VQ
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=DAG,DAG-V,DAG-VV
+
+; DAG:         <<<<<<
+; DAG-NEXT:             1: abc
+; DAG-V-NEXT:  dag:2       ^~~
+; DAG-VV-NEXT: dag:3'0     !~~ discard: overlaps earlier match
+; DAG-NEXT:             2: def
+; DAG-V-NEXT:  dag:1       ^~~
+; DAG-VV-NEXT: dag:4'0     !~~ discard: overlaps earlier match
+; DAG-NEXT:             3: abc
+; DAG-VQ-NEXT: dag:3       ^~~
+; DAG-VV-NEXT: dag:3'1     ^~~
+; DAG-Q-NEXT:  dag:4       X~~ error: no match found
+; DAG-VQ-NEXT: dag:4       X~~ error: no match found
+; DAG-VV-NEXT: dag:4'1     X~~ error: no match found
+; DAG-NEXT:    >>>>>>
+; DAG-NOT:     {{.}}
+
+;--------------------------------------------------
+; CHECK-LABEL
+;
+; FIXME: Labels sometimes produce redundant diagnostics for good matches.
+; That bug is independent of but affects -dump-input.
+;--------------------------------------------------
+
+; Good match and no match.
+
+; RUN: echo 'lab0' > %t.in
+; RUN: echo 'foo' >> %t.in
+; RUN: echo 'lab1' >> %t.in
+; RUN: echo 'bar' >> %t.in
+
+; RUN: echo 'CHECK-LABEL: lab0' > %t.chk
+; RUN: echo 'CHECK: foo' >> %t.chk
+; RUN: echo 'CHECK-LABEL: lab2' >> %t.chk
+
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=LAB
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -v 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=LAB,LAB-V
+; RUN: not FileCheck -dump-input=always -input-file %t.in %t.chk -vv 2>&1 \
+; RUN: | FileCheck -match-full-lines %s -check-prefixes=LAB,LAB-V,LAB-VV
+
+; LAB:         <<<<<<
+; LAB-NEXT:               1: lab0
+; LAB-V-NEXT:  label:1'0     ^~~~
+; LAB-V-NEXT:  label:1'1     ^~~~
+; LAB-NEXT:               2: foo
+; LAB-NEXT:    label:3'0     X~~
+; LAB-NEXT:               3: lab1
+; LAB-NEXT:    label:3'0     ~~~~
+; LAB-NEXT:    label:3'1     ?    possible intended match
+; LAB-NEXT:               4: bar
+; LAB-NEXT:    label:3'0     ~~~ error: no match found
+; LAB-NEXT:    >>>>>>
+; LAB-NOT:     {{.}}
+
+
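
(A reading aid for the annotated dumps above, derived from the checks
themselves:

               1: hello world
    check:1       ^~~~~        <- good match for the directive on check line 1
    check:2'0           X~~~~  <- failed search for check line 2 begins here

^~~ underlines a match, X~~ the scanned range of a failed match, !~~ an
erroneous or discarded match, and ? a possible intended match; a label such as
check:2'1 names the directive's check-file line (2) and diagnostic index (1).)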
diff --git a/test/FileCheck/dump-input-enable.txt b/test/FileCheck/dump-input-enable.txt
new file mode 100644
index 0000000..71e3d5d
--- /dev/null
+++ b/test/FileCheck/dump-input-enable.txt
@@ -0,0 +1,128 @@
+; RUN: echo ciao > %t.good
+; RUN: echo world >> %t.good
+
+; RUN: echo hello > %t.err
+; RUN: echo world >> %t.err
+
+; RUN: echo 'CHECK: ciao' > %t.check
+; RUN: echo 'CHECK-NEXT: world' >> %t.check
+
+;--------------------------------------------------
+; unknown value
+;--------------------------------------------------
+
+; RUN: not FileCheck -input-file %t.good %t.check -check-prefix=CHECK \
+; RUN:               -match-full-lines -dump-input=foobar 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=BADVAL
+
+; No positional arg.
+; RUN: not FileCheck -dump-input=foobar 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=BADVAL
+
+BADVAL: FileCheck{{.*}}: for the -dump-input option: Cannot find option named 'foobar'!
+
+;--------------------------------------------------
+; help
+;--------------------------------------------------
+
+; Appended to normal command line.
+; RUN: FileCheck -input-file %t.err -color %t.check -dump-input=help \
+; RUN: | FileCheck %s -check-prefix=HELP
+
+; No positional arg.
+; RUN: FileCheck -dump-input=help | FileCheck %s -check-prefix=HELP
+
+HELP-NOT: {{.}}
+HELP: The following description was requested by -dump-input=help
+HELP: try{{.*}}-color
+HELP-NOT: {{.}}
+
+;--------------------------------------------------
+; never
+;--------------------------------------------------
+
+; RUN: FileCheck -input-file %t.good %t.check -check-prefix=CHECK \
+; RUN:           -match-full-lines -dump-input=never 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-NODUMP -allow-empty
+
+; RUN: not FileCheck -input-file %t.err %t.check -check-prefix=CHECK \
+; RUN:               -match-full-lines -dump-input=never 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-NODUMP
+
+;--------------------------------------------------
+; default: never
+;--------------------------------------------------
+
+; RUN: FileCheck -input-file %t.good %t.check -check-prefix=CHECK \
+; RUN:           -match-full-lines 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-NODUMP -allow-empty
+
+; RUN: not FileCheck -input-file %t.err %t.check -check-prefix=CHECK \
+; RUN:               -match-full-lines 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-NODUMP
+
+;--------------------------------------------------
+; fail
+;--------------------------------------------------
+
+; RUN: FileCheck -input-file %t.good %t.check -check-prefix=CHECK \
+; RUN:           -match-full-lines -dump-input=fail 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-NODUMP -allow-empty
+
+; RUN: not FileCheck -input-file %t.err %t.check -check-prefix=CHECK \
+; RUN:               -match-full-lines -dump-input=fail 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-ERR
+
+;--------------------------------------------------
+; -dump-input-on-failure
+;--------------------------------------------------
+
+; RUN: FileCheck -input-file %t.good %t.check -check-prefix=CHECK \
+; RUN:           -match-full-lines -dump-input-on-failure 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-NODUMP -allow-empty
+
+; RUN: not FileCheck -input-file %t.err %t.check -check-prefix=CHECK \
+; RUN:               -match-full-lines -dump-input-on-failure 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-ERR
+
+; RUN: env FILECHECK_DUMP_INPUT_ON_FAILURE=1 \
+; RUN: FileCheck -input-file %t.good %t.check -check-prefix=CHECK \
+; RUN:           -match-full-lines 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-NODUMP -allow-empty
+
+; RUN: env FILECHECK_DUMP_INPUT_ON_FAILURE=1 \
+; RUN: not FileCheck -input-file %t.err %t.check -check-prefix=CHECK \
+; RUN:               -match-full-lines 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-ERR
+
+;--------------------------------------------------
+; always
+;--------------------------------------------------
+
+; RUN: FileCheck -input-file %t.good %t.check -check-prefix=CHECK \
+; RUN:           -match-full-lines -dump-input=always -v 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-GOOD
+
+; RUN: not FileCheck -input-file %t.err %t.check -check-prefix=CHECK \
+; RUN:               -match-full-lines -dump-input=always 2>&1 \
+; RUN: | FileCheck %s -match-full-lines -check-prefix=CHECK-ERR
+
+; END.
+
+; CHECK-GOOD: Full input was:
+; CHECK-GOOD-NEXT: <<<<<<
+; CHECK-GOOD-NEXT:          1: ciao
+; CHECK-GOOD-NEXT: check:1     ^~~~
+; CHECK-GOOD-NEXT:          2: world
+; CHECK-GOOD-NEXT: next:2      ^~~~~
+; CHECK-GOOD-NEXT: >>>>>>
+
+; CHECK-ERR: Full input was:
+; CHECK-ERR-NEXT: <<<<<<
+; CHECK-ERR-NEXT:          1: hello
+; CHECK-ERR-NEXT: check:1     X~~~~
+; CHECK-ERR-NEXT:          2: world
+; CHECK-ERR-NEXT: check:1     ~~~~~ error: no match found
+; CHECK-ERR-NEXT: >>>>>>
+
+; CHECK-NODUMP-NOT: <<<<<<
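
(Equivalent ways to request a dump only on failure, per the RUN lines above;
file names hypothetical:

  FileCheck -input-file in.txt checks.txt -dump-input=fail
  FileCheck -input-file in.txt checks.txt -dump-input-on-failure
  FILECHECK_DUMP_INPUT_ON_FAILURE=1 FileCheck -input-file in.txt checks.txt

The default remains -dump-input=never; -dump-input=always dumps even on
success, and -dump-input=help describes the dump format.)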
diff --git a/test/FileCheck/lit.local.cfg b/test/FileCheck/lit.local.cfg
new file mode 100644
index 0000000..307da45
--- /dev/null
+++ b/test/FileCheck/lit.local.cfg
@@ -0,0 +1,13 @@
+# Unset environment variables that the FileCheck tests
+# expect not to be set.
+file_check_expected_unset_vars = [
+  'FILECHECK_DUMP_INPUT_ON_FAILURE',
+  'FILECHECK_OPTS',
+]
+
+for env_var in file_check_expected_unset_vars:
+  if env_var in config.environment:
+    lit_config.note('Removing {} from environment for FileCheck tests'.format(
+      env_var)
+    )
+    config.environment.pop(env_var)
diff --git a/test/FileCheck/no-check-file.txt b/test/FileCheck/no-check-file.txt
new file mode 100644
index 0000000..e9325d6
--- /dev/null
+++ b/test/FileCheck/no-check-file.txt
@@ -0,0 +1,3 @@
+; RUN: not FileCheck 2>&1 | FileCheck %s
+
+CHECK: <check-file> not specified
diff --git a/test/FileCheck/verbose_mode.txt b/test/FileCheck/verbose_mode.txt
deleted file mode 100644
index 2146d6a..0000000
--- a/test/FileCheck/verbose_mode.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: not FileCheck -input-file %s %s --check-prefix=CHECK1 --match-full-lines --dump-input-on-failure 2>&1 | FileCheck %s --check-prefix=CHECKERROR --match-full-lines
-; RUN: env FILECHECK_DUMP_INPUT_ON_FAILURE=1 not FileCheck -input-file %s %s --check-prefix=CHECK1 --match-full-lines 2>&1 |  FileCheck %s --check-prefix=CHECKERROR --match-full-lines
-; RUN: env FILECHECK_DUMP_INPUT_ON_FAILURE=1 not FileCheck -input-file %s %s --check-prefix=CHECK1 --match-full-lines --dump-input-on-failure=0 2>&1 |  FileCheck %s --check-prefix=CHECKERRORNOVERBOSE --match-full-lines
-
-hello
-world
-
-; CHECK1: ciao
-; CHECK1-NEXT: world
-
-; CHECKERROR: Full input was:
-; CHECKERROR-NEXT: <<<<<<
-; CHECKERROR: hello
-; CHECKERROR: world
-; CHECKERROR: >>>>>>
-
-; CHECKERRORNOVERBOSE-NOT: <<<<<<
diff --git a/test/Instrumentation/AddressSanitizer/global_cstring_darwin.ll b/test/Instrumentation/AddressSanitizer/global_cstring_darwin.ll
index eb31c15..0fc3205 100644
--- a/test/Instrumentation/AddressSanitizer/global_cstring_darwin.ll
+++ b/test/Instrumentation/AddressSanitizer/global_cstring_darwin.ll
@@ -8,14 +8,14 @@
 @.str.1 = private unnamed_addr constant [13 x i8] c"Hello world.\00", align 1
 @.str.2 = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
 
-; CHECK: @.str.1 = internal unnamed_addr constant { [13 x i8], [51 x i8] } { [13 x i8] c"Hello world.\00", [51 x i8] zeroinitializer }, section "__TEXT,__asan_cstring,regular", align 32
-; CHECK: @.str.2 = internal unnamed_addr constant { [4 x i8], [60 x i8] } { [4 x i8] c"%s\0A\00", [60 x i8] zeroinitializer }, section "__TEXT,__asan_cstring,regular", align 32
+; CHECK: @.str.1 = internal constant { [13 x i8], [51 x i8] } { [13 x i8] c"Hello world.\00", [51 x i8] zeroinitializer }, section "__TEXT,__asan_cstring,regular", align 32
+; CHECK: @.str.2 = internal constant { [4 x i8], [60 x i8] } { [4 x i8] c"%s\0A\00", [60 x i8] zeroinitializer }, section "__TEXT,__asan_cstring,regular", align 32
 
 ; Shouldn't be put into special section:
 @.str.3 = private unnamed_addr constant [4 x i8] c"\00\01\02\03", align 1
 @.str.4 = private unnamed_addr global [7 x i8] c"Hello.\00", align 1
 @.str.5 = private unnamed_addr constant [8 x i8] c"Hello.\00\00", align 1
 
-; CHECK: @.str.3 = internal unnamed_addr constant { [4 x i8], [60 x i8] } { [4 x i8] c"\00\01\02\03", [60 x i8] zeroinitializer }, align 32
-; CHECK: @.str.4 = private unnamed_addr global { [7 x i8], [57 x i8] } { [7 x i8] c"Hello.\00", [57 x i8] zeroinitializer }, align 32
-; CHECK: @.str.5 = internal unnamed_addr constant { [8 x i8], [56 x i8] } { [8 x i8] c"Hello.\00\00", [56 x i8] zeroinitializer }, align 32
+; CHECK: @.str.3 = internal constant { [4 x i8], [60 x i8] } { [4 x i8] c"\00\01\02\03", [60 x i8] zeroinitializer }, align 32
+; CHECK: @.str.4 = private global { [7 x i8], [57 x i8] } { [7 x i8] c"Hello.\00", [57 x i8] zeroinitializer }, align 32
+; CHECK: @.str.5 = internal constant { [8 x i8], [56 x i8] } { [8 x i8] c"Hello.\00\00", [56 x i8] zeroinitializer }, align 32
diff --git a/test/Instrumentation/AddressSanitizer/global_metadata.ll b/test/Instrumentation/AddressSanitizer/global_metadata.ll
index ee42f1c..ecee772 100644
--- a/test/Instrumentation/AddressSanitizer/global_metadata.ll
+++ b/test/Instrumentation/AddressSanitizer/global_metadata.ll
@@ -15,7 +15,7 @@
 ; Check that globals were instrumented:
 
 ; CHECK: @global = global { i32, [60 x i8] } zeroinitializer, comdat, align 32
-; CHECK: @.str = internal unnamed_addr constant { [14 x i8], [50 x i8] } { [14 x i8] c"Hello, world!\00", [50 x i8] zeroinitializer }, comdat($".str${{[01-9a-f]+}}"), align 32
+; CHECK: @.str = internal constant { [14 x i8], [50 x i8] } { [14 x i8] c"Hello, world!\00", [50 x i8] zeroinitializer }, comdat($".str${{[01-9a-f]+}}"), align 32
 
 ; Check emitted location descriptions:
 ; CHECK: [[VARNAME:@___asan_gen_.[0-9]+]] = private unnamed_addr constant [7 x i8] c"global\00", align 1
diff --git a/test/Instrumentation/AddressSanitizer/global_metadata_array.ll b/test/Instrumentation/AddressSanitizer/global_metadata_array.ll
index 76f7377..fbe0016 100644
--- a/test/Instrumentation/AddressSanitizer/global_metadata_array.ll
+++ b/test/Instrumentation/AddressSanitizer/global_metadata_array.ll
@@ -15,7 +15,7 @@
 
 ; Check that globals were instrumented:
 ; CHECK: @global = global { i32, [60 x i8] } zeroinitializer, align 32
-; CHECK: @.str = internal unnamed_addr constant { [14 x i8], [50 x i8] } { [14 x i8] c"Hello, world!\00", [50 x i8] zeroinitializer }{{.*}}, align 32
+; CHECK: @.str = internal constant { [14 x i8], [50 x i8] } { [14 x i8] c"Hello, world!\00", [50 x i8] zeroinitializer }{{.*}}, align 32
 
 ; Check emitted location descriptions:
 ; CHECK: [[VARNAME:@___asan_gen_.[0-9]+]] = private unnamed_addr constant [7 x i8] c"global\00", align 1
diff --git a/test/Instrumentation/AddressSanitizer/global_metadata_bitcasts.ll b/test/Instrumentation/AddressSanitizer/global_metadata_bitcasts.ll
new file mode 100644
index 0000000..324a04e
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/global_metadata_bitcasts.ll
@@ -0,0 +1,13 @@
+; Test that the compiler doesn't crash when the llvm.asan.globals metadata
+; contains an entry that points to a BitCast instruction.
+
+; RUN: opt < %s -asan -asan-module -asan-globals-live-support=1 -S
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.11.0"
+
+@g = global [1 x i32] zeroinitializer, align 4
+
+!llvm.asan.globals = !{!0, !1}
+!0 = !{[1 x i32]* @g, null, !"name", i1 false, i1 false}
+!1 = !{i8* bitcast ([1 x i32]* @g to i8*), null, !"name", i1 false, i1 false}
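
(An annotated copy of the entries above; the field meanings follow the
llvm.asan.globals convention and are stated here as an aid, not taken from
this patch:

  !0 = !{[1 x i32]* @g, ; the global itself (in !1, a bitcast of it)
         null,          ; source-location metadata, none here
         !"name",       ; name
         i1 false,      ; is dynamically initialized
         i1 false}      ; is blacklisted, i.e. excluded from instrumentation
)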
diff --git a/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll b/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll
index eeac65e..5602cb2 100644
--- a/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll
+++ b/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll
@@ -14,8 +14,8 @@
 ; CHECK: $dead_global = comdat noduplicates
 ; CHECK: $private_str = comdat noduplicates
 
-; CHECK: @dead_global = local_unnamed_addr global { i32, [60 x i8] } { i32 42, [60 x i8] zeroinitializer }, comdat, align 32
-; CHECK: @private_str = internal unnamed_addr constant { [8 x i8], [56 x i8] } { [8 x i8] c"private\00", [56 x i8] zeroinitializer }, comdat, align 32
+; CHECK: @dead_global = global { i32, [60 x i8] } { i32 42, [60 x i8] zeroinitializer }, comdat, align 32
+; CHECK: @private_str = internal constant { [8 x i8], [56 x i8] } { [8 x i8] c"private\00", [56 x i8] zeroinitializer }, comdat, align 32
 
 ; CHECK: @__asan_global_dead_global = private global { {{.*}} }, section ".ASAN$GL", comdat($dead_global), align 64
 ; CHECK: @__asan_global_private_str = private global { {{.*}} }, section ".ASAN$GL", comdat($private_str), align 64
diff --git a/test/Instrumentation/AddressSanitizer/localescape.ll b/test/Instrumentation/AddressSanitizer/localescape.ll
index d9daa8c..015b0e8 100644
--- a/test/Instrumentation/AddressSanitizer/localescape.ll
+++ b/test/Instrumentation/AddressSanitizer/localescape.ll
@@ -6,7 +6,7 @@
 
 declare i32 @llvm.eh.typeid.for(i8*) #2
 declare i8* @llvm.frameaddress(i32)
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
 declare i8* @llvm.localrecover(i8*, i8*, i32)
 declare void @llvm.localescape(...) #1
 
@@ -56,7 +56,7 @@
 define internal i32 @"\01?filt$0@0@main@@"() #1 {
 entry:
   %0 = tail call i8* @llvm.frameaddress(i32 1)
-  %1 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %0)
+  %1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %0)
   %2 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %1, i32 0)
   %__exception_code = bitcast i8* %2 to i32*
   %3 = getelementptr inbounds i8, i8* %0, i32 -20
diff --git a/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll b/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll
index 3a9e614..cf48d19 100644
--- a/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll
+++ b/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll
@@ -14,4 +14,4 @@
 
 ; CHECK: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [56 x i8] }* @c to i64), i64 8, i64 64, i64 ptrtoint ([2 x i8]* @___asan_gen_.3 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
 
-; CHECK: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [56 x i8] }* @d to i64), i64 8, i64 64, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; CHECK: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [56 x i8] }* @d to i64), i64 8, i64 64, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
diff --git a/test/Instrumentation/AddressSanitizer/win-string-literal.ll b/test/Instrumentation/AddressSanitizer/win-string-literal.ll
index a86581c..4d5126b 100644
--- a/test/Instrumentation/AddressSanitizer/win-string-literal.ll
+++ b/test/Instrumentation/AddressSanitizer/win-string-literal.ll
@@ -7,7 +7,7 @@
 ; CHECK: $"??_C@_04JIHMPGLA@asdf?$AA@" = comdat any
 
 ; CHECK: @"??_C@_04JIHMPGLA@asdf?$AA@" =
-; CHECK-SAME: linkonce_odr dso_local unnamed_addr constant { [5 x i8], [59 x i8] }
+; CHECK-SAME: linkonce_odr dso_local constant { [5 x i8], [59 x i8] }
 ; CHECK-SAME: { [5 x i8] c"asdf\00", [59 x i8] zeroinitializer }, comdat, align 32
 
 ; CHECK: @"__asan_global_??_C@_04JIHMPGLA@asdf?$AA@" =
@@ -15,7 +15,7 @@
 ; CHECK-SAME: { i64 ptrtoint ({ [5 x i8], [59 x i8] }* @"??_C@_04JIHMPGLA@asdf?$AA@" to i64),
 ; CHECK-SAME:   i64 5, i64 64, i64 ptrtoint ([17 x i8]* @___asan_gen_.1 to i64),
 ; CHECK-SAME:   i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0,
-; CHECK-SAME:   i64 ptrtoint ({ [6 x i8]*, i32, i32 }* @___asan_gen_.3 to i64), i64 -1 },
+; CHECK-SAME:   i64 ptrtoint ({ [6 x i8]*, i32, i32 }* @___asan_gen_.3 to i64), i64 0 },
 ; CHECK-SAME:   section ".ASAN$GL", comdat($"??_C@_04JIHMPGLA@asdf?$AA@"), align 64
 
 ; ModuleID = 't.cpp'
diff --git a/test/Instrumentation/HWAddressSanitizer/basic.ll b/test/Instrumentation/HWAddressSanitizer/basic.ll
index 8253016..e02e5fc 100644
--- a/test/Instrumentation/HWAddressSanitizer/basic.ll
+++ b/test/Instrumentation/HWAddressSanitizer/basic.ll
@@ -5,6 +5,9 @@
 ; RUN: opt < %s -hwasan -hwasan-recover=0 -hwasan-mapping-offset=0 -S | FileCheck %s --check-prefixes=CHECK,ABORT,ZERO-BASED-SHADOW
 ; RUN: opt < %s -hwasan -hwasan-recover=1 -hwasan-mapping-offset=0 -S | FileCheck %s --check-prefixes=CHECK,RECOVER,ZERO-BASED-SHADOW
 
+; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @hwasan.module_ctor, i8* bitcast (void ()* @hwasan.module_ctor to i8*) }]
+; CHECK: @__hwasan = private constant [0 x i8] zeroinitializer, section "__hwasan_frames", comdat($hwasan.module_ctor)
+
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-android"
 
@@ -352,7 +355,7 @@
 
 ; CHECK: declare void @__hwasan_init()
 
-; CHECK:      define internal void @hwasan.module_ctor() {
+; CHECK:      define internal void @hwasan.module_ctor() comdat {
 ; CHECK-NEXT:   call void @__hwasan_init()
 ; CHECK-NEXT:   call void @__hwasan_init_frames(
 ; CHECK-NEXT:   ret void
diff --git a/test/Instrumentation/HWAddressSanitizer/lazy-thread-init.ll b/test/Instrumentation/HWAddressSanitizer/lazy-thread-init.ll
new file mode 100644
index 0000000..dab62d6
--- /dev/null
+++ b/test/Instrumentation/HWAddressSanitizer/lazy-thread-init.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -hwasan < %s | FileCheck %s
+
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @bar([16 x i32]* %p)
+
+define void @foo() sanitize_hwaddress "hwasan-abi"="interceptor" {
+  ; CHECK: [[LOAD:%[^ ]*]] = load i64, i64* @__hwasan_tls
+  ; CHECK: [[ICMP:%[^ ]*]] = icmp eq i64 [[LOAD]], 0
+  ; CHECK: br i1 [[ICMP]], label %[[INIT:[^,]*]], label %[[CONT:[^,]*]], !prof [[PROF:![0-9]+]]
+
+  ; CHECK: [[INIT]]:
+  ; CHECK: call void @__hwasan_thread_enter()
+  ; CHECK: [[RELOAD:%[^ ]*]] = load i64, i64* @__hwasan_tls
+  ; CHECK: br label %[[CONT]]
+
+  ; CHECK: [[CONT]]:
+  ; CHECK: phi i64 [ [[LOAD]], %0 ], [ [[RELOAD]], %[[INIT]] ]
+
+  %p = alloca [16 x i32]
+  call void @bar([16 x i32]* %p)
+  ret void
+}
+
+; CHECK: [[PROF]] = !{!"branch_weights", i32 1, i32 100000}
diff --git a/test/Instrumentation/HWAddressSanitizer/mem-intrinsics.ll b/test/Instrumentation/HWAddressSanitizer/mem-intrinsics.ll
new file mode 100644
index 0000000..b6a4a30
--- /dev/null
+++ b/test/Instrumentation/HWAddressSanitizer/mem-intrinsics.ll
@@ -0,0 +1,40 @@
+; RUN: opt -S -hwasan %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local i32 @main() sanitize_hwaddress {
+entry:
+  %retval = alloca i32, align 4
+  %Q = alloca [10 x i8], align 1
+  %P = alloca [10 x i8], align 1
+  store i32 0, i32* %retval, align 4
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %Q, i32 0, i32 0
+
+  call void @llvm.memset.p0i8.i64(i8* align 1 %arraydecay, i8 0, i64 10, i1 false)
+; CHECK: call i8* @__hwasan_memset
+
+  %arraydecay1 = getelementptr inbounds [10 x i8], [10 x i8]* %Q, i32 0, i32 0
+  %arraydecay2 = getelementptr inbounds [10 x i8], [10 x i8]* %Q, i32 0, i32 0
+  %add.ptr = getelementptr inbounds i8, i8* %arraydecay2, i64 5
+
+  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %arraydecay1, i8* align 1 %add.ptr, i64 5, i1 false)
+; CHECK: call i8* @__hwasan_memmove
+
+  %arraydecay3 = getelementptr inbounds [10 x i8], [10 x i8]* %P, i32 0, i32 0
+  %arraydecay4 = getelementptr inbounds [10 x i8], [10 x i8]* %Q, i32 0, i32 0
+
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %arraydecay3, i8* align 1 %arraydecay4, i64 10, i1 false)
+; CHECK: call i8* @__hwasan_memcpy
+  ret i32 0
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
diff --git a/test/Instrumentation/HWAddressSanitizer/prologue.ll b/test/Instrumentation/HWAddressSanitizer/prologue.ll
index 6b02b986..d8e6d58 100644
--- a/test/Instrumentation/HWAddressSanitizer/prologue.ll
+++ b/test/Instrumentation/HWAddressSanitizer/prologue.ll
@@ -29,7 +29,7 @@
 ; CHECK-GLOBAL: load i64, i64* @__hwasan_shadow_memory_dynamic_address
 
 ; CHECK-TLS:   %[[A:[^ ]*]] = call i8* @llvm.thread.pointer()
-; CHECK-TLS:   %[[B:[^ ]*]] = getelementptr i8, i8* %[[A]], i32 64
+; CHECK-TLS:   %[[B:[^ ]*]] = getelementptr i8, i8* %[[A]], i32 48
 ; CHECK-TLS:   %[[C:[^ ]*]] = bitcast i8* %[[B]] to i64*
 ; CHECK-TLS:   %[[D:[^ ]*]] = load i64, i64* %[[C]]
 ; CHECK-TLS:   %[[E:[^ ]*]] = or i64 %[[D]], 4294967295
@@ -60,7 +60,7 @@
 ; CHECK-GLOBAL: load i64, i64* @__hwasan_shadow_memory_dynamic_address
 
 ; CHECK-TLS:   %[[A:[^ ]*]] = call i8* @llvm.thread.pointer()
-; CHECK-TLS:   %[[B:[^ ]*]] = getelementptr i8, i8* %[[A]], i32 64
+; CHECK-TLS:   %[[B:[^ ]*]] = getelementptr i8, i8* %[[A]], i32 48
 ; CHECK-TLS:   %[[C:[^ ]*]] = bitcast i8* %[[B]] to i64*
 ; CHECK-TLS:   %[[D:[^ ]*]] = load i64, i64* %[[C]]
 
diff --git a/test/Instrumentation/HWAddressSanitizer/with-calls.ll b/test/Instrumentation/HWAddressSanitizer/with-calls.ll
index 8d6068c..9f5bc66 100644
--- a/test/Instrumentation/HWAddressSanitizer/with-calls.ll
+++ b/test/Instrumentation/HWAddressSanitizer/with-calls.ll
@@ -197,7 +197,7 @@
 
 ; CHECK: declare void @__hwasan_init()
 
-; CHECK:      define internal void @hwasan.module_ctor() {
+; CHECK:      define internal void @hwasan.module_ctor() comdat {
 ; CHECK-NEXT:   call void @__hwasan_init()
 ; CHECK-NEXT:   call void @__hwasan_init_frames(
 ; CHECK-NEXT:   ret void
diff --git a/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
index 08387f8..2f401b0 100644
--- a/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
+++ b/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
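
(The pattern repeated across the MemorySanitizer tests that follow: each
legacy pass-manager RUN line gains a new pass-manager twin, e.g.

  ; legacy PM
  RUN: opt < %s -msan -S | FileCheck %s
  ; new PM
  RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s

with any -msan-* tuning flags carried over unchanged.)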
diff --git a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index 1463070..03d1fda 100644
--- a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 
 target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index 9edcb36..98b1d82 100644
--- a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 
 target datalayout = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index 5a26869..9e74752 100644
--- a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 
 target datalayout = "E-m:e-i64:64-n32:64"
diff --git a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index c139d62..9d6c436 100644
--- a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-n32:64"
diff --git a/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll b/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
index 800bd15..cd113a0 100644
--- a/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
+++ b/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S 2>&1 -passes=msan | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S 2>&1 | FileCheck %s
 
 ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
diff --git a/test/Instrumentation/MemorySanitizer/X86/vararg.ll b/test/Instrumentation/MemorySanitizer/X86/vararg.ll
index 518c3db..f384d8f 100644
--- a/test/Instrumentation/MemorySanitizer/X86/vararg.ll
+++ b/test/Instrumentation/MemorySanitizer/X86/vararg.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S
 ; Test that code using va_start can be compiled on i386.
 
diff --git a/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll b/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
index 3d036d7..2836dd2 100644
--- a/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
+++ b/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
@@ -1,5 +1,11 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s "--check-prefixes=CHECK,CHECK-ORIGIN"
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s "--check-prefixes=CHECK,CHECK-ORIGIN"
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN
 
 ; Test that shadow and origin are stored for variadic function params.
diff --git a/test/Instrumentation/MemorySanitizer/alloca.ll b/test/Instrumentation/MemorySanitizer/alloca.ll
index d1d3bc5..1d28db8 100644
--- a/test/Instrumentation/MemorySanitizer/alloca.ll
+++ b/test/Instrumentation/MemorySanitizer/alloca.ll
@@ -1,7 +1,17 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s "--check-prefixes=CHECK,INLINE"
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s --check-prefixes=CHECK,INLINE
+; RUN: opt < %s -msan-check-access-address=0 -msan-poison-stack-with-call=1 -S \
+; RUN: -passes=msan 2>&1 | FileCheck %s "--check-prefixes=CHECK,CALL"
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-poison-stack-with-call=1 -S | FileCheck %s --check-prefixes=CHECK,CALL
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s "--check-prefixes=CHECK,ORIGIN"
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s --check-prefixes=CHECK,ORIGIN
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s "--check-prefixes=CHECK,ORIGIN"
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s --check-prefixes=CHECK,ORIGIN
+; RUN: opt < %s -msan-kernel=1 -S -passes=msan 2>&1 | FileCheck %s             \
+; RUN: "--check-prefixes=CHECK,KMSAN"
 ; RUN: opt < %s -msan -msan-kernel=1 -S | FileCheck %s --check-prefixes=CHECK,KMSAN
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/array_types.ll b/test/Instrumentation/MemorySanitizer/array_types.ll
index e96716a..d322354 100644
--- a/test/Instrumentation/MemorySanitizer/array_types.ll
+++ b/test/Instrumentation/MemorySanitizer/array_types.ll
@@ -1,4 +1,9 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck -check-prefix=CHECK                       \
+; RUN: -check-prefix=CHECK-ORIGINS %s --allow-empty
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/atomics.ll b/test/Instrumentation/MemorySanitizer/atomics.ll
index 8033ed1..56f12a1 100644
--- a/test/Instrumentation/MemorySanitizer/atomics.ll
+++ b/test/Instrumentation/MemorySanitizer/atomics.ll
@@ -1,5 +1,11 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/byval-alignment.ll b/test/Instrumentation/MemorySanitizer/byval-alignment.ll
index df82a92..0d5da36 100644
--- a/test/Instrumentation/MemorySanitizer/byval-alignment.ll
+++ b/test/Instrumentation/MemorySanitizer/byval-alignment.ll
@@ -1,5 +1,7 @@
 ; Test that copy alignment for byval arguments is limited by param-tls slot alignment.
 
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll b/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll
index f147944..1011075 100644
--- a/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll
+++ b/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -msan-check-constant-shadow=1     \
+; RUN: -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-check-constant-shadow=1 -msan-track-origins=1 -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/check_access_address.ll b/test/Instrumentation/MemorySanitizer/check_access_address.ll
index 21bb412..8c245fe 100644
--- a/test/Instrumentation/MemorySanitizer/check_access_address.ll
+++ b/test/Instrumentation/MemorySanitizer/check_access_address.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/csr.ll b/test/Instrumentation/MemorySanitizer/csr.ll
index a7664d4..f91888a 100644
--- a/test/Instrumentation/MemorySanitizer/csr.ll
+++ b/test/Instrumentation/MemorySanitizer/csr.ll
@@ -1,4 +1,8 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s --check-prefix=ADDR
 ; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s --check-prefix=ADDR
 ; REQUIRES: x86-registered-target
 
diff --git a/test/Instrumentation/MemorySanitizer/global_ctors_2to3.ll b/test/Instrumentation/MemorySanitizer/global_ctors_2to3.ll
deleted file mode 100644
index 8e0ab26..0000000
--- a/test/Instrumentation/MemorySanitizer/global_ctors_2to3.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; MSan converts 2-element global_ctors to 3-element when adding the new entry.
-; RUN: opt < %s -msan -msan-with-comdat -S | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-; CHECK: $msan.module_ctor = comdat any
-; CHECK: @llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @f, i8* null }, { i32, void ()*, i8* } { i32 0, void ()* @msan.module_ctor, i8* bitcast (void ()* @msan.module_ctor to i8*) }]
-
-@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @f }]
-
-define internal void @f() {
-entry:
-  ret void
-}
-
-; CHECK: define internal void @msan.module_ctor() comdat {
diff --git a/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll b/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
index 855125a..424a5e0 100644
--- a/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
+++ b/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
@@ -2,8 +2,19 @@
 ; Test that in with-calls mode there are no calls to __msan_chain_origin - they
 ; are done from __msan_maybe_store_origin_*.
 
+; RUN: opt < %s -msan-check-access-address=0                                   \
+; RUN: -msan-instrumentation-with-call-threshold=0 -S -passes=msan 2>&1 |      \
+; RUN: FileCheck %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0                                   \
+; RUN: -msan-instrumentation-with-call-threshold=0 -msan-track-origins=1 -S    \
+; RUN: -passes=msan 2>&1 | FileCheck -check-prefix=CHECK                       \
+; RUN: -check-prefix=CHECK-ORIGINS %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
+; RUN: opt < %s -msan-check-access-address=0                                   \
+; RUN: -msan-instrumentation-with-call-threshold=0 -msan-track-origins=2 -S    \
+; RUN: -passes=msan 2>&1 | FileCheck -check-prefix=CHECK                       \
+; RUN: -check-prefix=CHECK-ORIGINS %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -msan-track-origins=2 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
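
-msan-instrumentation-with-call-threshold=0 makes MSan emit runtime callbacks (__msan_maybe_warning_*, __msan_maybe_store_origin_*) instead of inline shadow checks, which is why this test can assert that __msan_chain_origin is never called directly. A quick way to see the callbacks in the output (the grep is purely for illustration):

  opt < instrumentation-with-call-threshold.ll -msan-check-access-address=0 \
    -msan-instrumentation-with-call-threshold=0 -S -passes=msan 2>&1 \
    | grep __msan_maybe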
diff --git a/test/Instrumentation/MemorySanitizer/manual-shadow.ll b/test/Instrumentation/MemorySanitizer/manual-shadow.ll
index f4664cb..ab19f3d 100644
--- a/test/Instrumentation/MemorySanitizer/manual-shadow.ll
+++ b/test/Instrumentation/MemorySanitizer/manual-shadow.ll
@@ -1,9 +1,20 @@
 ; Test that the msan layout customization options work as expected
 ;
+; RUN: opt < %s -msan-shadow-base 3735928559 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: --check-prefix=CHECK-BASE %s
 ; RUN: opt < %s -msan -msan-shadow-base 3735928559 -S | FileCheck --check-prefix=CHECK-BASE %s
+; RUN: opt < %s -msan-shadow-base 3735928559 -msan-and-mask 4294901760 -S      \
+; RUN: -passes=msan 2>&1 | FileCheck --check-prefix=CHECK-AND %s
 ; RUN: opt < %s -msan -msan-shadow-base 3735928559 -msan-and-mask 4294901760 -S | FileCheck --check-prefix=CHECK-AND %s
+; RUN: opt < %s -msan-shadow-base 3735928559 -msan-xor-mask 48879 -S           \
+; RUN: -passes=msan 2>&1 | FileCheck --check-prefix=CHECK-XOR %s
 ; RUN: opt < %s -msan -msan-shadow-base 3735928559 -msan-xor-mask 48879 -S | FileCheck --check-prefix=CHECK-XOR %s
+; RUN: opt < %s -msan-shadow-base 3735928559 -msan-xor-mask 48879              \
+; RUN: -msan-and-mask 4294901760 -S -passes=msan 2>&1 | FileCheck              \
+; RUN: --check-prefix=CHECK-XOR-AND %s
 ; RUN: opt < %s -msan -msan-shadow-base 3735928559 -msan-xor-mask 48879 -msan-and-mask 4294901760 -S | FileCheck --check-prefix=CHECK-XOR-AND %s
+; RUN: opt < %s -msan-track-origins 1 -msan-origin-base 1777777 -S -passes=msan\
+; RUN: 2>&1 | FileCheck --check-prefix=CHECK-ORIGIN-BASE %s
 ; RUN: opt < %s -msan -msan-track-origins 1 -msan-origin-base 1777777 -S | FileCheck --check-prefix=CHECK-ORIGIN-BASE %s
 
 target triple = "x86_64-unknown-linux-gnu"
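
The magic constants in these RUN lines read more naturally in hex: 3735928559 is 0xDEADBEEF, 4294901760 is 0xFFFF0000, and 48879 is 0xBEEF. The flags override the target's default shadow/origin mapping constants, and each CHECK prefix pins the corresponding constant in the emitted IR. The conversions can be confirmed with plain shell arithmetic:

  printf '%d %d %d\n' 0xDEADBEEF 0xFFFF0000 0xBEEF
  # 3735928559 4294901760 48879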
diff --git a/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/test/Instrumentation/MemorySanitizer/masked-store-load.ll
index 9391c9f..226f665 100644
--- a/test/Instrumentation/MemorySanitizer/masked-store-load.ll
+++ b/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -1,5 +1,11 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s "--check-prefixes=CHECK,CHECK-ORIGIN"
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN
+; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s --check-prefix=ADDR
 ; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s --check-prefix=ADDR
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/missing_origin.ll b/test/Instrumentation/MemorySanitizer/missing_origin.ll
index 0a18914..f529098 100644
--- a/test/Instrumentation/MemorySanitizer/missing_origin.ll
+++ b/test/Instrumentation/MemorySanitizer/missing_origin.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll b/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
index bc6c44c..c338d04 100644
--- a/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
@@ -1,5 +1,11 @@
 ; Test for handling of asm constraints in MSan instrumentation.
+; RUN: opt < %s -msan-kernel=1 -msan-check-access-address=0                    \
+; RUN: -msan-handle-asm-conservative=0 -S -passes=msan 2>&1 | FileCheck        \
+; RUN: "-check-prefixes=CHECK,CHECK-NONCONS" %s
 ; RUN: opt < %s -msan -msan-kernel=1 -msan-check-access-address=0 -msan-handle-asm-conservative=0 -S | FileCheck -check-prefixes=CHECK,CHECK-NONCONS %s
+; RUN: opt < %s -msan-kernel=1 -msan-check-access-address=0                    \
+; RUN: -msan-handle-asm-conservative=1 -S -passes=msan 2>&1 | FileCheck        \
+; RUN: "-check-prefixes=CHECK,CHECK-CONS" %s
 ; RUN: opt < %s -msan -msan-kernel=1 -msan-check-access-address=0 -msan-handle-asm-conservative=1 -S | FileCheck -check-prefixes=CHECK,CHECK-CONS %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -179,8 +185,6 @@
 }
 
 ; CHECK-LABEL: @f_2i_2o_mem
-; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@is1{{.*}}, i64 4)
-; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@is2{{.*}}, i64 4)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id2{{.*}}, i64 4)
 ; CHECK: call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32* @id2, i32* @is1, i32* @is2)
@@ -198,7 +202,6 @@
 
 ; CHECK-LABEL: @f_1i_1o_memreg
 ; CHECK: [[IS1_F7:%.*]] = load i32, i32* @is1, align 4
-; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@is1{{.*}}, i64 4)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4)
 ; CHECK: call void @__msan_warning
 ; CHECK: call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32 [[IS1_F7]], i32* @is1)
@@ -257,9 +260,6 @@
 }
 
 ; CHECK-LABEL: @f_3i_3o_complex_mem
-; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@pair1{{.*}}, i64 8)
-; CHECK-CONS: call void @__msan_instrument_asm_load(i8* @c1, i64 1)
-; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@memcpy_s1{{.*}}, i64 8)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@pair2{{.*}}, i64 8)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@c2{{.*}}, i64 1)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@memcpy_d1{{.*}}, i64 8)
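
These deletions (and the matching ones in msan_x86_bts_asm.ll below) drop the expectation that conservative asm handling emits __msan_instrument_asm_load for asm input operands; only the __msan_instrument_asm_store calls for output operands remain, presumably to avoid false reports on inputs (the patch itself only shows the changed expectations). One way to confirm locally, with grep used for illustration and a count of 0 expected:

  opt < msan_asm_conservative.ll -msan-kernel=1 -msan-check-access-address=0 \
    -msan-handle-asm-conservative=1 -S -passes=msan 2>&1 \
    | grep -c __msan_instrument_asm_load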
diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 78d31dc..f4cbc63 100644
--- a/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -1,10 +1,15 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: -allow-deprecated-dag-overlap %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck -allow-deprecated-dag-overlap %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck -allow-deprecated-dag-overlap             \
+; RUN: -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -allow-deprecated-dag-overlap -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null }
+; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @__msan_init, i8* null }
 
 ; Check the presence and the linkage type of __msan_track_origins and
 ; other interface symbols.
@@ -986,5 +991,4 @@
 ; CHECK-NEXT: ret i8*
 
 
-; CHECK-LABEL: define internal void @msan.module_ctor() {
-; CHECK: call void @__msan_init()
+; CHECK: declare void @__msan_init()
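
The constructor expectations flip here: instead of a synthesized @msan.module_ctor that calls @__msan_init() (the scheme exercised by the global_ctors_2to3.ll test deleted above), @llvm.global_ctors now carries an entry referencing @__msan_init directly, and the runtime function is merely declared. A quick way to eyeball the new shape (grep for illustration):

  opt < msan_basic.ll -msan-check-access-address=0 -S -passes=msan 2>&1 \
    | grep llvm.global_ctors
  # expect an entry of the form { i32 0, void ()* @__msan_init, i8* null }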
diff --git a/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll b/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
index 28bbf3c..680a672 100644
--- a/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
@@ -1,4 +1,6 @@
 ; KMSAN instrumentation tests
+; RUN: opt < %s -msan-kernel=1 -S -passes=msan 2>&1 | FileCheck %s             \
+; RUN: -check-prefixes=CHECK
 ; RUN: opt < %s -msan -msan-kernel=1 -S | FileCheck %s -check-prefixes=CHECK
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/msan_llvm_is_constant.ll b/test/Instrumentation/MemorySanitizer/msan_llvm_is_constant.ll
new file mode 100644
index 0000000..b7847db
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/msan_llvm_is_constant.ll
@@ -0,0 +1,21 @@
+; Make sure MSan doesn't insert shadow checks for @llvm.is.constant.* arguments.
+
+; RUN: opt < %s -msan -msan-kernel=1 -S | FileCheck -check-prefixes=CHECK %s
+; RUN: opt < %s -msan -S | FileCheck -check-prefixes=CHECK %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone uwtable
+define dso_local i32 @bar(i32 %v) local_unnamed_addr sanitize_memory {
+entry:
+  %0 = tail call i1 @llvm.is.constant.i32(i32 %v)
+  %1 = zext i1 %0 to i32
+  ret i32 %1
+}
+
+; CHECK-LABEL: bar
+; CHECK-NOT: call void @__msan_warning
+
+; Function Attrs: nounwind readnone
+declare i1 @llvm.is.constant.i32(i32)
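
Since @llvm.is.constant.* merely queries whether its operand folds to a constant, shadow checks on the argument would be pure noise; the test pins that down for both user-space MSan and KMSAN. Note that, unlike its neighbours, this new file only carries legacy -msan RUN lines; following the conversion pattern used throughout this patch, a new-pass-manager line would presumably read:

  opt < msan_llvm_is_constant.ll -S -passes=msan 2>&1 \
    | FileCheck -check-prefixes=CHECK msan_llvm_is_constant.ll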
diff --git a/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll b/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
index c9e6f1a..1c0072f 100644
--- a/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
@@ -1,5 +1,11 @@
 ; Test for the conservative assembly handling mode used by KMSAN.
+; RUN: opt < %s -msan-kernel=1 -msan-check-access-address=0                    \
+; RUN: -msan-handle-asm-conservative=0 -S -passes=msan 2>&1 | FileCheck        \
+; RUN: "-check-prefixes=CHECK,CHECK-NONCONS" %s
 ; RUN: opt < %s -msan -msan-kernel=1 -msan-check-access-address=0 -msan-handle-asm-conservative=0 -S | FileCheck -check-prefixes=CHECK,CHECK-NONCONS %s
+; RUN: opt < %s -msan-kernel=1 -msan-check-access-address=0                    \
+; RUN: -msan-handle-asm-conservative=1 -S -passes=msan 2>&1 | FileCheck        \
+; RUN: "-check-prefixes=CHECK,CHECK-CONS" %s
 ; RUN: opt < %s -msan -msan-kernel=1 -msan-check-access-address=0 -msan-handle-asm-conservative=1 -S | FileCheck -check-prefixes=CHECK,CHECK-CONS %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll b/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
index c4ec7fa..c5bdc2a 100644
--- a/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
@@ -1,4 +1,9 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck -check-prefix=CHECK                       \
+; RUN: -check-prefix=CHECK-ORIGINS %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
 ; REQUIRES: x86-registered-target
 
@@ -46,14 +51,14 @@
 ; Check that shadow is OR'ed, and origin is Select'ed
 ; And no shadow checks!
 
-define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
-  %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
+define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
+  %call = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %call
 }
 
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
+declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a, <8 x i16> %b) nounwind
 
-; CHECK-LABEL: @Paddsw128
+; CHECK-LABEL: @Pmulhuw128
 ; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
 ; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
 ; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
@@ -62,7 +67,7 @@
 ; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
 ; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
 ; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
-; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
+; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.pmulhu.w
 ; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
 ; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
 ; CHECK-NEXT: ret <8 x i16>
diff --git a/test/Instrumentation/MemorySanitizer/mul_by_constant.ll b/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
index 7736d94..959f019 100644
--- a/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
+++ b/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/nosanitize.ll b/test/Instrumentation/MemorySanitizer/nosanitize.ll
index 082a6f5..d68be08 100644
--- a/test/Instrumentation/MemorySanitizer/nosanitize.ll
+++ b/test/Instrumentation/MemorySanitizer/nosanitize.ll
@@ -1,5 +1,7 @@
 ; Verify that calls with !nosanitize are not instrumented by MSan.
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
+; RUN: opt < %s -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-track-origins=1 -S | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/origin-alignment.ll b/test/Instrumentation/MemorySanitizer/origin-alignment.ll
index abd8dd0..c2afe79 100644
--- a/test/Instrumentation/MemorySanitizer/origin-alignment.ll
+++ b/test/Instrumentation/MemorySanitizer/origin-alignment.ll
@@ -1,4 +1,10 @@
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck -check-prefix=CHECK                       \
+; RUN: -check-prefix=CHECK-ORIGINS1 %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS1 %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck -check-prefix=CHECK                       \
+; RUN: -check-prefix=CHECK-ORIGINS2 %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS2 %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/origin-array.ll b/test/Instrumentation/MemorySanitizer/origin-array.ll
index d9936ff..0a64787 100644
--- a/test/Instrumentation/MemorySanitizer/origin-array.ll
+++ b/test/Instrumentation/MemorySanitizer/origin-array.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/pr32842.ll b/test/Instrumentation/MemorySanitizer/pr32842.ll
index 5d74c9a..c342f16 100644
--- a/test/Instrumentation/MemorySanitizer/pr32842.ll
+++ b/test/Instrumentation/MemorySanitizer/pr32842.ll
@@ -1,5 +1,6 @@
 ; Regression test for https://bugs.llvm.org/show_bug.cgi?id=32842
 ;
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 ;target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/return_from_main.ll b/test/Instrumentation/MemorySanitizer/return_from_main.ll
index 82e2d13..0e5d340 100644
--- a/test/Instrumentation/MemorySanitizer/return_from_main.ll
+++ b/test/Instrumentation/MemorySanitizer/return_from_main.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/store-long-origin.ll b/test/Instrumentation/MemorySanitizer/store-long-origin.ll
index 23ba4a1..f8973fc 100644
--- a/test/Instrumentation/MemorySanitizer/store-long-origin.ll
+++ b/test/Instrumentation/MemorySanitizer/store-long-origin.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/store-origin.ll b/test/Instrumentation/MemorySanitizer/store-origin.ll
index 70722c6..9a0c019 100644
--- a/test/Instrumentation/MemorySanitizer/store-origin.ll
+++ b/test/Instrumentation/MemorySanitizer/store-origin.ll
@@ -1,5 +1,13 @@
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck                                           \
+; RUN: "-check-prefixes=CHECK,CHECK-MSAN,CHECK-ORIGINS1" %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefixes=CHECK,CHECK-MSAN,CHECK-ORIGINS1 %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S          \
+; RUN: -passes=msan 2>&1 | FileCheck                                           \
+; RUN: "-check-prefixes=CHECK,CHECK-MSAN,CHECK-ORIGINS2" %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck -check-prefixes=CHECK,CHECK-MSAN,CHECK-ORIGINS2 %s
+; RUN: opt < %s -msan-kernel=1 -msan-check-access-address=0 -S -passes=msan    \
+; RUN: 2>&1 | FileCheck "-check-prefixes=CHECK,CHECK-KMSAN,CHECK-ORIGINS2" %s
 ; RUN: opt < %s -msan -msan-kernel=1 -msan-check-access-address=0 -S | FileCheck -check-prefixes=CHECK,CHECK-KMSAN,CHECK-ORIGINS2 %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll b/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll
index 0aa1967..96223cd 100644
--- a/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll
+++ b/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll
@@ -1,5 +1,6 @@
 ; Test marking string functions as nobuiltin in memory sanitizer.
 ;
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/unreachable.ll b/test/Instrumentation/MemorySanitizer/unreachable.ll
index ac5aea9..eff7371 100644
--- a/test/Instrumentation/MemorySanitizer/unreachable.ll
+++ b/test/Instrumentation/MemorySanitizer/unreachable.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/unsized_type.ll b/test/Instrumentation/MemorySanitizer/unsized_type.ll
index 94ae92d..4bffe80 100644
--- a/test/Instrumentation/MemorySanitizer/unsized_type.ll
+++ b/test/Instrumentation/MemorySanitizer/unsized_type.ll
@@ -1,5 +1,6 @@
 ; Check that unsized token types used by coroutine intrinsics do not cause
 ; assertion failures.
+; RUN: opt < %s -S 2>&1 -passes=msan | FileCheck %s
 ; RUN: opt < %s -msan -S 2>&1 | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/MemorySanitizer/vector_arith.ll b/test/Instrumentation/MemorySanitizer/vector_arith.ll
index 6652fdf..4b213d1 100644
--- a/test/Instrumentation/MemorySanitizer/vector_arith.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_arith.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 ; REQUIRES: x86-registered-target
 
diff --git a/test/Instrumentation/MemorySanitizer/vector_cmp.ll b/test/Instrumentation/MemorySanitizer/vector_cmp.ll
index 910b135..6031ddd 100644
--- a/test/Instrumentation/MemorySanitizer/vector_cmp.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_cmp.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 ; REQUIRES: x86-registered-target
 
diff --git a/test/Instrumentation/MemorySanitizer/vector_cvt.ll b/test/Instrumentation/MemorySanitizer/vector_cvt.ll
index b70ef7d..abbb0a8 100644
--- a/test/Instrumentation/MemorySanitizer/vector_cvt.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_cvt.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 ; REQUIRES: x86-registered-target
 
diff --git a/test/Instrumentation/MemorySanitizer/vector_pack.ll b/test/Instrumentation/MemorySanitizer/vector_pack.ll
index 574e7b8..50d9303 100644
--- a/test/Instrumentation/MemorySanitizer/vector_pack.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_pack.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 ; REQUIRES: x86-registered-target
 
diff --git a/test/Instrumentation/MemorySanitizer/vector_shift.ll b/test/Instrumentation/MemorySanitizer/vector_shift.ll
index c605c97..9f74869 100644
--- a/test/Instrumentation/MemorySanitizer/vector_shift.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_shift.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
+; RUN: %s
 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
 ; REQUIRES: x86-registered-target
 
diff --git a/test/Instrumentation/MemorySanitizer/with-call-type-size.ll b/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
index 2a3cbf7..da5e75b 100644
--- a/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
+++ b/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
@@ -1,3 +1,5 @@
+; RUN: opt < %s -msan-instrumentation-with-call-threshold=0 -S -passes=msan    \
+; RUN: 2>&1 | FileCheck %s
 ; RUN: opt < %s -msan -msan-instrumentation-with-call-threshold=0 -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -82,4 +84,4 @@
 ; CHECK-NOT:     call void @__msan_maybe_warning_
 ; CHECK:         icmp ne i65 %{{.*}}, 0
 ; CHECK-NOT:     call void @__msan_maybe_warning_
-; CHECK:         ret <4 x i32>
\ No newline at end of file
+; CHECK:         ret <4 x i32>
diff --git a/test/Instrumentation/SanitizerCoverage/coff-used-ctor.ll b/test/Instrumentation/SanitizerCoverage/coff-used-ctor.ll
new file mode 100644
index 0000000..fd12eed
--- /dev/null
+++ b/test/Instrumentation/SanitizerCoverage/coff-used-ctor.ll
@@ -0,0 +1,11 @@
+; Checks that sancov.module_ctor is marked used.
+; RUN: opt < %s -sancov -sanitizer-coverage-level=1 -sanitizer-coverage-inline-8bit-counters=1 -sanitizer-coverage-pc-table=1 -S | FileCheck %s
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.14.26433"
+
+define void @foo() {
+entry:
+  ret void
+}
+
+; CHECK: @llvm.used = appending global {{.*}} @sancov.module_ctor
\ No newline at end of file
diff --git a/test/Instrumentation/SanitizerCoverage/interposable-symbol-nocomdat.ll b/test/Instrumentation/SanitizerCoverage/interposable-symbol-nocomdat.ll
new file mode 100644
index 0000000..c79a2fb
--- /dev/null
+++ b/test/Instrumentation/SanitizerCoverage/interposable-symbol-nocomdat.ll
@@ -0,0 +1,37 @@
+; Test that interposable symbols do not get put in comdats.
+; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc-guard -mtriple x86_64-linux-gnu -S | FileCheck %s
+; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc-guard -mtriple x86_64-windows-msvc -S | FileCheck %s
+
+define void @Vanilla() {
+entry:
+  ret void
+}
+
+define linkonce void @LinkOnce() {
+entry:
+  ret void
+}
+
+define weak void @Weak() {
+entry:
+  ret void
+}
+
+declare extern_weak void @ExternWeak()
+
+define linkonce_odr void @LinkOnceOdr() {
+entry:
+  ret void
+}
+
+define weak_odr void @WeakOdr() {
+entry:
+  ret void
+}
+
+; CHECK: define void @Vanilla() comdat {
+; CHECK: define linkonce void @LinkOnce() {
+; CHECK: define weak void @Weak() {
+; CHECK: declare extern_weak void @ExternWeak()
+; CHECK: define linkonce_odr void @LinkOnceOdr() comdat {
+; CHECK: define weak_odr void @WeakOdr() comdat {
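
Per the CHECK lines, only symbols whose definition is guaranteed unique or identical across units (plain external plus the *_odr linkages) are wrapped in a comdat; interposable linkonce/weak/extern_weak definitions are not. The same input is exercised under both an ELF and a COFF triple:

  opt < interposable-symbol-nocomdat.ll -sancov -sanitizer-coverage-level=3 \
    -sanitizer-coverage-trace-pc-guard -mtriple x86_64-windows-msvc -S \
    | FileCheck interposable-symbol-nocomdat.ll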
diff --git a/test/Instrumentation/SanitizerCoverage/seh.ll b/test/Instrumentation/SanitizerCoverage/seh.ll
index f432573..94d1a2e 100644
--- a/test/Instrumentation/SanitizerCoverage/seh.ll
+++ b/test/Instrumentation/SanitizerCoverage/seh.ll
@@ -7,7 +7,7 @@
 
 declare i32 @llvm.eh.typeid.for(i8*) #2
 declare i8* @llvm.frameaddress(i32)
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
 declare i8* @llvm.localrecover(i8*, i8*, i32)
 declare void @llvm.localescape(...) #1
 
@@ -55,7 +55,7 @@
 define internal i32 @"\01?filt$0@0@main@@"() #1 {
 entry:
   %0 = tail call i8* @llvm.frameaddress(i32 1)
-  %1 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %0)
+  %1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %0)
   %2 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %1, i32 0)
   %__exception_code = bitcast i8* %2 to i32*
   %3 = getelementptr inbounds i8, i8* %0, i32 -20
diff --git a/test/Instrumentation/ThreadSanitizer/tsan_basic.ll b/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
index 69d4117..8b85d7b 100644
--- a/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
+++ b/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
@@ -1,4 +1,5 @@
 ; RUN: opt < %s -tsan -S | FileCheck %s
+; RUN: opt < %s -passes=tsan -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
@@ -77,6 +78,5 @@
   call void @SwiftError(i8** %0)
   ret void
 }
-
 ; CHECK: define internal void @tsan.module_ctor()
 ; CHECK: call void @__tsan_init()
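
ThreadSanitizer gets the same treatment as the MSan tests above: a new-pass-manager RUN line alongside the legacy one, here without the stderr redirection:

  opt < tsan_basic.ll -passes=tsan -S | FileCheck tsan_basic.ll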
diff --git a/test/LTO/Resolution/X86/export-jumptable.ll b/test/LTO/Resolution/X86/export-jumptable.ll
index d6cce85..8ced9d9 100644
--- a/test/LTO/Resolution/X86/export-jumptable.ll
+++ b/test/LTO/Resolution/X86/export-jumptable.ll
@@ -2,7 +2,7 @@
 ; the full LTO object file; any such functions will be referenced by the jump
 ; table.
 
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-lto2 run -o %t2 -r %t,f1,p -r %t,f2,p -r %t,_start,px %t -save-temps
 ; RUN: llvm-dis %t2.1.2.internalize.bc -o - | FileCheck %s
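
This change, together with lowertypetests.ll below, adds -thinlto-split-lto-unit explicitly, suggesting that -thinlto-bc alone no longer implies splitting the module into regular and thin LTO halves; tests whose checks depend on the split (CFI jump tables, type tests) now have to request it:

  opt -thinlto-bc -thinlto-split-lto-unit -o t.bc export-jumptable.ll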
 
diff --git a/test/LTO/Resolution/X86/local-def-dllimport.ll b/test/LTO/Resolution/X86/local-def-dllimport.ll
new file mode 100644
index 0000000..c97e4b7
--- /dev/null
+++ b/test/LTO/Resolution/X86/local-def-dllimport.ll
@@ -0,0 +1,32 @@
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t0.bc %s
+; RUN: llvm-lto2 run -r %t0.bc,__imp_f,l \
+; RUN:               -r %t0.bc,g,p \
+; RUN:               -r %t0.bc,g,l \
+; RUN:               -r %t0.bc,e,l \
+; RUN:               -r %t0.bc,main,x \
+; RUN:               -save-temps -o %t1 %t0.bc
+; RUN: llvm-dis %t1.1.3.import.bc -o - | FileCheck %s
+source_filename = "test.cpp"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+$g = comdat any
+@g = global i8 42, comdat, !type !0
+
+; CHECK: define
+; CHECK-NOT: dllimport
+; CHECK-SAME: @f
+define available_externally dllimport i8* @f() {
+  ret i8* @g
+}
+
+define i8* @e() {
+  ret i8* @g
+}
+
+define i32 @main() {
+  %1 = call i8* @f()
+  %2 = ptrtoint i8* %1 to i32
+  ret i32 %2
+}
+!0 = !{i32 0, !"typeid"}
diff --git a/test/LTO/Resolution/X86/lowertypetests.ll b/test/LTO/Resolution/X86/lowertypetests.ll
index b87452c..e5be4b9 100644
--- a/test/LTO/Resolution/X86/lowertypetests.ll
+++ b/test/LTO/Resolution/X86/lowertypetests.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-lto2 run -r %t,f,plx -r %t,g_alias,plx -r %t,foo,lx -r %t,foo,plx -r %t,bar,lx -r %t,bar,plx -o %t1 %t
 ; RUN: llvm-nm %t1.0 | FileCheck --check-prefix=MERGED %s
 ; RUN: llvm-nm %t1.1 | FileCheck %s
diff --git a/test/LTO/Resolution/X86/lto-unit-check.ll b/test/LTO/Resolution/X86/lto-unit-check.ll
new file mode 100644
index 0000000..1736a5b
--- /dev/null
+++ b/test/LTO/Resolution/X86/lto-unit-check.ll
@@ -0,0 +1,55 @@
+; Test to ensure that the Enable Split LTO Unit flag is set properly in the
+; summary, and that linking bitcode files with different values of this flag
+; is handled silently and correctly.
+
+; Linking bitcode both with EnableSplitLTOUnit set should work
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t1 %s
+; RUN: llvm-bcanalyzer -dump %t1 | FileCheck %s --check-prefix=SPLITLTOUNIT
+; RUN: llvm-dis -o - %t1 | FileCheck %s --check-prefix=ENABLESPLITFLAG
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t2 %s
+; RUN: llvm-bcanalyzer -dump %t2 | FileCheck %s --check-prefix=SPLITLTOUNIT
+; RUN: llvm-dis -o - %t2 | FileCheck %s --check-prefix=ENABLESPLITFLAG
+; RUN: llvm-lto2 run -o %t3 %t1 %t2
+
+; Linking bitcode both without EnableSplitLTOUnit set should work
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit=false -o %t1 %s
+; RUN: llvm-bcanalyzer -dump %t1 | FileCheck %s --check-prefix=NOSPLITLTOUNIT
+; RUN: llvm-dis -o - %t1 | FileCheck %s --check-prefix=NOENABLESPLITFLAG
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit=false -o %t2 %s
+; RUN: llvm-bcanalyzer -dump %t2 | FileCheck %s --check-prefix=NOSPLITLTOUNIT
+; RUN: llvm-dis -o - %t2 | FileCheck %s --check-prefix=NOENABLESPLITFLAG
+; RUN: llvm-lto2 run -o %t3 %t1 %t2
+
+; Linking bitcode with different values of EnableSplitLTOUnit should succeed
+; (silently skipping any optimizations like whole program devirt that rely
+; on all modules being split).
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t1 %s
+; RUN: llvm-bcanalyzer -dump %t1 | FileCheck %s --check-prefix=SPLITLTOUNIT
+; RUN: llvm-dis -o - %t1 | FileCheck %s --check-prefix=ENABLESPLITFLAG
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit=false -o %t2 %s
+; RUN: llvm-bcanalyzer -dump %t2 | FileCheck %s --check-prefix=NOSPLITLTOUNIT
+; RUN: llvm-dis -o - %t2 | FileCheck %s --check-prefix=NOENABLESPLITFLAG
+; RUN: llvm-lto2 run -o %t3 %t1 %t2
+
+; Linking bitcode with different values of EnableSplitLTOUnit (reverse order)
+; should succeed (silently skipping any optimizations like whole program devirt
+; that rely on all modules being split).
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit=false -o %t1 %s
+; RUN: llvm-bcanalyzer -dump %t1 | FileCheck %s --check-prefix=NOSPLITLTOUNIT
+; RUN: llvm-dis -o - %t1 | FileCheck %s --check-prefix=NOENABLESPLITFLAG
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t2 %s
+; RUN: llvm-bcanalyzer -dump %t2 | FileCheck %s --check-prefix=SPLITLTOUNIT
+; RUN: llvm-dis -o - %t2 | FileCheck %s --check-prefix=ENABLESPLITFLAG
+; RUN: llvm-lto2 run -o %t3 %t1 %t2
+
+; The flag should be recorded even when splitting is disabled, since older
+; bitcode (which predates the flag) always had splitting enabled.
+; SPLITLTOUNIT: <FLAGS op0=8/>
+; NOSPLITLTOUNIT: <FLAGS op0=0/>
+
+; Check that the corresponding module flag is set when expected.
+; ENABLESPLITFLAG: !{i32 1, !"EnableSplitLTOUnit", i32 1}
+; NOENABLESPLITFLAG-NOT: !{i32 1, !"EnableSplitLTOUnit", i32 1}
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
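
The summary FLAGS record encodes the EnableSplitLTOUnit bit, set (op0=8) exactly when splitting is enabled and clear (op0=0) otherwise, and llvm-lto2 must accept mixed inputs in either order. Reading the flag off a produced file, using the commands from the RUN lines above:

  opt -thinlto-bc -thinlto-split-lto-unit=false -o t1.bc lto-unit-check.ll
  llvm-bcanalyzer -dump t1.bc | grep '<FLAGS'
  # expect: <FLAGS op0=0/>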
diff --git a/test/Linker/Inputs/metadata-source-a.ll b/test/Linker/Inputs/metadata-source-a.ll
index e828770..21a2a84 100644
--- a/test/Linker/Inputs/metadata-source-a.ll
+++ b/test/Linker/Inputs/metadata-source-a.ll
@@ -11,7 +11,7 @@
 
 !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 7.0.0 (https://git.llvm.org/git/clang.git/ c12b573f9ac61655cce52628b34235f58edaf984) (https://scott.linder@llvm.org/git/llvm.git 90c4822e8541eb07891cd03e614c530c30f8aa12)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 7.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "a.c", directory: "/home/slinder1/test/link", source: "int a;\0A")
 !4 = !{}
 !5 = !{!0}
@@ -19,4 +19,4 @@
 !7 = !{i32 2, !"Dwarf Version", i32 4}
 !8 = !{i32 2, !"Debug Info Version", i32 3}
 !9 = !{i32 1, !"wchar_size", i32 4}
-!10 = !{!"clang version 7.0.0 (https://git.llvm.org/git/clang.git/ c12b573f9ac61655cce52628b34235f58edaf984) (https://scott.linder@llvm.org/git/llvm.git 90c4822e8541eb07891cd03e614c530c30f8aa12)"}
+!10 = !{!"clang version 7.0.0"}
diff --git a/test/Linker/Inputs/metadata-source-b.ll b/test/Linker/Inputs/metadata-source-b.ll
index 2bda92e..074704b 100644
--- a/test/Linker/Inputs/metadata-source-b.ll
+++ b/test/Linker/Inputs/metadata-source-b.ll
@@ -11,7 +11,7 @@
 
 !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 !1 = distinct !DIGlobalVariable(name: "b", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 7.0.0 (https://git.llvm.org/git/clang.git/ c12b573f9ac61655cce52628b34235f58edaf984) (https://scott.linder@llvm.org/git/llvm.git 90c4822e8541eb07891cd03e614c530c30f8aa12)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 7.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
 !3 = !DIFile(filename: "b.c", directory: "/home/slinder1/test/link", source: "int b;\0A")
 !4 = !{}
 !5 = !{!0}
@@ -19,4 +19,4 @@
 !7 = !{i32 2, !"Dwarf Version", i32 4}
 !8 = !{i32 2, !"Debug Info Version", i32 3}
 !9 = !{i32 1, !"wchar_size", i32 4}
-!10 = !{!"clang version 7.0.0 (https://git.llvm.org/git/clang.git/ c12b573f9ac61655cce52628b34235f58edaf984) (https://scott.linder@llvm.org/git/llvm.git 90c4822e8541eb07891cd03e614c530c30f8aa12)"}
+!10 = !{!"clang version 7.0.0"}
diff --git a/test/Linker/Inputs/type-unique-inheritance-a.ll b/test/Linker/Inputs/type-unique-inheritance-a.ll
index 094ea64..4c85026 100644
--- a/test/Linker/Inputs/type-unique-inheritance-a.ll
+++ b/test/Linker/Inputs/type-unique-inheritance-a.ll
@@ -66,7 +66,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!19, !25}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4 (http://llvm.org/git/clang.git f54e02f969d02d640103db73efc30c45439fceab) (http://llvm.org/git/llvm.git 284353b55896cb1babfaa7add7c0a363245342d2)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
 !1 = !DIFile(filename: "foo.cpp", directory: "/Users/mren/c_testing/type_unique_air/inher")
 !2 = !{}
 !3 = !{!4, !8}
diff --git a/test/Linker/Inputs/type-unique-inheritance-b.ll b/test/Linker/Inputs/type-unique-inheritance-b.ll
index 15f6cb2..1684a2e 100644
--- a/test/Linker/Inputs/type-unique-inheritance-b.ll
+++ b/test/Linker/Inputs/type-unique-inheritance-b.ll
@@ -40,7 +40,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!27, !38}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4 (http://llvm.org/git/clang.git f54e02f969d02d640103db73efc30c45439fceab) (http://llvm.org/git/llvm.git 284353b55896cb1babfaa7add7c0a363245342d2)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
 !1 = !DIFile(filename: "bar.cpp", directory: "/Users/mren/c_testing/type_unique_air/inher")
 !2 = !{}
 !3 = !{!4, !11, !15}
diff --git a/test/Linker/Inputs/type-unique-simple2-a.ll b/test/Linker/Inputs/type-unique-simple2-a.ll
index ae0f931..35f6ea5 100644
--- a/test/Linker/Inputs/type-unique-simple2-a.ll
+++ b/test/Linker/Inputs/type-unique-simple2-a.ll
@@ -63,7 +63,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!16, !22}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4 (http://llvm.org/git/clang.git 8a3f9e46cb988d2c664395b21910091e3730ae82) (http://llvm.org/git/llvm.git 4699e9549358bc77824a59114548eecc3f7c523c)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
 !1 = !DIFile(filename: "foo.cpp", directory: ".")
 !2 = !{}
 !3 = !{!4}
diff --git a/test/Linker/Inputs/type-unique-simple2-b.ll b/test/Linker/Inputs/type-unique-simple2-b.ll
index 04e139d..e81466e 100644
--- a/test/Linker/Inputs/type-unique-simple2-b.ll
+++ b/test/Linker/Inputs/type-unique-simple2-b.ll
@@ -36,7 +36,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!19, !28}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4 (http://llvm.org/git/clang.git 8a3f9e46cb988d2c664395b21910091e3730ae82) (http://llvm.org/git/llvm.git 4699e9549358bc77824a59114548eecc3f7c523c)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
 !1 = !DIFile(filename: "bar.cpp", directory: ".")
 !2 = !{}
 !3 = !{!4}
diff --git a/test/Linker/type-unique-simple-a.ll b/test/Linker/type-unique-simple-a.ll
index c53d4fe..c6e6c2c 100644
--- a/test/Linker/type-unique-simple-a.ll
+++ b/test/Linker/type-unique-simple-a.ll
@@ -68,7 +68,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!14, !20}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4 (http://llvm.org/git/clang.git c23b1db6268c8e7ce64026d57d1510c1aac200a0) (http://llvm.org/git/llvm.git 09b98fe3978eddefc2145adc1056cf21580ce945)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
 !1 = !DIFile(filename: "foo.cpp", directory: "/Users/mren/c_testing/type_unique_air/simple")
 !2 = !{}
 !3 = !{!4}
diff --git a/test/Linker/type-unique-simple-b.ll b/test/Linker/type-unique-simple-b.ll
index 8f9f7e1..85dcfca 100644
--- a/test/Linker/type-unique-simple-b.ll
+++ b/test/Linker/type-unique-simple-b.ll
@@ -38,7 +38,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!17, !26}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4 (http://llvm.org/git/clang.git c23b1db6268c8e7ce64026d57d1510c1aac200a0) (http://llvm.org/git/llvm.git 09b98fe3978eddefc2145adc1056cf21580ce945)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.4", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !3, globals: !2, imports: !2)
 !1 = !DIFile(filename: "bar.cpp", directory: "/Users/mren/c_testing/type_unique_air/simple")
 !2 = !{}
 !3 = !{!4}
diff --git a/test/MC/AArch64/SVE/directive-arch.s b/test/MC/AArch64/SVE/directive-arch.s
new file mode 100644
index 0000000..99ddf4e
--- /dev/null
+++ b/test/MC/AArch64/SVE/directive-arch.s
@@ -0,0 +1,6 @@
+// RUN: llvm-mc -triple=aarch64 < %s | FileCheck %s
+
+.arch armv8-a+sve
+
+ptrue   p0.b, pow2
+// CHECK: ptrue   p0.b, pow2
diff --git a/test/MC/AArch64/armv8.5a-predctrl-error.s b/test/MC/AArch64/armv8.5a-predctrl-error.s
deleted file mode 100644
index 0fd49b0..0000000
--- a/test/MC/AArch64/armv8.5a-predctrl-error.s
+++ /dev/null
@@ -1,20 +0,0 @@
-// RUN: not llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+predctrl < %s 2>&1| FileCheck %s
-
-cfp rctx
-dvp rctx
-cpp rctx
-
-// CHECK: specified cfp op requires a register
-// CHECK: specified dvp op requires a register
-// CHECK: specified cpp op requires a register
-
-cfp x0, x1
-dvp x1, x2
-cpp x2, x3
-
-// CHECK:      invalid operand for prediction restriction instruction
-// CHECK-NEXT: cfp
-// CHECK:      invalid operand for prediction restriction instruction
-// CHECK-NEXT: dvp
-// CHECK:      invalid operand for prediction restriction instruction
-// CHECK-NEXT: cpp
diff --git a/test/MC/AArch64/armv8.5a-predctrl.s b/test/MC/AArch64/armv8.5a-predctrl.s
deleted file mode 100644
index af7dda7..0000000
--- a/test/MC/AArch64/armv8.5a-predctrl.s
+++ /dev/null
@@ -1,18 +0,0 @@
-// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+predctrl < %s      | FileCheck %s
-// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+v8.5a    < %s      | FileCheck %s
-// RUN: not llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=-predctrl < %s 2>&1 | FileCheck %s --check-prefix=NOPREDCTRL
-
-cfp rctx, x0
-dvp rctx, x1
-cpp rctx, x2
-
-// CHECK: cfp rctx, x0      // encoding: [0x80,0x73,0x0b,0xd5]
-// CHECK: dvp rctx, x1      // encoding: [0xa1,0x73,0x0b,0xd5]
-// CHECK: cpp rctx, x2      // encoding: [0xe2,0x73,0x0b,0xd5]
-
-// NOPREDCTRL: CFPRCTX requires predctrl
-// NOPREDCTRL-NEXT: cfp
-// NOPREDCTRL: DVPRCTX requires predctrl
-// NOPREDCTRL-NEXT: dvp
-// NOPREDCTRL: CPPRCTX requires predctrl
-// NOPREDCTRL-NEXT: cpp
diff --git a/test/MC/AArch64/armv8.5a-predres-error.s b/test/MC/AArch64/armv8.5a-predres-error.s
new file mode 100644
index 0000000..295252d
--- /dev/null
+++ b/test/MC/AArch64/armv8.5a-predres-error.s
@@ -0,0 +1,20 @@
+// RUN: not llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+predres < %s 2>&1| FileCheck %s
+
+cfp rctx
+dvp rctx
+cpp rctx
+
+// CHECK: specified cfp op requires a register
+// CHECK: specified dvp op requires a register
+// CHECK: specified cpp op requires a register
+
+cfp x0, x1
+dvp x1, x2
+cpp x2, x3
+
+// CHECK:      invalid operand for prediction restriction instruction
+// CHECK-NEXT: cfp
+// CHECK:      invalid operand for prediction restriction instruction
+// CHECK-NEXT: dvp
+// CHECK:      invalid operand for prediction restriction instruction
+// CHECK-NEXT: cpp
diff --git a/test/MC/AArch64/armv8.5a-predres.s b/test/MC/AArch64/armv8.5a-predres.s
new file mode 100644
index 0000000..4bab347
--- /dev/null
+++ b/test/MC/AArch64/armv8.5a-predres.s
@@ -0,0 +1,18 @@
+// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+predres < %s      | FileCheck %s
+// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+v8.5a    < %s      | FileCheck %s
+// RUN: not llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=-predres < %s 2>&1 | FileCheck %s --check-prefix=NOPREDCTRL
+
+cfp rctx, x0
+dvp rctx, x1
+cpp rctx, x2
+
+// CHECK: cfp rctx, x0      // encoding: [0x80,0x73,0x0b,0xd5]
+// CHECK: dvp rctx, x1      // encoding: [0xa1,0x73,0x0b,0xd5]
+// CHECK: cpp rctx, x2      // encoding: [0xe2,0x73,0x0b,0xd5]
+
+// NOPREDCTRL: CFPRCTX requires predres
+// NOPREDCTRL-NEXT: cfp
+// NOPREDCTRL: DVPRCTX requires predres
+// NOPREDCTRL-NEXT: dvp
+// NOPREDCTRL: CPPRCTX requires predres
+// NOPREDCTRL-NEXT: cpp
diff --git a/test/MC/AArch64/armv8.5a-sb.s b/test/MC/AArch64/armv8.5a-sb.s
new file mode 100644
index 0000000..7d88aed
--- /dev/null
+++ b/test/MC/AArch64/armv8.5a-sb.s
@@ -0,0 +1,11 @@
+// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+sb -o - %s      | FileCheck %s
+// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+v8.5a -o - %s      | FileCheck %s
+// RUN: not llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=-sb -o - %s 2>&1 | FileCheck %s --check-prefix=NOSB
+
+// Speculation barrier
+sb
+
+// CHECK: sb // encoding: [0xff,0x30,0x03,0xd5]
+
+// NOSB: instruction requires: sb
+// NOSB-NEXT: sb
diff --git a/test/MC/AArch64/armv8.5a-specctrl.s b/test/MC/AArch64/armv8.5a-specctrl.s
deleted file mode 100644
index 279a396..0000000
--- a/test/MC/AArch64/armv8.5a-specctrl.s
+++ /dev/null
@@ -1,11 +0,0 @@
-// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+specctrl < %s      | FileCheck %s
-// RUN:     llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+v8.5a    < %s      | FileCheck %s
-// RUN: not llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=-specctrl < %s 2>&1 | FileCheck %s --check-prefix=NOSB
-
-// Flag manipulation
-sb
-
-// CHECK: sb // encoding: [0xff,0x30,0x03,0xd5]
-
-// NOSB: instruction requires: specctrl
-// NOSB-NEXT: sb
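
The four armv8.5a-* file changes above are renames of feature spellings rather than behavioural changes: -mattr=+predctrl becomes +predres (the prediction restriction instructions cfp/dvp/cpp) and -mattr=+specctrl becomes +sb (the speculation barrier), with the diagnostics updated to match ("CFPRCTX requires predres", "instruction requires: sb"). The encodings are unchanged, as the renamed tests' CHECK lines show:

  llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=+predres < armv8.5a-predres.s
  not llvm-mc -triple aarch64-none-linux-gnu -show-encoding -mattr=-sb -o - armv8.5a-sb.s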
diff --git a/test/MC/AArch64/directive-arch_extension-nosimd.s b/test/MC/AArch64/directive-arch_extension-nosimd.s
new file mode 100644
index 0000000..cbc03d0
--- /dev/null
+++ b/test/MC/AArch64/directive-arch_extension-nosimd.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -triple aarch64 -filetype asm -o - %s 2>&1 | FileCheck %s
+
+	.arch_extension nosimd
+
+	add v0.8b, v0.8b, v0.8b
+// CHECK: error: instruction requires: neon
diff --git a/test/MC/AArch64/directive-arch_extension-simd.s b/test/MC/AArch64/directive-arch_extension-simd.s
new file mode 100644
index 0000000..04ebeee
--- /dev/null
+++ b/test/MC/AArch64/directive-arch_extension-simd.s
@@ -0,0 +1,6 @@
+// RUN: llvm-mc -triple aarch64 -mattr=-simd -filetype asm -o - %s | FileCheck %s
+
+	.arch_extension simd
+
+	add v0.8b, v0.8b, v0.8b
+// CHECK: add v0.8b, v0.8b, v0.8b
diff --git a/test/MC/AArch64/fixup-absolute-signed.s b/test/MC/AArch64/fixup-absolute-signed.s
new file mode 100644
index 0000000..3dd93c9
--- /dev/null
+++ b/test/MC/AArch64/fixup-absolute-signed.s
@@ -0,0 +1,44 @@
+// RUN: llvm-mc -triple aarch64--none-eabi -filetype obj < %s -o - | llvm-objdump -d - | FileCheck %s
+
+onepart_before = 12345
+twopart_before = -12345678
+threepart_before = -1234567890
+
+// CHECK: movn     x0, #0, lsl #32
+// CHECK: movn     x0, #0, lsl #32
+movz x0, #:abs_g2_s:threepart_before
+movz x0, #:abs_g2_s:threepart_after
+
+// CHECK: movk    x0, #65535, lsl #32
+// CHECK: movk    x0, #65535, lsl #32
+movk x0, #:abs_g2_nc:threepart_before
+movk x0, #:abs_g2_nc:threepart_after
+
+// CHECK: mov     x0, #-12320769
+// CHECK: mov     x0, #-12320769
+movz x0, #:abs_g1_s:twopart_before
+movz x0, #:abs_g1_s:twopart_after
+
+// CHECK: movk    x0, #46697, lsl #16
+// CHECK: movk    x0, #46697, lsl #16
+movk x0, #:abs_g1_nc:threepart_before
+movk x0, #:abs_g1_nc:threepart_after
+
+// CHECK: mov     x0, #12345
+// CHECK: mov     x0, #12345
+movz x0, #:abs_g0_s:onepart_before
+movz x0, #:abs_g0_s:onepart_after
+
+// CHECK: movk    x0, #64814
+// CHECK: movk    x0, #64814
+movk x0, #:abs_g0_nc:threepart_before
+movk x0, #:abs_g0_nc:threepart_after
+
+// CHECK: mov     x0, #12345
+// CHECK: mov     x0, #12345
+movn x0, #:abs_g0_s:onepart_before
+movn x0, #:abs_g0_s:onepart_after
+
+onepart_after = 12345
+twopart_after = -12345678
+threepart_after = -1234567890
diff --git a/test/MC/AArch64/fixup-absolute.s b/test/MC/AArch64/fixup-absolute.s
new file mode 100644
index 0000000..ed4b3b0
--- /dev/null
+++ b/test/MC/AArch64/fixup-absolute.s
@@ -0,0 +1,40 @@
+// RUN: llvm-mc -triple aarch64--none-eabi -filetype obj < %s -o - | llvm-objdump -d - | FileCheck %s
+
+onepart_before = 0x1234
+twopart_before = 0x12345678
+threepart_before = 0x1234567890AB
+fourpart_before = 0x1234567890ABCDEF
+
+// CHECK: mov     x0, #1311673391471656960
+// CHECK: mov     x0, #1311673391471656960
+movz x0, #:abs_g3:fourpart_before
+movz x0, #:abs_g3:fourpart_after
+// CHECK: mov     x0, #20014547599360
+// CHECK: mov     x0, #20014547599360
+movz x0, #:abs_g2:threepart_before
+movz x0, #:abs_g2:threepart_after
+// CHECK: movk    x0, #22136, lsl #32
+// CHECK: movk    x0, #22136, lsl #32
+movk x0, #:abs_g2_nc:fourpart_before
+movk x0, #:abs_g2_nc:fourpart_after
+// CHECK: mov     x0, #305397760
+// CHECK: mov     x0, #305397760
+movz x0, #:abs_g1:twopart_before
+movz x0, #:abs_g1:twopart_after
+// CHECK: movk    x0, #37035, lsl #16
+// CHECK: movk    x0, #37035, lsl #16
+movk x0, #:abs_g1_nc:fourpart_before
+movk x0, #:abs_g1_nc:fourpart_after
+// CHECK: mov     x0, #4660
+// CHECK: mov     x0, #4660
+movz x0, #:abs_g0:onepart_before
+movz x0, #:abs_g0:onepart_after
+// CHECK: movk    x0, #52719
+// CHECK: movk    x0, #52719
+movk x0, #:abs_g0_nc:fourpart_before
+movk x0, #:abs_g0_nc:fourpart_after
+
+onepart_after = 0x1234
+twopart_after = 0x12345678
+threepart_after = 0x1234567890AB
+fourpart_after = 0x1234567890ABCDEF
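
In both fixup-absolute tests, :abs_g0: through :abs_g3: select successive 16-bit chunks of an absolute 64-bit value for movz/movk (g0 is bits 0-15, g1 bits 16-31, g2 bits 32-47, g3 bits 48-63); per the checks in the signed variant, the _s forms encode a movz of a negative value as movn. Each symbol is defined once before and once after its uses so that the parse-time and layout-time (fixup) paths are both covered and must agree. The movk immediates above are simply the chunks of 0x1234567890ABCDEF:

  printf '%d %d %d\n' 0x5678 0x90AB 0xCDEF
  # 22136 37035 52719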
diff --git a/test/MC/AArch64/fixup-out-of-range.s b/test/MC/AArch64/fixup-out-of-range.s
index 3acffe2..500c1d1 100644
--- a/test/MC/AArch64/fixup-out-of-range.s
+++ b/test/MC/AArch64/fixup-out-of-range.s
@@ -55,6 +55,22 @@
 // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: fixup not sufficiently aligned
   b unaligned
 
+// CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: fixup value out of range
+  movz x0, #:abs_g0:value1
+
+// CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: fixup value out of range
+  movz x0, #:abs_g1:value2
+
+// CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: fixup value out of range
+  movz x0, #:abs_g0_s:value1
+
+// CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: fixup value out of range
+  movz x0, #:abs_g1_s:value2
+
+// CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: relocation for a thread-local variable points to an absolute symbol
+  movz x0, #:tprel_g0:value1
+
+
   .byte 0
 unaligned:
   .byte 0
@@ -63,3 +79,5 @@
   .balign 8
 distant:
   .word 0
+value1 = 0x12345678
+value2 = 0x123456789
diff --git a/test/MC/AArch64/seh.s b/test/MC/AArch64/seh.s
new file mode 100644
index 0000000..ca862cb
--- /dev/null
+++ b/test/MC/AArch64/seh.s
@@ -0,0 +1,84 @@
+// This test checks that the SEH directives emit the correct unwind data.
+
+// RUN: llvm-mc -triple aarch64-pc-win32 -filetype=obj %s | llvm-readobj -s -r | FileCheck %s
+
+// CHECK:      Sections [
+// CHECK:        Section {
+// CHECK:          Name: .text
+// CHECK:          RelocationCount: 0
+// CHECK:          Characteristics [
+// CHECK-NEXT:       ALIGN_4BYTES
+// CHECK-NEXT:       CNT_CODE
+// CHECK-NEXT:       MEM_EXECUTE
+// CHECK-NEXT:       MEM_READ
+// CHECK-NEXT:     ]
+// CHECK-NEXT:   }
+// CHECK:        Section {
+// CHECK:          Name: .xdata
+// CHECK:          RawDataSize: 24
+// CHECK:          RelocationCount: 1
+// CHECK:          Characteristics [
+// CHECK-NEXT:       ALIGN_4BYTES
+// CHECK-NEXT:       CNT_INITIALIZED_DATA
+// CHECK-NEXT:       MEM_READ
+// CHECK-NEXT:     ]
+// CHECK-NEXT:   }
+// CHECK:        Section {
+// CHECK:          Name: .pdata
+// CHECK:          RelocationCount: 6
+// CHECK:          Characteristics [
+// CHECK-NEXT:       ALIGN_4BYTES
+// CHECK-NEXT:       CNT_INITIALIZED_DATA
+// CHECK-NEXT:       MEM_READ
+// CHECK-NEXT:     ]
+// CHECK-NEXT:   }
+// CHECK-NEXT: ]
+
+// CHECK-NEXT: Relocations [
+// CHECK-NEXT:   Section (4) .xdata {
+// CHECK-NEXT:     0x8 IMAGE_REL_ARM64_ADDR32NB __C_specific_handler
+// CHECK-NEXT:   }
+// CHECK-NEXT:   Section (5) .pdata {
+// CHECK-NEXT:     0x0 IMAGE_REL_ARM64_ADDR32NB func
+// CHECK-NEXT:     0x4 IMAGE_REL_ARM64_ADDR32NB .xdata
+// CHECK-NEXT:     0x8 IMAGE_REL_ARM64_ADDR32NB func
+// CHECK-NEXT:     0xC IMAGE_REL_ARM64_ADDR32NB .xdata
+// CHECK-NEXT:     0x10 IMAGE_REL_ARM64_ADDR32NB smallFunc
+// CHECK-NEXT:     0x14 IMAGE_REL_ARM64_ADDR32NB .xdata
+// CHECK-NEXT:   }
+// CHECK-NEXT: ]
+
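+// Each procedure region (func, its chained unwind region, and smallFunc)
+// presumably contributes one 8-byte .pdata entry pairing a code RVA with an
+// .xdata RVA, which accounts for the six ADDR32NB relocations at 8-byte
+// strides above; the lone .xdata relocation points at __C_specific_handler.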
+
+    .text
+    .globl func
+    .def func
+    .scl 2
+    .type 32
+    .endef
+    .seh_proc func
+func:
+    sub sp, sp, #24
+    .seh_stackalloc 24
+    mov x29, sp
+    .seh_endprologue
+    .seh_handler __C_specific_handler, @except
+    .seh_handlerdata
+    .long 0
+    .text
+    .seh_startchained
+    .seh_endprologue
+    .seh_endchained
+    add sp, sp, #24
+    ret
+    .seh_endproc
+
+// Test emission of small functions.
+    .globl smallFunc
+    .def smallFunc
+    .scl 2
+    .type 32
+    .endef
+    .seh_proc smallFunc
+smallFunc:
+    ret
+    .seh_endproc
diff --git a/test/MC/AMDGPU/reloc.s b/test/MC/AMDGPU/reloc.s
index 06c4459..0a349da 100644
--- a/test/MC/AMDGPU/reloc.s
+++ b/test/MC/AMDGPU/reloc.s
@@ -3,14 +3,14 @@
 // CHECK: Relocations [
 // CHECK: .rel.text {
 // CHECK: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD0 0x0
-// CHECK: R_AMDGPU_ABS32_HI SCRATCH_RSRC_DWORD1 0x0
+// CHECK: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD1 0x0
 // CHECK: R_AMDGPU_GOTPCREL global_var0 0x0
 // CHECK: R_AMDGPU_GOTPCREL32_LO global_var1 0x0
 // CHECK: R_AMDGPU_GOTPCREL32_HI global_var2 0x0
 // CHECK: R_AMDGPU_REL32_LO global_var3 0x0
 // CHECK: R_AMDGPU_REL32_HI global_var4 0x0
 // CHECK: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD0 0x0
-// CHECK: R_AMDGPU_ABS32_HI SCRATCH_RSRC_DWORD1 0x0
+// CHECK: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD1 0x0
 // CHECK: R_AMDGPU_GOTPCREL global_var0 0x0
 // CHECK: R_AMDGPU_GOTPCREL32_LO global_var1 0x0
 // CHECK: R_AMDGPU_GOTPCREL32_HI global_var2 0x0
diff --git a/test/MC/ARM/armv8.5a-sb-error-thumb.s b/test/MC/ARM/armv8.5a-sb-error-thumb.s
new file mode 100644
index 0000000..5f88bf6
--- /dev/null
+++ b/test/MC/ARM/armv8.5a-sb-error-thumb.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -triple thumbv8 -show-encoding -mattr=+sb < %s 2>&1 | FileCheck %s
+
+it eq
+sbeq
+
+// CHECK: instruction 'sb' is not predicable, but condition code specified
diff --git a/test/MC/ARM/armv8.5a-sb-error.s b/test/MC/ARM/armv8.5a-sb-error.s
new file mode 100644
index 0000000..917b742
--- /dev/null
+++ b/test/MC/ARM/armv8.5a-sb-error.s
@@ -0,0 +1,5 @@
+// RUN: not llvm-mc -triple armv8 -show-encoding -mattr=+sb < %s 2>&1 | FileCheck %s
+
+sbeq
+
+// CHECK: instruction 'sb' is not predicable
diff --git a/test/MC/ARM/armv8.5a-sb.s b/test/MC/ARM/armv8.5a-sb.s
new file mode 100644
index 0000000..f0c9ee4
--- /dev/null
+++ b/test/MC/ARM/armv8.5a-sb.s
@@ -0,0 +1,15 @@
+// RUN:     llvm-mc -triple armv8   -show-encoding -mattr=+sb < %s      | FileCheck %s
+// RUN:     llvm-mc -triple armv8   -show-encoding -mattr=+v8.5a    < %s      | FileCheck %s
+// RUN: not llvm-mc -triple armv8   -show-encoding -mattr=-sb < %s 2>&1 | FileCheck %s --check-prefix=NOSB
+// RUN:     llvm-mc -triple thumbv8 -show-encoding -mattr=+sb < %s      | FileCheck %s --check-prefix=THUMB
+// RUN:     llvm-mc -triple thumbv8 -show-encoding -mattr=+v8.5a    < %s      | FileCheck %s --check-prefix=THUMB
+// RUN: not llvm-mc -triple thumbv8 -show-encoding -mattr=-sb < %s 2>&1 | FileCheck %s --check-prefix=NOSB
+
+// Flag manipulation
+sb
+
+// CHECK: sb    @ encoding: [0x70,0xf0,0x7f,0xf5]
+// THUMB: sb    @ encoding: [0xbf,0xf3,0x70,0x8f]
+
+// NOSB: instruction requires: sb
+// NOSB-NEXT: sb
diff --git a/test/MC/ARM/armv8.5a-specctrl-error-thumb.s b/test/MC/ARM/armv8.5a-specctrl-error-thumb.s
deleted file mode 100644
index 359aec9..0000000
--- a/test/MC/ARM/armv8.5a-specctrl-error-thumb.s
+++ /dev/null
@@ -1,6 +0,0 @@
-// RUN: not llvm-mc -triple thumbv8 -show-encoding -mattr=+specctrl < %s 2>&1 | FileCheck %s
-
-it eq
-sbeq
-
-// CHECK: instruction 'sb' is not predicable, but condition code specified
diff --git a/test/MC/ARM/armv8.5a-specctrl-error.s b/test/MC/ARM/armv8.5a-specctrl-error.s
deleted file mode 100644
index 5a018df..0000000
--- a/test/MC/ARM/armv8.5a-specctrl-error.s
+++ /dev/null
@@ -1,5 +0,0 @@
-// RUN: not llvm-mc -triple armv8 -show-encoding -mattr=+specctrl < %s 2>&1 | FileCheck %s
-
-sbeq
-
-// CHECK: instruction 'sb' is not predicable
diff --git a/test/MC/ARM/armv8.5a-specctrl.s b/test/MC/ARM/armv8.5a-specctrl.s
deleted file mode 100644
index 2d799e6..0000000
--- a/test/MC/ARM/armv8.5a-specctrl.s
+++ /dev/null
@@ -1,15 +0,0 @@
-// RUN:     llvm-mc -triple armv8   -show-encoding -mattr=+specctrl < %s      | FileCheck %s
-// RUN:     llvm-mc -triple armv8   -show-encoding -mattr=+v8.5a    < %s      | FileCheck %s
-// RUN: not llvm-mc -triple armv8   -show-encoding -mattr=-specctrl < %s 2>&1 | FileCheck %s --check-prefix=NOSB
-// RUN:     llvm-mc -triple thumbv8 -show-encoding -mattr=+specctrl < %s      | FileCheck %s --check-prefix=THUMB
-// RUN:     llvm-mc -triple thumbv8 -show-encoding -mattr=+v8.5a    < %s      | FileCheck %s --check-prefix=THUMB
-// RUN: not llvm-mc -triple thumbv8 -show-encoding -mattr=-specctrl < %s 2>&1 | FileCheck %s --check-prefix=NOSB
-
-// Flag manipulation
-sb
-
-// CHECK: sb    @ encoding: [0x70,0xf0,0x7f,0xf5]
-// THUMB: sb    @ encoding: [0xbf,0xf3,0x70,0x8f]
-
-// NOSB: instruction requires: specctrl
-// NOSB-NEXT: sb
diff --git a/test/MC/AsmParser/floating-literals.s b/test/MC/AsmParser/floating-literals.s
index de0b4ca..9dca77f 100644
--- a/test/MC/AsmParser/floating-literals.s
+++ b/test/MC/AsmParser/floating-literals.s
@@ -10,12 +10,21 @@
 # CHECK: .long	2139095040
 .single InFinIty
 
+# CHECK: .quad	9218868437227405312
+.double infinity
+
 # CHECK: .long	4286578688
 .single -iNf
 
+# CHECK: .quad	-4503599627370496
+.double -inf
+
 # CHECK: .long	2147483647
 .single nAN
 
+# CHECK: .quad	9223372036854775807
+.double NaN
+
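+# The .quad values above are the raw IEEE-754 double bit patterns: +inf =
+# 0x7FF0000000000000 = 9218868437227405312, -inf = 0xFFF0000000000000 (printed
+# as the signed value -4503599627370496), and this NaN = 0x7FFFFFFFFFFFFFFF.
+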
 # CHECK: .long  1067928519
 .float 1.307
         
diff --git a/test/MC/AsmParser/negativ_altmacro_expression.s b/test/MC/AsmParser/negativ_altmacro_expression.s
index fabf46c..be8c66e 100644
--- a/test/MC/AsmParser/negativ_altmacro_expression.s
+++ b/test/MC/AsmParser/negativ_altmacro_expression.s
@@ -6,7 +6,7 @@
 # .noaltmacro returns the format into a regular macro handling.
 # The default mode is ".noaltmacro" as first test checks.
 
-# CHECK:  error: unknown token in expression
+# CHECK:  error: expected immediate expression
 # CHECK-NEXT: addl $%(1%4), %eax
 .macro inner_percent arg
     addl $\arg, %eax
@@ -18,12 +18,12 @@
 .noaltmacro
 
 # CHECK: multi_args_macro %(1+4-5) 1 %2+1
-# CHECK: error: unknown token in expression
+# CHECK: error: expected immediate expression
 # CHECK-NEXT: addl $%(1+4-5), %eax
 
 
 # CHECK: multi_args_macro %(1+4-5),1,%4%10
-# CHECK: error: unknown token in expression
+# CHECK: error: expected immediate expression
 # CHECK-NEXT: addl $%(1+4-5), %eax
 .macro multi_args_macro arg1 arg2 arg3
   label\arg1\arg2\arg3:
diff --git a/test/MC/COFF/bigobj.py b/test/MC/COFF/bigobj.py
index 2d61073..7908fb3 100644
--- a/test/MC/COFF/bigobj.py
+++ b/test/MC/COFF/bigobj.py
@@ -1,5 +1,7 @@
 # RUN: python %s | llvm-mc -filetype=obj -triple i686-pc-win32 - | llvm-readobj -h | FileCheck %s
 
+from __future__ import print_function
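+# (print_function keeps the script below runnable under Python 2 and 3.)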
+
 # This test checks that the COFF object emitter can produce objects with
 # more than 65279 sections.
 
diff --git a/test/MC/COFF/cross-section-relative.s b/test/MC/COFF/cross-section-relative.s
index 6f17292..a9b7e6d 100644
--- a/test/MC/COFF/cross-section-relative.s
+++ b/test/MC/COFF/cross-section-relative.s
@@ -87,21 +87,25 @@
 // READOBJ-NEXT:      Offset: 0x0
 // READOBJ-NEXT:      Type: IMAGE_REL_AMD64_REL32 (4)
 // READOBJ-NEXT:      Symbol: g3
+// READOBJ-NEXT:      SymbolIndex: 12
 // READOBJ-NEXT:    }
 // READOBJ-NEXT:    Relocation {
 // READOBJ-NEXT:      Offset: 0x4
 // READOBJ-NEXT:      Type: IMAGE_REL_AMD64_ADDR32NB (3)
 // READOBJ-NEXT:      Symbol: g3
+// READOBJ-NEXT:      SymbolIndex: 12
 // READOBJ-NEXT:    }
 // READOBJ-NEXT:    Relocation {
 // READOBJ-NEXT:      Offset: 0x20
 // READOBJ-NEXT:      Type: IMAGE_REL_AMD64_REL32 (4)
 // READOBJ-NEXT:      Symbol: g3
+// READOBJ-NEXT:      SymbolIndex: 12
 // READOBJ-NEXT:    }
 // READOBJ-NEXT:    Relocation {
 // READOBJ-NEXT:      Offset: 0x28
 // READOBJ-NEXT:      Type: IMAGE_REL_AMD64_REL32 (4)
 // READOBJ-NEXT:      Symbol: foobar
+// READOBJ-NEXT:      SymbolIndex: 20
 // READOBJ-NEXT:    }
 // READOBJ-NEXT:  }
 // READOBJ-NEXT:]
diff --git a/test/MC/COFF/cv-def-range-align.s b/test/MC/COFF/cv-def-range-align.s
new file mode 100644
index 0000000..57bd3bf
--- /dev/null
+++ b/test/MC/COFF/cv-def-range-align.s
@@ -0,0 +1,161 @@
+# RUN: llvm-mc -triple x86_64-windows-msvc %s -filetype=obj -o %t.o
+# RUN: llvm-pdbutil dump -symbols %t.o | FileCheck %s
+
+# We used to have a label-flushing bug at the points marked by the "BUG"
+# comments below that made the S_DEFRANGE_FRAMEPOINTER_REL records appear to
+# be missing. In practice, the label would extend past the def range, so every
+# local looked as if it had been optimized out or had no def ranges.
+
+# CHECK: S_GPROC32_ID {{.*}} `max`
+# CHECK: S_LOCAL [size = {{.*}}] `a`
+# CHECK: S_DEFRANGE_FRAMEPOINTER_REL
+# CHECK: S_LOCAL [size = {{.*}}] `b`
+# CHECK: S_DEFRANGE_FRAMEPOINTER_REL
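+#
+# Both S_DEFRANGE_FRAMEPOINTER_REL records come from the .cv_def_range
+# directives for `a` and `b` further down, each covering .Ltmp0-.Ltmp1.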
+
+	.text
+	.def	 @feat.00;
+	.scl	3;
+	.type	0;
+	.endef
+	.globl	@feat.00
+.set @feat.00, 0
+	.def	 max;
+	.scl	2;
+	.type	32;
+	.endef
+	.globl	max                     # -- Begin function max
+	.p2align	4, 0x90
+max:                                    # @max
+.Lfunc_begin0:
+	.cv_func_id 0
+	.cv_file	1 "C:\\src\\llvm-project\\build\\t.c" "44649E6EBC4FC8880991A1AF1F2D2990" 1
+	.cv_loc	0 1 1 0                 # t.c:1:0
+.seh_proc max
+# %bb.0:                                # %entry
+	pushq	%rax
+	.seh_stackalloc 8
+	.seh_endprologue
+	movl	%edx, 4(%rsp)
+	movl	%ecx, (%rsp)
+.Ltmp0:
+	.cv_loc	0 1 2 0                 # t.c:2:0
+	movl	(%rsp), %eax
+	cmpl	4(%rsp), %eax
+	jle	.LBB0_2
+# %bb.1:                                # %cond.true
+	movl	(%rsp), %eax
+	jmp	.LBB0_3
+.LBB0_2:                                # %cond.false
+	movl	4(%rsp), %eax
+.LBB0_3:                                # %cond.end
+	popq	%rcx
+	retq
+.Ltmp1:
+.Lfunc_end0:
+	.seh_handlerdata
+	.text
+	.seh_endproc
+                                        # -- End function
+	.section	.debug$S,"dr"
+	.p2align	2
+	.long 4
+	.long	241                     # Symbol subsection for max
+	.long	.Ltmp7-.Ltmp6           # Subsection size
+.Ltmp6:
+	.short	.Ltmp9-.Ltmp8           # Record length
+.Ltmp8:
+	.short	4423                    # Record kind: S_GPROC32_ID
+	.long	0                       # PtrParent
+	.long	0                       # PtrEnd
+	.long	0                       # PtrNext
+	.long	.Lfunc_end0-max         # Code size
+	.long	0                       # Offset after prologue
+	.long	0                       # Offset before epilogue
+	.long	4098                    # Function type index
+	.secrel32	max             # Function section relative address
+	.secidx	max                     # Function section index
+	.byte	0                       # Flags
+	.asciz	"max"                   # Function name
+.Ltmp9:
+	.short	.Ltmp11-.Ltmp10         # Record length
+.Ltmp10:
+	.short	4114                    # Record kind: S_FRAMEPROC
+	.long	8                       # FrameSize
+	.long	0                       # Padding
+	.long	0                       # Offset of padding
+	.long	0                       # Bytes of callee saved registers
+	.long	0                       # Exception handler offset
+	.short	0                       # Exception handler section
+	.long	81920                   # Flags (defines frame register)
+.Ltmp11:
+	.short	.Ltmp13-.Ltmp12         # Record length
+.Ltmp12:
+	.short	4414                    # Record kind: S_LOCAL
+	.long	18                      # TypeIndex
+	.short	1                       # Flags
+	.asciz	"a"
+	# BUG
+	.p2align 2
+.Ltmp13:
+	.cv_def_range	 .Ltmp0 .Ltmp1, "B\021\000\000\000\000"
+	.short	.Ltmp15-.Ltmp14         # Record length
+.Ltmp14:
+	.short	4414                    # Record kind: S_LOCAL
+	.long	18                      # TypeIndex
+	.short	1                       # Flags
+	.asciz	"b"
+	# BUG
+	.p2align 2
+.Ltmp15:
+	.cv_def_range	 .Ltmp0 .Ltmp1, "B\021\004\000\000\000"
+	.short	2                       # Record length
+	.short	4431                    # Record kind: S_PROC_ID_END
+.Ltmp7:
+	.p2align	2
+	.cv_linetable	0, max, .Lfunc_end0
+	.cv_filechecksums               # File index to string table offset subsection
+	.cv_stringtable                 # String table
+	.long	241
+	.long	.Ltmp17-.Ltmp16         # Subsection size
+.Ltmp16:
+.Ltmp17:
+	.p2align	2
+	.section	.debug$T,"dr"
+	.p2align	2
+	.long	4                       # Debug section magic
+	# ArgList (0x1000) {
+	#   TypeLeafKind: LF_ARGLIST (0x1201)
+	#   NumArgs: 2
+	#   Arguments [
+	#     ArgType: long (0x12)
+	#     ArgType: long (0x12)
+	#   ]
+	# }
+	.byte	0x0e, 0x00, 0x01, 0x12
+	.byte	0x02, 0x00, 0x00, 0x00
+	.byte	0x12, 0x00, 0x00, 0x00
+	.byte	0x12, 0x00, 0x00, 0x00
+	# Procedure (0x1001) {
+	#   TypeLeafKind: LF_PROCEDURE (0x1008)
+	#   ReturnType: long (0x12)
+	#   CallingConvention: NearC (0x0)
+	#   FunctionOptions [ (0x0)
+	#   ]
+	#   NumParameters: 2
+	#   ArgListType: (long, long) (0x1000)
+	# }
+	.byte	0x0e, 0x00, 0x08, 0x10
+	.byte	0x12, 0x00, 0x00, 0x00
+	.byte	0x00, 0x00, 0x02, 0x00
+	.byte	0x00, 0x10, 0x00, 0x00
+	# FuncId (0x1002) {
+	#   TypeLeafKind: LF_FUNC_ID (0x1601)
+	#   ParentScope: 0x0
+	#   FunctionType: long (long, long) (0x1001)
+	#   Name: max
+	# }
+	.byte	0x0e, 0x00, 0x01, 0x16
+	.byte	0x00, 0x00, 0x00, 0x00
+	.byte	0x01, 0x10, 0x00, 0x00
+	.byte	0x6d, 0x61, 0x78, 0x00
+
diff --git a/test/MC/COFF/file.s b/test/MC/COFF/file.s
index a18a1f4..dbe3b02 100644
--- a/test/MC/COFF/file.s
+++ b/test/MC/COFF/file.s
@@ -1,6 +1,11 @@
 // RUN: llvm-mc -triple i686-windows -filetype obj %s -o - | llvm-objdump -t - \
 // RUN:   | FileCheck %s
 
+// Round trip through .s output to exercise MCAsmStreamer.
+// RUN: llvm-mc -triple i686-windows %s -o - \
+// RUN:   | llvm-mc -triple i686-windows - -filetype=obj -o - | llvm-objdump -t - \
+// RUN:   | FileCheck %s
+
 // RUN: llvm-mc -triple i686-windows -filetype obj %s -o - \
 // RUN:	  | llvm-readobj -symbols | FileCheck %s -check-prefix CHECK-SCN
 
diff --git a/test/MC/Disassembler/AArch64/armv8.5a-predctrl.txt b/test/MC/Disassembler/AArch64/armv8.5a-predctrl.txt
deleted file mode 100644
index ecfdeec..0000000
--- a/test/MC/Disassembler/AArch64/armv8.5a-predctrl.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# RUN: llvm-mc -triple=aarch64 -mattr=+predctrl -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=aarch64 -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=aarch64 -mattr=-predctrl -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
-
-[0x80 0x73 0x0b 0xd5]
-[0xa1 0x73 0x0b 0xd5]
-[0xe2 0x73 0x0b 0xd5]
-
-# CHECK: cfp rctx, x0
-# CHECK: dvp rctx, x1
-# CHECK: cpp rctx, x2
-
-# NOSB: sys #3, c7, c3, #4, x0
-# NOSB: sys #3, c7, c3, #5, x1
-# NOSB: sys #3, c7, c3, #7, x2
diff --git a/test/MC/Disassembler/AArch64/armv8.5a-predres.txt b/test/MC/Disassembler/AArch64/armv8.5a-predres.txt
new file mode 100644
index 0000000..5d4e073
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/armv8.5a-predres.txt
@@ -0,0 +1,15 @@
+# RUN: llvm-mc -triple=aarch64 -mattr=+predres -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=aarch64 -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=aarch64 -mattr=-predres -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
+
+[0x80 0x73 0x0b 0xd5]
+[0xa1 0x73 0x0b 0xd5]
+[0xe2 0x73 0x0b 0xd5]
+
+# CHECK: cfp rctx, x0
+# CHECK: dvp rctx, x1
+# CHECK: cpp rctx, x2
+
+# NOSB: sys #3, c7, c3, #4, x0
+# NOSB: sys #3, c7, c3, #5, x1
+# NOSB: sys #3, c7, c3, #7, x2
diff --git a/test/MC/Disassembler/AArch64/armv8.5a-sb.txt b/test/MC/Disassembler/AArch64/armv8.5a-sb.txt
new file mode 100644
index 0000000..25f32d4
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/armv8.5a-sb.txt
@@ -0,0 +1,9 @@
+# RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+sb -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=-sb -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
+
+# New reg
+0xff 0x30 0x03 0xd5
+
+# CHECK: sb
+# NOSB:  msr S0_3_C3_C0_7, xzr
diff --git a/test/MC/Disassembler/AArch64/armv8.5a-specctrl.txt b/test/MC/Disassembler/AArch64/armv8.5a-specctrl.txt
deleted file mode 100644
index d2174ae..0000000
--- a/test/MC/Disassembler/AArch64/armv8.5a-specctrl.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+specctrl -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=-specctrl -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
-
-# New reg
-0xff 0x30 0x03 0xd5
-
-# CHECK: sb
-# NOSB:  msr S0_3_C3_C0_7, xzr
diff --git a/test/MC/Disassembler/ARM/armv8.5a-sb-thumb.txt b/test/MC/Disassembler/ARM/armv8.5a-sb-thumb.txt
new file mode 100644
index 0000000..f2389f1
--- /dev/null
+++ b/test/MC/Disassembler/ARM/armv8.5a-sb-thumb.txt
@@ -0,0 +1,9 @@
+# RUN: llvm-mc -triple=thumbv8 -mattr=+sb -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=thumbv8 -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=thumbv8 -mattr=-sb -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
+
+0xbf 0xf3 0x70 0x8f
+
+# CHECK: sb
+# NOSB: invalid instruction encoding
+# NOSB-NEXT: 0xbf 0xf3 0x70 0x8f
diff --git a/test/MC/Disassembler/ARM/armv8.5a-sb.txt b/test/MC/Disassembler/ARM/armv8.5a-sb.txt
new file mode 100644
index 0000000..40c9478
--- /dev/null
+++ b/test/MC/Disassembler/ARM/armv8.5a-sb.txt
@@ -0,0 +1,9 @@
+# RUN: llvm-mc -triple=armv8 -mattr=+sb -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=armv8 -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
+# RUN: llvm-mc -triple=armv8 -mattr=-sb -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
+
+0x70 0xf0 0x7f 0xf5
+
+# CHECK:    sb
+# NOSB: invalid instruction encoding
+# NOSB-NEXT: 0x70 0xf0 0x7f 0xf5
diff --git a/test/MC/Disassembler/ARM/armv8.5a-specctrl-thumb.txt b/test/MC/Disassembler/ARM/armv8.5a-specctrl-thumb.txt
deleted file mode 100644
index 5703408..0000000
--- a/test/MC/Disassembler/ARM/armv8.5a-specctrl-thumb.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# RUN: llvm-mc -triple=thumbv8 -mattr=+specctrl -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=thumbv8 -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=thumbv8 -mattr=-specctrl -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
-
-0xbf 0xf3 0x70 0x8f
-
-# CHECK: sb
-# NOSB: invalid instruction encoding
-# NOSB-NEXT: 0xbf 0xf3 0x70 0x8f
diff --git a/test/MC/Disassembler/ARM/armv8.5a-specctrl.txt b/test/MC/Disassembler/ARM/armv8.5a-specctrl.txt
deleted file mode 100644
index f9d8b53..0000000
--- a/test/MC/Disassembler/ARM/armv8.5a-specctrl.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# RUN: llvm-mc -triple=armv8 -mattr=+specctrl -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=armv8 -mattr=+v8.5a    -disassemble < %s      | FileCheck %s
-# RUN: llvm-mc -triple=armv8 -mattr=-specctrl -disassemble < %s 2>&1 | FileCheck %s --check-prefix=NOSB
-
-0x70 0xf0 0x7f 0xf5
-
-# CHECK:    sb
-# NOSB: invalid instruction encoding
-# NOSB-NEXT: 0x70 0xf0 0x7f 0xf5
diff --git a/test/MC/Disassembler/MSP430/msp430.txt b/test/MC/Disassembler/MSP430/msp430.txt
index c7d6ff5..8e06f9d 100644
--- a/test/MC/Disassembler/MSP430/msp430.txt
+++ b/test/MC/Disassembler/MSP430/msp430.txt
@@ -19,9 +19,10 @@
 0x1f 0x40 0x2a 0x00           # CHECK: mov 42, r15
 0xb0 0x12 0x81 0x01           # CHECK: call #385
 0x97 0x12 0x06 0x00           # CHECK: call 6(r7)
-0xa7 0xb2 0x02 0x00           # CHECK: bit #34, 2(r7)
+0xa7 0xb2 0x02 0x00           # CHECK: bit #4, 2(r7)
 0xa9 0x57 0x08 0x00           # CHECK: add @r7, 8(r9)
 0xb7 0xe7 0xfe 0xff           # CHECK: xor @r7+, -2(r7)
 
 0xbf 0x40 0x2a 0x00 0x0c 0x00 # CHECK: mov #42, 12(r15)
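+# The word 0xb2a7 selects r2/SR as source with As=10, which the MSP430 constant
+# generator defines as the literal #4, so the 4-byte form above now checks
+# bit #4; encoding #34 needs the immediate word carried by this 6-byte form.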
+0xb7 0xb0 0x22 0x00 0x02 0x00 # CHECK: bit #34, 2(r7)
 0x9a 0xb9 0x10 0x00 0x08 0x00 # CHECK: bit 16(r9), 8(r10)
diff --git a/test/MC/Disassembler/MSP430/unknown.txt b/test/MC/Disassembler/MSP430/unknown.txt
new file mode 100644
index 0000000..d7e680b
--- /dev/null
+++ b/test/MC/Disassembler/MSP430/unknown.txt
@@ -0,0 +1,13 @@
+# RUN: not llvm-mc -disassemble -triple=msp430 %s 2>&1 | FileCheck %s
+
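+# Each input below is a truncated prefix of a longer valid instruction; the
+# disassembler must reject it rather than read past the end of the buffer.
+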
+# This should not decode as 'and.b @r15+, (0)r1' [0xf1,0xff,0x00,0x00]
+[0xf1 0xff]
+# CHECK: warning: invalid instruction encoding
+
+# This should not decode as 'add 6(r7), 6(r5)' [0x95 0x57 0x06 0x00 0x06 0x00]
+[0x95 0x57 0x06 0x00]
+# CHECK: warning: invalid instruction encoding
+
+# This should not decode as 'call 6(r7)' [0x97 0x12 0x06 0x00]
+[0x97 0x12]
+# CHECK: warning: invalid instruction encoding
diff --git a/test/MC/Disassembler/WebAssembly/wasm-error.txt b/test/MC/Disassembler/WebAssembly/wasm-error.txt
new file mode 100644
index 0000000..6468a4f
--- /dev/null
+++ b/test/MC/Disassembler/WebAssembly/wasm-error.txt
@@ -0,0 +1,9 @@
+# RUN: llvm-mc --disassemble %s -triple=wasm32-unknown-unknown | FileCheck %s
+
+# CHECK: .text
+
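+# The disassembler is expected to keep going on bad immediates, printing a
+# placeholder instead: 0x00 is not a valid block-type encoding, and a branch
+# depth of 16 is presumably deeper than anything in scope here.
+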
+# CHECK: block invalid_type
+0x02 0x00
+
+# CHECK: br 16 # Invalid depth argument!
+0x0C 0x10
diff --git a/test/MC/Disassembler/WebAssembly/wasm.txt b/test/MC/Disassembler/WebAssembly/wasm.txt
index fd21db9..2632f23 100644
--- a/test/MC/Disassembler/WebAssembly/wasm.txt
+++ b/test/MC/Disassembler/WebAssembly/wasm.txt
@@ -14,21 +14,19 @@
 # CHECK: i64.load32_u 16:p2align=1
 0x35 0x01 0x10
 
-# CHECK: block
-# 3
-# FIXME: WebAssemblyInstPrinter does not currently print block number.
-0x02 0x03
+# CHECK: block f64
+0x02 0x7C
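+# (0x7C is the binary encoding of the f64 value type, used here as the block's
+# type immediate.)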
 
 # CHECK: call_indirect
 # $0=, 128, 0
 # FIXME: WebAssemblyInstPrinter does not print immediates.
 0x11 0x80 0x01 0x00
 
-# CHECK: get_local 128
+# CHECK: local.get 128
 0x20 0x80 0x01
 
 # Prefix byte example:
-# CHECK: i64.trunc_u:sat/f64
+# CHECK: i64.trunc_sat_f64_u
 0xFC 0x07
 
 # v128.const is arbitrarily disassembled as v16i8
@@ -46,3 +44,7 @@
 # CHECK: i64x2.any_true
 # CHECK-NOT: i64.div_u
 0xFD 0x85 0x81 0x80 0x80 0x80 0x80 0x00
+
+# Check br_table, which has its own operand type.
+# CHECK: br_table {0, 1, 2}
+0x0E 0x02 0x00 0x01 0x02
diff --git a/test/MC/ELF/ARM/clang-section.s b/test/MC/ELF/ARM/clang-section.s
index 73bae69..0018cfe 100644
--- a/test/MC/ELF/ARM/clang-section.s
+++ b/test/MC/ELF/ARM/clang-section.s
@@ -220,7 +220,7 @@
 	.size	p, 4
 
 
-	.ident	"clang version 5.0.0 (http://llvm.org/git/clang.git 254242a3ad440307fb451093a429c71ea9a8c888) (http://llvm.org/git/llvm.git 3c8daefbe3d1672ac1dae775b211f881f0063038)"
+	.ident	"clang version 5.0.0"
 	.section	".note.GNU-stack","",%progbits
 	.eabi_attribute	30, 1	@ Tag_ABI_optimization_goals
 
diff --git a/test/MC/ELF/cfi-b-key-frame.s b/test/MC/ELF/cfi-b-key-frame.s
new file mode 100644
index 0000000..95d5146
--- /dev/null
+++ b/test/MC/ELF/cfi-b-key-frame.s
@@ -0,0 +1,7 @@
+// REQUIRES: aarch64-registered-target
+// RUN: llvm-mc -filetype=obj -triple aarch64-arm-none-eabi %s -o - | llvm-dwarfdump - -v | FileCheck %s
+#CHECK:   Augmentation:          "zRB"
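+// The 'B' augmentation character presumably marks frames whose return address
+// is signed with the B pointer-authentication key, as requested by
+// .cfi_b_key_frame below.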
+f1:
+        .cfi_startproc
+        .cfi_b_key_frame
+        .cfi_endproc
diff --git a/test/MC/MSP430/addrmode.s b/test/MC/MSP430/addrmode.s
index 46051c0..c787e68 100644
--- a/test/MC/MSP430/addrmode.s
+++ b/test/MC/MSP430/addrmode.s
@@ -21,11 +21,13 @@
   mov #42, 12(r15)
   mov #42, &disp
   mov disp, disp+2
+  mov r7, @r15
 
 ; CHECK: mov #42, r15          ; encoding: [0x3f,0x40,0x2a,0x00]
 ; CHECK: mov #42, 12(r15)      ; encoding: [0xbf,0x40,0x2a,0x00,0x0c,0x00]
 ; CHECK: mov #42, &disp        ; encoding: [0xb2,0x40,0x2a,0x00,A,A]
 ; CHECK: mov disp, disp+2      ; encoding: [0x90,0x40,A,A,B,B]
+; CHECK: mov r7, 0(r15)        ; encoding: [0x8f,0x47,0x00,0x00]
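+; (@r15 is not an encodable destination mode, so the assembler presumably
+; lowers it to the equivalent indexed form 0(r15), as the CHECK above shows.)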
 
   add r7, r8
   add 6(r7), r8
@@ -93,6 +95,8 @@
 
   call r7
   call 6(r7)
+  call @r7
+  call @r7+
   call disp+6(r7)
   call &disp
   call disp
@@ -100,11 +104,61 @@
 
 ; CHECK: call r7               ; encoding: [0x87,0x12]
 ; CHECK: call 6(r7)            ; encoding: [0x97,0x12,0x06,0x00]
+; CHECK: call @r7              ; encoding: [0xa7,0x12]
+; CHECK: call @r7+             ; encoding: [0xb7,0x12]
 ; CHECK: call disp+6(r7)       ; encoding: [0x97,0x12,A,A]
 ; CHECK: call &disp            ; encoding: [0x92,0x12,A,A]
 ; CHECK: call disp             ; encoding: [0x90,0x12,A,A]
 ; CHECK: call #disp            ; encoding: [0xb0,0x12,A,A]
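+; (call @r7 and call @r7+ above differ only in the As addressing-mode bits of
+; the first byte, 0xa7 versus 0xb7.)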
 
+  rra r7      ; CHECK: rra r7                ; encoding: [0x07,0x11]
+  rra 2(r7)   ; CHECK: rra 2(r7)             ; encoding: [0x17,0x11,0x02,0x00]
+  rra @r7     ; CHECK: rra @r7               ; encoding: [0x27,0x11]
+  rra @r7+    ; CHECK: rra @r7+              ; encoding: [0x37,0x11]
+
+  rrc r7      ; CHECK: rrc r7                ; encoding: [0x07,0x10]
+  rrc 2(r7)   ; CHECK: rrc 2(r7)             ; encoding: [0x17,0x10,0x02,0x00]
+  rrc @r7     ; CHECK: rrc @r7               ; encoding: [0x27,0x10]
+  rrc @r7+    ; CHECK: rrc @r7+              ; encoding: [0x37,0x10]
+
+  swpb r7     ; CHECK: swpb r7               ; encoding: [0x87,0x10]
+  swpb 2(r7)  ; CHECK: swpb 2(r7)            ; encoding: [0x97,0x10,0x02,0x00]
+  swpb @r7    ; CHECK: swpb @r7              ; encoding: [0xa7,0x10]
+  swpb @r7+   ; CHECK: swpb @r7+             ; encoding: [0xb7,0x10]
+
+  sxt r7      ; CHECK: sxt r7                ; encoding: [0x87,0x11]
+  sxt 2(r7)   ; CHECK: sxt 2(r7)             ; encoding: [0x97,0x11,0x02,0x00]
+  sxt @r7     ; CHECK: sxt @r7               ; encoding: [0xa7,0x11]
+  sxt @r7+    ; CHECK: sxt @r7+              ; encoding: [0xb7,0x11]
+
+  cmp r5, r7        ; CHECK: cmp r5, r7        ; encoding: [0x07,0x95]
+  cmp 2(r5), r7     ; CHECK: cmp 2(r5), r7     ; encoding: [0x17,0x95,0x02,0x00]
+  cmp #-1, r7       ; CHECK: cmp #-1, r7       ; encoding: [0x37,0x93]
+  cmp #42, r7       ; CHECK: cmp #42, r7       ; encoding: [0x37,0x90,0x2a,0x00]
+  cmp @r5, r7       ; CHECK: cmp @r5, r7       ; encoding: [0x27,0x95]
+  cmp @r5+, r7      ; CHECK: cmp @r5+, r7      ; encoding: [0x37,0x95]
+
+  cmp r5, 2(r7)     ; CHECK: cmp r5, 2(r7)     ; encoding: [0x87,0x95,0x02,0x00]
+  cmp 2(r7), 2(r7)  ; CHECK: cmp 2(r7), 2(r7)  ; encoding: [0x97,0x97,0x02,0x00,0x02,0x00]
+  cmp #-1, 2(r7)    ; CHECK: cmp #-1, 2(r7)    ; encoding: [0xb7,0x93,0x02,0x00]
+  cmp #42, 2(r7)    ; CHECK: cmp #42, 2(r7)    ; encoding: [0xb7,0x90,0x2a,0x00,0x02,0x00]
+  cmp @r5, 2(r7)    ; CHECK: cmp @r5, 2(r7)    ; encoding: [0xa7,0x95,0x02,0x00]
+  cmp @r5+, 2(r7)   ; CHECK: cmp @r5+, 2(r7)   ; encoding: [0xb7,0x95,0x02,0x00]
+
+  bit r5, r7        ; CHECK: bit r5, r7        ; encoding: [0x07,0xb5]
+  bit 2(r5), r7     ; CHECK: bit 2(r5), r7     ; encoding: [0x17,0xb5,0x02,0x00]
+  bit #-1, r7       ; CHECK: bit #-1, r7       ; encoding: [0x37,0xb3]
+  bit #42, r7       ; CHECK: bit #42, r7       ; encoding: [0x37,0xb0,0x2a,0x00]
+  bit @r5, r7       ; CHECK: bit @r5, r7       ; encoding: [0x27,0xb5]
+  bit @r5+, r7      ; CHECK: bit @r5+, r7      ; encoding: [0x37,0xb5]
+
+  bit r5, 2(r7)     ; CHECK: bit r5, 2(r7)     ; encoding: [0x87,0xb5,0x02,0x00]
+  bit 2(r7), 2(r7)  ; CHECK: bit 2(r7), 2(r7)  ; encoding: [0x97,0xb7,0x02,0x00,0x02,0x00]
+  bit #-1, 2(r7)    ; CHECK: bit #-1, 2(r7)    ; encoding: [0xb7,0xb3,0x02,0x00]
+  bit #42, 2(r7)    ; CHECK: bit #42, 2(r7)    ; encoding: [0xb7,0xb0,0x2a,0x00,0x02,0x00]
+  bit @r5, 2(r7)    ; CHECK: bit @r5, 2(r7)    ; encoding: [0xa7,0xb5,0x02,0x00]
+  bit @r5+, 2(r7)   ; CHECK: bit @r5+, 2(r7)   ; encoding: [0xb7,0xb5,0x02,0x00]
+
 disp:
   .word 0xcafe
   .word 0xbabe
diff --git a/test/MC/MSP430/const.s b/test/MC/MSP430/const.s
index f5cca10..dfaf32f 100644
--- a/test/MC/MSP430/const.s
+++ b/test/MC/MSP430/const.s
@@ -1,10 +1,13 @@
 ; RUN: llvm-mc -triple msp430 -show-encoding < %s | FileCheck %s
-  mov #4, r15 ; CHECK: mov #4, r15 ; encoding: [0x2f,0x42]
-  mov #8, r15 ; CHECK: mov #8, r15 ; encoding: [0x3f,0x42]
-  mov #0, r15 ; CHECK: clr r15     ; encoding: [0x0f,0x43]
-  mov #1, r15 ; CHECK: mov #1, r15 ; encoding: [0x1f,0x43]
-  mov #2, r15 ; CHECK: mov #2, r15 ; encoding: [0x2f,0x43]
-  mov #-1, r7 ; CHECK: mov #-1, r7 ; encoding: [0x37,0x43]
 
-  push #8     ; CHECK: push #8     ; encoding: [0x32,0x12]
-  push #42    ; CHECK: push #42    ; encoding: [0x30,0x12,0x2a,0x00]
+  mov #4, r15   ; CHECK: mov #4, r15   ; encoding: [0x2f,0x42]
+  mov #8, r15   ; CHECK: mov #8, r15   ; encoding: [0x3f,0x42]
+  mov #0, r15   ; CHECK: clr r15       ; encoding: [0x0f,0x43]
+  mov #1, r15   ; CHECK: mov #1, r15   ; encoding: [0x1f,0x43]
+  mov #2, r15   ; CHECK: mov #2, r15   ; encoding: [0x2f,0x43]
+  mov #-1, r7   ; CHECK: mov #-1, r7   ; encoding: [0x37,0x43]
+
+  push #8       ; CHECK: push #8       ; encoding: [0x32,0x12]
+  push #42      ; CHECK: push #42      ; encoding: [0x30,0x12,0x2a,0x00]
+
+  bit #1, 0(r7) ; CHECK: bit #1, 0(r7) ; encoding: [0x97,0xb3,0x00,0x00]
diff --git a/test/MC/MSP430/invalid.s b/test/MC/MSP430/invalid.s
index 2815b52..ce686bd 100644
--- a/test/MC/MSP430/invalid.s
+++ b/test/MC/MSP430/invalid.s
@@ -4,7 +4,6 @@
   mov    r7        ; CHECK: :[[@LINE]]:3: error: too few operands for instruction
 
   ;; invalid destination addressing modes
-  mov    r7, @r15  ; CHECK: :[[@LINE]]:14: error: invalid operand for instruction
   mov    r7, @r15+ ; CHECK: :[[@LINE]]:14: error: invalid operand for instruction
   mov    r7, #0    ; CHECK: :[[@LINE]]:14: error: invalid operand for instruction
   mov    r7, #123  ; CHECK: :[[@LINE]]:14: error: invalid operand for instruction
diff --git a/test/MC/MSP430/msp430-separator.s b/test/MC/MSP430/msp430-separator.s
new file mode 100644
index 0000000..498e86e
--- /dev/null
+++ b/test/MC/MSP430/msp430-separator.s
@@ -0,0 +1,15 @@
+; RUN: llvm-mc -triple msp430 < %s | FileCheck %s
+
+; MSP430 supports multiple assembly statements on the same line
+; separated by a '{' character.
+
+; Check that the '{' is recognized as a line separator and
+; multiple statements are correctly parsed.
+
+_foo:
+; CHECK:      foo
+; CHECK:      add r10, r11
+; CHECK-NEXT: call r11
+; CHECK-NEXT: mov r11, 2(r1)
+add r10, r11 { call r11 { mov r11, 2(r1)
+ret
diff --git a/test/MC/MachO/file-single.s b/test/MC/MachO/file-single.s
new file mode 100644
index 0000000..747af22
--- /dev/null
+++ b/test/MC/MachO/file-single.s
@@ -0,0 +1,8 @@
+// RUN: not llvm-mc -triple i386-apple-darwin9 %s -o /dev/null 2>&1 | FileCheck %s
+
+// Previously this crashed MC.
+
+// CHECK: error: target does not support '.file' without a number
+
+        .file "dir/foo"
+        nop
diff --git a/test/MC/Mips/cprestore-noreorder-noat.s b/test/MC/Mips/cprestore-noreorder-noat.s
index 24c99d4..cba299e 100644
--- a/test/MC/Mips/cprestore-noreorder-noat.s
+++ b/test/MC/Mips/cprestore-noreorder-noat.s
@@ -1,6 +1,6 @@
 # RUN: not llvm-mc %s -triple mips-unknown-linux-gnu -mcpu=mips32 \
 # RUN:                --position-independent -filetype=obj -o /dev/null 2>&1 \
-# RUN;  | FileCheck %s -check-prefix=O32
+# RUN:  | FileCheck %s -check-prefix=O32
 
 # RUN: llvm-mc %s -triple mips64-unknown-linux-gnu -filetype=obj \
 # RUN:            -o /dev/null 2>&1 \
diff --git a/test/MC/Mips/cpsetup.s b/test/MC/Mips/cpsetup.s
index 907e4fe..c963df0 100644
--- a/test/MC/Mips/cpsetup.s
+++ b/test/MC/Mips/cpsetup.s
@@ -1,5 +1,5 @@
 # RUN: llvm-mc -triple mips-unknown-linux -target-abi o32 -filetype=obj -o - %s | \
-# RUN:   llvm-objdump -d -r - | FileCheck -check-prefixes=ALL,O32 %s
+# RUN:   llvm-objdump -d -r -z - | FileCheck -check-prefixes=ALL,O32 %s
 
 # RUN: llvm-mc -triple mips-unknown-linux -target-abi o32 %s | \
 # RUN:   FileCheck -check-prefixes=ALL,ASM,ASM-O32 %s
@@ -7,14 +7,14 @@
 # FIXME: Now we check .cpsetup expansion for `-mno-shared` case only.
 #        We also need to implement/check the `-mshared` case.
 # RUN: llvm-mc -triple mips64-unknown-linux -target-abi n32 -filetype=obj -o - %s | \
-# RUN:   llvm-objdump -d -r - | \
+# RUN:   llvm-objdump -d -r -z - | \
 # RUN:   FileCheck -check-prefixes=ALL,NXX,N32 %s
 
 # RUN: llvm-mc -triple mips64-unknown-linux -target-abi n32 %s | \
 # RUN:   FileCheck -check-prefixes=ALL,ASM,ASM-N32 %s
 
 # RUN: llvm-mc -triple mips64-unknown-linux %s -filetype=obj -o - | \
-# RUN:   llvm-objdump -d -r - | \
+# RUN:   llvm-objdump -d -r -z - | \
 # RUN:   FileCheck -check-prefixes=ALL,NXX,N64 %s
 
 # RUN: llvm-mc -triple mips64-unknown-linux %s | \
diff --git a/test/MC/Mips/micromips-branch-fixup.s b/test/MC/Mips/micromips-branch-fixup.s
index 98b4842..3ad625f 100644
--- a/test/MC/Mips/micromips-branch-fixup.s
+++ b/test/MC/Mips/micromips-branch-fixup.s
@@ -24,29 +24,23 @@
 # CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
 # CHECK-FIXUP: nop             # encoding: [0x00,0x0c]
 # CHECK-FIXUP: beq $3, $4, bar # encoding: [0x83'A',0x94'A',0x00,0x00]
-# CHECK-FIXUP:                 #   fixup A - offset: 0,
-# CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
-# CHECK-FIXUP: nop             # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-FIXUP:                 #   fixup A - offset: 0, value: bar, kind: fixup_MICROMIPS_PC16_S1
+# CHECK-FIXUP: nop             # encoding: [0x00,0x0c]
 # CHECK-FIXUP: bne $3, $4, bar # encoding: [0x83'A',0xb4'A',0x00,0x00]
-# CHECK-FIXUP:                 #   fixup A - offset: 0,
-# CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
-# CHECK-FIXUP: nop             # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-FIXUP:                 #   fixup A - offset: 0, value: bar, kind: fixup_MICROMIPS_PC16_S1
+# CHECK-FIXUP: nop             # encoding: [0x00,0x0c]
 # CHECK-FIXUP: bgez    $4, bar # encoding: [0x44'A',0x40'A',0x00,0x00]
-# CHECK-FIXUP:                 #   fixup A - offset: 0,
-# CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
-# CHECK-FIXUP: nop             # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-FIXUP-NEXT:                 #   fixup A - offset: 0, value: bar, kind: fixup_MICROMIPS_PC16_S1
+# CHECK-FIXUP-NEXT: nop             # encoding: [0x00,0x0c]
 # CHECK-FIXUP: bgtz    $4, bar # encoding: [0xc4'A',0x40'A',0x00,0x00]
-# CHECK-FIXUP:                 #   fixup A - offset: 0,
-# CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
-# CHECK-FIXUP: nop             # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-FIXUP-NEXT:                 #   fixup A - offset: 0, value: bar, kind: fixup_MICROMIPS_PC16_S1
+# CHECK-FIXUP-NEXT: nop             # encoding: [0x00,0x0c]
 # CHECK-FIXUP: blez    $4, bar # encoding: [0x84'A',0x40'A',0x00,0x00]
-# CHECK-FIXUP:                 #   fixup A - offset: 0,
-# CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
-# CHECK-FIXUP: nop             # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-FIXUP-NEXT:                 #   fixup A - offset: 0, value: bar, kind: fixup_MICROMIPS_PC16_S1
+# CHECK-FIXUP-NEXT: nop             # encoding: [0x00,0x0c]
 # CHECK-FIXUP: bltz    $4, bar # encoding: [0x04'A',0x40'A',0x00,0x00]
-# CHECK-FIXUP:                 #   fixup A - offset: 0,
-# CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
-# CHECK-FIXUP: nop             # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-FIXUP-NEXT:                 #   fixup A - offset: 0, value: bar, kind: fixup_MICROMIPS_PC16_S1
+# CHECK-FIXUP-NEXT: nop             # encoding: [0x00,0x0c]
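+# (The expected delay-slot filler above is now the 2-byte microMIPS nop,
+# encoding 0x0c00, rather than the 4-byte MIPS nop.)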
 # CHECK-FIXUP: bgezal  $4, bar # encoding: [0x64'A',0x40'A',0x00,0x00]
 # CHECK-FIXUP:                 #   fixup A - offset: 0,
 # CHECK-FIXUP:                     value: bar, kind: fixup_MICROMIPS_PC16_S1
diff --git a/test/MC/Mips/micromips-branch-instructions.s b/test/MC/Mips/micromips-branch-instructions.s
index e85b925..d82d5ad 100644
--- a/test/MC/Mips/micromips-branch-instructions.s
+++ b/test/MC/Mips/micromips-branch-instructions.s
@@ -12,23 +12,23 @@
 # CHECK-EL: b   1332             # encoding: [0x00,0x94,0x9a,0x02]
 # CHECK-EL: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: beq $9, $6, 1332     # encoding: [0xc9,0x94,0x9a,0x02]
-# CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL-NEXT: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: bgez $6, 1332        # encoding: [0x46,0x40,0x9a,0x02]
-# CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL-NEXT: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: bgezal $6, 1332      # encoding: [0x66,0x40,0x9a,0x02]
 # CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EL: bltzal $6, 1332      # encoding: [0x26,0x40,0x9a,0x02]
 # CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EL: bgtz $6, 1332        # encoding: [0xc6,0x40,0x9a,0x02]
-# CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL-NEXT: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: blez $6, 1332        # encoding: [0x86,0x40,0x9a,0x02]
-# CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL-NEXT: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: bne $9, $6, 1332     # encoding: [0xc9,0xb4,0x9a,0x02]
-# CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL-NEXT: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: bal 1332             # encoding: [0x60,0x40,0x9a,0x02]
 # CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EL: bltz $6, 1332        # encoding: [0x06,0x40,0x9a,0x02]
-# CHECK-EL: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL-NEXT: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: bgezals $6, 1332     # encoding: [0x66,0x42,0x9a,0x02]
 # CHECK-EL: nop                  # encoding: [0x00,0x0c]
 # CHECK-EL: bltzals $6, 1332     # encoding: [0x26,0x42,0x9a,0x02]
@@ -39,23 +39,23 @@
 # CHECK-EB: b   1332             # encoding: [0x94,0x00,0x02,0x9a]
 # CHECK-EB: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: beq $9, $6, 1332     # encoding: [0x94,0xc9,0x02,0x9a]
-# CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB-NEXT: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: bgez $6, 1332        # encoding: [0x40,0x46,0x02,0x9a]
-# CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB-NEXT: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: bgezal $6, 1332      # encoding: [0x40,0x66,0x02,0x9a]
 # CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EB: bltzal $6, 1332      # encoding: [0x40,0x26,0x02,0x9a]
 # CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EB: bgtz $6, 1332        # encoding: [0x40,0xc6,0x02,0x9a]
-# CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB-NEXT: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: blez $6, 1332        # encoding: [0x40,0x86,0x02,0x9a]
-# CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB-NEXT: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: bne $9, $6, 1332     # encoding: [0xb4,0xc9,0x02,0x9a]
-# CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB-NEXT: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: bal 1332             # encoding: [0x40,0x60,0x02,0x9a]
 # CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EB: bltz $6, 1332        # encoding: [0x40,0x06,0x02,0x9a]
-# CHECK-EB: nop                  # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB-NEXT: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: bgezals $6, 1332     # encoding: [0x42,0x66,0x02,0x9a]
 # CHECK-EB: nop                  # encoding: [0x0c,0x00]
 # CHECK-EB: bltzals $6, 1332     # encoding: [0x42,0x26,0x02,0x9a]
diff --git a/test/MC/Mips/micromips-el-fixup-data.s b/test/MC/Mips/micromips-el-fixup-data.s
index 0f26c39..0b9a02f 100644
--- a/test/MC/Mips/micromips-el-fixup-data.s
+++ b/test/MC/Mips/micromips-el-fixup-data.s
@@ -16,7 +16,7 @@
     addiu   $sp, $sp, -16
     bnez    $9, lab1
 
-# CHECK:    09 b4 04 00    bnez $9, 12
+# CHECK:    09 b4 03 00    bnez $9, 10
 
     addu    $zero, $zero, $zero
 lab1:
diff --git a/test/MC/Mips/micromips-jump-instructions.s b/test/MC/Mips/micromips-jump-instructions.s
index 3147a3f..f8d4974 100644
--- a/test/MC/Mips/micromips-jump-instructions.s
+++ b/test/MC/Mips/micromips-jump-instructions.s
@@ -10,7 +10,7 @@
 # Little endian
 #------------------------------------------------------------------------------
 # CHECK-EL: j 1328      # encoding: [0x00,0xd4,0x98,0x02]
-# CHECK-EL: nop         # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL: nop         # encoding: [0x00,0x0c]
 # CHECK-EL: jal 1328    # encoding: [0x00,0xf4,0x98,0x02]
 # CHECK-EL: nop         # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EL: jalr $ra, $6 # encoding: [0xe6,0x03,0x3c,0x0f]
@@ -33,7 +33,7 @@
 # Big endian
 #------------------------------------------------------------------------------
 # CHECK-EB: j 1328      # encoding: [0xd4,0x00,0x02,0x98]
-# CHECK-EB: nop         # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB: nop         # encoding: [0x0c,0x00]
 # CHECK-EB: jal 1328    # encoding: [0xf4,0x00,0x02,0x98]
 # CHECK-EB: nop         # encoding: [0x00,0x00,0x00,0x00]
 # CHECK-EB: jalr $ra, $6 # encoding: [0x03,0xe6,0x0f,0x3c]
diff --git a/test/MC/Mips/micromips-tailr.s b/test/MC/Mips/micromips-tailr.s
index 0c21a7b..a11ac61 100644
--- a/test/MC/Mips/micromips-tailr.s
+++ b/test/MC/Mips/micromips-tailr.s
@@ -11,9 +11,8 @@
 # CHECK-FIXUP:   addiu $2, $zero, 1332
 # CHECK-FIXUP:         # encoding: [0x40,0x30,0x34,0x05]
 # CHECK-FIXUP:   j foo # encoding: [A,0xd4'A',A,0b000000AA]
-# CHECK-FIXUP:         #   fixup A - offset: 0,
-# CHECK-FIXUP:             value: foo, kind: fixup_MICROMIPS_26_S1
-# CHECK-FIXUP:   nop   # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-FIXUP-NEXT:         #   fixup A - offset: 0, value: foo, kind: fixup_MICROMIPS_26_S1
+# CHECK-FIXUP-NEXT:   nop   # encoding: [0x00,0x0c]
 #------------------------------------------------------------------------------
 # Check that the appropriate relocations were created.
 #------------------------------------------------------------------------------
diff --git a/test/MC/Mips/nacl-mask.s b/test/MC/Mips/nacl-mask.s
index 604b5c8..e7eba37 100644
--- a/test/MC/Mips/nacl-mask.s
+++ b/test/MC/Mips/nacl-mask.s
@@ -1,5 +1,5 @@
 # RUN: llvm-mc -filetype=obj -triple=mipsel-unknown-nacl %s \
-# RUN:   | llvm-objdump -disassemble -no-show-raw-insn - | FileCheck %s
+# RUN:   | llvm-objdump -disassemble -z -no-show-raw-insn - | FileCheck %s
 
 # This test checks that address-masking sandboxing is added when the input is
 # assembly.
diff --git a/test/MC/Mips/unsupported-relocation.s b/test/MC/Mips/unsupported-relocation.s
index 151a559..258b483 100644
--- a/test/MC/Mips/unsupported-relocation.s
+++ b/test/MC/Mips/unsupported-relocation.s
@@ -1,5 +1,5 @@
-# RUN: not llvm-mc -triple mips-unknown-linux -filetype=obj %s 2>%t
-# RUN: FileCheck %s < %t
+# RUN: not llvm-mc -triple mips-unknown-linux -filetype=obj -o %t %s 2>&1 \
+# RUN:  |  FileCheck %s
 
 # Check that we emit an error for unsupported relocations instead of crashing.
 
@@ -8,6 +8,8 @@
         .data
 foo:
         .byte   x
+# CHECK: :[[@LINE-1]]:17: error: MIPS does not support one byte relocations
         .byte   x+1
-
-# CHECK: LLVM ERROR: MIPS does not support one byte relocations
+# CHECK: :[[@LINE-1]]:17: error: MIPS does not support one byte relocations
+        .quad   x-foo
+# CHECK: :[[@LINE-1]]:17: error: MIPS does not support 64-bit PC-relative relocations
diff --git a/test/MC/RISCV/fixups.s b/test/MC/RISCV/fixups.s
index 0f5432d..f0377de 100644
--- a/test/MC/RISCV/fixups.s
+++ b/test/MC/RISCV/fixups.s
@@ -24,15 +24,26 @@
 # CHECK-FIXUP: fixup A - offset: 0, value: %lo(val), kind: fixup_riscv_lo12_s
 # CHECK-INSTR: sw a0, 1656(t1)
 
+1:
+auipc t1, %pcrel_hi(.LBB0)
+# CHECK-FIXUP: fixup A - offset: 0, value: %pcrel_hi(.LBB0), kind: fixup_riscv_pcrel_hi20
+# CHECK-INSTR: auipc t1, 0
+addi t1, t1, %pcrel_lo(1b)
+# CHECK-FIXUP: fixup A - offset: 0, value: %pcrel_lo(.Ltmp0), kind: fixup_riscv_pcrel_lo12_i
+# CHECK-INSTR: addi t1, t1, -16
+sw t1, %pcrel_lo(1b)(t1)
+# CHECK-FIXUP: fixup A - offset: 0, value: %pcrel_lo(.Ltmp0), kind: fixup_riscv_pcrel_lo12_s
+# CHECK-INSTR: sw t1, -16(t1)
+
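+# %pcrel_lo(1b) pairs with the %pcrel_hi auipc at label 1 above (the assembler
+# rewrites the reference to the temporary .Ltmp0), and the twelve bytes of
+# instructions inserted here push the backward branch offsets below out by 12:
+# -16 becomes -28 and -24 becomes -36.
+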
 jal zero, .LBB0
 # CHECK-FIXUP: fixup A - offset: 0, value: .LBB0, kind: fixup_riscv_jal
-# CHECK-INSTR: jal zero, -16
+# CHECK-INSTR: jal zero, -28
 jal zero, .LBB2
 # CHECK-FIXUP: fixup A - offset: 0, value: .LBB2, kind: fixup_riscv_jal
 # CHECK-INSTR: jal zero, 330996
 beq a0, a1, .LBB0
 # CHECK-FIXUP: fixup A - offset: 0, value: .LBB0, kind: fixup_riscv_branch
-# CHECK-INSTR: beq a0, a1, -24
+# CHECK-INSTR: beq a0, a1, -36
 blt a0, a1, .LBB1
 # CHECK-FIXUP: fixup A - offset: 0, value: .LBB1, kind: fixup_riscv_branch
 # CHECK-INSTR: blt a0, a1, 1108
diff --git a/test/MC/RISCV/pcrel-lo12-invalid.s b/test/MC/RISCV/pcrel-lo12-invalid.s
new file mode 100644
index 0000000..7cf2494
--- /dev/null
+++ b/test/MC/RISCV/pcrel-lo12-invalid.s
@@ -0,0 +1,5 @@
+# RUN: not llvm-mc -triple riscv32 -mattr=-relax -filetype obj < %s -o /dev/null 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv32 -mattr=+relax -filetype obj < %s -o /dev/null 2>&1 | FileCheck %s
+
+1:
+  addi a0, a0, %pcrel_lo(1b) # CHECK: :[[@LINE]]:3: error: could not find corresponding %pcrel_hi
diff --git a/test/MC/RISCV/relocations.s b/test/MC/RISCV/relocations.s
index b68b11b..a879c9a 100644
--- a/test/MC/RISCV/relocations.s
+++ b/test/MC/RISCV/relocations.s
@@ -44,6 +44,7 @@
 # INSTR: sb t1, %lo(foo+4)(a2)
 # FIXUP: fixup A - offset: 0, value: %lo(foo+4), kind: fixup_riscv_lo12_s
 
+.L0:
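+# A %pcrel_lo operand must now name the label of the auipc carrying the
+# matching %pcrel_hi, so the checks below use .L0 rather than foo directly.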
 auipc t1, %pcrel_hi(foo)
 # RELOC: R_RISCV_PCREL_HI20 foo 0x0
 # INSTR: auipc t1, %pcrel_hi(foo)
@@ -54,25 +55,15 @@
 # INSTR: auipc t1, %pcrel_hi(foo+4)
 # FIXUP: fixup A - offset: 0, value: %pcrel_hi(foo+4), kind: fixup_riscv_pcrel_hi20
 
-addi t1, t1, %pcrel_lo(foo)
-# RELOC: R_RISCV_PCREL_LO12_I foo 0x0
-# INSTR: addi t1, t1, %pcrel_lo(foo)
-# FIXUP: fixup A - offset: 0, value: %pcrel_lo(foo), kind: fixup_riscv_pcrel_lo12_i
+addi t1, t1, %pcrel_lo(.L0)
+# RELOC: R_RISCV_PCREL_LO12_I .L0 0x0
+# INSTR: addi t1, t1, %pcrel_lo(.L0)
+# FIXUP: fixup A - offset: 0, value: %pcrel_lo(.L0), kind: fixup_riscv_pcrel_lo12_i
 
-addi t1, t1, %pcrel_lo(foo+4)
-# RELOC: R_RISCV_PCREL_LO12_I foo 0x4
-# INSTR: addi t1, t1, %pcrel_lo(foo+4)
-# FIXUP: fixup A - offset: 0, value: %pcrel_lo(foo+4), kind: fixup_riscv_pcrel_lo12_i
-
-sb t1, %pcrel_lo(foo)(a2)
-# RELOC: R_RISCV_PCREL_LO12_S foo 0x0
-# INSTR: sb t1, %pcrel_lo(foo)(a2)
-# FIXUP: fixup A - offset: 0, value: %pcrel_lo(foo), kind: fixup_riscv_pcrel_lo12_s
-
-sb t1, %pcrel_lo(foo+4)(a2)
-# RELOC: R_RISCV_PCREL_LO12_S foo 0x4
-# INSTR: sb t1, %pcrel_lo(foo+4)(a2)
-# FIXUP: fixup A - offset: 0, value: %pcrel_lo(foo+4), kind: fixup_riscv_pcrel_lo12_s
+sb t1, %pcrel_lo(.L0)(a2)
+# RELOC: R_RISCV_PCREL_LO12_S .L0 0x0
+# INSTR: sb t1, %pcrel_lo(.L0)(a2)
+# FIXUP: fixup A - offset: 0, value: %pcrel_lo(.L0), kind: fixup_riscv_pcrel_lo12_s
 
 jal zero, foo
 # RELOC: R_RISCV_JAL
diff --git a/test/MC/RISCV/rv32i-aliases-valid.s b/test/MC/RISCV/rv32i-aliases-valid.s
index 04baa9e..f986672 100644
--- a/test/MC/RISCV/rv32i-aliases-valid.s
+++ b/test/MC/RISCV/rv32i-aliases-valid.s
@@ -3,10 +3,10 @@
 # RUN: llvm-mc %s -triple=riscv32 \
 # RUN:     | FileCheck -check-prefixes=CHECK-EXPAND,CHECK-ALIAS %s
 # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \
-# RUN:     | llvm-objdump -riscv-no-aliases -d - \
-# RUN:     | FileCheck -check-prefixes=CHECK-EXPAND,CHECK-INST %s
+# RUN:     | llvm-objdump -riscv-no-aliases -d -r - \
+# RUN:     | FileCheck -check-prefixes=CHECK-OBJ-NOALIAS,CHECK-EXPAND,CHECK-INST %s
 # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \
-# RUN:     | llvm-objdump -d - \
+# RUN:     | llvm-objdump -d -r - \
 # RUN:     | FileCheck -check-prefixes=CHECK-EXPAND,CHECK-ALIAS %s
 
 # The following check prefixes are used in this test:
@@ -69,6 +69,32 @@
 # CHECK-EXPAND: addi a2, zero, -1
 li x12, 0xFFFFFFFF
 
+# CHECK-EXPAND: addi a0, zero, 1110
+li a0, %lo(0x123456)
+# CHECK-OBJ-NOALIAS: addi a0, zero, 0
+# CHECK-OBJ: R_RISCV_PCREL_LO12
+li a0, %pcrel_lo(0x123456)
+
+# CHECK-OBJ-NOALIAS: addi a0, zero, 0
+# CHECK-OBJ: R_RISCV_LO12
+li a0, %lo(foo)
+# CHECK-OBJ-NOALIAS: addi a0, zero, 0
+# CHECK-OBJ: R_RISCV_PCREL_LO12
+li a0, %pcrel_lo(foo)
+
+.equ CONST, 0x123456
+# CHECK-EXPAND: lui a0, 291
+# CHECK-EXPAND: addi a0, a0, 1110
+li a0, CONST
+# CHECK-EXPAND: lui a0, 291
+# CHECK-EXPAND: addi a0, a0, 1111
+li a0, CONST+1
+
+.equ CONST, 0x654321
+# CHECK-EXPAND: lui a0, 1620
+# CHECK-EXPAND: addi a0, a0, 801
+li a0, CONST
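+# (Redefining CONST shows li expands with the value in effect at that point:
+# 0x654321 splits into lui 1620, i.e. 0x654, plus addi 801, i.e. 0x321.)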
+
 # CHECK-INST: csrrs t4, instreth, zero
 # CHECK-ALIAS: rdinstreth t4
 rdinstreth x29
diff --git a/test/MC/RISCV/rv32i-valid.s b/test/MC/RISCV/rv32i-valid.s
index 0df53e6..85a841b 100644
--- a/test/MC/RISCV/rv32i-valid.s
+++ b/test/MC/RISCV/rv32i-valid.s
@@ -9,6 +9,8 @@
 # RUN:     | llvm-objdump -riscv-no-aliases -d -r - \
 # RUN:     | FileCheck -check-prefixes=CHECK-OBJ,CHECK-ASM-AND-OBJ %s
 
+.equ CONST, 30
+
 # CHECK-ASM-AND-OBJ: lui a0, 2
 # CHECK-ASM: encoding: [0x37,0x25,0x00,0x00]
 lui a0, 2
@@ -35,6 +37,12 @@
 # CHECK-OBJ: lui a0, 0
 # CHECK-OBJ: R_RISCV_HI20 foo
 lui a0, %hi(foo)
+# CHECK-ASM-AND-OBJ: lui a0, 30
+# CHECK-ASM: encoding: [0x37,0xe5,0x01,0x00]
+lui a0, CONST
+# CHECK-ASM-AND-OBJ: lui a0, 31
+# CHECK-ASM: encoding: [0x37,0xf5,0x01,0x00]
+lui a0, CONST+1
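+# (Symbols defined with .equ fold to plain constants here, so these forms need
+# no relocation, unlike the %hi(foo) case above.)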
 
 # CHECK-ASM-AND-OBJ: auipc a0, 2
 # CHECK-ASM: encoding: [0x17,0x25,0x00,0x00]
@@ -53,6 +61,9 @@
 # CHECK-OBJ: auipc a0, 0
 # CHECK-OBJ: R_RISCV_PCREL_HI20 foo
 auipc a0, %pcrel_hi(foo)
+# CHECK-ASM-AND-OBJ: auipc a0, 30
+# CHECK-ASM: encoding: [0x17,0xe5,0x01,0x00]
+auipc a0, CONST
 
 # CHECK-ASM-AND-OBJ: jal a2, 1048574
 # CHECK-ASM: encoding: [0x6f,0xf6,0xff,0x7f]
@@ -70,6 +81,9 @@
 # CHECK-OBJ: jal a0, 0
 # CHECK-OBJ: R_RISCV_JAL a0
 jal a0, a0
+# CHECK-ASM-AND-OBJ: jal a0, 30
+# CHECK-ASM: encoding: [0x6f,0x05,0xe0,0x01]
+jal a0, CONST
 
 # CHECK-ASM-AND-OBJ: jalr a0, a1, -2048
 # CHECK-ASM: encoding: [0x67,0x85,0x05,0x80]
@@ -83,6 +97,9 @@
 # CHECK-ASM-AND-OBJ: jalr sp, zero, 256
 # CHECK-ASM: encoding: [0x67,0x01,0x00,0x10]
 jalr sp, zero, 256
+# CHECK-ASM-AND-OBJ: jalr a1, a2, 30
+# CHECK-ASM: encoding: [0xe7,0x05,0xe6,0x01]
+jalr a1, a2, CONST
 
 # CHECK-ASM-AND-OBJ: beq s1, s1, 102
 # CHECK-ASM: encoding: [0x63,0x83,0x94,0x06]
@@ -102,6 +119,9 @@
 # CHECK-ASM-AND-OBJ: bgeu s8, sp, 512
 # CHECK-ASM: encoding: [0x63,0x70,0x2c,0x20]
 bgeu s8, sp, 512
+# CHECK-ASM-AND-OBJ: bgeu t0, t1, 30
+# CHECK-ASM: encoding: [0x63,0xff,0x62,0x00]
+bgeu t0, t1, CONST
 
 # CHECK-ASM-AND-OBJ: lb s3, 4(ra)
 # CHECK-ASM: encoding: [0x83,0x89,0x40,0x00]
@@ -131,6 +151,9 @@
 # CHECK-OBJ: lhu t3, 0(t3)
 # CHECK-OBJ: R_RISCV_PCREL_LO12
 lhu t3, %pcrel_lo(foo)(t3)
+# CHECK-ASM-AND-OBJ: lb t0, 30(t1)
+# CHECK-ASM: encoding: [0x83,0x02,0xe3,0x01]
+lb t0, CONST(t1)
 
 # CHECK-ASM-AND-OBJ: sb a0, 2047(a2)
 # CHECK-ASM: encoding: [0xa3,0x0f,0xa6,0x7e]
@@ -144,6 +167,9 @@
 # CHECK-ASM-AND-OBJ: sw ra, 999(zero)
 # CHECK-ASM: encoding: [0xa3,0x23,0x10,0x3e]
 sw ra, 999(zero)
+# CHECK-ASM-AND-OBJ: sw a0, 30(t0)
+# CHECK-ASM: encoding: [0x23,0xaf,0xa2,0x00]
+sw a0, CONST(t0)
 
 # CHECK-ASM-AND-OBJ: addi ra, sp, 2
 # CHECK-ASM: encoding: [0x93,0x00,0x21,0x00]
@@ -153,6 +179,9 @@
 # CHECK-OBJ: addi ra, sp, 0
 # CHECK-OBJ: R_RISCV_LO12
 addi ra, sp, %lo(foo)
+# CHECK-ASM-AND-OBJ: addi ra, sp, 30
+# CHECK-ASM: encoding: [0x93,0x00,0xe1,0x01]
+addi ra, sp, CONST
 # CHECK-ASM-AND-OBJ: slti a0, a2, -20
 # CHECK-ASM: encoding: [0x13,0x25,0xc6,0xfe]
 slti a0, a2, -20
@@ -184,6 +213,9 @@
 # CHECK-ASM-AND-OBJ: srai a2, sp, 15
 # CHECK-ASM: encoding: [0x13,0x56,0xf1,0x40]
 srai a2, sp, 15
+# CHECK-ASM-AND-OBJ: slli t3, t3, 30
+# CHECK-ASM: encoding: [0x13,0x1e,0xee,0x01]
+slli t3, t3, CONST
 
 # CHECK-ASM-AND-OBJ: add ra, zero, zero
 # CHECK-ASM: encoding: [0xb3,0x00,0x00,0x00]
@@ -252,6 +284,8 @@
 # CHECK-ASM: encoding: [0x73,0x10,0x00,0xc0]
 unimp
 
+.equ CONST, 16
+
 # CHECK-ASM-AND-OBJ: csrrw t0, 4095, t1
 # CHECK-ASM: encoding: [0xf3,0x12,0xf3,0xff]
 csrrw t0, 0xfff, t1
diff --git a/test/MC/RISCV/rv64i-aliases-valid.s b/test/MC/RISCV/rv64i-aliases-valid.s
index 64bb36e..a705525 100644
--- a/test/MC/RISCV/rv64i-aliases-valid.s
+++ b/test/MC/RISCV/rv64i-aliases-valid.s
@@ -4,7 +4,7 @@
 # RUN:     | FileCheck -check-prefixes=CHECK-EXPAND,CHECK-ALIAS %s
 # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \
 # RUN:     | llvm-objdump -riscv-no-aliases -d - \
-# RUN:     | FileCheck -check-prefixes=CHECK-EXPAND,CHECK-INST %s
+# RUN:     | FileCheck -check-prefixes=CHECK-OBJ-NOALIAS,CHECK-EXPAND,CHECK-INST %s
 # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \
 # RUN:     | llvm-objdump -d - \
 # RUN:     | FileCheck -check-prefixes=CHECK-EXPAND,CHECK-ALIAS %s
@@ -105,6 +105,29 @@
 # CHECK-EXPAND: addi t5, zero, -1
 li t5, 0xFFFFFFFFFFFFFFFF
 
+# CHECK-EXPAND: addi a0, zero, 1110
+li a0, %lo(0x123456)
+# CHECK-OBJ-NOALIAS: addi a0, zero, 0
+# CHECK-OBJ: R_RISCV_PCREL_LO12
+li a0, %pcrel_lo(0x123456)
+
+# CHECK-OBJ-NOALIAS: addi a0, zero, 0
+# CHECK-OBJ: R_RISCV_LO12
+li a0, %lo(foo)
+# CHECK-OBJ-NOALIAS: addi a0, zero, 0
+# CHECK-OBJ: R_RISCV_PCREL_LO12
+li a0, %pcrel_lo(foo)
+
+.equ CONST, 0x123456
+# CHECK-EXPAND: lui a0, 291
+# CHECK-EXPAND: addiw a0, a0, 1110
+li a0, CONST
+
+.equ CONST, 0x654321
+# CHECK-EXPAND: lui a0, 1620
+# CHECK-EXPAND: addiw a0, a0, 801
+li a0, CONST
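+# (On RV64 the expansion presumably uses addiw rather than addi to keep the
+# 32-bit result sign-extended, mirroring the rv32 test's lui+addi split.)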
+
 # CHECK-INST: subw t6, zero, ra
 # CHECK-ALIAS: negw t6, ra
 negw x31, x1
diff --git a/test/MC/RISCV/rv64i-valid.s b/test/MC/RISCV/rv64i-valid.s
index 337b996..dab502c 100644
--- a/test/MC/RISCV/rv64i-valid.s
+++ b/test/MC/RISCV/rv64i-valid.s
@@ -4,6 +4,8 @@
 # RUN:     | llvm-objdump -riscv-no-aliases -d -r - \
 # RUN:     | FileCheck -check-prefixes=CHECK-OBJ,CHECK-ASM-AND-OBJ %s
 
+.equ CONST, 31
+
 # CHECK-ASM-AND-OBJ: lwu zero, 4(ra)
 # CHECK-ASM: encoding: [0x03,0xe0,0x40,0x00]
 lwu x0, 4(x1)
@@ -49,6 +51,9 @@
 # CHECK-ASM-AND-OBJ: srai s10, s11, 31
 # CHECK-ASM: encoding: [0x13,0xdd,0xfd,0x41]
 srai x26, x27, 31
+# CHECK-ASM-AND-OBJ: srai s10, s11, 31
+# CHECK-ASM: encoding: [0x13,0xdd,0xfd,0x41]
+srai x26, x27, CONST
 
 # CHECK-ASM-AND-OBJ: addiw t3, t4, -2048
 # CHECK-ASM: encoding: [0x1b,0x8e,0x0e,0x80]
@@ -75,6 +80,9 @@
 # CHECK-ASM-AND-OBJ: sraiw a0, a1, 31
 # CHECK-ASM: encoding: [0x1b,0xd5,0xf5,0x41]
 sraiw a0, a1, 31
+# CHECK-ASM-AND-OBJ: sraiw a0, a1, 31
+# CHECK-ASM: encoding: [0x1b,0xd5,0xf5,0x41]
+sraiw a0, a1, CONST
 
 # CHECK-ASM-AND-OBJ: addw a2, a3, a4
 # CHECK-ASM: encoding: [0x3b,0x86,0xe6,0x00]
diff --git a/test/MC/WebAssembly/array-fill.ll b/test/MC/WebAssembly/array-fill.ll
index 7af4e74..7f3d8bd 100644
--- a/test/MC/WebAssembly/array-fill.ll
+++ b/test/MC/WebAssembly/array-fill.ll
@@ -22,6 +22,6 @@
 ; CHECK-NEXT:     SegmentInfo:    
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Name:            .data
-; CHECK-NEXT:         Alignment:       1
+; CHECK-NEXT:         Alignment:       0
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT: ...
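
The Alignment updates in this and the following WebAssembly tests all reflect one change: segment alignment in the linking metadata is now recorded as a log2 exponent rather than a byte count, so 1 becomes 0, 4 becomes 2, and 8 becomes 3. A one-liner to sanity-check the new values (a sketch, not the writer code):

    #include <cassert>
    #include <cstdint>

    // Byte count -> log2 exponent, the encoding the updated CHECKs use.
    uint32_t encodeAlignLog2(uint32_t Bytes) {
      uint32_t Log = 0;
      while ((1u << Log) < Bytes)
        ++Log;
      return Log;
    }

    int main() {
      assert(encodeAlignLog2(1) == 0);
      assert(encodeAlignLog2(4) == 2);
      assert(encodeAlignLog2(8) == 3);
    }
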
diff --git a/test/MC/WebAssembly/assembler-binary.ll b/test/MC/WebAssembly/assembler-binary.ll
index 3667a52..def7f8c 100644
--- a/test/MC/WebAssembly/assembler-binary.ll
+++ b/test/MC/WebAssembly/assembler-binary.ll
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:         Field:           __indirect_function_table
 ; CHECK-NEXT:         Kind:            TABLE
 ; CHECK-NEXT:         Table:
-; CHECK-NEXT:           ElemType:        ANYFUNC
+; CHECK-NEXT:           ElemType:        FUNCREF
 ; CHECK-NEXT:           Limits:
 ; CHECK-NEXT:             Initial:         0x00000000
 ; CHECK-NEXT:       - Module:          env
@@ -77,7 +77,7 @@
 ; CHECK-NEXT:         Body:            1080808080000B
 ; CHECK-NEXT:   - Type:            CUSTOM
 ; CHECK-NEXT:     Name:            linking
-; CHECK-NEXT:     Version:         1
+; CHECK-NEXT:     Version:         2
 ; CHECK-NEXT:     SymbolTable:
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Kind:            FUNCTION
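
Two spelling/version changes recur throughout the WebAssembly tests below: the indirect function table's element type is now printed FUNCREF (the reference-types name for what was ANYFUNC), and the "linking" custom section's metadata version moves from 1 to 2. A minimal mirror of the two constants, for reference only; the 0x70 type byte is my assumption about the underlying binary encoding, not something these tests check:

    #include <cstdint>

    enum class ElemType : uint8_t { FUNCREF = 0x70 }; // formerly printed ANYFUNC
    constexpr uint32_t kLinkingMetadataVersion = 2;   // was 1
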
diff --git a/test/MC/WebAssembly/basic-assembly-errors.s b/test/MC/WebAssembly/basic-assembly-errors.s
new file mode 100644
index 0000000..06fc4f8
--- /dev/null
+++ b/test/MC/WebAssembly/basic-assembly-errors.s
@@ -0,0 +1,25 @@
+# RUN: not llvm-mc -triple=wasm32-unknown-unknown -mattr=+simd128,+nontrapping-fptoint,+exception-handling < %s 2>&1 | FileCheck %s
+
+    .text
+    .section    .text.main,"",@
+    .type       test0,@function
+# CHECK: End of block construct with no start: end_try
+    end_try
+test0:
+    .functype   test0 () -> ()
+# CHECK: Block construct type mismatch, expected: end_function, instead got: end_loop
+    end_loop
+    block
+# CHECK: Block construct type mismatch, expected: end_block, instead got: end_if
+    end_if
+    try
+    loop
+# CHECK: Block construct type mismatch, expected: end_loop, instead got: end_function
+# CHECK: error: Unmatched block construct(s) at function end: loop
+# CHECK: error: Unmatched block construct(s) at function end: try
+# CHECK: error: Unmatched block construct(s) at function end: block
+# CHECK: error: Unmatched block construct(s) at function end: function
+    end_function
+.Lfunc_end0:
+    .size       test0, .Lfunc_end0-test0
+
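
The new error test drives the assembler's block-nesting validation: every block/loop/try/if opens a scope, every end_* must match the innermost open scope, and anything still open at end_function is reported. A stack-based sketch of that rule, reusing the message wording from the CHECK lines (illustrative, not the parser's code):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct NestChecker {
      std::vector<std::string> Stack;        // empty outside any function
      void begin(const std::string &Kind) {  // block/loop/try/if/function
        Stack.push_back(Kind);
      }
      bool end(const std::string &Kind) {    // from an end_<Kind> token
        if (Stack.empty()) {
          std::fprintf(stderr, "End of block construct with no start: end_%s\n",
                       Kind.c_str());
          return false;
        }
        if (Stack.back() != Kind) {
          std::fprintf(stderr,
                       "Block construct type mismatch, expected: end_%s, "
                       "instead got: end_%s\n",
                       Stack.back().c_str(), Kind.c_str());
          return false;
        }
        Stack.pop_back();
        return true;
      }
    };
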
diff --git a/test/MC/WebAssembly/basic-assembly.s b/test/MC/WebAssembly/basic-assembly.s
index 408d02d..4cf9162 100644
--- a/test/MC/WebAssembly/basic-assembly.s
+++ b/test/MC/WebAssembly/basic-assembly.s
@@ -1,6 +1,6 @@
-# RUN: llvm-mc -triple=wasm32-unknown-unknown -mattr=+simd128,+nontrapping-fptoint,+exception-handling < %s | FileCheck %s
+# RUN: llvm-mc -triple=wasm32-unknown-unknown -mattr=+unimplemented-simd128,+nontrapping-fptoint,+exception-handling < %s | FileCheck %s
 # this one is just here to see if it converts to .o without errors, but doesn't check any output:
-# RUN: llvm-mc -triple=wasm32-unknown-unknown -filetype=obj -mattr=+simd128,+nontrapping-fptoint,+exception-handling < %s
+# RUN: llvm-mc -triple=wasm32-unknown-unknown -filetype=obj -mattr=+unimplemented-simd128,+nontrapping-fptoint,+exception-handling < %s
 
     .text
     .section .text.main,"",@
@@ -11,24 +11,24 @@
     .eventtype  __cpp_exception i32
     .local      f32, f64, v128, v128
     # Explicit getlocal/setlocal:
-    get_local   2
-    set_local   2
+    local.get   2
+    local.set   2
     # Immediates:
     i32.const   -1
     f64.const   0x1.999999999999ap1
     v128.const  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
     v128.const  0, 1, 2, 3, 4, 5, 6, 7
     # Indirect addressing:
-    get_local   0
+    local.get   0
     f64.store   0
     # Loops, conditionals, binary ops, calls etc:
-    block
+    block       i32
     i32.const   1
-    get_local   0
+    local.get   0
     i32.ge_s
     br_if       0        # 0: down to label0
 .LBB0_1:
-    loop             # label1:
+    loop        i32      # label1:
     call        something1@FUNCTION
     i64.const   1234
     i32.call    something2@FUNCTION
@@ -36,32 +36,51 @@
     call_indirect 0
     i32.const   1
     i32.add
-    tee_local   0
-    get_local   0
+    local.tee   0
+    local.get   0
     i32.lt_s
     br_if       0        # 0: up to label1
 .LBB0_2:
     end_loop
-    end_block                       # label0:
-    get_local   4
-    get_local   5
+    end_block            # label0:
+    local.get   4
+    local.get   5
+    block       void
+    block       i64
+    block       f32
+    block       f64
+    br_table {0, 1, 2}   # 2 entries, default
+    end_block            # first entry jumps here.
+    i32.const   1
+    br 2
+    end_block            # second entry jumps here.
+    i32.const   2
+    br 1
+    end_block            # default jumps here.
+    i32.const   3
+    end_block            # "switch" exit.
+    if          # void
+    if          i32
+    end_if
+    else
+    end_if
     f32x4.add
     # Test correct parsing of instructions with / and : in them:
     # TODO: enable once instruction has been added.
-    #i32x4.trunc_s/f32x4:sat
-    i32.trunc_s/f32
-    try
+    #i32x4.trunc_sat_f32x4_s
+    i32.trunc_f32_s
+    try         except_ref
 .LBB0_3:
     i32.catch   0
 .LBB0_4:
     catch_all
 .LBB0_5:
     end_try
-    #i32.trunc_s:sat/f32
-    get_global  __stack_pointer@GLOBAL
+    #i32.trunc_sat_f32_s
+    global.get  __stack_pointer@GLOBAL
     end_function
 .Lfunc_end0:
-	.size	test0, .Lfunc_end0-test0
+    .size	test0, .Lfunc_end0-test0
     .globaltype	__stack_pointer, i32
 
 # CHECK:           .text
@@ -69,21 +88,21 @@
 # CHECK-NEXT:      .functype test0 (i32, i64) -> (i32)
 # CHECK-NEXT:      .eventtype  __cpp_exception i32
 # CHECK-NEXT:      .local      f32, f64
-# CHECK-NEXT:      get_local   2
-# CHECK-NEXT:      set_local   2
+# CHECK-NEXT:      local.get   2
+# CHECK-NEXT:      local.set   2
 # CHECK-NEXT:      i32.const   -1
 # CHECK-NEXT:      f64.const   0x1.999999999999ap1
 # CHECK-NEXT:      v128.const  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
 # CHECK-NEXT:      v128.const  0, 1, 2, 3, 4, 5, 6, 7
-# CHECK-NEXT:      get_local   0
+# CHECK-NEXT:      local.get   0
 # CHECK-NEXT:      f64.store   0:p2align=0
-# CHECK-NEXT:      block
+# CHECK-NEXT:      block       i32
 # CHECK-NEXT:      i32.const   1
-# CHECK-NEXT:      get_local   0
+# CHECK-NEXT:      local.get   0
 # CHECK-NEXT:      i32.ge_s
 # CHECK-NEXT:      br_if 0            # 0: down to label0
 # CHECK-NEXT:  .LBB0_1:
-# CHECK-NEXT:      loop                    # label1:
+# CHECK-NEXT:      loop        i32         # label1:
 # CHECK-NEXT:      call        something1@FUNCTION
 # CHECK-NEXT:      i64.const   1234
 # CHECK-NEXT:      i32.call    something2@FUNCTION
@@ -91,25 +110,45 @@
 # CHECK-NEXT:      call_indirect 0
 # CHECK-NEXT:      i32.const   1
 # CHECK-NEXT:      i32.add
-# CHECK-NEXT:      tee_local   0
-# CHECK-NEXT:      get_local   0
+# CHECK-NEXT:      local.tee   0
+# CHECK-NEXT:      local.get   0
 # CHECK-NEXT:      i32.lt_s
 # CHECK-NEXT:      br_if 0            # 0: up to label1
 # CHECK-NEXT:  .LBB0_2:
 # CHECK-NEXT:      end_loop
 # CHECK-NEXT:      end_block                       # label0:
-# CHECK-NEXT:      get_local   4
-# CHECK-NEXT:      get_local   5
+# CHECK-NEXT:      local.get   4
+# CHECK-NEXT:      local.get   5
+# CHECK-NEXT:      block
+# CHECK-NEXT:      block       i64
+# CHECK-NEXT:      block       f32
+# CHECK-NEXT:      block       f64
+# CHECK-NEXT:      br_table {0, 1, 2}  # 1: down to label4
+# CHECK-NEXT:                          # 2: down to label3
+# CHECK-NEXT:      end_block           # label5:
+# CHECK-NEXT:      i32.const 1
+# CHECK-NEXT:      br 2                # 2: down to label2
+# CHECK-NEXT:      end_block           # label4:
+# CHECK-NEXT:      i32.const 2
+# CHECK-NEXT:      br 1                # 1: down to label2
+# CHECK-NEXT:      end_block           # label3:
+# CHECK-NEXT:      i32.const 3
+# CHECK-NEXT:      end_block           # label2:
+# CHECK-NEXT:      if
+# CHECK-NEXT:      if          i32
+# CHECK-NEXT:      end_if
+# CHECK-NEXT:      else
+# CHECK-NEXT:      end_if
 # CHECK-NEXT:      f32x4.add
-# CHECK-NEXT:      i32.trunc_s/f32
-# CHECK-NEXT:      try
+# CHECK-NEXT:      i32.trunc_f32_s
+# CHECK-NEXT:      try         except_ref
 # CHECK-NEXT:  .LBB0_3:
 # CHECK-NEXT:      i32.catch   0
 # CHECK-NEXT:  .LBB0_4:
 # CHECK-NEXT:      catch_all
 # CHECK-NEXT:  .LBB0_5:
 # CHECK-NEXT:      end_try
-# CHECK-NEXT:      get_global  __stack_pointer@GLOBAL
+# CHECK-NEXT:      global.get  __stack_pointer@GLOBAL
 # CHECK-NEXT:      end_function
 
 # CHECK:           .globaltype	__stack_pointer, i32
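
Most of this test's churn is spelling: the dotted local.get/local.set/local.tee/global.get names replace the old get_/set_/tee_ forms, and the conversion instructions move from the slash syntax to type-suffixed names. A small lookup table of the renames exercised above, purely as a reading aid:

    #include <map>
    #include <string>

    // Old -> new mnemonics, as exercised by the rewritten test above.
    const std::map<std::string, std::string> Renames = {
        {"get_local", "local.get"},   {"set_local", "local.set"},
        {"tee_local", "local.tee"},   {"get_global", "global.get"},
        {"i32.trunc_s/f32", "i32.trunc_f32_s"},
        {"i32x4.trunc_sat_s/f32x4", "i32x4.trunc_sat_f32x4_s"},
    };
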
diff --git a/test/MC/WebAssembly/bss.ll b/test/MC/WebAssembly/bss.ll
index 2c0cdf2..1ab3ec1 100644
--- a/test/MC/WebAssembly/bss.ll
+++ b/test/MC/WebAssembly/bss.ll
@@ -64,18 +64,18 @@
 ; CHECK-NEXT:     SegmentInfo:    
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Name:            .bss.g0
-; CHECK-NEXT:         Alignment:       4
+; CHECK-NEXT:         Alignment:       2
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:       - Index:           1
 ; CHECK-NEXT:         Name:            .bss.g1
-; CHECK-NEXT:         Alignment:       4
+; CHECK-NEXT:         Alignment:       2
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:       - Index:           2
 ; CHECK-NEXT:         Name:            .bss.foo
-; CHECK-NEXT:         Alignment:       1
+; CHECK-NEXT:         Alignment:       0
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:       - Index:           3
 ; CHECK-NEXT:         Name:            .bss.bar
-; CHECK-NEXT:         Alignment:       1
+; CHECK-NEXT:         Alignment:       0
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT: ...
diff --git a/test/MC/WebAssembly/comdat.ll b/test/MC/WebAssembly/comdat.ll
index 7bba78b..997840a 100644
--- a/test/MC/WebAssembly/comdat.ll
+++ b/test/MC/WebAssembly/comdat.ll
@@ -41,7 +41,7 @@
 ; CHECK-NEXT:         Field:           __indirect_function_table
 ; CHECK-NEXT:         Kind:            TABLE
 ; CHECK-NEXT:         Table:           
-; CHECK-NEXT:           ElemType:        ANYFUNC
+; CHECK-NEXT:           ElemType:        FUNCREF
 ; CHECK-NEXT:           Limits:          
 ; CHECK-NEXT:             Initial:         0x00000000
 ; CHECK-NEXT:       - Module:          env
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:        Content:         '616263'
 ; CHECK-NEXT:  - Type:            CUSTOM
 ; CHECK-NEXT:    Name:            linking
-; CHECK-NEXT:    Version:         1
+; CHECK-NEXT:    Version:         2
 ; CHECK-NEXT:    SymbolTable:
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Kind:            FUNCTION
@@ -106,7 +106,7 @@
 ; CHECK-NEXT:    SegmentInfo:
 ; CHECK-NEXT:      - Index:           0
 ; CHECK-NEXT:        Name:            .rodata.constantData
-; CHECK-NEXT:        Alignment:       1
+; CHECK-NEXT:        Alignment:       0
 ; CHECK-NEXT:        Flags:           [  ]
 ; CHECK-NEXT:    Comdats:
 ; CHECK-NEXT:        Name:            basicInlineFn
diff --git a/test/MC/WebAssembly/event-section.ll b/test/MC/WebAssembly/event-section.ll
index 2138170..dc2e9b6 100644
--- a/test/MC/WebAssembly/event-section.ll
+++ b/test/MC/WebAssembly/event-section.ll
@@ -44,7 +44,7 @@
 
 ; CHECK:        - Type:            CUSTOM
 ; CHECK-NEXT:     Name:            linking
-; CHECK-NEXT:     Version:         1
+; CHECK-NEXT:     Version:         2
 ; CHECK-NEXT:     SymbolTable:
 
 ; CHECK:            - Index:           1
diff --git a/test/MC/WebAssembly/explicit-sections.ll b/test/MC/WebAssembly/explicit-sections.ll
index e069cee..ab2bb16 100644
--- a/test/MC/WebAssembly/explicit-sections.ll
+++ b/test/MC/WebAssembly/explicit-sections.ll
@@ -60,14 +60,14 @@
 ; CHECK-NEXT:     SegmentInfo:    
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Name:            .data.global0
-; CHECK-NEXT:         Alignment:       8
+; CHECK-NEXT:         Alignment:       3
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:       - Index:           1
 ; CHECK-NEXT:         Name:            .sec1
-; CHECK-NEXT:         Alignment:       8
+; CHECK-NEXT:         Alignment:       3
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:       - Index:           2
 ; CHECK-NEXT:         Name:            .sec2
-; CHECK-NEXT:         Alignment:       8
+; CHECK-NEXT:         Alignment:       3
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT: ...
diff --git a/test/MC/WebAssembly/function-sections.ll b/test/MC/WebAssembly/function-sections.ll
index 8f1e29a..b9b9c28 100644
--- a/test/MC/WebAssembly/function-sections.ll
+++ b/test/MC/WebAssembly/function-sections.ll
@@ -14,7 +14,7 @@
 
 ; CHECK:        - Type:            CUSTOM
 ; CHECK-NEXT:     Name:            linking
-; CHECK-NEXT:     Version:         1
+; CHECK-NEXT:     Version:         2
 ; CHECK-NEXT:     SymbolTable:     
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Kind:            FUNCTION
diff --git a/test/MC/WebAssembly/global-ctor-dtor.ll b/test/MC/WebAssembly/global-ctor-dtor.ll
index 7464d66..4dd8deb 100644
--- a/test/MC/WebAssembly/global-ctor-dtor.ll
+++ b/test/MC/WebAssembly/global-ctor-dtor.ll
@@ -30,7 +30,7 @@
 ; CHECK-NEXT:         Field:           __indirect_function_table
 ; CHECK-NEXT:         Kind:            TABLE
 ; CHECK-NEXT:         Table:           
-; CHECK-NEXT:           ElemType:        ANYFUNC
+; CHECK-NEXT:           ElemType:        FUNCREF
 ; CHECK-NEXT:           Limits:          
 ; CHECK-NEXT:             Initial:         0x00000002
 ; CHECK-NEXT:       - Module:          env
@@ -110,7 +110,7 @@
 ; CHECK-NEXT:         Content:         '01040000'
 ; CHECK-NEXT:   - Type:            CUSTOM
 ; CHECK-NEXT:     Name:            linking
-; CHECK-NEXT:     Version:         1
+; CHECK-NEXT:     Version:         2
 ; CHECK-NEXT:     SymbolTable:
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Kind:            FUNCTION
@@ -170,7 +170,7 @@
 ; CHECK-NEXT:     SegmentInfo:
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Name:            .data.global1
-; CHECK-NEXT:         Alignment:       8
+; CHECK-NEXT:         Alignment:       3
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:     InitFunctions:     
 ; CHECK-NEXT:       - Priority: 42
diff --git a/test/MC/WebAssembly/simd-encodings.s b/test/MC/WebAssembly/simd-encodings.s
index abb033c..0752f34 100644
--- a/test/MC/WebAssembly/simd-encodings.s
+++ b/test/MC/WebAssembly/simd-encodings.s
@@ -1,4 +1,7 @@
-# RUN: llvm-mc -show-encoding -triple=wasm32-unkown-unknown -mattr=+sign-ext,+simd128 < %s | FileCheck %s
+# RUN: llvm-mc -show-encoding -triple=wasm32-unkown-unknown -mattr=+unimplemented-simd128 < %s | FileCheck %s
[NOTE: This edit was converted from an overlapping replace]
+# RUN: llvm-mc -show-encoding -triple=wasm32-unknown-unknown -mattr=+unimplemented-simd128 < %s | FileCheck %s
+
+main:
+    .functype main () -> ()
 
     # CHECK: v128.load 48:p2align=0 # encoding: [0xfd,0x00,0x00,0x30]
     v128.load 48
@@ -424,28 +427,28 @@
     # CHECK: f64x2.max # encoding: [0xfd,0xaa,0x01]
     f64x2.max
 
-    # CHECK: i32x4.trunc_sat_s/f32x4 # encoding: [0xfd,0xab,0x01]
-    i32x4.trunc_sat_s/f32x4
+    # CHECK: i32x4.trunc_sat_f32x4_s # encoding: [0xfd,0xab,0x01]
+    i32x4.trunc_sat_f32x4_s
 
-    # CHECK: i32x4.trunc_sat_u/f32x4 # encoding: [0xfd,0xac,0x01]
-    i32x4.trunc_sat_u/f32x4
+    # CHECK: i32x4.trunc_sat_f32x4_u # encoding: [0xfd,0xac,0x01]
+    i32x4.trunc_sat_f32x4_u
 
-    # CHECK: i64x2.trunc_sat_s/f64x2 # encoding: [0xfd,0xad,0x01]
-    i64x2.trunc_sat_s/f64x2
+    # CHECK: i64x2.trunc_sat_f64x2_s # encoding: [0xfd,0xad,0x01]
+    i64x2.trunc_sat_f64x2_s
 
-    # CHECK: i64x2.trunc_sat_u/f64x2 # encoding: [0xfd,0xae,0x01]
-    i64x2.trunc_sat_u/f64x2
+    # CHECK: i64x2.trunc_sat_f64x2_u # encoding: [0xfd,0xae,0x01]
+    i64x2.trunc_sat_f64x2_u
 
-    # CHECK: f32x4.convert_s/i32x4 # encoding: [0xfd,0xaf,0x01]
-    f32x4.convert_s/i32x4
+    # CHECK: f32x4.convert_i32x4_s # encoding: [0xfd,0xaf,0x01]
+    f32x4.convert_i32x4_s
 
-    # CHECK: f32x4.convert_u/i32x4 # encoding: [0xfd,0xb0,0x01]
-    f32x4.convert_u/i32x4
+    # CHECK: f32x4.convert_i32x4_u # encoding: [0xfd,0xb0,0x01]
+    f32x4.convert_i32x4_u
 
-    # CHECK: f64x2.convert_s/i64x2 # encoding: [0xfd,0xb1,0x01]
-    f64x2.convert_s/i64x2
+    # CHECK: f64x2.convert_i64x2_s # encoding: [0xfd,0xb1,0x01]
+    f64x2.convert_i64x2_s
 
-    # CHECK: f64x2.convert_u/i64x2 # encoding: [0xfd,0xb2,0x01]
-    f64x2.convert_u/i64x2
+    # CHECK: f64x2.convert_i64x2_u # encoding: [0xfd,0xb2,0x01]
+    f64x2.convert_i64x2_u
 
     end_function
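
The byte patterns above also show why the renamed SIMD mnemonics encode to pairs like [0xfd,0xab,0x01]: SIMD instructions use the 0xFD prefix followed by a ULEB128-encoded opcode, so opcode 0xAB (i32x4.trunc_sat_f32x4_s) takes two bytes. A hedged sketch of that encoding:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // 0xFD prefix + ULEB128 opcode, as the encodings above suggest.
    std::vector<uint8_t> encodeSimdOpcode(uint32_t Opcode) {
      std::vector<uint8_t> Bytes{0xFD};
      do {
        uint8_t B = Opcode & 0x7F;
        Opcode >>= 7;
        if (Opcode)
          B |= 0x80;                   // continuation bit
        Bytes.push_back(B);
      } while (Opcode);
      return Bytes;
    }

    int main() {
      assert((encodeSimdOpcode(0xAB) ==
              std::vector<uint8_t>{0xFD, 0xAB, 0x01}));
    }
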
diff --git a/test/MC/WebAssembly/types.ll b/test/MC/WebAssembly/types.ll
index b801242..6eeeef2 100644
--- a/test/MC/WebAssembly/types.ll
+++ b/test/MC/WebAssembly/types.ll
@@ -1,4 +1,4 @@
-; RUN: llc -wasm-enable-unimplemented-simd -mattr=+sign-ext,+simd128 -filetype=obj %s -o - | obj2yaml | FileCheck %s
+; RUN: llc -mattr=+unimplemented-simd128 -filetype=obj %s -o - | obj2yaml | FileCheck %s
 
 target triple = "wasm32-unknown-unknown"
 
diff --git a/test/MC/WebAssembly/unnamed-data.ll b/test/MC/WebAssembly/unnamed-data.ll
index 4794902..ef7a0e5 100644
--- a/test/MC/WebAssembly/unnamed-data.ll
+++ b/test/MC/WebAssembly/unnamed-data.ll
@@ -44,7 +44,7 @@
 ; CHECK-NEXT:         Content:         '06000000'
 ; CHECK-NEXT:   - Type:            CUSTOM
 ; CHECK-NEXT:     Name:            linking
-; CHECK-NEXT:     Version:         1
+; CHECK-NEXT:     Version:         2
 ; CHECK-NEXT:     SymbolTable:      
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Kind:            DATA
@@ -73,18 +73,18 @@
 ; CHECK-NEXT:     SegmentInfo:    
 ; CHECK-NEXT:       - Index:       0
 ; CHECK-NEXT:         Name:        .rodata..L.str1
-; CHECK-NEXT:         Alignment:   1
+; CHECK-NEXT:         Alignment:   0
 ; CHECK-NEXT:         Flags:       [ ]
 ; CHECK-NEXT:       - Index:       1
 ; CHECK-NEXT:         Name:        .rodata..L.str2
-; CHECK-NEXT:         Alignment:   1
+; CHECK-NEXT:         Alignment:   0
 ; CHECK-NEXT:         Flags:       [ ]
 ; CHECK-NEXT:       - Index:       2
 ; CHECK-NEXT:         Name:        .data.a
-; CHECK-NEXT:         Alignment:   8
+; CHECK-NEXT:         Alignment:   3
 ; CHECK-NEXT:         Flags:       [ ]
 ; CHECK-NEXT:       - Index:       3
 ; CHECK-NEXT:         Name:        .data.b
-; CHECK-NEXT:         Alignment:   8
+; CHECK-NEXT:         Alignment:   3
 ; CHECK-NEXT:         Flags:       [ ]
 ; CHECK_NEXT:   ...
[NOTE: This edit was converted from an overlapping replace]
 ; CHECK-NEXT:   ...
diff --git a/test/MC/WebAssembly/weak-alias.ll b/test/MC/WebAssembly/weak-alias.ll
index bf63695..ea496db 100644
--- a/test/MC/WebAssembly/weak-alias.ll
+++ b/test/MC/WebAssembly/weak-alias.ll
@@ -62,7 +62,7 @@
 ; CHECK-NEXT:         Field:           __indirect_function_table
 ; CHECK-NEXT:         Kind:            TABLE
 ; CHECK-NEXT:         Table:
-; CHECK-NEXT:           ElemType:        ANYFUNC
+; CHECK-NEXT:           ElemType:        FUNCREF
 ; CHECK-NEXT:           Limits:
 ; CHECK-NEXT:             Initial:         0x00000001
 ; CHECK-NEXT:   - Type:            FUNCTION
@@ -138,7 +138,7 @@
 ; CHECK-NEXT:         Content:         '01000000'
 ; CHECK-NEXT:   - Type:            CUSTOM
 ; CHECK-NEXT:     Name:            linking
-; CHECK-NEXT:     Version:         1
+; CHECK-NEXT:     Version:         2
 ; CHECK-NEXT:     SymbolTable:
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Kind:            FUNCTION
@@ -197,15 +197,15 @@
 ; CHECK-NEXT:     SegmentInfo:
 ; CHECK-NEXT:       - Index:           0
 ; CHECK-NEXT:         Name:            .data.bar
-; CHECK-NEXT:         Alignment:       8
+; CHECK-NEXT:         Alignment:       3
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:       - Index:           1
 ; CHECK-NEXT:         Name:            .data.direct_address
-; CHECK-NEXT:         Alignment:       8
+; CHECK-NEXT:         Alignment:       3
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT:       - Index:           2
 ; CHECK-NEXT:         Name:            .data.alias_address
-; CHECK-NEXT:         Alignment:       8
+; CHECK-NEXT:         Alignment:       3
 ; CHECK-NEXT:         Flags:           [ ]
 ; CHECK-NEXT: ...
 
diff --git a/test/MC/X86/LWP-32.s b/test/MC/X86/LWP-32.s
new file mode 100644
index 0000000..d91d16c
--- /dev/null
+++ b/test/MC/X86/LWP-32.s
@@ -0,0 +1,65 @@
+// RUN: llvm-mc -triple i386-unknown-unknown --show-encoding %s | FileCheck %s
+
+// CHECK: llwpcb %eax
+// CHECK: encoding: [0x8f,0xe9,0x78,0x12,0xc0]
+llwpcb %eax
+
+// CHECK: lwpins $0, -485498096(%edx,%eax,4), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x84,0x82,0x10,0xe3,0x0f,0xe3,0x00,0x00,0x00,0x00]
+lwpins $0, -485498096(%edx,%eax,4), %edx
+
+// CHECK: lwpins $0, 485498096(%edx,%eax,4), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x84,0x82,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpins $0, 485498096(%edx,%eax,4), %edx
+
+// CHECK: lwpins $0, 485498096(%edx), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x82,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpins $0, 485498096(%edx), %edx
+
+// CHECK: lwpins $0, 485498096, %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x05,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpins $0, 485498096, %edx
+
+// CHECK: lwpins $0, 64(%edx,%eax), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x44,0x02,0x40,0x00,0x00,0x00,0x00]
+lwpins $0, 64(%edx,%eax), %edx
+
+// CHECK: lwpins $0, %eax, %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0xc0,0x00,0x00,0x00,0x00]
+lwpins $0, %eax, %edx
+
+// CHECK: lwpins $0, (%edx), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x02,0x00,0x00,0x00,0x00]
+lwpins $0, (%edx), %edx
+
+// CHECK: lwpval $0, -485498096(%edx,%eax,4), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00,0x00,0x00,0x00]
+lwpval $0, -485498096(%edx,%eax,4), %edx
+
+// CHECK: lwpval $0, 485498096(%edx,%eax,4), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpval $0, 485498096(%edx,%eax,4), %edx
+
+// CHECK: lwpval $0, 485498096(%edx), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x8a,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpval $0, 485498096(%edx), %edx
+
+// CHECK: lwpval $0, 485498096, %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x0d,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpval $0, 485498096, %edx
+
+// CHECK: lwpval $0, 64(%edx,%eax), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x4c,0x02,0x40,0x00,0x00,0x00,0x00]
+lwpval $0, 64(%edx,%eax), %edx
+
+// CHECK: lwpval $0, %eax, %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0xc8,0x00,0x00,0x00,0x00]
+lwpval $0, %eax, %edx
+
+// CHECK: lwpval $0, (%edx), %edx
+// CHECK: encoding: [0x8f,0xea,0x68,0x12,0x0a,0x00,0x00,0x00,0x00]
+lwpval $0, (%edx), %edx
+
+// CHECK: slwpcb %eax
+// CHECK: encoding: [0x8f,0xe9,0x78,0x12,0xc8]
+slwpcb %eax
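
Every LWP encoding in this file starts with the 3-byte AMD XOP prefix: 0x8F, a map byte, then a W/vvvv/L/pp byte, with the destination register carried inverted in vvvv. A reader's aid for picking those fields apart, not a real decoder; e.g. in "lwpins $0, ..., %edx" (0x8f,0xea,0x68,...) the map is 0x0A and vvvv decodes to register 2, %edx:

    #include <cstdint>

    struct XopPrefix {
      uint8_t Map;   // low 5 bits of byte 1 (0x09 or 0x0A for LWP)
      bool    W;     // bit 7 of byte 2: 64-bit operand form
      uint8_t Vvvv;  // bits 6..3 of byte 2, stored inverted
      uint8_t Pp;    // low 2 bits of byte 2
    };

    XopPrefix decodeXop(uint8_t B1, uint8_t B2) {
      return {uint8_t(B1 & 0x1F), bool(B2 & 0x80),
              uint8_t(~(B2 >> 3) & 0xF), uint8_t(B2 & 0x3)};
    }

The same decode explains the 64-bit forms in LWP-64.s below, where byte 2 of 0x8f,0xea,0x80 sets W and vvvv decodes to 15, %r15.
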
diff --git a/test/MC/X86/LWP-64.s b/test/MC/X86/LWP-64.s
new file mode 100644
index 0000000..e1971c9
--- /dev/null
+++ b/test/MC/X86/LWP-64.s
@@ -0,0 +1,129 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
+
+// CHECK: llwpcb %r13d
+// CHECK: encoding: [0x8f,0xc9,0x78,0x12,0xc5]
+llwpcb %r13d
+
+// CHECK: llwpcb %r13
+// CHECK: encoding: [0x8f,0xc9,0xf8,0x12,0xc5]
+llwpcb %r13
+
+// CHECK: lwpins $0, 485498096, %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x04,0x25,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpins $0, 485498096, %r15d
+
+// CHECK: lwpins $0, 485498096, %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x04,0x25,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpins $0, 485498096, %r15
+
+// CHECK: lwpins $0, 64(%rdx), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x42,0x40,0x00,0x00,0x00,0x00]
+lwpins $0, 64(%rdx), %r15d
+
+// CHECK: lwpins $0, 64(%rdx), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x42,0x40,0x00,0x00,0x00,0x00]
+lwpins $0, 64(%rdx), %r15
+
+// CHECK: lwpins $0, 64(%rdx,%rax,4), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x44,0x82,0x40,0x00,0x00,0x00,0x00]
+lwpins $0, 64(%rdx,%rax,4), %r15d
+
+// CHECK: lwpins $0, 64(%rdx,%rax,4), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x44,0x82,0x40,0x00,0x00,0x00,0x00]
+lwpins $0, 64(%rdx,%rax,4), %r15
+
+// CHECK: lwpins $0, -64(%rdx,%rax,4), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x44,0x82,0xc0,0x00,0x00,0x00,0x00]
+lwpins $0, -64(%rdx,%rax,4), %r15d
+
+// CHECK: lwpins $0, -64(%rdx,%rax,4), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x44,0x82,0xc0,0x00,0x00,0x00,0x00]
+lwpins $0, -64(%rdx,%rax,4), %r15
+
+// CHECK: lwpins $0, 64(%rdx,%rax), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x44,0x02,0x40,0x00,0x00,0x00,0x00]
+lwpins $0, 64(%rdx,%rax), %r15d
+
+// CHECK: lwpins $0, 64(%rdx,%rax), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x44,0x02,0x40,0x00,0x00,0x00,0x00]
+lwpins $0, 64(%rdx,%rax), %r15
+
+// CHECK: lwpins $0, %r13d, %r15d
+// CHECK: encoding: [0x8f,0xca,0x00,0x12,0xc5,0x00,0x00,0x00,0x00]
+lwpins $0, %r13d, %r15d
+
+// CHECK: lwpins $0, %r13d, %r15
+// CHECK: encoding: [0x8f,0xca,0x80,0x12,0xc5,0x00,0x00,0x00,0x00]
+lwpins $0, %r13d, %r15
+
+// CHECK: lwpins $0, (%rdx), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x02,0x00,0x00,0x00,0x00]
+lwpins $0, (%rdx), %r15d
+
+// CHECK: lwpins $0, (%rdx), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x02,0x00,0x00,0x00,0x00]
+lwpins $0, (%rdx), %r15
+
+// CHECK: lwpval $0, 485498096, %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpval $0, 485498096, %r15d
+
+// CHECK: lwpval $0, 485498096, %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x00,0x00,0x00,0x00]
+lwpval $0, 485498096, %r15
+
+// CHECK: lwpval $0, 64(%rdx), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x4a,0x40,0x00,0x00,0x00,0x00]
+lwpval $0, 64(%rdx), %r15d
+
+// CHECK: lwpval $0, 64(%rdx), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x4a,0x40,0x00,0x00,0x00,0x00]
+lwpval $0, 64(%rdx), %r15
+
+// CHECK: lwpval $0, 64(%rdx,%rax,4), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x4c,0x82,0x40,0x00,0x00,0x00,0x00]
+lwpval $0, 64(%rdx,%rax,4), %r15d
+
+// CHECK: lwpval $0, 64(%rdx,%rax,4), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x4c,0x82,0x40,0x00,0x00,0x00,0x00]
+lwpval $0, 64(%rdx,%rax,4), %r15
+
+// CHECK: lwpval $0, -64(%rdx,%rax,4), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x4c,0x82,0xc0,0x00,0x00,0x00,0x00]
+lwpval $0, -64(%rdx,%rax,4), %r15d
+
+// CHECK: lwpval $0, -64(%rdx,%rax,4), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x4c,0x82,0xc0,0x00,0x00,0x00,0x00]
+lwpval $0, -64(%rdx,%rax,4), %r15
+
+// CHECK: lwpval $0, 64(%rdx,%rax), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x4c,0x02,0x40,0x00,0x00,0x00,0x00]
+lwpval $0, 64(%rdx,%rax), %r15d
+
+// CHECK: lwpval $0, 64(%rdx,%rax), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x4c,0x02,0x40,0x00,0x00,0x00,0x00]
+lwpval $0, 64(%rdx,%rax), %r15
+
+// CHECK: lwpval $0, %r13d, %r15d
+// CHECK: encoding: [0x8f,0xca,0x00,0x12,0xcd,0x00,0x00,0x00,0x00]
+lwpval $0, %r13d, %r15d
+
+// CHECK: lwpval $0, %r13d, %r15
+// CHECK: encoding: [0x8f,0xca,0x80,0x12,0xcd,0x00,0x00,0x00,0x00]
+lwpval $0, %r13d, %r15
+
+// CHECK: lwpval $0, (%rdx), %r15d
+// CHECK: encoding: [0x8f,0xea,0x00,0x12,0x0a,0x00,0x00,0x00,0x00]
+lwpval $0, (%rdx), %r15d
+
+// CHECK: lwpval $0, (%rdx), %r15
+// CHECK: encoding: [0x8f,0xea,0x80,0x12,0x0a,0x00,0x00,0x00,0x00]
+lwpval $0, (%rdx), %r15
+
+// CHECK: slwpcb %r13d
+// CHECK: encoding: [0x8f,0xc9,0x78,0x12,0xcd]
+slwpcb %r13d
+
+// CHECK: slwpcb %r13
+// CHECK: encoding: [0x8f,0xc9,0xf8,0x12,0xcd]
+slwpcb %r13
diff --git a/test/MC/X86/XOP-32.s b/test/MC/X86/XOP-32.s
new file mode 100644
index 0000000..669b3f4
--- /dev/null
+++ b/test/MC/X86/XOP-32.s
@@ -0,0 +1,2250 @@
+// RUN: llvm-mc -triple i386-unknown-unknown --show-encoding %s | FileCheck %s
+
+// CHECK: vfrczpd -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vfrczpd -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczpd 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczpd -485498096(%edx,%eax,4), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
+vfrczpd -485498096(%edx,%eax,4), %ymm4
+
+// CHECK: vfrczpd 485498096(%edx,%eax,4), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096(%edx,%eax,4), %ymm4
+
+// CHECK: vfrczpd 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x8a,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096(%edx), %xmm1
+
+// CHECK: vfrczpd 485498096(%edx), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0xa2,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096(%edx), %ymm4
+
+// CHECK: vfrczpd 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x0d,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096, %xmm1
+
+// CHECK: vfrczpd 485498096, %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096, %ymm4
+
+// CHECK: vfrczpd 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x4c,0x02,0x40]
+vfrczpd 64(%edx,%eax), %xmm1
+
+// CHECK: vfrczpd 64(%edx,%eax), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x64,0x02,0x40]
+vfrczpd 64(%edx,%eax), %ymm4
+
+// CHECK: vfrczpd (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x0a]
+vfrczpd (%edx), %xmm1
+
+// CHECK: vfrczpd (%edx), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x22]
+vfrczpd (%edx), %ymm4
+
+// CHECK: vfrczpd %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0xc9]
+vfrczpd %xmm1, %xmm1
+
+// CHECK: vfrczpd %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0xe4]
+vfrczpd %ymm4, %ymm4
+
+// CHECK: vfrczps -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vfrczps -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczps 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczps -485498096(%edx,%eax,4), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0xa4,0x82,0x10,0xe3,0x0f,0xe3]
+vfrczps -485498096(%edx,%eax,4), %ymm4
+
+// CHECK: vfrczps 485498096(%edx,%eax,4), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0xa4,0x82,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096(%edx,%eax,4), %ymm4
+
+// CHECK: vfrczps 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x8a,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096(%edx), %xmm1
+
+// CHECK: vfrczps 485498096(%edx), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0xa2,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096(%edx), %ymm4
+
+// CHECK: vfrczps 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x0d,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096, %xmm1
+
+// CHECK: vfrczps 485498096, %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096, %ymm4
+
+// CHECK: vfrczps 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x4c,0x02,0x40]
+vfrczps 64(%edx,%eax), %xmm1
+
+// CHECK: vfrczps 64(%edx,%eax), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x64,0x02,0x40]
+vfrczps 64(%edx,%eax), %ymm4
+
+// CHECK: vfrczps (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x0a]
+vfrczps (%edx), %xmm1
+
+// CHECK: vfrczps (%edx), %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x22]
+vfrczps (%edx), %ymm4
+
+// CHECK: vfrczps %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0xc9]
+vfrczps %xmm1, %xmm1
+
+// CHECK: vfrczps %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0xe4]
+vfrczps %ymm4, %ymm4
+
+// CHECK: vfrczsd -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vfrczsd -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczsd 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vfrczsd 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczsd 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x8a,0xf0,0x1c,0xf0,0x1c]
+vfrczsd 485498096(%edx), %xmm1
+
+// CHECK: vfrczsd 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x0d,0xf0,0x1c,0xf0,0x1c]
+vfrczsd 485498096, %xmm1
+
+// CHECK: vfrczsd 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x4c,0x02,0x40]
+vfrczsd 64(%edx,%eax), %xmm1
+
+// CHECK: vfrczsd (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x0a]
+vfrczsd (%edx), %xmm1
+
+// CHECK: vfrczsd %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0xc9]
+vfrczsd %xmm1, %xmm1
+
+// CHECK: vfrczss -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vfrczss -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczss 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vfrczss 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vfrczss 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x8a,0xf0,0x1c,0xf0,0x1c]
+vfrczss 485498096(%edx), %xmm1
+
+// CHECK: vfrczss 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x0d,0xf0,0x1c,0xf0,0x1c]
+vfrczss 485498096, %xmm1
+
+// CHECK: vfrczss 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x4c,0x02,0x40]
+vfrczss 64(%edx,%eax), %xmm1
+
+// CHECK: vfrczss (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x0a]
+vfrczss (%edx), %xmm1
+
+// CHECK: vfrczss %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0xc9]
+vfrczss %xmm1, %xmm1
+
+// CHECK: vpcmov -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa2,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpcmov -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcmov 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa2,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpcmov 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcmov -485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0xdc,0xa2,0xa4,0x82,0x10,0xe3,0x0f,0xe3,0x40]
+vpcmov -485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpcmov 485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0xdc,0xa2,0xa4,0x82,0xf0,0x1c,0xf0,0x1c,0x40]
+vpcmov 485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpcmov 485498096(%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa2,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpcmov 485498096(%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcmov 485498096(%edx), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0xdc,0xa2,0xa2,0xf0,0x1c,0xf0,0x1c,0x40]
+vpcmov 485498096(%edx), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpcmov 485498096, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa2,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpcmov 485498096, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcmov 485498096, %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0xdc,0xa2,0x25,0xf0,0x1c,0xf0,0x1c,0x40]
+vpcmov 485498096, %ymm4, %ymm4, %ymm4
+
+// CHECK: vpcmov 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa2,0x4c,0x02,0x40,0x10]
+vpcmov 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcmov 64(%edx,%eax), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0xdc,0xa2,0x64,0x02,0x40,0x40]
+vpcmov 64(%edx,%eax), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpcmov (%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa2,0x0a,0x10]
+vpcmov (%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcmov (%edx), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0xdc,0xa2,0x22,0x40]
+vpcmov (%edx), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpcmov %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa2,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpcmov %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcmov %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa2,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpcmov %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcmov %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa2,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpcmov %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcmov %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa2,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpcmov %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpcmov %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa2,0x4c,0x02,0x40,0x10]
+vpcmov %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcmov %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa2,0x0a,0x10]
+vpcmov %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpcmov %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa2,0xc9,0x10]
+vpcmov %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcmov %ymm4, -485498096(%edx,%eax,4), %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0x5c,0xa2,0xa4,0x82,0x10,0xe3,0x0f,0xe3,0x40]
+vpcmov %ymm4, -485498096(%edx,%eax,4), %ymm4, %ymm4
+
+// CHECK: vpcmov %ymm4, 485498096(%edx,%eax,4), %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0x5c,0xa2,0xa4,0x82,0xf0,0x1c,0xf0,0x1c,0x40]
+vpcmov %ymm4, 485498096(%edx,%eax,4), %ymm4, %ymm4
+
+// CHECK: vpcmov %ymm4, 485498096(%edx), %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0x5c,0xa2,0xa2,0xf0,0x1c,0xf0,0x1c,0x40]
+vpcmov %ymm4, 485498096(%edx), %ymm4, %ymm4
+
+// CHECK: vpcmov %ymm4, 485498096, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0x5c,0xa2,0x25,0xf0,0x1c,0xf0,0x1c,0x40]
+vpcmov %ymm4, 485498096, %ymm4, %ymm4
+
+// CHECK: vpcmov %ymm4, 64(%edx,%eax), %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0x5c,0xa2,0x64,0x02,0x40,0x40]
+vpcmov %ymm4, 64(%edx,%eax), %ymm4, %ymm4
+
+// CHECK: vpcmov %ymm4, (%edx), %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0x5c,0xa2,0x22,0x40]
+vpcmov %ymm4, (%edx), %ymm4, %ymm4
+
+// CHECK: vpcmov %ymm4, %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0x8f,0xe8,0x5c,0xa2,0xe4,0x40]
+vpcmov %ymm4, %ymm4, %ymm4, %ymm4
+
+// CHECK: vpcomltb -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcc,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltb -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltb 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcc,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltb 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltb 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcc,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltb 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltb 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcc,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltb 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltb 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcc,0x4c,0x02,0x40,0x00]
+vpcomltb 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltb (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcc,0x0a,0x00]
+vpcomltb (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltb %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcc,0xc9,0x00]
+vpcomltb %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcomltd -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xce,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltd -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltd 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xce,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltd 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltd 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xce,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltd 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltd 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xce,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltd 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltd 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xce,0x4c,0x02,0x40,0x00]
+vpcomltd 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltd (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xce,0x0a,0x00]
+vpcomltd (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltd %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xce,0xc9,0x00]
+vpcomltd %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcomltq -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcf,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltq -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltq 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcf,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltq 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltq 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcf,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltq 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltq 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcf,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltq 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltq 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcf,0x4c,0x02,0x40,0x00]
+vpcomltq 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltq (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcf,0x0a,0x00]
+vpcomltq (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltq %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcf,0xc9,0x00]
+vpcomltq %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcomltub -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xec,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltub -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltub 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xec,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltub 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltub 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xec,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltub 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltub 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xec,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltub 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltub 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xec,0x4c,0x02,0x40,0x00]
+vpcomltub 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltub (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xec,0x0a,0x00]
+vpcomltub (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltub %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xec,0xc9,0x00]
+vpcomltub %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcomltud -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xee,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltud -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltud 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xee,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltud 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltud 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xee,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltud 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltud 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xee,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltud 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltud 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xee,0x4c,0x02,0x40,0x00]
+vpcomltud 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltud (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xee,0x0a,0x00]
+vpcomltud (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltud %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xee,0xc9,0x00]
+vpcomltud %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcomltuq -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xef,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltuq -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltuq 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xef,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuq 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltuq 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xef,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuq 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltuq 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xef,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuq 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltuq 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xef,0x4c,0x02,0x40,0x00]
+vpcomltuq 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltuq (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xef,0x0a,0x00]
+vpcomltuq (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltuq %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xef,0xc9,0x00]
+vpcomltuq %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcomltuw -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xed,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltuw -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltuw 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xed,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuw 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltuw 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xed,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuw 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltuw 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xed,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuw 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltuw 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xed,0x4c,0x02,0x40,0x00]
+vpcomltuw 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltuw (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xed,0x0a,0x00]
+vpcomltuw (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltuw %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xed,0xc9,0x00]
+vpcomltuw %xmm1, %xmm1, %xmm1
+
+// CHECK: vpcomltw -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcd,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vpcomltw -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltw 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcd,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltw 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpcomltw 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcd,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltw 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltw 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcd,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltw 485498096, %xmm1, %xmm1
+
+// CHECK: vpcomltw 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcd,0x4c,0x02,0x40,0x00]
+vpcomltw 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpcomltw (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcd,0x0a,0x00]
+vpcomltw (%edx), %xmm1, %xmm1
+
+// CHECK: vpcomltw %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xcd,0xc9,0x00]
+vpcomltw %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x49,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpermil2pd $0, -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x49,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2pd $0, 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, -485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x49,0xa4,0x82,0x10,0xe3,0x0f,0xe3,0x40]
+vpermil2pd $0, -485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, 485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x49,0xa4,0x82,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2pd $0, 485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, 485498096(%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x49,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2pd $0, 485498096(%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, 485498096(%edx), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x49,0xa2,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2pd $0, 485498096(%edx), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, 485498096, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x49,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2pd $0, 485498096, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, 485498096, %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x49,0x25,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2pd $0, 485498096, %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x49,0x4c,0x02,0x40,0x10]
+vpermil2pd $0, 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, 64(%edx,%eax), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x49,0x64,0x02,0x40,0x40]
+vpermil2pd $0, 64(%edx,%eax), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, (%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x49,0x0a,0x10]
+vpermil2pd $0, (%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, (%edx), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x49,0x22,0x40]
+vpermil2pd $0, (%edx), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x49,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpermil2pd $0, %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x49,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2pd $0, %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x49,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2pd $0, %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x49,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2pd $0, %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x49,0x4c,0x02,0x40,0x10]
+vpermil2pd $0, %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x49,0x0a,0x10]
+vpermil2pd $0, %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x49,0xc9,0x10]
+vpermil2pd $0, %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2pd $0, %ymm4, -485498096(%edx,%eax,4), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x49,0xa4,0x82,0x10,0xe3,0x0f,0xe3,0x40]
+vpermil2pd $0, %ymm4, -485498096(%edx,%eax,4), %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, %ymm4, 485498096(%edx,%eax,4), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x49,0xa4,0x82,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2pd $0, %ymm4, 485498096(%edx,%eax,4), %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, %ymm4, 485498096(%edx), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x49,0xa2,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2pd $0, %ymm4, 485498096(%edx), %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, %ymm4, 485498096, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x49,0x25,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2pd $0, %ymm4, 485498096, %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, %ymm4, 64(%edx,%eax), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x49,0x64,0x02,0x40,0x40]
+vpermil2pd $0, %ymm4, 64(%edx,%eax), %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, %ymm4, (%edx), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x49,0x22,0x40]
+vpermil2pd $0, %ymm4, (%edx), %ymm4, %ymm4
+
+// CHECK: vpermil2pd $0, %ymm4, %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x49,0xe4,0x40]
+vpermil2pd $0, %ymm4, %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x48,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpermil2ps $0, -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x48,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2ps $0, 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, -485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x48,0xa4,0x82,0x10,0xe3,0x0f,0xe3,0x40]
+vpermil2ps $0, -485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, 485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x48,0xa4,0x82,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2ps $0, 485498096(%edx,%eax,4), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, 485498096(%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x48,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2ps $0, 485498096(%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, 485498096(%edx), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x48,0xa2,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2ps $0, 485498096(%edx), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, 485498096, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x48,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2ps $0, 485498096, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, 485498096, %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x48,0x25,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2ps $0, 485498096, %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x48,0x4c,0x02,0x40,0x10]
+vpermil2ps $0, 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, 64(%edx,%eax), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x48,0x64,0x02,0x40,0x40]
+vpermil2ps $0, 64(%edx,%eax), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, (%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0xf1,0x48,0x0a,0x10]
+vpermil2ps $0, (%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, (%edx), %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0xdd,0x48,0x22,0x40]
+vpermil2ps $0, (%edx), %ymm4, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x48,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpermil2ps $0, %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x48,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2ps $0, %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x48,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2ps $0, %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x48,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpermil2ps $0, %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x48,0x4c,0x02,0x40,0x10]
+vpermil2ps $0, %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x48,0x0a,0x10]
+vpermil2ps $0, %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x71,0x48,0xc9,0x10]
+vpermil2ps $0, %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpermil2ps $0, %ymm4, -485498096(%edx,%eax,4), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x48,0xa4,0x82,0x10,0xe3,0x0f,0xe3,0x40]
+vpermil2ps $0, %ymm4, -485498096(%edx,%eax,4), %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, %ymm4, 485498096(%edx,%eax,4), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x48,0xa4,0x82,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2ps $0, %ymm4, 485498096(%edx,%eax,4), %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, %ymm4, 485498096(%edx), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x48,0xa2,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2ps $0, %ymm4, 485498096(%edx), %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, %ymm4, 485498096, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x48,0x25,0xf0,0x1c,0xf0,0x1c,0x40]
+vpermil2ps $0, %ymm4, 485498096, %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, %ymm4, 64(%edx,%eax), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x48,0x64,0x02,0x40,0x40]
+vpermil2ps $0, %ymm4, 64(%edx,%eax), %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, %ymm4, (%edx), %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x48,0x22,0x40]
+vpermil2ps $0, %ymm4, (%edx), %ymm4, %ymm4
+
+// CHECK: vpermil2ps $0, %ymm4, %ymm4, %ymm4, %ymm4
+// CHECK: encoding: [0xc4,0xe3,0x5d,0x48,0xe4,0x40]
+vpermil2ps $0, %ymm4, %ymm4, %ymm4, %ymm4
+
+// CHECK: vphaddbd -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddbd -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddbd 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddbd 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddbd 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddbd 485498096(%edx), %xmm1
+
+// CHECK: vphaddbd 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddbd 485498096, %xmm1
+
+// CHECK: vphaddbd 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x4c,0x02,0x40]
+vphaddbd 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddbd (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x0a]
+vphaddbd (%edx), %xmm1
+
+// CHECK: vphaddbd %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0xc9]
+vphaddbd %xmm1, %xmm1
+
+// CHECK: vphaddbq -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddbq -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddbq 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddbq 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddbq 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddbq 485498096(%edx), %xmm1
+
+// CHECK: vphaddbq 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddbq 485498096, %xmm1
+
+// CHECK: vphaddbq 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x4c,0x02,0x40]
+vphaddbq 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddbq (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x0a]
+vphaddbq (%edx), %xmm1
+
+// CHECK: vphaddbq %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0xc9]
+vphaddbq %xmm1, %xmm1
+
+// CHECK: vphaddbw -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddbw -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddbw 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddbw 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddbw 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddbw 485498096(%edx), %xmm1
+
+// CHECK: vphaddbw 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddbw 485498096, %xmm1
+
+// CHECK: vphaddbw 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x4c,0x02,0x40]
+vphaddbw 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddbw (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x0a]
+vphaddbw (%edx), %xmm1
+
+// CHECK: vphaddbw %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0xc9]
+vphaddbw %xmm1, %xmm1
+
+// CHECK: vphadddq -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphadddq -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphadddq 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphadddq 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphadddq 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphadddq 485498096(%edx), %xmm1
+
+// CHECK: vphadddq 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphadddq 485498096, %xmm1
+
+// CHECK: vphadddq 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x4c,0x02,0x40]
+vphadddq 64(%edx,%eax), %xmm1
+
+// CHECK: vphadddq (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x0a]
+vphadddq (%edx), %xmm1
+
+// CHECK: vphadddq %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0xc9]
+vphadddq %xmm1, %xmm1
+
+// CHECK: vphaddubd -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddubd -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddubd 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddubd 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddubd 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddubd 485498096(%edx), %xmm1
+
+// CHECK: vphaddubd 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddubd 485498096, %xmm1
+
+// CHECK: vphaddubd 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x4c,0x02,0x40]
+vphaddubd 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddubd (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x0a]
+vphaddubd (%edx), %xmm1
+
+// CHECK: vphaddubd %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0xc9]
+vphaddubd %xmm1, %xmm1
+
+// CHECK: vphaddubq -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddubq -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddubq 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddubq 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddubq 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddubq 485498096(%edx), %xmm1
+
+// CHECK: vphaddubq 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddubq 485498096, %xmm1
+
+// CHECK: vphaddubq 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x4c,0x02,0x40]
+vphaddubq 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddubq (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x0a]
+vphaddubq (%edx), %xmm1
+
+// CHECK: vphaddubq %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0xc9]
+vphaddubq %xmm1, %xmm1
+
+// CHECK: vphaddubw -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddubw -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddubw 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddubw 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddubw 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddubw 485498096(%edx), %xmm1
+
+// CHECK: vphaddubw 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddubw 485498096, %xmm1
+
+// CHECK: vphaddubw 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x4c,0x02,0x40]
+vphaddubw 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddubw (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x0a]
+vphaddubw (%edx), %xmm1
+
+// CHECK: vphaddubw %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0xc9]
+vphaddubw %xmm1, %xmm1
+
+// CHECK: vphaddudq -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddudq -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddudq 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddudq 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddudq 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddudq 485498096(%edx), %xmm1
+
+// CHECK: vphaddudq 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddudq 485498096, %xmm1
+
+// CHECK: vphaddudq 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x4c,0x02,0x40]
+vphaddudq 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddudq (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x0a]
+vphaddudq (%edx), %xmm1
+
+// CHECK: vphaddudq %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0xc9]
+vphaddudq %xmm1, %xmm1
+
+// CHECK: vphadduwd -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphadduwd -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphadduwd 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphadduwd 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphadduwd 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphadduwd 485498096(%edx), %xmm1
+
+// CHECK: vphadduwd 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphadduwd 485498096, %xmm1
+
+// CHECK: vphadduwd 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x4c,0x02,0x40]
+vphadduwd 64(%edx,%eax), %xmm1
+
+// CHECK: vphadduwd (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x0a]
+vphadduwd (%edx), %xmm1
+
+// CHECK: vphadduwd %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0xc9]
+vphadduwd %xmm1, %xmm1
+
+// CHECK: vphadduwq -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphadduwq -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphadduwq 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphadduwq 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphadduwq 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphadduwq 485498096(%edx), %xmm1
+
+// CHECK: vphadduwq 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphadduwq 485498096, %xmm1
+
+// CHECK: vphadduwq 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x4c,0x02,0x40]
+vphadduwq 64(%edx,%eax), %xmm1
+
+// CHECK: vphadduwq (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x0a]
+vphadduwq (%edx), %xmm1
+
+// CHECK: vphadduwq %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0xc9]
+vphadduwq %xmm1, %xmm1
+
+// CHECK: vphaddwd -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddwd -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddwd 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddwd 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddwd 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddwd 485498096(%edx), %xmm1
+
+// CHECK: vphaddwd 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddwd 485498096, %xmm1
+
+// CHECK: vphaddwd 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x4c,0x02,0x40]
+vphaddwd 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddwd (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x0a]
+vphaddwd (%edx), %xmm1
+
+// CHECK: vphaddwd %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0xc9]
+vphaddwd %xmm1, %xmm1
+
+// CHECK: vphaddwq -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphaddwq -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddwq 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphaddwq 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphaddwq 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphaddwq 485498096(%edx), %xmm1
+
+// CHECK: vphaddwq 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphaddwq 485498096, %xmm1
+
+// CHECK: vphaddwq 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x4c,0x02,0x40]
+vphaddwq 64(%edx,%eax), %xmm1
+
+// CHECK: vphaddwq (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x0a]
+vphaddwq (%edx), %xmm1
+
+// CHECK: vphaddwq %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0xc9]
+vphaddwq %xmm1, %xmm1
+
+// CHECK: vphsubbw -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphsubbw -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphsubbw 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphsubbw 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphsubbw 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphsubbw 485498096(%edx), %xmm1
+
+// CHECK: vphsubbw 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphsubbw 485498096, %xmm1
+
+// CHECK: vphsubbw 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x4c,0x02,0x40]
+vphsubbw 64(%edx,%eax), %xmm1
+
+// CHECK: vphsubbw (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x0a]
+vphsubbw (%edx), %xmm1
+
+// CHECK: vphsubbw %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0xc9]
+vphsubbw %xmm1, %xmm1
+
+// CHECK: vphsubdq -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphsubdq -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphsubdq 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphsubdq 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphsubdq 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphsubdq 485498096(%edx), %xmm1
+
+// CHECK: vphsubdq 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphsubdq 485498096, %xmm1
+
+// CHECK: vphsubdq 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x4c,0x02,0x40]
+vphsubdq 64(%edx,%eax), %xmm1
+
+// CHECK: vphsubdq (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x0a]
+vphsubdq (%edx), %xmm1
+
+// CHECK: vphsubdq %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0xc9]
+vphsubdq %xmm1, %xmm1
+
+// CHECK: vphsubwd -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vphsubwd -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphsubwd 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vphsubwd 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vphsubwd 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x8a,0xf0,0x1c,0xf0,0x1c]
+vphsubwd 485498096(%edx), %xmm1
+
+// CHECK: vphsubwd 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x0d,0xf0,0x1c,0xf0,0x1c]
+vphsubwd 485498096, %xmm1
+
+// CHECK: vphsubwd 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x4c,0x02,0x40]
+vphsubwd 64(%edx,%eax), %xmm1
+
+// CHECK: vphsubwd (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x0a]
+vphsubwd (%edx), %xmm1
+
+// CHECK: vphsubwd %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0xc9]
+vphsubwd %xmm1, %xmm1
+
+// CHECK: vpmacsdd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9e,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacsdd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsdd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9e,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsdd %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9e,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdd %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsdd %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9e,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdd %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacsdd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9e,0x4c,0x02,0x40,0x10]
+vpmacsdd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacsdd %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9e,0x0a,0x10]
+vpmacsdd %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsdd %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9e,0xc9,0x10]
+vpmacsdd %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacsdqh %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9f,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacsdqh %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsdqh %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9f,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdqh %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsdqh %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9f,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdqh %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsdqh %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9f,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdqh %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacsdqh %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9f,0x4c,0x02,0x40,0x10]
+vpmacsdqh %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacsdqh %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9f,0x0a,0x10]
+vpmacsdqh %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsdqh %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x9f,0xc9,0x10]
+vpmacsdqh %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacsdql %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x97,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacsdql %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsdql %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x97,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdql %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsdql %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x97,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdql %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsdql %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x97,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsdql %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacsdql %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x97,0x4c,0x02,0x40,0x10]
+vpmacsdql %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacsdql %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x97,0x0a,0x10]
+vpmacsdql %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsdql %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x97,0xc9,0x10]
+vpmacsdql %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacssdd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8e,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacssdd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssdd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8e,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssdd %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8e,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdd %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssdd %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8e,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdd %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacssdd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8e,0x4c,0x02,0x40,0x10]
+vpmacssdd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacssdd %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8e,0x0a,0x10]
+vpmacssdd %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssdd %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8e,0xc9,0x10]
+vpmacssdd %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacssdqh %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8f,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacssdqh %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssdqh %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8f,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdqh %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssdqh %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8f,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdqh %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssdqh %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8f,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdqh %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacssdqh %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8f,0x4c,0x02,0x40,0x10]
+vpmacssdqh %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacssdqh %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8f,0x0a,0x10]
+vpmacssdqh %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssdqh %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x8f,0xc9,0x10]
+vpmacssdqh %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacssdql %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x87,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacssdql %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssdql %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x87,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdql %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssdql %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x87,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdql %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssdql %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x87,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssdql %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacssdql %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x87,0x4c,0x02,0x40,0x10]
+vpmacssdql %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacssdql %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x87,0x0a,0x10]
+vpmacssdql %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssdql %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x87,0xc9,0x10]
+vpmacssdql %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacsswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x86,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacsswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x86,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x86,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsswd %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x86,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsswd %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacsswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x86,0x4c,0x02,0x40,0x10]
+vpmacsswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacsswd %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x86,0x0a,0x10]
+vpmacsswd %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsswd %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x86,0xc9,0x10]
+vpmacsswd %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacssww %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x85,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacssww %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssww %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x85,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssww %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacssww %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x85,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssww %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssww %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x85,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacssww %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacssww %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x85,0x4c,0x02,0x40,0x10]
+vpmacssww %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacssww %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x85,0x0a,0x10]
+vpmacssww %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacssww %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x85,0xc9,0x10]
+vpmacssww %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x96,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x96,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x96,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacswd %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x96,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacswd %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x96,0x4c,0x02,0x40,0x10]
+vpmacswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacswd %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x96,0x0a,0x10]
+vpmacswd %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacswd %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x96,0xc9,0x10]
+vpmacswd %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmacsww %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x95,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmacsww %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsww %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x95,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsww %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmacsww %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x95,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsww %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsww %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x95,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmacsww %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmacsww %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x95,0x4c,0x02,0x40,0x10]
+vpmacsww %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmacsww %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x95,0x0a,0x10]
+vpmacsww %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmacsww %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0x95,0xc9,0x10]
+vpmacsww %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmadcsswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa6,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmadcsswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmadcsswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa6,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmadcsswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmadcsswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa6,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmadcsswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmadcsswd %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa6,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmadcsswd %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmadcsswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa6,0x4c,0x02,0x40,0x10]
+vpmadcsswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmadcsswd %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa6,0x0a,0x10]
+vpmadcsswd %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmadcsswd %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa6,0xc9,0x10]
+vpmadcsswd %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpmadcswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xb6,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpmadcswd %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmadcswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xb6,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmadcswd %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpmadcswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xb6,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmadcswd %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpmadcswd %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xb6,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpmadcswd %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpmadcswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xb6,0x4c,0x02,0x40,0x10]
+vpmadcswd %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpmadcswd %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xb6,0x0a,0x10]
+vpmadcswd %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpmadcswd %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xb6,0xc9,0x10]
+vpmadcswd %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpperm -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa3,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpperm -485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpperm 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa3,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpperm 485498096(%edx,%eax,4), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpperm 485498096(%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa3,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpperm 485498096(%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpperm 485498096, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa3,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpperm 485498096, %xmm1, %xmm1, %xmm1
+
+// CHECK: vpperm 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa3,0x4c,0x02,0x40,0x10]
+vpperm 64(%edx,%eax), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpperm (%edx), %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0xf0,0xa3,0x0a,0x10]
+vpperm (%edx), %xmm1, %xmm1, %xmm1
+
+// CHECK: vpperm %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa3,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x10]
+vpperm %xmm1, -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpperm %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa3,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x10]
+vpperm %xmm1, 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpperm %xmm1, 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa3,0x8a,0xf0,0x1c,0xf0,0x1c,0x10]
+vpperm %xmm1, 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpperm %xmm1, 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa3,0x0d,0xf0,0x1c,0xf0,0x1c,0x10]
+vpperm %xmm1, 485498096, %xmm1, %xmm1
+
+// CHECK: vpperm %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa3,0x4c,0x02,0x40,0x10]
+vpperm %xmm1, 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpperm %xmm1, (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa3,0x0a,0x10]
+vpperm %xmm1, (%edx), %xmm1, %xmm1
+
+// CHECK: vpperm %xmm1, %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x70,0xa3,0xc9,0x10]
+vpperm %xmm1, %xmm1, %xmm1, %xmm1
+
+// CHECK: vprotb $0, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vprotb $0, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotb $0, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotb $0, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotb $0, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotb $0, 485498096(%edx), %xmm1
+
+// CHECK: vprotb $0, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotb $0, 485498096, %xmm1
+
+// CHECK: vprotb $0, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x4c,0x02,0x40,0x00]
+vprotb $0, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotb $0, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x0a,0x00]
+vprotb $0, (%edx), %xmm1
+
+// CHECK: vprotb $0, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0xc9,0x00]
+vprotb $0, %xmm1, %xmm1
+
+// CHECK: vprotb -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x90,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotb -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotb 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x90,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotb 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotb 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x90,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotb 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vprotb 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x90,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotb 485498096, %xmm1, %xmm1
+
+// CHECK: vprotb 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x90,0x4c,0x02,0x40]
+vprotb 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vprotb (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x90,0x0a]
+vprotb (%edx), %xmm1, %xmm1
+
+// CHECK: vprotb %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x90,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotb %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotb %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x90,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotb %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotb %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x90,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotb %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vprotb %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x90,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotb %xmm1, 485498096, %xmm1
+
+// CHECK: vprotb %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x90,0x4c,0x02,0x40]
+vprotb %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotb %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x90,0x0a]
+vprotb %xmm1, (%edx), %xmm1
+
+// CHECK: vprotb %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x90,0xc9]
+vprotb %xmm1, %xmm1, %xmm1
+
+// CHECK: vprotd $0, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vprotd $0, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotd $0, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotd $0, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotd $0, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotd $0, 485498096(%edx), %xmm1
+
+// CHECK: vprotd $0, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotd $0, 485498096, %xmm1
+
+// CHECK: vprotd $0, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x4c,0x02,0x40,0x00]
+vprotd $0, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotd $0, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x0a,0x00]
+vprotd $0, (%edx), %xmm1
+
+// CHECK: vprotd $0, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0xc9,0x00]
+vprotd $0, %xmm1, %xmm1
+
+// CHECK: vprotd -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x92,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotd -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotd 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x92,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotd 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotd 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x92,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotd 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vprotd 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x92,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotd 485498096, %xmm1, %xmm1
+
+// CHECK: vprotd 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x92,0x4c,0x02,0x40]
+vprotd 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vprotd (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x92,0x0a]
+vprotd (%edx), %xmm1, %xmm1
+
+// CHECK: vprotd %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x92,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotd %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotd %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x92,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotd %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotd %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x92,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotd %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vprotd %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x92,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotd %xmm1, 485498096, %xmm1
+
+// CHECK: vprotd %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x92,0x4c,0x02,0x40]
+vprotd %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotd %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x92,0x0a]
+vprotd %xmm1, (%edx), %xmm1
+
+// CHECK: vprotd %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x92,0xc9]
+vprotd %xmm1, %xmm1, %xmm1
+
+// CHECK: vprotq $0, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vprotq $0, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotq $0, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotq $0, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotq $0, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotq $0, 485498096(%edx), %xmm1
+
+// CHECK: vprotq $0, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotq $0, 485498096, %xmm1
+
+// CHECK: vprotq $0, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x4c,0x02,0x40,0x00]
+vprotq $0, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotq $0, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x0a,0x00]
+vprotq $0, (%edx), %xmm1
+
+// CHECK: vprotq $0, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0xc9,0x00]
+vprotq $0, %xmm1, %xmm1
+
+// CHECK: vprotq -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x93,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotq -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotq 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x93,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotq 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotq 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x93,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotq 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vprotq 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x93,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotq 485498096, %xmm1, %xmm1
+
+// CHECK: vprotq 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x93,0x4c,0x02,0x40]
+vprotq 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vprotq (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x93,0x0a]
+vprotq (%edx), %xmm1, %xmm1
+
+// CHECK: vprotq %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x93,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotq %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotq %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x93,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotq %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotq %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x93,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotq %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vprotq %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x93,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotq %xmm1, 485498096, %xmm1
+
+// CHECK: vprotq %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x93,0x4c,0x02,0x40]
+vprotq %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotq %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x93,0x0a]
+vprotq %xmm1, (%edx), %xmm1
+
+// CHECK: vprotq %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x93,0xc9]
+vprotq %xmm1, %xmm1, %xmm1
+
+// CHECK: vprotw $0, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x8c,0x82,0x10,0xe3,0x0f,0xe3,0x00]
+vprotw $0, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotw $0, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x8c,0x82,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotw $0, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotw $0, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x8a,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotw $0, 485498096(%edx), %xmm1
+
+// CHECK: vprotw $0, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x0d,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotw $0, 485498096, %xmm1
+
+// CHECK: vprotw $0, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x4c,0x02,0x40,0x00]
+vprotw $0, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotw $0, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x0a,0x00]
+vprotw $0, (%edx), %xmm1
+
+// CHECK: vprotw $0, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0xc9,0x00]
+vprotw $0, %xmm1, %xmm1
+
+// CHECK: vprotw -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x91,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotw -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotw 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x91,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotw 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vprotw 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x91,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotw 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vprotw 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x91,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotw 485498096, %xmm1, %xmm1
+
+// CHECK: vprotw 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x91,0x4c,0x02,0x40]
+vprotw 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vprotw (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x91,0x0a]
+vprotw (%edx), %xmm1, %xmm1
+
+// CHECK: vprotw %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x91,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vprotw %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotw %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x91,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vprotw %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vprotw %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x91,0x8a,0xf0,0x1c,0xf0,0x1c]
+vprotw %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vprotw %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x91,0x0d,0xf0,0x1c,0xf0,0x1c]
+vprotw %xmm1, 485498096, %xmm1
+
+// CHECK: vprotw %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x91,0x4c,0x02,0x40]
+vprotw %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vprotw %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x91,0x0a]
+vprotw %xmm1, (%edx), %xmm1
+
+// CHECK: vprotw %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x91,0xc9]
+vprotw %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshab -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x98,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshab -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshab 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x98,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshab 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshab 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x98,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshab 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshab 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x98,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshab 485498096, %xmm1, %xmm1
+
+// CHECK: vpshab 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x98,0x4c,0x02,0x40]
+vpshab 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshab (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x98,0x0a]
+vpshab (%edx), %xmm1, %xmm1
+
+// CHECK: vpshab %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x98,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshab %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshab %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x98,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshab %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshab %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x98,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshab %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshab %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x98,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshab %xmm1, 485498096, %xmm1
+
+// CHECK: vpshab %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x98,0x4c,0x02,0x40]
+vpshab %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshab %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x98,0x0a]
+vpshab %xmm1, (%edx), %xmm1
+
+// CHECK: vpshab %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x98,0xc9]
+vpshab %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshad -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9a,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshad -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshad 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9a,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshad 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshad 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9a,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshad 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshad 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9a,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshad 485498096, %xmm1, %xmm1
+
+// CHECK: vpshad 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9a,0x4c,0x02,0x40]
+vpshad 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshad (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9a,0x0a]
+vpshad (%edx), %xmm1, %xmm1
+
+// CHECK: vpshad %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9a,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshad %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshad %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9a,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshad %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshad %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9a,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshad %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshad %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9a,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshad %xmm1, 485498096, %xmm1
+
+// CHECK: vpshad %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9a,0x4c,0x02,0x40]
+vpshad %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshad %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9a,0x0a]
+vpshad %xmm1, (%edx), %xmm1
+
+// CHECK: vpshad %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9a,0xc9]
+vpshad %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshaq -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9b,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshaq -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshaq 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9b,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshaq 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshaq 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9b,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshaq 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshaq 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9b,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshaq 485498096, %xmm1, %xmm1
+
+// CHECK: vpshaq 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9b,0x4c,0x02,0x40]
+vpshaq 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshaq (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x9b,0x0a]
+vpshaq (%edx), %xmm1, %xmm1
+
+// CHECK: vpshaq %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9b,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshaq %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshaq %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9b,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshaq %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshaq %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9b,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshaq %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshaq %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9b,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshaq %xmm1, 485498096, %xmm1
+
+// CHECK: vpshaq %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9b,0x4c,0x02,0x40]
+vpshaq %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshaq %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9b,0x0a]
+vpshaq %xmm1, (%edx), %xmm1
+
+// CHECK: vpshaq %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x9b,0xc9]
+vpshaq %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshaw -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x99,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshaw -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshaw 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x99,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshaw 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshaw 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x99,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshaw 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshaw 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x99,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshaw 485498096, %xmm1, %xmm1
+
+// CHECK: vpshaw 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x99,0x4c,0x02,0x40]
+vpshaw 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshaw (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x99,0x0a]
+vpshaw (%edx), %xmm1, %xmm1
+
+// CHECK: vpshaw %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x99,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshaw %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshaw %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x99,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshaw %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshaw %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x99,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshaw %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshaw %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x99,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshaw %xmm1, 485498096, %xmm1
+
+// CHECK: vpshaw %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x99,0x4c,0x02,0x40]
+vpshaw %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshaw %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x99,0x0a]
+vpshaw %xmm1, (%edx), %xmm1
+
+// CHECK: vpshaw %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x99,0xc9]
+vpshaw %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshlb -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x94,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshlb -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshlb 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x94,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshlb 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshlb 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x94,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshlb 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshlb 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x94,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshlb 485498096, %xmm1, %xmm1
+
+// CHECK: vpshlb 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x94,0x4c,0x02,0x40]
+vpshlb 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshlb (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x94,0x0a]
+vpshlb (%edx), %xmm1, %xmm1
+
+// CHECK: vpshlb %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x94,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshlb %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshlb %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x94,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshlb %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshlb %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x94,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshlb %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshlb %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x94,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshlb %xmm1, 485498096, %xmm1
+
+// CHECK: vpshlb %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x94,0x4c,0x02,0x40]
+vpshlb %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshlb %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x94,0x0a]
+vpshlb %xmm1, (%edx), %xmm1
+
+// CHECK: vpshlb %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x94,0xc9]
+vpshlb %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshld -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x96,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshld -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshld 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x96,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshld 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshld 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x96,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshld 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshld 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x96,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshld 485498096, %xmm1, %xmm1
+
+// CHECK: vpshld 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x96,0x4c,0x02,0x40]
+vpshld 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshld (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x96,0x0a]
+vpshld (%edx), %xmm1, %xmm1
+
+// CHECK: vpshld %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x96,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshld %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshld %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x96,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshld %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshld %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x96,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshld %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshld %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x96,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshld %xmm1, 485498096, %xmm1
+
+// CHECK: vpshld %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x96,0x4c,0x02,0x40]
+vpshld %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshld %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x96,0x0a]
+vpshld %xmm1, (%edx), %xmm1
+
+// CHECK: vpshld %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x96,0xc9]
+vpshld %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshlq -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x97,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshlq -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshlq 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x97,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshlq 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshlq 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x97,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshlq 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshlq 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x97,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshlq 485498096, %xmm1, %xmm1
+
+// CHECK: vpshlq 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x97,0x4c,0x02,0x40]
+vpshlq 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshlq (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x97,0x0a]
+vpshlq (%edx), %xmm1, %xmm1
+
+// CHECK: vpshlq %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x97,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshlq %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshlq %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x97,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshlq %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshlq %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x97,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshlq %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshlq %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x97,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshlq %xmm1, 485498096, %xmm1
+
+// CHECK: vpshlq %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x97,0x4c,0x02,0x40]
+vpshlq %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshlq %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x97,0x0a]
+vpshlq %xmm1, (%edx), %xmm1
+
+// CHECK: vpshlq %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x97,0xc9]
+vpshlq %xmm1, %xmm1, %xmm1
+
+// CHECK: vpshlw -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x95,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshlw -485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshlw 485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x95,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshlw 485498096(%edx,%eax,4), %xmm1, %xmm1
+
+// CHECK: vpshlw 485498096(%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x95,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshlw 485498096(%edx), %xmm1, %xmm1
+
+// CHECK: vpshlw 485498096, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x95,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshlw 485498096, %xmm1, %xmm1
+
+// CHECK: vpshlw 64(%edx,%eax), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x95,0x4c,0x02,0x40]
+vpshlw 64(%edx,%eax), %xmm1, %xmm1
+
+// CHECK: vpshlw (%edx), %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0xf0,0x95,0x0a]
+vpshlw (%edx), %xmm1, %xmm1
+
+// CHECK: vpshlw %xmm1, -485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x95,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+vpshlw %xmm1, -485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshlw %xmm1, 485498096(%edx,%eax,4), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x95,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
+vpshlw %xmm1, 485498096(%edx,%eax,4), %xmm1
+
+// CHECK: vpshlw %xmm1, 485498096(%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x95,0x8a,0xf0,0x1c,0xf0,0x1c]
+vpshlw %xmm1, 485498096(%edx), %xmm1
+
+// CHECK: vpshlw %xmm1, 485498096, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x95,0x0d,0xf0,0x1c,0xf0,0x1c]
+vpshlw %xmm1, 485498096, %xmm1
+
+// CHECK: vpshlw %xmm1, 64(%edx,%eax), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x95,0x4c,0x02,0x40]
+vpshlw %xmm1, 64(%edx,%eax), %xmm1
+
+// CHECK: vpshlw %xmm1, (%edx), %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x95,0x0a]
+vpshlw %xmm1, (%edx), %xmm1
+
+// CHECK: vpshlw %xmm1, %xmm1, %xmm1
+// CHECK: encoding: [0x8f,0xe9,0x70,0x95,0xc9]
+vpshlw %xmm1, %xmm1, %xmm1
+
diff --git a/test/MC/X86/XOP-64.s b/test/MC/X86/XOP-64.s
new file mode 100644
index 0000000..bdf108d
--- /dev/null
+++ b/test/MC/X86/XOP-64.s
@@ -0,0 +1,4502 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
+
+// CHECK: vfrczpd 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x81,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096, %xmm15
+
+// CHECK: vfrczpd 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096, %xmm6
+
+// CHECK: vfrczpd 485498096, %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096, %ymm7
+
+// CHECK: vfrczpd 485498096, %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x81,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczpd 485498096, %ymm9
+
+// CHECK: vfrczpd 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x81,0x7c,0x82,0x40]
+vfrczpd 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczpd -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x81,0x7c,0x82,0xc0]
+vfrczpd -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczpd 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x74,0x82,0x40]
+vfrczpd 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczpd -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x74,0x82,0xc0]
+vfrczpd -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczpd 64(%rdx,%rax,4), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x7c,0x82,0x40]
+vfrczpd 64(%rdx,%rax,4), %ymm7
+
+// CHECK: vfrczpd -64(%rdx,%rax,4), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x7c,0x82,0xc0]
+vfrczpd -64(%rdx,%rax,4), %ymm7
+
+// CHECK: vfrczpd 64(%rdx,%rax,4), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x81,0x4c,0x82,0x40]
+vfrczpd 64(%rdx,%rax,4), %ymm9
+
+// CHECK: vfrczpd -64(%rdx,%rax,4), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x81,0x4c,0x82,0xc0]
+vfrczpd -64(%rdx,%rax,4), %ymm9
+
+// CHECK: vfrczpd 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x81,0x7c,0x02,0x40]
+vfrczpd 64(%rdx,%rax), %xmm15
+
+// CHECK: vfrczpd 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x74,0x02,0x40]
+vfrczpd 64(%rdx,%rax), %xmm6
+
+// CHECK: vfrczpd 64(%rdx,%rax), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x7c,0x02,0x40]
+vfrczpd 64(%rdx,%rax), %ymm7
+
+// CHECK: vfrczpd 64(%rdx,%rax), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x81,0x4c,0x02,0x40]
+vfrczpd 64(%rdx,%rax), %ymm9
+
+// CHECK: vfrczpd 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x81,0x7a,0x40]
+vfrczpd 64(%rdx), %xmm15
+
+// CHECK: vfrczpd 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x72,0x40]
+vfrczpd 64(%rdx), %xmm6
+
+// CHECK: vfrczpd 64(%rdx), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x7a,0x40]
+vfrczpd 64(%rdx), %ymm7
+
+// CHECK: vfrczpd 64(%rdx), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x81,0x4a,0x40]
+vfrczpd 64(%rdx), %ymm9
+
+// CHECK: vfrczpd (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x81,0x3a]
+vfrczpd (%rdx), %xmm15
+
+// CHECK: vfrczpd (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0x32]
+vfrczpd (%rdx), %xmm6
+
+// CHECK: vfrczpd (%rdx), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0x3a]
+vfrczpd (%rdx), %ymm7
+
+// CHECK: vfrczpd (%rdx), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x81,0x0a]
+vfrczpd (%rdx), %ymm9
+
+// CHECK: vfrczpd %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0x81,0xff]
+vfrczpd %xmm15, %xmm15
+
+// CHECK: vfrczpd %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x81,0xf6]
+vfrczpd %xmm6, %xmm6
+
+// CHECK: vfrczpd %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x81,0xff]
+vfrczpd %ymm7, %ymm7
+
+// CHECK: vfrczpd %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x49,0x7c,0x81,0xc9]
+vfrczpd %ymm9, %ymm9
+
+// CHECK: vfrczps 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x80,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096, %xmm15
+
+// CHECK: vfrczps 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096, %xmm6
+
+// CHECK: vfrczps 485498096, %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096, %ymm7
+
+// CHECK: vfrczps 485498096, %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x80,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczps 485498096, %ymm9
+
+// CHECK: vfrczps 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x80,0x7c,0x82,0x40]
+vfrczps 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczps -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x80,0x7c,0x82,0xc0]
+vfrczps -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczps 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x74,0x82,0x40]
+vfrczps 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczps -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x74,0x82,0xc0]
+vfrczps -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczps 64(%rdx,%rax,4), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x7c,0x82,0x40]
+vfrczps 64(%rdx,%rax,4), %ymm7
+
+// CHECK: vfrczps -64(%rdx,%rax,4), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x7c,0x82,0xc0]
+vfrczps -64(%rdx,%rax,4), %ymm7
+
+// CHECK: vfrczps 64(%rdx,%rax,4), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x80,0x4c,0x82,0x40]
+vfrczps 64(%rdx,%rax,4), %ymm9
+
+// CHECK: vfrczps -64(%rdx,%rax,4), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x80,0x4c,0x82,0xc0]
+vfrczps -64(%rdx,%rax,4), %ymm9
+
+// CHECK: vfrczps 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x80,0x7c,0x02,0x40]
+vfrczps 64(%rdx,%rax), %xmm15
+
+// CHECK: vfrczps 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x74,0x02,0x40]
+vfrczps 64(%rdx,%rax), %xmm6
+
+// CHECK: vfrczps 64(%rdx,%rax), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x7c,0x02,0x40]
+vfrczps 64(%rdx,%rax), %ymm7
+
+// CHECK: vfrczps 64(%rdx,%rax), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x80,0x4c,0x02,0x40]
+vfrczps 64(%rdx,%rax), %ymm9
+
+// CHECK: vfrczps 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x80,0x7a,0x40]
+vfrczps 64(%rdx), %xmm15
+
+// CHECK: vfrczps 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x72,0x40]
+vfrczps 64(%rdx), %xmm6
+
+// CHECK: vfrczps 64(%rdx), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x7a,0x40]
+vfrczps 64(%rdx), %ymm7
+
+// CHECK: vfrczps 64(%rdx), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x80,0x4a,0x40]
+vfrczps 64(%rdx), %ymm9
+
+// CHECK: vfrczps (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x80,0x3a]
+vfrczps (%rdx), %xmm15
+
+// CHECK: vfrczps (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0x32]
+vfrczps (%rdx), %xmm6
+
+// CHECK: vfrczps (%rdx), %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0x3a]
+vfrczps (%rdx), %ymm7
+
+// CHECK: vfrczps (%rdx), %ymm9
+// CHECK: encoding: [0x8f,0x69,0x7c,0x80,0x0a]
+vfrczps (%rdx), %ymm9
+
+// CHECK: vfrczps %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0x80,0xff]
+vfrczps %xmm15, %xmm15
+
+// CHECK: vfrczps %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x80,0xf6]
+vfrczps %xmm6, %xmm6
+
+// CHECK: vfrczps %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe9,0x7c,0x80,0xff]
+vfrczps %ymm7, %ymm7
+
+// CHECK: vfrczps %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x49,0x7c,0x80,0xc9]
+vfrczps %ymm9, %ymm9
+
+// CHECK: vfrczsd 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x83,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczsd 485498096, %xmm15
+
+// CHECK: vfrczsd 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczsd 485498096, %xmm6
+
+// CHECK: vfrczsd 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x83,0x7c,0x82,0x40]
+vfrczsd 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczsd -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x83,0x7c,0x82,0xc0]
+vfrczsd -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczsd 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x74,0x82,0x40]
+vfrczsd 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczsd -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x74,0x82,0xc0]
+vfrczsd -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczsd 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x83,0x7c,0x02,0x40]
+vfrczsd 64(%rdx,%rax), %xmm15
+
+// CHECK: vfrczsd 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x74,0x02,0x40]
+vfrczsd 64(%rdx,%rax), %xmm6
+
+// CHECK: vfrczsd 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x83,0x7a,0x40]
+vfrczsd 64(%rdx), %xmm15
+
+// CHECK: vfrczsd 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x72,0x40]
+vfrczsd 64(%rdx), %xmm6
+
+// CHECK: vfrczsd (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x83,0x3a]
+vfrczsd (%rdx), %xmm15
+
+// CHECK: vfrczsd (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0x32]
+vfrczsd (%rdx), %xmm6
+
+// CHECK: vfrczsd %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0x83,0xff]
+vfrczsd %xmm15, %xmm15
+
+// CHECK: vfrczsd %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x83,0xf6]
+vfrczsd %xmm6, %xmm6
+
+// CHECK: vfrczss 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x82,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczss 485498096, %xmm15
+
+// CHECK: vfrczss 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vfrczss 485498096, %xmm6
+
+// CHECK: vfrczss 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x82,0x7c,0x82,0x40]
+vfrczss 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczss -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x82,0x7c,0x82,0xc0]
+vfrczss -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vfrczss 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x74,0x82,0x40]
+vfrczss 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczss -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x74,0x82,0xc0]
+vfrczss -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vfrczss 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x82,0x7c,0x02,0x40]
+vfrczss 64(%rdx,%rax), %xmm15
+
+// CHECK: vfrczss 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x74,0x02,0x40]
+vfrczss 64(%rdx,%rax), %xmm6
+
+// CHECK: vfrczss 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x82,0x7a,0x40]
+vfrczss 64(%rdx), %xmm15
+
+// CHECK: vfrczss 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x72,0x40]
+vfrczss 64(%rdx), %xmm6
+
+// CHECK: vfrczss (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0x82,0x3a]
+vfrczss (%rdx), %xmm15
+
+// CHECK: vfrczss (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0x32]
+vfrczss (%rdx), %xmm6
+
+// CHECK: vfrczss %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0x82,0xff]
+vfrczss %xmm15, %xmm15
+
+// CHECK: vfrczss %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0x82,0xf6]
+vfrczss %xmm6, %xmm6
+
+// CHECK: vpcmov 485498096, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpcmov 485498096, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcmov 485498096, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa2,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpcmov 485498096, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcmov 485498096, %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0xc4,0xa2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x70]
+vpcmov 485498096, %ymm7, %ymm7, %ymm7
+
+// CHECK: vpcmov 485498096, %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0xb4,0xa2,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x90]
+vpcmov 485498096, %ymm9, %ymm9, %ymm9
+
+// CHECK: vpcmov 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa2,0x7c,0x82,0x40,0xf0]
+vpcmov 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcmov -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa2,0x7c,0x82,0xc0,0xf0]
+vpcmov -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcmov 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa2,0x74,0x82,0x40,0x60]
+vpcmov 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcmov -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa2,0x74,0x82,0xc0,0x60]
+vpcmov -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcmov 64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0xc4,0xa2,0x7c,0x82,0x40,0x70]
+vpcmov 64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpcmov -64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0xc4,0xa2,0x7c,0x82,0xc0,0x70]
+vpcmov -64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpcmov 64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0xb4,0xa2,0x4c,0x82,0x40,0x90]
+vpcmov 64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpcmov -64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0xb4,0xa2,0x4c,0x82,0xc0,0x90]
+vpcmov -64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpcmov 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa2,0x7c,0x02,0x40,0xf0]
+vpcmov 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcmov 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa2,0x74,0x02,0x40,0x60]
+vpcmov 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcmov 64(%rdx,%rax), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0xc4,0xa2,0x7c,0x02,0x40,0x70]
+vpcmov 64(%rdx,%rax), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpcmov 64(%rdx,%rax), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0xb4,0xa2,0x4c,0x02,0x40,0x90]
+vpcmov 64(%rdx,%rax), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpcmov 64(%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa2,0x7a,0x40,0xf0]
+vpcmov 64(%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcmov 64(%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa2,0x72,0x40,0x60]
+vpcmov 64(%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcmov 64(%rdx), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0xc4,0xa2,0x7a,0x40,0x70]
+vpcmov 64(%rdx), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpcmov 64(%rdx), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0xb4,0xa2,0x4a,0x40,0x90]
+vpcmov 64(%rdx), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpcmov (%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa2,0x3a,0xf0]
+vpcmov (%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcmov (%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa2,0x32,0x60]
+vpcmov (%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcmov (%rdx), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0xc4,0xa2,0x3a,0x70]
+vpcmov (%rdx), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpcmov (%rdx), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0xb4,0xa2,0x0a,0x90]
+vpcmov (%rdx), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpcmov %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpcmov %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpcmov %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa2,0x7c,0x82,0x40,0xf0]
+vpcmov %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcmov %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa2,0x7c,0x82,0xc0,0xf0]
+vpcmov %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcmov %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa2,0x7c,0x02,0x40,0xf0]
+vpcmov %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcmov %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa2,0x7a,0x40,0xf0]
+vpcmov %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcmov %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa2,0x3a,0xf0]
+vpcmov %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcmov %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xa2,0xff,0xf0]
+vpcmov %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcmov %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa2,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpcmov %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpcmov %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa2,0x74,0x82,0x40,0x60]
+vpcmov %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcmov %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa2,0x74,0x82,0xc0,0x60]
+vpcmov %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcmov %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa2,0x74,0x02,0x40,0x60]
+vpcmov %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcmov %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa2,0x72,0x40,0x60]
+vpcmov %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcmov %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa2,0x32,0x60]
+vpcmov %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcmov %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa2,0xf6,0x60]
+vpcmov %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcmov %ymm7, 485498096, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0x44,0xa2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x70]
+vpcmov %ymm7, 485498096, %ymm7, %ymm7
+
+// CHECK: vpcmov %ymm7, 64(%rdx,%rax,4), %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0x44,0xa2,0x7c,0x82,0x40,0x70]
+vpcmov %ymm7, 64(%rdx,%rax,4), %ymm7, %ymm7
+
+// CHECK: vpcmov %ymm7, -64(%rdx,%rax,4), %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0x44,0xa2,0x7c,0x82,0xc0,0x70]
+vpcmov %ymm7, -64(%rdx,%rax,4), %ymm7, %ymm7
+
+// CHECK: vpcmov %ymm7, 64(%rdx,%rax), %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0x44,0xa2,0x7c,0x02,0x40,0x70]
+vpcmov %ymm7, 64(%rdx,%rax), %ymm7, %ymm7
+
+// CHECK: vpcmov %ymm7, 64(%rdx), %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0x44,0xa2,0x7a,0x40,0x70]
+vpcmov %ymm7, 64(%rdx), %ymm7, %ymm7
+
+// CHECK: vpcmov %ymm7, (%rdx), %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0x44,0xa2,0x3a,0x70]
+vpcmov %ymm7, (%rdx), %ymm7, %ymm7
+
+// CHECK: vpcmov %ymm7, %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0x8f,0xe8,0x44,0xa2,0xff,0x70]
+vpcmov %ymm7, %ymm7, %ymm7, %ymm7
+
+// CHECK: vpcmov %ymm9, 485498096, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0x34,0xa2,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x90]
+vpcmov %ymm9, 485498096, %ymm9, %ymm9
+
+// CHECK: vpcmov %ymm9, 64(%rdx,%rax,4), %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0x34,0xa2,0x4c,0x82,0x40,0x90]
+vpcmov %ymm9, 64(%rdx,%rax,4), %ymm9, %ymm9
+
+// CHECK: vpcmov %ymm9, -64(%rdx,%rax,4), %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0x34,0xa2,0x4c,0x82,0xc0,0x90]
+vpcmov %ymm9, -64(%rdx,%rax,4), %ymm9, %ymm9
+
+// CHECK: vpcmov %ymm9, 64(%rdx,%rax), %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0x34,0xa2,0x4c,0x02,0x40,0x90]
+vpcmov %ymm9, 64(%rdx,%rax), %ymm9, %ymm9
+
+// CHECK: vpcmov %ymm9, 64(%rdx), %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0x34,0xa2,0x4a,0x40,0x90]
+vpcmov %ymm9, 64(%rdx), %ymm9, %ymm9
+
+// CHECK: vpcmov %ymm9, (%rdx), %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x68,0x34,0xa2,0x0a,0x90]
+vpcmov %ymm9, (%rdx), %ymm9, %ymm9
+
+// CHECK: vpcmov %ymm9, %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0x8f,0x48,0x34,0xa2,0xc9,0x90]
+vpcmov %ymm9, %ymm9, %ymm9, %ymm9
+
+// CHECK: vpcomltb 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcc,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltb 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltb 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcc,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltb 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltb 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcc,0x7c,0x82,0x40,0x00]
+vpcomltb 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltb -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcc,0x7c,0x82,0xc0,0x00]
+vpcomltb -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltb 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcc,0x74,0x82,0x40,0x00]
+vpcomltb 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltb -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcc,0x74,0x82,0xc0,0x00]
+vpcomltb -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltb 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcc,0x7c,0x02,0x40,0x00]
+vpcomltb 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltb 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcc,0x74,0x02,0x40,0x00]
+vpcomltb 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltb 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcc,0x7a,0x40,0x00]
+vpcomltb 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltb 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcc,0x72,0x40,0x00]
+vpcomltb 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltb (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcc,0x3a,0x00]
+vpcomltb (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltb (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcc,0x32,0x00]
+vpcomltb (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltb %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xcc,0xff,0x00]
+vpcomltb %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltb %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcc,0xf6,0x00]
+vpcomltb %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcomltd 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xce,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltd 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltd 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xce,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltd 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltd 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xce,0x7c,0x82,0x40,0x00]
+vpcomltd 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltd -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xce,0x7c,0x82,0xc0,0x00]
+vpcomltd -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltd 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xce,0x74,0x82,0x40,0x00]
+vpcomltd 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltd -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xce,0x74,0x82,0xc0,0x00]
+vpcomltd -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltd 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xce,0x7c,0x02,0x40,0x00]
+vpcomltd 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltd 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xce,0x74,0x02,0x40,0x00]
+vpcomltd 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltd 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xce,0x7a,0x40,0x00]
+vpcomltd 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltd 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xce,0x72,0x40,0x00]
+vpcomltd 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltd (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xce,0x3a,0x00]
+vpcomltd (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltd (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xce,0x32,0x00]
+vpcomltd (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltd %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xce,0xff,0x00]
+vpcomltd %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltd %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xce,0xf6,0x00]
+vpcomltd %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcomltq 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcf,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltq 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltq 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcf,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltq 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltq 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcf,0x7c,0x82,0x40,0x00]
+vpcomltq 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltq -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcf,0x7c,0x82,0xc0,0x00]
+vpcomltq -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltq 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcf,0x74,0x82,0x40,0x00]
+vpcomltq 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltq -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcf,0x74,0x82,0xc0,0x00]
+vpcomltq -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltq 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcf,0x7c,0x02,0x40,0x00]
+vpcomltq 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltq 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcf,0x74,0x02,0x40,0x00]
+vpcomltq 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltq 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcf,0x7a,0x40,0x00]
+vpcomltq 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltq 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcf,0x72,0x40,0x00]
+vpcomltq 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltq (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcf,0x3a,0x00]
+vpcomltq (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltq (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcf,0x32,0x00]
+vpcomltq (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltq %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xcf,0xff,0x00]
+vpcomltq %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltq %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcf,0xf6,0x00]
+vpcomltq %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcomltub 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xec,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltub 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltub 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xec,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltub 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltub 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xec,0x7c,0x82,0x40,0x00]
+vpcomltub 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltub -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xec,0x7c,0x82,0xc0,0x00]
+vpcomltub -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltub 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xec,0x74,0x82,0x40,0x00]
+vpcomltub 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltub -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xec,0x74,0x82,0xc0,0x00]
+vpcomltub -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltub 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xec,0x7c,0x02,0x40,0x00]
+vpcomltub 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltub 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xec,0x74,0x02,0x40,0x00]
+vpcomltub 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltub 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xec,0x7a,0x40,0x00]
+vpcomltub 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltub 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xec,0x72,0x40,0x00]
+vpcomltub 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltub (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xec,0x3a,0x00]
+vpcomltub (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltub (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xec,0x32,0x00]
+vpcomltub (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltub %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xec,0xff,0x00]
+vpcomltub %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltub %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xec,0xf6,0x00]
+vpcomltub %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcomltud 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xee,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltud 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltud 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xee,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltud 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltud 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xee,0x7c,0x82,0x40,0x00]
+vpcomltud 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltud -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xee,0x7c,0x82,0xc0,0x00]
+vpcomltud -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltud 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xee,0x74,0x82,0x40,0x00]
+vpcomltud 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltud -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xee,0x74,0x82,0xc0,0x00]
+vpcomltud -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltud 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xee,0x7c,0x02,0x40,0x00]
+vpcomltud 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltud 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xee,0x74,0x02,0x40,0x00]
+vpcomltud 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltud 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xee,0x7a,0x40,0x00]
+vpcomltud 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltud 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xee,0x72,0x40,0x00]
+vpcomltud 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltud (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xee,0x3a,0x00]
+vpcomltud (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltud (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xee,0x32,0x00]
+vpcomltud (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltud %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xee,0xff,0x00]
+vpcomltud %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltud %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xee,0xf6,0x00]
+vpcomltud %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcomltuq 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xef,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuq 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltuq 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xef,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuq 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltuq 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xef,0x7c,0x82,0x40,0x00]
+vpcomltuq 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltuq -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xef,0x7c,0x82,0xc0,0x00]
+vpcomltuq -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltuq 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xef,0x74,0x82,0x40,0x00]
+vpcomltuq 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltuq -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xef,0x74,0x82,0xc0,0x00]
+vpcomltuq -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltuq 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xef,0x7c,0x02,0x40,0x00]
+vpcomltuq 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltuq 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xef,0x74,0x02,0x40,0x00]
+vpcomltuq 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltuq 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xef,0x7a,0x40,0x00]
+vpcomltuq 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltuq 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xef,0x72,0x40,0x00]
+vpcomltuq 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltuq (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xef,0x3a,0x00]
+vpcomltuq (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltuq (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xef,0x32,0x00]
+vpcomltuq (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltuq %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xef,0xff,0x00]
+vpcomltuq %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltuq %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xef,0xf6,0x00]
+vpcomltuq %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcomltuw 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xed,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuw 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltuw 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xed,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltuw 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltuw 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xed,0x7c,0x82,0x40,0x00]
+vpcomltuw 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltuw -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xed,0x7c,0x82,0xc0,0x00]
+vpcomltuw -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltuw 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xed,0x74,0x82,0x40,0x00]
+vpcomltuw 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltuw -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xed,0x74,0x82,0xc0,0x00]
+vpcomltuw -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltuw 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xed,0x7c,0x02,0x40,0x00]
+vpcomltuw 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltuw 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xed,0x74,0x02,0x40,0x00]
+vpcomltuw 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltuw 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xed,0x7a,0x40,0x00]
+vpcomltuw 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltuw 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xed,0x72,0x40,0x00]
+vpcomltuw 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltuw (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xed,0x3a,0x00]
+vpcomltuw (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltuw (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xed,0x32,0x00]
+vpcomltuw (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltuw %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xed,0xff,0x00]
+vpcomltuw %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltuw %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xed,0xf6,0x00]
+vpcomltuw %xmm6, %xmm6, %xmm6
+
+// CHECK: vpcomltw 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcd,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltw 485498096, %xmm15, %xmm15
+
+// CHECK: vpcomltw 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcd,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vpcomltw 485498096, %xmm6, %xmm6
+
+// CHECK: vpcomltw 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcd,0x7c,0x82,0x40,0x00]
+vpcomltw 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltw -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcd,0x7c,0x82,0xc0,0x00]
+vpcomltw -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpcomltw 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcd,0x74,0x82,0x40,0x00]
+vpcomltw 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltw -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcd,0x74,0x82,0xc0,0x00]
+vpcomltw -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpcomltw 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcd,0x7c,0x02,0x40,0x00]
+vpcomltw 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpcomltw 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcd,0x74,0x02,0x40,0x00]
+vpcomltw 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpcomltw 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcd,0x7a,0x40,0x00]
+vpcomltw 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltw 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcd,0x72,0x40,0x00]
+vpcomltw 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltw (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xcd,0x3a,0x00]
+vpcomltw (%rdx), %xmm15, %xmm15
+
+// CHECK: vpcomltw (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcd,0x32,0x00]
+vpcomltw (%rdx), %xmm6, %xmm6
+
+// CHECK: vpcomltw %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xcd,0xff,0x00]
+vpcomltw %xmm15, %xmm15, %xmm15
+
+// CHECK: vpcomltw %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xcd,0xf6,0x00]
+vpcomltw %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, 485498096, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x49,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpermil2pd $0, 485498096, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, 485498096, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x49,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpermil2pd $0, 485498096, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, 485498096, %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x49,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x70]
+vpermil2pd $0, 485498096, %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, 485498096, %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x49,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x90]
+vpermil2pd $0, 485498096, %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x49,0x7c,0x82,0x40,0xf0]
+vpermil2pd $0, 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x49,0x7c,0x82,0xc0,0xf0]
+vpermil2pd $0, -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x49,0x74,0x82,0x40,0x60]
+vpermil2pd $0, 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x49,0x74,0x82,0xc0,0x60]
+vpermil2pd $0, -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x49,0x7c,0x82,0x40,0x70]
+vpermil2pd $0, 64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, -64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x49,0x7c,0x82,0xc0,0x70]
+vpermil2pd $0, -64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x49,0x4c,0x82,0x40,0x90]
+vpermil2pd $0, 64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, -64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x49,0x4c,0x82,0xc0,0x90]
+vpermil2pd $0, -64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x49,0x7c,0x02,0x40,0xf0]
+vpermil2pd $0, 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x49,0x74,0x02,0x40,0x60]
+vpermil2pd $0, 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x49,0x7c,0x02,0x40,0x70]
+vpermil2pd $0, 64(%rdx,%rax), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, 64(%rdx,%rax), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x49,0x4c,0x02,0x40,0x90]
+vpermil2pd $0, 64(%rdx,%rax), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, 64(%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x49,0x7a,0x40,0xf0]
+vpermil2pd $0, 64(%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, 64(%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x49,0x72,0x40,0x60]
+vpermil2pd $0, 64(%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, 64(%rdx), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x49,0x7a,0x40,0x70]
+vpermil2pd $0, 64(%rdx), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, 64(%rdx), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x49,0x4a,0x40,0x90]
+vpermil2pd $0, 64(%rdx), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, (%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x49,0x3a,0xf0]
+vpermil2pd $0, (%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, (%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x49,0x32,0x60]
+vpermil2pd $0, (%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, (%rdx), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x49,0x3a,0x70]
+vpermil2pd $0, (%rdx), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, (%rdx), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x49,0x0a,0x90]
+vpermil2pd $0, (%rdx), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x49,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpermil2pd $0, %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x49,0x7c,0x82,0x40,0xf0]
+vpermil2pd $0, %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x49,0x7c,0x82,0xc0,0xf0]
+vpermil2pd $0, %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x49,0x7c,0x02,0x40,0xf0]
+vpermil2pd $0, %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x49,0x7a,0x40,0xf0]
+vpermil2pd $0, %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x49,0x3a,0xf0]
+vpermil2pd $0, %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x43,0x01,0x49,0xff,0xf0]
+vpermil2pd $0, %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2pd $0, %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x49,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpermil2pd $0, %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x49,0x74,0x82,0x40,0x60]
+vpermil2pd $0, %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x49,0x74,0x82,0xc0,0x60]
+vpermil2pd $0, %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x49,0x74,0x02,0x40,0x60]
+vpermil2pd $0, %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x49,0x72,0x40,0x60]
+vpermil2pd $0, %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x49,0x32,0x60]
+vpermil2pd $0, %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x49,0xf6,0x60]
+vpermil2pd $0, %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2pd $0, %ymm7, 485498096, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x49,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x70]
+vpermil2pd $0, %ymm7, 485498096, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, %ymm7, 64(%rdx,%rax,4), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x49,0x7c,0x82,0x40,0x70]
+vpermil2pd $0, %ymm7, 64(%rdx,%rax,4), %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, %ymm7, -64(%rdx,%rax,4), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x49,0x7c,0x82,0xc0,0x70]
+vpermil2pd $0, %ymm7, -64(%rdx,%rax,4), %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, %ymm7, 64(%rdx,%rax), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x49,0x7c,0x02,0x40,0x70]
+vpermil2pd $0, %ymm7, 64(%rdx,%rax), %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, %ymm7, 64(%rdx), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x49,0x7a,0x40,0x70]
+vpermil2pd $0, %ymm7, 64(%rdx), %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, %ymm7, (%rdx), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x49,0x3a,0x70]
+vpermil2pd $0, %ymm7, (%rdx), %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, %ymm7, %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x49,0xff,0x70]
+vpermil2pd $0, %ymm7, %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2pd $0, %ymm9, 485498096, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x49,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x90]
+vpermil2pd $0, %ymm9, 485498096, %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, %ymm9, 64(%rdx,%rax,4), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x49,0x4c,0x82,0x40,0x90]
+vpermil2pd $0, %ymm9, 64(%rdx,%rax,4), %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, %ymm9, -64(%rdx,%rax,4), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x49,0x4c,0x82,0xc0,0x90]
+vpermil2pd $0, %ymm9, -64(%rdx,%rax,4), %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, %ymm9, 64(%rdx,%rax), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x49,0x4c,0x02,0x40,0x90]
+vpermil2pd $0, %ymm9, 64(%rdx,%rax), %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, %ymm9, 64(%rdx), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x49,0x4a,0x40,0x90]
+vpermil2pd $0, %ymm9, 64(%rdx), %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, %ymm9, (%rdx), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x49,0x0a,0x90]
+vpermil2pd $0, %ymm9, (%rdx), %ymm9, %ymm9
+
+// CHECK: vpermil2pd $0, %ymm9, %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x43,0x35,0x49,0xc9,0x90]
+vpermil2pd $0, %ymm9, %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, 485498096, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x48,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpermil2ps $0, 485498096, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, 485498096, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x48,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpermil2ps $0, 485498096, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, 485498096, %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x48,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x70]
+vpermil2ps $0, 485498096, %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, 485498096, %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x48,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x90]
+vpermil2ps $0, 485498096, %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x48,0x7c,0x82,0x40,0xf0]
+vpermil2ps $0, 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x48,0x7c,0x82,0xc0,0xf0]
+vpermil2ps $0, -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x48,0x74,0x82,0x40,0x60]
+vpermil2ps $0, 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x48,0x74,0x82,0xc0,0x60]
+vpermil2ps $0, -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x48,0x7c,0x82,0x40,0x70]
+vpermil2ps $0, 64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, -64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x48,0x7c,0x82,0xc0,0x70]
+vpermil2ps $0, -64(%rdx,%rax,4), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x48,0x4c,0x82,0x40,0x90]
+vpermil2ps $0, 64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, -64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x48,0x4c,0x82,0xc0,0x90]
+vpermil2ps $0, -64(%rdx,%rax,4), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x48,0x7c,0x02,0x40,0xf0]
+vpermil2ps $0, 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x48,0x74,0x02,0x40,0x60]
+vpermil2ps $0, 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x48,0x7c,0x02,0x40,0x70]
+vpermil2ps $0, 64(%rdx,%rax), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, 64(%rdx,%rax), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x48,0x4c,0x02,0x40,0x90]
+vpermil2ps $0, 64(%rdx,%rax), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, 64(%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x48,0x7a,0x40,0xf0]
+vpermil2ps $0, 64(%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, 64(%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x48,0x72,0x40,0x60]
+vpermil2ps $0, 64(%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, 64(%rdx), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x48,0x7a,0x40,0x70]
+vpermil2ps $0, 64(%rdx), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, 64(%rdx), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x48,0x4a,0x40,0x90]
+vpermil2ps $0, 64(%rdx), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, (%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x81,0x48,0x3a,0xf0]
+vpermil2ps $0, (%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, (%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0xc9,0x48,0x32,0x60]
+vpermil2ps $0, (%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, (%rdx), %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0xc5,0x48,0x3a,0x70]
+vpermil2ps $0, (%rdx), %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, (%rdx), %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0xb5,0x48,0x0a,0x90]
+vpermil2ps $0, (%rdx), %ymm9, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x48,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpermil2ps $0, %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x48,0x7c,0x82,0x40,0xf0]
+vpermil2ps $0, %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x48,0x7c,0x82,0xc0,0xf0]
+vpermil2ps $0, %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x48,0x7c,0x02,0x40,0xf0]
+vpermil2ps $0, %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x48,0x7a,0x40,0xf0]
+vpermil2ps $0, %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x63,0x01,0x48,0x3a,0xf0]
+vpermil2ps $0, %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0xc4,0x43,0x01,0x48,0xff,0xf0]
+vpermil2ps $0, %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpermil2ps $0, %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x48,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpermil2ps $0, %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x48,0x74,0x82,0x40,0x60]
+vpermil2ps $0, %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x48,0x74,0x82,0xc0,0x60]
+vpermil2ps $0, %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x48,0x74,0x02,0x40,0x60]
+vpermil2ps $0, %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x48,0x72,0x40,0x60]
+vpermil2ps $0, %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x48,0x32,0x60]
+vpermil2ps $0, %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0xc4,0xe3,0x49,0x48,0xf6,0x60]
+vpermil2ps $0, %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpermil2ps $0, %ymm7, 485498096, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x48,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x70]
+vpermil2ps $0, %ymm7, 485498096, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, %ymm7, 64(%rdx,%rax,4), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x48,0x7c,0x82,0x40,0x70]
+vpermil2ps $0, %ymm7, 64(%rdx,%rax,4), %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, %ymm7, -64(%rdx,%rax,4), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x48,0x7c,0x82,0xc0,0x70]
+vpermil2ps $0, %ymm7, -64(%rdx,%rax,4), %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, %ymm7, 64(%rdx,%rax), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x48,0x7c,0x02,0x40,0x70]
+vpermil2ps $0, %ymm7, 64(%rdx,%rax), %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, %ymm7, 64(%rdx), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x48,0x7a,0x40,0x70]
+vpermil2ps $0, %ymm7, 64(%rdx), %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, %ymm7, (%rdx), %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x48,0x3a,0x70]
+vpermil2ps $0, %ymm7, (%rdx), %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, %ymm7, %ymm7, %ymm7, %ymm7
+// CHECK: encoding: [0xc4,0xe3,0x45,0x48,0xff,0x70]
+vpermil2ps $0, %ymm7, %ymm7, %ymm7, %ymm7
+
+// CHECK: vpermil2ps $0, %ymm9, 485498096, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x48,0x0c,0x25,0xf0,0x1c,0xf0,0x1c,0x90]
+vpermil2ps $0, %ymm9, 485498096, %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, %ymm9, 64(%rdx,%rax,4), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x48,0x4c,0x82,0x40,0x90]
+vpermil2ps $0, %ymm9, 64(%rdx,%rax,4), %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, %ymm9, -64(%rdx,%rax,4), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x48,0x4c,0x82,0xc0,0x90]
+vpermil2ps $0, %ymm9, -64(%rdx,%rax,4), %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, %ymm9, 64(%rdx,%rax), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x48,0x4c,0x02,0x40,0x90]
+vpermil2ps $0, %ymm9, 64(%rdx,%rax), %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, %ymm9, 64(%rdx), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x48,0x4a,0x40,0x90]
+vpermil2ps $0, %ymm9, 64(%rdx), %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, %ymm9, (%rdx), %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x63,0x35,0x48,0x0a,0x90]
+vpermil2ps $0, %ymm9, (%rdx), %ymm9, %ymm9
+
+// CHECK: vpermil2ps $0, %ymm9, %ymm9, %ymm9, %ymm9
+// CHECK: encoding: [0xc4,0x43,0x35,0x48,0xc9,0x90]
+vpermil2ps $0, %ymm9, %ymm9, %ymm9, %ymm9
+
+// CHECK: vphaddbd 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddbd 485498096, %xmm15
+
+// CHECK: vphaddbd 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddbd 485498096, %xmm6
+
+// CHECK: vphaddbd 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc2,0x7c,0x82,0x40]
+vphaddbd 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddbd -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc2,0x7c,0x82,0xc0]
+vphaddbd -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddbd 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x74,0x82,0x40]
+vphaddbd 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddbd -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x74,0x82,0xc0]
+vphaddbd -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddbd 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc2,0x7c,0x02,0x40]
+vphaddbd 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddbd 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x74,0x02,0x40]
+vphaddbd 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddbd 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc2,0x7a,0x40]
+vphaddbd 64(%rdx), %xmm15
+
+// CHECK: vphaddbd 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x72,0x40]
+vphaddbd 64(%rdx), %xmm6
+
+// CHECK: vphaddbd (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc2,0x3a]
+vphaddbd (%rdx), %xmm15
+
+// CHECK: vphaddbd (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0x32]
+vphaddbd (%rdx), %xmm6
+
+// CHECK: vphaddbd %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xc2,0xff]
+vphaddbd %xmm15, %xmm15
+
+// CHECK: vphaddbd %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc2,0xf6]
+vphaddbd %xmm6, %xmm6
+
+// CHECK: vphaddbq 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc3,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddbq 485498096, %xmm15
+
+// CHECK: vphaddbq 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddbq 485498096, %xmm6
+
+// CHECK: vphaddbq 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc3,0x7c,0x82,0x40]
+vphaddbq 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddbq -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc3,0x7c,0x82,0xc0]
+vphaddbq -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddbq 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x74,0x82,0x40]
+vphaddbq 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddbq -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x74,0x82,0xc0]
+vphaddbq -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddbq 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc3,0x7c,0x02,0x40]
+vphaddbq 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddbq 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x74,0x02,0x40]
+vphaddbq 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddbq 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc3,0x7a,0x40]
+vphaddbq 64(%rdx), %xmm15
+
+// CHECK: vphaddbq 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x72,0x40]
+vphaddbq 64(%rdx), %xmm6
+
+// CHECK: vphaddbq (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc3,0x3a]
+vphaddbq (%rdx), %xmm15
+
+// CHECK: vphaddbq (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0x32]
+vphaddbq (%rdx), %xmm6
+
+// CHECK: vphaddbq %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xc3,0xff]
+vphaddbq %xmm15, %xmm15
+
+// CHECK: vphaddbq %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc3,0xf6]
+vphaddbq %xmm6, %xmm6
+
+// CHECK: vphaddbw 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc1,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddbw 485498096, %xmm15
+
+// CHECK: vphaddbw 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddbw 485498096, %xmm6
+
+// CHECK: vphaddbw 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc1,0x7c,0x82,0x40]
+vphaddbw 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddbw -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc1,0x7c,0x82,0xc0]
+vphaddbw -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddbw 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x74,0x82,0x40]
+vphaddbw 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddbw -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x74,0x82,0xc0]
+vphaddbw -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddbw 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc1,0x7c,0x02,0x40]
+vphaddbw 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddbw 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x74,0x02,0x40]
+vphaddbw 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddbw 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc1,0x7a,0x40]
+vphaddbw 64(%rdx), %xmm15
+
+// CHECK: vphaddbw 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x72,0x40]
+vphaddbw 64(%rdx), %xmm6
+
+// CHECK: vphaddbw (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc1,0x3a]
+vphaddbw (%rdx), %xmm15
+
+// CHECK: vphaddbw (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0x32]
+vphaddbw (%rdx), %xmm6
+
+// CHECK: vphaddbw %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xc1,0xff]
+vphaddbw %xmm15, %xmm15
+
+// CHECK: vphaddbw %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc1,0xf6]
+vphaddbw %xmm6, %xmm6
+
+// CHECK: vphadddq 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xcb,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphadddq 485498096, %xmm15
+
+// CHECK: vphadddq 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphadddq 485498096, %xmm6
+
+// CHECK: vphadddq 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xcb,0x7c,0x82,0x40]
+vphadddq 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphadddq -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xcb,0x7c,0x82,0xc0]
+vphadddq -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphadddq 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x74,0x82,0x40]
+vphadddq 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphadddq -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x74,0x82,0xc0]
+vphadddq -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphadddq 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xcb,0x7c,0x02,0x40]
+vphadddq 64(%rdx,%rax), %xmm15
+
+// CHECK: vphadddq 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x74,0x02,0x40]
+vphadddq 64(%rdx,%rax), %xmm6
+
+// CHECK: vphadddq 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xcb,0x7a,0x40]
+vphadddq 64(%rdx), %xmm15
+
+// CHECK: vphadddq 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x72,0x40]
+vphadddq 64(%rdx), %xmm6
+
+// CHECK: vphadddq (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xcb,0x3a]
+vphadddq (%rdx), %xmm15
+
+// CHECK: vphadddq (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0x32]
+vphadddq (%rdx), %xmm6
+
+// CHECK: vphadddq %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xcb,0xff]
+vphadddq %xmm15, %xmm15
+
+// CHECK: vphadddq %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xcb,0xf6]
+vphadddq %xmm6, %xmm6
+
+// CHECK: vphaddubd 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddubd 485498096, %xmm15
+
+// CHECK: vphaddubd 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddubd 485498096, %xmm6
+
+// CHECK: vphaddubd 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd2,0x7c,0x82,0x40]
+vphaddubd 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddubd -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd2,0x7c,0x82,0xc0]
+vphaddubd -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddubd 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x74,0x82,0x40]
+vphaddubd 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddubd -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x74,0x82,0xc0]
+vphaddubd -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddubd 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd2,0x7c,0x02,0x40]
+vphaddubd 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddubd 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x74,0x02,0x40]
+vphaddubd 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddubd 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd2,0x7a,0x40]
+vphaddubd 64(%rdx), %xmm15
+
+// CHECK: vphaddubd 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x72,0x40]
+vphaddubd 64(%rdx), %xmm6
+
+// CHECK: vphaddubd (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd2,0x3a]
+vphaddubd (%rdx), %xmm15
+
+// CHECK: vphaddubd (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0x32]
+vphaddubd (%rdx), %xmm6
+
+// CHECK: vphaddubd %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xd2,0xff]
+vphaddubd %xmm15, %xmm15
+
+// CHECK: vphaddubd %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd2,0xf6]
+vphaddubd %xmm6, %xmm6
+
+// CHECK: vphaddubq 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd3,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddubq 485498096, %xmm15
+
+// CHECK: vphaddubq 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddubq 485498096, %xmm6
+
+// CHECK: vphaddubq 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd3,0x7c,0x82,0x40]
+vphaddubq 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddubq -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd3,0x7c,0x82,0xc0]
+vphaddubq -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddubq 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x74,0x82,0x40]
+vphaddubq 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddubq -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x74,0x82,0xc0]
+vphaddubq -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddubq 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd3,0x7c,0x02,0x40]
+vphaddubq 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddubq 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x74,0x02,0x40]
+vphaddubq 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddubq 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd3,0x7a,0x40]
+vphaddubq 64(%rdx), %xmm15
+
+// CHECK: vphaddubq 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x72,0x40]
+vphaddubq 64(%rdx), %xmm6
+
+// CHECK: vphaddubq (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd3,0x3a]
+vphaddubq (%rdx), %xmm15
+
+// CHECK: vphaddubq (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0x32]
+vphaddubq (%rdx), %xmm6
+
+// CHECK: vphaddubq %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xd3,0xff]
+vphaddubq %xmm15, %xmm15
+
+// CHECK: vphaddubq %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd3,0xf6]
+vphaddubq %xmm6, %xmm6
+
+// CHECK: vphaddubw 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd1,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddubw 485498096, %xmm15
+
+// CHECK: vphaddubw 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddubw 485498096, %xmm6
+
+// CHECK: vphaddubw 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd1,0x7c,0x82,0x40]
+vphaddubw 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddubw -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd1,0x7c,0x82,0xc0]
+vphaddubw -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddubw 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x74,0x82,0x40]
+vphaddubw 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddubw -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x74,0x82,0xc0]
+vphaddubw -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddubw 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd1,0x7c,0x02,0x40]
+vphaddubw 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddubw 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x74,0x02,0x40]
+vphaddubw 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddubw 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd1,0x7a,0x40]
+vphaddubw 64(%rdx), %xmm15
+
+// CHECK: vphaddubw 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x72,0x40]
+vphaddubw 64(%rdx), %xmm6
+
+// CHECK: vphaddubw (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd1,0x3a]
+vphaddubw (%rdx), %xmm15
+
+// CHECK: vphaddubw (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0x32]
+vphaddubw (%rdx), %xmm6
+
+// CHECK: vphaddubw %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xd1,0xff]
+vphaddubw %xmm15, %xmm15
+
+// CHECK: vphaddubw %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd1,0xf6]
+vphaddubw %xmm6, %xmm6
+
+// CHECK: vphaddudq 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xdb,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddudq 485498096, %xmm15
+
+// CHECK: vphaddudq 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddudq 485498096, %xmm6
+
+// CHECK: vphaddudq 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xdb,0x7c,0x82,0x40]
+vphaddudq 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddudq -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xdb,0x7c,0x82,0xc0]
+vphaddudq -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddudq 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x74,0x82,0x40]
+vphaddudq 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddudq -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x74,0x82,0xc0]
+vphaddudq -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddudq 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xdb,0x7c,0x02,0x40]
+vphaddudq 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddudq 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x74,0x02,0x40]
+vphaddudq 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddudq 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xdb,0x7a,0x40]
+vphaddudq 64(%rdx), %xmm15
+
+// CHECK: vphaddudq 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x72,0x40]
+vphaddudq 64(%rdx), %xmm6
+
+// CHECK: vphaddudq (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xdb,0x3a]
+vphaddudq (%rdx), %xmm15
+
+// CHECK: vphaddudq (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0x32]
+vphaddudq (%rdx), %xmm6
+
+// CHECK: vphaddudq %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xdb,0xff]
+vphaddudq %xmm15, %xmm15
+
+// CHECK: vphaddudq %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xdb,0xf6]
+vphaddudq %xmm6, %xmm6
+
+// CHECK: vphadduwd 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd6,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphadduwd 485498096, %xmm15
+
+// CHECK: vphadduwd 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphadduwd 485498096, %xmm6
+
+// CHECK: vphadduwd 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd6,0x7c,0x82,0x40]
+vphadduwd 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphadduwd -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd6,0x7c,0x82,0xc0]
+vphadduwd -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphadduwd 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x74,0x82,0x40]
+vphadduwd 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphadduwd -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x74,0x82,0xc0]
+vphadduwd -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphadduwd 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd6,0x7c,0x02,0x40]
+vphadduwd 64(%rdx,%rax), %xmm15
+
+// CHECK: vphadduwd 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x74,0x02,0x40]
+vphadduwd 64(%rdx,%rax), %xmm6
+
+// CHECK: vphadduwd 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd6,0x7a,0x40]
+vphadduwd 64(%rdx), %xmm15
+
+// CHECK: vphadduwd 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x72,0x40]
+vphadduwd 64(%rdx), %xmm6
+
+// CHECK: vphadduwd (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd6,0x3a]
+vphadduwd (%rdx), %xmm15
+
+// CHECK: vphadduwd (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0x32]
+vphadduwd (%rdx), %xmm6
+
+// CHECK: vphadduwd %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xd6,0xff]
+vphadduwd %xmm15, %xmm15
+
+// CHECK: vphadduwd %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd6,0xf6]
+vphadduwd %xmm6, %xmm6
+
+// CHECK: vphadduwq 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd7,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphadduwq 485498096, %xmm15
+
+// CHECK: vphadduwq 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphadduwq 485498096, %xmm6
+
+// CHECK: vphadduwq 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd7,0x7c,0x82,0x40]
+vphadduwq 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphadduwq -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd7,0x7c,0x82,0xc0]
+vphadduwq -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphadduwq 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x74,0x82,0x40]
+vphadduwq 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphadduwq -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x74,0x82,0xc0]
+vphadduwq -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphadduwq 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd7,0x7c,0x02,0x40]
+vphadduwq 64(%rdx,%rax), %xmm15
+
+// CHECK: vphadduwq 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x74,0x02,0x40]
+vphadduwq 64(%rdx,%rax), %xmm6
+
+// CHECK: vphadduwq 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd7,0x7a,0x40]
+vphadduwq 64(%rdx), %xmm15
+
+// CHECK: vphadduwq 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x72,0x40]
+vphadduwq 64(%rdx), %xmm6
+
+// CHECK: vphadduwq (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xd7,0x3a]
+vphadduwq (%rdx), %xmm15
+
+// CHECK: vphadduwq (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0x32]
+vphadduwq (%rdx), %xmm6
+
+// CHECK: vphadduwq %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xd7,0xff]
+vphadduwq %xmm15, %xmm15
+
+// CHECK: vphadduwq %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xd7,0xf6]
+vphadduwq %xmm6, %xmm6
+
+// CHECK: vphaddwd 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc6,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddwd 485498096, %xmm15
+
+// CHECK: vphaddwd 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddwd 485498096, %xmm6
+
+// CHECK: vphaddwd 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc6,0x7c,0x82,0x40]
+vphaddwd 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddwd -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc6,0x7c,0x82,0xc0]
+vphaddwd -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddwd 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x74,0x82,0x40]
+vphaddwd 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddwd -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x74,0x82,0xc0]
+vphaddwd -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddwd 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc6,0x7c,0x02,0x40]
+vphaddwd 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddwd 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x74,0x02,0x40]
+vphaddwd 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddwd 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc6,0x7a,0x40]
+vphaddwd 64(%rdx), %xmm15
+
+// CHECK: vphaddwd 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x72,0x40]
+vphaddwd 64(%rdx), %xmm6
+
+// CHECK: vphaddwd (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc6,0x3a]
+vphaddwd (%rdx), %xmm15
+
+// CHECK: vphaddwd (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0x32]
+vphaddwd (%rdx), %xmm6
+
+// CHECK: vphaddwd %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xc6,0xff]
+vphaddwd %xmm15, %xmm15
+
+// CHECK: vphaddwd %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc6,0xf6]
+vphaddwd %xmm6, %xmm6
+
+// CHECK: vphaddwq 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc7,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddwq 485498096, %xmm15
+
+// CHECK: vphaddwq 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphaddwq 485498096, %xmm6
+
+// CHECK: vphaddwq 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc7,0x7c,0x82,0x40]
+vphaddwq 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddwq -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc7,0x7c,0x82,0xc0]
+vphaddwq -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphaddwq 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x74,0x82,0x40]
+vphaddwq 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddwq -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x74,0x82,0xc0]
+vphaddwq -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphaddwq 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc7,0x7c,0x02,0x40]
+vphaddwq 64(%rdx,%rax), %xmm15
+
+// CHECK: vphaddwq 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x74,0x02,0x40]
+vphaddwq 64(%rdx,%rax), %xmm6
+
+// CHECK: vphaddwq 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc7,0x7a,0x40]
+vphaddwq 64(%rdx), %xmm15
+
+// CHECK: vphaddwq 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x72,0x40]
+vphaddwq 64(%rdx), %xmm6
+
+// CHECK: vphaddwq (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xc7,0x3a]
+vphaddwq (%rdx), %xmm15
+
+// CHECK: vphaddwq (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0x32]
+vphaddwq (%rdx), %xmm6
+
+// CHECK: vphaddwq %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xc7,0xff]
+vphaddwq %xmm15, %xmm15
+
+// CHECK: vphaddwq %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xc7,0xf6]
+vphaddwq %xmm6, %xmm6
+
+// CHECK: vphsubbw 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe1,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphsubbw 485498096, %xmm15
+
+// CHECK: vphsubbw 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphsubbw 485498096, %xmm6
+
+// CHECK: vphsubbw 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe1,0x7c,0x82,0x40]
+vphsubbw 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphsubbw -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe1,0x7c,0x82,0xc0]
+vphsubbw -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphsubbw 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x74,0x82,0x40]
+vphsubbw 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphsubbw -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x74,0x82,0xc0]
+vphsubbw -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphsubbw 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe1,0x7c,0x02,0x40]
+vphsubbw 64(%rdx,%rax), %xmm15
+
+// CHECK: vphsubbw 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x74,0x02,0x40]
+vphsubbw 64(%rdx,%rax), %xmm6
+
+// CHECK: vphsubbw 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe1,0x7a,0x40]
+vphsubbw 64(%rdx), %xmm15
+
+// CHECK: vphsubbw 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x72,0x40]
+vphsubbw 64(%rdx), %xmm6
+
+// CHECK: vphsubbw (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe1,0x3a]
+vphsubbw (%rdx), %xmm15
+
+// CHECK: vphsubbw (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0x32]
+vphsubbw (%rdx), %xmm6
+
+// CHECK: vphsubbw %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xe1,0xff]
+vphsubbw %xmm15, %xmm15
+
+// CHECK: vphsubbw %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe1,0xf6]
+vphsubbw %xmm6, %xmm6
+
+// CHECK: vphsubdq 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe3,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphsubdq 485498096, %xmm15
+
+// CHECK: vphsubdq 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphsubdq 485498096, %xmm6
+
+// CHECK: vphsubdq 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe3,0x7c,0x82,0x40]
+vphsubdq 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphsubdq -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe3,0x7c,0x82,0xc0]
+vphsubdq -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphsubdq 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x74,0x82,0x40]
+vphsubdq 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphsubdq -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x74,0x82,0xc0]
+vphsubdq -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphsubdq 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe3,0x7c,0x02,0x40]
+vphsubdq 64(%rdx,%rax), %xmm15
+
+// CHECK: vphsubdq 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x74,0x02,0x40]
+vphsubdq 64(%rdx,%rax), %xmm6
+
+// CHECK: vphsubdq 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe3,0x7a,0x40]
+vphsubdq 64(%rdx), %xmm15
+
+// CHECK: vphsubdq 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x72,0x40]
+vphsubdq 64(%rdx), %xmm6
+
+// CHECK: vphsubdq (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe3,0x3a]
+vphsubdq (%rdx), %xmm15
+
+// CHECK: vphsubdq (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0x32]
+vphsubdq (%rdx), %xmm6
+
+// CHECK: vphsubdq %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xe3,0xff]
+vphsubdq %xmm15, %xmm15
+
+// CHECK: vphsubdq %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe3,0xf6]
+vphsubdq %xmm6, %xmm6
+
+// CHECK: vphsubwd 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vphsubwd 485498096, %xmm15
+
+// CHECK: vphsubwd 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vphsubwd 485498096, %xmm6
+
+// CHECK: vphsubwd 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe2,0x7c,0x82,0x40]
+vphsubwd 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphsubwd -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe2,0x7c,0x82,0xc0]
+vphsubwd -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vphsubwd 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x74,0x82,0x40]
+vphsubwd 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphsubwd -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x74,0x82,0xc0]
+vphsubwd -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vphsubwd 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe2,0x7c,0x02,0x40]
+vphsubwd 64(%rdx,%rax), %xmm15
+
+// CHECK: vphsubwd 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x74,0x02,0x40]
+vphsubwd 64(%rdx,%rax), %xmm6
+
+// CHECK: vphsubwd 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe2,0x7a,0x40]
+vphsubwd 64(%rdx), %xmm15
+
+// CHECK: vphsubwd 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x72,0x40]
+vphsubwd 64(%rdx), %xmm6
+
+// CHECK: vphsubwd (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x78,0xe2,0x3a]
+vphsubwd (%rdx), %xmm15
+
+// CHECK: vphsubwd (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0x32]
+vphsubwd (%rdx), %xmm6
+
+// CHECK: vphsubwd %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x78,0xe2,0xff]
+vphsubwd %xmm15, %xmm15
+
+// CHECK: vphsubwd %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x78,0xe2,0xf6]
+vphsubwd %xmm6, %xmm6
+
+// CHECK: vpmacsdd %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9e,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacsdd %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacsdd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9e,0x7c,0x82,0x40,0xf0]
+vpmacsdd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsdd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9e,0x7c,0x82,0xc0,0xf0]
+vpmacsdd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsdd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9e,0x7c,0x02,0x40,0xf0]
+vpmacsdd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacsdd %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9e,0x7a,0x40,0xf0]
+vpmacsdd %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsdd %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9e,0x3a,0xf0]
+vpmacsdd %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsdd %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x9e,0xff,0xf0]
+vpmacsdd %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacsdd %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9e,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacsdd %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacsdd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9e,0x74,0x82,0x40,0x60]
+vpmacsdd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsdd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9e,0x74,0x82,0xc0,0x60]
+vpmacsdd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsdd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9e,0x74,0x02,0x40,0x60]
+vpmacsdd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacsdd %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9e,0x72,0x40,0x60]
+vpmacsdd %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsdd %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9e,0x32,0x60]
+vpmacsdd %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsdd %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9e,0xf6,0x60]
+vpmacsdd %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacsdqh %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9f,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacsdqh %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacsdqh %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9f,0x7c,0x82,0x40,0xf0]
+vpmacsdqh %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsdqh %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9f,0x7c,0x82,0xc0,0xf0]
+vpmacsdqh %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsdqh %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9f,0x7c,0x02,0x40,0xf0]
+vpmacsdqh %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacsdqh %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9f,0x7a,0x40,0xf0]
+vpmacsdqh %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsdqh %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x9f,0x3a,0xf0]
+vpmacsdqh %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsdqh %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x9f,0xff,0xf0]
+vpmacsdqh %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacsdqh %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9f,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacsdqh %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacsdqh %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9f,0x74,0x82,0x40,0x60]
+vpmacsdqh %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsdqh %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9f,0x74,0x82,0xc0,0x60]
+vpmacsdqh %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsdqh %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9f,0x74,0x02,0x40,0x60]
+vpmacsdqh %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacsdqh %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9f,0x72,0x40,0x60]
+vpmacsdqh %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsdqh %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9f,0x32,0x60]
+vpmacsdqh %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsdqh %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x9f,0xf6,0x60]
+vpmacsdqh %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacsdql %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x97,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacsdql %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacsdql %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x97,0x7c,0x82,0x40,0xf0]
+vpmacsdql %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsdql %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x97,0x7c,0x82,0xc0,0xf0]
+vpmacsdql %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsdql %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x97,0x7c,0x02,0x40,0xf0]
+vpmacsdql %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacsdql %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x97,0x7a,0x40,0xf0]
+vpmacsdql %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsdql %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x97,0x3a,0xf0]
+vpmacsdql %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsdql %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x97,0xff,0xf0]
+vpmacsdql %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacsdql %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x97,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacsdql %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacsdql %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x97,0x74,0x82,0x40,0x60]
+vpmacsdql %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsdql %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x97,0x74,0x82,0xc0,0x60]
+vpmacsdql %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsdql %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x97,0x74,0x02,0x40,0x60]
+vpmacsdql %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacsdql %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x97,0x72,0x40,0x60]
+vpmacsdql %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsdql %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x97,0x32,0x60]
+vpmacsdql %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsdql %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x97,0xf6,0x60]
+vpmacsdql %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacssdd %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8e,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacssdd %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacssdd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8e,0x7c,0x82,0x40,0xf0]
+vpmacssdd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssdd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8e,0x7c,0x82,0xc0,0xf0]
+vpmacssdd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssdd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8e,0x7c,0x02,0x40,0xf0]
+vpmacssdd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacssdd %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8e,0x7a,0x40,0xf0]
+vpmacssdd %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssdd %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8e,0x3a,0xf0]
+vpmacssdd %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssdd %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x8e,0xff,0xf0]
+vpmacssdd %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacssdd %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8e,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacssdd %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacssdd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8e,0x74,0x82,0x40,0x60]
+vpmacssdd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssdd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8e,0x74,0x82,0xc0,0x60]
+vpmacssdd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssdd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8e,0x74,0x02,0x40,0x60]
+vpmacssdd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacssdd %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8e,0x72,0x40,0x60]
+vpmacssdd %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssdd %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8e,0x32,0x60]
+vpmacssdd %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssdd %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8e,0xf6,0x60]
+vpmacssdd %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacssdqh %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8f,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacssdqh %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacssdqh %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8f,0x7c,0x82,0x40,0xf0]
+vpmacssdqh %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssdqh %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8f,0x7c,0x82,0xc0,0xf0]
+vpmacssdqh %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssdqh %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8f,0x7c,0x02,0x40,0xf0]
+vpmacssdqh %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacssdqh %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8f,0x7a,0x40,0xf0]
+vpmacssdqh %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssdqh %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x8f,0x3a,0xf0]
+vpmacssdqh %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssdqh %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x8f,0xff,0xf0]
+vpmacssdqh %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacssdqh %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8f,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacssdqh %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacssdqh %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8f,0x74,0x82,0x40,0x60]
+vpmacssdqh %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssdqh %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8f,0x74,0x82,0xc0,0x60]
+vpmacssdqh %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssdqh %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8f,0x74,0x02,0x40,0x60]
+vpmacssdqh %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacssdqh %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8f,0x72,0x40,0x60]
+vpmacssdqh %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssdqh %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8f,0x32,0x60]
+vpmacssdqh %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssdqh %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x8f,0xf6,0x60]
+vpmacssdqh %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacssdql %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x87,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacssdql %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacssdql %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x87,0x7c,0x82,0x40,0xf0]
+vpmacssdql %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssdql %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x87,0x7c,0x82,0xc0,0xf0]
+vpmacssdql %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssdql %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x87,0x7c,0x02,0x40,0xf0]
+vpmacssdql %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacssdql %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x87,0x7a,0x40,0xf0]
+vpmacssdql %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssdql %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x87,0x3a,0xf0]
+vpmacssdql %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssdql %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x87,0xff,0xf0]
+vpmacssdql %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacssdql %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x87,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacssdql %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacssdql %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x87,0x74,0x82,0x40,0x60]
+vpmacssdql %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssdql %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x87,0x74,0x82,0xc0,0x60]
+vpmacssdql %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssdql %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x87,0x74,0x02,0x40,0x60]
+vpmacssdql %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacssdql %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x87,0x72,0x40,0x60]
+vpmacssdql %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssdql %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x87,0x32,0x60]
+vpmacssdql %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssdql %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x87,0xf6,0x60]
+vpmacssdql %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacsswd %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x86,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacsswd %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacsswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x86,0x7c,0x82,0x40,0xf0]
+vpmacsswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x86,0x7c,0x82,0xc0,0xf0]
+vpmacsswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x86,0x7c,0x02,0x40,0xf0]
+vpmacsswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacsswd %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x86,0x7a,0x40,0xf0]
+vpmacsswd %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsswd %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x86,0x3a,0xf0]
+vpmacsswd %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsswd %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x86,0xff,0xf0]
+vpmacsswd %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacsswd %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x86,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacsswd %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacsswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x86,0x74,0x82,0x40,0x60]
+vpmacsswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x86,0x74,0x82,0xc0,0x60]
+vpmacsswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x86,0x74,0x02,0x40,0x60]
+vpmacsswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacsswd %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x86,0x72,0x40,0x60]
+vpmacsswd %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsswd %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x86,0x32,0x60]
+vpmacsswd %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsswd %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x86,0xf6,0x60]
+vpmacsswd %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacssww %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x85,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacssww %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacssww %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x85,0x7c,0x82,0x40,0xf0]
+vpmacssww %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssww %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x85,0x7c,0x82,0xc0,0xf0]
+vpmacssww %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacssww %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x85,0x7c,0x02,0x40,0xf0]
+vpmacssww %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacssww %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x85,0x7a,0x40,0xf0]
+vpmacssww %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssww %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x85,0x3a,0xf0]
+vpmacssww %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacssww %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x85,0xff,0xf0]
+vpmacssww %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacssww %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x85,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacssww %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacssww %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x85,0x74,0x82,0x40,0x60]
+vpmacssww %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssww %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x85,0x74,0x82,0xc0,0x60]
+vpmacssww %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacssww %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x85,0x74,0x02,0x40,0x60]
+vpmacssww %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacssww %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x85,0x72,0x40,0x60]
+vpmacssww %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssww %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x85,0x32,0x60]
+vpmacssww %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacssww %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x85,0xf6,0x60]
+vpmacssww %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacswd %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x96,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacswd %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x96,0x7c,0x82,0x40,0xf0]
+vpmacswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x96,0x7c,0x82,0xc0,0xf0]
+vpmacswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x96,0x7c,0x02,0x40,0xf0]
+vpmacswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacswd %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x96,0x7a,0x40,0xf0]
+vpmacswd %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacswd %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x96,0x3a,0xf0]
+vpmacswd %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacswd %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x96,0xff,0xf0]
+vpmacswd %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacswd %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x96,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacswd %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x96,0x74,0x82,0x40,0x60]
+vpmacswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x96,0x74,0x82,0xc0,0x60]
+vpmacswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x96,0x74,0x02,0x40,0x60]
+vpmacswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacswd %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x96,0x72,0x40,0x60]
+vpmacswd %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacswd %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x96,0x32,0x60]
+vpmacswd %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacswd %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x96,0xf6,0x60]
+vpmacswd %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmacsww %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x95,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmacsww %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmacsww %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x95,0x7c,0x82,0x40,0xf0]
+vpmacsww %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsww %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x95,0x7c,0x82,0xc0,0xf0]
+vpmacsww %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmacsww %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x95,0x7c,0x02,0x40,0xf0]
+vpmacsww %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmacsww %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x95,0x7a,0x40,0xf0]
+vpmacsww %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsww %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0x95,0x3a,0xf0]
+vpmacsww %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmacsww %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0x95,0xff,0xf0]
+vpmacsww %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmacsww %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x95,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmacsww %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmacsww %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x95,0x74,0x82,0x40,0x60]
+vpmacsww %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsww %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x95,0x74,0x82,0xc0,0x60]
+vpmacsww %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmacsww %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x95,0x74,0x02,0x40,0x60]
+vpmacsww %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmacsww %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x95,0x72,0x40,0x60]
+vpmacsww %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsww %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x95,0x32,0x60]
+vpmacsww %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmacsww %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0x95,0xf6,0x60]
+vpmacsww %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmadcsswd %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa6,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmadcsswd %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmadcsswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa6,0x7c,0x82,0x40,0xf0]
+vpmadcsswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmadcsswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa6,0x7c,0x82,0xc0,0xf0]
+vpmadcsswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmadcsswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa6,0x7c,0x02,0x40,0xf0]
+vpmadcsswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmadcsswd %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa6,0x7a,0x40,0xf0]
+vpmadcsswd %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmadcsswd %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa6,0x3a,0xf0]
+vpmadcsswd %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmadcsswd %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xa6,0xff,0xf0]
+vpmadcsswd %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmadcsswd %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa6,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmadcsswd %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmadcsswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa6,0x74,0x82,0x40,0x60]
+vpmadcsswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmadcsswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa6,0x74,0x82,0xc0,0x60]
+vpmadcsswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmadcsswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa6,0x74,0x02,0x40,0x60]
+vpmadcsswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmadcsswd %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa6,0x72,0x40,0x60]
+vpmadcsswd %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmadcsswd %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa6,0x32,0x60]
+vpmadcsswd %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmadcsswd %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa6,0xf6,0x60]
+vpmadcsswd %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpmadcswd %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xb6,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpmadcswd %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpmadcswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xb6,0x7c,0x82,0x40,0xf0]
+vpmadcswd %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmadcswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xb6,0x7c,0x82,0xc0,0xf0]
+vpmadcswd %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpmadcswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xb6,0x7c,0x02,0x40,0xf0]
+vpmadcswd %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpmadcswd %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xb6,0x7a,0x40,0xf0]
+vpmadcswd %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpmadcswd %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xb6,0x3a,0xf0]
+vpmadcswd %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpmadcswd %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xb6,0xff,0xf0]
+vpmadcswd %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpmadcswd %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xb6,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpmadcswd %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpmadcswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xb6,0x74,0x82,0x40,0x60]
+vpmadcswd %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmadcswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xb6,0x74,0x82,0xc0,0x60]
+vpmadcswd %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpmadcswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xb6,0x74,0x02,0x40,0x60]
+vpmadcswd %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpmadcswd %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xb6,0x72,0x40,0x60]
+vpmadcswd %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpmadcswd %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xb6,0x32,0x60]
+vpmadcswd %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpmadcswd %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xb6,0xf6,0x60]
+vpmadcswd %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpperm 485498096, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa3,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpperm 485498096, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpperm 485498096, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa3,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpperm 485498096, %xmm6, %xmm6, %xmm6
+
+// CHECK: vpperm 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa3,0x7c,0x82,0x40,0xf0]
+vpperm 64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpperm -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa3,0x7c,0x82,0xc0,0xf0]
+vpperm -64(%rdx,%rax,4), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpperm 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa3,0x74,0x82,0x40,0x60]
+vpperm 64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpperm -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa3,0x74,0x82,0xc0,0x60]
+vpperm -64(%rdx,%rax,4), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpperm 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa3,0x7c,0x02,0x40,0xf0]
+vpperm 64(%rdx,%rax), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpperm 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa3,0x74,0x02,0x40,0x60]
+vpperm 64(%rdx,%rax), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpperm 64(%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa3,0x7a,0x40,0xf0]
+vpperm 64(%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpperm 64(%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa3,0x72,0x40,0x60]
+vpperm 64(%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpperm (%rdx), %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x80,0xa3,0x3a,0xf0]
+vpperm (%rdx), %xmm15, %xmm15, %xmm15
+
+// CHECK: vpperm (%rdx), %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0xc8,0xa3,0x32,0x60]
+vpperm (%rdx), %xmm6, %xmm6, %xmm6
+
+// CHECK: vpperm %xmm15, 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa3,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0xf0]
+vpperm %xmm15, 485498096, %xmm15, %xmm15
+
+// CHECK: vpperm %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa3,0x7c,0x82,0x40,0xf0]
+vpperm %xmm15, 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpperm %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa3,0x7c,0x82,0xc0,0xf0]
+vpperm %xmm15, -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpperm %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa3,0x7c,0x02,0x40,0xf0]
+vpperm %xmm15, 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpperm %xmm15, 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa3,0x7a,0x40,0xf0]
+vpperm %xmm15, 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpperm %xmm15, (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x00,0xa3,0x3a,0xf0]
+vpperm %xmm15, (%rdx), %xmm15, %xmm15
+
+// CHECK: vpperm %xmm15, %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x00,0xa3,0xff,0xf0]
+vpperm %xmm15, %xmm15, %xmm15, %xmm15
+
+// CHECK: vpperm %xmm6, 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa3,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x60]
+vpperm %xmm6, 485498096, %xmm6, %xmm6
+
+// CHECK: vpperm %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa3,0x74,0x82,0x40,0x60]
+vpperm %xmm6, 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpperm %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa3,0x74,0x82,0xc0,0x60]
+vpperm %xmm6, -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpperm %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa3,0x74,0x02,0x40,0x60]
+vpperm %xmm6, 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpperm %xmm6, 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa3,0x72,0x40,0x60]
+vpperm %xmm6, 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpperm %xmm6, (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa3,0x32,0x60]
+vpperm %xmm6, (%rdx), %xmm6, %xmm6
+
+// CHECK: vpperm %xmm6, %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x48,0xa3,0xf6,0x60]
+vpperm %xmm6, %xmm6, %xmm6, %xmm6
+
+// CHECK: vprotb $0, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc0,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotb $0, 485498096, %xmm15
+
+// CHECK: vprotb $0, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotb $0, 485498096, %xmm6
+
+// CHECK: vprotb $0, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc0,0x7c,0x82,0x40,0x00]
+vprotb $0, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotb $0, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc0,0x7c,0x82,0xc0,0x00]
+vprotb $0, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotb $0, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x74,0x82,0x40,0x00]
+vprotb $0, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotb $0, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x74,0x82,0xc0,0x00]
+vprotb $0, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotb $0, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc0,0x7c,0x02,0x40,0x00]
+vprotb $0, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotb $0, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x74,0x02,0x40,0x00]
+vprotb $0, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotb $0, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc0,0x7a,0x40,0x00]
+vprotb $0, 64(%rdx), %xmm15
+
+// CHECK: vprotb $0, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x72,0x40,0x00]
+vprotb $0, 64(%rdx), %xmm6
+
+// CHECK: vprotb $0, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc0,0x3a,0x00]
+vprotb $0, (%rdx), %xmm15
+
+// CHECK: vprotb $0, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0x32,0x00]
+vprotb $0, (%rdx), %xmm6
+
+// CHECK: vprotb $0, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x78,0xc0,0xff,0x00]
+vprotb $0, %xmm15, %xmm15
+
+// CHECK: vprotb $0, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc0,0xf6,0x00]
+vprotb $0, %xmm6, %xmm6
+
+// CHECK: vprotb 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x90,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotb 485498096, %xmm15, %xmm15
+
+// CHECK: vprotb 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x90,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotb 485498096, %xmm6, %xmm6
+
+// CHECK: vprotb 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x90,0x7c,0x82,0x40]
+vprotb 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotb -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x90,0x7c,0x82,0xc0]
+vprotb -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotb 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x90,0x74,0x82,0x40]
+vprotb 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotb -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x90,0x74,0x82,0xc0]
+vprotb -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotb 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x90,0x7c,0x02,0x40]
+vprotb 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vprotb 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x90,0x74,0x02,0x40]
+vprotb 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vprotb 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x90,0x7a,0x40]
+vprotb 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vprotb 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x90,0x72,0x40]
+vprotb 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vprotb (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x90,0x3a]
+vprotb (%rdx), %xmm15, %xmm15
+
+// CHECK: vprotb (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x90,0x32]
+vprotb (%rdx), %xmm6, %xmm6
+
+// CHECK: vprotb %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x90,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotb %xmm15, 485498096, %xmm15
+
+// CHECK: vprotb %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x90,0x7c,0x82,0x40]
+vprotb %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotb %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x90,0x7c,0x82,0xc0]
+vprotb %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotb %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x90,0x7c,0x02,0x40]
+vprotb %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotb %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x90,0x7a,0x40]
+vprotb %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vprotb %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x90,0x3a]
+vprotb %xmm15, (%rdx), %xmm15
+
+// CHECK: vprotb %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x90,0xff]
+vprotb %xmm15, %xmm15, %xmm15
+
+// CHECK: vprotb %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x90,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotb %xmm6, 485498096, %xmm6
+
+// CHECK: vprotb %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x90,0x74,0x82,0x40]
+vprotb %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotb %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x90,0x74,0x82,0xc0]
+vprotb %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotb %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x90,0x74,0x02,0x40]
+vprotb %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotb %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x90,0x72,0x40]
+vprotb %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vprotb %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x90,0x32]
+vprotb %xmm6, (%rdx), %xmm6
+
+// CHECK: vprotb %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x90,0xf6]
+vprotb %xmm6, %xmm6, %xmm6
+
+// CHECK: vprotd $0, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc2,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotd $0, 485498096, %xmm15
+
+// CHECK: vprotd $0, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotd $0, 485498096, %xmm6
+
+// CHECK: vprotd $0, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc2,0x7c,0x82,0x40,0x00]
+vprotd $0, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotd $0, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc2,0x7c,0x82,0xc0,0x00]
+vprotd $0, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotd $0, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x74,0x82,0x40,0x00]
+vprotd $0, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotd $0, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x74,0x82,0xc0,0x00]
+vprotd $0, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotd $0, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc2,0x7c,0x02,0x40,0x00]
+vprotd $0, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotd $0, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x74,0x02,0x40,0x00]
+vprotd $0, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotd $0, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc2,0x7a,0x40,0x00]
+vprotd $0, 64(%rdx), %xmm15
+
+// CHECK: vprotd $0, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x72,0x40,0x00]
+vprotd $0, 64(%rdx), %xmm6
+
+// CHECK: vprotd $0, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc2,0x3a,0x00]
+vprotd $0, (%rdx), %xmm15
+
+// CHECK: vprotd $0, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0x32,0x00]
+vprotd $0, (%rdx), %xmm6
+
+// CHECK: vprotd $0, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x78,0xc2,0xff,0x00]
+vprotd $0, %xmm15, %xmm15
+
+// CHECK: vprotd $0, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc2,0xf6,0x00]
+vprotd $0, %xmm6, %xmm6
+
+// CHECK: vprotd 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x92,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotd 485498096, %xmm15, %xmm15
+
+// CHECK: vprotd 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x92,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotd 485498096, %xmm6, %xmm6
+
+// CHECK: vprotd 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x92,0x7c,0x82,0x40]
+vprotd 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotd -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x92,0x7c,0x82,0xc0]
+vprotd -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotd 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x92,0x74,0x82,0x40]
+vprotd 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotd -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x92,0x74,0x82,0xc0]
+vprotd -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotd 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x92,0x7c,0x02,0x40]
+vprotd 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vprotd 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x92,0x74,0x02,0x40]
+vprotd 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vprotd 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x92,0x7a,0x40]
+vprotd 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vprotd 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x92,0x72,0x40]
+vprotd 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vprotd (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x92,0x3a]
+vprotd (%rdx), %xmm15, %xmm15
+
+// CHECK: vprotd (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x92,0x32]
+vprotd (%rdx), %xmm6, %xmm6
+
+// CHECK: vprotd %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x92,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotd %xmm15, 485498096, %xmm15
+
+// CHECK: vprotd %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x92,0x7c,0x82,0x40]
+vprotd %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotd %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x92,0x7c,0x82,0xc0]
+vprotd %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotd %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x92,0x7c,0x02,0x40]
+vprotd %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotd %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x92,0x7a,0x40]
+vprotd %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vprotd %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x92,0x3a]
+vprotd %xmm15, (%rdx), %xmm15
+
+// CHECK: vprotd %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x92,0xff]
+vprotd %xmm15, %xmm15, %xmm15
+
+// CHECK: vprotd %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x92,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotd %xmm6, 485498096, %xmm6
+
+// CHECK: vprotd %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x92,0x74,0x82,0x40]
+vprotd %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotd %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x92,0x74,0x82,0xc0]
+vprotd %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotd %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x92,0x74,0x02,0x40]
+vprotd %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotd %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x92,0x72,0x40]
+vprotd %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vprotd %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x92,0x32]
+vprotd %xmm6, (%rdx), %xmm6
+
+// CHECK: vprotd %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x92,0xf6]
+vprotd %xmm6, %xmm6, %xmm6
+
+// CHECK: vprotq $0, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc3,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotq $0, 485498096, %xmm15
+
+// CHECK: vprotq $0, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotq $0, 485498096, %xmm6
+
+// CHECK: vprotq $0, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc3,0x7c,0x82,0x40,0x00]
+vprotq $0, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotq $0, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc3,0x7c,0x82,0xc0,0x00]
+vprotq $0, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotq $0, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x74,0x82,0x40,0x00]
+vprotq $0, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotq $0, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x74,0x82,0xc0,0x00]
+vprotq $0, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotq $0, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc3,0x7c,0x02,0x40,0x00]
+vprotq $0, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotq $0, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x74,0x02,0x40,0x00]
+vprotq $0, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotq $0, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc3,0x7a,0x40,0x00]
+vprotq $0, 64(%rdx), %xmm15
+
+// CHECK: vprotq $0, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x72,0x40,0x00]
+vprotq $0, 64(%rdx), %xmm6
+
+// CHECK: vprotq $0, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc3,0x3a,0x00]
+vprotq $0, (%rdx), %xmm15
+
+// CHECK: vprotq $0, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0x32,0x00]
+vprotq $0, (%rdx), %xmm6
+
+// CHECK: vprotq $0, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x78,0xc3,0xff,0x00]
+vprotq $0, %xmm15, %xmm15
+
+// CHECK: vprotq $0, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc3,0xf6,0x00]
+vprotq $0, %xmm6, %xmm6
+
+// CHECK: vprotq 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x93,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotq 485498096, %xmm15, %xmm15
+
+// CHECK: vprotq 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x93,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotq 485498096, %xmm6, %xmm6
+
+// CHECK: vprotq 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x93,0x7c,0x82,0x40]
+vprotq 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotq -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x93,0x7c,0x82,0xc0]
+vprotq -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotq 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x93,0x74,0x82,0x40]
+vprotq 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotq -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x93,0x74,0x82,0xc0]
+vprotq -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotq 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x93,0x7c,0x02,0x40]
+vprotq 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vprotq 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x93,0x74,0x02,0x40]
+vprotq 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vprotq 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x93,0x7a,0x40]
+vprotq 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vprotq 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x93,0x72,0x40]
+vprotq 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vprotq (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x93,0x3a]
+vprotq (%rdx), %xmm15, %xmm15
+
+// CHECK: vprotq (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x93,0x32]
+vprotq (%rdx), %xmm6, %xmm6
+
+// CHECK: vprotq %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x93,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotq %xmm15, 485498096, %xmm15
+
+// CHECK: vprotq %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x93,0x7c,0x82,0x40]
+vprotq %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotq %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x93,0x7c,0x82,0xc0]
+vprotq %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotq %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x93,0x7c,0x02,0x40]
+vprotq %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotq %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x93,0x7a,0x40]
+vprotq %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vprotq %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x93,0x3a]
+vprotq %xmm15, (%rdx), %xmm15
+
+// CHECK: vprotq %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x93,0xff]
+vprotq %xmm15, %xmm15, %xmm15
+
+// CHECK: vprotq %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x93,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotq %xmm6, 485498096, %xmm6
+
+// CHECK: vprotq %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x93,0x74,0x82,0x40]
+vprotq %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotq %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x93,0x74,0x82,0xc0]
+vprotq %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotq %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x93,0x74,0x02,0x40]
+vprotq %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotq %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x93,0x72,0x40]
+vprotq %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vprotq %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x93,0x32]
+vprotq %xmm6, (%rdx), %xmm6
+
+// CHECK: vprotq %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x93,0xf6]
+vprotq %xmm6, %xmm6, %xmm6
+
+// CHECK: vprotw $0, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc1,0x3c,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotw $0, 485498096, %xmm15
+
+// CHECK: vprotw $0, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x34,0x25,0xf0,0x1c,0xf0,0x1c,0x00]
+vprotw $0, 485498096, %xmm6
+
+// CHECK: vprotw $0, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc1,0x7c,0x82,0x40,0x00]
+vprotw $0, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotw $0, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc1,0x7c,0x82,0xc0,0x00]
+vprotw $0, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotw $0, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x74,0x82,0x40,0x00]
+vprotw $0, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotw $0, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x74,0x82,0xc0,0x00]
+vprotw $0, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotw $0, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc1,0x7c,0x02,0x40,0x00]
+vprotw $0, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotw $0, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x74,0x02,0x40,0x00]
+vprotw $0, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotw $0, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc1,0x7a,0x40,0x00]
+vprotw $0, 64(%rdx), %xmm15
+
+// CHECK: vprotw $0, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x72,0x40,0x00]
+vprotw $0, 64(%rdx), %xmm6
+
+// CHECK: vprotw $0, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x68,0x78,0xc1,0x3a,0x00]
+vprotw $0, (%rdx), %xmm15
+
+// CHECK: vprotw $0, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0x32,0x00]
+vprotw $0, (%rdx), %xmm6
+
+// CHECK: vprotw $0, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x48,0x78,0xc1,0xff,0x00]
+vprotw $0, %xmm15, %xmm15
+
+// CHECK: vprotw $0, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe8,0x78,0xc1,0xf6,0x00]
+vprotw $0, %xmm6, %xmm6
+
+// CHECK: vprotw 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x91,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotw 485498096, %xmm15, %xmm15
+
+// CHECK: vprotw 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x91,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotw 485498096, %xmm6, %xmm6
+
+// CHECK: vprotw 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x91,0x7c,0x82,0x40]
+vprotw 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotw -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x91,0x7c,0x82,0xc0]
+vprotw -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vprotw 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x91,0x74,0x82,0x40]
+vprotw 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotw -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x91,0x74,0x82,0xc0]
+vprotw -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vprotw 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x91,0x7c,0x02,0x40]
+vprotw 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vprotw 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x91,0x74,0x02,0x40]
+vprotw 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vprotw 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x91,0x7a,0x40]
+vprotw 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vprotw 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x91,0x72,0x40]
+vprotw 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vprotw (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x91,0x3a]
+vprotw (%rdx), %xmm15, %xmm15
+
+// CHECK: vprotw (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x91,0x32]
+vprotw (%rdx), %xmm6, %xmm6
+
+// CHECK: vprotw %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x91,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotw %xmm15, 485498096, %xmm15
+
+// CHECK: vprotw %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x91,0x7c,0x82,0x40]
+vprotw %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotw %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x91,0x7c,0x82,0xc0]
+vprotw %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vprotw %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x91,0x7c,0x02,0x40]
+vprotw %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vprotw %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x91,0x7a,0x40]
+vprotw %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vprotw %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x91,0x3a]
+vprotw %xmm15, (%rdx), %xmm15
+
+// CHECK: vprotw %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x91,0xff]
+vprotw %xmm15, %xmm15, %xmm15
+
+// CHECK: vprotw %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x91,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vprotw %xmm6, 485498096, %xmm6
+
+// CHECK: vprotw %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x91,0x74,0x82,0x40]
+vprotw %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotw %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x91,0x74,0x82,0xc0]
+vprotw %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vprotw %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x91,0x74,0x02,0x40]
+vprotw %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vprotw %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x91,0x72,0x40]
+vprotw %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vprotw %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x91,0x32]
+vprotw %xmm6, (%rdx), %xmm6
+
+// CHECK: vprotw %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x91,0xf6]
+vprotw %xmm6, %xmm6, %xmm6
+
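+// The vpsha{b,w,d,q} and vpshl{b,w,d,q} tests below cover the XOP
+// per-element shifts: the first (AT&T) operand holds a signed shift count,
+// where positive counts shift left and negative counts shift right
+// (arithmetic for vpsha*, logical for vpshl*).
+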
+// CHECK: vpshab 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x98,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshab 485498096, %xmm15, %xmm15
+
+// CHECK: vpshab 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x98,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshab 485498096, %xmm6, %xmm6
+
+// CHECK: vpshab 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x98,0x7c,0x82,0x40]
+vpshab 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshab -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x98,0x7c,0x82,0xc0]
+vpshab -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshab 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x98,0x74,0x82,0x40]
+vpshab 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshab -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x98,0x74,0x82,0xc0]
+vpshab -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshab 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x98,0x7c,0x02,0x40]
+vpshab 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshab 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x98,0x74,0x02,0x40]
+vpshab 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshab 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x98,0x7a,0x40]
+vpshab 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshab 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x98,0x72,0x40]
+vpshab 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshab (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x98,0x3a]
+vpshab (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshab (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x98,0x32]
+vpshab (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshab %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x98,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshab %xmm15, 485498096, %xmm15
+
+// CHECK: vpshab %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x98,0x7c,0x82,0x40]
+vpshab %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshab %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x98,0x7c,0x82,0xc0]
+vpshab %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshab %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x98,0x7c,0x02,0x40]
+vpshab %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshab %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x98,0x7a,0x40]
+vpshab %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshab %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x98,0x3a]
+vpshab %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshab %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x98,0xff]
+vpshab %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshab %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x98,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshab %xmm6, 485498096, %xmm6
+
+// CHECK: vpshab %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x98,0x74,0x82,0x40]
+vpshab %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshab %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x98,0x74,0x82,0xc0]
+vpshab %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshab %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x98,0x74,0x02,0x40]
+vpshab %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshab %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x98,0x72,0x40]
+vpshab %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshab %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x98,0x32]
+vpshab %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshab %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x98,0xf6]
+vpshab %xmm6, %xmm6, %xmm6
+
+// CHECK: vpshad 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9a,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshad 485498096, %xmm15, %xmm15
+
+// CHECK: vpshad 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9a,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshad 485498096, %xmm6, %xmm6
+
+// CHECK: vpshad 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9a,0x7c,0x82,0x40]
+vpshad 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshad -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9a,0x7c,0x82,0xc0]
+vpshad -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshad 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9a,0x74,0x82,0x40]
+vpshad 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshad -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9a,0x74,0x82,0xc0]
+vpshad -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshad 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9a,0x7c,0x02,0x40]
+vpshad 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshad 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9a,0x74,0x02,0x40]
+vpshad 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshad 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9a,0x7a,0x40]
+vpshad 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshad 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9a,0x72,0x40]
+vpshad 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshad (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9a,0x3a]
+vpshad (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshad (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9a,0x32]
+vpshad (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshad %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9a,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshad %xmm15, 485498096, %xmm15
+
+// CHECK: vpshad %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9a,0x7c,0x82,0x40]
+vpshad %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshad %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9a,0x7c,0x82,0xc0]
+vpshad %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshad %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9a,0x7c,0x02,0x40]
+vpshad %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshad %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9a,0x7a,0x40]
+vpshad %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshad %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9a,0x3a]
+vpshad %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshad %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x9a,0xff]
+vpshad %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshad %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9a,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshad %xmm6, 485498096, %xmm6
+
+// CHECK: vpshad %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9a,0x74,0x82,0x40]
+vpshad %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshad %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9a,0x74,0x82,0xc0]
+vpshad %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshad %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9a,0x74,0x02,0x40]
+vpshad %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshad %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9a,0x72,0x40]
+vpshad %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshad %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9a,0x32]
+vpshad %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshad %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9a,0xf6]
+vpshad %xmm6, %xmm6, %xmm6
+
+// CHECK: vpshaq 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9b,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaq 485498096, %xmm15, %xmm15
+
+// CHECK: vpshaq 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9b,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaq 485498096, %xmm6, %xmm6
+
+// CHECK: vpshaq 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9b,0x7c,0x82,0x40]
+vpshaq 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshaq -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9b,0x7c,0x82,0xc0]
+vpshaq -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshaq 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9b,0x74,0x82,0x40]
+vpshaq 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshaq -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9b,0x74,0x82,0xc0]
+vpshaq -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshaq 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9b,0x7c,0x02,0x40]
+vpshaq 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshaq 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9b,0x74,0x02,0x40]
+vpshaq 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshaq 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9b,0x7a,0x40]
+vpshaq 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshaq 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9b,0x72,0x40]
+vpshaq 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshaq (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x9b,0x3a]
+vpshaq (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshaq (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x9b,0x32]
+vpshaq (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshaq %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9b,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaq %xmm15, 485498096, %xmm15
+
+// CHECK: vpshaq %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9b,0x7c,0x82,0x40]
+vpshaq %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshaq %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9b,0x7c,0x82,0xc0]
+vpshaq %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshaq %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9b,0x7c,0x02,0x40]
+vpshaq %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshaq %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9b,0x7a,0x40]
+vpshaq %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshaq %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x9b,0x3a]
+vpshaq %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshaq %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x9b,0xff]
+vpshaq %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshaq %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9b,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaq %xmm6, 485498096, %xmm6
+
+// CHECK: vpshaq %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9b,0x74,0x82,0x40]
+vpshaq %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshaq %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9b,0x74,0x82,0xc0]
+vpshaq %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshaq %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9b,0x74,0x02,0x40]
+vpshaq %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshaq %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9b,0x72,0x40]
+vpshaq %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshaq %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9b,0x32]
+vpshaq %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshaq %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x9b,0xf6]
+vpshaq %xmm6, %xmm6, %xmm6
+
+// CHECK: vpshaw 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x99,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaw 485498096, %xmm15, %xmm15
+
+// CHECK: vpshaw 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x99,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaw 485498096, %xmm6, %xmm6
+
+// CHECK: vpshaw 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x99,0x7c,0x82,0x40]
+vpshaw 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshaw -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x99,0x7c,0x82,0xc0]
+vpshaw -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshaw 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x99,0x74,0x82,0x40]
+vpshaw 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshaw -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x99,0x74,0x82,0xc0]
+vpshaw -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshaw 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x99,0x7c,0x02,0x40]
+vpshaw 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshaw 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x99,0x74,0x02,0x40]
+vpshaw 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshaw 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x99,0x7a,0x40]
+vpshaw 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshaw 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x99,0x72,0x40]
+vpshaw 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshaw (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x99,0x3a]
+vpshaw (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshaw (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x99,0x32]
+vpshaw (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshaw %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x99,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaw %xmm15, 485498096, %xmm15
+
+// CHECK: vpshaw %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x99,0x7c,0x82,0x40]
+vpshaw %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshaw %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x99,0x7c,0x82,0xc0]
+vpshaw %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshaw %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x99,0x7c,0x02,0x40]
+vpshaw %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshaw %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x99,0x7a,0x40]
+vpshaw %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshaw %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x99,0x3a]
+vpshaw %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshaw %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x99,0xff]
+vpshaw %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshaw %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x99,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshaw %xmm6, 485498096, %xmm6
+
+// CHECK: vpshaw %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x99,0x74,0x82,0x40]
+vpshaw %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshaw %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x99,0x74,0x82,0xc0]
+vpshaw %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshaw %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x99,0x74,0x02,0x40]
+vpshaw %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshaw %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x99,0x72,0x40]
+vpshaw %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshaw %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x99,0x32]
+vpshaw %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshaw %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x99,0xf6]
+vpshaw %xmm6, %xmm6, %xmm6
+
+// CHECK: vpshlb 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x94,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlb 485498096, %xmm15, %xmm15
+
+// CHECK: vpshlb 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x94,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlb 485498096, %xmm6, %xmm6
+
+// CHECK: vpshlb 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x94,0x7c,0x82,0x40]
+vpshlb 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshlb -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x94,0x7c,0x82,0xc0]
+vpshlb -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshlb 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x94,0x74,0x82,0x40]
+vpshlb 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshlb -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x94,0x74,0x82,0xc0]
+vpshlb -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshlb 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x94,0x7c,0x02,0x40]
+vpshlb 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshlb 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x94,0x74,0x02,0x40]
+vpshlb 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshlb 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x94,0x7a,0x40]
+vpshlb 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshlb 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x94,0x72,0x40]
+vpshlb 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshlb (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x94,0x3a]
+vpshlb (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshlb (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x94,0x32]
+vpshlb (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshlb %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x94,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlb %xmm15, 485498096, %xmm15
+
+// CHECK: vpshlb %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x94,0x7c,0x82,0x40]
+vpshlb %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshlb %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x94,0x7c,0x82,0xc0]
+vpshlb %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshlb %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x94,0x7c,0x02,0x40]
+vpshlb %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshlb %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x94,0x7a,0x40]
+vpshlb %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshlb %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x94,0x3a]
+vpshlb %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshlb %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x94,0xff]
+vpshlb %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshlb %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x94,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlb %xmm6, 485498096, %xmm6
+
+// CHECK: vpshlb %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x94,0x74,0x82,0x40]
+vpshlb %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshlb %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x94,0x74,0x82,0xc0]
+vpshlb %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshlb %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x94,0x74,0x02,0x40]
+vpshlb %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshlb %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x94,0x72,0x40]
+vpshlb %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshlb %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x94,0x32]
+vpshlb %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshlb %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x94,0xf6]
+vpshlb %xmm6, %xmm6, %xmm6
+
+// CHECK: vpshld 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x96,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshld 485498096, %xmm15, %xmm15
+
+// CHECK: vpshld 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x96,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshld 485498096, %xmm6, %xmm6
+
+// CHECK: vpshld 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x96,0x7c,0x82,0x40]
+vpshld 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshld -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x96,0x7c,0x82,0xc0]
+vpshld -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshld 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x96,0x74,0x82,0x40]
+vpshld 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshld -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x96,0x74,0x82,0xc0]
+vpshld -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshld 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x96,0x7c,0x02,0x40]
+vpshld 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshld 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x96,0x74,0x02,0x40]
+vpshld 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshld 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x96,0x7a,0x40]
+vpshld 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshld 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x96,0x72,0x40]
+vpshld 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshld (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x96,0x3a]
+vpshld (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshld (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x96,0x32]
+vpshld (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshld %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x96,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshld %xmm15, 485498096, %xmm15
+
+// CHECK: vpshld %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x96,0x7c,0x82,0x40]
+vpshld %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshld %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x96,0x7c,0x82,0xc0]
+vpshld %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshld %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x96,0x7c,0x02,0x40]
+vpshld %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshld %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x96,0x7a,0x40]
+vpshld %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshld %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x96,0x3a]
+vpshld %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshld %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x96,0xff]
+vpshld %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshld %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x96,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshld %xmm6, 485498096, %xmm6
+
+// CHECK: vpshld %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x96,0x74,0x82,0x40]
+vpshld %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshld %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x96,0x74,0x82,0xc0]
+vpshld %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshld %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x96,0x74,0x02,0x40]
+vpshld %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshld %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x96,0x72,0x40]
+vpshld %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshld %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x96,0x32]
+vpshld %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshld %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x96,0xf6]
+vpshld %xmm6, %xmm6, %xmm6
+
+// CHECK: vpshlq 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x97,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlq 485498096, %xmm15, %xmm15
+
+// CHECK: vpshlq 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x97,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlq 485498096, %xmm6, %xmm6
+
+// CHECK: vpshlq 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x97,0x7c,0x82,0x40]
+vpshlq 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshlq -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x97,0x7c,0x82,0xc0]
+vpshlq -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshlq 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x97,0x74,0x82,0x40]
+vpshlq 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshlq -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x97,0x74,0x82,0xc0]
+vpshlq -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshlq 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x97,0x7c,0x02,0x40]
+vpshlq 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshlq 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x97,0x74,0x02,0x40]
+vpshlq 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshlq 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x97,0x7a,0x40]
+vpshlq 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshlq 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x97,0x72,0x40]
+vpshlq 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshlq (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x97,0x3a]
+vpshlq (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshlq (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x97,0x32]
+vpshlq (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshlq %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x97,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlq %xmm15, 485498096, %xmm15
+
+// CHECK: vpshlq %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x97,0x7c,0x82,0x40]
+vpshlq %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshlq %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x97,0x7c,0x82,0xc0]
+vpshlq %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshlq %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x97,0x7c,0x02,0x40]
+vpshlq %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshlq %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x97,0x7a,0x40]
+vpshlq %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshlq %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x97,0x3a]
+vpshlq %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshlq %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x97,0xff]
+vpshlq %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshlq %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x97,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlq %xmm6, 485498096, %xmm6
+
+// CHECK: vpshlq %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x97,0x74,0x82,0x40]
+vpshlq %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshlq %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x97,0x74,0x82,0xc0]
+vpshlq %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshlq %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x97,0x74,0x02,0x40]
+vpshlq %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshlq %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x97,0x72,0x40]
+vpshlq %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshlq %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x97,0x32]
+vpshlq %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshlq %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x97,0xf6]
+vpshlq %xmm6, %xmm6, %xmm6
+
+// CHECK: vpshlw 485498096, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x95,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlw 485498096, %xmm15, %xmm15
+
+// CHECK: vpshlw 485498096, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x95,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlw 485498096, %xmm6, %xmm6
+
+// CHECK: vpshlw 64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x95,0x7c,0x82,0x40]
+vpshlw 64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshlw -64(%rdx,%rax,4), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x95,0x7c,0x82,0xc0]
+vpshlw -64(%rdx,%rax,4), %xmm15, %xmm15
+
+// CHECK: vpshlw 64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x95,0x74,0x82,0x40]
+vpshlw 64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshlw -64(%rdx,%rax,4), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x95,0x74,0x82,0xc0]
+vpshlw -64(%rdx,%rax,4), %xmm6, %xmm6
+
+// CHECK: vpshlw 64(%rdx,%rax), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x95,0x7c,0x02,0x40]
+vpshlw 64(%rdx,%rax), %xmm15, %xmm15
+
+// CHECK: vpshlw 64(%rdx,%rax), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x95,0x74,0x02,0x40]
+vpshlw 64(%rdx,%rax), %xmm6, %xmm6
+
+// CHECK: vpshlw 64(%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x95,0x7a,0x40]
+vpshlw 64(%rdx), %xmm15, %xmm15
+
+// CHECK: vpshlw 64(%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x95,0x72,0x40]
+vpshlw 64(%rdx), %xmm6, %xmm6
+
+// CHECK: vpshlw (%rdx), %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x80,0x95,0x3a]
+vpshlw (%rdx), %xmm15, %xmm15
+
+// CHECK: vpshlw (%rdx), %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0xc8,0x95,0x32]
+vpshlw (%rdx), %xmm6, %xmm6
+
+// CHECK: vpshlw %xmm15, 485498096, %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x95,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlw %xmm15, 485498096, %xmm15
+
+// CHECK: vpshlw %xmm15, 64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x95,0x7c,0x82,0x40]
+vpshlw %xmm15, 64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshlw %xmm15, -64(%rdx,%rax,4), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x95,0x7c,0x82,0xc0]
+vpshlw %xmm15, -64(%rdx,%rax,4), %xmm15
+
+// CHECK: vpshlw %xmm15, 64(%rdx,%rax), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x95,0x7c,0x02,0x40]
+vpshlw %xmm15, 64(%rdx,%rax), %xmm15
+
+// CHECK: vpshlw %xmm15, 64(%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x95,0x7a,0x40]
+vpshlw %xmm15, 64(%rdx), %xmm15
+
+// CHECK: vpshlw %xmm15, (%rdx), %xmm15
+// CHECK: encoding: [0x8f,0x69,0x00,0x95,0x3a]
+vpshlw %xmm15, (%rdx), %xmm15
+
+// CHECK: vpshlw %xmm15, %xmm15, %xmm15
+// CHECK: encoding: [0x8f,0x49,0x00,0x95,0xff]
+vpshlw %xmm15, %xmm15, %xmm15
+
+// CHECK: vpshlw %xmm6, 485498096, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x95,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
+vpshlw %xmm6, 485498096, %xmm6
+
+// CHECK: vpshlw %xmm6, 64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x95,0x74,0x82,0x40]
+vpshlw %xmm6, 64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshlw %xmm6, -64(%rdx,%rax,4), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x95,0x74,0x82,0xc0]
+vpshlw %xmm6, -64(%rdx,%rax,4), %xmm6
+
+// CHECK: vpshlw %xmm6, 64(%rdx,%rax), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x95,0x74,0x02,0x40]
+vpshlw %xmm6, 64(%rdx,%rax), %xmm6
+
+// CHECK: vpshlw %xmm6, 64(%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x95,0x72,0x40]
+vpshlw %xmm6, 64(%rdx), %xmm6
+
+// CHECK: vpshlw %xmm6, (%rdx), %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x95,0x32]
+vpshlw %xmm6, (%rdx), %xmm6
+
+// CHECK: vpshlw %xmm6, %xmm6, %xmm6
+// CHECK: encoding: [0x8f,0xe9,0x48,0x95,0xf6]
+vpshlw %xmm6, %xmm6, %xmm6
+
diff --git a/test/MC/X86/compact-unwind.s b/test/MC/X86/compact-unwind.s
index 70fc018..03138c4 100644
--- a/test/MC/X86/compact-unwind.s
+++ b/test/MC/X86/compact-unwind.s
@@ -64,6 +64,24 @@
 	retq
 	.cfi_endproc
 
+# Check that a adjustment through a push %rax is the same as a sub.
+
+# CHECK:   Entry at offset 0x40:
+# CHECK-NEXT:     start:                0x2a _testrax
+# CHECK-NEXT:     length:               0x5
+# CHECK-NEXT:     compact encoding:     0x02020000
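+# (0x02020000 should decode as the frameless stack-immediate mode with a
+# 16-byte stack size: the 8-byte return address plus the 8-byte push below,
+# i.e. the push is counted like a subq $8, %rsp.)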
+	.globl	_testrax
+_testrax:                               ## @testrax
+	.cfi_startproc
+## %bb.0:                               ## %entry
+	pushq	%rax
+Ltmp15:
+	.cfi_def_cfa_offset 16
+	xorl	%eax, %eax
+	popq	%rax
+	retq
+	.cfi_endproc
+
 	.section	__TEXT,__cstring,cstring_literals
 L_.str:                                 ## @.str
 	.asciz	"%d\n"
diff --git a/test/MC/X86/disassemble-zeroes.s b/test/MC/X86/disassemble-zeroes.s
new file mode 100644
index 0000000..2ecfdad
--- /dev/null
+++ b/test/MC/X86/disassemble-zeroes.s
@@ -0,0 +1,81 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t
+// RUN: llvm-objdump -d %t | FileCheck %s --check-prefix=NODISASM
+
+// By default, llvm-objdump skips blocks of zero bytes when disassembling
+// (the exact skipping rules can be found in the code). This test checks
+// that we follow those rules and that disassembly of zero blocks can be
+// forced with the -z and --disassemble-zeroes options.
+
+// NODISASM:      Disassembly of section .text:
+// NODISASM-NEXT:  0000000000000000 main:
+// NODISASM-NEXT:   0:  00 00               addb %al, (%rax)
+// NODISASM-NEXT:   2:  00 00               addb %al, (%rax)
+// NODISASM-NEXT:   4:  00 00               addb %al, (%rax)
+// NODISASM-NEXT:   6:  00 90 00 00 00 00   addb %dl, (%rax)
+// NODISASM-NEXT:       ...
+// NODISASM-NEXT:   20: 90                  nop
+// NODISASM-NEXT:       ...
+// NODISASM:      0000000000000031 foo:
+// NODISASM-NEXT:   31: 00 00               addb %al, (%rax)
+// NODISASM-NEXT:   33: 00 00               addb %al, (%rax)
+// NODISASM:      0000000000000035 bar:
+// NODISASM-NEXT:       ...
+
+// Check that with -z we disassemble blocks of zeroes.
+// RUN: llvm-objdump -d -z %t | FileCheck %s --check-prefix=DISASM
+
+// DISASM: Disassembly of section .text:
+// DISASM-NEXT: 0000000000000000 main:
+// DISASM-NEXT:   0: 00 00              addb %al, (%rax)
+// DISASM-NEXT:   2: 00 00              addb %al, (%rax)
+// DISASM-NEXT:   4: 00 00              addb %al, (%rax)
+// DISASM-NEXT:   6: 00 90 00 00 00 00  addb %dl, (%rax)
+// DISASM-NEXT:   c: 00 00              addb %al, (%rax)
+// DISASM-NEXT:   e: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  10: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  12: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  14: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  16: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  18: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  1a: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  1c: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  1e: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  20: 90                 nop
+// DISASM-NEXT:  21: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  23: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  25: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  27: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  29: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  2b: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  2d: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  2f: 00 00              addb %al, (%rax)
+// DISASM:      0000000000000031 foo:
+// DISASM-NEXT:  31: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  33: 00 00              addb %al, (%rax)
+// DISASM:      0000000000000035 bar:
+// DISASM-NEXT:  35: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  37: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  39: 00 00              addb %al, (%rax)
+// DISASM-NEXT:  3b: 00 00              addb %al, (%rax)
+
+// Check that --disassemble-zeroes works as an alias for -z.
+// RUN: llvm-objdump -d --disassemble-zeroes %t | FileCheck %s --check-prefix=DISASM
+
+.text
+.globl main
+.type main, @function
+main:
+ .long 0
+ .byte 0
+ .byte 0
+ .byte 0
+ nop
+ .quad 0
+ .quad 0
+ .quad 0
+ nop
+ .quad 0
+ .quad 0
+foo:
+ .long 0
+bar:
+ .quad 0
diff --git a/test/MC/X86/stackmap-nops.ll b/test/MC/X86/stackmap-nops.ll
index 33ef862..97cef8b 100644
--- a/test/MC/X86/stackmap-nops.ll
+++ b/test/MC/X86/stackmap-nops.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim -filetype=obj %s -o - | llvm-objdump -d - | FileCheck %s
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim -filetype=asm %s -o - | llvm-mc -triple=x86_64-apple-darwin -mcpu=corei7 -filetype=obj - | llvm-objdump -d - | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7 -frame-pointer=all -filetype=obj %s -o - | llvm-objdump -d - | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7 -frame-pointer=all -filetype=asm %s -o - | llvm-mc -triple=x86_64-apple-darwin -mcpu=corei7 -filetype=obj - | llvm-objdump -d - | FileCheck %s
 
 define void @nop_test() {
 entry:
diff --git a/test/MC/X86/x86_errors.s b/test/MC/X86/x86_errors.s
index 1fe0a58..d999591 100644
--- a/test/MC/X86/x86_errors.s
+++ b/test/MC/X86/x86_errors.s
@@ -118,3 +118,43 @@
 // 32: error: register %riz is only available in 64-bit mode
 // 64: error: base register is 32-bit, but index register is not
 mov (%eax,%riz), %ebx
+
+
+// Parse errors from assembler operand parsing.
+
+v_ecx = %ecx
+v_eax = %eax
+v_gs  = %gs
+v_imm = 4
+$test = %ebx
+
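+// The cases below should exercise the operand parser's error and warning
+// paths, including misuse of the aliases defined above.
+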
+// 32: 7: error: expected register here
+// 64: 7: error: expected register here
+mov 4(4), %eax
+
+// 32: 7: error: expected register here
+// 64: 7: error: expected register here
+mov 5(v_imm), %eax
+
+// 32: 7: error: invalid register name
+// 64: 7: error: invalid register name
+mov 6(%v_imm), %eax
+
+// 32: 8: warning: scale factor without index register is ignored
+// 64: 8: warning: scale factor without index register is ignored
+mov 7(,v_imm), %eax
+
+// 64: 6: error: expected immediate expression
+mov $%eax, %ecx
+
+// 32: 6: error: expected immediate expression
+// 64: 6: error: expected immediate expression
+mov $v_eax, %ecx
+
+// 32: error: unexpected token in argument list
+// 64: error: unexpected token in argument list
+mov v_ecx(%eax), %ecx
+
+// 32: 7: error: invalid operand for instruction
+// 64: 7: error: invalid operand for instruction
+addb (%dx), %al
diff --git a/test/MC/X86/x86_operands.s b/test/MC/X86/x86_operands.s
index 2258a95..3aa1b8d 100644
--- a/test/MC/X86/x86_operands.s
+++ b/test/MC/X86/x86_operands.s
@@ -61,3 +61,406 @@
 # CHECK: movl	%gs:8, %eax
 movl %gs:8, %eax
 
+# Make sure we correctly parse operands that use variables
+# assigned to registers (or to immediates).
+
+v_ecx = %ecx
+v_eax = %eax
+v_gs  = %gs
+v_imm = 4
+
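+# In each pair below, the CHECK line gives the canonical operand the parser
+# should produce once the v_* aliases are substituted.
+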
+#CHECK:	movl %eax, %ecx
+	movl %eax, v_ecx
+
+#CHECK: movl $1, %gs:0
+	movl $1, v_gs:(,)
+#CHECK: movl $1, %gs:(,%eax)
+	movl $1, v_gs:(,%eax)
+#CHECK: movl $1, %gs:(,%eax,2)
+	movl $1, v_gs:(,%eax,2)
+#CHECK: movl $1, %gs:(,%eax,4)
+	movl $1, v_gs:(,%eax,v_imm)
+#CHECK: movl $1, %gs:(,%eax)
+	movl $1, v_gs:(,v_eax)
+#CHECK: movl $1, %gs:(,%eax,2)
+	movl $1, v_gs:(,v_eax,2)
+#CHECK: movl $1, %gs:(,%eax,4)
+	movl $1, v_gs:(,v_eax,v_imm)
+#CHECK: movl $1, %gs:(%ecx)
+	movl $1, v_gs:(%ecx)
+#CHECK: movl $1, %gs:(%ecx)
+	movl $1, v_gs:(%ecx,)
+#CHECK: movl $1, %gs:(%ecx,%eax)
+	movl $1, v_gs:(%ecx,%eax)
+#CHECK: movl $1, %gs:(%ecx,%eax,2)
+	movl $1, v_gs:(%ecx,%eax,2)
+#CHECK: movl $1, %gs:(%ecx,%eax,4)
+	movl $1, v_gs:(%ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:(%ecx,%eax)
+	movl $1, v_gs:(%ecx,v_eax)
+#CHECK: movl $1, %gs:(%ecx,%eax,2)
+	movl $1, v_gs:(%ecx,v_eax,2)
+#CHECK: movl $1, %gs:(%ecx,%eax,4)
+	movl $1, v_gs:(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %gs:(%ecx)
+	movl $1, v_gs:(v_ecx)
+#CHECK: movl $1, %gs:(%ecx)
+	movl $1, v_gs:(v_ecx,)
+#CHECK: movl $1, %gs:(%ecx,%eax)
+	movl $1, v_gs:(v_ecx,%eax)
+#CHECK: movl $1, %gs:(%ecx,%eax,2)
+	movl $1, v_gs:(v_ecx,%eax,2)
+#CHECK: movl $1, %gs:(%ecx,%eax,4)
+	movl $1, v_gs:(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:(%ecx,%eax)
+	movl $1, v_gs:(v_ecx,v_eax)
+#CHECK: movl $1, %gs:(%ecx,%eax,2)
+	movl $1, v_gs:(v_ecx,v_eax,2)
+#CHECK: movl $1, %gs:(%ecx,%eax,4)
+	movl $1, v_gs:(v_ecx,v_eax,v_imm)
+#CHECK: movl $1, %gs:4
+	movl $1, v_gs:4
+#CHECK: movl $1, %gs:4
+	movl $1, v_gs:4()
+#CHECK: movl $1, %gs:4
+	movl $1, v_gs:4(,)
+#CHECK: movl $1, %gs:4(,%eax)
+	movl $1, v_gs:4(,%eax)
+#CHECK: movl $1, %gs:4(,%eax,2)
+	movl $1, v_gs:4(,%eax,2)
+#CHECK: movl $1, %gs:4(,%eax,4)
+	movl $1, v_gs:4(,%eax,v_imm)
+#CHECK: movl $1, %gs:4(,%eax)
+	movl $1, v_gs:4(,v_eax)
+#CHECK: movl $1, %gs:4(,%eax,2)
+	movl $1, v_gs:4(,v_eax,2)
+#CHECK: movl $1, %gs:4(,%eax,4)
+	movl $1, v_gs:4(,v_eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:4(%ecx)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:4(%ecx,)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:4(%ecx,%eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:4(%ecx,%eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:4(%ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:4(%ecx,v_eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:4(%ecx,v_eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:4(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:4(v_ecx)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:4(v_ecx,)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:4(v_ecx,%eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:4(v_ecx,%eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:4(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:4(v_ecx,v_eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:4(v_ecx,v_eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:4(v_ecx,v_eax,v_imm)
+#CHECK: movl $1, %gs:4
+	movl $1, v_gs:v_imm
+#CHECK: movl $1, %gs:4
+	movl $1, v_gs:v_imm()
+#CHECK: movl $1, %gs:4
+	movl $1, v_gs:v_imm(,)
+#CHECK: movl $1, %gs:4(,%eax)
+	movl $1, v_gs:v_imm(,%eax)
+#CHECK: movl $1, %gs:4(,%eax,2)
+	movl $1, v_gs:v_imm(,%eax,2)
+#CHECK: movl $1, %gs:4(,%eax,4)
+	movl $1, v_gs:v_imm(,%eax,v_imm)
+#CHECK: movl $1, %gs:4(,%eax)
+	movl $1, v_gs:v_imm(,v_eax)
+#CHECK: movl $1, %gs:4(,%eax,2)
+	movl $1, v_gs:v_imm(,v_eax,2)
+#CHECK: movl $1, %gs:4(,%eax,4)
+	movl $1, v_gs:v_imm(,v_eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:v_imm(%ecx)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:v_imm(%ecx,)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:v_imm(%ecx,%eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:v_imm(%ecx,%eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:v_imm(%ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:v_imm(%ecx,v_eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:v_imm(%ecx,v_eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:v_imm(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:v_imm(v_ecx)
+#CHECK: movl $1, %gs:4(%ecx)
+	movl $1, v_gs:v_imm(v_ecx,)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:v_imm(v_ecx,%eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:v_imm(v_ecx,%eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:v_imm(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:4(%ecx,%eax)
+	movl $1, v_gs:v_imm(v_ecx,v_eax)
+#CHECK: movl $1, %gs:4(%ecx,%eax,2)
+	movl $1, v_gs:v_imm(v_ecx,v_eax,2)
+#CHECK: movl $1, %gs:4(%ecx,%eax,4)
+	movl $1, v_gs:v_imm(v_ecx,v_eax,v_imm)
+#CHECK: movl $1, %gs:8
+	movl $1, v_gs:(v_imm+4)
+#CHECK: movl $1, %gs:8
+	movl $1, v_gs:(v_imm+4)()
+#CHECK: movl $1, %gs:8
+	movl $1, v_gs:(v_imm+4)(,)
+#CHECK: movl $1, %gs:8(,%eax)
+	movl $1, v_gs:(v_imm+4)(,%eax)
+#CHECK: movl $1, %gs:8(,%eax,2)
+	movl $1, v_gs:(v_imm+4)(,%eax,2)
+#CHECK: movl $1, %gs:8(,%eax,4)
+	movl $1, v_gs:(v_imm+4)(,%eax,v_imm)
+#CHECK: movl $1, %gs:8(,%eax)
+	movl $1, v_gs:(v_imm+4)(,v_eax)
+#CHECK: movl $1, %gs:8(,%eax,2)
+	movl $1, v_gs:(v_imm+4)(,v_eax,2)
+#CHECK: movl $1, %gs:8(,%eax,4)
+	movl $1, v_gs:(v_imm+4)(,v_eax,v_imm)
+#CHECK: movl $1, %gs:8(%ecx)
+	movl $1, v_gs:(v_imm+4)(%ecx)
+#CHECK: movl $1, %gs:8(%ecx)
+	movl $1, v_gs:(v_imm+4)(%ecx,)
+#CHECK: movl $1, %gs:8(%ecx,%eax)
+	movl $1, v_gs:(v_imm+4)(%ecx,%eax)
+#CHECK: movl $1, %gs:8(%ecx,%eax,2)
+	movl $1, v_gs:(v_imm+4)(%ecx,%eax,2)
+#CHECK: movl $1, %gs:8(%ecx,%eax,4)
+	movl $1, v_gs:(v_imm+4)(%ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:8(%ecx,%eax)
+	movl $1, v_gs:(v_imm+4)(%ecx,v_eax)
+#CHECK: movl $1, %gs:8(%ecx,%eax,2)
+	movl $1, v_gs:(v_imm+4)(%ecx,v_eax,2)
+#CHECK: movl $1, %gs:8(%ecx,%eax,4)
+	movl $1, v_gs:(v_imm+4)(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %gs:8(%ecx)
+	movl $1, v_gs:(v_imm+4)(v_ecx)
+#CHECK: movl $1, %gs:8(%ecx)
+	movl $1, v_gs:(v_imm+4)(v_ecx,)
+#CHECK: movl $1, %gs:8(%ecx,%eax)
+	movl $1, v_gs:(v_imm+4)(v_ecx,%eax)
+#CHECK: movl $1, %gs:8(%ecx,%eax,2)
+	movl $1, v_gs:(v_imm+4)(v_ecx,%eax,2)
+#CHECK: movl $1, %gs:8(%ecx,%eax,4)
+	movl $1, v_gs:(v_imm+4)(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %gs:8(%ecx,%eax)
+	movl $1, v_gs:(v_imm+4)(v_ecx,v_eax)
+#CHECK: movl $1, %gs:8(%ecx,%eax,2)
+	movl $1, v_gs:(v_imm+4)(v_ecx,v_eax,2)
+#CHECK: movl $1, %gs:8(%ecx,%eax,4)
+	movl $1, v_gs:(v_imm+4)(v_ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:0
+	movl $1, %fs:(,)
+#CHECK: movl $1, %fs:(,%eax)
+	movl $1, %fs:(,%eax)
+#CHECK: movl $1, %fs:(,%eax,2)
+	movl $1, %fs:(,%eax,2)
+#CHECK: movl $1, %fs:(,%eax,4)
+	movl $1, %fs:(,%eax,v_imm)
+#CHECK: movl $1, %fs:(,%eax)
+	movl $1, %fs:(,v_eax)
+#CHECK: movl $1, %fs:(,%eax,2)
+	movl $1, %fs:(,v_eax,2)
+#CHECK: movl $1, %fs:(,%eax,4)
+	movl $1, %fs:(,v_eax,v_imm)
+#CHECK: movl $1, %fs:(%ecx)
+	movl $1, %fs:(%ecx)
+#CHECK: movl $1, %fs:(%ecx)
+	movl $1, %fs:(%ecx,)
+#CHECK: movl $1, %fs:(%ecx,%eax)
+	movl $1, %fs:(%ecx,%eax)
+#CHECK: movl $1, %fs:(%ecx,%eax,2)
+	movl $1, %fs:(%ecx,%eax,2)
+#CHECK: movl $1, %fs:(%ecx,%eax,4)
+	movl $1, %fs:(%ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:(%ecx,%eax)
+	movl $1, %fs:(%ecx,v_eax)
+#CHECK: movl $1, %fs:(%ecx,%eax,2)
+	movl $1, %fs:(%ecx,v_eax,2)
+#CHECK: movl $1, %fs:(%ecx,%eax,4)
+	movl $1, %fs:(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:(%ecx)
+	movl $1, %fs:(v_ecx)
+#CHECK: movl $1, %fs:(%ecx)
+	movl $1, %fs:(v_ecx,)
+#CHECK: movl $1, %fs:(%ecx,%eax)
+	movl $1, %fs:(v_ecx,%eax)
+#CHECK: movl $1, %fs:(%ecx,%eax,2)
+	movl $1, %fs:(v_ecx,%eax,2)
+#CHECK: movl $1, %fs:(%ecx,%eax,4)
+	movl $1, %fs:(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:(%ecx,%eax)
+	movl $1, %fs:(v_ecx,v_eax)
+#CHECK: movl $1, %fs:(%ecx,%eax,2)
+	movl $1, %fs:(v_ecx,v_eax,2)
+#CHECK: movl $1, %fs:(%ecx,%eax,4)
+	movl $1, %fs:(v_ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:4
+	movl $1, %fs:4
+#CHECK: movl $1, %fs:4
+	movl $1, %fs:4()
+#CHECK: movl $1, %fs:4
+	movl $1, %fs:4(,)
+#CHECK: movl $1, %fs:4(,%eax)
+	movl $1, %fs:4(,%eax)
+#CHECK: movl $1, %fs:4(,%eax,2)
+	movl $1, %fs:4(,%eax,2)
+#CHECK: movl $1, %fs:4(,%eax,4)
+	movl $1, %fs:4(,%eax,v_imm)
+#CHECK: movl $1, %fs:4(,%eax)
+	movl $1, %fs:4(,v_eax)
+#CHECK: movl $1, %fs:4(,%eax,2)
+	movl $1, %fs:4(,v_eax,2)
+#CHECK: movl $1, %fs:4(,%eax,4)
+	movl $1, %fs:4(,v_eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:4(%ecx)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:4(%ecx,)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:4(%ecx,%eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:4(%ecx,%eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:4(%ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:4(%ecx,v_eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:4(%ecx,v_eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:4(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:4(v_ecx)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:4(v_ecx,)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:4(v_ecx,%eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:4(v_ecx,%eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:4(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:4(v_ecx,v_eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:4(v_ecx,v_eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:4(v_ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:4
+	movl $1, %fs:v_imm
+#CHECK: movl $1, %fs:4
+	movl $1, %fs:v_imm()
+#CHECK: movl $1, %fs:4
+	movl $1, %fs:v_imm(,)
+#CHECK: movl $1, %fs:4(,%eax)
+	movl $1, %fs:v_imm(,%eax)
+#CHECK: movl $1, %fs:4(,%eax,2)
+	movl $1, %fs:v_imm(,%eax,2)
+#CHECK: movl $1, %fs:4(,%eax,4)
+	movl $1, %fs:v_imm(,%eax,v_imm)
+#CHECK: movl $1, %fs:4(,%eax)
+	movl $1, %fs:v_imm(,v_eax)
+#CHECK: movl $1, %fs:4(,%eax,2)
+	movl $1, %fs:v_imm(,v_eax,2)
+#CHECK: movl $1, %fs:4(,%eax,4)
+	movl $1, %fs:v_imm(,v_eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:v_imm(%ecx)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:v_imm(%ecx,)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:v_imm(%ecx,%eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:v_imm(%ecx,%eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:v_imm(%ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:v_imm(%ecx,v_eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:v_imm(%ecx,v_eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:v_imm(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:v_imm(v_ecx)
+#CHECK: movl $1, %fs:4(%ecx)
+	movl $1, %fs:v_imm(v_ecx,)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:v_imm(v_ecx,%eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:v_imm(v_ecx,%eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:v_imm(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:4(%ecx,%eax)
+	movl $1, %fs:v_imm(v_ecx,v_eax)
+#CHECK: movl $1, %fs:4(%ecx,%eax,2)
+	movl $1, %fs:v_imm(v_ecx,v_eax,2)
+#CHECK: movl $1, %fs:4(%ecx,%eax,4)
+	movl $1, %fs:v_imm(v_ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:8
+	movl $1, %fs:(v_imm+4)
+#CHECK: movl $1, %fs:8
+	movl $1, %fs:(v_imm+4)()
+#CHECK: movl $1, %fs:8
+	movl $1, %fs:(v_imm+4)(,)
+#CHECK: movl $1, %fs:8(,%eax)
+	movl $1, %fs:(v_imm+4)(,%eax)
+#CHECK: movl $1, %fs:8(,%eax,2)
+	movl $1, %fs:(v_imm+4)(,%eax,2)
+#CHECK: movl $1, %fs:8(,%eax,4)
+	movl $1, %fs:(v_imm+4)(,%eax,v_imm)
+#CHECK: movl $1, %fs:8(,%eax)
+	movl $1, %fs:(v_imm+4)(,v_eax)
+#CHECK: movl $1, %fs:8(,%eax,2)
+	movl $1, %fs:(v_imm+4)(,v_eax,2)
+#CHECK: movl $1, %fs:8(,%eax,4)
+	movl $1, %fs:(v_imm+4)(,v_eax,v_imm)
+#CHECK: movl $1, %fs:8(%ecx)
+	movl $1, %fs:(v_imm+4)(%ecx)
+#CHECK: movl $1, %fs:8(%ecx)
+	movl $1, %fs:(v_imm+4)(%ecx,)
+#CHECK: movl $1, %fs:8(%ecx,%eax)
+	movl $1, %fs:(v_imm+4)(%ecx,%eax)
+#CHECK: movl $1, %fs:8(%ecx,%eax,2)
+	movl $1, %fs:(v_imm+4)(%ecx,%eax,2)
+#CHECK: movl $1, %fs:8(%ecx,%eax,4)
+	movl $1, %fs:(v_imm+4)(%ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:8(%ecx,%eax)
+	movl $1, %fs:(v_imm+4)(%ecx,v_eax)
+#CHECK: movl $1, %fs:8(%ecx,%eax,2)
+	movl $1, %fs:(v_imm+4)(%ecx,v_eax,2)
+#CHECK: movl $1, %fs:8(%ecx,%eax,4)
+	movl $1, %fs:(v_imm+4)(%ecx,v_eax,v_imm)
+#CHECK: movl $1, %fs:8(%ecx)
+	movl $1, %fs:(v_imm+4)(v_ecx)
+#CHECK: movl $1, %fs:8(%ecx)
+	movl $1, %fs:(v_imm+4)(v_ecx,)
+#CHECK: movl $1, %fs:8(%ecx,%eax)
+	movl $1, %fs:(v_imm+4)(v_ecx,%eax)
+#CHECK: movl $1, %fs:8(%ecx,%eax,2)
+	movl $1, %fs:(v_imm+4)(v_ecx,%eax,2)
+#CHECK: movl $1, %fs:8(%ecx,%eax,4)
+	movl $1, %fs:(v_imm+4)(v_ecx,%eax,v_imm)
+#CHECK: movl $1, %fs:8(%ecx,%eax)
+	movl $1, %fs:(v_imm+4)(v_ecx,v_eax)
+#CHECK: movl $1, %fs:8(%ecx,%eax,2)
+	movl $1, %fs:(v_imm+4)(v_ecx,v_eax,2)
+#CHECK: movl $1, %fs:8(%ecx,%eax,4)
+	movl $1, %fs:(v_imm+4)(v_ecx,v_eax,v_imm)
diff --git a/test/Object/Inputs/WASM/invalid-section-order.wasm b/test/Object/Inputs/WASM/invalid-section-order.wasm
new file mode 100644
index 0000000..f2726ff
--- /dev/null
+++ b/test/Object/Inputs/WASM/invalid-section-order.wasm
Binary files differ
diff --git a/test/Object/Inputs/trivial-object-test.wasm b/test/Object/Inputs/trivial-object-test.wasm
index 8652d67..a894522 100644
--- a/test/Object/Inputs/trivial-object-test.wasm
+++ b/test/Object/Inputs/trivial-object-test.wasm
Binary files differ
diff --git a/test/Object/ar-create.test b/test/Object/ar-create.test
index 95d994e..4b08c97 100644
--- a/test/Object/ar-create.test
+++ b/test/Object/ar-create.test
@@ -13,5 +13,5 @@
 RUN: llvm-ar r %t.foo.a %t 2>&1 | FileCheck --check-prefix=CREATE %s
 RUN: rm -f %t.foo.a
 
-CHECK: llvm-ar{{(.exe|.EXE)?}}: error loading '{{[^']+}}.foo.a':
+CHECK: llvm-ar{{(.exe|.EXE)?}}: error: error loading '{{[^']+}}.foo.a':
 CREATE: creating {{.*}}.foo.a
diff --git a/test/Object/ar-error.test b/test/Object/ar-error.test
index 7add9b4..d212555 100644
--- a/test/Object/ar-error.test
+++ b/test/Object/ar-error.test
@@ -3,4 +3,4 @@
 RUN: not llvm-ar r %t.out.a sparkle.o %t 2>&1 | FileCheck %s
 
 # Don't check the message "No such file or directory".
-CHECK: llvm-ar{{(.exe|.EXE)?}}: sparkle.o:
+CHECK: llvm-ar{{(.exe|.EXE)?}}: error: sparkle.o:
diff --git a/test/Object/archive-extract.test b/test/Object/archive-extract.test
index 85d853d..916ccc2 100644
--- a/test/Object/archive-extract.test
+++ b/test/Object/archive-extract.test
@@ -49,7 +49,7 @@
 CHECK-DARWIN: 1472 Nov 19 03:01 2004 very_long_bytecode_file_name.bc
 
 RUN: not llvm-ar x %p/Inputs/GNU.a foo.o 2>&1 | FileCheck --check-prefix=NOTFOUND %s
-NOTFOUND: foo.o was not found
+NOTFOUND: error: 'foo.o' was not found
 
 RUN: not llvm-ar x %p/Inputs/thin.a foo.o 2>&1 | FileCheck %s --check-prefix=THINEXTRACT
 THINEXTRACT: extracting from a thin archive is not supported
diff --git a/test/Object/archive-format.test b/test/Object/archive-format.test
index b1ae411..b555bf8 100644
--- a/test/Object/archive-format.test
+++ b/test/Object/archive-format.test
@@ -15,8 +15,7 @@
 RUN: cat %t.a | FileCheck -strict-whitespace %s
 
 CHECK:      !<arch>
-CHECK-NEXT: //                                              36        `
-CHECK-NEXT: 0123456789abcdef/
+CHECK-NEXT: //                                              18        `
 CHECK-NEXT: 0123456789abcdef/
 CHECK-NEXT: 0123456789abcde/0           0     0     644     4         `
 CHECK-NEXT: bar.
@@ -24,7 +23,7 @@
 CHECK-NEXT: zed.
 CHECK-SAME: 0123456789abcde/0           0     0     644     4         `
 CHECK-NEXT: bar2
-CHECK-SAME: /18             0           0     0     644     4         `
+CHECK-SAME: /0              0           0     0     644     4         `
 CHECK-NEXT: zed2
 
 RUN: rm -f %t.a
diff --git a/test/Object/obj2yaml.test b/test/Object/obj2yaml.test
index 46a0d77..5accdd4 100644
--- a/test/Object/obj2yaml.test
+++ b/test/Object/obj2yaml.test
@@ -651,7 +651,7 @@
 WASM-NEXT:   Version:         0x00000001
 WASM:        - Type:            CUSTOM
 WASM-NEXT:     Name:            linking
-WASM-NEXT:     Version:         1
+WASM-NEXT:     Version:         2
 WASM-NEXT:     SymbolTable:
 WASM-NEXT:       - Index:           0
 WASM-NEXT:         Kind:            FUNCTION
@@ -675,7 +675,7 @@
 WASM:          SegmentInfo:
 WASM-NEXT:       - Index:           0
 WASM-NEXT:         Name:            .rodata..L.str
-WASM-NEXT:         Alignment:       1
+WASM-NEXT:         Alignment:       0
 WASM-NEXT:         Flags:           [  ]
 WASM-NEXT:       - Index:           1
 WASM:      ...
diff --git a/test/Object/wasm-bad-metadata-version.yaml b/test/Object/wasm-bad-metadata-version.yaml
index b2970bb..7bda5b8 100644
--- a/test/Object/wasm-bad-metadata-version.yaml
+++ b/test/Object/wasm-bad-metadata-version.yaml
@@ -8,4 +8,4 @@
     Name:            linking
     Version:         0
 
-# CHECK: {{.*}}: Unexpected metadata version: 0 (Expected: 1)
+# CHECK: {{.*}}: Unexpected metadata version: 0 (Expected: 2)
diff --git a/test/Object/wasm-invalid-section-order.test b/test/Object/wasm-invalid-section-order.test
new file mode 100644
index 0000000..bb008ff
--- /dev/null
+++ b/test/Object/wasm-invalid-section-order.test
@@ -0,0 +1,16 @@
+# RUN: not obj2yaml %p/Inputs/WASM/invalid-section-order.wasm 2>&1 | FileCheck %s
+# CHECK: {{.*}}: Out of order section type: 10
+
+# Inputs/WASM/invalid-section-order.wasm is generated from the .ll file
+# below by modifying WasmObjectWriter to incorrectly write the data
+# section before the code section.
+#
+# target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+# target triple = "wasm32-unknown-unknown"
+#
+# @data = global i32 0, align 4
+#
+# define void @foo() {
+# entry:
+#   ret void
+# }
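The rule this test (and the yaml2obj counterpart below) exercises is that non-custom wasm sections must appear with strictly increasing section IDs, while custom sections (ID 0) may appear anywhere. A minimal C++ sketch of a check of that shape — an illustration under those assumptions, not the actual reader code:

#include <cstdint>
#include <optional>
#include <vector>

// Known wasm sections carry IDs in spec order (TYPE=1 ... CODE=10,
// DATA=11). Return the first ID that breaks monotonic order, e.g. a
// CODE (10) section encountered after DATA (11).
std::optional<uint8_t> findOutOfOrderSection(const std::vector<uint8_t> &IDs) {
  uint8_t Prev = 0;
  for (uint8_t ID : IDs) {
    if (ID == 0)
      continue;          // custom section: no ordering constraint
    if (ID <= Prev)
      return ID;         // out of order (or duplicated) section type
    Prev = ID;
  }
  return std::nullopt;   // order is valid
}

For an input whose data section (11) precedes its code section (10), the sketch returns 10, matching the diagnostic above.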
diff --git a/test/Object/yaml2obj-readobj.test b/test/Object/yaml2obj-readobj.test
index 3bd0c6b..e7ee2a0 100644
--- a/test/Object/yaml2obj-readobj.test
+++ b/test/Object/yaml2obj-readobj.test
@@ -13,16 +13,19 @@
 // COFF-I386-NEXT:       Offset: 0xE
 // COFF-I386-NEXT:       Type: IMAGE_REL_I386_DIR32 (6)
 // COFF-I386-NEXT:       Symbol: L_.str
+// COFF-I386-NEXT:       SymbolIndex: 5
 // COFF-I386-NEXT:     }
 // COFF-I386-NEXT:     Relocation {
 // COFF-I386-NEXT:       Offset: 0x13
 // COFF-I386-NEXT:       Type: IMAGE_REL_I386_REL32 (20)
 // COFF-I386-NEXT:       Symbol: _puts
+// COFF-I386-NEXT:       SymbolIndex: 6
 // COFF-I386-NEXT:     }
 // COFF-I386-NEXT:     Relocation {
 // COFF-I386-NEXT:       Offset: 0x18
 // COFF-I386-NEXT:       Type: IMAGE_REL_I386_REL32 (20)
 // COFF-I386-NEXT:       Symbol: _SomeOtherFunction
+// COFF-I386-NEXT:       SymbolIndex: 7
 // COFF-I386-NEXT:     }
 // COFF-I386-NEXT:   }
 // COFF-I386-NEXT: ]
diff --git a/test/ObjectYAML/wasm/code_section.yaml b/test/ObjectYAML/wasm/code_section.yaml
index 0d7cf61..1a63ce5 100644
--- a/test/ObjectYAML/wasm/code_section.yaml
+++ b/test/ObjectYAML/wasm/code_section.yaml
@@ -39,7 +39,7 @@
         Body:            108180808000210020000F0B
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
       - Index:           0
         Kind:            FUNCTION
diff --git a/test/ObjectYAML/wasm/data_section.yaml b/test/ObjectYAML/wasm/data_section.yaml
index ef5945d..aa18301 100644
--- a/test/ObjectYAML/wasm/data_section.yaml
+++ b/test/ObjectYAML/wasm/data_section.yaml
@@ -24,7 +24,7 @@
         Addend:          -6
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
       - Index:           0
         Kind:            DATA
diff --git a/test/ObjectYAML/wasm/elem_section.yaml b/test/ObjectYAML/wasm/elem_section.yaml
index 684256e..73989dd 100644
--- a/test/ObjectYAML/wasm/elem_section.yaml
+++ b/test/ObjectYAML/wasm/elem_section.yaml
@@ -5,7 +5,7 @@
 Sections:
   - Type:            TABLE
     Tables:         
-      - ElemType:          ANYFUNC
+      - ElemType:          FUNCREF
         Limits:
           Flags:           [ HAS_MAX ]
           Initial:         0x00000010
@@ -18,7 +18,7 @@
         Functions:
           - 1
       - Offset:
-          Opcode:        GET_GLOBAL
+          Opcode:        GLOBAL_GET
           Index:         1
         Functions:
           - 4
@@ -34,7 +34,7 @@
 # CHECK:           Value:            3
 # CHECK:         Functions: [ 1 ]
 # CHECK:       - Offset:
-# CHECK:           Opcode:           GET_GLOBAL
+# CHECK:           Opcode:           GLOBAL_GET
 # CHECK:           Index:            1
 # CHECK:         Functions: [ 4 ]
 # CHECK: ...
diff --git a/test/ObjectYAML/wasm/event_section.yaml b/test/ObjectYAML/wasm/event_section.yaml
index 017efda..f026899 100644
--- a/test/ObjectYAML/wasm/event_section.yaml
+++ b/test/ObjectYAML/wasm/event_section.yaml
@@ -32,7 +32,7 @@
         Body:            200008808080800041000B
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
       - Index:           0
         Kind:            FUNCTION
@@ -78,7 +78,7 @@
 # CHECK-NEXT:         Body:            200008808080800041000B
 # CHECK-NEXT:   - Type:            CUSTOM
 # CHECK-NEXT:     Name:            linking
-# CHECK-NEXT:     Version:         1
+# CHECK-NEXT:     Version:         2
 # CHECK-NEXT:     SymbolTable:
 # CHECK-NEXT:       - Index:           0
 # CHECK-NEXT:         Kind:            FUNCTION
diff --git a/test/ObjectYAML/wasm/import_section.yaml b/test/ObjectYAML/wasm/import_section.yaml
index fc75705..90de6f0 100644
--- a/test/ObjectYAML/wasm/import_section.yaml
+++ b/test/ObjectYAML/wasm/import_section.yaml
@@ -31,7 +31,7 @@
         Field:           imported_table
         Kind:            TABLE
         Table:
-          ElemType:      ANYFUNC
+          ElemType:      FUNCREF
           Limits:
             Flags:           [ HAS_MAX ]
             Initial:         0x00000020
@@ -63,7 +63,7 @@
 # CHECK:         Field:           imported_table
 # CHECK:         Kind:            TABLE
 # CHECK:         Table:
-# CHECK:           ElemType:      ANYFUNC
+# CHECK:           ElemType:      FUNCREF
 # CHECK:           Limits:
 # CHECK:             Flags:           [ HAS_MAX ]
 # CHECK:             Initial:         0x00000020
diff --git a/test/ObjectYAML/wasm/invalid_global_weak.yaml b/test/ObjectYAML/wasm/invalid_global_weak.yaml
index c364075..4566e8f 100644
--- a/test/ObjectYAML/wasm/invalid_global_weak.yaml
+++ b/test/ObjectYAML/wasm/invalid_global_weak.yaml
@@ -13,7 +13,7 @@
         GlobalMutable:   false
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
       - Index:           0
         Kind:            GLOBAL
diff --git a/test/ObjectYAML/wasm/invalid_section_order.yaml b/test/ObjectYAML/wasm/invalid_section_order.yaml
new file mode 100644
index 0000000..52ad136
--- /dev/null
+++ b/test/ObjectYAML/wasm/invalid_section_order.yaml
@@ -0,0 +1,20 @@
+# RUN: not yaml2obj %s -o /dev/null 2>&1 | FileCheck %s
+
+--- !WASM
+FileHeader:
+  Version:         0x00000001
+Sections:
+  - Type:            TYPE
+    Signatures:
+      - Index:           0
+        ReturnType:      NORESULT
+        ParamTypes:      []
+  - Type:            CODE
+    Functions:
+      - Index:           0
+        Locals:          []
+        Body:            0B
+  # CHECK: Out of order section type: 3
+  - Type:            FUNCTION
+    FunctionTypes:   [ 0 ]
+...
diff --git a/test/ObjectYAML/wasm/linking_section.yaml b/test/ObjectYAML/wasm/linking_section.yaml
index d1f0243..69817f5 100644
--- a/test/ObjectYAML/wasm/linking_section.yaml
+++ b/test/ObjectYAML/wasm/linking_section.yaml
@@ -29,7 +29,7 @@
         Content:         '11110000'
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
       - Index:           0
         Kind:            FUNCTION
diff --git a/test/ObjectYAML/wasm/table_section.yaml b/test/ObjectYAML/wasm/table_section.yaml
index 5996b63..90e73fd 100644
--- a/test/ObjectYAML/wasm/table_section.yaml
+++ b/test/ObjectYAML/wasm/table_section.yaml
@@ -5,7 +5,7 @@
 Sections:
   - Type:            TABLE
     Tables:         
-      - ElemType:        ANYFUNC
+      - ElemType:        FUNCREF
         Limits:
           Flags:           [ HAS_MAX ]
           Initial:         0x00000010
@@ -17,7 +17,7 @@
 # CHECK: Sections:
 # CHECK:  - Type:            TABLE
 # CHECK:    Tables:         
-# CHECK:      - ElemType:        ANYFUNC
+# CHECK:      - ElemType:        FUNCREF
 # CHECK:        Limits:
 # CHECK:          Flags:           [ HAS_MAX ]
 # CHECK:          Initial:         0x00000010
diff --git a/test/ObjectYAML/wasm/weak_symbols.yaml b/test/ObjectYAML/wasm/weak_symbols.yaml
index 3e9ca34..fb85b82 100644
--- a/test/ObjectYAML/wasm/weak_symbols.yaml
+++ b/test/ObjectYAML/wasm/weak_symbols.yaml
@@ -36,7 +36,7 @@
         Body:            00
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
       - Index:           0
         Kind:            FUNCTION
diff --git a/test/Other/loop-deletion-printer.ll b/test/Other/loop-deletion-printer.ll
index d344568..6cb29ae 100644
--- a/test/Other/loop-deletion-printer.ll
+++ b/test/Other/loop-deletion-printer.ll
@@ -5,10 +5,14 @@
 ; RUN:     -passes=loop-instsimplify -print-after-all  2>&1 | FileCheck %s -check-prefix=SIMPLIFY
 ; RUN: opt < %s -disable-output \
 ; RUN:     -passes=loop-deletion,loop-instsimplify -print-after-all  2>&1 | FileCheck %s -check-prefix=DELETED
+; RUN: opt < %s -disable-output \
+; RUN:     -passes=loop-deletion,loop-instsimplify -print-after-all -print-module-scope  2>&1 | FileCheck %s -check-prefix=DELETED-BUT-PRINTED
 ;
 ; SIMPLIFY: IR Dump {{.*}} LoopInstSimplifyPass
 ; DELETED-NOT: IR Dump {{.*}}LoopInstSimplifyPass
 ; DELETED-NOT: IR Dump {{.*}}LoopDeletionPass
+; DELETED-BUT-PRINTED: IR Dump {{.*}}LoopDeletionPass {{.*invalidated:}}
+; DELETED-BUT-PRINTED-NOT: IR Dump {{.*}}LoopInstSimplifyPass
 
 define void @deleteme() {
 entry:
diff --git a/test/Other/opt-bisect-helper.py b/test/Other/opt-bisect-helper.py
index d75950f..d2ab4ce 100755
--- a/test/Other/opt-bisect-helper.py
+++ b/test/Other/opt-bisect-helper.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os
 import sys
 import argparse
diff --git a/test/Other/scc-deleted-printer.ll b/test/Other/scc-deleted-printer.ll
index e6323f0..f7b97c3 100644
--- a/test/Other/scc-deleted-printer.ll
+++ b/test/Other/scc-deleted-printer.ll
@@ -9,7 +9,7 @@
 ; INL: IR Dump After {{InlinerPass .*scc: .tester}}
 
 ; INL-MOD: IR Dump Before {{InlinerPass .*scc: .tester, foo}}
-; INL-MOD-NOT: IR Dump After {{InlinerPass}}
+; INL-MOD: IR Dump After {{InlinerPass .*invalidated: .*scc: .tester, foo}}
 ; INL-MOD: IR Dump Before {{InlinerPass .*scc: .tester}}
 ; INL-MOD: IR Dump After {{InlinerPass .*scc: .tester}}
 
diff --git a/test/TableGen/FixedLenDecoderEmitter/conflict.td b/test/TableGen/FixedLenDecoderEmitter/conflict.td
new file mode 100644
index 0000000..a9a95bc
--- /dev/null
+++ b/test/TableGen/FixedLenDecoderEmitter/conflict.td
@@ -0,0 +1,35 @@
+// RUN: llvm-tblgen -gen-disassembler -I %p/../../../include %s -o - 2>%t
+// RUN: FileCheck %s < %t
+
+include "llvm/Target/Target.td"
+
+def MyTargetISA : InstrInfo;
+def MyTarget : Target { let InstructionSet = MyTargetISA; }
+
+def R0 : Register<"r0"> { let Namespace = "MyTarget"; }
+def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0)>;
+
+class I<dag OOps, dag IOps, list<dag> Pat>
+  : Instruction {
+  let Namespace = "MyTarget";
+  let OutOperandList = OOps;
+  let InOperandList = IOps;
+  let Pattern = Pat;
+  bits<32> Inst;
+  bits<32> SoftFail;
+}
+
+def A : I<(outs GPR32:$dst), (ins GPR32:$src1), []> {
+  let Size = 4;
+  let Inst{31-0} = 0;
+}
+def B : I<(outs GPR32:$dst), (ins GPR32:$src1), []> {
+  let Size = 4;
+  let Inst{31-0} = 0;
+}
+
+// CHECK: Decoding Conflict:
+// CHECK:   00000000000000000000000000000000
+// CHECK:   ................................
+// CHECK: A 00000000000000000000000000000000
+// CHECK: B 00000000000000000000000000000000
diff --git a/test/ThinLTO/X86/Inputs/deadstrip.ll b/test/ThinLTO/X86/Inputs/deadstrip.ll
index a9161a3..057305a 100644
--- a/test/ThinLTO/X86/Inputs/deadstrip.ll
+++ b/test/ThinLTO/X86/Inputs/deadstrip.ll
@@ -20,3 +20,8 @@
     call void @dead_func()
     ret void
 }
+
+define linkonce_odr void @linkonceodrfuncwithalias() {
+entry:
+  ret void
+}
diff --git a/test/ThinLTO/X86/cache-icall.ll b/test/ThinLTO/X86/cache-icall.ll
index 5e64a71..034ff97 100644
--- a/test/ThinLTO/X86/cache-icall.ll
+++ b/test/ThinLTO/X86/cache-icall.ll
@@ -4,8 +4,8 @@
 ; This affects code generated for any users of f(). Make sure that we don't pull a stale object
 ; file for %t.o from the cache.
 
-; RUN: opt -module-hash -module-summary -thinlto-bc %s -o %t.bc
-; RUN: opt -module-hash -module-summary -thinlto-bc %p/Inputs/cache-icall.ll -o %t2.bc
+; RUN: opt -module-hash -module-summary -thinlto-bc -thinlto-split-lto-unit %s -o %t.bc
+; RUN: opt -module-hash -module-summary -thinlto-bc -thinlto-split-lto-unit %p/Inputs/cache-icall.ll -o %t2.bc
 
 ; RUN: rm -Rf %t.cache && mkdir %t.cache
 
diff --git a/test/ThinLTO/X86/cfi-devirt.ll b/test/ThinLTO/X86/cfi-devirt.ll
index 7ade794..45d6960 100644
--- a/test/ThinLTO/X86/cfi-devirt.ll
+++ b/test/ThinLTO/X86/cfi-devirt.ll
@@ -2,7 +2,7 @@
 
 ; Test CFI devirtualization through the thin link and backend.
 
-; RUN: opt -thinlto-bc -o %t.o %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t.o %s
 
 ; Legacy PM
 ; FIXME: Fix machine verifier issues and remove -verify-machineinstrs=0. PR39436.
@@ -44,6 +44,27 @@
 
 ; REMARK: single-impl: devirtualized a call to _ZN1A1nEi
 
+; Next check that we emit an error when trying to LTO link this module
+; containing an llvm.type.checked.load (with a split LTO Unit) with one
+; that does not have a split LTO Unit.
+; RUN: opt -thinlto-bc -o %t2.o %S/Inputs/empty.ll
+; RUN: not llvm-lto2 run %t.o %t2.o -save-temps -pass-remarks=. \
+; RUN:   -verify-machineinstrs=0 \
+; RUN:   -o %t3 \
+; RUN:   -r=%t.o,test,px \
+; RUN:   -r=%t.o,_ZN1A1nEi,p \
+; RUN:   -r=%t.o,_ZN1B1fEi,p \
+; RUN:   -r=%t.o,_ZN1C1fEi,p \
+; RUN:   -r=%t.o,empty,p \
+; RUN:   -r=%t.o,_ZTV1B, \
+; RUN:   -r=%t.o,_ZTV1C, \
+; RUN:   -r=%t.o,_ZN1A1nEi, \
+; RUN:   -r=%t.o,_ZN1B1fEi, \
+; RUN:   -r=%t.o,_ZN1C1fEi, \
+; RUN:   -r=%t.o,_ZTV1B,px \
+; RUN:   -r=%t.o,_ZTV1C,px 2>&1 | FileCheck %s --check-prefix=ERROR
+; ERROR: LLVM ERROR: inconsistent LTO Unit splitting with llvm.type.test or llvm.type.checked.load
+
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-grtev4-linux-gnu"
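The error being provoked here reflects a whole-link invariant: once any module uses llvm.type.test or llvm.type.checked.load, every module in the link must agree on whether its LTO unit was split. A hedged C++ sketch of that invariant (structure and names are assumptions, not llvm-lto2's code):

#include <optional>
#include <string>
#include <vector>

struct ModuleLTOInfo {
  bool HasTypeChecks;   // uses llvm.type.test / llvm.type.checked.load
  bool SplitLTOUnit;    // built with -thinlto-split-lto-unit
};

// Returns an error message if the modules disagree on splitting while
// type checks are present, mirroring the ERROR line checked above.
std::optional<std::string>
checkSplitConsistency(const std::vector<ModuleLTOInfo> &Mods) {
  bool AnyTypeChecks = false, AnySplit = false, AnyUnsplit = false;
  for (const ModuleLTOInfo &M : Mods) {
    if (M.HasTypeChecks)
      AnyTypeChecks = true;
    if (M.SplitLTOUnit)
      AnySplit = true;
    else
      AnyUnsplit = true;
  }
  if (AnyTypeChecks && AnySplit && AnyUnsplit)
    return "inconsistent LTO Unit splitting with llvm.type.test or "
           "llvm.type.checked.load";
  return std::nullopt;
}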
 
diff --git a/test/ThinLTO/X86/cfi-distributed.ll b/test/ThinLTO/X86/cfi-distributed.ll
index 94f3f95..5339228 100644
--- a/test/ThinLTO/X86/cfi-distributed.ll
+++ b/test/ThinLTO/X86/cfi-distributed.ll
@@ -3,8 +3,8 @@
 ; Test to ensure that only referenced type ID records are emitted into
 ; each distributed index file.
 
-; RUN: opt -thinlto-bc -o %t1.o %s
-; RUN: opt -thinlto-bc -o %t2.o %p/Inputs/cfi-distributed.ll
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t1.o %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t2.o %p/Inputs/cfi-distributed.ll
 
 ; RUN: llvm-lto2 run -thinlto-distributed-indexes %t1.o %t2.o \
 ; RUN:   -o %t3 \
diff --git a/test/ThinLTO/X86/cfi-icall.ll b/test/ThinLTO/X86/cfi-icall.ll
index 1ab184f..42c26f1 100644
--- a/test/ThinLTO/X86/cfi-icall.ll
+++ b/test/ThinLTO/X86/cfi-icall.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc %s -o %t1.bc
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit %s -o %t1.bc
 ; RUN: llvm-lto2 run  -thinlto-distributed-indexes %t1.bc -o %t.out -save-temps \
 ; RUN:   -r %t1.bc,foo,plx \
 ; RUN:   -r %t1.bc,bar,x \
diff --git a/test/ThinLTO/X86/cfi.ll b/test/ThinLTO/X86/cfi.ll
index 9b1bde3..0edddb8 100644
--- a/test/ThinLTO/X86/cfi.ll
+++ b/test/ThinLTO/X86/cfi.ll
@@ -2,7 +2,7 @@
 
 ; Test CFI through the thin link and backend.
 
-; RUN: opt -thinlto-bc -o %t.o %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t.o %s
 
 ; Legacy PM
 ; RUN: llvm-lto2 run -save-temps %t.o \
diff --git a/test/ThinLTO/X86/deadstrip.ll b/test/ThinLTO/X86/deadstrip.ll
index 76dc09e..257cfaf 100644
--- a/test/ThinLTO/X86/deadstrip.ll
+++ b/test/ThinLTO/X86/deadstrip.ll
@@ -20,10 +20,14 @@
 ; RUN:   -r %t1.bc,_live_available_externally_func,l \
 ; RUN:   -r %t1.bc,_live_linkonce_odr_func,l \
 ; RUN:   -r %t1.bc,_live_weak_odr_func,l \
+; RUN:   -r %t1.bc,_linkonceodralias,pl \
+; RUN:   -r %t1.bc,_linkonceodrfuncwithalias,l \
+; RUN:   -r %t1.bc,_linkonceodrfuncwithalias_caller,pl \
 ; RUN:   -r %t2.bc,_baz,pl \
 ; RUN:   -r %t2.bc,_boo,pl \
 ; RUN:   -r %t2.bc,_dead_func,l \
 ; RUN:   -r %t2.bc,_another_dead_func,pl \
+; RUN:   -r %t2.bc,_linkonceodrfuncwithalias,pl \
 ; RUN:   -thinlto-threads=1 \
 ; RUN:	 -debug-only=function-import 2>&1 | FileCheck %s --check-prefix=DEBUG --check-prefix=STATS
 ; RUN: llvm-dis < %t.out.1.3.import.bc | FileCheck %s --check-prefix=LTO2
@@ -71,6 +75,11 @@
 ; llvm.global_ctors
 ; CHECK2: define void @boo()
 ; LTO2-CHECK2: define dso_local void @boo()
+
+; Make sure we keep @linkonceodrfuncwithalias in Inputs/deadstrip.ll alive, as it
+; is reachable from @main.
+; LTO2-CHECK2: define weak_odr dso_local void @linkonceodrfuncwithalias() {
+
 ; We should have eventually removed @baz since it was internalized and unused
 ; CHECK2-NM-NOT: _baz
 
@@ -105,10 +114,14 @@
 ; RUN:   -r %t1.bc,_live_available_externally_func,l \
 ; RUN:   -r %t1.bc,_live_linkonce_odr_func,l \
 ; RUN:   -r %t1.bc,_live_weak_odr_func,l \
+; RUN:   -r %t1.bc,_linkonceodralias,pl \
+; RUN:   -r %t1.bc,_linkonceodrfuncwithalias,l \
+; RUN:   -r %t1.bc,_linkonceodrfuncwithalias_caller,pl \
 ; RUN:   -r %t3.bc,_baz,pl \
 ; RUN:   -r %t3.bc,_boo,pl \
 ; RUN:   -r %t3.bc,_dead_func,l \
-; RUN:   -r %t3.bc,_another_dead_func,pl
+; RUN:   -r %t3.bc,_another_dead_func,pl \
+; RUN:   -r %t3.bc,_linkonceodrfuncwithalias,pl
 ; RUN: llvm-dis < %t4.out.1.3.import.bc | FileCheck %s --check-prefix=CHECK-NOTDEAD
 ; RUN: llvm-nm %t4.out.0 | FileCheck %s --check-prefix=CHECK-NM-NOTDEAD
 
@@ -163,11 +176,31 @@
     ret void
 }
 
+; This alias keeps its base object in this file (linkonceodrfuncwithalias)
+; alive.
+; We want to make sure the @linkonceodrfuncwithalias copy in Inputs/deadstrip.ll
+; is also scanned when computing reachability.
+@linkonceodralias = linkonce_odr alias void (), void ()* @linkonceodrfuncwithalias
+
+define linkonce_odr void @linkonceodrfuncwithalias() {
+entry:
+  ret void
+}
+
+define void @linkonceodrfuncwithalias_caller() {
+entry:
+  call void @linkonceodrfuncwithalias()
+  ret void
+}
+
+
 define void @main() {
     call void @bar()
     call void @bar_internal()
     call void @live_linkonce_odr_func()
     call void @live_weak_odr_func()
     call void @live_available_externally_func()
+    call void @linkonceodrfuncwithalias_caller()
+    call void @linkonceodralias()
     ret void
 }
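The new RUN lines above encode a simple reachability rule: marking an alias live must also mark its aliasee live, and that must happen in every module that defines a linkonce_odr copy. A hedged C++ sketch of the propagation step over toy summary nodes — a simplification, not the ThinLTO pass itself:

#include <queue>
#include <vector>

// Toy stand-in for a ThinLTO global value summary node.
struct Summary {
  bool Live = false;
  Summary *Aliasee = nullptr;      // non-null for alias summaries
  std::vector<Summary *> Refs;     // callees and referenced globals
};

// Breadth-first liveness from the roots (e.g. @main).
void markLive(Summary *Root) {
  std::queue<Summary *> Work;
  Work.push(Root);
  while (!Work.empty()) {
    Summary *S = Work.front();
    Work.pop();
    if (S->Live)
      continue;
    S->Live = true;
    // The behavior under test: a live alias keeps its base object
    // alive, so the aliasee is enqueued like any other reference.
    if (S->Aliasee)
      Work.push(S->Aliasee);
    for (Summary *Ref : S->Refs)
      Work.push(Ref);
  }
}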
diff --git a/test/ThinLTO/X86/devirt-after-icp.ll b/test/ThinLTO/X86/devirt-after-icp.ll
index 9872217..fd5dcb7 100644
--- a/test/ThinLTO/X86/devirt-after-icp.ll
+++ b/test/ThinLTO/X86/devirt-after-icp.ll
@@ -42,7 +42,7 @@
 ; will use the same vtable pointer. Without a dominance check, we could
 ; incorrectly devirtualize a->foo() to B::foo();
 
-; RUN: opt -thinlto-bc -o %t.o %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t.o %s
 
 ; Legacy PM
 ; FIXME: Fix machine verifier issues and remove -verify-machineinstrs=0. PR39436.
diff --git a/test/ThinLTO/X86/lazyload_metadata.ll b/test/ThinLTO/X86/lazyload_metadata.ll
index 4680e46..5fbf7a9 100644
--- a/test/ThinLTO/X86/lazyload_metadata.ll
+++ b/test/ThinLTO/X86/lazyload_metadata.ll
@@ -10,13 +10,13 @@
 ; RUN: llvm-lto -thinlto-action=import %t2.bc -thinlto-index=%t3.bc \
 ; RUN:          -o /dev/null -stats \
 ; RUN:  2>&1 | FileCheck %s -check-prefix=LAZY
-; LAZY: 55 bitcode-reader  - Number of Metadata records loaded
+; LAZY: 57 bitcode-reader  - Number of Metadata records loaded
 ; LAZY: 2 bitcode-reader  - Number of MDStrings loaded
 
 ; RUN: llvm-lto -thinlto-action=import %t2.bc -thinlto-index=%t3.bc \
 ; RUN:          -o /dev/null -disable-ondemand-mds-loading -stats \
 ; RUN:  2>&1 | FileCheck %s -check-prefix=NOTLAZY
-; NOTLAZY: 64 bitcode-reader  - Number of Metadata records loaded
+; NOTLAZY: 66 bitcode-reader  - Number of Metadata records loaded
 ; NOTLAZY: 7 bitcode-reader  - Number of MDStrings loaded
 
 
diff --git a/test/Transforms/ADCE/2016-09-06.ll b/test/Transforms/ADCE/2016-09-06.ll
index 82c333b..6a2d396 100644
--- a/test/Transforms/ADCE/2016-09-06.ll
+++ b/test/Transforms/ADCE/2016-09-06.ll
@@ -52,4 +52,4 @@
 
 !llvm.ident = !{!0}
 
-!0 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 5864a13abf4490e76ae2eb0896198e1305927df2)"}
+!0 = !{!"clang version 4.0.0"}
diff --git a/test/Transforms/AddDiscriminators/invoke.ll b/test/Transforms/AddDiscriminators/invoke.ll
new file mode 100644
index 0000000..0a2f869
--- /dev/null
+++ b/test/Transforms/AddDiscriminators/invoke.ll
@@ -0,0 +1,134 @@
+; RUN: opt < %s -add-discriminators -S | FileCheck %s
+; ModuleID = 'invoke.bc'
+source_filename = "invoke.cpp"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.14.0"
+
+; Function Attrs: ssp uwtable
+define void @_Z3foov() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !8 {
+entry:
+  %exn.slot = alloca i8*
+  %ehselector.slot = alloca i32
+  ; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL1:[0-9]+]]
+  call void @_Z12bar_noexceptv() #4, !dbg !11
+  ; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL2:[0-9]+]]
+  call void @_Z12bar_noexceptv() #4, !dbg !13
+  invoke void @_Z3barv()
+  ; CHECK: unwind label {{.*}} !dbg ![[INVOKE:[0-9]+]]
+          to label %invoke.cont unwind label %lpad, !dbg !14
+
+invoke.cont:                                      ; preds = %entry
+  br label %try.cont, !dbg !15
+
+lpad:                                             ; preds = %entry
+  %0 = landingpad { i8*, i32 }
+          catch i8* null, !dbg !16
+  %1 = extractvalue { i8*, i32 } %0, 0, !dbg !16
+  store i8* %1, i8** %exn.slot, align 8, !dbg !16
+  %2 = extractvalue { i8*, i32 } %0, 1, !dbg !16
+  store i32 %2, i32* %ehselector.slot, align 4, !dbg !16
+  br label %catch, !dbg !16
+
+catch:                                            ; preds = %lpad
+  %exn = load i8*, i8** %exn.slot, align 8, !dbg !15
+  %3 = call i8* @__cxa_begin_catch(i8* %exn) #4, !dbg !15
+  invoke void @__cxa_rethrow() #5
+          to label %unreachable unwind label %lpad1, !dbg !17
+
+lpad1:                                            ; preds = %catch
+  %4 = landingpad { i8*, i32 }
+          cleanup, !dbg !19
+  %5 = extractvalue { i8*, i32 } %4, 0, !dbg !19
+  store i8* %5, i8** %exn.slot, align 8, !dbg !19
+  %6 = extractvalue { i8*, i32 } %4, 1, !dbg !19
+  store i32 %6, i32* %ehselector.slot, align 4, !dbg !19
+  invoke void @__cxa_end_catch()
+          to label %invoke.cont2 unwind label %terminate.lpad, !dbg !20
+
+invoke.cont2:                                     ; preds = %lpad1
+  br label %eh.resume, !dbg !20
+
+try.cont:                                         ; preds = %invoke.cont
+  ret void, !dbg !21
+
+eh.resume:                                        ; preds = %invoke.cont2
+  %exn3 = load i8*, i8** %exn.slot, align 8, !dbg !20
+  %sel = load i32, i32* %ehselector.slot, align 4, !dbg !20
+  %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0, !dbg !20
+  %lpad.val4 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1, !dbg !20
+  resume { i8*, i32 } %lpad.val4, !dbg !20
+
+terminate.lpad:                                   ; preds = %lpad1
+  %7 = landingpad { i8*, i32 }
+          catch i8* null, !dbg !20
+  %8 = extractvalue { i8*, i32 } %7, 0, !dbg !20
+  call void @__clang_call_terminate(i8* %8) #6, !dbg !20
+  unreachable, !dbg !20
+
+unreachable:                                      ; preds = %catch
+  unreachable
+}
+
+; Function Attrs: nounwind
+declare void @_Z12bar_noexceptv() #1
+
+declare void @_Z3barv() #2
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_rethrow()
+
+declare void @__cxa_end_catch()
+
+; Function Attrs: noinline noreturn nounwind
+define linkonce_odr hidden void @__clang_call_terminate(i8*) #3 {
+  %2 = call i8* @__cxa_begin_catch(i8* %0) #4
+  call void @_ZSt9terminatev() #6
+  unreachable
+}
+
+declare void @_ZSt9terminatev()
+
+attributes #0 = { ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { noinline noreturn nounwind }
+attributes #4 = { nounwind }
+attributes #5 = { noreturn }
+attributes #6 = { noreturn nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5, !6}
+!llvm.ident = !{!7}
+
+; CHECK: ![[CALL1]] = !DILocation(line: 7, column: 5, scope: ![[SCOPE1:[0-9]+]])
+; CHECK: ![[SCOPE1]] = distinct !DILexicalBlock(scope: !8, file: !1, line: 6, column: 7)
+; CHECK: ![[CALL2]] = !DILocation(line: 7, column: 21, scope: ![[SCOPE2:[0-9]+]])
+; CHECK: ![[SCOPE2]] = !DILexicalBlockFile(scope: ![[SCOPE1]], file: !1, discriminator: 2)
+; CHECK: ![[INVOKE]] = !DILocation(line: 7, column: 37, scope: ![[SCOPE3:[0-9]+]])
+; CHECK: ![[SCOPE3]] = !DILexicalBlockFile(scope: ![[SCOPE1]], file: !1, discriminator: 4)
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 8.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: GNU)
+!1 = !DIFile(filename: "invoke.cpp", directory: "examples")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"wchar_size", i32 4}
+!6 = !{i32 7, !"PIC Level", i32 2}
+!7 = !{!"clang version 8.0.0"}
+!8 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !1, file: !1, line: 5, type: !9, scopeLine: 5, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null}
+!11 = !DILocation(line: 7, column: 5, scope: !12)
+!12 = distinct !DILexicalBlock(scope: !8, file: !1, line: 6, column: 7)
+!13 = !DILocation(line: 7, column: 21, scope: !12)
+!14 = !DILocation(line: 7, column: 37, scope: !12)
+!15 = !DILocation(line: 8, column: 3, scope: !12)
+!16 = !DILocation(line: 12, column: 1, scope: !12)
+!17 = !DILocation(line: 10, column: 5, scope: !18)
+!18 = distinct !DILexicalBlock(scope: !8, file: !1, line: 9, column: 15)
+!19 = !DILocation(line: 12, column: 1, scope: !18)
+!20 = !DILocation(line: 11, column: 3, scope: !18)
+!21 = !DILocation(line: 12, column: 1, scope: !8)
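For orientation, here is a plausible reconstruction of the invoke.cpp behind this IR (an assumption — the patch does not ship the source). Three call sites share line 7, which is why AddDiscriminators must hand the second call and the invoke the distinct discriminators 2 and 4 checked above:

// Hypothetical invoke.cpp; line comments match the !DILocation entries
// in the test (calls at line 7, columns 5, 21 and 37).
void bar_noexcept() noexcept;
void bar();

void foo() {                                  // line 5
  try {                                       // line 6
    bar_noexcept(); bar_noexcept(); bar();    // line 7: three call sites
  }                                           // line 8
  catch (...) {                               // line 9
    throw;                                    // line 10
  }                                           // line 11
}                                             // line 12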
diff --git a/test/Transforms/AggressiveInstCombine/rotate.ll b/test/Transforms/AggressiveInstCombine/rotate.ll
index 84a7d4a..2049908 100644
--- a/test/Transforms/AggressiveInstCombine/rotate.ll
+++ b/test/Transforms/AggressiveInstCombine/rotate.ll
@@ -9,14 +9,10 @@
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
 ; CHECK:       rotbb:
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[OR]], [[ROTBB]] ], [ [[A]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret i32 [[COND]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
   %cmp = icmp eq i32 %b, 0
@@ -40,14 +36,10 @@
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
 ; CHECK:       rotbb:
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
-; CHECK-NEXT:    ret i32 [[COND]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
   %cmp = icmp eq i32 %b, 0
@@ -71,14 +63,10 @@
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
 ; CHECK:       rotbb:
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
-; CHECK-NEXT:    ret i32 [[COND]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
   %cmp = icmp eq i32 %b, 0
@@ -96,20 +84,49 @@
   ret i32 %cond
 }
 
+; Verify that the intrinsic is inserted at a valid position.
+
+define i32 @rotl_insert_valid_location(i32 %a, i32 %b) {
+; CHECK-LABEL: @rotl_insert_valid_location(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[OTHER:%.*]] = phi i32 [ 1, [[ROTBB]] ], [ 2, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    [[RES:%.*]] = or i32 [[TMP0]], [[OTHER]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+entry:
+  %cmp = icmp eq i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 32, %b
+  %shr = lshr i32 %a, %sub
+  %shl = shl i32 %a, %b
+  %or = or i32 %shr, %shl
+  br label %end
+
+end:
+  %cond = phi i32 [ %or, %rotbb ], [ %a, %entry ]
+  %other = phi i32 [ 1, %rotbb ], [ 2, %entry ]
+  %res = or i32 %cond, %other
+  ret i32 %res
+}
+
 define i32 @rotr(i32 %a, i32 %b) {
 ; CHECK-LABEL: @rotr(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
 ; CHECK:       rotbb:
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A]], [[B]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[OR]], [[ROTBB]] ], [ [[A]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret i32 [[COND]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
   %cmp = icmp eq i32 %b, 0
@@ -133,14 +150,10 @@
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
 ; CHECK:       rotbb:
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A]], [[B]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
-; CHECK-NEXT:    ret i32 [[COND]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
   %cmp = icmp eq i32 %b, 0
@@ -164,12 +177,283 @@
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
 ; CHECK:       rotbb:
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %cmp = icmp eq i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 32, %b
+  %shl = shl i32 %a, %sub
+  %shr = lshr i32 %a, %b
+  %or = or i32 %shl, %shr
+  br label %end
+
+end:
+  %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+  ret i32 %cond
+}
+
+; Negative test - a non-power-of-2 width might require urem expansion in the backend.
+
+define i12 @could_be_rotr_weird_type(i12 %a, i12 %b) {
+; CHECK-LABEL: @could_be_rotr_weird_type(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i12 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    [[SUB:%.*]] = sub i12 12, [[B]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i12 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i12 [[A]], [[B]]
+; CHECK-NEXT:    [[OR:%.*]] = or i12 [[SHL]], [[SHR]]
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[COND:%.*]] = phi i12 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT:    ret i12 [[COND]]
+;
+entry:
+  %cmp = icmp eq i12 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i12 12, %b
+  %shl = shl i12 %a, %sub
+  %shr = lshr i12 %a, %b
+  %or = or i12 %shl, %shr
+  br label %end
+
+end:
+  %cond = phi i12 [ %a, %entry ], [ %or, %rotbb ]
+  ret i12 %cond
+}
+
+; Negative test - wrong phi ops.
+
+define i32 @not_rotr_1(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
 ; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
 ; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A]], [[B]]
 ; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
+; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[B]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT:    ret i32 [[COND]]
+;
+entry:
+  %cmp = icmp eq i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 32, %b
+  %shl = shl i32 %a, %sub
+  %shr = lshr i32 %a, %b
+  %or = or i32 %shl, %shr
+  br label %end
+
+end:
+  %cond = phi i32 [ %b, %entry ], [ %or, %rotbb ]
+  ret i32 %cond
+}
+
+; Negative test - too many phi ops.
+
+define i32 @not_rotr_2(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @not_rotr_2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A]], [[B]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT:    [[CMP42:%.*]] = icmp ugt i32 [[OR]], 42
+; CHECK-NEXT:    br i1 [[CMP42]], label [[END]], label [[BOGUS:%.*]]
+; CHECK:       bogus:
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ], [ [[C:%.*]], [[BOGUS]] ]
+; CHECK-NEXT:    ret i32 [[COND]]
+;
+entry:
+  %cmp = icmp eq i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 32, %b
+  %shl = shl i32 %a, %sub
+  %shr = lshr i32 %a, %b
+  %or = or i32 %shl, %shr
+  %cmp42 = icmp ugt i32 %or, 42
+  br i1 %cmp42, label %end, label %bogus
+
+bogus:
+  br label %end
+
+end:
+  %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ], [ %c, %bogus ]
+  ret i32 %cond
+}
+
+; Negative test - wrong cmp (but this should match?).
+
+define i32 @not_rotr_3(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sle i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A]], [[B]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT:    ret i32 [[COND]]
+;
+entry:
+  %cmp = icmp sle i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 32, %b
+  %shl = shl i32 %a, %sub
+  %shr = lshr i32 %a, %b
+  %or = or i32 %shl, %shr
+  br label %end
+
+end:
+  %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+  ret i32 %cond
+}
+
+; Negative test - wrong shift.
+
+define i32 @not_rotr_4(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[SHR:%.*]] = ashr i32 [[A]], [[B]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT:    ret i32 [[COND]]
+;
+entry:
+  %cmp = icmp eq i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 32, %b
+  %shl = shl i32 %a, %sub
+  %shr = ashr i32 %a, %b
+  %or = or i32 %shl, %shr
+  br label %end
+
+end:
+  %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+  ret i32 %cond
+}
+
+; Negative test - wrong shift.
+
+define i32 @not_rotr_5(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_5(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[B]], [[SUB]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A:%.*]], [[B]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT:    ret i32 [[COND]]
+;
+entry:
+  %cmp = icmp eq i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 32, %b
+  %shl = shl i32 %b, %sub
+  %shr = lshr i32 %a, %b
+  %or = or i32 %shl, %shr
+  br label %end
+
+end:
+  %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+  ret i32 %cond
+}
+
+; Negative test - wrong sub.
+
+define i32 @not_rotr_6(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_6(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 8, [[B]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A]], [[B]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT:    ret i32 [[COND]]
+;
+entry:
+  %cmp = icmp eq i32 %b, 0
+  br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+  %sub = sub i32 8, %b
+  %shl = shl i32 %a, %sub
+  %shr = lshr i32 %a, %b
+  %or = or i32 %shl, %shr
+  br label %end
+
+end:
+  %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+  ret i32 %cond
+}
+
+; Negative test - extra use. Technically, we could transform this
+; because it doesn't increase the instruction count, but we're
+; being cautious not to cause a potential perf pessimization for
+; targets that do not have a rotate instruction.
+
+define i32 @could_be_rotr(i32 %a, i32 %b, i32* %p) {
+; CHECK-LABEL: @could_be_rotr(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK:       rotbb:
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[A]], [[B]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT:    store i32 [[OR]], i32* [[P:%.*]]
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
 ; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
 ; CHECK-NEXT:    ret i32 [[COND]]
 ;
@@ -182,6 +466,7 @@
   %shl = shl i32 %a, %sub
   %shr = lshr i32 %a, %b
   %or = or i32 %shl, %shr
+  store i32 %or, i32* %p
   br label %end
 
 end:
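
The negative tests above probe the rotate-recognition fold from the other side: when none of them trips, the zero-guarded shl/lshr/or diamond is meant to collapse into a single funnel-shift intrinsic, which is well defined for a shift amount of 0, so the %b == 0 guard becomes unnecessary. A minimal sketch of the expected folded form (the function name is illustrative, not taken from the patch):

define i32 @rotr_folded(i32 %a, i32 %b) {
  ; fshr with both value operands equal is a rotate right; fshr by 0
  ; returns %a, so no guard branch is needed.
  %r = call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
  ret i32 %r
}
declare i32 @llvm.fshr.i32(i32, i32, i32)
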
diff --git a/test/Transforms/ArgumentPromotion/X86/attributes.ll b/test/Transforms/ArgumentPromotion/X86/attributes.ll
new file mode 100644
index 0000000..82a2641
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/X86/attributes.ll
@@ -0,0 +1,53 @@
+; RUN: opt -S -argpromotion < %s | FileCheck %s
+; RUN: opt -S -passes=argpromotion < %s | FileCheck %s
+; Test that we only promote arguments when the caller/callee have compatible
+; function attributes.
+
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: @no_promote_avx2(<4 x i64>* %arg, <4 x i64>* readonly %arg1)
+define internal fastcc void @no_promote_avx2(<4 x i64>* %arg, <4 x i64>* readonly %arg1) #0 {
+bb:
+  %tmp = load <4 x i64>, <4 x i64>* %arg1
+  store <4 x i64> %tmp, <4 x i64>* %arg
+  ret void
+}
+
+define void @no_promote(<4 x i64>* %arg) #1 {
+bb:
+  %tmp = alloca <4 x i64>, align 32
+  %tmp2 = alloca <4 x i64>, align 32
+  %tmp3 = bitcast <4 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @no_promote_avx2(<4 x i64>* %tmp2, <4 x i64>* %tmp)
+  %tmp4 = load <4 x i64>, <4 x i64>* %tmp2, align 32
+  store <4 x i64> %tmp4, <4 x i64>* %arg, align 2
+  ret void
+}
+
+; CHECK-LABEL: @promote_avx2(<4 x i64>* %arg, <4 x i64> %
+define internal fastcc void @promote_avx2(<4 x i64>* %arg, <4 x i64>* readonly %arg1) #0 {
+bb:
+  %tmp = load <4 x i64>, <4 x i64>* %arg1
+  store <4 x i64> %tmp, <4 x i64>* %arg
+  ret void
+}
+
+define void @promote(<4 x i64>* %arg) #0 {
+bb:
+  %tmp = alloca <4 x i64>, align 32
+  %tmp2 = alloca <4 x i64>, align 32
+  %tmp3 = bitcast <4 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @promote_avx2(<4 x i64>* %tmp2, <4 x i64>* %tmp)
+  %tmp4 = load <4 x i64>, <4 x i64>* %tmp2, align 32
+  store <4 x i64> %tmp4, <4 x i64>* %arg, align 2
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #2
+
+attributes #0 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" }
+attributes #1 = { nounwind uwtable }
+attributes #2 = { argmemonly nounwind }
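
For context on what "promote" means here: the readonly pointer parameter is rewritten into a by-value vector and the load moves to the call site, which is only safe when caller and callee agree on vector-register support (both carry "+avx2" above). A hedged sketch of the promoted shape, with illustrative names:

; promoted callee: the readonly pointer argument becomes a plain vector value
define internal fastcc void @promote_avx2.sketch(<4 x i64>* %arg, <4 x i64> %arg1.val) #0 {
bb:
  store <4 x i64> %arg1.val, <4 x i64>* %arg
  ret void
}
; call-site shape after promotion (the load is hoisted out of the callee):
;   %tmp.val = load <4 x i64>, <4 x i64>* %tmp
;   call fastcc void @promote_avx2.sketch(<4 x i64>* %tmp2, <4 x i64> %tmp.val)
attributes #0 = { nounwind "target-features"="+avx2" }
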
diff --git a/test/Transforms/ArgumentPromotion/nonzero-address-spaces.ll b/test/Transforms/ArgumentPromotion/nonzero-address-spaces.ll
new file mode 100644
index 0000000..2ed362b
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/nonzero-address-spaces.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+
+; ArgumentPromotion should preserve the default function address space
+; from the data layout.
+
+target datalayout = "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8"
+
+@g = common global i32 0, align 4
+
+define i32 @bar() {
+entry:
+  %call = call i32 @foo(i32* @g)
+; CHECK: %call = call addrspace(1) i32 @foo()
+  ret i32 %call
+}
+
+; CHECK: define internal i32 @foo() addrspace(1)
+define internal i32 @foo(i32*) {
+entry:
+  %retval = alloca i32, align 4
+  call void asm sideeffect "ldr r0, [r0] \0Abx lr        \0A", ""()
+  unreachable
+}
+
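
One detail worth spelling out: the "P1" component of this datalayout declares address space 1 as the program (function) address space, so the rebuilt @foo and every call to it must stay in addrspace(1), as the CHECK lines assert. A minimal standalone sketch under the same datalayout:

target datalayout = "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8"

define internal i32 @callee() addrspace(1) {
entry:
  ret i32 0
}

define i32 @caller() addrspace(1) {
entry:
  ; the call must carry the function address space from the datalayout
  %r = call addrspace(1) i32 @callee()
  ret i32 %r
}
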
diff --git a/test/Transforms/BDCE/dead-uses.ll b/test/Transforms/BDCE/dead-uses.ll
index 95c2893..34f8cc0 100644
--- a/test/Transforms/BDCE/dead-uses.ll
+++ b/test/Transforms/BDCE/dead-uses.ll
@@ -10,7 +10,7 @@
 define i32 @pr39771_fshr_multi_use_instr(i32 %a) {
 ; CHECK-LABEL: @pr39771_fshr_multi_use_instr(
 ; CHECK-NEXT:    [[X:%.*]] = or i32 [[A:%.*]], 0
-; CHECK-NEXT:    [[B:%.*]] = tail call i32 @llvm.fshr.i32(i32 [[X]], i32 [[X]], i32 1)
+; CHECK-NEXT:    [[B:%.*]] = tail call i32 @llvm.fshr.i32(i32 0, i32 [[X]], i32 1)
 ; CHECK-NEXT:    [[C:%.*]] = lshr i32 [[B]], 23
 ; CHECK-NEXT:    [[D:%.*]] = xor i32 [[C]], [[B]]
 ; CHECK-NEXT:    [[E:%.*]] = and i32 [[D]], 31
@@ -28,7 +28,7 @@
 define <2 x i32> @pr39771_fshr_multi_use_instr_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @pr39771_fshr_multi_use_instr_vec(
 ; CHECK-NEXT:    [[X:%.*]] = or <2 x i32> [[A:%.*]], zeroinitializer
-; CHECK-NEXT:    [[B:%.*]] = tail call <2 x i32> @llvm.fshr.v2i32(<2 x i32> [[X]], <2 x i32> [[X]], <2 x i32> <i32 1, i32 1>)
+; CHECK-NEXT:    [[B:%.*]] = tail call <2 x i32> @llvm.fshr.v2i32(<2 x i32> zeroinitializer, <2 x i32> [[X]], <2 x i32> <i32 1, i32 1>)
 ; CHECK-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 23, i32 23>
 ; CHECK-NEXT:    [[D:%.*]] = xor <2 x i32> [[C]], [[B]]
 ; CHECK-NEXT:    [[E:%.*]] = and <2 x i32> [[D]], <i32 31, i32 31>
@@ -45,7 +45,7 @@
 ; First fshr operand is dead, but it comes from an argument, not instruction.
 define i32 @pr39771_fshr_multi_use_arg(i32 %a) {
 ; CHECK-LABEL: @pr39771_fshr_multi_use_arg(
-; CHECK-NEXT:    [[B:%.*]] = tail call i32 @llvm.fshr.i32(i32 [[A:%.*]], i32 [[A]], i32 1)
+; CHECK-NEXT:    [[B:%.*]] = tail call i32 @llvm.fshr.i32(i32 0, i32 [[A:%.*]], i32 1)
 ; CHECK-NEXT:    [[C:%.*]] = lshr i32 [[B]], 23
 ; CHECK-NEXT:    [[D:%.*]] = xor i32 [[C]], [[B]]
 ; CHECK-NEXT:    [[E:%.*]] = and i32 [[D]], 31
@@ -58,11 +58,10 @@
   ret i32 %e
 }
 
-; Second or operand is dead, but BDCE does not realize this.
 define i32 @pr39771_expanded_fshr_multi_use(i32 %a) {
 ; CHECK-LABEL: @pr39771_expanded_fshr_multi_use(
 ; CHECK-NEXT:    [[TMP:%.*]] = lshr i32 [[A:%.*]], 1
-; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[A]], 32
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 0, 31
 ; CHECK-NEXT:    [[B:%.*]] = or i32 [[TMP]], [[TMP2]]
 ; CHECK-NEXT:    [[C:%.*]] = lshr i32 [[B]], 23
 ; CHECK-NEXT:    [[D:%.*]] = xor i32 [[C]], [[B]]
@@ -70,10 +69,35 @@
 ; CHECK-NEXT:    ret i32 [[E]]
 ;
   %tmp = lshr i32 %a, 1
-  %tmp2 = shl i32 %a, 32
+  %tmp2 = shl i32 %a, 31
   %b = or i32 %tmp, %tmp2
   %c = lshr i32 %b, 23
   %d = xor i32 %c, %b
   %e = and i32 %d, 31
   ret i32 %e
 }
+
+; The %b operand of %c will initially be dead, but is later found live.
+define void @dead_use_invalidation(i32 %a) {
+; CHECK-LABEL: @dead_use_invalidation(
+; CHECK-NEXT:    [[B:%.*]] = or i32 [[A:%.*]], 0
+; CHECK-NEXT:    [[C:%.*]] = shl i32 [[B]], 31
+; CHECK-NEXT:    [[D:%.*]] = and i32 [[C]], 1
+; CHECK-NEXT:    [[E:%.*]] = or i32 [[C]], 0
+; CHECK-NEXT:    [[F:%.*]] = or i32 [[D]], 0
+; CHECK-NEXT:    call void @dummy(i32 [[E]])
+; CHECK-NEXT:    call void @dummy(i32 [[F]])
+; CHECK-NEXT:    call void @dummy(i32 [[B]])
+; CHECK-NEXT:    ret void
+;
+  %b = or i32 %a, 0
+  %c = shl i32 %b, 31
+  %d = and i32 %c, 1
+  %e = or i32 %c, 0
+  %f = or i32 %d, 0
+  call void @dummy(i32 %e)
+  call void @dummy(i32 %f)
+  call void @dummy(i32 %b)
+  ret void
+}
+declare void @dummy(i32)
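
A hedged reading of why %b flips from dead to live: demanded bits flow from users to operands, and the two users of %c disagree about how much of %b they need.

define i32 @demanded_bits_sketch(i32 %b) {
  %c = shl i32 %b, 31   ; bit 31 of %c carries bit 0 of %b; bits 0..30 are zero
  %d = and i32 %c, 1    ; first user: demands only bit 0 of %c, so no bits of %b
  %e = or i32 %c, 0     ; second user: demands all bits of %c, so bit 0 of %b
  %r = or i32 %d, %e    ; keep both users alive for the sketch
  ret i32 %r
}

If BDCE visits the %d edge first, the %b operand of %c looks dead; once %e's full demand arrives, %b must be re-marked live, which is the invalidation the test exercises.
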
diff --git a/test/Transforms/BDCE/invalidate-assumptions.ll b/test/Transforms/BDCE/invalidate-assumptions.ll
index d165d74..69a99b3 100644
--- a/test/Transforms/BDCE/invalidate-assumptions.ll
+++ b/test/Transforms/BDCE/invalidate-assumptions.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -bdce %s -S | FileCheck %s
 
 ; The 'nuw' on the subtract allows us to deduce that %setbit is not demanded.
@@ -8,8 +9,7 @@
 
 define i1 @PR33695(i1 %b, i8 %x) {
 ; CHECK-LABEL: @PR33695(
-; CHECK-NEXT:    [[SETBIT:%.*]] = or i8 %x, 64
-; CHECK-NEXT:    [[LITTLE_NUMBER:%.*]] = zext i1 %b to i8
+; CHECK-NEXT:    [[LITTLE_NUMBER:%.*]] = zext i1 [[B:%.*]] to i8
 ; CHECK-NEXT:    [[BIG_NUMBER:%.*]] = shl i8 0, 1
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i8 [[BIG_NUMBER]], [[LITTLE_NUMBER]]
 ; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i8 [[SUB]] to i1
@@ -28,16 +28,12 @@
 
 define i64 @PR34037(i64 %m, i32 %r, i64 %j, i1 %b, i32 %k, i64 %p) {
 ; CHECK-LABEL: @PR34037(
-; CHECK-NEXT:    [[CONV:%.*]] = zext i32 %r to i64
-; CHECK-NEXT:    [[AND:%.*]] = and i64 %m, 0
-; CHECK-NEXT:    [[NEG:%.*]] = xor i64 0, 34359738367
-; CHECK-NEXT:    [[OR:%.*]] = or i64 %j, 0
 ; CHECK-NEXT:    [[SHL:%.*]] = shl i64 0, 29
-; CHECK-NEXT:    [[CONV1:%.*]] = select i1 %b, i64 7, i64 0
+; CHECK-NEXT:    [[CONV1:%.*]] = select i1 [[B:%.*]], i64 7, i64 0
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 [[SHL]], [[CONV1]]
-; CHECK-NEXT:    [[CONV2:%.*]] = zext i32 %k to i64
+; CHECK-NEXT:    [[CONV2:%.*]] = zext i32 [[K:%.*]] to i64
 ; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[SUB]], [[CONV2]]
-; CHECK-NEXT:    [[CONV4:%.*]] = and i64 %p, 65535
+; CHECK-NEXT:    [[CONV4:%.*]] = and i64 [[P:%.*]], 65535
 ; CHECK-NEXT:    [[AND5:%.*]] = and i64 [[MUL]], [[CONV4]]
 ; CHECK-NEXT:    ret i64 [[AND5]]
 ;
@@ -63,8 +59,7 @@
 
 define i1 @poison_on_call_user_is_ok(i1 %b, i8 %x) {
 ; CHECK-LABEL: @poison_on_call_user_is_ok(
-; CHECK-NEXT:    [[SETBIT:%.*]] = or i8 %x, 64
-; CHECK-NEXT:    [[LITTLE_NUMBER:%.*]] = zext i1 %b to i8
+; CHECK-NEXT:    [[LITTLE_NUMBER:%.*]] = zext i1 [[B:%.*]] to i8
 ; CHECK-NEXT:    [[BIG_NUMBER:%.*]] = shl i8 0, 1
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i8 [[BIG_NUMBER]], [[LITTLE_NUMBER]]
 ; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i8 [[SUB]] to i1
@@ -90,7 +85,7 @@
 
 define void @PR34179(i32* %a) {
 ; CHECK-LABEL: @PR34179(
-; CHECK-NEXT:    [[T0:%.*]] = load volatile i32, i32* %a
+; CHECK-NEXT:    [[T0:%.*]] = load volatile i32, i32* [[A:%.*]]
 ; CHECK-NEXT:    ret void
 ;
   %t0 = load volatile i32, i32* %a
diff --git a/test/Transforms/BDCE/vectors.ll b/test/Transforms/BDCE/vectors.ll
index fde22fd..63a8587 100644
--- a/test/Transforms/BDCE/vectors.ll
+++ b/test/Transforms/BDCE/vectors.ll
@@ -1,26 +1,17 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -bdce -instsimplify < %s | FileCheck %s
-; RUN: opt -S -instsimplify < %s | FileCheck %s -check-prefix=CHECK-IO
-; CHECK-IO lines to ensure that transformations are not performed by only instsimplify.
+; RUN: opt -S -bdce < %s | FileCheck %s
 
 ; BDCE applied to integer vectors.
 
 define <2 x i32> @test_basic(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-LABEL: @test_basic(
+; CHECK-NEXT:    [[A3:%.*]] = and <2 x i32> zeroinitializer, <i32 4, i32 4>
 ; CHECK-NEXT:    [[B2:%.*]] = add <2 x i32> [[B:%.*]], <i32 1, i32 1>
 ; CHECK-NEXT:    [[B3:%.*]] = and <2 x i32> [[B2]], <i32 8, i32 8>
-; CHECK-NEXT:    [[D:%.*]] = ashr <2 x i32> [[B3]], <i32 3, i32 3>
+; CHECK-NEXT:    [[C:%.*]] = or <2 x i32> [[A3]], [[B3]]
+; CHECK-NEXT:    [[D:%.*]] = ashr <2 x i32> [[C]], <i32 3, i32 3>
 ; CHECK-NEXT:    ret <2 x i32> [[D]]
 ;
-; CHECK-IO-LABEL: @test_basic(
-; CHECK-IO-NEXT:    [[A2:%.*]] = add <2 x i32> [[A:%.*]], <i32 1, i32 1>
-; CHECK-IO-NEXT:    [[A3:%.*]] = and <2 x i32> [[A2]], <i32 4, i32 4>
-; CHECK-IO-NEXT:    [[B2:%.*]] = add <2 x i32> [[B:%.*]], <i32 1, i32 1>
-; CHECK-IO-NEXT:    [[B3:%.*]] = and <2 x i32> [[B2]], <i32 8, i32 8>
-; CHECK-IO-NEXT:    [[C:%.*]] = or <2 x i32> [[A3]], [[B3]]
-; CHECK-IO-NEXT:    [[D:%.*]] = ashr <2 x i32> [[C]], <i32 3, i32 3>
-; CHECK-IO-NEXT:    ret <2 x i32> [[D]]
-;
   %a2 = add <2 x i32> %a, <i32 1, i32 1>
   %a3 = and <2 x i32> %a2, <i32 4, i32 4>
   %b2 = add <2 x i32> %b, <i32 1, i32 1>
@@ -33,22 +24,14 @@
 ; Going vector -> scalar
 define i32 @test_extractelement(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-LABEL: @test_extractelement(
+; CHECK-NEXT:    [[A3:%.*]] = and <2 x i32> zeroinitializer, <i32 4, i32 4>
 ; CHECK-NEXT:    [[B2:%.*]] = add <2 x i32> [[B:%.*]], <i32 1, i32 1>
 ; CHECK-NEXT:    [[B3:%.*]] = and <2 x i32> [[B2]], <i32 8, i32 8>
-; CHECK-NEXT:    [[D:%.*]] = extractelement <2 x i32> [[B3]], i32 0
+; CHECK-NEXT:    [[C:%.*]] = or <2 x i32> [[A3]], [[B3]]
+; CHECK-NEXT:    [[D:%.*]] = extractelement <2 x i32> [[C]], i32 0
 ; CHECK-NEXT:    [[E:%.*]] = ashr i32 [[D]], 3
 ; CHECK-NEXT:    ret i32 [[E]]
 ;
-; CHECK-IO-LABEL: @test_extractelement(
-; CHECK-IO-NEXT:    [[A2:%.*]] = add <2 x i32> [[A:%.*]], <i32 1, i32 1>
-; CHECK-IO-NEXT:    [[A3:%.*]] = and <2 x i32> [[A2]], <i32 4, i32 4>
-; CHECK-IO-NEXT:    [[B2:%.*]] = add <2 x i32> [[B:%.*]], <i32 1, i32 1>
-; CHECK-IO-NEXT:    [[B3:%.*]] = and <2 x i32> [[B2]], <i32 8, i32 8>
-; CHECK-IO-NEXT:    [[C:%.*]] = or <2 x i32> [[A3]], [[B3]]
-; CHECK-IO-NEXT:    [[D:%.*]] = extractelement <2 x i32> [[C]], i32 0
-; CHECK-IO-NEXT:    [[E:%.*]] = ashr i32 [[D]], 3
-; CHECK-IO-NEXT:    ret i32 [[E]]
-;
   %a2 = add <2 x i32> %a, <i32 1, i32 1>
   %a3 = and <2 x i32> %a2, <i32 4, i32 4>
   %b2 = add <2 x i32> %b, <i32 1, i32 1>
@@ -62,23 +45,14 @@
 ; Going scalar -> vector
 define <2 x i32> @test_insertelement(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test_insertelement(
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> zeroinitializer, <i32 4, i32 4>
 ; CHECK-NEXT:    [[Y:%.*]] = insertelement <2 x i32> undef, i32 [[B:%.*]], i32 0
 ; CHECK-NEXT:    [[Y2:%.*]] = insertelement <2 x i32> [[Y]], i32 [[A:%.*]], i32 1
 ; CHECK-NEXT:    [[Y3:%.*]] = and <2 x i32> [[Y2]], <i32 8, i32 8>
-; CHECK-NEXT:    [[U:%.*]] = ashr <2 x i32> [[Y3]], <i32 3, i32 3>
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i32> [[X3]], [[Y3]]
+; CHECK-NEXT:    [[U:%.*]] = ashr <2 x i32> [[Z]], <i32 3, i32 3>
 ; CHECK-NEXT:    ret <2 x i32> [[U]]
 ;
-; CHECK-IO-LABEL: @test_insertelement(
-; CHECK-IO-NEXT:    [[X:%.*]] = insertelement <2 x i32> undef, i32 [[A:%.*]], i32 0
-; CHECK-IO-NEXT:    [[X2:%.*]] = insertelement <2 x i32> [[X]], i32 [[B:%.*]], i32 1
-; CHECK-IO-NEXT:    [[X3:%.*]] = and <2 x i32> [[X2]], <i32 4, i32 4>
-; CHECK-IO-NEXT:    [[Y:%.*]] = insertelement <2 x i32> undef, i32 [[B]], i32 0
-; CHECK-IO-NEXT:    [[Y2:%.*]] = insertelement <2 x i32> [[Y]], i32 [[A]], i32 1
-; CHECK-IO-NEXT:    [[Y3:%.*]] = and <2 x i32> [[Y2]], <i32 8, i32 8>
-; CHECK-IO-NEXT:    [[Z:%.*]] = or <2 x i32> [[X3]], [[Y3]]
-; CHECK-IO-NEXT:    [[U:%.*]] = ashr <2 x i32> [[Z]], <i32 3, i32 3>
-; CHECK-IO-NEXT:    ret <2 x i32> [[U]]
-;
   %x = insertelement <2 x i32> undef, i32 %a, i32 0
   %x2 = insertelement <2 x i32> %x, i32 %b, i32 1
   %x3 = and <2 x i32> %x2, <i32 4, i32 4>
@@ -101,15 +75,6 @@
 ; CHECK-NEXT:    [[U:%.*]] = ashr <2 x i32> [[Z]], <i32 3, i32 3>
 ; CHECK-NEXT:    ret <2 x i32> [[U]]
 ;
-; CHECK-IO-LABEL: @test_conversion(
-; CHECK-IO-NEXT:    [[A2:%.*]] = add <2 x i32> [[A:%.*]], <i32 1, i32 1>
-; CHECK-IO-NEXT:    [[A3:%.*]] = and <2 x i32> [[A2]], <i32 2, i32 2>
-; CHECK-IO-NEXT:    [[X:%.*]] = uitofp <2 x i32> [[A3]] to <2 x double>
-; CHECK-IO-NEXT:    [[Y:%.*]] = fadd <2 x double> [[X]], <double 1.000000e+00, double 1.000000e+00>
-; CHECK-IO-NEXT:    [[Z:%.*]] = fptoui <2 x double> [[Y]] to <2 x i32>
-; CHECK-IO-NEXT:    [[U:%.*]] = ashr <2 x i32> [[Z]], <i32 3, i32 3>
-; CHECK-IO-NEXT:    ret <2 x i32> [[U]]
-;
   %a2 = add <2 x i32> %a, <i32 1, i32 1>
   %a3 = and <2 x i32> %a2, <i32 2, i32 2>
   %x = uitofp <2 x i32> %a3 to <2 x double>
@@ -123,18 +88,11 @@
 define <2 x i1> @test_assumption_invalidation(<2 x i1> %b, <2 x i8> %x) {
 ; CHECK-LABEL: @test_assumption_invalidation(
 ; CHECK-NEXT:    [[LITTLE_NUMBER:%.*]] = zext <2 x i1> [[B:%.*]] to <2 x i8>
-; CHECK-NEXT:    [[SUB:%.*]] = sub <2 x i8> zeroinitializer, [[LITTLE_NUMBER]]
+; CHECK-NEXT:    [[BIG_NUMBER:%.*]] = shl <2 x i8> zeroinitializer, <i8 1, i8 1>
+; CHECK-NEXT:    [[SUB:%.*]] = sub <2 x i8> [[BIG_NUMBER]], [[LITTLE_NUMBER]]
 ; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i8> [[SUB]] to <2 x i1>
 ; CHECK-NEXT:    ret <2 x i1> [[TRUNC]]
 ;
-; CHECK-IO-LABEL: @test_assumption_invalidation(
-; CHECK-IO-NEXT:    [[SETBIT:%.*]] = or <2 x i8> [[X:%.*]], <i8 64, i8 64>
-; CHECK-IO-NEXT:    [[LITTLE_NUMBER:%.*]] = zext <2 x i1> [[B:%.*]] to <2 x i8>
-; CHECK-IO-NEXT:    [[BIG_NUMBER:%.*]] = shl <2 x i8> [[SETBIT]], <i8 1, i8 1>
-; CHECK-IO-NEXT:    [[SUB:%.*]] = sub nuw <2 x i8> [[BIG_NUMBER]], [[LITTLE_NUMBER]]
-; CHECK-IO-NEXT:    [[TRUNC:%.*]] = trunc <2 x i8> [[SUB]] to <2 x i1>
-; CHECK-IO-NEXT:    ret <2 x i1> [[TRUNC]]
-;
   %setbit = or <2 x i8> %x, <i8 64, i8 64>
   %little_number = zext <2 x i1> %b to <2 x i8>
   %big_number = shl <2 x i8> %setbit, <i8 1, i8 1>
diff --git a/test/Transforms/CanonicalizeAliases/canonicalize.ll b/test/Transforms/CanonicalizeAliases/canonicalize.ll
new file mode 100644
index 0000000..e762fc5
--- /dev/null
+++ b/test/Transforms/CanonicalizeAliases/canonicalize.ll
@@ -0,0 +1,37 @@
+; RUN: opt -S -canonicalize-aliases < %s | FileCheck %s
+; RUN: opt -prepare-for-thinlto -O0 -module-summary -o - < %s | llvm-dis -o - | FileCheck %s
+; RUN: opt -S -passes=canonicalize-aliases < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-DAG: @analias = alias void (), void ()* @aliasee
+; CHECK-DAG: @anotheralias = alias void (), void ()* @aliasee
+; CHECK-DAG: define void @aliasee()
+
+@analias = alias void (), void ()* @anotheralias
+@anotheralias = alias void (), bitcast (void ()* @aliasee to void ()*)
+
+; Function Attrs: nounwind uwtable
+define void @aliasee() #0 {
+entry:
+    ret void
+}
+
+%struct.S1 = type { i32, i32, i32 }
+
+; CHECK-DAG: @S = global %struct.S1 { i32 31, i32 32, i32 33 }
+; CHECK-DAG: @Salias = alias i32, getelementptr inbounds (%struct.S1, %struct.S1* @S, i32 0, i32 1)
+; CHECK-DAG: @Salias2 = alias i32, getelementptr inbounds (%struct.S1, %struct.S1* @S, i32 0, i32 1)
+; CHECK-DAG: @Salias3 = alias i32, getelementptr inbounds (%struct.S1, %struct.S1* @S, i32 0, i32 1)
+
+@S = global %struct.S1 { i32 31, i32 32, i32 33 }, align 4
+@Salias = alias i32, getelementptr inbounds (%struct.S1, %struct.S1* @S, i32 0, i32 1)
+@Salias2 = alias i32, i32* @Salias
+@Salias3 = alias i32, i32* @Salias2
+
+; CHECK-DAG: @Salias4 = alias %struct.S1, %struct.S1* @S
+; CHECK-DAG: @Salias5 = alias i32, getelementptr inbounds (%struct.S1, %struct.S1* @S, i32 0, i32 1)
+
+@Salias4 = alias %struct.S1, %struct.S1* @S
+@Salias5 = alias i32, getelementptr inbounds (%struct.S1, %struct.S1* @Salias4, i32 0, i32 1)
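
The -prepare-for-thinlto RUN line hints at the motivation: summary-based ThinLTO wants each alias to name a concrete global object directly rather than another alias. A minimal sketch of the canonicalization on a two-deep chain (names illustrative):

; before canonicalize-aliases: @outer points at another alias
@inner = alias void (), void ()* @impl
@outer = alias void (), void ()* @inner

define void @impl() {
entry:
  ret void
}

; after the pass, both aliases point straight at the definition:
;   @inner = alias void (), void ()* @impl
;   @outer = alias void (), void ()* @impl
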
diff --git a/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll b/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
index 6bb38d4..04789ea 100644
--- a/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
+++ b/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
@@ -6,10 +6,14 @@
 
 @g = external local_unnamed_addr global i32, align 4
 
+; CHECK-LABEL: define{{.*}}@caller(
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 -1, i8* %tmp.i)
+; CHECK-NEXT: call void @callee_unknown_use1.{{.*}}(i8* %tmp.i
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* %tmp.i)
+
 define i32 @callee_unknown_use1(i32 %arg) local_unnamed_addr #0 {
 ; CHECK-LABEL:define{{.*}}@callee_unknown_use1.{{[0-9]}}
 ; CHECK-NOT: alloca
-; CHECK: call void @llvm.lifetime
 bb:
   %tmp = alloca  i8, align 4
   %tmp2 = load i32, i32* @g, align 4, !tbaa !2
diff --git a/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll b/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
index 9c53496..0bde58f 100644
--- a/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
+++ b/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
@@ -9,7 +9,6 @@
 define i32 @callee_unknown_use2(i32 %arg) local_unnamed_addr #0 {
 ; CHECK-LABEL:define{{.*}}@callee_unknown_use2.{{[0-9]}}
 ; CHECK-NOT: alloca
-; CHECK: call void @llvm.lifetime
 bb:
   %tmp = alloca i32, align 4
   %tmp1 = bitcast i32* %tmp to i8*
diff --git a/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll b/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
index 5cc00b7..005ea37 100644
--- a/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
+++ b/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
@@ -88,18 +88,23 @@
 }
 
 declare %struct_type* @foo()
+declare void @foo2()
 
 define void @test4(i32 %n) personality i32 (...)* @__FrameHandler {
 ; CHECK-LABEL: test4
 entry:
-  %struct = invoke %struct_type* @foo() to label %while_cond unwind label %cleanup
+  br label %while_cond
 
 while_cond:
   %phi = phi i32 [ 0, %entry ], [ %i, %while_body ]
+  %struct = invoke %struct_type* @foo() to label %while_cond_x unwind label %cleanup
+
+while_cond_x:
 ; CHECK:     mov     w{{[0-9]+}}, #40000
 ; CHECK-NOT: mov     w{{[0-9]+}}, #40004
   %gep0 = getelementptr %struct_type, %struct_type* %struct, i64 0, i32 1
   %gep1 = getelementptr %struct_type, %struct_type* %struct, i64 0, i32 2
+  store i32 0, i32* %gep0
   %cmp = icmp slt i32 %phi, %n
   br i1 %cmp, label %while_body, label %while_end
 
@@ -114,8 +119,10 @@
   ret void
 
 cleanup:
-  landingpad { i8*, i32 } cleanup
-  unreachable
+  %x10 = landingpad { i8*, i32 }
+          cleanup
+  call void @foo2()
+  resume { i8*, i32 } %x10
 }
 
 declare i32 @__FrameHandler(...)
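
The #40000 / CHECK-NOT #40004 pair checks large-offset GEP splitting: when two fields sit at nearby large offsets, CodeGenPrepare reassociates the second address off the first so only one large immediate is materialized. A sketch under an assumed layout (the real %struct_type is defined earlier in this file; the [10000 x i32] filler below is hypothetical, chosen to put the two i32 fields at byte offsets 40000 and 40004):

%struct_type = type { [10000 x i32], i32, i32 }

define i32 @large_offset_sketch(%struct_type* %s) {
entry:
  ; fields 1 and 2 sit at byte offsets 40000 and 40004; after the rewrite
  ; the second address is (%s + 40000) + 4, so only #40000 is materialized
  %gep0 = getelementptr %struct_type, %struct_type* %s, i64 0, i32 1
  %gep1 = getelementptr %struct_type, %struct_type* %s, i64 0, i32 2
  %v0 = load i32, i32* %gep0
  %v1 = load i32, i32* %gep1
  %sum = add i32 %v0, %v1
  ret i32 %sum
}
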
diff --git a/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll b/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
index 36440da..f5644e4 100644
--- a/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
+++ b/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
@@ -32,6 +32,6 @@
 
 !0 = !{i32 1, !"wchar_size", i32 4}
 !1 = !{i32 1, !"min_enum_size", i32 4}
-!2 = !{!"clang version 3.8.0 (http://llvm.org/git/clang.git b7441a0f42c43a8eea9e3e706be187252db747fa)"}
+!2 = !{!"clang version 3.8.0"}
 !3 = distinct !{!3, !4}
 !4 = !{!"llvm.loop.unroll.full"}
diff --git a/test/Transforms/CodeGenPrepare/bitreverse-hang.ll b/test/Transforms/CodeGenPrepare/bitreverse-hang.ll
index c81dcc1..3404405 100644
--- a/test/Transforms/CodeGenPrepare/bitreverse-hang.ll
+++ b/test/Transforms/CodeGenPrepare/bitreverse-hang.ll
@@ -46,7 +46,7 @@
 !llvm.ident = !{!1}
 
 !0 = !{i32 1, !"PIC Level", i32 2}
-!1 = !{!"clang version 3.8.0 (http://llvm.org/git/clang.git eb70f4e9cc9a4dc3dd57b032fb858d56b4b64a0e)"}
+!1 = !{!"clang version 3.8.0"}
 !2 = !{!3, !3, i64 0}
 !3 = !{!"int", !4, i64 0}
 !4 = !{!"omnipotent char", !5, i64 0}
diff --git a/test/Transforms/CodeGenPrepare/dom-tree.ll b/test/Transforms/CodeGenPrepare/dom-tree.ll
index b012706..28c3b94 100644
--- a/test/Transforms/CodeGenPrepare/dom-tree.ll
+++ b/test/Transforms/CodeGenPrepare/dom-tree.ll
@@ -36,6 +36,6 @@
 
 !0 = !{i32 1, !"wchar_size", i32 4}
 !1 = !{i32 1, !"min_enum_size", i32 4}
-!2 = !{!"clang version 3.8.0 (http://llvm.org/git/clang.git b7441a0f42c43a8eea9e3e706be187252db747fa)"}
+!2 = !{!"clang version 3.8.0"}
 !3 = distinct !{!3, !4}
 !4 = !{!"llvm.loop.unroll.full"}
diff --git a/test/Transforms/ConstProp/2007-11-23-cttz.ll b/test/Transforms/ConstProp/2007-11-23-cttz.ll
deleted file mode 100644
index c5ee70c..0000000
--- a/test/Transforms/ConstProp/2007-11-23-cttz.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: opt < %s -constprop -S | grep "ret i13 13"
-; PR1816
-declare i13 @llvm.cttz.i13(i13, i1)
-
-define i13 @test() {
-	%X = call i13 @llvm.cttz.i13(i13 0, i1 false)
-	ret i13 %X
-}
diff --git a/test/Transforms/ConstProp/overflow-ops.ll b/test/Transforms/ConstProp/overflow-ops.ll
index 1ae3e56..303b3b9 100644
--- a/test/Transforms/ConstProp/overflow-ops.ll
+++ b/test/Transforms/ConstProp/overflow-ops.ll
@@ -1,6 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -constprop -S | FileCheck %s
 
-
 declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8)
 declare {i8, i1} @llvm.usub.with.overflow.i8(i8, i8)
 declare {i8, i1} @llvm.umul.with.overflow.i8(i8, i8)
@@ -14,21 +14,27 @@
 ;;-----------------------------
 
 define {i8, i1} @uadd_1() nounwind {
-entry:
+; CHECK-LABEL: @uadd_1(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -114, i1 false }
+;
   %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 42, i8 100)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @uadd_1(
-; CHECK: ret { i8, i1 } { i8 -114, i1 false }
 }
 
 define {i8, i1} @uadd_2() nounwind {
-entry:
+; CHECK-LABEL: @uadd_2(
+; CHECK-NEXT:    ret { i8, i1 } { i8 6, i1 true }
+;
   %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 142, i8 120)
   ret {i8, i1} %t
+}
 
-; CHECK-LABEL: @uadd_2(
-; CHECK: ret { i8, i1 } { i8 6, i1 true }
+define {i8, i1} @uadd_undef() nounwind {
+; CHECK-LABEL: @uadd_undef(
+; CHECK-NEXT:    ret { i8, i1 } undef
+;
+  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 142, i8 undef)
+  ret {i8, i1} %t
 }
 
 ;;-----------------------------
@@ -36,21 +42,27 @@
 ;;-----------------------------
 
 define {i8, i1} @usub_1() nounwind {
-entry:
+; CHECK-LABEL: @usub_1(
+; CHECK-NEXT:    ret { i8, i1 } { i8 2, i1 false }
+;
   %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 2)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @usub_1(
-; CHECK: ret { i8, i1 } { i8 2, i1 false }
 }
 
 define {i8, i1} @usub_2() nounwind {
-entry:
+; CHECK-LABEL: @usub_2(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -2, i1 true }
+;
   %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 6)
   ret {i8, i1} %t
+}
 
-; CHECK-LABEL: @usub_2(
-; CHECK: ret { i8, i1 } { i8 -2, i1 true }
+define {i8, i1} @usub_undef() nounwind {
+; CHECK-LABEL: @usub_undef(
+; CHECK-NEXT:    ret { i8, i1 } undef
+;
+  %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 undef)
+  ret {i8, i1} %t
 }
 
 ;;-----------------------------
@@ -58,21 +70,35 @@
 ;;-----------------------------
 
 define {i8, i1} @umul_1() nounwind {
-entry:
+; CHECK-LABEL: @umul_1(
+; CHECK-NEXT:    ret { i8, i1 } { i8 44, i1 true }
+;
   %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 100, i8 3)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @umul_1(
-; CHECK: ret { i8, i1 } { i8 44, i1 true }
 }
 
 define {i8, i1} @umul_2() nounwind {
-entry:
+; CHECK-LABEL: @umul_2(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -56, i1 false }
+;
   %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 100, i8 2)
   ret {i8, i1} %t
+}
 
-; CHECK-LABEL: @umul_2(
-; CHECK: ret { i8, i1 } { i8 -56, i1 false }
+define {i8, i1} @umul_undef() nounwind {
+; CHECK-LABEL: @umul_undef(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 2)
+  ret {i8, i1} %t
+}
+
+define {i8, i1} @umul_both_undef() nounwind {
+; CHECK-LABEL: @umul_both_undef(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
+  ret {i8, i1} %t
 }
 
 ;;-----------------------------
@@ -80,128 +106,145 @@
 ;;-----------------------------
 
 define {i8, i1} @sadd_1() nounwind {
-entry:
+; CHECK-LABEL: @sadd_1(
+; CHECK-NEXT:    ret { i8, i1 } { i8 44, i1 false }
+;
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 42, i8 2)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @sadd_1(
-; CHECK: ret { i8, i1 } { i8 44, i1 false }
 }
 
 define {i8, i1} @sadd_2() nounwind {
-entry:
+; CHECK-LABEL: @sadd_2(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -126, i1 true }
+;
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 120, i8 10)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @sadd_2(
-; CHECK: ret { i8, i1 } { i8 -126, i1 true }
 }
 
 define {i8, i1} @sadd_3() nounwind {
-entry:
+; CHECK-LABEL: @sadd_3(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -110, i1 false }
+;
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 -120, i8 10)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @sadd_3(
-; CHECK: ret { i8, i1 } { i8 -110, i1 false }
 }
 
 define {i8, i1} @sadd_4() nounwind {
-entry:
+; CHECK-LABEL: @sadd_4(
+; CHECK-NEXT:    ret { i8, i1 } { i8 126, i1 true }
+;
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 -120, i8 -10)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @sadd_4(
-; CHECK: ret { i8, i1 } { i8 126, i1 true }
 }
 
 define {i8, i1} @sadd_5() nounwind {
-entry:
+; CHECK-LABEL: @sadd_5(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -8, i1 false }
+;
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 2, i8 -10)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @sadd_5(
-; CHECK: ret { i8, i1 } { i8 -8, i1 false }
 }
 
+define {i8, i1} @sadd_undef() nounwind {
+; CHECK-LABEL: @sadd_undef(
+; CHECK-NEXT:    ret { i8, i1 } undef
+;
+  %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 -10)
+  ret {i8, i1} %t
+}
 
 ;;-----------------------------
 ;; ssub
 ;;-----------------------------
 
 define {i8, i1} @ssub_1() nounwind {
-entry:
+; CHECK-LABEL: @ssub_1(
+; CHECK-NEXT:    ret { i8, i1 } { i8 2, i1 false }
+;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 4, i8 2)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @ssub_1(
-; CHECK: ret { i8, i1 } { i8 2, i1 false }
 }
 
 define {i8, i1} @ssub_2() nounwind {
-entry:
+; CHECK-LABEL: @ssub_2(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -2, i1 false }
+;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 4, i8 6)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @ssub_2(
-; CHECK: ret { i8, i1 } { i8 -2, i1 false }
 }
 
 define {i8, i1} @ssub_3() nounwind {
-entry:
+; CHECK-LABEL: @ssub_3(
+; CHECK-NEXT:    ret { i8, i1 } { i8 126, i1 true }
+;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 -10, i8 120)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @ssub_3(
-; CHECK: ret { i8, i1 } { i8 126, i1 true }
 }
 
 define {i8, i1} @ssub_3b() nounwind {
-entry:
+; CHECK-LABEL: @ssub_3b(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -20, i1 false }
+;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 -10, i8 10)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @ssub_3b(
-; CHECK: ret { i8, i1 } { i8 -20, i1 false }
 }
 
 define {i8, i1} @ssub_4() nounwind {
-entry:
+; CHECK-LABEL: @ssub_4(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -126, i1 true }
+;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 120, i8 -10)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @ssub_4(
-; CHECK: ret { i8, i1 } { i8 -126, i1 true }
 }
 
 define {i8, i1} @ssub_4b() nounwind {
-entry:
+; CHECK-LABEL: @ssub_4b(
+; CHECK-NEXT:    ret { i8, i1 } { i8 30, i1 false }
+;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 20, i8 -10)
   ret {i8, i1} %t
-
-; CHECK-LABEL: @ssub_4b(
-; CHECK: ret { i8, i1 } { i8 30, i1 false }
 }
 
 define {i8, i1} @ssub_5() nounwind {
-entry:
+; CHECK-LABEL: @ssub_5(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -10, i1 false }
+;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 -20, i8 -10)
   ret {i8, i1} %t
+}
 
-; CHECK-LABEL: @ssub_5(
-; CHECK: ret { i8, i1 } { i8 -10, i1 false }
+define {i8, i1} @ssub_undef() nounwind {
+; CHECK-LABEL: @ssub_undef(
+; CHECK-NEXT:    ret { i8, i1 } undef
+;
+  %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 -10)
+  ret {i8, i1} %t
 }
 
 ;;-----------------------------
 ;; smul
 ;;-----------------------------
 
-; rdar://8501501
 define {i8, i1} @smul_1() nounwind {
-entry:
+; CHECK-LABEL: @smul_1(
+; CHECK-NEXT:    ret { i8, i1 } { i8 -56, i1 true }
+;
   %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 -20, i8 -10)
   ret {i8, i1} %t
+}
 
-; CHECK-LABEL: @smul_1(
-; CHECK: ret { i8, i1 } { i8 -56, i1 true }
+define {i8, i1} @smul_undef() nounwind {
+; CHECK-LABEL: @smul_undef(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 -20, i8 undef)
+  ret {i8, i1} %t
+}
+
+define {i8, i1} @smul_both_undef() nounwind {
+; CHECK-LABEL: @smul_both_undef(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
+  ret {i8, i1} %t
 }
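
The new undef folds are deliberately asymmetric: i8 addition with a fixed operand is a bijection, so an undef addend can reach any result/overflow pair and the whole struct folds to undef, while multiplication by a known even value forces the product's low bit to zero, so full undef would be unsound and the folder instead commits to undef := 0, i.e. { i8 0, i1 false } == zeroinitializer. The two shapes in miniature:

define { i8, i1 } @fold_to_undef() {
  ; 142 + undef can be any i8 value, with or without carry -> undef
  %t = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 142, i8 undef)
  ret { i8, i1 } %t
}

define { i8, i1 } @fold_to_zero() {
  ; undef * 2 is always even, so full undef would be unsound;
  ; picking undef := 0 gives { 0, false } == zeroinitializer
  %t = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 2)
  ret { i8, i1 } %t
}

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)
declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8)
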
diff --git a/test/Transforms/Coroutines/coro-debug.ll b/test/Transforms/Coroutines/coro-debug.ll
index fefffd5..e9e61ed 100644
--- a/test/Transforms/Coroutines/coro-debug.ll
+++ b/test/Transforms/Coroutines/coro-debug.ll
@@ -100,12 +100,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 97b002238b11ff30d94d0516d6a0515db5725fd8) (http://llvm.org/git/llvm.git 0cb060ba567f1aa5b4b04e86665f88e4632b528a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "<stdin>", directory: "C:\5CGitHub\5Cllvm\5Cbuild\5CDebug\5Cbin")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 5.0.0 (http://llvm.org/git/clang.git 97b002238b11ff30d94d0516d6a0515db5725fd8) (http://llvm.org/git/llvm.git 0cb060ba567f1aa5b4b04e86665f88e4632b528a)"}
+!5 = !{!"clang version 5.0.0"}
 !6 = distinct !DISubprogram(name: "f", linkageName: "flink", scope: !7, file: !7, line: 55, type: !8, isLocal: false, isDefinition: true, scopeLine: 55, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !2)
 !7 = !DIFile(filename: "simple-repro.c", directory: "C:\5CGitHub\5Cllvm\5Cbuild\5CDebug\5Cbin")
 !8 = !DISubroutineType(types: !9)
diff --git a/test/Transforms/Coroutines/coro-split-dbg.ll b/test/Transforms/Coroutines/coro-split-dbg.ll
index 328ea2c..e79d871 100644
--- a/test/Transforms/Coroutines/coro-split-dbg.ll
+++ b/test/Transforms/Coroutines/coro-split-dbg.ll
@@ -77,12 +77,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 6c405f93921ac99ff5b8521bb1b3df4449deede4) (http://llvm.org/git/llvm.git 6e6d3247102a0f87ce14e906dcf6a3a5ed3faa65)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "coro.c", directory: "/home/gor/build/bin")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 6c405f93921ac99ff5b8521bb1b3df4449deede4) (http://llvm.org/git/llvm.git 6e6d3247102a0f87ce14e906dcf6a3a5ed3faa65)"}
+!5 = !{!"clang version 4.0.0"}
 !6 = distinct !DISubprogram(name: "print", scope: !1, file: !1, line: 6, type: !7, isLocal: false, isDefinition: true, scopeLine: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !10)
 !7 = !DISubroutineType(types: !8)
 !8 = !{null, !9}
diff --git a/test/Transforms/DeadArgElim/nonzero-address-spaces.ll b/test/Transforms/DeadArgElim/nonzero-address-spaces.ll
new file mode 100644
index 0000000..1b2aa06
--- /dev/null
+++ b/test/Transforms/DeadArgElim/nonzero-address-spaces.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -deadargelim %s | FileCheck %s
+
+; DeadArgumentElimination should respect the function address space
+; in the data layout.
+
+target datalayout = "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8"
+
+; CHECK: define internal i32 @foo() addrspace(1)
+define internal i32 @foo(i32 %x) #0 {
+  tail call void asm sideeffect inteldialect "mov eax, [esp + $$4]\0A\09ret", "~{eax},~{dirflag},~{fpsr},~{flags}"()
+  unreachable
+}
+
+define i32 @f(i32 %x, i32 %y) {
+  ; CHECK: %r = call addrspace(1) i32 @foo()
+  %r = call i32 @foo(i32 %x)
+
+  ret i32 %r
+}
+
diff --git a/test/Transforms/EarlyCSE/debug-info-undef.ll b/test/Transforms/EarlyCSE/debug-info-undef.ll
new file mode 100644
index 0000000..4615aa2
--- /dev/null
+++ b/test/Transforms/EarlyCSE/debug-info-undef.ll
@@ -0,0 +1,69 @@
+; RUN: opt -S %s -early-cse | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+@a = global i8 25, align 1, !dbg !0
+
+define signext i16 @b() !dbg !12 {
+entry:
+  call void @llvm.dbg.value(metadata i16 23680, metadata !17, metadata !DIExpression()), !dbg !18
+  %0 = load i8, i8* @a, align 1, !dbg !19, !tbaa !20
+  %conv = sext i8 %0 to i16, !dbg !19
+
+; CHECK: call void @llvm.dbg.value(metadata i16 undef, metadata !17, metadata !DIExpression()), !dbg !18
+; CHECK-NEXT:  call i32 (...) @optimize_me_not()
+
+  call void @llvm.dbg.value(metadata i16 %conv, metadata !17, metadata !DIExpression()), !dbg !18
+  %call = call i32 (...) @optimize_me_not(), !dbg !23
+  %1 = load i8, i8* @a, align 1, !dbg !24, !tbaa !20
+  %conv1 = sext i8 %1 to i16, !dbg !24
+  ret i16 %conv1, !dbg !25
+}
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+declare i32 @optimize_me_not(...)
+
+define i32 @main() !dbg !26 {
+entry:
+  %call = call signext i16 @b(), !dbg !30
+  ret i32 0, !dbg !31
+}
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9, !10}
+!llvm.ident = !{!11}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 8.0.0 ", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5, nameTableKind: GNU)
+!3 = !DIFile(filename: "patatino.c", directory: "/Users/davide/llvm-monorepo/llvm-mono/build/bin")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
+!10 = !{i32 7, !"PIC Level", i32 2}
+!11 = !{!"clang version 8.0.0 "}
+!12 = distinct !DISubprogram(name: "b", scope: !3, file: !3, line: 2, type: !13, scopeLine: 2, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2, retainedNodes: !16)
+!13 = !DISubroutineType(types: !14)
+!14 = !{!15}
+!15 = !DIBasicType(name: "short", size: 16, encoding: DW_ATE_signed)
+!16 = !{!17}
+!17 = !DILocalVariable(name: "i", scope: !12, file: !3, line: 3, type: !15)
+!18 = !DILocation(line: 3, column: 9, scope: !12)
+!19 = !DILocation(line: 4, column: 7, scope: !12)
+!20 = !{!21, !21, i64 0}
+!21 = !{!"omnipotent char", !22, i64 0}
+!22 = !{!"Simple C/C++ TBAA"}
+!23 = !DILocation(line: 5, column: 3, scope: !12)
+!24 = !DILocation(line: 6, column: 10, scope: !12)
+!25 = !DILocation(line: 6, column: 3, scope: !12)
+!26 = distinct !DISubprogram(name: "main", scope: !3, file: !3, line: 8, type: !27, scopeLine: 8, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2, retainedNodes: !4)
+!27 = !DISubroutineType(types: !28)
+!28 = !{!29}
+!29 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!30 = !DILocation(line: 8, column: 14, scope: !26)
+!31 = !DILocation(line: 8, column: 19, scope: !26)
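
As far as the checks show, the point of this test is debug-info hygiene during dead-code removal: %conv is dead apart from its dbg.value use, so EarlyCSE deletes the load/sext pair and must rewrite the debug intrinsic to undef rather than leave it referencing a deleted value. The shape in outline (metadata operands elided):

; before: %conv has no uses besides the debug intrinsic
;   %0 = load i8, i8* @a
;   %conv = sext i8 %0 to i16
;   call void @llvm.dbg.value(metadata i16 %conv, ...)
; after: the dead pair is gone and the variable location is explicitly unknown
;   call void @llvm.dbg.value(metadata i16 undef, ...)
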
diff --git a/test/Transforms/ExpandMemCmp/X86/memcmp.ll b/test/Transforms/ExpandMemCmp/X86/memcmp.ll
index 37bd850..c1cbcc3 100644
--- a/test/Transforms/ExpandMemCmp/X86/memcmp.ll
+++ b/test/Transforms/ExpandMemCmp/X86/memcmp.ll
@@ -130,11 +130,11 @@
 ; ALL-NEXT:    [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
 ; ALL-NEXT:    br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
 ; ALL:       loadbb1:
-; ALL-NEXT:    [[TMP10:%.*]] = bitcast i8* [[X]] to i16*
-; ALL-NEXT:    [[TMP11:%.*]] = bitcast i8* [[Y]] to i16*
-; ALL-NEXT:    [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 2
-; ALL-NEXT:    [[TMP13:%.*]] = getelementptr i16, i16* [[TMP11]], i16 2
-; ALL-NEXT:    [[TMP14:%.*]] = load i16, i16* [[TMP12]]
+; ALL-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; ALL-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16*
+; ALL-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; ALL-NEXT:    [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i16*
+; ALL-NEXT:    [[TMP14:%.*]] = load i16, i16* [[TMP11]]
 ; ALL-NEXT:    [[TMP15:%.*]] = load i16, i16* [[TMP13]]
 ; ALL-NEXT:    [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
 ; ALL-NEXT:    [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
@@ -178,11 +178,11 @@
 ; X32-NEXT:    [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
 ; X32-NEXT:    br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
 ; X32:       loadbb1:
-; X32-NEXT:    [[TMP10:%.*]] = bitcast i8* [[X]] to i32*
-; X32-NEXT:    [[TMP11:%.*]] = bitcast i8* [[Y]] to i32*
-; X32-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
-; X32-NEXT:    [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
-; X32-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
+; X32-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i32*
+; X32-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP11]]
 ; X32-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP13]]
 ; X32-NEXT:    [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
 ; X32-NEXT:    [[TMP17]] = call i32 @llvm.bswap.i32(i32 [[TMP15]])
@@ -272,11 +272,11 @@
 ; X64-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
 ; X64-NEXT:    br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
 ; X64:       loadbb1:
-; X64-NEXT:    [[TMP10:%.*]] = bitcast i8* [[X]] to i16*
-; X64-NEXT:    [[TMP11:%.*]] = bitcast i8* [[Y]] to i16*
-; X64-NEXT:    [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 4
-; X64-NEXT:    [[TMP13:%.*]] = getelementptr i16, i16* [[TMP11]], i16 4
-; X64-NEXT:    [[TMP14:%.*]] = load i16, i16* [[TMP12]]
+; X64-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16*
+; X64-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64-NEXT:    [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i16*
+; X64-NEXT:    [[TMP14:%.*]] = load i16, i16* [[TMP11]]
 ; X64-NEXT:    [[TMP15:%.*]] = load i16, i16* [[TMP13]]
 ; X64-NEXT:    [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
 ; X64-NEXT:    [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
@@ -324,11 +324,11 @@
 ; X64-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
 ; X64-NEXT:    br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
 ; X64:       loadbb1:
-; X64-NEXT:    [[TMP10:%.*]] = bitcast i8* [[X]] to i32*
-; X64-NEXT:    [[TMP11:%.*]] = bitcast i8* [[Y]] to i32*
-; X64-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 2
-; X64-NEXT:    [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 2
-; X64-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X64-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
+; X64-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64-NEXT:    [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i32*
+; X64-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP11]]
 ; X64-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP13]]
 ; X64-NEXT:    [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
 ; X64-NEXT:    [[TMP17:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP15]])
@@ -394,11 +394,11 @@
 ; X64-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
 ; X64-NEXT:    br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
 ; X64:       loadbb1:
-; X64-NEXT:    [[TMP10:%.*]] = bitcast i8* [[X]] to i64*
-; X64-NEXT:    [[TMP11:%.*]] = bitcast i8* [[Y]] to i64*
-; X64-NEXT:    [[TMP12:%.*]] = getelementptr i64, i64* [[TMP10]], i64 1
-; X64-NEXT:    [[TMP13:%.*]] = getelementptr i64, i64* [[TMP11]], i64 1
-; X64-NEXT:    [[TMP14:%.*]] = load i64, i64* [[TMP12]]
+; X64-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i64*
+; X64-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64-NEXT:    [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i64*
+; X64-NEXT:    [[TMP14:%.*]] = load i64, i64* [[TMP11]]
 ; X64-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP13]]
 ; X64-NEXT:    [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
 ; X64-NEXT:    [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]])
@@ -597,11 +597,11 @@
 ; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
 ; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
 ; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
-; X32-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
-; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
-; X32-NEXT:    [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
-; X32-NEXT:    [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 2
-; X32-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
+; X32-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP7]]
 ; X32-NEXT:    [[TMP11:%.*]] = load i16, i16* [[TMP9]]
 ; X32-NEXT:    [[TMP12:%.*]] = zext i16 [[TMP10]] to i32
 ; X32-NEXT:    [[TMP13:%.*]] = zext i16 [[TMP11]] to i32
@@ -625,11 +625,11 @@
 ; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
 ; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
 ; X64_1LD:       loadbb1:
-; X64_1LD-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
-; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
-; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
-; X64_1LD-NEXT:    [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 2
-; X64_1LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP7]]
 ; X64_1LD-NEXT:    [[TMP11:%.*]] = load i16, i16* [[TMP9]]
 ; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i16 [[TMP10]], [[TMP11]]
 ; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
@@ -645,11 +645,11 @@
 ; X64_2LD-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
 ; X64_2LD-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
 ; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
-; X64_2LD-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
-; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
-; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
-; X64_2LD-NEXT:    [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 2
-; X64_2LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP7]]
 ; X64_2LD-NEXT:    [[TMP11:%.*]] = load i16, i16* [[TMP9]]
 ; X64_2LD-NEXT:    [[TMP12:%.*]] = zext i16 [[TMP10]] to i32
 ; X64_2LD-NEXT:    [[TMP13:%.*]] = zext i16 [[TMP11]] to i32
@@ -668,11 +668,71 @@
 }
 
 define i32 @cmp_eq7(i8* nocapture readonly %x, i8* nocapture readonly %y)  {
-; ALL-LABEL: @cmp_eq7(
-; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 7)
-; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT:    ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq7(
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+; X32-NEXT:    [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
+; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT:    ret i32 [[CONV]]
+;
+; X64_1LD-LABEL: @cmp_eq7(
+; X64_1LD-NEXT:    br label [[LOADBB:%.*]]
+; X64_1LD:       res_block:
+; X64_1LD-NEXT:    br label [[ENDBLOCK:%.*]]
+; X64_1LD:       loadbb:
+; X64_1LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X64_1LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X64_1LD-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X64_1LD-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
+; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64_1LD:       loadbb1:
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X64_1LD-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP10]], [[TMP11]]
+; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64_1LD:       endblock:
+; X64_1LD-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64_1LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64_1LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_1LD-NEXT:    ret i32 [[CONV]]
+;
+; X64_2LD-LABEL: @cmp_eq7(
+; X64_2LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X64_2LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X64_2LD-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X64_2LD-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X64_2LD-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X64_2LD-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X64_2LD-NEXT:    [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X64_2LD-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+; X64_2LD-NEXT:    [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
+; X64_2LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
+; X64_2LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_2LD-NEXT:    ret i32 [[CONV]]
 ;
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
   %cmp = icmp eq i32 %call, 0
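
The cmp_eq7 expansion above uses the overlapping-loads trick: seven bytes are covered by two four-byte loads at offsets 0 and 3 (offset = 7 - 4 = 3), and the one-byte overlap is harmless for an equality test since byte 3 simply participates in both xor terms; the same arithmetic yields offset 3 for the i64 pairs in cmp_eq11 below (11 - 8 = 3). A standalone sketch of the address computation, mirroring the CHECK lines:

define i1 @overlap_sketch(i8* %x, i8* %y) {
  %p0 = bitcast i8* %x to i32*
  %q0 = bitcast i8* %y to i32*
  %a0 = load i32, i32* %p0              ; bytes 0..3
  %b0 = load i32, i32* %q0
  %d0 = xor i32 %a0, %b0
  %x3 = getelementptr i8, i8* %x, i8 3  ; offset 7 - 4 = 3
  %y3 = getelementptr i8, i8* %y, i8 3
  %p1 = bitcast i8* %x3 to i32*
  %q1 = bitcast i8* %y3 to i32*
  %a1 = load i32, i32* %p1              ; bytes 3..6, overlapping byte 3
  %b1 = load i32, i32* %q1
  %d1 = xor i32 %a1, %b1
  %d = or i32 %d0, %d1
  %eq = icmp eq i32 %d, 0               ; equal iff both xor terms are zero
  ret i1 %eq
}
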
@@ -687,11 +747,11 @@
 ; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
 ; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
 ; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
-; X32-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i32*
-; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i32*
-; X32-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
-; X32-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 1
-; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
 ; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
 ; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
 ; X32-NEXT:    [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
@@ -794,11 +854,11 @@
 ; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
 ; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
 ; X64_1LD:       loadbb1:
-; X64_1LD-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
-; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
-; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 4
-; X64_1LD-NEXT:    [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 4
-; X64_1LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP7]]
 ; X64_1LD-NEXT:    [[TMP11:%.*]] = load i16, i16* [[TMP9]]
 ; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i16 [[TMP10]], [[TMP11]]
 ; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
@@ -814,11 +874,11 @@
 ; X64_2LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
 ; X64_2LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
 ; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
-; X64_2LD-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
-; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
-; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 4
-; X64_2LD-NEXT:    [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 4
-; X64_2LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP7]]
 ; X64_2LD-NEXT:    [[TMP11:%.*]] = load i16, i16* [[TMP9]]
 ; X64_2LD-NEXT:    [[TMP12:%.*]] = zext i16 [[TMP10]] to i64
 ; X64_2LD-NEXT:    [[TMP13:%.*]] = zext i16 [[TMP11]] to i64
@@ -837,11 +897,57 @@
 }
 
 define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y)  {
-; ALL-LABEL: @cmp_eq11(
-; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
-; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT:    ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq11(
+; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT:    ret i32 [[CONV]]
+;
+; X64_1LD-LABEL: @cmp_eq11(
+; X64_1LD-NEXT:    br label [[LOADBB:%.*]]
+; X64_1LD:       res_block:
+; X64_1LD-NEXT:    br label [[ENDBLOCK:%.*]]
+; X64_1LD:       loadbb:
+; X64_1LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_1LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
+; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64_1LD:       loadbb1:
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_1LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
+; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64_1LD:       endblock:
+; X64_1LD-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64_1LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64_1LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_1LD-NEXT:    ret i32 [[CONV]]
+;
+; X64_2LD-LABEL: @cmp_eq11(
+; X64_2LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_2LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_2LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_2LD-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
+; X64_2LD-NEXT:    [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
+; X64_2LD-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; X64_2LD-NEXT:    [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
+; X64_2LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
+; X64_2LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_2LD-NEXT:    ret i32 [[CONV]]
 ;
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
   %cmp = icmp eq i32 %call, 0
@@ -868,11 +974,11 @@
 ; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
 ; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
 ; X64_1LD:       loadbb1:
-; X64_1LD-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i32*
-; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i32*
-; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
-; X64_1LD-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 2
-; X64_1LD-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
 ; X64_1LD-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
 ; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP10]], [[TMP11]]
 ; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
@@ -888,11 +994,11 @@
 ; X64_2LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
 ; X64_2LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
 ; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
-; X64_2LD-NEXT:    [[TMP6:%.*]] = bitcast i8* [[X]] to i32*
-; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[Y]] to i32*
-; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
-; X64_2LD-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 2
-; X64_2LD-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
 ; X64_2LD-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
 ; X64_2LD-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP10]] to i64
 ; X64_2LD-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
@@ -911,11 +1017,57 @@
 }
 
 define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y)  {
-; ALL-LABEL: @cmp_eq13(
-; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
-; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT:    ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq13(
+; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT:    ret i32 [[CONV]]
+;
+; X64_1LD-LABEL: @cmp_eq13(
+; X64_1LD-NEXT:    br label [[LOADBB:%.*]]
+; X64_1LD:       res_block:
+; X64_1LD-NEXT:    br label [[ENDBLOCK:%.*]]
+; X64_1LD:       loadbb:
+; X64_1LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_1LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
+; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64_1LD:       loadbb1:
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 5
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 5
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_1LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
+; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64_1LD:       endblock:
+; X64_1LD-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64_1LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64_1LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_1LD-NEXT:    ret i32 [[CONV]]
+;
+; X64_2LD-LABEL: @cmp_eq13(
+; X64_2LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_2LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 5
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 5
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_2LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_2LD-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
+; X64_2LD-NEXT:    [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
+; X64_2LD-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; X64_2LD-NEXT:    [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
+; X64_2LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
+; X64_2LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_2LD-NEXT:    ret i32 [[CONV]]
 ;
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
   %cmp = icmp eq i32 %call, 0
@@ -924,11 +1076,57 @@
 }
 
 define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y)  {
-; ALL-LABEL: @cmp_eq14(
-; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
-; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT:    ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq14(
+; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT:    ret i32 [[CONV]]
+;
+; X64_1LD-LABEL: @cmp_eq14(
+; X64_1LD-NEXT:    br label [[LOADBB:%.*]]
+; X64_1LD:       res_block:
+; X64_1LD-NEXT:    br label [[ENDBLOCK:%.*]]
+; X64_1LD:       loadbb:
+; X64_1LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_1LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
+; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64_1LD:       loadbb1:
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 6
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 6
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_1LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
+; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64_1LD:       endblock:
+; X64_1LD-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64_1LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64_1LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_1LD-NEXT:    ret i32 [[CONV]]
+;
+; X64_2LD-LABEL: @cmp_eq14(
+; X64_2LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_2LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 6
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 6
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_2LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_2LD-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
+; X64_2LD-NEXT:    [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
+; X64_2LD-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; X64_2LD-NEXT:    [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
+; X64_2LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
+; X64_2LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_2LD-NEXT:    ret i32 [[CONV]]
 ;
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
   %cmp = icmp eq i32 %call, 0
@@ -937,11 +1135,57 @@
 }
 
 define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y)  {
-; ALL-LABEL: @cmp_eq15(
-; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
-; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT:    ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq15(
+; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT:    ret i32 [[CONV]]
+;
+; X64_1LD-LABEL: @cmp_eq15(
+; X64_1LD-NEXT:    br label [[LOADBB:%.*]]
+; X64_1LD:       res_block:
+; X64_1LD-NEXT:    br label [[ENDBLOCK:%.*]]
+; X64_1LD:       loadbb:
+; X64_1LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_1LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_1LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_1LD-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
+; X64_1LD-NEXT:    br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64_1LD:       loadbb1:
+; X64_1LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 7
+; X64_1LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_1LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 7
+; X64_1LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_1LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_1LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_1LD-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
+; X64_1LD-NEXT:    br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64_1LD:       endblock:
+; X64_1LD-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64_1LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64_1LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_1LD-NEXT:    ret i32 [[CONV]]
+;
+; X64_2LD-LABEL: @cmp_eq15(
+; X64_2LD-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64_2LD-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64_2LD-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]]
+; X64_2LD-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
+; X64_2LD-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 7
+; X64_2LD-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64_2LD-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 7
+; X64_2LD-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64_2LD-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]]
+; X64_2LD-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]]
+; X64_2LD-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
+; X64_2LD-NEXT:    [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
+; X64_2LD-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; X64_2LD-NEXT:    [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
+; X64_2LD-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
+; X64_2LD-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64_2LD-NEXT:    ret i32 [[CONV]]
 ;
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
   %cmp = icmp eq i32 %call, 0
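The X64_2LD patterns above implement a branchless eq-only memcmp expansion: load one chunk from each operand, xor the pair, or together the per-chunk differences, and compare the combined value against zero. Sizes that are not a multiple of the load width reuse overlapping loads (e.g. the second 8-byte load starts at offset 3 for length 11). A minimal sketch of that shape for a 16-byte compare, with a hypothetical function name:

define i1 @memcmp_eq16_sketch(i8* %x, i8* %y) {
  %x0 = bitcast i8* %x to i64*
  %y0 = bitcast i8* %y to i64*
  %lx0 = load i64, i64* %x0
  %ly0 = load i64, i64* %y0
  %diff0 = xor i64 %lx0, %ly0          ; nonzero iff first 8 bytes differ
  %x8 = getelementptr i8, i8* %x, i64 8
  %y8 = getelementptr i8, i8* %y, i64 8
  %x1 = bitcast i8* %x8 to i64*
  %y1 = bitcast i8* %y8 to i64*
  %lx1 = load i64, i64* %x1
  %ly1 = load i64, i64* %y1
  %diff1 = xor i64 %lx1, %ly1          ; nonzero iff last 8 bytes differ
  %anydiff = or i64 %diff0, %diff1
  %eq = icmp eq i64 %anydiff, 0        ; true iff all 16 bytes are equal
  ret i1 %eq
}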
diff --git a/test/Transforms/HotColdSplit/lifetime-markers-on-inputs.ll b/test/Transforms/HotColdSplit/lifetime-markers-on-inputs.ll
new file mode 100644
index 0000000..c6482f8
--- /dev/null
+++ b/test/Transforms/HotColdSplit/lifetime-markers-on-inputs.ll
@@ -0,0 +1,66 @@
+; RUN: opt -S -hotcoldsplit < %s 2>&1 | FileCheck %s
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+
+declare void @use(i8*)
+
+declare void @cold_use2(i8*, i8*) cold
+
+; CHECK-LABEL: define {{.*}}@foo(
+define void @foo() {
+entry:
+  %local1 = alloca i256
+  %local2 = alloca i256
+  %local1_cast = bitcast i256* %local1 to i8*
+  %local2_cast = bitcast i256* %local2 to i8*
+  br i1 undef, label %normalPath, label %outlinedPath
+
+normalPath:
+  ; These two uses of stack slots are non-overlapping. Based on this alone,
+  ; the stack slots could be merged.
+  call void @llvm.lifetime.start.p0i8(i64 1, i8* %local1_cast)
+  call void @use(i8* %local1_cast)
+  call void @llvm.lifetime.end.p0i8(i64 1, i8* %local1_cast)
+  call void @llvm.lifetime.start.p0i8(i64 1, i8* %local2_cast)
+  call void @use(i8* %local2_cast)
+  call void @llvm.lifetime.end.p0i8(i64 1, i8* %local2_cast)
+  ret void
+
+; CHECK-LABEL: codeRepl:
+; CHECK: [[local1_cast:%.*]] = bitcast i256* %local1 to i8*
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[local1_cast]])
+; CHECK: [[local2_cast:%.*]] = bitcast i256* %local2 to i8*
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[local2_cast]])
+; CHECK: call i1 @foo.cold.1(i8* %local1_cast, i8* %local2_cast)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[local2_cast]])
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[local1_cast]])
+; CHECK: br i1
+
+outlinedPath:
+  ; These two uses of stack slots are overlapping. This should prevent
+  ; merging of stack slots. CodeExtractor must replicate the effects of
+  ; these markers in the caller to inhibit stack coloring.
+  %gep1 = getelementptr inbounds i8, i8* %local1_cast, i64 1
+  call void @llvm.lifetime.start.p0i8(i64 1, i8* %gep1)
+  call void @llvm.lifetime.start.p0i8(i64 1, i8* %local2_cast)
+  call void @cold_use2(i8* %local1_cast, i8* %local2_cast)
+  call void @llvm.lifetime.end.p0i8(i64 1, i8* %gep1)
+  call void @llvm.lifetime.end.p0i8(i64 1, i8* %local2_cast)
+  br i1 undef, label %outlinedPath2, label %outlinedPathExit
+
+outlinedPath2:
+  ; These extra lifetime markers are used to test that we emit only one
+  ; pair of guard markers in the caller per memory object.
+  call void @llvm.lifetime.start.p0i8(i64 1, i8* %local2_cast)
+  call void @use(i8* %local2_cast)
+  call void @llvm.lifetime.end.p0i8(i64 1, i8* %local2_cast)
+  ret void
+
+outlinedPathExit:
+  ret void
+}
+
+; CHECK-LABEL: define {{.*}}@foo.cold.1(
+; CHECK-NOT: @llvm.lifetime
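The CHECK lines above expect CodeExtractor to strip the markers from the outlined function (the CHECK-NOT) and to emit exactly one guard pair per underlying alloca in the caller, with size -1 (unknown) so stack coloring stays conservative across the outlined call. A minimal self-contained sketch of that caller-side shape, with illustrative names:

declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare i1 @outlined.region(i8*)

define void @guard_sketch() {
  %slot = alloca i256
  %slot.i8 = bitcast i256* %slot to i8*
  ; guard pair bracketing the outlined call; i64 -1 means "whole object"
  call void @llvm.lifetime.start.p0i8(i64 -1, i8* %slot.i8)
  %cont = call i1 @outlined.region(i8* %slot.i8)
  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %slot.i8)
  ret void
}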
diff --git a/test/Transforms/IRCE/only-lower-check.ll b/test/Transforms/IRCE/only-lower-check.ll
index e913ea6..ad379fb 100644
--- a/test/Transforms/IRCE/only-lower-check.ll
+++ b/test/Transforms/IRCE/only-lower-check.ll
@@ -3,7 +3,6 @@
 
 ; CHECK: irce: loop has 1 inductive range checks:
 ; CHECK-NEXT: InductiveRangeCheck:
-; CHECK-NEXT:   Kind: RANGE_CHECK_LOWER
 ; CHECK-NEXT:   Begin: (-1 + %n)  Step: -1  End: 2147483647
 ; CHECK-NEXT:   CheckUse:   br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1 Operand: 0
 ; CHECK-NEXT: irce: in function only_lower_check: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
diff --git a/test/Transforms/IRCE/only-upper-check.ll b/test/Transforms/IRCE/only-upper-check.ll
index 859ac4e..45a911b 100644
--- a/test/Transforms/IRCE/only-upper-check.ll
+++ b/test/Transforms/IRCE/only-upper-check.ll
@@ -3,7 +3,6 @@
 
 ; CHECK: irce: loop has 1 inductive range checks:
 ; CHECK-NEXT:InductiveRangeCheck:
-; CHECK-NEXT:  Kind: RANGE_CHECK_UPPER
 ; CHECK-NEXT:  Begin: %offset  Step: 1  End:   %len
 ; CHECK-NEXT:  CheckUse:   br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1 Operand: 0
 ; CHECK-NEXT: irce: in function incrementing: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
diff --git a/test/Transforms/Inline/inline-remark.ll b/test/Transforms/Inline/inline-remark.ll
index d436fa6..4024935 100644
--- a/test/Transforms/Inline/inline-remark.ll
+++ b/test/Transforms/Inline/inline-remark.ll
@@ -1,7 +1,9 @@
-; RUN: opt < %s -inline -inline-remark-attribute --inline-threshold=-2 -S | FileCheck %s
+; RUN: opt < %s -inline -inline-remark-attribute --inline-threshold=0 -S | FileCheck %s
 
 ; Test that the inliner adds inline remark attributes to non-inlined callsites.
 
+declare void @ext()
+
 define void @foo() {
   call void @bar(i1 true)
   ret void
@@ -12,6 +14,7 @@
 
 bb1:
   call void @foo()
+  call void @ext()
   ret void
 
 bb2:
@@ -43,6 +46,6 @@
   ret void
 }
 
-; CHECK: attributes [[ATTR1]] = { "inline-remark"="(cost=-5, threshold=-6)" }
+; CHECK: attributes [[ATTR1]] = { "inline-remark"="(cost=25, threshold=0)" }
 ; CHECK: attributes [[ATTR2]] = { "inline-remark"="(cost=never): recursive" }
 ; CHECK: attributes [[ATTR3]] = { "inline-remark"="unsupported operand bundle; (cost={{.*}}, threshold={{.*}})" }
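For reference, the attribute shape these CHECK lines match is an ordinary string attribute attached to the refused callsite via an attribute group. A minimal sketch with hypothetical function names:

define void @caller.sketch() {
  call void @callee.sketch() #0   ; callsite kept; refusal reason recorded
  ret void
}

declare void @callee.sketch()

attributes #0 = { "inline-remark"="(cost=25, threshold=0)" }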
diff --git a/test/Transforms/Inline/parallel-loop-md-callee.ll b/test/Transforms/Inline/parallel-loop-md-callee.ll
new file mode 100644
index 0000000..4a87c00
--- /dev/null
+++ b/test/Transforms/Inline/parallel-loop-md-callee.ll
@@ -0,0 +1,56 @@
+; RUN: opt -S -inline < %s | FileCheck %s
+;
+; Check that the !llvm.access.group is still present after inlining.
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @Body(i32* nocapture %res, i32* nocapture readnone %c, i32* nocapture readonly %d, i32* nocapture readonly %p, i32 %i) {
+entry:
+  %idxprom = sext i32 %i to i64
+  %arrayidx = getelementptr inbounds i32, i32* %p, i64 %idxprom
+  %0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !0
+  %cmp = icmp eq i32 %0, 0
+  %arrayidx2 = getelementptr inbounds i32, i32* %res, i64 %idxprom
+  %1 = load i32, i32* %arrayidx2, align 4, !llvm.access.group !0
+  br i1 %cmp, label %cond.end, label %cond.false
+
+cond.false:
+  %arrayidx6 = getelementptr inbounds i32, i32* %d, i64 %idxprom
+  %2 = load i32, i32* %arrayidx6, align 4, !llvm.access.group !0
+  %add = add nsw i32 %2, %1
+  br label %cond.end
+
+cond.end:
+  %cond = phi i32 [ %add, %cond.false ], [ %1, %entry ]
+  store i32 %cond, i32* %arrayidx2, align 4
+  ret void
+}
+
+define void @Test(i32* %res, i32* %c, i32* %d, i32* %p, i32 %n) {
+entry:
+  br label %for.cond
+
+for.cond:
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %cmp = icmp slt i32 %i.0, 1600
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:
+  call void @Body(i32* %res, i32* undef, i32* %d, i32* %p, i32 %i.0), !llvm.access.group !0
+  %inc = add nsw i32 %i.0, 1
+  br label %for.cond, !llvm.loop !1
+
+for.end:
+  ret void
+}
+
+!0 = distinct !{}                                          ; access group
+!1 = distinct !{!1, !{!"llvm.loop.parallel_accesses", !0}} ; LoopID
+
+
+; CHECK-LABEL: @Test
+; CHECK: load i32,{{.*}}, !llvm.access.group !0
+; CHECK: load i32,{{.*}}, !llvm.access.group !0
+; CHECK: load i32,{{.*}}, !llvm.access.group !0
+; CHECK: store i32 {{.*}}, !llvm.access.group !0
+; CHECK: br label %for.cond, !llvm.loop !1
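The scheme under test: a memory access opts into an access group by carrying !llvm.access.group, and a loop asserts that every access in a group is parallel by listing that group under "llvm.loop.parallel_accesses" in its LoopID. A minimal standalone sketch (illustrative names only):

define void @parallel_sketch(i32* %p) {
entry:
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %addr = getelementptr inbounds i32, i32* %p, i64 %i
  %v = load i32, i32* %addr, !llvm.access.group !0
  %i.next = add nuw nsw i64 %i, 1
  %cont = icmp ult i64 %i.next, 16
  br i1 %cont, label %loop, label %exit, !llvm.loop !1

exit:
  ret void
}

!0 = distinct !{}                                          ; access group
!1 = distinct !{!1, !{!"llvm.loop.parallel_accesses", !0}} ; LoopID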
diff --git a/test/Transforms/Inline/parallel-loop-md-merge.ll b/test/Transforms/Inline/parallel-loop-md-merge.ll
new file mode 100644
index 0000000..a53efb7
--- /dev/null
+++ b/test/Transforms/Inline/parallel-loop-md-merge.ll
@@ -0,0 +1,78 @@
+; RUN: opt -always-inline -globalopt -S < %s | FileCheck %s
+;
+; static void __attribute__((always_inline)) callee(long n, double A[static const restrict n], long i) {
+;   for (long j = 0; j < n; j += 1)
+;     A[i * n + j] = 42;
+; }
+;
+; void caller(long n, double A[static const restrict n]) {
+;   for (long i = 0; i < n; i += 1)
+;     callee(n, A, i);
+; }
+;
+; Check that the access groups (llvm.access.group) are correctly merged.
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define internal void @callee(i64 %n, double* noalias nonnull %A, i64 %i) #0 {
+entry:
+  br label %for.cond
+
+for.cond:
+  %j.0 = phi i64 [ 0, %entry ], [ %add1, %for.body ]
+  %cmp = icmp slt i64 %j.0, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:
+  %mul = mul nsw i64 %i, %n
+  %add = add nsw i64 %mul, %j.0
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %add
+  store double 4.200000e+01, double* %arrayidx, align 8, !llvm.access.group !6
+  %add1 = add nuw nsw i64 %j.0, 1
+  br label %for.cond, !llvm.loop !7
+
+for.end:
+  ret void
+}
+
+attributes #0 = { alwaysinline }
+
+!6 = distinct !{}       ; access group
+!7 = distinct !{!7, !9} ; LoopID
+!9 = !{!"llvm.loop.parallel_accesses", !6}
+
+
+define void @caller(i64 %n, double* noalias nonnull %A) {
+entry:
+  br label %for.cond
+
+for.cond:
+  %i.0 = phi i64 [ 0, %entry ], [ %add, %for.body ]
+  %cmp = icmp slt i64 %i.0, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:
+  call void @callee(i64 %n, double* %A, i64 %i.0), !llvm.access.group !10
+  %add = add nuw nsw i64 %i.0, 1
+  br label %for.cond, !llvm.loop !11
+
+for.end:
+  ret void
+}
+
+!10 = distinct !{}         ; access group
+!11 = distinct !{!11, !12} ; LoopID
+!12 = !{!"llvm.loop.parallel_accesses", !10}
+
+
+; CHECK: store double 4.200000e+01, {{.*}} !llvm.access.group ![[ACCESS_GROUP_LIST_3:[0-9]+]]
+; CHECK: br label %for.cond.i, !llvm.loop ![[LOOP_INNER:[0-9]+]]
+; CHECK: br label %for.cond, !llvm.loop ![[LOOP_OUTER:[0-9]+]]
+
+; CHECK: ![[ACCESS_GROUP_LIST_3]] = !{![[ACCESS_GROUP_INNER:[0-9]+]], ![[ACCESS_GROUP_OUTER:[0-9]+]]}
+; CHECK: ![[ACCESS_GROUP_INNER]] = distinct !{}
+; CHECK: ![[ACCESS_GROUP_OUTER]] = distinct !{}
+; CHECK: ![[LOOP_INNER]] = distinct !{![[LOOP_INNER]], ![[ACCESSES_INNER:[0-9]+]]}
+; CHECK: ![[ACCESSES_INNER]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_INNER]]}
+; CHECK: ![[LOOP_OUTER]] = distinct !{![[LOOP_OUTER]], ![[ACCESSES_OUTER:[0-9]+]]}
+; CHECK: ![[ACCESSES_OUTER]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_OUTER]]}
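When inlining nests one parallel loop inside another, a single access can end up in both loops' groups at once; its !llvm.access.group operand then becomes a list node whose members are the individual distinct groups, which is what ACCESS_GROUP_LIST_3 above matches. A minimal sketch of the merged form (illustrative names):

define void @merged_sketch(double* %p) {
  store double 4.200000e+01, double* %p, align 8, !llvm.access.group !2
  ret void
}

!0 = distinct !{} ; inner loop's access group
!1 = distinct !{} ; outer loop's access group
!2 = !{!0, !1}    ; combined: the store belongs to both groups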
diff --git a/test/Transforms/Inline/parallel-loop-md.ll b/test/Transforms/Inline/parallel-loop-md.ll
index abbacc9..a55392d 100644
--- a/test/Transforms/Inline/parallel-loop-md.ll
+++ b/test/Transforms/Inline/parallel-loop-md.ll
@@ -37,22 +37,22 @@
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  call void @Body(i32* %res, i32* undef, i32* %d, i32* %p, i32 %i.0), !llvm.mem.parallel_loop_access !0
+  call void @Body(i32* %res, i32* undef, i32* %d, i32* %p, i32 %i.0), !llvm.access.group !0
   %inc = add nsw i32 %i.0, 1
-  br label %for.cond, !llvm.loop !0
+  br label %for.cond, !llvm.loop !1
 
 for.end:                                          ; preds = %for.cond
   ret void
 }
 
 ; CHECK-LABEL: @Test
-; CHECK: load i32,{{.*}}, !llvm.mem.parallel_loop_access !0
-; CHECK: load i32,{{.*}}, !llvm.mem.parallel_loop_access !0
-; CHECK: load i32,{{.*}}, !llvm.mem.parallel_loop_access !0
-; CHECK: store i32{{.*}}, !llvm.mem.parallel_loop_access !0
-; CHECK: br label %for.cond, !llvm.loop !0
+; CHECK: load i32,{{.*}}, !llvm.access.group !0
+; CHECK: load i32,{{.*}}, !llvm.access.group !0
+; CHECK: load i32,{{.*}}, !llvm.access.group !0
+; CHECK: store i32{{.*}}, !llvm.access.group !0
+; CHECK: br label %for.cond, !llvm.loop !1
 
 attributes #0 = { norecurse nounwind uwtable }
 
-!0 = distinct !{!0}
-
+!0 = distinct !{}
+!1 = distinct !{!1, !{!"llvm.loop.parallel_accesses", !0}}
diff --git a/test/Transforms/InstCombine/AMDGPU/amdgcn-demanded-vector-elts.ll b/test/Transforms/InstCombine/AMDGPU/amdgcn-demanded-vector-elts.ll
index af34a3f..75e8618 100644
--- a/test/Transforms/InstCombine/AMDGPU/amdgcn-demanded-vector-elts.ll
+++ b/test/Transforms/InstCombine/AMDGPU/amdgcn-demanded-vector-elts.ll
@@ -316,6 +316,966 @@
 declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #1
 
 ; --------------------------------------------------------------------
+; llvm.amdgcn.raw.buffer.load
+; --------------------------------------------------------------------
+
+; CHECK-LABEL: @raw_buffer_load_f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @raw_buffer_load_f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret float %data
+}
+
+; CHECK-LABEL: @raw_buffer_load_v1f32(
+; CHECK-NEXT: %data = call <1 x float> @llvm.amdgcn.raw.buffer.load.v1f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <1 x float> %data
+define amdgpu_ps <1 x float> @raw_buffer_load_v1f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <1 x float> @llvm.amdgcn.raw.buffer.load.v1f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <1 x float> %data
+}
+
+; CHECK-LABEL: @raw_buffer_load_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float> %data
+define amdgpu_ps <2 x float> @raw_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <2 x float> %data
+}
+
+; CHECK-LABEL: @raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <4 x float> %data
+define amdgpu_ps <4 x float> @raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <4 x float> %data
+}
+
+; CHECK-LABEL: @extract_elt0_raw_buffer_load_v2f32(
+; CHECK: %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_raw_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_raw_buffer_load_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_raw_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <2 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <4 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt3_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 3
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt3_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 3
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt2_elt3_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt2_elt3_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_elt2_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_elt3_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt1_elt2_elt3_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt2_elt3_raw_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt2_elt3_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_raw_buffer_load_v3f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_raw_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <3 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_raw_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_raw_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_raw_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <3 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_raw_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_raw_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_raw_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_raw_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_raw_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract0_bitcast_raw_buffer_load_v4f32(
+; CHECK-NEXT: %tmp = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast float %tmp to i32
+; CHECK-NEXT: ret i32 %tmp2
+define i32 @extract0_bitcast_raw_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x float> %tmp to <4 x i32>
+  %tmp2 = extractelement <4 x i32> %tmp1, i32 0
+  ret i32 %tmp2
+}
+
+; CHECK-LABEL: @extract0_bitcast_raw_buffer_load_v4i32(
+; CHECK-NEXT: %tmp = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast i32 %tmp to float
+; CHECK-NEXT: ret float %tmp2
+define float @extract0_bitcast_raw_buffer_load_v4i32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x i32> %tmp to <4 x float>
+  %tmp2 = extractelement <4 x float> %tmp1, i32 0
+  ret float %tmp2
+}
+
+; CHECK-LABEL: @preserve_metadata_extract_elt0_raw_buffer_load_v2f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @preserve_metadata_extract_elt0_raw_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32) #1
+declare <1 x float> @llvm.amdgcn.raw.buffer.load.v1f32(<4 x i32>, i32, i32, i32) #1
+declare <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32>, i32, i32, i32) #1
+declare <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32>, i32, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32>, i32, i32, i32) #1
+
+declare <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32>, i32, i32, i32) #1
+
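+
The pattern across the section above: when only some leading lanes of a vector raw.buffer.load are demanded, instcombine is expected to shrink the call to a narrower overload (the f32 scalar when only lane 0 is used, v2f32 when lane 1 is the highest lane used), while calls whose demanded lanes extend further are left untouched. A sketch of the lane-0 case after the rewrite, reusing the scalar overload declared above (hypothetical function name):

define amdgpu_ps float @shrunk_lane0_sketch(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs) {
  ; was: a <4 x float> load followed by extractelement ... i32 0;
  ; only lane 0 is demanded, so the scalar overload suffices
  %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 0)
  ret float %data
}

declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32)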
+; --------------------------------------------------------------------
+; llvm.amdgcn.raw.buffer.load.format
+; --------------------------------------------------------------------
+
+; CHECK-LABEL: @raw_buffer_load_format_f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @raw_buffer_load_format_f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret float %data
+}
+
+; CHECK-LABEL: @raw_buffer_load_format_v1f32(
+; CHECK-NEXT: %data = call <1 x float> @llvm.amdgcn.raw.buffer.load.format.v1f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <1 x float> %data
+define amdgpu_ps <1 x float> @raw_buffer_load_format_v1f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <1 x float> @llvm.amdgcn.raw.buffer.load.format.v1f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <1 x float> %data
+}
+
+; CHECK-LABEL: @raw_buffer_load_format_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float> %data
+define amdgpu_ps <2 x float> @raw_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <2 x float> %data
+}
+
+; CHECK-LABEL: @raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <4 x float> %data
+define amdgpu_ps <4 x float> @raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <4 x float> %data
+}
+
+; CHECK-LABEL: @extract_elt0_raw_buffer_load_format_v2f32(
+; CHECK: %data = call float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_raw_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_raw_buffer_load_format_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_raw_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <2 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <4 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt3_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 3
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt3_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 3
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt2_elt3_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt2_elt3_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_elt2_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_elt3_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt1_elt2_elt3_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt2_elt3_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt2_elt3_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_raw_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_raw_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <3 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_raw_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_raw_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_raw_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <3 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_raw_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_raw_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_raw_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_raw_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_raw_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract0_bitcast_raw_buffer_load_format_v4f32(
+; CHECK-NEXT: %tmp = call float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast float %tmp to i32
+; CHECK-NEXT: ret i32 %tmp2
+define i32 @extract0_bitcast_raw_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x float> %tmp to <4 x i32>
+  %tmp2 = extractelement <4 x i32> %tmp1, i32 0
+  ret i32 %tmp2
+}
+
+; CHECK-LABEL: @extract0_bitcast_raw_buffer_load_format_v4i32(
+; CHECK-NEXT: %tmp = call i32 @llvm.amdgcn.raw.buffer.load.format.i32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast i32 %tmp to float
+; CHECK-NEXT: ret float %tmp2
+define float @extract0_bitcast_raw_buffer_load_format_v4i32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x i32> @llvm.amdgcn.raw.buffer.load.format.v4i32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x i32> %tmp to <4 x float>
+  %tmp2 = extractelement <4 x float> %tmp1, i32 0
+  ret float %tmp2
+}
+
+; CHECK-LABEL: @preserve_metadata_extract_elt0_raw_buffer_load_format_v2f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @preserve_metadata_extract_elt0_raw_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+declare float @llvm.amdgcn.raw.buffer.load.format.f32(<4 x i32>, i32, i32, i32) #1
+declare <1 x float> @llvm.amdgcn.raw.buffer.load.format.v1f32(<4 x i32>, i32, i32, i32) #1
+declare <2 x float> @llvm.amdgcn.raw.buffer.load.format.v2f32(<4 x i32>, i32, i32, i32) #1
+declare <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32>, i32, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.raw.buffer.load.format.v4f32(<4 x i32>, i32, i32, i32) #1
+
+declare <4 x i32> @llvm.amdgcn.raw.buffer.load.format.v4i32(<4 x i32>, i32, i32, i32) #1
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.struct.buffer.load
+; --------------------------------------------------------------------
+
+; CHECK-LABEL: @struct_buffer_load_f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @struct_buffer_load_f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret float %data
+}
+
+; CHECK-LABEL: @struct_buffer_load_v1f32(
+; CHECK-NEXT: %data = call <1 x float> @llvm.amdgcn.struct.buffer.load.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <1 x float> %data
+define amdgpu_ps <1 x float> @struct_buffer_load_v1f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <1 x float> @llvm.amdgcn.struct.buffer.load.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <1 x float> %data
+}
+
+; CHECK-LABEL: @struct_buffer_load_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float> %data
+define amdgpu_ps <2 x float> @struct_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <2 x float> %data
+}
+
+; CHECK-LABEL: @struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <4 x float> %data
+define amdgpu_ps <4 x float> @struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <4 x float> %data
+}
+
+; CHECK-LABEL: @extract_elt0_struct_buffer_load_v2f32(
+; CHECK: %data = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_struct_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_struct_buffer_load_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_struct_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <2 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <4 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt3_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 3
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt3_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 3
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt2_elt3_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt2_elt3_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_elt2_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_elt3_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt1_elt2_elt3_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt2_elt3_struct_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt2_elt3_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_struct_buffer_load_v3f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_struct_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <3 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_struct_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_struct_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_struct_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <3 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_struct_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_struct_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_struct_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_struct_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_struct_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
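+; The two bitcast tests below check that the shrinking fold also looks through
+; a bitcast of the loaded vector: lane 0 of the bitcast result becomes a
+; scalar load of the original element type followed by a scalar bitcast.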
+; CHECK-LABEL: @extract0_bitcast_struct_buffer_load_v4f32(
+; CHECK-NEXT: %tmp = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast float %tmp to i32
+; CHECK-NEXT: ret i32 %tmp2
+define i32 @extract0_bitcast_struct_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x float> %tmp to <4 x i32>
+  %tmp2 = extractelement <4 x i32> %tmp1, i32 0
+  ret i32 %tmp2
+}
+
+; CHECK-LABEL: @extract0_bitcast_struct_buffer_load_v4i32(
+; CHECK-NEXT: %tmp = call i32 @llvm.amdgcn.struct.buffer.load.i32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast i32 %tmp to float
+; CHECK-NEXT: ret float %tmp2
+define float @extract0_bitcast_struct_buffer_load_v4i32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x i32> @llvm.amdgcn.struct.buffer.load.v4i32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x i32> %tmp to <4 x float>
+  %tmp2 = extractelement <4 x float> %tmp1, i32 0
+  ret float %tmp2
+}
+
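+; Call-site metadata (here !fpmath) is expected to be preserved on the
+; shrunken call.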
+; CHECK-LABEL: @preserve_metadata_extract_elt0_struct_buffer_load_v2f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @preserve_metadata_extract_elt0_struct_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+declare float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <1 x float> @llvm.amdgcn.struct.buffer.load.v1f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32>, i32, i32, i32, i32) #1
+
+declare <4 x i32> @llvm.amdgcn.struct.buffer.load.v4i32(<4 x i32>, i32, i32, i32, i32) #1
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.struct.buffer.load.format
+; --------------------------------------------------------------------
+
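+; The format variants below are expected to follow the same shrinking rules as
+; the plain struct.buffer.load tests above.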
+; CHECK-LABEL: @struct_buffer_load_format_f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @struct_buffer_load_format_f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret float %data
+}
+
+; CHECK-LABEL: @struct_buffer_load_format_v1f32(
+; CHECK-NEXT: %data = call <1 x float> @llvm.amdgcn.struct.buffer.load.format.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <1 x float> %data
+define amdgpu_ps <1 x float> @struct_buffer_load_format_v1f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <1 x float> @llvm.amdgcn.struct.buffer.load.format.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <1 x float> %data
+}
+
+; CHECK-LABEL: @struct_buffer_load_format_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float> %data
+define amdgpu_ps <2 x float> @struct_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <2 x float> %data
+}
+
+; CHECK-LABEL: @struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <4 x float> %data
+define amdgpu_ps <4 x float> @struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  ret <4 x float> %data
+}
+
+; CHECK-LABEL: @extract_elt0_struct_buffer_load_format_v2f32(
+; CHECK: %data = call float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_struct_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_struct_buffer_load_format_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_struct_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <2 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <4 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt3_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 3
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt3_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <4 x float> %data, i32 3
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt2_elt3_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt2_elt3_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_elt2_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_elt3_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt1_elt2_elt3_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt2_elt3_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt2_elt3_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+  ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_struct_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_struct_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt0 = extractelement <3 x float> %data, i32 0
+  ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_struct_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_struct_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 1
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_struct_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %elt1 = extractelement <3 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_struct_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %elt1 = extractelement <3 x float> %data, i32 2
+  ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_struct_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_struct_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_struct_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_struct_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract0_bitcast_struct_buffer_load_format_v4f32(
+; CHECK-NEXT: %tmp = call float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast float %tmp to i32
+; CHECK-NEXT: ret i32 %tmp2
+define i32 @extract0_bitcast_struct_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x float> %tmp to <4 x i32>
+  %tmp2 = extractelement <4 x i32> %tmp1, i32 0
+  ret i32 %tmp2
+}
+
+; CHECK-LABEL: @extract0_bitcast_struct_buffer_load_format_v4i32(
+; CHECK-NEXT: %tmp = call i32 @llvm.amdgcn.struct.buffer.load.format.i32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+; CHECK-NEXT: %tmp2 = bitcast i32 %tmp to float
+; CHECK-NEXT: ret float %tmp2
+define float @extract0_bitcast_struct_buffer_load_format_v4i32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %tmp = call <4 x i32> @llvm.amdgcn.struct.buffer.load.format.v4i32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent)
+  %tmp1 = bitcast <4 x i32> %tmp to <4 x float>
+  %tmp2 = extractelement <4 x float> %tmp1, i32 0
+  ret float %tmp2
+}
+
+; CHECK-LABEL: @preserve_metadata_extract_elt0_struct_buffer_load_format_v2f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @preserve_metadata_extract_elt0_struct_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent) #0 {
+  %data = call <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i32 %sofs, i32 %coherent), !fpmath !0
+  %elt0 = extractelement <2 x float> %data, i32 0
+  ret float %elt0
+}
+
+declare float @llvm.amdgcn.struct.buffer.load.format.f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <1 x float> @llvm.amdgcn.struct.buffer.load.format.v1f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <2 x float> @llvm.amdgcn.struct.buffer.load.format.v2f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32>, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.struct.buffer.load.format.v4f32(<4 x i32>, i32, i32, i32, i32) #1
+
+declare <4 x i32> @llvm.amdgcn.struct.buffer.load.format.v4i32(<4 x i32>, i32, i32, i32, i32) #1
+
+; --------------------------------------------------------------------
 ; llvm.amdgcn.image.sample
 ; --------------------------------------------------------------------
 
@@ -328,6 +1288,28 @@
   ret float %elt0
 }
 
+; Check that the intrinsic remains unchanged when TFE (texture fail enable) is set
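+; (With TFE or LWE the return type is { <4 x float>, i32 }, carrying an extra
+; status word, so the demanded-elements shrinking does not apply.)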
+; CHECK-LABEL: @extract_elt0_image_sample_1d_v4f32_f32_tfe(
+; CHECK-NEXT: %data = call { <4 x float>, i32 } @llvm.amdgcn.image.sample.1d.sl_v4f32i32s.f32(i32 15, float %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i1 false, i32 1, i32 0)
+; CHECK: ret float %elt0
+define amdgpu_ps float @extract_elt0_image_sample_1d_v4f32_f32_tfe(float %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 {
+  %data = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.sl_v4f32i32s.f32(i32 15, float %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i1 false, i32 1, i32 0)
+  %data.vec = extractvalue {<4 x float>,i32} %data, 0
+  %elt0 = extractelement <4 x float> %data.vec, i32 0
+  ret float %elt0
+}
+
+; Check that the intrinsic remains unchanged when LWE (LOD warning enable) is set
+; CHECK-LABEL: @extract_elt0_image_sample_1d_v4f32_f32_lwe(
+; CHECK-NEXT: %data = call { <4 x float>, i32 } @llvm.amdgcn.image.sample.1d.sl_v4f32i32s.f32(i32 15, float %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i1 false, i32 2, i32 0)
+; CHECK: ret float %elt0
+define amdgpu_ps float @extract_elt0_image_sample_1d_v4f32_f32_lwe(float %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 {
+  %data = call {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.sl_v4f32i32s.f32(i32 15, float %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i1 false, i32 2, i32 0)
+  %data.vec = extractvalue {<4 x float>,i32} %data, 0
+  %elt0 = extractelement <4 x float> %data.vec, i32 0
+  ret float %elt0
+}
+
 ; CHECK-LABEL: @extract_elt0_image_sample_2d_v4f32_f32(
 ; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.2d.f32.f32(i32 1, float %s, float %t, <8 x i32> %sampler, <4 x i32> %rsrc, i1 false, i32 0, i32 0)
 ; CHECK-NEXT: ret float %data
@@ -506,6 +1488,7 @@
 }
 
 declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.sample.1d.sl_v4f32i32s.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
 declare <4 x float> @llvm.amdgcn.image.sample.1darray.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
diff --git a/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll b/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
index 0a179d1..b8e19e2 100644
--- a/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
+++ b/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
@@ -1406,6 +1406,7 @@
 
 declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) nounwind readnone convergent
 declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) nounwind readnone convergent
+declare i64 @llvm.amdgcn.icmp.i1(i1, i1, i32) nounwind readnone convergent
 
 ; Make sure there's no crash for invalid input
 ; CHECK-LABEL: @invalid_nonconstant_icmp_code(
@@ -1815,6 +1816,198 @@
   ret i64 %mask
 }
 
+; 1-bit NE comparisons
+
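+; llvm.amdgcn.icmp.i1 with compare code 33 (ICMP_NE) and a false second
+; operand effectively ballots the i1 operand; these tests check that the
+; feeding icmp/fcmp and the intrinsic call survive InstCombine for a range of
+; operand types.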
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i1(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_eq_i1(i32 %a, i32 %b) {
+  %cmp = icmp eq i32 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ne_i1(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_ne_i1(i32 %a, i32 %b) {
+  %cmp = icmp ne i32 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_sle_i1(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_sle_i1(i32 %a, i32 %b) {
+  %cmp = icmp sle i32 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ugt_i64(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_ugt_i64(i64 %a, i64 %b) {
+  %cmp = icmp ugt i64 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_swap_i64(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_ult_swap_i64(i64 %a, i64 %b) {
+  %cmp = icmp ugt i64 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 false, i1 %cmp, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_oeq_f32(
+; CHECK-NEXT: fcmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_fcmp_oeq_f32(float %a, float %b) {
+  %cmp = fcmp oeq float %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_une_f32(
+; CHECK-NEXT: fcmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_fcmp_une_f32(float %a, float %b) {
+  %cmp = fcmp une float %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_olt_f64(
+; CHECK-NEXT: fcmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_fcmp_olt_f64(double %a, double %b) {
+  %cmp = fcmp olt double %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i4(
+; CHECK-NEXT: icmp
+; CHECK: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_eq_i4(i4 %a, i4 %b) {
+  %cmp = icmp eq i4 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i8(
+; CHECK-NEXT: icmp
+; CHECK: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_eq_i8(i8 %a, i8 %b) {
+  %cmp = icmp eq i8 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i16(
+; CHECK-NEXT: icmp
+; CHECK: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_eq_i16(i16 %a, i16 %b) {
+  %cmp = icmp eq i16 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i36(
+; CHECK-NEXT: icmp
+; CHECK: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_eq_i36(i36 %a, i36 %b) {
+  %cmp = icmp eq i36 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i128(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_eq_i128(i128 %a, i128 %b) {
+  %cmp = icmp eq i128 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_oeq_f16(
+; CHECK-NEXT: fcmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_fcmp_oeq_f16(half %a, half %b) {
+  %cmp = fcmp oeq half %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_oeq_f128(
+; CHECK-NEXT: fcmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_fcmp_oeq_f128(fp128 %a, fp128 %b) {
+  %cmp = fcmp oeq fp128 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_slt_i4(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_slt_i4(i4 %a, i4 %b) {
+  %cmp = icmp slt i4 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_slt_i8(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_slt_i8(i8 %a, i8 %b) {
+  %cmp = icmp slt i8 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_slt_i16(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_slt_i16(i16 %a, i16 %b) {
+  %cmp = icmp slt i16 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_i4(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_ult_i4(i4 %a, i4 %b) {
+  %cmp = icmp ult i4 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_i8(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_ult_i8(i8 %a, i8 %b) {
+  %cmp = icmp ult i8 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_i16(
+; CHECK-NEXT: icmp
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+define i64 @fold_icmp_i1_ne_0_icmp_ult_i16(i16 %a, i16 %b) {
+  %cmp = icmp ult i16 %a, %b
+  %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33)
+  ret i64 %mask
+}
+
 ; --------------------------------------------------------------------
 ; llvm.amdgcn.fcmp
 ; --------------------------------------------------------------------
diff --git a/test/Transforms/InstCombine/X86/x86-adds-subs.ll b/test/Transforms/InstCombine/X86/x86-adds-subs.ll
deleted file mode 100644
index 54cccbe..0000000
--- a/test/Transforms/InstCombine/X86/x86-adds-subs.ll
+++ /dev/null
@@ -1,351 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s
-
-define <16 x i8> @sse2_adds_b_constant() {
-; CHECK-LABEL: @sse2_adds_b_constant(
-; CHECK-NEXT: ret <16 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sse2_adds_b_constant_underflow() {
-; CHECK-LABEL: @sse2_adds_b_constant_underflow(
-; CHECK-NEXT: ret <16 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sse2_adds_b_constant_overflow() {
-; CHECK-LABEL: @sse2_adds_b_constant_overflow(
-; CHECK-NEXT: ret <16 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127>
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sse2_adds_b_constant_undefs() {
-; CHECK-LABEL: @sse2_adds_b_constant_undefs(
-; CHECK-NEXT: ret <16 x i8> <i8 undef, i8 4, i8 6, i8 8, i8 10, i8 undef, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <32 x i8> @avx2_adds_b_constant() {
-; CHECK-LABEL: @avx2_adds_b_constant(
-; CHECK-NEXT: ret <32 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @avx2_adds_b_constant_underflow() {
-; CHECK-LABEL: @avx2_adds_b_constant_underflow(
-; CHECK-NEXT: ret <32 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @avx2_adds_b_constant_overflow() {
-; CHECK-LABEL: @avx2_adds_b_constant_overflow(
-; CHECK-NEXT: ret <32 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127>
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @avx2_adds_b_constant_undefs() {
-; CHECK-LABEL: @avx2_adds_b_constant_undefs(
-; CHECK-NEXT: ret <32 x i8> <i8 undef, i8 4, i8 6, i8 8, i8 10, i8 undef, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 undef, i8 4, i8 6, i8 8, i8 10, i8 undef, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_adds_b_constant() {
-; CHECK-LABEL: @avx512_mask_adds_b_constant(
-; CHECK-NEXT: ret <64 x i8> <i8 2, i8 0, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>, <64 x i8> zeroinitializer, i64 -3)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_adds_b_constant_underflow() {
-; CHECK-LABEL: @avx512_mask_adds_b_constant_underflow(
-; CHECK-NEXT: ret <64 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_adds_b_constant_overflow() {
-; CHECK-LABEL: @avx512_mask_adds_b_constant_overflow(
-; CHECK-NEXT: ret <64 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120>, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_adds_b_constant_undefs() {
-; CHECK-LABEL: @avx512_mask_adds_b_constant_undefs(
-; CHECK-NEXT: ret <64 x i8> <i8 undef, i8 4, i8 6, i8 8, i8 10, i8 undef, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 undef, i8 4, i8 6, i8 8, i8 10, i8 undef, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 undef, i8 4, i8 6, i8 8, i8 10, i8 undef, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 undef, i8 4, i8 6, i8 8, i8 10, i8 undef, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %1
-}
-
-define <8 x i16> @sse2_adds_w_constant() {
-; CHECK-LABEL: @sse2_adds_w_constant(
-; CHECK-NEXT: ret <8 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16>
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sse2_adds_w_constant_underflow() {
-; CHECK-LABEL: @sse2_adds_w_constant_underflow(
-; CHECK-NEXT: ret <8 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768>
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -32107>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -12188>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sse2_adds_w_constant_overflow() {
-; CHECK-LABEL: @sse2_adds_w_constant_overflow(
-; CHECK-NEXT: ret <8 x i16> <i16 2, i16 32767, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16>
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> <i16 1, i16 8248, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> <i16 1, i16 25192, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sse2_adds_w_constant_undefs() {
-; CHECK-LABEL: @sse2_adds_w_constant_undefs(
-; CHECK-NEXT: ret <8 x i16> <i16 undef, i16 4, i16 6, i16 8, i16 10, i16 undef, i16 14, i16 16>
-  %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8>, <8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <16 x i16> @avx2_adds_w_constant() {
-; CHECK-LABEL: @avx2_adds_w_constant(
-; CHECK-NEXT: ret <16 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @avx2_adds_w_constant_underflow() {
-; CHECK-LABEL: @avx2_adds_w_constant_underflow(
-; CHECK-NEXT: ret <16 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -21107, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -15188, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @avx2_adds_w_constant_overflow() {
-; CHECK-LABEL: @avx2_adds_w_constant_overflow(
-; CHECK-NEXT: ret <16 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 32767, i16 24, i16 26, i16 28, i16 30, i16 32767>
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20125, i16 12, i16 13, i16 14, i16 15, i16 20160>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20230, i16 12, i16 13, i16 14, i16 15, i16 20120>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @avx2_adds_w_constant_undefs() {
-; CHECK-LABEL: @avx2_adds_w_constant_undefs(
-; CHECK-NEXT: ret <16 x i16> <i16 undef, i16 4, i16 6, i16 8, i16 10, i16 undef, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_adds_w_constant() {
-; CHECK-LABEL: @avx512_mask_adds_w_constant(
-; CHECK-NEXT: ret <32 x i16> <i16 2, i16 0, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>, <32 x i16> zeroinitializer, i32 -3)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_adds_w_constant_underflow() {
-; CHECK-LABEL: @avx512_mask_adds_w_constant_underflow(
-; CHECK-NEXT: ret <32 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20107, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20107, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20168, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20248, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>, <32 x i16> zeroinitializer, i32 -1)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_adds_w_constant_overflow() {
-; CHECK-LABEL: @avx512_mask_adds_w_constant_overflow(
-; CHECK-NEXT: ret <32 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 32767, i16 24, i16 26, i16 28, i16 30, i16 32767, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 32767, i16 24, i16 26, i16 28, i16 30, i16 32767>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20125, i16 12, i16 13, i16 14, i16 15, i16 20200, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20125, i16 12, i16 13, i16 14, i16 15, i16 20200>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20200, i16 12, i16 13, i16 14, i16 15, i16 20120, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20211, i16 12, i16 13, i16 14, i16 15, i16 20120>, <32 x i16> zeroinitializer, i32 -1)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_adds_w_constant_undefs() {
-; CHECK-LABEL: @avx512_mask_adds_w_constant_undefs(
-; CHECK-NEXT: ret <32 x i16> <i16 undef, i16 4, i16 6, i16 8, i16 10, i16 undef, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33, i16 undef, i16 4, i16 6, i16 8, i16 10, i16 undef, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>, <32 x i16> zeroinitializer, i32 -1)
-  ret <32 x i16> %1
-}
-
-define <16 x i8> @sse2_subs_b_constant() {
-; CHECK-LABEL: @sse2_subs_b_constant(
-; CHECK-NEXT: ret <16 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sse2_subs_b_constant_underflow() {
-; CHECK-LABEL: @sse2_subs_b_constant_underflow(
-; CHECK-NEXT: ret <16 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sse2_subs_b_constant_overflow() {
-; CHECK-LABEL: @sse2_subs_b_constant_overflow(
-; CHECK-NEXT: ret <16 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127>
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sse2_subs_b_constant_undefs() {
-; CHECK-LABEL: @sse2_subs_b_constant_undefs(
-; CHECK-NEXT: ret <16 x i8> <i8 undef, i8 -4, i8 -6, i8 -8, i8 -10, i8 undef, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <32 x i8> @avx2_subs_b_constant() {
-; CHECK-LABEL: @avx2_subs_b_constant(
-; CHECK-NEXT: ret <32 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @avx2_subs_b_constant_underflow() {
-; CHECK-LABEL: @avx2_subs_b_constant_underflow(
-; CHECK-NEXT: ret <32 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @avx2_subs_b_constant_overflow() {
-; CHECK-LABEL: @avx2_subs_b_constant_overflow(
-; CHECK-NEXT: ret <32 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127>
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @avx2_subs_b_constant_undefs() {
-; CHECK-LABEL: @avx2_subs_b_constant_undefs(
-; CHECK-NEXT: ret <32 x i8> <i8 undef, i8 -4, i8 -6, i8 -8, i8 -10, i8 undef, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 undef, i8 -4, i8 -6, i8 -8, i8 -10, i8 undef, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_subs_b_constant() {
-; CHECK-LABEL: @avx512_mask_subs_b_constant(
-; CHECK-NEXT: ret <64 x i8> <i8 -2, i8 0, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>, <64 x i8> zeroinitializer, i64 -3)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_subs_b_constant_underflow() {
-; CHECK-LABEL: @avx512_mask_subs_b_constant_underflow(
-; CHECK-NEXT: ret <64 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_subs_b_constant_overflow() {
-; CHECK-LABEL: @avx512_mask_subs_b_constant_overflow(
-; CHECK-NEXT: ret <64 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120>, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @avx512_mask_subs_b_constant_undefs() {
-; CHECK-LABEL: @avx512_mask_subs_b_constant_undefs(
-; CHECK-NEXT: ret <64 x i8> <i8 undef, i8 -4, i8 -6, i8 -8, i8 -10, i8 undef, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 undef, i8 -4, i8 -6, i8 -8, i8 -10, i8 undef, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 undef, i8 -4, i8 -6, i8 -8, i8 -10, i8 undef, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 undef, i8 -4, i8 -6, i8 -8, i8 -10, i8 undef, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-  %1 = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>, <64 x i8> zeroinitializer, i64 -1)
-  ret <64 x i8> %1
-}
-
-define <8 x i16> @sse2_subs_w_constant() {
-; CHECK-LABEL: @sse2_subs_w_constant(
-; CHECK-NEXT: ret <8 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16>
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sse2_subs_w_constant_underflow() {
-; CHECK-LABEL: @sse2_subs_w_constant_underflow(
-; CHECK-NEXT: ret <8 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768>
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -32107>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 12188>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sse2_subs_w_constant_overflow() {
-; CHECK-LABEL: @sse2_subs_w_constant_overflow(
-; CHECK-NEXT: ret <8 x i16> <i16 -2, i16 32767, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16>
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> <i16 -1, i16 8248, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8>, <8 x i16> <i16 1, i16 -25192, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sse2_subs_w_constant_undefs() {
-; CHECK-LABEL: @sse2_subs_w_constant_undefs(
-; CHECK-NEXT: ret <8 x i16> <i16 undef, i16 -4, i16 -6, i16 -8, i16 -10, i16 undef, i16 -14, i16 -16>
-  %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8>, <8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <16 x i16> @avx2_subs_w_constant() {
-; CHECK-LABEL: @avx2_subs_w_constant(
-; CHECK-NEXT: ret <16 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @avx2_subs_w_constant_underflow() {
-; CHECK-LABEL: @avx2_subs_w_constant_underflow(
-; CHECK-NEXT: ret <16 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -21107, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 15188, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @avx2_subs_w_constant_overflow() {
-; CHECK-LABEL: @avx2_subs_w_constant_overflow(
-; CHECK-NEXT: ret <16 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 32767, i16 -24, i16 -26, i16 -28, i16 -30, i16 32767>
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 20125, i16 -12, i16 -13, i16 -14, i16 -15, i16 20160>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 -20230, i16 12, i16 13, i16 14, i16 15, i16 -20120>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @avx2_subs_w_constant_undefs() {
-; CHECK-LABEL: @avx2_subs_w_constant_undefs(
-; CHECK-NEXT: ret <16 x i16> <i16 undef, i16 -4, i16 -6, i16 -8, i16 -10, i16 undef, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_subs_w_constant() {
-; CHECK-LABEL: @avx512_mask_subs_w_constant(
-; CHECK-NEXT: ret <32 x i16> <i16 -2, i16 0, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33, i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>, <32 x i16> zeroinitializer, i32 -3)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_subs_w_constant_underflow() {
-; CHECK-LABEL: @avx512_mask_subs_w_constant_underflow(
-; CHECK-NEXT: ret <32 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33, i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -20107, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -20107, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 20168, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 20248, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>, <32 x i16> zeroinitializer, i32 -1)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_subs_w_constant_overflow() {
-; CHECK-LABEL: @avx512_mask_subs_w_constant_overflow(
-; CHECK-NEXT: ret <32 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 32767, i16 -24, i16 -26, i16 -28, i16 -30, i16 32767, i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 32767, i16 -24, i16 -26, i16 -28, i16 -30, i16 32767>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 20125, i16 -12, i16 -13, i16 -14, i16 -15, i16 20200, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 20125, i16 -12, i16 -13, i16 -14, i16 -15, i16 20200>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 -20200, i16 12, i16 13, i16 14, i16 15, i16 -20120, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 -20211, i16 12, i16 13, i16 14, i16 15, i16 -20120>, <32 x i16> zeroinitializer, i32 -1)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @avx512_mask_subs_w_constant_undefs() {
-; CHECK-LABEL: @avx512_mask_subs_w_constant_undefs(
-; CHECK-NEXT: ret <32 x i16> <i16 undef, i16 -4, i16 -6, i16 -8, i16 -10, i16 undef, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33, i16 undef, i16 -4, i16 -6, i16 -8, i16 -10, i16 undef, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-  %1 = call <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>, <32 x i16> zeroinitializer, i32 -1)
-  ret <32 x i16> %1
-}
-
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
-declare <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readnone
-declare <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readnone
-declare <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) nounwind readnone
-declare <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) nounwind readnone
diff --git a/test/Transforms/InstCombine/bitcast-store.ll b/test/Transforms/InstCombine/bitcast-store.ll
index 6d79527..2308d77 100644
--- a/test/Transforms/InstCombine/bitcast-store.ll
+++ b/test/Transforms/InstCombine/bitcast-store.ll
@@ -10,11 +10,11 @@
 @G = external constant [5 x i8*]
 
 ; CHECK-LABEL: @foo
-; CHECK: store i32 %x, i32* %{{.*}}, align 16, !noalias !0
+; CHECK: store i32 %x, i32* %{{.*}}, align 16, !noalias !0, !llvm.access.group !1
 define void @foo(i32 %x, float* %p) nounwind {
 entry:
   %x.cast = bitcast i32 %x to float
-  store float %x.cast, float* %p, align 16, !noalias !0
+  store float %x.cast, float* %p, align 16, !noalias !0, !llvm.access.group !1
   ret void
 }
 
@@ -48,3 +48,4 @@
 }
 
 !0 = !{!0}
+!1 = !{}
\ No newline at end of file
diff --git a/test/Transforms/InstCombine/bitcount.ll b/test/Transforms/InstCombine/bitcount.ll
deleted file mode 100644
index 318ca73..0000000
--- a/test/Transforms/InstCombine/bitcount.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; Tests to make sure bit counts of constants are folded
-; RUN: opt < %s -instcombine -S | grep "ret i32 19"
-; RUN: opt < %s -instcombine -S | \
-; RUN:   grep -v declare | not grep llvm.ct
-
-declare i31 @llvm.ctpop.i31(i31 %val) 
-declare i32 @llvm.cttz.i32(i32 %val, i1) 
-declare i33 @llvm.ctlz.i33(i33 %val, i1) 
-
-define i32 @test(i32 %A) {
-  %c1 = call i31 @llvm.ctpop.i31(i31 12415124)
-  %c2 = call i32 @llvm.cttz.i32(i32 87359874, i1 true)
-  %c3 = call i33 @llvm.ctlz.i33(i33 87359874, i1 true)
-  %t1 = zext i31 %c1 to i32
-  %t3 = trunc i33 %c3 to i32
-  %r1 = add i32 %t1, %c2
-  %r2 = add i32 %r1, %t3
-  ret i32 %r2
-}
diff --git a/test/Transforms/InstCombine/bitreverse-hang.ll b/test/Transforms/InstCombine/bitreverse-hang.ll
index 6823bd0..8e6585e 100644
--- a/test/Transforms/InstCombine/bitreverse-hang.ll
+++ b/test/Transforms/InstCombine/bitreverse-hang.ll
@@ -46,7 +46,7 @@
 !llvm.ident = !{!1}
 
 !0 = !{i32 1, !"PIC Level", i32 2}
-!1 = !{!"clang version 3.8.0 (http://llvm.org/git/clang.git eb70f4e9cc9a4dc3dd57b032fb858d56b4b64a0e)"}
+!1 = !{!"clang version 3.8.0"}
 !2 = !{!3, !3, i64 0}
 !3 = !{!"int", !4, i64 0}
 !4 = !{!"omnipotent char", !5, i64 0}
diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll
index 64a7416..b6d1eda 100644
--- a/test/Transforms/InstCombine/cast.ll
+++ b/test/Transforms/InstCombine/cast.ll
@@ -3,110 +3,105 @@
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 target datalayout = "E-p:64:64:64-p1:32:32:32-p2:64:64:64-p3:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
 
-@inbuf = external global [32832 x i8]           ; <[32832 x i8]*> [#uses=1]
+@inbuf = external global [32832 x i8]
 
 define i32 @test1(i32 %A) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    ret i32 %A
+; CHECK-NEXT:    ret i32 [[A:%.*]]
 ;
-  %c1 = bitcast i32 %A to i32             ; <i32> [#uses=1]
-  %c2 = bitcast i32 %c1 to i32            ; <i32> [#uses=1]
+  %c1 = bitcast i32 %A to i32
+  %c2 = bitcast i32 %c1 to i32
   ret i32 %c2
 }
 
 define i64 @test2(i8 %A) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    [[RET:%.*]] = zext i8 %A to i64
+; CHECK-NEXT:    [[RET:%.*]] = zext i8 [[A:%.*]] to i64
 ; CHECK-NEXT:    ret i64 [[RET]]
 ;
-  %c1 = zext i8 %A to i16         ; <i16> [#uses=1]
-  %c2 = zext i16 %c1 to i32               ; <i32> [#uses=1]
-  %Ret = zext i32 %c2 to i64              ; <i64> [#uses=1]
+  %c1 = zext i8 %A to i16
+  %c2 = zext i16 %c1 to i32
+  %Ret = zext i32 %c2 to i64
   ret i64 %Ret
 }
 
-; This function should just use bitwise AND
 define i64 @test3(i64 %A) {
 ; CHECK-LABEL: @test3(
-; CHECK-NEXT:    [[C2:%.*]] = and i64 %A, 255
+; CHECK-NEXT:    [[C2:%.*]] = and i64 [[A:%.*]], 255
 ; CHECK-NEXT:    ret i64 [[C2]]
 ;
-  %c1 = trunc i64 %A to i8                ; <i8> [#uses=1]
-  %c2 = zext i8 %c1 to i64                ; <i64> [#uses=1]
+  %c1 = trunc i64 %A to i8
+  %c2 = zext i8 %c1 to i64
   ret i64 %c2
 }
 
 define i32 @test4(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 %A, %B
+; CHECK-NEXT:    [[COND:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
 ; CHECK-NEXT:    [[RESULT:%.*]] = zext i1 [[COND]] to i32
 ; CHECK-NEXT:    ret i32 [[RESULT]]
 ;
-  %COND = icmp slt i32 %A, %B             ; <i1> [#uses=1]
-  ; Booleans are unsigned integrals
-  %c = zext i1 %COND to i8                ; <i8> [#uses=1]
-  ; for the cast elim purpose
-  %result = zext i8 %c to i32             ; <i32> [#uses=1]
+  %COND = icmp slt i32 %A, %B
+  %c = zext i1 %COND to i8
+  %result = zext i8 %c to i32
   ret i32 %result
 }
 
 define i32 @test5(i1 %B) {
-        ; This cast should get folded into
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    [[RESULT:%.*]] = zext i1 %B to i32
+; CHECK-NEXT:    [[RESULT:%.*]] = zext i1 [[B:%.*]] to i32
 ; CHECK-NEXT:    ret i32 [[RESULT]]
 ;
-  %c = zext i1 %B to i8           ; <i8> [#uses=1]
-  ; this cast
-  %result = zext i8 %c to i32             ; <i32> [#uses=1]
+  %c = zext i1 %B to i8
+  %result = zext i8 %c to i32
   ret i32 %result
 }
 
 define i32 @test6(i64 %A) {
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT:    [[C1:%.*]] = trunc i64 %A to i32
+; CHECK-NEXT:    [[C1:%.*]] = trunc i64 [[A:%.*]] to i32
 ; CHECK-NEXT:    ret i32 [[C1]]
 ;
-  %c1 = trunc i64 %A to i32               ; <i32> [#uses=1]
-  %res = bitcast i32 %c1 to i32           ; <i32> [#uses=1]
+  %c1 = trunc i64 %A to i32
+  %res = bitcast i32 %c1 to i32
   ret i32 %res
 }
 
 define i64 @test7(i1 %A) {
 ; CHECK-LABEL: @test7(
-; CHECK-NEXT:    [[RES:%.*]] = zext i1 %A to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext i1 [[A:%.*]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
-  %c1 = zext i1 %A to i32         ; <i32> [#uses=1]
-  %res = sext i32 %c1 to i64              ; <i64> [#uses=1]
+  %c1 = zext i1 %A to i32
+  %res = sext i32 %c1 to i64
   ret i64 %res
 }
 
 define i64 @test8(i8 %A) {
 ; CHECK-LABEL: @test8(
-; CHECK-NEXT:    [[C1:%.*]] = sext i8 %A to i64
+; CHECK-NEXT:    [[C1:%.*]] = sext i8 [[A:%.*]] to i64
 ; CHECK-NEXT:    ret i64 [[C1]]
 ;
-  %c1 = sext i8 %A to i64         ; <i64> [#uses=1]
-  %res = bitcast i64 %c1 to i64           ; <i64> [#uses=1]
+  %c1 = sext i8 %A to i64
+  %res = bitcast i64 %c1 to i64
   ret i64 %res
 }
 
 define i16 @test9(i16 %A) {
 ; CHECK-LABEL: @test9(
-; CHECK-NEXT:    ret i16 %A
+; CHECK-NEXT:    ret i16 [[A:%.*]]
 ;
-  %c1 = sext i16 %A to i32                ; <i32> [#uses=1]
-  %c2 = trunc i32 %c1 to i16              ; <i16> [#uses=1]
+  %c1 = sext i16 %A to i32
+  %c2 = trunc i32 %c1 to i16
   ret i16 %c2
 }
 
 define i16 @test10(i16 %A) {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    ret i16 %A
+; CHECK-NEXT:    ret i16 [[A:%.*]]
 ;
-  %c1 = sext i16 %A to i32                ; <i32> [#uses=1]
-  %c2 = trunc i32 %c1 to i16              ; <i16> [#uses=1]
+  %c1 = sext i16 %A to i32
+  %c2 = trunc i32 %c1 to i16
   ret i16 %c2
 }
 
@@ -114,10 +109,10 @@
 
 define void @test11(i32* %P) {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    call void (i32, ...) @varargs(i32 5, i32* %P)
+; CHECK-NEXT:    call void (i32, ...) @varargs(i32 5, i32* [[P:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %c = bitcast i32* %P to i16*            ; <i16*> [#uses=1]
+  %c = bitcast i32* %P to i16*
   call void (i32, ...) @varargs( i32 5, i16* %c )
   ret void
 }
@@ -126,12 +121,14 @@
 define void @test_invoke_vararg_cast(i32* %a, i32* %b) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
 ; CHECK-LABEL: @test_invoke_vararg_cast(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    invoke void (i32, ...) @varargs(i32 1, i32* %b, i32* %a)
-; CHECK-NEXT:    to label %invoke.cont unwind label %lpad
+; CHECK-NEXT:    invoke void (i32, ...) @varargs(i32 1, i32* [[B:%.*]], i32* [[A:%.*]])
+; CHECK-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
 ; CHECK:       invoke.cont:
 ; CHECK-NEXT:    ret void
 ; CHECK:       lpad:
-; CHECK-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32
+; CHECK-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    cleanup
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = bitcast i32* %b to i8*
@@ -139,10 +136,10 @@
   invoke void (i32, ...) @varargs(i32 1, i8* %0, i64* %1)
   to label %invoke.cont unwind label %lpad
 
-invoke.cont:                                      ; preds = %entry
+invoke.cont:
   ret void
 
-lpad:                                             ; preds = %entry
+lpad:
   %2 = landingpad { i8*, i32 }
   cleanup
   ret void
@@ -150,20 +147,20 @@
 
 define i8* @test13(i64 %A) {
 ; CHECK-LABEL: @test13(
-; CHECK-NEXT:    [[C:%.*]] = getelementptr [32832 x i8], [32832 x i8]* @inbuf, i64 0, i64 %A
+; CHECK-NEXT:    [[C:%.*]] = getelementptr [32832 x i8], [32832 x i8]* @inbuf, i64 0, i64 [[A:%.*]]
 ; CHECK-NEXT:    ret i8* [[C]]
 ;
-  %c = getelementptr [0 x i8], [0 x i8]* bitcast ([32832 x i8]* @inbuf to [0 x i8]*), i64 0, i64 %A             ; <i8*> [#uses=1]
+  %c = getelementptr [0 x i8], [0 x i8]* bitcast ([32832 x i8]* @inbuf to [0 x i8]*), i64 0, i64 %A
   ret i8* %c
 }
 
 define i1 @test14(i8 %A) {
 ; CHECK-LABEL: @test14(
-; CHECK-NEXT:    [[X:%.*]] = icmp sgt i8 %A, -1
+; CHECK-NEXT:    [[X:%.*]] = icmp sgt i8 [[A:%.*]], -1
 ; CHECK-NEXT:    ret i1 [[X]]
 ;
-  %c = bitcast i8 %A to i8                ; <i8> [#uses=1]
-  %X = icmp ult i8 %c, -128               ; <i1> [#uses=1]
+  %c = bitcast i8 %A to i8
+  %X = icmp ult i8 %c, -128
   ret i1 %X
 }
 
@@ -177,36 +174,36 @@
 
 define i1 @test16(i32* %P) {
 ; CHECK-LABEL: @test16(
-; CHECK-NEXT:    [[C:%.*]] = icmp ne i32* %P, null
+; CHECK-NEXT:    [[C:%.*]] = icmp ne i32* [[P:%.*]], null
 ; CHECK-NEXT:    ret i1 [[C]]
 ;
-  %c = icmp ne i32* %P, null              ; <i1> [#uses=1]
+  %c = icmp ne i32* %P, null
   ret i1 %c
 }
 
-define i16 @test17(i1 %tmp3) {
+define i16 @test17(i1 %x) {
 ; CHECK-LABEL: @test17(
-; CHECK-NEXT:    [[T86:%.*]] = zext i1 %tmp3 to i16
+; CHECK-NEXT:    [[T86:%.*]] = zext i1 [[X:%.*]] to i16
 ; CHECK-NEXT:    ret i16 [[T86]]
 ;
-  %c = zext i1 %tmp3 to i32               ; <i32> [#uses=1]
-  %t86 = trunc i32 %c to i16              ; <i16> [#uses=1]
+  %c = zext i1 %x to i32
+  %t86 = trunc i32 %c to i16
   ret i16 %t86
 }
 
-define i16 @test18(i8 %tmp3) {
+define i16 @test18(i8 %x) {
 ; CHECK-LABEL: @test18(
-; CHECK-NEXT:    [[T86:%.*]] = sext i8 %tmp3 to i16
+; CHECK-NEXT:    [[T86:%.*]] = sext i8 [[X:%.*]] to i16
 ; CHECK-NEXT:    ret i16 [[T86]]
 ;
-  %c = sext i8 %tmp3 to i32               ; <i32> [#uses=1]
-  %t86 = trunc i32 %c to i16              ; <i16> [#uses=1]
+  %c = sext i8 %x to i32
+  %t86 = trunc i32 %c to i16
   ret i16 %t86
 }
 
 define i1 @test19(i32 %X) {
 ; CHECK-LABEL: @test19(
-; CHECK-NEXT:    [[Z:%.*]] = icmp slt i32 %X, 12345
+; CHECK-NEXT:    [[Z:%.*]] = icmp slt i32 [[X:%.*]], 12345
 ; CHECK-NEXT:    ret i1 [[Z]]
 ;
   %c = sext i32 %X to i64
@@ -216,7 +213,7 @@
 
 define <2 x i1> @test19vec(<2 x i32> %X) {
 ; CHECK-LABEL: @test19vec(
-; CHECK-NEXT:    [[Z:%.*]] = icmp slt <2 x i32> %X, <i32 12345, i32 2147483647>
+; CHECK-NEXT:    [[Z:%.*]] = icmp slt <2 x i32> [[X:%.*]], <i32 12345, i32 2147483647>
 ; CHECK-NEXT:    ret <2 x i1> [[Z]]
 ;
   %c = sext <2 x i32> %X to <2 x i64>
@@ -226,7 +223,7 @@
 
 define <3 x i1> @test19vec2(<3 x i1> %X) {
 ; CHECK-LABEL: @test19vec2(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = xor <3 x i1> %X, <i1 true, i1 true, i1 true>
+; CHECK-NEXT:    [[CMPEQ:%.*]] = xor <3 x i1> [[X:%.*]], <i1 true, i1 true, i1 true>
 ; CHECK-NEXT:    ret <3 x i1> [[CMPEQ]]
 ;
   %sext = sext <3 x i1> %X to <3 x i32>
@@ -238,45 +235,40 @@
 ; CHECK-LABEL: @test20(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %c = zext i1 %B to i32          ; <i32> [#uses=1]
-  %D = icmp slt i32 %c, -1                ; <i1> [#uses=1]
-  ;; false
+  %c = zext i1 %B to i32
+  %D = icmp slt i32 %c, -1
   ret i1 %D
 }
 
 define i32 @test21(i32 %X) {
 ; CHECK-LABEL: @test21(
-; CHECK-NEXT:    [[C21:%.*]] = and i32 %X, 255
-; CHECK-NEXT:    ret i32 [[C21]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 255
+; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
-  %c1 = trunc i32 %X to i8                ; <i8> [#uses=1]
-  ;; sext -> zext -> and -> nop
-  %c2 = sext i8 %c1 to i32                ; <i32> [#uses=1]
-  %RV = and i32 %c2, 255          ; <i32> [#uses=1]
+  %c1 = trunc i32 %X to i8
+  %c2 = sext i8 %c1 to i32
+  %RV = and i32 %c2, 255
   ret i32 %RV
 }
 
 define i32 @test22(i32 %X) {
 ; CHECK-LABEL: @test22(
-; CHECK-NEXT:    [[SEXT:%.*]] = shl i32 %X, 24
+; CHECK-NEXT:    [[SEXT:%.*]] = shl i32 [[X:%.*]], 24
 ; CHECK-NEXT:    ret i32 [[SEXT]]
 ;
-  %c1 = trunc i32 %X to i8                ; <i8> [#uses=1]
-  ;; sext -> zext -> and -> nop
-  %c2 = sext i8 %c1 to i32                ; <i32> [#uses=1]
-  %RV = shl i32 %c2, 24           ; <i32> [#uses=1]
+  %c1 = trunc i32 %X to i8
+  %c2 = sext i8 %c1 to i32
+  %RV = shl i32 %c2, 24
   ret i32 %RV
 }
 
 define i32 @test23(i32 %X) {
-        ;; Turn into an AND even though X
 ; CHECK-LABEL: @test23(
-; CHECK-NEXT:    [[C2:%.*]] = and i32 %X, 65535
+; CHECK-NEXT:    [[C2:%.*]] = and i32 [[X:%.*]], 65535
 ; CHECK-NEXT:    ret i32 [[C2]]
 ;
-  %c1 = trunc i32 %X to i16               ; <i16> [#uses=1]
-  ;; and Z are signed.
-  %c2 = zext i16 %c1 to i32               ; <i32> [#uses=1]
+  %c1 = trunc i32 %X to i16
+  %c2 = zext i16 %c1 to i32
   ret i32 %c2
 }
 
@@ -284,70 +276,68 @@
 ; CHECK-LABEL: @test24(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %X = select i1 %C, i32 14, i32 1234             ; <i32> [#uses=1]
-  ;; Fold cast into select
-  %c = icmp ne i32 %X, 0          ; <i1> [#uses=1]
+  %X = select i1 %C, i32 14, i32 1234
+  %c = icmp ne i32 %X, 0
   ret i1 %c
 }
 
 define i32 @test26(float %F) {
-        ;; no need to cast from float->double.
 ; CHECK-LABEL: @test26(
-; CHECK-NEXT:    [[D:%.*]] = fptosi float %F to i32
+; CHECK-NEXT:    [[D:%.*]] = fptosi float [[F:%.*]] to i32
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
-  %c = fpext float %F to double           ; <double> [#uses=1]
-  %D = fptosi double %c to i32            ; <i32> [#uses=1]
+  %c = fpext float %F to double
+  %D = fptosi double %c to i32
   ret i32 %D
 }
 
 define [4 x float]* @test27([9 x [4 x float]]* %A) {
 ; CHECK-LABEL: @test27(
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [9 x [4 x float]], [9 x [4 x float]]* %A, i64 0, i64 0
+; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [9 x [4 x float]], [9 x [4 x float]]* [[A:%.*]], i64 0, i64 0
 ; CHECK-NEXT:    ret [4 x float]* [[C]]
 ;
-  %c = bitcast [9 x [4 x float]]* %A to [4 x float]*              ; <[4 x float]*> [#uses=1]
+  %c = bitcast [9 x [4 x float]]* %A to [4 x float]*
   ret [4 x float]* %c
 }
 
 define float* @test28([4 x float]* %A) {
 ; CHECK-LABEL: @test28(
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [4 x float], [4 x float]* %A, i64 0, i64 0
+; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[A:%.*]], i64 0, i64 0
 ; CHECK-NEXT:    ret float* [[C]]
 ;
-  %c = bitcast [4 x float]* %A to float*          ; <float*> [#uses=1]
+  %c = bitcast [4 x float]* %A to float*
   ret float* %c
 }
 
 define i32 @test29(i32 %c1, i32 %c2) {
 ; CHECK-LABEL: @test29(
-; CHECK-NEXT:    [[TMP2:%.*]] = or i32 %c2, %c1
-; CHECK-NEXT:    [[TMP10:%.*]] = and i32 [[TMP2]], 255
-; CHECK-NEXT:    ret i32 [[TMP10]]
+; CHECK-NEXT:    [[T21:%.*]] = or i32 [[C2:%.*]], [[C1:%.*]]
+; CHECK-NEXT:    [[T10:%.*]] = and i32 [[T21]], 255
+; CHECK-NEXT:    ret i32 [[T10]]
 ;
-  %tmp1 = trunc i32 %c1 to i8             ; <i8> [#uses=1]
-  %tmp4.mask = trunc i32 %c2 to i8                ; <i8> [#uses=1]
-  %tmp = or i8 %tmp4.mask, %tmp1          ; <i8> [#uses=1]
-  %tmp10 = zext i8 %tmp to i32            ; <i32> [#uses=1]
-  ret i32 %tmp10
+  %t1 = trunc i32 %c1 to i8
+  %tmask = trunc i32 %c2 to i8
+  %t2 = or i8 %tmask, %t1
+  %t10 = zext i8 %t2 to i32
+  ret i32 %t10
 }
 
 define i32 @test30(i32 %c1) {
 ; CHECK-LABEL: @test30(
-; CHECK-NEXT:    [[C3:%.*]] = and i32 %c1, 255
+; CHECK-NEXT:    [[C3:%.*]] = and i32 [[C1:%.*]], 255
 ; CHECK-NEXT:    [[C4:%.*]] = xor i32 [[C3]], 1
 ; CHECK-NEXT:    ret i32 [[C4]]
 ;
-  %c2 = trunc i32 %c1 to i8               ; <i8> [#uses=1]
-  %c3 = xor i8 %c2, 1             ; <i8> [#uses=1]
-  %c4 = zext i8 %c3 to i32                ; <i32> [#uses=1]
+  %c2 = trunc i32 %c1 to i8
+  %c3 = xor i8 %c2, 1
+  %c4 = zext i8 %c3 to i32
   ret i32 %c4
 }
 
 define i1 @test31(i64 %A) {
 ; CHECK-LABEL: @test31(
-; CHECK-NEXT:    [[C:%.*]] = and i64 %A, 42
-; CHECK-NEXT:    [[D:%.*]] = icmp eq i64 [[C]], 10
+; CHECK-NEXT:    [[C1:%.*]] = and i64 [[A:%.*]], 42
+; CHECK-NEXT:    [[D:%.*]] = icmp eq i64 [[C1]], 10
 ; CHECK-NEXT:    ret i1 [[D]]
 ;
   %B = trunc i64 %A to i32
@@ -360,7 +350,7 @@
 ; Does this depend on whether the source/dest types of the trunc are legal in the data layout?
 define <2 x i1> @test31vec(<2 x i64> %A) {
 ; CHECK-LABEL: @test31vec(
-; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> %A to <2 x i32>
+; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
 ; CHECK-NEXT:    [[C:%.*]] = and <2 x i32> [[B]], <i32 42, i32 42>
 ; CHECK-NEXT:    [[D:%.*]] = icmp eq <2 x i32> [[C]], <i32 10, i32 10>
 ; CHECK-NEXT:    ret <2 x i1> [[D]]
@@ -376,7 +366,7 @@
 
 define <2 x i1> @test32vec(<2 x i8> %A) {
 ; CHECK-LABEL: @test32vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> %A, <i8 42, i8 42>
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[A:%.*]], <i8 42, i8 42>
 ; CHECK-NEXT:    [[D:%.*]] = icmp eq <2 x i8> [[TMP1]], <i8 10, i8 10>
 ; CHECK-NEXT:    ret <2 x i1> [[D]]
 ;
@@ -388,39 +378,39 @@
 
 define i32 @test33(i32 %c1) {
 ; CHECK-LABEL: @test33(
-; CHECK-NEXT:    ret i32 %c1
+; CHECK-NEXT:    ret i32 [[C1:%.*]]
 ;
-  %x = bitcast i32 %c1 to float           ; <float> [#uses=1]
-  %y = bitcast float %x to i32            ; <i32> [#uses=1]
+  %x = bitcast i32 %c1 to float
+  %y = bitcast float %x to i32
   ret i32 %y
 }
 
 define i16 @test34(i16 %a) {
 ; CHECK-LABEL: @test34(
-; CHECK-NEXT:    [[TMP21:%.*]] = lshr i16 %a, 8
-; CHECK-NEXT:    ret i16 [[TMP21]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i16 [[A:%.*]], 8
+; CHECK-NEXT:    ret i16 [[TMP1]]
 ;
-  %c1 = zext i16 %a to i32                ; <i32> [#uses=1]
-  %tmp21 = lshr i32 %c1, 8                ; <i32> [#uses=1]
-  %c2 = trunc i32 %tmp21 to i16           ; <i16> [#uses=1]
+  %c1 = zext i16 %a to i32
+  %t21 = lshr i32 %c1, 8
+  %c2 = trunc i32 %t21 to i16
   ret i16 %c2
 }
 
 define i16 @test35(i16 %a) {
 ; CHECK-LABEL: @test35(
-; CHECK-NEXT:    [[TMP2:%.*]] = lshr i16 %a, 8
-; CHECK-NEXT:    ret i16 [[TMP2]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr i16 [[A:%.*]], 8
+; CHECK-NEXT:    ret i16 [[T2]]
 ;
-  %c1 = bitcast i16 %a to i16             ; <i16> [#uses=1]
-  %tmp2 = lshr i16 %c1, 8         ; <i16> [#uses=1]
-  %c2 = bitcast i16 %tmp2 to i16          ; <i16> [#uses=1]
+  %c1 = bitcast i16 %a to i16
+  %t2 = lshr i16 %c1, 8
+  %c2 = bitcast i16 %t2 to i16
   ret i16 %c2
 }
 
 ; rdar://6480391
 define i1 @test36(i32 %a) {
 ; CHECK-LABEL: @test36(
-; CHECK-NEXT:    [[D:%.*]] = icmp sgt i32 %a, -1
+; CHECK-NEXT:    [[D:%.*]] = icmp sgt i32 [[A:%.*]], -1
 ; CHECK-NEXT:    ret i1 [[D]]
 ;
   %b = lshr i32 %a, 31
@@ -431,7 +421,7 @@
 
 define <2 x i1> @test36vec(<2 x i32> %a) {
 ; CHECK-LABEL: @test36vec(
-; CHECK-NEXT:    [[D:%.*]] = icmp sgt <2 x i32> %a, <i32 -1, i32 -1>
+; CHECK-NEXT:    [[D:%.*]] = icmp sgt <2 x i32> [[A:%.*]], <i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <2 x i1> [[D]]
 ;
   %b = lshr <2 x i32> %a, <i32 31, i32 31>
@@ -453,7 +443,7 @@
 
 define i64 @test38(i32 %a) {
 ; CHECK-LABEL: @test38(
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 %a, -2
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[A:%.*]], -2
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i64
 ; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
@@ -466,91 +456,91 @@
 
 define i16 @test39(i16 %a) {
 ; CHECK-LABEL: @test39(
-; CHECK-NEXT:    [[TMP_UPGRD_32:%.*]] = call i16 @llvm.bswap.i16(i16 %a)
-; CHECK-NEXT:    ret i16 [[TMP_UPGRD_32]]
+; CHECK-NEXT:    [[REV:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
+; CHECK-NEXT:    ret i16 [[REV]]
 ;
-  %tmp = zext i16 %a to i32
-  %tmp21 = lshr i32 %tmp, 8
-  %tmp5 = shl i32 %tmp, 8
-  %tmp.upgrd.32 = or i32 %tmp21, %tmp5
-  %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
-  ret i16 %tmp.upgrd.3
+  %t = zext i16 %a to i32
+  %t21 = lshr i32 %t, 8
+  %t5 = shl i32 %t, 8
+  %t32 = or i32 %t21, %t5
+  %r = trunc i32 %t32 to i16
+  ret i16 %r
 }
 
 define i16 @test40(i16 %a) {
 ; CHECK-LABEL: @test40(
-; CHECK-NEXT:    [[TMP21:%.*]] = lshr i16 %a, 9
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i16 %a, 8
-; CHECK-NEXT:    [[TMP_UPGRD_32:%.*]] = or i16 [[TMP21]], [[TMP5]]
-; CHECK-NEXT:    ret i16 [[TMP_UPGRD_32]]
+; CHECK-NEXT:    [[T21:%.*]] = lshr i16 [[A:%.*]], 9
+; CHECK-NEXT:    [[T5:%.*]] = shl i16 [[A]], 8
+; CHECK-NEXT:    [[T32:%.*]] = or i16 [[T21]], [[T5]]
+; CHECK-NEXT:    ret i16 [[T32]]
 ;
-  %tmp = zext i16 %a to i32
-  %tmp21 = lshr i32 %tmp, 9
-  %tmp5 = shl i32 %tmp, 8
-  %tmp.upgrd.32 = or i32 %tmp21, %tmp5
-  %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
-  ret i16 %tmp.upgrd.3
+  %t = zext i16 %a to i32
+  %t21 = lshr i32 %t, 9
+  %t5 = shl i32 %t, 8
+  %t32 = or i32 %t21, %t5
+  %r = trunc i32 %t32 to i16
+  ret i16 %r
 }
 
 define <2 x i16> @test40vec(<2 x i16> %a) {
 ; CHECK-LABEL: @test40vec(
-; CHECK-NEXT:    [[TMP21:%.*]] = lshr <2 x i16> [[A:%.*]], <i16 9, i16 9>
-; CHECK-NEXT:    [[TMP5:%.*]] = shl <2 x i16> [[A]], <i16 8, i16 8>
-; CHECK-NEXT:    [[TMP_UPGRD_32:%.*]] = or <2 x i16> [[TMP21]], [[TMP5]]
-; CHECK-NEXT:    ret <2 x i16> [[TMP_UPGRD_32]]
+; CHECK-NEXT:    [[T21:%.*]] = lshr <2 x i16> [[A:%.*]], <i16 9, i16 9>
+; CHECK-NEXT:    [[T5:%.*]] = shl <2 x i16> [[A]], <i16 8, i16 8>
+; CHECK-NEXT:    [[T32:%.*]] = or <2 x i16> [[T21]], [[T5]]
+; CHECK-NEXT:    ret <2 x i16> [[T32]]
 ;
-  %tmp = zext <2 x i16> %a to <2 x i32>
-  %tmp21 = lshr <2 x i32> %tmp, <i32 9, i32 9>
-  %tmp5 = shl <2 x i32> %tmp, <i32 8, i32 8>
-  %tmp.upgrd.32 = or <2 x i32> %tmp21, %tmp5
-  %tmp.upgrd.3 = trunc <2 x i32> %tmp.upgrd.32 to <2 x i16>
-  ret <2 x i16> %tmp.upgrd.3
+  %t = zext <2 x i16> %a to <2 x i32>
+  %t21 = lshr <2 x i32> %t, <i32 9, i32 9>
+  %t5 = shl <2 x i32> %t, <i32 8, i32 8>
+  %t32 = or <2 x i32> %t21, %t5
+  %r = trunc <2 x i32> %t32 to <2 x i16>
+  ret <2 x i16> %r
 }
 
 ; PR1263
-define i32* @test41(i32* %tmp1) {
+define i32* @test41(i32* %t1) {
 ; CHECK-LABEL: @test41(
-; CHECK-NEXT:    ret i32* %tmp1
+; CHECK-NEXT:    ret i32* [[T1:%.*]]
 ;
-  %tmp64 = bitcast i32* %tmp1 to { i32 }*
-  %tmp65 = getelementptr { i32 }, { i32 }* %tmp64, i32 0, i32 0
-  ret i32* %tmp65
+  %t64 = bitcast i32* %t1 to { i32 }*
+  %t65 = getelementptr { i32 }, { i32 }* %t64, i32 0, i32 0
+  ret i32* %t65
 }
 
-define i32 addrspace(1)* @test41_addrspacecast_smaller(i32* %tmp1) {
+define i32 addrspace(1)* @test41_addrspacecast_smaller(i32* %t1) {
 ; CHECK-LABEL: @test41_addrspacecast_smaller(
-; CHECK-NEXT:    [[TMP65:%.*]] = addrspacecast i32* %tmp1 to i32 addrspace(1)*
-; CHECK-NEXT:    ret i32 addrspace(1)* [[TMP65]]
+; CHECK-NEXT:    [[T65:%.*]] = addrspacecast i32* [[T1:%.*]] to i32 addrspace(1)*
+; CHECK-NEXT:    ret i32 addrspace(1)* [[T65]]
 ;
-  %tmp64 = addrspacecast i32* %tmp1 to { i32 } addrspace(1)*
-  %tmp65 = getelementptr { i32 }, { i32 } addrspace(1)* %tmp64, i32 0, i32 0
-  ret i32 addrspace(1)* %tmp65
+  %t64 = addrspacecast i32* %t1 to { i32 } addrspace(1)*
+  %t65 = getelementptr { i32 }, { i32 } addrspace(1)* %t64, i32 0, i32 0
+  ret i32 addrspace(1)* %t65
 }
 
-define i32* @test41_addrspacecast_larger(i32 addrspace(1)* %tmp1) {
+define i32* @test41_addrspacecast_larger(i32 addrspace(1)* %t1) {
 ; CHECK-LABEL: @test41_addrspacecast_larger(
-; CHECK-NEXT:    [[TMP65:%.*]] = addrspacecast i32 addrspace(1)* %tmp1 to i32*
-; CHECK-NEXT:    ret i32* [[TMP65]]
+; CHECK-NEXT:    [[T65:%.*]] = addrspacecast i32 addrspace(1)* [[T1:%.*]] to i32*
+; CHECK-NEXT:    ret i32* [[T65]]
 ;
-  %tmp64 = addrspacecast i32 addrspace(1)* %tmp1 to { i32 }*
-  %tmp65 = getelementptr { i32 }, { i32 }* %tmp64, i32 0, i32 0
-  ret i32* %tmp65
+  %t64 = addrspacecast i32 addrspace(1)* %t1 to { i32 }*
+  %t65 = getelementptr { i32 }, { i32 }* %t64, i32 0, i32 0
+  ret i32* %t65
 }
 
 define i32 @test42(i32 %X) {
 ; CHECK-LABEL: @test42(
-; CHECK-NEXT:    [[Z:%.*]] = and i32 %X, 255
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[X:%.*]], 255
 ; CHECK-NEXT:    ret i32 [[Z]]
 ;
-  %Y = trunc i32 %X to i8         ; <i8> [#uses=1]
-  %Z = zext i8 %Y to i32          ; <i32> [#uses=1]
+  %Y = trunc i32 %X to i8
+  %Z = zext i8 %Y to i32
   ret i32 %Z
 }
 
 ; rdar://6598839
-define zeroext i64 @test43(i8 zeroext %on_off) nounwind readonly {
+define zeroext i64 @test43(i8 zeroext %on_off) {
 ; CHECK-LABEL: @test43(
-; CHECK-NEXT:    [[A:%.*]] = zext i8 %on_off to i64
+; CHECK-NEXT:    [[A:%.*]] = zext i8 [[ON_OFF:%.*]] to i64
 ; CHECK-NEXT:    [[B:%.*]] = add nsw i64 [[A]], -1
 ; CHECK-NEXT:    ret i64 [[B]]
 ;
@@ -562,7 +552,7 @@
 
 define i64 @test44(i8 %T) {
 ; CHECK-LABEL: @test44(
-; CHECK-NEXT:    [[A:%.*]] = zext i8 %T to i64
+; CHECK-NEXT:    [[A:%.*]] = zext i8 [[T:%.*]] to i64
 ; CHECK-NEXT:    [[B:%.*]] = or i64 [[A]], 1234
 ; CHECK-NEXT:    ret i64 [[B]]
 ;
@@ -574,8 +564,8 @@
 
 define i64 @test45(i8 %A, i64 %Q) {
 ; CHECK-LABEL: @test45(
-; CHECK-NEXT:    [[B:%.*]] = sext i8 %A to i64
-; CHECK-NEXT:    [[C:%.*]] = or i64 [[B]], %Q
+; CHECK-NEXT:    [[B:%.*]] = sext i8 [[A:%.*]] to i64
+; CHECK-NEXT:    [[C:%.*]] = or i64 [[B]], [[Q:%.*]]
 ; CHECK-NEXT:    [[E:%.*]] = and i64 [[C]], 4294967295
 ; CHECK-NEXT:    ret i64 [[E]]
 ;
@@ -589,7 +579,7 @@
 
 define i64 @test46(i64 %A) {
 ; CHECK-LABEL: @test46(
-; CHECK-NEXT:    [[C:%.*]] = shl i64 %A, 8
+; CHECK-NEXT:    [[C:%.*]] = shl i64 [[A:%.*]], 8
 ; CHECK-NEXT:    [[D:%.*]] = and i64 [[C]], 10752
 ; CHECK-NEXT:    ret i64 [[D]]
 ;
@@ -602,9 +592,11 @@
 
 define <2 x i64> @test46vec(<2 x i64> %A) {
 ; CHECK-LABEL: @test46vec(
-; CHECK-NEXT:    [[C:%.*]] = shl <2 x i64> [[A:%.*]], <i64 8, i64 8>
-; CHECK-NEXT:    [[D:%.*]] = and <2 x i64> [[C]], <i64 10752, i64 10752>
-; CHECK-NEXT:    ret <2 x i64> [[D]]
+; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[C:%.*]] = shl <2 x i32> [[B]], <i32 8, i32 8>
+; CHECK-NEXT:    [[D:%.*]] = and <2 x i32> [[C]], <i32 10752, i32 10752>
+; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[D]] to <2 x i64>
+; CHECK-NEXT:    ret <2 x i64> [[E]]
 ;
   %B = trunc <2 x i64> %A to <2 x i32>
   %C = and <2 x i32> %B, <i32 42, i32 42>
@@ -628,7 +620,7 @@
 
 define i64 @test48(i8 %A1, i8 %a2) {
 ; CHECK-LABEL: @test48(
-; CHECK-NEXT:    [[Z2:%.*]] = zext i8 %A1 to i32
+; CHECK-NEXT:    [[Z2:%.*]] = zext i8 [[A1:%.*]] to i32
 ; CHECK-NEXT:    [[C:%.*]] = shl nuw nsw i32 [[Z2]], 8
 ; CHECK-NEXT:    [[D:%.*]] = or i32 [[C]], [[Z2]]
 ; CHECK-NEXT:    [[E:%.*]] = zext i32 [[D]] to i64
@@ -644,7 +636,7 @@
 
 define i64 @test49(i64 %A) {
 ; CHECK-LABEL: @test49(
-; CHECK-NEXT:    [[C:%.*]] = shl i64 %A, 32
+; CHECK-NEXT:    [[C:%.*]] = shl i64 [[A:%.*]], 32
 ; CHECK-NEXT:    [[SEXT:%.*]] = ashr exact i64 [[C]], 32
 ; CHECK-NEXT:    [[D:%.*]] = or i64 [[SEXT]], 1
 ; CHECK-NEXT:    ret i64 [[D]]
@@ -655,15 +647,15 @@
   ret i64 %D
 }
 
-define i64 @test50(i64 %A) {
+define i64 @test50(i64 %x) {
 ; CHECK-LABEL: @test50(
-; CHECK-NEXT:    [[A:%.*]] = lshr i64 %A, 2
+; CHECK-NEXT:    [[A:%.*]] = lshr i64 [[X:%.*]], 2
 ; CHECK-NEXT:    [[D:%.*]] = shl i64 [[A]], 32
 ; CHECK-NEXT:    [[SEXT:%.*]] = add i64 [[D]], -4294967296
 ; CHECK-NEXT:    [[E:%.*]] = ashr exact i64 [[SEXT]], 32
 ; CHECK-NEXT:    ret i64 [[E]]
 ;
-  %a = lshr i64 %A, 2
+  %a = lshr i64 %x, 2
   %B = trunc i64 %a to i32
   %D = add i32 %B, -1
   %E = sext i32 %D to i64
@@ -673,9 +665,9 @@
 
 define i64 @test51(i64 %A, i1 %cond) {
 ; CHECK-LABEL: @test51(
-; CHECK-NEXT:    [[C:%.*]] = and i64 %A, 4294967294
-; CHECK-NEXT:    [[D:%.*]] = or i64 %A, 1
-; CHECK-NEXT:    [[E:%.*]] = select i1 %cond, i64 [[C]], i64 [[D]]
+; CHECK-NEXT:    [[C:%.*]] = and i64 [[A:%.*]], 4294967294
+; CHECK-NEXT:    [[D:%.*]] = or i64 [[A]], 1
+; CHECK-NEXT:    [[E:%.*]] = select i1 [[COND:%.*]], i64 [[C]], i64 [[D]]
 ; CHECK-NEXT:    [[SEXT:%.*]] = shl i64 [[E]], 32
 ; CHECK-NEXT:    [[F:%.*]] = ashr exact i64 [[SEXT]], 32
 ; CHECK-NEXT:    ret i64 [[F]]
@@ -690,7 +682,7 @@
 
 define i32 @test52(i64 %A) {
 ; CHECK-LABEL: @test52(
-; CHECK-NEXT:    [[B:%.*]] = trunc i64 %A to i32
+; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[C:%.*]] = and i32 [[B]], 7224
 ; CHECK-NEXT:    [[D:%.*]] = or i32 [[C]], 32962
 ; CHECK-NEXT:    ret i32 [[D]]
@@ -704,7 +696,7 @@
 
 define i64 @test53(i32 %A) {
 ; CHECK-LABEL: @test53(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 %A, 7224
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 7224
 ; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], 32962
 ; CHECK-NEXT:    [[D:%.*]] = zext i32 [[TMP2]] to i64
 ; CHECK-NEXT:    ret i64 [[D]]
@@ -718,7 +710,7 @@
 
 define i32 @test54(i64 %A) {
 ; CHECK-LABEL: @test54(
-; CHECK-NEXT:    [[B:%.*]] = trunc i64 %A to i32
+; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[C:%.*]] = and i32 [[B]], 7224
 ; CHECK-NEXT:    [[D:%.*]] = or i32 [[C]], -32574
 ; CHECK-NEXT:    ret i32 [[D]]
@@ -732,7 +724,7 @@
 
 define i64 @test55(i32 %A) {
 ; CHECK-LABEL: @test55(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 %A, 7224
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 7224
 ; CHECK-NEXT:    [[C:%.*]] = zext i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[D:%.*]] = or i64 [[C]], -32574
 ; CHECK-NEXT:    ret i64 [[D]]
@@ -744,35 +736,35 @@
   ret i64 %E
 }
 
-define i64 @test56(i16 %A) nounwind {
+define i64 @test56(i16 %A) {
 ; CHECK-LABEL: @test56(
-; CHECK-NEXT:    [[TMP353:%.*]] = sext i16 %A to i64
-; CHECK-NEXT:    [[TMP354:%.*]] = lshr i64 [[TMP353]], 5
-; CHECK-NEXT:    [[TMP355:%.*]] = and i64 [[TMP354]], 134217727
-; CHECK-NEXT:    ret i64 [[TMP355]]
+; CHECK-NEXT:    [[P353:%.*]] = sext i16 [[A:%.*]] to i64
+; CHECK-NEXT:    [[P354:%.*]] = lshr i64 [[P353]], 5
+; CHECK-NEXT:    [[P355:%.*]] = and i64 [[P354]], 134217727
+; CHECK-NEXT:    ret i64 [[P355]]
 ;
-  %tmp353 = sext i16 %A to i32
-  %tmp354 = lshr i32 %tmp353, 5
-  %tmp355 = zext i32 %tmp354 to i64
-  ret i64 %tmp355
+  %p353 = sext i16 %A to i32
+  %p354 = lshr i32 %p353, 5
+  %p355 = zext i32 %p354 to i64
+  ret i64 %p355
 }
 
-define <2 x i64> @test56vec(<2 x i16> %A) nounwind {
+define <2 x i64> @test56vec(<2 x i16> %A) {
 ; CHECK-LABEL: @test56vec(
-; CHECK-NEXT:    [[TMP353:%.*]] = sext <2 x i16> [[A:%.*]] to <2 x i64>
-; CHECK-NEXT:    [[TMP354:%.*]] = lshr <2 x i64> [[TMP353]], <i64 5, i64 5>
-; CHECK-NEXT:    [[TMP355:%.*]] = and <2 x i64> [[TMP354]], <i64 134217727, i64 134217727>
-; CHECK-NEXT:    ret <2 x i64> [[TMP355]]
+; CHECK-NEXT:    [[P353:%.*]] = sext <2 x i16> [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[P354:%.*]] = lshr <2 x i32> [[P353]], <i32 5, i32 5>
+; CHECK-NEXT:    [[P355:%.*]] = zext <2 x i32> [[P354]] to <2 x i64>
+; CHECK-NEXT:    ret <2 x i64> [[P355]]
 ;
-  %tmp353 = sext <2 x i16> %A to <2 x i32>
-  %tmp354 = lshr <2 x i32> %tmp353, <i32 5, i32 5>
-  %tmp355 = zext <2 x i32> %tmp354 to <2 x i64>
-  ret <2 x i64> %tmp355
+  %p353 = sext <2 x i16> %A to <2 x i32>
+  %p354 = lshr <2 x i32> %p353, <i32 5, i32 5>
+  %p355 = zext <2 x i32> %p354 to <2 x i64>
+  ret <2 x i64> %p355
 }
 
-define i64 @test57(i64 %A) nounwind {
+define i64 @test57(i64 %A) {
 ; CHECK-LABEL: @test57(
-; CHECK-NEXT:    [[C:%.*]] = lshr i64 %A, 8
+; CHECK-NEXT:    [[C:%.*]] = lshr i64 [[A:%.*]], 8
 ; CHECK-NEXT:    [[E:%.*]] = and i64 [[C]], 16777215
 ; CHECK-NEXT:    ret i64 [[E]]
 ;
@@ -782,10 +774,11 @@
   ret i64 %E
 }
 
-define <2 x i64> @test57vec(<2 x i64> %A) nounwind {
+define <2 x i64> @test57vec(<2 x i64> %A) {
 ; CHECK-LABEL: @test57vec(
-; CHECK-NEXT:    [[C:%.*]] = lshr <2 x i64> [[A:%.*]], <i64 8, i64 8>
-; CHECK-NEXT:    [[E:%.*]] = and <2 x i64> [[C]], <i64 16777215, i64 16777215>
+; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 8, i32 8>
+; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[C]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[E]]
 ;
   %B = trunc <2 x i64> %A to <2 x i32>
@@ -794,9 +787,9 @@
   ret <2 x i64> %E
 }
 
-define i64 @test58(i64 %A) nounwind {
+define i64 @test58(i64 %A) {
 ; CHECK-LABEL: @test58(
-; CHECK-NEXT:    [[C:%.*]] = lshr i64 %A, 8
+; CHECK-NEXT:    [[C:%.*]] = lshr i64 [[A:%.*]], 8
 ; CHECK-NEXT:    [[D:%.*]] = and i64 [[C]], 16777087
 ; CHECK-NEXT:    [[E:%.*]] = or i64 [[D]], 128
 ; CHECK-NEXT:    ret i64 [[E]]
@@ -809,12 +802,12 @@
 
 }
 
-define i64 @test59(i8 %A, i8 %B) nounwind {
+define i64 @test59(i8 %A, i8 %B) {
 ; CHECK-LABEL: @test59(
-; CHECK-NEXT:    [[C:%.*]] = zext i8 %A to i64
+; CHECK-NEXT:    [[C:%.*]] = zext i8 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[D:%.*]] = shl nuw nsw i64 [[C]], 4
 ; CHECK-NEXT:    [[E:%.*]] = and i64 [[D]], 48
-; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 %B, 4
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 [[B:%.*]], 4
 ; CHECK-NEXT:    [[G:%.*]] = zext i8 [[TMP1]] to i64
 ; CHECK-NEXT:    [[H:%.*]] = or i64 [[E]], [[G]]
 ; CHECK-NEXT:    ret i64 [[H]]
@@ -829,76 +822,74 @@
   ret i64 %I
 }
 
-define <3 x i32> @test60(<4 x i32> %call4) nounwind {
+define <3 x i32> @test60(<4 x i32> %call4) {
 ; CHECK-LABEL: @test60(
-; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i32> %call4, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
-; CHECK-NEXT:    ret <3 x i32> [[TMP10]]
+; CHECK-NEXT:    [[P10:%.*]] = shufflevector <4 x i32> [[CALL4:%.*]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT:    ret <3 x i32> [[P10]]
 ;
-  %tmp11 = bitcast <4 x i32> %call4 to i128
-  %tmp9 = trunc i128 %tmp11 to i96
-  %tmp10 = bitcast i96 %tmp9 to <3 x i32>
-  ret <3 x i32> %tmp10
+  %p11 = bitcast <4 x i32> %call4 to i128
+  %p9 = trunc i128 %p11 to i96
+  %p10 = bitcast i96 %p9 to <3 x i32>
+  ret <3 x i32> %p10
 
 }
 
-define <4 x i32> @test61(<3 x i32> %call4) nounwind {
+define <4 x i32> @test61(<3 x i32> %call4) {
 ; CHECK-LABEL: @test61(
-; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <3 x i32> %call4, <3 x i32> <i32 0, i32 undef, i32 undef>, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    ret <4 x i32> [[TMP10]]
+; CHECK-NEXT:    [[P10:%.*]] = shufflevector <3 x i32> [[CALL4:%.*]], <3 x i32> <i32 0, i32 undef, i32 undef>, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x i32> [[P10]]
 ;
-  %tmp11 = bitcast <3 x i32> %call4 to i96
-  %tmp9 = zext i96 %tmp11 to i128
-  %tmp10 = bitcast i128 %tmp9 to <4 x i32>
-  ret <4 x i32> %tmp10
+  %p11 = bitcast <3 x i32> %call4 to i96
+  %p9 = zext i96 %p11 to i128
+  %p10 = bitcast i128 %p9 to <4 x i32>
+  ret <4 x i32> %p10
 }
 
-define <4 x i32> @test62(<3 x float> %call4) nounwind {
+define <4 x i32> @test62(<3 x float> %call4) {
 ; CHECK-LABEL: @test62(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <3 x float> %call4 to <3 x i32>
-; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <3 x i32> [[TMP1]], <3 x i32> <i32 0, i32 undef, i32 undef>, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    ret <4 x i32> [[TMP10]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <3 x float> [[CALL4:%.*]] to <3 x i32>
+; CHECK-NEXT:    [[P10:%.*]] = shufflevector <3 x i32> [[TMP1]], <3 x i32> <i32 0, i32 undef, i32 undef>, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x i32> [[P10]]
 ;
-  %tmp11 = bitcast <3 x float> %call4 to i96
-  %tmp9 = zext i96 %tmp11 to i128
-  %tmp10 = bitcast i128 %tmp9 to <4 x i32>
-  ret <4 x i32> %tmp10
+  %p11 = bitcast <3 x float> %call4 to i96
+  %p9 = zext i96 %p11 to i128
+  %p10 = bitcast i128 %p9 to <4 x i32>
+  ret <4 x i32> %p10
 }
 
 ; PR7311 - Don't create invalid IR on scalar->vector cast.
-define <2 x float> @test63(i64 %tmp8) nounwind {
+define <2 x float> @test63(i64 %t8) {
 ; CHECK-LABEL: @test63(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A:%.*]] = bitcast i64 %tmp8 to <2 x i32>
+; CHECK-NEXT:    [[A:%.*]] = bitcast i64 [[T8:%.*]] to <2 x i32>
 ; CHECK-NEXT:    [[VCVT_I:%.*]] = uitofp <2 x i32> [[A]] to <2 x float>
 ; CHECK-NEXT:    ret <2 x float> [[VCVT_I]]
 ;
-entry:
-  %a = bitcast i64 %tmp8 to <2 x i32>
+  %a = bitcast i64 %t8 to <2 x i32>
   %vcvt.i = uitofp <2 x i32> %a to <2 x float>
   ret <2 x float> %vcvt.i
 }
 
-define <4 x float> @test64(<4 x float> %c) nounwind {
+define <4 x float> @test64(<4 x float> %c) {
 ; CHECK-LABEL: @test64(
-; CHECK-NEXT:    ret <4 x float> %c
+; CHECK-NEXT:    ret <4 x float> [[C:%.*]]
 ;
   %t0 = bitcast <4 x float> %c to <4 x i32>
   %t1 = bitcast <4 x i32> %t0 to <4 x float>
   ret <4 x float> %t1
 }
 
-define <4 x float> @test65(<4 x float> %c) nounwind {
+define <4 x float> @test65(<4 x float> %c) {
 ; CHECK-LABEL: @test65(
-; CHECK-NEXT:    ret <4 x float> %c
+; CHECK-NEXT:    ret <4 x float> [[C:%.*]]
 ;
   %t0 = bitcast <4 x float> %c to <2 x double>
   %t1 = bitcast <2 x double> %t0 to <4 x float>
   ret <4 x float> %t1
 }
 
-define <2 x float> @test66(<2 x float> %c) nounwind {
+define <2 x float> @test66(<2 x float> %c) {
 ; CHECK-LABEL: @test66(
-; CHECK-NEXT:    ret <2 x float> %c
+; CHECK-NEXT:    ret <2 x float> [[C:%.*]]
 ;
   %t0 = bitcast <2 x float> %c to double
   %t1 = bitcast double %t0 to <2 x float>
@@ -912,18 +903,18 @@
   ret float extractelement (<2 x float> bitcast (double bitcast (<2 x float> <float -1.000000e+00, float -1.000000e+00> to double) to <2 x float>), i32 0)
 }
 
-define i64 @test_mmx(<2 x i32> %c) nounwind {
+define i64 @test_mmx(<2 x i32> %x) {
 ; CHECK-LABEL: @test_mmx(
-; CHECK-NEXT:    [[C:%.*]] = bitcast <2 x i32> %c to i64
+; CHECK-NEXT:    [[C:%.*]] = bitcast <2 x i32> [[X:%.*]] to i64
 ; CHECK-NEXT:    ret i64 [[C]]
 ;
-  %A = bitcast <2 x i32> %c to x86_mmx
+  %A = bitcast <2 x i32> %x to x86_mmx
   %B = bitcast x86_mmx %A to <2 x i32>
   %C = bitcast <2 x i32> %B to i64
   ret i64 %C
 }
 
-define i64 @test_mmx_const(<2 x i32> %c) nounwind {
+define i64 @test_mmx_const(<2 x i32> %c) {
 ; CHECK-LABEL: @test_mmx_const(
 ; CHECK-NEXT:    ret i64 0
 ;
@@ -938,8 +929,8 @@
 ; CHECK-LABEL: @test67(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %tmp2 = zext i1 %a to i32
-  %conv6 = xor i32 %tmp2, 1
+  %t2 = zext i1 %a to i32
+  %conv6 = xor i32 %t2, 1
   %and = and i32 %b, %conv6
   %sext = shl nuw nsw i32 %and, 24
   %neg.i = xor i32 %sext, -16777216
@@ -953,9 +944,9 @@
 
 define %s @test68(%s *%p, i64 %i) {
 ; CHECK-LABEL: @test68(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr %s, %s* %p, i64 %i
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s* [[PP1]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [[S:%.*]], %s* [[P:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT:    [[L:%.*]] = load [[S]], %s* [[PP1]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
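 ; (In the gep folds in this test and the ones below, the byte offset is
 ; divided by the size of %s, a 12-byte struct declared near the top of
 ; this file: here %i * 12 bytes becomes %i elements, and in @test70
 ; below %i * 36 bytes becomes %i * 3 elements.)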
   %o = mul i64 %i, 12
   %q = bitcast %s* %p to i8*
@@ -968,9 +959,9 @@
 ; addrspacecasts should be eliminated.
 define %s @test68_addrspacecast(%s* %p, i64 %i) {
 ; CHECK-LABEL: @test68_addrspacecast(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr %s, %s* %p, i64 %i
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s* [[PP1]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [[S:%.*]], %s* [[P:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT:    [[L:%.*]] = load [[S]], %s* [[PP1]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
   %o = mul i64 %i, 12
   %q = addrspacecast %s* %p to i8 addrspace(2)*
@@ -982,10 +973,10 @@
 
 define %s @test68_addrspacecast_2(%s* %p, i64 %i) {
 ; CHECK-LABEL: @test68_addrspacecast_2(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr %s, %s* %p, i64 %i
-; CHECK-NEXT:    [[R:%.*]] = addrspacecast %s* [[PP1]] to %s addrspace(1)*
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s addrspace(1)* [[R]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [[S:%.*]], %s* [[P:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = addrspacecast %s* [[PP1]] to [[S]] addrspace(1)*
+; CHECK-NEXT:    [[L:%.*]] = load [[S]], [[S]] addrspace(1)* [[R]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
   %o = mul i64 %i, 12
   %q = addrspacecast %s* %p to i8 addrspace(2)*
@@ -997,9 +988,9 @@
 
 define %s @test68_as1(%s addrspace(1)* %p, i32 %i) {
 ; CHECK-LABEL: @test68_as1(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr %s, %s addrspace(1)* %p, i32 %i
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s addrspace(1)* [[PP1]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [[S:%.*]], [[S]] addrspace(1)* [[P:%.*]], i32 [[I:%.*]]
+; CHECK-NEXT:    [[L:%.*]] = load [[S]], [[S]] addrspace(1)* [[PP1]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
   %o = mul i32 %i, 12
   %q = bitcast %s addrspace(1)* %p to i8 addrspace(1)*
@@ -1011,7 +1002,7 @@
 
 define double @test69(double *%p, i64 %i) {
 ; CHECK-LABEL: @test69(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds double, double* %p, i64 %i
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 [[I:%.*]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
@@ -1025,10 +1016,10 @@
 
 define %s @test70(%s *%p, i64 %i) {
 ; CHECK-LABEL: @test70(
-; CHECK-NEXT:    [[O:%.*]] = mul nsw i64 %i, 3
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds %s, %s* %p, i64 [[O]]
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s* [[PP1]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[O:%.*]] = mul nsw i64 [[I:%.*]], 3
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds [[S:%.*]], %s* [[P:%.*]], i64 [[O]]
+; CHECK-NEXT:    [[L:%.*]] = load [[S]], %s* [[PP1]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
   %o = mul nsw i64 %i, 36
   %q = bitcast %s* %p to i8*
@@ -1040,8 +1031,8 @@
 
 define double @test71(double *%p, i64 %i) {
 ; CHECK-LABEL: @test71(
-; CHECK-NEXT:    [[O:%.*]] = shl i64 %i, 2
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr double, double* %p, i64 [[O]]
+; CHECK-NEXT:    [[O:%.*]] = shl i64 [[I:%.*]], 2
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr double, double* [[P:%.*]], i64 [[O]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
@@ -1055,8 +1046,8 @@
 
 define double @test72(double *%p, i32 %i) {
 ; CHECK-LABEL: @test72(
-; CHECK-NEXT:    [[O:%.*]] = sext i32 %i to i64
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds double, double* %p, i64 [[O]]
+; CHECK-NEXT:    [[O:%.*]] = sext i32 [[I:%.*]] to i64
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 [[O]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
@@ -1071,8 +1062,8 @@
 
 define double @test73(double *%p, i128 %i) {
 ; CHECK-LABEL: @test73(
-; CHECK-NEXT:    [[O:%.*]] = trunc i128 %i to i64
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr double, double* %p, i64 [[O]]
+; CHECK-NEXT:    [[I_TR:%.*]] = trunc i128 [[I:%.*]] to i64
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr double, double* [[P:%.*]], i64 [[I_TR]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
@@ -1087,7 +1078,7 @@
 
 define double @test74(double *%p, i64 %i) {
 ; CHECK-LABEL: @test74(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds double, double* %p, i64 %i
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 [[I:%.*]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
@@ -1100,9 +1091,9 @@
 
 define i32* @test75(i32* %p, i32 %x) {
 ; CHECK-LABEL: @test75(
-; CHECK-NEXT:    [[Y:%.*]] = shl i32 %x, 3
+; CHECK-NEXT:    [[Y:%.*]] = shl i32 [[X:%.*]], 3
 ; CHECK-NEXT:    [[Z:%.*]] = sext i32 [[Y]] to i64
-; CHECK-NEXT:    [[Q:%.*]] = bitcast i32* %p to i8*
+; CHECK-NEXT:    [[Q:%.*]] = bitcast i32* [[P:%.*]] to i8*
 ; CHECK-NEXT:    [[R:%.*]] = getelementptr i8, i8* [[Q]], i64 [[Z]]
 ; CHECK-NEXT:    [[S:%.*]] = bitcast i8* [[R]] to i32*
 ; CHECK-NEXT:    ret i32* [[S]]
@@ -1117,10 +1108,10 @@
 
 define %s @test76(%s *%p, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test76(
-; CHECK-NEXT:    [[O2:%.*]] = mul i64 %i, %j
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr %s, %s* %p, i64 [[O2]]
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s* [[PP1]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[O2:%.*]] = mul i64 [[I:%.*]], [[J:%.*]]
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [[S:%.*]], %s* [[P:%.*]], i64 [[O2]]
+; CHECK-NEXT:    [[L:%.*]] = load [[S]], %s* [[PP1]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
   %o = mul i64 %i, 12
   %o2 = mul nsw i64 %o, %j
@@ -1133,11 +1124,11 @@
 
 define %s @test77(%s *%p, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test77(
-; CHECK-NEXT:    [[O:%.*]] = mul nsw i64 %i, 3
-; CHECK-NEXT:    [[O2:%.*]] = mul nsw i64 [[O]], %j
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds %s, %s* %p, i64 [[O2]]
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s* [[PP1]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[O:%.*]] = mul nsw i64 [[I:%.*]], 3
+; CHECK-NEXT:    [[O2:%.*]] = mul nsw i64 [[O]], [[J:%.*]]
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr inbounds [[S:%.*]], %s* [[P:%.*]], i64 [[O2]]
+; CHECK-NEXT:    [[L:%.*]] = load [[S]], %s* [[PP1]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
   %o = mul nsw i64 %i, 36
   %o2 = mul nsw i64 %o, %j
@@ -1150,17 +1141,17 @@
 
 define %s @test78(%s *%p, i64 %i, i64 %j, i32 %k, i32 %l, i128 %m, i128 %n) {
 ; CHECK-LABEL: @test78(
-; CHECK-NEXT:    [[A:%.*]] = mul nsw i32 %k, 3
-; CHECK-NEXT:    [[B:%.*]] = mul nsw i32 [[A]], %l
+; CHECK-NEXT:    [[A:%.*]] = mul nsw i32 [[K:%.*]], 3
+; CHECK-NEXT:    [[B:%.*]] = mul nsw i32 [[A]], [[L:%.*]]
 ; CHECK-NEXT:    [[C:%.*]] = sext i32 [[B]] to i128
-; CHECK-NEXT:    [[D:%.*]] = mul nsw i128 [[C]], %m
-; CHECK-NEXT:    [[E:%.*]] = mul i128 [[D]], %n
+; CHECK-NEXT:    [[D:%.*]] = mul nsw i128 [[C]], [[M:%.*]]
+; CHECK-NEXT:    [[E:%.*]] = mul i128 [[D]], [[N:%.*]]
 ; CHECK-NEXT:    [[F:%.*]] = trunc i128 [[E]] to i64
-; CHECK-NEXT:    [[G:%.*]] = mul i64 [[F]], %i
-; CHECK-NEXT:    [[H:%.*]] = mul i64 [[G]], %j
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr %s, %s* %p, i64 [[H]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load %s, %s* [[PP1]], align 4
-; CHECK-NEXT:    ret %s [[LOAD]]
+; CHECK-NEXT:    [[G:%.*]] = mul i64 [[F]], [[I:%.*]]
+; CHECK-NEXT:    [[H:%.*]] = mul i64 [[G]], [[J:%.*]]
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [[S:%.*]], %s* [[P:%.*]], i64 [[H]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load [[S]], %s* [[PP1]], align 4
+; CHECK-NEXT:    ret [[S]] %load
 ;
   %a = mul nsw i32 %k, 36
   %b = mul nsw i32 %a, %l
@@ -1179,15 +1170,15 @@
 
 define %s @test79(%s *%p, i64 %i, i32 %j) {
 ; CHECK-LABEL: @test79(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 %i to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
 ; CHECK-NEXT:    [[B:%.*]] = mul i32 [[TMP1]], 36
-; CHECK-NEXT:    [[C:%.*]] = mul i32 [[B]], %j
-; CHECK-NEXT:    [[Q:%.*]] = bitcast %s* %p to i8*
+; CHECK-NEXT:    [[C:%.*]] = mul i32 [[B]], [[J:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = bitcast %s* [[P:%.*]] to i8*
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[C]] to i64
 ; CHECK-NEXT:    [[PP:%.*]] = getelementptr inbounds i8, i8* [[Q]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[R:%.*]] = bitcast i8* [[PP]] to %s*
-; CHECK-NEXT:    [[L:%.*]] = load %s, %s* [[R]], align 4
-; CHECK-NEXT:    ret %s [[L]]
+; CHECK-NEXT:    [[L:%.*]] = load [[S:%.*]], %s* [[R]], align 4
+; CHECK-NEXT:    ret [[S]] %l
 ;
   %a = mul nsw i64 %i, 36
   %b = trunc i64 %a to i32
@@ -1201,14 +1192,14 @@
 
 define double @test80([100 x double]* %p, i32 %i) {
 ; CHECK-LABEL: @test80(
-; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 %i to i64
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double]* %p, i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double]* [[P:%.*]], i64 0, i64 [[TMP1]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
-  %tmp = shl nsw i32 %i, 3
+  %t = shl nsw i32 %i, 3
   %q = bitcast [100 x double]* %p to i8*
-  %pp = getelementptr i8, i8* %q, i32 %tmp
+  %pp = getelementptr i8, i8* %q, i32 %t
   %r = bitcast i8* %pp to double*
   %l = load double, double* %r
   ret double %l
@@ -1216,13 +1207,13 @@
 
 define double @test80_addrspacecast([100 x double] addrspace(1)* %p, i32 %i) {
 ; CHECK-LABEL: @test80_addrspacecast(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double] addrspace(1)* %p, i32 0, i32 %i
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double] addrspace(1)* [[P:%.*]], i32 0, i32 [[I:%.*]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double addrspace(1)* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
-  %tmp = shl nsw i32 %i, 3
+  %t = shl nsw i32 %i, 3
   %q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
-  %pp = getelementptr i8, i8 addrspace(2)* %q, i32 %tmp
+  %pp = getelementptr i8, i8 addrspace(2)* %q, i32 %t
   %r = addrspacecast i8 addrspace(2)* %pp to double addrspace(1)*
   %l = load double, double addrspace(1)* %r
   ret double %l
@@ -1230,14 +1221,14 @@
 
 define double @test80_addrspacecast_2([100 x double] addrspace(1)* %p, i32 %i) {
 ; CHECK-LABEL: @test80_addrspacecast_2(
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double] addrspace(1)* %p, i32 0, i32 %i
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double] addrspace(1)* [[P:%.*]], i32 0, i32 [[I:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = addrspacecast double addrspace(1)* [[PP1]] to double addrspace(3)*
 ; CHECK-NEXT:    [[L:%.*]] = load double, double addrspace(3)* [[R]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
-  %tmp = shl nsw i32 %i, 3
+  %t = shl nsw i32 %i, 3
   %q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
-  %pp = getelementptr i8, i8 addrspace(2)* %q, i32 %tmp
+  %pp = getelementptr i8, i8 addrspace(2)* %q, i32 %t
   %r = addrspacecast i8 addrspace(2)* %pp to double addrspace(3)*
   %l = load double, double addrspace(3)* %r
   ret double %l
@@ -1245,14 +1236,14 @@
 
 define double @test80_as1([100 x double] addrspace(1)* %p, i16 %i) {
 ; CHECK-LABEL: @test80_as1(
-; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 %i to i32
-; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double] addrspace(1)* %p, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[I:%.*]] to i32
+; CHECK-NEXT:    [[PP1:%.*]] = getelementptr [100 x double], [100 x double] addrspace(1)* [[P:%.*]], i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[L:%.*]] = load double, double addrspace(1)* [[PP1]], align 8
 ; CHECK-NEXT:    ret double [[L]]
 ;
-  %tmp = shl nsw i16 %i, 3
+  %t = shl nsw i16 %i, 3
   %q = bitcast [100 x double] addrspace(1)* %p to i8 addrspace(1)*
-  %pp = getelementptr i8, i8 addrspace(1)* %q, i16 %tmp
+  %pp = getelementptr i8, i8 addrspace(1)* %q, i16 %t
   %r = bitcast i8 addrspace(1)* %pp to double addrspace(1)*
   %l = load double, double addrspace(1)* %r
   ret double %l
@@ -1260,8 +1251,8 @@
 
 define double @test81(double *%p, float %f) {
 ; CHECK-LABEL: @test81(
-; CHECK-NEXT:    [[I:%.*]] = fptosi float %f to i64
-; CHECK-NEXT:    [[Q:%.*]] = bitcast double* %p to i8*
+; CHECK-NEXT:    [[I:%.*]] = fptosi float [[F:%.*]] to i64
+; CHECK-NEXT:    [[Q:%.*]] = bitcast double* [[P:%.*]] to i8*
 ; CHECK-NEXT:    [[PP:%.*]] = getelementptr i8, i8* [[Q]], i64 [[I]]
 ; CHECK-NEXT:    [[R:%.*]] = bitcast i8* [[PP]] to double*
 ; CHECK-NEXT:    [[L:%.*]] = load double, double* [[R]], align 8
@@ -1275,9 +1266,9 @@
   ret double %l
 }
 
-define i64 @test82(i64 %A) nounwind {
+define i64 @test82(i64 %A) {
 ; CHECK-LABEL: @test82(
-; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 %A, 1
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[A:%.*]], 1
 ; CHECK-NEXT:    [[E:%.*]] = and i64 [[TMP1]], 4294966784
 ; CHECK-NEXT:    ret i64 [[E]]
 ;
@@ -1291,8 +1282,8 @@
 ; PR15959
 define i64 @test83(i16 %a, i64 %k) {
 ; CHECK-LABEL: @test83(
-; CHECK-NEXT:    [[CONV:%.*]] = sext i16 %a to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 %k to i32
+; CHECK-NEXT:    [[CONV:%.*]] = sext i16 [[A:%.*]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[K:%.*]] to i32
 ; CHECK-NEXT:    [[SH_PROM:%.*]] = add i32 [[TMP1]], -1
 ; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[CONV]], [[SH_PROM]]
 ; CHECK-NEXT:    [[SH_PROM1:%.*]] = zext i32 [[SHL]] to i64
@@ -1308,7 +1299,7 @@
 
 define i8 @test84(i32 %a) {
 ; CHECK-LABEL: @test84(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 %a, 2130706432
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[A:%.*]], 2130706432
 ; CHECK-NEXT:    [[SHR:%.*]] = lshr exact i32 [[ADD]], 23
 ; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[SHR]] to i8
 ; CHECK-NEXT:    ret i8 [[TRUNC]]
@@ -1321,7 +1312,7 @@
 
 define i8 @test85(i32 %a) {
 ; CHECK-LABEL: @test85(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 %a, 2130706432
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[A:%.*]], 2130706432
 ; CHECK-NEXT:    [[SHR:%.*]] = lshr exact i32 [[ADD]], 23
 ; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[SHR]] to i8
 ; CHECK-NEXT:    ret i8 [[TRUNC]]
@@ -1334,8 +1325,8 @@
 
 define i16 @test86(i16 %v) {
 ; CHECK-LABEL: @test86(
-; CHECK-NEXT:    [[S1:%.*]] = ashr i16 %v, 4
-; CHECK-NEXT:    ret i16 [[S1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i16 [[V:%.*]], 4
+; CHECK-NEXT:    ret i16 [[TMP1]]
 ;
   %a = sext i16 %v to i32
   %s = ashr i32 %a, 4
@@ -1345,8 +1336,8 @@
 
 define i16 @test87(i16 %v) {
 ; CHECK-LABEL: @test87(
-; CHECK-NEXT:    [[A1:%.*]] = ashr i16 %v, 12
-; CHECK-NEXT:    ret i16 [[A1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i16 [[V:%.*]], 12
+; CHECK-NEXT:    ret i16 [[TMP1]]
 ;
   %c = sext i16 %v to i32
   %m = mul nsw i32 %c, 16
@@ -1357,7 +1348,7 @@
 
 define i16 @test88(i16 %v) {
 ; CHECK-LABEL: @test88(
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr i16 %v, 15
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i16 [[V:%.*]], 15
 ; CHECK-NEXT:    ret i16 [[TMP1]]
 ;
   %a = sext i16 %v to i32
@@ -1368,7 +1359,7 @@
 
 define i32 @PR21388(i32* %v) {
 ; CHECK-LABEL: @PR21388(
-; CHECK-NEXT:    [[ICMP:%.*]] = icmp slt i32* %v, null
+; CHECK-NEXT:    [[ICMP:%.*]] = icmp slt i32* [[V:%.*]], null
 ; CHECK-NEXT:    [[SEXT:%.*]] = sext i1 [[ICMP]] to i32
 ; CHECK-NEXT:    ret i32 [[SEXT]]
 ;
@@ -1379,7 +1370,7 @@
 
 define float @sitofp_zext(i16 %a) {
 ; CHECK-LABEL: @sitofp_zext(
-; CHECK-NEXT:    [[SITOFP:%.*]] = uitofp i16 %a to float
+; CHECK-NEXT:    [[SITOFP:%.*]] = uitofp i16 [[A:%.*]] to float
 ; CHECK-NEXT:    ret float [[SITOFP]]
 ;
   %zext = zext i16 %a to i32
@@ -1389,7 +1380,7 @@
 
 define i1 @PR23309(i32 %A, i32 %B) {
 ; CHECK-LABEL: @PR23309(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 %A, %B
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[SUB]], 1
 ; CHECK-NEXT:    [[TRUNC:%.*]] = icmp ne i32 [[TMP1]], 0
 ; CHECK-NEXT:    ret i1 [[TRUNC]]
@@ -1402,7 +1393,7 @@
 
 define i1 @PR23309v2(i32 %A, i32 %B) {
 ; CHECK-LABEL: @PR23309v2(
-; CHECK-NEXT:    [[SUB:%.*]] = add i32 %A, %B
+; CHECK-NEXT:    [[SUB:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[SUB]], 1
 ; CHECK-NEXT:    [[TRUNC:%.*]] = icmp ne i32 [[TMP1]], 0
 ; CHECK-NEXT:    ret i1 [[TRUNC]]
@@ -1415,7 +1406,7 @@
 
 define i16 @PR24763(i8 %V) {
 ; CHECK-LABEL: @PR24763(
-; CHECK-NEXT:    [[L:%.*]] = ashr i8 %V, 1
+; CHECK-NEXT:    [[L:%.*]] = ashr i8 [[V:%.*]], 1
 ; CHECK-NEXT:    [[T:%.*]] = sext i8 [[L]] to i16
 ; CHECK-NEXT:    ret i16 [[T]]
 ;
@@ -1428,7 +1419,7 @@
 define i64 @PR28745() {
 ; CHECK-LABEL: @PR28745(
 ; CHECK-NEXT:    ret i64 1
-
+;
   %b = zext i32 extractvalue ({ i32 } select (i1 icmp eq (i16 extractelement (<2 x i16> bitcast (<1 x i32> <i32 1> to <2 x i16>), i32 0), i16 0), { i32 } { i32 1 }, { i32 } zeroinitializer), 0) to i64
   ret i64 %b
 }
@@ -1436,20 +1427,22 @@
 define i32 @test89() {
 ; CHECK-LABEL: @test89(
 ; CHECK-NEXT:    ret i32 393216
+;
   ret i32 bitcast (<2 x i16> <i16 6, i16 undef> to i32)
 }
 
 define <2 x i32> @test90() {
 ; CHECK-LABEL: @test90(
-; CHECK: ret <2 x i32> <i32 0, i32 15360>
-  %tmp6 = bitcast <4 x half> <half undef, half undef, half undef, half 0xH3C00> to <2 x i32>
-  ret <2 x i32> %tmp6
+; CHECK-NEXT:    ret <2 x i32> <i32 0, i32 15360>
+;
+  %t6 = bitcast <4 x half> <half undef, half undef, half undef, half 0xH3C00> to <2 x i32>
+  ret <2 x i32> %t6
 }
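 ; (A note on the constants above, assuming the big-endian target
 ; datalayout declared at the top of this file, outside this hunk: in
 ; @test89 the i16 6 lands in the high half-word, 6 * 65536 = 393216,
 ; i.e. 0x00060000; in @test90 the half 0xH3C00 is 1.0, whose bit
 ; pattern is 0x3C00 = 15360.)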
 
 ; Do not optimize to ashr i64 (shift by 48 > 96 - 64)
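 ; (A sketch of why: sext i64->i96 copies bit 63 into bits 64..95; an
 ; lshr by 48 followed by trunc to i64 keeps bits 48..111, where bits
 ; 96..111 are the zeros shifted in, so the top 16 result bits are zero
 ; rather than sign copies, unlike ashr i64 by 48. The fold is valid
 ; only for shifts <= 96 - 64 = 32, as in @test92 below.)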
 define i64 @test91(i64 %A) {
 ; CHECK-LABEL: @test91(
-; CHECK-NEXT:    [[B:%.*]] = sext i64 %A to i96
+; CHECK-NEXT:    [[B:%.*]] = sext i64 [[A:%.*]] to i96
 ; CHECK-NEXT:    [[C:%.*]] = lshr i96 [[B]], 48
 ; CHECK-NEXT:    [[D:%.*]] = trunc i96 [[C]] to i64
 ; CHECK-NEXT:    ret i64 [[D]]
@@ -1463,8 +1456,8 @@
 ; Do optimize to ashr i64 (shift by 32 <= 96 - 64)
 define i64 @test92(i64 %A) {
 ; CHECK-LABEL: @test92(
-; CHECK-NEXT:    [[C:%.*]] = ashr i64 %A, 32
-; CHECK-NEXT:    ret i64 [[C]]
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i64 [[A:%.*]], 32
+; CHECK-NEXT:    ret i64 [[TMP1]]
 ;
   %B = sext i64 %A to i96
   %C = lshr i96 %B, 32
@@ -1475,8 +1468,8 @@
 ; When optimizing to ashr i32, don't shift by more than 31.
 define i32 @test93(i32 %A) {
 ; CHECK-LABEL: @test93(
-; CHECK-NEXT:    [[C:%.*]] = ashr i32 %A, 31
-; CHECK-NEXT:    ret i32 [[C]]
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[A:%.*]], 31
+; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %B = sext i32 %A to i96
   %C = lshr i96 %B, 64
@@ -1489,8 +1482,8 @@
 
 define i8 @pr33078_1(i8 %A) {
 ; CHECK-LABEL: @pr33078_1(
-; CHECK-NEXT:    [[C:%.*]] = ashr i8 [[A:%.*]], 7
-; CHECK-NEXT:    ret i8 [[C]]
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i8 [[A:%.*]], 7
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %B = sext i8 %A to i16
   %C = lshr i16 %B, 8
@@ -1526,10 +1519,11 @@
 define i8 @pr33078_4(i3 %x) {
 ; Don't turn this into an `ashr`. This was getting miscompiled.
 ; CHECK-LABEL: @pr33078_4(
-; CHECK-NEXT:    [[B:%.*]] = sext i3 %x to i16
+; CHECK-NEXT:    [[B:%.*]] = sext i3 [[X:%.*]] to i16
 ; CHECK-NEXT:    [[C:%.*]] = lshr i16 [[B]], 13
 ; CHECK-NEXT:    [[D:%.*]] = trunc i16 [[C]] to i8
 ; CHECK-NEXT:    ret i8 [[D]]
+;
   %B = sext i3 %x to i16
   %C = lshr i16 %B, 13
   %D = trunc i16 %C to i8
diff --git a/test/Transforms/InstCombine/cmp-intrinsic.ll b/test/Transforms/InstCombine/cmp-intrinsic.ll
index 7fc1d12..d9199ce 100644
--- a/test/Transforms/InstCombine/cmp-intrinsic.ll
+++ b/test/Transforms/InstCombine/cmp-intrinsic.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
 declare i16 @llvm.bswap.i16(i16)
@@ -13,7 +14,7 @@
 
 define i1 @bswap_eq_i16(i16 %x) {
 ; CHECK-LABEL: @bswap_eq_i16(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 %x, 256
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[X:%.*]], 256
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %bs = call i16 @llvm.bswap.i16(i16 %x)
@@ -23,7 +24,7 @@
 
 define i1 @bswap_ne_i32(i32 %x) {
 ; CHECK-LABEL: @bswap_ne_i32(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 %x, 33554432
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[X:%.*]], 33554432
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %bs = tail call i32 @llvm.bswap.i32(i32 %x)
@@ -33,7 +34,7 @@
 
 define <2 x i1> @bswap_eq_v2i64(<2 x i64> %x) {
 ; CHECK-LABEL: @bswap_eq_v2i64(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i64> %x, <i64 216172782113783808, i64 216172782113783808>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i64> [[X:%.*]], <i64 216172782113783808, i64 216172782113783808>
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %bs = tail call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %x)
@@ -43,7 +44,7 @@
 
 define i1 @ctlz_eq_bitwidth_i32(i32 %x) {
 ; CHECK-LABEL: @ctlz_eq_bitwidth_i32(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 %x, 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
@@ -51,9 +52,84 @@
   ret i1 %cmp
 }
 
+define i1 @ctlz_eq_zero_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_eq_zero_i32(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp eq i32 %lz, 0
+  ret i1 %cmp
+}
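+
+; A sketch of the reasoning for the folds here: ctlz(x) == 0 exactly when
+; the top bit of x is set, i.e. x is negative as a signed value (icmp slt
+; x, 0); ctlz(x) != 0 (or ugt 0) means the top bit is clear (icmp sgt x,
+; -1); and ctlz(x) ult 1 is the same test as == 0.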
+
+define <2 x i1> @ctlz_ne_zero_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @ctlz_ne_zero_v2i32(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i32> [[A:%.*]], <i32 -1, i32 -1>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %x = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
+  %cmp = icmp ne <2 x i32> %x, zeroinitializer
+  ret <2 x i1> %cmp
+}
+
+define i1 @ctlz_eq_bw_minus_1_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_eq_bw_minus_1_i32(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 1
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp eq i32 %lz, 31
+  ret i1 %cmp
+}
+
+define <2 x i1> @ctlz_ne_bw_minus_1_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @ctlz_ne_bw_minus_1_v2i32(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[A:%.*]], <i32 1, i32 1>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %x = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
+  %cmp = icmp ne <2 x i32> %x, <i32 31, i32 31>
+  ret <2 x i1> %cmp
+}
+
+define i1 @ctlz_eq_other_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_eq_other_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -128
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 128
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp eq i32 %lz, 24
+  ret i1 %cmp
+}
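+
+; Worked arithmetic for @ctlz_eq_other_i32 above: ctlz(x) == 24 on i32
+; means the highest set bit is bit 31 - 24 = 7, i.e. bits 8..31 clear and
+; bit 7 set: (x & 0xFFFFFF80) == 0x80, printed as (x & -128) == 128.
+; Likewise @ctlz_eq_bw_minus_1_i32 earlier reduces ctlz(x) == 31 to
+; x == 1.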
+
+define <2 x i1> @ctlz_ne_other_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @ctlz_ne_other_v2i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 -128, i32 -128>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[TMP1]], <i32 128, i32 128>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %x = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
+  %cmp = icmp ne <2 x i32> %x, <i32 24, i32 24>
+  ret <2 x i1> %cmp
+}
+
+define i1 @ctlz_eq_other_i32_multiuse(i32 %x, i32* %p) {
+; CHECK-LABEL: @ctlz_eq_other_i32_multiuse(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    store i32 [[LZ]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[LZ]], 24
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  store i32 %lz, i32* %p
+  %cmp = icmp eq i32 %lz, 24
+  ret i1 %cmp
+}
+
 define <2 x i1> @ctlz_ne_bitwidth_v2i32(<2 x i32> %a) {
 ; CHECK-LABEL: @ctlz_ne_bitwidth_v2i32(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> %a, zeroinitializer
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[A:%.*]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %x = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
@@ -61,9 +137,121 @@
   ret <2 x i1> %cmp
 }
 
+define i1 @ctlz_ugt_zero_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_ugt_zero_i32(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], -1
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp ugt i32 %lz, 0
+  ret i1 %cmp
+}
+
+define i1 @ctlz_ugt_one_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_ugt_one_i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[LZ]], 1
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp ugt i32 %lz, 1
+  ret i1 %cmp
+}
+
+define i1 @ctlz_ugt_other_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_ugt_other_i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[LZ]], 16
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp ugt i32 %lz, 16
+  ret i1 %cmp
+}
+
+define i1 @ctlz_ugt_other_multiuse_i32(i32 %x, i32* %p) {
+; CHECK-LABEL: @ctlz_ugt_other_multiuse_i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    store i32 [[LZ]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[LZ]], 16
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  store i32 %lz, i32* %p
+  %cmp = icmp ugt i32 %lz, 16
+  ret i1 %cmp
+}
+
+define i1 @ctlz_ugt_bw_minus_one_i32(i32 %x) {
+; CHECK-LABEL: @ctlz_ugt_bw_minus_one_i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[LZ]], 31
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp ugt i32 %lz, 31
+  ret i1 %cmp
+}
+
+define <2 x i1> @ctlz_ult_one_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @ctlz_ult_one_v2i32(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i32> [[X:%.*]], zeroinitializer
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %lz = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %lz, <i32 1, i32 1>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @ctlz_ult_other_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @ctlz_ult_other_v2i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[LZ]], <i32 16, i32 16>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %lz = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %lz, <i32 16, i32 16>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @ctlz_ult_other_multiuse_v2i32(<2 x i32> %x, <2 x i32>* %p) {
+; CHECK-LABEL: @ctlz_ult_other_multiuse_v2i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    store <2 x i32> [[LZ]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[LZ]], <i32 16, i32 16>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %lz = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 false)
+  store <2 x i32> %lz, <2 x i32>* %p
+  %cmp = icmp ult <2 x i32> %lz, <i32 16, i32 16>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @ctlz_ult_bw_minus_one_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @ctlz_ult_bw_minus_one_v2i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[LZ]], <i32 31, i32 31>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %lz = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %lz, <i32 31, i32 31>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @ctlz_ult_bitwidth_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @ctlz_ult_bitwidth_v2i32(
+; CHECK-NEXT:    [[LZ:%.*]] = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[LZ]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %lz = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %lz, <i32 32, i32 32>
+  ret <2 x i1> %cmp
+}
+
 define i1 @cttz_ne_bitwidth_i33(i33 %x) {
 ; CHECK-LABEL: @cttz_ne_bitwidth_i33(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i33 %x, 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i33 [[X:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
@@ -73,7 +261,7 @@
 
 define <2 x i1> @cttz_eq_bitwidth_v2i32(<2 x i32> %a) {
 ; CHECK-LABEL: @cttz_eq_bitwidth_v2i32(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> %a, zeroinitializer
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[A:%.*]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %x = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false)
@@ -81,9 +269,199 @@
   ret <2 x i1> %cmp
 }
 
+define i1 @cttz_eq_zero_i33(i33 %x) {
+; CHECK-LABEL: @cttz_eq_zero_i33(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i33 [[X:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i33 [[TMP1]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  %cmp = icmp eq i33 %tz, 0
+  ret i1 %cmp
+}
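+
+; cttz(x) == 0 only tests the lowest bit, so the fold above checks
+; (x & 1) != 0 and @cttz_ne_zero_v2i32 below checks the inverse; the
+; ult-1 variant further down reduces the same test to a trunc to i1.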
+
+define <2 x i1> @cttz_ne_zero_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @cttz_ne_zero_v2i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 1, i32 1>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %x = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false)
+  %cmp = icmp ne <2 x i32> %x, zeroinitializer
+  ret <2 x i1> %cmp
+}
+
+define i1 @cttz_eq_bw_minus_1_i33(i33 %x) {
+; CHECK-LABEL: @cttz_eq_bw_minus_1_i33(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i33 [[X:%.*]], -4294967296
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  %cmp = icmp eq i33 %tz, 32
+  ret i1 %cmp
+}
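+
+; Worked arithmetic for @cttz_eq_bw_minus_1_i33 above: cttz(x) == 32
+; forces bits 0..31 clear and bit 32 set, so x == 2^32 = 4294967296;
+; bit 32 is the sign bit of an i33, so the constant prints as
+; -4294967296.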
+
+define <2 x i1> @cttz_ne_bw_minus_1_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @cttz_ne_bw_minus_1_v2i32(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[A:%.*]], <i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %x = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false)
+  %cmp = icmp ne <2 x i32> %x, <i32 31, i32 31>
+  ret <2 x i1> %cmp
+}
+
+define i1 @cttz_eq_other_i33(i33 %x) {
+; CHECK-LABEL: @cttz_eq_other_i33(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i33 [[X:%.*]], 31
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i33 [[TMP1]], 16
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  %cmp = icmp eq i33 %tz, 4
+  ret i1 %cmp
+}
+
+define <2 x i1> @cttz_ne_other_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: @cttz_ne_other_v2i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 31, i32 31>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[TMP1]], <i32 16, i32 16>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %x = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false)
+  %cmp = icmp ne <2 x i32> %x, <i32 4, i32 4>
+  ret <2 x i1> %cmp
+}
+
+define i1 @cttz_eq_other_i33_multiuse(i33 %x, i33* %p) {
+; CHECK-LABEL: @cttz_eq_other_i33_multiuse(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call i33 @llvm.cttz.i33(i33 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    store i33 [[TZ]], i33* [[P:%.*]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i33 [[TZ]], 4
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  store i33 %tz, i33* %p
+  %cmp = icmp eq i33 %tz, 4
+  ret i1 %cmp
+}
+
+define i1 @cttz_ugt_zero_i33(i33 %x) {
+; CHECK-LABEL: @cttz_ugt_zero_i33(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i33 [[X:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i33 [[TMP1]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  %cmp = icmp ugt i33 %tz, 0
+  ret i1 %cmp
+}
+
+define i1 @cttz_ugt_one_i33(i33 %x) {
+; CHECK-LABEL: @cttz_ugt_one_i33(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call i33 @llvm.cttz.i33(i33 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i33 [[TZ]], 1
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  %cmp = icmp ugt i33 %tz, 1
+  ret i1 %cmp
+}
+
+define i1 @cttz_ugt_other_i33(i33 %x) {
+; CHECK-LABEL: @cttz_ugt_other_i33(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call i33 @llvm.cttz.i33(i33 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i33 [[TZ]], 16
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  %cmp = icmp ugt i33 %tz, 16
+  ret i1 %cmp
+}
+
+define i1 @cttz_ugt_other_multiuse_i33(i33 %x, i33* %p) {
+; CHECK-LABEL: @cttz_ugt_other_multiuse_i33(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call i33 @llvm.cttz.i33(i33 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    store i33 [[TZ]], i33* [[P:%.*]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i33 [[TZ]], 16
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  store i33 %tz, i33* %p
+  %cmp = icmp ugt i33 %tz, 16
+  ret i1 %cmp
+}
+
+define i1 @cttz_ugt_bw_minus_one_i33(i33 %x) {
+; CHECK-LABEL: @cttz_ugt_bw_minus_one_i33(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call i33 @llvm.cttz.i33(i33 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i33 [[TZ]], 32
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
+  %cmp = icmp ugt i33 %tz, 32
+  ret i1 %cmp
+}
+
+define <2 x i1> @cttz_ult_one_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @cttz_ult_one_v2i32(
+; CHECK-NEXT:    [[CMP:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i1>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %tz = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %tz, <i32 1, i32 1>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @cttz_ult_other_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @cttz_ult_other_v2i32(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[TZ]], <i32 16, i32 16>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %tz = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %tz, <i32 16, i32 16>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @cttz_ult_other_multiuse_v2i32(<2 x i32> %x, <2 x i32>* %p) {
+; CHECK-LABEL: @cttz_ult_other_multiuse_v2i32(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    store <2 x i32> [[TZ]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[TZ]], <i32 16, i32 16>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %tz = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 false)
+  store <2 x i32> %tz, <2 x i32>* %p
+  %cmp = icmp ult <2 x i32> %tz, <i32 16, i32 16>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @cttz_ult_bw_minus_one_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @cttz_ult_bw_minus_one_v2i32(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[TZ]], <i32 31, i32 31>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %tz = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %tz, <i32 31, i32 31>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @cttz_ult_bitwidth_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: @cttz_ult_bitwidth_v2i32(
+; CHECK-NEXT:    [[TZ:%.*]] = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[TZ]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %tz = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ult <2 x i32> %tz, <i32 32, i32 32>
+  ret <2 x i1> %cmp
+}
+
 define i1 @ctpop_eq_zero_i11(i11 %x) {
 ; CHECK-LABEL: @ctpop_eq_zero_i11(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i11 %x, 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i11 [[X:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %pop = tail call i11 @llvm.ctpop.i11(i11 %x)
@@ -93,7 +471,7 @@
 
 define <2 x i1> @ctpop_ne_zero_v2i32(<2 x i32> %x) {
 ; CHECK-LABEL: @ctpop_ne_zero_v2i32(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> %x, zeroinitializer
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[X:%.*]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %pop = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %x)
@@ -103,7 +481,7 @@
 
 define i1 @ctpop_eq_bitwidth_i8(i8 %x) {
 ; CHECK-LABEL: @ctpop_eq_bitwidth_i8(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 %x, -1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[X:%.*]], -1
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %pop = tail call i8 @llvm.ctpop.i8(i8 %x)
@@ -113,7 +491,7 @@
 
 define <2 x i1> @ctpop_ne_bitwidth_v2i32(<2 x i32> %x) {
 ; CHECK-LABEL: @ctpop_ne_bitwidth_v2i32(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> %x, <i32 -1, i32 -1>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[X:%.*]], <i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %pop = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %x)
diff --git a/test/Transforms/InstCombine/element-atomic-memintrins.ll b/test/Transforms/InstCombine/element-atomic-memintrins.ll
index 7467bc7..6bc62c9 100644
--- a/test/Transforms/InstCombine/element-atomic-memintrins.ll
+++ b/test/Transforms/InstCombine/element-atomic-memintrins.ll
@@ -15,12 +15,9 @@
 define void @test_memset_to_store(i8* %dest) {
 ; CHECK-LABEL: @test_memset_to_store(
 ; CHECK-NEXT:    store atomic i8 1, i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
-; CHECK-NEXT:    store atomic i16 257, i16* [[TMP1]] unordered, align 1
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
-; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 1
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 1
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 2, i32 1)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 4, i32 1)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 8, i32 1)
 ; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 16, i32 1)
 ; CHECK-NEXT:    ret void
 ;
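 ; (The rewritten checks in this function and those below suggest a
 ; narrower lowering rule: a call becomes a single unordered store only
 ; when its total length equals its element size and fits in 8 bytes;
 ; longer calls, which previously merged several atomic elements into one
 ; wide store, now stay as intrinsic calls. The memmove tests below show
 ; the same pattern with load/store pairs.)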
@@ -34,15 +31,15 @@
 
 define void @test_memset_to_store_2(i8* %dest) {
 ; CHECK-LABEL: @test_memset_to_store_2(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST:%.*]] to i16*
+; CHECK-NEXT:    store atomic i8 1, i8* [[DEST:%.*]] unordered, align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
 ; CHECK-NEXT:    store atomic i16 257, i16* [[TMP1]] unordered, align 2
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
-; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 2
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 2
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 4, i32 2)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 8, i32 2)
 ; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 16, i32 2)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 1, i32 1)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 2, i32 2)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 4, i32 2)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 8, i32 2)
@@ -52,13 +49,17 @@
 
 define void @test_memset_to_store_4(i8* %dest) {
 ; CHECK-LABEL: @test_memset_to_store_4(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST:%.*]] to i32*
-; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP1]] unordered, align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP2]] unordered, align 4
+; CHECK-NEXT:    store atomic i8 1, i8* [[DEST:%.*]] unordered, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    store atomic i16 257, i16* [[TMP1]] unordered, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 8, i32 4)
 ; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 16, i32 4)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 1, i32 1)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 2, i32 2)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 4, i32 4)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 8, i32 4)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 16, i32 4)
@@ -67,11 +68,19 @@
 
 define void @test_memset_to_store_8(i8* %dest) {
 ; CHECK-LABEL: @test_memset_to_store_8(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST:%.*]] to i64*
-; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP1]] unordered, align 8
+; CHECK-NEXT:    store atomic i8 1, i8* [[DEST:%.*]] unordered, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    store atomic i16 257, i16* [[TMP1]] unordered, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
+; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 8
 ; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 [[DEST]], i8 1, i32 16, i32 8)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 1, i32 1)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 2, i32 2)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 4, i32 4)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 8, i32 8)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 16, i32 8)
   ret void
@@ -79,9 +88,20 @@
 
 define void @test_memset_to_store_16(i8* %dest) {
 ; CHECK-LABEL: @test_memset_to_store_16(
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 [[DEST:%.*]], i8 1, i32 16, i32 16)
+; CHECK-NEXT:    store atomic i8 1, i8* [[DEST:%.*]] unordered, align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    store atomic i16 257, i16* [[TMP1]] unordered, align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 16
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
+; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 16
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 [[DEST]], i8 1, i32 16, i32 16)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 1, i32 1)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 2, i32 2)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 4, i32 4)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 8, i32 8)
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 16, i32 16)
   ret void
 }
@@ -134,18 +154,9 @@
 ; CHECK-LABEL: @test_memmove_loadstore(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
 ; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
-; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 1
-; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 1
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
-; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 1
-; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 1
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[SRC]] to i64*
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 1
-; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 1
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
 ; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
 ; CHECK-NEXT:    ret void
 ;
@@ -159,21 +170,18 @@
 
 define void @test_memmove_loadstore_2(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memmove_loadstore_2(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[SRC:%.*]] to i16*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i16*
-; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i16, i16* [[TMP1]] unordered, align 2
-; CHECK-NEXT:    store atomic i16 [[TMP3]], i16* [[TMP2]] unordered, align 2
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[SRC]] to i32*
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[DEST]] to i32*
-; CHECK-NEXT:    [[TMP6:%.*]] = load atomic i32, i32* [[TMP4]] unordered, align 2
-; CHECK-NEXT:    store atomic i32 [[TMP6]], i32* [[TMP5]] unordered, align 2
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8* [[SRC]] to i64*
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    [[TMP9:%.*]] = load atomic i64, i64* [[TMP7]] unordered, align 2
-; CHECK-NEXT:    store atomic i64 [[TMP9]], i64* [[TMP8]] unordered, align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 2
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
 ; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 2, i32 2)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 4, i32 2)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 8, i32 2)
@@ -183,17 +191,22 @@
 
 define void @test_memmove_loadstore_4(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memmove_loadstore_4(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[SRC:%.*]] to i32*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i32*
-; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, i32* [[TMP1]] unordered, align 4
-; CHECK-NEXT:    store atomic i32 [[TMP3]], i32* [[TMP2]] unordered, align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[SRC]] to i64*
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    [[TMP6:%.*]] = load atomic i64, i64* [[TMP4]] unordered, align 4
-; CHECK-NEXT:    store atomic i64 [[TMP6]], i64* [[TMP5]] unordered, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 4
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
 ; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 2, i32 2)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 4, i32 4)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 8, i32 4)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 16, i32 4)
@@ -202,13 +215,26 @@
 
 define void @test_memmove_loadstore_8(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memmove_loadstore_8(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[SRC:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i64*
-; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i64, i64* [[TMP1]] unordered, align 8
-; CHECK-NEXT:    store atomic i64 [[TMP3]], i64* [[TMP2]] unordered, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 8
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 8
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 8
+; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[SRC]] to i64*
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
+; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
+; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
 ; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 2, i32 2)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 4, i32 4)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 8, i32 8)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 16, i32 8)
   ret void
@@ -216,9 +242,27 @@
 
 define void @test_memmove_loadstore_16(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memmove_loadstore_16(
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 16
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 16
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 16
+; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[SRC]] to i64*
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
+; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
+; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
 ; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 2, i32 2)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 4, i32 4)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 8, i32 8)
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 16, i32 16)
   ret void
 }
@@ -258,18 +302,9 @@
 ; CHECK-LABEL: @test_memcpy_loadstore(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
 ; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
-; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 1
-; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 1
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
-; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 1
-; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 1
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[SRC]] to i64*
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 1
-; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 1
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
 ; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
 ; CHECK-NEXT:    ret void
 ;
@@ -283,21 +318,18 @@
 
 define void @test_memcpy_loadstore_2(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memcpy_loadstore_2(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[SRC:%.*]] to i16*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i16*
-; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i16, i16* [[TMP1]] unordered, align 2
-; CHECK-NEXT:    store atomic i16 [[TMP3]], i16* [[TMP2]] unordered, align 2
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[SRC]] to i32*
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[DEST]] to i32*
-; CHECK-NEXT:    [[TMP6:%.*]] = load atomic i32, i32* [[TMP4]] unordered, align 2
-; CHECK-NEXT:    store atomic i32 [[TMP6]], i32* [[TMP5]] unordered, align 2
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8* [[SRC]] to i64*
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    [[TMP9:%.*]] = load atomic i64, i64* [[TMP7]] unordered, align 2
-; CHECK-NEXT:    store atomic i64 [[TMP9]], i64* [[TMP8]] unordered, align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 2
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
 ; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 2, i32 2)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 4, i32 2)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 8, i32 2)
@@ -307,17 +339,22 @@
 
 define void @test_memcpy_loadstore_4(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memcpy_loadstore_4(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[SRC:%.*]] to i32*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i32*
-; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, i32* [[TMP1]] unordered, align 4
-; CHECK-NEXT:    store atomic i32 [[TMP3]], i32* [[TMP2]] unordered, align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[SRC]] to i64*
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[DEST]] to i64*
-; CHECK-NEXT:    [[TMP6:%.*]] = load atomic i64, i64* [[TMP4]] unordered, align 4
-; CHECK-NEXT:    store atomic i64 [[TMP6]], i64* [[TMP5]] unordered, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 4
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
 ; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 2, i32 2)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 4, i32 4)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 8, i32 4)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 16, i32 4)
@@ -326,13 +363,26 @@
 
 define void @test_memcpy_loadstore_8(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memcpy_loadstore_8(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[SRC:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i64*
-; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i64, i64* [[TMP1]] unordered, align 8
-; CHECK-NEXT:    store atomic i64 [[TMP3]], i64* [[TMP2]] unordered, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 8
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 8
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 8
+; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[SRC]] to i64*
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
+; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
+; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
 ; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 2, i32 2)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 4, i32 4)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 8, i32 8)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 16, i32 8)
   ret void
@@ -340,9 +390,27 @@
 
 define void @test_memcpy_loadstore_16(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memcpy_loadstore_16(
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 16
+; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[SRC]] to i16*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
+; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 16
+; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[SRC]] to i32*
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
+; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 16
+; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[SRC]] to i64*
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
+; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
+; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
 ; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
 ; CHECK-NEXT:    ret void
 ;
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 2, i32 2)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 4, i32 4)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 8, i32 8)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 16, i32 16)
   ret void
 }
diff --git a/test/Transforms/InstCombine/intersect-accessgroup.ll b/test/Transforms/InstCombine/intersect-accessgroup.ll
new file mode 100644
index 0000000..858b9b6
--- /dev/null
+++ b/test/Transforms/InstCombine/intersect-accessgroup.ll
@@ -0,0 +1,113 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+;
+; void func(long n, double A[static const restrict n]) {
+; 	for (int i = 0; i < n; i += 1)
+; 		for (int j = 0; j < n; j += 1)
+; 			for (int k = 0; k < n; k += 1)
+; 				for (int l = 0; l < n; l += 1) {
+; 					double *p = &A[i + j + k + l];
+; 					double x = *p;
+; 					double y = *p;
+; 					arg(x + y);
+; 				}
+; }
+;
+; Check that instcombine correctly merges access group metadata when it
+; combines the two loads (only loops common to both accesses remain
+; parallel, i.e. the intersection).
+; Note that the combined load would be parallel to loop !16, since both
+; original loads are parallel to it, but !16 references two access groups
+; (!8 and !9), neither of which contains both loads. As such, the
+; information that the combined load is parallel to !16 is lost.
+;
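+; For instance (matching the metadata at the bottom of this file): the first
+; load is in access groups !1 = {!7, !9} and the second in !2 = {!7, !8}, so
+; the merged load may only claim the intersection {!7}. Only loops listing !7
+; in their llvm.loop.parallel_accesses (here the innermost loop, !11) remain
+; provably parallel for it.
+;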
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+declare void @arg(double)
+
+define void @func(i64 %n, double* noalias nonnull %A) {
+entry:
+  br label %for.cond
+
+for.cond:
+  %i.0 = phi i32 [ 0, %entry ], [ %add31, %for.inc30 ]
+  %conv = sext i32 %i.0 to i64
+  %cmp = icmp slt i64 %conv, %n
+  br i1 %cmp, label %for.cond2, label %for.end32
+
+for.cond2:
+  %j.0 = phi i32 [ %add28, %for.inc27 ], [ 0, %for.cond ]
+  %conv3 = sext i32 %j.0 to i64
+  %cmp4 = icmp slt i64 %conv3, %n
+  br i1 %cmp4, label %for.cond8, label %for.inc30
+
+for.cond8:
+  %k.0 = phi i32 [ %add25, %for.inc24 ], [ 0, %for.cond2 ]
+  %conv9 = sext i32 %k.0 to i64
+  %cmp10 = icmp slt i64 %conv9, %n
+  br i1 %cmp10, label %for.cond14, label %for.inc27
+
+for.cond14:
+  %l.0 = phi i32 [ %add23, %for.body19 ], [ 0, %for.cond8 ]
+  %conv15 = sext i32 %l.0 to i64
+  %cmp16 = icmp slt i64 %conv15, %n
+  br i1 %cmp16, label %for.body19, label %for.inc24
+
+for.body19:
+  %add = add nsw i32 %i.0, %j.0
+  %add20 = add nsw i32 %add, %k.0
+  %add21 = add nsw i32 %add20, %l.0
+  %idxprom = sext i32 %add21 to i64
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
+  %0 = load double, double* %arrayidx, align 8, !llvm.access.group !1
+  %1 = load double, double* %arrayidx, align 8, !llvm.access.group !2
+  %add22 = fadd double %0, %1
+  call void @arg(double %add22), !llvm.access.group !3
+  %add23 = add nsw i32 %l.0, 1
+  br label %for.cond14, !llvm.loop !11
+
+for.inc24:
+  %add25 = add nsw i32 %k.0, 1
+  br label %for.cond8, !llvm.loop !14
+
+for.inc27:
+  %add28 = add nsw i32 %j.0, 1
+  br label %for.cond2, !llvm.loop !16
+
+for.inc30:
+  %add31 = add nsw i32 %i.0, 1
+  br label %for.cond, !llvm.loop !18
+
+for.end32:
+  ret void
+}
+
+
+; access groups
+!7 = distinct !{}
+!8 = distinct !{}
+!9 = distinct !{}
+
+; access group lists
+!1 = !{!7, !9}
+!2 = !{!7, !8}
+!3 = !{!7, !8, !9}
+
+!11 = distinct !{!11, !13}
+!13 = !{!"llvm.loop.parallel_accesses", !7}
+
+!14 = distinct !{!14, !15}
+!15 = !{!"llvm.loop.parallel_accesses", !8}
+
+!16 = distinct !{!16, !17}
+!17 = !{!"llvm.loop.parallel_accesses", !8, !9}
+
+!18 = distinct !{!18, !19}
+!19 = !{!"llvm.loop.parallel_accesses", !9}
+
+
+; CHECK: load double, {{.*}} !llvm.access.group ![[ACCESSGROUP_0:[0-9]+]]
+; CHECK: br label %for.cond14, !llvm.loop ![[LOOP_4:[0-9]+]]
+
+; CHECK: ![[ACCESSGROUP_0]] = distinct !{}
+
+; CHECK: ![[LOOP_4]] = distinct !{![[LOOP_4]], ![[PARALLEL_ACCESSES_5:[0-9]+]]}
+; CHECK: ![[PARALLEL_ACCESSES_5]] = !{!"llvm.loop.parallel_accesses", ![[ACCESSGROUP_0]]}
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index 73f1cd9..8de892f 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -322,30 +322,26 @@
   ret <2 x i1> %res
 }
 
-define i1 @cttz_knownbits2(i32 %arg) {
+define i32 @cttz_knownbits2(i32 %arg) {
 ; CHECK-LABEL: @cttz_knownbits2(
 ; CHECK-NEXT:    [[OR:%.*]] = or i32 [[ARG:%.*]], 4
 ; CHECK-NEXT:    [[CNT:%.*]] = call i32 @llvm.cttz.i32(i32 [[OR]], i1 true) #2, !range ![[$CTTZ_RANGE:[0-9]+]]
-; CHECK-NEXT:    [[RES:%.*]] = icmp eq i32 [[CNT]], 2
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i32 [[CNT]]
 ;
   %or = or i32 %arg, 4
   %cnt = call i32 @llvm.cttz.i32(i32 %or, i1 true) nounwind readnone
-  %res = icmp eq i32 %cnt, 2
-  ret i1 %res
+  ret i32 %cnt
 }
 
-define <2 x i1> @cttz_knownbits2_vec(<2 x i32> %arg) {
+define <2 x i32> @cttz_knownbits2_vec(<2 x i32> %arg) {
 ; CHECK-LABEL: @cttz_knownbits2_vec(
 ; CHECK-NEXT:    [[OR:%.*]] = or <2 x i32> [[ARG:%.*]], <i32 4, i32 4>
 ; CHECK-NEXT:    [[CNT:%.*]] = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[OR]], i1 true)
-; CHECK-NEXT:    [[RES:%.*]] = icmp eq <2 x i32> [[CNT]], <i32 2, i32 2>
-; CHECK-NEXT:    ret <2 x i1> [[RES]]
+; CHECK-NEXT:    ret <2 x i32> [[CNT]]
 ;
   %or = or <2 x i32> %arg, <i32 4, i32 4>
   %cnt = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %or, i1 true) nounwind readnone
-  %res = icmp eq <2 x i32> %cnt, <i32 2, i32 2>
-  ret <2 x i1> %res
+  ret <2 x i32> %cnt
 }
 
 define i1 @cttz_knownbits3(i32 %arg) {
@@ -358,13 +354,9 @@
   ret i1 %res
 }
 
-; TODO: The icmp is unnecessary given the known bits of the input.
 define <2 x i1> @cttz_knownbits3_vec(<2 x i32> %arg) {
 ; CHECK-LABEL: @cttz_knownbits3_vec(
-; CHECK-NEXT:    [[OR:%.*]] = or <2 x i32> [[ARG:%.*]], <i32 4, i32 4>
-; CHECK-NEXT:    [[CNT:%.*]] = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[OR]], i1 true)
-; CHECK-NEXT:    [[RES:%.*]] = icmp eq <2 x i32> [[CNT]], <i32 3, i32 3>
-; CHECK-NEXT:    ret <2 x i1> [[RES]]
+; CHECK-NEXT:    ret <2 x i1> zeroinitializer
 ;
   %or = or <2 x i32> %arg, <i32 4, i32 4>
   %cnt = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %or, i1 true) nounwind readnone
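 ; Or'ing in 4 forces bit 2 set, so cttz can be at most 2 and can never equal
 ; the 3 this test compares against (see the removed CHECK lines); the compare
 ; folds to false, i.e. zeroinitializer.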
@@ -422,30 +414,26 @@
   ret <2 x i1> %res
 }
 
-define i1 @ctlz_knownbits2(i8 %arg) {
+define i8 @ctlz_knownbits2(i8 %arg) {
 ; CHECK-LABEL: @ctlz_knownbits2(
 ; CHECK-NEXT:    [[OR:%.*]] = or i8 [[ARG:%.*]], 32
 ; CHECK-NEXT:    [[CNT:%.*]] = call i8 @llvm.ctlz.i8(i8 [[OR]], i1 true) #2, !range ![[$CTLZ_RANGE:[0-9]+]]
-; CHECK-NEXT:    [[RES:%.*]] = icmp eq i8 [[CNT]], 2
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i8 [[CNT]]
 ;
   %or = or i8 %arg, 32
   %cnt = call i8 @llvm.ctlz.i8(i8 %or, i1 true) nounwind readnone
-  %res = icmp eq i8 %cnt, 2
-  ret i1 %res
+  ret i8 %cnt
 }
 
-define <2 x i1> @ctlz_knownbits2_vec(<2 x i8> %arg) {
+define <2 x i8> @ctlz_knownbits2_vec(<2 x i8> %arg) {
 ; CHECK-LABEL: @ctlz_knownbits2_vec(
 ; CHECK-NEXT:    [[OR:%.*]] = or <2 x i8> [[ARG:%.*]], <i8 32, i8 32>
 ; CHECK-NEXT:    [[CNT:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[OR]], i1 true)
-; CHECK-NEXT:    [[RES:%.*]] = icmp eq <2 x i8> [[CNT]], <i8 2, i8 2>
-; CHECK-NEXT:    ret <2 x i1> [[RES]]
+; CHECK-NEXT:    ret <2 x i8> [[CNT]]
 ;
   %or = or <2 x i8> %arg, <i8 32, i8 32>
   %cnt = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %or, i1 true) nounwind readnone
-  %res = icmp eq <2 x i8> %cnt, <i8 2, i8 2>
-  ret <2 x i1> %res
+  ret <2 x i8> %cnt
 }
 
 define i1 @ctlz_knownbits3(i8 %arg) {
@@ -458,13 +446,9 @@
   ret i1 %res
 }
 
-; TODO: The icmp is unnecessary given the known bits of the input.
 define <2 x i1> @ctlz_knownbits3_vec(<2 x i8> %arg) {
 ; CHECK-LABEL: @ctlz_knownbits3_vec(
-; CHECK-NEXT:    [[OR:%.*]] = or <2 x i8> [[ARG:%.*]], <i8 32, i8 32>
-; CHECK-NEXT:    [[CNT:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[OR]], i1 true)
-; CHECK-NEXT:    [[RES:%.*]] = icmp eq <2 x i8> [[CNT]], <i8 3, i8 3>
-; CHECK-NEXT:    ret <2 x i1> [[RES]]
+; CHECK-NEXT:    ret <2 x i1> zeroinitializer
 ;
   %or = or <2 x i8> %arg, <i8 32, i8 32>
   %cnt = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %or, i1 true) nounwind readnone
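 ; Same reasoning as the cttz case: bit 5 set in an i8 caps ctlz at 2, so the
 ; compare with 3 (see the removed CHECK lines) is always false.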
diff --git a/test/Transforms/InstCombine/loadstore-metadata.ll b/test/Transforms/InstCombine/loadstore-metadata.ll
index 4856350..5916a8d 100644
--- a/test/Transforms/InstCombine/loadstore-metadata.ll
+++ b/test/Transforms/InstCombine/loadstore-metadata.ll
@@ -39,7 +39,7 @@
 define i32 @test_load_cast_combine_invariant(float* %ptr) {
 ; Ensure (cast (load (...))) -> (load (cast (...))) preserves invariant metadata.
 ; CHECK-LABEL: @test_load_cast_combine_invariant(
-; CHECK: load i32, i32* %{{.*}}, !invariant.load !5
+; CHECK: load i32, i32* %{{.*}}, !invariant.load !7
 entry:
   %l = load float, float* %ptr, !invariant.load !6
   %c = bitcast float %l to i32
@@ -50,7 +50,7 @@
 ; Ensure (cast (load (...))) -> (load (cast (...))) preserves nontemporal
 ; metadata.
 ; CHECK-LABEL: @test_load_cast_combine_nontemporal(
-; CHECK: load i32, i32* %{{.*}}, !nontemporal !6
+; CHECK: load i32, i32* %{{.*}}, !nontemporal !8
 entry:
   %l = load float, float* %ptr, !nontemporal !7
   %c = bitcast float %l to i32
@@ -61,7 +61,7 @@
 ; Ensure (cast (load (...))) -> (load (cast (...))) preserves align
 ; metadata.
 ; CHECK-LABEL: @test_load_cast_combine_align(
-; CHECK: load i8*, i8** %{{.*}}, !align !7
+; CHECK: load i8*, i8** %{{.*}}, !align !9
 entry:
   %l = load i32*, i32** %ptr, !align !8
   %c = bitcast i32* %l to i8*
@@ -72,7 +72,7 @@
 ; Ensure (cast (load (...))) -> (load (cast (...))) preserves dereferenceable
 ; metadata.
 ; CHECK-LABEL: @test_load_cast_combine_deref(
-; CHECK: load i8*, i8** %{{.*}}, !dereferenceable !7
+; CHECK: load i8*, i8** %{{.*}}, !dereferenceable !9
 entry:
   %l = load i32*, i32** %ptr, !dereferenceable !8
   %c = bitcast i32* %l to i8*
@@ -83,7 +83,7 @@
 ; Ensure (cast (load (...))) -> (load (cast (...))) preserves
 ; dereferenceable_or_null metadata.
 ; CHECK-LABEL: @test_load_cast_combine_deref_or_null(
-; CHECK: load i8*, i8** %{{.*}}, !dereferenceable_or_null !7
+; CHECK: load i8*, i8** %{{.*}}, !dereferenceable_or_null !9
 entry:
   %l = load i32*, i32** %ptr, !dereferenceable_or_null !8
   %c = bitcast i32* %l to i8*
@@ -94,7 +94,7 @@
 ; Ensure (cast (load (...))) -> (load (cast (...))) preserves loop access
 ; metadata.
 ; CHECK-LABEL: @test_load_cast_combine_loop(
-; CHECK: load i32, i32* %{{.*}}, !llvm.mem.parallel_loop_access !4
+; CHECK: load i32, i32* %{{.*}}, !llvm.access.group !6
 entry:
   br label %loop
 
@@ -102,7 +102,7 @@
   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
   %src.gep = getelementptr inbounds float, float* %src, i32 %i
   %dst.gep = getelementptr inbounds i32, i32* %dst, i32 %i
-  %l = load float, float* %src.gep, !llvm.mem.parallel_loop_access !4
+  %l = load float, float* %src.gep, !llvm.access.group !9
   %c = bitcast float %l to i32
   store i32 %c, i32* %dst.gep
   %i.next = add i32 %i, 1
@@ -142,8 +142,9 @@
 !1 = !{!"scalar type", !2}
 !2 = !{!"root"}
 !3 = distinct !{!3, !4}
-!4 = distinct !{!4}
+!4 = distinct !{!4, !{!"llvm.loop.parallel_accesses", !9}}
 !5 = !{i32 0, i32 42}
 !6 = !{}
 !7 = !{i32 1}
 !8 = !{i64 8}
+!9 = distinct !{}
diff --git a/test/Transforms/InstCombine/mem-par-metadata-memcpy.ll b/test/Transforms/InstCombine/mem-par-metadata-memcpy.ll
index 82231ba..54fe6cb 100644
--- a/test/Transforms/InstCombine/mem-par-metadata-memcpy.ll
+++ b/test/Transforms/InstCombine/mem-par-metadata-memcpy.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 ;
-; Make sure the llvm.mem.parallel_loop_access meta-data is preserved
+; Make sure the llvm.access.group metadata is preserved
 ; when a memcpy is replaced with a load+store by instcombine
 ;
 ; #include <string.h>
@@ -13,8 +13,8 @@
 ; }
 
 ; CHECK: for.body:
-; CHECK:  %{{.*}} = load i16, i16* %{{.*}}, align 1, !llvm.mem.parallel_loop_access !1
-; CHECK:  store i16 %{{.*}}, i16* %{{.*}}, align 1, !llvm.mem.parallel_loop_access !1
+; CHECK:  %{{.*}} = load i16, i16* %{{.*}}, align 1, !llvm.access.group !1
+; CHECK:  store i16 %{{.*}}, i16* %{{.*}}, align 1, !llvm.access.group !1
 
 
 ; ModuleID = '<stdin>'
@@ -36,7 +36,7 @@
   %arrayidx = getelementptr inbounds i8, i8* %out, i64 %i.0
   %add = add nsw i64 %i.0, %size
   %arrayidx1 = getelementptr inbounds i8, i8* %out, i64 %add
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arrayidx, i8* %arrayidx1, i64 2, i1 false), !llvm.mem.parallel_loop_access !1
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arrayidx, i8* %arrayidx1, i64 2, i1 false), !llvm.access.group !4
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
@@ -56,6 +56,7 @@
 !llvm.ident = !{!0}
 
 !0 = !{!"clang version 4.0.0 (cfe/trunk 277751)"}
-!1 = distinct !{!1, !2, !3}
+!1 = distinct !{!1, !2, !3, !{!"llvm.loop.parallel_accesses", !4}}
 !2 = distinct !{!2, !3}
 !3 = !{!"llvm.loop.vectorize.enable", i1 true}
+!4 = distinct !{} ; access group
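+; (In the input, !4 is the access group attached to the memcpy and the added
+; llvm.loop.parallel_accesses entry on !1 declares accesses in that group
+; parallel; the group is presumably renumbered on output, hence the
+; !llvm.access.group !1 expected by the CHECK lines above.)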
diff --git a/test/Transforms/InstCombine/memchr.ll b/test/Transforms/InstCombine/memchr.ll
index 5a081c2..83073e2 100644
--- a/test/Transforms/InstCombine/memchr.ll
+++ b/test/Transforms/InstCombine/memchr.ll
@@ -50,7 +50,7 @@
 
 define void @test4(i32 %chr) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[DST:%.*]] = call i8* @memchr(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @hello, i32 0, i32 0), i32 %chr, i32 14)
+; CHECK-NEXT:    [[DST:%.*]] = call i8* @memchr(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @hello, i32 0, i32 0), i32 [[CHR:%.*]], i32 14)
 ; CHECK-NEXT:    store i8* [[DST]], i8** @chp, align 4
 ; CHECK-NEXT:    ret void
 ;
@@ -131,12 +131,13 @@
 ; Check transformation memchr("\r\n", C, 2) != nullptr -> (C & 9216) != 0
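 ; ('\r' is 13 and '\n' is 10, so the mask is (1 << 13) | (1 << 10) =
 ; 8192 + 1024 = 9216; the ult-16 bounds check keeps the shift amount in
 ; range for i16.)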
 define i1 @test11(i32 %C) {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 %C to i16
-; CHECK-NEXT:    [[MEMCHR_BOUNDS:%.*]] = icmp ult i16 [[TMP1]], 16
-; CHECK-NEXT:    [[TMP2:%.*]] = shl i16 1, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = and i16 [[TMP2]], 9216
-; CHECK-NEXT:    [[MEMCHR_BITS:%.*]] = icmp ne i16 [[TMP3]], 0
-; CHECK-NEXT:    [[MEMCHR:%.*]] = and i1 [[MEMCHR:%.*]].bounds, [[MEMCHR:%.*]].bits
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], 255
+; CHECK-NEXT:    [[MEMCHR_BOUNDS:%.*]] = icmp ult i16 [[TMP2]], 16
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i16 1, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = and i16 [[TMP3]], 9216
+; CHECK-NEXT:    [[MEMCHR_BITS:%.*]] = icmp ne i16 [[TMP4]], 0
+; CHECK-NEXT:    [[MEMCHR:%.*]] = and i1 [[MEMCHR_BOUNDS]], [[MEMCHR_BITS]]
 ; CHECK-NEXT:    ret i1 [[MEMCHR]]
 ;
   %dst = call i8* @memchr(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @newlines, i64 0, i64 0), i32 %C, i32 2)
@@ -147,7 +148,7 @@
 ; No 64 bits here
 define i1 @test12(i32 %C) {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    [[DST:%.*]] = call i8* @memchr(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @spaces, i32 0, i32 0), i32 %C, i32 3)
+; CHECK-NEXT:    [[DST:%.*]] = call i8* @memchr(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @spaces, i32 0, i32 0), i32 [[C:%.*]], i32 3)
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8* [[DST]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
@@ -158,11 +159,12 @@
 
 define i1 @test13(i32 %C) {
 ; CHECK-LABEL: @test13(
-; CHECK-NEXT:    [[MEMCHR_BOUNDS:%.*]] = icmp ult i32 %C, 32
-; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 1, %C
-; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -2147483647
-; CHECK-NEXT:    [[MEMCHR_BITS:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT:    [[MEMCHR:%.*]] = and i1 [[MEMCHR:%.*]].bounds, [[MEMCHR:%.*]].bits
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[C:%.*]], 255
+; CHECK-NEXT:    [[MEMCHR_BOUNDS:%.*]] = icmp ult i32 [[TMP1]], 32
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 1, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], -2147483647
+; CHECK-NEXT:    [[MEMCHR_BITS:%.*]] = icmp ne i32 [[TMP3]], 0
+; CHECK-NEXT:    [[MEMCHR:%.*]] = and i1 [[MEMCHR_BOUNDS]], [[MEMCHR_BITS]]
 ; CHECK-NEXT:    ret i1 [[MEMCHR]]
 ;
   %dst = call i8* @memchr(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @single, i64 0, i64 0), i32 %C, i32 2)
@@ -172,8 +174,9 @@
 
 define i1 @test14(i32 %C) {
 ; CHECK-LABEL: @test14(
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 %C, 31
-; CHECK-NEXT:    ret i1 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[C:%.*]], 255
+; CHECK-NEXT:    [[MEMCHR_BITS:%.*]] = icmp eq i32 [[TMP1]], 31
+; CHECK-NEXT:    ret i1 [[MEMCHR_BITS]]
 ;
   %dst = call i8* @memchr(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @single, i64 0, i64 0), i32 %C, i32 1)
   %cmp = icmp ne i8* %dst, null
@@ -182,7 +185,7 @@
 
 define i1 @test15(i32 %C) {
 ; CHECK-LABEL: @test15(
-; CHECK-NEXT:    [[DST:%.*]] = call i8* @memchr(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @negative, i32 0, i32 0), i32 %C, i32 3)
+; CHECK-NEXT:    [[DST:%.*]] = call i8* @memchr(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @negative, i32 0, i32 0), i32 [[C:%.*]], i32 3)
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8* [[DST]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
index fc3773a..b1dc9d5 100644
--- a/test/Transforms/InstCombine/mul.ll
+++ b/test/Transforms/InstCombine/mul.ll
@@ -442,3 +442,78 @@
   %mul = mul nsw i128 %X, 2
   ret i128 %mul
 }
+
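+; The tests below check that a negation is hoisted out of a multiply:
+; (0 - X) * Y is canonicalized to 0 - (X * Y). In wrapping two's-complement
+; arithmetic (-x) * y == -(x * y), so the fold is always safe; the nsw test
+; checks that the flag is simply dropped.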
+define i32 @test_mul_canonicalize_op0(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_canonicalize_op0(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = sub i32 0, [[TMP1]]
+; CHECK-NEXT:    ret i32 [[MUL]]
+;
+  %neg = sub i32 0, %x
+  %mul = mul i32 %neg, %y
+  ret i32 %mul
+}
+
+define i32 @test_mul_canonicalize_op1(i32 %x, i32 %z) {
+; CHECK-LABEL: @test_mul_canonicalize_op1(
+; CHECK-NEXT:    [[Y:%.*]] = mul i32 [[Z:%.*]], 3
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i32 [[Y]], [[X:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = sub i32 0, [[TMP1]]
+; CHECK-NEXT:    ret i32 [[MUL]]
+;
+  %y = mul i32 %z, 3
+  %neg = sub i32 0, %x
+  %mul = mul i32 %y, %neg
+  ret i32 %mul
+}
+
+define i32 @test_mul_canonicalize_nsw(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_canonicalize_nsw(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = sub i32 0, [[TMP1]]
+; CHECK-NEXT:    ret i32 [[MUL]]
+;
+  %neg = sub nsw i32 0, %x
+  %mul = mul nsw i32 %neg, %y
+  ret i32 %mul
+}
+
+define <2 x i32> @test_mul_canonicalize_vec(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @test_mul_canonicalize_vec(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul <2 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = sub <2 x i32> zeroinitializer, [[TMP1]]
+; CHECK-NEXT:    ret <2 x i32> [[MUL]]
+;
+  %neg = sub <2 x i32> <i32 0, i32 0>, %x
+  %mul = mul <2 x i32> %neg, %y
+  ret <2 x i32> %mul
+}
+
+define i32 @test_mul_canonicalize_multiple_uses(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_mul_canonicalize_multiple_uses(
+; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[X:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT:    [[MUL2:%.*]] = mul i32 [[MUL]], [[NEG]]
+; CHECK-NEXT:    ret i32 [[MUL2]]
+;
+  %neg = sub i32 0, %x
+  %mul = mul i32 %neg, %y
+  %mul2 = mul i32 %mul, %neg
+  ret i32 %mul2
+}
+
+@X = global i32 5
+
+define i64 @test_mul_canonicalize_neg_is_not_undone(i64 %L1) {
+; Check that we do not undo the canonicalization to 0 - (X * Y) when Y is a
+; constant expression.
+; CHECK-LABEL: @test_mul_canonicalize_neg_is_not_undone(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[L1:%.*]], ptrtoint (i32* @X to i64)
+; CHECK-NEXT:    [[B4:%.*]] = sub i64 0, [[TMP1]]
+; CHECK-NEXT:    ret i64 [[B4]]
+;
+  %v1 = ptrtoint i32* @X to i64
+  %B8 = sub i64 0, %v1
+  %B4 = mul i64 %B8, %L1
+  ret i64 %B4
+}
diff --git a/test/Transforms/InstCombine/operand-complexity.ll b/test/Transforms/InstCombine/operand-complexity.ll
index 20abe7b..c67fb08 100644
--- a/test/Transforms/InstCombine/operand-complexity.ll
+++ b/test/Transforms/InstCombine/operand-complexity.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
-; 'Negate' is considered less complex than a normal binop, so the mul should have the binop as the first operand.
+; 'Negate' is considered less complex than a normal binop, so the xor should have the binop as the first operand.
 
 define i8 @neg(i8 %x) {
 ; CHECK-LABEL: @neg(
 ; CHECK-NEXT:    [[BO:%.*]] = udiv i8 [[X:%.*]], 42
 ; CHECK-NEXT:    [[NEGX:%.*]] = sub i8 0, [[X]]
-; CHECK-NEXT:    [[R:%.*]] = mul i8 [[BO]], [[NEGX]]
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[BO]], [[NEGX]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %bo = udiv i8 %x, 42
   %negx = sub i8 0, %x
-  %r = mul i8 %negx, %bo
+  %r = xor i8 %negx, %bo
   ret i8 %r
 }
 
@@ -20,12 +20,12 @@
 ; CHECK-LABEL: @neg_vec(
 ; CHECK-NEXT:    [[BO:%.*]] = udiv <2 x i8> [[X:%.*]], <i8 42, i8 -42>
 ; CHECK-NEXT:    [[NEGX:%.*]] = sub <2 x i8> zeroinitializer, [[X]]
-; CHECK-NEXT:    [[R:%.*]] = mul <2 x i8> [[BO]], [[NEGX]]
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[BO]], [[NEGX]]
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %bo = udiv <2 x i8> %x, <i8 42, i8 -42>
   %negx = sub <2 x i8> <i8 0, i8 0>, %x
-  %r = mul <2 x i8> %negx, %bo
+  %r = xor <2 x i8> %negx, %bo
   ret <2 x i8> %r
 }
 
@@ -33,12 +33,12 @@
 ; CHECK-LABEL: @neg_vec_undef(
 ; CHECK-NEXT:    [[BO:%.*]] = udiv <2 x i8> [[X:%.*]], <i8 42, i8 -42>
 ; CHECK-NEXT:    [[NEGX:%.*]] = sub <2 x i8> <i8 0, i8 undef>, [[X]]
-; CHECK-NEXT:    [[R:%.*]] = mul <2 x i8> [[BO]], [[NEGX]]
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[BO]], [[NEGX]]
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %bo = udiv <2 x i8> %x, <i8 42, i8 -42>
   %negx = sub <2 x i8> <i8 0, i8 undef>, %x
-  %r = mul <2 x i8> %negx, %bo
+  %r = xor <2 x i8> %negx, %bo
   ret <2 x i8> %r
 }
 
diff --git a/test/Transforms/InstCombine/rotate.ll b/test/Transforms/InstCombine/rotate.ll
index 492817e..abfe74d 100644
--- a/test/Transforms/InstCombine/rotate.ll
+++ b/test/Transforms/InstCombine/rotate.ll
@@ -206,19 +206,13 @@
   ret <3 x i42> %r
 }
 
-; TODO:
 ; This is the canonical pattern for a UB-safe rotate-by-variable with power-of-2-size scalar type.
 ; The backend expansion of funnel shift for targets that don't have a rotate instruction should
 ; match the original IR, so it is always good to canonicalize to the intrinsics for this pattern.
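 ;
 ; A minimal C-level sketch of that pattern, for 32-bit unsigned x and y:
 ;   r = (x << (y & 31)) | (x >> (-y & 31));
 ; masking both amounts into [0, 31] means no shift ever sees an
 ; out-of-range (undefined) count, and y % 32 == 0 still yields x.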
 
 define i32 @rotl_safe_i32(i32 %x, i32 %y) {
 ; CHECK-LABEL: @rotl_safe_i32(
-; CHECK-NEXT:    [[NEGY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT:    [[YMASK:%.*]] = and i32 [[Y]], 31
-; CHECK-NEXT:    [[NEGYMASK:%.*]] = and i32 [[NEGY]], 31
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[YMASK]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[X]], [[NEGYMASK]]
-; CHECK-NEXT:    [[R:%.*]] = or i32 [[SHR]], [[SHL]]
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %negy = sub i32 0, %y
@@ -230,18 +224,14 @@
   ret i32 %r
 }
 
-; TODO:
 ; Extra uses don't change anything.
 
 define i16 @rotl_safe_i16_commute_extra_use(i16 %x, i16 %y, i16* %p) {
 ; CHECK-LABEL: @rotl_safe_i16_commute_extra_use(
 ; CHECK-NEXT:    [[NEGY:%.*]] = sub i16 0, [[Y:%.*]]
-; CHECK-NEXT:    [[YMASK:%.*]] = and i16 [[Y]], 15
 ; CHECK-NEXT:    [[NEGYMASK:%.*]] = and i16 [[NEGY]], 15
 ; CHECK-NEXT:    store i16 [[NEGYMASK]], i16* [[P:%.*]], align 2
-; CHECK-NEXT:    [[SHL:%.*]] = shl i16 [[X:%.*]], [[YMASK]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i16 [[X]], [[NEGYMASK]]
-; CHECK-NEXT:    [[R:%.*]] = or i16 [[SHL]], [[SHR]]
+; CHECK-NEXT:    [[R:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y]])
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %negy = sub i16 0, %y
@@ -254,17 +244,11 @@
   ret i16 %r
 }
 
-; TODO:
 ; Left/right is determined by the negation.
 
 define i64 @rotr_safe_i64(i64 %x, i64 %y) {
 ; CHECK-LABEL: @rotr_safe_i64(
-; CHECK-NEXT:    [[NEGY:%.*]] = sub i64 0, [[Y:%.*]]
-; CHECK-NEXT:    [[YMASK:%.*]] = and i64 [[Y]], 63
-; CHECK-NEXT:    [[NEGYMASK:%.*]] = and i64 [[NEGY]], 63
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[X:%.*]], [[NEGYMASK]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 [[X]], [[YMASK]]
-; CHECK-NEXT:    [[R:%.*]] = or i64 [[SHR]], [[SHL]]
+; CHECK-NEXT:    [[R:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
 ; CHECK-NEXT:    ret i64 [[R]]
 ;
   %negy = sub i64 0, %y
@@ -276,7 +260,6 @@
   ret i64 %r
 }
 
-; TODO:
 ; Extra uses don't change anything.
 
 define i8 @rotr_safe_i8_commute_extra_use(i8 %x, i8 %y, i8* %p) {
@@ -300,17 +283,11 @@
   ret i8 %r
 }
 
-; TODO:
 ; Vectors follow the same rules.
 
 define <2 x i32> @rotl_safe_v2i32(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @rotl_safe_v2i32(
-; CHECK-NEXT:    [[NEGY:%.*]] = sub <2 x i32> zeroinitializer, [[Y:%.*]]
-; CHECK-NEXT:    [[YMASK:%.*]] = and <2 x i32> [[Y]], <i32 31, i32 31>
-; CHECK-NEXT:    [[NEGYMASK:%.*]] = and <2 x i32> [[NEGY]], <i32 31, i32 31>
-; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i32> [[X:%.*]], [[YMASK]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr <2 x i32> [[X]], [[NEGYMASK]]
-; CHECK-NEXT:    [[R:%.*]] = or <2 x i32> [[SHR]], [[SHL]]
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[X]], <2 x i32> [[Y:%.*]])
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
   %negy = sub <2 x i32> zeroinitializer, %y
@@ -322,17 +299,11 @@
   ret <2 x i32> %r
 }
 
-; TODO:
 ; Vectors follow the same rules.
 
 define <3 x i16> @rotr_safe_v3i16(<3 x i16> %x, <3 x i16> %y) {
 ; CHECK-LABEL: @rotr_safe_v3i16(
-; CHECK-NEXT:    [[NEGY:%.*]] = sub <3 x i16> zeroinitializer, [[Y:%.*]]
-; CHECK-NEXT:    [[YMASK:%.*]] = and <3 x i16> [[Y]], <i16 15, i16 15, i16 15>
-; CHECK-NEXT:    [[NEGYMASK:%.*]] = and <3 x i16> [[NEGY]], <i16 15, i16 15, i16 15>
-; CHECK-NEXT:    [[SHL:%.*]] = shl <3 x i16> [[X:%.*]], [[NEGYMASK]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr <3 x i16> [[X]], [[YMASK]]
-; CHECK-NEXT:    [[R:%.*]] = or <3 x i16> [[SHR]], [[SHL]]
+; CHECK-NEXT:    [[R:%.*]] = call <3 x i16> @llvm.fshr.v3i16(<3 x i16> [[X:%.*]], <3 x i16> [[X]], <3 x i16> [[Y:%.*]])
 ; CHECK-NEXT:    ret <3 x i16> [[R]]
 ;
   %negy = sub <3 x i16> zeroinitializer, %y
@@ -353,12 +324,7 @@
 define i16 @rotate_left_16bit(i16 %v, i32 %shift) {
 ; CHECK-LABEL: @rotate_left_16bit(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHIFT:%.*]] to i16
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = sub i16 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = and i16 [[TMP3]], 15
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i16 [[V:%.*]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = shl i16 [[V]], [[TMP2]]
-; CHECK-NEXT:    [[CONV2:%.*]] = or i16 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[CONV2:%.*]] = call i16 @llvm.fshl.i16(i16 [[V:%.*]], i16 [[V]], i16 [[TMP1]])
 ; CHECK-NEXT:    ret i16 [[CONV2]]
 ;
   %and = and i32 %shift, 15
@@ -376,12 +342,7 @@
 define <2 x i16> @rotate_left_commute_16bit_vec(<2 x i16> %v, <2 x i32> %shift) {
 ; CHECK-LABEL: @rotate_left_commute_16bit_vec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i32> [[SHIFT:%.*]] to <2 x i16>
-; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i16> [[TMP1]], <i16 15, i16 15>
-; CHECK-NEXT:    [[TMP3:%.*]] = sub <2 x i16> zeroinitializer, [[TMP1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = and <2 x i16> [[TMP3]], <i16 15, i16 15>
-; CHECK-NEXT:    [[TMP5:%.*]] = shl <2 x i16> [[V:%.*]], [[TMP2]]
-; CHECK-NEXT:    [[TMP6:%.*]] = lshr <2 x i16> [[V]], [[TMP4]]
-; CHECK-NEXT:    [[CONV2:%.*]] = or <2 x i16> [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[CONV2:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[V:%.*]], <2 x i16> [[V]], <2 x i16> [[TMP1]])
 ; CHECK-NEXT:    ret <2 x i16> [[CONV2]]
 ;
   %and = and <2 x i32> %shift, <i32 15, i32 15>
@@ -399,11 +360,7 @@
 define i8 @rotate_right_8bit(i8 %v, i3 %shift) {
 ; CHECK-LABEL: @rotate_right_8bit(
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i3 [[SHIFT:%.*]] to i8
-; CHECK-NEXT:    [[TMP2:%.*]] = sub i3 0, [[SHIFT]]
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i3 [[TMP2]] to i8
-; CHECK-NEXT:    [[TMP4:%.*]] = shl i8 [[V:%.*]], [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i8 [[V]], [[TMP1]]
-; CHECK-NEXT:    [[CONV2:%.*]] = or i8 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
 ; CHECK-NEXT:    ret i8 [[CONV2]]
 ;
   %and = zext i3 %shift to i32
@@ -423,12 +380,8 @@
 ; CHECK-LABEL: @rotate_right_commute_8bit(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHIFT:%.*]] to i8
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], 3
-; CHECK-NEXT:    [[TMP3:%.*]] = sub nsw i8 0, [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = and i8 [[TMP3]], 7
-; CHECK-NEXT:    [[TMP5:%.*]] = trunc i32 [[V:%.*]] to i8
-; CHECK-NEXT:    [[TMP6:%.*]] = lshr i8 [[TMP5]], [[TMP2]]
-; CHECK-NEXT:    [[TMP7:%.*]] = shl i8 [[TMP5]], [[TMP4]]
-; CHECK-NEXT:    [[CONV2:%.*]] = or i8 [[TMP6]], [[TMP7]]
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[V:%.*]] to i8
+; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP3]], i8 [[TMP3]], i8 [[TMP2]])
 ; CHECK-NEXT:    ret i8 [[CONV2]]
 ;
   %and = and i32 %shift, 3
@@ -447,12 +400,7 @@
 define i8 @rotate8_not_safe(i8 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotate8_not_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
-; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP1]], 7
-; CHECK-NEXT:    [[TMP4:%.*]] = and i8 [[TMP2]], 7
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i8 [[V:%.*]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = shl i8 [[V]], [[TMP3]]
-; CHECK-NEXT:    [[RET:%.*]] = or i8 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %conv = zext i8 %v to i32
@@ -490,12 +438,7 @@
 
 define i16 @rotateleft_16_neg_mask(i16 %v, i16 %shamt) {
 ; CHECK-LABEL: @rotateleft_16_neg_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i16 0, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[SHAMT]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = and i16 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP4:%.*]] = lshr i16 [[V:%.*]], [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i16 [[V]], [[TMP2]]
-; CHECK-NEXT:    [[RET:%.*]] = or i16 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshl.i16(i16 [[V:%.*]], i16 [[V]], i16 [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret i16 [[RET]]
 ;
   %neg = sub i16 0, %shamt
@@ -513,12 +456,7 @@
 
 define i16 @rotateleft_16_neg_mask_commute(i16 %v, i16 %shamt) {
 ; CHECK-LABEL: @rotateleft_16_neg_mask_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i16 0, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[SHAMT]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = and i16 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP4:%.*]] = shl i16 [[V:%.*]], [[TMP2]]
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i16 [[V]], [[TMP3]]
-; CHECK-NEXT:    [[RET:%.*]] = or i16 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshl.i16(i16 [[V:%.*]], i16 [[V]], i16 [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret i16 [[RET]]
 ;
   %neg = sub i16 0, %shamt
@@ -536,12 +474,7 @@
 
 define i8 @rotateright_8_neg_mask(i8 %v, i8 %shamt) {
 ; CHECK-LABEL: @rotateright_8_neg_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[SHAMT]], 7
-; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP1]], 7
-; CHECK-NEXT:    [[TMP4:%.*]] = lshr i8 [[V:%.*]], [[TMP2]]
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i8 [[V]], [[TMP3]]
-; CHECK-NEXT:    [[RET:%.*]] = or i8 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshr.i8(i8 [[V:%.*]], i8 [[V]], i8 [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %neg = sub i8 0, %shamt
@@ -559,12 +492,7 @@
 
 define i8 @rotateright_8_neg_mask_commute(i8 %v, i8 %shamt) {
 ; CHECK-LABEL: @rotateright_8_neg_mask_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[SHAMT]], 7
-; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP1]], 7
-; CHECK-NEXT:    [[TMP4:%.*]] = shl i8 [[V:%.*]], [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i8 [[V]], [[TMP2]]
-; CHECK-NEXT:    [[RET:%.*]] = or i8 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshr.i8(i8 [[V:%.*]], i8 [[V]], i8 [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %neg = sub i8 0, %shamt
@@ -586,12 +514,7 @@
 define i16 @rotateright_16_neg_mask_wide_amount(i16 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateright_16_neg_mask_wide_amount(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i16
-; CHECK-NEXT:    [[TMP2:%.*]] = sub i16 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = and i16 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP4:%.*]] = and i16 [[TMP2]], 15
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i16 [[V:%.*]], [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = shl i16 [[V]], [[TMP4]]
-; CHECK-NEXT:    [[RET:%.*]] = or i16 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshr.i16(i16 [[V:%.*]], i16 [[V]], i16 [[TMP1]])
 ; CHECK-NEXT:    ret i16 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -608,12 +531,7 @@
 define i16 @rotateright_16_neg_mask_wide_amount_commute(i16 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateright_16_neg_mask_wide_amount_commute(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i16
-; CHECK-NEXT:    [[TMP2:%.*]] = sub i16 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = and i16 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP4:%.*]] = and i16 [[TMP2]], 15
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i16 [[V:%.*]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = lshr i16 [[V]], [[TMP3]]
-; CHECK-NEXT:    [[RET:%.*]] = or i16 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshr.i16(i16 [[V:%.*]], i16 [[V]], i16 [[TMP1]])
 ; CHECK-NEXT:    ret i16 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -630,12 +548,7 @@
 define i8 @rotateleft_8_neg_mask_wide_amount(i8 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateleft_8_neg_mask_wide_amount(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
-; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP1]], 7
-; CHECK-NEXT:    [[TMP4:%.*]] = and i8 [[TMP2]], 7
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i8 [[V:%.*]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = shl i8 [[V]], [[TMP3]]
-; CHECK-NEXT:    [[RET:%.*]] = or i8 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -652,12 +565,7 @@
 define i8 @rotateleft_8_neg_mask_wide_amount_commute(i8 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateleft_8_neg_mask_wide_amount_commute(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
-; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP1]], 7
-; CHECK-NEXT:    [[TMP4:%.*]] = and i8 [[TMP2]], 7
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i8 [[V:%.*]], [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = lshr i8 [[V]], [[TMP4]]
-; CHECK-NEXT:    [[RET:%.*]] = or i8 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -700,12 +608,7 @@
 
 define i32 @rotr_select(i32 %x, i32 %shamt) {
 ; CHECK-LABEL: @rotr_select(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 0, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[SHAMT]], 31
-; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP1]], 31
-; CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[X:%.*]], [[TMP2]]
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i32 [[X]], [[TMP3]]
-; CHECK-NEXT:    [[R:%.*]] = or i32 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %cmp = icmp eq i32 %shamt, 0
@@ -721,12 +624,7 @@
 
 define i8 @rotr_select_commute(i8 %x, i8 %shamt) {
 ; CHECK-LABEL: @rotr_select_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[SHAMT]], 7
-; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP1]], 7
-; CHECK-NEXT:    [[TMP4:%.*]] = shl i8 [[X:%.*]], [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i8 [[X]], [[TMP2]]
-; CHECK-NEXT:    [[R:%.*]] = or i8 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %cmp = icmp eq i8 %shamt, 0
@@ -742,12 +640,7 @@
 
 define i16 @rotl_select(i16 %x, i16 %shamt) {
 ; CHECK-LABEL: @rotl_select(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i16 0, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[SHAMT]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = and i16 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP4:%.*]] = lshr i16 [[X:%.*]], [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i16 [[X]], [[TMP2]]
-; CHECK-NEXT:    [[R:%.*]] = or i16 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[R:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %cmp = icmp eq i16 %shamt, 0
@@ -763,12 +656,7 @@
 
 define <2 x i64> @rotl_select_commute(<2 x i64> %x, <2 x i64> %shamt) {
 ; CHECK-LABEL: @rotl_select_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub <2 x i64> zeroinitializer, [[SHAMT:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i64> [[SHAMT]], <i64 63, i64 63>
-; CHECK-NEXT:    [[TMP3:%.*]] = and <2 x i64> [[TMP1]], <i64 63, i64 63>
-; CHECK-NEXT:    [[TMP4:%.*]] = shl <2 x i64> [[X:%.*]], [[TMP2]]
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr <2 x i64> [[X]], [[TMP3]]
-; CHECK-NEXT:    [[R:%.*]] = or <2 x i64> [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[R:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[X:%.*]], <2 x i64> [[X]], <2 x i64> [[SHAMT:%.*]])
 ; CHECK-NEXT:    ret <2 x i64> [[R]]
 ;
   %cmp = icmp eq <2 x i64> %shamt, zeroinitializer
diff --git a/test/Transforms/InstCombine/scalarization.ll b/test/Transforms/InstCombine/scalarization.ll
index b306327..5865095 100644
--- a/test/Transforms/InstCombine/scalarization.ll
+++ b/test/Transforms/InstCombine/scalarization.ll
@@ -1,6 +1,63 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -instcombine -S < %s | FileCheck %s
 
+define i32 @extract_load(<4 x i32>* %p) {
+; CHECK-LABEL: @extract_load(
+; CHECK-NEXT:    [[X:%.*]] = load <4 x i32>, <4 x i32>* [[P:%.*]], align 4
+; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x i32> [[X]], i32 1
+; CHECK-NEXT:    ret i32 [[EXT]]
+;
+  %x = load <4 x i32>, <4 x i32>* %p, align 4
+  %ext = extractelement <4 x i32> %x, i32 1
+  ret i32 %ext
+}
+
+define double @extract_load_fp(<4 x double>* %p) {
+; CHECK-LABEL: @extract_load_fp(
+; CHECK-NEXT:    [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x double> [[X]], i32 3
+; CHECK-NEXT:    ret double [[EXT]]
+;
+  %x = load <4 x double>, <4 x double>* %p, align 32
+  %ext = extractelement <4 x double> %x, i32 3
+  ret double %ext
+}
+
+define double @extract_load_volatile(<4 x double>* %p) {
+; CHECK-LABEL: @extract_load_volatile(
+; CHECK-NEXT:    [[X:%.*]] = load volatile <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x double> [[X]], i32 2
+; CHECK-NEXT:    ret double [[EXT]]
+;
+  %x = load volatile <4 x double>, <4 x double>* %p
+  %ext = extractelement <4 x double> %x, i32 2
+  ret double %ext
+}
+
+define double @extract_load_extra_use(<4 x double>* %p, <4 x double>* %p2) {
+; CHECK-LABEL: @extract_load_extra_use(
+; CHECK-NEXT:    [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 8
+; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x double> [[X]], i32 0
+; CHECK-NEXT:    store <4 x double> [[X]], <4 x double>* [[P2:%.*]], align 32
+; CHECK-NEXT:    ret double [[EXT]]
+;
+  %x = load <4 x double>, <4 x double>* %p, align 8
+  %ext = extractelement <4 x double> %x, i32 0
+  store <4 x double> %x, <4 x double>* %p2
+  ret double %ext
+}
+
+define double @extract_load_variable_index(<4 x double>* %p, i32 %y) {
+; CHECK-LABEL: @extract_load_variable_index(
+; CHECK-NEXT:    [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x double> [[X]], i32 [[Y:%.*]]
+; CHECK-NEXT:    ret double [[EXT]]
+;
+  %x = load <4 x double>, <4 x double>* %p
+  %ext = extractelement <4 x double> %x, i32 %y
+  ret double %ext
+}
+
 define void @scalarize_phi(i32 * %n, float * %inout) {
 ; CHECK-LABEL: @scalarize_phi(
 ; CHECK-NEXT:  entry:
@@ -45,17 +102,72 @@
   ret void
 }
 
-define float @extract_element_constant_index(<4 x float> %x) {
-; CHECK-LABEL: @extract_element_constant_index(
+define float @extract_element_binop_splat_constant_index(<4 x float> %x) {
+; CHECK-LABEL: @extract_element_binop_splat_constant_index(
 ; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i32 2
 ; CHECK-NEXT:    [[R:%.*]] = fadd float [[TMP1]], 0x4002A3D700000000
 ; CHECK-NEXT:    ret float [[R]]
 ;
-  %add = fadd <4 x float> %x, <float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000>
-  %r = extractelement <4 x float> %add, i32 2
+  %b = fadd <4 x float> %x, <float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000>
+  %r = extractelement <4 x float> %b, i32 2
   ret float %r
 }
 
+define double @extract_element_binop_splat_with_undef_constant_index(<2 x double> %x) {
+; CHECK-LABEL: @extract_element_binop_splat_with_undef_constant_index(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x double> [[X:%.*]], i32 0
+; CHECK-NEXT:    [[R:%.*]] = fdiv double 4.200000e+01, [[TMP1]]
+; CHECK-NEXT:    ret double [[R]]
+;
+  %b = fdiv <2 x double> <double 42.0, double undef>, %x
+  %r = extractelement <2 x double> %b, i32 0
+  ret double %r
+}
+
+define float @extract_element_binop_nonsplat_constant_index(<2 x float> %x) {
+; CHECK-LABEL: @extract_element_binop_nonsplat_constant_index(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x float> [[X:%.*]], i32 1
+; CHECK-NEXT:    [[R:%.*]] = fmul float [[TMP1]], 4.300000e+01
+; CHECK-NEXT:    ret float [[R]]
+;
+  %b = fmul <2 x float> %x, <float 42.0, float 43.0>
+  %r = extractelement <2 x float> %b, i32 1
+  ret float %r
+}
+
+define i8 @extract_element_binop_splat_variable_index(<4 x i8> %x, i32 %y) {
+; CHECK-LABEL: @extract_element_binop_splat_variable_index(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = sdiv i8 [[TMP1]], 42
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %b = sdiv <4 x i8> %x, <i8 42, i8 42, i8 42, i8 42>
+  %r = extractelement <4 x i8> %b, i32 %y
+  ret i8 %r
+}
+
+define i8 @extract_element_binop_splat_with_undef_variable_index(<4 x i8> %x, i32 %y) {
+; CHECK-LABEL: @extract_element_binop_splat_with_undef_variable_index(
+; CHECK-NEXT:    [[B:%.*]] = mul <4 x i8> [[X:%.*]], <i8 42, i8 42, i8 undef, i8 42>
+; CHECK-NEXT:    [[R:%.*]] = extractelement <4 x i8> [[B]], i32 [[Y:%.*]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %b = mul <4 x i8> %x, <i8 42, i8 42, i8 undef, i8 42>
+  %r = extractelement <4 x i8> %b, i32 %y
+  ret i8 %r
+}
+
+define i8 @extract_element_binop_nonsplat_variable_index(<4 x i8> %x, i32 %y) {
+; CHECK-LABEL: @extract_element_binop_nonsplat_variable_index(
+; CHECK-NEXT:    [[B:%.*]] = lshr <4 x i8> [[X:%.*]], <i8 4, i8 3, i8 undef, i8 2>
+; CHECK-NEXT:    [[R:%.*]] = extractelement <4 x i8> [[B]], i32 [[Y:%.*]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %b = lshr <4 x i8> %x, <i8 4, i8 3, i8 undef, i8 2>
+  %r = extractelement <4 x i8> %b, i32 %y
+  ret i8 %r
+}
+
 define float @extract_element_load(<4 x float> %x, <4 x float>* %ptr) {
 ; CHECK-LABEL: @extract_element_load(
 ; CHECK-NEXT:    [[LOAD:%.*]] = load <4 x float>, <4 x float>* [[PTR:%.*]], align 16
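
For orientation, the scalarization tests added above split into two groups. The extract-of-load functions are baseline coverage (their checks keep the load and extractelement untouched), while the extract-of-binop functions show the actual scalarization: a constant extract index always scalarizes the binop, and a variable index does so only when the constant operand is a uniform splat with no undef lanes. A hedged sketch of the constant-index case (illustrative names, not from the patch):

define i32 @extract_binop_sketch(<4 x i32> %v) {
  %b = add <4 x i32> %v, <i32 1, i32 2, i32 3, i32 4>
  %e = extractelement <4 x i32> %b, i32 2
  ; expected after -instcombine:
  ;   %0 = extractelement <4 x i32> %v, i32 2
  ;   %e = add i32 %0, 3
  ret i32 %e
}
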
diff --git a/test/Transforms/InstCombine/select-bitext.ll b/test/Transforms/InstCombine/select-bitext.ll
index b66a9ee..d44be27 100644
--- a/test/Transforms/InstCombine/select-bitext.ll
+++ b/test/Transforms/InstCombine/select-bitext.ll
@@ -5,7 +5,7 @@
 
 define i16 @sel_sext_constants(i1 %cmp) {
 ; CHECK-LABEL: @sel_sext_constants(
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i16 -1, i16 42
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i16 -1, i16 42
 ; CHECK-NEXT:    ret i16 [[EXT]]
 ;
   %sel = select i1 %cmp, i8 255, i8 42
@@ -15,7 +15,7 @@
 
 define i16 @sel_zext_constants(i1 %cmp) {
 ; CHECK-LABEL: @sel_zext_constants(
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i16 255, i16 42
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i16 255, i16 42
 ; CHECK-NEXT:    ret i16 [[EXT]]
 ;
   %sel = select i1 %cmp, i8 255, i8 42
@@ -25,7 +25,7 @@
 
 define double @sel_fpext_constants(i1 %cmp) {
 ; CHECK-LABEL: @sel_fpext_constants(
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, double -2.550000e+02, double 4.200000e+01
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], double -2.550000e+02, double 4.200000e+01
 ; CHECK-NEXT:    ret double [[EXT]]
 ;
   %sel = select i1 %cmp, float -255.0, float 42.0
@@ -37,8 +37,8 @@
 
 define i64 @sel_sext(i32 %a, i1 %cmp) {
 ; CHECK-LABEL: @sel_sext(
-; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 %a to i64
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i64 [[TMP1]], i64 42
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[A:%.*]] to i64
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i64 [[TMP1]], i64 42
 ; CHECK-NEXT:    ret i64 [[EXT]]
 ;
   %sel = select i1 %cmp, i32 %a, i32 42
@@ -48,8 +48,8 @@
 
 define <4 x i64> @sel_sext_vec(<4 x i32> %a, <4 x i1> %cmp) {
 ; CHECK-LABEL: @sel_sext_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i32> %a to <4 x i64>
-; CHECK-NEXT:    [[EXT:%.*]] = select <4 x i1> %cmp, <4 x i64> [[TMP1]], <4 x i64> <i64 42, i64 42, i64 42, i64 42>
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i32> [[A:%.*]] to <4 x i64>
+; CHECK-NEXT:    [[EXT:%.*]] = select <4 x i1> [[CMP:%.*]], <4 x i64> [[TMP1]], <4 x i64> <i64 42, i64 42, i64 42, i64 42>
 ; CHECK-NEXT:    ret <4 x i64> [[EXT]]
 ;
   %sel = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> <i32 42, i32 42, i32 42, i32 42>
@@ -59,8 +59,8 @@
 
 define i64 @sel_zext(i32 %a, i1 %cmp) {
 ; CHECK-LABEL: @sel_zext(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 %a to i64
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i64 [[TMP1]], i64 42
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i64 [[TMP1]], i64 42
 ; CHECK-NEXT:    ret i64 [[EXT]]
 ;
   %sel = select i1 %cmp, i32 %a, i32 42
@@ -70,8 +70,8 @@
 
 define <4 x i64> @sel_zext_vec(<4 x i32> %a, <4 x i1> %cmp) {
 ; CHECK-LABEL: @sel_zext_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i32> %a to <4 x i64>
-; CHECK-NEXT:    [[EXT:%.*]] = select <4 x i1> %cmp, <4 x i64> [[TMP1]], <4 x i64> <i64 42, i64 42, i64 42, i64 42>
+; CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i32> [[A:%.*]] to <4 x i64>
+; CHECK-NEXT:    [[EXT:%.*]] = select <4 x i1> [[CMP:%.*]], <4 x i64> [[TMP1]], <4 x i64> <i64 42, i64 42, i64 42, i64 42>
 ; CHECK-NEXT:    ret <4 x i64> [[EXT]]
 ;
   %sel = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> <i32 42, i32 42, i32 42, i32 42>
@@ -85,9 +85,9 @@
 
 define i64 @trunc_sel_larger_sext(i32 %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_sext(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 %a to i16
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[A:%.*]] to i16
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[TRUNC]] to i64
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i64 [[TMP1]], i64 42
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i64 [[TMP1]], i64 42
 ; CHECK-NEXT:    ret i64 [[EXT]]
 ;
   %trunc = trunc i32 %a to i16
@@ -98,10 +98,9 @@
 
 define <2 x i64> @trunc_sel_larger_sext_vec(<2 x i32> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_sext_vec(
-; CHECK-NEXT:    [[TRUNC:%.*]] = zext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[SEXT:%.*]] = shl <2 x i64> [[TRUNC]], <i64 48, i64 48>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 48, i64 48>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i64> [[TMP1]], <2 x i64> <i64 42, i64 43>
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i32> [[A:%.*]] to <2 x i16>
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <2 x i16> [[TRUNC]] to <2 x i64>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x i64> [[TMP1]], <2 x i64> <i64 42, i64 43>
 ; CHECK-NEXT:    ret <2 x i64> [[EXT]]
 ;
   %trunc = trunc <2 x i32> %a to <2 x i16>
@@ -112,9 +111,9 @@
 
 define i32 @trunc_sel_smaller_sext(i64 %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_smaller_sext(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 %a to i16
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[A:%.*]] to i16
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[TRUNC]] to i32
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i32 [[TMP1]], i32 42
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i32 [[TMP1]], i32 42
 ; CHECK-NEXT:    ret i32 [[EXT]]
 ;
   %trunc = trunc i64 %a to i16
@@ -125,10 +124,9 @@
 
 define <2 x i32> @trunc_sel_smaller_sext_vec(<2 x i64> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_smaller_sext_vec(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i64> %a to <2 x i32>
-; CHECK-NEXT:    [[SEXT:%.*]] = shl <2 x i32> [[TRUNC]], <i32 16, i32 16>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i16>
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <2 x i16> [[TRUNC]] to <2 x i32>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
 ; CHECK-NEXT:    ret <2 x i32> [[EXT]]
 ;
   %trunc = trunc <2 x i64> %a to <2 x i16>
@@ -139,9 +137,9 @@
 
 define i32 @trunc_sel_equal_sext(i32 %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_equal_sext(
-; CHECK-NEXT:    [[SEXT:%.*]] = shl i32 %a, 16
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i32 [[SEXT]], 16
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i32 [[TMP1]], i32 42
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[A:%.*]], 16
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr exact i32 [[TMP1]], 16
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i32 [[TMP2]], i32 42
 ; CHECK-NEXT:    ret i32 [[EXT]]
 ;
   %trunc = trunc i32 %a to i16
@@ -152,9 +150,9 @@
 
 define <2 x i32> @trunc_sel_equal_sext_vec(<2 x i32> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_equal_sext_vec(
-; CHECK-NEXT:    [[SEXT:%.*]] = shl <2 x i32> %a, <i32 16, i32 16>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i32> [[A:%.*]], <i32 16, i32 16>
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr exact <2 x i32> [[TMP1]], <i32 16, i32 16>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x i32> [[TMP2]], <2 x i32> <i32 42, i32 43>
 ; CHECK-NEXT:    ret <2 x i32> [[EXT]]
 ;
   %trunc = trunc <2 x i32> %a to <2 x i16>
@@ -165,9 +163,9 @@
 
 define i64 @trunc_sel_larger_zext(i32 %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_zext(
-; CHECK-NEXT:    [[TRUNC_MASK:%.*]] = and i32 %a, 65535
+; CHECK-NEXT:    [[TRUNC_MASK:%.*]] = and i32 [[A:%.*]], 65535
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TRUNC_MASK]] to i64
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i64 [[TMP1]], i64 42
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i64 [[TMP1]], i64 42
 ; CHECK-NEXT:    ret i64 [[EXT]]
 ;
   %trunc = trunc i32 %a to i16
@@ -178,9 +176,9 @@
 
 define <2 x i64> @trunc_sel_larger_zext_vec(<2 x i32> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_zext_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> %a, <i32 65535, i32 65535>
-; CHECK-NEXT:    [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i64> [[TMP2]], <2 x i64> <i64 42, i64 43>
+; CHECK-NEXT:    [[TRUNC_MASK:%.*]] = and <2 x i32> [[A:%.*]], <i32 65535, i32 65535>
+; CHECK-NEXT:    [[TMP1:%.*]] = zext <2 x i32> [[TRUNC_MASK]] to <2 x i64>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x i64> [[TMP1]], <2 x i64> <i64 42, i64 43>
 ; CHECK-NEXT:    ret <2 x i64> [[EXT]]
 ;
   %trunc = trunc <2 x i32> %a to <2 x i16>
@@ -191,9 +189,9 @@
 
 define i32 @trunc_sel_smaller_zext(i64 %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_smaller_zext(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 %a to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 65535
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i32 [[TMP2]], i32 42
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i32 [[TMP2]], i32 42
 ; CHECK-NEXT:    ret i32 [[EXT]]
 ;
   %trunc = trunc i64 %a to i16
@@ -204,9 +202,9 @@
 
 define <2 x i32> @trunc_sel_smaller_zext_vec(<2 x i64> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_smaller_zext_vec(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i64> %a to <2 x i32>
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[TRUNC]], <i32 65535, i32 65535>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 65535, i32 65535>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x i32> [[TMP2]], <2 x i32> <i32 42, i32 43>
 ; CHECK-NEXT:    ret <2 x i32> [[EXT]]
 ;
   %trunc = trunc <2 x i64> %a to <2 x i16>
@@ -217,8 +215,8 @@
 
 define i32 @trunc_sel_equal_zext(i32 %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_equal_zext(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 %a, 65535
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, i32 [[TMP1]], i32 42
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 65535
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i32 [[TMP1]], i32 42
 ; CHECK-NEXT:    ret i32 [[EXT]]
 ;
   %trunc = trunc i32 %a to i16
@@ -229,8 +227,8 @@
 
 define <2 x i32> @trunc_sel_equal_zext_vec(<2 x i32> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_equal_zext_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> %a, <i32 65535, i32 65535>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 65535, i32 65535>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
 ; CHECK-NEXT:    ret <2 x i32> [[EXT]]
 ;
   %trunc = trunc <2 x i32> %a to <2 x i16>
@@ -241,9 +239,9 @@
 
 define double @trunc_sel_larger_fpext(float %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_fpext(
-; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc float %a to half
+; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc float [[A:%.*]] to half
 ; CHECK-NEXT:    [[TMP1:%.*]] = fpext half [[TRUNC]] to double
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, double [[TMP1]], double 4.200000e+01
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], double [[TMP1]], double 4.200000e+01
 ; CHECK-NEXT:    ret double [[EXT]]
 ;
   %trunc = fptrunc float %a to half
@@ -254,9 +252,9 @@
 
 define <2 x double> @trunc_sel_larger_fpext_vec(<2 x float> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_fpext_vec(
-; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc <2 x float> %a to <2 x half>
+; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc <2 x float> [[A:%.*]] to <2 x half>
 ; CHECK-NEXT:    [[TMP1:%.*]] = fpext <2 x half> [[TRUNC]] to <2 x double>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x double> [[TMP1]], <2 x double> <double 4.200000e+01, double 4.300000e+01>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x double> [[TMP1]], <2 x double> <double 4.200000e+01, double 4.300000e+01>
 ; CHECK-NEXT:    ret <2 x double> [[EXT]]
 ;
   %trunc = fptrunc <2 x float> %a to <2 x half>
@@ -267,9 +265,9 @@
 
 define float @trunc_sel_smaller_fpext(double %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_smaller_fpext(
-; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc double %a to half
+; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc double [[A:%.*]] to half
 ; CHECK-NEXT:    [[TMP1:%.*]] = fpext half [[TRUNC]] to float
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, float [[TMP1]], float 4.200000e+01
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], float [[TMP1]], float 4.200000e+01
 ; CHECK-NEXT:    ret float [[EXT]]
 ;
   %trunc = fptrunc double %a to half
@@ -280,9 +278,9 @@
 
 define <2 x float> @trunc_sel_smaller_fpext_vec(<2 x double> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_smaller_fpext_vec(
-; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc <2 x double> %a to <2 x half>
+; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc <2 x double> [[A:%.*]] to <2 x half>
 ; CHECK-NEXT:    [[TMP1:%.*]] = fpext <2 x half> [[TRUNC]] to <2 x float>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x float> [[TMP1]], <2 x float> <float 4.200000e+01, float 4.300000e+01>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x float> [[TMP1]], <2 x float> <float 4.200000e+01, float 4.300000e+01>
 ; CHECK-NEXT:    ret <2 x float> [[EXT]]
 ;
   %trunc = fptrunc <2 x double> %a to <2 x half>
@@ -293,9 +291,9 @@
 
 define float @trunc_sel_equal_fpext(float %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_equal_fpext(
-; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc float %a to half
+; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc float [[A:%.*]] to half
 ; CHECK-NEXT:    [[TMP1:%.*]] = fpext half [[TRUNC]] to float
-; CHECK-NEXT:    [[EXT:%.*]] = select i1 %cmp, float [[TMP1]], float 4.200000e+01
+; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], float [[TMP1]], float 4.200000e+01
 ; CHECK-NEXT:    ret float [[EXT]]
 ;
   %trunc = fptrunc float %a to half
@@ -306,9 +304,9 @@
 
 define <2 x float> @trunc_sel_equal_fpext_vec(<2 x float> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_equal_fpext_vec(
-; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc <2 x float> %a to <2 x half>
+; CHECK-NEXT:    [[TRUNC:%.*]] = fptrunc <2 x float> [[A:%.*]] to <2 x half>
 ; CHECK-NEXT:    [[TMP1:%.*]] = fpext <2 x half> [[TRUNC]] to <2 x float>
-; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> %cmp, <2 x float> [[TMP1]], <2 x float> <float 4.200000e+01, float 4.300000e+01>
+; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x float> [[TMP1]], <2 x float> <float 4.200000e+01, float 4.300000e+01>
 ; CHECK-NEXT:    ret <2 x float> [[EXT]]
 ;
   %trunc = fptrunc <2 x float> %a to <2 x half>
@@ -319,8 +317,8 @@
 
 define i32 @test_sext1(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_sext1(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = and i1 %ccb, %cca
-; CHECK-NEXT:    [[R:%.*]] = sext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NARROW:%.*]] = and i1 [[CCB:%.*]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = sext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = sext i1 %cca to i32
@@ -330,8 +328,8 @@
 
 define i32 @test_sext2(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_sext2(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = or i1 %ccb, %cca
-; CHECK-NEXT:    [[R:%.*]] = sext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NARROW:%.*]] = or i1 [[CCB:%.*]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = sext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = sext i1 %cca to i32
@@ -341,9 +339,9 @@
 
 define i32 @test_sext3(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_sext3(
-; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 %ccb, true
-; CHECK-NEXT:    [[FOLD_R:%.*]] = and i1 [[NOT_CCB]], %cca
-; CHECK-NEXT:    [[R:%.*]] = sext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 [[CCB:%.*]], true
+; CHECK-NEXT:    [[NARROW:%.*]] = and i1 [[NOT_CCB]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = sext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = sext i1 %cca to i32
@@ -353,9 +351,9 @@
 
 define i32 @test_sext4(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_sext4(
-; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 %ccb, true
-; CHECK-NEXT:    [[FOLD_R:%.*]] = or i1 [[NOT_CCB]], %cca
-; CHECK-NEXT:    [[R:%.*]] = sext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 [[CCB:%.*]], true
+; CHECK-NEXT:    [[NARROW:%.*]] = or i1 [[NOT_CCB]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = sext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = sext i1 %cca to i32
@@ -365,8 +363,8 @@
 
 define i32 @test_zext1(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_zext1(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = and i1 %ccb, %cca
-; CHECK-NEXT:    [[R:%.*]] = zext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NARROW:%.*]] = and i1 [[CCB:%.*]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = zext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = zext i1 %cca to i32
@@ -376,8 +374,8 @@
 
 define i32 @test_zext2(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_zext2(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = or i1 %ccb, %cca
-; CHECK-NEXT:    [[R:%.*]] = zext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NARROW:%.*]] = or i1 [[CCB:%.*]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = zext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = zext i1 %cca to i32
@@ -387,9 +385,9 @@
 
 define i32 @test_zext3(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_zext3(
-; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 %ccb, true
-; CHECK-NEXT:    [[FOLD_R:%.*]] = and i1 [[NOT_CCB]], %cca
-; CHECK-NEXT:    [[R:%.*]] = zext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 [[CCB:%.*]], true
+; CHECK-NEXT:    [[NARROW:%.*]] = and i1 [[NOT_CCB]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = zext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = zext i1 %cca to i32
@@ -399,9 +397,9 @@
 
 define i32 @test_zext4(i1 %cca, i1 %ccb) {
 ; CHECK-LABEL: @test_zext4(
-; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 %ccb, true
-; CHECK-NEXT:    [[FOLD_R:%.*]] = or i1 [[NOT_CCB]], %cca
-; CHECK-NEXT:    [[R:%.*]] = zext i1 [[FOLD_R]] to i32
+; CHECK-NEXT:    [[NOT_CCB:%.*]] = xor i1 [[CCB:%.*]], true
+; CHECK-NEXT:    [[NARROW:%.*]] = or i1 [[NOT_CCB]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = zext i1 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %ccax = zext i1 %cca to i32
@@ -411,8 +409,8 @@
 
 define i32 @test_negative_sext(i1 %a, i1 %cc) {
 ; CHECK-LABEL: @test_negative_sext(
-; CHECK-NEXT:    [[A_EXT:%.*]] = sext i1 %a to i32
-; CHECK-NEXT:    [[R:%.*]] = select i1 %cc, i32 [[A_EXT]], i32 1
+; CHECK-NEXT:    [[A_EXT:%.*]] = sext i1 [[A:%.*]] to i32
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CC:%.*]], i32 [[A_EXT]], i32 1
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a.ext = sext i1 %a to i32
@@ -422,8 +420,8 @@
 
 define i32 @test_negative_zext(i1 %a, i1 %cc) {
 ; CHECK-LABEL: @test_negative_zext(
-; CHECK-NEXT:    [[A_EXT:%.*]] = zext i1 %a to i32
-; CHECK-NEXT:    [[R:%.*]] = select i1 %cc, i32 [[A_EXT]], i32 -1
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i1 [[A:%.*]] to i32
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CC:%.*]], i32 [[A_EXT]], i32 -1
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a.ext = zext i1 %a to i32
@@ -433,8 +431,8 @@
 
 define i32 @test_bits_sext(i8 %a, i1 %cc) {
 ; CHECK-LABEL: @test_bits_sext(
-; CHECK-NEXT:    [[A_EXT:%.*]] = sext i8 %a to i32
-; CHECK-NEXT:    [[R:%.*]] = select i1 %cc, i32 [[A_EXT]], i32 -128
+; CHECK-NEXT:    [[A_EXT:%.*]] = sext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CC:%.*]], i32 [[A_EXT]], i32 -128
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a.ext = sext i8 %a to i32
@@ -444,8 +442,8 @@
 
 define i32 @test_bits_zext(i8 %a, i1 %cc) {
 ; CHECK-LABEL: @test_bits_zext(
-; CHECK-NEXT:    [[A_EXT:%.*]] = zext i8 %a to i32
-; CHECK-NEXT:    [[R:%.*]] = select i1 %cc, i32 [[A_EXT]], i32 255
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[CC:%.*]], i32 [[A_EXT]], i32 255
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a.ext = zext i8 %a to i32
@@ -455,11 +453,11 @@
 
 define i32 @test_op_op(i32 %a, i32 %b, i32 %c) {
 ; CHECK-LABEL: @test_op_op(
-; CHECK-NEXT:    [[CCA:%.*]] = icmp sgt i32 %a, 0
-; CHECK-NEXT:    [[CCB:%.*]] = icmp sgt i32 %b, 0
-; CHECK-NEXT:    [[CCC:%.*]] = icmp sgt i32 %c, 0
+; CHECK-NEXT:    [[CCA:%.*]] = icmp sgt i32 [[A:%.*]], 0
+; CHECK-NEXT:    [[CCB:%.*]] = icmp sgt i32 [[B:%.*]], 0
+; CHECK-NEXT:    [[CCC:%.*]] = icmp sgt i32 [[C:%.*]], 0
 ; CHECK-NEXT:    [[R_V:%.*]] = select i1 [[CCC]], i1 [[CCA]], i1 [[CCB]]
-; CHECK-NEXT:    [[R:%.*]] = sext i1 [[R:%.*]].v to i32
+; CHECK-NEXT:    [[R:%.*]] = sext i1 [[R_V]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %cca = icmp sgt i32 %a, 0
@@ -473,8 +471,8 @@
 
 define <2 x i32> @test_vectors_sext(<2 x i1> %cca, <2 x i1> %ccb) {
 ; CHECK-LABEL: @test_vectors_sext(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = and <2 x i1> %ccb, %cca
-; CHECK-NEXT:    [[R:%.*]] = sext <2 x i1> [[FOLD_R]] to <2 x i32>
+; CHECK-NEXT:    [[NARROW:%.*]] = and <2 x i1> [[CCB:%.*]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = sext <2 x i1> [[NARROW]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
   %ccax = sext <2 x i1> %cca to <2 x i32>
@@ -484,7 +482,7 @@
 
 define <2 x i32> @test_vectors_sext_nonsplat(<2 x i1> %cca, <2 x i1> %ccb) {
 ; CHECK-LABEL: @test_vectors_sext_nonsplat(
-; CHECK-NEXT:    [[NARROW:%.*]] = select <2 x i1> %ccb, <2 x i1> %cca, <2 x i1> <i1 false, i1 true>
+; CHECK-NEXT:    [[NARROW:%.*]] = select <2 x i1> [[CCB:%.*]], <2 x i1> [[CCA:%.*]], <2 x i1> <i1 false, i1 true>
 ; CHECK-NEXT:    [[R:%.*]] = sext <2 x i1> [[NARROW]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
@@ -495,8 +493,8 @@
 
 define <2 x i32> @test_vectors_zext(<2 x i1> %cca, <2 x i1> %ccb) {
 ; CHECK-LABEL: @test_vectors_zext(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = and <2 x i1> %ccb, %cca
-; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[FOLD_R]] to <2 x i32>
+; CHECK-NEXT:    [[NARROW:%.*]] = and <2 x i1> [[CCB:%.*]], [[CCA:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[NARROW]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
   %ccax = zext <2 x i1> %cca to <2 x i32>
@@ -506,7 +504,7 @@
 
 define <2 x i32> @test_vectors_zext_nonsplat(<2 x i1> %cca, <2 x i1> %ccb) {
 ; CHECK-LABEL: @test_vectors_zext_nonsplat(
-; CHECK-NEXT:    [[NARROW:%.*]] = select <2 x i1> %ccb, <2 x i1> %cca, <2 x i1> <i1 true, i1 false>
+; CHECK-NEXT:    [[NARROW:%.*]] = select <2 x i1> [[CCB:%.*]], <2 x i1> [[CCA:%.*]], <2 x i1> <i1 true, i1 false>
 ; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[NARROW]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
@@ -517,8 +515,8 @@
 
 define <2 x i32> @scalar_select_of_vectors_sext(<2 x i1> %cca, i1 %ccb) {
 ; CHECK-LABEL: @scalar_select_of_vectors_sext(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = select i1 %ccb, <2 x i1> %cca, <2 x i1> zeroinitializer
-; CHECK-NEXT:    [[R:%.*]] = sext <2 x i1> [[FOLD_R]] to <2 x i32>
+; CHECK-NEXT:    [[NARROW:%.*]] = select i1 [[CCB:%.*]], <2 x i1> [[CCA:%.*]], <2 x i1> zeroinitializer
+; CHECK-NEXT:    [[R:%.*]] = sext <2 x i1> [[NARROW]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
   %ccax = sext <2 x i1> %cca to <2 x i32>
@@ -528,8 +526,8 @@
 
 define <2 x i32> @scalar_select_of_vectors_zext(<2 x i1> %cca, i1 %ccb) {
 ; CHECK-LABEL: @scalar_select_of_vectors_zext(
-; CHECK-NEXT:    [[FOLD_R:%.*]] = select i1 %ccb, <2 x i1> %cca, <2 x i1> zeroinitializer
-; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[FOLD_R]] to <2 x i32>
+; CHECK-NEXT:    [[NARROW:%.*]] = select i1 [[CCB:%.*]], <2 x i1> [[CCA:%.*]], <2 x i1> zeroinitializer
+; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[NARROW]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
   %ccax = zext <2 x i1> %cca to <2 x i32>
@@ -539,7 +537,7 @@
 
 define i32 @sext_true_val_must_be_all_ones(i1 %x) {
 ; CHECK-LABEL: @sext_true_val_must_be_all_ones(
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 %x, i32 -1, i32 42, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[X:%.*]], i32 -1, i32 42, !prof !0
 ; CHECK-NEXT:    ret i32 [[SEL]]
 ;
   %ext = sext i1 %x to i32
@@ -549,7 +547,7 @@
 
 define <2 x i32> @sext_true_val_must_be_all_ones_vec(<2 x i1> %x) {
 ; CHECK-LABEL: @sext_true_val_must_be_all_ones_vec(
-; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> %x, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 42, i32 12>, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> [[X:%.*]], <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 42, i32 12>, !prof !0
 ; CHECK-NEXT:    ret <2 x i32> [[SEL]]
 ;
   %ext = sext <2 x i1> %x to <2 x i32>
@@ -559,7 +557,7 @@
 
 define i32 @zext_true_val_must_be_one(i1 %x) {
 ; CHECK-LABEL: @zext_true_val_must_be_one(
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 %x, i32 1, i32 42, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[X:%.*]], i32 1, i32 42, !prof !0
 ; CHECK-NEXT:    ret i32 [[SEL]]
 ;
   %ext = zext i1 %x to i32
@@ -569,7 +567,7 @@
 
 define <2 x i32> @zext_true_val_must_be_one_vec(<2 x i1> %x) {
 ; CHECK-LABEL: @zext_true_val_must_be_one_vec(
-; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> %x, <2 x i32> <i32 1, i32 1>, <2 x i32> <i32 42, i32 12>, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> [[X:%.*]], <2 x i32> <i32 1, i32 1>, <2 x i32> <i32 42, i32 12>, !prof !0
 ; CHECK-NEXT:    ret <2 x i32> [[SEL]]
 ;
   %ext = zext <2 x i1> %x to <2 x i32>
@@ -579,7 +577,7 @@
 
 define i32 @sext_false_val_must_be_zero(i1 %x) {
 ; CHECK-LABEL: @sext_false_val_must_be_zero(
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 %x, i32 42, i32 0, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[X:%.*]], i32 42, i32 0, !prof !0
 ; CHECK-NEXT:    ret i32 [[SEL]]
 ;
   %ext = sext i1 %x to i32
@@ -589,7 +587,7 @@
 
 define <2 x i32> @sext_false_val_must_be_zero_vec(<2 x i1> %x) {
 ; CHECK-LABEL: @sext_false_val_must_be_zero_vec(
-; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> %x, <2 x i32> <i32 42, i32 12>, <2 x i32> zeroinitializer, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> [[X:%.*]], <2 x i32> <i32 42, i32 12>, <2 x i32> zeroinitializer, !prof !0
 ; CHECK-NEXT:    ret <2 x i32> [[SEL]]
 ;
   %ext = sext <2 x i1> %x to <2 x i32>
@@ -599,7 +597,7 @@
 
 define i32 @zext_false_val_must_be_zero(i1 %x) {
 ; CHECK-LABEL: @zext_false_val_must_be_zero(
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 %x, i32 42, i32 0, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[X:%.*]], i32 42, i32 0, !prof !0
 ; CHECK-NEXT:    ret i32 [[SEL]]
 ;
   %ext = zext i1 %x to i32
@@ -609,7 +607,7 @@
 
 define <2 x i32> @zext_false_val_must_be_zero_vec(<2 x i1> %x) {
 ; CHECK-LABEL: @zext_false_val_must_be_zero_vec(
-; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> %x, <2 x i32> <i32 42, i32 12>, <2 x i32> zeroinitializer, !prof !0
+; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> [[X:%.*]], <2 x i32> <i32 42, i32 12>, <2 x i32> zeroinitializer, !prof !0
 ; CHECK-NEXT:    ret <2 x i32> [[SEL]]
 ;
   %ext = zext <2 x i1> %x to <2 x i32>
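
The select-bitext updates above are pure check regeneration (named FileCheck captures for the arguments), but the file's underlying fold is worth restating: a select between an extended i1 and a constant that is itself the extension of an i1 is narrowed to an i1 logic op followed by a single ext. A hedged scalar sketch (names illustrative):

define i32 @sel_zext_sketch(i1 %a, i1 %b) {
  %ax = zext i1 %a to i32
  %r = select i1 %b, i32 %ax, i32 0
  ; expected: %narrow = and i1 %b, %a
  ;           %r = zext i1 %narrow to i32
  ret i32 %r
}

The test_negative_* and test_bits_* functions document where the fold declines: a constant arm that is not the matching extension of an i1, or a source type wider than i1, keeps the original select.
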
diff --git a/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll b/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
index 3ac0279..606cded 100644
--- a/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
+++ b/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -instcombine -S < %s | FileCheck %s
 
 ; This test is to verify that the instruction combiner is able to fold
@@ -6,144 +7,146 @@
 
 define i16 @test1(i16 %x) {
 ; CHECK-LABEL: @test1(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
-; CHECK-NEXT: ret i16 [[VAR]]
-entry:
-  %0 = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    ret i16 [[TMP1]]
+;
+  %ct = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
   %tobool = icmp ne i16 %x, 0
-  %cond = select i1 %tobool, i16 %0, i16 16
+  %cond = select i1 %tobool, i16 %ct, i16 16
   ret i16 %cond
 }
 
 define i32 @test2(i32 %x) {
 ; CHECK-LABEL: @test2(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
-; CHECK-NEXT: ret i32 [[VAR]]
-entry:
-  %0 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
   %tobool = icmp ne i32 %x, 0
-  %cond = select i1 %tobool, i32 %0, i32 32
+  %cond = select i1 %tobool, i32 %ct, i32 32
   ret i32 %cond
 }
 
 define i64 @test3(i64 %x) {
 ; CHECK-LABEL: @test3(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
-; CHECK-NEXT: ret i64 [[VAR]]
-entry:
-  %0 = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    ret i64 [[TMP1]]
+;
+  %ct = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
   %tobool = icmp ne i64 %x, 0
-  %cond = select i1 %tobool, i64 %0, i64 64
+  %cond = select i1 %tobool, i64 %ct, i64 64
   ret i64 %cond
 }
 
 define i16 @test4(i16 %x) {
 ; CHECK-LABEL: @test4(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
-; CHECK-NEXT: ret i16 [[VAR]]
-entry:
-  %0 = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    ret i16 [[TMP1]]
+;
+  %ct = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
   %tobool = icmp eq i16 %x, 0
-  %cond = select i1 %tobool, i16 16, i16 %0
+  %cond = select i1 %tobool, i16 16, i16 %ct
   ret i16 %cond
 }
 
 define i32 @test5(i32 %x) {
 ; CHECK-LABEL: @test5(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
-; CHECK-NEXT: ret i32 [[VAR]]
-entry:
-  %0 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
   %tobool = icmp eq i32 %x, 0
-  %cond = select i1 %tobool, i32 32, i32 %0
+  %cond = select i1 %tobool, i32 32, i32 %ct
   ret i32 %cond
 }
 
 define i64 @test6(i64 %x) {
 ; CHECK-LABEL: @test6(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
-; CHECK-NEXT: ret i64 [[VAR]]
-entry:
-  %0 = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    ret i64 [[TMP1]]
+;
+  %ct = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
   %tobool = icmp eq i64 %x, 0
-  %cond = select i1 %tobool, i64 64, i64 %0
+  %cond = select i1 %tobool, i64 64, i64 %ct
   ret i64 %cond
 }
 
 define i16 @test1b(i16 %x) {
 ; CHECK-LABEL: @test1b(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i16 @llvm.cttz.i16(i16 %x, i1 false)
-; CHECK-NEXT: ret i16 [[VAR]]
-entry:
-  %0 = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    ret i16 [[TMP1]]
+;
+  %ct = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
   %tobool = icmp ne i16 %x, 0
-  %cond = select i1 %tobool, i16 %0, i16 16
+  %cond = select i1 %tobool, i16 %ct, i16 16
   ret i16 %cond
 }
 
 define i32 @test2b(i32 %x) {
 ; CHECK-LABEL: @test2b(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-NEXT: ret i32 [[VAR]]
-entry:
-  %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
   %tobool = icmp ne i32 %x, 0
-  %cond = select i1 %tobool, i32 %0, i32 32
+  %cond = select i1 %tobool, i32 %ct, i32 32
   ret i32 %cond
 }
 
 define i64 @test3b(i64 %x) {
 ; CHECK-LABEL: @test3b(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %x, i1 false)
-; CHECK-NEXT: ret i64 [[VAR]]
-entry:
-  %0 = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    ret i64 [[TMP1]]
+;
+  %ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
   %tobool = icmp ne i64 %x, 0
-  %cond = select i1 %tobool, i64 %0, i64 64
+  %cond = select i1 %tobool, i64 %ct, i64 64
   ret i64 %cond
 }
 
 define i16 @test4b(i16 %x) {
 ; CHECK-LABEL: @test4b(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i16 @llvm.cttz.i16(i16 %x, i1 false)
-; CHECK-NEXT: ret i16 [[VAR]]
-entry:
-  %0 = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    ret i16 [[TMP1]]
+;
+  %ct = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
   %tobool = icmp eq i16 %x, 0
-  %cond = select i1 %tobool, i16 16, i16 %0
+  %cond = select i1 %tobool, i16 16, i16 %ct
   ret i16 %cond
 }
 
 define i32 @test5b(i32 %x) {
 ; CHECK-LABEL: @test5b(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-NEXT: ret i32 [[VAR]]
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
 entry:
-  %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
   %tobool = icmp eq i32 %x, 0
-  %cond = select i1 %tobool, i32 32, i32 %0
+  %cond = select i1 %tobool, i32 32, i32 %ct
   ret i32 %cond
 }
 
 define i64 @test6b(i64 %x) {
 ; CHECK-LABEL: @test6b(
-; CHECK: [[VAR:%[a-zA-Z0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %x, i1 false)
-; CHECK-NEXT: ret i64 [[VAR]]
-entry:
-  %0 = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    ret i64 [[TMP1]]
+;
+  %ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
   %tobool = icmp eq i64 %x, 0
-  %cond = select i1 %tobool, i64 64, i64 %0
+  %cond = select i1 %tobool, i64 64, i64 %ct
   ret i64 %cond
 }
 
 define i32 @test1c(i16 %x) {
 ; CHECK-LABEL: @test1c(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i16 @llvm.cttz.i16(i16 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = zext i16 [[VAR1]] to i32
-; CHECK-NEXT: ret i32 [[VAR2]]
-entry:
-  %0 = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
-  %cast2 = zext i16 %0 to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %ct = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
+  %cast2 = zext i16 %ct to i32
   %tobool = icmp ne i16 %x, 0
   %cond = select i1 %tobool, i32 %cast2, i32 16
   ret i32 %cond
@@ -151,12 +154,12 @@
 
 define i64 @test2c(i16 %x) {
 ; CHECK-LABEL: @test2c(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i16 @llvm.cttz.i16(i16 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = zext i16 [[VAR1]] to i64
-; CHECK-NEXT: ret i64 [[VAR2]]
-entry:
-  %0 = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
-  %conv = zext i16 %0 to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[TMP1]] to i64
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+  %ct = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
+  %conv = zext i16 %ct to i64
   %tobool = icmp ne i16 %x, 0
   %cond = select i1 %tobool, i64 %conv, i64 16
   ret i64 %cond
@@ -164,12 +167,12 @@
 
 define i64 @test3c(i32 %x) {
 ; CHECK-LABEL: @test3c(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = zext i32 [[VAR1]] to i64
-; CHECK-NEXT: ret i64 [[VAR2]]
-entry:
-  %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
-  %conv = zext i32 %0 to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
+  %conv = zext i32 %ct to i64
   %tobool = icmp ne i32 %x, 0
   %cond = select i1 %tobool, i64 %conv, i64 32
   ret i64 %cond
@@ -177,12 +180,12 @@
 
 define i32 @test4c(i16 %x) {
 ; CHECK-LABEL: @test4c(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = zext i16 [[VAR1]] to i32
-; CHECK-NEXT: ret i32 [[VAR2]]
-entry:
-  %0 = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
-  %cast = zext i16 %0 to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %ct = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
+  %cast = zext i16 %ct to i32
   %tobool = icmp ne i16 %x, 0
   %cond = select i1 %tobool, i32 %cast, i32 16
   ret i32 %cond
@@ -190,12 +193,12 @@
 
 define i64 @test5c(i16 %x) {
 ; CHECK-LABEL: @test5c(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = zext i16 [[VAR1]] to i64
-; CHECK-NEXT: ret i64 [[VAR2]]
-entry:
-  %0 = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
-  %cast = zext i16 %0 to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range !0
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[TMP1]] to i64
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+  %ct = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
+  %cast = zext i16 %ct to i64
   %tobool = icmp ne i16 %x, 0
   %cond = select i1 %tobool, i64 %cast, i64 16
   ret i64 %cond
@@ -203,12 +206,12 @@
 
 define i64 @test6c(i32 %x) {
 ; CHECK-LABEL: @test6c(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = zext i32 [[VAR1]] to i64
-; CHECK-NEXT: ret i64 [[VAR2]]
-entry:
-  %0 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
-  %cast = zext i32 %0 to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+  %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
+  %cast = zext i32 %ct to i64
   %tobool = icmp ne i32 %x, 0
   %cond = select i1 %tobool, i64 %cast, i64 32
   ret i64 %cond
@@ -216,12 +219,12 @@
 
 define i16 @test1d(i64 %x) {
 ; CHECK-LABEL: @test1d(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = trunc i64 [[VAR1]] to i16
-; CHECK-NEXT: ret i16 [[VAR2]]
-entry:
-  %0 = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
-  %conv = trunc i64 %0 to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i16
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+  %ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
+  %conv = trunc i64 %ct to i16
   %tobool = icmp ne i64 %x, 0
   %cond = select i1 %tobool, i16 %conv, i16 64
   ret i16 %cond
@@ -229,12 +232,12 @@
 
 define i32 @test2d(i64 %x) {
 ; CHECK-LABEL: @test2d(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = trunc i64 [[VAR1]] to i32
-; CHECK-NEXT: ret i32 [[VAR2]]
-entry:
-  %0 = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
-  %cast = trunc i64 %0 to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
+  %cast = trunc i64 %ct to i32
   %tobool = icmp ne i64 %x, 0
   %cond = select i1 %tobool, i32 %cast, i32 64
   ret i32 %cond
@@ -242,12 +245,12 @@
 
 define i16 @test3d(i32 %x) {
 ; CHECK-LABEL: @test3d(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = trunc i32 [[VAR1]] to i16
-; CHECK-NEXT: ret i16 [[VAR2]]
-entry:
-  %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
-  %cast = trunc i32 %0 to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
+  %cast = trunc i32 %ct to i16
   %tobool = icmp ne i32 %x, 0
   %cond = select i1 %tobool, i16 %cast, i16 32
   ret i16 %cond
@@ -255,12 +258,12 @@
 
 define i16 @test4d(i64 %x) {
 ; CHECK-LABEL: @test4d(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = trunc i64 [[VAR1]] to i16
-; CHECK-NEXT: ret i16 [[VAR2]]
-entry:
-  %0 = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
-  %cast = trunc i64 %0 to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i16
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+  %ct = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
+  %cast = trunc i64 %ct to i16
   %tobool = icmp ne i64 %x, 0
   %cond = select i1 %tobool, i16 %cast, i16 64
   ret i16 %cond
@@ -268,12 +271,12 @@
 
 define i32 @test5d(i64 %x) {
 ; CHECK-LABEL: @test5d(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = trunc i64 [[VAR1]] to i32
-; CHECK-NEXT: ret i32 [[VAR2]]
-entry:
-  %0 = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
-  %cast = trunc i64 %0 to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[X:%.*]], i1 false), !range !2
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %ct = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
+  %cast = trunc i64 %ct to i32
   %tobool = icmp ne i64 %x, 0
   %cond = select i1 %tobool, i32 %cast, i32 64
   ret i32 %cond
@@ -281,12 +284,12 @@
 
 define i16 @test6d(i32 %x) {
 ; CHECK-LABEL: @test6d(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = trunc i32 [[VAR1]] to i16
-; CHECK-NEXT: ret i16 [[VAR2]]
-entry:
-  %0 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
-  %cast = trunc i32 %0 to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+  %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
+  %cast = trunc i32 %ct to i16
   %tobool = icmp ne i32 %x, 0
   %cond = select i1 %tobool, i16 %cast, i16 32
   ret i16 %cond
@@ -294,12 +297,12 @@
 
 define i64 @select_bug1(i32 %x) {
 ; CHECK-LABEL: @select_bug1(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = zext i32 [[VAR1]] to i64
-; CHECK-NEXT: ret i64 [[VAR2]]
-entry:
-  %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-  %conv = zext i32 %0 to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
+  %conv = zext i32 %ct to i64
   %tobool = icmp ne i32 %x, 0
   %cond = select i1 %tobool, i64 %conv, i64 32
   ret i64 %cond
@@ -307,12 +310,12 @@
 
 define i16 @select_bug2(i32 %x) {
 ; CHECK-LABEL: @select_bug2(
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-NEXT: [[VAR2:%[a-zA-Z0-9]+]] = trunc i32 [[VAR1]] to i16
-; CHECK-NEXT: ret i16 [[VAR2]]
-entry:
-  %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
-  %conv = trunc i32 %0 to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
+  %conv = trunc i32 %ct to i16
   %tobool = icmp ne i32 %x, 0
   %cond = select i1 %tobool, i16 %conv, i16 32
   ret i16 %cond
@@ -323,9 +326,9 @@
 ; CHECK-NEXT:    [[TMP1:%.*]] = tail call i128 @llvm.ctlz.i128(i128 [[X:%.*]], i1 false), !range !3
 ; CHECK-NEXT:    ret i128 [[TMP1]]
 ;
-  %1 = tail call i128 @llvm.ctlz.i128(i128 %x, i1 true)
+  %ct = tail call i128 @llvm.ctlz.i128(i128 %x, i1 true)
   %tobool = icmp ne i128 %x, 0
-  %cond = select i1 %tobool, i128 %1, i128 128
+  %cond = select i1 %tobool, i128 %ct, i128 128
   ret i128 %cond
 }
 
@@ -334,17 +337,123 @@
 ; CHECK-NEXT:    [[TMP1:%.*]] = tail call i128 @llvm.cttz.i128(i128 [[X:%.*]], i1 false), !range !3
 ; CHECK-NEXT:    ret i128 [[TMP1]]
 ;
-  %1 = tail call i128 @llvm.cttz.i128(i128 %x, i1 true)
+  %ct = tail call i128 @llvm.cttz.i128(i128 %x, i1 true)
   %tobool = icmp ne i128 %x, 0
-  %cond = select i1 %tobool, i128 %1, i128 128
+  %cond = select i1 %tobool, i128 %ct, i128 128
   ret i128 %cond
 }
 
+define i32 @test_ctlz_not_bw(i32 %x) {
+; CHECK-LABEL: @test_ctlz_not_bw(
+; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 true), !range !1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT:    [[RES:%.*]] = select i1 [[CMP]], i32 123, i32 [[CT]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp ne i32 %x, 0
+  %res = select i1 %cmp, i32 %ct, i32 123
+  ret i32 %res
+}
+
+define i32 @test_ctlz_not_bw_multiuse(i32 %x) {
+; CHECK-LABEL: @test_ctlz_not_bw_multiuse(
+; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 123, i32 [[CT]]
+; CHECK-NEXT:    [[RES:%.*]] = or i32 [[SEL]], [[CT]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+  %cmp = icmp ne i32 %x, 0
+  %sel = select i1 %cmp, i32 %ct, i32 123
+  %res = or i32 %sel, %ct
+  ret i32 %res
+}
+
+define i32 @test_cttz_not_bw(i32 %x) {
+; CHECK-LABEL: @test_cttz_not_bw(
+; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true), !range !1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT:    [[RES:%.*]] = select i1 [[CMP]], i32 123, i32 [[CT]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
+  %cmp = icmp ne i32 %x, 0
+  %res = select i1 %cmp, i32 %ct, i32 123
+  ret i32 %res
+}
+
+define i32 @test_cttz_not_bw_multiuse(i32 %x) {
+; CHECK-LABEL: @test_cttz_not_bw_multiuse(
+; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range !1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 123, i32 [[CT]]
+; CHECK-NEXT:    [[RES:%.*]] = or i32 [[SEL]], [[CT]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
+  %cmp = icmp ne i32 %x, 0
+  %sel = select i1 %cmp, i32 %ct, i32 123
+  %res = or i32 %sel, %ct
+  ret i32 %res
+}
+
+define <2 x i32> @test_ctlz_bw_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test_ctlz_bw_vec(
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    ret <2 x i32> [[TMP1]]
+;
+  %ct = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 true)
+  %cmp = icmp ne <2 x i32> %x, zeroinitializer
+  %res = select <2 x i1> %cmp, <2 x i32> %ct, <2 x i32> <i32 32, i32 32>
+  ret <2 x i32> %res
+}
+
+define <2 x i32> @test_ctlz_not_bw_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test_ctlz_not_bw_vec(
+; CHECK-NEXT:    [[CT:%.*]] = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 true)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[X]], zeroinitializer
+; CHECK-NEXT:    [[RES:%.*]] = select <2 x i1> [[CMP]], <2 x i32> zeroinitializer, <2 x i32> [[CT]]
+; CHECK-NEXT:    ret <2 x i32> [[RES]]
+;
+  %ct = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ne <2 x i32> %x, zeroinitializer
+  %res = select <2 x i1> %cmp, <2 x i32> %ct, <2 x i32> <i32 0, i32 0>
+  ret <2 x i32> %res
+}
+
+define <2 x i32> @test_cttz_bw_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test_cttz_bw_vec(
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 false)
+; CHECK-NEXT:    ret <2 x i32> [[TMP1]]
+;
+  %ct = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 true)
+  %cmp = icmp ne <2 x i32> %x, zeroinitializer
+  %res = select <2 x i1> %cmp, <2 x i32> %ct, <2 x i32> <i32 32, i32 32>
+  ret <2 x i32> %res
+}
+
+define <2 x i32> @test_cttz_not_bw_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test_cttz_not_bw_vec(
+; CHECK-NEXT:    [[CT:%.*]] = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 true)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[X]], zeroinitializer
+; CHECK-NEXT:    [[RES:%.*]] = select <2 x i1> [[CMP]], <2 x i32> zeroinitializer, <2 x i32> [[CT]]
+; CHECK-NEXT:    ret <2 x i32> [[RES]]
+;
+  %ct = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 false)
+  %cmp = icmp ne <2 x i32> %x, zeroinitializer
+  %res = select <2 x i1> %cmp, <2 x i32> %ct, <2 x i32> <i32 0, i32 0>
+  ret <2 x i32> %res
+}
+
 declare i16 @llvm.ctlz.i16(i16, i1)
 declare i32 @llvm.ctlz.i32(i32, i1)
 declare i64 @llvm.ctlz.i64(i64, i1)
 declare i128 @llvm.ctlz.i128(i128, i1)
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1)
 declare i16 @llvm.cttz.i16(i16, i1)
 declare i32 @llvm.cttz.i32(i32, i1)
 declare i64 @llvm.cttz.i64(i64, i1)
 declare i128 @llvm.cttz.i128(i128, i1)
+declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1)
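
The fold this regenerated file pins down is visible in every positive check above: select(x != 0, ct{l,t}z(x, true), BITWIDTH) collapses to a single ct{l,t}z(x, false), since the intrinsic with is_zero_undef=false already returns the bit width for a zero input; InstCombine also attaches !range metadata recording the tightened result range. A hedged scalar sketch (the metadata index is invented):

declare i32 @llvm.cttz.i32(i32, i1)

define i32 @cttz_select_sketch(i32 %x) {
  %ct = call i32 @llvm.cttz.i32(i32 %x, i1 true)
  %nz = icmp ne i32 %x, 0
  %r = select i1 %nz, i32 %ct, i32 32
  ; expected: %r = call i32 @llvm.cttz.i32(i32 %x, i1 false), !range !0
  ; with !0 = !{i32 0, i32 33}, i.e. the count lies in [0, 32]
  ret i32 %r
}

The new *_not_bw tests cover the other direction: when the constant arm is not the bit width, the select must stay, and only the is_zero_undef flag may be tightened to true when the zero case is never selected (unless the count has another use, as in the *_multiuse tests).
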
diff --git a/test/Transforms/InstCombine/sink-into-catchswitch.ll b/test/Transforms/InstCombine/sink-into-catchswitch.ll
index 04a6225..893bf2b 100644
--- a/test/Transforms/InstCombine/sink-into-catchswitch.ll
+++ b/test/Transforms/InstCombine/sink-into-catchswitch.ll
@@ -1,16 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -instcombine -S < %s | FileCheck %s
+
 target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-pc-windows-msvc18.0.0"
 
 %struct.B = type { i64, i64 }
 
 define void @test1(%struct.B* %p) personality i32 (...)* @__CxxFrameHandler3 {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  invoke.cont:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.B* [[P:%.*]] to <2 x i64>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT:    invoke void @throw()
+; CHECK-NEXT:    to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
+; CHECK:       catch.dispatch:
+; CHECK-NEXT:    [[CS:%.*]] = catchswitch within none [label %invoke.cont1] unwind label [[EHCLEANUP:%.*]]
+; CHECK:       invoke.cont1:
+; CHECK-NEXT:    [[CATCH:%.*]] = catchpad within [[CS]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT:    invoke void @throw() [ "funclet"(token [[CATCH]]) ]
+; CHECK-NEXT:    to label [[UNREACHABLE]] unwind label [[EHCLEANUP]]
+; CHECK:       ehcleanup:
+; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ [[TMP2]], [[CATCH_DISPATCH]] ], [ 9, [[INVOKE_CONT1:%.*]] ]
+; CHECK-NEXT:    [[CLEANUP:%.*]] = cleanuppad within none []
+; CHECK-NEXT:    call void @release(i64 [[PHI]]) [ "funclet"(token [[CLEANUP]]) ]
+; CHECK-NEXT:    cleanupret from [[CLEANUP]] unwind to caller
+; CHECK:       unreachable:
+; CHECK-NEXT:    unreachable
+;
 invoke.cont:
   %0 = bitcast %struct.B* %p to <2 x i64>*
   %1 = load <2 x i64>, <2 x i64>* %0, align 8
   %2 = extractelement <2 x i64> %1, i32 0
   invoke void @throw()
-          to label %unreachable unwind label %catch.dispatch
+  to label %unreachable unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %invoke.cont
   %cs = catchswitch within none [label %invoke.cont1] unwind label %ehcleanup
@@ -18,7 +41,7 @@
 invoke.cont1:                                     ; preds = %catch.dispatch
   %catch = catchpad within %cs [i8* null, i32 64, i8* null]
   invoke void @throw() [ "funclet"(token %catch) ]
-          to label %unreachable unwind label %ehcleanup
+  to label %unreachable unwind label %ehcleanup
 
 ehcleanup:                                        ; preds = %invoke.cont1, %catch.dispatch
   %phi = phi i64 [ %2, %catch.dispatch ], [ 9, %invoke.cont1 ]
@@ -30,16 +53,6 @@
   unreachable
 }
 
-; CHECK-LABEL: define void @test1(
-; CHECK: %[[bc:.*]] = bitcast %struct.B* %p to <2 x i64>*
-; CHECK: %[[ld:.*]] = load <2 x i64>, <2 x i64>* %[[bc]], align 8
-; CHECK: %[[ee:.*]] = extractelement <2 x i64> %[[ld]], i32 0
-
-; CHECK: %[[phi:.*]] = phi i64 [ %[[ee]], {{.*}} ], [ 9, {{.*}} ]
-; CHECK: call void @release(i64 %[[phi]])
-
 declare i32 @__CxxFrameHandler3(...)
-
 declare void @throw()
-
 declare void @release(i64)

diff --git a/test/Transforms/InstCombine/strchr-1.ll b/test/Transforms/InstCombine/strchr-1.ll
index 6c10ebd..4fce378 100644
--- a/test/Transforms/InstCombine/strchr-1.ll
+++ b/test/Transforms/InstCombine/strchr-1.ll
@@ -82,8 +82,9 @@
 define i1 @test_simplify7(i32 %C) {
 ; CHECK-LABEL: @test_simplify7
 ; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 %C to i16
-; CHECK-NEXT: %memchr.bounds = icmp ult i16 [[TRUNC]], 16
-; CHECK-NEXT: [[SHL:%.*]] = shl i16 1, [[TRUNC]]
+; CHECK-NEXT: [[TRUNC_AND:%.*]] = and i16 [[TRUNC]], 255
+; CHECK-NEXT: %memchr.bounds = icmp ult i16 [[TRUNC_AND]], 16
+; CHECK-NEXT: [[SHL:%.*]] = shl i16 1, [[TRUNC_AND]]
 ; CHECK-NEXT: [[AND:%.*]] = and i16 [[SHL]], 9217
 ; CHECK-NEXT: %memchr.bits = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT: %memchr1 = and i1 %memchr.bounds, %memchr.bits
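
The strchr-1.ll hunk fixes the bitmask expansion of a strchr over a short constant string: strchr compares only the low byte of its int argument, so the truncated character must be masked with 255 before it feeds the bounds test and the shift. A hedged sketch of the corrected shape (the constants mirror the checks; the string itself is outside this hunk):

define i1 @memchr_bitmask_sketch(i32 %c) {
  %t = trunc i32 %c to i16
  %byte = and i16 %t, 255            ; the newly required mask
  %inbounds = icmp ult i16 %byte, 16 ; every char in the string is < 16
  %bit = shl i16 1, %byte
  %hit = and i16 %bit, 9217          ; one bit set per character in the string
  %found = icmp ne i16 %hit, 0
  %r = and i1 %inbounds, %found
  ret i1 %r
}
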
diff --git a/test/Transforms/InstCombine/vector-casts.ll b/test/Transforms/InstCombine/vector-casts.ll
index e0d6083..d2acefc 100644
--- a/test/Transforms/InstCombine/vector-casts.ll
+++ b/test/Transforms/InstCombine/vector-casts.ll
@@ -163,8 +163,8 @@
 
 define <2 x i65> @foo(<2 x i64> %t) {
 ; CHECK-LABEL: @foo(
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i64> [[T:%.*]], <i64 4294967295, i64 4294967295>
-; CHECK-NEXT:    [[B:%.*]] = zext <2 x i64> [[TMP1]] to <2 x i65>
+; CHECK-NEXT:    [[A_MASK:%.*]] = and <2 x i64> [[T:%.*]], <i64 4294967295, i64 4294967295>
+; CHECK-NEXT:    [[B:%.*]] = zext <2 x i64> [[A_MASK]] to <2 x i65>
 ; CHECK-NEXT:    ret <2 x i65> [[B]]
 ;
   %a = trunc <2 x i64> %t to <2 x i32>
@@ -174,8 +174,8 @@
 
 define <2 x i64> @bar(<2 x i65> %t) {
 ; CHECK-LABEL: @bar(
-; CHECK-NEXT:    [[A:%.*]] = trunc <2 x i65> [[T:%.*]] to <2 x i64>
-; CHECK-NEXT:    [[B:%.*]] = and <2 x i64> [[A]], <i64 4294967295, i64 4294967295>
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i65> [[T:%.*]] to <2 x i64>
+; CHECK-NEXT:    [[B:%.*]] = and <2 x i64> [[TMP1]], <i64 4294967295, i64 4294967295>
 ; CHECK-NEXT:    ret <2 x i64> [[B]]
 ;
   %a = trunc <2 x i65> %t to <2 x i32>
@@ -185,9 +185,8 @@
 
 define <2 x i64> @bars(<2 x i65> %t) {
 ; CHECK-LABEL: @bars(
-; CHECK-NEXT:    [[A:%.*]] = trunc <2 x i65> [[T:%.*]] to <2 x i64>
-; CHECK-NEXT:    [[SEXT:%.*]] = shl <2 x i64> [[A]], <i64 32, i64 32>
-; CHECK-NEXT:    [[B:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 32, i64 32>
+; CHECK-NEXT:    [[A:%.*]] = trunc <2 x i65> [[T:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[B:%.*]] = sext <2 x i32> [[A]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[B]]
 ;
   %a = trunc <2 x i65> %t to <2 x i32>
@@ -197,8 +196,8 @@
 
 define <2 x i64> @quxs(<2 x i64> %t) {
 ; CHECK-LABEL: @quxs(
-; CHECK-NEXT:    [[SEXT:%.*]] = shl <2 x i64> [[T:%.*]], <i64 32, i64 32>
-; CHECK-NEXT:    [[B:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 32, i64 32>
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i64> [[T:%.*]], <i64 32, i64 32>
+; CHECK-NEXT:    [[B:%.*]] = ashr exact <2 x i64> [[TMP1]], <i64 32, i64 32>
 ; CHECK-NEXT:    ret <2 x i64> [[B]]
 ;
   %a = trunc <2 x i64> %t to <2 x i32>
@@ -377,3 +376,38 @@
   ret <3 x float> %trunc
 }
 
+; Converting to a wide type might reduce instruction count,
+; but we cannot do that unless the backend can recover from
+; the creation of a potentially illegal op (like a 64-bit vmul).
+; PR40032 - https://bugs.llvm.org/show_bug.cgi?id=40032
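+;
+; As an illustrative sketch (not the output of the pass), the wide form that
+; must be avoided for the tests below would compute the product in 64 bits:
+;   %m = mul <2 x i64> %x, %y   ; potentially illegal 64-bit vector multiply
+; and then sign-extend (shl/ashr by 32) or zero-extend (and with 4294967295)
+; the low half of each lane. The low 32 bits of a product depend only on the
+; low 32 bits of the operands, so the forms are equivalent, but InstCombine
+; keeps the narrow mul here.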
+
+define <2 x i64> @sext_less_casting_with_wideop(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: @sext_less_casting_with_wideop(
+; CHECK-NEXT:    [[XNARROW:%.*]] = trunc <2 x i64> [[X:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[YNARROW:%.*]] = trunc <2 x i64> [[Y:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i32> [[XNARROW]], [[YNARROW]]
+; CHECK-NEXT:    [[R:%.*]] = sext <2 x i32> [[MUL]] to <2 x i64>
+; CHECK-NEXT:    ret <2 x i64> [[R]]
+;
+  %xnarrow = trunc <2 x i64> %x to <2 x i32>
+  %ynarrow = trunc <2 x i64> %y to <2 x i32>
+  %mul = mul <2 x i32> %xnarrow, %ynarrow
+  %r = sext <2 x i32> %mul to <2 x i64>
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @zext_less_casting_with_wideop(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: @zext_less_casting_with_wideop(
+; CHECK-NEXT:    [[XNARROW:%.*]] = trunc <2 x i64> [[X:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[YNARROW:%.*]] = trunc <2 x i64> [[Y:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i32> [[XNARROW]], [[YNARROW]]
+; CHECK-NEXT:    [[R:%.*]] = zext <2 x i32> [[MUL]] to <2 x i64>
+; CHECK-NEXT:    ret <2 x i64> [[R]]
+;
+  %xnarrow = trunc <2 x i64> %x to <2 x i32>
+  %ynarrow = trunc <2 x i64> %y to <2 x i32>
+  %mul = mul <2 x i32> %xnarrow, %ynarrow
+  %r = zext <2 x i32> %mul to <2 x i64>
+  ret <2 x i64> %r
+}
+
diff --git a/test/Transforms/InstSimplify/call.ll b/test/Transforms/InstSimplify/call.ll
index b3a2f96..9d63d37 100644
--- a/test/Transforms/InstSimplify/call.ll
+++ b/test/Transforms/InstSimplify/call.ll
@@ -189,36 +189,6 @@
   ret {i8, i1} %x
 }
 
-declare i256 @llvm.cttz.i256(i256 %src, i1 %is_zero_undef)
-
-define i256 @test_cttz() {
-; CHECK-LABEL: @test_cttz(
-; CHECK-NEXT:    ret i256 1
-;
-  %x = call i256 @llvm.cttz.i256(i256 10, i1 false)
-  ret i256 %x
-}
-
-declare <2 x i256> @llvm.cttz.v2i256(<2 x i256> %src, i1 %is_zero_undef)
-
-define <2 x i256> @test_cttz_vec() {
-; CHECK-LABEL: @test_cttz_vec(
-; CHECK-NEXT:    ret <2 x i256> <i256 1, i256 1>
-;
-  %x = call <2 x i256> @llvm.cttz.v2i256(<2 x i256> <i256 10, i256 10>, i1 false)
-  ret <2 x i256> %x
-}
-
-declare i256 @llvm.ctpop.i256(i256 %src)
-
-define i256 @test_ctpop() {
-; CHECK-LABEL: @test_ctpop(
-; CHECK-NEXT:    ret i256 2
-;
-  %x = call i256 @llvm.ctpop.i256(i256 10)
-  ret i256 %x
-}
-
 ; Test a non-intrinsic that we know about as a library call.
 declare float @fabs(float %x)
 
diff --git a/test/Transforms/InstSimplify/saturating-add-sub.ll b/test/Transforms/InstSimplify/saturating-add-sub.ll
index 86be08f..a226cc4 100644
--- a/test/Transforms/InstSimplify/saturating-add-sub.ll
+++ b/test/Transforms/InstSimplify/saturating-add-sub.ll
@@ -406,3 +406,261 @@
   ret <2 x i8> %y6v
 }
 
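+; For unsigned saturating add with one known operand C, the result is always
+; at least C: the true sum is >= C, and saturation can only raise it to 255.
+; Hence 'icmp uge' against C folds to true, while the strict 'icmp ugt'
+; cannot be folded (the unknown operand may be zero).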
+define i1 @uadd_icmp_op0_known(i8 %a) {
+; CHECK-LABEL: @uadd_icmp_op0_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.uadd.sat.i8(i8 10, i8 %a)
+  %c = icmp uge i8 %b, 10
+  ret i1 %c
+}
+
+define i1 @uadd_icmp_op0_unknown(i8 %a) {
+; CHECK-LABEL: @uadd_icmp_op0_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.uadd.sat.i8(i8 10, i8 [[A:%.*]])
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[B]], 10
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.uadd.sat.i8(i8 10, i8 %a)
+  %c = icmp ugt i8 %b, 10
+  ret i1 %c
+}
+
+define i1 @uadd_icmp_op1_known(i8 %a) {
+; CHECK-LABEL: @uadd_icmp_op1_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.uadd.sat.i8(i8 %a, i8 10)
+  %c = icmp uge i8 %b, 10
+  ret i1 %c
+}
+
+define i1 @uadd_icmp_op1_unknown(i8 %a) {
+; CHECK-LABEL: @uadd_icmp_op1_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[B]], 10
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.uadd.sat.i8(i8 %a, i8 10)
+  %c = icmp ugt i8 %b, 10
+  ret i1 %c
+}
+
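+; For signed saturating add with a known operand C, the result is bounded on
+; the side C pushes away from: with C = 10 the sum is at least -128 + 10 = -118
+; (no saturation can occur below), and with C = -10 it is at most
+; 127 - 10 = 117. The non-strict comparisons against those bounds fold to
+; true; the strict ones do not.
+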
+define i1 @sadd_icmp_op0_pos_known(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op0_pos_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 10, i8 %a)
+  %c = icmp sge i8 %b, -118
+  ret i1 %c
+}
+
+define i1 @sadd_icmp_op0_pos_unknown(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op0_pos_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.sadd.sat.i8(i8 10, i8 [[A:%.*]])
+; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[B]], -118
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 10, i8 %a)
+  %c = icmp sgt i8 %b, -118
+  ret i1 %c
+}
+
+define i1 @sadd_icmp_op0_neg_known(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op0_neg_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 -10, i8 %a)
+  %c = icmp sle i8 %b, 117
+  ret i1 %c
+}
+
+define i1 @sadd_icmp_op0_neg_unknown(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op0_neg_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.sadd.sat.i8(i8 -10, i8 [[A:%.*]])
+; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[B]], 117
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 -10, i8 %a)
+  %c = icmp slt i8 %b, 117
+  ret i1 %c
+}
+
+define i1 @sadd_icmp_op1_pos_known(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op1_pos_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
+  %c = icmp sge i8 %b, -118
+  ret i1 %c
+}
+
+define i1 @sadd_icmp_op1_pos_unknown(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op1_pos_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[B]], -118
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
+  %c = icmp sgt i8 %b, -118
+  ret i1 %c
+}
+
+define i1 @sadd_icmp_op1_neg_known(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op1_neg_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 %a, i8 -10)
+  %c = icmp sle i8 %b, 117
+  ret i1 %c
+}
+
+define i1 @sadd_icmp_op1_neg_unknown(i8 %a) {
+; CHECK-LABEL: @sadd_icmp_op1_neg_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
+; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[B]], 117
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.sadd.sat.i8(i8 %a, i8 -10)
+  %c = icmp slt i8 %b, 117
+  ret i1 %c
+}
+
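+; For unsigned saturating sub, the result is bounded by the known operand:
+; usub.sat(10, %a) is at most 10, and usub.sat(%a, 10) is at most
+; 255 - 10 = 245. The non-strict 'ule' comparisons against those bounds fold
+; to true; the strict 'ult' comparisons do not.
+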
+define i1 @usub_icmp_op0_known(i8 %a) {
+; CHECK-LABEL: @usub_icmp_op0_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.usub.sat.i8(i8 10, i8 %a)
+  %c = icmp ule i8 %b, 10
+  ret i1 %c
+}
+
+define i1 @usub_icmp_op0_unknown(i8 %a) {
+; CHECK-LABEL: @usub_icmp_op0_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.usub.sat.i8(i8 10, i8 [[A:%.*]])
+; CHECK-NEXT:    [[C:%.*]] = icmp ult i8 [[B]], 10
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.usub.sat.i8(i8 10, i8 %a)
+  %c = icmp ult i8 %b, 10
+  ret i1 %c
+}
+
+define i1 @usub_icmp_op1_known(i8 %a) {
+; CHECK-LABEL: @usub_icmp_op1_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
+  %c = icmp ule i8 %b, 245
+  ret i1 %c
+}
+
+define i1 @usub_icmp_op1_unknown(i8 %a) {
+; CHECK-LABEL: @usub_icmp_op1_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[C:%.*]] = icmp ult i8 [[B]], -11
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
+  %c = icmp ult i8 %b, 245
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op0_pos_known(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op0_pos_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 10, i8 %a)
+  %c = icmp sge i8 %b, -117
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op0_pos_unknown(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op0_pos_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.ssub.sat.i8(i8 10, i8 [[A:%.*]])
+; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[B]], -117
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 10, i8 %a)
+  %c = icmp sgt i8 %b, -117
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op0_neg_known(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op0_neg_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 -10, i8 %a)
+  %c = icmp sle i8 %b, 118
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op0_neg_unknown(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op0_neg_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.ssub.sat.i8(i8 -10, i8 [[A:%.*]])
+; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[B]], 118
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 -10, i8 %a)
+  %c = icmp slt i8 %b, 118
+  ret i1 %c
+}
+
+; Peculiar case: ssub.sat(0, x) is never signed min.
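+; For x = -128, 0 - x overflows and saturates to 127; for any other x the
+; result is -x, which is at least -127. So 'icmp ne %b, -128' folds to true.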
+define i1 @ssub_icmp_op0_zero(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op0_zero(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 0, i8 %a)
+  %c = icmp ne i8 %b, -128
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op1_pos_known(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op1_pos_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
+  %c = icmp sle i8 %b, 117
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op1_pos_unknown(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op1_pos_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 10)
+; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[B]], 117
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
+  %c = icmp slt i8 %b, 117
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op1_neg_known(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op1_neg_known(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -10)
+  %c = icmp sge i8 %b, -118
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op1_neg_unknown(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op1_neg_unknown(
+; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 -10)
+; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[B]], -118
+; CHECK-NEXT:    ret i1 [[C]]
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -10)
+  %c = icmp sgt i8 %b, -118
+  ret i1 %c
+}
+
+define i1 @ssub_icmp_op1_smin(i8 %a) {
+; CHECK-LABEL: @ssub_icmp_op1_smin(
+; CHECK-NEXT:    ret i1 true
+;
+  %b = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -128)
+  %c = icmp sge i8 %b, 0
+  ret i1 %c
+}
diff --git a/test/Transforms/JumpThreading/select.ll b/test/Transforms/JumpThreading/select.ll
index dd96e75..7557a6c 100644
--- a/test/Transforms/JumpThreading/select.ll
+++ b/test/Transforms/JumpThreading/select.ll
@@ -363,3 +363,81 @@
 ; CHECK: br i1 %cmp13.i, label %.exit, label %cond.false.15.i
 ; CHECK: br label %.exit
 }
+
+; When a select has a constant operand in one branch, and it feeds a phi node
+; that in turn feeds a switch, we unfold the select.
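+; As a rough sketch (block names illustrative, not the pass output), the
+; unfolding turns
+;   %spec.select = select i1 %cmp6, i32 2, i32 4
+; into a conditional branch to two new blocks, each carrying one constant
+; into the phi, so the switch can then be threaded on the constant.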
+define void @test_func(i32* nocapture readonly %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %n) local_unnamed_addr #0 {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %sw.default, %entry
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %sw.default ]
+  %cmp = icmp slt i32 %i.0, %n
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond
+  ret void
+
+for.body:                                         ; preds = %for.cond
+  %0 = zext i32 %i.0 to i64
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %0
+  %1 = load i32, i32* %arrayidx, align 4
+  %cmp1 = icmp eq i32 %1, 4
+  br i1 %cmp1, label %land.lhs.true, label %if.end
+
+land.lhs.true:                                    ; preds = %for.body
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %0
+  %2 = load i32, i32* %arrayidx3, align 4
+  %arrayidx5 = getelementptr inbounds i32, i32* %c, i64 %0
+  %3 = load i32, i32* %arrayidx5, align 4
+  %cmp6 = icmp eq i32 %2, %3
+  %spec.select = select i1 %cmp6, i32 2, i32 4
+  br label %if.end
+
+if.end:                                           ; preds = %land.lhs.true, %for.body
+  %local_var.0 = phi i32 [ %1, %for.body ], [ %spec.select, %land.lhs.true ]
+  switch i32 %local_var.0, label %sw.default [
+    i32 2, label %sw.bb
+    i32 4, label %sw.bb7
+    i32 5, label %sw.bb8
+    i32 7, label %sw.bb9
+  ]
+
+sw.bb:                                            ; preds = %if.end
+  call void @foo()
+  br label %sw.bb7
+
+sw.bb7:                                           ; preds = %if.end, %sw.bb
+  call void @bar()
+  br label %sw.bb8
+
+sw.bb8:                                           ; preds = %if.end, %sw.bb7
+  call void @baz()
+  br label %sw.bb9
+
+sw.bb9:                                           ; preds = %if.end, %sw.bb8
+  call void @quux()
+  br label %sw.default
+
+sw.default:                                       ; preds = %if.end, %sw.bb9
+  call void @baz()
+  %inc = add nuw nsw i32 %i.0, 1
+  br label %for.cond
+
+; CHECK-LABEL: @test_func(
+; CHECK: [[REG:%[0-9]+]] = load
+; CHECK-NOT: select
+; CHECK: br i1
+; CHECK-NOT: select
+; CHECK: br i1 {{.*}}, label [[DEST1:%.*]], label [[DEST2:%.*]]
+
+; The following line checks for the existence of a phi node, and makes sure
+; it only has one incoming value. To do this, we check every '%'. Note
+; that REG and REG2 each contain one '%'. There is another one at the
+; beginning of the incoming block name. After that there should be no other '%'.
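+; For instance (names illustrative), a line such as
+;   %p = phi i32 [ %1, %if.end ]
+; matches, while a phi with a second incoming value introduces an extra '%'
+; and is rejected.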
+
+; CHECK: [[REG2:%.*]] = phi i32 {{[^%]*}}[[REG]]{{[^%]*%[^%]*}}
+; CHECK: switch i32 [[REG2]]
+; CHECK: i32 2, label [[DEST1]]
+; CHECK: i32 4, label [[DEST2]]
+}
diff --git a/test/Transforms/LCSSA/avoid-intrinsics-in-catchswitch.ll b/test/Transforms/LCSSA/avoid-intrinsics-in-catchswitch.ll
index d1c25c0..55357f1 100644
--- a/test/Transforms/LCSSA/avoid-intrinsics-in-catchswitch.ll
+++ b/test/Transforms/LCSSA/avoid-intrinsics-in-catchswitch.ll
@@ -85,7 +85,7 @@
 
 define internal i32 @"\01?filt$0@0@m@@"(i8* %exception_pointers, i8* %frame_pointer) personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
 entry:
-  %0 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @"\01?m@@YAJXZ" to i8*), i8* %frame_pointer)
+  %0 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @"\01?m@@YAJXZ" to i8*), i8* %frame_pointer)
   %1 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @"\01?m@@YAJXZ" to i8*), i8* %0, i32 0)
   %2 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @"\01?m@@YAJXZ" to i8*), i8* %0, i32 1)
   %status = bitcast i8* %2 to i32*
@@ -112,7 +112,7 @@
   cleanupret from %9 unwind to caller
 }
 
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
 declare i8* @llvm.localrecover(i8*, i8*, i32)
 declare i32 @"\01?j@@YAJVf@@JPEAUk@@PEAH@Z"(i8, i32, %struct.k*, i32*) local_unnamed_addr
 declare i32 @__C_specific_handler(...)
diff --git a/test/Transforms/LICM/argmemonly-call.ll b/test/Transforms/LICM/argmemonly-call.ll
index b9e0818..86c92a5 100644
--- a/test/Transforms/LICM/argmemonly-call.ll
+++ b/test/Transforms/LICM/argmemonly-call.ll
@@ -2,6 +2,7 @@
 ; RUN: opt -licm -basicaa -licm-n2-threshold=200 < %s -S | FileCheck %s --check-prefix=ALIAS-N2
 ; RUN: opt -aa-pipeline=basic-aa -licm-n2-threshold=0 -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' < %s -S | FileCheck %s
 ; RUN: opt -aa-pipeline=basic-aa -licm-n2-threshold=200 -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' < %s -S | FileCheck %s --check-prefix=ALIAS-N2
+; RUN: opt -S -basicaa -licm -licm-n2-threshold=0 -enable-mssa-loop-dependency=true -verify-memoryssa %s | FileCheck %s --check-prefix=ALIAS-N2
 
 declare i32 @foo() readonly argmemonly nounwind
 declare i32 @foo2() readonly nounwind
@@ -11,6 +12,9 @@
 ; CHECK-LABEL: @test
 ; CHECK: @foo
 ; CHECK-LABEL: loop:
+; ALIAS-N2-LABEL: @test
+; ALIAS-N2: @foo
+; ALIAS-N2-LABEL: loop:
   br label %loop
 
 loop:
@@ -24,6 +28,9 @@
 ; CHECK-LABEL: @test_neg
 ; CHECK-LABEL: loop:
 ; CHECK: @foo
+; ALIAS-N2-LABEL: @test_neg
+; ALIAS-N2-LABEL: loop:
+; ALIAS-N2: @foo
   br label %loop
 
 loop:
@@ -36,6 +43,9 @@
 ; CHECK-LABEL: @test2
 ; CHECK: @bar
 ; CHECK-LABEL: loop:
+; ALIAS-N2-LABEL: @test2
+; ALIAS-N2: @bar
+; ALIAS-N2-LABEL: loop:
   br label %loop
 
 loop:
@@ -49,6 +59,9 @@
 ; CHECK-LABEL: @test3
 ; CHECK-LABEL: loop:
 ; CHECK: @bar
+; ALIAS-N2-LABEL: @test3
+; ALIAS-N2-LABEL: loop:
+; ALIAS-N2: @bar
   br label %loop
 
 loop:
@@ -64,6 +77,9 @@
 ; CHECK-LABEL: @test4
 ; CHECK-LABEL: loop:
 ; CHECK: @bar
+; ALIAS-N2-LABEL: @test4
+; ALIAS-N2-LABEL: loop:
+; ALIAS-N2: @bar
   br label %loop
 
 loop:
@@ -77,6 +93,7 @@
 ; we clump foo_new with bar.
 ; With the N2 Alias analysis diagnostic tool, we are able to hoist the
 ; argmemonly bar call out of the loop.
+; Using MemorySSA we can also hoist bar.
 
 define void @test5(i32* %loc2, i32* noalias %loc) {
 ; ALIAS-N2-LABEL: @test5
@@ -103,6 +120,10 @@
 ; CHECK: %val = load i32, i32* %loc2
 ; CHECK-LABEL: loop:
 ; CHECK: @llvm.memcpy
+; ALIAS-N2-LABEL: @test6
+; ALIAS-N2: %val = load i32, i32* %loc2
+; ALIAS-N2-LABEL: loop:
+; ALIAS-N2: @llvm.memcpy
   br label %loop
 
 loop:
@@ -119,6 +140,10 @@
 ; CHECK: %val = load i32, i32* %loc2
 ; CHECK-LABEL: loop:
 ; CHECK: @custom_memcpy
+; ALIAS-N2-LABEL: @test7
+; ALIAS-N2: %val = load i32, i32* %loc2
+; ALIAS-N2-LABEL: loop:
+; ALIAS-N2: @custom_memcpy
   br label %loop
 
 loop:
diff --git a/test/Transforms/LICM/hoist-bitcast-load.ll b/test/Transforms/LICM/hoist-bitcast-load.ll
index 956c728..fcd6d08 100644
--- a/test/Transforms/LICM/hoist-bitcast-load.ll
+++ b/test/Transforms/LICM/hoist-bitcast-load.ll
@@ -1,5 +1,6 @@
 ; RUN: opt -S -basicaa -licm < %s | FileCheck %s
 ; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(simplify-cfg,licm)' -S < %s | FileCheck %s
+; RUN: opt -S -basicaa -licm -enable-mssa-loop-dependency=true -verify-memoryssa < %s | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/LICM/hoist-debuginvariant.ll b/test/Transforms/LICM/hoist-debuginvariant.ll
index 95c4018..71aeb75 100644
--- a/test/Transforms/LICM/hoist-debuginvariant.ll
+++ b/test/Transforms/LICM/hoist-debuginvariant.ll
@@ -1,5 +1,6 @@
 ; RUN: opt < %s -licm -S | FileCheck %s
 ; RUN: opt < %s -strip-debug -licm -S | FileCheck %s
+; RUN: opt < %s -licm -enable-mssa-loop-dependency=true -verify-memoryssa -S | FileCheck %s --check-prefixes=CHECK,MSSA
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -16,7 +17,9 @@
 ; CHECK-NEXT: [[_TMP2:%.*]] = load i32, i32* @a, align 4
 ; CHECK-NEXT: [[_TMP3:%.*]] = load i32, i32* @b, align 4
 ; CHECK-NEXT: [[_TMP4:%.*]] = sdiv i32 [[_TMP2]], [[_TMP3]]
+; MSSA-NEXT: store i32 [[_TMP4]], i32* @c, align 4
 ; CHECK-NEXT: br label [[BB3:%.*]]
+
   br label %bb3
 
 bb3:                                              ; preds = %bb3, %0
diff --git a/test/Transforms/LICM/hoist-deref-load.ll b/test/Transforms/LICM/hoist-deref-load.ll
index 6c9a817..aacff88 100644
--- a/test/Transforms/LICM/hoist-deref-load.ll
+++ b/test/Transforms/LICM/hoist-deref-load.ll
@@ -1,5 +1,7 @@
 ; RUN: opt -S -basicaa -licm < %s | FileCheck %s
 ; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(simplify-cfg,licm)' -S < %s | FileCheck %s
+; RUN: opt -S -basicaa -licm -enable-mssa-loop-dependency=true -verify-memoryssa < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(simplify-cfg,licm)' -enable-mssa-loop-dependency=true -verify-memoryssa -S < %s | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/LICM/hoist-fast-fdiv.ll b/test/Transforms/LICM/hoist-fast-fdiv.ll
index 57df241..bdefcf9 100644
--- a/test/Transforms/LICM/hoist-fast-fdiv.ll
+++ b/test/Transforms/LICM/hoist-fast-fdiv.ll
@@ -1,4 +1,5 @@
 ; RUN: opt -licm -S < %s | FileCheck %s
+; RUN: opt -licm -enable-mssa-loop-dependency=true -verify-memoryssa -S < %s | FileCheck %s
 
 ; Function Attrs: noinline norecurse nounwind readnone ssp uwtable
 define zeroext i1 @invariant_denom(double %v) #0 {
diff --git a/test/Transforms/LICM/hoist-invariant-load.ll b/test/Transforms/LICM/hoist-invariant-load.ll
index ed669f3..909652b 100644
--- a/test/Transforms/LICM/hoist-invariant-load.ll
+++ b/test/Transforms/LICM/hoist-invariant-load.ll
@@ -1,5 +1,6 @@
 ; REQUIRES: asserts
 ; RUN: opt < %s -licm -disable-basicaa -stats -S 2>&1 | grep "1 licm"
+; RUN: opt < %s -licm -enable-mssa-loop-dependency=true -verify-memoryssa -disable-basicaa -stats -S 2>&1 | grep "1 licm"
 
 @"\01L_OBJC_METH_VAR_NAME_" = internal global [4 x i8] c"foo\00", section "__TEXT,__objc_methname,cstring_literals", align 1
 @"\01L_OBJC_SELECTOR_REFERENCES_" = internal global i8* getelementptr inbounds ([4 x i8], [4 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i32 0, i32 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
diff --git a/test/Transforms/LICM/hoist-nounwind.ll b/test/Transforms/LICM/hoist-nounwind.ll
index d53e404..a582173 100644
--- a/test/Transforms/LICM/hoist-nounwind.ll
+++ b/test/Transforms/LICM/hoist-nounwind.ll
@@ -1,5 +1,6 @@
 ; RUN: opt -S -basicaa -licm < %s | FileCheck %s
 ; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s
+; RUN: opt -S -basicaa -licm -enable-mssa-loop-dependency=true -verify-memoryssa < %s | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
diff --git a/test/Transforms/LICM/hoist-phi.ll b/test/Transforms/LICM/hoist-phi.ll
index 26d0c01..f3a137d 100644
--- a/test/Transforms/LICM/hoist-phi.ll
+++ b/test/Transforms/LICM/hoist-phi.ll
@@ -5,6 +5,11 @@
 ; RUN: opt -passes='require<opt-remark-emit>,loop(licm)' -licm-control-flow-hoisting=1 -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-ENABLED
 ; RUN: opt -passes='require<opt-remark-emit>,loop(licm)' -licm-control-flow-hoisting=0 -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-DISABLED
 
+; RUN: opt -passes='require<opt-remark-emit>,loop(licm)' -licm-control-flow-hoisting=1 -enable-mssa-loop-dependency=true -verify-memoryssa -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-ENABLED
+; Enable the run below when promotion is added, e.g. when "store i32 %phi, i32* %p" is promoted to phi.lcssa.
+; opt -passes='require<opt-remark-emit>,loop(licm)' -licm-control-flow-hoisting=0 -enable-mssa-loop-dependency=true -verify-memoryssa -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-DISABLED
+
+
 ; CHECK-LABEL: @triangle_phi
 define void @triangle_phi(i32 %x, i32* %p) {
 ; CHECK-LABEL: entry:
@@ -1349,3 +1354,167 @@
 loop.backedge:
   br label %loop
 }
+
+; The order in which we hoist instructions from the loop differs from the
+; textual order in the function. Check that we can rehoist this correctly.
+; CHECK-LABEL: @rehoist_wrong_order_1
+define void @rehoist_wrong_order_1(i32* %ptr) {
+; CHECK-LABEL: entry
+; CHECK-DAG: %gep2 = getelementptr inbounds i32, i32* %ptr, i64 2
+; CHECK-DAG: %gep3 = getelementptr inbounds i32, i32* %ptr, i64 3
+; CHECK-DAG: %gep1 = getelementptr inbounds i32, i32* %ptr, i64 1
+; CHECK-ENABLED: br i1 undef, label %[[IF1_LICM:.*]], label %[[ELSE1_LICM:.*]]
+entry:
+  br label %loop
+
+; CHECK-ENABLED: [[IF1_LICM]]:
+; CHECK-ENABLED: br label %[[LOOP_BACKEDGE_LICM:.*]]
+
+; CHECK-ENABLED: [[ELSE1_LICM]]:
+; CHECK-ENABLED: br label %[[LOOP_BACKEDGE_LICM]]
+
+; CHECK-ENABLED: [[LOOP_BACKEDGE_LICM]]:
+; CHECK-ENABLED: br i1 undef, label %[[IF3_LICM:.*]], label %[[END_LICM:.*]]
+
+; CHECK-ENABLED: [[IF3_LICM]]:
+; CHECK-ENABLED: br label %[[END_LICM]]
+
+; CHECK-ENABLED: [[END_LICM]]:
+; CHECK: br label %loop
+
+loop:
+  br i1 undef, label %if1, label %else1
+
+if1:
+  %gep1 = getelementptr inbounds i32, i32* %ptr, i64 1
+  store i32 0, i32* %gep1, align 4
+  br label %loop.backedge
+
+else1:
+  %gep2 = getelementptr inbounds i32, i32* %ptr, i64 2
+  store i32 0, i32* %gep2, align 4
+  br i1 undef, label %if2, label %loop.backedge
+
+if2:
+  br i1 undef, label %if3, label %end
+
+if3:
+  %gep3 = getelementptr inbounds i32, i32* %ptr, i64 3
+  store i32 0, i32* %gep3, align 4
+  br label %end
+
+end:
+  br label %loop.backedge
+
+loop.backedge:
+  br label %loop
+
+}
+
+; CHECK-LABEL: @rehoist_wrong_order_2
+define void @rehoist_wrong_order_2(i32* %ptr) {
+; CHECK-LABEL: entry
+; CHECK-DAG: %gep2 = getelementptr inbounds i32, i32* %ptr, i64 2
+; CHECK-DAG: %gep3 = getelementptr inbounds i32, i32* %gep2, i64 3
+; CHECK-DAG: %gep1 = getelementptr inbounds i32, i32* %ptr, i64 1
+; CHECK-ENABLED: br i1 undef, label %[[IF1_LICM:.*]], label %[[ELSE1_LICM:.*]]
+entry:
+  br label %loop
+
+; CHECK-ENABLED: [[IF1_LICM]]:
+; CHECK-ENABLED: br label %[[LOOP_BACKEDGE_LICM:.*]]
+
+; CHECK-ENABLED: [[ELSE1_LICM]]:
+; CHECK-ENABLED: br label %[[LOOP_BACKEDGE_LICM]]
+
+; CHECK-ENABLED: [[LOOP_BACKEDGE_LICM]]:
+; CHECK-ENABLED: br i1 undef, label %[[IF3_LICM:.*]], label %[[END_LICM:.*]]
+
+; CHECK-ENABLED: [[IF3_LICM]]:
+; CHECK-ENABLED: br label %[[END_LICM]]
+
+; CHECK-ENABLED: [[END_LICM]]:
+; CHECK: br label %loop
+
+loop:
+  br i1 undef, label %if1, label %else1
+
+if1:
+  %gep1 = getelementptr inbounds i32, i32* %ptr, i64 1
+  store i32 0, i32* %gep1, align 4
+  br label %loop.backedge
+
+else1:
+  %gep2 = getelementptr inbounds i32, i32* %ptr, i64 2
+  store i32 0, i32* %gep2, align 4
+  br i1 undef, label %if2, label %loop.backedge
+
+if2:
+  br i1 undef, label %if3, label %end
+
+if3:
+  %gep3 = getelementptr inbounds i32, i32* %gep2, i64 3
+  store i32 0, i32* %gep3, align 4
+  br label %end
+
+end:
+  br label %loop.backedge
+
+loop.backedge:
+  br label %loop
+}
+
+; CHECK-LABEL: @rehoist_wrong_order_3
+define void @rehoist_wrong_order_3(i32* %ptr) {
+; CHECK-LABEL: entry
+; CHECK-DAG: %gep2 = getelementptr inbounds i32, i32* %ptr, i64 2
+; CHECK-DAG: %gep1 = getelementptr inbounds i32, i32* %ptr, i64 1
+; CHECK-ENABLED: br i1 undef, label %[[IF1_LICM:.*]], label %[[ELSE1_LICM:.*]]
+entry:
+  br label %loop
+
+; CHECK-ENABLED: [[IF1_LICM]]:
+; CHECK-ENABLED: br label %[[IF2_LICM:.*]]
+
+; CHECK-ENABLED: [[ELSE1_LICM]]:
+; CHECK-ENABLED: br label %[[IF2_LICM]]
+
+; CHECK-ENABLED: [[IF2_LICM]]:
+; CHECK-ENABLED: %phi = phi i32* [ %gep1, %[[IF1_LICM]] ], [ %gep2, %[[ELSE1_LICM]] ]
+; CHECK-ENABLED: %gep3 = getelementptr inbounds i32, i32* %phi, i64 3
+; CHECK-ENABLED: br i1 undef, label %[[IF3_LICM:.*]], label %[[END_LICM:.*]]
+
+; CHECK-ENABLED: [[IF3_LICM]]:
+; CHECK-ENABLED: br label %[[END_LICM]]
+
+; CHECK-ENABLED: [[END_LICM]]:
+; CHECK: br label %loop
+
+loop:
+  br i1 undef, label %if1, label %else1
+
+if1:
+  %gep1 = getelementptr inbounds i32, i32* %ptr, i64 1
+  store i32 0, i32* %gep1, align 4
+  br label %if2
+
+else1:
+  %gep2 = getelementptr inbounds i32, i32* %ptr, i64 2
+  store i32 0, i32* %gep2, align 4
+  br i1 undef, label %if2, label %loop.backedge
+
+if2:
+  %phi = phi i32* [ %gep1, %if1 ], [ %gep2, %else1 ]
+  br i1 undef, label %if3, label %end
+
+if3:
+  %gep3 = getelementptr inbounds i32, i32* %phi, i64 3
+  store i32 0, i32* %gep3, align 4
+  br label %end
+
+end:
+  br label %loop.backedge
+
+loop.backedge:
+  br label %loop
+}
diff --git a/test/Transforms/LICM/hoist-round.ll b/test/Transforms/LICM/hoist-round.ll
index 35851f3..10f75be 100644
--- a/test/Transforms/LICM/hoist-round.ll
+++ b/test/Transforms/LICM/hoist-round.ll
@@ -1,5 +1,6 @@
 ; RUN: opt -S -licm < %s | FileCheck %s
 ; RUN: opt -aa-pipeline=basic-aa -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s
+; RUN: opt -S -licm -enable-mssa-loop-dependency=true -verify-memoryssa < %s | FileCheck %s
 
 target datalayout = "E-m:e-p:32:32-i8:8:8-i16:16:16-i64:32:32-f64:32:32-v64:32:32-v128:32:32-a0:0:32-n32"
 
diff --git a/test/Transforms/LICM/hoisting.ll b/test/Transforms/LICM/hoisting.ll
index 4572ad3..f65b965 100644
--- a/test/Transforms/LICM/hoisting.ll
+++ b/test/Transforms/LICM/hoisting.ll
@@ -1,5 +1,6 @@
 ; RUN: opt < %s -licm -S | FileCheck %s
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' -S | FileCheck %s
+; RUN: opt < %s -licm -enable-mssa-loop-dependency=true -verify-memoryssa -S | FileCheck %s
 
 @X = global i32 0		; <i32*> [#uses=1]
 
diff --git a/test/Transforms/LICM/sink-promote.ll b/test/Transforms/LICM/sink-promote.ll
new file mode 100644
index 0000000..45024c4
--- /dev/null
+++ b/test/Transforms/LICM/sink-promote.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -basicaa -licm -S | FileCheck %s
+
+; Test moved from sinking.ll, as it tests sinking of a store that alone touches
+; a memory location in a loop.
+; The store can be sunk out of an exit block containing indirectbr instructions
+; after D50925. Updated to use an argument instead of undef, due to PR38989.
+define void @test12(i32* %ptr) {
+; CHECK-LABEL: @test12
+; CHECK: store
+; CHECK-NEXT: br label %lab4
+  br label %lab4
+
+lab4:
+  br label %lab20
+
+lab5:
+  br label %lab20
+
+lab6:
+  br label %lab4
+
+lab7:
+  br i1 undef, label %lab8, label %lab13
+
+lab8:
+  br i1 undef, label %lab13, label %lab10
+
+lab10:
+  br label %lab7
+
+lab13:
+  ret void
+
+lab20:
+  br label %lab21
+
+lab21:
+; CHECK: lab21:
+; CHECK-NOT: store
+; CHECK: br i1 false, label %lab21, label %lab22
+  store i32 36127957, i32* %ptr, align 4
+  br i1 undef, label %lab21, label %lab22
+
+lab22:
+; CHECK: lab22:
+; CHECK-NOT: store
+; CHECK-NEXT: indirectbr i8* undef
+  indirectbr i8* undef, [label %lab5, label %lab6, label %lab7]
+}
+
diff --git a/test/Transforms/LICM/sink.ll b/test/Transforms/LICM/sink.ll
index 70fa6fa..17170f5 100644
--- a/test/Transforms/LICM/sink.ll
+++ b/test/Transforms/LICM/sink.ll
@@ -2,6 +2,7 @@
 ; RUN: opt -S -licm < %s | opt -S -loop-sink | FileCheck %s --check-prefix=CHECK-SINK
 ; RUN: opt -S < %s -passes='require<opt-remark-emit>,loop(licm),loop-sink' \
 ; RUN:     | FileCheck %s --check-prefix=CHECK-SINK
+; RUN: opt -S -licm -enable-mssa-loop-dependency=true -verify-memoryssa < %s | FileCheck %s --check-prefix=CHECK-LICM
 
 ; Original source code:
 ; int g;
diff --git a/test/Transforms/LICM/sinking.ll b/test/Transforms/LICM/sinking.ll
index 3bcbc5d..cc30494 100644
--- a/test/Transforms/LICM/sinking.ll
+++ b/test/Transforms/LICM/sinking.ll
@@ -1,5 +1,7 @@
 ; RUN: opt < %s -basicaa -licm -S | FileCheck %s
 ; RUN: opt < %s -debugify -basicaa -licm -S | FileCheck %s -check-prefix=DEBUGIFY
+; RUN: opt < %s -basicaa -licm -S -enable-mssa-loop-dependency=true -verify-memoryssa | FileCheck %s
+
 
 declare i32 @strlen(i8*) readonly nounwind
 
@@ -358,50 +360,7 @@
   ret i32 %lcssa
 }
 
-; Can't sink stores out of exit blocks containing indirectbr instructions
-; because loop simplify does not create dedicated exits for such blocks. Test
-; that by sinking the store from lab21 to lab22, but not further.
-define void @test12() {
-; CHECK-LABEL: @test12
-  br label %lab4
-
-lab4:
-  br label %lab20
-
-lab5:
-  br label %lab20
-
-lab6:
-  br label %lab4
-
-lab7:
-  br i1 undef, label %lab8, label %lab13
-
-lab8:
-  br i1 undef, label %lab13, label %lab10
-
-lab10:
-  br label %lab7
-
-lab13:
-  ret void
-
-lab20:
-  br label %lab21
-
-lab21:
-; CHECK: lab21:
-; CHECK-NOT: store
-; CHECK: br i1 false, label %lab21, label %lab22
-  store i32 36127957, i32* undef, align 4
-  br i1 undef, label %lab21, label %lab22
-
-lab22:
-; CHECK: lab22:
-; CHECK: store
-; CHECK-NEXT: indirectbr i8* undef
-  indirectbr i8* undef, [label %lab5, label %lab6, label %lab7]
-}
+; @test12 moved to sink-promote.ll, as it tests sinking and promotion.
 
 ; Test that we don't crash when trying to sink stores and there's no preheader
 ; available (which is used for creating loads that may be used by the SSA
diff --git a/test/Transforms/LICM/volatile-alias.ll b/test/Transforms/LICM/volatile-alias.ll
index f387012..c1f727c 100644
--- a/test/Transforms/LICM/volatile-alias.ll
+++ b/test/Transforms/LICM/volatile-alias.ll
@@ -1,5 +1,6 @@
 ; RUN: opt -basicaa -sroa -loop-rotate -licm -S < %s | FileCheck %s
 ; RUN: opt -basicaa -sroa -loop-rotate %s | opt -aa-pipeline=basic-aa -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S | FileCheck %s
+; RUN: opt -basicaa -sroa -loop-rotate -licm -enable-mssa-loop-dependency=true -verify-memoryssa -S < %s | FileCheck %s
 ; The objects *p and *q are aliased to each other, but even though *q is
 ; volatile, *p can be considered invariant in the loop. Check if it is moved
 ; out of the loop.
diff --git a/test/Transforms/LoopDeletion/crashbc.ll b/test/Transforms/LoopDeletion/crashbc.ll
index 056c61f..f385a29 100644
--- a/test/Transforms/LoopDeletion/crashbc.ll
+++ b/test/Transforms/LoopDeletion/crashbc.ll
@@ -1,5 +1,5 @@
 ; Make sure we don't crash when writing bitcode.
-; RUN: opt -loop-deletion -o /dev/null
+; RUN: opt < %s -loop-deletion -o /dev/null
 
 define void @f() {
   br label %bb1
diff --git a/test/Transforms/LoopIdiom/X86/cttz.ll b/test/Transforms/LoopIdiom/X86/cttz.ll
new file mode 100644
index 0000000..e18feca
--- /dev/null
+++ b/test/Transforms/LoopIdiom/X86/cttz.ll
@@ -0,0 +1,82 @@
+; RUN: opt -loop-idiom -mtriple=x86_64 -mcpu=core-avx2 < %s -S | FileCheck --check-prefix=ALL %s
+; RUN: opt -loop-idiom -mtriple=x86_64 -mcpu=corei7 < %s -S | FileCheck --check-prefix=ALL %s
+
+; Recognize the CTTZ builtin pattern.
+; Here it will replace the loop;
+; assume the builtin is always profitable.
+;
+; int cttz_zero_check(int n)
+; {
+;   int i = 0;
+;   while(n) {
+;     n <<= 1;
+;     i++;
+;   }
+;   return i;
+; }
+;
+; ALL-LABEL: @cttz_zero_check
+; ALL:       %0 = call i32 @llvm.cttz.i32(i32 %n, i1 true)
+; ALL-NEXT:  %1 = sub i32 32, %0
+;
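+; For example, n = 8 has cttz(8) = 3; the loop shifts n left 29 times before
+; it becomes zero, and 29 = 32 - 3 is exactly what the expansion computes.
+;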
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @cttz_zero_check(i32 %n) {
+entry:
+  %tobool4 = icmp eq i32 %n, 0
+  br i1 %tobool4, label %while.end, label %while.body.preheader
+
+while.body.preheader:                             ; preds = %entry
+  br label %while.body
+
+while.body:                                       ; preds = %while.body.preheader, %while.body
+  %i.06 = phi i32 [ %inc, %while.body ], [ 0, %while.body.preheader ]
+  %n.addr.05 = phi i32 [ %shl, %while.body ], [ %n, %while.body.preheader ]
+  %shl = shl i32 %n.addr.05, 1
+  %inc = add nsw i32 %i.06, 1
+  %tobool = icmp eq i32 %shl, 0
+  br i1 %tobool, label %while.end.loopexit, label %while.body
+
+while.end.loopexit:                               ; preds = %while.body
+  br label %while.end
+
+while.end:                                        ; preds = %while.end.loopexit, %entry
+  %i.0.lcssa = phi i32 [ 0, %entry ], [ %inc, %while.end.loopexit ]
+  ret i32 %i.0.lcssa
+}
+
+; Recognize the CTTZ builtin pattern.
+; Here it will replace the loop;
+; assume the builtin is always profitable.
+;
+; int cttz(int n)
+; {
+;   int i = 0;
+;   while(n <<= 1) {
+;     i++;
+;   }
+;   return i;
+; }
+;
+; ALL-LABEL: @cttz
+; ALL:      %0 = shl i32 %n, 1
+; ALL-NEXT: %1 = call i32 @llvm.cttz.i32(i32 %0, i1 false)
+; ALL-NEXT: %2 = sub i32 32, %1
+; ALL-NEXT: %3 = add i32 %2, 1
+;
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @cttz(i32 %n) {
+entry:
+  br label %while.cond
+
+while.cond:                                       ; preds = %while.cond, %entry
+  %n.addr.0 = phi i32 [ %n, %entry ], [ %shl, %while.cond ]
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %while.cond ]
+  %shl = shl i32 %n.addr.0, 1
+  %tobool = icmp eq i32 %shl, 0
+  %inc = add nsw i32 %i.0, 1
+  br i1 %tobool, label %while.end, label %while.cond
+
+while.end:                                        ; preds = %while.cond
+  ret i32 %i.0
+}
+
diff --git a/test/Transforms/LoopReroll/reroll_with_dbg.ll b/test/Transforms/LoopReroll/reroll_with_dbg.ll
index 9351846..3a752b2 100644
--- a/test/Transforms/LoopReroll/reroll_with_dbg.ll
+++ b/test/Transforms/LoopReroll/reroll_with_dbg.ll
@@ -85,7 +85,7 @@
 !llvm.module.flags = !{!17, !18, !19, !20}
 !llvm.ident = !{!21}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0 (http://llvm.org/git/clang.git b1fbc23058e7fa1cdd954ab97ba84f1c549c9879) (http://llvm.org/git/llvm.git 054da58c5398a721d4dab7af63d7de8d7a1e1a1c)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "test.c", directory: "/home/weimingz/llvm-build/release/community-tip")
 !2 = !{}
 !4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !5, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !11)
@@ -105,7 +105,7 @@
 !18 = !{i32 2, !"Debug Info Version", i32 3}
 !19 = !{i32 1, !"wchar_size", i32 4}
 !20 = !{i32 1, !"min_enum_size", i32 4}
-!21 = !{!"clang version 3.8.0 (http://llvm.org/git/clang.git b1fbc23058e7fa1cdd954ab97ba84f1c549c9879) (http://llvm.org/git/llvm.git 054da58c5398a721d4dab7af63d7de8d7a1e1a1c)"}
+!21 = !{!"clang version 3.8.0"}
 !22 = !DIExpression()
 !23 = !DILocation(line: 1, column: 27, scope: !4)
 !24 = !DILocation(line: 1, column: 47, scope: !4)
diff --git a/test/Transforms/LoopRotate/phi-dbgvalue.ll b/test/Transforms/LoopRotate/phi-dbgvalue.ll
index 1f7e129..c4e13dc 100644
--- a/test/Transforms/LoopRotate/phi-dbgvalue.ll
+++ b/test/Transforms/LoopRotate/phi-dbgvalue.ll
@@ -48,12 +48,12 @@
 !llvm.module.flags = !{!3, !4}
 !llvm.ident = !{!5}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 0f3ed908c1f13f83da4b240f7595eb8d05e0a754) (http://llvm.org/git/llvm.git 8e270f5a6b8ceb0f3ac3ef1ffb83c5e29b44ae68)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "debug-phi.c", directory: "/work/projects/src/tests/debug")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 5.0.0 (http://llvm.org/git/clang.git 0f3ed908c1f13f83da4b240f7595eb8d05e0a754) (http://llvm.org/git/llvm.git 8e270f5a6b8ceb0f3ac3ef1ffb83c5e29b44ae68)"}
+!5 = !{!"clang version 5.0.0"}
 !6 = distinct !DISubprogram(name: "func", scope: !1, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !2)
 !7 = !DISubroutineType(types: !8)
 !8 = !{null, !9}
diff --git a/test/Transforms/LoopSimplifyCFG/constant-fold-branch.ll b/test/Transforms/LoopSimplifyCFG/constant-fold-branch.ll
index 7998aa3..db5f0f1 100644
--- a/test/Transforms/LoopSimplifyCFG/constant-fold-branch.ll
+++ b/test/Transforms/LoopSimplifyCFG/constant-fold-branch.ll
@@ -1,4 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; XFAIL: *
+; The tests complex_dead_subloop_branch and complex_dead_subloop_switch fail an
+; assertion; therefore, the CFG simplification is temporarily disabled.
 ; REQUIRES: asserts
 ; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -loop-simplifycfg -debug-only=loop-simplifycfg -verify-loop-info -verify-dom-info -verify-loop-lcssa 2>&1 < %s | FileCheck %s
 ; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -passes='require<domtree>,loop(simplify-cfg)' -debug-only=loop-simplifycfg -verify-loop-info -verify-dom-info -verify-loop-lcssa 2>&1 < %s | FileCheck %s
@@ -88,18 +91,12 @@
 ; CHECK-NEXT:  preheader:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    br i1 true, label [[BACKEDGE]], label [[DEAD:%.*]]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
 ;
 preheader:
@@ -129,22 +126,12 @@
 ; CHECK-NEXT:  preheader:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    switch i32 1, label [[DEAD:%.*]] [
-; CHECK-NEXT:    i32 0, label [[DEAD]]
-; CHECK-NEXT:    i32 1, label [[BACKEDGE]]
-; CHECK-NEXT:    i32 2, label [[DEAD]]
-; CHECK-NEXT:    ]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
 ;
 preheader:
@@ -175,18 +162,12 @@
 ; CHECK-NEXT:  preheader:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    br i1 true, label [[BACKEDGE]], label [[DEAD:%.*]]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
 ;
 preheader:
@@ -219,22 +200,12 @@
 ; CHECK-NEXT:  preheader:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    switch i32 1, label [[DEAD:%.*]] [
-; CHECK-NEXT:    i32 0, label [[DEAD]]
-; CHECK-NEXT:    i32 1, label [[BACKEDGE]]
-; CHECK-NEXT:    i32 2, label [[DEAD]]
-; CHECK-NEXT:    ]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
 ;
 preheader:
@@ -268,24 +239,25 @@
 define i32 @dead_exit_test_branch_loop(i32 %end) {
 ; CHECK-LABEL: @dead_exit_test_branch_loop(
 ; CHECK-NEXT:  preheader:
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[DEAD:%.*]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    br i1 true, label [[BACKEDGE]], label [[DEAD:%.*]]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_LCSSA:%.*]] = phi i32 [ [[I]], [[HEADER]] ]
-; CHECK-NEXT:    br label [[DUMMY:%.*]]
-; CHECK:       dummy:
-; CHECK-NEXT:    br label [[EXIT:%.*]]
-; CHECK:       backedge:
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
 ; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT_LOOPEXIT:%.*]]
+; CHECK:       dead:
+; CHECK-NEXT:    br label [[DUMMY:%.*]]
+; CHECK:       dummy:
+; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit.loopexit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I_LCSSA]], [[DUMMY]] ], [ [[I_INC_LCSSA]], [[EXIT_LOOPEXIT]] ]
+; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ undef, [[DUMMY]] ], [ [[I_INC_LCSSA]], [[EXIT_LOOPEXIT]] ]
 ; CHECK-NEXT:    ret i32 [[I_1]]
 ;
 preheader:
@@ -316,28 +288,27 @@
 define i32 @dead_exit_test_switch_loop(i32 %end) {
 ; CHECK-LABEL: @dead_exit_test_switch_loop(
 ; CHECK-NEXT:  preheader:
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[DEAD:%.*]]
+; CHECK-NEXT:    i32 2, label [[DEAD]]
+; CHECK-NEXT:    i32 3, label [[DEAD]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    switch i32 1, label [[DEAD:%.*]] [
-; CHECK-NEXT:    i32 0, label [[DEAD]]
-; CHECK-NEXT:    i32 1, label [[BACKEDGE]]
-; CHECK-NEXT:    i32 2, label [[DEAD]]
-; CHECK-NEXT:    ]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_LCSSA:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I]], [[HEADER]] ], [ [[I]], [[HEADER]] ]
-; CHECK-NEXT:    br label [[DUMMY:%.*]]
-; CHECK:       dummy:
-; CHECK-NEXT:    br label [[EXIT:%.*]]
-; CHECK:       backedge:
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
 ; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT_LOOPEXIT:%.*]]
+; CHECK:       dead:
+; CHECK-NEXT:    br label [[DUMMY:%.*]]
+; CHECK:       dummy:
+; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit.loopexit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I_LCSSA]], [[DUMMY]] ], [ [[I_INC_LCSSA]], [[EXIT_LOOPEXIT]] ]
+; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ undef, [[DUMMY]] ], [ [[I_INC_LCSSA]], [[EXIT_LOOPEXIT]] ]
 ; CHECK-NEXT:    ret i32 [[I_1]]
 ;
 preheader:
@@ -463,32 +434,19 @@
 ; CHECK-NEXT:  preheader:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    br i1 true, label [[LIVE_PREHEADER:%.*]], label [[DEAD_PREHEADER:%.*]]
-; CHECK:       live_preheader:
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[EXIT_A:%.*]] ]
 ; CHECK-NEXT:    br label [[LIVE_LOOP:%.*]]
 ; CHECK:       live_loop:
-; CHECK-NEXT:    [[A:%.*]] = phi i32 [ 0, [[LIVE_PREHEADER]] ], [ [[A_INC:%.*]], [[LIVE_LOOP]] ]
+; CHECK-NEXT:    [[A:%.*]] = phi i32 [ 0, [[HEADER]] ], [ [[A_INC:%.*]], [[LIVE_LOOP]] ]
 ; CHECK-NEXT:    [[A_INC]] = add i32 [[A]], 1
 ; CHECK-NEXT:    [[CMP_A:%.*]] = icmp slt i32 [[A_INC]], [[END:%.*]]
-; CHECK-NEXT:    br i1 [[CMP_A]], label [[LIVE_LOOP]], label [[EXIT_A:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_A]], label [[LIVE_LOOP]], label [[EXIT_A]]
 ; CHECK:       exit.a:
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       dead_preheader:
-; CHECK-NEXT:    br label [[DEAD_LOOP:%.*]]
-; CHECK:       dead_loop:
-; CHECK-NEXT:    [[B:%.*]] = phi i32 [ 0, [[DEAD_PREHEADER]] ], [ [[B_INC:%.*]], [[DEAD_LOOP]] ]
-; CHECK-NEXT:    [[B_INC]] = add i32 [[B]], 1
-; CHECK-NEXT:    [[CMP_B:%.*]] = icmp slt i32 [[B_INC]], [[END]]
-; CHECK-NEXT:    br i1 [[CMP_B]], label [[DEAD_LOOP]], label [[EXIT_B:%.*]]
-; CHECK:       exit.b:
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
 ; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[EXIT_A]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
 ;
 preheader:
@@ -536,36 +494,19 @@
 ; CHECK-NEXT:  preheader:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    switch i32 1, label [[DEAD_PREHEADER:%.*]] [
-; CHECK-NEXT:    i32 0, label [[DEAD_PREHEADER]]
-; CHECK-NEXT:    i32 1, label [[LIVE_PREHEADER:%.*]]
-; CHECK-NEXT:    i32 2, label [[DEAD_PREHEADER]]
-; CHECK-NEXT:    ]
-; CHECK:       live_preheader:
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[EXIT_A:%.*]] ]
 ; CHECK-NEXT:    br label [[LIVE_LOOP:%.*]]
 ; CHECK:       live_loop:
-; CHECK-NEXT:    [[A:%.*]] = phi i32 [ 0, [[LIVE_PREHEADER]] ], [ [[A_INC:%.*]], [[LIVE_LOOP]] ]
+; CHECK-NEXT:    [[A:%.*]] = phi i32 [ 0, [[HEADER]] ], [ [[A_INC:%.*]], [[LIVE_LOOP]] ]
 ; CHECK-NEXT:    [[A_INC]] = add i32 [[A]], 1
 ; CHECK-NEXT:    [[CMP_A:%.*]] = icmp slt i32 [[A_INC]], [[END:%.*]]
-; CHECK-NEXT:    br i1 [[CMP_A]], label [[LIVE_LOOP]], label [[EXIT_A:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_A]], label [[LIVE_LOOP]], label [[EXIT_A]]
 ; CHECK:       exit.a:
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       dead_preheader:
-; CHECK-NEXT:    br label [[DEAD_LOOP:%.*]]
-; CHECK:       dead_loop:
-; CHECK-NEXT:    [[B:%.*]] = phi i32 [ 0, [[DEAD_PREHEADER]] ], [ [[B_INC:%.*]], [[DEAD_LOOP]] ]
-; CHECK-NEXT:    [[B_INC]] = add i32 [[B]], 1
-; CHECK-NEXT:    [[CMP_B:%.*]] = icmp slt i32 [[B_INC]], [[END]]
-; CHECK-NEXT:    br i1 [[CMP_B]], label [[DEAD_LOOP]], label [[EXIT_B:%.*]]
-; CHECK:       exit.b:
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
 ; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[EXIT_A]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
 ;
 preheader:
@@ -615,21 +556,18 @@
 define i32 @inf_loop_test_branch_loop(i32 %end) {
 ; CHECK-LABEL: @inf_loop_test_branch_loop(
 ; CHECK-NEXT:  preheader:
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[EXIT:%.*]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    br i1 true, label [[BACKEDGE]], label [[DEAD:%.*]]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
-; CHECK-NEXT:    br i1 true, label [[HEADER]], label [[EXIT:%.*]]
+; CHECK-NEXT:    br label [[HEADER]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
-; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
+; CHECK-NEXT:    ret i32 undef
 ;
 preheader:
   br label %header
@@ -658,25 +596,18 @@
 define i32 @inf_loop_test_switch_loop(i32 %end) {
 ; CHECK-LABEL: @inf_loop_test_switch_loop(
 ; CHECK-NEXT:  preheader:
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[EXIT:%.*]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER:%.*]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    switch i32 1, label [[DEAD:%.*]] [
-; CHECK-NEXT:    i32 0, label [[DEAD]]
-; CHECK-NEXT:    i32 1, label [[BACKEDGE]]
-; CHECK-NEXT:    i32 2, label [[DEAD]]
-; CHECK-NEXT:    ]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
-; CHECK-NEXT:    br i1 true, label [[HEADER]], label [[EXIT:%.*]]
+; CHECK-NEXT:    br label [[HEADER]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
-; CHECK-NEXT:    ret i32 [[I_INC_LCSSA]]
+; CHECK-NEXT:    ret i32 undef
 ;
 preheader:
   br label %header
@@ -898,18 +829,12 @@
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[J_INC:%.*]], [[OUTER_BACKEDGE:%.*]] ]
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    br i1 true, label [[BACKEDGE]], label [[DEAD:%.*]]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[OUTER_BACKEDGE]]
 ; CHECK:       outer_backedge:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    [[J_INC]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[CMP_J:%.*]] = icmp slt i32 [[J_INC]], [[END]]
 ; CHECK-NEXT:    br i1 [[CMP_J]], label [[OUTER_HEADER]], label [[EXIT:%.*]]
@@ -958,22 +883,12 @@
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[J_INC:%.*]], [[OUTER_BACKEDGE:%.*]] ]
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT:    switch i32 1, label [[DEAD:%.*]] [
-; CHECK-NEXT:    i32 0, label [[DEAD]]
-; CHECK-NEXT:    i32 1, label [[BACKEDGE]]
-; CHECK-NEXT:    i32 2, label [[DEAD]]
-; CHECK-NEXT:    ]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[HEADER]] ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[HEADER]], label [[OUTER_BACKEDGE]]
 ; CHECK:       outer_backedge:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[HEADER]] ]
 ; CHECK-NEXT:    [[J_INC]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[CMP_J:%.*]] = icmp slt i32 [[J_INC]], [[END]]
 ; CHECK-NEXT:    br i1 [[CMP_J]], label [[OUTER_HEADER]], label [[EXIT:%.*]]
@@ -1284,25 +1199,23 @@
 ; CHECK-NEXT:    br label [[OUTER_HEADER:%.*]]
 ; CHECK:       outer_header:
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[J_INC:%.*]], [[OUTER_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[OUTER_BACKEDGE]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[I]], [[I]]
-; CHECK-NEXT:    br i1 false, label [[BACKEDGE]], label [[DEAD:%.*]]
-; CHECK:       dead:
 ; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
-; CHECK-NEXT:    br i1 true, label [[HEADER]], label [[OUTER_BACKEDGE]]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I_2]], 1
+; CHECK-NEXT:    br label [[HEADER]]
 ; CHECK:       outer_backedge:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
 ; CHECK-NEXT:    [[J_INC]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[CMP_J:%.*]] = icmp slt i32 [[J_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP_J]], label [[OUTER_HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ [[I_INC_LCSSA]], [[OUTER_BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ undef, [[OUTER_BACKEDGE]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA_LCSSA]]
 ;
 entry:
@@ -1347,29 +1260,23 @@
 ; CHECK-NEXT:    br label [[OUTER_HEADER:%.*]]
 ; CHECK:       outer_header:
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[J_INC:%.*]], [[OUTER_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[OUTER_BACKEDGE]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[I]], [[I]]
-; CHECK-NEXT:    switch i32 1, label [[DEAD:%.*]] [
-; CHECK-NEXT:    i32 0, label [[BACKEDGE]]
-; CHECK-NEXT:    ]
-; CHECK:       dead:
 ; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
-; CHECK-NEXT:    switch i32 1, label [[HEADER]] [
-; CHECK-NEXT:    i32 0, label [[OUTER_BACKEDGE]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I_2]], 1
+; CHECK-NEXT:    br label [[HEADER]]
 ; CHECK:       outer_backedge:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
 ; CHECK-NEXT:    [[J_INC]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[CMP_J:%.*]] = icmp slt i32 [[J_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP_J]], label [[OUTER_HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ [[I_INC_LCSSA]], [[OUTER_BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ undef, [[OUTER_BACKEDGE]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA_LCSSA]]
 ;
 entry:
@@ -1415,25 +1322,22 @@
 ; CHECK-NEXT:    br label [[OUTER_HEADER:%.*]]
 ; CHECK:       outer_header:
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[J_INC:%.*]], [[OUTER_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[OUTER_BACKEDGE]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[I]], [[I]]
-; CHECK-NEXT:    br i1 true, label [[BACKEDGE]], label [[DEAD:%.*]]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
-; CHECK-NEXT:    br i1 true, label [[HEADER]], label [[OUTER_BACKEDGE]]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
+; CHECK-NEXT:    br label [[HEADER]]
 ; CHECK:       outer_backedge:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
 ; CHECK-NEXT:    [[J_INC]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[CMP_J:%.*]] = icmp slt i32 [[J_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP_J]], label [[OUTER_HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ [[I_INC_LCSSA]], [[OUTER_BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ undef, [[OUTER_BACKEDGE]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA_LCSSA]]
 ;
 entry:
@@ -1478,29 +1382,22 @@
 ; CHECK-NEXT:    br label [[OUTER_HEADER:%.*]]
 ; CHECK:       outer_header:
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[J_INC:%.*]], [[OUTER_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    switch i32 0, label [[PREHEADER_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[OUTER_BACKEDGE]]
+; CHECK-NEXT:    ]
+; CHECK:       preheader-split:
 ; CHECK-NEXT:    br label [[HEADER:%.*]]
 ; CHECK:       header:
-; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[OUTER_HEADER]] ], [ [[I_INC:%.*]], [[BACKEDGE:%.*]] ]
+; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[PREHEADER_SPLIT]] ], [ [[I_INC:%.*]], [[HEADER]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[I]], [[I]]
-; CHECK-NEXT:    switch i32 1, label [[BACKEDGE]] [
-; CHECK-NEXT:    i32 0, label [[DEAD:%.*]]
-; CHECK-NEXT:    ]
-; CHECK:       dead:
-; CHECK-NEXT:    [[I_2:%.*]] = add i32 [[I]], 1
-; CHECK-NEXT:    br label [[BACKEDGE]]
-; CHECK:       backedge:
-; CHECK-NEXT:    [[I_1:%.*]] = phi i32 [ [[I]], [[HEADER]] ], [ [[I_2]], [[DEAD]] ]
-; CHECK-NEXT:    [[I_INC]] = add i32 [[I_1]], 1
-; CHECK-NEXT:    switch i32 1, label [[HEADER]] [
-; CHECK-NEXT:    i32 0, label [[OUTER_BACKEDGE]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
+; CHECK-NEXT:    br label [[HEADER]]
 ; CHECK:       outer_backedge:
-; CHECK-NEXT:    [[I_INC_LCSSA:%.*]] = phi i32 [ [[I_INC]], [[BACKEDGE]] ]
 ; CHECK-NEXT:    [[J_INC]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[CMP_J:%.*]] = icmp slt i32 [[J_INC]], [[END:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP_J]], label [[OUTER_HEADER]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ [[I_INC_LCSSA]], [[OUTER_BACKEDGE]] ]
+; CHECK-NEXT:    [[I_INC_LCSSA_LCSSA:%.*]] = phi i32 [ undef, [[OUTER_BACKEDGE]] ]
 ; CHECK-NEXT:    ret i32 [[I_INC_LCSSA_LCSSA]]
 ;
 entry:
@@ -1548,13 +1445,17 @@
 ; CHECK-NEXT:    br label [[LOOP_2:%.*]]
 ; CHECK:       loop_2:
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[LOOP_1]] ], [ [[J_NEXT:%.*]], [[LOOP_2_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    switch i32 0, label [[LOOP_2_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[LOOP_2_BACKEDGE]]
+; CHECK-NEXT:    ]
+; CHECK:       loop_2-split:
 ; CHECK-NEXT:    br label [[LOOP_3:%.*]]
 ; CHECK:       loop_3:
-; CHECK-NEXT:    [[K:%.*]] = phi i32 [ 0, [[LOOP_2]] ], [ [[K_NEXT:%.*]], [[LOOP_3_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    [[K:%.*]] = phi i32 [ 0, [[LOOP_2_SPLIT]] ], [ [[K_NEXT:%.*]], [[LOOP_3_BACKEDGE:%.*]] ]
 ; CHECK-NEXT:    br i1 [[COND1:%.*]], label [[LOOP_3_BACKEDGE]], label [[LOOP_1_BACKEDGE_LOOPEXIT:%.*]]
 ; CHECK:       loop_3_backedge:
 ; CHECK-NEXT:    [[K_NEXT]] = add i32 [[K]], 1
-; CHECK-NEXT:    br i1 true, label [[LOOP_3]], label [[LOOP_2_BACKEDGE]]
+; CHECK-NEXT:    br label [[LOOP_3]]
 ; CHECK:       loop_2_backedge:
 ; CHECK-NEXT:    [[J_NEXT]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[C_2:%.*]] = icmp slt i32 [[J_NEXT]], [[N:%.*]]
@@ -1613,15 +1514,17 @@
 ; CHECK-NEXT:    br label [[LOOP_2:%.*]]
 ; CHECK:       loop_2:
 ; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[LOOP_1]] ], [ [[J_NEXT:%.*]], [[LOOP_2_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    switch i32 0, label [[LOOP_2_SPLIT:%.*]] [
+; CHECK-NEXT:    i32 1, label [[LOOP_2_BACKEDGE]]
+; CHECK-NEXT:    ]
+; CHECK:       loop_2-split:
 ; CHECK-NEXT:    br label [[LOOP_3:%.*]]
 ; CHECK:       loop_3:
-; CHECK-NEXT:    [[K:%.*]] = phi i32 [ 0, [[LOOP_2]] ], [ [[K_NEXT:%.*]], [[LOOP_3_BACKEDGE:%.*]] ]
+; CHECK-NEXT:    [[K:%.*]] = phi i32 [ 0, [[LOOP_2_SPLIT]] ], [ [[K_NEXT:%.*]], [[LOOP_3_BACKEDGE:%.*]] ]
 ; CHECK-NEXT:    br i1 [[COND1:%.*]], label [[LOOP_3_BACKEDGE]], label [[LOOP_1_BACKEDGE_LOOPEXIT:%.*]]
 ; CHECK:       loop_3_backedge:
 ; CHECK-NEXT:    [[K_NEXT]] = add i32 [[K]], 1
-; CHECK-NEXT:    switch i32 1, label [[LOOP_3]] [
-; CHECK-NEXT:    i32 0, label [[LOOP_2_BACKEDGE]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    br label [[LOOP_3]]
 ; CHECK:       loop_2_backedge:
 ; CHECK-NEXT:    [[J_NEXT]] = add i32 [[J]], 1
 ; CHECK-NEXT:    [[C_2:%.*]] = icmp slt i32 [[J_NEXT]], [[N:%.*]]
@@ -2607,3 +2510,59 @@
 exit:
   ret i32 %i
 }
+
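+; In both tests below the constant terminator in %loop always branches to
+; %latch, so %subloop (and %dead_phi, which feeds the %result phi in %latch)
+; is unreachable and should be deleted cleanly by the transform.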
+define i32 @complex_dead_subloop_branch(i1 %cond1, i1 %cond2, i1 %cond3) {
+entry:
+  br label %loop
+
+loop:
+  br i1 true, label %latch, label %subloop
+
+subloop:
+  br i1 %cond1, label %x, label %y
+
+x:
+  br label %subloop_latch
+
+y:
+  br label %subloop_latch
+
+subloop_latch:
+  %dead_phi = phi i32 [ 1, %x ], [ 2, %y ]
+  br i1 %cond2, label %latch, label %subloop
+
+latch:
+  %result = phi i32 [ 0, %loop ], [ %dead_phi, %subloop_latch ]
+  br i1 %cond3, label %loop, label %exit
+
+exit:
+  ret i32 %result
+}
+
+define i32 @complex_dead_subloop_switch(i1 %cond1, i1 %cond2, i1 %cond3) {
+entry:
+  br label %loop
+
+loop:
+  switch i32 1, label %latch [ i32 0, label %subloop ]
+
+subloop:
+  br i1 %cond1, label %x, label %y
+
+x:
+  br label %subloop_latch
+
+y:
+  br label %subloop_latch
+
+subloop_latch:
+  %dead_phi = phi i32 [ 1, %x ], [ 2, %y ]
+  br i1 %cond2, label %latch, label %subloop
+
+latch:
+  %result = phi i32 [ 0, %loop ], [ %dead_phi, %subloop_latch ]
+  br i1 %cond3, label %loop, label %exit
+
+exit:
+  ret i32 %result
+}
diff --git a/test/Transforms/LoopSimplifyCFG/lcssa.ll b/test/Transforms/LoopSimplifyCFG/lcssa.ll
index 82920f1..032b12a 100644
--- a/test/Transforms/LoopSimplifyCFG/lcssa.ll
+++ b/test/Transforms/LoopSimplifyCFG/lcssa.ll
@@ -1,7 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; REQUIRES: asserts
 ; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -loop-simplifycfg -verify-loop-info -verify-dom-info -verify-loop-lcssa < %s | FileCheck %s
 ; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -passes='require<domtree>,loop(simplify-cfg)' -verify-loop-info -verify-dom-info -verify-loop-lcssa < %s | FileCheck %s
 ; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -loop-simplifycfg -enable-mssa-loop-dependency=true -verify-memoryssa -verify-loop-info -verify-dom-info -verify-loop-lcssa < %s | FileCheck %s
+; XFAIL: *
+; test_01 is currently failing because the loop is not in LCSSA form after the
+; transform.
 
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 
@@ -50,3 +54,24 @@
 if.end.8:                                         ; preds = %if.end.7
   br label %for.cond
 }
+
+define void @test_01() {
+entry:
+  br label %for.cond
+
+for.cond.loopexit:                                ; preds = %while.cond
+  %inc41.lcssa = phi i16 [ %inc41, %while.cond ]
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.cond.loopexit, %entry
+  %inc41.lcssa3 = phi i16 [ %inc41.lcssa, %for.cond.loopexit ], [ undef, %entry ]
+  br label %while.cond
+
+while.cond:                                       ; preds = %while.body, %for.cond
+  %inc41 = phi i16 [ %inc4, %while.body ], [ %inc41.lcssa3, %for.cond ]
+  br i1 true, label %while.body, label %for.cond.loopexit
+
+while.body:                                       ; preds = %while.cond
+  %inc4 = add nsw i16 %inc41, 1
+  br label %while.cond
+}
diff --git a/test/Transforms/LoopUnroll/disable-loop-unrolling_forced.ll b/test/Transforms/LoopUnroll/disable-loop-unrolling_forced.ll
new file mode 100644
index 0000000..9a0900d
--- /dev/null
+++ b/test/Transforms/LoopUnroll/disable-loop-unrolling_forced.ll
@@ -0,0 +1,30 @@
+; RUN: opt -disable-loop-unrolling -O1 -S < %s | FileCheck %s
+;
+; Check that loop unrolling metadata is honored even though automatic
+; unrolling is disabled in the pass builder.
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; CHECK-LABEL: @forced(
+; CHECK: load
+; CHECK: load
+define void @forced(i32* nocapture %a) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %inc = add nsw i32 %0, 1
+  store i32 %inc, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 64
+  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret void
+}
+
+!0 = distinct !{!0, !{!"llvm.loop.unroll.enable"},
+                    !{!"llvm.loop.unroll.count", i32 2}}
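+;
+; Note: !0 both enables unrolling and sets an unroll count of 2, which is why
+; the CHECK lines above expect the load to appear twice.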
diff --git a/test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll b/test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll
index 6014775..b3c0039 100644
--- a/test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll
@@ -124,3 +124,152 @@
 exitsucc:                                              ; preds = %headerexit
   ret i64 96
 }
+
+; exit block (%default) has an exiting block and another exit block as predecessors.
+define void @test4(i16 %c3) {
+; CHECK-LABEL: test4
+
+; CHECK-LABEL: exiting.prol:
+; CHECK-NEXT:   switch i16 %c3, label %default.loopexit.loopexit1 [
+
+; CHECK-LABEL: exiting:
+; CHECK-NEXT:   switch i16 %c3, label %default.loopexit.loopexit [
+
+; CHECK-LABEL: default.loopexit.loopexit:
+; CHECK-NEXT:   br label %default.loopexit
+
+; CHECK-LABEL: default.loopexit.loopexit1:
+; CHECK-NEXT:   br label %default.loopexit
+
+; CHECK-LABEL: default.loopexit:
+; CHECK-NEXT:   br label %default
+preheader:
+  %c1 = zext i32 undef to i64
+  br label %header
+
+header:                                       ; preds = %latch, %preheader
+  %indvars.iv = phi i64 [ 0, %preheader ], [ %indvars.iv.next, %latch ]
+  br label %exiting
+
+exiting:                                           ; preds = %header
+  switch i16 %c3, label %default [
+    i16 45, label %otherexit
+    i16 95, label %latch
+  ]
+
+latch:                                          ; preds = %exiting
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %c2 = icmp ult i64 %indvars.iv.next, %c1
+  br i1 %c2, label %header, label %latchexit
+
+latchexit:                                          ; preds = %latch
+  ret void
+
+default:                                          ; preds = %otherexit, %exiting
+  ret void
+
+otherexit:                                           ; preds = %exiting
+  br label %default
+}
+
+; exit block (%exitB) has an exiting block and another exit block as predecessors.
+; exiting block comes from inner loop.
+define void @test5() {
+; CHECK-LABEL: test5
+; CHECK-LABEL: bb1:
+; CHECK-NEXT:   br i1 false, label %outerH.prol.preheader, label %outerH.prol.loopexit
+
+; CHECK-LABEL: outerH.prol.preheader:
+; CHECK-NEXT:   br label %outerH.prol
+
+; CHECK-LABEL: outerH.prol:
+; CHECK-NEXT:   %tmp4.prol = phi i32 [ %tmp6.prol, %outerLatch.prol ], [ undef, %outerH.prol.preheader ]
+; CHECK-NEXT:   %prol.iter = phi i32 [ 0, %outerH.prol.preheader ], [ %prol.iter.sub, %outerLatch.prol ]
+; CHECK-NEXT:   br label %innerH.prol
+bb:
+  %tmp = icmp sgt i32 undef, 79
+  br i1 %tmp, label %outerLatchExit, label %bb1
+
+bb1:                                              ; preds = %bb
+  br label %outerH
+
+outerH:                                              ; preds = %outerLatch, %bb1
+  %tmp4 = phi i32 [ %tmp6, %outerLatch ], [ undef, %bb1 ]
+  br label %innerH
+
+innerH:                                              ; preds = %innerLatch, %outerH
+  br i1 undef, label %innerexiting, label %otherexitB
+
+innerexiting:                                             ; preds = %innerH
+  br i1 undef, label %innerLatch, label %exitB
+
+innerLatch:                                             ; preds = %innerexiting
+  %tmp13 = fcmp olt double undef, 2.000000e+00
+  br i1 %tmp13, label %innerH, label %outerLatch
+
+outerLatch:                                              ; preds = %innerLatch
+  %tmp6 = add i32 %tmp4, 1
+  %tmp7 = icmp sgt i32 %tmp6, 79
+  br i1 %tmp7, label %outerLatchExit, label %outerH
+
+outerLatchExit:                                              ; preds = %outerLatch, %bb
+  ret void
+
+exitB:                                             ; preds = %innerexiting, %otherexitB
+  ret void
+
+otherexitB:                                              ; preds = %innerH
+  br label %exitB
+
+}
+
+; Blocks reachable from the exits (not_zero44) have their IDom inside the
+; loop (the header). Update the IDom to the preheader.
+define void @test6() {
+; CHECK-LABEL: test6
+; CHECK-LABEL: header.prol.preheader:
+; CHECK-NEXT:    br label %header.prol
+
+; CHECK-LABEL: header.prol:
+; CHECK-NEXT:    %indvars.iv.prol = phi i64 [ undef, %header.prol.preheader ], [ %indvars.iv.next.prol, %latch.prol ]
+; CHECK-NEXT:    %prol.iter = phi i64 [ 1, %header.prol.preheader ], [ %prol.iter.sub, %latch.prol ]
+; CHECK-NEXT:    br i1 false, label %latch.prol, label %otherexit.loopexit1
+
+; CHECK-LABEL: header.prol.loopexit.unr-lcssa:
+; CHECK-NEXT:    %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.prol, %latch.prol ]
+; CHECK-NEXT:    br label %header.prol.loopexit
+
+; CHECK-LABEL: header.prol.loopexit:
+; CHECK-NEXT:    %indvars.iv.unr = phi i64 [ undef, %entry ], [ %indvars.iv.unr.ph, %header.prol.loopexit.unr-lcssa ]
+; CHECK-NEXT:    br i1 true, label %latchexit, label %entry.new
+
+; CHECK-LABEL: entry.new:
+; CHECK-NEXT:    br label %header
+entry:
+  br label %header
+
+header:                                          ; preds = %latch, %entry
+  %indvars.iv = phi i64 [ undef, %entry ], [ %indvars.iv.next, %latch ]
+  br i1 undef, label %latch, label %otherexit
+
+latch:                                         ; preds = %header
+  %indvars.iv.next = add nsw i64 %indvars.iv, 2
+  %0 = icmp slt i64 %indvars.iv.next, 616
+  br i1 %0, label %header, label %latchexit
+
+latchexit:                                          ; preds = %latch
+  br label %latchexitsucc
+
+otherexit:                                 ; preds = %header
+  br label %otherexitsucc
+
+otherexitsucc:                                          ; preds = %otherexit
+  br label %not_zero44
+
+not_zero44:                                       ; preds = %latchexitsucc, %otherexitsucc
+  unreachable
+
+latchexitsucc:                                      ; preds = %latchexit
+  br label %not_zero44
+}
+
diff --git a/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll b/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
index ef983df..397e907 100644
--- a/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
@@ -1,8 +1,8 @@
-; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -S | FileCheck %s -check-prefix=EPILOG-NO-IC
-; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine -S | FileCheck %s -check-prefix=EPILOG
-; RUN: opt < %s -loop-unroll -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine
-; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=false -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine -S | FileCheck %s -check-prefix=PROLOG
-; RUN: opt < %s -loop-unroll -unroll-runtime -unroll-runtime-epilog=false -unroll-count=2 -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-loop-lcssa -verify-dom-info -verify-loop-info -S | FileCheck %s -check-prefix=EPILOG-NO-IC
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-loop-lcssa -verify-dom-info -verify-loop-info -instcombine -S | FileCheck %s -check-prefix=EPILOG
+; RUN: opt < %s -loop-unroll -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-loop-lcssa -verify-dom-info -verify-loop-info -instcombine
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=false -unroll-runtime-multi-exit=true -verify-loop-lcssa -verify-dom-info -verify-loop-info -instcombine -S | FileCheck %s -check-prefix=PROLOG
+; RUN: opt < %s -loop-unroll -unroll-runtime -unroll-runtime-epilog=false -unroll-count=2 -unroll-runtime-multi-exit=true -verify-loop-lcssa -verify-dom-info -verify-loop-info -instcombine
 
 ; REQUIRES: asserts
 
@@ -207,43 +207,165 @@
 }
 
 ; FIXME: Support multiple exiting blocks to the same latch exit block.
-define i32 @test4(i32* nocapture %a, i64 %n, i1 %cond) {
-; EPILOG: test4(
+; Three exiting blocks, where the header and the latch exit to the same LatchExit.
+define i32 @hdr_latch_same_exit(i32* nocapture %a, i64 %n, i1 %cond) {
+; EPILOG: hdr_latch_same_exit(
 ; EPILOG-NOT: .unr
 ; EPILOG-NOT: .epil
 
-; PROLOG: test4(
+; PROLOG: hdr_latch_same_exit(
 ; PROLOG-NOT: .unr
 ; PROLOG-NOT: .prol
 entry:
   br label %header
 
 header:
-  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
-  %sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
-  br i1 %cond, label %for.end, label %for.exiting_block
+  %indvars.iv = phi i64 [ %indvars.iv.next, %latch ], [ 0, %entry ]
+  %sum.02 = phi i32 [ %add, %latch ], [ 0, %entry ]
+  br i1 %cond, label %latchExit, label %for.exiting_block
 
 for.exiting_block:
  %cmp = icmp eq i64 %n, 42
- br i1 %cmp, label %for.exit2, label %for.body
+ br i1 %cmp, label %for.exit2, label %latch
 
-for.body:                                         ; preds = %for.body, %entry
+latch:                                         ; preds = %for.exiting_block
   %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
   %0 = load i32, i32* %arrayidx, align 4
   %add = add nsw i32 %0, %sum.02
   %indvars.iv.next = add i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, %n
-  br i1 %exitcond, label %for.end, label %header
+  br i1 %exitcond, label %latchExit, label %header
 
-for.end:                                          ; preds = %for.body, %entry
-  %sum.0.lcssa = phi i32 [ 0, %header ], [ %add, %for.body ]
-  ret i32 %sum.0.lcssa
+latchExit:                                          ; preds = %header, %latch
+  %result = phi i32 [ 0, %header ], [ %add, %latch ]
+  ret i32 %result
 
 for.exit2:
   ret i32 42
 }
 
-; FIXME: Support multiple exiting blocks to the unique exit block.
+; Two exiting blocks exit to the latch exit, and those exiting blocks are the
+; latch and a non-header block.
+; FIXME: We should unroll this loop.
+define i32 @otherblock_latch_same_exit(i32* nocapture %a, i64 %n, i1 %cond) {
+; EPILOG: otherblock_latch_same_exit(
+; EPILOG-NOT: .unr
+; EPILOG-NOT: .epil
+
+; PROLOG: otherblock_latch_same_exit(
+; PROLOG-NOT: .unr
+; PROLOG-NOT: .prol
+entry:
+  br label %header
+
+header:
+  %indvars.iv = phi i64 [ %indvars.iv.next, %latch ], [ 0, %entry ]
+  %sum.02 = phi i32 [ %add, %latch ], [ 0, %entry ]
+  br i1 %cond, label %for.exit2, label %for.exiting_block
+
+for.exiting_block:
+ %cmp = icmp eq i64 %n, 42
+ br i1 %cmp, label %latchExit, label %latch
+
+latch:                                         ; preds = %for.exiting_block
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %sum.02
+  %indvars.iv.next = add i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond, label %latchExit, label %header
+
+latchExit:                                          ; preds = %for.exiting_block, %latch
+  %result = phi i32 [ 2, %for.exiting_block ], [ %add, %latch ]
+  ret i32 %result
+
+for.exit2:
+  ret i32 42
+}
+
+; Two exiting blocks exit to the latch exit, and those exiting blocks are the
+; latch and a non-header block.
+; Same as the test above, except that the incoming value for the latch-exit
+; phi comes from the header.
+; FIXME: We should be able to runtime unroll.
+define i32 @otherblock_latch_same_exit2(i32* nocapture %a, i64 %n, i1 %cond) {
+; EPILOG: otherblock_latch_same_exit2(
+; EPILOG-NOT: .unr
+; EPILOG-NOT: .epil
+
+; PROLOG: otherblock_latch_same_exit2(
+; PROLOG-NOT: .unr
+; PROLOG-NOT: .prol
+entry:
+  br label %header
+
+header:
+  %indvars.iv = phi i64 [ %indvars.iv.next, %latch ], [ 0, %entry ]
+  %sum.02 = phi i32 [ %add, %latch ], [ 0, %entry ]
+  br i1 %cond, label %for.exit2, label %for.exiting_block
+
+for.exiting_block:
+ %cmp = icmp eq i64 %n, 42
+ br i1 %cmp, label %latchExit, label %latch
+
+latch:                                         ; preds = %for.exiting_block
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %sum.02
+  %indvars.iv.next = add i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond, label %latchExit, label %header
+
+latchExit:                                          ; preds = %for.exiting_block, %latch
+  %result = phi i32 [ %sum.02, %for.exiting_block ], [ %add, %latch ]
+  ret i32 %result
+
+for.exit2:
+  ret i32 42
+}
+
+; Two exiting blocks exit to the latch exit, and those exiting blocks are the
+; latch and a non-header block.
+; Same as the test above, except that the incoming value for the cloned
+; latch-exit phi comes from %for.exiting_block.
+; FIXME: We should be able to runtime unroll.
+define i32 @otherblock_latch_same_exit3(i32* nocapture %a, i64 %n, i1 %cond) {
+; EPILOG: otherblock_latch_same_exit3(
+; EPILOG-NOT: .unr
+; EPILOG-NOT: .epil
+
+; PROLOG: otherblock_latch_same_exit3(
+; PROLOG-NOT: .unr
+; PROLOG-NOT: .prol
+entry:
+  br label %header
+
+header:
+  %indvars.iv = phi i64 [ %indvars.iv.next, %latch ], [ 0, %entry ]
+  %sum.02 = phi i32 [ %add, %latch ], [ 0, %entry ]
+  br i1 %cond, label %for.exit2, label %for.exiting_block
+
+for.exiting_block:
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %sum.02
+ %cmp = icmp eq i64 %n, 42
+ br i1 %cmp, label %latchExit, label %latch
+
+latch:                                         ; preds = %for.exiting_block
+  %indvars.iv.next = add i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond, label %latchExit, label %header
+
+latchExit:                                          ; preds = %for.exiting_block, %latch
+  %result = phi i32 [ %sum.02, %for.exiting_block ], [ %add, %latch ]
+  ret i32 %result
+
+for.exit2:
+  ret i32 42
+}
+
+; FIXME: Support multiple exiting blocks to the unique exit block (LatchExit).
+; Only two blocks in the loop (header and latch), and both exit to the same
+; LatchExit.
 define void @unique_exit(i32 %arg) {
 ; EPILOG: unique_exit(
 ; EPILOG-NOT: .unr
@@ -259,22 +381,22 @@
 preheader:                                 ; preds = %entry
   br label %header
 
-LoopExit:                                ; preds = %header, %latch
-  %tmp2.ph = phi i32 [ %tmp4, %header ], [ -1, %latch ]
-  br label %returnblock
-
-returnblock:                                         ; preds = %LoopExit, %entry
-  %tmp2 = phi i32 [ -1, %entry ], [ %tmp2.ph, %LoopExit ]
-  ret void
-
 header:                                           ; preds = %preheader, %latch
   %tmp4 = phi i32 [ %inc, %latch ], [ %arg, %preheader ]
   %inc = add nsw i32 %tmp4, 1
-  br i1 true, label %LoopExit, label %latch
+  br i1 true, label %latchExit, label %latch
 
 latch:                                            ; preds = %header
   %cmp = icmp slt i32 %inc, undef
-  br i1 %cmp, label %header, label %LoopExit
+  br i1 %cmp, label %header, label %latchExit
+
+latchExit:                                ; preds = %header, %latch
+  %tmp2.ph = phi i32 [ %tmp4, %header ], [ -1, %latch ]
+  br label %returnblock
+
+returnblock:                                         ; preds = %latchExit, %entry
+  %tmp2 = phi i32 [ -1, %entry ], [ %tmp2.ph, %latchExit ]
+  ret void
 }
 
 ; two exiting and two exit blocks.
diff --git a/test/Transforms/LoopVectorize/X86/force-ifcvt.ll b/test/Transforms/LoopVectorize/X86/force-ifcvt.ll
index 0076494..07b98b4 100644
--- a/test/Transforms/LoopVectorize/X86/force-ifcvt.ll
+++ b/test/Transforms/LoopVectorize/X86/force-ifcvt.ll
@@ -13,21 +13,21 @@
 for.body:                                         ; preds = %cond.end, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %cond.end ]
   %arrayidx = getelementptr inbounds i32, i32* %p, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !0
+  %0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !1
   %cmp1 = icmp eq i32 %0, 0
   %arrayidx3 = getelementptr inbounds i32, i32* %res, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx3, align 4, !llvm.mem.parallel_loop_access !0
+  %1 = load i32, i32* %arrayidx3, align 4, !llvm.access.group !1
   br i1 %cmp1, label %cond.end, label %cond.false
 
 cond.false:                                       ; preds = %for.body
   %arrayidx7 = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
-  %2 = load i32, i32* %arrayidx7, align 4, !llvm.mem.parallel_loop_access !0
+  %2 = load i32, i32* %arrayidx7, align 4, !llvm.access.group !1
   %add = add nsw i32 %2, %1
   br label %cond.end
 
 cond.end:                                         ; preds = %for.body, %cond.false
   %cond = phi i32 [ %add, %cond.false ], [ %1, %for.body ]
-  store i32 %cond, i32* %arrayidx3, align 4, !llvm.mem.parallel_loop_access !0
+  store i32 %cond, i32* %arrayidx3, align 4, !llvm.access.group !1
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 16
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
@@ -38,4 +38,5 @@
 
 attributes #0 = { norecurse nounwind uwtable "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" }
 
-!0 = distinct !{!0}
+!0 = distinct !{!0, !{!"llvm.loop.parallel_accesses", !1}}
+!1 = distinct !{}
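+;
+; Each parallel access above is tagged with !llvm.access.group pointing at the
+; distinct node !1, and the loop metadata !0 declares that group parallel via
+; !{!"llvm.loop.parallel_accesses", !1}.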
diff --git a/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll b/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
index c78bcdd..683e857 100644
--- a/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
+++ b/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
@@ -1,12 +1,9 @@
-; RUN: opt < %s  -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s  -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -S | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK-LABEL: @foo(
-; CHECK: <4 x i32>
-; CHECK: ret void
-
 ; PR15794
 ; incorrect addition of llvm.mem.parallel_loop_access metadata is undefined
 ; behaviour. Vectorizer ignores the memory dependency checks and goes ahead and
@@ -21,8 +18,98 @@
 ;   }
 ; }
 
-; Function Attrs: nounwind uwtable 
+; Function Attrs: nounwind uwtable
 define void @foo(i32* nocapture %a, i32* nocapture %b, i32 %k, i32 %m) #0 {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP27:%.*]] = icmp sgt i32 [[M:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP27]], label [[FOR_BODY3_LR_PH_US_PREHEADER:%.*]], label [[FOR_END15:%.*]]
+; CHECK:       for.body3.lr.ph.us.preheader:
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[M]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[K:%.*]] to i64
+; CHECK-NEXT:    br label [[FOR_BODY3_LR_PH_US:%.*]]
+; CHECK:       for.end.us:
+; CHECK-NEXT:    [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV33:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX9_US]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[ADD10_US:%.*]] = add nsw i32 [[TMP4]], 3
+; CHECK-NEXT:    store i32 [[ADD10_US]], i32* [[ARRAYIDX9_US]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[INDVARS_IV_NEXT34:%.*]] = add i64 [[INDVARS_IV33]], 1
+; CHECK-NEXT:    [[LFTR_WIDEIV35:%.*]] = trunc i64 [[INDVARS_IV_NEXT34]] to i32
+; CHECK-NEXT:    [[EXITCOND36:%.*]] = icmp eq i32 [[LFTR_WIDEIV35]], [[M]]
+; CHECK-NEXT:    br i1 [[EXITCOND36]], label [[FOR_END15_LOOPEXIT:%.*]], label [[FOR_BODY3_LR_PH_US]], !llvm.loop !2
+; CHECK:       for.body3.us:
+; CHECK-NEXT:    [[INDVARS_IV29:%.*]] = phi i64 [ [[BC_RESUME_VAL:%.*]], [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT30:%.*]], [[FOR_BODY3_US:%.*]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[INDVARS_IV29]] to i32
+; CHECK-NEXT:    [[ADD4_US:%.*]] = add i32 [[ADD_US:%.*]], [[TMP5]]
+; CHECK-NEXT:    [[IDXPROM_US:%.*]] = sext i32 [[ADD4_US]] to i64
+; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[ADD5_US:%.*]] = add nsw i32 [[TMP6]], 1
+; CHECK-NEXT:    store i32 [[ADD5_US]], i32* [[ARRAYIDX7_US:%.*]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[INDVARS_IV_NEXT30]] = add i64 [[INDVARS_IV29]], 1
+; CHECK-NEXT:    [[LFTR_WIDEIV31:%.*]] = trunc i64 [[INDVARS_IV_NEXT30]] to i32
+; CHECK-NEXT:    [[EXITCOND32:%.*]] = icmp eq i32 [[LFTR_WIDEIV31]], [[M]]
+; CHECK-NEXT:    br i1 [[EXITCOND32]], label [[FOR_END_US:%.*]], label [[FOR_BODY3_US]], !llvm.loop !3
+; CHECK:       for.body3.lr.ph.us:
+; CHECK-NEXT:    [[INDVARS_IV33]] = phi i64 [ [[INDVARS_IV_NEXT34]], [[FOR_END_US]] ], [ 0, [[FOR_BODY3_LR_PH_US_PREHEADER]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP3]], [[INDVARS_IV33]]
+; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; CHECK-NEXT:    [[TMP9:%.*]] = trunc i64 [[INDVARS_IV33]] to i32
+; CHECK-NEXT:    [[ADD_US]] = add i32 [[TMP9]], [[K]]
+; CHECK-NEXT:    [[ARRAYIDX7_US]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV33]]
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK:       vector.scevcheck:
+; CHECK-NEXT:    [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP0]])
+; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+; CHECK-NEXT:    [[TMP10:%.*]] = add i32 [[TMP8]], [[MUL_RESULT]]
+; CHECK-NEXT:    [[TMP11:%.*]] = sub i32 [[TMP8]], [[MUL_RESULT]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp sgt i32 [[TMP11]], [[TMP8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp slt i32 [[TMP10]], [[TMP8]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 false, i1 [[TMP12]], i1 [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = or i1 [[TMP14]], [[MUL_OVERFLOW]]
+; CHECK-NEXT:    [[TMP16:%.*]] = or i1 false, [[TMP15]]
+; CHECK-NEXT:    br i1 [[TMP16]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[INDEX]] to i32
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP17]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = add i32 [[ADD_US]], [[TMP18]]
+; CHECK-NEXT:    [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP20]]
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 0
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast i32* [[TMP22]] to <4 x i32>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP23]], align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <4 x i32> [[TMP24]], i32 0
+; CHECK-NEXT:    store i32 [[TMP25]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <4 x i32> [[TMP24]], i32 1
+; CHECK-NEXT:    store i32 [[TMP26]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <4 x i32> [[TMP24]], i32 2
+; CHECK-NEXT:    store i32 [[TMP27]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[TMP28:%.*]] = extractelement <4 x i32> [[TMP24]], i32 3
+; CHECK-NEXT:    store i32 [[TMP28]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !5
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END_US]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY3_LR_PH_US]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT:    br label [[FOR_BODY3_US]]
+; CHECK:       for.end15.loopexit:
+; CHECK-NEXT:    br label [[FOR_END15]]
+; CHECK:       for.end15:
+; CHECK-NEXT:    ret void
+;
 entry:
   %cmp27 = icmp sgt i32 %m, 0
   br i1 %cmp27, label %for.body3.lr.ph.us, label %for.end15
@@ -66,10 +153,46 @@
 
 ; Here we can see the vectorizer does the mem dep checks and decides it is
 ; unsafe to vectorize.
-; CHECK-LABEL: no-par-mem-metadata(
-; CHECK-NOT: <4 x i32>
-; CHECK:     ret void
 define void @no-par-mem-metadata(i32* nocapture %a, i32* nocapture %b, i32 %k, i32 %m) #0 {
+; CHECK-LABEL: @no-par-mem-metadata(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP27:%.*]] = icmp sgt i32 [[M:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP27]], label [[FOR_BODY3_LR_PH_US_PREHEADER:%.*]], label [[FOR_END15:%.*]]
+; CHECK:       for.body3.lr.ph.us.preheader:
+; CHECK-NEXT:    br label [[FOR_BODY3_LR_PH_US:%.*]]
+; CHECK:       for.end.us:
+; CHECK-NEXT:    [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV33:%.*]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX9_US]], align 4
+; CHECK-NEXT:    [[ADD10_US:%.*]] = add nsw i32 [[TMP0]], 3
+; CHECK-NEXT:    store i32 [[ADD10_US]], i32* [[ARRAYIDX9_US]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT34:%.*]] = add i64 [[INDVARS_IV33]], 1
+; CHECK-NEXT:    [[LFTR_WIDEIV35:%.*]] = trunc i64 [[INDVARS_IV_NEXT34]] to i32
+; CHECK-NEXT:    [[EXITCOND36:%.*]] = icmp eq i32 [[LFTR_WIDEIV35]], [[M]]
+; CHECK-NEXT:    br i1 [[EXITCOND36]], label [[FOR_END15_LOOPEXIT:%.*]], label [[FOR_BODY3_LR_PH_US]], !llvm.loop !2
+; CHECK:       for.body3.us:
+; CHECK-NEXT:    [[INDVARS_IV29:%.*]] = phi i64 [ 0, [[FOR_BODY3_LR_PH_US]] ], [ [[INDVARS_IV_NEXT30:%.*]], [[FOR_BODY3_US:%.*]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV29]] to i32
+; CHECK-NEXT:    [[ADD4_US:%.*]] = add i32 [[ADD_US:%.*]], [[TMP1]]
+; CHECK-NEXT:    [[IDXPROM_US:%.*]] = sext i32 [[ADD4_US]] to i64
+; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4
+; CHECK-NEXT:    [[ADD5_US:%.*]] = add nsw i32 [[TMP2]], 1
+; CHECK-NEXT:    store i32 [[ADD5_US]], i32* [[ARRAYIDX7_US:%.*]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT30]] = add i64 [[INDVARS_IV29]], 1
+; CHECK-NEXT:    [[LFTR_WIDEIV31:%.*]] = trunc i64 [[INDVARS_IV_NEXT30]] to i32
+; CHECK-NEXT:    [[EXITCOND32:%.*]] = icmp eq i32 [[LFTR_WIDEIV31]], [[M]]
+; CHECK-NEXT:    br i1 [[EXITCOND32]], label [[FOR_END_US:%.*]], label [[FOR_BODY3_US]], !llvm.loop !1
+; CHECK:       for.body3.lr.ph.us:
+; CHECK-NEXT:    [[INDVARS_IV33]] = phi i64 [ [[INDVARS_IV_NEXT34]], [[FOR_END_US]] ], [ 0, [[FOR_BODY3_LR_PH_US_PREHEADER]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[INDVARS_IV33]] to i32
+; CHECK-NEXT:    [[ADD_US]] = add i32 [[TMP3]], [[K:%.*]]
+; CHECK-NEXT:    [[ARRAYIDX7_US]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV33]]
+; CHECK-NEXT:    br label [[FOR_BODY3_US]]
+; CHECK:       for.end15.loopexit:
+; CHECK-NEXT:    br label [[FOR_END15]]
+; CHECK:       for.end15:
+; CHECK-NEXT:    ret void
+;
 entry:
   %cmp27 = icmp sgt i32 %m, 0
   br i1 %cmp27, label %for.body3.lr.ph.us, label %for.end15
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
index 631361c..0e6f064 100644
--- a/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
@@ -19,19 +19,19 @@
 for.body:                                         ; preds = %for.body.for.body_crit_edge, %entry
   %indvars.iv.reload = load i64, i64* %indvars.iv.reg2mem
   %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.reload
-  %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+  %0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !4
   %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.reload
-  %1 = load i32, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %1 = load i32, i32* %arrayidx2, align 4, !llvm.access.group !4
   %idxprom3 = sext i32 %1 to i64
   %arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
-  store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !3
+  store i32 %0, i32* %arrayidx4, align 4, !llvm.access.group !4
   %indvars.iv.next = add i64 %indvars.iv.reload, 1
   ; A new store without the parallel metadata here:
   store i64 %indvars.iv.next, i64* %indvars.iv.next.reg2mem
   %indvars.iv.next.reload1 = load i64, i64* %indvars.iv.next.reg2mem
   %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next.reload1
-  %2 = load i32, i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
-  store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %2 = load i32, i32* %arrayidx6, align 4, !llvm.access.group !4
+  store i32 %2, i32* %arrayidx2, align 4, !llvm.access.group !4
   %indvars.iv.next.reload = load i64, i64* %indvars.iv.next.reg2mem
   %lftr.wideiv = trunc i64 %indvars.iv.next.reload to i32
   %exitcond = icmp eq i32 %lftr.wideiv, 512
@@ -46,4 +46,5 @@
   ret void
 }
 
-!3 = !{!3}
+!3 = !{!3, !{!"llvm.loop.parallel_accesses", !4}}
+!4 = distinct !{}
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
index 53061ed..be77885 100644
--- a/test/Transforms/LoopVectorize/X86/parallel-loops.ll
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
@@ -51,18 +51,18 @@
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+  %0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !13
   %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %1 = load i32, i32* %arrayidx2, align 4, !llvm.access.group !13
   %idxprom3 = sext i32 %1 to i64
   %arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
   ; This store might have originated from inlining a function with a parallel
   ; loop. Refers to a list with the "original loop reference" (!4) also included.
-  store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !5
+  store i32 %0, i32* %arrayidx4, align 4, !llvm.access.group !15
   %indvars.iv.next = add i64 %indvars.iv, 1
   %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
-  %2 = load i32, i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
-  store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %2 = load i32, i32* %arrayidx6, align 4, !llvm.access.group !13
+  store i32 %2, i32* %arrayidx2, align 4, !llvm.access.group !13
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, 512
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
@@ -84,18 +84,18 @@
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !6
+  %0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !16
   %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
+  %1 = load i32, i32* %arrayidx2, align 4, !llvm.access.group !16
   %idxprom3 = sext i32 %1 to i64
   %arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
   ; This refers to the loop marked with !7 which we are not in at the moment.
   ; It should prevent detecting as a parallel loop.
-  store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !7
+  store i32 %0, i32* %arrayidx4, align 4, !llvm.access.group !17
   %indvars.iv.next = add i64 %indvars.iv, 1
   %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
-  %2 = load i32, i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !6
-  store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
+  %2 = load i32, i32* %arrayidx6, align 4, !llvm.access.group !16
+  store i32 %2, i32* %arrayidx2, align 4, !llvm.access.group !16
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, 512
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !6
@@ -104,8 +104,12 @@
   ret void
 }
 
-!3 = !{!3}
-!4 = !{!4}
-!5 = !{!3, !4}
-!6 = !{!6}
-!7 = !{!7}
+!3 = !{!3, !{!"llvm.loop.parallel_accesses", !13, !15}}
+!4 = !{!4, !{!"llvm.loop.parallel_accesses", !14, !15}}
+!6 = !{!6, !{!"llvm.loop.parallel_accesses", !16}}
+!7 = !{!7, !{!"llvm.loop.parallel_accesses", !17}}
+!13 = distinct !{}
+!14 = distinct !{}
+!15 = distinct !{}
+!16 = distinct !{}
+!17 = distinct !{}
diff --git a/test/Transforms/LoopVectorize/X86/pr34438.ll b/test/Transforms/LoopVectorize/X86/pr34438.ll
index 6699985..efbfecc 100644
--- a/test/Transforms/LoopVectorize/X86/pr34438.ll
+++ b/test/Transforms/LoopVectorize/X86/pr34438.ll
@@ -18,11 +18,11 @@
 for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
-  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !5
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
-  %1 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !5
   %add = fadd fast float %0, %1
-  store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  store float %add, float* %arrayidx2, align 4, !llvm.access.group !5
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 8
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !4
@@ -31,5 +31,6 @@
   ret void
 }
 
-!3 = !{!3}
+!3 = !{!3, !{!"llvm.loop.parallel_accesses", !5}}
 !4 = !{!4}
+!5 = distinct !{}
diff --git a/test/Transforms/LoopVectorize/X86/vect.omp.force.ll b/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
index c2a0fed..9011467 100644
--- a/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
+++ b/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
@@ -32,10 +32,10 @@
 for.body:
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
-  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !11
   %call = tail call float @llvm.sin.f32(float %0)
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
-  store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+  store float %call, float* %arrayidx2, align 4, !llvm.access.group !11
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, 1000
@@ -48,8 +48,9 @@
   ret void
 }
 
-!1 = !{!1, !2}
+!1 = !{!1, !2, !{!"llvm.loop.parallel_accesses", !11}}
 !2 = !{!"llvm.loop.vectorize.enable", i1 true}
+!11 = distinct !{}
 
 ;
 ; This method will not be vectorized, as scalar cost is lower than any of vector costs.
@@ -62,10 +63,10 @@
 for.body:
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
-  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !13
   %call = tail call float @llvm.sin.f32(float %0)
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
-  store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  store float %call, float* %arrayidx2, align 4, !llvm.access.group !13
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, 1000
@@ -81,5 +82,6 @@
 declare float @llvm.sin.f32(float) nounwind readnone
 
 ; Dummy metadata
-!3 = !{!3}
+!3 = !{!3, !{!"llvm.loop.parallel_accesses", !13}}
+!13 = distinct !{}
 
diff --git a/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
index 81f3113..dc6b8f0 100644
--- a/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
+++ b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -41,7 +41,7 @@
 ; CHECK-NEXT:    store <8 x float> [[TMP7]], <8 x float>* [[TMP8]], align 4
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 8
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
+; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !1
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 20, 16
 ; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -51,14 +51,14 @@
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.mem.parallel_loop_access !3
+; CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !0
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.mem.parallel_loop_access !3
+; CHECK-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !0
 ; CHECK-NEXT:    [[ADD:%.*]] = fadd fast float [[TMP10]], [[TMP11]]
-; CHECK-NEXT:    store float [[ADD]], float* [[ARRAYIDX2]], align 4, !llvm.mem.parallel_loop_access !3
+; CHECK-NEXT:    store float [[ADD]], float* [[ARRAYIDX2]], align 4, !llvm.access.group !0
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 20
-; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !4
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !5
 ; CHECK:       for.end:
 ; CHECK-NEXT:    ret void
 ;
@@ -68,11 +68,11 @@
 for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
-  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !11
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
-  %1 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !11
   %add = fadd fast float %0, %1
-  store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+  store float %add, float* %arrayidx2, align 4, !llvm.access.group !11
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 20
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
@@ -81,8 +81,9 @@
   ret void
 }
 
-!1 = !{!1, !2}
+!1 = !{!1, !2, !{!"llvm.loop.parallel_accesses", !11}}
 !2 = !{!"llvm.loop.vectorize.enable", i1 true}
+!11 = distinct !{}
 
 ;
 ; This loop will be vectorized as the trip count is below the threshold but no
@@ -114,7 +115,7 @@
 ; CHECK-NEXT:    call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> [[TMP7]], <8 x float>* [[TMP9]], i32 4, <8 x i1> [[TMP8]])
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
-; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !6
+; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !8
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK:       scalar.ph:
@@ -127,11 +128,11 @@
 for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
-  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !13
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
-  %1 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !13
   %add = fadd fast float %0, %1
-  store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  store float %add, float* %arrayidx2, align 4, !llvm.access.group !13
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 20
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
@@ -140,7 +141,8 @@
   ret void
 }
 
-!3 = !{!3}
+!3 = !{!3, !{!"llvm.loop.parallel_accesses", !13}}
+!13 = distinct !{}
 
 ;
 ; This loop will be vectorized as the trip count is below the threshold but no
@@ -171,7 +173,7 @@
 ; CHECK-NEXT:    store <8 x float> [[TMP7]], <8 x float>* [[TMP8]], align 4
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 8
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !9
+; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !11
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 16, 16
 ; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -181,14 +183,14 @@
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.mem.parallel_loop_access !7
+; CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !7
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.mem.parallel_loop_access !7
+; CHECK-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !7
 ; CHECK-NEXT:    [[ADD:%.*]] = fadd fast float [[TMP10]], [[TMP11]]
-; CHECK-NEXT:    store float [[ADD]], float* [[ARRAYIDX2]], align 4, !llvm.mem.parallel_loop_access !7
+; CHECK-NEXT:    store float [[ADD]], float* [[ARRAYIDX2]], align 4, !llvm.access.group !7
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 16
-; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !10
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !12
 ; CHECK:       for.end:
 ; CHECK-NEXT:    ret void
 ;
@@ -198,11 +200,11 @@
 for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
-  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !13
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
-  %1 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !13
   %add = fadd fast float %0, %1
-  store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  store float %add, float* %arrayidx2, align 4, !llvm.access.group !13
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 16
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !4
diff --git a/test/Transforms/LoopVectorize/X86/vector_max_bandwidth.ll b/test/Transforms/LoopVectorize/X86/vector_max_bandwidth.ll
index 34c6d74..2d2082e 100644
--- a/test/Transforms/LoopVectorize/X86/vector_max_bandwidth.ll
+++ b/test/Transforms/LoopVectorize/X86/vector_max_bandwidth.ll
@@ -58,11 +58,11 @@
 for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %B, i64 %indvars.iv
-  %l1 = load i8, i8* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+  %l1 = load i8, i8* %arrayidx, align 4, !llvm.access.group !13
   %arrayidx2 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv
-  %l2 = load i8, i8* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  %l2 = load i8, i8* %arrayidx2, align 4, !llvm.access.group !13
   %add = add i8 %l1, %l2
-  store i8 %add, i8* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+  store i8 %add, i8* %arrayidx2, align 4, !llvm.access.group !13
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 16
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !4
@@ -70,5 +70,6 @@
 for.end:
   ret void
 }
-!3 = !{!3}
+!3 = !{!3, !{!"llvm.loop.parallel_accesses", !13}}
 !4 = !{!4}
+!13 = distinct !{}
diff --git a/test/Transforms/LoopVectorize/libcall-remark.ll b/test/Transforms/LoopVectorize/libcall-remark.ll
new file mode 100644
index 0000000..a1a7e46
--- /dev/null
+++ b/test/Transforms/LoopVectorize/libcall-remark.ll
@@ -0,0 +1,52 @@
+; RUN: opt -S -loop-vectorize < %s 2>&1 -pass-remarks-analysis=.* | FileCheck %s
+
+; Test the optimization remark emitter's recognition of a math library
+; function vs. an arbitrary function.
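+;
+; (sqrtf below is presumably recognized through TargetLibraryInfo as a known
+; math library function, so the vectorizer emits the more specific "library
+; call" remark for it, while @arbitrary is unknown to TLI and gets the
+; generic "call instruction" remark.)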
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.14.0"
+@data = external local_unnamed_addr global [32768 x float], align 16
+
+; CHECK: loop not vectorized: library call cannot be vectorized
+
+define void @libcall_blocks_vectorization() {
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds [32768 x float], [32768 x float]* @data, i64 0, i64 %indvars.iv
+  %t0 = load float, float* %arrayidx, align 4
+  %sqrtf = tail call float @sqrtf(float %t0)
+  store float %sqrtf, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 32768
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; CHECK: loop not vectorized: call instruction cannot be vectorized
+
+define void @arbitrary_call_blocks_vectorization() {
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds [32768 x float], [32768 x float]* @data, i64 0, i64 %indvars.iv
+  %t0 = load float, float* %arrayidx, align 4
+  %sqrtf = tail call float @arbitrary(float %t0)
+  store float %sqrtf, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 32768
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+declare float @sqrtf(float)
+declare float @arbitrary(float)
+
diff --git a/test/Transforms/LoopVectorize/runtime-check.ll b/test/Transforms/LoopVectorize/runtime-check.ll
index 88489fa..332e3ef 100644
--- a/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/test/Transforms/LoopVectorize/runtime-check.ll
@@ -117,6 +117,48 @@
   ret void
 }
 
+; Check that we do not generate runtime checks if we find a known dependence
+; preventing vectorization. In this case, it is a read of c[i-1] followed by
+; a write of c[i]. The runtime checks would always fail.
+
+; void test_runtime_check2(float *a, float b, unsigned offset, unsigned offset2, unsigned n, float *c) {
+;   for (unsigned i = 1; i < n; i++) {
+;     a[i+offset] += b * a[i+offset2];
+;     c[i] = c[i-1] + 1.0f;
+;   }
+; }
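+;
+; (Iteration i reads c[i-1], the value written by iteration i-1: a
+; loop-carried dependence at distance 1 within a single array, which no
+; runtime overlap check between distinct pointers can make safe.)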
+;
+; CHECK-LABEL: test_runtime_check2
+; CHECK-NOT:      <4 x float>
+define void @test_runtime_check2(float* %a, float %b, i64 %offset, i64 %offset2, i64 %n, float* %c) {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %ind.sum = add i64 %iv, %offset
+  %arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
+  %l1 = load float, float* %arr.idx, align 4
+  %ind.sum2 = add i64 %iv, %offset2
+  %arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
+  %l2 = load float, float* %arr.idx2, align 4
+  %m = fmul fast float %b, %l2
+  %ad = fadd fast float %l1, %m
+  store float %ad, float* %arr.idx, align 4
+  %c.ind = add i64 %iv, -1
+  %c.idx = getelementptr inbounds float, float* %c, i64 %c.ind
+  %lc = load float, float* %c.idx, align 4
+  %vc = fadd float %lc, 1.0
+  %c.idx2 = getelementptr inbounds float, float* %c, i64 %iv
+  store float %vc, float* %c.idx2
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond = icmp eq i64 %iv.next, %n
+  br i1 %exitcond, label %loopexit, label %for.body
+
+loopexit:
+  ret void
+}
+
 ; CHECK: !9 = !DILocation(line: 101, column: 1, scope: !{{.*}})
 
 !llvm.module.flags = !{!0, !1}
diff --git a/test/Transforms/MemCpyOpt/stackrestore.ll b/test/Transforms/MemCpyOpt/stackrestore.ll
new file mode 100644
index 0000000..4bead33
--- /dev/null
+++ b/test/Transforms/MemCpyOpt/stackrestore.ll
@@ -0,0 +1,74 @@
+; RUN: opt -S -memcpyopt < %s | FileCheck %s
+
+; PR40118: BasicAA didn't realize that stackrestore ends the lifetime of
+; unescaped dynamic allocas, such as those that might come from inalloca.
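+;
+; (Rough intuition, not a definitive statement of the fix: the callee's
+; inalloca argument area is carved out between stacksave and stackrestore,
+; so once the restore runs, memory reached through %argmem below may be
+; reused by later allocations and must not be read from again.)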
+
+source_filename = "t.cpp"
+target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+target triple = "i686-unknown-windows-msvc19.14.26433"
+
+@str = internal constant [9 x i8] c"abcdxxxxx"
+
+
+; Test that we can propagate memcpy through an unescaped dynamic alloca across
+; a call to @external.
+
+define i32 @test_norestore(i32 %n) {
+  %tmpmem = alloca [10 x i8], align 4
+  %tmp = getelementptr inbounds [10 x i8], [10 x i8]* %tmpmem, i32 0, i32 0
+
+  ; Make a dynamic alloca, initialize it.
+  %p = alloca i8, i32 %n, align 4
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* align 1 getelementptr inbounds ([9 x i8], [9 x i8]* @str, i32 0, i32 0), i32 9, i1 false)
+
+  ; This extra byte exists to prevent memcpyopt from propagating @str.
+  %p10 = getelementptr inbounds i8, i8* %p, i32 9
+  store i8 0, i8* %p10
+
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %p, i32 10, i1 false)
+  call void @external()
+  %heap = call i8* @malloc(i32 9)
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %heap, i8* %tmp, i32 9, i1 false)
+  call void @useit(i8* %heap)
+  ret i32 0
+}
+
+; CHECK-LABEL: define i32 @test_norestore(i32 %n)
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* align 1 getelementptr inbounds ([9 x i8], [9 x i8]* @str, i32 0, i32 0), i32 9, i1 false)
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %p, i32 10, i1 false)
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %heap, i8* %p, i32 9, i1 false)
+
+
+; Do not propagate memcpy from %p across the stackrestore.
+
+define i32 @test_stackrestore() {
+  %tmpmem = alloca [10 x i8], align 4
+  %tmp = getelementptr inbounds [10 x i8], [10 x i8]* %tmpmem, i32 0, i32 0
+  %inalloca.save = tail call i8* @llvm.stacksave()
+  %argmem = alloca inalloca [10 x i8], align 4
+  %p = getelementptr inbounds [10 x i8], [10 x i8]* %argmem, i32 0, i32 0
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* align 1 getelementptr inbounds ([9 x i8], [9 x i8]* @str, i32 0, i32 0), i32 9, i1 false)
+
+  ; This extra byte exists to prevent memcpyopt from propagating @str.
+  %p10 = getelementptr inbounds [10 x i8], [10 x i8]* %argmem, i32 0, i32 9
+  store i8 0, i8* %p10
+
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %p, i32 10, i1 false)
+  call void @llvm.stackrestore(i8* %inalloca.save)
+  %heap = call i8* @malloc(i32 9)
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %heap, i8* %tmp, i32 9, i1 false)
+  call void @useit(i8* %heap)
+  ret i32 0
+}
+
+; CHECK-LABEL: define i32 @test_stackrestore()
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* align 1 getelementptr inbounds ([9 x i8], [9 x i8]* @str, i32 0, i32 0), i32 9, i1 false)
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %p, i32 10, i1 false)
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %heap, i8* %tmp, i32 9, i1 false)
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1)
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+declare i8* @malloc(i32)
+declare void @useit(i8*)
+declare void @external()
diff --git a/test/Transforms/MergeFunc/linkonce_odr.ll b/test/Transforms/MergeFunc/linkonce_odr.ll
index 1ad0d72..825f905 100644
--- a/test/Transforms/MergeFunc/linkonce_odr.ll
+++ b/test/Transforms/MergeFunc/linkonce_odr.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -mergefunc < %s | FileCheck %s
+; RUN: opt -S -mergefunc < %s | FileCheck %s -implicit-check-not=funC
 
 ; Replacements should be totally ordered on the function name.
 ; If we don't do this we can end up with one module defining a thunk for @funA
@@ -15,6 +15,13 @@
 ; CHECK-NEXT:    tail call i32 @funA(i32 %0, i32 %1)
 ; CHECK-NEXT:    ret
 
+define linkonce_odr i32 @funC(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %x, %sum
+  %sum3 = add i32 %x, %sum2
+  ret i32 %sum3
+}
+
 define linkonce_odr i32 @funB(i32 %x, i32 %y) {
   %sum = add i32 %x, %y
   %sum2 = add i32 %x, %sum
@@ -28,3 +35,8 @@
   %sum3 = add i32 %x, %sum2
   ret i32 %sum3
 }
+
+; This creates a use of @funB, preventing -mergefunc from deleting it.
+; @funC, however, has no uses and, being linkonce_odr, is discardable if
+; unused, so it can safely be deleted.
+@take_addr_of_funB = global i8* bitcast (i32 (i32, i32)* @funB to i8*)
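+
+; With replacements totally ordered on the name, every module merging this
+; group of identical functions should pick the same canonical definition, so
+; two modules can never each emit a thunk pointing at the other's copy.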
diff --git a/test/Transforms/MergeFunc/nonzero-address-spaces.ll b/test/Transforms/MergeFunc/nonzero-address-spaces.ll
new file mode 100644
index 0000000..3ee887c
--- /dev/null
+++ b/test/Transforms/MergeFunc/nonzero-address-spaces.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -mergefunc < %s | FileCheck %s
+
+; MergeFunctions should respect the default function address
+; space specified in the data layout.
+
+target datalayout = "e-P1-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
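+
+; ("P1" in the datalayout above declares program address space 1, i.e.
+; functions live in addrspace(1); the thunk MergeFunctions emits for @f1
+; therefore has to call @f0 through addrspace(1), as checked below.)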
+
+declare void @stuff()
+
+; CHECK-LABEL: @f0(
+define void @f0(i64 %p0) {
+entry:
+  call void @stuff()
+  call void @stuff()
+  call void @stuff()
+  ret void
+}
+
+; CHECK-LABEL: @f1(
+; CHECK: ptrtoint i64*
+; CHECK: tail call addrspace(1) void @f0(i64
+
+define void @f1(i64* %p0) {
+entry:
+  call void @stuff()
+  call void @stuff()
+  call void @stuff()
+  ret void
+}
+
diff --git a/test/Transforms/NewGVN/eliminate-ssacopy.ll b/test/Transforms/NewGVN/eliminate-ssacopy.ll
new file mode 100644
index 0000000..57bed02
--- /dev/null
+++ b/test/Transforms/NewGVN/eliminate-ssacopy.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -newgvn -S < %s | FileCheck %s
+
+; Make sure the created ssa copies are cleaned up. See PR38804.
+
+; CHECK-NOT: ssa_copy
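+;
+; (NewGVN uses PredicateInfo, which inserts llvm.ssa.copy intrinsic calls as
+; internal bookkeeping; PR38804 concerned those copies surviving into the
+; pass output, which the CHECK-NOT above guards against.)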
+
+@b = external dso_local local_unnamed_addr global i32, align 4
+@a = external dso_local local_unnamed_addr global i8, align 1
+@f = external dso_local local_unnamed_addr global i16, align 2
+
+define void @g() {
+; CHECK-LABEL: @g(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[FOR_COND1THREAD_PRE_SPLIT:%.*]], label [[FOR_COND_PREHEADER:%.*]]
+; CHECK:       for.cond.preheader:
+; CHECK-NEXT:    unreachable
+; CHECK:       for.cond1thread-pre-split:
+; CHECK-NEXT:    br label [[FOR_END4_SPLIT:%.*]]
+; CHECK:       for.end4.split:
+; CHECK-NEXT:    br i1 true, label [[FOR_COND6_PREHEADER:%.*]], label [[IF_END11:%.*]]
+; CHECK:       for.cond6.preheader:
+; CHECK-NEXT:    br i1 undef, label [[FOR_COND6_PREHEADER3:%.*]], label [[IF_END11_LOOPEXIT:%.*]]
+; CHECK:       for.cond6.preheader3:
+; CHECK-NEXT:    br label [[IF_END11_LOOPEXIT]]
+; CHECK:       if.end11.loopexit:
+; CHECK-NEXT:    [[STOREMERGE_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND6_PREHEADER]] ], [ 1, [[FOR_COND6_PREHEADER3]] ]
+; CHECK-NEXT:    store i32 [[STOREMERGE_LCSSA]], i32* @b, align 4
+; CHECK-NEXT:    br label [[IF_END11]]
+; CHECK:       if.end11:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @b, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* @a, align 1
+; CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[CMP12:%.*]] = icmp eq i32 [[TMP0]], [[CONV]]
+; CHECK-NEXT:    br i1 [[CMP12]], label [[IF_THEN14:%.*]], label [[IF_END16:%.*]]
+; CHECK:       if.then14:
+; CHECK-NEXT:    [[CONV15:%.*]] = trunc i32 [[TMP0]] to i16
+; CHECK-NEXT:    store i16 [[CONV15]], i16* @f, align 2
+; CHECK-NEXT:    unreachable
+; CHECK:       if.end16:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %tobool = icmp eq i32 undef, 0
+  br i1 %tobool, label %for.cond1thread-pre-split, label %for.cond.preheader
+
+for.cond.preheader:                               ; preds = %entry
+  unreachable
+
+for.cond1thread-pre-split:                        ; preds = %entry
+  br label %for.end4.split
+
+for.end4.split:                                   ; preds = %for.cond1thread-pre-split
+  br i1 %tobool, label %for.cond6.preheader, label %if.end11
+
+for.cond6.preheader:                              ; preds = %for.end4.split
+  br i1 undef, label %for.cond6.preheader3, label %if.end11.loopexit
+
+for.cond6.preheader3:                             ; preds = %for.cond6.preheader
+  br label %if.end11.loopexit
+
+if.end11.loopexit:                                ; preds = %for.cond6.preheader3, %for.cond6.preheader
+  %storemerge.lcssa = phi i32 [ 0, %for.cond6.preheader ], [ 1, %for.cond6.preheader3 ]
+  store i32 %storemerge.lcssa, i32* @b, align 4
+  br label %if.end11
+
+if.end11:                                         ; preds = %if.end11.loopexit, %for.end4.split
+  %0 = load i32, i32* @b, align 4
+  %1 = load i8, i8* @a, align 1
+  %conv = sext i8 %1 to i32
+  %cmp12 = icmp eq i32 %0, %conv
+  br i1 %cmp12, label %if.then14, label %if.end16
+
+if.then14:                                        ; preds = %if.end11
+  %conv15 = trunc i32 %0 to i16
+  store i16 %conv15, i16* @f, align 2
+  unreachable
+
+if.end16:                                         ; preds = %if.end11
+  ret void
+}
diff --git a/test/Transforms/NewGVN/memory-handling.ll b/test/Transforms/NewGVN/memory-handling.ll
index 12e882b..dfb52d6 100644
--- a/test/Transforms/NewGVN/memory-handling.ll
+++ b/test/Transforms/NewGVN/memory-handling.ll
@@ -176,7 +176,7 @@
 
 !llvm.ident = !{!0}
 
-!0 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 9b9db7fa41a1905899dbcbcc6cbdd05d2511da8e) (/Users/dannyb/sources/llvm-clean a3908a41623f6ac14ba8c04613d6c64e0544bb5d)"}
+!0 = !{!"clang version 4.0.0"}
 !1 = !{!2, !2, i64 0}
 !2 = !{!"int", !3, i64 0}
 !3 = !{!"omnipotent char", !4, i64 0}
diff --git a/test/Transforms/NewGVN/phi-edge-handling.ll b/test/Transforms/NewGVN/phi-edge-handling.ll
index 6451006..890958c 100644
--- a/test/Transforms/NewGVN/phi-edge-handling.ll
+++ b/test/Transforms/NewGVN/phi-edge-handling.ll
@@ -55,6 +55,6 @@
 
 !llvm.ident = !{!0}
 
-!0 = !{!"clang version 5.0.0 (http://llvm.org/git/clang.git a8b933d4d1d133594fdaed35ee5814514b738f6d) (/Users/dannyb/sources/llvm-clean fc630a9b5613f544c07a8f16abcc173793df62cf)"}
+!0 = !{!"clang version 5.0.0"}
 !1 = distinct !{!1, !2}
 !2 = !{!"llvm.loop.unroll.disable"}
diff --git a/test/Transforms/NewGVN/pr31501.ll b/test/Transforms/NewGVN/pr31501.ll
index 7122ade..bc95969 100644
--- a/test/Transforms/NewGVN/pr31501.ll
+++ b/test/Transforms/NewGVN/pr31501.ll
@@ -123,7 +123,7 @@
 !llvm.ident = !{!1}
 
 !0 = !{i32 1, !"PIC Level", i32 2}
-!1 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git b63fa9e2bb8aac0a80c3e3467991c6b1a4b01e6a) (llvm/trunk 290779)"}
+!1 = !{!"clang version 4.0.0"}
 !2 = !{!3, !4, i64 0}
 !3 = !{!"_ZTSN4llvm15SmallVectorBaseE", !4, i64 0, !4, i64 8, !4, i64 16}
 !4 = !{!"any pointer", !5, i64 0}
diff --git a/test/Transforms/NewGVN/pr33305.ll b/test/Transforms/NewGVN/pr33305.ll
index 96717b8..9cbc10c 100644
--- a/test/Transforms/NewGVN/pr33305.ll
+++ b/test/Transforms/NewGVN/pr33305.ll
@@ -176,7 +176,7 @@
 
 !0 = !{i32 1, !"wchar_size", i32 4}
 !1 = !{i32 7, !"PIC Level", i32 2}
-!2 = !{!"clang version 5.0.0 (http://llvm.org/git/clang.git e97b4dda83fd49e0218ea06ba4e37796a81b2027) (/Users/dannyb/sources/llvm-clean b38f051979e4ac2aa6513e40046d120fd472cb96)"}
+!2 = !{!"clang version 5.0.0"}
 !3 = !{!4, !4, i64 0}
 !4 = !{!"int", !5, i64 0}
 !5 = !{!"omnipotent char", !6, i64 0}
diff --git a/test/Transforms/NewGVN/pr34430.ll b/test/Transforms/NewGVN/pr34430.ll
index 81d1837..0b59dfc 100644
--- a/test/Transforms/NewGVN/pr34430.ll
+++ b/test/Transforms/NewGVN/pr34430.ll
@@ -45,4 +45,4 @@
 
 !llvm.ident = !{!0}
 
-!0 = !{!"clang version 6.0.0 (http://llvm.org/git/clang.git e649d902285b23af8ba58cb92a739f3bad2723df) (/Users/dannyb/sources/llvm-clean 0abfd30028cbb294ff2c2dd5e2df4ec3fdb6c591)"}
+!0 = !{!"clang version 6.0.0"}
diff --git a/test/Transforms/ObjCARC/allocas.ll b/test/Transforms/ObjCARC/allocas.ll
index 5074256..bf2039d 100644
--- a/test/Transforms/ObjCARC/allocas.ll
+++ b/test/Transforms/ObjCARC/allocas.ll
@@ -1,13 +1,13 @@
 ; RUN: opt -objc-arc -S < %s | FileCheck %s
 
-declare i8* @objc_retain(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare void @objc_release(i8*)
-declare i8* @objc_autorelease(i8*)
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare void @objc_autoreleasePoolPop(i8*)
-declare i8* @objc_autoreleasePoolPush()
-declare i8* @objc_retainBlock(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare void @llvm.objc.autoreleasePoolPop(i8*)
+declare i8* @llvm.objc.autoreleasePoolPush()
+declare i8* @llvm.objc.retainBlock(i8*)
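+
+; (This test now calls the ARC runtime entry points through their
+; llvm.objc.* intrinsic names; the objc_retainedObject-style declarations
+; below are left under their original names.)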
 
 declare i8* @objc_retainedObject(i8*)
 declare i8* @objc_unretainedObject(i8*)
@@ -25,7 +25,7 @@
 
 declare void @llvm.dbg.value(metadata, metadata, metadata)
 
-declare i8* @objc_msgSend(i8*, i8*, ...)
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...)
 
 
 ; In the presence of allocas, unconditionally remove retain/release pairs only
@@ -44,77 +44,77 @@
 ; rdar://13750319
 
 ; CHECK: define void @test1a(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test1a(i8* %x) {
 entry:
   %A = alloca i8*
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   store i8* %x, i8** %A, align 8
   %y = load i8*, i8** %A
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK: define void @test1b(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test1b(i8* %x) {
 entry:
   %A = alloca i8*
   %gep = getelementptr i8*, i8** %A, i32 0
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   store i8* %x, i8** %gep, align 8
   %y = load i8*, i8** %A
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 
 ; CHECK: define void @test1c(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test1c(i8* %x) {
 entry:
   %A = alloca i8*, i32 3
   %gep = getelementptr i8*, i8** %A, i32 2
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   store i8* %x, i8** %gep, align 8
   %y = load i8*, i8** %gep
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 
 ; CHECK: define void @test1d(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test1d(i8* %x) {
@@ -132,22 +132,22 @@
 exit:
   %A = phi i8** [ %allocaA, %use_allocaA ], [ %allocaB, %use_allocaB ]
   %gep = getelementptr i8*, i8** %A, i32 0
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   store i8* %x, i8** %gep, align 8
   %y = load i8*, i8** %gep
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK: define void @test1e(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test1e(i8* %x) {
@@ -165,22 +165,22 @@
 exit:
   %A = phi i8** [ %allocaA, %use_allocaA ], [ %allocaB, %use_allocaB ]
   %gep = getelementptr i8*, i8** %A, i32 2
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   store i8* %x, i8** %gep, align 8
   %y = load i8*, i8** %gep
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK: define void @test1f(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test1f(i8* %x) {
@@ -188,14 +188,14 @@
   %allocaOne = alloca i8*
   %allocaTwo = alloca i8*
   %A = select i1 undef, i8** %allocaOne, i8** %allocaTwo
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   store i8* %x, i8** %A, align 8
   %y = load i8*, i8** %A
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
@@ -204,10 +204,10 @@
 
 
 ; CHECK: define void @test2a(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test2a(i8* %x) {
@@ -224,20 +224,20 @@
   br label %bb3
 
 bb3:
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK: define void @test2b(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test2b(i8* %x) {
@@ -256,20 +256,20 @@
   br label %bb3
 
 bb3:
-  tail call i8* @objc_retain(i8* %x)
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK: define void @test2c(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test2c(i8* %x) {
@@ -279,7 +279,7 @@
   store i8* %x, i8** %gep1, align 8
   %gep2 = getelementptr i8*, i8** %A, i32 2
   %y = load i8*, i8** %gep2
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   br label %bb1
 
 bb1:
@@ -289,24 +289,24 @@
   br label %bb3
 
 bb3:
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK: define void @test2d(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %y)
-; CHECK: @objc_release(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %y)
+; CHECK: @llvm.objc.release(i8* %x)
 ; CHECK: ret void
 ; CHECK: }
 define void @test2d(i8* %x) {
 entry:
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   br label %bb1
 
 bb1:
@@ -328,11 +328,11 @@
 bb3:
   %A = phi i8** [ %Abb1, %bb1 ], [ %Abb2, %bb2 ]
   %y = phi i8* [ %ybb1, %bb1 ], [ %ybb2, %bb2 ]
-  tail call i8* @objc_retain(i8* %x)
+  tail call i8* @llvm.objc.retain(i8* %x)
   call void @use_alloca(i8** %A)
-  call void @objc_release(i8* %y), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %y), !clang.imprecise_release !0
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
@@ -344,21 +344,21 @@
 
 ; CHECK: define void @test3a() {
 ; CHECK: entry:
-; CHECK:   @objc_retainAutoreleasedReturnValue
-; CHECK:   @objc_retain
-; CHECK:   @objc_retain
-; CHECK:   @objc_retain
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retainAutoreleasedReturnValue
+; CHECK:   @llvm.objc.retain
+; CHECK:   @llvm.objc.retain
+; CHECK:   @llvm.objc.retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: arraydestroy.body:
-; CHECK:   @objc_release
-; CHECK-NOT: @objc_release
+; CHECK:   @llvm.objc.release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: arraydestroy.done:
-; CHECK-NOT: @objc_release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: arraydestroy.body1:
-; CHECK:   @objc_release
-; CHECK-NOT: @objc_release
+; CHECK:   @llvm.objc.release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: arraydestroy.done1:
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: ret void
 ; CHECK: }
 define void @test3a() {
@@ -367,22 +367,22 @@
   %objs = alloca [2 x i8*], align 16
   
   %call1 = call i8* @returner()
-  %tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call1)
+  %tmp0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1)
 
   %objs.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
-  tail call i8* @objc_retain(i8* %call1)
+  tail call i8* @llvm.objc.retain(i8* %call1)
   store i8* %call1, i8** %objs.begin, align 8
   %objs.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 1
-  tail call i8* @objc_retain(i8* %call1)
+  tail call i8* @llvm.objc.retain(i8* %call1)
   store i8* %call1, i8** %objs.elt
 
   %call2 = call i8* @returner1()
   %call3 = call i8* @returner2()
   %keys.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
-  tail call i8* @objc_retain(i8* %call2)
+  tail call i8* @llvm.objc.retain(i8* %call2)
   store i8* %call2, i8** %keys.begin, align 8
   %keys.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 1
-  tail call i8* @objc_retain(i8* %call3)
+  tail call i8* @llvm.objc.retain(i8* %call3)
   store i8* %call3, i8** %keys.elt  
   
   %gep = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 2
@@ -392,7 +392,7 @@
   %arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
   %arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
   %destroy_tmp = load i8*, i8** %arraydestroy.element, align 8
-  call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %destroy_tmp), !clang.imprecise_release !0
   %objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
   %arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
   br i1 %arraydestroy.cmp, label %arraydestroy.done, label %arraydestroy.body
@@ -405,13 +405,13 @@
   %arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
   %arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
   %destroy_tmp1 = load i8*, i8** %arraydestroy.element1, align 8
-  call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %destroy_tmp1), !clang.imprecise_release !0
   %keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
   %arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
   br i1 %arraydestroy.cmp1, label %arraydestroy.done1, label %arraydestroy.body1
 
 arraydestroy.done1:
-  call void @objc_release(i8* %call1), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %call1), !clang.imprecise_release !0
   ret void
 }
 
@@ -422,21 +422,21 @@
 
 ; CHECK: define void @test3b() {
 ; CHECK: entry:
-; CHECK:   @objc_retainAutoreleasedReturnValue
-; CHECK:   @objc_retain
-; CHECK:   @objc_retain
-; CHECK:   @objc_retain
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retainAutoreleasedReturnValue
+; CHECK:   @llvm.objc.retain
+; CHECK:   @llvm.objc.retain
+; CHECK:   @llvm.objc.retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: arraydestroy.body:
-; CHECK:   @objc_release
-; CHECK-NOT: @objc_release
+; CHECK:   @llvm.objc.release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: arraydestroy.done:
-; CHECK-NOT: @objc_release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: arraydestroy.body1:
-; CHECK:   @objc_release
-; CHECK-NOT: @objc_release
+; CHECK:   @llvm.objc.release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: arraydestroy.done1:
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: ret void
 ; CHECK: }
 define void @test3b() {
@@ -445,23 +445,23 @@
   %objs = alloca [2 x i8*], align 16
   
   %call1 = call i8* @returner()
-  %tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call1)
-  %tmp1 = tail call i8* @objc_retain(i8* %call1)
+  %tmp0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1)
+  %tmp1 = tail call i8* @llvm.objc.retain(i8* %call1)
 
   %objs.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
-  tail call i8* @objc_retain(i8* %call1)
+  tail call i8* @llvm.objc.retain(i8* %call1)
   store i8* %call1, i8** %objs.begin, align 8
   %objs.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 1
-  tail call i8* @objc_retain(i8* %call1)
+  tail call i8* @llvm.objc.retain(i8* %call1)
   store i8* %call1, i8** %objs.elt
 
   %call2 = call i8* @returner1()
   %call3 = call i8* @returner2()
   %keys.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
-  tail call i8* @objc_retain(i8* %call2)
+  tail call i8* @llvm.objc.retain(i8* %call2)
   store i8* %call2, i8** %keys.begin, align 8
   %keys.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 1
-  tail call i8* @objc_retain(i8* %call3)
+  tail call i8* @llvm.objc.retain(i8* %call3)
   store i8* %call3, i8** %keys.elt  
   
   %gep = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 2
@@ -471,7 +471,7 @@
   %arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
   %arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
   %destroy_tmp = load i8*, i8** %arraydestroy.element, align 8
-  call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %destroy_tmp), !clang.imprecise_release !0
   %objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
   %arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
   br i1 %arraydestroy.cmp, label %arraydestroy.done, label %arraydestroy.body
@@ -484,14 +484,14 @@
   %arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
   %arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
   %destroy_tmp1 = load i8*, i8** %arraydestroy.element1, align 8
-  call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %destroy_tmp1), !clang.imprecise_release !0
   %keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
   %arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
   br i1 %arraydestroy.cmp1, label %arraydestroy.done1, label %arraydestroy.body1
 
 arraydestroy.done1:
-  call void @objc_release(i8* %call1), !clang.imprecise_release !0
-  call void @objc_release(i8* %call1), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %call1), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %call1), !clang.imprecise_release !0
   ret void
 }
 
diff --git a/test/Transforms/ObjCARC/apelim.ll b/test/Transforms/ObjCARC/apelim.ll
index 14412c6..da3a1f4 100644
--- a/test/Transforms/ObjCARC/apelim.ll
+++ b/test/Transforms/ObjCARC/apelim.ll
@@ -31,25 +31,25 @@
 ; CHECK: }
 define internal void @_GLOBAL__I_x() {
 entry:
-  %0 = call i8* @objc_autoreleasePoolPush() nounwind
+  %0 = call i8* @llvm.objc.autoreleasePoolPush() nounwind
   call void @__cxx_global_var_init()
-  call void @objc_autoreleasePoolPop(i8* %0) nounwind
+  call void @llvm.objc.autoreleasePoolPop(i8* %0) nounwind
   ret void
 }
 
 ; CHECK: define internal void @_GLOBAL__I_y() {
-; CHECK: %0 = call i8* @objc_autoreleasePoolPush() [[NUW:#[0-9]+]]
-; CHECK: call void @objc_autoreleasePoolPop(i8* %0) [[NUW]]
+; CHECK: %0 = call i8* @llvm.objc.autoreleasePoolPush() [[NUW:#[0-9]+]]
+; CHECK: call void @llvm.objc.autoreleasePoolPop(i8* %0) [[NUW]]
 ; CHECK: }
 define internal void @_GLOBAL__I_y() {
 entry:
-  %0 = call i8* @objc_autoreleasePoolPush() nounwind
+  %0 = call i8* @llvm.objc.autoreleasePoolPush() nounwind
   call void @__dxx_global_var_init()
-  call void @objc_autoreleasePoolPop(i8* %0) nounwind
+  call void @llvm.objc.autoreleasePoolPop(i8* %0) nounwind
   ret void
 }
 
-declare i8* @objc_autoreleasePoolPush()
-declare void @objc_autoreleasePoolPop(i8*)
+declare i8* @llvm.objc.autoreleasePoolPush()
+declare void @llvm.objc.autoreleasePoolPop(i8*)
 
 ; CHECK: attributes #0 = { nounwind }
diff --git a/test/Transforms/ObjCARC/basic.ll b/test/Transforms/ObjCARC/basic.ll
index 70b83b9..6524dad 100644
--- a/test/Transforms/ObjCARC/basic.ll
+++ b/test/Transforms/ObjCARC/basic.ll
@@ -2,19 +2,19 @@
 
 target datalayout = "e-p:64:64:64"
 
-declare i8* @objc_retain(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8*)
-declare void @objc_release(i8*)
-declare i8* @objc_autorelease(i8*)
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare void @objc_autoreleasePoolPop(i8*)
-declare i8* @objc_autoreleasePoolPush()
-declare i8* @objc_retainBlock(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare void @llvm.objc.autoreleasePoolPop(i8*)
+declare i8* @llvm.objc.autoreleasePoolPush()
+declare i8* @llvm.objc.retainBlock(i8*)
 
-declare i8* @objc_retainedObject(i8*)
-declare i8* @objc_unretainedObject(i8*)
-declare i8* @objc_unretainedPointer(i8*)
+declare i8* @llvm.objc.retainedObject(i8*)
+declare i8* @llvm.objc.unretainedObject(i8*)
+declare i8* @llvm.objc.unretainedPointer(i8*)
 
 declare void @use_pointer(i8*)
 declare void @callee()
@@ -25,19 +25,19 @@
 
 declare void @llvm.dbg.value(metadata, metadata, metadata)
 
-declare i8* @objc_msgSend(i8*, i8*, ...)
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...)
 
 ; Simple retain+release pair deletion, with some intervening control
 ; flow and harmless instructions.
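+;
+; (A retain/release pair is removable when the pass can prove the object is
+; not used or otherwise observed between the two calls; the _imprecise
+; variants additionally carry !clang.imprecise_release, which relaxes how
+; precisely the release's position must be preserved.)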
 
 ; CHECK: define void @test0_precise(i32* %x, i1 %p) [[NUW:#[0-9]+]] {
-; CHECK: @objc_retain
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test0_precise(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -52,17 +52,17 @@
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   ret void
 }
 
 ; CHECK: define void @test0_imprecise(i32* %x, i1 %p) [[NUW]] {
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test0_imprecise(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -77,23 +77,23 @@
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Like test0 but the release isn't always executed when the retain is,
 ; so the optimization is not safe.
 
-; TODO: Make the objc_release's argument be %0.
+; TODO: Make the llvm.objc.release's argument be %0.
 
 ; CHECK: define void @test1_precise(i32* %x, i1 %p, i1 %q) [[NUW]] {
-; CHECK: @objc_retain(i8* %a)
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain(i8* %a)
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test1_precise(i32* %x, i1 %p, i1 %q) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -109,7 +109,7 @@
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   ret void
 
 alt_return:
@@ -117,13 +117,13 @@
 }
 
 ; CHECK: define void @test1_imprecise(i32* %x, i1 %p, i1 %q) [[NUW]] {
-; CHECK: @objc_retain(i8* %a)
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain(i8* %a)
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test1_imprecise(i32* %x, i1 %p, i1 %q) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -139,7 +139,7 @@
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c) nounwind, !clang.imprecise_release !0
   ret void
 
 alt_return:
@@ -151,15 +151,15 @@
 
 ; CHECK: define void @test1b_precise(i8* %x, i1 %p, i1 %q) {
 ; CHECK: entry:
-; CHECK:   tail call i8* @objc_retain(i8* %x) [[NUW]]
-; CHECK-NOT: @objc_
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
+; CHECK-NOT: @llvm.objc.
 ; CHECK: if.end5:
-; CHECK:   tail call void @objc_release(i8* %x) [[NUW]]
-; CHECK-NOT: @objc_
+; CHECK:   tail call void @llvm.objc.release(i8* %x) [[NUW]]
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test1b_precise(i8* %x, i1 %p, i1 %q) {
 entry:
-  tail call i8* @objc_retain(i8* %x) nounwind
+  tail call i8* @llvm.objc.retain(i8* %x) nounwind
   br i1 %p, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
@@ -174,21 +174,21 @@
   br label %if.end5
 
 if.end5:                                          ; preds = %if.then3, %if.end
-  tail call void @objc_release(i8* %x) nounwind
+  tail call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test1b_imprecise(
 ; CHECK: entry:
-; CHECK:   tail call i8* @objc_retain(i8* %x) [[NUW:#[0-9]+]]
-; CHECK-NOT: @objc_
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %x) [[NUW:#[0-9]+]]
+; CHECK-NOT: @llvm.objc.
 ; CHECK: if.end5:
-; CHECK:   tail call void @objc_release(i8* %x) [[NUW]], !clang.imprecise_release ![[RELEASE:[0-9]+]]
-; CHECK-NOT: @objc_
+; CHECK:   tail call void @llvm.objc.release(i8* %x) [[NUW]], !clang.imprecise_release ![[RELEASE:[0-9]+]]
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test1b_imprecise(i8* %x, i1 %p, i1 %q) {
 entry:
-  tail call i8* @objc_retain(i8* %x) nounwind
+  tail call i8* @llvm.objc.retain(i8* %x) nounwind
   br i1 %p, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
@@ -203,7 +203,7 @@
   br label %if.end5
 
 if.end5:                                          ; preds = %if.then3, %if.end
-  tail call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -212,13 +212,13 @@
 ; so the optimization is not safe.
 
 ; CHECK-LABEL: define void @test2_precise(
-; CHECK: @objc_retain(i8* %a)
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain(i8* %a)
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test2_precise(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -236,18 +236,18 @@
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test2_imprecise(
-; CHECK: @objc_retain(i8* %a)
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain(i8* %a)
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test2_imprecise(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -265,7 +265,7 @@
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -275,18 +275,18 @@
 ; TODO: For now, assume this can't happen.
 
 ; CHECK-LABEL: define void @test3_precise(
-; TODO: @objc_retain(i8* %a)
-; TODO: @objc_release
+; TODO: @llvm.objc.retain(i8* %a)
+; TODO: @llvm.objc.release
 ; CHECK: }
 define void @test3_precise(i32* %x, i1* %q) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   %j = load volatile i1, i1* %q
   br i1 %j, label %loop, label %return
 
@@ -295,18 +295,18 @@
 }
 
 ; CHECK-LABEL: define void @test3_imprecise(
-; TODO: @objc_retain(i8* %a)
-; TODO: @objc_release
+; TODO: @llvm.objc.retain(i8* %a)
+; TODO: @llvm.objc.release
 ; CHECK: }
 define void @test3_imprecise(i32* %x, i1* %q) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c) nounwind, !clang.imprecise_release !0
   %j = load volatile i1, i1* %q
   br i1 %j, label %loop, label %return
 
@@ -321,8 +321,8 @@
 ; so the optimization is not safe.
 
 ; CHECK-LABEL: define void @test4_precise(
-; TODO: @objc_retain(i8* %a)
-; TODO: @objc_release
+; TODO: @llvm.objc.retain(i8* %a)
+; TODO: @llvm.objc.release
 ; CHECK: }
 define void @test4_precise(i32* %x, i1* %q) nounwind {
 entry:
@@ -330,19 +330,19 @@
 
 loop:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   %j = load volatile i1, i1* %q
   br i1 %j, label %loop, label %return
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test4_imprecise(
-; TODO: @objc_retain(i8* %a)
-; TODO: @objc_release
+; TODO: @llvm.objc.retain(i8* %a)
+; TODO: @llvm.objc.release
 ; CHECK: }
 define void @test4_imprecise(i32* %x, i1* %q) nounwind {
 entry:
@@ -350,13 +350,13 @@
 
 loop:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   %j = load volatile i1, i1* %q
   br i1 %j, label %loop, label %return
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -365,34 +365,34 @@
 ; so the optimization is not safe.
 
 ; CHECK-LABEL: define void @test5a(
-; CHECK: @objc_retain(i8*
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain(i8*
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test5a(i32* %x, i1 %q, i8* %y) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   %s = select i1 %q, i8* %y, i8* %0
   call void @use_pointer(i8* %s)
   store i32 7, i32* %x
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test5b(
-; CHECK: @objc_retain(i8*
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain(i8*
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test5b(i32* %x, i1 %q, i8* %y) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   %s = select i1 %q, i8* %y, i8* %0
   call void @use_pointer(i8* %s)
   store i32 7, i32* %x
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -402,17 +402,17 @@
 
 ; CHECK-LABEL: define void @test6a(
 ; CHECK: entry:
-; CHECK:   tail call i8* @objc_retain(
+; CHECK:   tail call i8* @llvm.objc.retain
 ; CHECK: t:
-; CHECK:   call void @objc_release(
+; CHECK:   call void @llvm.objc.release
 ; CHECK: f:
-; CHECK:   call void @objc_release(
+; CHECK:   call void @llvm.objc.release
 ; CHECK: return:
 ; CHECK: }
 define void @test6a(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -420,14 +420,14 @@
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   %ct = bitcast i32* %x to i8*
-  call void @objc_release(i8* %ct) nounwind
+  call void @llvm.objc.release(i8* %ct) nounwind
   br label %return
 
 f:
   store i32 7, i32* %x
   call void @callee()
   %cf = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cf) nounwind
+  call void @llvm.objc.release(i8* %cf) nounwind
   br label %return
 
 return:
@@ -435,12 +435,12 @@
 }
 
 ; CHECK-LABEL: define void @test6b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test6b(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -448,14 +448,14 @@
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   %ct = bitcast i32* %x to i8*
-  call void @objc_release(i8* %ct) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %ct) nounwind, !clang.imprecise_release !0
   br label %return
 
 f:
   store i32 7, i32* %x
   call void @callee()
   %cf = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cf) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %cf) nounwind, !clang.imprecise_release !0
   br label %return
 
 return:
@@ -464,17 +464,17 @@
 
 ; CHECK-LABEL: define void @test6c(
 ; CHECK: entry:
-; CHECK:   tail call i8* @objc_retain(
+; CHECK:   tail call i8* @llvm.objc.retain
 ; CHECK: t:
-; CHECK:   call void @objc_release(
+; CHECK:   call void @llvm.objc.release
 ; CHECK: f:
-; CHECK:   call void @objc_release(
+; CHECK:   call void @llvm.objc.release
 ; CHECK: return:
 ; CHECK: }
 define void @test6c(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -482,14 +482,14 @@
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   %ct = bitcast i32* %x to i8*
-  call void @objc_release(i8* %ct) nounwind
+  call void @llvm.objc.release(i8* %ct) nounwind
   br label %return
 
 f:
   store i32 7, i32* %x
   call void @callee()
   %cf = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cf) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %cf) nounwind, !clang.imprecise_release !0
   br label %return
 
 return:
@@ -498,17 +498,17 @@
 
 ; CHECK-LABEL: define void @test6d(
 ; CHECK: entry:
-; CHECK:   tail call i8* @objc_retain(
+; CHECK:   tail call i8* @llvm.objc.retain
 ; CHECK: t:
-; CHECK:   call void @objc_release(
+; CHECK:   call void @llvm.objc.release
 ; CHECK: f:
-; CHECK:   call void @objc_release(
+; CHECK:   call void @llvm.objc.release
 ; CHECK: return:
 ; CHECK: }
 define void @test6d(i32* %x, i1 %p) nounwind {
 entry:
   %a = bitcast i32* %x to i8*
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   br i1 %p, label %t, label %f
 
 t:
@@ -516,14 +516,14 @@
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   %ct = bitcast i32* %x to i8*
-  call void @objc_release(i8* %ct) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %ct) nounwind, !clang.imprecise_release !0
   br label %return
 
 f:
   store i32 7, i32* %x
   call void @callee()
   %cf = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cf) nounwind
+  call void @llvm.objc.release(i8* %cf) nounwind
   br label %return
 
 return:
@@ -536,13 +536,13 @@
 
 ; CHECK-LABEL:     define void @test7(
 ; CHECK:     entry:
-; CHECK-NOT:   objc_
+; CHECK-NOT:   llvm.objc.
 ; CHECK:     t:
-; CHECK:       call i8* @objc_retain
+; CHECK:       call i8* @llvm.objc.retain
 ; CHECK:     f:
-; CHECK:       call i8* @objc_retain
+; CHECK:       call i8* @llvm.objc.retain
 ; CHECK:     return:
-; CHECK:       call void @objc_release
+; CHECK:       call void @llvm.objc.release
 ; CHECK: }
 define void @test7(i32* %x, i1 %p) nounwind {
 entry:
@@ -550,26 +550,26 @@
   br i1 %p, label %t, label %f
 
 t:
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i8 3, i8* %a
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   br label %return
 
 f:
-  %1 = call i8* @objc_retain(i8* %a) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i32 7, i32* %x
   call void @callee()
   br label %return
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test7b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test7b(i32* %x, i1 %p) nounwind {
 entry:
@@ -577,21 +577,21 @@
   br i1 %p, label %t, label %f
 
 t:
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i8 3, i8* %a
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   br label %return
 
 f:
-  %1 = call i8* @objc_retain(i8* %a) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i32 7, i32* %x
   call void @callee()
   br label %return
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -599,11 +599,11 @@
 
 ; CHECK-LABEL: define void @test7c(
 ; CHECK: t:
-; CHECK:   call i8* @objc_retainBlock
+; CHECK:   call i8* @llvm.objc.retainBlock
 ; CHECK: f:
-; CHECK:   call i8* @objc_retain
+; CHECK:   call i8* @llvm.objc.retain
 ; CHECK: return:
-; CHECK:   call void @objc_release
+; CHECK:   call void @llvm.objc.release
 ; CHECK: }
 define void @test7c(i32* %x, i1 %p) nounwind {
 entry:
@@ -611,21 +611,21 @@
   br i1 %p, label %t, label %f
 
 t:
-  %0 = call i8* @objc_retainBlock(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retainBlock(i8* %a) nounwind
   store i8 3, i8* %a
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   br label %return
 
 f:
-  %1 = call i8* @objc_retain(i8* %a) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i32 7, i32* %x
   call void @callee()
   br label %return
 
 return:
   %c = bitcast i32* %x to i8*
-  call void @objc_release(i8* %c) nounwind
+  call void @llvm.objc.release(i8* %c) nounwind
   ret void
 }
 
@@ -635,14 +635,14 @@
 ; CHECK-LABEL: define void @test8a(
 ; CHECK: entry:
 ; CHECK: t:
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: f:
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: mid:
 ; CHECK: u:
-; CHECK:   @objc_release
+; CHECK:   @llvm.objc.release
 ; CHECK: g:
-; CHECK:   @objc_release
+; CHECK:   @llvm.objc.release
 ; CHECK: return:
 ; CHECK: }
 define void @test8a(i32* %x, i1 %p, i1 %q) nounwind {
@@ -651,14 +651,14 @@
   br i1 %p, label %t, label %f
 
 t:
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i8 3, i8* %a
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   br label %mid
 
 f:
-  %1 = call i8* @objc_retain(i8* %a) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i32 7, i32* %x
   br label %mid
 
@@ -668,12 +668,12 @@
 u:
   call void @callee()
   %cu = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cu) nounwind
+  call void @llvm.objc.release(i8* %cu) nounwind
   br label %return
 
 g:
   %cg = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cg) nounwind
+  call void @llvm.objc.release(i8* %cg) nounwind
   br label %return
 
 return:
@@ -681,7 +681,7 @@
 }
 
 ; CHECK-LABEL: define void @test8b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test8b(i32* %x, i1 %p, i1 %q) nounwind {
 entry:
@@ -689,14 +689,14 @@
   br i1 %p, label %t, label %f
 
 t:
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i8 3, i8* %a
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   br label %mid
 
 f:
-  %1 = call i8* @objc_retain(i8* %a) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i32 7, i32* %x
   br label %mid
 
@@ -706,12 +706,12 @@
 u:
   call void @callee()
   %cu = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cu) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %cu) nounwind, !clang.imprecise_release !0
   br label %return
 
 g:
   %cg = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cg) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %cg) nounwind, !clang.imprecise_release !0
   br label %return
 
 return:
@@ -721,14 +721,14 @@
 ; CHECK-LABEL: define void @test8c(
 ; CHECK: entry:
 ; CHECK: t:
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: f:
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: mid:
 ; CHECK: u:
-; CHECK:   @objc_release
+; CHECK:   @llvm.objc.release
 ; CHECK: g:
-; CHECK:   @objc_release
+; CHECK:   @llvm.objc.release
 ; CHECK: return:
 ; CHECK: }
 define void @test8c(i32* %x, i1 %p, i1 %q) nounwind {
@@ -737,14 +737,14 @@
   br i1 %p, label %t, label %f
 
 t:
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i8 3, i8* %a
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   br label %mid
 
 f:
-  %1 = call i8* @objc_retain(i8* %a) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i32 7, i32* %x
   br label %mid
 
@@ -754,12 +754,12 @@
 u:
   call void @callee()
   %cu = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cu) nounwind
+  call void @llvm.objc.release(i8* %cu) nounwind
   br label %return
 
 g:
   %cg = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cg) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %cg) nounwind, !clang.imprecise_release !0
   br label %return
 
 return:
@@ -769,14 +769,14 @@
 ; CHECK-LABEL: define void @test8d(
 ; CHECK: entry:
 ; CHECK: t:
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: f:
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: mid:
 ; CHECK: u:
-; CHECK:   @objc_release
+; CHECK:   @llvm.objc.release
 ; CHECK: g:
-; CHECK:   @objc_release
+; CHECK:   @llvm.objc.release
 ; CHECK: return:
 ; CHECK: }
 define void @test8d(i32* %x, i1 %p, i1 %q) nounwind {
@@ -785,14 +785,14 @@
   br i1 %p, label %t, label %f
 
 t:
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i8 3, i8* %a
   %b = bitcast i32* %x to float*
   store float 2.0, float* %b
   br label %mid
 
 f:
-  %1 = call i8* @objc_retain(i8* %a) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %a) nounwind
   store i32 7, i32* %x
   br label %mid
 
@@ -802,12 +802,12 @@
 u:
   call void @callee()
   %cu = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cu) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %cu) nounwind, !clang.imprecise_release !0
   br label %return
 
 g:
   %cg = bitcast i32* %x to i8*
-  call void @objc_release(i8* %cg) nounwind
+  call void @llvm.objc.release(i8* %cg) nounwind
   br label %return
 
 return:
@@ -817,58 +817,58 @@
 ; Trivial retain+release pair deletion.
 
 ; CHECK-LABEL: define void @test9(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test9(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
-  call void @objc_release(i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind
   ret void
 }
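 
 ; A sketch of the rewrite (assuming, as in @test9, that the retain
 ; dominates the release with no intervening use or possible decrement):
 ;
 ;   %0 = call i8* @llvm.objc.retain(i8* %x)   ; deleted
 ;   call void @llvm.objc.release(i8* %0)      ; deleted
 ;
 ; Both calls are erased outright, which is what the CHECK-NOT above
 ; verifies.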
 
 ; Retain+release pair, but on an unknown pointer relationship. Don't delete!
 
 ; CHECK-LABEL: define void @test9b(
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_release(i8* %s)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.release(i8* %s)
 ; CHECK: }
 define void @test9b(i8* %x, i1 %j, i8* %p) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
   %s = select i1 %j, i8* %x, i8* %p
-  call void @objc_release(i8* %s) nounwind
+  call void @llvm.objc.release(i8* %s) nounwind
   ret void
 }
 
 ; Trivial retain+release pair with intervening calls - don't delete!
 
 ; CHECK-LABEL: define void @test10(
-; CHECK: @objc_retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
 ; CHECK: @callee
 ; CHECK: @use_pointer
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test10(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @callee()
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %0) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind
   ret void
 }
 
 ; Trivial retain,autorelease pair. Don't delete!
-; Also, add a tail keyword, since objc_retain can never be passed
+; Also, add a tail keyword, since llvm.objc.retain can never be passed
 ; a stack argument.
 
 ; CHECK-LABEL: define void @test11(
-; CHECK: tail call i8* @objc_retain(i8* %x) [[NUW]]
-; CHECK: call i8* @objc_autorelease(i8* %0) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
+; CHECK: call i8* @llvm.objc.autorelease(i8* %0) [[NUW]]
 ; CHECK: }
 define void @test11(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_autorelease(i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %0) nounwind
   call void @use_pointer(i8* %x)
   ret void
 }
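 
 ; A sketch of the tail-call marking in @test11 (assumption: the pass only
 ; adds the `tail` keyword and leaves the call otherwise unchanged):
 ;
 ;   %0 = call i8* @llvm.objc.retain(i8* %x)
 ; becomes
 ;   %0 = tail call i8* @llvm.objc.retain(i8* %x)
 ;
 ; [[NUW]] in the CHECK lines is a FileCheck variable captured at its first
 ; use earlier in this file (outside this excerpt); it names the nounwind
 ; attribute group.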
@@ -881,8 +881,8 @@
 ; CHECK: }
 define void @test11a(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_autorelease(i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %0) nounwind
   ret void
 }
 
@@ -891,13 +891,13 @@
 ; want it to be in the autorelease pool.
 
 ; CHECK-LABEL: define i8* @test11b(
-; CHECK: tail call i8* @objc_retain(i8* %x) [[NUW]]
-; CHECK: call i8* @objc_autorelease(i8* %0) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
+; CHECK: call i8* @llvm.objc.autorelease(i8* %0) [[NUW]]
 ; CHECK: }
 define i8* @test11b(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_autorelease(i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %0) nounwind
   ret i8* %x
 }
 
@@ -906,34 +906,34 @@
 
 ; CHECK-LABEL: define void @test12(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_retain(i8* %x)
-; CHECK-NEXT: @objc_retain
-; CHECK: @objc_release
+; CHECK-NEXT: @llvm.objc.retain(i8* %x)
+; CHECK-NEXT: @llvm.objc.retain
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test12(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; Trivial retain,autorelease pair. Don't delete!
 
 ; CHECK-LABEL: define void @test13(
-; CHECK: tail call i8* @objc_retain(i8* %x) [[NUW]]
-; CHECK: tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
 ; CHECK: @use_pointer(i8* %x)
-; CHECK: call i8* @objc_autorelease(i8* %x) [[NUW]]
+; CHECK: call i8* @llvm.objc.autorelease(i8* %x) [[NUW]]
 ; CHECK: }
 define void @test13(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
-  call i8* @objc_autorelease(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %x) nounwind
   ret void
 }
 
@@ -941,22 +941,22 @@
 
 ; CHECK-LABEL: define void @test13b(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_retain(i8* %x)
+; CHECK-NEXT: @llvm.objc.retain(i8* %x)
 ; CHECK-NEXT: @use_pointer
 ; CHECK-NEXT: @use_pointer
 ; CHECK-NEXT: @use_pointer
-; CHECK-NEXT: @objc_release
+; CHECK-NEXT: @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test13b(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -964,20 +964,20 @@
 ; autoreleasePoolPop in the way.
 
 ; CHECK-LABEL: define void @test13c(
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc_autoreleasePoolPop
-; CHECK: @objc_retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc.autoreleasePoolPop
+; CHECK: @llvm.objc.retain(i8* %x)
 ; CHECK: @use_pointer
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test13c(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call void @objc_autoreleasePoolPop(i8* undef)
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call void @llvm.objc.autoreleasePoolPop(i8* undef)
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -986,24 +986,24 @@
 
 ; CHECK-LABEL: define void @test13d(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_retain(i8* %x)
-; CHECK-NEXT: @objc_autoreleasePoolPush
+; CHECK-NEXT: @llvm.objc.retain(i8* %x)
+; CHECK-NEXT: @llvm.objc.autoreleasePoolPush
 ; CHECK-NEXT: @use_pointer
 ; CHECK-NEXT: @use_pointer
 ; CHECK-NEXT: @use_pointer
-; CHECK-NEXT: @objc_release
+; CHECK-NEXT: @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test13d(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_autoreleasePoolPush()
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.autoreleasePoolPush()
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -1013,20 +1013,20 @@
 
 ; CHECK-LABEL: define void @test14(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_retain
+; CHECK-NEXT: @llvm.objc.retain
 ; CHECK-NEXT: @use_pointer
 ; CHECK-NEXT: @use_pointer
-; CHECK-NEXT: @objc_release
-; CHECK-NEXT: @objc_release
+; CHECK-NEXT: @llvm.objc.release
+; CHECK-NEXT: @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test14(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -1035,18 +1035,18 @@
 
 ; CHECK-LABEL: define void @test15(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_retain(i8* %x)
+; CHECK-NEXT: @llvm.objc.retain(i8* %x)
 ; CHECK-NEXT: @use_pointer
-; CHECK-NEXT: @objc_autorelease(i8* %x)
-; CHECK-NEXT: @objc_release
+; CHECK-NEXT: @llvm.objc.autorelease(i8* %x)
+; CHECK-NEXT: @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test15(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
-  call i8* @objc_autorelease(i8* %x) nounwind
-  call void @objc_release(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -1055,52 +1055,52 @@
 
 ; CHECK-LABEL: define void @test15b(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_retain
-; CHECK-NEXT: @objc_autorelease
-; CHECK-NEXT: @objc_release
+; CHECK-NEXT: @llvm.objc.retain
+; CHECK-NEXT: @llvm.objc.autorelease
+; CHECK-NEXT: @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test15b(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_autorelease(i8* %x) nounwind
-  call void @objc_release(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test15c(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_autorelease
+; CHECK-NEXT: @llvm.objc.autorelease
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test15c(i8* %x, i64 %n) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_autorelease(i8* %x) nounwind
-  call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Retain+release pairs in diamonds, all dominated by a retain.
 
 ; CHECK-LABEL: define void @test16a(
-; CHECK: @objc_retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
 ; CHECK-NOT: @objc
 ; CHECK: purple:
 ; CHECK: @use_pointer
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test16a(i1 %a, i1 %b, i8* %x) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br i1 %a, label %red, label %orange
 
 red:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 orange:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 yellow:
@@ -1109,38 +1109,38 @@
   br i1 %b, label %green, label %blue
 
 green:
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   br label %purple
 
 blue:
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   br label %purple
 
 purple:
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test16b(
-; CHECK: @objc_retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
 ; CHECK-NOT: @objc
 ; CHECK: purple:
 ; CHECK-NEXT: @use_pointer
 ; CHECK-NEXT: @use_pointer
-; CHECK-NEXT: @objc_release
+; CHECK-NEXT: @llvm.objc.release
 ; CHECK: }
 define void @test16b(i1 %a, i1 %b, i8* %x) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br i1 %a, label %red, label %orange
 
 red:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 orange:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 yellow:
@@ -1149,38 +1149,38 @@
   br i1 %b, label %green, label %blue
 
 green:
-  call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   br label %purple
 
 blue:
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   br label %purple
 
 purple:
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test16c(
-; CHECK: @objc_retain(i8* %x)
+; CHECK: @llvm.objc.retain(i8* %x)
 ; CHECK-NOT: @objc
 ; CHECK: purple:
 ; CHECK: @use_pointer
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test16c(i1 %a, i1 %b, i8* %x) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br i1 %a, label %red, label %orange
 
 red:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 orange:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 yellow:
@@ -1189,34 +1189,34 @@
   br i1 %b, label %green, label %blue
 
 green:
-  call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   br label %purple
 
 blue:
-  call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   br label %purple
 
 purple:
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK-LABEL: define void @test16d(
-; CHECK: @objc_retain(i8* %x)
-; CHECK: @objc
+; CHECK: @llvm.objc.retain(i8* %x)
+; CHECK: @llvm.objc
 ; CHECK: }
 define void @test16d(i1 %a, i1 %b, i8* %x) {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br i1 %a, label %red, label %orange
 
 red:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 orange:
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   br label %yellow
 
 yellow:
@@ -1225,11 +1225,11 @@
   br i1 %b, label %green, label %blue
 
 green:
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   br label %purple
 
 blue:
-  call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   br label %purple
 
 purple:
@@ -1239,24 +1239,24 @@
 ; Delete no-ops.
 
 ; CHECK-LABEL: define void @test18(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test18() {
-  call i8* @objc_retain(i8* null)
-  call void @objc_release(i8* null)
-  call i8* @objc_autorelease(i8* null)
+  call i8* @llvm.objc.retain(i8* null)
+  call void @llvm.objc.release(i8* null)
+  call i8* @llvm.objc.autorelease(i8* null)
   ret void
 }
 
 ; Delete no-ops where undef can be assumed to be null.
 
 ; CHECK-LABEL: define void @test18b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test18b() {
-  call i8* @objc_retain(i8* undef)
-  call void @objc_release(i8* undef)
-  call i8* @objc_autorelease(i8* undef)
+  call i8* @llvm.objc.retain(i8* undef)
+  call void @llvm.objc.release(i8* undef)
+  call i8* @llvm.objc.autorelease(i8* undef)
   ret void
 }
 
@@ -1266,34 +1266,34 @@
 ; CHECK: define void @test19(i32* %y) {
 ; CHECK:   %z = bitcast i32* %y to i8*
 ; CHECK:   %0 = bitcast i32* %y to i8*
-; CHECK:   %1 = tail call i8* @objc_retain(i8* %0)
+; CHECK:   %1 = tail call i8* @llvm.objc.retain(i8* %0)
 ; CHECK:   call void @use_pointer(i8* %z)
 ; CHECK:   call void @use_pointer(i8* %z)
 ; CHECK:   %2 = bitcast i32* %y to i8*
-; CHECK:   call void @objc_release(i8* %2)
+; CHECK:   call void @llvm.objc.release(i8* %2)
 ; CHECK:   ret void
 ; CHECK: }
 define void @test19(i32* %y) {
 entry:
   %x = bitcast i32* %y to i8*
-  %0 = call i8* @objc_retain(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
   %z = bitcast i32* %y to i8*
   call void @use_pointer(i8* %z)
   call void @use_pointer(i8* %z)
-  call void @objc_release(i8* %x)
+  call void @llvm.objc.release(i8* %x)
   ret void
 }
 
 ; Bitcast insertion
 
 ; CHECK-LABEL: define void @test20(
-; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %tmp) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @llvm.objc.retain(i8* %tmp) [[NUW]]
 ; CHECK-NEXT: invoke
 ; CHECK: }
 define void @test20(double* %self) personality i32 (...)* @__gxx_personality_v0 {
 if.then12:
   %tmp = bitcast double* %self to i8*
-  %tmp1 = call i8* @objc_retain(i8* %tmp) nounwind
+  %tmp1 = call i8* @llvm.objc.retain(i8* %tmp) nounwind
   invoke void @invokee()
           to label %invoke.cont23 unwind label %lpad20
 
@@ -1321,8 +1321,8 @@
 define i8* @test21() {
 entry:
   %call = call i8* @returner()
-  %0 = call i8* @objc_retain(i8* %call) nounwind
-  %1 = call i8* @objc_autorelease(i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %call) nounwind
+  %1 = call i8* @llvm.objc.autorelease(i8* %0) nounwind
   ret i8* %1
 }
 
@@ -1331,10 +1331,10 @@
 ; CHECK-LABEL: define void @test22(
 ; CHECK: B:
 ; CHECK:   %1 = bitcast double* %p to i8*
-; CHECK:   call void @objc_release(i8* %1)
+; CHECK:   call void @llvm.objc.release(i8* %1)
 ; CHECK:   br label %C
 ; CHECK: C:                                                ; preds = %B, %A
-; CHECK-NOT: @objc_release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: }
 define void @test22(double* %p, i1 %a) {
   br i1 %a, label %A, label %B
@@ -1345,16 +1345,16 @@
 C:
   %h = phi double* [ null, %A ], [ %p, %B ]
   %c = bitcast double* %h to i8*
-  call void @objc_release(i8* %c), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %c), !clang.imprecise_release !0
   ret void
 }
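 
 ; For reference (a sketch; the !0 node itself is defined at the bottom of
 ; this file, outside this excerpt): a release is "imprecise" when it
 ; carries the clang.imprecise_release tag, e.g.
 ;
 ;   call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
 ;   !0 = !{}
 ;
 ; That tag is what permits the motion in @test22 above; @test22_precise
 ; below lacks it, so its release must stay put.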
 
-; Do not move an objc_release that doesn't have the clang.imprecise_release tag.
+; Do not move an llvm.objc.release that doesn't have the clang.imprecise_release tag.
 
 ; CHECK-LABEL: define void @test22_precise(
 ; CHECK: %[[P0:.*]] = phi double*
 ; CHECK: %[[V0:.*]] = bitcast double* %[[P0]] to i8*
-; CHECK: call void @objc_release(i8* %[[V0]])
+; CHECK: call void @llvm.objc.release(i8* %[[V0]])
 ; CHECK: ret void
 define void @test22_precise(double* %p, i1 %a) {
   br i1 %a, label %A, label %B
@@ -1365,21 +1365,21 @@
 C:
   %h = phi double* [ null, %A ], [ %p, %B ]
   %c = bitcast double* %h to i8*
-  call void @objc_release(i8* %c)
+  call void @llvm.objc.release(i8* %c)
   ret void
 }
 
 ; Any call can decrement a retain count.
 
 ; CHECK-LABEL: define void @test24(
-; CHECK: @objc_retain(i8* %a)
-; CHECK: @objc_release
+; CHECK: @llvm.objc.retain(i8* %a)
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test24(i8* %r, i8* %a) {
-  call i8* @objc_retain(i8* %a)
+  call i8* @llvm.objc.retain(i8* %a)
   call void @use_pointer(i8* %r)
   %q = load i8, i8* %a
-  call void @objc_release(i8* %a)
+  call void @llvm.objc.release(i8* %a)
   ret void
 }
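 
 ; A sketch of why the pair in @test24 must stay: the opaque call between
 ; the retain and the release may itself decrement the count.
 ;
 ;   call i8* @llvm.objc.retain(i8* %a)
 ;   call void @use_pointer(i8* %r)    ; opaque: may release %a
 ;   call void @llvm.objc.release(i8* %a)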
 
@@ -1388,14 +1388,14 @@
 
 ; CHECK-LABEL: define void @test25(
 ; CHECK: entry:
-; CHECK:   call i8* @objc_retain(i8* %p)
+; CHECK:   call i8* @llvm.objc.retain(i8* %p)
 ; CHECK: true:
 ; CHECK: done:
-; CHECK:   call void @objc_release(i8* %p)
+; CHECK:   call void @llvm.objc.release(i8* %p)
 ; CHECK: }
 define void @test25(i8* %p, i1 %x) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   br i1 %x, label %true, label %done
 
@@ -1404,7 +1404,7 @@
   br label %done
 
 done:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -1413,14 +1413,14 @@
 
 ; CHECK-LABEL: define void @test26(
 ; CHECK: entry:
-; CHECK:   call i8* @objc_retain(i8* %p)
+; CHECK:   call i8* @llvm.objc.retain(i8* %p)
 ; CHECK: true:
 ; CHECK: done:
-; CHECK:   call void @objc_release(i8* %p)
+; CHECK:   call void @llvm.objc.release(i8* %p)
 ; CHECK: }
 define void @test26(i8* %p, i1 %x) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1429,7 +1429,7 @@
 
 done:
   store i8 0, i8* %p
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -1437,15 +1437,15 @@
 
 ; CHECK-LABEL: define void @test27(
 ; CHECK: entry:
-; CHECK: call i8* @objc_retain(i8* %p)
+; CHECK: call i8* @llvm.objc.retain(i8* %p)
 ; CHECK: loop:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: done:
-; CHECK: call void @objc_release
+; CHECK: call void @llvm.objc.release
 ; CHECK: }
 define void @test27(i8* %p, i1 %x, i1 %y) {
 entry: 
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %loop, label %done
 
 loop:
@@ -1454,25 +1454,25 @@
   br i1 %y, label %done, label %loop
   
 done: 
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; Trivial code motion case: Triangle.
 
 ; CHECK-LABEL: define void @test28(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: true:
-; CHECK: call i8* @objc_retain(
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: store
-; CHECK: call void @objc_release
+; CHECK: call void @llvm.objc.release
 ; CHECK: done:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test28(i8* %p, i1 %x) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1481,7 +1481,7 @@
   br label %done
 
 done:
-  call void @objc_release(i8* %p), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
   ret void
 }
 
@@ -1489,19 +1489,19 @@
 ; unrelated memory references!
 
 ; CHECK-LABEL: define void @test28b(
-; CHECK: call i8* @objc_retain(
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: true:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: call void @callee()
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: store
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: done:
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test28b(i8* %p, i1 %x, i8* noalias %t) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1511,7 +1511,7 @@
 
 done:
   store i8 0, i8* %t
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -1519,18 +1519,18 @@
 ; unrelated memory references! And preserve the metadata.
 
 ; CHECK-LABEL: define void @test28c(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: true:
-; CHECK: call i8* @objc_retain(
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: store
-; CHECK: call void @objc_release(i8* %p) [[NUW]], !clang.imprecise_release
+; CHECK: call void @llvm.objc.release(i8* %p) [[NUW]], !clang.imprecise_release
 ; CHECK: done:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test28c(i8* %p, i1 %x, i8* noalias %t) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1540,28 +1540,28 @@
 
 done:
   store i8 0, i8* %t
-  call void @objc_release(i8* %p), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
   ret void
 }
 
 ; Like test28, but with two releases.
 
 ; CHECK-LABEL: define void @test29(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: true:
-; CHECK: call i8* @objc_retain(
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: store
-; CHECK: call void @objc_release
-; CHECK-NOT: @objc_release
+; CHECK: call void @llvm.objc.release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: done:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: ohno:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test29(i8* %p, i1 %x, i1 %y) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1570,11 +1570,11 @@
   br i1 %y, label %done, label %ohno
 
 done:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 
 ohno:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -1582,23 +1582,23 @@
 ; with an extra release.
 
 ; CHECK-LABEL: define void @test30(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: true:
-; CHECK: call i8* @objc_retain(
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: store
-; CHECK: call void @objc_release
-; CHECK-NOT: @objc_release
+; CHECK: call void @llvm.objc.release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: false:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: done:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: ohno:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test30(i8* %p, i1 %x, i1 %y, i1 %z) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %false
 
 true:
@@ -1610,58 +1610,58 @@
   br i1 %z, label %done, label %ohno
 
 done:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 
 ohno:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; Basic case with a mergeable release.
 
 ; CHECK-LABEL: define void @test31(
-; CHECK: call i8* @objc_retain(i8* %p)
+; CHECK: call i8* @llvm.objc.retain(i8* %p)
 ; CHECK: call void @callee()
 ; CHECK: store
-; CHECK: call void @objc_release
-; CHECK-NOT: @objc_release
+; CHECK: call void @llvm.objc.release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: true:
-; CHECK-NOT: @objc_release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: false:
-; CHECK-NOT: @objc_release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: ret void
-; CHECK-NOT: @objc_release
+; CHECK-NOT: @llvm.objc.release
 ; CHECK: }
 define void @test31(i8* %p, i1 %x) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   store i8 0, i8* %p
   br i1 %x, label %true, label %false
 true:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 false:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; Don't consider bitcasts or getelementptrs direct uses.
 
 ; CHECK-LABEL: define void @test32(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: true:
-; CHECK: call i8* @objc_retain(
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: store
-; CHECK: call void @objc_release
+; CHECK: call void @llvm.objc.release
 ; CHECK: done:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test32(i8* %p, i1 %x) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1672,25 +1672,25 @@
 done:
   %g = bitcast i8* %p to i8*
   %h = getelementptr i8, i8* %g, i64 0
-  call void @objc_release(i8* %g)
+  call void @llvm.objc.release(i8* %g)
   ret void
 }
 
 ; Do consider icmps to be direct uses.
 
 ; CHECK-LABEL: define void @test33(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: true:
-; CHECK: call i8* @objc_retain(
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: icmp
-; CHECK: call void @objc_release
+; CHECK: call void @llvm.objc.release
 ; CHECK: done:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test33(i8* %p, i1 %x, i8* %y) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1701,7 +1701,7 @@
 done:
   %g = bitcast i8* %p to i8*
   %h = getelementptr i8, i8* %g, i64 0
-  call void @objc_release(i8* %g)
+  call void @llvm.objc.release(i8* %g)
   ret void
 }
 
@@ -1709,14 +1709,14 @@
 ; releases.
 
 ; CHECK-LABEL: define void @test34a(
-; CHECK:   call i8* @objc_retain
+; CHECK:   call i8* @llvm.objc.retain
 ; CHECK: true:
 ; CHECK: done:
-; CHECK: call void @objc_release
+; CHECK: call void @llvm.objc.release
 ; CHECK: }
 define void @test34a(i8* %p, i1 %x, i8* %y) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1726,16 +1726,16 @@
 done:
   %g = bitcast i8* %p to i8*
   %h = getelementptr i8, i8* %g, i64 0
-  call void @objc_release(i8* %g)
+  call void @llvm.objc.release(i8* %g)
   ret void
 }
 
 ; CHECK-LABEL: define void @test34b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test34b(i8* %p, i1 %x, i8* %y) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1745,7 +1745,7 @@
 done:
   %g = bitcast i8* %p to i8*
   %h = getelementptr i8, i8* %g, i64 0
-  call void @objc_release(i8* %g), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %g), !clang.imprecise_release !0
   ret void
 }
 
@@ -1756,14 +1756,14 @@
 ; Precise.
 ; CHECK-LABEL: define void @test35a(
 ; CHECK: entry:
-; CHECK:   call i8* @objc_retain
+; CHECK:   call i8* @llvm.objc.retain
 ; CHECK: true:
 ; CHECK: done:
-; CHECK:   call void @objc_release
+; CHECK:   call void @llvm.objc.release
 ; CHECK: }
 define void @test35a(i8* %p, i1 %x, i8* %y) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1773,17 +1773,17 @@
 done:
   %g = bitcast i8* %p to i8*
   %h = getelementptr i8, i8* %g, i64 0
-  call void @objc_release(i8* %g)
+  call void @llvm.objc.release(i8* %g)
   ret void
 }
 
 ; Imprecise.
 ; CHECK-LABEL: define void @test35b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test35b(i8* %p, i1 %x, i8* %y) {
 entry:
-  %f0 = call i8* @objc_retain(i8* %p)
+  %f0 = call i8* @llvm.objc.retain(i8* %p)
   br i1 %x, label %true, label %done
 
 true:
@@ -1793,50 +1793,50 @@
 done:
   %g = bitcast i8* %p to i8*
   %h = getelementptr i8, i8* %g, i64 0
-  call void @objc_release(i8* %g), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %g), !clang.imprecise_release !0
   ret void
 }
 
 ; Delete a retain,release if there's no actual use and we have precise release.
 
 ; CHECK-LABEL: define void @test36a(
-; CHECK: @objc_retain
+; CHECK: @llvm.objc.retain
 ; CHECK: call void @callee()
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: call void @callee()
-; CHECK: @objc_release
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test36a(i8* %p) {
 entry:
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   call void @callee()
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; Like test36, but with metadata.
 
 ; CHECK-LABEL: define void @test36b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test36b(i8* %p) {
 entry:
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   call void @callee()
-  call void @objc_release(i8* %p), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
   ret void
 }
 
 ; Be aggressive about analyzing phis to eliminate possible uses.
 
 ; CHECK-LABEL: define void @test38(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test38(i8* %p, i1 %u, i1 %m, i8* %z, i8* %y, i8* %x, i8* %w) {
 entry:
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   br i1 %u, label %true, label %false
 true:
   br i1 %m, label %a, label %b
@@ -1859,36 +1859,36 @@
 g:
   %h = phi i8* [ %j, %e ], [ %k, %f ]
   call void @use_pointer(i8* %h)
-  call void @objc_release(i8* %p), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
   ret void
 }
 
 ; Delete retain,release pairs around loops.
 
 ; CHECK-LABEL: define void @test39(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test39(i8* %p) {
 entry:
-  %0 = call i8* @objc_retain(i8* %p)
+  %0 = call i8* @llvm.objc.retain(i8* %p)
   br label %loop
 
 loop:                                             ; preds = %loop, %entry
   br i1 undef, label %loop, label %exit
 
 exit:                                             ; preds = %loop
-  call void @objc_release(i8* %0), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0), !clang.imprecise_release !0
   ret void
 }
 
 ; Delete retain,release pairs around loops containing uses.
 
 ; CHECK-LABEL: define void @test39b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test39b(i8* %p) {
 entry:
-  %0 = call i8* @objc_retain(i8* %p)
+  %0 = call i8* @llvm.objc.retain(i8* %p)
   br label %loop
 
 loop:                                             ; preds = %loop, %entry
@@ -1896,18 +1896,18 @@
   br i1 undef, label %loop, label %exit
 
 exit:                                             ; preds = %loop
-  call void @objc_release(i8* %0), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0), !clang.imprecise_release !0
   ret void
 }
 
 ; Delete retain,release pairs around loops containing potential decrements.
 
 ; CHECK-LABEL: define void @test39c(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test39c(i8* %p) {
 entry:
-  %0 = call i8* @objc_retain(i8* %p)
+  %0 = call i8* @llvm.objc.retain(i8* %p)
   br label %loop
 
 loop:                                             ; preds = %loop, %entry
@@ -1915,7 +1915,7 @@
   br i1 undef, label %loop, label %exit
 
 exit:                                             ; preds = %loop
-  call void @objc_release(i8* %0), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0), !clang.imprecise_release !0
   ret void
 }
 
@@ -1923,11 +1923,11 @@
 ; the successors are in a different order.
 
 ; CHECK-LABEL: define void @test40(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test40(i8* %p) {
 entry:
-  %0 = call i8* @objc_retain(i8* %p)
+  %0 = call i8* @llvm.objc.retain(i8* %p)
   br label %loop
 
 loop:                                             ; preds = %loop, %entry
@@ -1935,7 +1935,7 @@
   br i1 undef, label %exit, label %loop
 
 exit:                                             ; preds = %loop
-  call void @objc_release(i8* %0), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0), !clang.imprecise_release !0
   ret void
 }
 
@@ -1944,26 +1944,26 @@
 
 ; CHECK-LABEL: define void @test42(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
-; CHECK-NEXT: call i8* @objc_autorelease(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.autorelease(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
-; CHECK-NEXT: call void @objc_release(i8* %p)
+; CHECK-NEXT: call void @llvm.objc.release(i8* %p)
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test42(i8* %p) {
 entry:
-  call i8* @objc_retain(i8* %p)
-  call i8* @objc_autorelease(i8* %p)
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
+  call i8* @llvm.objc.autorelease(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @use_pointer(i8* %p)
   call void @use_pointer(i8* %p)
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   call void @use_pointer(i8* %p)
   call void @use_pointer(i8* %p)
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -1972,24 +1972,24 @@
 
 ; CHECK-LABEL: define void @test43(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
-; CHECK-NEXT: call i8* @objc_autorelease(i8* %p)
-; CHECK-NEXT: call i8* @objc_retain
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.autorelease(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
-; CHECK-NEXT: call void @objc_autoreleasePoolPop(i8* undef)
-; CHECK-NEXT: call void @objc_release
+; CHECK-NEXT: call void @llvm.objc.autoreleasePoolPop(i8* undef)
+; CHECK-NEXT: call void @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test43(i8* %p) {
 entry:
-  call i8* @objc_retain(i8* %p)
-  call i8* @objc_autorelease(i8* %p)
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
+  call i8* @llvm.objc.autorelease(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @use_pointer(i8* %p)
   call void @use_pointer(i8* %p)
-  call void @objc_autoreleasePoolPop(i8* undef)
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.autoreleasePoolPop(i8* undef)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -1998,74 +1998,74 @@
 
 ; CHECK-LABEL: define void @test43b(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
-; CHECK-NEXT: call i8* @objc_autorelease(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.autorelease(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
-; CHECK-NEXT: call i8* @objc_autoreleasePoolPush()
+; CHECK-NEXT: call i8* @llvm.objc.autoreleasePoolPush()
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
-; CHECK-NEXT: call void @objc_release
+; CHECK-NEXT: call void @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test43b(i8* %p) {
 entry:
-  call i8* @objc_retain(i8* %p)
-  call i8* @objc_autorelease(i8* %p)
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
+  call i8* @llvm.objc.autorelease(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @use_pointer(i8* %p)
   call void @use_pointer(i8* %p)
-  call i8* @objc_autoreleasePoolPush()
-  call void @objc_release(i8* %p)
+  call i8* @llvm.objc.autoreleasePoolPush()
+  call void @llvm.objc.release(i8* %p)
   call void @use_pointer(i8* %p)
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; Do retain+release elimination for non-provenance pointers.
 
 ; CHECK-LABEL: define void @test44(
-; CHECK-NOT: objc_
+; CHECK-NOT: llvm.objc.
 ; CHECK: }
 define void @test44(i8** %pp) {
   %p = load i8*, i8** %pp
-  %q = call i8* @objc_retain(i8* %p)
-  call void @objc_release(i8* %q)
+  %q = call i8* @llvm.objc.retain(i8* %p)
+  call void @llvm.objc.release(i8* %q)
   ret void
 }
 
 ; Don't delete retain+release with an unknown-provenance
-; may-alias objc_release between them.
+; may-alias llvm.objc.release between them.
 
 ; CHECK-LABEL: define void @test45(
-; CHECK: call i8* @objc_retain(i8* %p)
-; CHECK: call void @objc_release(i8* %q)
+; CHECK: call i8* @llvm.objc.retain(i8* %p)
+; CHECK: call void @llvm.objc.release(i8* %q)
 ; CHECK: call void @use_pointer(i8* %p)
-; CHECK: call void @objc_release(i8* %p)
+; CHECK: call void @llvm.objc.release(i8* %p)
 ; CHECK: }
 define void @test45(i8** %pp, i8** %qq) {
   %p = load i8*, i8** %pp
   %q = load i8*, i8** %qq
-  call i8* @objc_retain(i8* %p)
-  call void @objc_release(i8* %q)
+  call i8* @llvm.objc.retain(i8* %p)
+  call void @llvm.objc.release(i8* %q)
   call void @use_pointer(i8* %p)
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; Don't delete retain and autorelease here.
 
 ; CHECK-LABEL: define void @test46(
-; CHECK: tail call i8* @objc_retain(i8* %p) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %p) [[NUW]]
 ; CHECK: true:
-; CHECK: call i8* @objc_autorelease(i8* %p) [[NUW]]
+; CHECK: call i8* @llvm.objc.autorelease(i8* %p) [[NUW]]
 ; CHECK: }
 define void @test46(i8* %p, i1 %a) {
 entry:
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   br i1 %a, label %true, label %false
 
 true:
-  call i8* @objc_autorelease(i8* %p)
+  call i8* @llvm.objc.autorelease(i8* %p)
   call void @use_pointer(i8* %p)
   ret void
 
@@ -2080,7 +2080,7 @@
 ; CHECK: ret i8* %p
 ; CHECK: }
 define i8* @test47(i8* %p) nounwind {
-  %x = call i8* @objc_retainedObject(i8* %p)
+  %x = call i8* @llvm.objc.retainedObject(i8* %p)
   ret i8* %x
 }
 
@@ -2091,7 +2091,7 @@
 ; CHECK: ret i8* %p
 ; CHECK: }
 define i8* @test48(i8* %p) nounwind {
-  %x = call i8* @objc_unretainedObject(i8* %p)
+  %x = call i8* @llvm.objc.unretainedObject(i8* %p)
   ret i8* %x
 }
 
@@ -2102,36 +2102,36 @@
 ; CHECK: ret i8* %p
 ; CHECK: }
 define i8* @test49(i8* %p) nounwind {
-  %x = call i8* @objc_unretainedPointer(i8* %p)
+  %x = call i8* @llvm.objc.unretainedPointer(i8* %p)
   ret i8* %x
 }
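 
 ; A sketch of the common pattern in @test47, @test48, and @test49
 ; (assumption: the same forwarding applies to all three intrinsics): a
 ; call that merely forwards its argument is folded away, so
 ;
 ;   %x = call i8* @llvm.objc.unretainedPointer(i8* %p)
 ;   ret i8* %x
 ;
 ; simplifies to "ret i8* %p", as the CHECK lines above verify.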
 
 ; Do delete retain+release with intervening stores of the address value if we
-; have imprecise release attached to objc_release.
+; have imprecise release attached to llvm.objc.release.
 
 ; CHECK-LABEL:      define void @test50a(
-; CHECK-NEXT:   call i8* @objc_retain
+; CHECK-NEXT:   call i8* @llvm.objc.retain
 ; CHECK-NEXT:   call void @callee
 ; CHECK-NEXT:   store
-; CHECK-NEXT:   call void @objc_release
+; CHECK-NEXT:   call void @llvm.objc.release
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test50a(i8* %p, i8** %pp) {
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   store i8* %p, i8** %pp
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; CHECK-LABEL: define void @test50b(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test50b(i8* %p, i8** %pp) {
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   store i8* %p, i8** %pp
-  call void @objc_release(i8* %p), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
   ret void
 }
 
@@ -2140,28 +2140,28 @@
 ; address value.
 
 ; CHECK-LABEL: define void @test51a(
-; CHECK: call i8* @objc_retain(i8* %p)
-; CHECK: call void @objc_release(i8* %p)
+; CHECK: call i8* @llvm.objc.retain(i8* %p)
+; CHECK: call void @llvm.objc.release(i8* %p)
 ; CHECK: ret void
 ; CHECK: }
 define void @test51a(i8* %p) {
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   store i8 0, i8* %p
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; CHECK-LABEL: define void @test51b(
-; CHECK: call i8* @objc_retain(i8* %p)
-; CHECK: call void @objc_release(i8* %p)
+; CHECK: call i8* @llvm.objc.retain(i8* %p)
+; CHECK: call void @llvm.objc.release(i8* %p)
 ; CHECK: ret void
 ; CHECK: }
 define void @test51b(i8* %p) {
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   store i8 0, i8* %p
-  call void @objc_release(i8* %p), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
   ret void
 }
 
@@ -2169,36 +2169,36 @@
 ; unknown provenance.
 
 ; CHECK-LABEL: define void @test52a(
-; CHECK: call i8* @objc_retain
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: call void @use_pointer(i8* %z)
-; CHECK: call void @objc_release
+; CHECK: call void @llvm.objc.release
 ; CHECK: ret void
 ; CHECK: }
 define void @test52a(i8** %zz, i8** %pp) {
   %p = load i8*, i8** %pp
-  %1 = call i8* @objc_retain(i8* %p)
+  %1 = call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   %z = load i8*, i8** %zz
   call void @use_pointer(i8* %z)
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
 ; CHECK-LABEL: define void @test52b(
-; CHECK: call i8* @objc_retain
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: call void @callee()
 ; CHECK: call void @use_pointer(i8* %z)
-; CHECK: call void @objc_release
+; CHECK: call void @llvm.objc.release
 ; CHECK: ret void
 ; CHECK: }
 define void @test52b(i8** %zz, i8** %pp) {
   %p = load i8*, i8** %pp
-  %1 = call i8* @objc_retain(i8* %p)
+  %1 = call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   %z = load i8*, i8** %zz
   call void @use_pointer(i8* %z)
-  call void @objc_release(i8* %p), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %p), !clang.imprecise_release !0
   ret void
 }
 
@@ -2208,15 +2208,15 @@
 ; See rdar://10551239.
 
 ; CHECK-LABEL: define void @test53(
-; CHECK: @objc_
+; CHECK: @llvm.objc.
 ; CHECK: }
 define void @test53(void ()** %zz, i8** %pp) {
   %p = load i8*, i8** %pp
-  %1 = call i8* @objc_retain(i8* %p)
+  %1 = call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   %z = load void ()*, void ()** %zz
   call void @callee_fnptr(void ()* %z)
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -2224,12 +2224,12 @@
 
 ; CHECK-LABEL: define void @test54(
 ; CHECK: call i8* @returner()
-; CHECK-NEXT: call void @objc_release(i8* %t) [[NUW]], !clang.imprecise_release ![[RELEASE]]
+; CHECK-NEXT: call void @llvm.objc.release(i8* %t) [[NUW]], !clang.imprecise_release ![[RELEASE]]
 ; CHECK-NEXT: ret void
 ; CHECK: }
 define void @test54() {
   %t = call i8* @returner()
-  call i8* @objc_autorelease(i8* %t)
+  call i8* @llvm.objc.autorelease(i8* %t)
   ret void
 }
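 
 ; A sketch of what the CHECK lines in @test54 expect (a reading of this
 ; test, not a general claim about the pass): an autorelease of an
 ; otherwise-unused returned value becomes an imprecise release:
 ;
 ;   %t = call i8* @returner()
 ;   call i8* @llvm.objc.autorelease(i8* %t)
 ; becomes
 ;   call void @llvm.objc.release(i8* %t), !clang.imprecise_release !0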
 
@@ -2240,10 +2240,10 @@
 ; CHECK: }
 define void @test55(i8* %x) { 
 entry: 
-  %0 = call i8* @objc_retain(i8* %x) nounwind 
-  %1 = call i8* @objc_retain(i8* %x) nounwind 
-  call void @objc_release(i8* %x) nounwind 
-  call void @objc_release(i8* %x) nounwind 
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind 
+  %1 = call i8* @llvm.objc.retain(i8* %x) nounwind 
+  call void @llvm.objc.release(i8* %x) nounwind 
+  call void @llvm.objc.release(i8* %x) nounwind 
   ret void 
 }
 
@@ -2255,30 +2255,30 @@
 ; CHECK-LABEL: define void @test56(
 ; CHECK-NOT: @objc
 ; CHECK: if.then:
-; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK-NEXT: %0 = tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
 ; CHECK-NEXT: tail call void @use_pointer(i8* %x)
 ; CHECK-NEXT: tail call void @use_pointer(i8* %x)
-; CHECK-NEXT: tail call void @objc_release(i8* %x) [[NUW]], !clang.imprecise_release ![[RELEASE]]
+; CHECK-NEXT: tail call void @llvm.objc.release(i8* %x) [[NUW]], !clang.imprecise_release ![[RELEASE]]
 ; CHECK-NEXT: br label %if.end
 ; CHECK-NOT: @objc
 ; CHECK: }
 define void @test56(i8* %x, i32 %n) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %x) nounwind
-  %1 = tail call i8* @objc_retain(i8* %0) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %x) nounwind
+  %1 = tail call i8* @llvm.objc.retain(i8* %0) nounwind
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %2 = tail call i8* @objc_retain(i8* %1) nounwind
+  %2 = tail call i8* @llvm.objc.retain(i8* %1) nounwind
   tail call void @use_pointer(i8* %2)
   tail call void @use_pointer(i8* %2)
-  tail call void @objc_release(i8* %2) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %2) nounwind, !clang.imprecise_release !0
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
-  tail call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
-  tail call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -2288,26 +2288,26 @@
 
 ; CHECK-LABEL:      define void @test57(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
-; CHECK-NEXT:   call void @objc_release(i8* %x) [[NUW]]
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %x) [[NUW]]
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test57(i8* %x) nounwind {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
-  call i8* @objc_retain(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -2316,20 +2316,20 @@
 
 ; CHECK-LABEL:      define void @test58(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   @objc_retain
+; CHECK-NEXT:   @llvm.objc.retain
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test58(i8* %x) nounwind {
 entry:
-  call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
-  call i8* @objc_retain(i8* %x) nounwind
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
+  call i8* @llvm.objc.retain(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -2337,20 +2337,20 @@
 
 ; CHECK-LABEL:      define void @test59(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %0 = tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK-NEXT:   %0 = tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
-; CHECK-NEXT:   call void @objc_release(i8* %x) [[NUW]]
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %x) [[NUW]]
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test59(i8* %x) nounwind {
 entry:
-  %a = call i8* @objc_retain(i8* %x) nounwind
-  call void @objc_release(i8* %x) nounwind
-  %b = call i8* @objc_retain(i8* %x) nounwind
+  %a = call i8* @llvm.objc.retain(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
+  %b = call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   call void @use_pointer(i8* %x)
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
@@ -2363,71 +2363,71 @@
 ; @something is not constant.
 
 ; CHECK-LABEL: define void @test60a(
-; CHECK: call i8* @objc_retain
-; CHECK: call void @objc_release
+; CHECK: call i8* @llvm.objc.retain
+; CHECK: call void @llvm.objc.release
 ; CHECK: }
 define void @test60a() {
   %t = load i8*, i8** @constptr
   %s = load i8*, i8** @something
-  call i8* @objc_retain(i8* %s)
+  call i8* @llvm.objc.retain(i8* %s)
   call void @callee()
   call void @use_pointer(i8* %t)
-  call void @objc_release(i8* %s)
+  call void @llvm.objc.release(i8* %s)
   ret void
 }
 
 ; CHECK-LABEL: define void @test60b(
-; CHECK: call i8* @objc_retain
-; CHECK-NOT: call i8* @objc_retain
-; CHECK-NOT: call i8* @objc_release
+; CHECK: call i8* @llvm.objc.retain
+; CHECK-NOT: call i8* @llvm.objc.retain
+; CHECK-NOT: call void @llvm.objc.release
 ; CHECK: }
 define void @test60b() {
   %t = load i8*, i8** @constptr
   %s = load i8*, i8** @something
-  call i8* @objc_retain(i8* %t)
-  call i8* @objc_retain(i8* %t)
+  call i8* @llvm.objc.retain(i8* %t)
+  call i8* @llvm.objc.retain(i8* %t)
   call void @callee()
   call void @use_pointer(i8* %s)
-  call void @objc_release(i8* %t)
+  call void @llvm.objc.release(i8* %t)
   ret void
 }
 
 ; CHECK-LABEL: define void @test60c(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test60c() {
   %t = load i8*, i8** @constptr
   %s = load i8*, i8** @something
-  call i8* @objc_retain(i8* %t)
+  call i8* @llvm.objc.retain(i8* %t)
   call void @callee()
   call void @use_pointer(i8* %s)
-  call void @objc_release(i8* %t), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %t), !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK-LABEL: define void @test60d(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test60d() {
   %t = load i8*, i8** @constptr
   %s = load i8*, i8** @something
-  call i8* @objc_retain(i8* %t)
+  call i8* @llvm.objc.retain(i8* %t)
   call void @callee()
   call void @use_pointer(i8* %s)
-  call void @objc_release(i8* %t)
+  call void @llvm.objc.release(i8* %t)
   ret void
 }
 
 ; CHECK-LABEL: define void @test60e(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test60e() {
   %t = load i8*, i8** @constptr
   %s = load i8*, i8** @something
-  call i8* @objc_retain(i8* %t)
+  call i8* @llvm.objc.retain(i8* %t)
   call void @callee()
   call void @use_pointer(i8* %s)
-  call void @objc_release(i8* %t), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %t), !clang.imprecise_release !0
   ret void
 }
 
@@ -2435,14 +2435,14 @@
 ; pointers.
 
 ; CHECK-LABEL: define void @test61(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test61() {
   %t = load i8*, i8** @constptr
-  call i8* @objc_retain(i8* %t)
+  call i8* @llvm.objc.retain(i8* %t)
   call void @callee()
   call void @use_pointer(i8* %t)
-  call void @objc_release(i8* %t)
+  call void @llvm.objc.release(i8* %t)
   ret void
 }
 
@@ -2450,23 +2450,23 @@
 ; other is outside the loop.
 
 ; CHECK-LABEL: define void @test62(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test62(i8* %x, i1* %p) nounwind {
 entry:
   br label %loop
 
 loop:
-  call i8* @objc_retain(i8* %x)
+  call i8* @llvm.objc.retain(i8* %x)
   %q = load i1, i1* %p
   br i1 %q, label %loop.more, label %exit
 
 loop.more:
-  call void @objc_release(i8* %x)
+  call void @llvm.objc.release(i8* %x)
   br label %loop
 
 exit:
-  call void @objc_release(i8* %x)
+  call void @llvm.objc.release(i8* %x)
   ret void
 }
 
@@ -2475,21 +2475,21 @@
 
 ; CHECK-LABEL: define void @test63(
 ; CHECK: loop:
-; CHECK:   tail call i8* @objc_retain(i8* %x)
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %x)
 ; CHECK: loop.more:
-; CHECK:   call void @objc_release(i8* %x)
+; CHECK:   call void @llvm.objc.release(i8* %x)
 ; CHECK: }
 define void @test63(i8* %x, i1* %p) nounwind {
 entry:
   br label %loop
 
 loop:
-  call i8* @objc_retain(i8* %x)
+  call i8* @llvm.objc.retain(i8* %x)
   %q = load i1, i1* %p
   br i1 %q, label %loop.more, label %exit
 
 loop.more:
-  call void @objc_release(i8* %x)
+  call void @llvm.objc.release(i8* %x)
   br label %loop
 
 exit:
@@ -2501,16 +2501,16 @@
 
 ; CHECK-LABEL: define void @test64(
 ; CHECK: loop:
-; CHECK:   tail call i8* @objc_retain(i8* %x)
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %x)
 ; CHECK: exit:
-; CHECK:   call void @objc_release(i8* %x)
+; CHECK:   call void @llvm.objc.release(i8* %x)
 ; CHECK: }
 define void @test64(i8* %x, i1* %p) nounwind {
 entry:
   br label %loop
 
 loop:
-  call i8* @objc_retain(i8* %x)
+  call i8* @llvm.objc.retain(i8* %x)
   %q = load i1, i1* %p
   br i1 %q, label %loop.more, label %exit
 
@@ -2518,7 +2518,7 @@
   br label %loop
 
 exit:
-  call void @objc_release(i8* %x)
+  call void @llvm.objc.release(i8* %x)
   ret void
 }
 
@@ -2526,9 +2526,9 @@
 
 ; CHECK-LABEL: define i8* @test65(
 ; CHECK: if.then:
-; CHECK:   call i8* @objc_autorelease(
+; CHECK:   call i8* @llvm.objc.autorelease(
 ; CHECK: return:
-; CHECK-NOT: @objc_autorelease
+; CHECK-NOT: @llvm.objc.autorelease
 ; CHECK: }
 define i8* @test65(i1 %x) {
 entry:
@@ -2536,12 +2536,12 @@
 
 if.then:                                          ; preds = %entry
   %c = call i8* @returner()
-  %s = call i8* @objc_retainAutoreleasedReturnValue(i8* %c) nounwind
+  %s = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %c) nounwind
   br label %return
 
 return:                                           ; preds = %if.then, %entry
   %retval = phi i8* [ %s, %if.then ], [ null, %entry ]
-  %q = call i8* @objc_autorelease(i8* %retval) nounwind
+  %q = call i8* @llvm.objc.autorelease(i8* %retval) nounwind
   ret i8* %retval
 }
 
@@ -2549,24 +2549,24 @@
 
 ; CHECK-LABEL: define i8* @test65b(
 ; CHECK: if.then:
-; CHECK-NOT: @objc_autorelease
+; CHECK-NOT: @llvm.objc.autorelease
 ; CHECK: return:
-; CHECK:   call i8* @objc_autorelease(
+; CHECK:   call i8* @llvm.objc.autorelease(
 ; CHECK: }
 define i8* @test65b(i1 %x) {
 entry:
-  %t = call i8* @objc_autoreleasePoolPush()
+  %t = call i8* @llvm.objc.autoreleasePoolPush()
   br i1 %x, label %return, label %if.then
 
 if.then:                                          ; preds = %entry
   %c = call i8* @returner()
-  %s = call i8* @objc_retainAutoreleasedReturnValue(i8* %c) nounwind
+  %s = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %c) nounwind
   br label %return
 
 return:                                           ; preds = %if.then, %entry
   %retval = phi i8* [ %s, %if.then ], [ null, %entry ]
-  call void @objc_autoreleasePoolPop(i8* %t)
-  %q = call i8* @objc_autorelease(i8* %retval) nounwind
+  call void @llvm.objc.autoreleasePoolPop(i8* %t)
+  %q = call i8* @llvm.objc.autorelease(i8* %retval) nounwind
   ret i8* %retval
 }
 
@@ -2575,9 +2575,9 @@
 
 ; CHECK-LABEL: define i8* @test65c(
 ; CHECK: if.then:
-; CHECK-NOT: @objc_autorelease
+; CHECK-NOT: @llvm.objc.autorelease
 ; CHECK: return:
-; CHECK:   call i8* @objc_autoreleaseReturnValue(
+; CHECK:   call i8* @llvm.objc.autoreleaseReturnValue(
 ; CHECK: }
 define i8* @test65c(i1 %x) {
 entry:
@@ -2585,20 +2585,20 @@
 
 if.then:                                          ; preds = %entry
   %c = call i8* @returner()
-  %s = call i8* @objc_retainAutoreleasedReturnValue(i8* %c) nounwind
+  %s = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %c) nounwind
   br label %return
 
 return:                                           ; preds = %if.then, %entry
   %retval = phi i8* [ %s, %if.then ], [ null, %entry ]
-  %q = call i8* @objc_autoreleaseReturnValue(i8* %retval) nounwind
+  %q = call i8* @llvm.objc.autoreleaseReturnValue(i8* %retval) nounwind
   ret i8* %retval
 }
 
 ; CHECK-LABEL: define i8* @test65d(
 ; CHECK: if.then:
-; CHECK-NOT: @objc_autorelease
+; CHECK-NOT: @llvm.objc.autorelease
 ; CHECK: return:
-; CHECK:   call i8* @objc_autoreleaseReturnValue(
+; CHECK:   call i8* @llvm.objc.autoreleaseReturnValue(
 ; CHECK: }
 define i8* @test65d(i1 %x) {
 entry:
@@ -2606,23 +2606,23 @@
 
 if.then:                                          ; preds = %entry
   %c = call i8* @returner()
-  %s = call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %c) nounwind
+  %s = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %c) nounwind
   br label %return
 
 return:                                           ; preds = %if.then, %entry
   %retval = phi i8* [ %s, %if.then ], [ null, %entry ]
-  %q = call i8* @objc_autoreleaseReturnValue(i8* %retval) nounwind
+  %q = call i8* @llvm.objc.autoreleaseReturnValue(i8* %retval) nounwind
   ret i8* %retval
 }
 
-; An objc_retain can serve as a may-use for a different pointer.
+; An llvm.objc.retain can serve as a may-use for a different pointer.
 ; rdar://11931823
 
 ; CHECK-LABEL: define void @test66a(
-; CHECK:   tail call i8* @objc_retain(i8* %cond) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %call) [[NUW]]
-; CHECK:   tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %cond) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %cond) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %call) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %tmp8) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %cond) [[NUW]]
 ; CHECK: }
 define void @test66a(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
 entry:
@@ -2633,19 +2633,19 @@
 
 cond.end:                                         ; preds = %cond.true, %entry
   %cond = phi i8* [ %tmp5, %cond.true ], [ %call, %entry ]
-  %tmp7 = tail call i8* @objc_retain(i8* %cond) nounwind
-  tail call void @objc_release(i8* %call) nounwind
+  %tmp7 = tail call i8* @llvm.objc.retain(i8* %cond) nounwind
+  tail call void @llvm.objc.release(i8* %call) nounwind
   %tmp8 = select i1 %tobool1, i8* %cond, i8* %bar
-  %tmp9 = tail call i8* @objc_retain(i8* %tmp8) nounwind
-  tail call void @objc_release(i8* %cond) nounwind
+  %tmp9 = tail call i8* @llvm.objc.retain(i8* %tmp8) nounwind
+  tail call void @llvm.objc.release(i8* %cond) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test66b(
-; CHECK:   tail call i8* @objc_retain(i8* %cond) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %call) [[NUW]]
-; CHECK:   tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %cond) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %cond) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %call) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %tmp8) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %cond) [[NUW]]
 ; CHECK: }
 define void @test66b(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
 entry:
@@ -2656,19 +2656,19 @@
 
 cond.end:                                         ; preds = %cond.true, %entry
   %cond = phi i8* [ %tmp5, %cond.true ], [ %call, %entry ]
-  %tmp7 = tail call i8* @objc_retain(i8* %cond) nounwind
-  tail call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+  %tmp7 = tail call i8* @llvm.objc.retain(i8* %cond) nounwind
+  tail call void @llvm.objc.release(i8* %call) nounwind, !clang.imprecise_release !0
   %tmp8 = select i1 %tobool1, i8* %cond, i8* %bar
-  %tmp9 = tail call i8* @objc_retain(i8* %tmp8) nounwind
-  tail call void @objc_release(i8* %cond) nounwind
+  %tmp9 = tail call i8* @llvm.objc.retain(i8* %tmp8) nounwind
+  tail call void @llvm.objc.release(i8* %cond) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test66c(
-; CHECK:   tail call i8* @objc_retain(i8* %cond) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %call) [[NUW]]
-; CHECK:   tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %cond) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %cond) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %call) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %tmp8) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %cond) [[NUW]]
 ; CHECK: }
 define void @test66c(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
 entry:
@@ -2679,19 +2679,19 @@
 
 cond.end:                                         ; preds = %cond.true, %entry
   %cond = phi i8* [ %tmp5, %cond.true ], [ %call, %entry ]
-  %tmp7 = tail call i8* @objc_retain(i8* %cond) nounwind
-  tail call void @objc_release(i8* %call) nounwind
+  %tmp7 = tail call i8* @llvm.objc.retain(i8* %cond) nounwind
+  tail call void @llvm.objc.release(i8* %call) nounwind
   %tmp8 = select i1 %tobool1, i8* %cond, i8* %bar
-  %tmp9 = tail call i8* @objc_retain(i8* %tmp8) nounwind, !clang.imprecise_release !0
-  tail call void @objc_release(i8* %cond) nounwind
+  %tmp9 = tail call i8* @llvm.objc.retain(i8* %tmp8) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %cond) nounwind
   ret void
 }
 
 ; CHECK-LABEL: define void @test66d(
-; CHECK:   tail call i8* @objc_retain(i8* %cond) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %call) [[NUW]]
-; CHECK:   tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
-; CHECK:   tail call void @objc_release(i8* %cond) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %cond) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %call) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %tmp8) [[NUW]]
+; CHECK:   tail call void @llvm.objc.release(i8* %cond) [[NUW]]
 ; CHECK: }
 define void @test66d(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
 entry:
@@ -2702,11 +2702,11 @@
 
 cond.end:                                         ; preds = %cond.true, %entry
   %cond = phi i8* [ %tmp5, %cond.true ], [ %call, %entry ]
-  %tmp7 = tail call i8* @objc_retain(i8* %cond) nounwind
-  tail call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+  %tmp7 = tail call i8* @llvm.objc.retain(i8* %cond) nounwind
+  tail call void @llvm.objc.release(i8* %call) nounwind, !clang.imprecise_release !0
   %tmp8 = select i1 %tobool1, i8* %cond, i8* %bar
-  %tmp9 = tail call i8* @objc_retain(i8* %tmp8) nounwind
-  tail call void @objc_release(i8* %cond) nounwind, !clang.imprecise_release !0
+  %tmp9 = tail call i8* @llvm.objc.retain(i8* %tmp8) nounwind
+  tail call void @llvm.objc.release(i8* %cond) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -2719,13 +2719,13 @@
 @str = internal constant [16 x i8] c"-[ Top0 _getX ]\00"
 
 ; CHECK: define { <2 x float>, <2 x float> } @"\01-[A z]"({}* %self, i8* nocapture %_cmd) [[NUW]] {
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 
 define {<2 x float>, <2 x float>} @"\01-[A z]"({}* %self, i8* nocapture %_cmd) nounwind {
 invoke.cont:
   %0 = bitcast {}* %self to i8*
-  %1 = tail call i8* @objc_retain(i8* %0) nounwind
+  %1 = tail call i8* @llvm.objc.retain(i8* %0) nounwind
   tail call void @llvm.dbg.value(metadata {}* %self, metadata !DILocalVariable(scope: !2), metadata !DIExpression()), !dbg !DILocation(scope: !2)
   tail call void @llvm.dbg.value(metadata {}* %self, metadata !DILocalVariable(scope: !2), metadata !DIExpression()), !dbg !DILocation(scope: !2)
   %ivar = load i64, i64* @"OBJC_IVAR_$_A.myZ", align 8
@@ -2753,7 +2753,7 @@
   %add.ptr24 = getelementptr i8, i8* %0, i64 %ivar23
   %4 = bitcast i8* %add.ptr24 to i128*
   %srcval = load i128, i128* %4, align 4
-  tail call void @objc_release(i8* %0) nounwind
+  tail call void @llvm.objc.release(i8* %0) nounwind
   %tmp29 = trunc i128 %srcval to i64
   %tmp30 = bitcast i64 %tmp29 to <2 x float>
   %tmp31 = insertvalue {<2 x float>, <2 x float>} undef, <2 x float> %tmp30, 0
@@ -2765,15 +2765,15 @@
 }
 
 ; CHECK: @"\01-[Top0 _getX]"({}* %self, i8* nocapture %_cmd) [[NUW]] {
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 
 define i32 @"\01-[Top0 _getX]"({}* %self, i8* nocapture %_cmd) nounwind {
 invoke.cont:
   %0 = bitcast {}* %self to i8*
-  %1 = tail call i8* @objc_retain(i8* %0) nounwind
+  %1 = tail call i8* @llvm.objc.retain(i8* %0) nounwind
   %puts = tail call i32 @puts(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @str, i64 0, i64 0))
-  tail call void @objc_release(i8* %0) nounwind
+  tail call void @llvm.objc.release(i8* %0) nounwind
   ret i32 0
 }
 
@@ -2785,36 +2785,36 @@
 
 ; CHECK: define void @loop(i8* %x, i64 %n) {
 ; CHECK: for.body:
-; CHECK-NOT: @objc_
-; CHECK: @objc_msgSend
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
+; CHECK: @llvm.objc.msgSend
+; CHECK-NOT: @llvm.objc.
 ; CHECK: for.end:
 ; CHECK: }
 define void @loop(i8* %x, i64 %n) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %x) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %x) nounwind
   %cmp9 = icmp sgt i64 %n, 0
   br i1 %cmp9, label %for.body, label %for.end
 
 for.body:                                         ; preds = %entry, %for.body
   %i.010 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
-  %1 = tail call i8* @objc_retain(i8* %x) nounwind
+  %1 = tail call i8* @llvm.objc.retain(i8* %x) nounwind
   %tmp5 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call = tail call i8* (i8*, i8*, ...) @objc_msgSend(i8* %1, i8* %tmp5)
-  tail call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
+  %call = tail call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %1, i8* %tmp5)
+  tail call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
   %inc = add nsw i64 %i.010, 1
   %exitcond = icmp eq i64 %inc, %n
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body, %entry
-  tail call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; ObjCARCOpt can delete the retain,release on self.
 
 ; CHECK: define void @TextEditTest(%2* %self, %3* %pboard) {
-; CHECK-NOT: call i8* @objc_retain(i8* %tmp7)
+; CHECK-NOT: call i8* @llvm.objc.retain(i8* %tmp7)
 ; CHECK: }
 
 %0 = type { i8* (i8*, %struct._message_ref_t*, ...)*, i8* }
@@ -2873,34 +2873,34 @@
 entry:
   %err = alloca %4*, align 8
   %tmp7 = bitcast %2* %self to i8*
-  %tmp8 = call i8* @objc_retain(i8* %tmp7) nounwind
+  %tmp8 = call i8* @llvm.objc.retain(i8* %tmp7) nounwind
   store %4* null, %4** %err, align 8
   %tmp1 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_17", align 8
   %tmp2 = load %struct.__CFString*, %struct.__CFString** @kUTTypePlainText, align 8
   %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_19", align 8
   %tmp4 = bitcast %struct._class_t* %tmp1 to i8*
-  %call5 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp4, i8* %tmp3, %struct.__CFString* %tmp2)
+  %call5 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp4, i8* %tmp3, %struct.__CFString* %tmp2)
   %tmp5 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_21", align 8
   %tmp6 = bitcast %3* %pboard to i8*
-  %call76 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp6, i8* %tmp5, i8* %call5)
-  %tmp9 = call i8* @objc_retain(i8* %call76) nounwind
+  %call76 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp6, i8* %tmp5, i8* %call5)
+  %tmp9 = call i8* @llvm.objc.retain(i8* %call76) nounwind
   %tobool = icmp eq i8* %tmp9, null
   br i1 %tobool, label %end, label %land.lhs.true
 
 land.lhs.true:                                    ; preds = %entry
   %tmp11 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_23", align 8
-  %call137 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp6, i8* %tmp11, i8* %tmp9)
+  %call137 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp6, i8* %tmp11, i8* %tmp9)
   %tmp = bitcast i8* %call137 to %1*
-  %tmp10 = call i8* @objc_retain(i8* %call137) nounwind
-  call void @objc_release(i8* null) nounwind
-  %tmp12 = call i8* @objc_retain(i8* %call137) nounwind
-  call void @objc_release(i8* null) nounwind
+  %tmp10 = call i8* @llvm.objc.retain(i8* %call137) nounwind
+  call void @llvm.objc.release(i8* null) nounwind
+  %tmp12 = call i8* @llvm.objc.retain(i8* %call137) nounwind
+  call void @llvm.objc.release(i8* null) nounwind
   %tobool16 = icmp eq i8* %call137, null
   br i1 %tobool16, label %end, label %if.then
 
 if.then:                                          ; preds = %land.lhs.true
   %tmp19 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_25", align 8
-  %call21 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %call137, i8* %tmp19)
+  %call21 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*)*)(i8* %call137, i8* %tmp19)
   %tobool22 = icmp eq i8 %call21, 0
   br i1 %tobool22, label %if.then44, label %land.lhs.true23
 
@@ -2908,10 +2908,10 @@
   %tmp24 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_26", align 8
   %tmp26 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_28", align 8
   %tmp27 = bitcast %struct._class_t* %tmp24 to i8*
-  %call2822 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp27, i8* %tmp26, i8* %call137)
+  %call2822 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp27, i8* %tmp26, i8* %call137)
   %tmp13 = bitcast i8* %call2822 to %5*
-  %tmp14 = call i8* @objc_retain(i8* %call2822) nounwind
-  call void @objc_release(i8* null) nounwind
+  %tmp14 = call i8* @llvm.objc.retain(i8* %call2822) nounwind
+  call void @llvm.objc.release(i8* null) nounwind
   %tobool30 = icmp eq i8* %call2822, null
   br i1 %tobool30, label %if.then44, label %if.end
 
@@ -2919,38 +2919,38 @@
   %tmp32 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_29", align 8
   %tmp33 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_31", align 8
   %tmp34 = bitcast %struct._class_t* %tmp32 to i8*
-  %call35 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp34, i8* %tmp33)
+  %call35 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp34, i8* %tmp33)
   %tmp37 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_33", align 8
-  %call3923 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %call35, i8* %tmp37, i8* %call2822, i32 signext 1, %4** %err)
+  %call3923 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %call35, i8* %tmp37, i8* %call2822, i32 signext 1, %4** %err)
   %cmp = icmp eq i8* %call3923, null
   br i1 %cmp, label %if.then44, label %end
 
 if.then44:                                        ; preds = %if.end, %land.lhs.true23, %if.then
   %url.025 = phi %5* [ %tmp13, %if.end ], [ %tmp13, %land.lhs.true23 ], [ null, %if.then ]
   %tmp49 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_35", align 8
-  %call51 = call %struct._NSRange bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %struct._NSRange (i8*, i8*, i64, i64)*)(i8* %call137, i8* %tmp49, i64 0, i64 0)
+  %call51 = call %struct._NSRange bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %struct._NSRange (i8*, i8*, i64, i64)*)(i8* %call137, i8* %tmp49, i64 0, i64 0)
   %call513 = extractvalue %struct._NSRange %call51, 0
   %call514 = extractvalue %struct._NSRange %call51, 1
   %tmp52 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_37", align 8
-  %call548 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %call137, i8* %tmp52, i64 %call513, i64 %call514)
+  %call548 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %call137, i8* %tmp52, i64 %call513, i64 %call514)
   %tmp55 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_38", align 8
   %tmp56 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_40", align 8
   %tmp57 = bitcast %struct._class_t* %tmp55 to i8*
-  %call58 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp57, i8* %tmp56)
+  %call58 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp57, i8* %tmp56)
   %tmp59 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_42", align 8
-  %call6110 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %call548, i8* %tmp59, i8* %call58)
-  %tmp15 = call i8* @objc_retain(i8* %call6110) nounwind
-  call void @objc_release(i8* %call137) nounwind
+  %call6110 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %call548, i8* %tmp59, i8* %call58)
+  %tmp15 = call i8* @llvm.objc.retain(i8* %call6110) nounwind
+  call void @llvm.objc.release(i8* %call137) nounwind
   %tmp64 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_46", align 8
-  %call66 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, %1*)*)(i8* %call6110, i8* %tmp64, %1* bitcast (%struct.NSConstantString* @_unnamed_cfstring_44 to %1*))
+  %call66 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, %1*)*)(i8* %call6110, i8* %tmp64, %1* bitcast (%struct.NSConstantString* @_unnamed_cfstring_44 to %1*))
   %tobool67 = icmp eq i8 %call66, 0
   br i1 %tobool67, label %if.end74, label %if.then68
 
 if.then68:                                        ; preds = %if.then44
   %tmp70 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_48", align 8
-  %call7220 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %call6110, i8* %tmp70)
-  %tmp16 = call i8* @objc_retain(i8* %call7220) nounwind
-  call void @objc_release(i8* %call6110) nounwind
+  %call7220 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %call6110, i8* %tmp70)
+  %tmp16 = call i8* @llvm.objc.retain(i8* %call7220) nounwind
+  call void @llvm.objc.release(i8* %call6110) nounwind
   br label %if.end74
 
 if.end74:                                         ; preds = %if.then68, %if.then44
@@ -2964,7 +2964,7 @@
 
 land.lhs.true80:                                  ; preds = %if.end74
   %tmp82 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_25", align 8
-  %call84 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %filename.0.in, i8* %tmp82)
+  %call84 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*)*)(i8* %filename.0.in, i8* %tmp82)
   %tobool86 = icmp eq i8 %call84, 0
   br i1 %tobool86, label %if.then109, label %if.end106
 
@@ -2972,17 +2972,17 @@
   %tmp88 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_26", align 8
   %tmp90 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_28", align 8
   %tmp91 = bitcast %struct._class_t* %tmp88 to i8*
-  %call9218 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp91, i8* %tmp90, i8* %filename.0.in)
+  %call9218 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp91, i8* %tmp90, i8* %filename.0.in)
   %tmp20 = bitcast i8* %call9218 to %5*
-  %tmp21 = call i8* @objc_retain(i8* %call9218) nounwind
+  %tmp21 = call i8* @llvm.objc.retain(i8* %call9218) nounwind
   %tmp22 = bitcast %5* %url.025 to i8*
-  call void @objc_release(i8* %tmp22) nounwind
+  call void @llvm.objc.release(i8* %tmp22) nounwind
   %tmp94 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_29", align 8
   %tmp95 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_31", align 8
   %tmp96 = bitcast %struct._class_t* %tmp94 to i8*
-  %call97 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp96, i8* %tmp95)
+  %call97 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp96, i8* %tmp95)
   %tmp99 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_33", align 8
-  %call10119 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %call97, i8* %tmp99, i8* %call9218, i32 signext 1, %4** %err)
+  %call10119 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %call97, i8* %tmp99, i8* %call9218, i32 signext 1, %4** %err)
   %phitmp = icmp eq i8* %call10119, null
   br i1 %phitmp, label %if.then109, label %end
 
@@ -3000,12 +3000,12 @@
   %tmp118 = load %1*, %1** @NSFilePathErrorKey, align 8
   %tmp119 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_53", align 8
   %tmp120 = bitcast %struct._class_t* %tmp115 to i8*
-  %call12113 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp120, i8* %tmp119, %1* %call117, %1* %tmp118, i8* null)
+  %call12113 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp120, i8* %tmp119, %1* %call117, %1* %tmp118, i8* null)
   %tmp122 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_55", align 8
   %tmp123 = bitcast %struct._class_t* %tmp113 to i8*
-  %call12414 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp123, i8* %tmp122, %1* %tmp114, i64 258, i8* %call12113)
-  %tmp23 = call i8* @objc_retain(i8* %call12414) nounwind
-  %tmp25 = call i8* @objc_autorelease(i8* %tmp23) nounwind
+  %call12414 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp123, i8* %tmp122, %1* %tmp114, i64 258, i8* %call12113)
+  %tmp23 = call i8* @llvm.objc.retain(i8* %call12414) nounwind
+  %tmp25 = call i8* @llvm.objc.autorelease(i8* %tmp23) nounwind
   %tmp28 = bitcast i8* %tmp25 to %4*
   store %4* %tmp28, %4** %err, align 8
   br label %if.end125
@@ -3015,44 +3015,44 @@
   %tmp126 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_56", align 8
   %tmp128 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_58", align 8
   %tmp129 = bitcast %struct._class_t* %tmp126 to i8*
-  %call13015 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %tmp129, i8* %tmp128, %4* %tmp127)
+  %call13015 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %tmp129, i8* %tmp128, %4* %tmp127)
   %tmp131 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_60", align 8
-  %call13317 = call i8* (i8*, i8*, ...) @objc_msgSend(i8* %call13015, i8* %tmp131)
+  %call13317 = call i8* (i8*, i8*, ...) @llvm.objc.msgSend(i8* %call13015, i8* %tmp131)
   br label %end
 
 end:                                              ; preds = %if.end125, %if.end106, %if.end, %land.lhs.true, %entry
   %filename.2 = phi %1* [ %filename.0, %if.end106 ], [ %filename.0, %if.end125 ], [ %tmp, %land.lhs.true ], [ null, %entry ], [ %tmp, %if.end ]
   %origFilename.0 = phi %1* [ %tmp, %if.end106 ], [ %tmp, %if.end125 ], [ %tmp, %land.lhs.true ], [ null, %entry ], [ %tmp, %if.end ]
   %url.2 = phi %5* [ %tmp20, %if.end106 ], [ %url.129, %if.end125 ], [ null, %land.lhs.true ], [ null, %entry ], [ %tmp13, %if.end ]
-  call void @objc_release(i8* %tmp9) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp9) nounwind, !clang.imprecise_release !0
   %tmp29 = bitcast %5* %url.2 to i8*
-  call void @objc_release(i8* %tmp29) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp29) nounwind, !clang.imprecise_release !0
   %tmp30 = bitcast %1* %origFilename.0 to i8*
-  call void @objc_release(i8* %tmp30) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp30) nounwind, !clang.imprecise_release !0
   %tmp31 = bitcast %1* %filename.2 to i8*
-  call void @objc_release(i8* %tmp31) nounwind, !clang.imprecise_release !0
-  call void @objc_release(i8* %tmp7) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp31) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp7) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i32 @objc_sync_enter(i8*)
-declare i32 @objc_sync_exit(i8*)
+declare i32 @llvm.objc.sync.enter(i8*)
+declare i32 @llvm.objc.sync.exit(i8*)
 
 ; Make sure that we understand that objc_sync_{enter,exit} are IC_User not
 ; IC_Call/IC_CallOrUser.
 
 ; CHECK-LABEL:      define void @test67(
-; CHECK-NEXT:   call i32 @objc_sync_enter(i8* %x)
-; CHECK-NEXT:   call i32 @objc_sync_exit(i8* %x)
+; CHECK-NEXT:   call i32 @llvm.objc.sync.enter(i8* %x)
+; CHECK-NEXT:   call i32 @llvm.objc.sync.exit(i8* %x)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test67(i8* %x) {
-  call i8* @objc_retain(i8* %x)
-  call i32 @objc_sync_enter(i8* %x)
-  call i32 @objc_sync_exit(i8* %x)
-  call void @objc_release(i8* %x), !clang.imprecise_release !0
+  call i8* @llvm.objc.retain(i8* %x)
+  call i32 @llvm.objc.sync.enter(i8* %x)
+  call i32 @llvm.objc.sync.exit(i8* %x)
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
   ret void
 }
 
@@ -3069,6 +3069,6 @@
 !4 = !DIFile(filename: "path/to/file", directory: "/path/to/dir")
 !5 = !{i32 2, !"Debug Info Version", i32 3}
 
-; CHECK: attributes #0 = { nounwind readnone speculatable }
 ; CHECK: attributes [[NUW]] = { nounwind }
+; CHECK: attributes #1 = { nounwind readnone speculatable }
 ; CHECK: ![[RELEASE]] = !{}
diff --git a/test/Transforms/ObjCARC/cfg-hazards.ll b/test/Transforms/ObjCARC/cfg-hazards.ll
index 8407e44..9559b3c 100644
--- a/test/Transforms/ObjCARC/cfg-hazards.ll
+++ b/test/Transforms/ObjCARC/cfg-hazards.ll
@@ -5,21 +5,21 @@
 ; across them.
 
 declare void @use_pointer(i8*)
-declare i8* @objc_retain(i8*)
-declare void @objc_release(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare void @llvm.objc.release(i8*)
 declare void @callee()
 declare void @block_callee(void ()*)
 
 ; CHECK-LABEL: define void @test0(
-; CHECK:   call i8* @objc_retain(
+; CHECK:   call i8* @llvm.objc.retain(
 ; CHECK: for.body:
 ; CHECK-NOT: @objc
 ; CHECK: for.end:
-; CHECK:   call void @objc_release(
+; CHECK:   call void @llvm.objc.release(
 ; CHECK: }
 define void @test0(i8* %digits) {
 entry:
-  %tmp1 = call i8* @objc_retain(i8* %digits) nounwind
+  %tmp1 = call i8* @llvm.objc.retain(i8* %digits) nounwind
   call void @use_pointer(i8* %digits)
   br label %for.body
 
@@ -31,20 +31,20 @@
   br i1 %cmp, label %for.body, label %for.end
 
 for.end:                                          ; preds = %for.body
-  call void @objc_release(i8* %digits) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %digits) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK-LABEL: define void @test1(
-; CHECK:   call i8* @objc_retain(
+; CHECK:   call i8* @llvm.objc.retain(
 ; CHECK: for.body:
 ; CHECK-NOT: @objc
 ; CHECK: for.end:
-; CHECK:   void @objc_release(
+; CHECK:   void @llvm.objc.release(
 ; CHECK: }
 define void @test1(i8* %digits) {
 entry:
-  %tmp1 = call i8* @objc_retain(i8* %digits) nounwind
+  %tmp1 = call i8* @llvm.objc.retain(i8* %digits) nounwind
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
@@ -56,20 +56,20 @@
   br i1 %cmp, label %for.body, label %for.end
 
 for.end:                                          ; preds = %for.body
-  call void @objc_release(i8* %digits) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %digits) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; CHECK-LABEL: define void @test2(
-; CHECK:   call i8* @objc_retain(
+; CHECK:   call i8* @llvm.objc.retain(
 ; CHECK: for.body:
 ; CHECK-NOT: @objc
 ; CHECK: for.end:
-; CHECK:   void @objc_release(
+; CHECK:   void @llvm.objc.release(
 ; CHECK: }
 define void @test2(i8* %digits) {
 entry:
-  %tmp1 = call i8* @objc_retain(i8* %digits) nounwind
+  %tmp1 = call i8* @llvm.objc.retain(i8* %digits) nounwind
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
@@ -81,7 +81,7 @@
 
 for.end:                                          ; preds = %for.body
   call void @use_pointer(i8* %digits)
-  call void @objc_release(i8* %digits) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %digits) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -89,17 +89,17 @@
 
 ;      CHECK: define void @test3(i8* %a) #0 {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %a) [[NUW:#[0-9]+]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW:#[0-9]+]]
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
-; CHECK-NEXT:   call void @objc_release(i8* %a)
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %a)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test3(i8* %a) nounwind {
 entry:
-  %outer = call i8* @objc_retain(i8* %a) nounwind
-  %inner = call i8* @objc_retain(i8* %a) nounwind
+  %outer = call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -108,24 +108,24 @@
   br i1 undef, label %loop, label %exit
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test4(i8* %a) #0 {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
-; CHECK-NEXT:   call void @objc_release(i8* %a)
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %a)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test4(i8* %a) nounwind {
 entry:
-  %outer = call i8* @objc_retain(i8* %a) nounwind
-  %inner = call i8* @objc_retain(i8* %a) nounwind
+  %outer = call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -138,26 +138,26 @@
   br i1 undef, label %loop, label %exit
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test5(i8* %a) #0 {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK-NEXT:   call void @callee()
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
 ; CHECK-NEXT:   call void @use_pointer(i8* %a)
-; CHECK-NEXT:   call void @objc_release(i8* %a)
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %a)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test5(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   call void @callee()
   br label %loop
 
@@ -172,25 +172,25 @@
 
 exit:
   call void @use_pointer(i8* %a)
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test6(i8* %a) #0 {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
 ; CHECK-NEXT:   call void @use_pointer(i8* %a)
-; CHECK-NEXT:   call void @objc_release(i8* %a)
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %a)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test6(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -205,25 +205,25 @@
 
 exit:
   call void @use_pointer(i8* %a)
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test7(i8* %a) #0 {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK-NEXT:   call void @callee()
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
-; CHECK-NEXT:   call void @objc_release(i8* %a)
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %a)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test7(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   call void @callee()
   br label %loop
 
@@ -238,24 +238,24 @@
   br i1 undef, label %exit, label %loop
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test8(i8* %a) #0 {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK-NEXT:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
-; CHECK-NEXT:   call void @objc_release(i8* %a)
+; CHECK-NEXT:   call void @llvm.objc.release(i8* %a)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test8(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -270,22 +270,22 @@
   br i1 undef, label %exit, label %loop
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test9(i8* %a) #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test9(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -299,22 +299,22 @@
   br i1 undef, label %exit, label %loop
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test10(i8* %a) #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test10(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -328,22 +328,22 @@
   br i1 undef, label %exit, label %loop
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ;      CHECK: define void @test11(i8* %a) #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test11(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -356,8 +356,8 @@
   br i1 undef, label %exit, label %loop
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -365,19 +365,19 @@
 
 ;      CHECK: define void @test12(i8* %a) #0 {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %outer = tail call i8* @objc_retain(i8* %a) [[NUW]]
-; CHECK-NEXT:   %inner = tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK-NEXT:   %outer = tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
+; CHECK-NEXT:   %inner = tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK-NEXT:   br label %loop
-;  CHECK-NOT:   @objc_
+;  CHECK-NOT:   @llvm.objc.
 ;      CHECK: exit:
-; CHECK-NEXT: call void @objc_release(i8* %a) [[NUW]]
-; CHECK-NEXT: call void @objc_release(i8* %a) [[NUW]], !clang.imprecise_release !0
+; CHECK-NEXT: call void @llvm.objc.release(i8* %a) [[NUW]]
+; CHECK-NEXT: call void @llvm.objc.release(i8* %a) [[NUW]], !clang.imprecise_release !0
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test12(i8* %a) nounwind {
 entry:
-  %outer = tail call i8* @objc_retain(i8* %a) nounwind
-  %inner = tail call i8* @objc_retain(i8* %a) nounwind
+  %outer = tail call i8* @llvm.objc.retain(i8* %a) nounwind
+  %inner = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
@@ -390,8 +390,8 @@
   br i1 undef, label %exit, label %loop
 
 exit:
-  call void @objc_release(i8* %a) nounwind
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -402,31 +402,31 @@
 
 ; CHECK: define void @test13(i8* %a) [[NUW]] {
 ; CHECK: entry:
-; CHECK:   tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK: loop:
-; CHECK:   tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK:   tail call i8* @llvm.objc.retain(i8* %a) [[NUW]]
 ; CHECK:   call void @block_callee
-; CHECK:   call void @objc_release(i8* %reloaded_a) [[NUW]]
+; CHECK:   call void @llvm.objc.release(i8* %reloaded_a) [[NUW]]
 ; CHECK: exit:
-; CHECK:   call void @objc_release(i8* %a) [[NUW]]
+; CHECK:   call void @llvm.objc.release(i8* %a) [[NUW]]
 ; CHECK: }
 define void @test13(i8* %a) nounwind {
 entry:
   %block = alloca i8*
-  %a1 = tail call i8* @objc_retain(i8* %a) nounwind
+  %a1 = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   br label %loop
 
 loop:
-  %a2 = tail call i8* @objc_retain(i8* %a) nounwind
+  %a2 = tail call i8* @llvm.objc.retain(i8* %a) nounwind
   store i8* %a, i8** %block, align 8
   %casted_block = bitcast i8** %block to void ()*
   call void @block_callee(void ()* %casted_block)
   %reloaded_a = load i8*, i8** %block, align 8
-  call void @objc_release(i8* %reloaded_a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %reloaded_a) nounwind, !clang.imprecise_release !0
   br i1 undef, label %loop, label %exit
   
 exit:
-  call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %a) nounwind, !clang.imprecise_release !0
   ret void
 }
 
diff --git a/test/Transforms/ObjCARC/clang-arc-use-barrier.ll b/test/Transforms/ObjCARC/clang-arc-use-barrier.ll
index 98d49ec..a00c117 100644
--- a/test/Transforms/ObjCARC/clang-arc-use-barrier.ll
+++ b/test/Transforms/ObjCARC/clang-arc-use-barrier.ll
@@ -2,36 +2,36 @@
 
 %0 = type opaque
 
-; Make sure ARC optimizer doesn't sink @obj_retain past @clang.arc.use.
+; Make sure ARC optimizer doesn't sink @llvm.objc.retain past @llvm.objc.clang.arc.use.
 
-; CHECK: call i8* @objc_retain(
-; CHECK: call void (...) @clang.arc.use(
-; CHECK: call i8* @objc_retain(
-; CHECK: call void (...) @clang.arc.use(
+; CHECK: call i8* @llvm.objc.retain(
+; CHECK: call void (...) @llvm.objc.clang.arc.use(
+; CHECK: call i8* @llvm.objc.retain(
+; CHECK: call void (...) @llvm.objc.clang.arc.use(
 
 define void @runTest() local_unnamed_addr {
   %1 = alloca %0*, align 8
   %2 = alloca %0*, align 8
   %3 = tail call %0* @foo0()
   %4 = bitcast %0* %3 to i8*
-  %5 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %4)
+  %5 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %4)
   store %0* %3, %0** %1, align 8
   call void @foo1(%0** nonnull %1)
   %6 = load %0*, %0** %1, align 8
   %7 = bitcast %0* %6 to i8*
-  %8 = call i8* @objc_retain(i8* %7)
-  call void (...) @clang.arc.use(%0* %3)
-  call void @objc_release(i8* %4)
+  %8 = call i8* @llvm.objc.retain(i8* %7)
+  call void (...) @llvm.objc.clang.arc.use(%0* %3)
+  call void @llvm.objc.release(i8* %4)
   store %0* %6, %0** %2, align 8
   call void @foo1(%0** nonnull %2)
   %9 = load %0*, %0** %2, align 8
   %10 = bitcast %0* %9 to i8*
-  %11 = call i8* @objc_retain(i8* %10)
-  call void (...) @clang.arc.use(%0* %6)
+  %11 = call i8* @llvm.objc.retain(i8* %10)
+  call void (...) @llvm.objc.clang.arc.use(%0* %6)
   %tmp1 = load %0*, %0** %2, align 8
-  call void @objc_release(i8* %7)
+  call void @llvm.objc.release(i8* %7)
   call void @foo2(%0* %9)
-  call void @objc_release(i8* %10)
+  call void @llvm.objc.release(i8* %10)
   ret void
 }
 
@@ -39,7 +39,7 @@
 declare void @foo1(%0**) local_unnamed_addr
 declare void @foo2(%0*) local_unnamed_addr
 
-declare i8* @objc_retainAutoreleasedReturnValue(i8*) local_unnamed_addr
-declare i8* @objc_retain(i8*) local_unnamed_addr
-declare void @clang.arc.use(...) local_unnamed_addr
-declare void @objc_release(i8*) local_unnamed_addr
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*) local_unnamed_addr
+declare i8* @llvm.objc.retain(i8*) local_unnamed_addr
+declare void @llvm.objc.clang.arc.use(...) local_unnamed_addr
+declare void @llvm.objc.release(i8*) local_unnamed_addr
diff --git a/test/Transforms/ObjCARC/comdat-ipo.ll b/test/Transforms/ObjCARC/comdat-ipo.ll
index 0a5713e..44d7b10 100644
--- a/test/Transforms/ObjCARC/comdat-ipo.ll
+++ b/test/Transforms/ObjCARC/comdat-ipo.ll
@@ -30,24 +30,24 @@
 ; CHECK-LABEL: define internal void @_GLOBAL__I_x() {
 define internal void @_GLOBAL__I_x() {
 entry:
-; CHECK:  call i8* @objc_autoreleasePoolPush()
+; CHECK:  call i8* @llvm.objc.autoreleasePoolPush()
 ; CHECK-NEXT:  call void @__cxx_global_var_init()
-; CHECK-NEXT:  call void @objc_autoreleasePoolPop(i8* %0)
+; CHECK-NEXT:  call void @llvm.objc.autoreleasePoolPop(i8* %0)
 ; CHECK-NEXT:  ret void
 
-  %0 = call i8* @objc_autoreleasePoolPush() nounwind
+  %0 = call i8* @llvm.objc.autoreleasePoolPush() nounwind
   call void @__cxx_global_var_init()
-  call void @objc_autoreleasePoolPop(i8* %0) nounwind
+  call void @llvm.objc.autoreleasePoolPop(i8* %0) nounwind
   ret void
 }
 
 define internal void @_GLOBAL__I_y() {
 entry:
-  %0 = call i8* @objc_autoreleasePoolPush() nounwind
+  %0 = call i8* @llvm.objc.autoreleasePoolPush() nounwind
   call void @__dxx_global_var_init()
-  call void @objc_autoreleasePoolPop(i8* %0) nounwind
+  call void @llvm.objc.autoreleasePoolPop(i8* %0) nounwind
   ret void
 }
 
-declare i8* @objc_autoreleasePoolPush()
-declare void @objc_autoreleasePoolPop(i8*)
+declare i8* @llvm.objc.autoreleasePoolPush()
+declare void @llvm.objc.autoreleasePoolPop(i8*)
diff --git a/test/Transforms/ObjCARC/contract-catchswitch.ll b/test/Transforms/ObjCARC/contract-catchswitch.ll
index 3f5dd93..90b6522 100644
--- a/test/Transforms/ObjCARC/contract-catchswitch.ll
+++ b/test/Transforms/ObjCARC/contract-catchswitch.ll
@@ -6,8 +6,8 @@
 %0 = type opaque
 
 declare i32 @__CxxFrameHandler3(...)
-declare dllimport void @objc_release(i8*) local_unnamed_addr
-declare dllimport i8* @objc_retain(i8* returned) local_unnamed_addr
+declare dllimport void @llvm.objc.release(i8*) local_unnamed_addr
+declare dllimport i8* @llvm.objc.retain(i8* returned) local_unnamed_addr
 
 @p = global i8* null, align 4
 
@@ -17,7 +17,7 @@
 entry:
   %tmp = load i8*, i8** @p, align 4
   %cast = bitcast i8* %tmp to %0*
-  %tmp1 = tail call i8* @objc_retain(i8* %tmp) #0
+  %tmp1 = tail call i8* @llvm.objc.retain(i8* %tmp) #0
   ; Split the basic block to ensure bitcast ends up in entry.split.
   br label %entry.split
 
@@ -43,8 +43,8 @@
 invoke.cont:
   %tmp6 = load i8*, i8** @p, align 4
   %cast1 = bitcast i8* %tmp6 to %0*
-  %tmp7 = tail call i8* @objc_retain(i8* %tmp6) #0
-  call void @objc_release(i8* %tmp) #0, !clang.imprecise_release !0
+  %tmp7 = tail call i8* @llvm.objc.retain(i8* %tmp6) #0
+  call void @llvm.objc.release(i8* %tmp) #0, !clang.imprecise_release !0
   ; Split the basic block to ensure bitcast ends up in invoke.cont.split.
   br label %invoke.cont.split
 
@@ -59,7 +59,7 @@
   %tmp8 = phi %0* [ %cast, %catch.dispatch1 ], [ %cast1, %invoke.cont.split ]
   %tmp9 = cleanuppad within none []
   %tmp10 = bitcast %0* %tmp8 to i8*
-  call void @objc_release(i8* %tmp10) #0 [ "funclet"(token %tmp9) ]
+  call void @llvm.objc.release(i8* %tmp10) #0 [ "funclet"(token %tmp9) ]
   cleanupret from %tmp9 unwind to caller
 }
 
diff --git a/test/Transforms/ObjCARC/contract-end-of-use-list.ll b/test/Transforms/ObjCARC/contract-end-of-use-list.ll
index a38cd8a..364d722 100644
--- a/test/Transforms/ObjCARC/contract-end-of-use-list.ll
+++ b/test/Transforms/ObjCARC/contract-end-of-use-list.ll
@@ -10,14 +10,14 @@
 define internal i8* @foo() {
 entry:
   %call = call i8* @bar()
-; CHECK: %retained1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
-  %retained1 = call i8* @objc_retain(i8* %call)
+; CHECK: %retained1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call)
+  %retained1 = call i8* @llvm.objc.retain(i8* %call)
   %isnull = icmp eq i8* %retained1, null
   br i1 %isnull, label %cleanup, label %if.end
 
 if.end:
-; CHECK: %retained2 = call i8* @objc_retain(i8* %retained1)
-  %retained2 = call i8* @objc_retain(i8* %retained1)
+; CHECK: %retained2 = call i8* @llvm.objc.retain(i8* %retained1)
+  %retained2 = call i8* @llvm.objc.retain(i8* %retained1)
   br label %cleanup
 
 cleanup:
@@ -27,4 +27,4 @@
 
 declare i8* @bar()
 
-declare extern_weak i8* @objc_retain(i8*)
+declare extern_weak i8* @llvm.objc.retain(i8*)
diff --git a/test/Transforms/ObjCARC/contract-marker-funclet.ll b/test/Transforms/ObjCARC/contract-marker-funclet.ll
index 4e116c4..462b24c 100644
--- a/test/Transforms/ObjCARC/contract-marker-funclet.ll
+++ b/test/Transforms/ObjCARC/contract-marker-funclet.ll
@@ -21,22 +21,22 @@
 catch:                                            ; preds = %catch.dispatch
   %1 = catchpad within %0 [i8* null, i32 64, i8* null]
   %call1 = call i8* @"\01?f@@YAPAUobjc_object@@XZ"() [ "funclet"(token %1) ]
-  %2 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call1) [ "funclet"(token %1) ]
-  call void @objc_release(i8* %2) [ "funclet"(token %1) ]
+  %2 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) [ "funclet"(token %1) ]
+  call void @llvm.objc.release(i8* %2) [ "funclet"(token %1) ]
   br label %catch.1
 
 catch.1:                                          ; preds = %catch
   %call2 = call i8* @"\01?f@@YAPAUobjc_object@@XZ"() [ "funclet"(token %1) ]
-  %3 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call2) [ "funclet"(token %1) ]
-  call void @objc_release(i8* %3) [ "funclet"(token %1) ]
+  %3 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call2) [ "funclet"(token %1) ]
+  call void @llvm.objc.release(i8* %3) [ "funclet"(token %1) ]
   catchret from %1 to label %catchret.dest
 
 catchret.dest:                                    ; preds = %catch.1
   ret void
 
 invoke.cont:                                      ; preds = %entry
-  %4 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
-  call void @objc_release(i8* %4)
+  %4 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call)
+  call void @llvm.objc.release(i8* %4)
   ret void
 }
 
@@ -44,9 +44,9 @@
 
 declare i32 @__CxxFrameHandler3(...)
 
-declare dllimport i8* @objc_retainAutoreleasedReturnValue(i8*)
+declare dllimport i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
 
-declare dllimport void @objc_release(i8*)
+declare dllimport void @llvm.objc.release(i8*)
 
 !clang.arc.retainAutoreleasedReturnValueMarker = !{!0}
 !0 = !{!"movl\09%ebp, %ebp\09\09// marker for objc_retainAutoreleaseReturnValue"}
diff --git a/test/Transforms/ObjCARC/contract-marker.ll b/test/Transforms/ObjCARC/contract-marker.ll
index bf70d4e..6dc93fe 100644
--- a/test/Transforms/ObjCARC/contract-marker.ll
+++ b/test/Transforms/ObjCARC/contract-marker.ll
@@ -4,14 +4,14 @@
 ; CHECK:      %call = tail call i32* @qux()
 ; CHECK-NEXT: %tcall = bitcast i32* %call to i8*
 ; CHECK-NEXT: call void asm sideeffect "mov\09r7, r7\09\09@ marker for return value optimization", ""()
-; CHECK-NEXT: %0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %tcall) [[NUW:#[0-9]+]]
+; CHECK-NEXT: %0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %tcall) [[NUW:#[0-9]+]]
 ; CHECK: }
 
 define void @foo() {
 entry:
   %call = tail call i32* @qux()
   %tcall = bitcast i32* %call to i8*
-  %0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %tcall) nounwind
+  %0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %tcall) nounwind
   tail call void @bar(i8* %0)
   ret void
 }
@@ -20,22 +20,22 @@
 ; CHECK:      %call = tail call i32* @qux()
 ; CHECK-NEXT: %tcall = bitcast i32* %call to i8*
 ; CHECK-NEXT: call void asm sideeffect "mov\09r7, r7\09\09@ marker for return value optimization", ""()
-; CHECK-NEXT: %0 = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %tcall) [[NUW:#[0-9]+]]
+; CHECK-NEXT: %0 = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %tcall) [[NUW:#[0-9]+]]
 ; CHECK: }
 
 define void @foo2() {
 entry:
   %call = tail call i32* @qux()
   %tcall = bitcast i32* %call to i8*
-  %0 = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %tcall) nounwind
+  %0 = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %tcall) nounwind
   tail call void @bar(i8* %0)
   ret void
 }
 
 
 declare i32* @qux()
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
 declare void @bar(i8*)
 
 !clang.arc.retainAutoreleasedReturnValueMarker = !{!0}
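; Editor's sketch, illustration only, not part of the patch: the CHECK-NEXT
; lines above pin down how the marker metadata works. When the named module
; metadata !clang.arc.retainAutoreleasedReturnValueMarker carries an
; instruction string, -objc-arc-contract emits that string as sideeffect
; inline asm immediately before each retainAutoreleasedReturnValue or
; unsafeClaimAutoreleasedReturnValue call, so the expected output of @foo
; above has this shape:

declare i32* @qux()
declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
declare void @bar(i8*)

define void @foo_contracted() {
entry:
  %call = tail call i32* @qux()
  %tcall = bitcast i32* %call to i8*
  ; Inserted by the contraction pass from the module-level marker metadata:
  call void asm sideeffect "mov\09r7, r7\09\09@ marker for return value optimization", ""()
  %0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %tcall) nounwind
  tail call void @bar(i8* %0)
  ret void
}

!clang.arc.retainAutoreleasedReturnValueMarker = !{!0}
!0 = !{!"mov\09r7, r7\09\09@ marker for return value optimization"}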
diff --git a/test/Transforms/ObjCARC/contract-replace-arg-use.ll b/test/Transforms/ObjCARC/contract-replace-arg-use.ll
index 4cff9f7..28e2f6e 100644
--- a/test/Transforms/ObjCARC/contract-replace-arg-use.ll
+++ b/test/Transforms/ObjCARC/contract-replace-arg-use.ll
@@ -1,20 +1,20 @@
 ; RUN: opt -objc-arc-contract -S < %s | FileCheck %s
 
-declare i8* @objc_autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
 declare i8* @foo1()
 
 ; Check that ARC contraction replaces the function return with the value
-; returned by @objc_autoreleaseReturnValue.
+; returned by @llvm.objc.autoreleaseReturnValue.
 
 ; CHECK-LABEL: define i32* @autoreleaseRVTailCall(
-; CHECK: %[[V0:[0-9]+]] = tail call i8* @objc_autoreleaseReturnValue(
+; CHECK: %[[V0:[0-9]+]] = tail call i8* @llvm.objc.autoreleaseReturnValue(
 ; CHECK: %[[V1:[0-9]+]] = bitcast i8* %[[V0]] to i32*
 ; CHECK: ret i32* %[[V1]]
 
 define i32* @autoreleaseRVTailCall() {
   %1 = call i8* @foo1()
   %2 = bitcast i8* %1 to i32*
-  %3 = tail call i8* @objc_autoreleaseReturnValue(i8* %1)
+  %3 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %1)
   ret i32* %2
 }
 
@@ -23,7 +23,7 @@
 ; CHECK-LABEL: define i32* @autoreleaseRVTailCallPhi(
 ; CHECK: %[[PHIVAL:.*]] = phi i8* [ %{{.*}}, %bb1 ], [ %{{.*}}, %bb2 ]
 ; CHECK: %[[RETVAL:.*]] = phi i32* [ %{{.*}}, %bb1 ], [ %{{.*}}, %bb2 ]
-; CHECK: %[[V4:.*]] = tail call i8* @objc_autoreleaseReturnValue(i8* %[[PHIVAL]])
+; CHECK: %[[V4:.*]] = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %[[PHIVAL]])
 ; CHECK: %[[V0:.*]] = bitcast i8* %[[V4]] to i32*
 ; CHECK: ret i32* %[[V0]]
 
@@ -41,6 +41,6 @@
 bb3:
   %phival = phi i8* [ %v1, %bb1 ], [ %v3, %bb2 ]
   %retval = phi i32* [ %v0, %bb1 ], [ %v2, %bb2 ]
-  %v4 = tail call i8* @objc_autoreleaseReturnValue(i8* %phival)
+  %v4 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %phival)
   ret i32* %retval
 }
diff --git a/test/Transforms/ObjCARC/contract-storestrong-funclet.ll b/test/Transforms/ObjCARC/contract-storestrong-funclet.ll
index 2155a36..afeab0e 100644
--- a/test/Transforms/ObjCARC/contract-storestrong-funclet.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong-funclet.ll
@@ -2,9 +2,9 @@
 
 declare void @f()
 declare i32 @__CxxFrameHandler3(...)
-declare dllimport i8* @objc_retain(i8*)
-declare dllimport i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare dllimport void @objc_release(i8*)
+declare dllimport i8* @llvm.objc.retain(i8*)
+declare dllimport i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare dllimport void @llvm.objc.release(i8*)
 
 @x = external global i8*
 
@@ -12,26 +12,26 @@
   invoke void @f() to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:
-  %call = tail call i8* @objc_retain(i8* %p) nounwind
+  %call = tail call i8* @llvm.objc.retain(i8* %p) nounwind
   %tmp = load i8*, i8** @x, align 4
   store i8* %call, i8** @x, align 4
-  tail call void @objc_release(i8* %tmp) nounwind
+  tail call void @llvm.objc.release(i8* %tmp) nounwind
   ret void
 
 ehcleanup:
   %1 = cleanuppad within none []
-  %call1 = tail call i8* @objc_retain(i8* %p) nounwind [ "funclet"(token %1) ]
+  %call1 = tail call i8* @llvm.objc.retain(i8* %p) nounwind [ "funclet"(token %1) ]
   %tmp1 = load i8*, i8** @x, align 4
   store i8* %call1, i8** @x, align 4
-  tail call void @objc_release(i8* %tmp1) nounwind [ "funclet"(token %1) ]
+  tail call void @llvm.objc.release(i8* %tmp1) nounwind [ "funclet"(token %1) ]
   cleanupret from %1 unwind to caller
 }
 
 ; CHECK-LABEL: invoke.cont:
-; CHECK: tail call void @objc_storeStrong(i8** @x, i8* %p) #0{{$}}
+; CHECK: tail call void @llvm.objc.storeStrong(i8** @x, i8* %p) #0{{$}}
 ; CHECK: ret void
 
 ; CHECK-LABEL: ehcleanup:
 ; CHECK: %1 = cleanuppad within none []
-; CHECK: tail call void @objc_storeStrong(i8** @x, i8* %p) #0 [ "funclet"(token %1) ]
+; CHECK: tail call void @llvm.objc.storeStrong(i8** @x, i8* %p) #0 [ "funclet"(token %1) ]
 ; CHECK: cleanupret from %1 unwind to caller
diff --git a/test/Transforms/ObjCARC/contract-storestrong-ivar.ll b/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
index 8b1a02f..79db46a 100644
--- a/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -objc-arc-contract -S < %s | FileCheck %s
 
-; CHECK: tail call void @objc_storeStrong(i8**
+; CHECK: tail call void @llvm.objc.storeStrong(i8**
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin11.0.0"
@@ -10,9 +10,9 @@
 
 @"OBJC_IVAR_$_Controller.preferencesController" = external global i64, section "__DATA, __objc_const", align 8
 
-declare i8* @objc_retain(i8*)
+declare i8* @llvm.objc.retain(i8*)
 
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 
 define hidden void @y(%0* nocapture %self, %1* %preferencesController) nounwind {
 entry:
@@ -22,9 +22,9 @@
   %tmp1 = bitcast i8* %add.ptr to %1**
   %tmp2 = load %1*, %1** %tmp1, align 8
   %tmp3 = bitcast %1* %preferencesController to i8*
-  %tmp4 = tail call i8* @objc_retain(i8* %tmp3) nounwind
+  %tmp4 = tail call i8* @llvm.objc.retain(i8* %tmp3) nounwind
   %tmp5 = bitcast %1* %tmp2 to i8*
-  tail call void @objc_release(i8* %tmp5) nounwind
+  tail call void @llvm.objc.release(i8* %tmp5) nounwind
   %tmp6 = bitcast i8* %tmp4 to %1*
   store %1* %tmp6, %1** %tmp1, align 8
   ret void
diff --git a/test/Transforms/ObjCARC/contract-storestrong.ll b/test/Transforms/ObjCARC/contract-storestrong.ll
index a02f7b7..eff0a6f 100644
--- a/test/Transforms/ObjCARC/contract-storestrong.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong.ll
@@ -2,23 +2,23 @@
 
 target datalayout = "e-p:64:64:64"
 
-declare i8* @objc_retain(i8*)
-declare void @objc_release(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare void @llvm.objc.release(i8*)
 declare void @use_pointer(i8*)
 
 @x = external global i8*
 
 ; CHECK-LABEL: define void @test0(
 ; CHECK: entry:
-; CHECK-NEXT: tail call void @objc_storeStrong(i8** @x, i8* %p) [[NUW:#[0-9]+]]
+; CHECK-NEXT: tail call void @llvm.objc.storeStrong(i8** @x, i8* %p) [[NUW:#[0-9]+]]
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test0(i8* %p) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %p) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %p) nounwind
   %tmp = load i8*, i8** @x, align 8
   store i8* %0, i8** @x, align 8
-  tail call void @objc_release(i8* %tmp) nounwind
+  tail call void @llvm.objc.release(i8* %tmp) nounwind
   ret void
 }
 
@@ -26,18 +26,18 @@
 
 ; CHECK-LABEL: define void @test1(i8* %p) {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %0 = tail call i8* @objc_retain(i8* %p) [[NUW]]
+; CHECK-NEXT:   %0 = tail call i8* @llvm.objc.retain(i8* %p) [[NUW]]
 ; CHECK-NEXT:   %tmp = load volatile i8*, i8** @x, align 8
 ; CHECK-NEXT:   store i8* %0, i8** @x, align 8
-; CHECK-NEXT:   tail call void @objc_release(i8* %tmp) [[NUW]]
+; CHECK-NEXT:   tail call void @llvm.objc.release(i8* %tmp) [[NUW]]
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test1(i8* %p) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %p) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %p) nounwind
   %tmp = load volatile i8*, i8** @x, align 8
   store i8* %0, i8** @x, align 8
-  tail call void @objc_release(i8* %tmp) nounwind
+  tail call void @llvm.objc.release(i8* %tmp) nounwind
   ret void
 }
 
@@ -45,18 +45,18 @@
 
 ; CHECK-LABEL: define void @test2(i8* %p) {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %0 = tail call i8* @objc_retain(i8* %p) [[NUW]]
+; CHECK-NEXT:   %0 = tail call i8* @llvm.objc.retain(i8* %p) [[NUW]]
 ; CHECK-NEXT:   %tmp = load i8*, i8** @x, align 8
 ; CHECK-NEXT:   store volatile i8* %0, i8** @x, align 8
-; CHECK-NEXT:   tail call void @objc_release(i8* %tmp) [[NUW]]
+; CHECK-NEXT:   tail call void @llvm.objc.release(i8* %tmp) [[NUW]]
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test2(i8* %p) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %p) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %p) nounwind
   %tmp = load i8*, i8** @x, align 8
   store volatile i8* %0, i8** @x, align 8
-  tail call void @objc_release(i8* %tmp) nounwind
+  tail call void @llvm.objc.release(i8* %tmp) nounwind
   ret void
 }
 
@@ -65,20 +65,20 @@
 
 ; CHECK-LABEL: define void @test3(i8* %newValue) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    %x0 = tail call i8* @objc_retain(i8* %newValue) [[NUW]]
+; CHECK-NEXT:    %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) [[NUW]]
 ; CHECK-NEXT:    %x1 = load i8*, i8** @x, align 8
 ; CHECK-NEXT:    store i8* %x0, i8** @x, align 8
 ; CHECK-NEXT:    tail call void @use_pointer(i8* %x1), !clang.arc.no_objc_arc_exceptions !0
-; CHECK-NEXT:    tail call void @objc_release(i8* %x1) [[NUW]], !clang.imprecise_release !0
+; CHECK-NEXT:    tail call void @llvm.objc.release(i8* %x1) [[NUW]], !clang.imprecise_release !0
 ; CHECK-NEXT:    ret void
 ; CHECK-NEXT:  }
 define void @test3(i8* %newValue) {
 entry:
-  %x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
+  %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) nounwind
   %x1 = load i8*, i8** @x, align 8
   store i8* %newValue, i8** @x, align 8
   tail call void @use_pointer(i8* %x1), !clang.arc.no_objc_arc_exceptions !0
-  tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x1) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -86,36 +86,36 @@
 
 ; CHECK-LABEL:  define i1 @test4(i8* %newValue, i8* %foo) {
 ; CHECK-NEXT:   entry:
-; CHECK-NEXT:     %x0 = tail call i8* @objc_retain(i8* %newValue) [[NUW]]
+; CHECK-NEXT:     %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) [[NUW]]
 ; CHECK-NEXT:     %x1 = load i8*, i8** @x, align 8
 ; CHECK-NEXT:     store i8* %x0, i8** @x, align 8
 ; CHECK-NEXT:     %t = icmp eq i8* %x1, %foo
-; CHECK-NEXT:     tail call void @objc_release(i8* %x1) [[NUW]], !clang.imprecise_release !0
+; CHECK-NEXT:     tail call void @llvm.objc.release(i8* %x1) [[NUW]], !clang.imprecise_release !0
 ; CHECK-NEXT:     ret i1 %t
 ; CHECK-NEXT:   }
 define i1 @test4(i8* %newValue, i8* %foo) {
 entry:
-  %x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
+  %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) nounwind
   %x1 = load i8*, i8** @x, align 8
   store i8* %newValue, i8** @x, align 8
   %t = icmp eq i8* %x1, %foo
-  tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x1) nounwind, !clang.imprecise_release !0
   ret i1 %t
 }
 
-; Do form an objc_storeStrong here, because the use is before the store.
+; Do form an llvm.objc.storeStrong here, because the use is before the store.
 
 ; CHECK-LABEL: define i1 @test5(i8* %newValue, i8* %foo) {
 ; CHECK: %t = icmp eq i8* %x1, %foo
-; CHECK: tail call void @objc_storeStrong(i8** @x, i8* %newValue) [[NUW]]
+; CHECK: tail call void @llvm.objc.storeStrong(i8** @x, i8* %newValue) [[NUW]]
 ; CHECK: }
 define i1 @test5(i8* %newValue, i8* %foo) {
 entry:
-  %x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
+  %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) nounwind
   %x1 = load i8*, i8** @x, align 8
   %t = icmp eq i8* %x1, %foo
   store i8* %newValue, i8** @x, align 8
-  tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x1) nounwind, !clang.imprecise_release !0
   ret i1 %t
 }
 
@@ -123,49 +123,49 @@
 
 ; CHECK-LABEL: define i1 @test6(i8* %newValue, i8* %foo) {
 ; CHECK: %t = icmp eq i8* %x1, %foo
-; CHECK: tail call void @objc_storeStrong(i8** @x, i8* %newValue) [[NUW]]
+; CHECK: tail call void @llvm.objc.storeStrong(i8** @x, i8* %newValue) [[NUW]]
 ; CHECK: }
 define i1 @test6(i8* %newValue, i8* %foo) {
 entry:
-  %x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
+  %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) nounwind
   %x1 = load i8*, i8** @x, align 8
-  tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x1) nounwind, !clang.imprecise_release !0
   %t = icmp eq i8* %x1, %foo
   store i8* %newValue, i8** @x, align 8
   ret i1 %t
 }
 
-; Like test0, but there's no store, so don't form an objc_storeStrong.
+; Like test0, but there's no store, so don't form an llvm.objc.storeStrong.
 
 ; CHECK-LABEL: define void @test7(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %0 = tail call i8* @objc_retain(i8* %p) [[NUW]]
+; CHECK-NEXT:   %0 = tail call i8* @llvm.objc.retain(i8* %p) [[NUW]]
 ; CHECK-NEXT:   %tmp = load i8*, i8** @x, align 8
-; CHECK-NEXT:   tail call void @objc_release(i8* %tmp) [[NUW]]
+; CHECK-NEXT:   tail call void @llvm.objc.release(i8* %tmp) [[NUW]]
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test7(i8* %p) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %p) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %p) nounwind
   %tmp = load i8*, i8** @x, align 8
-  tail call void @objc_release(i8* %tmp) nounwind
+  tail call void @llvm.objc.release(i8* %tmp) nounwind
   ret void
 }
 
-; Like test0, but there's no retain, so don't form an objc_storeStrong.
+; Like test0, but there's no retain, so don't form an llvm.objc.storeStrong.
 
 ; CHECK-LABEL: define void @test8(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT:   %tmp = load i8*, i8** @x, align 8
 ; CHECK-NEXT:   store i8* %p, i8** @x, align 8
-; CHECK-NEXT:   tail call void @objc_release(i8* %tmp) [[NUW]]
+; CHECK-NEXT:   tail call void @llvm.objc.release(i8* %tmp) [[NUW]]
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test8(i8* %p) {
 entry:
   %tmp = load i8*, i8** @x, align 8
   store i8* %p, i8** @x, align 8
-  tail call void @objc_release(i8* %tmp) nounwind
+  tail call void @llvm.objc.release(i8* %tmp) nounwind
   ret void
 }
 
@@ -176,13 +176,13 @@
 ; pointer.
 ;
 ; CHECK-LABEL: define i1 @test9(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
-; CHECK-NOT: objc_storeStrong
+; CHECK-NOT: llvm.objc.storeStrong
 define i1 @test9(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
 entry:
-  %x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
-  tail call void @objc_release(i8* %unrelated_ptr) nounwind, !clang.imprecise_release !0
+  %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) nounwind
+  tail call void @llvm.objc.release(i8* %unrelated_ptr) nounwind, !clang.imprecise_release !0
   %x1 = load i8*, i8** @x, align 8
-  tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x1) nounwind, !clang.imprecise_release !0
   %t = icmp eq i8* %x1, %foo
   store i8* %newValue, i8** @x, align 8
   ret i1 %t  
@@ -191,13 +191,13 @@
 ; Make sure that we don't perform the optimization when we just have a call.
 ;
 ; CHECK-LABEL: define i1 @test10(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
-; CHECK-NOT: objc_storeStrong
+; CHECK-NOT: llvm.objc.storeStrong
 define i1 @test10(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
 entry:
-  %x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
+  %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) nounwind
   call void @use_pointer(i8* %unrelated_ptr)
   %x1 = load i8*, i8** @x, align 8
-  tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x1) nounwind, !clang.imprecise_release !0
   %t = icmp eq i8* %x1, %foo
   store i8* %newValue, i8** @x, align 8
   ret i1 %t
@@ -206,13 +206,13 @@
 ; Make sure we form the store strong if the use in between the retain
 ; and the store does not touch reference counts.
 ; CHECK-LABEL: define i1 @test11(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
-; CHECK: objc_storeStrong
+; CHECK: llvm.objc.storeStrong
 define i1 @test11(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
 entry:
-  %x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
+  %x0 = tail call i8* @llvm.objc.retain(i8* %newValue) nounwind
   %t = icmp eq i8* %newValue, %foo
   %x1 = load i8*, i8** @x, align 8
-  tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %x1) nounwind, !clang.imprecise_release !0
   store i8* %newValue, i8** @x, align 8
   ret i1 %t
 }
@@ -227,31 +227,31 @@
 ; CHECK-NEXT: %p32 = bitcast i8** @x to i32**
 ; CHECK-NEXT: %v32 = bitcast i8* %p to i32*
 ; CHECK-NEXT: %0 = bitcast i16** %p16 to i8**
-; CHECK-NEXT: tail call void @objc_storeStrong(i8** %0, i8* %p)
+; CHECK-NEXT: tail call void @llvm.objc.storeStrong(i8** %0, i8* %p)
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test12(i8* %p) {
 entry:
-  %retain = tail call i8* @objc_retain(i8* %p) nounwind
+  %retain = tail call i8* @llvm.objc.retain(i8* %p) nounwind
   %p16 = bitcast i8** @x to i16**
   %tmp16 = load i16*, i16** %p16, align 8
   %tmp8 = bitcast i16* %tmp16 to i8*
   %p32 = bitcast i8** @x to i32**
   %v32 = bitcast i8* %retain to i32*
   store i32* %v32, i32** %p32, align 8
-  tail call void @objc_release(i8* %tmp8) nounwind
+  tail call void @llvm.objc.release(i8* %tmp8) nounwind
   ret void
 }
 
 ; This used to crash.
 ; CHECK-LABEL: define i8* @test13(
-; CHECK: tail call void @objc_storeStrong(i8** %{{.*}}, i8* %[[NEW:.*]])
+; CHECK: tail call void @llvm.objc.storeStrong(i8** %{{.*}}, i8* %[[NEW:.*]])
 ; CHECK-NEXT: ret i8* %[[NEW]]
 
 define i8* @test13(i8* %a0, i8* %a1, i8** %addr, i8* %new) {
   %old = load i8*, i8** %addr, align 8
-  call void @objc_release(i8* %old)
-  %retained = call i8* @objc_retain(i8* %new)
+  call void @llvm.objc.release(i8* %old)
+  %retained = call i8* @llvm.objc.retain(i8* %new)
   store i8* %retained, i8** %addr, align 8
   ret i8* %retained
 }
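; Editor's sketch, illustration only, not part of the patch: test0 through
; test13 above all probe one rewrite. When contraction proves that a retain of
; the new value, a load of the old value, a store of the new value, and a
; release of the old value all target the same location, with nothing in
; between that can touch reference counts, the four calls fold into a single
; storeStrong. @g is a hypothetical location standing in for @x:

@g = external global i8*

declare i8* @llvm.objc.retain(i8*)
declare void @llvm.objc.release(i8*)
declare void @llvm.objc.storeStrong(i8**, i8*)

define void @storeStrong_before(i8* %p) {
entry:
  %new = tail call i8* @llvm.objc.retain(i8* %p)
  %old = load i8*, i8** @g, align 8
  store i8* %new, i8** @g, align 8
  tail call void @llvm.objc.release(i8* %old)
  ret void
}

; ...which -objc-arc-contract rewrites to:
define void @storeStrong_after(i8* %p) {
entry:
  tail call void @llvm.objc.storeStrong(i8** @g, i8* %p)
  ret void
}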
diff --git a/test/Transforms/ObjCARC/contract-testcases.ll b/test/Transforms/ObjCARC/contract-testcases.ll
index e6d34a9..5715735 100644
--- a/test/Transforms/ObjCARC/contract-testcases.ll
+++ b/test/Transforms/ObjCARC/contract-testcases.ll
@@ -7,13 +7,13 @@
 %4 = type opaque
 
 declare %0* @"\01-[NSAttributedString(Terminal) pathAtIndex:effectiveRange:]"(%1*, i8* nocapture, i64, %2*) optsize
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_msgSend_fixup(i8*, i8*, ...)
-declare i8* @objc_msgSend(i8*, i8*, ...)
-declare void @objc_release(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.msgSend_fixup(i8*, i8*, ...)
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...)
+declare void @llvm.objc.release(i8*)
 declare %2 @NSUnionRange(i64, i64, i64, i64) optsize
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare i8* @objc_autorelease(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
 declare i32 @__gxx_personality_sj0(...)
 
 ; Don't get in trouble on bugpointed code.
@@ -22,7 +22,7 @@
 define void @test0() {
 bb:
   %tmp = bitcast %4* undef to i8*
-  %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %tmp) nounwind
+  %tmp1 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %tmp) nounwind
   br label %bb3
 
 bb3:                                              ; preds = %bb2
@@ -53,9 +53,9 @@
 ; CHECK: }
 define void @test1() {
 bb:
-  %tmp = tail call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* ()*)()
+  %tmp = tail call %0* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %0* ()*)()
   %tmp2 = bitcast %0* %tmp to i8*
-  %tmp3 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %tmp2) nounwind
+  %tmp3 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %tmp2) nounwind
   br i1 undef, label %bb7, label %bb7
 
 bb7:                                              ; preds = %bb6, %bb6, %bb5
@@ -70,15 +70,15 @@
 ; CHECK: define void @_Z6doTestP8NSString() personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
 ; CHECK: invoke.cont:                                      ; preds = %entry
 ; CHECK-NEXT: call void asm sideeffect "mov\09r7, r7\09\09@ marker for objc_retainAutoreleaseReturnValue", ""()
-; CHECK-NEXT: %tmp = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call) [[NUW:#[0-9]+]]
+; CHECK-NEXT: %tmp = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) [[NUW:#[0-9]+]]
 ; CHECK: }
 define void @_Z6doTestP8NSString() personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
 entry:
-  %call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* ()*)()
+  %call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* ()*)()
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %entry
-  %tmp = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %tmp = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   unreachable
 
 lpad:                                             ; preds = %entry
diff --git a/test/Transforms/ObjCARC/contract.ll b/test/Transforms/ObjCARC/contract.ll
index 6ad46f2..7cf3f5e 100644
--- a/test/Transforms/ObjCARC/contract.ll
+++ b/test/Transforms/ObjCARC/contract.ll
@@ -2,11 +2,11 @@
 
 target datalayout = "e-p:64:64:64"
 
-declare i8* @objc_retain(i8*)
-declare void @objc_release(i8*)
-declare i8* @objc_autorelease(i8*)
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
 
 declare void @use_pointer(i8*)
 declare i8* @returner()
@@ -17,7 +17,7 @@
 ; CHECK: }
 define void @test0(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   ret void
 }
@@ -27,7 +27,7 @@
 ; CHECK: }
 define void @test1(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_autorelease(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.autorelease(i8* %x) nounwind
   call void @use_pointer(i8* %x)
   ret void
 }
@@ -35,12 +35,12 @@
 ; Merge objc_retain and objc_autorelease into objc_retainAutorelease.
 
 ; CHECK-LABEL: define void @test2(
-; CHECK: tail call i8* @objc_retainAutorelease(i8* %x) [[NUW:#[0-9]+]]
+; CHECK: tail call i8* @llvm.objc.retainAutorelease(i8* %x) [[NUW:#[0-9]+]]
 ; CHECK: }
 define void @test2(i8* %x) nounwind {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %x) nounwind
-  call i8* @objc_autorelease(i8* %0) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %0) nounwind
   call void @use_pointer(i8* %x)
   ret void
 }
@@ -48,26 +48,26 @@
 ; Same as test2 but the value is returned. Do an RV optimization.
 
 ; CHECK-LABEL: define i8* @test2b(
-; CHECK: tail call i8* @objc_retainAutoreleaseReturnValue(i8* %x) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %x) [[NUW]]
 ; CHECK: }
 define i8* @test2b(i8* %x) nounwind {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %x) nounwind
-  tail call i8* @objc_autoreleaseReturnValue(i8* %0) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %x) nounwind
+  tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %0) nounwind
   ret i8* %x
 }
 
 ; Merge a retain,autorelease pair around a call.
 
 ; CHECK-LABEL: define void @test3(
-; CHECK: tail call i8* @objc_retainAutorelease(i8* %x) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retainAutorelease(i8* %x) [[NUW]]
 ; CHECK: @use_pointer(i8* %0)
 ; CHECK: }
 define void @test3(i8* %x, i64 %n) {
 entry:
-  tail call i8* @objc_retain(i8* %x) nounwind
+  tail call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
-  call i8* @objc_autorelease(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %x) nounwind
   ret void
 }
 
@@ -76,34 +76,34 @@
 
 ; CHECK-LABEL: define void @test4(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: @objc_retainAutorelease(i8* %x) [[NUW]]
+; CHECK-NEXT: @llvm.objc.retainAutorelease(i8* %x) [[NUW]]
 ; CHECK-NEXT: @use_pointer
-; CHECK-NEXT: @objc_release
+; CHECK-NEXT: @llvm.objc.release
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test4(i8* %x, i64 %n) {
 entry:
-  tail call i8* @objc_retain(i8* %x) nounwind
+  tail call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %x)
-  call i8* @objc_autorelease(i8* %x) nounwind
-  tail call void @objc_release(i8* %x) nounwind
+  call i8* @llvm.objc.autorelease(i8* %x) nounwind
+  tail call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; Don't merge retain and autorelease if they're not control-equivalent.
 
 ; CHECK-LABEL: define void @test5(
-; CHECK: tail call i8* @objc_retain(i8* %p) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %p) [[NUW]]
 ; CHECK: true:
-; CHECK: call i8* @objc_autorelease(i8* %0) [[NUW]]
+; CHECK: call i8* @llvm.objc.autorelease(i8* %0) [[NUW]]
 ; CHECK: }
 define void @test5(i8* %p, i1 %a) {
 entry:
-  tail call i8* @objc_retain(i8* %p) nounwind
+  tail call i8* @llvm.objc.retain(i8* %p) nounwind
   br i1 %a, label %true, label %false
 
 true:
-  call i8* @objc_autorelease(i8* %p) nounwind
+  call i8* @llvm.objc.autorelease(i8* %p) nounwind
   call void @use_pointer(i8* %p)
   ret void
 
@@ -120,13 +120,13 @@
 ; Those entrypoints don't exist yet though.
 
 ; CHECK-LABEL: define i8* @test6(
-; CHECK: call i8* @objc_retainAutoreleasedReturnValue(i8* %p) [[NUW]]
-; CHECK: %t = tail call i8* @objc_autoreleaseReturnValue(i8* %1) [[NUW]]
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p) [[NUW]]
+; CHECK: %t = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %1) [[NUW]]
 ; CHECK: }
 define i8* @test6() {
   %p = call i8* @returner()
-  tail call i8* @objc_retainAutoreleasedReturnValue(i8* %p) nounwind
-  %t = tail call i8* @objc_autoreleaseReturnValue(i8* %p) nounwind
+  tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p) nounwind
+  %t = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %p) nounwind
   call void @use_pointer(i8* %t)
   ret i8* %t
 }
@@ -134,15 +134,15 @@
 ; Don't spoil the RV optimization.
 
 ; CHECK: define i8* @test7(i8* %p)
-; CHECK: tail call i8* @objc_retain(i8* %p)
+; CHECK: tail call i8* @llvm.objc.retain(i8* %p)
 ; CHECK: call void @use_pointer(i8* %1)
-; CHECK: tail call i8* @objc_autoreleaseReturnValue(i8* %1)
+; CHECK: tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %1)
 ; CHECK: ret i8* %2
 ; CHECK-NEXT: }
 define i8* @test7(i8* %p) {
-  %1 = tail call i8* @objc_retain(i8* %p)
+  %1 = tail call i8* @llvm.objc.retain(i8* %p)
   call void @use_pointer(i8* %p)
-  %2 = tail call i8* @objc_autoreleaseReturnValue(i8* %p)
+  %2 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
   ret i8* %p
 }
 
@@ -156,7 +156,7 @@
   br i1 %x, label %return, label %if.then
 
 if.then:                                          ; preds = %entry
-  %p = call i8* @objc_retain(i8* %c) nounwind
+  %p = call i8* @llvm.objc.retain(i8* %c) nounwind
   br label %return
 
 return:                                           ; preds = %if.then, %entry
@@ -164,12 +164,12 @@
   ret i8* %retval
 }
 
-; Kill calls to @clang.arc.use(...)
+; Kill calls to @llvm.objc.clang.arc.use(...)
 ; CHECK-LABEL: define void @test9(
 ; CHECK-NOT: clang.arc.use
 ; CHECK: }
 define void @test9(i8* %a, i8* %b) {
-  call void (...) @clang.arc.use(i8* %a, i8* %b) nounwind
+  call void (...) @llvm.objc.clang.arc.use(i8* %a, i8* %b) nounwind
   ret void
 }
 
@@ -178,10 +178,10 @@
 ; is a return value.
 
 ; CHECK: define void @test10()
-; CHECK: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
+; CHECK: tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
 define void @test10() {
   %p = call i8* @returner()
-  tail call i8* @objc_retain(i8* %p) nounwind
+  tail call i8* @llvm.objc.retain(i8* %p) nounwind
   ret void
 }
 
@@ -190,11 +190,11 @@
 
 ; CHECK-LABEL: define void @test11(
 ; CHECK-NEXT: %y = call i8* @returner()
-; CHECK-NEXT: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %y) [[NUW]]
+; CHECK-NEXT: tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %y) [[NUW]]
 ; CHECK-NEXT: ret void
 define void @test11() {
   %y = call i8* @returner()
-  tail call i8* @objc_retain(i8* %y) nounwind
+  tail call i8* @llvm.objc.retain(i8* %y) nounwind
   ret void
 }
 
@@ -202,11 +202,11 @@
 ; argument is not a return value.
 
 ; CHECK-LABEL: define void @test12(
-; CHECK-NEXT: tail call i8* @objc_retain(i8* %y) [[NUW]]
+; CHECK-NEXT: tail call i8* @llvm.objc.retain(i8* %y) [[NUW]]
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test12(i8* %y) {
-  tail call i8* @objc_retain(i8* %y) nounwind
+  tail call i8* @llvm.objc.retain(i8* %y) nounwind
   ret void
 }
 
@@ -216,17 +216,17 @@
 ; CHECK-LABEL: define void @test13(
 ; CHECK-NEXT: %y = call i8* @returner()
 ; CHECK-NEXT: call void @callee()
-; CHECK-NEXT: tail call i8* @objc_retain(i8* %y) [[NUW]]
+; CHECK-NEXT: tail call i8* @llvm.objc.retain(i8* %y) [[NUW]]
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test13() {
   %y = call i8* @returner()
   call void @callee()
-  tail call i8* @objc_retain(i8* %y) nounwind
+  tail call i8* @llvm.objc.retain(i8* %y) nounwind
   ret void
 }
 
 
-declare void @clang.arc.use(...) nounwind
+declare void @llvm.objc.clang.arc.use(...) nounwind
 
 ; CHECK: attributes [[NUW]] = { nounwind }
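; Editor's sketch, illustration only, not part of the patch: test2 and test2b
; above isolate the merge rules. A retain immediately followed by an
; autorelease of the same value contracts to retainAutorelease; when the
; autorelease feeds a return, the ReturnValue form is used instead. In
; isolation:

declare i8* @llvm.objc.retain(i8*)
declare i8* @llvm.objc.autorelease(i8*)
declare i8* @llvm.objc.retainAutorelease(i8*)

define void @merge_before(i8* %x) {
entry:
  %0 = tail call i8* @llvm.objc.retain(i8* %x)
  call i8* @llvm.objc.autorelease(i8* %0)
  ret void
}

; ...contracts to the single combined call:
define void @merge_after(i8* %x) {
entry:
  %0 = tail call i8* @llvm.objc.retainAutorelease(i8* %x)
  ret void
}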
diff --git a/test/Transforms/ObjCARC/empty-block.ll b/test/Transforms/ObjCARC/empty-block.ll
index cc82d10..68372e7 100644
--- a/test/Transforms/ObjCARC/empty-block.ll
+++ b/test/Transforms/ObjCARC/empty-block.ll
@@ -3,33 +3,33 @@
 
 %0 = type opaque
 
-declare i8* @objc_retain(i8*)
+declare i8* @llvm.objc.retain(i8*)
 
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 
-declare i8* @objc_autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
 
 ; Don't delete the autorelease.
 
 ; CHECK-LABEL: define %0* @test0(
-; CHECK:   @objc_retain
+; CHECK:   @llvm.objc.retain
 ; CHECK: .lr.ph:
-; CHECK-NOT: @objc_r
-; CHECK: @objc_autoreleaseReturnValue
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.r
+; CHECK: @llvm.objc.autoreleaseReturnValue
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define %0* @test0(%0* %buffer) nounwind {
   %1 = bitcast %0* %buffer to i8*
-  %2 = tail call i8* @objc_retain(i8* %1) nounwind
+  %2 = tail call i8* @llvm.objc.retain(i8* %1) nounwind
   br i1 undef, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %.lr.ph, %0
   br i1 false, label %.lr.ph, label %._crit_edge
 
 ._crit_edge:                                      ; preds = %.lr.ph, %0
-  %3 = tail call i8* @objc_retain(i8* %1) nounwind
-  tail call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
-  %4 = tail call i8* @objc_autoreleaseReturnValue(i8* %1) nounwind
+  %3 = tail call i8* @llvm.objc.retain(i8* %1) nounwind
+  tail call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
+  %4 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %1) nounwind
   ret %0* %buffer
 }
 
@@ -41,16 +41,16 @@
 define %0* @test1() nounwind {
   %buffer = call %0* @foo()
   %1 = bitcast %0* %buffer to i8*
-  %2 = tail call i8* @objc_retain(i8* %1) nounwind
+  %2 = tail call i8* @llvm.objc.retain(i8* %1) nounwind
   br i1 undef, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %.lr.ph, %0
   br i1 false, label %.lr.ph, label %._crit_edge
 
 ._crit_edge:                                      ; preds = %.lr.ph, %0
-  %3 = tail call i8* @objc_retain(i8* %1) nounwind
-  tail call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
-  %4 = tail call i8* @objc_autoreleaseReturnValue(i8* %1) nounwind
+  %3 = tail call i8* @llvm.objc.retain(i8* %1) nounwind
+  tail call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
+  %4 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %1) nounwind
   ret %0* %buffer
 }
 
diff --git a/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll b/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
index be351be..589cb7b 100644
--- a/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
+++ b/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
@@ -39,66 +39,66 @@
   %tmp = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_", align 8, !dbg !37
   %tmp1 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !dbg !37, !invariant.load !38
   %tmp2 = bitcast %struct._class_t* %tmp to i8*, !dbg !37
-; CHECK: call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp2, i8* %tmp1)
-  %call = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp2, i8* %tmp1), !dbg !37, !clang.arc.no_objc_arc_exceptions !38
+; CHECK: call i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* %tmp2, i8* %tmp1)
+  %call = call i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* %tmp2, i8* %tmp1), !dbg !37, !clang.arc.no_objc_arc_exceptions !38
   call void @llvm.dbg.value(metadata i8* %call, metadata !25, metadata !DIExpression()), !dbg !37
-; CHECK: call i8* @objc_retain(i8* %call) [[NUW:#[0-9]+]]
-  %tmp3 = call i8* @objc_retain(i8* %call) nounwind, !dbg !39
+; CHECK: call i8* @llvm.objc.retain(i8* %call) [[NUW:#[0-9]+]]
+  %tmp3 = call i8* @llvm.objc.retain(i8* %call) nounwind, !dbg !39
   call void @llvm.dbg.value(metadata i8* %call, metadata !25, metadata !DIExpression()), !dbg !39
   invoke fastcc void @ThrowFunc(i8* %call)
           to label %eh.cont unwind label %lpad, !dbg !40, !clang.arc.no_objc_arc_exceptions !38
 
 eh.cont:                                          ; preds = %entry
-; CHECK: call void @objc_release(i8* %call)
-  call void @objc_release(i8* %call) nounwind, !dbg !42, !clang.imprecise_release !38
+; CHECK: call void @llvm.objc.release(i8* %call)
+  call void @llvm.objc.release(i8* %call) nounwind, !dbg !42, !clang.imprecise_release !38
   br label %if.end, !dbg !43
 
 lpad:                                             ; preds = %entry
   %tmp4 = landingpad { i8*, i32 }
           catch i8* null, !dbg !40
   %tmp5 = extractvalue { i8*, i32 } %tmp4, 0, !dbg !40
-  %exn.adjusted = call i8* @objc_begin_catch(i8* %tmp5) nounwind, !dbg !44
+  %exn.adjusted = call i8* @llvm.objc.begin_catch(i8* %tmp5) nounwind, !dbg !44
   call void @llvm.dbg.value(metadata i8 0, metadata !21, metadata !DIExpression()), !dbg !46
-  call void @objc_end_catch(), !dbg !49, !clang.arc.no_objc_arc_exceptions !38
-; CHECK: call void @objc_release(i8* %call)
-  call void @objc_release(i8* %call) nounwind, !dbg !42, !clang.imprecise_release !38
+  call void @llvm.objc.end_catch(), !dbg !49, !clang.arc.no_objc_arc_exceptions !38
+; CHECK: call void @llvm.objc.release(i8* %call)
+  call void @llvm.objc.release(i8* %call) nounwind, !dbg !42, !clang.imprecise_release !38
   call void (i8*, ...) @NSLog(i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring_ to i8*), i8* %call), !dbg !50, !clang.arc.no_objc_arc_exceptions !38
   br label %if.end, !dbg !52
 
 if.end:                                           ; preds = %lpad, %eh.cont
   call void (i8*, ...) @NSLog(i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring_ to i8*), i8* %call), !dbg !53, !clang.arc.no_objc_arc_exceptions !38
-; CHECK: call void @objc_release(i8* %call)
-  call void @objc_release(i8* %call) nounwind, !dbg !54, !clang.imprecise_release !38
+; CHECK: call void @llvm.objc.release(i8* %call)
+  call void @llvm.objc.release(i8* %call) nounwind, !dbg !54, !clang.imprecise_release !38
   ret i32 0, !dbg !54
 }
 
 declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
 
-declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...) nonlazybind
 
-declare i8* @objc_retain(i8*) nonlazybind
+declare i8* @llvm.objc.retain(i8*) nonlazybind
 
-declare i8* @objc_begin_catch(i8*)
+declare i8* @llvm.objc.begin_catch(i8*)
 
-declare void @objc_end_catch()
+declare void @llvm.objc.end_catch()
 
-declare void @objc_exception_rethrow()
+declare void @llvm.objc.exception_rethrow()
 
 define internal fastcc void @ThrowFunc(i8* %obj) uwtable noinline ssp !dbg !27 {
 entry:
-  %tmp = call i8* @objc_retain(i8* %obj) nounwind
+  %tmp = call i8* @llvm.objc.retain(i8* %obj) nounwind
   call void @llvm.dbg.value(metadata i8* %obj, metadata !32, metadata !DIExpression()), !dbg !55
   %tmp1 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_1", align 8, !dbg !56
   %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_5", align 8, !dbg !56, !invariant.load !38
   %tmp3 = bitcast %struct._class_t* %tmp1 to i8*, !dbg !56
-  call void (i8*, i8*, %0*, %0*, ...) bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %0*, %0*, ...)*)(i8* %tmp3, i8* %tmp2, %0* bitcast (%struct.NSConstantString* @_unnamed_cfstring_3 to %0*), %0* bitcast (%struct.NSConstantString* @_unnamed_cfstring_3 to %0*)), !dbg !56, !clang.arc.no_objc_arc_exceptions !38
-  call void @objc_release(i8* %obj) nounwind, !dbg !58, !clang.imprecise_release !38
+  call void (i8*, i8*, %0*, %0*, ...) bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, %0*, %0*, ...)*)(i8* %tmp3, i8* %tmp2, %0* bitcast (%struct.NSConstantString* @_unnamed_cfstring_3 to %0*), %0* bitcast (%struct.NSConstantString* @_unnamed_cfstring_3 to %0*)), !dbg !56, !clang.arc.no_objc_arc_exceptions !38
+  call void @llvm.objc.release(i8* %obj) nounwind, !dbg !58, !clang.imprecise_release !38
   ret void, !dbg !58
 }
 
 declare i32 @__objc_personality_v0(...)
 
-declare void @objc_release(i8*) nonlazybind
+declare void @llvm.objc.release(i8*) nonlazybind
 
 declare void @NSLog(i8*, ...)
 
@@ -107,8 +107,8 @@
 ; CHECK: attributes #0 = { ssp uwtable }
 ; CHECK: attributes #1 = { nounwind readnone speculatable }
 ; CHECK: attributes #2 = { nonlazybind }
-; CHECK: attributes #3 = { noinline ssp uwtable }
 ; CHECK: attributes [[NUW]] = { nounwind }
+; CHECK: attributes #4 = { noinline ssp uwtable }
 
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!33, !34, !35, !36, !61}
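; Editor's note, an assumption not stated in the patch: the reordered
; attribute CHECK lines above appear to reflect renumbered attribute groups.
; The llvm.objc.* declarations carry their own attribute set, which shifts the
; noinline group from #3 to #4 while the [[NUW]] binding now matches earlier.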
diff --git a/test/Transforms/ObjCARC/escape.ll b/test/Transforms/ObjCARC/escape.ll
index c7a1b03..f9eeca8 100644
--- a/test/Transforms/ObjCARC/escape.ll
+++ b/test/Transforms/ObjCARC/escape.ll
@@ -10,8 +10,8 @@
 ; with the objc_storeWeak call.
 
 ; CHECK-LABEL: define void @test0(
-; CHECK: %tmp7 = call i8* @objc_retainBlock(i8* %tmp6) [[NUW:#[0-9]+]], !clang.arc.copy_on_escape !0
-; CHECK: call void @objc_release(i8* %tmp7) [[NUW]], !clang.imprecise_release !0
+; CHECK: %tmp7 = call i8* @llvm.objc.retainBlock(i8* %tmp6) [[NUW:#[0-9]+]], !clang.arc.copy_on_escape !0
+; CHECK: call void @llvm.objc.release(i8* %tmp7) [[NUW]], !clang.imprecise_release !0
 ; CHECK: }
 define void @test0() nounwind {
 entry:
@@ -31,7 +31,7 @@
   store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
   %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
   %tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
-  %tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
+  %tmp4 = call i8* @llvm.objc.initWeak(i8** %tmp3, i8* null) nounwind
   %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
   store i8* null, i8** %block.isa, align 8
   %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
@@ -46,19 +46,19 @@
   %tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
   store i8* %tmp5, i8** %block.captured, align 8
   %tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
-  %tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
+  %tmp7 = call i8* @llvm.objc.retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
   %tmp8 = load %struct.__block_byref_weakLogNTimes*, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
   %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
   %tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
-  %tmp10 = call i8* @objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
+  %tmp10 = call i8* @llvm.objc.storeWeak(i8** %tmp9, i8* %tmp7) nounwind
   %tmp11 = getelementptr inbounds i8, i8* %tmp7, i64 16
   %tmp12 = bitcast i8* %tmp11 to i8**
   %tmp13 = load i8*, i8** %tmp12, align 8
   %tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
   call void %tmp14(i8* %tmp7, i32 10) nounwind, !clang.arc.no_objc_arc_exceptions !0
-  call void @objc_release(i8* %tmp7) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp7) nounwind, !clang.imprecise_release !0
   call void @_Block_object_dispose(i8* %tmp5, i32 8) nounwind
-  call void @objc_destroyWeak(i8** %tmp3) nounwind
+  call void @llvm.objc.destroyWeak(i8** %tmp3) nounwind
   ret void
 }
 
@@ -66,7 +66,7 @@
 ; so the optimization is valid.
 
 ; CHECK-LABEL: define void @test1(
-; CHECK-NOT: @objc_retainBlock
+; CHECK-NOT: @llvm.objc.retainBlock
 ; CHECK: }
 define void @test1() nounwind {
 entry:
@@ -86,7 +86,7 @@
   store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
   %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
   %tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
-  %tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
+  %tmp4 = call i8* @llvm.objc.initWeak(i8** %tmp3, i8* null) nounwind
   %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
   store i8* null, i8** %block.isa, align 8
   %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
@@ -101,7 +101,7 @@
   %tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
   store i8* %tmp5, i8** %block.captured, align 8
   %tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
-  %tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
+  %tmp7 = call i8* @llvm.objc.retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
   %tmp8 = load %struct.__block_byref_weakLogNTimes*, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
   %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
   %tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
@@ -111,22 +111,22 @@
   %tmp13 = load i8*, i8** %tmp12, align 8
   %tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
   call void %tmp14(i8* %tmp7, i32 10) nounwind, !clang.arc.no_objc_arc_exceptions !0
-  call void @objc_release(i8* %tmp7) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp7) nounwind, !clang.imprecise_release !0
   call void @_Block_object_dispose(i8* %tmp5, i32 8) nounwind
-  call void @objc_destroyWeak(i8** %tmp3) nounwind
+  call void @llvm.objc.destroyWeak(i8** %tmp3) nounwind
   ret void
 }
 
 declare void @__Block_byref_object_copy_(i8*, i8*) nounwind
 declare void @__Block_byref_object_dispose_(i8*) nounwind
-declare void @objc_destroyWeak(i8**)
-declare i8* @objc_initWeak(i8**, i8*)
+declare void @llvm.objc.destroyWeak(i8**)
+declare i8* @llvm.objc.initWeak(i8**, i8*)
 declare void @__main_block_invoke_0(i8* nocapture, i32) nounwind ssp
 declare void @_Block_object_dispose(i8*, i32)
-declare i8* @objc_retainBlock(i8*)
-declare i8* @objc_storeWeak(i8**, i8*)
+declare i8* @llvm.objc.retainBlock(i8*)
+declare i8* @llvm.objc.storeWeak(i8**, i8*)
 declare i8* @not_really_objc_storeWeak(i8**, i8*)
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 
 !0 = !{}
 
diff --git a/test/Transforms/ObjCARC/expand.ll b/test/Transforms/ObjCARC/expand.ll
index fe47ee5..b89c5d5 100644
--- a/test/Transforms/ObjCARC/expand.ll
+++ b/test/Transforms/ObjCARC/expand.ll
@@ -2,78 +2,78 @@
 
 target datalayout = "e-p:64:64:64"
 
-declare i8* @objc_retain(i8*)
-declare i8* @objc_autorelease(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare i8* @objc_retainAutorelease(i8*)
-declare i8* @objc_retainAutoreleaseReturnValue(i8*)
-declare i8* @objc_retainBlock(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.retainAutorelease(i8*)
+declare i8* @llvm.objc.retainAutoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.retainBlock(i8*)
 
 declare void @use_pointer(i8*)
 
 ; CHECK: define void @test_retain(i8* %x) [[NUW:#[0-9]+]] {
-; CHECK: call i8* @objc_retain(i8* %x)
+; CHECK: call i8* @llvm.objc.retain(i8* %x)
 ; CHECK: call void @use_pointer(i8* %x)
 ; CHECK: }
 define void @test_retain(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retain(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
   call void @use_pointer(i8* %0)
   ret void
 }
 
 ; CHECK: define void @test_retainAutoreleasedReturnValue(i8* %x) [[NUW]] {
-; CHECK: call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %x)
 ; CHECK: call void @use_pointer(i8* %x)
 ; CHECK: }
 define void @test_retainAutoreleasedReturnValue(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %x) nounwind
   call void @use_pointer(i8* %0)
   ret void
 }
 
 ; CHECK: define void @test_retainAutorelease(i8* %x) [[NUW]] {
-; CHECK: call i8* @objc_retainAutorelease(i8* %x)
+; CHECK: call i8* @llvm.objc.retainAutorelease(i8* %x)
 ; CHECK: call void @use_pointer(i8* %x)
 ; CHECK: }
 define void @test_retainAutorelease(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retainAutorelease(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retainAutorelease(i8* %x) nounwind
   call void @use_pointer(i8* %0)
   ret void
 }
 
 ; CHECK: define void @test_retainAutoreleaseReturnValue(i8* %x) [[NUW]] {
-; CHECK: call i8* @objc_retainAutoreleaseReturnValue(i8* %x)
+; CHECK: call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %x)
 ; CHECK: call void @use_pointer(i8* %x)
 ; CHECK: }
 define void @test_retainAutoreleaseReturnValue(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retainAutoreleaseReturnValue(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %x) nounwind
   call void @use_pointer(i8* %0)
   ret void
 }
 
 ; CHECK: define void @test_autorelease(i8* %x) [[NUW]] {
-; CHECK: call i8* @objc_autorelease(i8* %x)
+; CHECK: call i8* @llvm.objc.autorelease(i8* %x)
 ; CHECK: call void @use_pointer(i8* %x)
 ; CHECK: }
 define void @test_autorelease(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_autorelease(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.autorelease(i8* %x) nounwind
   call void @use_pointer(i8* %0)
   ret void
 }
 
 ; CHECK: define void @test_autoreleaseReturnValue(i8* %x) [[NUW]] {
-; CHECK: call i8* @objc_autoreleaseReturnValue(i8* %x)
+; CHECK: call i8* @llvm.objc.autoreleaseReturnValue(i8* %x)
 ; CHECK: call void @use_pointer(i8* %x)
 ; CHECK: }
 define void @test_autoreleaseReturnValue(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_autoreleaseReturnValue(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %x) nounwind
   call void @use_pointer(i8* %0)
   ret void
 }
@@ -83,12 +83,12 @@
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; CHECK: define void @test_retainBlock(i8* %x) [[NUW]] {
-; CHECK: call i8* @objc_retainBlock(i8* %x)
+; CHECK: call i8* @llvm.objc.retainBlock(i8* %x)
 ; CHECK: call void @use_pointer(i8* %0)
 ; CHECK: }
 define void @test_retainBlock(i8* %x) nounwind {
 entry:
-  %0 = call i8* @objc_retainBlock(i8* %x) nounwind
+  %0 = call i8* @llvm.objc.retainBlock(i8* %x) nounwind
   call void @use_pointer(i8* %0)
   ret void
 }
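; Editor's sketch, illustration only, not part of the patch: every CHECK pair
; in this file relies on the same property. These entry points return their
; argument, so the ObjCARC Expand pass may forward the argument to users of
; the call result without changing behavior:

declare i8* @llvm.objc.retain(i8*)
declare void @use_pointer(i8*)

define void @forward(i8* %x) {
entry:
  %0 = call i8* @llvm.objc.retain(i8* %x)
  ; Before expansion this user reads %0; afterwards it reads %x directly,
  ; exposing the underlying value to later optimizations:
  call void @use_pointer(i8* %x)
  ret void
}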
diff --git a/test/Transforms/ObjCARC/funclet.ll b/test/Transforms/ObjCARC/funclet.ll
index 57e6b49..346a690 100644
--- a/test/Transforms/ObjCARC/funclet.ll
+++ b/test/Transforms/ObjCARC/funclet.ll
@@ -14,8 +14,8 @@
 declare zeroext i1 @"\01?g@@YA_NXZ"() local_unnamed_addr
 declare i8* @"\01?h@@YAPEAUobjc_object@@XZ"() local_unnamed_addr
 
-declare dllimport void @objc_release(i8*) local_unnamed_addr
-declare dllimport i8* @objc_retainAutoreleasedReturnValue(i8* returned) local_unnamed_addr
+declare dllimport void @llvm.objc.release(i8*) local_unnamed_addr
+declare dllimport i8* @llvm.objc.retainAutoreleasedReturnValue(i8* returned) local_unnamed_addr
 
 declare i32 @__CxxFrameHandler3(...)
 
@@ -32,8 +32,8 @@
           to label %invoke.cont1 unwind label %ehcleanup6
 
 invoke.cont1:                                     ; preds = %if.then
-  %0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call2)
-  tail call void @objc_release(i8* null), !clang.imprecise_release !1
+  %0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call2)
+  tail call void @llvm.objc.release(i8* null), !clang.imprecise_release !1
   br label %if.end
 
 if.end:                                           ; preds = %invoke.cont1, %invoke.cont
@@ -42,25 +42,25 @@
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %if.end
-  tail call void @objc_release(i8* null), !clang.imprecise_release !1
-  tail call void @objc_release(i8* %a.0), !clang.imprecise_release !1
+  tail call void @llvm.objc.release(i8* null), !clang.imprecise_release !1
+  tail call void @llvm.objc.release(i8* %a.0), !clang.imprecise_release !1
   ret void
 
 ehcleanup:                                        ; preds = %if.end
   %1 = cleanuppad within none []
-  call void @objc_release(i8* null) [ "funclet"(token %1) ], !clang.imprecise_release !1
+  call void @llvm.objc.release(i8* null) [ "funclet"(token %1) ], !clang.imprecise_release !1
   cleanupret from %1 unwind label %ehcleanup6
 
 ehcleanup6:                                       ; preds = %ehcleanup, %if.then, %entry
   %a.1 = phi i8* [ %a.0, %ehcleanup ], [ null, %if.then ], [ null, %entry ]
   %2 = cleanuppad within none []
-  call void @objc_release(i8* %a.1) [ "funclet"(token %2) ], !clang.imprecise_release !1
+  call void @llvm.objc.release(i8* %a.1) [ "funclet"(token %2) ], !clang.imprecise_release !1
   cleanupret from %2 unwind to caller
 }
 
 ; CHECK-LABEL: ?f@@YAXXZ
-; CHECK: call void @objc_release(i8* {{.*}}) {{.*}}[ "funclet"(token %1) ]
-; CHECK-NOT: call void @objc_release(i8* {{.*}}) {{.*}}[ "funclet"(token %2) ]
+; CHECK: call void @llvm.objc.release(i8* {{.*}}) {{.*}}[ "funclet"(token %1) ]
+; CHECK-NOT: call void @llvm.objc.release(i8* {{.*}}) {{.*}}[ "funclet"(token %2) ]
 
 define void @"\01?i@@YAXXZ"() local_unnamed_addr personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
 entry:
@@ -75,8 +75,8 @@
           to label %invoke.cont1 unwind label %ehcleanup6
 
 invoke.cont1:                                     ; preds = %if.then
-  %0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call2)
-  tail call void @objc_release(i8* null), !clang.imprecise_release !1
+  %0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call2)
+  tail call void @llvm.objc.release(i8* null), !clang.imprecise_release !1
   br label %if.end
 
 if.end:                                           ; preds = %invoke.cont1, %invoke.cont
@@ -85,13 +85,13 @@
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %if.end
-  tail call void @objc_release(i8* null), !clang.imprecise_release !1
-  tail call void @objc_release(i8* %a.0), !clang.imprecise_release !1
+  tail call void @llvm.objc.release(i8* null), !clang.imprecise_release !1
+  tail call void @llvm.objc.release(i8* %a.0), !clang.imprecise_release !1
   ret void
 
 ehcleanup:                                        ; preds = %if.end
   %1 = cleanuppad within none []
-  call void @objc_release(i8* null) [ "funclet"(token %1) ], !clang.imprecise_release !1
+  call void @llvm.objc.release(i8* null) [ "funclet"(token %1) ], !clang.imprecise_release !1
   br label %ehcleanup.1
 
 ehcleanup.1:
@@ -100,13 +100,13 @@
 ehcleanup6:                                       ; preds = %ehcleanup, %if.then, %entry
   %a.1 = phi i8* [ %a.0, %ehcleanup.1 ], [ null, %if.then ], [ null, %entry ]
   %2 = cleanuppad within none []
-  call void @objc_release(i8* %a.1) [ "funclet"(token %2) ], !clang.imprecise_release !1
+  call void @llvm.objc.release(i8* %a.1) [ "funclet"(token %2) ], !clang.imprecise_release !1
   cleanupret from %2 unwind to caller
 }
 
 ; CHECK-LABEL: ?i@@YAXXZ
-; CHECK: call void @objc_release(i8* {{.*}}) {{.*}}[ "funclet"(token %1) ]
-; CHECK-NOT: call void @objc_release(i8* {{.*}}) {{.*}}[ "funclet"(token %2) ]
+; CHECK: call void @llvm.objc.release(i8* {{.*}}) {{.*}}[ "funclet"(token %1) ]
+; CHECK-NOT: call void @llvm.objc.release(i8* {{.*}}) {{.*}}[ "funclet"(token %2) ]
 
 !1 = !{}
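
funclet.ll above pins down Windows EH behaviour: any release the optimizer keeps inside a cleanup funclet must carry the "funclet" operand bundle naming its cleanuppad, which is what the CHECK/CHECK-NOT pairs verify. A self-contained sketch of that shape (@sketch and @may_throw are illustrative names, not from the test):

declare void @may_throw()
declare void @llvm.objc.release(i8*)
declare i32 @__CxxFrameHandler3(...)

define void @sketch(i8* %p) personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
entry:
  invoke void @may_throw()
          to label %cont unwind label %cleanup

cont:
  ret void

cleanup:
  %pad = cleanuppad within none []
  ; the bundle ties the call to its enclosing funclet
  call void @llvm.objc.release(i8* %p) [ "funclet"(token %pad) ]
  cleanupret from %pad unwind to caller
}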
 
diff --git a/test/Transforms/ObjCARC/gvn.ll b/test/Transforms/ObjCARC/gvn.ll
index 6f82854..f2977d0 100644
--- a/test/Transforms/ObjCARC/gvn.ll
+++ b/test/Transforms/ObjCARC/gvn.ll
@@ -2,9 +2,9 @@
 
 @x = common global i8* null, align 8
 
-declare i8* @objc_retain(i8*)
-declare i32 @objc_sync_enter(i8*)
-declare i32 @objc_sync_exit(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i32 @llvm.objc.sync.enter(i8*)
+declare i32 @llvm.objc.sync.exit(i8*)
 
 ; GVN should be able to eliminate this redundant load, with ARC-specific
 ; alias analysis.
@@ -18,7 +18,7 @@
 define i8* @test0(i32 %n) nounwind {
 entry:
   %s = load i8*, i8** @x
-  %0 = tail call i8* @objc_retain(i8* %s) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %s) nounwind
   %t = load i8*, i8** @x
   ret i8* %t
 }
@@ -34,8 +34,8 @@
 define i8* @test1(i32 %n) nounwind {
 entry:
   %s = load i8*, i8** @x
-  %0 = call i32 @objc_sync_enter(i8* %s)
+  %0 = call i32 @llvm.objc.sync.enter(i8* %s)
   %t = load i8*, i8** @x
-  %1 = call i32 @objc_sync_exit(i8* %s)
+  %1 = call i32 @llvm.objc.sync.exit(i8* %s)
   ret i8* %t
 }
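
For context on gvn.ll above: ObjCARC's alias analysis teaches GVN that llvm.objc.retain only touches the reference count and never writes @x, so the second load of @x in @test0 is redundant. A hedged sketch of the expected result (the test's RUN line and exact pass flags fall outside the hunk shown; @test0_after is an illustrative name):

@x = common global i8* null, align 8
declare i8* @llvm.objc.retain(i8*)

; expected shape of @test0 after GVN with ARC alias analysis:
define i8* @test0_after(i32 %n) nounwind {
entry:
  %s = load i8*, i8** @x
  %0 = tail call i8* @llvm.objc.retain(i8* %s) nounwind
  ret i8* %s    ; the second load of @x folds away to %s
}
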
diff --git a/test/Transforms/ObjCARC/intrinsic-use-isolated.ll b/test/Transforms/ObjCARC/intrinsic-use-isolated.ll
index 03d7520..4ccad03 100644
--- a/test/Transforms/ObjCARC/intrinsic-use-isolated.ll
+++ b/test/Transforms/ObjCARC/intrinsic-use-isolated.ll
@@ -3,14 +3,14 @@
 ; This file makes sure that clang.arc.use is removed even if no other
 ; ARC-interesting calls are in the module.
 
-declare void @clang.arc.use(...) nounwind
+declare void @llvm.objc.clang.arc.use(...) nounwind
 
-; Kill calls to @clang.arc.use(...)
+; Kill calls to @llvm.objc.clang.arc.use(...)
 ; CHECK-LABEL: define void @test0(
 ; CHECK-NOT: clang.arc.use
 ; CHECK: }
 define void @test0(i8* %a, i8* %b) {
-  call void (...) @clang.arc.use(i8* %a, i8* %b) nounwind
+  call void (...) @llvm.objc.clang.arc.use(i8* %a, i8* %b) nounwind
   ret void
 }
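
Since @llvm.objc.clang.arc.use is a no-op lifetime marker, the pass is expected to strip it even when it is the only ARC-related call in the module; a sketch of what @test0 should reduce to (illustrative, not a CHECK line from the test):

define void @test0_after(i8* %a, i8* %b) {
  ret void
}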
 
diff --git a/test/Transforms/ObjCARC/intrinsic-use.ll b/test/Transforms/ObjCARC/intrinsic-use.ll
index f595620..8a4ac52 100644
--- a/test/Transforms/ObjCARC/intrinsic-use.ll
+++ b/test/Transforms/ObjCARC/intrinsic-use.ll
@@ -2,12 +2,12 @@
 
 target datalayout = "e-p:64:64:64"
 
-declare i8* @objc_retain(i8*)
-declare i8* @objc_retainAutorelease(i8*)
-declare void @objc_release(i8*)
-declare i8* @objc_autorelease(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.retainAutorelease(i8*)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
 
-declare void @clang.arc.use(...)
+declare void @llvm.objc.clang.arc.use(...)
 
 declare void @test0_helper(i8*, i8**)
 
@@ -15,70 +15,70 @@
 ; the reduced test case from <rdar://13195034>.
 ;
 ; CHECK-LABEL:      define void @test0(
-; CHECK:        @objc_retain(i8* %x)
+; CHECK:        @llvm.objc.retain(i8* %x)
 ; CHECK-NEXT:   store i8* %y, i8** %temp0
-; CHECK-NEXT:   @objc_retain(i8* %y)
+; CHECK-NEXT:   @llvm.objc.retain(i8* %y)
 ; CHECK-NEXT:   call void @test0_helper
 ; CHECK-NEXT:   [[VAL1:%.*]] = load i8*, i8** %temp0
-; CHECK-NEXT:   @objc_retain(i8* [[VAL1]])
-; CHECK-NEXT:   call void (...) @clang.arc.use(i8* %y)
-; CHECK-NEXT:   @objc_release(i8* %y)
+; CHECK-NEXT:   @llvm.objc.retain(i8* [[VAL1]])
+; CHECK-NEXT:   call void (...) @llvm.objc.clang.arc.use(i8* %y)
+; CHECK-NEXT:   @llvm.objc.release(i8* %y)
 ; CHECK-NEXT:   store i8* [[VAL1]], i8** %temp1
 ; CHECK-NEXT:   call void @test0_helper
 ; CHECK-NEXT:   [[VAL2:%.*]] = load i8*, i8** %temp1
-; CHECK-NEXT:   @objc_retain(i8* [[VAL2]])
-; CHECK-NEXT:   call void (...) @clang.arc.use(i8* [[VAL1]])
-; CHECK-NEXT:   @objc_release(i8* [[VAL1]])
-; CHECK-NEXT:   @objc_autorelease(i8* %x)
+; CHECK-NEXT:   @llvm.objc.retain(i8* [[VAL2]])
+; CHECK-NEXT:   call void (...) @llvm.objc.clang.arc.use(i8* [[VAL1]])
+; CHECK-NEXT:   @llvm.objc.release(i8* [[VAL1]])
+; CHECK-NEXT:   @llvm.objc.autorelease(i8* %x)
 ; CHECK-NEXT:   store i8* %x, i8** %out
-; CHECK-NEXT:   @objc_retain(i8* %x)
-; CHECK-NEXT:   @objc_release(i8* [[VAL2]])
-; CHECK-NEXT:   @objc_release(i8* %x)
+; CHECK-NEXT:   @llvm.objc.retain(i8* %x)
+; CHECK-NEXT:   @llvm.objc.release(i8* [[VAL2]])
+; CHECK-NEXT:   @llvm.objc.release(i8* %x)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test0(i8** %out, i8* %x, i8* %y) {
 entry:
   %temp0 = alloca i8*, align 8
   %temp1 = alloca i8*, align 8
-  %0 = call i8* @objc_retain(i8* %x) nounwind
-  %1 = call i8* @objc_retain(i8* %y) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %y) nounwind
   store i8* %y, i8** %temp0
   call void @test0_helper(i8* %x, i8** %temp0)
   %val1 = load i8*, i8** %temp0
-  %2 = call i8* @objc_retain(i8* %val1) nounwind
-  call void (...) @clang.arc.use(i8* %y) nounwind
-  call void @objc_release(i8* %y) nounwind
+  %2 = call i8* @llvm.objc.retain(i8* %val1) nounwind
+  call void (...) @llvm.objc.clang.arc.use(i8* %y) nounwind
+  call void @llvm.objc.release(i8* %y) nounwind
   store i8* %val1, i8** %temp1
   call void @test0_helper(i8* %x, i8** %temp1)
   %val2 = load i8*, i8** %temp1
-  %3 = call i8* @objc_retain(i8* %val2) nounwind
-  call void (...) @clang.arc.use(i8* %val1) nounwind
-  call void @objc_release(i8* %val1) nounwind
-  %4 = call i8* @objc_retain(i8* %x) nounwind
-  %5 = call i8* @objc_autorelease(i8* %x) nounwind
+  %3 = call i8* @llvm.objc.retain(i8* %val2) nounwind
+  call void (...) @llvm.objc.clang.arc.use(i8* %val1) nounwind
+  call void @llvm.objc.release(i8* %val1) nounwind
+  %4 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  %5 = call i8* @llvm.objc.autorelease(i8* %x) nounwind
   store i8* %x, i8** %out
-  call void @objc_release(i8* %val2) nounwind
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %val2) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; CHECK-LABEL:      define void @test0a(
-; CHECK:        @objc_retain(i8* %x)
+; CHECK:        @llvm.objc.retain(i8* %x)
 ; CHECK-NEXT:   store i8* %y, i8** %temp0
-; CHECK-NEXT:   @objc_retain(i8* %y)
+; CHECK-NEXT:   @llvm.objc.retain(i8* %y)
 ; CHECK-NEXT:   call void @test0_helper
 ; CHECK-NEXT:   [[VAL1:%.*]] = load i8*, i8** %temp0
-; CHECK-NEXT:   @objc_retain(i8* [[VAL1]])
-; CHECK-NEXT:   call void (...) @clang.arc.use(i8* %y)
-; CHECK-NEXT:   @objc_release(i8* %y)
+; CHECK-NEXT:   @llvm.objc.retain(i8* [[VAL1]])
+; CHECK-NEXT:   call void (...) @llvm.objc.clang.arc.use(i8* %y)
+; CHECK-NEXT:   @llvm.objc.release(i8* %y)
 ; CHECK-NEXT:   store i8* [[VAL1]], i8** %temp1
 ; CHECK-NEXT:   call void @test0_helper
 ; CHECK-NEXT:   [[VAL2:%.*]] = load i8*, i8** %temp1
-; CHECK-NEXT:   @objc_retain(i8* [[VAL2]])
-; CHECK-NEXT:   call void (...) @clang.arc.use(i8* [[VAL1]])
-; CHECK-NEXT:   @objc_release(i8* [[VAL1]])
-; CHECK-NEXT:   @objc_autorelease(i8* %x)
-; CHECK-NEXT:   @objc_release(i8* [[VAL2]])
+; CHECK-NEXT:   @llvm.objc.retain(i8* [[VAL2]])
+; CHECK-NEXT:   call void (...) @llvm.objc.clang.arc.use(i8* [[VAL1]])
+; CHECK-NEXT:   @llvm.objc.release(i8* [[VAL1]])
+; CHECK-NEXT:   @llvm.objc.autorelease(i8* %x)
+; CHECK-NEXT:   @llvm.objc.release(i8* [[VAL2]])
 ; CHECK-NEXT:   store i8* %x, i8** %out
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
@@ -86,25 +86,25 @@
 entry:
   %temp0 = alloca i8*, align 8
   %temp1 = alloca i8*, align 8
-  %0 = call i8* @objc_retain(i8* %x) nounwind
-  %1 = call i8* @objc_retain(i8* %y) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %y) nounwind
   store i8* %y, i8** %temp0
   call void @test0_helper(i8* %x, i8** %temp0)
   %val1 = load i8*, i8** %temp0
-  %2 = call i8* @objc_retain(i8* %val1) nounwind
-  call void (...) @clang.arc.use(i8* %y) nounwind
-  call void @objc_release(i8* %y) nounwind, !clang.imprecise_release !0
+  %2 = call i8* @llvm.objc.retain(i8* %val1) nounwind
+  call void (...) @llvm.objc.clang.arc.use(i8* %y) nounwind
+  call void @llvm.objc.release(i8* %y) nounwind, !clang.imprecise_release !0
   store i8* %val1, i8** %temp1
   call void @test0_helper(i8* %x, i8** %temp1)
   %val2 = load i8*, i8** %temp1
-  %3 = call i8* @objc_retain(i8* %val2) nounwind
-  call void (...) @clang.arc.use(i8* %val1) nounwind
-  call void @objc_release(i8* %val1) nounwind, !clang.imprecise_release !0
-  %4 = call i8* @objc_retain(i8* %x) nounwind
-  %5 = call i8* @objc_autorelease(i8* %x) nounwind
+  %3 = call i8* @llvm.objc.retain(i8* %val2) nounwind
+  call void (...) @llvm.objc.clang.arc.use(i8* %val1) nounwind
+  call void @llvm.objc.release(i8* %val1) nounwind, !clang.imprecise_release !0
+  %4 = call i8* @llvm.objc.retain(i8* %x) nounwind
+  %5 = call i8* @llvm.objc.autorelease(i8* %x) nounwind
   store i8* %x, i8** %out
-  call void @objc_release(i8* %val2) nounwind, !clang.imprecise_release !0
-  call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %val2) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %x) nounwind, !clang.imprecise_release !0
   ret void
 }
 
diff --git a/test/Transforms/ObjCARC/invoke-2.ll b/test/Transforms/ObjCARC/invoke-2.ll
index ef5c675..b34de1a 100644
--- a/test/Transforms/ObjCARC/invoke-2.ll
+++ b/test/Transforms/ObjCARC/invoke-2.ll
@@ -4,51 +4,51 @@
 
 declare i32 @__CxxFrameHandler3(...)
 
-declare dllimport i8* @objc_msgSend(i8*, i8*, ...) local_unnamed_addr
+declare dllimport i8* @llvm.objc.msgSend(i8*, i8*, ...) local_unnamed_addr
 
-declare dllimport i8* @objc_retain(i8* returned) local_unnamed_addr
-declare dllimport void @objc_release(i8*) local_unnamed_addr
-declare dllimport i8* @objc_retainAutoreleasedReturnValue(i8* returned) local_unnamed_addr
+declare dllimport i8* @llvm.objc.retain(i8* returned) local_unnamed_addr
+declare dllimport void @llvm.objc.release(i8*) local_unnamed_addr
+declare dllimport i8* @llvm.objc.retainAutoreleasedReturnValue(i8* returned) local_unnamed_addr
 
-declare dllimport i8* @objc_begin_catch(i8*) local_unnamed_addr
-declare dllimport void @objc_end_catch() local_unnamed_addr
+declare dllimport i8* @llvm.objc.begin_catch(i8*) local_unnamed_addr
+declare dllimport void @llvm.objc.end_catch() local_unnamed_addr
 
-@OBJC_METH_VAR_NAME_ = private unnamed_addr constant [2 x i8] c"m\00", align 1
-@OBJC_SELECTOR_REFERENCES_ = private externally_initialized global i8* getelementptr inbounds ([2 x i8], [2 x i8]* @OBJC_METH_VAR_NAME_, i64 0, i64 0), section ".objc_selrefs$B", align 8
+@llvm.objc.METH_VAR_NAME_ = private unnamed_addr constant [2 x i8] c"m\00", align 1
+@llvm.objc.SELECTOR_REFERENCES_ = private externally_initialized global i8* getelementptr inbounds ([2 x i8], [2 x i8]* @llvm.objc.METH_VAR_NAME_, i64 0, i64 0), section ".objc_selrefs$B", align 8
 
 define void @f(i8* %i) local_unnamed_addr personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %i)
-  %1 = load i8*, i8** @OBJC_SELECTOR_REFERENCES_, align 8, !invariant.load !0
-  %call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %0, i8* %1)
+  %0 = tail call i8* @llvm.objc.retain(i8* %i)
+  %1 = load i8*, i8** @llvm.objc.SELECTOR_REFERENCES_, align 8, !invariant.load !0
+  %call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* %0, i8* %1)
           to label %invoke.cont unwind label %catch.dispatch, !clang.arc.no_objc_arc_exceptions !0
 
 catch.dispatch:                                   ; preds = %entry
   %2 = catchswitch within none [label %catch] unwind to caller
 
 invoke.cont:                                      ; preds = %entry
-  %3 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
-  tail call void @objc_release(i8* %3) #0, !clang.imprecise_release !0
+  %3 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call)
+  tail call void @llvm.objc.release(i8* %3) #0, !clang.imprecise_release !0
   br label %eh.cont
 
 eh.cont:                                          ; preds = %invoke.cont, %catch
-  tail call void @objc_release(i8* %0) #0, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %0) #0, !clang.imprecise_release !0
   ret void
 
 catch:                                            ; preds = %catch.dispatch
   %4 = catchpad within %2 [i8* null, i32 0, i8* null]
-  %exn.adjusted = tail call i8* @objc_begin_catch(i8* undef)
-  tail call void @objc_end_catch(), !clang.arc.no_objc_arc_exceptions !0
+  %exn.adjusted = tail call i8* @llvm.objc.begin_catch(i8* undef)
+  tail call void @llvm.objc.end_catch(), !clang.arc.no_objc_arc_exceptions !0
   br label %eh.cont
 }
 
 ; CHECK-LABEL: @f
 
-; CHECK-NOT: tail call i8* @objc_retain(i8* %i)
-; CHECK: load i8*, i8** @OBJC_SELECTOR_REFERENCES_, align 8
+; CHECK-NOT: tail call i8* @llvm.objc.retain(i8* %i)
+; CHECK: load i8*, i8** @llvm.objc.SELECTOR_REFERENCES_, align 8
 
 ; CHECK: eh.cont:
-; CHECK-NOT: call void @objc_release(i8*
+; CHECK-NOT: call void @llvm.objc.release(i8*
 ; CHECK: ret void
 
 attributes #0 = { nounwind }
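
invoke-2.ll above guards pair elimination in the presence of catchswitch funclets: the retain of %i must still find its matching release in %eh.cont, and both must be deleted. The underlying rewrite in its smallest form (a sketch, not taken from the test):

declare i8* @llvm.objc.retain(i8*)
declare void @llvm.objc.release(i8*)

define void @sketch(i8* %p) {
entry:
  %0 = tail call i8* @llvm.objc.retain(i8* %p)
  tail call void @llvm.objc.release(i8* %0), !clang.imprecise_release !0
  ret void    ; expected after -objc-arc: both calls deleted
}

!0 = !{}
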
diff --git a/test/Transforms/ObjCARC/invoke.ll b/test/Transforms/ObjCARC/invoke.ll
index 06105c1..3dc95cd 100644
--- a/test/Transforms/ObjCARC/invoke.ll
+++ b/test/Transforms/ObjCARC/invoke.ll
@@ -1,9 +1,9 @@
 ; RUN: opt -S -objc-arc < %s | FileCheck %s
 
-declare i8* @objc_retain(i8*)
-declare void @objc_release(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_msgSend(i8*, i8*, ...)
+declare i8* @llvm.objc.retain(i8*)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...)
 declare void @use_pointer(i8*)
 declare void @callee()
 declare i8* @returner()
@@ -12,27 +12,27 @@
 
 ; CHECK-LABEL: define void @test0(
 ; CHECK: invoke.cont:
-; CHECK:   call void @objc_release(i8* %zipFile) [[NUW:#[0-9]+]], !clang.imprecise_release !0
+; CHECK:   call void @llvm.objc.release(i8* %zipFile) [[NUW:#[0-9]+]], !clang.imprecise_release !0
 ; CHECK:   ret void
 ; CHECK: lpad:
-; CHECK:   call void @objc_release(i8* %zipFile) [[NUW]], !clang.imprecise_release !0
+; CHECK:   call void @llvm.objc.release(i8* %zipFile) [[NUW]], !clang.imprecise_release !0
 ; CHECK:   ret void
 ; CHECK-NEXT: }
 define void @test0(i8* %zipFile) personality i32 (...)* @__gxx_personality_v0 {
 entry:
-  call i8* @objc_retain(i8* %zipFile) nounwind
+  call i8* @llvm.objc.retain(i8* %zipFile) nounwind
   call void @use_pointer(i8* %zipFile)
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*)*)(i8* %zipFile) 
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*)*)(i8* %zipFile) 
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %entry
-  call void @objc_release(i8* %zipFile) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %zipFile) nounwind, !clang.imprecise_release !0
   ret void
 
 lpad:                                             ; preds = %entry
   %exn = landingpad {i8*, i32}
            cleanup
-  call void @objc_release(i8* %zipFile) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %zipFile) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -40,11 +40,11 @@
 
 ; CHECK-LABEL: define void @test1(
 ; CHECK: invoke.cont:
-; CHECK:   call void @objc_release(i8* %zipFile) [[NUW]], !clang.imprecise_release !0
+; CHECK:   call void @llvm.objc.release(i8* %zipFile) [[NUW]], !clang.imprecise_release !0
 ; CHECK:   call void @callee()
 ; CHECK:   br label %done
 ; CHECK: lpad:
-; CHECK:   call void @objc_release(i8* %zipFile) [[NUW]], !clang.imprecise_release !0
+; CHECK:   call void @llvm.objc.release(i8* %zipFile) [[NUW]], !clang.imprecise_release !0
 ; CHECK:   call void @callee()
 ; CHECK:   br label %done
 ; CHECK: done:
@@ -52,9 +52,9 @@
 ; CHECK-NEXT: }
 define void @test1(i8* %zipFile) personality i32 (...)* @__gxx_personality_v0 {
 entry:
-  call i8* @objc_retain(i8* %zipFile) nounwind
+  call i8* @llvm.objc.retain(i8* %zipFile) nounwind
   call void @use_pointer(i8* %zipFile)
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*)*)(i8* %zipFile)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*)*)(i8* %zipFile)
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %entry
@@ -68,7 +68,7 @@
   br label %done
 
 done:
-  call void @objc_release(i8* %zipFile) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %zipFile) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -77,27 +77,27 @@
 
 ; CHECK: define void @test2() personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
 ; CHECK: invoke.cont:
-; CHECK-NEXT: call i8* @objc_retain
-; CHECK-NOT: @objc_r
+; CHECK-NEXT: call i8* @llvm.objc.retain
+; CHECK-NOT: @llvm.objc.r
 ; CHECK: finally.cont:
-; CHECK-NEXT: call void @objc_release
+; CHECK-NEXT: call void @llvm.objc.release
 ; CHECK-NOT: @objc
 ; CHECK: finally.rethrow:
 ; CHECK-NOT: @objc
 ; CHECK: }
 define void @test2() personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
 entry:
-  %call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* ()*)()
+  %call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* ()*)()
           to label %invoke.cont unwind label %finally.rethrow, !clang.arc.no_objc_arc_exceptions !0
 
 invoke.cont:                                      ; preds = %entry
-  %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
-  call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void ()*)(), !clang.arc.no_objc_arc_exceptions !0
+  %tmp1 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
+  call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void ()*)(), !clang.arc.no_objc_arc_exceptions !0
   invoke void @use_pointer(i8* %call)
           to label %finally.cont unwind label %finally.rethrow, !clang.arc.no_objc_arc_exceptions !0
 
 finally.cont:                                     ; preds = %invoke.cont
-  tail call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %call) nounwind, !clang.imprecise_release !0
   ret void
 
 finally.rethrow:                                  ; preds = %invoke.cont, %entry
@@ -110,12 +110,12 @@
 
 ; CHECK-LABEL: define void @test3(
 ; CHECK: if.end:
-; CHECK-NEXT: call void @objc_release(i8* %p) [[NUW]]
+; CHECK-NEXT: call void @llvm.objc.release(i8* %p) [[NUW]]
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test3(i8* %p, i1 %b) personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
 entry:
-  %0 = call i8* @objc_retain(i8* %p)
+  %0 = call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   br i1 %b, label %if.else, label %if.then
 
@@ -133,7 +133,7 @@
   ret void
 
 if.end:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -143,15 +143,15 @@
 ; CHECK: lpad:
 ; CHECK-NEXT: %r = landingpad { i8*, i32 }
 ; CHECK-NEXT: cleanup
-; CHECK-NEXT: call void @objc_release(i8* %p) [[NUW]]
+; CHECK-NEXT: call void @llvm.objc.release(i8* %p) [[NUW]]
 ; CHECK-NEXT: ret void
 ; CHECK: if.end:
-; CHECK-NEXT: call void @objc_release(i8* %p) [[NUW]]
+; CHECK-NEXT: call void @llvm.objc.release(i8* %p) [[NUW]]
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test4(i8* %p, i1 %b) personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
 entry:
-  %0 = call i8* @objc_retain(i8* %p)
+  %0 = call i8* @llvm.objc.retain(i8* %p)
   call void @callee()
   br i1 %b, label %if.else, label %if.then
 
@@ -166,11 +166,11 @@
 lpad:
   %r = landingpad { i8*, i32 }
        cleanup
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 
 if.end:
-  call void @objc_release(i8* %p)
+  call void @llvm.objc.release(i8* %p)
   ret void
 }
 
@@ -178,7 +178,7 @@
 ; for an invoke which we can assume codegen will put immediately prior.
 
 ; CHECK-LABEL: define void @test5(
-; CHECK: call i8* @objc_retainAutoreleasedReturnValue(i8* %z)
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %z)
 ; CHECK: }
 define void @test5() personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
 entry:
@@ -191,14 +191,14 @@
   ret void
 
 if.end:
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %z)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %z)
   ret void
 }
 
 ; Like test5, but there's intervening code.
 
 ; CHECK-LABEL: define void @test6(
-; CHECK: call i8* @objc_retain(i8* %z)
+; CHECK: call i8* @llvm.objc.retain(i8* %z)
 ; CHECK: }
 define void @test6() personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
 entry:
@@ -212,7 +212,7 @@
 
 if.end:
   call void @callee()
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %z)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %z)
   ret void
 }
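
Tests 5 and 6 above encode the adjacency rule for reclaiming autoreleased return values: llvm.objc.retainAutoreleasedReturnValue is only kept when it executes immediately after the call (or invoke edge) that produced the value, since codegen must emit its marker directly behind that call; with intervening code it is demoted to a plain llvm.objc.retain. A condensed sketch of both cases (@sketch is an illustrative name):

declare i8* @returner()
declare void @callee()
declare i8* @llvm.objc.retain(i8*)
declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)

define void @sketch() {
entry:
  %z = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %z) ; kept: adjacent
  %w = call i8* @returner()
  call void @callee()
  %1 = call i8* @llvm.objc.retain(i8* %w) ; demoted: @callee intervenes
  ret void
}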
 
diff --git a/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll b/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
index 9894eb4..91b865e 100644
--- a/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
+++ b/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
@@ -4,7 +4,7 @@
 ; and various scary-looking things and fold it into an llvm.objc.retainAutorelease.
 
 ; CHECK: bb57:
-; CHECK: tail call i8* @objc_retainAutorelease(i8* %tmp71x) [[NUW:#[0-9]+]]
+; CHECK: tail call i8* @llvm.objc.retainAutorelease(i8* %tmp71x) [[NUW:#[0-9]+]]
 ; CHECK: bb99:
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -68,30 +68,30 @@
 @"\01L_OBJC_SELECTOR_REFERENCES_413" = external hidden global i8*, section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
 @"\01L_OBJC_SELECTOR_REFERENCES_415" = external hidden global i8*, section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
 
-declare i8* @objc_msgSend(i8*, i8*, ...)
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...)
 
-declare i8* @objc_retain(i8*)
+declare i8* @llvm.objc.retain(i8*)
 
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 
-declare i8* @objc_autorelease(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
 
-declare i8* @objc_explicit_autorelease(i8*)
+declare i8* @llvm.objc.explicit_autorelease(i8*)
 
 define hidden %14* @foo(%15* %arg, %16* %arg2) {
 bb:
   %tmp = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_3725", align 8
   %tmp4 = bitcast %15* %arg to i8*
-  %tmp5 = tail call %18* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %18* (i8*, i8*)*)(i8* %tmp4, i8* %tmp)
+  %tmp5 = tail call %18* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %18* (i8*, i8*)*)(i8* %tmp4, i8* %tmp)
   %tmp6 = bitcast %18* %tmp5 to i8*
-  %tmp7 = tail call i8* @objc_retain(i8* %tmp6) nounwind
+  %tmp7 = tail call i8* @llvm.objc.retain(i8* %tmp6) nounwind
   %tmp8 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_40", align 8
   %tmp9 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_4227", align 8
   %tmp10 = bitcast %2* %tmp8 to i8*
-  %tmp11 = tail call %19* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %19* (i8*, i8*)*)(i8* %tmp10, i8* %tmp9)
+  %tmp11 = tail call %19* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %19* (i8*, i8*)*)(i8* %tmp10, i8* %tmp9)
   %tmp12 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_4631", align 8
   %tmp13 = bitcast %19* %tmp11 to i8*
-  %tmp14 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, %13*)*)(i8* %tmp13, i8* %tmp12, %13* bitcast (%12* @_unnamed_cfstring_386 to %13*))
+  %tmp14 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, %13*)*)(i8* %tmp13, i8* %tmp12, %13* bitcast (%12* @_unnamed_cfstring_386 to %13*))
   %tmp15 = bitcast %16* %arg2 to i8*
   %tmp16 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_count" to i8**), align 16
   %tmp17 = bitcast i8* %tmp16 to i64 (i8*, %1*)*
@@ -111,35 +111,35 @@
 bb25:                                             ; preds = %bb22, %bb20
   %tmp26 = phi i1 [ %tmp21, %bb20 ], [ false, %bb22 ]
   %tmp27 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_188", align 8
-  %tmp28 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp7, i8* %tmp27)
-  %tmp29 = tail call i8* @objc_explicit_autorelease(i8* %tmp28) nounwind
+  %tmp28 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* %tmp7, i8* %tmp27)
+  %tmp29 = tail call i8* @llvm.objc.explicit_autorelease(i8* %tmp28) nounwind
   %tmp30 = bitcast i8* %tmp29 to %18*
-  tail call void @objc_release(i8* %tmp7) nounwind
+  tail call void @llvm.objc.release(i8* %tmp7) nounwind
   %tmp31 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_389", align 8
-  %tmp32 = tail call %20* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %20* (i8*, i8*)*)(i8* %tmp29, i8* %tmp31)
+  %tmp32 = tail call %20* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %20* (i8*, i8*)*)(i8* %tmp29, i8* %tmp31)
   %tmp33 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_391", align 8
   %tmp34 = bitcast %20* %tmp32 to i8*
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %16*)*)(i8* %tmp34, i8* %tmp33, %16* %arg2)
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, %16*)*)(i8* %tmp34, i8* %tmp33, %16* %arg2)
   br i1 %tmp26, label %bb46, label %bb35
 
 bb35:                                             ; preds = %bb25
   %tmp36 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_389", align 8
-  %tmp37 = tail call %20* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %20* (i8*, i8*)*)(i8* %tmp29, i8* %tmp36)
+  %tmp37 = tail call %20* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %20* (i8*, i8*)*)(i8* %tmp29, i8* %tmp36)
   %tmp38 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_70", align 8
   %tmp39 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_393", align 8
   %tmp40 = bitcast %2* %tmp38 to i8*
-  %tmp41 = tail call %21* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %21* (i8*, i8*, i8)*)(i8* %tmp40, i8* %tmp39, i8 signext 1)
+  %tmp41 = tail call %21* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %21* (i8*, i8*, i8)*)(i8* %tmp40, i8* %tmp39, i8 signext 1)
   %tmp42 = bitcast %21* %tmp41 to i8*
   %tmp43 = load %13*, %13** @NSPrintHeaderAndFooter, align 8
   %tmp44 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_159", align 8
   %tmp45 = bitcast %20* %tmp37 to i8*
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, %13*)*)(i8* %tmp45, i8* %tmp44, i8* %tmp42, %13* %tmp43)
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, %13*)*)(i8* %tmp45, i8* %tmp44, i8* %tmp42, %13* %tmp43)
   br label %bb46
 
 bb46:                                             ; preds = %bb35, %bb25, %bb22
   %tmp47 = phi %18* [ %tmp30, %bb35 ], [ %tmp30, %bb25 ], [ %tmp23, %bb22 ]
   %tmp48 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
-  %tmp49 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp48)
+  %tmp49 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp48)
   %tmp50 = bitcast %22* %tmp49 to i8*
   %tmp51 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_count" to i8**), align 16
   %tmp52 = bitcast i8* %tmp51 to i64 (i8*, %1*)*
@@ -149,74 +149,74 @@
 
 bb55:                                             ; preds = %bb46
   %tmp56 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_395", align 8
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*)*)(i8* %tmp4, i8* %tmp56)
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*)*)(i8* %tmp4, i8* %tmp56)
   br label %bb57
 
 bb57:                                             ; preds = %bb55, %bb46
   %tmp58 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_396", align 8
   %tmp59 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
-  %tmp60 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp59)
+  %tmp60 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp59)
   %tmp61 = bitcast %22* %tmp60 to i8*
   %tmp62 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to i8**), align 16
   %tmp63 = bitcast i8* %tmp62 to i8* (i8*, %1*, i64)*
   %tmp64 = tail call i8* %tmp63(i8* %tmp61, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to %1*), i64 0)
   %tmp65 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_398", align 8
-  %tmp66 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp64, i8* %tmp65)
+  %tmp66 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* %tmp64, i8* %tmp65)
   %tmp67 = bitcast i8* %tmp66 to %23*
   %tmp68 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_400", align 8
   %tmp69 = bitcast %2* %tmp58 to i8*
-  %tmp70 = tail call %14* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %14* (i8*, i8*, %23*, %18*)*)(i8* %tmp69, i8* %tmp68, %23* %tmp67, %18* %tmp47)
+  %tmp70 = tail call %14* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %14* (i8*, i8*, %23*, %18*)*)(i8* %tmp69, i8* %tmp68, %23* %tmp67, %18* %tmp47)
   %tmp71 = bitcast %14* %tmp70 to i8*
   ; hack to prevent the optimizer from using llvm.objc.retainAutoreleasedReturnValue.
   %tmp71x = getelementptr i8, i8* %tmp71, i64 1
-  %tmp72 = tail call i8* @objc_retain(i8* %tmp71x) nounwind
+  %tmp72 = tail call i8* @llvm.objc.retain(i8* %tmp71x) nounwind
   %tmp73 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_402", align 8
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp73, i8 signext 1)
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp73, i8 signext 1)
   %tmp74 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_404", align 8
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp74, i8 signext 1)
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp74, i8 signext 1)
   %tmp75 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
-  %tmp76 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp75)
+  %tmp76 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp75)
   %tmp77 = bitcast %22* %tmp76 to i8*
   %tmp78 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to i8**), align 16
   %tmp79 = bitcast i8* %tmp78 to i8* (i8*, %1*, i64)*
   %tmp80 = tail call i8* %tmp79(i8* %tmp77, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to %1*), i64 0)
   %tmp81 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_406", align 8
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i64)*)(i8* %tmp80, i8* %tmp81, i64 9223372036854775807)
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i64)*)(i8* %tmp80, i8* %tmp81, i64 9223372036854775807)
   %tmp82 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_408", align 8
-  %tmp83 = tail call %24* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %24* (i8*, i8*)*)(i8* %tmp72, i8* %tmp82)
+  %tmp83 = tail call %24* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %24* (i8*, i8*)*)(i8* %tmp72, i8* %tmp82)
   %tmp84 = bitcast %24* %tmp83 to i8*
-  %tmp85 = tail call i8* @objc_retain(i8* %tmp84) nounwind
+  %tmp85 = tail call i8* @llvm.objc.retain(i8* %tmp84) nounwind
   %tmp86 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_409", align 8
   %tmp87 = bitcast %2* %tmp86 to i8*
   %tmp88 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_alloc" to i8**), align 16
   %tmp89 = bitcast i8* %tmp88 to i8* (i8*, %1*)*
   %tmp90 = tail call i8* %tmp89(i8* %tmp87, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_alloc" to %1*))
   %tmp91 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_8", align 8
-  %tmp92 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp90, i8* %tmp91)
-  %tmp93 = tail call i8* @objc_explicit_autorelease(i8* %tmp92) nounwind
+  %tmp92 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* %tmp90, i8* %tmp91)
+  %tmp93 = tail call i8* @llvm.objc.explicit_autorelease(i8* %tmp92) nounwind
   %tmp94 = bitcast i8* %tmp93 to %25*
   %tmp95 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_411", align 8
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %25*)*)(i8* %tmp85, i8* %tmp95, %25* %tmp94)
-  tail call void @objc_release(i8* %tmp93) nounwind
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, %25*)*)(i8* %tmp85, i8* %tmp95, %25* %tmp94)
+  tail call void @llvm.objc.release(i8* %tmp93) nounwind
   %tmp96 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_148", align 8
-  %tmp97 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %tmp4, i8* %tmp96)
+  %tmp97 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*)*)(i8* %tmp4, i8* %tmp96)
   %tmp98 = icmp eq i8 %tmp97, 0
   br i1 %tmp98, label %bb99, label %bb104
 
 bb99:                                             ; preds = %bb57
   %tmp100 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_413", align 8
-  %tmp101 = tail call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*)*)(i8* %tmp85, i8* %tmp100)
+  %tmp101 = tail call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*)*)(i8* %tmp85, i8* %tmp100)
   %tmp102 = or i64 %tmp101, 12
   %tmp103 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_415", align 8
-  tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i64)*)(i8* %tmp85, i8* %tmp103, i64 %tmp102)
+  tail call void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i64)*)(i8* %tmp85, i8* %tmp103, i64 %tmp102)
   br label %bb104
 
 bb104:                                            ; preds = %bb99, %bb57
-  %tmp105 = call i8* @objc_autorelease(i8* %tmp72) nounwind
+  %tmp105 = call i8* @llvm.objc.autorelease(i8* %tmp72) nounwind
   %tmp106 = bitcast i8* %tmp105 to %14*
-  tail call void @objc_release(i8* %tmp85) nounwind
+  tail call void @llvm.objc.release(i8* %tmp85) nounwind
   %tmp107 = bitcast %18* %tmp47 to i8*
-  tail call void @objc_release(i8* %tmp107) nounwind
+  tail call void @llvm.objc.release(i8* %tmp107) nounwind
   ret %14* %tmp106
 }
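
The fold that gives move-and-form-retain-autorelease.ll its name: when a value is retained and then autoreleased with nothing in between that can alter the reference count, the two calls combine into a single llvm.objc.retainAutorelease, which is what the bb57 CHECK above demands. A minimal sketch of the input shape (@sketch is an illustrative name):

declare i8* @llvm.objc.retain(i8*)
declare i8* @llvm.objc.autorelease(i8*)

define i8* @sketch(i8* %p) {
entry:
  %0 = tail call i8* @llvm.objc.retain(i8* %p) nounwind
  %1 = call i8* @llvm.objc.autorelease(i8* %p) nounwind
  ; expected: the adjacent pair fuses into
  ;   tail call i8* @llvm.objc.retainAutorelease(i8* %p)
  ret i8* %p
}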
 
diff --git a/test/Transforms/ObjCARC/move-and-merge-autorelease.ll b/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
index 0a68541..eaf1fc1 100644
--- a/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
+++ b/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
@@ -4,7 +4,7 @@
 ; and fold it with the release in bb65.
 
 ; CHECK: bb65:
-; CHECK: call i8* @objc_retainAutorelease
+; CHECK: call i8* @llvm.objc.retainAutorelease
 ; CHECK: br label %bb76
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -25,24 +25,24 @@
 @"\01L_OBJC_SELECTOR_REFERENCES_624" = external hidden global i8*, section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
 @"\01L_OBJC_SELECTOR_REFERENCES_626" = external hidden global i8*, section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
 
-declare i8* @objc_msgSend(i8*, i8*, ...)
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...)
 
-declare i8* @objc_retain(i8*)
+declare i8* @llvm.objc.retain(i8*)
 
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 
-declare i8* @objc_autorelease(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
 
 define hidden %0* @foo(%1* %arg, %3* %arg3) {
 bb:
   %tmp16 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_620", align 8
   %tmp17 = bitcast %3* %arg3 to i8*
-  %tmp18 = call %4* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %4* (i8*, i8*)*)(i8* %tmp17, i8* %tmp16)
+  %tmp18 = call %4* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %4* (i8*, i8*)*)(i8* %tmp17, i8* %tmp16)
   %tmp19 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_622", align 8
   %tmp20 = bitcast %4* %tmp18 to i8*
-  %tmp21 = call %5* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %5* (i8*, i8*)*)(i8* %tmp20, i8* %tmp19)
+  %tmp21 = call %5* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %5* (i8*, i8*)*)(i8* %tmp20, i8* %tmp19)
   %tmp22 = bitcast %5* %tmp21 to i8*
-  %tmp23 = call i8* @objc_retain(i8* %tmp22) nounwind
+  %tmp23 = call i8* @llvm.objc.retain(i8* %tmp22) nounwind
   %tmp24 = bitcast i8* %tmp23 to %5*
   %tmp26 = icmp eq i8* %tmp23, null
   br i1 %tmp26, label %bb81, label %bb27
@@ -50,22 +50,22 @@
 bb27:                                             ; preds = %bb
   %tmp29 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_11", align 8
   %tmp30 = bitcast %1* %arg to i8*
-  %tmp31 = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp30, i8* %tmp29)
-  %tmp34 = call i8* @objc_retain(i8* %tmp31) nounwind
+  %tmp31 = call i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* %tmp30, i8* %tmp29)
+  %tmp34 = call i8* @llvm.objc.retain(i8* %tmp31) nounwind
   %tmp37 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_421455", align 8
-  %tmp39 = call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*)*)(i8* %tmp34, i8* %tmp37)
+  %tmp39 = call %0* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %0* (i8*, i8*)*)(i8* %tmp34, i8* %tmp37)
   %tmp40 = bitcast %0* %tmp39 to i8*
-  %tmp41 = call i8* @objc_retain(i8* %tmp40) nounwind
+  %tmp41 = call i8* @llvm.objc.retain(i8* %tmp40) nounwind
   %tmp42 = bitcast i8* %tmp41 to %0*
   %tmp44 = icmp eq i8* %tmp41, null
   br i1 %tmp44, label %bb45, label %bb55
 
 bb45:                                             ; preds = %bb27
   %tmp47 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_624", align 8
-  %tmp49 = call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*)*)(i8* %tmp34, i8* %tmp47)
+  %tmp49 = call %0* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %0* (i8*, i8*)*)(i8* %tmp34, i8* %tmp47)
   %tmp51 = bitcast %0* %tmp49 to i8*
-  %tmp52 = call i8* @objc_retain(i8* %tmp51) nounwind
-  call void @objc_release(i8* %tmp41) nounwind
+  %tmp52 = call i8* @llvm.objc.retain(i8* %tmp51) nounwind
+  call void @llvm.objc.release(i8* %tmp41) nounwind
   br label %bb55
 
 bb55:                                             ; preds = %bb27, %bb45
@@ -76,33 +76,33 @@
 bb58:                                             ; preds = %bb55
   %tmp60 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_598", align 8
   %tmp61 = bitcast %0* %tmp13.0 to i8*
-  %tmp62 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %tmp61, i8* %tmp60)
+  %tmp62 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*)*)(i8* %tmp61, i8* %tmp60)
   %tmp64 = icmp eq i8 %tmp62, 0
   br i1 %tmp64, label %bb76, label %bb65
 
 bb65:                                             ; preds = %bb58
   %tmp68 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_626", align 8
   %tmp69 = bitcast %0* %tmp13.0 to i8*
-  %tmp70 = call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*, %5*)*)(i8* %tmp69, i8* %tmp68, %5* %tmp24)
+  %tmp70 = call %0* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to %0* (i8*, i8*, %5*)*)(i8* %tmp69, i8* %tmp68, %5* %tmp24)
   %tmp72 = bitcast %0* %tmp70 to i8*
-  %tmp73 = call i8* @objc_retain(i8* %tmp72) nounwind
+  %tmp73 = call i8* @llvm.objc.retain(i8* %tmp72) nounwind
   br label %bb76
 
 bb76:                                             ; preds = %bb58, %bb55, %bb65
   %tmp10.0 = phi %0* [ %tmp70, %bb65 ], [ null, %bb58 ], [ null, %bb55 ]
   %tmp78 = bitcast %0* %tmp13.0 to i8*
-  call void @objc_release(i8* %tmp78) nounwind
-  call void @objc_release(i8* %tmp34) nounwind
+  call void @llvm.objc.release(i8* %tmp78) nounwind
+  call void @llvm.objc.release(i8* %tmp34) nounwind
   br label %bb81
 
 bb81:                                             ; preds = %bb, %bb76
   %tmp10.1 = phi %0* [ %tmp10.0, %bb76 ], [ null, %bb ]
   %tmp83 = bitcast %0* %tmp10.1 to i8*
-  %tmp84 = call i8* @objc_retain(i8* %tmp83) nounwind
-  call void @objc_release(i8* %tmp23) nounwind
-  %tmp87 = call i8* @objc_autorelease(i8* %tmp84) nounwind
+  %tmp84 = call i8* @llvm.objc.retain(i8* %tmp83) nounwind
+  call void @llvm.objc.release(i8* %tmp23) nounwind
+  %tmp87 = call i8* @llvm.objc.autorelease(i8* %tmp84) nounwind
   %tmp88 = bitcast i8* %tmp87 to %0*
   %tmp92 = bitcast %0* %tmp10.1 to i8*
-  call void @objc_release(i8* %tmp92) nounwind
+  call void @llvm.objc.release(i8* %tmp92) nounwind
   ret %0* %tmp88
 }
diff --git a/test/Transforms/ObjCARC/nested.ll b/test/Transforms/ObjCARC/nested.ll
index b317cd8..8b7e673 100644
--- a/test/Transforms/ObjCARC/nested.ll
+++ b/test/Transforms/ObjCARC/nested.ll
@@ -9,16 +9,16 @@
 
 declare void @callee()
 declare i8* @returner()
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_retain(i8*)
-declare void @objc_enumerationMutation(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare void @llvm.objc.enumerationMutation(i8*)
 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...) nonlazybind
 declare void @use(i8*)
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 declare i8* @def()
 declare void @__crasher_block_invoke(i8* nocapture)
-declare i8* @objc_retainBlock(i8*)
+declare i8* @llvm.objc.retainBlock(i8*)
 declare void @__crasher_block_invoke1(i8* nocapture)
 
 !0 = !{}
@@ -26,19 +26,19 @@
 ; Delete a nested retain+release pair.
 
 ; CHECK-LABEL: define void @test0(
-; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: call i8* @llvm.objc.retain
+; CHECK-NOT: @llvm.objc.retain
 ; CHECK: }
 define void @test0(i8* %a) nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
-  %0 = call i8* @objc_retain(i8* %a) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -63,7 +63,7 @@
   br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %1)
+  call void @llvm.objc.enumerationMutation(i8* %1)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -77,33 +77,33 @@
 
 forcoll.refetch:
   %tmp5 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call6 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp5, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call6 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp5, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %5 = icmp eq i64 %call6, 0
   br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %1) nounwind
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %1) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Delete a nested retain+release pair.
 
 ; CHECK-LABEL: define void @test2(
-; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK-NOT: @llvm.objc.retain
 ; CHECK: }
 define void @test2() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call3, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -128,7 +128,7 @@
   br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %1)
+  call void @llvm.objc.enumerationMutation(i8* %1)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -142,33 +142,33 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %5 = icmp eq i64 %call7, 0
   br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %1) nounwind
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %1) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Delete a nested retain+release pair.
 
 ; CHECK-LABEL: define void @test4(
-; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: call i8* @llvm.objc.retain
+; CHECK-NOT: @llvm.objc.retain
 ; CHECK: }
 define void @test4() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %tmp = load i8*, i8** @g, align 8
-  %0 = call i8* @objc_retain(i8* %tmp) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %tmp) nounwind
   %tmp2 = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp2, i8 0, i64 64, i1 false)
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp4 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp4, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp4, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -193,7 +193,7 @@
   br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %1)
+  call void @llvm.objc.enumerationMutation(i8* %1)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -207,33 +207,33 @@
 
 forcoll.refetch:
   %tmp7 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call8 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp7, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call8 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp7, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %5 = icmp eq i64 %call8, 0
   br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %1) nounwind
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %1) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Delete a nested retain+release pair.
 
 ; CHECK-LABEL: define void @test5(
-; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK-NOT: @llvm.objc.retain
 ; CHECK: }
 define void @test5() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call3, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -258,7 +258,7 @@
   br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %1)
+  call void @llvm.objc.enumerationMutation(i8* %1)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -272,13 +272,13 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %5 = icmp eq i64 %call7, 0
   br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %1) nounwind
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %1) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -286,20 +286,20 @@
 ; use.
 ;
 ; CHECK-LABEL: define void @test6(
-; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK-NOT: @llvm.objc.retain
 ; CHECK: }
 define void @test6() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call3, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -324,7 +324,7 @@
   br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %1)
+  call void @llvm.objc.enumerationMutation(i8* %1)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -338,14 +338,14 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %5 = icmp eq i64 %call7, 0
   br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %1) nounwind
+  call void @llvm.objc.release(i8* %1) nounwind
   call void @callee()
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -354,21 +354,21 @@
 ; reasoning about nesting.
 
 ; CHECK-LABEL: define void @test7(
-; CHECK: call i8* @objc_retain
-; CHECK: @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: @llvm.objc.retain
 ; CHECK: }
 define void @test7() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   call void @callee()
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call3, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -393,7 +393,7 @@
   br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %1)
+  call void @llvm.objc.enumerationMutation(i8* %1)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -407,34 +407,34 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %5 = icmp eq i64 %call7, 0
   br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %1) nounwind
+  call void @llvm.objc.release(i8* %1) nounwind
   call void @callee()
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Delete a nested retain+release pair.
 
 ; CHECK-LABEL: define void @test8(
-; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK-NOT: @llvm.objc.retain
 ; CHECK: }
 define void @test8() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call3, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -459,7 +459,7 @@
   br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %1)
+  call void @llvm.objc.enumerationMutation(i8* %1)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -480,13 +480,13 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %5 = icmp eq i64 %call7, 0
   br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %1) nounwind
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %1) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -495,23 +495,23 @@
 ; See test9b for the same testcase without a split backedge.
 
 ; CHECK-LABEL: define void @test9(
-; CHECK: call i8* @objc_retain
-; CHECK: call i8* @objc_retain
-; CHECK: call i8* @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: }
 define void @test9() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %call1 = call i8* @returner()
-  %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call1) nounwind
+  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %2 = call i8* @objc_retain(i8* %0) nounwind
+  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call4, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -535,7 +535,7 @@
   br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %2)
+  call void @llvm.objc.enumerationMutation(i8* %2)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -548,37 +548,37 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %4 = icmp eq i64 %call7, 0
   br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %2) nounwind
-  call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %2) nounwind
+  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Like test9, but without a split backedge. TODO: optimize this.
 
 ; CHECK-LABEL: define void @test9b(
-; CHECK: call i8* @objc_retain
-; CHECK: call i8* @objc_retain
-; CHECK: @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: @llvm.objc.retain
 ; CHECK: }
 define void @test9b() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %call1 = call i8* @returner()
-  %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call1) nounwind
+  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %2 = call i8* @objc_retain(i8* %0) nounwind
+  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call4, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -602,7 +602,7 @@
   br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %2)
+  call void @llvm.objc.enumerationMutation(i8* %2)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -612,14 +612,14 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %4 = icmp eq i64 %call7, 0
   br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %2) nounwind
-  call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %2) nounwind
+  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -628,24 +628,24 @@
 ; See test10b for the same testcase without a split backedge.
 
 ; CHECK-LABEL: define void @test10(
-; CHECK: call i8* @objc_retain
-; CHECK: call i8* @objc_retain
-; CHECK: call i8* @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: call i8* @llvm.objc.retain
 ; CHECK: }
 define void @test10() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %call1 = call i8* @returner()
-  %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call1) nounwind
+  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
   call void @callee()
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %2 = call i8* @objc_retain(i8* %0) nounwind
+  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call4, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -669,7 +669,7 @@
   br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %2)
+  call void @llvm.objc.enumerationMutation(i8* %2)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -682,38 +682,38 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %4 = icmp eq i64 %call7, 0
   br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %2) nounwind
-  call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %2) nounwind
+  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 ; Like test10, but without a split backedge. TODO: optimize this.
 
 ; CHECK-LABEL: define void @test10b(
-; CHECK: call i8* @objc_retain
-; CHECK: call i8* @objc_retain
-; CHECK: @objc_retain
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
+; CHECK: @llvm.objc.retain
 ; CHECK: }
 define void @test10b() nounwind {
 entry:
   %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
   %items.ptr = alloca [16 x i8*], align 8
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
   %call1 = call i8* @returner()
-  %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call1) nounwind
+  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
   call void @callee()
   %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
   call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
-  %2 = call i8* @objc_retain(i8* %0) nounwind
+  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
   %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %iszero = icmp eq i64 %call4, 0
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
@@ -737,7 +737,7 @@
   br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
 
 forcoll.mutated:
-  call void @objc_enumerationMutation(i8* %2)
+  call void @llvm.objc.enumerationMutation(i8* %2)
   br label %forcoll.notmutated
 
 forcoll.notmutated:
@@ -747,14 +747,14 @@
 
 forcoll.refetch:
   %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
-  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
+  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
   %4 = icmp eq i64 %call7, 0
   br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
 
 forcoll.empty:
-  call void @objc_release(i8* %2) nounwind
-  call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %2) nounwind
+  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -768,9 +768,9 @@
 @__block_d_tmp5 = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }
 
 ; CHECK-LABEL: define void @test11(
-; CHECK: tail call i8* @objc_retain(i8* %call) [[NUW:#[0-9]+]]
-; CHECK: tail call i8* @objc_retain(i8* %call) [[NUW]]
-; CHECK: call void @objc_release(i8* %call) [[NUW]], !clang.imprecise_release !0
+; CHECK: tail call i8* @llvm.objc.retain(i8* %call) [[NUW:#[0-9]+]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %call) [[NUW]]
+; CHECK: call void @llvm.objc.release(i8* %call) [[NUW]], !clang.imprecise_release !0
 ; CHECK: }
 define void @test11() {
 entry:
@@ -788,14 +788,14 @@
   store i8* bitcast (void (i8*)* @__crasher_block_invoke to i8*), i8** %block.invoke, align 8
   %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
   store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp to %struct.__block_d*), %struct.__block_d** %block.d, align 8
-  %foo2 = tail call i8* @objc_retain(i8* %call) nounwind
+  %foo2 = tail call i8* @llvm.objc.retain(i8* %call) nounwind
   store i8* %foo2, i8** %foo, align 8
   %foo4 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block to i8*
-  %foo5 = call i8* @objc_retainBlock(i8* %foo4) nounwind
+  %foo5 = call i8* @llvm.objc.retainBlock(i8* %foo4) nounwind
   call void @use(i8* %foo5), !clang.arc.no_objc_arc_exceptions !0
-  call void @objc_release(i8* %foo5) nounwind
+  call void @llvm.objc.release(i8* %foo5) nounwind
   %strongdestroy = load i8*, i8** %foo, align 8
-  call void @objc_release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
   %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
   %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
   store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa11, align 8
@@ -807,19 +807,19 @@
   store i8* bitcast (void (i8*)* @__crasher_block_invoke1 to i8*), i8** %block.invoke14, align 8
   %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
   store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp5 to %struct.__block_d*), %struct.__block_d** %block.d15, align 8
-  %foo18 = call i8* @objc_retain(i8* %call) nounwind
+  %foo18 = call i8* @llvm.objc.retain(i8* %call) nounwind
   store i8* %call, i8** %foo10, align 8
   %foo20 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9 to i8*
-  %foo21 = call i8* @objc_retainBlock(i8* %foo20) nounwind
+  %foo21 = call i8* @llvm.objc.retainBlock(i8* %foo20) nounwind
   call void @use(i8* %foo21), !clang.arc.no_objc_arc_exceptions !0
-  call void @objc_release(i8* %foo21) nounwind
+  call void @llvm.objc.release(i8* %foo21) nounwind
   %strongdestroy25 = load i8*, i8** %foo10, align 8
-  call void @objc_release(i8* %strongdestroy25) nounwind, !clang.imprecise_release !0
-  call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %strongdestroy25) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %call) nounwind, !clang.imprecise_release !0
   ret void
 }
 
 
-; CHECK: attributes #0 = { argmemonly nounwind }
-; CHECK: attributes #1 = { nonlazybind }
 ; CHECK: attributes [[NUW]] = { nounwind }
+; CHECK: attributes #1 = { argmemonly nounwind }
+; CHECK: attributes #2 = { nonlazybind }
diff --git a/test/Transforms/ObjCARC/opt-catchswitch.ll b/test/Transforms/ObjCARC/opt-catchswitch.ll
index 5af62e0..b627c11 100644
--- a/test/Transforms/ObjCARC/opt-catchswitch.ll
+++ b/test/Transforms/ObjCARC/opt-catchswitch.ll
@@ -7,15 +7,15 @@
 
 declare i32 @__CxxFrameHandler3(...)
 
-declare dllimport i8* @objc_autoreleaseReturnValue(i8* returned)
-declare dllimport i8* @objc_retain(i8* returned)
-declare dllimport i8* @objc_retainAutoreleasedReturnValue(i8* returned)
-declare dllimport void @objc_release(i8*)
+declare dllimport i8* @llvm.objc.autoreleaseReturnValue(i8* returned)
+declare dllimport i8* @llvm.objc.retain(i8* returned)
+declare dllimport i8* @llvm.objc.retainAutoreleasedReturnValue(i8* returned)
+declare dllimport void @llvm.objc.release(i8*)
 
 define i8* @g(i8* %p, i8* %q) local_unnamed_addr personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %p) #0
-  %1 = tail call i8* @objc_retain(i8* %q) #0
+  %0 = tail call i8* @llvm.objc.retain(i8* %p) #0
+  %1 = tail call i8* @llvm.objc.retain(i8* %q) #0
   %call = invoke i8* @f(i8* %p, i8* %q)
           to label %invoke.cont unwind label %catch.dispatch, !clang.arc.no_objc_arc_exceptions !0
 
@@ -27,19 +27,19 @@
   catchret from %3 to label %cleanup
 
 invoke.cont:
-  %4 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call) #0
+  %4 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) #0
   br label %cleanup
 
 cleanup:
   %retval.0 = phi i8* [ %call, %invoke.cont ], [ null, %catch ]
-  tail call void @objc_release(i8* %q) #0, !clang.imprecise_release !0
-  tail call void @objc_release(i8* %p) #0, !clang.imprecise_release !0
-  %5 = tail call i8* @objc_autoreleaseReturnValue(i8* %retval.0) #0
+  tail call void @llvm.objc.release(i8* %q) #0, !clang.imprecise_release !0
+  tail call void @llvm.objc.release(i8* %p) #0, !clang.imprecise_release !0
+  %5 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %retval.0) #0
   ret i8* %retval.0
 }
 
 ; CHECK-LABEL: entry:
-; CHECK-NEXT:    %0 = tail call i8* @objc_retain(i8* %p) #0
+; CHECK-NEXT:    %0 = tail call i8* @llvm.objc.retain(i8* %p) #0
 ; CHECK-NEXT:    %call = invoke i8* @f(i8* %p, i8* %q)
 ; CHECK-NEXT:            to label %invoke.cont unwind label %catch.dispatch
 
@@ -47,7 +47,7 @@
 ; CHECK-NEXT:    %1 = catchswitch within none [label %catch] unwind to caller
 
 ; CHECK-LABEL: cleanup:
-; CHECK:         tail call void @objc_release(i8* %p) #0
+; CHECK:         tail call void @llvm.objc.release(i8* %p) #0
 
 attributes #0 = { nounwind }
 
diff --git a/test/Transforms/ObjCARC/path-overflow.ll b/test/Transforms/ObjCARC/path-overflow.ll
index 82c9fbe..227d6e5 100644
--- a/test/Transforms/ObjCARC/path-overflow.ll
+++ b/test/Transforms/ObjCARC/path-overflow.ll
@@ -18,13 +18,13 @@
 @_unnamed_cfstring = external constant %struct.NSConstantString, section "__DATA,__cfstring"
 @_unnamed_cfstring_2 = external constant %struct.NSConstantString, section "__DATA,__cfstring"
 
-declare i8* @objc_retain(i8*) nonlazybind
-declare i8* @objc_retainAutoreleasedReturnValue(i8*) nonlazybind
-declare void @objc_release(i8*) nonlazybind
+declare i8* @llvm.objc.retain(i8*) nonlazybind
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*) nonlazybind
+declare void @llvm.objc.release(i8*) nonlazybind
 declare i8* @returner()
-declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...) nonlazybind
 declare void @NSLog(i8*, ...)
-declare void @objc_msgSend_stret(i8*, i8*, ...)
+declare void @llvm.objc.msgSend_stret(i8*, i8*, ...)
 declare i32 @__gxx_personality_sj0(...)
 declare i32 @__objc_personality_v0(...)
 
@@ -41,7 +41,7 @@
 
 msgSend.cont:                                     ; preds = %msgSend.nullinit, %msgSend.call
   %0 = bitcast %struct.NSConstantString* @_unnamed_cfstring to i8*
-  %1 = call i8* @objc_retain(i8* %0) nounwind
+  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
   br i1 undef, label %msgSend.nullinit33, label %msgSend.call32
 
 msgSend.call32:                                   ; preds = %if.end10
@@ -336,7 +336,7 @@
   br label %msgSend.cont507
 
 msgSend.cont507:                                  ; preds = %msgSend.nullinit506, %msgSend.call505
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
@@ -779,9 +779,9 @@
   br i1 undef, label %bb186, label %bb195
 
 bb186:                                            ; preds = %bb184
-  %tmp188 = call i8* @objc_retainAutoreleasedReturnValue(i8* %tmp185)
-  %tmp189 = call i8* @objc_retain(i8* %tmp188)
-  call void @objc_release(i8* %tmp189), !clang.imprecise_release !0
+  %tmp188 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %tmp185)
+  %tmp189 = call i8* @llvm.objc.retain(i8* %tmp188)
+  call void @llvm.objc.release(i8* %tmp189), !clang.imprecise_release !0
   br i1 undef, label %bb197, label %bb190
 
 bb190:                                            ; preds = %bb186
@@ -866,18 +866,18 @@
 ; Function Attrs: ssp
 define void @test3() #1 personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
 entry:
-  %call2 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call2 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %entry
-  %call5 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call5 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont4 unwind label %lpad3
 
 invoke.cont4:                                     ; preds = %invoke.cont
   br i1 undef, label %land.end, label %land.rhs
 
 land.rhs:                                         ; preds = %invoke.cont4
-  %call7 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call7 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %land.end unwind label %lpad3
 
 land.end:                                         ; preds = %land.rhs, %invoke.cont4
@@ -896,11 +896,11 @@
   unreachable
 
 invoke.cont8:                                     ; preds = %if.then.i, %invoke.cont.i
-  %call18 = invoke i8* (i8*, i8*, i8*, ...) bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*, ...)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef)
+  %call18 = invoke i8* (i8*, i8*, i8*, ...) bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*, ...)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef)
           to label %invoke.cont17 unwind label %lpad16
 
 invoke.cont17:                                    ; preds = %invoke.cont8
-  %call22 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call22 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont21 unwind label %lpad20
 
 invoke.cont21:                                    ; preds = %invoke.cont17
@@ -919,14 +919,14 @@
   unreachable
 
 invoke.cont24:                                    ; preds = %if.then.i1981, %invoke.cont.i1980
-  %call37 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call37 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont36 unwind label %lpad35
 
 invoke.cont36:                                    ; preds = %invoke.cont24
   br i1 undef, label %land.end43, label %land.rhs39
 
 land.rhs39:                                       ; preds = %invoke.cont36
-  %call41 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call41 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %land.end43 unwind label %lpad35
 
 land.end43:                                       ; preds = %land.rhs39, %invoke.cont36
@@ -945,18 +945,18 @@
   unreachable
 
 invoke.cont44:                                    ; preds = %if.then.i1987, %invoke.cont.i1986
-  %call53 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call53 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont52 unwind label %lpad51
 
 invoke.cont52:                                    ; preds = %invoke.cont44
   br i1 undef, label %land.end70, label %land.rhs58
 
 land.rhs58:                                       ; preds = %invoke.cont52
-  %call63 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 42)
+  %call63 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 42)
           to label %invoke.cont62 unwind label %lpad61
 
 invoke.cont62:                                    ; preds = %land.rhs58
-  %call68 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
+  %call68 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
           to label %land.end70 unwind label %lpad66.body.thread
 
 land.end70:                                       ; preds = %invoke.cont62, %invoke.cont52
@@ -985,11 +985,11 @@
   br label %ehcleanup102
 
 invoke.cont91:                                    ; preds = %if.then.i1999, %invoke.cont.i1998
-  %call96 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call96 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont95 unwind label %lpad94
 
 invoke.cont95:                                    ; preds = %invoke.cont91
-  %call98 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* %call96)
+  %call98 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* %call96)
           to label %invoke.cont97 unwind label %lpad94
 
 invoke.cont97:                                    ; preds = %invoke.cont95
@@ -1008,7 +1008,7 @@
   unreachable
 
 invoke.cont100:                                   ; preds = %if.then.i2005, %invoke.cont.i2004
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont110 unwind label %lpad109
 
 invoke.cont110:                                   ; preds = %invoke.cont100
@@ -1111,11 +1111,11 @@
   br label %invoke.cont165
 
 invoke.cont165:                                   ; preds = %if.then.i2029, %invoke.cont.i2028
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, void (i8*, i8*)*)*)(i8* undef, i8* undef, void (i8*, i8*)* undef)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, void (i8*, i8*)*)*)(i8* undef, i8* undef, void (i8*, i8*)* undef)
           to label %invoke.cont184 unwind label %lpad183
 
 invoke.cont184:                                   ; preds = %invoke.cont165
-  %call186 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call186 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont185 unwind label %lpad183
 
 invoke.cont185:                                   ; preds = %invoke.cont184
@@ -1134,15 +1134,15 @@
   br label %lpad183.body
 
 invoke.cont190:                                   ; preds = %if.then.i2035, %invoke.cont.i2034
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont197 unwind label %lpad196
 
 invoke.cont197:                                   ; preds = %invoke.cont190
-  %call202 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call202 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont201 unwind label %lpad200
 
 invoke.cont201:                                   ; preds = %invoke.cont197
-  %call205 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call205 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont204 unwind label %lpad203
 
 invoke.cont204:                                   ; preds = %invoke.cont201
@@ -1161,7 +1161,7 @@
   unreachable
 
 invoke.cont207:                                   ; preds = %if.then.i2041, %invoke.cont.i2040
-  %call209 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call209 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont208 unwind label %lpad203
 
 invoke.cont208:                                   ; preds = %invoke.cont207
@@ -1175,11 +1175,11 @@
   br label %invoke.cont213
 
 invoke.cont213:                                   ; preds = %if.then.i2047, %invoke.cont.i2046
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont221 unwind label %lpad220
 
 invoke.cont221:                                   ; preds = %invoke.cont213
-  %call229 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call229 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont228 unwind label %lpad227
 
 invoke.cont228:                                   ; preds = %invoke.cont221
@@ -1198,7 +1198,7 @@
   unreachable
 
 invoke.cont231:                                   ; preds = %if.then.i2053, %invoke.cont.i2052
-  %call233 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call233 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont232 unwind label %lpad227
 
 invoke.cont232:                                   ; preds = %invoke.cont231
@@ -1212,39 +1212,39 @@
   br label %invoke.cont237
 
 invoke.cont237:                                   ; preds = %if.then.i2059, %invoke.cont.i2058
-  %call246 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call246 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont245 unwind label %lpad244
 
 invoke.cont245:                                   ; preds = %invoke.cont237
-  %call248 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 13)
+  %call248 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 13)
           to label %invoke.cont247 unwind label %lpad244
 
 invoke.cont247:                                   ; preds = %invoke.cont245
-  %call251 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 2)
+  %call251 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 2)
           to label %invoke.cont250 unwind label %lpad249
 
 invoke.cont250:                                   ; preds = %invoke.cont247
-  %call254 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 7)
+  %call254 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 7)
           to label %invoke.cont253 unwind label %lpad252
 
 invoke.cont253:                                   ; preds = %invoke.cont250
-  %call257 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8**, i32)*)(i8* undef, i8* undef, i8** undef, i32 3)
+  %call257 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8**, i32)*)(i8* undef, i8* undef, i8** undef, i32 3)
           to label %invoke.cont256 unwind label %lpad255
 
 invoke.cont256:                                   ; preds = %invoke.cont253
-  %call260 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* undef)
+  %call260 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* undef)
           to label %invoke.cont259 unwind label %lpad258
 
 invoke.cont259:                                   ; preds = %invoke.cont256
-  %call267 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call267 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont266 unwind label %lpad265
 
 invoke.cont266:                                   ; preds = %invoke.cont259
-  %call275 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
+  %call275 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
           to label %invoke.cont274 unwind label %lpad273
 
 invoke.cont274:                                   ; preds = %invoke.cont266
-  %call279 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call279 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont278 unwind label %lpad277
 
 invoke.cont278:                                   ; preds = %invoke.cont274
@@ -1263,34 +1263,34 @@
   unreachable
 
 invoke.cont281:                                   ; preds = %if.then.i2065, %invoke.cont.i2064
-  %call291 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call291 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont290 unwind label %lpad289
 
 invoke.cont290:                                   ; preds = %invoke.cont281
-  %call303 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 8)
+  %call303 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 8)
           to label %invoke.cont302 unwind label %lpad301
 
 invoke.cont302:                                   ; preds = %invoke.cont290
-  %call310 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, double)*)(i8* undef, i8* undef, double 5.000000e-01)
+  %call310 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, double)*)(i8* undef, i8* undef, double 5.000000e-01)
           to label %invoke.cont309 unwind label %lpad308
 
 invoke.cont309:                                   ; preds = %invoke.cont302
-  %call313 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 42)
+  %call313 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 42)
           to label %invoke.cont312 unwind label %lpad311
 
 invoke.cont312:                                   ; preds = %invoke.cont309
-  %call316 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8**, i8**, i32)*)(i8* undef, i8* undef, i8** undef, i8** undef, i32 2)
+  %call316 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8**, i8**, i32)*)(i8* undef, i8* undef, i8** undef, i8** undef, i32 2)
           to label %invoke.cont315 unwind label %lpad314
 
 invoke.cont315:                                   ; preds = %invoke.cont312
-  %call322 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
+  %call322 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
           to label %invoke.cont321 unwind label %lpad320
 
 invoke.cont321:                                   ; preds = %invoke.cont315
   br i1 undef, label %land.end344, label %land.rhs335
 
 land.rhs335:                                      ; preds = %invoke.cont321
-  %call342 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call342 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %land.end344 unwind label %lpad340.body.thread
 
 land.end344:                                      ; preds = %land.rhs335, %invoke.cont321
@@ -1304,15 +1304,15 @@
   br label %invoke.cont345
 
 invoke.cont345:                                   ; preds = %if.then.i2071, %invoke.cont.i2070
-  %call362 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
+  %call362 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef)
           to label %invoke.cont361 unwind label %lpad360
 
 invoke.cont361:                                   ; preds = %invoke.cont345
-  %call365 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call365 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont364 unwind label %lpad363
 
 invoke.cont364:                                   ; preds = %invoke.cont361
-  %call371 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call371 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont370 unwind label %lpad369
 
 invoke.cont370:                                   ; preds = %invoke.cont364
@@ -1331,15 +1331,15 @@
   unreachable
 
 invoke.cont373:                                   ; preds = %if.then.i2077, %invoke.cont.i2076
-  %call377 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32, i8*)*)(i8* undef, i8* undef, i32 42, i8* undef)
+  %call377 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32, i8*)*)(i8* undef, i8* undef, i32 42, i8* undef)
           to label %invoke.cont376 unwind label %lpad363
 
 invoke.cont376:                                   ; preds = %invoke.cont373
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 5)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 5)
           to label %invoke.cont382 unwind label %lpad381
 
 invoke.cont382:                                   ; preds = %invoke.cont376
-  %call384 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call384 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont383 unwind label %lpad381
 
 invoke.cont383:                                   ; preds = %invoke.cont382
@@ -1358,19 +1358,19 @@
   unreachable
 
 invoke.cont392:                                   ; preds = %if.then.i2083, %invoke.cont.i2082
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 -2)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 -2)
           to label %invoke.cont395 unwind label %lpad381
 
 invoke.cont395:                                   ; preds = %invoke.cont392
-  %call397 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call397 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont396 unwind label %lpad381
 
 invoke.cont396:                                   ; preds = %invoke.cont395
-  %call400 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call400 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont399 unwind label %lpad398
 
 invoke.cont399:                                   ; preds = %invoke.cont396
-  %call403 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call403 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont402 unwind label %lpad401
 
 invoke.cont402:                                   ; preds = %invoke.cont399
@@ -1389,15 +1389,15 @@
   unreachable
 
 invoke.cont405:                                   ; preds = %if.then.i2089, %invoke.cont.i2088
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 -1)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 -1)
           to label %invoke.cont408 unwind label %lpad381
 
 invoke.cont408:                                   ; preds = %invoke.cont405
-  %call410 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call410 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont409 unwind label %lpad381
 
 invoke.cont409:                                   ; preds = %invoke.cont408
-  %call413 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call413 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont412 unwind label %lpad411
 
 invoke.cont412:                                   ; preds = %invoke.cont409
@@ -1416,19 +1416,19 @@
   unreachable
 
 invoke.cont418:                                   ; preds = %if.then.i2095, %invoke.cont.i2094
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 0)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i32)*)(i8* undef, i8* undef, i8* undef, i32 0)
           to label %invoke.cont422 unwind label %lpad381
 
 invoke.cont422:                                   ; preds = %invoke.cont418
-  %call424 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call424 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont423 unwind label %lpad381
 
 invoke.cont423:                                   ; preds = %invoke.cont422
-  %call427 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call427 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont426 unwind label %lpad425
 
 invoke.cont426:                                   ; preds = %invoke.cont423
-  %call430 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call430 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont429 unwind label %lpad428
 
 invoke.cont429:                                   ; preds = %invoke.cont426
@@ -1447,7 +1447,7 @@
   unreachable
 
 invoke.cont432:                                   ; preds = %if.then.i2101, %invoke.cont.i2100
-  %call436 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 0)
+  %call436 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 0)
           to label %invoke.cont435 unwind label %lpad381
 
 invoke.cont435:                                   ; preds = %invoke.cont432
@@ -1455,7 +1455,7 @@
           to label %invoke.cont.i2106 unwind label %lpad.i2108
 
 invoke.cont.i2106:                                ; preds = %invoke.cont435
-  %call444 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 5)
+  %call444 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 5)
           to label %invoke.cont443 unwind label %lpad381
 
 lpad.i2108:                                       ; preds = %invoke.cont435
@@ -1479,11 +1479,11 @@
   unreachable
 
 invoke.cont449:                                   ; preds = %if.then.i2113, %invoke.cont.i2112
-  %call453 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 -2)
+  %call453 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 -2)
           to label %invoke.cont452 unwind label %lpad381
 
 invoke.cont452:                                   ; preds = %invoke.cont449
-  %call456 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
+  %call456 = invoke i32 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont455 unwind label %lpad454
 
 invoke.cont455:                                   ; preds = %invoke.cont452
@@ -1502,7 +1502,7 @@
   unreachable
 
 invoke.cont458:                                   ; preds = %if.then.i2119, %invoke.cont.i2118
-  %call461 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 -1)
+  %call461 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 -1)
           to label %invoke.cont460 unwind label %lpad381
 
 invoke.cont460:                                   ; preds = %invoke.cont458
@@ -1521,7 +1521,7 @@
   br label %ehcleanup477
 
 invoke.cont466:                                   ; preds = %if.then.i2125, %invoke.cont.i2124
-  %call470 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 0)
+  %call470 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 0)
           to label %invoke.cont469 unwind label %lpad381
 
 invoke.cont469:                                   ; preds = %invoke.cont466
@@ -1540,34 +1540,34 @@
   br label %ehcleanup477
 
 invoke.cont475:                                   ; preds = %if.then.i2131, %invoke.cont.i2130
-  %call491 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 1)
+  %call491 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 1)
           to label %invoke.cont490 unwind label %lpad489
 
 invoke.cont490:                                   ; preds = %invoke.cont475
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont499 unwind label %lpad498
 
 invoke.cont499:                                   ; preds = %invoke.cont490
-  %call504 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call504 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont503 unwind label %lpad489
 
 invoke.cont503:                                   ; preds = %invoke.cont499
-  %call507 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 3)
+  %call507 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* undef, i8* undef, i32 3)
           to label %invoke.cont506 unwind label %lpad505
 
 invoke.cont506:                                   ; preds = %invoke.cont503
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont509 unwind label %lpad508
 
 invoke.cont509:                                   ; preds = %invoke.cont506
-  %call513 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call513 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont512 unwind label %lpad489
 
 invoke.cont512:                                   ; preds = %invoke.cont509
   br i1 undef, label %msgSend.null-receiver, label %msgSend.call
 
 msgSend.call:                                     ; preds = %invoke.cont512
-  invoke void bitcast (void (i8*, i8*, ...)* @objc_msgSend_stret to void (%struct.CGPoint*, i8*, i8*)*)(%struct.CGPoint* sret undef, i8* undef, i8* undef)
+  invoke void bitcast (void (i8*, i8*, ...)* @llvm.objc.msgSend_stret to void (%struct.CGPoint*, i8*, i8*)*)(%struct.CGPoint* sret undef, i8* undef, i8* undef)
           to label %msgSend.cont unwind label %lpad514
 
 msgSend.null-receiver:                            ; preds = %invoke.cont512
@@ -1589,15 +1589,15 @@
   unreachable
 
 invoke.cont521:                                   ; preds = %if.then.i2137, %invoke.cont.i2136
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef)
           to label %invoke.cont528 unwind label %lpad527
 
 invoke.cont528:                                   ; preds = %invoke.cont521
-  %call532 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call532 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont531 unwind label %lpad489
 
 invoke.cont531:                                   ; preds = %invoke.cont528
-  %call535 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call535 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont534 unwind label %lpad533
 
 invoke.cont534:                                   ; preds = %invoke.cont531
@@ -1616,43 +1616,43 @@
   unreachable
 
 invoke.cont540:                                   ; preds = %if.then.i2143, %invoke.cont.i2142
-  %call544 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef, i32 3)
+  %call544 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i32)*)(i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef, i32 3)
           to label %invoke.cont543 unwind label %lpad489
 
 invoke.cont543:                                   ; preds = %invoke.cont540
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* undef)
           to label %invoke.cont546 unwind label %lpad545
 
 invoke.cont546:                                   ; preds = %invoke.cont543
-  %call549 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call549 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont548 unwind label %lpad489
 
 invoke.cont548:                                   ; preds = %invoke.cont546
-  %call555 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  %call555 = invoke signext i8 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8 (i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont554 unwind label %lpad553
 
 invoke.cont554:                                   ; preds = %invoke.cont548
-  %tmp499 = call i8* @objc_retain(i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*)) #3
+  %tmp499 = call i8* @llvm.objc.retain(i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*)) #3
   invoke void (i8*, ...) @NSLog(i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i8* %tmp499, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont.i2148 unwind label %lpad.i2150
 
 invoke.cont.i2148:                                ; preds = %invoke.cont554
-  call void @objc_release(i8* %tmp499) #3, !clang.imprecise_release !0
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  call void @llvm.objc.release(i8* %tmp499) #3, !clang.imprecise_release !0
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont566 unwind label %lpad565
 
 lpad.i2150:                                       ; preds = %invoke.cont554
   %tmp500 = landingpad { i8*, i32 }
           cleanup
-  call void @objc_release(i8* %tmp499) #3, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp499) #3, !clang.imprecise_release !0
   unreachable
 
 invoke.cont566:                                   ; preds = %invoke.cont.i2148
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*, i8*, i8*)*)(i8* undef, i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*))
           to label %invoke.cont572 unwind label %lpad571
 
 invoke.cont572:                                   ; preds = %invoke.cont566
-  %call582 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
+  %call582 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*)*)(i8* undef, i8* undef)
           to label %invoke.cont581 unwind label %lpad580
 
 invoke.cont581:                                   ; preds = %invoke.cont572
@@ -1927,7 +1927,7 @@
   br label %if.end13
 
 if.end13:                                         ; preds = %if.then10, %entry
-  %0 = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*, i64, i8*, i8)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i64 2, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring_2 to i8*), i8 signext 0), !clang.arc.no_objc_arc_exceptions !0
+  %0 = call i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*, i8*, i8*, i64, i8*, i8)*)(i8* undef, i8* undef, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring to i8*), i64 2, i8* bitcast (%struct.NSConstantString* @_unnamed_cfstring_2 to i8*), i8 signext 0), !clang.arc.no_objc_arc_exceptions !0
   br i1 undef, label %if.then17, label %if.end18
 
 if.then17:                                        ; preds = %if.end13
@@ -2162,14 +2162,14 @@
   br label %if.end399
 
 if.end399:                                        ; preds = %if.then398, %if.end392
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*)*)(i8* undef, i8* undef)
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*, i8*)*)(i8* undef, i8* undef)
           to label %eh.cont unwind label %lpad, !clang.arc.no_objc_arc_exceptions !0
 
 eh.cont:                                          ; preds = %if.end399
   br i1 undef, label %if.then430, label %if.end439.critedge
 
 if.then430:                                       ; preds = %eh.cont
-  %1 = call i8* @objc_retain(i8* %0)
+  %1 = call i8* @llvm.objc.retain(i8* %0)
   br label %if.end439
 
 lpad:                                             ; preds = %if.end399
@@ -2178,11 +2178,11 @@
   unreachable
 
 if.end439.critedge:                               ; preds = %eh.cont
-  %3 = call i8* @objc_retain(i8* %0)
+  %3 = call i8* @llvm.objc.retain(i8* %0)
   br label %if.end439
 
 if.end439:                                        ; preds = %if.end439.critedge, %if.then430
-  call void @objc_release(i8* %0), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %0), !clang.imprecise_release !0
   unreachable
 
 return:                                           ; No predecessors!
diff --git a/test/Transforms/ObjCARC/pointer-types.ll b/test/Transforms/ObjCARC/pointer-types.ll
index 257560d..b7fcad0 100644
--- a/test/Transforms/ObjCARC/pointer-types.ll
+++ b/test/Transforms/ObjCARC/pointer-types.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -objc-arc -S < %s | FileCheck %s
 
-; Don't hoist @objc_release past a use of its pointer, even
+; Don't hoist @llvm.objc.release past a use of its pointer, even
 ; if the use has function type, because clang uses function types
 ; in dubious ways.
 ; rdar://10551239
@@ -9,7 +9,7 @@
 ; CHECK: %otherBlock = phi void ()* [ %b1, %if.then ], [ null, %entry ]
 ; CHECK-NEXT: call void @use_fptr(void ()* %otherBlock)
 ; CHECK-NEXT: %tmp11 = bitcast void ()* %otherBlock to i8*
-; CHECK-NEXT: call void @objc_release(i8* %tmp11)
+; CHECK-NEXT: call void @llvm.objc.release(i8* %tmp11)
 
 define void @test0(i1 %tobool, void ()* %b1) {
 entry:
@@ -22,10 +22,10 @@
   %otherBlock = phi void ()* [ %b1, %if.then ], [ null, %entry ]
   call void @use_fptr(void ()* %otherBlock)
   %tmp11 = bitcast void ()* %otherBlock to i8*
-  call void @objc_release(i8* %tmp11) nounwind
+  call void @llvm.objc.release(i8* %tmp11) nounwind
   ret void
 }
 
 declare void @use_fptr(void ()*)
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 
diff --git a/test/Transforms/ObjCARC/post-inlining.ll b/test/Transforms/ObjCARC/post-inlining.ll
index b2d6112..0304d59 100644
--- a/test/Transforms/ObjCARC/post-inlining.ll
+++ b/test/Transforms/ObjCARC/post-inlining.ll
@@ -2,9 +2,9 @@
 
 declare void @use_pointer(i8*)
 declare i8* @returner()
-declare i8* @objc_retain(i8*)
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
 
 ; Clean up residue left behind after inlining.
 
@@ -14,8 +14,8 @@
 ; CHECK-NEXT: }
 define void @test0(i8* %call.i) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %call.i) nounwind
-  %1 = tail call i8* @objc_autoreleaseReturnValue(i8* %0) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %call.i) nounwind
+  %1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %0) nounwind
   ret void
 }
 
@@ -27,8 +27,8 @@
 ; CHECK-NEXT: }
 define void @test1(i8* %call.i) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %call.i) nounwind
-  %1 = tail call i8* @objc_autoreleaseReturnValue(i8* %call.i) nounwind
+  %0 = tail call i8* @llvm.objc.retain(i8* %call.i) nounwind
+  %1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call.i) nounwind
   ret void
 }
 
@@ -41,8 +41,8 @@
 ; CHECK-NEXT: }
 define void @test24(i8* %p) {
 entry:
-  call i8* @objc_autoreleaseReturnValue(i8* %p) nounwind
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %p) nounwind
+  call i8* @llvm.objc.autoreleaseReturnValue(i8* %p) nounwind
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p) nounwind
   call void @use_pointer(i8* %p)
   ret void
 }
diff --git a/test/Transforms/ObjCARC/pr12270.ll b/test/Transforms/ObjCARC/pr12270.ll
index bdff0d7..b1d9902 100644
--- a/test/Transforms/ObjCARC/pr12270.ll
+++ b/test/Transforms/ObjCARC/pr12270.ll
@@ -8,14 +8,14 @@
 
 return:                                           ; No predecessors!
   %bar = bitcast %2* %x to i8*
-  %foo = call i8* @objc_autoreleaseReturnValue(i8* %bar) nounwind
+  %foo = call i8* @llvm.objc.autoreleaseReturnValue(i8* %bar) nounwind
   call void @callee()
   call void @use_pointer(i8* %foo)
-  call void @objc_release(i8* %foo) nounwind
+  call void @llvm.objc.release(i8* %foo) nounwind
   ret void
 }
 
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare void @objc_release(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare void @llvm.objc.release(i8*)
 declare void @callee()
 declare void @use_pointer(i8*)
diff --git a/test/Transforms/ObjCARC/retain-block-side-effects.ll b/test/Transforms/ObjCARC/retain-block-side-effects.ll
index 5f5def9..a980ffd 100644
--- a/test/Transforms/ObjCARC/retain-block-side-effects.ll
+++ b/test/Transforms/ObjCARC/retain-block-side-effects.ll
@@ -4,7 +4,7 @@
 ; objc_retainBlock stores into %repeater so the load from after the
 ; call isn't forwardable from the store before the call.
 
-; CHECK: %tmp16 = call i8* @objc_retainBlock(i8* %tmp15) [[NUW:#[0-9]+]]
+; CHECK: %tmp16 = call i8* @llvm.objc.retainBlock(i8* %tmp15) [[NUW:#[0-9]+]]
 ; CHECK: %tmp17 = bitcast i8* %tmp16 to void ()*
 ; CHECK: %tmp18 = load %struct.__block_byref_repeater*, %struct.__block_byref_repeater** %byref.forwarding, align 8
 ; CHECK: %repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
@@ -27,7 +27,7 @@
   %tmp14 = bitcast %struct.__block_byref_repeater* %repeater to i8*
   store i8* %tmp14, i8** %block.captured11, align 8
   %tmp15 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block to i8*
-  %tmp16 = call i8* @objc_retainBlock(i8* %tmp15) nounwind
+  %tmp16 = call i8* @llvm.objc.retainBlock(i8* %tmp15) nounwind
   %tmp17 = bitcast i8* %tmp16 to void ()*
   %tmp18 = load %struct.__block_byref_repeater*, %struct.__block_byref_repeater** %byref.forwarding, align 8
   %repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
@@ -36,7 +36,7 @@
   ret void
 }
 
-declare i8* @objc_retainBlock(i8*)
+declare i8* @llvm.objc.retainBlock(i8*)
 
 ; CHECK: attributes #0 = { noreturn }
 ; CHECK: attributes [[NUW]] = { nounwind }
diff --git a/test/Transforms/ObjCARC/retain-not-declared.ll b/test/Transforms/ObjCARC/retain-not-declared.ll
index f7ac908..7df5159 100644
--- a/test/Transforms/ObjCARC/retain-not-declared.ll
+++ b/test/Transforms/ObjCARC/retain-not-declared.ll
@@ -1,11 +1,11 @@
 ; RUN: opt -S -objc-arc -objc-arc-contract < %s | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-declare i8* @objc_unretainedObject(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare i8* @objc_msgSend(i8*, i8*, ...)
-declare void @objc_release(i8*)
+declare i8* @llvm.objc.unretainedObject(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.msgSend(i8*, i8*, ...)
+declare void @llvm.objc.release(i8*)
 
 ; Test that the optimizer can create an objc_retainAutoreleaseReturnValue
 ; declaration even if no objc_retain declaration exists.
@@ -13,41 +13,41 @@
 
 ; CHECK:      define i8* @test0(i8* %p) {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %0 = tail call i8* @objc_retainAutoreleaseReturnValue(i8* %p) [[NUW:#[0-9]+]]
+; CHECK-NEXT:   %0 = tail call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %p) [[NUW:#[0-9]+]]
 ; CHECK-NEXT:   ret i8* %0
 ; CHECK-NEXT: }
 
 define i8* @test0(i8* %p) {
 entry:
-  %call = tail call i8* @objc_unretainedObject(i8* %p)
-  %0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
-  %1 = tail call i8* @objc_autoreleaseReturnValue(i8* %call) nounwind
+  %call = tail call i8* @llvm.objc.unretainedObject(i8* %p)
+  %0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
+  %1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call) nounwind
   ret i8* %call
 }
 
-; Properly create the @objc_retain declaration when it doesn't already exist.
+; Properly create the @llvm.objc.retain declaration when it doesn't already exist.
 ; rdar://9825114
 
 ; CHECK-LABEL: @test1(
-; CHECK: @objc_retain(
-; CHECK: @objc_retainAutoreleasedReturnValue(
-; CHECK: @objc_release(
-; CHECK: @objc_release(
+; CHECK: @llvm.objc.retain
+; CHECK: @llvm.objc.retainAutoreleasedReturnValue(
+; CHECK: @llvm.objc.release
+; CHECK: @llvm.objc.release
 ; CHECK: }
 define void @test1(i8* %call88) nounwind personality i32 (...)* @__gxx_personality_v0 {
 entry:
-  %tmp1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call88) nounwind
-  %call94 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*)*)(i8* %tmp1)
+  %tmp1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call88) nounwind
+  %call94 = invoke i8* bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i8* (i8*)*)(i8* %tmp1)
           to label %invoke.cont93 unwind label %lpad91
 
 invoke.cont93:                                    ; preds = %entry
-  %tmp2 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call94) nounwind
-  call void @objc_release(i8* %tmp1) nounwind
-  invoke void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*)*)(i8* %tmp2)
+  %tmp2 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call94) nounwind
+  call void @llvm.objc.release(i8* %tmp1) nounwind
+  invoke void bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to void (i8*)*)(i8* %tmp2)
           to label %invoke.cont102 unwind label %lpad100
 
 invoke.cont102:                                   ; preds = %invoke.cont93
-  call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp2) nounwind, !clang.imprecise_release !0
   unreachable
 
 lpad91:                                           ; preds = %entry
@@ -58,7 +58,7 @@
 lpad100:                                          ; preds = %invoke.cont93
   %exn100 = landingpad {i8*, i32}
               cleanup
-  call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %tmp2) nounwind, !clang.imprecise_release !0
   unreachable
 }
 
diff --git a/test/Transforms/ObjCARC/rle-s2l.ll b/test/Transforms/ObjCARC/rle-s2l.ll
index 2865c94..5bf63f2 100644
--- a/test/Transforms/ObjCARC/rle-s2l.ll
+++ b/test/Transforms/ObjCARC/rle-s2l.ll
@@ -1,71 +1,71 @@
 ; RUN: opt -S -basicaa -objc-arc < %s | FileCheck %s
 
-declare i8* @objc_loadWeak(i8**)
-declare i8* @objc_loadWeakRetained(i8**)
-declare i8* @objc_storeWeak(i8**, i8*)
-declare i8* @objc_initWeak(i8**, i8*)
+declare i8* @llvm.objc.loadWeak(i8**)
+declare i8* @llvm.objc.loadWeakRetained(i8**)
+declare i8* @llvm.objc.storeWeak(i8**, i8*)
+declare i8* @llvm.objc.initWeak(i8**, i8*)
 declare void @use_pointer(i8*)
 declare void @callee()
 
-; Basic redundant @objc_loadWeak elimination.
+; Basic redundant @llvm.objc.loadWeak elimination.
 
 ; CHECK:      define void @test0(i8** %p) {
-; CHECK-NEXT:   %y = call i8* @objc_loadWeak(i8** %p)
+; CHECK-NEXT:   %y = call i8* @llvm.objc.loadWeak(i8** %p)
 ; CHECK-NEXT:   call void @use_pointer(i8* %y)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test0(i8** %p) {
-  %x = call i8* @objc_loadWeak(i8** %p)
-  %y = call i8* @objc_loadWeak(i8** %p)
+  %x = call i8* @llvm.objc.loadWeak(i8** %p)
+  %y = call i8* @llvm.objc.loadWeak(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
 
-; DCE the @objc_loadWeak.
+; DCE the @llvm.objc.loadWeak.
 
 ; CHECK:      define void @test1(i8** %p) {
-; CHECK-NEXT:   %y = call i8* @objc_loadWeakRetained(i8** %p)
+; CHECK-NEXT:   %y = call i8* @llvm.objc.loadWeakRetained(i8** %p)
 ; CHECK-NEXT:   call void @use_pointer(i8* %y)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test1(i8** %p) {
-  %x = call i8* @objc_loadWeak(i8** %p)
-  %y = call i8* @objc_loadWeakRetained(i8** %p)
+  %x = call i8* @llvm.objc.loadWeak(i8** %p)
+  %y = call i8* @llvm.objc.loadWeakRetained(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
 
-; Basic redundant @objc_loadWeakRetained elimination.
+; Basic redundant @llvm.objc.loadWeakRetained elimination.
 
 ; CHECK:      define void @test2(i8** %p) {
-; CHECK-NEXT:   %x = call i8* @objc_loadWeak(i8** %p)
+; CHECK-NEXT:   %x = call i8* @llvm.objc.loadWeak(i8** %p)
 ; CHECK-NEXT:   store i8 3, i8* %x
-; CHECK-NEXT:   %1 = tail call i8* @objc_retain(i8* %x)
+; CHECK-NEXT:   %1 = tail call i8* @llvm.objc.retain(i8* %x)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test2(i8** %p) {
-  %x = call i8* @objc_loadWeak(i8** %p)
+  %x = call i8* @llvm.objc.loadWeak(i8** %p)
   store i8 3, i8* %x
-  %y = call i8* @objc_loadWeakRetained(i8** %p)
+  %y = call i8* @llvm.objc.loadWeakRetained(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
 
-; Basic redundant @objc_loadWeakRetained elimination, this time
+; Basic redundant @llvm.objc.loadWeakRetained elimination, this time
 ; with a readonly call instead of a store.
 
 ; CHECK:      define void @test3(i8** %p) {
-; CHECK-NEXT:   %x = call i8* @objc_loadWeak(i8** %p)
+; CHECK-NEXT:   %x = call i8* @llvm.objc.loadWeak(i8** %p)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x) [[RO:#[0-9]+]]
-; CHECK-NEXT:   %1 = tail call i8* @objc_retain(i8* %x)
+; CHECK-NEXT:   %1 = tail call i8* @llvm.objc.retain(i8* %x)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test3(i8** %p) {
-  %x = call i8* @objc_loadWeak(i8** %p)
+  %x = call i8* @llvm.objc.loadWeak(i8** %p)
   call void @use_pointer(i8* %x) readonly
-  %y = call i8* @objc_loadWeakRetained(i8** %p)
+  %y = call i8* @llvm.objc.loadWeakRetained(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
@@ -73,18 +73,18 @@
 ; A regular call blocks redundant weak load elimination.
 
 ; CHECK:      define void @test4(i8** %p) {
-; CHECK-NEXT:   %x = call i8* @objc_loadWeak(i8** %p)
+; CHECK-NEXT:   %x = call i8* @llvm.objc.loadWeak(i8** %p)
 ; CHECK-NEXT:   call void @use_pointer(i8* %x) [[RO]]
 ; CHECK-NEXT:   call void @callee()
-; CHECK-NEXT:   %y = call i8* @objc_loadWeak(i8** %p)
+; CHECK-NEXT:   %y = call i8* @llvm.objc.loadWeak(i8** %p)
 ; CHECK-NEXT:   call void @use_pointer(i8* %y)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test4(i8** %p) {
-  %x = call i8* @objc_loadWeak(i8** %p)
+  %x = call i8* @llvm.objc.loadWeak(i8** %p)
   call void @use_pointer(i8* %x) readonly
   call void @callee()
-  %y = call i8* @objc_loadWeak(i8** %p)
+  %y = call i8* @llvm.objc.loadWeak(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
@@ -92,13 +92,13 @@
 ; Store to load forwarding.
 
 ; CHECK:      define void @test5(i8** %p, i8* %n) {
-; CHECK-NEXT:   %1 = call i8* @objc_storeWeak(i8** %p, i8* %n)
+; CHECK-NEXT:   %1 = call i8* @llvm.objc.storeWeak(i8** %p, i8* %n)
 ; CHECK-NEXT:   call void @use_pointer(i8* %n)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test5(i8** %p, i8* %n) {
-  call i8* @objc_storeWeak(i8** %p, i8* %n)
-  %y = call i8* @objc_loadWeak(i8** %p)
+  call i8* @llvm.objc.storeWeak(i8** %p, i8* %n)
+  %y = call i8* @llvm.objc.loadWeak(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
@@ -106,13 +106,13 @@
 ; Store to load forwarding with objc_initWeak.
 
 ; CHECK:      define void @test6(i8** %p, i8* %n) {
-; CHECK-NEXT:   %1 = call i8* @objc_initWeak(i8** %p, i8* %n)
+; CHECK-NEXT:   %1 = call i8* @llvm.objc.initWeak(i8** %p, i8* %n)
 ; CHECK-NEXT:   call void @use_pointer(i8* %n)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test6(i8** %p, i8* %n) {
-  call i8* @objc_initWeak(i8** %p, i8* %n)
-  %y = call i8* @objc_loadWeak(i8** %p)
+  call i8* @llvm.objc.initWeak(i8** %p, i8* %n)
+  %y = call i8* @llvm.objc.loadWeak(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
@@ -120,16 +120,16 @@
 ; Don't forward if there's a may-alias store in the way.
 
 ; CHECK:      define void @test7(i8** %p, i8* %n, i8** %q, i8* %m) {
-; CHECK-NEXT:   call i8* @objc_initWeak(i8** %p, i8* %n)
-; CHECK-NEXT:   call i8* @objc_storeWeak(i8** %q, i8* %m)
-; CHECK-NEXT:   %y = call i8* @objc_loadWeak(i8** %p)
+; CHECK-NEXT:   call i8* @llvm.objc.initWeak(i8** %p, i8* %n)
+; CHECK-NEXT:   call i8* @llvm.objc.storeWeak(i8** %q, i8* %m)
+; CHECK-NEXT:   %y = call i8* @llvm.objc.loadWeak(i8** %p)
 ; CHECK-NEXT:   call void @use_pointer(i8* %y)
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 define void @test7(i8** %p, i8* %n, i8** %q, i8* %m) {
-  call i8* @objc_initWeak(i8** %p, i8* %n)
-  call i8* @objc_storeWeak(i8** %q, i8* %m)
-  %y = call i8* @objc_loadWeak(i8** %p)
+  call i8* @llvm.objc.initWeak(i8** %p, i8* %n)
+  call i8* @llvm.objc.storeWeak(i8** %q, i8* %m)
+  %y = call i8* @llvm.objc.loadWeak(i8** %p)
   call void @use_pointer(i8* %y)
   ret void
 }
diff --git a/test/Transforms/ObjCARC/rv.ll b/test/Transforms/ObjCARC/rv.ll
index 425f86c..3d0d56c 100644
--- a/test/Transforms/ObjCARC/rv.ll
+++ b/test/Transforms/ObjCARC/rv.ll
@@ -2,15 +2,15 @@
 
 target datalayout = "e-p:64:64:64"
 
-declare i8* @objc_retain(i8*)
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare void @objc_release(i8*)
-declare i8* @objc_autorelease(i8*)
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare i8* @objc_retainAutoreleaseReturnValue(i8*)
-declare void @objc_autoreleasePoolPop(i8*)
-declare void @objc_autoreleasePoolPush()
-declare i8* @objc_retainBlock(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.autorelease(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.retainAutoreleaseReturnValue(i8*)
+declare void @llvm.objc.autoreleasePoolPop(i8*)
+declare void @llvm.objc.autoreleasePoolPush()
+declare i8* @llvm.objc.retainBlock(i8*)
 
 declare i8* @objc_retainedObject(i8*)
 declare i8* @objc_unretainedObject(i8*)
@@ -29,17 +29,17 @@
 ; CHECK-LABEL:      define void @test0(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT:   %x = call i8* @returner
-; CHECK-NEXT:   %0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %x) [[NUW:#[0-9]+]]
+; CHECK-NEXT:   %0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %x) [[NUW:#[0-9]+]]
 ; CHECK: t:
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: return:
-; CHECK-NEXT: call void @objc_release(i8* %x)
+; CHECK-NEXT: call void @llvm.objc.release(i8* %x)
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }
 define void @test0(i1 %p) nounwind {
 entry:
   %x = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %x)
   br i1 %p, label %t, label %return
 
 t:
@@ -48,19 +48,24 @@
   br label %return
 
 return:
-  call void @objc_release(i8* %x) nounwind
+  call void @llvm.objc.release(i8* %x) nounwind
   ret void
 }
 
 ; Delete no-ops.
 
 ; CHECK-LABEL: define void @test2(
-; CHECK-NOT: @objc_
+; CHECK-NOT: @llvm.objc.
 ; CHECK: }
 define void @test2() {
-  call i8* @objc_retainAutoreleasedReturnValue(i8* null)
-  call i8* @objc_autoreleaseReturnValue(i8* null)
-  ; call i8* @objc_retainAutoreleaseReturnValue(i8* null) ; TODO
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* null)
+  call i8* @llvm.objc.autoreleaseReturnValue(i8* null)
+  ; call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* null) ; TODO
+  %bitcast = bitcast i32* null to i8*
+  %rb = call i8* @llvm.objc.retainBlock(i8* %bitcast)
+  call void @use_pointer(i8* %rb)
+  %rb2 = call i8* @llvm.objc.retainBlock(i8* undef)
+  call void @use_pointer(i8* %rb2)
   ret void
 }
 
@@ -73,8 +78,8 @@
 define i8* @test3() {
 entry:
   %call = call i8* @returner()
-  %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call) nounwind
-  %1 = call i8* @objc_autoreleaseReturnValue(i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
+  %1 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %0) nounwind
   ret i8* %1
 }
 
@@ -87,8 +92,8 @@
 define i8* @test4() {
 entry:
   %call = call i8* @returner()
-  %0 = call i8* @objc_retain(i8* %call) nounwind
-  %1 = call i8* @objc_autoreleaseReturnValue(i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %call) nounwind
+  %1 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %0) nounwind
   ret i8* %1
 }
 
@@ -102,7 +107,7 @@
 ;define i8* @test5() {
 ;entry:
 ;  %call = call i8* @returner()
-;  %0 = call i8* @objc_retainAutoreleaseReturnValue(i8* %call) nounwind
+;  %0 = call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %call) nounwind
 ;  ret i8* %0
 ;}
 
@@ -115,45 +120,45 @@
 ; Those entrypoints don't exist yet though.
 
 ; CHECK-LABEL: define i8* @test7(
-; CHECK: call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
-; CHECK: %t = tail call i8* @objc_autoreleaseReturnValue(i8* %p)
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
+; CHECK: %t = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
 define i8* @test7() {
   %p = call i8* @returner()
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
-  %t = call i8* @objc_autoreleaseReturnValue(i8* %p)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
+  %t = call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
   call void @use_pointer(i8* %p)
   ret i8* %t
 }
 
 ; CHECK-LABEL: define i8* @test7b(
-; CHECK: call i8* @objc_retain(i8* %p)
-; CHECK: %t = tail call i8* @objc_autoreleaseReturnValue(i8* %p)
+; CHECK: call i8* @llvm.objc.retain(i8* %p)
+; CHECK: %t = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
 define i8* @test7b() {
   %p = call i8* @returner()
   call void @use_pointer(i8* %p)
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
-  %t = call i8* @objc_autoreleaseReturnValue(i8* %p)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
+  %t = call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
   ret i8* %p
 }
 
 ; Don't apply the RV optimization to autorelease if there's no retain.
 
 ; CHECK: define i8* @test9(i8* %p)
-; CHECK: call i8* @objc_autorelease(i8* %p)
+; CHECK: call i8* @llvm.objc.autorelease(i8* %p)
 define i8* @test9(i8* %p) {
-  call i8* @objc_autorelease(i8* %p)
+  call i8* @llvm.objc.autorelease(i8* %p)
   ret i8* %p
 }
 
 ; Do not apply the RV optimization.
 
 ; CHECK: define i8* @test10(i8* %p)
-; CHECK: tail call i8* @objc_retain(i8* %p) [[NUW]]
-; CHECK: call i8* @objc_autorelease(i8* %p) [[NUW]]
+; CHECK: tail call i8* @llvm.objc.retain(i8* %p) [[NUW]]
+; CHECK: call i8* @llvm.objc.autorelease(i8* %p) [[NUW]]
 ; CHECK-NEXT: ret i8* %p
 define i8* @test10(i8* %p) {
-  %1 = call i8* @objc_retain(i8* %p)
-  %2 = call i8* @objc_autorelease(i8* %p)
+  %1 = call i8* @llvm.objc.retain(i8* %p)
+  %2 = call i8* @llvm.objc.autorelease(i8* %p)
   ret i8* %p
 }
 
@@ -161,42 +166,42 @@
 ; could undo the retain.
 
 ; CHECK: define i8* @test11(i8* %p)
-; CHECK: tail call i8* @objc_retain(i8* %p)
+; CHECK: tail call i8* @llvm.objc.retain(i8* %p)
 ; CHECK-NEXT: call void @use_pointer(i8* %p)
-; CHECK: call i8* @objc_autorelease(i8* %p)
+; CHECK: call i8* @llvm.objc.autorelease(i8* %p)
 ; CHECK-NEXT: ret i8* %p
 define i8* @test11(i8* %p) {
-  %1 = call i8* @objc_retain(i8* %p)
+  %1 = call i8* @llvm.objc.retain(i8* %p)
   call void @use_pointer(i8* %p)
-  %2 = call i8* @objc_autorelease(i8* %p)
+  %2 = call i8* @llvm.objc.autorelease(i8* %p)
   ret i8* %p
 }
 
 ; Don't spoil the RV optimization.
 
 ; CHECK: define i8* @test12(i8* %p)
-; CHECK: tail call i8* @objc_retain(i8* %p)
+; CHECK: tail call i8* @llvm.objc.retain(i8* %p)
 ; CHECK: call void @use_pointer(i8* %p)
-; CHECK: tail call i8* @objc_autoreleaseReturnValue(i8* %p)
+; CHECK: tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
 ; CHECK: ret i8* %p
 define i8* @test12(i8* %p) {
-  %1 = call i8* @objc_retain(i8* %p)
+  %1 = call i8* @llvm.objc.retain(i8* %p)
   call void @use_pointer(i8* %p)
-  %2 = call i8* @objc_autoreleaseReturnValue(i8* %p)
+  %2 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
   ret i8* %p
 }
 
 ; Don't zap the objc_retainAutoreleasedReturnValue.
 
 ; CHECK-LABEL: define i8* @test13(
-; CHECK: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
-; CHECK: call i8* @objc_autorelease(i8* %p)
+; CHECK: tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
+; CHECK: call i8* @llvm.objc.autorelease(i8* %p)
 ; CHECK: ret i8* %p
 define i8* @test13() {
   %p = call i8* @returner()
-  %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
+  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
   call void @callee()
-  %2 = call i8* @objc_autorelease(i8* %p)
+  %2 = call i8* @llvm.objc.autorelease(i8* %p)
   ret i8* %p
 }
 
@@ -204,10 +209,10 @@
 ; argument is not a return value.
 
 ; CHECK-LABEL: define void @test14(
-; CHECK-NEXT: tail call i8* @objc_retain(i8* %p) [[NUW]]
+; CHECK-NEXT: tail call i8* @llvm.objc.retain(i8* %p) [[NUW]]
 ; CHECK-NEXT: ret void
 define void @test14(i8* %p) {
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
   ret void
 }
 
@@ -216,11 +221,11 @@
 
 ; CHECK-LABEL: define void @test15(
 ; CHECK-NEXT: %y = call i8* @returner()
-; CHECK-NEXT: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %y) [[NUW]]
+; CHECK-NEXT: tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %y) [[NUW]]
 ; CHECK-NEXT: ret void
 define void @test15() {
   %y = call i8* @returner()
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %y)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %y)
   ret void
 }
 
@@ -229,54 +234,72 @@
 ; CHECK: define i8* @test19(i8* %p) {
 ; CHECK-NEXT: ret i8* %p
 define i8* @test19(i8* %p) {
-  call i8* @objc_autoreleaseReturnValue(i8* %p)
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
+  call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
+  ret i8* %p
+}
+
+; Delete autoreleaseRV+retainRV pairs when they have equivalent PHIs as inputs
+
+; CHECK: define i8* @test19phi(i8* %p) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label %test19bb
+; CHECK: test19bb:
+; CHECK-NEXT: ret i8* %p
+define i8* @test19phi(i8* %p) {
+entry:
+  br label %test19bb
+test19bb:
+  %phi1 = phi i8* [ %p, %entry ]
+  %phi2 = phi i8* [ %p, %entry ]
+  call i8* @llvm.objc.autoreleaseReturnValue(i8* %phi1)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %phi2)
   ret i8* %p
 }
 
 ; Like test19 but with plain autorelease.
 
 ; CHECK: define i8* @test20(i8* %p) {
-; CHECK-NEXT: call i8* @objc_autorelease(i8* %p)
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.autorelease(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
 ; CHECK-NEXT: ret i8* %p
 define i8* @test20(i8* %p) {
-  call i8* @objc_autorelease(i8* %p)
-  call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
+  call i8* @llvm.objc.autorelease(i8* %p)
+  call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %p)
   ret i8* %p
 }
 
 ; Like test19 but with plain retain.
 
 ; CHECK: define i8* @test21(i8* %p) {
-; CHECK-NEXT: call i8* @objc_autoreleaseReturnValue(i8* %p)
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
 ; CHECK-NEXT: ret i8* %p
 define i8* @test21(i8* %p) {
-  call i8* @objc_autoreleaseReturnValue(i8* %p)
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   ret i8* %p
 }
 
 ; Like test19 but with plain retain and autorelease.
 
 ; CHECK: define i8* @test22(i8* %p) {
-; CHECK-NEXT: call i8* @objc_autorelease(i8* %p)
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.autorelease(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
 ; CHECK-NEXT: ret i8* %p
 define i8* @test22(i8* %p) {
-  call i8* @objc_autorelease(i8* %p)
-  call i8* @objc_retain(i8* %p)
+  call i8* @llvm.objc.autorelease(i8* %p)
+  call i8* @llvm.objc.retain(i8* %p)
   ret i8* %p
 }
 
 ; Convert autoreleaseRV to autorelease.
 
 ; CHECK-LABEL: define void @test23(
-; CHECK: call i8* @objc_autorelease(i8* %p) [[NUW]]
+; CHECK: call i8* @llvm.objc.autorelease(i8* %p) [[NUW]]
 define void @test23(i8* %p) {
   store i8 0, i8* %p
-  call i8* @objc_autoreleaseReturnValue(i8* %p)
+  call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
   ret void
 }
 
@@ -284,9 +307,9 @@
 ; even through a bitcast.
 
 ; CHECK-LABEL: define {}* @test24(
-; CHECK: tail call i8* @objc_autoreleaseReturnValue(i8* %p)
+; CHECK: tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
 define {}* @test24(i8* %p) {
-  %t = call i8* @objc_autoreleaseReturnValue(i8* %p)
+  %t = call i8* @llvm.objc.autoreleaseReturnValue(i8* %p)
   %s = bitcast i8* %p to {}*
   ret {}* %s
 }
@@ -301,16 +324,16 @@
 
 ; CHECK-LABEL: define void @test25(
 ; CHECK: %[[CALL1:.*]] = call i8* @second_test25(
-; CHECK-NEXT: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %[[CALL1]])
+; CHECK-NEXT: tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %[[CALL1]])
 
 define void @test25() {
   %first = call i8* @first_test25()
-  %v0 = call i8* @objc_retain(i8* %first)
+  %v0 = call i8* @llvm.objc.retain(i8* %first)
   call void @somecall_test25()
   %second = call i8* @second_test25(i8* %first)
-  %call2 = call i8* @objc_retainAutoreleasedReturnValue(i8* %second)
-  call void @objc_release(i8* %second), !clang.imprecise_release !0
-  call void @objc_release(i8* %first), !clang.imprecise_release !0
+  %call2 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %second)
+  call void @llvm.objc.release(i8* %second), !clang.imprecise_release !0
+  call void @llvm.objc.release(i8* %first), !clang.imprecise_release !0
   ret void
 }
 
@@ -324,10 +347,10 @@
 define i8* @test26() {
 bb0:
   %v0 = call i8* @returner()
-  %v1 = tail call i8* @objc_retain(i8* %v0)
+  %v1 = tail call i8* @llvm.objc.retain(i8* %v0)
   br label %bb1
 bb1:
-  %v2 = tail call i8* @objc_autoreleaseReturnValue(i8* %v1)
+  %v2 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %v1)
   br label %bb2
 bb2:
   ret i8* %v2
@@ -336,14 +359,14 @@
 declare i32* @func27(i32);
 
 ; Check that ObjCARCOpt::OptimizeAutoreleaseRVCall doesn't turn a call to
-; @objc_autoreleaseReturnValue into a call to @objc_autorelease when a return
-; instruction uses a value equivalent to @objc_autoreleaseReturnValue's operand.
+; @llvm.objc.autoreleaseReturnValue into a call to @llvm.objc.autorelease when a return
+; instruction uses a value equivalent to @llvm.objc.autoreleaseReturnValue's operand.
 ; In the code below, %phival and %retval are considered equivalent.
 
 ; CHECK-LABEL: define i32* @test27(
 ; CHECK: %[[PHIVAL:.*]] = phi i8* [ %{{.*}}, %bb1 ], [ %{{.*}}, %bb2 ]
 ; CHECK: %[[RETVAL:.*]] = phi i32* [ %{{.*}}, %bb1 ], [ %{{.*}}, %bb2 ]
-; CHECK: tail call i8* @objc_autoreleaseReturnValue(i8* %[[PHIVAL]])
+; CHECK: tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %[[PHIVAL]])
 ; CHECK: ret i32* %[[RETVAL]]
 
 define i32* @test27(i1 %cond) {
@@ -360,7 +383,7 @@
 bb3:
   %phival = phi i8* [ %v1, %bb1 ], [ %v3, %bb2 ]
   %retval = phi i32* [ %v0, %bb1 ], [ %v2, %bb2 ]
-  %v4 = tail call i8* @objc_autoreleaseReturnValue(i8* %phival)
+  %v4 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %phival)
   ret i32* %retval
 }
 
diff --git a/test/Transforms/ObjCARC/split-backedge.ll b/test/Transforms/ObjCARC/split-backedge.ll
index 6851487..e9239ae 100644
--- a/test/Transforms/ObjCARC/split-backedge.ll
+++ b/test/Transforms/ObjCARC/split-backedge.ll
@@ -4,12 +4,12 @@
 ; rdar://11256239
 
 ; CHECK-LABEL: define void @test0(
-; CHECK: call i8* @objc_retain(i8* %call) [[NUW:#[0-9]+]]
-; CHECK: call i8* @objc_retain(i8* %call) [[NUW]]
-; CHECK: call i8* @objc_retain(i8* %cond) [[NUW]]
-; CHECK: call void @objc_release(i8* %call) [[NUW]]
-; CHECK: call void @objc_release(i8* %call) [[NUW]]
-; CHECK: call void @objc_release(i8* %cond) [[NUW]]
+; CHECK: call i8* @llvm.objc.retain(i8* %call) [[NUW:#[0-9]+]]
+; CHECK: call i8* @llvm.objc.retain(i8* %call) [[NUW]]
+; CHECK: call i8* @llvm.objc.retain(i8* %cond) [[NUW]]
+; CHECK: call void @llvm.objc.release(i8* %call) [[NUW]]
+; CHECK: call void @llvm.objc.release(i8* %call) [[NUW]]
+; CHECK: call void @llvm.objc.release(i8* %cond) [[NUW]]
 define void @test0() personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
 entry:
   br label %while.body
@@ -19,18 +19,18 @@
           to label %invoke.cont unwind label %lpad, !clang.arc.no_objc_arc_exceptions !0
 
 invoke.cont:                                      ; preds = %while.body
-  %t0 = call i8* @objc_retain(i8* %call) nounwind
-  %t1 = call i8* @objc_retain(i8* %call) nounwind
+  %t0 = call i8* @llvm.objc.retain(i8* %call) nounwind
+  %t1 = call i8* @llvm.objc.retain(i8* %call) nounwind
   %call.i1 = invoke i8* @returner()
           to label %invoke.cont1 unwind label %lpad
 
 invoke.cont1:                                     ; preds = %invoke.cont
   %cond = select i1 undef, i8* null, i8* %call
-  %t2 = call i8* @objc_retain(i8* %cond) nounwind
-  call void @objc_release(i8* %call) nounwind
-  call void @objc_release(i8* %call) nounwind
+  %t2 = call i8* @llvm.objc.retain(i8* %cond) nounwind
+  call void @llvm.objc.release(i8* %call) nounwind
+  call void @llvm.objc.release(i8* %call) nounwind
   call void @use_pointer(i8* %cond)
-  call void @objc_release(i8* %cond) nounwind
+  call void @llvm.objc.release(i8* %cond) nounwind
   br label %while.body
 
 lpad:                                             ; preds = %invoke.cont, %while.body
@@ -41,8 +41,8 @@
 
 declare i8* @returner()
 declare i32 @__objc_personality_v0(...)
-declare void @objc_release(i8*)
-declare i8* @objc_retain(i8*)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.retain(i8*)
 declare void @use_pointer(i8*)
 
 !0 = !{}
diff --git a/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll b/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll
index 3073abf..fcb28dd 100644
--- a/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll
+++ b/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll
@@ -1,23 +1,23 @@
 ; RUN: opt -objc-arc -S < %s | FileCheck %s
 
-declare void @objc_release(i8* %x)
-declare i8* @objc_retain(i8* %x)
-declare i8* @objc_autorelease(i8* %x)
-declare i8* @objc_autoreleaseReturnValue(i8* %x)
-declare i8* @objc_retainAutoreleasedReturnValue(i8* %x)
-declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %x)
+declare void @llvm.objc.release(i8* %x)
+declare i8* @llvm.objc.retain(i8* %x)
+declare i8* @llvm.objc.autorelease(i8* %x)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8* %x)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %x)
+declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %x)
 declare i8* @tmp(i8*)
 
 ; Never tail call objc_autorelease.
 
 ; CHECK: define i8* @test0(i8* %x) [[NUW:#[0-9]+]] {
-; CHECK: %tmp0 = call i8* @objc_autorelease(i8* %x) [[NUW]]
-; CHECK: %tmp1 = call i8* @objc_autorelease(i8* %x) [[NUW]]
+; CHECK: %tmp0 = call i8* @llvm.objc.autorelease(i8* %x) [[NUW]]
+; CHECK: %tmp1 = call i8* @llvm.objc.autorelease(i8* %x) [[NUW]]
 ; CHECK: }
 define i8* @test0(i8* %x) nounwind {
 entry:
-  %tmp0 = call i8* @objc_autorelease(i8* %x)
-  %tmp1 = tail call i8* @objc_autorelease(i8* %x)
+  %tmp0 = call i8* @llvm.objc.autorelease(i8* %x)
+  %tmp1 = tail call i8* @llvm.objc.autorelease(i8* %x)
 
   ret i8* %x
 }
@@ -25,78 +25,78 @@
 ; Always tail call autoreleaseReturnValue.
 
 ; CHECK: define i8* @test1(i8* %x) [[NUW]] {
-; CHECK: %tmp0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
-; CHECK: %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
+; CHECK: %tmp0 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %x) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %x) [[NUW]]
 ; CHECK: }
 define i8* @test1(i8* %x) nounwind {
 entry:
-  %tmp0 = call i8* @objc_autoreleaseReturnValue(i8* %x)
-  %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
+  %tmp0 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %x)
+  %tmp1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %x)
   ret i8* %x
 }
 
 ; Always tail call objc_retain.
 
 ; CHECK: define i8* @test2(i8* %x) [[NUW]] {
-; CHECK: %tmp0 = tail call i8* @objc_retain(i8* %x) [[NUW]]
-; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK: %tmp0 = tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @llvm.objc.retain(i8* %x) [[NUW]]
 ; CHECK: }
 define i8* @test2(i8* %x) nounwind {
 entry:
-  %tmp0 = call i8* @objc_retain(i8* %x)
-  %tmp1 = tail call i8* @objc_retain(i8* %x)
+  %tmp0 = call i8* @llvm.objc.retain(i8* %x)
+  %tmp1 = tail call i8* @llvm.objc.retain(i8* %x)
   ret i8* %x
 }
 
 ; Always tail call objc_retainAutoreleasedReturnValue.
 ; CHECK: define i8* @test3(i8* %x) [[NUW]] {
-; CHECK: %tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %y) [[NUW]]
-; CHECK: %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %z) [[NUW]]
+; CHECK: %tmp0 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %y) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %z) [[NUW]]
 ; CHECK: }
 define i8* @test3(i8* %x) nounwind {
 entry:
   %y = call i8* @tmp(i8* %x)
-  %tmp0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %y)
+  %tmp0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %y)
   %z = call i8* @tmp(i8* %x)
-  %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %z)
+  %tmp1 = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %z)
   ret i8* %x
 }
 
 ; By itself, we should never change whether or not objc_release is tail called.
 
 ; CHECK: define void @test4(i8* %x) [[NUW]] {
-; CHECK: call void @objc_release(i8* %x) [[NUW]]
-; CHECK: tail call void @objc_release(i8* %x) [[NUW]]
+; CHECK: call void @llvm.objc.release(i8* %x) [[NUW]]
+; CHECK: tail call void @llvm.objc.release(i8* %x) [[NUW]]
 ; CHECK: }
 define void @test4(i8* %x) nounwind {
 entry:
-  call void @objc_release(i8* %x)
-  tail call void @objc_release(i8* %x)
+  call void @llvm.objc.release(i8* %x)
+  tail call void @llvm.objc.release(i8* %x)
   ret void
 }
 
-; If we convert a tail called @objc_autoreleaseReturnValue to an
-; @objc_autorelease, ensure that the tail call is removed.
+; If we convert a tail called @llvm.objc.autoreleaseReturnValue to an
+; @llvm.objc.autorelease, ensure that the tail call is removed.
 ; CHECK: define i8* @test5(i8* %x) [[NUW]] {
-; CHECK: %tmp0 = call i8* @objc_autorelease(i8* %x) [[NUW]]
+; CHECK: %tmp0 = call i8* @llvm.objc.autorelease(i8* %x) [[NUW]]
 ; CHECK: }
 define i8* @test5(i8* %x) nounwind {
 entry:
-  %tmp0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
+  %tmp0 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %x)
   ret i8* %tmp0
 }
 
-; Always tail call objc_unsafeClaimAutoreleasedReturnValue.
+; Always tail call llvm.objc.unsafeClaimAutoreleasedReturnValue.
 ; CHECK: define i8* @test6(i8* %x) [[NUW]] {
-; CHECK: %tmp0 = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %y) [[NUW]]
-; CHECK: %tmp1 = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %z) [[NUW]]
+; CHECK: %tmp0 = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %y) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %z) [[NUW]]
 ; CHECK: }
 define i8* @test6(i8* %x) nounwind {
 entry:
   %y = call i8* @tmp(i8* %x)
-  %tmp0 = call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %y)
+  %tmp0 = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %y)
   %z = call i8* @tmp(i8* %x)
-  %tmp1 = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %z)
+  %tmp1 = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %z)
   ret i8* %x
 }
 
diff --git a/test/Transforms/ObjCARC/unsafe-claim-rv.ll b/test/Transforms/ObjCARC/unsafe-claim-rv.ll
index addd0c8..8b64802 100644
--- a/test/Transforms/ObjCARC/unsafe-claim-rv.ll
+++ b/test/Transforms/ObjCARC/unsafe-claim-rv.ll
@@ -15,33 +15,33 @@
 ;
 ; And then hand-reduced further. 
 
-declare i8* @objc_autoreleaseReturnValue(i8*)
-declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8*)
-declare i8* @objc_retain(i8*)
-declare void @objc_release(i8*)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare void @llvm.objc.release(i8*)
 
 define void @foo(i8* %X) {
 entry:
-  %0 = tail call i8* @objc_retain(i8* %X) 
+  %0 = tail call i8* @llvm.objc.retain(i8* %X) 
   %tobool = icmp eq i8* %0, null
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call i8* @objc_retain(i8* nonnull %0)
+  %1 = tail call i8* @llvm.objc.retain(i8* nonnull %0)
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
   %Y.0 = phi i8* [ %1, %if.then ], [ null, %entry ]
-  %2 = tail call i8* @objc_autoreleaseReturnValue(i8* %Y.0)
-  %3 = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %2)
-  tail call void @objc_release(i8* %0) 
+  %2 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %Y.0)
+  %3 = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %2)
+  tail call void @llvm.objc.release(i8* %0) 
   ret void
 }
 
 ; CHECK: if.then
-; CHECK: tail call i8* @objc_retain
-; CHECK-NEXT: call i8* @objc_autorelease
+; CHECK: tail call i8* @llvm.objc.retain
+; CHECK-NEXT: call i8* @llvm.objc.autorelease
 ; CHECK: %Y.0 = phi
-; CHECK-NEXT: tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %Y.0)
-; CHECK-NEXT: tail call void @objc_release
+; CHECK-NEXT: tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %Y.0)
+; CHECK-NEXT: tail call void @llvm.objc.release
 
diff --git a/test/Transforms/ObjCARC/weak-contract.ll b/test/Transforms/ObjCARC/weak-contract.ll
index ca69c70..ca37711 100644
--- a/test/Transforms/ObjCARC/weak-contract.ll
+++ b/test/Transforms/ObjCARC/weak-contract.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -objc-arc-contract -S < %s | FileCheck %s
 
-declare i8* @objc_initWeak(i8**, i8*)
+declare i8* @llvm.objc.initWeak(i8**, i8*)
 
 ; Convert objc_initWeak(p, null) to *p = null.
 
@@ -9,6 +9,6 @@
 ; CHECK-NEXT:   ret i8* null
 ; CHECK-NEXT: }
 define i8* @test0(i8** %p) {
-  %t = call i8* @objc_initWeak(i8** %p, i8* null)
+  %t = call i8* @llvm.objc.initWeak(i8** %p, i8* null)
   ret i8* %t
 }
diff --git a/test/Transforms/ObjCARC/weak-copies.ll b/test/Transforms/ObjCARC/weak-copies.ll
index d3177bb..440f3fb 100644
--- a/test/Transforms/ObjCARC/weak-copies.ll
+++ b/test/Transforms/ObjCARC/weak-copies.ll
@@ -27,13 +27,13 @@
   %w = alloca i8*, align 8
   %x = alloca i8*, align 8
   %call = call i8* @bar()
-  %0 = call i8* @objc_initWeak(i8** %w, i8* %call) nounwind
-  %1 = call i8* @objc_loadWeak(i8** %w) nounwind
-  %2 = call i8* @objc_initWeak(i8** %x, i8* %1) nounwind
-  %3 = call i8* @objc_loadWeak(i8** %x) nounwind
+  %0 = call i8* @llvm.objc.initWeak(i8** %w, i8* %call) nounwind
+  %1 = call i8* @llvm.objc.loadWeak(i8** %w) nounwind
+  %2 = call i8* @llvm.objc.initWeak(i8** %x, i8* %1) nounwind
+  %3 = call i8* @llvm.objc.loadWeak(i8** %x) nounwind
   call void @use(i8* %3) nounwind
-  call void @objc_destroyWeak(i8** %x) nounwind
-  call void @objc_destroyWeak(i8** %w) nounwind
+  call void @llvm.objc.destroyWeak(i8** %x) nounwind
+  call void @llvm.objc.destroyWeak(i8** %w) nounwind
   ret void
 }
 
@@ -48,8 +48,8 @@
 entry:
   %w = alloca i8*, align 8
   %block = alloca %1, align 8
-  %0 = call i8* @objc_retain(i8* %me) nounwind
-  %1 = call i8* @objc_initWeak(i8** %w, i8* %0) nounwind
+  %0 = call i8* @llvm.objc.retain(i8* %me) nounwind
+  %1 = call i8* @llvm.objc.initWeak(i8** %w, i8* %0) nounwind
   %block.isa = getelementptr inbounds %1, %1* %block, i64 0, i32 0
   store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
   %block.flags = getelementptr inbounds %1, %1* %block, i64 0, i32 1
@@ -61,28 +61,28 @@
   %block.descriptor = getelementptr inbounds %1, %1* %block, i64 0, i32 4
   store %struct.__block_descriptor* bitcast (%0* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** %block.descriptor, align 8
   %block.captured = getelementptr inbounds %1, %1* %block, i64 0, i32 5
-  %2 = call i8* @objc_loadWeak(i8** %w) nounwind
-  %3 = call i8* @objc_initWeak(i8** %block.captured, i8* %2) nounwind
+  %2 = call i8* @llvm.objc.loadWeak(i8** %w) nounwind
+  %3 = call i8* @llvm.objc.initWeak(i8** %block.captured, i8* %2) nounwind
   %4 = bitcast %1* %block to void ()*
   call void @use_block(void ()* %4) nounwind
-  call void @objc_destroyWeak(i8** %block.captured) nounwind
-  call void @objc_destroyWeak(i8** %w) nounwind
-  call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
+  call void @llvm.objc.destroyWeak(i8** %block.captured) nounwind
+  call void @llvm.objc.destroyWeak(i8** %w) nounwind
+  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
 
-declare i8* @objc_retain(i8*)
+declare i8* @llvm.objc.retain(i8*)
 declare void @use_block(void ()*) nounwind
 declare void @__qux_block_invoke_0(i8* %.block_descriptor) nounwind
 declare void @__copy_helper_block_(i8*, i8*) nounwind
-declare void @objc_copyWeak(i8**, i8**)
+declare void @llvm.objc.copyWeak(i8**, i8**)
 declare void @__destroy_helper_block_(i8*) nounwind
-declare void @objc_release(i8*)
+declare void @llvm.objc.release(i8*)
 declare i8* @bar()
-declare i8* @objc_initWeak(i8**, i8*)
-declare i8* @objc_loadWeak(i8**)
+declare i8* @llvm.objc.initWeak(i8**, i8*)
+declare i8* @llvm.objc.loadWeak(i8**)
 declare void @use(i8*) nounwind
-declare void @objc_destroyWeak(i8**)
+declare void @llvm.objc.destroyWeak(i8**)
 
 ; CHECK: attributes [[NUW]] = { nounwind }
 
diff --git a/test/Transforms/ObjCARC/weak-dce.ll b/test/Transforms/ObjCARC/weak-dce.ll
index f094671..e499ac1 100644
--- a/test/Transforms/ObjCARC/weak-dce.ll
+++ b/test/Transforms/ObjCARC/weak-dce.ll
@@ -4,43 +4,43 @@
 ; Delete the weak calls and replace them with just the net retain.
 
 ;      CHECK: define void @test0(i8* %p) {
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
 ; CHECK-NEXT: ret void
 
 define void @test0(i8* %p) {
   %weakBlock = alloca i8*, align 8
-  %tmp7 = call i8* @objc_initWeak(i8** %weakBlock, i8* %p) nounwind
-  %tmp26 = call i8* @objc_loadWeakRetained(i8** %weakBlock) nounwind
-  call void @objc_destroyWeak(i8** %weakBlock) nounwind
+  %tmp7 = call i8* @llvm.objc.initWeak(i8** %weakBlock, i8* %p) nounwind
+  %tmp26 = call i8* @llvm.objc.loadWeakRetained(i8** %weakBlock) nounwind
+  call void @llvm.objc.destroyWeak(i8** %weakBlock) nounwind
   ret void
 }
 
 ;      CHECK: define i8* @test1(i8* %p) {
-; CHECK-NEXT: call i8* @objc_retain(i8* %p)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %p)
 ; CHECK-NEXT: ret i8* %p
 
 define i8* @test1(i8* %p) {
   %weakBlock = alloca i8*, align 8
-  %tmp7 = call i8* @objc_initWeak(i8** %weakBlock, i8* %p) nounwind
-  %tmp26 = call i8* @objc_loadWeakRetained(i8** %weakBlock) nounwind
-  call void @objc_destroyWeak(i8** %weakBlock) nounwind
+  %tmp7 = call i8* @llvm.objc.initWeak(i8** %weakBlock, i8* %p) nounwind
+  %tmp26 = call i8* @llvm.objc.loadWeakRetained(i8** %weakBlock) nounwind
+  call void @llvm.objc.destroyWeak(i8** %weakBlock) nounwind
   ret i8* %tmp26
 }
 
 ;      CHECK: define i8* @test2(i8* %p, i8* %q) {
-; CHECK-NEXT: call i8* @objc_retain(i8* %q)
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %q)
 ; CHECK-NEXT: ret i8* %q
 
 define i8* @test2(i8* %p, i8* %q) {
   %weakBlock = alloca i8*, align 8
-  %tmp7 = call i8* @objc_initWeak(i8** %weakBlock, i8* %p) nounwind
-  %tmp19 = call i8* @objc_storeWeak(i8** %weakBlock, i8* %q) nounwind
-  %tmp26 = call i8* @objc_loadWeakRetained(i8** %weakBlock) nounwind
-  call void @objc_destroyWeak(i8** %weakBlock) nounwind
+  %tmp7 = call i8* @llvm.objc.initWeak(i8** %weakBlock, i8* %p) nounwind
+  %tmp19 = call i8* @llvm.objc.storeWeak(i8** %weakBlock, i8* %q) nounwind
+  %tmp26 = call i8* @llvm.objc.loadWeakRetained(i8** %weakBlock) nounwind
+  call void @llvm.objc.destroyWeak(i8** %weakBlock) nounwind
   ret i8* %tmp26
 }
 
-declare i8* @objc_initWeak(i8**, i8*)
-declare void @objc_destroyWeak(i8**)
-declare i8* @objc_loadWeakRetained(i8**)
-declare i8* @objc_storeWeak(i8** %weakBlock, i8* %q)
+declare i8* @llvm.objc.initWeak(i8**, i8*)
+declare void @llvm.objc.destroyWeak(i8**)
+declare i8* @llvm.objc.loadWeakRetained(i8**)
+declare i8* @llvm.objc.storeWeak(i8** %weakBlock, i8* %q)
diff --git a/test/Transforms/ObjCARC/weak.ll b/test/Transforms/ObjCARC/weak.ll
index 119aa82..caaeba7 100644
--- a/test/Transforms/ObjCARC/weak.ll
+++ b/test/Transforms/ObjCARC/weak.ll
@@ -1,12 +1,12 @@
 ; RUN: opt -objc-arc -S < %s | FileCheck %s
 
-declare i8* @objc_initWeak(i8**, i8*)
-declare i8* @objc_storeWeak(i8**, i8*)
-declare i8* @objc_loadWeak(i8**)
-declare void @objc_destroyWeak(i8**)
-declare i8* @objc_loadWeakRetained(i8**)
-declare void @objc_moveWeak(i8**, i8**)
-declare void @objc_copyWeak(i8**, i8**)
+declare i8* @llvm.objc.initWeak(i8**, i8*)
+declare i8* @llvm.objc.storeWeak(i8**, i8*)
+declare i8* @llvm.objc.loadWeak(i8**)
+declare void @llvm.objc.destroyWeak(i8**)
+declare i8* @llvm.objc.loadWeakRetained(i8**)
+declare void @llvm.objc.moveWeak(i8**, i8**)
+declare void @llvm.objc.copyWeak(i8**, i8**)
 
 ; If the pointer-to-weak-pointer is null, it's undefined behavior.
 
@@ -32,26 +32,26 @@
 ; CHECK: ret void
 define void @test0(i8* %p, i8** %q) {
 entry:
-  call i8* @objc_storeWeak(i8** null, i8* %p)
-  call i8* @objc_storeWeak(i8** undef, i8* %p)
-  call i8* @objc_loadWeakRetained(i8** null)
-  call i8* @objc_loadWeakRetained(i8** undef)
-  call i8* @objc_loadWeak(i8** null)
-  call i8* @objc_loadWeak(i8** undef)
-  call i8* @objc_initWeak(i8** null, i8* %p)
-  call i8* @objc_initWeak(i8** undef, i8* %p)
-  call void @objc_destroyWeak(i8** null)
-  call void @objc_destroyWeak(i8** undef)
+  call i8* @llvm.objc.storeWeak(i8** null, i8* %p)
+  call i8* @llvm.objc.storeWeak(i8** undef, i8* %p)
+  call i8* @llvm.objc.loadWeakRetained(i8** null)
+  call i8* @llvm.objc.loadWeakRetained(i8** undef)
+  call i8* @llvm.objc.loadWeak(i8** null)
+  call i8* @llvm.objc.loadWeak(i8** undef)
+  call i8* @llvm.objc.initWeak(i8** null, i8* %p)
+  call i8* @llvm.objc.initWeak(i8** undef, i8* %p)
+  call void @llvm.objc.destroyWeak(i8** null)
+  call void @llvm.objc.destroyWeak(i8** undef)
 
-  call void @objc_copyWeak(i8** null, i8** %q)
-  call void @objc_copyWeak(i8** undef, i8** %q)
-  call void @objc_copyWeak(i8** %q, i8** null)
-  call void @objc_copyWeak(i8** %q, i8** undef)
+  call void @llvm.objc.copyWeak(i8** null, i8** %q)
+  call void @llvm.objc.copyWeak(i8** undef, i8** %q)
+  call void @llvm.objc.copyWeak(i8** %q, i8** null)
+  call void @llvm.objc.copyWeak(i8** %q, i8** undef)
 
-  call void @objc_moveWeak(i8** null, i8** %q)
-  call void @objc_moveWeak(i8** undef, i8** %q)
-  call void @objc_moveWeak(i8** %q, i8** null)
-  call void @objc_moveWeak(i8** %q, i8** undef)
+  call void @llvm.objc.moveWeak(i8** null, i8** %q)
+  call void @llvm.objc.moveWeak(i8** undef, i8** %q)
+  call void @llvm.objc.moveWeak(i8** %q, i8** null)
+  call void @llvm.objc.moveWeak(i8** %q, i8** undef)
 
   ret void
 }
diff --git a/test/Transforms/PhaseOrdering/rotate.ll b/test/Transforms/PhaseOrdering/rotate.ll
index 1d33de7..e10a46c 100644
--- a/test/Transforms/PhaseOrdering/rotate.ll
+++ b/test/Transforms/PhaseOrdering/rotate.ll
@@ -5,30 +5,24 @@
 ; This should become a single funnel shift through a combination
 ; of aggressive-instcombine, simplifycfg, and instcombine.
 ; https://bugs.llvm.org/show_bug.cgi?id=34924
+; These are equivalent, but the value name produced with the new PM exposes a
+; bug: this code should not have been converted to a speculative select by an
+; intermediate transform.
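+;
+; For reference, a sketch of the equivalence being checked: a rotate-left of
+; i32 by a variable amount is the fshl intrinsic with both value operands
+; equal, i.e.
+;   rotl(a, b) == fshl(a, a, b) == (a << (b & 31)) | (a >> (-b & 31))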
 
 define i32 @rotl(i32 %a, i32 %b) {
 ; OLDPM-LABEL: @rotl(
 ; OLDPM-NEXT:  entry:
-; OLDPM-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
-; OLDPM-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
-; OLDPM:       rotbb:
-; OLDPM-NEXT:    [[SUB:%.*]] = sub i32 32, [[B]]
-; OLDPM-NEXT:    [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; OLDPM-NEXT:    [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; OLDPM-NEXT:    [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
-; OLDPM-NEXT:    br label [[END]]
-; OLDPM:       end:
-; OLDPM-NEXT:    [[COND:%.*]] = phi i32 [ [[OR]], [[ROTBB]] ], [ [[A]], [[ENTRY:%.*]] ]
-; OLDPM-NEXT:    ret i32 [[COND]]
+; OLDPM-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B:%.*]])
+; OLDPM-NEXT:    ret i32 [[TMP0]]
 ;
 ; NEWPM-LABEL: @rotl(
 ; NEWPM-NEXT:  entry:
-; NEWPM-NEXT:    [[TMP0:%.*]] = sub i32 0, [[B:%.*]]
-; NEWPM-NEXT:    [[TMP1:%.*]] = and i32 [[B]], 31
-; NEWPM-NEXT:    [[TMP2:%.*]] = and i32 [[TMP0]], 31
-; NEWPM-NEXT:    [[TMP3:%.*]] = lshr i32 [[A:%.*]], [[TMP2]]
-; NEWPM-NEXT:    [[TMP4:%.*]] = shl i32 [[A]], [[TMP1]]
-; NEWPM-NEXT:    [[SPEC_SELECT:%.*]] = or i32 [[TMP3]], [[TMP4]]
+; NEWPM-NEXT:    [[SPEC_SELECT:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B:%.*]])
 ; NEWPM-NEXT:    ret i32 [[SPEC_SELECT]]
 ;
 entry:
diff --git a/test/Transforms/PreISelIntrinsicLowering/objc-arc.ll b/test/Transforms/PreISelIntrinsicLowering/objc-arc.ll
new file mode 100644
index 0000000..8b7d11e
--- /dev/null
+++ b/test/Transforms/PreISelIntrinsicLowering/objc-arc.ll
@@ -0,0 +1,317 @@
+; RUN: opt -pre-isel-intrinsic-lowering -S -o - %s | FileCheck %s
+; RUN: opt -passes='pre-isel-intrinsic-lowering' -S -o - %s | FileCheck %s
+
+; Make sure calls to the objc intrinsics are translated into calls to the
+; runtime.
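+;
+; For example, per the CHECK lines below, a retain intrinsic call such as
+;   %0 = call i8* @llvm.objc.retain(i8* %arg0)
+; is expected to be rewritten into a plain call to the runtime function:
+;   %0 = call i8* @objc_retain(i8* %arg0)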
+
+define i8* @test_objc_autorelease(i8* %arg0) {
+; CHECK-LABEL: test_objc_autorelease
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_autorelease(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.autorelease(i8* %arg0)
+  ret i8* %0
+}
+
+define void @test_objc_autoreleasePoolPop(i8* %arg0) {
+; CHECK-LABEL: test_objc_autoreleasePoolPop
+; CHECK-NEXT: entry
+; CHECK-NEXT: call void @objc_autoreleasePoolPop(i8* %arg0)
+; CHECK-NEXT: ret void
+entry:
+  call void @llvm.objc.autoreleasePoolPop(i8* %arg0)
+  ret void
+}
+
+define i8* @test_objc_autoreleasePoolPush() {
+; CHECK-LABEL: test_objc_autoreleasePoolPush
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_autoreleasePoolPush()
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.autoreleasePoolPush()
+  ret i8* %0
+}
+
+define i8* @test_objc_autoreleaseReturnValue(i8* %arg0) {
+; CHECK-LABEL: test_objc_autoreleaseReturnValue
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_autoreleaseReturnValue(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %arg0)
+  ret i8* %0
+}
+
+define void @test_objc_copyWeak(i8** %arg0, i8** %arg1) {
+; CHECK-LABEL: test_objc_copyWeak
+; CHECK-NEXT: entry
+; CHECK-NEXT: call void @objc_copyWeak(i8** %arg0, i8** %arg1)
+; CHECK-NEXT: ret void
+entry:
+  call void @llvm.objc.copyWeak(i8** %arg0, i8** %arg1)
+  ret void
+}
+
+define void @test_objc_destroyWeak(i8** %arg0) {
+; CHECK-LABEL: test_objc_destroyWeak
+; CHECK-NEXT: entry
+; CHECK-NEXT: call void @objc_destroyWeak(i8** %arg0)
+; CHECK-NEXT: ret void
+entry:
+  call void @llvm.objc.destroyWeak(i8** %arg0)
+  ret void
+}
+
+define i8* @test_objc_initWeak(i8** %arg0, i8* %arg1) {
+; CHECK-LABEL: test_objc_initWeak
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_initWeak(i8** %arg0, i8* %arg1)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.initWeak(i8** %arg0, i8* %arg1)
+  ret i8* %0
+}
+
+define i8* @test_objc_loadWeak(i8** %arg0) {
+; CHECK-LABEL: test_objc_loadWeak
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_loadWeak(i8** %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.loadWeak(i8** %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_loadWeakRetained(i8** %arg0) {
+; CHECK-LABEL: test_objc_loadWeakRetained
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_loadWeakRetained(i8** %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.loadWeakRetained(i8** %arg0)
+  ret i8* %0
+}
+
+define void @test_objc_moveWeak(i8** %arg0, i8** %arg1) {
+; CHECK-LABEL: test_objc_moveWeak
+; CHECK-NEXT: entry
+; CHECK-NEXT: call void @objc_moveWeak(i8** %arg0, i8** %arg1)
+; CHECK-NEXT: ret void
+entry:
+  call void @llvm.objc.moveWeak(i8** %arg0, i8** %arg1)
+  ret void
+}
+
+define void @test_objc_release(i8* %arg0) {
+; CHECK-LABEL: test_objc_release
+; CHECK-NEXT: entry
+; CHECK-NEXT: call void @objc_release(i8* %arg0)
+; CHECK-NEXT: ret void
+entry:
+  call void @llvm.objc.release(i8* %arg0)
+  ret void
+}
+
+define i8* @test_objc_retain(i8* %arg0) {
+; CHECK-LABEL: test_objc_retain
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_retain(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.retain(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_retainAutorelease(i8* %arg0) {
+; CHECK-LABEL: test_objc_retainAutorelease
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_retainAutorelease(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.retainAutorelease(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_retainAutoreleaseReturnValue(i8* %arg0) {
+; CHECK-LABEL: test_objc_retainAutoreleaseReturnValue
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = tail call i8* @objc_retainAutoreleaseReturnValue(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = tail call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_retainAutoreleasedReturnValue(i8* %arg0) {
+; CHECK-LABEL: test_objc_retainAutoreleasedReturnValue
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_retainBlock(i8* %arg0) {
+; CHECK-LABEL: test_objc_retainBlock
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_retainBlock(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.retainBlock(i8* %arg0)
+  ret i8* %0
+}
+
+define void @test_objc_storeStrong(i8** %arg0, i8* %arg1) {
+; CHECK-LABEL: test_objc_storeStrong
+; CHECK-NEXT: entry
+; CHECK-NEXT: call void @objc_storeStrong(i8** %arg0, i8* %arg1)
+; CHECK-NEXT: ret void
+entry:
+  call void @llvm.objc.storeStrong(i8** %arg0, i8* %arg1)
+  ret void
+}
+
+define i8* @test_objc_storeWeak(i8** %arg0, i8* %arg1) {
+; CHECK-LABEL: test_objc_storeWeak
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_storeWeak(i8** %arg0, i8* %arg1)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.storeWeak(i8** %arg0, i8* %arg1)
+  ret i8* %0
+}
+
+define i8* @test_objc_unsafeClaimAutoreleasedReturnValue(i8* %arg0) {
+; CHECK-LABEL: test_objc_unsafeClaimAutoreleasedReturnValue
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_retainedObject(i8* %arg0) {
+; CHECK-LABEL: test_objc_retainedObject
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_retainedObject(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.retainedObject(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_unretainedObject(i8* %arg0) {
+; CHECK-LABEL: test_objc_unretainedObject
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_unretainedObject(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.unretainedObject(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_unretainedPointer(i8* %arg0) {
+; CHECK-LABEL: test_objc_unretainedPointer
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_unretainedPointer(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.unretainedPointer(i8* %arg0)
+  ret i8* %0
+}
+
+define i8* @test_objc_retain_autorelease(i8* %arg0) {
+; CHECK-LABEL: test_objc_retain_autorelease
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i8* @objc_retain_autorelease(i8* %arg0)
+; CHECK-NEXT: ret i8* %0
+entry:
+  %0 = call i8* @llvm.objc.retain.autorelease(i8* %arg0)
+  ret i8* %0
+}
+
+define i32 @test_objc_sync_enter(i8* %arg0) {
+; CHECK-LABEL: test_objc_sync_enter
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i32 @objc_sync_enter(i8* %arg0)
+; CHECK-NEXT: ret i32 %0
+entry:
+  %0 = call i32 @llvm.objc.sync.enter(i8* %arg0)
+  ret i32 %0
+}
+
+define i32 @test_objc_sync_exit(i8* %arg0) {
+; CHECK-LABEL: test_objc_sync_exit
+; CHECK-NEXT: entry
+; CHECK-NEXT: %0 = call i32 @objc_sync_exit(i8* %arg0)
+; CHECK-NEXT: ret i32 %0
+entry:
+  %0 = call i32 @llvm.objc.sync.exit(i8* %arg0)
+  ret i32 %0
+}
+
+declare i8* @llvm.objc.autorelease(i8*)
+declare void @llvm.objc.autoreleasePoolPop(i8*)
+declare i8* @llvm.objc.autoreleasePoolPush()
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare void @llvm.objc.copyWeak(i8**, i8**)
+declare void @llvm.objc.destroyWeak(i8**)
+declare extern_weak i8* @llvm.objc.initWeak(i8**, i8*)
+declare i8* @llvm.objc.loadWeak(i8**)
+declare i8* @llvm.objc.loadWeakRetained(i8**)
+declare void @llvm.objc.moveWeak(i8**, i8**)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.retainAutorelease(i8*)
+declare i8* @llvm.objc.retainAutoreleaseReturnValue(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.retainBlock(i8*)
+declare void @llvm.objc.storeStrong(i8**, i8*)
+declare i8* @llvm.objc.storeWeak(i8**, i8*)
+declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.retainedObject(i8*)
+declare i8* @llvm.objc.unretainedObject(i8*)
+declare i8* @llvm.objc.unretainedPointer(i8*)
+declare i8* @llvm.objc.retain.autorelease(i8*)
+declare i32 @llvm.objc.sync.enter(i8*)
+declare i32 @llvm.objc.sync.exit(i8*)
+
+attributes #0 = { nounwind }
+
+; CHECK: declare i8* @objc_autorelease(i8*)
+; CHECK: declare void @objc_autoreleasePoolPop(i8*)
+; CHECK: declare i8* @objc_autoreleasePoolPush()
+; CHECK: declare i8* @objc_autoreleaseReturnValue(i8*)
+; CHECK: declare void @objc_copyWeak(i8**, i8**)
+; CHECK: declare void @objc_destroyWeak(i8**)
+; CHECK: declare extern_weak i8* @objc_initWeak(i8**, i8*)
+; CHECK: declare i8* @objc_loadWeak(i8**)
+; CHECK: declare i8* @objc_loadWeakRetained(i8**)
+; CHECK: declare void @objc_moveWeak(i8**, i8**)
+; CHECK: declare void @objc_release(i8*) [[NLB:#[0-9]+]]
+; CHECK: declare i8* @objc_retain(i8*) [[NLB]]
+; CHECK: declare i8* @objc_retainAutorelease(i8*)
+; CHECK: declare i8* @objc_retainAutoreleaseReturnValue(i8*)
+; CHECK: declare i8* @objc_retainAutoreleasedReturnValue(i8*)
+; CHECK: declare i8* @objc_retainBlock(i8*)
+; CHECK: declare void @objc_storeStrong(i8**, i8*)
+; CHECK: declare i8* @objc_storeWeak(i8**, i8*)
+; CHECK: declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8*)
+; CHECK: declare i8* @objc_retainedObject(i8*)
+; CHECK: declare i8* @objc_unretainedObject(i8*)
+; CHECK: declare i8* @objc_unretainedPointer(i8*)
+; CHECK: declare i8* @objc_retain_autorelease(i8*)
+; CHECK: declare i32 @objc_sync_enter(i8*)
+; CHECK: declare i32 @objc_sync_exit(i8*)
+
+; CHECK: attributes #0 = { nounwind }
+; CHECK: attributes [[NLB]] = { nonlazybind }
diff --git a/test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll b/test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll
index edc8042..ad970b2 100644
--- a/test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic < %s | FileCheck %s
 ; RUN: opt -S -slp-vectorizer -mtriple=aarch64-apple-ios -mcpu=cyclone < %s | FileCheck %s
 ; Currently disabled for a few subtargets (e.g. Kryo):
@@ -5,14 +6,36 @@
 ; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic -slp-min-reg-size=128 < %s | FileCheck --check-prefix=NO_SLP %s
 
 define void @f(float* %r, float* %w) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT:    [[R0:%.*]] = getelementptr inbounds float, float* [[R:%.*]], i64 0
+; CHECK-NEXT:    [[R1:%.*]] = getelementptr inbounds float, float* [[R]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[R0]] to <2 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, <2 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x float> [[TMP2]], [[TMP2]]
+; CHECK-NEXT:    [[W0:%.*]] = getelementptr inbounds float, float* [[W:%.*]], i64 0
+; CHECK-NEXT:    [[W1:%.*]] = getelementptr inbounds float, float* [[W]], i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float* [[W0]] to <2 x float>*
+; CHECK-NEXT:    store <2 x float> [[TMP3]], <2 x float>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
+; NO_SLP-LABEL: @f(
+; NO_SLP-NEXT:    [[R0:%.*]] = getelementptr inbounds float, float* [[R:%.*]], i64 0
+; NO_SLP-NEXT:    [[R1:%.*]] = getelementptr inbounds float, float* [[R]], i64 1
+; NO_SLP-NEXT:    [[F0:%.*]] = load float, float* [[R0]]
+; NO_SLP-NEXT:    [[F1:%.*]] = load float, float* [[R1]]
+; NO_SLP-NEXT:    [[ADD0:%.*]] = fadd float [[F0]], [[F0]]
+; NO_SLP-NEXT:    [[ADD1:%.*]] = fadd float [[F1]], [[F1]]
+; NO_SLP-NEXT:    [[W0:%.*]] = getelementptr inbounds float, float* [[W:%.*]], i64 0
+; NO_SLP-NEXT:    [[W1:%.*]] = getelementptr inbounds float, float* [[W]], i64 1
+; NO_SLP-NEXT:    store float [[ADD0]], float* [[W0]]
+; NO_SLP-NEXT:    store float [[ADD1]], float* [[W1]]
+; NO_SLP-NEXT:    ret void
+;
   %r0 = getelementptr inbounds float, float* %r, i64 0
   %r1 = getelementptr inbounds float, float* %r, i64 1
   %f0 = load float, float* %r0
   %f1 = load float, float* %r1
   %add0 = fadd float %f0, %f0
-; CHECK:  fadd <2 x float>
-; NO_SLP: fadd float
-; NO_SLP: fadd float
   %add1 = fadd float %f1, %f1
   %w0 = getelementptr inbounds float, float* %w, i64 0
   %w1 = getelementptr inbounds float, float* %w, i64 1
diff --git a/test/Transforms/SLPVectorizer/AArch64/commute.ll b/test/Transforms/SLPVectorizer/AArch64/commute.ll
index 2bce59c..3f831fd 100644
--- a/test/Transforms/SLPVectorizer/AArch64/commute.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/commute.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -slp-vectorizer %s -slp-threshold=-10 | FileCheck %s
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
@@ -5,17 +6,27 @@
 %structA = type { [2 x float] }
 
 define void @test1(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
-; CHECK-LABEL: test1
-; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
-; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
-; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
-; CHECK: %4 = load <2 x float>, <2 x float>* %3, align 4
-; CHECK: %5 = fsub fast <2 x float> %2, %4
-; CHECK: %6 = fmul fast <2 x float> %5, %5
-; CHECK: %7 = extractelement <2 x float> %6, i32 0
-; CHECK: %8 = extractelement <2 x float> %6, i32 1
-; CHECK: %add = fadd fast float %7, %8
-; CHECK: %cmp = fcmp oeq float %add, 0.000000e+00
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x i32> undef, i32 [[XMIN:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x i32> [[TMP0]], i32 [[YMIN:%.*]], i32 1
+; CHECK-NEXT:    br label [[FOR_BODY3_LR_PH:%.*]]
+; CHECK:       for.body3.lr.ph:
+; CHECK-NEXT:    [[TMP2:%.*]] = sitofp <2 x i32> [[TMP1]] to <2 x float>
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [[STRUCTA:%.*]], %structA* [[J:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [[STRUCTA]], %structA* [[J]], i64 0, i32 0, i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[ARRAYIDX4]] to <2 x float>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x float>, <2 x float>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = fsub fast <2 x float> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <2 x float> [[TMP5]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x float> [[TMP6]], i32 0
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x float> [[TMP6]], i32 1
+; CHECK-NEXT:    [[ADD:%.*]] = fadd fast float [[TMP7]], [[TMP8]]
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp oeq float [[ADD]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY3_LR_PH]], label [[FOR_END27:%.*]]
+; CHECK:       for.end27:
+; CHECK-NEXT:    ret void
+;
 
 entry:
   br label %for.body3.lr.ph
@@ -40,17 +51,27 @@
 }
 
 define void @test2(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
-; CHECK-LABEL: test2
-; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
-; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
-; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
-; CHECK: %4 = load <2 x float>, <2 x float>* %3, align 4
-; CHECK: %5 = fsub fast <2 x float> %2, %4
-; CHECK: %6 = fmul fast <2 x float> %5, %5
-; CHECK: %7 = extractelement <2 x float> %6, i32 0
-; CHECK: %8 = extractelement <2 x float> %6, i32 1
-; CHECK: %add = fadd fast float %8, %7
-; CHECK: %cmp = fcmp oeq float %add, 0.000000e+00
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x i32> undef, i32 [[XMIN:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x i32> [[TMP0]], i32 [[YMIN:%.*]], i32 1
+; CHECK-NEXT:    br label [[FOR_BODY3_LR_PH:%.*]]
+; CHECK:       for.body3.lr.ph:
+; CHECK-NEXT:    [[TMP2:%.*]] = sitofp <2 x i32> [[TMP1]] to <2 x float>
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [[STRUCTA:%.*]], %structA* [[J:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [[STRUCTA]], %structA* [[J]], i64 0, i32 0, i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[ARRAYIDX4]] to <2 x float>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x float>, <2 x float>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = fsub fast <2 x float> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <2 x float> [[TMP5]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x float> [[TMP6]], i32 0
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x float> [[TMP6]], i32 1
+; CHECK-NEXT:    [[ADD:%.*]] = fadd fast float [[TMP8]], [[TMP7]]
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp oeq float [[ADD]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY3_LR_PH]], label [[FOR_END27:%.*]]
+; CHECK:       for.end27:
+; CHECK-NEXT:    ret void
+;
 
 entry:
   br label %for.body3.lr.ph
diff --git a/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll b/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
index a4c655c..401776a 100644
--- a/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -S -slp-vectorizer -instcombine -pass-remarks-output=%t | FileCheck %s
 ; RUN: cat %t | FileCheck -check-prefix=REMARK %s
 ; RUN: opt < %s -S -passes='slp-vectorizer,instcombine' -pass-remarks-output=%t | FileCheck %s
@@ -6,25 +7,25 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-; CHECK-LABEL:  @gather_multiple_use(
-; CHECK-NEXT:     [[TMP1:%.*]] = insertelement <4 x i32> undef, i32 [[C:%.*]], i32 0
-; CHECK-NEXT:     [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[A:%.*]], i32 1
-; CHECK-NEXT:     [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[B:%.*]], i32 2
-; CHECK-NEXT:     [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[D:%.*]], i32 3
-; CHECK-NEXT:     [[TMP5:%.*]] = lshr <4 x i32> [[TMP4]], <i32 15, i32 15, i32 15, i32 15>
-; CHECK-NEXT:     [[TMP6:%.*]] = and <4 x i32> [[TMP5]], <i32 65537, i32 65537, i32 65537, i32 65537>
-; CHECK-NEXT:     [[TMP7:%.*]] = mul nuw <4 x i32> [[TMP6]], <i32 65535, i32 65535, i32 65535, i32 65535>
-; CHECK-NEXT:     [[TMP8:%.*]] = add <4 x i32> [[TMP4]], [[TMP7]]
-; CHECK-NEXT:     [[TMP9:%.*]] = xor <4 x i32> [[TMP8]], [[TMP7]]
-; CHECK-NEXT:     [[TMP10:%.*]] = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> [[TMP9]])
-; CHECK-NEXT:     ret i32 [[TMP10]]
-;
 ; REMARK-LABEL: Function: gather_multiple_use
 ; REMARK:       Args:
 ; REMARK-NEXT:    - String: 'Vectorized horizontal reduction with cost '
 ; REMARK-NEXT:    - Cost: '-7'
 ;
 define internal i32 @gather_multiple_use(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: @gather_multiple_use(
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i32> undef, i32 [[C:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[A:%.*]], i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[B:%.*]], i32 2
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[D:%.*]], i32 3
+; CHECK-NEXT:    [[TMP5:%.*]] = lshr <4 x i32> [[TMP4]], <i32 15, i32 15, i32 15, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = and <4 x i32> [[TMP5]], <i32 65537, i32 65537, i32 65537, i32 65537>
+; CHECK-NEXT:    [[TMP7:%.*]] = mul nuw <4 x i32> [[TMP6]], <i32 65535, i32 65535, i32 65535, i32 65535>
+; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i32> [[TMP4]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = xor <4 x i32> [[TMP8]], [[TMP7]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> [[TMP9]])
+; CHECK-NEXT:    ret i32 [[TMP10]]
+;
   %tmp00 = lshr i32 %a, 15
   %tmp01 = and i32 %tmp00, 65537
   %tmp02 = mul nuw i32 %tmp01, 65535
diff --git a/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll b/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
index 3f61875b..db02f55 100644
--- a/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -slp-vectorizer -slp-threshold=-18 -dce -instcombine -pass-remarks-output=%t < %s | FileCheck %s
 ; RUN: cat %t | FileCheck -check-prefix=YAML %s
 ; RUN: opt -S -passes='slp-vectorizer,dce,instcombine' -slp-threshold=-18 -pass-remarks-output=%t < %s | FileCheck %s
@@ -22,12 +23,6 @@
 ; }
 ;
 
-; CHECK-LABEL: @getelementptr_4x32
-;
-; CHECK: [[A:%[a-zA-Z0-9.]+]] = add nsw <4 x i32>
-; CHECK: [[X:%[a-zA-Z0-9.]+]] = extractelement <4 x i32> [[A]]
-; CHECK: sext i32 [[X]] to i64
-
 ; YAML:      --- !Passed
 ; YAML-NEXT: Pass:            slp-vectorizer
 ; YAML-NEXT: Name:            VectorizedList
@@ -49,6 +44,56 @@
 ; YAML-NEXT:   - TreeSize:        '3'
 
 define i32 @getelementptr_4x32(i32* nocapture readonly %g, i32 %n, i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @getelementptr_4x32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP31:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP31]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.body.preheader:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>, i32 [[X:%.*]], i32 1
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[Y:%.*]], i32 2
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[Z:%.*]], i32 3
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup.loopexit:
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP21:%.*]], i32 1
+; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP3]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
+; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[TMP4:%.*]] = phi <2 x i32> [ zeroinitializer, [[FOR_BODY_PREHEADER]] ], [ [[TMP21]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i32> [[TMP4]], i32 0
+; CHECK-NEXT:    [[T4:%.*]] = shl nsw i32 [[TMP5]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> undef, i32 [[T4]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP8:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x i32> [[TMP8]], i32 0
+; CHECK-NEXT:    [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[G:%.*]], i64 [[TMP10]]
+; CHECK-NEXT:    [[T6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x i32> [[TMP4]], i32 1
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[T6]], [[TMP11]]
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <4 x i32> [[TMP8]], i32 1
+; CHECK-NEXT:    [[TMP13:%.*]] = sext i32 [[TMP12]] to i64
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[G]], i64 [[TMP13]]
+; CHECK-NEXT:    [[T8:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[T8]]
+; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <4 x i32> [[TMP8]], i32 2
+; CHECK-NEXT:    [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[G]], i64 [[TMP15]]
+; CHECK-NEXT:    [[T10:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD6]], [[T10]]
+; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <4 x i32> [[TMP8]], i32 3
+; CHECK-NEXT:    [[TMP17:%.*]] = sext i32 [[TMP16]] to i64
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, i32* [[G]], i64 [[TMP17]]
+; CHECK-NEXT:    [[T12:%.*]] = load i32, i32* [[ARRAYIDX15]], align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <2 x i32> <i32 1, i32 undef>, i32 [[ADD11]], i32 1
+; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x i32> undef, i32 [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x i32> [[TMP19]], i32 [[T12]], i32 1
+; CHECK-NEXT:    [[TMP21]] = add nsw <2 x i32> [[TMP18]], [[TMP20]]
+; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <2 x i32> [[TMP21]], i32 0
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[TMP22]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
+;
 entry:
   %cmp31 = icmp sgt i32 %n, 0
   br i1 %cmp31, label %for.body.preheader, label %for.cond.cleanup
@@ -88,12 +133,6 @@
   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
 }
 
-; CHECK-LABEL: @getelementptr_2x32
-;
-; CHECK: [[A:%[a-zA-Z0-9.]+]] = add nsw <2 x i32>
-; CHECK: [[X:%[a-zA-Z0-9.]+]] = extractelement <2 x i32> [[A]]
-; CHECK: sext i32 [[X]] to i64
-
 ; YAML:      --- !Passed
 ; YAML-NEXT: Pass:            slp-vectorizer
 ; YAML-NEXT: Name:            VectorizedList
@@ -115,6 +154,54 @@
 ; YAML-NEXT:   - TreeSize:        '3'
 
 define i32 @getelementptr_2x32(i32* nocapture readonly %g, i32 %n, i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @getelementptr_2x32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP31:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP31]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.body.preheader:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x i32> undef, i32 [[Y:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x i32> [[TMP0]], i32 [[Z:%.*]], i32 1
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup.loopexit:
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32> [[TMP18:%.*]], i32 1
+; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
+; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[TMP3:%.*]] = phi <2 x i32> [ zeroinitializer, [[FOR_BODY_PREHEADER]] ], [ [[TMP18]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
+; CHECK-NEXT:    [[T4:%.*]] = shl nsw i32 [[TMP4]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = sext i32 [[T4]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[G:%.*]], i64 [[TMP5]]
+; CHECK-NEXT:    [[T6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[T6]], [[TMP6]]
+; CHECK-NEXT:    [[T7:%.*]] = or i32 [[T4]], 1
+; CHECK-NEXT:    [[TMP7:%.*]] = sext i32 [[T7]] to i64
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[G]], i64 [[TMP7]]
+; CHECK-NEXT:    [[T8:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[T8]]
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i32> undef, i32 [[T4]], i32 0
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i32> [[TMP8]], <2 x i32> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP10:%.*]] = add nsw <2 x i32> [[TMP1]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x i32> [[TMP10]], i32 0
+; CHECK-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP11]] to i64
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[G]], i64 [[TMP12]]
+; CHECK-NEXT:    [[T10:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD6]], [[T10]]
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <2 x i32> [[TMP10]], i32 1
+; CHECK-NEXT:    [[TMP14:%.*]] = sext i32 [[TMP13]] to i64
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, i32* [[G]], i64 [[TMP14]]
+; CHECK-NEXT:    [[T12:%.*]] = load i32, i32* [[ARRAYIDX15]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = insertelement <2 x i32> <i32 1, i32 undef>, i32 [[ADD11]], i32 1
+; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <2 x i32> undef, i32 [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <2 x i32> [[TMP16]], i32 [[T12]], i32 1
+; CHECK-NEXT:    [[TMP18]] = add nsw <2 x i32> [[TMP15]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = extractelement <2 x i32> [[TMP18]], i32 0
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[TMP19]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
+;
 entry:
   %cmp31 = icmp sgt i32 %n, 0
   br i1 %cmp31, label %for.body.preheader, label %for.cond.cleanup
diff --git a/test/Transforms/SLPVectorizer/AArch64/horizontal.ll b/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
index 02cf09d..c4a584f 100644
--- a/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
@@ -1,6 +1,5 @@
-; RUN: opt -slp-vectorizer -slp-threshold=-6 -S -pass-remarks-output=%t <  %s | FileCheck %s
-; RUN: cat %t | FileCheck -check-prefix=YAML %s
-; RUN: opt -passes=slp-vectorizer -slp-threshold=-6 -S -pass-remarks-output=%t <  %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -slp-vectorizer -slp-threshold=-6 -S -pass-remarks-output=%t < %s | FileCheck %s
 ; RUN: cat %t | FileCheck -check-prefix=YAML %s
 
 
@@ -10,11 +9,6 @@
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux"
 
-; CHECK-LABEL: test_select
-; CHECK: load <4 x i32>
-; CHECK: load <4 x i32>
-; CHECK: select <4 x i1>
-
 ; YAML:      --- !Passed
 ; YAML-NEXT: Pass:            slp-vectorizer
 ; YAML-NEXT: Name:            VectorizedHorizontalReduction
@@ -26,6 +20,49 @@
 ; YAML-NEXT:   - TreeSize:        '8'
 
 define i32 @test_select(i32* noalias nocapture readonly %blk1, i32* noalias nocapture readonly %blk2, i32 %lx, i32 %h) {
+; CHECK-LABEL: @test_select(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP_22:%.*]] = icmp sgt i32 [[H:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP_22]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[LX:%.*]] to i64
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[S_026:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[OP_EXTRA:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[J_025:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[P2_024:%.*]] = phi i32* [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR29:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[P1_023:%.*]] = phi i32* [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P1_023]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 3
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[P2_024]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP4]]
+; CHECK-NEXT:    [[TMP7:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP4]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 undef, [[S_026]]
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD]], undef
+; CHECK-NEXT:    [[ADD19:%.*]] = add nsw i32 [[ADD11]], undef
+; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> [[TMP7]])
+; CHECK-NEXT:    [[OP_EXTRA]] = add nsw i32 [[TMP8]], [[S_026]]
+; CHECK-NEXT:    [[ADD27:%.*]] = add nsw i32 [[ADD19]], undef
+; CHECK-NEXT:    [[ADD_PTR]] = getelementptr inbounds i32, i32* [[P1_023]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR29]] = getelementptr inbounds i32, i32* [[P2_024]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[J_025]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[H]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end.loopexit:
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[S_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_EXTRA]], [[FOR_END_LOOPEXIT]] ]
+; CHECK-NEXT:    ret i32 [[S_0_LCSSA]]
+;
 entry:
   %cmp.22 = icmp sgt i32 %h, 0
   br i1 %cmp.22, label %for.body.lr.ph, label %for.end
@@ -104,11 +141,6 @@
 ;;   p2 += lx;
 ;; }
 define i32 @reduction_with_br(i32* noalias nocapture readonly %blk1, i32* noalias nocapture readonly %blk2, i32 %lx, i32 %h, i32 %lim) {
-; CHECK-LABEL: reduction_with_br
-; CHECK: load <4 x i32>
-; CHECK: load <4 x i32>
-; CHECK: mul nsw <4 x i32>
-
 ; YAML:      --- !Passed
 ; YAML-NEXT: Pass:            slp-vectorizer
 ; YAML-NEXT: Name:            VectorizedHorizontalReduction
@@ -118,7 +150,49 @@
 ; YAML-NEXT:   - Cost:            '-11'
 ; YAML-NEXT:   - String:          ' and with tree size '
 ; YAML-NEXT:   - TreeSize:        '3'
-
+; CHECK-LABEL: @reduction_with_br(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP_16:%.*]] = icmp sgt i32 [[H:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP_16]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[LX:%.*]] to i64
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[S_020:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[OP_EXTRA:%.*]], [[IF_END:%.*]] ]
+; CHECK-NEXT:    [[J_019:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[IF_END]] ]
+; CHECK-NEXT:    [[P2_018:%.*]] = phi i32* [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR16:%.*]], [[IF_END]] ]
+; CHECK-NEXT:    [[P1_017:%.*]] = phi i32* [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[IF_END]] ]
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[P1_017]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[P2_018]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[P1_017]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[P2_018]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[P1_017]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P1_017]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[P2_018]], i64 3
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[P2_018]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 undef, [[S_020]]
+; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[ADD]], undef
+; CHECK-NEXT:    [[ADD9:%.*]] = add nsw i32 [[ADD5]], undef
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> [[TMP4]])
+; CHECK-NEXT:    [[OP_EXTRA]] = add nsw i32 [[TMP5]], [[S_020]]
+; CHECK-NEXT:    [[ADD13:%.*]] = add nsw i32 [[ADD9]], undef
+; CHECK-NEXT:    [[CMP14:%.*]] = icmp slt i32 [[OP_EXTRA]], [[LIM:%.*]]
+; CHECK-NEXT:    br i1 [[CMP14]], label [[IF_END]], label [[FOR_END_LOOPEXIT:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[ADD_PTR]] = getelementptr inbounds i32, i32* [[P1_017]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR16]] = getelementptr inbounds i32, i32* [[P2_018]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[J_019]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[H]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]]
+; CHECK:       for.end.loopexit:
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[S_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_EXTRA]], [[FOR_END_LOOPEXIT]] ]
+; CHECK-NEXT:    ret i32 [[S_1]]
+;
 entry:
   %cmp.16 = icmp sgt i32 %h, 0
   br i1 %cmp.16, label %for.body.lr.ph, label %for.end
@@ -172,11 +246,6 @@
   ret i32 %s.1
 }
 
-; CHECK: test_unrolled_select
-; CHECK: load <8 x i8>
-; CHECK: load <8 x i8>
-; CHECK: select <8 x i1>
-
 ; YAML:      --- !Passed
 ; YAML-NEXT: Pass:            slp-vectorizer
 ; YAML-NEXT: Name:            VectorizedHorizontalReduction
@@ -188,6 +257,66 @@
 ; YAML-NEXT:   - TreeSize:        '10'
 
 define i32 @test_unrolled_select(i8* noalias nocapture readonly %blk1, i8* noalias nocapture readonly %blk2, i32 %lx, i32 %h, i32 %lim) #0 {
+; CHECK-LABEL: @test_unrolled_select(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP_43:%.*]] = icmp sgt i32 [[H:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP_43]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[LX:%.*]] to i64
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[S_047:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[OP_EXTRA:%.*]], [[IF_END_86:%.*]] ]
+; CHECK-NEXT:    [[J_046:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[IF_END_86]] ]
+; CHECK-NEXT:    [[P2_045:%.*]] = phi i8* [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR88:%.*]], [[IF_END_86]] ]
+; CHECK-NEXT:    [[P1_044:%.*]] = phi i8* [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[IF_END_86]] ]
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 3
+; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 3
+; CHECK-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX50:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 5
+; CHECK-NEXT:    [[ARRAYIDX52:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 5
+; CHECK-NEXT:    [[ARRAYIDX61:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 6
+; CHECK-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 6
+; CHECK-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 7
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[P1_044]] to <8 x i8>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = zext <8 x i8> [[TMP1]] to <8 x i32>
+; CHECK-NEXT:    [[ARRAYIDX74:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 7
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[P2_045]] to <8 x i8>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = zext <8 x i8> [[TMP4]] to <8 x i32>
+; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw <8 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp slt <8 x i32> [[TMP6]], zeroinitializer
+; CHECK-NEXT:    [[TMP8:%.*]] = sub nsw <8 x i32> zeroinitializer, [[TMP6]]
+; CHECK-NEXT:    [[TMP9:%.*]] = select <8 x i1> [[TMP7]], <8 x i32> [[TMP8]], <8 x i32> [[TMP6]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 undef, [[S_047]]
+; CHECK-NEXT:    [[ADD16:%.*]] = add nsw i32 [[ADD]], undef
+; CHECK-NEXT:    [[ADD27:%.*]] = add nsw i32 [[ADD16]], undef
+; CHECK-NEXT:    [[ADD38:%.*]] = add nsw i32 [[ADD27]], undef
+; CHECK-NEXT:    [[ADD49:%.*]] = add nsw i32 [[ADD38]], undef
+; CHECK-NEXT:    [[ADD60:%.*]] = add nsw i32 [[ADD49]], undef
+; CHECK-NEXT:    [[ADD71:%.*]] = add nsw i32 [[ADD60]], undef
+; CHECK-NEXT:    [[TMP10:%.*]] = call i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32> [[TMP9]])
+; CHECK-NEXT:    [[OP_EXTRA]] = add nsw i32 [[TMP10]], [[S_047]]
+; CHECK-NEXT:    [[ADD82:%.*]] = add nsw i32 [[ADD71]], undef
+; CHECK-NEXT:    [[CMP83:%.*]] = icmp slt i32 [[OP_EXTRA]], [[LIM:%.*]]
+; CHECK-NEXT:    br i1 [[CMP83]], label [[IF_END_86]], label [[FOR_END_LOOPEXIT:%.*]]
+; CHECK:       if.end.86:
+; CHECK-NEXT:    [[ADD_PTR]] = getelementptr inbounds i8, i8* [[P1_044]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR88]] = getelementptr inbounds i8, i8* [[P2_045]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[J_046]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[H]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]]
+; CHECK:       for.end.loopexit:
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[S_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_EXTRA]], [[FOR_END_LOOPEXIT]] ]
+; CHECK-NEXT:    ret i32 [[S_1]]
+;
 entry:
   %cmp.43 = icmp sgt i32 %h, 0
   br i1 %cmp.43, label %for.body.lr.ph, label %for.end
diff --git a/test/Transforms/SLPVectorizer/AArch64/matmul.ll b/test/Transforms/SLPVectorizer/AArch64/matmul.ll
index 17beea4..fdd12c1 100644
--- a/test/Transforms/SLPVectorizer/AArch64/matmul.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/matmul.ll
@@ -4,7 +4,14 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 
 ; This test is reduced from the matrix multiplication benchmark in the test-suite:
-; http://www.llvm.org/viewvc/llvm-project/test-suite/trunk/SingleSource/Benchmarks/Misc/matmul_f64_4x4.c
+; https://github.com/llvm/llvm-test-suite/tree/master/SingleSource/Benchmarks/Misc/matmul_f64_4x4.c
 ; The operations here are expected to be vectorized to <2 x double>.
 ; Otherwise, performance will suffer on Cortex-A53.
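+;
+; As an illustration (a sketch with invented value names, not this test's
+; CHECK output), a pair of adjacent scalar multiplies such as
+;   %mul0 = fmul double %a0, %b0
+;   %mul1 = fmul double %a1, %b1
+; should become a single vector operation:
+;   %mul = fmul <2 x double> %va, %vb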
 
diff --git a/test/Transforms/SLPVectorizer/AArch64/minimum-sizes.ll b/test/Transforms/SLPVectorizer/AArch64/minimum-sizes.ll
index 7e1d670..0429665 100644
--- a/test/Transforms/SLPVectorizer/AArch64/minimum-sizes.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/minimum-sizes.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -slp-vectorizer < %s | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
@@ -7,10 +8,19 @@
 ; should not compute a smaller size for %k.13 since it is in a use-def cycle
 ; and cannot be demoted.
 ;
-; CHECK-LABEL: @PR26364
-; CHECK: %k.13 = phi i32
-;
 define fastcc void @PR26364() {
+; CHECK-LABEL: @PR26364(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END11:%.*]], label [[FOR_COND4:%.*]]
+; CHECK:       for.cond4:
+; CHECK-NEXT:    [[K_13:%.*]] = phi i32 [ undef, [[ENTRY:%.*]] ], [ [[K_3:%.*]], [[FOR_COND4]] ]
+; CHECK-NEXT:    [[E_02:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ 0, [[FOR_COND4]] ]
+; CHECK-NEXT:    [[E_1:%.*]] = select i1 undef, i32 [[E_02]], i32 0
+; CHECK-NEXT:    [[K_3]] = select i1 undef, i32 [[K_13]], i32 undef
+; CHECK-NEXT:    br label [[FOR_COND4]]
+; CHECK:       for.end11:
+; CHECK-NEXT:    ret void
+;
 entry:
   br i1 undef, label %for.end11, label %for.cond4
 
@@ -29,10 +39,25 @@
 ; every root in the vectorizable tree when computing minimum sizes since one
 ; root may require fewer bits than another.
 ;
-; CHECK-LABEL: @PR26629
-; CHECK-NOT: {{.*}} and <2 x i72>
-;
 define void @PR26629(i32* %c) {
+; CHECK-LABEL: @PR26629(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[FOR_PH:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.ph:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[C:%.*]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[D:%.*]] = phi i72 [ 576507472957710340, [[FOR_PH]] ], [ [[BF_SET17:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[TMP0]], undef
+; CHECK-NEXT:    [[BF_CLEAR13:%.*]] = and i72 [[D]], -576460748008464384
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[SUB]] to i72
+; CHECK-NEXT:    [[BF_VALUE15:%.*]] = and i72 [[TMP1]], 8191
+; CHECK-NEXT:    [[BF_CLEAR16:%.*]] = or i72 [[BF_VALUE15]], [[BF_CLEAR13]]
+; CHECK-NEXT:    [[BF_SET17]] = or i72 [[BF_CLEAR16]], undef
+; CHECK-NEXT:    br label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
 entry:
   br i1 undef, label %for.ph, label %for.end
 
diff --git a/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll b/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll
index 3d6da12..64b8743 100644
--- a/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll
@@ -1,11 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -slp-vectorizer %s | FileCheck %s
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios5.0.0"
 
 define i64 @mismatched_intrinsics(<4 x i32> %in1, <2 x i32> %in2) nounwind {
-; CHECK-LABEL: @mismatched_intrinsics
-; CHECK: call i64 @llvm.arm64.neon.saddlv.i64.v4i32
-; CHECK: call i64 @llvm.arm64.neon.saddlv.i64.v2i32
+; CHECK-LABEL: @mismatched_intrinsics(
+; CHECK-NEXT:    [[VADDLVQ_S32_I:%.*]] = tail call i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> [[IN1:%.*]])
+; CHECK-NEXT:    [[VADDLV_S32_I:%.*]] = tail call i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> [[IN2:%.*]])
+; CHECK-NEXT:    [[TST:%.*]] = icmp sgt i64 [[VADDLVQ_S32_I]], [[VADDLV_S32_I]]
+; CHECK-NEXT:    [[EQUAL:%.*]] = sext i1 [[TST]] to i64
+; CHECK-NEXT:    ret i64 [[EQUAL]]
+;
 
   %vaddlvq_s32.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> %in1) #2
   %vaddlv_s32.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> %in2) #2
diff --git a/test/Transforms/SLPVectorizer/AArch64/nontemporal.ll b/test/Transforms/SLPVectorizer/AArch64/nontemporal.ll
index 87d021d..98c0332 100644
--- a/test/Transforms/SLPVectorizer/AArch64/nontemporal.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/nontemporal.ll
@@ -1,12 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -basicaa -slp-vectorizer -dce < %s | FileCheck %s
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios5.0.0"
 
-; CHECK-LABEL: @foo
 define void @foo(float* noalias %a, float* noalias %b, float* noalias %c) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4, !nontemporal !0
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP4]], <4 x float>* [[TMP5]], align 4, !nontemporal !0
+; CHECK-NEXT:    ret void
+;
 entry:
 ; Check that we don't lose !nontemporal hint when vectorizing loads.
-; CHECK: %{{[0-9]*}} = load <4 x float>, <4 x float>* %{{[0-9]+}}, align 4, !nontemporal !0
   %b1 = load float, float* %b, align 4, !nontemporal !0
   %arrayidx.1 = getelementptr inbounds float, float* %b, i64 1
   %b2 = load float, float* %arrayidx.1, align 4, !nontemporal !0
@@ -16,7 +26,6 @@
   %b4 = load float, float* %arrayidx.3, align 4, !nontemporal !0
 
 ; Check that we don't introduce !nontemporal hint when the original scalar loads didn't have it.
-; CHECK: %{{[0-9]*}} = load <4 x float>, <4 x float>* %{{[0-9]+}}, align 4{{$}}
   %c1 = load float, float* %c, align 4
   %arrayidx2.1 = getelementptr inbounds float, float* %c, i64 1
   %c2 = load float, float* %arrayidx2.1, align 4
@@ -31,7 +40,6 @@
   %a4 = fadd float %b4, %c4
 
 ; Check that we don't lose !nontemporal hint when vectorizing stores.
-; CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 4, !nontemporal !0
   store float %a1, float* %a, align 4, !nontemporal !0
   %arrayidx3.1 = getelementptr inbounds float, float* %a, i64 1
   store float %a2, float* %arrayidx3.1, align 4, !nontemporal !0
@@ -40,16 +48,21 @@
   %arrayidx3.3 = getelementptr inbounds float, float* %a, i64 3
   store float %a4, float* %arrayidx3.3, align 4, !nontemporal !0
 
-; CHECK: ret void
   ret void
 }
 
-; CHECK-LABEL: @foo2
 define void @foo2(float* noalias %a, float* noalias %b) {
+; CHECK-LABEL: @foo2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP1]], <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
 ; Check that we don't mark vector load with !nontemporal attribute if some of
 ; the original scalar loads don't have it.
-; CHECK: %{{[0-9]*}} = load <4 x float>, <4 x float>* %{{[0-9]+}}, align 4{{$}}
   %b1 = load float, float* %b, align 4, !nontemporal !0
   %arrayidx.1 = getelementptr inbounds float, float* %b, i64 1
   %b2 = load float, float* %arrayidx.1, align 4
@@ -60,7 +73,6 @@
 
 ; Check that we don't mark vector store with !nontemporal attribute if some of
 ; the original scalar stores don't have it.
-; CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 4{{$}}
   store float %b1, float* %a, align 4, !nontemporal !0
   %arrayidx3.1 = getelementptr inbounds float, float* %a, i64 1
   store float %b2, float* %arrayidx3.1, align 4
@@ -69,7 +81,6 @@
   %arrayidx3.3 = getelementptr inbounds float, float* %a, i64 3
   store float %b4, float* %arrayidx3.3, align 4, !nontemporal !0
 
-; CHECK: ret void
   ret void
 }
 
diff --git a/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll b/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
index 72c7082..8311f20 100644
--- a/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
@@ -1,13 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu -mcpu=cortex-a57 | FileCheck %s
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-; CHECK-LABEL: @test1
-; CHECK: load <4 x i32>
-; CHECK: add nsw <4 x i32>
-; CHECK: sdiv <4 x i32>
-
 define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 3
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[C]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[A]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %0 = load i32, i32* %b, align 4
   %1 = load i32, i32* %c, align 4
diff --git a/test/Transforms/SLPVectorizer/AArch64/tsc-s352.ll b/test/Transforms/SLPVectorizer/AArch64/tsc-s352.ll
index 9ca039a..6d7a6a2 100644
--- a/test/Transforms/SLPVectorizer/AArch64/tsc-s352.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/tsc-s352.ll
@@ -4,7 +4,7 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 
 ; This test is reduced from the TSVC evaluation of vectorizers:
-; http://www.llvm.org/viewvc/llvm-project/test-suite/trunk/MultiSource/Benchmarks/TSVC/LoopRerolling-flt/tsc.c?view=log
+; https://github.com/llvm/llvm-test-suite/commits/master/MultiSource/Benchmarks/TSVC/LoopRerolling-flt/tsc.c
 ; Two loads and an fmul are expected to be vectorized to <2 x float>.
 ; Otherwise, performance will suffer on Cortex-A53.
 ; See https://bugs.llvm.org/show_bug.cgi?id=36280 for more details.
diff --git a/test/Transforms/SLPVectorizer/ARM/memory.ll b/test/Transforms/SLPVectorizer/ARM/memory.ll
index 57d7cce..70e8703 100644
--- a/test/Transforms/SLPVectorizer/ARM/memory.ll
+++ b/test/Transforms/SLPVectorizer/ARM/memory.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
@@ -5,10 +6,17 @@
 ; On swift, unaligned <2 x double> stores need 4 uops, and it is therefore
 ; cheaper to do this as scalar stores.
 
-; CHECK-LABEL: expensive_double_store
-; CHECK-NOT: load <2 x double>
-; CHECK-NOT: store <2 x double>
 define void @expensive_double_store(double* noalias %dst, double* noalias %src, i64 %count) {
+; CHECK-LABEL: @expensive_double_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[SRC:%.*]], align 8
+; CHECK-NEXT:    store double [[TMP0]], double* [[DST:%.*]], align 8
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[SRC]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[ARRAYIDX2]], align 8
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[DST]], i64 1
+; CHECK-NEXT:    store double [[TMP1]], double* [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %0 = load double, double* %src, align 8
   store double %0, double* %dst, align 8
diff --git a/test/Transforms/SLPVectorizer/ARM/sroa.ll b/test/Transforms/SLPVectorizer/ARM/sroa.ll
index 65e0260..c43e8f1 100644
--- a/test/Transforms/SLPVectorizer/ARM/sroa.ll
+++ b/test/Transforms/SLPVectorizer/ARM/sroa.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mcpu=swift -mtriple=thumbv7-apple-ios -basicaa -slp-vectorizer < %s | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
@@ -8,11 +9,45 @@
 ; because the scalar versions of the shl/or are handled by the
 ; backend and disappear, the vectorized code stays.
 
-; CHECK-LABEL: SROAed
-; CHECK-NOT: shl nuw <2 x i64>
-; CHECK-NOT: or <2 x i64>
-
 define void @SROAed(%class.Complex* noalias nocapture sret %agg.result, [4 x i32] %a.coerce, [4 x i32] %b.coerce) {
+; CHECK-LABEL: @SROAed(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE:%.*]], 0
+; CHECK-NEXT:    [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_0_EXTRACT]] to i64
+; CHECK-NEXT:    [[A_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 1
+; CHECK-NEXT:    [[A_SROA_0_4_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_1_EXTRACT]] to i64
+; CHECK-NEXT:    [[A_SROA_0_4_INSERT_SHIFT:%.*]] = shl nuw i64 [[A_SROA_0_4_INSERT_EXT]], 32
+; CHECK-NEXT:    [[A_SROA_0_4_INSERT_INSERT:%.*]] = or i64 [[A_SROA_0_4_INSERT_SHIFT]], [[A_SROA_0_0_INSERT_EXT]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i64 [[A_SROA_0_4_INSERT_INSERT]] to double
+; CHECK-NEXT:    [[A_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 2
+; CHECK-NEXT:    [[A_SROA_3_8_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_2_EXTRACT]] to i64
+; CHECK-NEXT:    [[A_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 3
+; CHECK-NEXT:    [[A_SROA_3_12_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_3_EXTRACT]] to i64
+; CHECK-NEXT:    [[A_SROA_3_12_INSERT_SHIFT:%.*]] = shl nuw i64 [[A_SROA_3_12_INSERT_EXT]], 32
+; CHECK-NEXT:    [[A_SROA_3_12_INSERT_INSERT:%.*]] = or i64 [[A_SROA_3_12_INSERT_SHIFT]], [[A_SROA_3_8_INSERT_EXT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[A_SROA_3_12_INSERT_INSERT]] to double
+; CHECK-NEXT:    [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE:%.*]], 0
+; CHECK-NEXT:    [[B_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_0_EXTRACT]] to i64
+; CHECK-NEXT:    [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 1
+; CHECK-NEXT:    [[B_SROA_0_4_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_1_EXTRACT]] to i64
+; CHECK-NEXT:    [[B_SROA_0_4_INSERT_SHIFT:%.*]] = shl nuw i64 [[B_SROA_0_4_INSERT_EXT]], 32
+; CHECK-NEXT:    [[B_SROA_0_4_INSERT_INSERT:%.*]] = or i64 [[B_SROA_0_4_INSERT_SHIFT]], [[B_SROA_0_0_INSERT_EXT]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64 [[B_SROA_0_4_INSERT_INSERT]] to double
+; CHECK-NEXT:    [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 2
+; CHECK-NEXT:    [[B_SROA_3_8_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_2_EXTRACT]] to i64
+; CHECK-NEXT:    [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 3
+; CHECK-NEXT:    [[B_SROA_3_12_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_3_EXTRACT]] to i64
+; CHECK-NEXT:    [[B_SROA_3_12_INSERT_SHIFT:%.*]] = shl nuw i64 [[B_SROA_3_12_INSERT_EXT]], 32
+; CHECK-NEXT:    [[B_SROA_3_12_INSERT_INSERT:%.*]] = or i64 [[B_SROA_3_12_INSERT_SHIFT]], [[B_SROA_3_8_INSERT_EXT]]
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64 [[B_SROA_3_12_INSERT_INSERT]] to double
+; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP0]], [[TMP2]]
+; CHECK-NEXT:    [[ADD3:%.*]] = fadd double [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[RE_I_I:%.*]] = getelementptr inbounds [[CLASS_COMPLEX:%.*]], %class.Complex* [[AGG_RESULT:%.*]], i32 0, i32 0
+; CHECK-NEXT:    store double [[ADD]], double* [[RE_I_I]], align 4
+; CHECK-NEXT:    [[IM_I_I:%.*]] = getelementptr inbounds [[CLASS_COMPLEX]], %class.Complex* [[AGG_RESULT]], i32 0, i32 1
+; CHECK-NEXT:    store double [[ADD3]], double* [[IM_I_I]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %a.coerce.fca.0.extract = extractvalue [4 x i32] %a.coerce, 0
   %a.sroa.0.0.insert.ext = zext i32 %a.coerce.fca.0.extract to i64
diff --git a/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic.ll b/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic.ll
new file mode 100644
index 0000000..3eae2d0
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -o - -S -slp-threshold=-1000 | FileCheck %s
+
+target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx--nvidiacl"
+
+; CTLZ cannot be vectorized currently because the second argument is a scalar
+; for both the scalar and vector forms of the intrinsic. In the future it
+; should be possible to vectorize such functions.
+; Test causes an assert if LLVM tries to vectorize CTLZ.
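+;
+; For reference (editorial note, not part of the original test): both forms of
+; the intrinsic keep the i1 is_zero_undef flag as a scalar operand:
+;   declare i8       @llvm.ctlz.i8(i8, i1)
+;   declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1)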
+
+define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
+; CHECK-LABEL: @cltz_test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT:    [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
+; CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i8> undef, i8 [[CALL_I]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i8> [[X]], i32 1
+; CHECK-NEXT:    [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i8> [[VECINIT]], i8 [[CALL_I4]], i32 1
+; CHECK-NEXT:    ret <2 x i8> [[VECINIT2]]
+;
+entry:
+  %0 = extractelement <2 x i8> %x, i32 0
+  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+  %1 = extractelement <2 x i8> %x, i32 1
+  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+  ret <2 x i8> %vecinit2
+}
+
+define <2 x i8> @cltz_test2(<2 x i8> %x) #1 {
+; CHECK-LABEL: @cltz_test2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i8> [[X]], i32 1
+; CHECK-NEXT:    [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
+; CHECK-NEXT:    [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i8> undef, i8 [[CALL_I]], i32 0
+; CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i8> [[VECINIT]], i8 [[CALL_I4]], i32 1
+; CHECK-NEXT:    ret <2 x i8> [[VECINIT2]]
+;
+entry:
+  %0 = extractelement <2 x i8> %x, i32 0
+  %1 = extractelement <2 x i8> %x, i32 1
+  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+  ret <2 x i8> %vecinit2
+}
+
+declare i8 @llvm.ctlz.i8(i8, i1)
+
+attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
diff --git a/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll b/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll
index d8b80f4..7038b0f 100644
--- a/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll
+++ b/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll
@@ -1,20 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=nvptx64-nvidia-cuda -mcpu=sm_70 | FileCheck %s
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=nvptx64-nvidia-cuda -mcpu=sm_40 | FileCheck %s -check-prefix=NOVECTOR
 
-; CHECK-LABEL: @fusion
-; CHECK: load <2 x half>, <2 x half>*
-; CHECK: fmul fast <2 x half>
-; CHECK: fadd fast <2 x half>
-; CHECK: store <2 x half> %4, <2 x half>
-
-; NOVECTOR-LABEL: @fusion
-; NOVECTOR: load half
-; NOVECTOR: fmul fast half
-; NOVECTOR: fadd fast half
-; NOVECTOR: fmul fast half
-; NOVECTOR: fadd fast half
-; NOVECTOR: store half
 define void @fusion(i8* noalias nocapture align 256 dereferenceable(19267584) %arg, i8* noalias nocapture readonly align 256 dereferenceable(19267584) %arg1, i32 %arg2, i32 %arg3) local_unnamed_addr #0 {
+; CHECK-LABEL: @fusion(
+; CHECK-NEXT:    [[TMP:%.*]] = shl nuw nsw i32 [[ARG2:%.*]], 6
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP]], [[ARG3:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 2
+; CHECK-NEXT:    [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = or i64 [[TMP6]], 1
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i8* [[ARG1:%.*]] to half*
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds half, half* [[TMP10]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast i8* [[ARG:%.*]] to half*
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds half, half* [[TMP15]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds half, half* [[TMP10]], i64 [[TMP7]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half* [[TMP11]] to <2 x half>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast <2 x half> <half 0xH5380, half 0xH5380>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd fast <2 x half> <half 0xH57F0, half 0xH57F0>, [[TMP3]]
+; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds half, half* [[TMP15]], i64 [[TMP7]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast half* [[TMP16]] to <2 x half>*
+; CHECK-NEXT:    store <2 x half> [[TMP4]], <2 x half>* [[TMP5]], align 8
+; CHECK-NEXT:    ret void
+;
+; NOVECTOR-LABEL: @fusion(
+; NOVECTOR-NEXT:    [[TMP:%.*]] = shl nuw nsw i32 [[ARG2:%.*]], 6
+; NOVECTOR-NEXT:    [[TMP4:%.*]] = or i32 [[TMP]], [[ARG3:%.*]]
+; NOVECTOR-NEXT:    [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 2
+; NOVECTOR-NEXT:    [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
+; NOVECTOR-NEXT:    [[TMP7:%.*]] = or i64 [[TMP6]], 1
+; NOVECTOR-NEXT:    [[TMP10:%.*]] = bitcast i8* [[ARG1:%.*]] to half*
+; NOVECTOR-NEXT:    [[TMP11:%.*]] = getelementptr inbounds half, half* [[TMP10]], i64 [[TMP6]]
+; NOVECTOR-NEXT:    [[TMP12:%.*]] = load half, half* [[TMP11]], align 8
+; NOVECTOR-NEXT:    [[TMP13:%.*]] = fmul fast half [[TMP12]], 0xH5380
+; NOVECTOR-NEXT:    [[TMP14:%.*]] = fadd fast half [[TMP13]], 0xH57F0
+; NOVECTOR-NEXT:    [[TMP15:%.*]] = bitcast i8* [[ARG:%.*]] to half*
+; NOVECTOR-NEXT:    [[TMP16:%.*]] = getelementptr inbounds half, half* [[TMP15]], i64 [[TMP6]]
+; NOVECTOR-NEXT:    store half [[TMP14]], half* [[TMP16]], align 8
+; NOVECTOR-NEXT:    [[TMP17:%.*]] = getelementptr inbounds half, half* [[TMP10]], i64 [[TMP7]]
+; NOVECTOR-NEXT:    [[TMP18:%.*]] = load half, half* [[TMP17]], align 2
+; NOVECTOR-NEXT:    [[TMP19:%.*]] = fmul fast half [[TMP18]], 0xH5380
+; NOVECTOR-NEXT:    [[TMP20:%.*]] = fadd fast half [[TMP19]], 0xH57F0
+; NOVECTOR-NEXT:    [[TMP21:%.*]] = getelementptr inbounds half, half* [[TMP15]], i64 [[TMP7]]
+; NOVECTOR-NEXT:    store half [[TMP20]], half* [[TMP21]], align 2
+; NOVECTOR-NEXT:    ret void
+;
   %tmp = shl nuw nsw i32 %arg2, 6
   %tmp4 = or i32 %tmp, %arg3
   %tmp5 = shl nuw nsw i32 %tmp4, 2
diff --git a/test/Transforms/SLPVectorizer/PowerPC/pr27897.ll b/test/Transforms/SLPVectorizer/PowerPC/pr27897.ll
index dabb338..7f7df82 100644
--- a/test/Transforms/SLPVectorizer/PowerPC/pr27897.ll
+++ b/test/Transforms/SLPVectorizer/PowerPC/pr27897.ll
@@ -1,8 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=powerpc64-linux-gnu -mcpu=pwr8 -mattr=+vsx -slp-vectorizer < %s | FileCheck %s
 
 %struct.A = type { i8*, i8* }
 
 define i64 @foo(%struct.A* nocapture readonly %this) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[END_I:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[THIS:%.*]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8** [[END_I]] to i64*
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast %struct.A* [[THIS]] to i64*
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 8
+; CHECK-NEXT:    [[SUB_PTR_SUB_I:%.*]] = sub i64 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[SUB_PTR_SUB_I]], 9
+; CHECK-NEXT:    br i1 [[CMP]], label [[RETURN:%.*]], label [[LOR_LHS_FALSE:%.*]]
+; CHECK:       lor.lhs.false:
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to i8*
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP1]] to i8*
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp ugt i8* [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    [[DOT:%.*]] = select i1 [[CMP2]], i64 2, i64 -1
+; CHECK-NEXT:    ret i64 [[DOT]]
+; CHECK:       return:
+; CHECK-NEXT:    ret i64 2
+;
 entry:
   %end.i = getelementptr inbounds %struct.A, %struct.A* %this, i64 0, i32 1
   %0 = bitcast i8** %end.i to i64*
@@ -24,6 +44,3 @@
   ret i64 2
 }
 
-; CHECK: load i64
-; CHECK-NOT: load <2 x i64>
-; CHECK-NOT: extractelement
diff --git a/test/Transforms/SLPVectorizer/X86/PR40310.ll b/test/Transforms/SLPVectorizer/X86/PR40310.ll
new file mode 100644
index 0000000..74e62e0
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/PR40310.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake < %s | FileCheck %s
+
+define void @mainTest(i32 %param, i32 * %vals, i32 %len) {
+; CHECK-LABEL: @mainTest(
+; CHECK-NEXT:  bci_15.preheader:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x i32> <i32 31, i32 undef>, i32 [[PARAM:%.*]], i32 1
+; CHECK-NEXT:    br label [[BCI_15:%.*]]
+; CHECK:       bci_15:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi <2 x i32> [ [[TMP11:%.*]], [[BCI_15]] ], [ [[TMP0]], [[BCI_15_PREHEADER:%.*]] ]
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <16 x i32> [[SHUFFLE]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <16 x i32> [[SHUFFLE]], i32 15
+; CHECK-NEXT:    store atomic i32 [[TMP3]], i32* [[VALS:%.*]] unordered, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = add <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 -1>, [[SHUFFLE]]
+; CHECK-NEXT:    [[V14:%.*]] = and i32 [[TMP2]], undef
+; CHECK-NEXT:    [[V16:%.*]] = and i32 undef, [[V14]]
+; CHECK-NEXT:    [[V18:%.*]] = and i32 undef, [[V16]]
+; CHECK-NEXT:    [[V20:%.*]] = and i32 undef, [[V18]]
+; CHECK-NEXT:    [[V22:%.*]] = and i32 undef, [[V20]]
+; CHECK-NEXT:    [[V24:%.*]] = and i32 undef, [[V22]]
+; CHECK-NEXT:    [[V26:%.*]] = and i32 undef, [[V24]]
+; CHECK-NEXT:    [[V28:%.*]] = and i32 undef, [[V26]]
+; CHECK-NEXT:    [[V30:%.*]] = and i32 undef, [[V28]]
+; CHECK-NEXT:    [[V32:%.*]] = and i32 undef, [[V30]]
+; CHECK-NEXT:    [[V34:%.*]] = and i32 undef, [[V32]]
+; CHECK-NEXT:    [[V36:%.*]] = and i32 undef, [[V34]]
+; CHECK-NEXT:    [[V38:%.*]] = and i32 undef, [[V36]]
+; CHECK-NEXT:    [[V40:%.*]] = and i32 undef, [[V38]]
+; CHECK-NEXT:    [[V42:%.*]] = and i32 undef, [[V40]]
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i32> undef, i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <16 x i32> [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x i32> [[TMP5]], i32 [[TMP6]], i32 1
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i32> <i32 16, i32 undef>, i32 [[V42]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = add <2 x i32> [[TMP7]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = and <2 x i32> [[TMP7]], [[TMP8]]
+; CHECK-NEXT:    [[TMP11]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> [[TMP10]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <16 x i32> [[TMP4]], <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX:%.*]] = and <16 x i32> [[TMP4]], [[RDX_SHUF]]
+; CHECK-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <16 x i32> [[BIN_RDX]], <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX2:%.*]] = and <16 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <16 x i32> [[BIN_RDX2]], <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX4:%.*]] = and <16 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT:    [[RDX_SHUF5:%.*]] = shufflevector <16 x i32> [[BIN_RDX4]], <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX6:%.*]] = and <16 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <16 x i32> [[BIN_RDX6]], i32 0
+; CHECK-NEXT:    [[OP_EXTRA:%.*]] = and i32 [[TMP12]], [[TMP2]]
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <2 x i32> [[TMP11]], i32 1
+; CHECK-NEXT:    br i1 true, label [[BCI_15]], label [[LOOPEXIT:%.*]]
+; CHECK:       loopexit:
+; CHECK-NEXT:    ret void
+;
+bci_15.preheader:
+  br label %bci_15
+
+bci_15:                                       ; preds = %bci_15.preheader, %bci_15
+  %local_0_ = phi i32 [ %v43, %bci_15 ], [ %param, %bci_15.preheader ]
+  %local_4_ = phi i32 [ %v44, %bci_15 ], [ 31, %bci_15.preheader ]
+  %v12 = add i32 %local_0_, -1
+  store atomic i32 %local_0_, i32 * %vals unordered, align 4
+  %v13 = add i32 %local_4_, 1
+  %v14 = and i32 %local_4_, %v12
+  %v15 = add i32 %local_4_, 2
+  %v16 = and i32 %v13, %v14
+  %v17 = add i32 %local_4_, 3
+  %v18 = and i32 %v15, %v16
+  %v19 = add i32 %local_4_, 4
+  %v20 = and i32 %v17, %v18
+  %v21 = add i32 %local_4_, 5
+  %v22 = and i32 %v19, %v20
+  %v23 = add i32 %local_4_, 6
+  %v24 = and i32 %v21, %v22
+  %v25 = add i32 %local_4_, 7
+  %v26 = and i32 %v23, %v24
+  %v27 = add i32 %local_4_, 8
+  %v28 = and i32 %v25, %v26
+  %v29 = add i32 %local_4_, 9
+  %v30 = and i32 %v27, %v28
+  %v31 = add i32 %local_4_, 10
+  %v32 = and i32 %v29, %v30
+  %v33 = add i32 %local_4_, 11
+  %v34 = and i32 %v31, %v32
+  %v35 = add i32 %local_4_, 12
+  %v36 = and i32 %v33, %v34
+  %v37 = add i32 %local_4_, 13
+  %v38 = and i32 %v35, %v36
+  %v39 = add i32 %local_4_, 14
+  %v40 = and i32 %v37, %v38
+  %v41 = add i32 %local_4_, 15
+  %v42 = and i32 %v39, %v40
+  %v43 = and i32 %v41, %v42
+  %v44 = add i32 %local_4_, 16
+  br i1 true, label %bci_15, label %loopexit
+
+loopexit:
+  ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/align.ll b/test/Transforms/SLPVectorizer/X86/align.ll
index b74b709..5c7c4ce 100644
--- a/test/Transforms/SLPVectorizer/X86/align.ll
+++ b/test/Transforms/SLPVectorizer/X86/align.ll
@@ -1,16 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; Simple 3-pair chain with loads and stores
-; CHECK-LABEL: @test1
 define void @test1(double* %a, double* %b, double* %c) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[AGG_TMP_I_I_SROA_0:%.*]] = alloca [3 x double], align 16
+; CHECK-NEXT:    [[STORE1:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[AGG_TMP_I_I_SROA_0]], i64 0, i64 1
+; CHECK-NEXT:    [[STORE2:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[AGG_TMP_I_I_SROA_0]], i64 0, i64 2
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[B]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[STORE1]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
-; CHECK: %[[V0:[0-9]+]] = load <2 x double>, <2 x double>* %[[V2:[0-9]+]], align 8
-  %i0 = load double, double* %a 
-  %i1 = load double, double* %b 
+  %i0 = load double, double* %a
+  %i1 = load double, double* %b
   %mul = fmul double %i0, %i1
   %store1 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
   %store2 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
@@ -19,23 +34,29 @@
   %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double, double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
-; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
   store double %mul, double* %store1
   store double %mul5, double* %store2, align 16
-; CHECK: ret
   ret void
 }
 
 ; Float has 4 byte ABI alignment on x86_64. We must use the alignment of the
 ; value being loaded/stored, not the alignment of the pointer type.
 
-; CHECK-LABEL: @test2
-; CHECK-NOT: align 8
-; CHECK: load <4 x float>{{.*}}, align 4
-; CHECK: store <4 x float>{{.*}}, align 4
-; CHECK: ret
-
 define void @test2(float * %a, float * %b) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds float, float* [[A]], i64 2
+; CHECK-NEXT:    [[A3:%.*]] = getelementptr inbounds float, float* [[A]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[A]] to <4 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT:    [[B1:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[B2:%.*]] = getelementptr inbounds float, float* [[B]], i64 2
+; CHECK-NEXT:    [[B3:%.*]] = getelementptr inbounds float, float* [[B]], i64 3
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[B]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP1]], <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %l0 = load float, float* %a
   %a1 = getelementptr inbounds float, float* %a, i64 1
diff --git a/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll b/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
new file mode 100644
index 0000000..86cd8da
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/arith-add-ssat.ll
@@ -0,0 +1,708 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SLM
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256BW
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8  = common global [64 x i8] zeroinitializer, align 64
+@b8  = common global [64 x i8] zeroinitializer, align 64
+@c8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.sadd.sat.i64(i64, i64)
+declare i32 @llvm.sadd.sat.i32(i32, i32)
+declare i16 @llvm.sadd.sat.i16(i16, i16)
+declare i8  @llvm.sadd.sat.i8 (i8 , i8 )
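+;
+; Editorial note (not part of the original test): sadd.sat clamps the exact
+; sum to the signed range of the result type, e.g. for i8:
+;   llvm.sadd.sat.i8(i8 100,  i8 100) -->  127 (INT8_MAX)
+;   llvm.sadd.sat.i8(i8 -100, i8 -50) --> -128 (INT8_MIN)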
+
+define void @add_v8i64() {
+; CHECK-LABEL: @add_v8i64(
+; CHECK-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; CHECK-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; CHECK-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; CHECK-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; CHECK-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; CHECK-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; CHECK-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; CHECK-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; CHECK-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; CHECK-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; CHECK-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; CHECK-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; CHECK-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; CHECK-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; CHECK-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; CHECK-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; CHECK-NEXT:    [[R0:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A0]], i64 [[B0]])
+; CHECK-NEXT:    [[R1:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A1]], i64 [[B1]])
+; CHECK-NEXT:    [[R2:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A2]], i64 [[B2]])
+; CHECK-NEXT:    [[R3:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A3]], i64 [[B3]])
+; CHECK-NEXT:    [[R4:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A4]], i64 [[B4]])
+; CHECK-NEXT:    [[R5:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A5]], i64 [[B5]])
+; CHECK-NEXT:    [[R6:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A6]], i64 [[B6]])
+; CHECK-NEXT:    [[R7:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A7]], i64 [[B7]])
+; CHECK-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; CHECK-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; CHECK-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; CHECK-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; CHECK-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; CHECK-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; CHECK-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; CHECK-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; CHECK-NEXT:    ret void
+;
+  %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+  %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+  %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+  %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+  %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+  %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+  %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+  %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+  %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+  %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+  %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+  %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+  %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+  %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+  %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+  %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.sadd.sat.i64(i64 %a0, i64 %b0)
+  %r1 = call i64 @llvm.sadd.sat.i64(i64 %a1, i64 %b1)
+  %r2 = call i64 @llvm.sadd.sat.i64(i64 %a2, i64 %b2)
+  %r3 = call i64 @llvm.sadd.sat.i64(i64 %a3, i64 %b3)
+  %r4 = call i64 @llvm.sadd.sat.i64(i64 %a4, i64 %b4)
+  %r5 = call i64 @llvm.sadd.sat.i64(i64 %a5, i64 %b5)
+  %r6 = call i64 @llvm.sadd.sat.i64(i64 %a6, i64 %b6)
+  %r7 = call i64 @llvm.sadd.sat.i64(i64 %a7, i64 %b7)
+  store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+  store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+  store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+  store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+  store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+  store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+  store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+  store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+  ret void
+}
+
+define void @add_v16i32() {
+; CHECK-LABEL: @add_v16i32(
+; CHECK-NEXT:    [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
+; CHECK-NEXT:    [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
+; CHECK-NEXT:    [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
+; CHECK-NEXT:    [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
+; CHECK-NEXT:    [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
+; CHECK-NEXT:    [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
+; CHECK-NEXT:    [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
+; CHECK-NEXT:    [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
+; CHECK-NEXT:    [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
+; CHECK-NEXT:    [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
+; CHECK-NEXT:    [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+; CHECK-NEXT:    [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+; CHECK-NEXT:    [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+; CHECK-NEXT:    [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+; CHECK-NEXT:    [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+; CHECK-NEXT:    [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+; CHECK-NEXT:    [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4
+; CHECK-NEXT:    [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4
+; CHECK-NEXT:    [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4
+; CHECK-NEXT:    [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4
+; CHECK-NEXT:    [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4
+; CHECK-NEXT:    [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4
+; CHECK-NEXT:    [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4
+; CHECK-NEXT:    [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4
+; CHECK-NEXT:    [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4
+; CHECK-NEXT:    [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4
+; CHECK-NEXT:    [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+; CHECK-NEXT:    [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+; CHECK-NEXT:    [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+; CHECK-NEXT:    [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+; CHECK-NEXT:    [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+; CHECK-NEXT:    [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+; CHECK-NEXT:    [[R0:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A0]], i32 [[B0]])
+; CHECK-NEXT:    [[R1:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A1]], i32 [[B1]])
+; CHECK-NEXT:    [[R2:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A2]], i32 [[B2]])
+; CHECK-NEXT:    [[R3:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A3]], i32 [[B3]])
+; CHECK-NEXT:    [[R4:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A4]], i32 [[B4]])
+; CHECK-NEXT:    [[R5:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A5]], i32 [[B5]])
+; CHECK-NEXT:    [[R6:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A6]], i32 [[B6]])
+; CHECK-NEXT:    [[R7:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A7]], i32 [[B7]])
+; CHECK-NEXT:    [[R8:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A8]], i32 [[B8]])
+; CHECK-NEXT:    [[R9:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A9]], i32 [[B9]])
+; CHECK-NEXT:    [[R10:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A10]], i32 [[B10]])
+; CHECK-NEXT:    [[R11:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A11]], i32 [[B11]])
+; CHECK-NEXT:    [[R12:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A12]], i32 [[B12]])
+; CHECK-NEXT:    [[R13:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A13]], i32 [[B13]])
+; CHECK-NEXT:    [[R14:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A14]], i32 [[B14]])
+; CHECK-NEXT:    [[R15:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[A15]], i32 [[B15]])
+; CHECK-NEXT:    store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4
+; CHECK-NEXT:    store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4
+; CHECK-NEXT:    store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4
+; CHECK-NEXT:    store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4
+; CHECK-NEXT:    store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4
+; CHECK-NEXT:    store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4
+; CHECK-NEXT:    store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4
+; CHECK-NEXT:    store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4
+; CHECK-NEXT:    store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4
+; CHECK-NEXT:    store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4
+; CHECK-NEXT:    store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+; CHECK-NEXT:    store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+; CHECK-NEXT:    store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+; CHECK-NEXT:    store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+; CHECK-NEXT:    store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+; CHECK-NEXT:    store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+; CHECK-NEXT:    ret void
+;
+  %a0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+  %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+  %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+  %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+  %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+  %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+  %b0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+  %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+  %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+  %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+  %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+  %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.sadd.sat.i32(i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.sadd.sat.i32(i32 %a1 , i32 %b1 )
+  %r2  = call i32 @llvm.sadd.sat.i32(i32 %a2 , i32 %b2 )
+  %r3  = call i32 @llvm.sadd.sat.i32(i32 %a3 , i32 %b3 )
+  %r4  = call i32 @llvm.sadd.sat.i32(i32 %a4 , i32 %b4 )
+  %r5  = call i32 @llvm.sadd.sat.i32(i32 %a5 , i32 %b5 )
+  %r6  = call i32 @llvm.sadd.sat.i32(i32 %a6 , i32 %b6 )
+  %r7  = call i32 @llvm.sadd.sat.i32(i32 %a7 , i32 %b7 )
+  %r8  = call i32 @llvm.sadd.sat.i32(i32 %a8 , i32 %b8 )
+  %r9  = call i32 @llvm.sadd.sat.i32(i32 %a9 , i32 %b9 )
+  %r10 = call i32 @llvm.sadd.sat.i32(i32 %a10, i32 %b10)
+  %r11 = call i32 @llvm.sadd.sat.i32(i32 %a11, i32 %b11)
+  %r12 = call i32 @llvm.sadd.sat.i32(i32 %a12, i32 %b12)
+  %r13 = call i32 @llvm.sadd.sat.i32(i32 %a13, i32 %b13)
+  %r14 = call i32 @llvm.sadd.sat.i32(i32 %a14, i32 %b14)
+  %r15 = call i32 @llvm.sadd.sat.i32(i32 %a15, i32 %b15)
+  store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+  store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+  store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+  store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+  store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+  store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+  store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+  store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+  store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+  store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+  store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+  store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+  store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+  store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+  store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+  store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+  ret void
+}
+
+define void @add_v32i16() {
+; SSE-LABEL: @add_v32i16(
+; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SSE-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SSE-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SSE-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SSE-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    ret void
+;
+; SLM-LABEL: @add_v32i16(
+; SLM-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SLM-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SLM-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SLM-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SLM-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    ret void
+;
+; AVX-LABEL: @add_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @add_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX512-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX512-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+  %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+  %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+  %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+  %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+  %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+  %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+  %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+  %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+  %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+  %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+  %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+  %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+  %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+  %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+  %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+  %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+  %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+  %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+  %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+  %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+  %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+  %b0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+  %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+  %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+  %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+  %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+  %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+  %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+  %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+  %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+  %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+  %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+  %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+  %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+  %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+  %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+  %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+  %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+  %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+  %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+  %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+  %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+  %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.sadd.sat.i16(i16 %a0 , i16 %b0 )
+  %r1  = call i16 @llvm.sadd.sat.i16(i16 %a1 , i16 %b1 )
+  %r2  = call i16 @llvm.sadd.sat.i16(i16 %a2 , i16 %b2 )
+  %r3  = call i16 @llvm.sadd.sat.i16(i16 %a3 , i16 %b3 )
+  %r4  = call i16 @llvm.sadd.sat.i16(i16 %a4 , i16 %b4 )
+  %r5  = call i16 @llvm.sadd.sat.i16(i16 %a5 , i16 %b5 )
+  %r6  = call i16 @llvm.sadd.sat.i16(i16 %a6 , i16 %b6 )
+  %r7  = call i16 @llvm.sadd.sat.i16(i16 %a7 , i16 %b7 )
+  %r8  = call i16 @llvm.sadd.sat.i16(i16 %a8 , i16 %b8 )
+  %r9  = call i16 @llvm.sadd.sat.i16(i16 %a9 , i16 %b9 )
+  %r10 = call i16 @llvm.sadd.sat.i16(i16 %a10, i16 %b10)
+  %r11 = call i16 @llvm.sadd.sat.i16(i16 %a11, i16 %b11)
+  %r12 = call i16 @llvm.sadd.sat.i16(i16 %a12, i16 %b12)
+  %r13 = call i16 @llvm.sadd.sat.i16(i16 %a13, i16 %b13)
+  %r14 = call i16 @llvm.sadd.sat.i16(i16 %a14, i16 %b14)
+  %r15 = call i16 @llvm.sadd.sat.i16(i16 %a15, i16 %b15)
+  %r16 = call i16 @llvm.sadd.sat.i16(i16 %a16, i16 %b16)
+  %r17 = call i16 @llvm.sadd.sat.i16(i16 %a17, i16 %b17)
+  %r18 = call i16 @llvm.sadd.sat.i16(i16 %a18, i16 %b18)
+  %r19 = call i16 @llvm.sadd.sat.i16(i16 %a19, i16 %b19)
+  %r20 = call i16 @llvm.sadd.sat.i16(i16 %a20, i16 %b20)
+  %r21 = call i16 @llvm.sadd.sat.i16(i16 %a21, i16 %b21)
+  %r22 = call i16 @llvm.sadd.sat.i16(i16 %a22, i16 %b22)
+  %r23 = call i16 @llvm.sadd.sat.i16(i16 %a23, i16 %b23)
+  %r24 = call i16 @llvm.sadd.sat.i16(i16 %a24, i16 %b24)
+  %r25 = call i16 @llvm.sadd.sat.i16(i16 %a25, i16 %b25)
+  %r26 = call i16 @llvm.sadd.sat.i16(i16 %a26, i16 %b26)
+  %r27 = call i16 @llvm.sadd.sat.i16(i16 %a27, i16 %b27)
+  %r28 = call i16 @llvm.sadd.sat.i16(i16 %a28, i16 %b28)
+  %r29 = call i16 @llvm.sadd.sat.i16(i16 %a29, i16 %b29)
+  %r30 = call i16 @llvm.sadd.sat.i16(i16 %a30, i16 %b30)
+  %r31 = call i16 @llvm.sadd.sat.i16(i16 %a31, i16 %b31)
+  store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+  store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+  store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+  store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+  store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+  store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+  store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+  store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+  store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+  store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+  store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+  store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+  store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+  store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+  store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+  store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+  store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+  store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+  store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+  store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+  store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+  store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+  store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+  store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+  store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+  store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+  store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+  store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+  store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+  store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+  store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+  store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+  ret void
+}
+
+define void @add_v64i8() {
+; CHECK-LABEL: @add_v64i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP5]])
+; CHECK-NEXT:    [[TMP10:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP2]], <16 x i8> [[TMP6]])
+; CHECK-NEXT:    [[TMP11:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP3]], <16 x i8> [[TMP7]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP8]])
+; CHECK-NEXT:    store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    ret void
+;
+  %a0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+  %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+  %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+  %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+  %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+  %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+  %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+  %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+  %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+  %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+  %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+  %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+  %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+  %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+  %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+  %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+  %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+  %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+  %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+  %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+  %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+  %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+  %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+  %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+  %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+  %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+  %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+  %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+  %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+  %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+  %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+  %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+  %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+  %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+  %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+  %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+  %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+  %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+  %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+  %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+  %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+  %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+  %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+  %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+  %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+  %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+  %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+  %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+  %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+  %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+  %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+  %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+  %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+  %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+  %b0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+  %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+  %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+  %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+  %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+  %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+  %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+  %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+  %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+  %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+  %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+  %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+  %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+  %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+  %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+  %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+  %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+  %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+  %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+  %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+  %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+  %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+  %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+  %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+  %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+  %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+  %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+  %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+  %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+  %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+  %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+  %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+  %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+  %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+  %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+  %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+  %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+  %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+  %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+  %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+  %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+  %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+  %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+  %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+  %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+  %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+  %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+  %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+  %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+  %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+  %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+  %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+  %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+  %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.sadd.sat.i8(i8 %a0 , i8 %b0 )
+  %r1  = call i8 @llvm.sadd.sat.i8(i8 %a1 , i8 %b1 )
+  %r2  = call i8 @llvm.sadd.sat.i8(i8 %a2 , i8 %b2 )
+  %r3  = call i8 @llvm.sadd.sat.i8(i8 %a3 , i8 %b3 )
+  %r4  = call i8 @llvm.sadd.sat.i8(i8 %a4 , i8 %b4 )
+  %r5  = call i8 @llvm.sadd.sat.i8(i8 %a5 , i8 %b5 )
+  %r6  = call i8 @llvm.sadd.sat.i8(i8 %a6 , i8 %b6 )
+  %r7  = call i8 @llvm.sadd.sat.i8(i8 %a7 , i8 %b7 )
+  %r8  = call i8 @llvm.sadd.sat.i8(i8 %a8 , i8 %b8 )
+  %r9  = call i8 @llvm.sadd.sat.i8(i8 %a9 , i8 %b9 )
+  %r10 = call i8 @llvm.sadd.sat.i8(i8 %a10, i8 %b10)
+  %r11 = call i8 @llvm.sadd.sat.i8(i8 %a11, i8 %b11)
+  %r12 = call i8 @llvm.sadd.sat.i8(i8 %a12, i8 %b12)
+  %r13 = call i8 @llvm.sadd.sat.i8(i8 %a13, i8 %b13)
+  %r14 = call i8 @llvm.sadd.sat.i8(i8 %a14, i8 %b14)
+  %r15 = call i8 @llvm.sadd.sat.i8(i8 %a15, i8 %b15)
+  %r16 = call i8 @llvm.sadd.sat.i8(i8 %a16, i8 %b16)
+  %r17 = call i8 @llvm.sadd.sat.i8(i8 %a17, i8 %b17)
+  %r18 = call i8 @llvm.sadd.sat.i8(i8 %a18, i8 %b18)
+  %r19 = call i8 @llvm.sadd.sat.i8(i8 %a19, i8 %b19)
+  %r20 = call i8 @llvm.sadd.sat.i8(i8 %a20, i8 %b20)
+  %r21 = call i8 @llvm.sadd.sat.i8(i8 %a21, i8 %b21)
+  %r22 = call i8 @llvm.sadd.sat.i8(i8 %a22, i8 %b22)
+  %r23 = call i8 @llvm.sadd.sat.i8(i8 %a23, i8 %b23)
+  %r24 = call i8 @llvm.sadd.sat.i8(i8 %a24, i8 %b24)
+  %r25 = call i8 @llvm.sadd.sat.i8(i8 %a25, i8 %b25)
+  %r26 = call i8 @llvm.sadd.sat.i8(i8 %a26, i8 %b26)
+  %r27 = call i8 @llvm.sadd.sat.i8(i8 %a27, i8 %b27)
+  %r28 = call i8 @llvm.sadd.sat.i8(i8 %a28, i8 %b28)
+  %r29 = call i8 @llvm.sadd.sat.i8(i8 %a29, i8 %b29)
+  %r30 = call i8 @llvm.sadd.sat.i8(i8 %a30, i8 %b30)
+  %r31 = call i8 @llvm.sadd.sat.i8(i8 %a31, i8 %b31)
+  %r32 = call i8 @llvm.sadd.sat.i8(i8 %a32, i8 %b32)
+  %r33 = call i8 @llvm.sadd.sat.i8(i8 %a33, i8 %b33)
+  %r34 = call i8 @llvm.sadd.sat.i8(i8 %a34, i8 %b34)
+  %r35 = call i8 @llvm.sadd.sat.i8(i8 %a35, i8 %b35)
+  %r36 = call i8 @llvm.sadd.sat.i8(i8 %a36, i8 %b36)
+  %r37 = call i8 @llvm.sadd.sat.i8(i8 %a37, i8 %b37)
+  %r38 = call i8 @llvm.sadd.sat.i8(i8 %a38, i8 %b38)
+  %r39 = call i8 @llvm.sadd.sat.i8(i8 %a39, i8 %b39)
+  %r40 = call i8 @llvm.sadd.sat.i8(i8 %a40, i8 %b40)
+  %r41 = call i8 @llvm.sadd.sat.i8(i8 %a41, i8 %b41)
+  %r42 = call i8 @llvm.sadd.sat.i8(i8 %a42, i8 %b42)
+  %r43 = call i8 @llvm.sadd.sat.i8(i8 %a43, i8 %b43)
+  %r44 = call i8 @llvm.sadd.sat.i8(i8 %a44, i8 %b44)
+  %r45 = call i8 @llvm.sadd.sat.i8(i8 %a45, i8 %b45)
+  %r46 = call i8 @llvm.sadd.sat.i8(i8 %a46, i8 %b46)
+  %r47 = call i8 @llvm.sadd.sat.i8(i8 %a47, i8 %b47)
+  %r48 = call i8 @llvm.sadd.sat.i8(i8 %a48, i8 %b48)
+  %r49 = call i8 @llvm.sadd.sat.i8(i8 %a49, i8 %b49)
+  %r50 = call i8 @llvm.sadd.sat.i8(i8 %a50, i8 %b50)
+  %r51 = call i8 @llvm.sadd.sat.i8(i8 %a51, i8 %b51)
+  %r52 = call i8 @llvm.sadd.sat.i8(i8 %a52, i8 %b52)
+  %r53 = call i8 @llvm.sadd.sat.i8(i8 %a53, i8 %b53)
+  %r54 = call i8 @llvm.sadd.sat.i8(i8 %a54, i8 %b54)
+  %r55 = call i8 @llvm.sadd.sat.i8(i8 %a55, i8 %b55)
+  %r56 = call i8 @llvm.sadd.sat.i8(i8 %a56, i8 %b56)
+  %r57 = call i8 @llvm.sadd.sat.i8(i8 %a57, i8 %b57)
+  %r58 = call i8 @llvm.sadd.sat.i8(i8 %a58, i8 %b58)
+  %r59 = call i8 @llvm.sadd.sat.i8(i8 %a59, i8 %b59)
+  %r60 = call i8 @llvm.sadd.sat.i8(i8 %a60, i8 %b60)
+  %r61 = call i8 @llvm.sadd.sat.i8(i8 %a61, i8 %b61)
+  %r62 = call i8 @llvm.sadd.sat.i8(i8 %a62, i8 %b62)
+  %r63 = call i8 @llvm.sadd.sat.i8(i8 %a63, i8 %b63)
+  store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+  store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+  store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+  store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+  store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+  store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+  store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+  store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+  store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+  store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+  store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+  store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+  store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+  store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+  store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+  store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+  store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+  store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+  store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+  store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+  store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+  store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+  store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+  store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+  store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+  store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+  store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+  store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+  store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+  store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+  store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+  store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+  store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+  store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+  store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+  store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+  store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+  store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+  store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+  store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+  store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+  store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+  store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+  store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+  store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+  store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+  store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+  store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+  store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+  store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+  store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+  store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+  store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+  store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+  store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+  store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+  store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+  store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+  store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+  store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+  store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+  store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+  store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+  store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+  ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll b/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
new file mode 100644
index 0000000..2747bba
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
@@ -0,0 +1,703 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SLM
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256BW
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8  = common global [64 x i8] zeroinitializer, align 64
+@b8  = common global [64 x i8] zeroinitializer, align 64
+@c8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i16 @llvm.uadd.sat.i16(i16, i16)
+declare i8  @llvm.uadd.sat.i8 (i8 , i8 )
+
+define void @add_v8i64() {
+; CHECK-LABEL: @add_v8i64(
+; CHECK-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; CHECK-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; CHECK-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; CHECK-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; CHECK-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; CHECK-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; CHECK-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; CHECK-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; CHECK-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; CHECK-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; CHECK-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; CHECK-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; CHECK-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; CHECK-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; CHECK-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; CHECK-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; CHECK-NEXT:    [[R0:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A0]], i64 [[B0]])
+; CHECK-NEXT:    [[R1:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A1]], i64 [[B1]])
+; CHECK-NEXT:    [[R2:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A2]], i64 [[B2]])
+; CHECK-NEXT:    [[R3:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A3]], i64 [[B3]])
+; CHECK-NEXT:    [[R4:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A4]], i64 [[B4]])
+; CHECK-NEXT:    [[R5:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A5]], i64 [[B5]])
+; CHECK-NEXT:    [[R6:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A6]], i64 [[B6]])
+; CHECK-NEXT:    [[R7:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A7]], i64 [[B7]])
+; CHECK-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; CHECK-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; CHECK-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; CHECK-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; CHECK-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; CHECK-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; CHECK-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; CHECK-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; CHECK-NEXT:    ret void
+;
+  %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+  %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+  %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+  %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+  %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+  %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+  %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+  %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+  %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+  %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+  %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+  %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+  %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+  %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+  %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+  %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.uadd.sat.i64(i64 %a0, i64 %b0)
+  %r1 = call i64 @llvm.uadd.sat.i64(i64 %a1, i64 %b1)
+  %r2 = call i64 @llvm.uadd.sat.i64(i64 %a2, i64 %b2)
+  %r3 = call i64 @llvm.uadd.sat.i64(i64 %a3, i64 %b3)
+  %r4 = call i64 @llvm.uadd.sat.i64(i64 %a4, i64 %b4)
+  %r5 = call i64 @llvm.uadd.sat.i64(i64 %a5, i64 %b5)
+  %r6 = call i64 @llvm.uadd.sat.i64(i64 %a6, i64 %b6)
+  %r7 = call i64 @llvm.uadd.sat.i64(i64 %a7, i64 %b7)
+  store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+  store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+  store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+  store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+  store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+  store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+  store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+  store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+  ret void
+}
+
+define void @add_v16i32() {
+; CHECK-LABEL: @add_v16i32(
+; CHECK-NEXT:    [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
+; CHECK-NEXT:    [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
+; CHECK-NEXT:    [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
+; CHECK-NEXT:    [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
+; CHECK-NEXT:    [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
+; CHECK-NEXT:    [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
+; CHECK-NEXT:    [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
+; CHECK-NEXT:    [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
+; CHECK-NEXT:    [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
+; CHECK-NEXT:    [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
+; CHECK-NEXT:    [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+; CHECK-NEXT:    [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+; CHECK-NEXT:    [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+; CHECK-NEXT:    [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+; CHECK-NEXT:    [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+; CHECK-NEXT:    [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+; CHECK-NEXT:    [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4
+; CHECK-NEXT:    [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4
+; CHECK-NEXT:    [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4
+; CHECK-NEXT:    [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4
+; CHECK-NEXT:    [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4
+; CHECK-NEXT:    [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4
+; CHECK-NEXT:    [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4
+; CHECK-NEXT:    [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4
+; CHECK-NEXT:    [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4
+; CHECK-NEXT:    [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4
+; CHECK-NEXT:    [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+; CHECK-NEXT:    [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+; CHECK-NEXT:    [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+; CHECK-NEXT:    [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+; CHECK-NEXT:    [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+; CHECK-NEXT:    [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+; CHECK-NEXT:    [[R0:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A0]], i32 [[B0]])
+; CHECK-NEXT:    [[R1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A1]], i32 [[B1]])
+; CHECK-NEXT:    [[R2:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A2]], i32 [[B2]])
+; CHECK-NEXT:    [[R3:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A3]], i32 [[B3]])
+; CHECK-NEXT:    [[R4:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A4]], i32 [[B4]])
+; CHECK-NEXT:    [[R5:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A5]], i32 [[B5]])
+; CHECK-NEXT:    [[R6:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A6]], i32 [[B6]])
+; CHECK-NEXT:    [[R7:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A7]], i32 [[B7]])
+; CHECK-NEXT:    [[R8:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A8]], i32 [[B8]])
+; CHECK-NEXT:    [[R9:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A9]], i32 [[B9]])
+; CHECK-NEXT:    [[R10:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A10]], i32 [[B10]])
+; CHECK-NEXT:    [[R11:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A11]], i32 [[B11]])
+; CHECK-NEXT:    [[R12:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A12]], i32 [[B12]])
+; CHECK-NEXT:    [[R13:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A13]], i32 [[B13]])
+; CHECK-NEXT:    [[R14:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A14]], i32 [[B14]])
+; CHECK-NEXT:    [[R15:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A15]], i32 [[B15]])
+; CHECK-NEXT:    store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4
+; CHECK-NEXT:    store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4
+; CHECK-NEXT:    store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4
+; CHECK-NEXT:    store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4
+; CHECK-NEXT:    store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4
+; CHECK-NEXT:    store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4
+; CHECK-NEXT:    store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4
+; CHECK-NEXT:    store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4
+; CHECK-NEXT:    store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4
+; CHECK-NEXT:    store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4
+; CHECK-NEXT:    store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+; CHECK-NEXT:    store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+; CHECK-NEXT:    store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+; CHECK-NEXT:    store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+; CHECK-NEXT:    store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+; CHECK-NEXT:    store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+; CHECK-NEXT:    ret void
+;
+  %a0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+  %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+  %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+  %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+  %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+  %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+  %b0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+  %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+  %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+  %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+  %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+  %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.uadd.sat.i32(i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.uadd.sat.i32(i32 %a1 , i32 %b1 )
+  %r2  = call i32 @llvm.uadd.sat.i32(i32 %a2 , i32 %b2 )
+  %r3  = call i32 @llvm.uadd.sat.i32(i32 %a3 , i32 %b3 )
+  %r4  = call i32 @llvm.uadd.sat.i32(i32 %a4 , i32 %b4 )
+  %r5  = call i32 @llvm.uadd.sat.i32(i32 %a5 , i32 %b5 )
+  %r6  = call i32 @llvm.uadd.sat.i32(i32 %a6 , i32 %b6 )
+  %r7  = call i32 @llvm.uadd.sat.i32(i32 %a7 , i32 %b7 )
+  %r8  = call i32 @llvm.uadd.sat.i32(i32 %a8 , i32 %b8 )
+  %r9  = call i32 @llvm.uadd.sat.i32(i32 %a9 , i32 %b9 )
+  %r10 = call i32 @llvm.uadd.sat.i32(i32 %a10, i32 %b10)
+  %r11 = call i32 @llvm.uadd.sat.i32(i32 %a11, i32 %b11)
+  %r12 = call i32 @llvm.uadd.sat.i32(i32 %a12, i32 %b12)
+  %r13 = call i32 @llvm.uadd.sat.i32(i32 %a13, i32 %b13)
+  %r14 = call i32 @llvm.uadd.sat.i32(i32 %a14, i32 %b14)
+  %r15 = call i32 @llvm.uadd.sat.i32(i32 %a15, i32 %b15)
+  store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+  store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+  store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+  store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+  store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+  store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+  store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+  store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+  store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+  store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+  store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+  store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+  store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+  store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+  store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+  store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+  ret void
+}
+
+define void @add_v32i16() {
+; SSE-LABEL: @add_v32i16(
+; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SSE-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SSE-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SSE-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SSE-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    ret void
+;
+; SLM-LABEL: @add_v32i16(
+; SLM-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SLM-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SLM-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SLM-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SLM-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    ret void
+;
+; AVX-LABEL: @add_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @add_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX512-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX512-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+  %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+  %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+  %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+  %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+  %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+  %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+  %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+  %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+  %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+  %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+  %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+  %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+  %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+  %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+  %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+  %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+  %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+  %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+  %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+  %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+  %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+  %b0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+  %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+  %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+  %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+  %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+  %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+  %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+  %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+  %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+  %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+  %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+  %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+  %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+  %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+  %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+  %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+  %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+  %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+  %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+  %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+  %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+  %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.uadd.sat.i16(i16 %a0 , i16 %b0 )
+  %r1  = call i16 @llvm.uadd.sat.i16(i16 %a1 , i16 %b1 )
+  %r2  = call i16 @llvm.uadd.sat.i16(i16 %a2 , i16 %b2 )
+  %r3  = call i16 @llvm.uadd.sat.i16(i16 %a3 , i16 %b3 )
+  %r4  = call i16 @llvm.uadd.sat.i16(i16 %a4 , i16 %b4 )
+  %r5  = call i16 @llvm.uadd.sat.i16(i16 %a5 , i16 %b5 )
+  %r6  = call i16 @llvm.uadd.sat.i16(i16 %a6 , i16 %b6 )
+  %r7  = call i16 @llvm.uadd.sat.i16(i16 %a7 , i16 %b7 )
+  %r8  = call i16 @llvm.uadd.sat.i16(i16 %a8 , i16 %b8 )
+  %r9  = call i16 @llvm.uadd.sat.i16(i16 %a9 , i16 %b9 )
+  %r10 = call i16 @llvm.uadd.sat.i16(i16 %a10, i16 %b10)
+  %r11 = call i16 @llvm.uadd.sat.i16(i16 %a11, i16 %b11)
+  %r12 = call i16 @llvm.uadd.sat.i16(i16 %a12, i16 %b12)
+  %r13 = call i16 @llvm.uadd.sat.i16(i16 %a13, i16 %b13)
+  %r14 = call i16 @llvm.uadd.sat.i16(i16 %a14, i16 %b14)
+  %r15 = call i16 @llvm.uadd.sat.i16(i16 %a15, i16 %b15)
+  %r16 = call i16 @llvm.uadd.sat.i16(i16 %a16, i16 %b16)
+  %r17 = call i16 @llvm.uadd.sat.i16(i16 %a17, i16 %b17)
+  %r18 = call i16 @llvm.uadd.sat.i16(i16 %a18, i16 %b18)
+  %r19 = call i16 @llvm.uadd.sat.i16(i16 %a19, i16 %b19)
+  %r20 = call i16 @llvm.uadd.sat.i16(i16 %a20, i16 %b20)
+  %r21 = call i16 @llvm.uadd.sat.i16(i16 %a21, i16 %b21)
+  %r22 = call i16 @llvm.uadd.sat.i16(i16 %a22, i16 %b22)
+  %r23 = call i16 @llvm.uadd.sat.i16(i16 %a23, i16 %b23)
+  %r24 = call i16 @llvm.uadd.sat.i16(i16 %a24, i16 %b24)
+  %r25 = call i16 @llvm.uadd.sat.i16(i16 %a25, i16 %b25)
+  %r26 = call i16 @llvm.uadd.sat.i16(i16 %a26, i16 %b26)
+  %r27 = call i16 @llvm.uadd.sat.i16(i16 %a27, i16 %b27)
+  %r28 = call i16 @llvm.uadd.sat.i16(i16 %a28, i16 %b28)
+  %r29 = call i16 @llvm.uadd.sat.i16(i16 %a29, i16 %b29)
+  %r30 = call i16 @llvm.uadd.sat.i16(i16 %a30, i16 %b30)
+  %r31 = call i16 @llvm.uadd.sat.i16(i16 %a31, i16 %b31)
+  store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+  store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+  store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+  store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+  store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+  store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+  store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+  store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+  store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+  store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+  store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+  store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+  store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+  store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+  store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+  store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+  store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+  store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+  store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+  store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+  store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+  store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+  store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+  store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+  store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+  store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+  store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+  store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+  store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+  store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+  store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+  store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+  ret void
+}
+
+define void @add_v64i8() {
+; CHECK-LABEL: @add_v64i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP5]])
+; CHECK-NEXT:    [[TMP10:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP2]], <16 x i8> [[TMP6]])
+; CHECK-NEXT:    [[TMP11:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP3]], <16 x i8> [[TMP7]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP8]])
+; CHECK-NEXT:    store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    ret void
+;
+  %a0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+  %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+  %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+  %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+  %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+  %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+  %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+  %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+  %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+  %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+  %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+  %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+  %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+  %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+  %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+  %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+  %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+  %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+  %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+  %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+  %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+  %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+  %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+  %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+  %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+  %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+  %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+  %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+  %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+  %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+  %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+  %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+  %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+  %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+  %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+  %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+  %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+  %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+  %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+  %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+  %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+  %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+  %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+  %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+  %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+  %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+  %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+  %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+  %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+  %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+  %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+  %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+  %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+  %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+  %b0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+  %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+  %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+  %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+  %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+  %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+  %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+  %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+  %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+  %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+  %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+  %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+  %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+  %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+  %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+  %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+  %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+  %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+  %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+  %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+  %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+  %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+  %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+  %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+  %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+  %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+  %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+  %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+  %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+  %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+  %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+  %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+  %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+  %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+  %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+  %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+  %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+  %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+  %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+  %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+  %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+  %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+  %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+  %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+  %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+  %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+  %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+  %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+  %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+  %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+  %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+  %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+  %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+  %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.uadd.sat.i8(i8 %a0 , i8 %b0 )
+  %r1  = call i8 @llvm.uadd.sat.i8(i8 %a1 , i8 %b1 )
+  %r2  = call i8 @llvm.uadd.sat.i8(i8 %a2 , i8 %b2 )
+  %r3  = call i8 @llvm.uadd.sat.i8(i8 %a3 , i8 %b3 )
+  %r4  = call i8 @llvm.uadd.sat.i8(i8 %a4 , i8 %b4 )
+  %r5  = call i8 @llvm.uadd.sat.i8(i8 %a5 , i8 %b5 )
+  %r6  = call i8 @llvm.uadd.sat.i8(i8 %a6 , i8 %b6 )
+  %r7  = call i8 @llvm.uadd.sat.i8(i8 %a7 , i8 %b7 )
+  %r8  = call i8 @llvm.uadd.sat.i8(i8 %a8 , i8 %b8 )
+  %r9  = call i8 @llvm.uadd.sat.i8(i8 %a9 , i8 %b9 )
+  %r10 = call i8 @llvm.uadd.sat.i8(i8 %a10, i8 %b10)
+  %r11 = call i8 @llvm.uadd.sat.i8(i8 %a11, i8 %b11)
+  %r12 = call i8 @llvm.uadd.sat.i8(i8 %a12, i8 %b12)
+  %r13 = call i8 @llvm.uadd.sat.i8(i8 %a13, i8 %b13)
+  %r14 = call i8 @llvm.uadd.sat.i8(i8 %a14, i8 %b14)
+  %r15 = call i8 @llvm.uadd.sat.i8(i8 %a15, i8 %b15)
+  %r16 = call i8 @llvm.uadd.sat.i8(i8 %a16, i8 %b16)
+  %r17 = call i8 @llvm.uadd.sat.i8(i8 %a17, i8 %b17)
+  %r18 = call i8 @llvm.uadd.sat.i8(i8 %a18, i8 %b18)
+  %r19 = call i8 @llvm.uadd.sat.i8(i8 %a19, i8 %b19)
+  %r20 = call i8 @llvm.uadd.sat.i8(i8 %a20, i8 %b20)
+  %r21 = call i8 @llvm.uadd.sat.i8(i8 %a21, i8 %b21)
+  %r22 = call i8 @llvm.uadd.sat.i8(i8 %a22, i8 %b22)
+  %r23 = call i8 @llvm.uadd.sat.i8(i8 %a23, i8 %b23)
+  %r24 = call i8 @llvm.uadd.sat.i8(i8 %a24, i8 %b24)
+  %r25 = call i8 @llvm.uadd.sat.i8(i8 %a25, i8 %b25)
+  %r26 = call i8 @llvm.uadd.sat.i8(i8 %a26, i8 %b26)
+  %r27 = call i8 @llvm.uadd.sat.i8(i8 %a27, i8 %b27)
+  %r28 = call i8 @llvm.uadd.sat.i8(i8 %a28, i8 %b28)
+  %r29 = call i8 @llvm.uadd.sat.i8(i8 %a29, i8 %b29)
+  %r30 = call i8 @llvm.uadd.sat.i8(i8 %a30, i8 %b30)
+  %r31 = call i8 @llvm.uadd.sat.i8(i8 %a31, i8 %b31)
+  %r32 = call i8 @llvm.uadd.sat.i8(i8 %a32, i8 %b32)
+  %r33 = call i8 @llvm.uadd.sat.i8(i8 %a33, i8 %b33)
+  %r34 = call i8 @llvm.uadd.sat.i8(i8 %a34, i8 %b34)
+  %r35 = call i8 @llvm.uadd.sat.i8(i8 %a35, i8 %b35)
+  %r36 = call i8 @llvm.uadd.sat.i8(i8 %a36, i8 %b36)
+  %r37 = call i8 @llvm.uadd.sat.i8(i8 %a37, i8 %b37)
+  %r38 = call i8 @llvm.uadd.sat.i8(i8 %a38, i8 %b38)
+  %r39 = call i8 @llvm.uadd.sat.i8(i8 %a39, i8 %b39)
+  %r40 = call i8 @llvm.uadd.sat.i8(i8 %a40, i8 %b40)
+  %r41 = call i8 @llvm.uadd.sat.i8(i8 %a41, i8 %b41)
+  %r42 = call i8 @llvm.uadd.sat.i8(i8 %a42, i8 %b42)
+  %r43 = call i8 @llvm.uadd.sat.i8(i8 %a43, i8 %b43)
+  %r44 = call i8 @llvm.uadd.sat.i8(i8 %a44, i8 %b44)
+  %r45 = call i8 @llvm.uadd.sat.i8(i8 %a45, i8 %b45)
+  %r46 = call i8 @llvm.uadd.sat.i8(i8 %a46, i8 %b46)
+  %r47 = call i8 @llvm.uadd.sat.i8(i8 %a47, i8 %b47)
+  %r48 = call i8 @llvm.uadd.sat.i8(i8 %a48, i8 %b48)
+  %r49 = call i8 @llvm.uadd.sat.i8(i8 %a49, i8 %b49)
+  %r50 = call i8 @llvm.uadd.sat.i8(i8 %a50, i8 %b50)
+  %r51 = call i8 @llvm.uadd.sat.i8(i8 %a51, i8 %b51)
+  %r52 = call i8 @llvm.uadd.sat.i8(i8 %a52, i8 %b52)
+  %r53 = call i8 @llvm.uadd.sat.i8(i8 %a53, i8 %b53)
+  %r54 = call i8 @llvm.uadd.sat.i8(i8 %a54, i8 %b54)
+  %r55 = call i8 @llvm.uadd.sat.i8(i8 %a55, i8 %b55)
+  %r56 = call i8 @llvm.uadd.sat.i8(i8 %a56, i8 %b56)
+  %r57 = call i8 @llvm.uadd.sat.i8(i8 %a57, i8 %b57)
+  %r58 = call i8 @llvm.uadd.sat.i8(i8 %a58, i8 %b58)
+  %r59 = call i8 @llvm.uadd.sat.i8(i8 %a59, i8 %b59)
+  %r60 = call i8 @llvm.uadd.sat.i8(i8 %a60, i8 %b60)
+  %r61 = call i8 @llvm.uadd.sat.i8(i8 %a61, i8 %b61)
+  %r62 = call i8 @llvm.uadd.sat.i8(i8 %a62, i8 %b62)
+  %r63 = call i8 @llvm.uadd.sat.i8(i8 %a63, i8 %b63)
+  store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+  store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+  store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+  store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+  store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+  store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+  store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+  store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+  store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+  store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+  store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+  store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+  store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+  store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+  store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+  store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+  store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+  store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+  store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+  store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+  store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+  store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+  store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+  store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+  store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+  store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+  store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+  store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+  store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+  store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+  store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+  store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+  store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+  store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+  store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+  store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+  store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+  store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+  store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+  store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+  store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+  store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+  store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+  store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+  store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+  store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+  store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+  store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+  store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+  store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+  store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+  store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+  store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+  store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+  store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+  store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+  store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+  store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+  store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+  store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+  store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+  store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+  store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+  store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+  ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll b/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
new file mode 100644
index 0000000..4156f55
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/arith-sub-ssat.ll
@@ -0,0 +1,703 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SLM
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256BW
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8  = common global [64 x i8] zeroinitializer, align 64
+@b8  = common global [64 x i8] zeroinitializer, align 64
+@c8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.ssub.sat.i64(i64, i64)
+declare i32 @llvm.ssub.sat.i32(i32, i32)
+declare i16 @llvm.ssub.sat.i16(i16, i16)
+declare i8  @llvm.ssub.sat.i8 (i8 , i8 )
+
+define void @sub_v8i64() {
+; CHECK-LABEL: @sub_v8i64(
+; CHECK-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; CHECK-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; CHECK-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; CHECK-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; CHECK-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; CHECK-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; CHECK-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; CHECK-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; CHECK-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; CHECK-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; CHECK-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; CHECK-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; CHECK-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; CHECK-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; CHECK-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; CHECK-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; CHECK-NEXT:    [[R0:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A0]], i64 [[B0]])
+; CHECK-NEXT:    [[R1:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A1]], i64 [[B1]])
+; CHECK-NEXT:    [[R2:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A2]], i64 [[B2]])
+; CHECK-NEXT:    [[R3:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A3]], i64 [[B3]])
+; CHECK-NEXT:    [[R4:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A4]], i64 [[B4]])
+; CHECK-NEXT:    [[R5:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A5]], i64 [[B5]])
+; CHECK-NEXT:    [[R6:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A6]], i64 [[B6]])
+; CHECK-NEXT:    [[R7:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A7]], i64 [[B7]])
+; CHECK-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; CHECK-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; CHECK-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; CHECK-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; CHECK-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; CHECK-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; CHECK-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; CHECK-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; CHECK-NEXT:    ret void
+;
+  %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+  %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+  %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+  %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+  %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+  %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+  %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+  %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+  %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+  %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+  %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+  %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+  %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+  %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+  %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+  %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.ssub.sat.i64(i64 %a0, i64 %b0)
+  %r1 = call i64 @llvm.ssub.sat.i64(i64 %a1, i64 %b1)
+  %r2 = call i64 @llvm.ssub.sat.i64(i64 %a2, i64 %b2)
+  %r3 = call i64 @llvm.ssub.sat.i64(i64 %a3, i64 %b3)
+  %r4 = call i64 @llvm.ssub.sat.i64(i64 %a4, i64 %b4)
+  %r5 = call i64 @llvm.ssub.sat.i64(i64 %a5, i64 %b5)
+  %r6 = call i64 @llvm.ssub.sat.i64(i64 %a6, i64 %b6)
+  %r7 = call i64 @llvm.ssub.sat.i64(i64 %a7, i64 %b7)
+  store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+  store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+  store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+  store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+  store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+  store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+  store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+  store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+  ret void
+}
+
+define void @sub_v16i32() {
+; CHECK-LABEL: @sub_v16i32(
+; CHECK-NEXT:    [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
+; CHECK-NEXT:    [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
+; CHECK-NEXT:    [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
+; CHECK-NEXT:    [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
+; CHECK-NEXT:    [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
+; CHECK-NEXT:    [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
+; CHECK-NEXT:    [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
+; CHECK-NEXT:    [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
+; CHECK-NEXT:    [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
+; CHECK-NEXT:    [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
+; CHECK-NEXT:    [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+; CHECK-NEXT:    [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+; CHECK-NEXT:    [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+; CHECK-NEXT:    [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+; CHECK-NEXT:    [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+; CHECK-NEXT:    [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+; CHECK-NEXT:    [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4
+; CHECK-NEXT:    [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4
+; CHECK-NEXT:    [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4
+; CHECK-NEXT:    [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4
+; CHECK-NEXT:    [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4
+; CHECK-NEXT:    [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4
+; CHECK-NEXT:    [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4
+; CHECK-NEXT:    [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4
+; CHECK-NEXT:    [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4
+; CHECK-NEXT:    [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4
+; CHECK-NEXT:    [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+; CHECK-NEXT:    [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+; CHECK-NEXT:    [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+; CHECK-NEXT:    [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+; CHECK-NEXT:    [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+; CHECK-NEXT:    [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+; CHECK-NEXT:    [[R0:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A0]], i32 [[B0]])
+; CHECK-NEXT:    [[R1:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A1]], i32 [[B1]])
+; CHECK-NEXT:    [[R2:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A2]], i32 [[B2]])
+; CHECK-NEXT:    [[R3:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A3]], i32 [[B3]])
+; CHECK-NEXT:    [[R4:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A4]], i32 [[B4]])
+; CHECK-NEXT:    [[R5:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A5]], i32 [[B5]])
+; CHECK-NEXT:    [[R6:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A6]], i32 [[B6]])
+; CHECK-NEXT:    [[R7:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A7]], i32 [[B7]])
+; CHECK-NEXT:    [[R8:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A8]], i32 [[B8]])
+; CHECK-NEXT:    [[R9:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A9]], i32 [[B9]])
+; CHECK-NEXT:    [[R10:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A10]], i32 [[B10]])
+; CHECK-NEXT:    [[R11:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A11]], i32 [[B11]])
+; CHECK-NEXT:    [[R12:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A12]], i32 [[B12]])
+; CHECK-NEXT:    [[R13:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A13]], i32 [[B13]])
+; CHECK-NEXT:    [[R14:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A14]], i32 [[B14]])
+; CHECK-NEXT:    [[R15:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[A15]], i32 [[B15]])
+; CHECK-NEXT:    store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4
+; CHECK-NEXT:    store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4
+; CHECK-NEXT:    store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4
+; CHECK-NEXT:    store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4
+; CHECK-NEXT:    store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4
+; CHECK-NEXT:    store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4
+; CHECK-NEXT:    store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4
+; CHECK-NEXT:    store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4
+; CHECK-NEXT:    store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4
+; CHECK-NEXT:    store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4
+; CHECK-NEXT:    store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+; CHECK-NEXT:    store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+; CHECK-NEXT:    store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+; CHECK-NEXT:    store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+; CHECK-NEXT:    store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+; CHECK-NEXT:    store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+; CHECK-NEXT:    ret void
+;
+  %a0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+  %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+  %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+  %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+  %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+  %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+  %b0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+  %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+  %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+  %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+  %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+  %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.ssub.sat.i32(i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.ssub.sat.i32(i32 %a1 , i32 %b1 )
+  %r2  = call i32 @llvm.ssub.sat.i32(i32 %a2 , i32 %b2 )
+  %r3  = call i32 @llvm.ssub.sat.i32(i32 %a3 , i32 %b3 )
+  %r4  = call i32 @llvm.ssub.sat.i32(i32 %a4 , i32 %b4 )
+  %r5  = call i32 @llvm.ssub.sat.i32(i32 %a5 , i32 %b5 )
+  %r6  = call i32 @llvm.ssub.sat.i32(i32 %a6 , i32 %b6 )
+  %r7  = call i32 @llvm.ssub.sat.i32(i32 %a7 , i32 %b7 )
+  %r8  = call i32 @llvm.ssub.sat.i32(i32 %a8 , i32 %b8 )
+  %r9  = call i32 @llvm.ssub.sat.i32(i32 %a9 , i32 %b9 )
+  %r10 = call i32 @llvm.ssub.sat.i32(i32 %a10, i32 %b10)
+  %r11 = call i32 @llvm.ssub.sat.i32(i32 %a11, i32 %b11)
+  %r12 = call i32 @llvm.ssub.sat.i32(i32 %a12, i32 %b12)
+  %r13 = call i32 @llvm.ssub.sat.i32(i32 %a13, i32 %b13)
+  %r14 = call i32 @llvm.ssub.sat.i32(i32 %a14, i32 %b14)
+  %r15 = call i32 @llvm.ssub.sat.i32(i32 %a15, i32 %b15)
+  store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+  store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+  store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+  store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+  store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+  store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+  store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+  store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+  store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+  store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+  store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+  store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+  store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+  store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+  store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+  store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+  ret void
+}
+
+define void @sub_v32i16() {
+; SSE-LABEL: @sub_v32i16(
+; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SSE-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SSE-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SSE-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SSE-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    ret void
+;
+; SLM-LABEL: @sub_v32i16(
+; SLM-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SLM-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SLM-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SLM-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SLM-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    ret void
+;
+; AVX-LABEL: @sub_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @sub_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX512-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX512-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+  %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+  %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+  %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+  %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+  %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+  %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+  %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+  %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+  %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+  %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+  %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+  %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+  %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+  %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+  %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+  %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+  %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+  %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+  %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+  %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+  %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+  %b0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+  %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+  %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+  %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+  %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+  %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+  %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+  %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+  %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+  %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+  %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+  %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+  %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+  %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+  %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+  %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+  %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+  %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+  %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+  %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+  %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+  %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.ssub.sat.i16(i16 %a0 , i16 %b0 )
+  %r1  = call i16 @llvm.ssub.sat.i16(i16 %a1 , i16 %b1 )
+  %r2  = call i16 @llvm.ssub.sat.i16(i16 %a2 , i16 %b2 )
+  %r3  = call i16 @llvm.ssub.sat.i16(i16 %a3 , i16 %b3 )
+  %r4  = call i16 @llvm.ssub.sat.i16(i16 %a4 , i16 %b4 )
+  %r5  = call i16 @llvm.ssub.sat.i16(i16 %a5 , i16 %b5 )
+  %r6  = call i16 @llvm.ssub.sat.i16(i16 %a6 , i16 %b6 )
+  %r7  = call i16 @llvm.ssub.sat.i16(i16 %a7 , i16 %b7 )
+  %r8  = call i16 @llvm.ssub.sat.i16(i16 %a8 , i16 %b8 )
+  %r9  = call i16 @llvm.ssub.sat.i16(i16 %a9 , i16 %b9 )
+  %r10 = call i16 @llvm.ssub.sat.i16(i16 %a10, i16 %b10)
+  %r11 = call i16 @llvm.ssub.sat.i16(i16 %a11, i16 %b11)
+  %r12 = call i16 @llvm.ssub.sat.i16(i16 %a12, i16 %b12)
+  %r13 = call i16 @llvm.ssub.sat.i16(i16 %a13, i16 %b13)
+  %r14 = call i16 @llvm.ssub.sat.i16(i16 %a14, i16 %b14)
+  %r15 = call i16 @llvm.ssub.sat.i16(i16 %a15, i16 %b15)
+  %r16 = call i16 @llvm.ssub.sat.i16(i16 %a16, i16 %b16)
+  %r17 = call i16 @llvm.ssub.sat.i16(i16 %a17, i16 %b17)
+  %r18 = call i16 @llvm.ssub.sat.i16(i16 %a18, i16 %b18)
+  %r19 = call i16 @llvm.ssub.sat.i16(i16 %a19, i16 %b19)
+  %r20 = call i16 @llvm.ssub.sat.i16(i16 %a20, i16 %b20)
+  %r21 = call i16 @llvm.ssub.sat.i16(i16 %a21, i16 %b21)
+  %r22 = call i16 @llvm.ssub.sat.i16(i16 %a22, i16 %b22)
+  %r23 = call i16 @llvm.ssub.sat.i16(i16 %a23, i16 %b23)
+  %r24 = call i16 @llvm.ssub.sat.i16(i16 %a24, i16 %b24)
+  %r25 = call i16 @llvm.ssub.sat.i16(i16 %a25, i16 %b25)
+  %r26 = call i16 @llvm.ssub.sat.i16(i16 %a26, i16 %b26)
+  %r27 = call i16 @llvm.ssub.sat.i16(i16 %a27, i16 %b27)
+  %r28 = call i16 @llvm.ssub.sat.i16(i16 %a28, i16 %b28)
+  %r29 = call i16 @llvm.ssub.sat.i16(i16 %a29, i16 %b29)
+  %r30 = call i16 @llvm.ssub.sat.i16(i16 %a30, i16 %b30)
+  %r31 = call i16 @llvm.ssub.sat.i16(i16 %a31, i16 %b31)
+  store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+  store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+  store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+  store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+  store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+  store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+  store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+  store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+  store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+  store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+  store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+  store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+  store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+  store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+  store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+  store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+  store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+  store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+  store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+  store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+  store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+  store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+  store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+  store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+  store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+  store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+  store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+  store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+  store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+  store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+  store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+  store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+  ret void
+}
+
+define void @sub_v64i8() {
+; CHECK-LABEL: @sub_v64i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP5]])
+; CHECK-NEXT:    [[TMP10:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP2]], <16 x i8> [[TMP6]])
+; CHECK-NEXT:    [[TMP11:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP3]], <16 x i8> [[TMP7]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP8]])
+; CHECK-NEXT:    store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    ret void
+;
+  %a0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+  %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+  %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+  %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+  %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+  %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+  %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+  %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+  %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+  %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+  %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+  %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+  %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+  %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+  %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+  %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+  %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+  %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+  %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+  %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+  %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+  %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+  %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+  %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+  %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+  %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+  %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+  %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+  %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+  %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+  %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+  %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+  %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+  %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+  %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+  %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+  %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+  %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+  %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+  %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+  %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+  %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+  %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+  %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+  %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+  %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+  %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+  %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+  %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+  %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+  %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+  %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+  %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+  %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+  %b0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+  %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+  %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+  %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+  %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+  %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+  %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+  %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+  %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+  %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+  %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+  %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+  %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+  %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+  %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+  %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+  %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+  %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+  %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+  %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+  %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+  %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+  %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+  %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+  %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+  %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+  %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+  %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+  %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+  %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+  %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+  %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+  %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+  %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+  %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+  %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+  %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+  %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+  %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+  %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+  %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+  %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+  %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+  %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+  %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+  %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+  %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+  %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+  %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+  %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+  %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+  %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+  %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+  %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.ssub.sat.i8(i8 %a0 , i8 %b0 )
+  %r1  = call i8 @llvm.ssub.sat.i8(i8 %a1 , i8 %b1 )
+  %r2  = call i8 @llvm.ssub.sat.i8(i8 %a2 , i8 %b2 )
+  %r3  = call i8 @llvm.ssub.sat.i8(i8 %a3 , i8 %b3 )
+  %r4  = call i8 @llvm.ssub.sat.i8(i8 %a4 , i8 %b4 )
+  %r5  = call i8 @llvm.ssub.sat.i8(i8 %a5 , i8 %b5 )
+  %r6  = call i8 @llvm.ssub.sat.i8(i8 %a6 , i8 %b6 )
+  %r7  = call i8 @llvm.ssub.sat.i8(i8 %a7 , i8 %b7 )
+  %r8  = call i8 @llvm.ssub.sat.i8(i8 %a8 , i8 %b8 )
+  %r9  = call i8 @llvm.ssub.sat.i8(i8 %a9 , i8 %b9 )
+  %r10 = call i8 @llvm.ssub.sat.i8(i8 %a10, i8 %b10)
+  %r11 = call i8 @llvm.ssub.sat.i8(i8 %a11, i8 %b11)
+  %r12 = call i8 @llvm.ssub.sat.i8(i8 %a12, i8 %b12)
+  %r13 = call i8 @llvm.ssub.sat.i8(i8 %a13, i8 %b13)
+  %r14 = call i8 @llvm.ssub.sat.i8(i8 %a14, i8 %b14)
+  %r15 = call i8 @llvm.ssub.sat.i8(i8 %a15, i8 %b15)
+  %r16 = call i8 @llvm.ssub.sat.i8(i8 %a16, i8 %b16)
+  %r17 = call i8 @llvm.ssub.sat.i8(i8 %a17, i8 %b17)
+  %r18 = call i8 @llvm.ssub.sat.i8(i8 %a18, i8 %b18)
+  %r19 = call i8 @llvm.ssub.sat.i8(i8 %a19, i8 %b19)
+  %r20 = call i8 @llvm.ssub.sat.i8(i8 %a20, i8 %b20)
+  %r21 = call i8 @llvm.ssub.sat.i8(i8 %a21, i8 %b21)
+  %r22 = call i8 @llvm.ssub.sat.i8(i8 %a22, i8 %b22)
+  %r23 = call i8 @llvm.ssub.sat.i8(i8 %a23, i8 %b23)
+  %r24 = call i8 @llvm.ssub.sat.i8(i8 %a24, i8 %b24)
+  %r25 = call i8 @llvm.ssub.sat.i8(i8 %a25, i8 %b25)
+  %r26 = call i8 @llvm.ssub.sat.i8(i8 %a26, i8 %b26)
+  %r27 = call i8 @llvm.ssub.sat.i8(i8 %a27, i8 %b27)
+  %r28 = call i8 @llvm.ssub.sat.i8(i8 %a28, i8 %b28)
+  %r29 = call i8 @llvm.ssub.sat.i8(i8 %a29, i8 %b29)
+  %r30 = call i8 @llvm.ssub.sat.i8(i8 %a30, i8 %b30)
+  %r31 = call i8 @llvm.ssub.sat.i8(i8 %a31, i8 %b31)
+  %r32 = call i8 @llvm.ssub.sat.i8(i8 %a32, i8 %b32)
+  %r33 = call i8 @llvm.ssub.sat.i8(i8 %a33, i8 %b33)
+  %r34 = call i8 @llvm.ssub.sat.i8(i8 %a34, i8 %b34)
+  %r35 = call i8 @llvm.ssub.sat.i8(i8 %a35, i8 %b35)
+  %r36 = call i8 @llvm.ssub.sat.i8(i8 %a36, i8 %b36)
+  %r37 = call i8 @llvm.ssub.sat.i8(i8 %a37, i8 %b37)
+  %r38 = call i8 @llvm.ssub.sat.i8(i8 %a38, i8 %b38)
+  %r39 = call i8 @llvm.ssub.sat.i8(i8 %a39, i8 %b39)
+  %r40 = call i8 @llvm.ssub.sat.i8(i8 %a40, i8 %b40)
+  %r41 = call i8 @llvm.ssub.sat.i8(i8 %a41, i8 %b41)
+  %r42 = call i8 @llvm.ssub.sat.i8(i8 %a42, i8 %b42)
+  %r43 = call i8 @llvm.ssub.sat.i8(i8 %a43, i8 %b43)
+  %r44 = call i8 @llvm.ssub.sat.i8(i8 %a44, i8 %b44)
+  %r45 = call i8 @llvm.ssub.sat.i8(i8 %a45, i8 %b45)
+  %r46 = call i8 @llvm.ssub.sat.i8(i8 %a46, i8 %b46)
+  %r47 = call i8 @llvm.ssub.sat.i8(i8 %a47, i8 %b47)
+  %r48 = call i8 @llvm.ssub.sat.i8(i8 %a48, i8 %b48)
+  %r49 = call i8 @llvm.ssub.sat.i8(i8 %a49, i8 %b49)
+  %r50 = call i8 @llvm.ssub.sat.i8(i8 %a50, i8 %b50)
+  %r51 = call i8 @llvm.ssub.sat.i8(i8 %a51, i8 %b51)
+  %r52 = call i8 @llvm.ssub.sat.i8(i8 %a52, i8 %b52)
+  %r53 = call i8 @llvm.ssub.sat.i8(i8 %a53, i8 %b53)
+  %r54 = call i8 @llvm.ssub.sat.i8(i8 %a54, i8 %b54)
+  %r55 = call i8 @llvm.ssub.sat.i8(i8 %a55, i8 %b55)
+  %r56 = call i8 @llvm.ssub.sat.i8(i8 %a56, i8 %b56)
+  %r57 = call i8 @llvm.ssub.sat.i8(i8 %a57, i8 %b57)
+  %r58 = call i8 @llvm.ssub.sat.i8(i8 %a58, i8 %b58)
+  %r59 = call i8 @llvm.ssub.sat.i8(i8 %a59, i8 %b59)
+  %r60 = call i8 @llvm.ssub.sat.i8(i8 %a60, i8 %b60)
+  %r61 = call i8 @llvm.ssub.sat.i8(i8 %a61, i8 %b61)
+  %r62 = call i8 @llvm.ssub.sat.i8(i8 %a62, i8 %b62)
+  %r63 = call i8 @llvm.ssub.sat.i8(i8 %a63, i8 %b63)
+  store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+  store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+  store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+  store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+  store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+  store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+  store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+  store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+  store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+  store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+  store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+  store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+  store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+  store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+  store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+  store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+  store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+  store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+  store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+  store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+  store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+  store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+  store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+  store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+  store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+  store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+  store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+  store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+  store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+  store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+  store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+  store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+  store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+  store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+  store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+  store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+  store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+  store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+  store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+  store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+  store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+  store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+  store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+  store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+  store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+  store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+  store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+  store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+  store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+  store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+  store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+  store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+  store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+  store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+  store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+  store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+  store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+  store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+  store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+  store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+  store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+  store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+  store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+  store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+  ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll b/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
new file mode 100644
index 0000000..e22b963
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/arith-sub-usat.ll
@@ -0,0 +1,863 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SLM
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256BW
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8  = common global [64 x i8] zeroinitializer, align 64
+@b8  = common global [64 x i8] zeroinitializer, align 64
+@c8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.usub.sat.i64(i64, i64)
+declare i32 @llvm.usub.sat.i32(i32, i32)
+declare i16 @llvm.usub.sat.i16(i16, i16)
+declare i8  @llvm.usub.sat.i8 (i8 , i8 )
+
+define void @sub_v8i64() {
+; SSE-LABEL: @sub_v8i64(
+; SSE-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; SSE-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; SSE-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[R0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A0]], i64 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A1]], i64 [[B1]])
+; SSE-NEXT:    [[R2:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A2]], i64 [[B2]])
+; SSE-NEXT:    [[R3:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A3]], i64 [[B3]])
+; SSE-NEXT:    [[R4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A4]], i64 [[B4]])
+; SSE-NEXT:    [[R5:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A5]], i64 [[B5]])
+; SSE-NEXT:    [[R6:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A6]], i64 [[B6]])
+; SSE-NEXT:    [[R7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A7]], i64 [[B7]])
+; SSE-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; SSE-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; SSE-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; SSE-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; SSE-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; SSE-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; SSE-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; SSE-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; SSE-NEXT:    ret void
+;
+; SLM-LABEL: @sub_v8i64(
+; SLM-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; SLM-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; SLM-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; SLM-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; SLM-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; SLM-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; SLM-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; SLM-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; SLM-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; SLM-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; SLM-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; SLM-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; SLM-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; SLM-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; SLM-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; SLM-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; SLM-NEXT:    [[R0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A0]], i64 [[B0]])
+; SLM-NEXT:    [[R1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A1]], i64 [[B1]])
+; SLM-NEXT:    [[R2:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A2]], i64 [[B2]])
+; SLM-NEXT:    [[R3:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A3]], i64 [[B3]])
+; SLM-NEXT:    [[R4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A4]], i64 [[B4]])
+; SLM-NEXT:    [[R5:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A5]], i64 [[B5]])
+; SLM-NEXT:    [[R6:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A6]], i64 [[B6]])
+; SLM-NEXT:    [[R7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A7]], i64 [[B7]])
+; SLM-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; SLM-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; SLM-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; SLM-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; SLM-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; SLM-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; SLM-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; SLM-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; SLM-NEXT:    ret void
+;
+; AVX1-LABEL: @sub_v8i64(
+; AVX1-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; AVX1-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; AVX1-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; AVX1-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; AVX1-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; AVX1-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; AVX1-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; AVX1-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; AVX1-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; AVX1-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; AVX1-NEXT:    [[R0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A0]], i64 [[B0]])
+; AVX1-NEXT:    [[R1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A1]], i64 [[B1]])
+; AVX1-NEXT:    [[R2:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A2]], i64 [[B2]])
+; AVX1-NEXT:    [[R3:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A3]], i64 [[B3]])
+; AVX1-NEXT:    [[R4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A4]], i64 [[B4]])
+; AVX1-NEXT:    [[R5:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A5]], i64 [[B5]])
+; AVX1-NEXT:    [[R6:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A6]], i64 [[B6]])
+; AVX1-NEXT:    [[R7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A7]], i64 [[B7]])
+; AVX1-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; AVX1-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; AVX1-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; AVX1-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; AVX1-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; AVX1-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; AVX1-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; AVX1-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; AVX1-NEXT:    ret void
+;
+; AVX2-LABEL: @sub_v8i64(
+; AVX2-NEXT:    [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; AVX2-NEXT:    [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; AVX2-NEXT:    [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; AVX2-NEXT:    [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; AVX2-NEXT:    [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; AVX2-NEXT:    [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; AVX2-NEXT:    [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; AVX2-NEXT:    [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; AVX2-NEXT:    [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; AVX2-NEXT:    [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; AVX2-NEXT:    [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; AVX2-NEXT:    [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; AVX2-NEXT:    [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; AVX2-NEXT:    [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; AVX2-NEXT:    [[R0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A0]], i64 [[B0]])
+; AVX2-NEXT:    [[R1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A1]], i64 [[B1]])
+; AVX2-NEXT:    [[R2:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A2]], i64 [[B2]])
+; AVX2-NEXT:    [[R3:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A3]], i64 [[B3]])
+; AVX2-NEXT:    [[R4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A4]], i64 [[B4]])
+; AVX2-NEXT:    [[R5:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A5]], i64 [[B5]])
+; AVX2-NEXT:    [[R6:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A6]], i64 [[B6]])
+; AVX2-NEXT:    [[R7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A7]], i64 [[B7]])
+; AVX2-NEXT:    store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; AVX2-NEXT:    store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; AVX2-NEXT:    store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; AVX2-NEXT:    store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; AVX2-NEXT:    store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; AVX2-NEXT:    store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; AVX2-NEXT:    store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; AVX2-NEXT:    store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; AVX2-NEXT:    ret void
+;
+; AVX512-LABEL: @sub_v8i64(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT:    [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT:    [[TMP3:%.*]] = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP2]])
+; AVX512-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT:    ret void
+;
+; AVX256BW-LABEL: @sub_v8i64(
+; AVX256BW-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX256BW-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX256BW-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX256BW-NEXT:    [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX256BW-NEXT:    [[TMP5:%.*]] = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP3]])
+; AVX256BW-NEXT:    [[TMP6:%.*]] = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> [[TMP2]], <4 x i64> [[TMP4]])
+; AVX256BW-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX256BW-NEXT:    store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX256BW-NEXT:    ret void
+;
+  %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+  %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+  %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+  %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+  %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+  %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+  %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+  %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+  %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+  %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+  %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+  %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+  %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+  %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+  %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+  %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.usub.sat.i64(i64 %a0, i64 %b0)
+  %r1 = call i64 @llvm.usub.sat.i64(i64 %a1, i64 %b1)
+  %r2 = call i64 @llvm.usub.sat.i64(i64 %a2, i64 %b2)
+  %r3 = call i64 @llvm.usub.sat.i64(i64 %a3, i64 %b3)
+  %r4 = call i64 @llvm.usub.sat.i64(i64 %a4, i64 %b4)
+  %r5 = call i64 @llvm.usub.sat.i64(i64 %a5, i64 %b5)
+  %r6 = call i64 @llvm.usub.sat.i64(i64 %a6, i64 %b6)
+  %r7 = call i64 @llvm.usub.sat.i64(i64 %a7, i64 %b7)
+  store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+  store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+  store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+  store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+  store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+  store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+  store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+  store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+  ret void
+}
+
+define void @sub_v16i32() {
+; SSE-LABEL: @sub_v16i32(
+; SSE-NEXT:    [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
+; SSE-NEXT:    [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4
+; SSE-NEXT:    [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[R0:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A0]], i32 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A1]], i32 [[B1]])
+; SSE-NEXT:    [[R2:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A2]], i32 [[B2]])
+; SSE-NEXT:    [[R3:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A3]], i32 [[B3]])
+; SSE-NEXT:    [[R4:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A4]], i32 [[B4]])
+; SSE-NEXT:    [[R5:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A5]], i32 [[B5]])
+; SSE-NEXT:    [[R6:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A6]], i32 [[B6]])
+; SSE-NEXT:    [[R7:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A7]], i32 [[B7]])
+; SSE-NEXT:    [[R8:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A8]], i32 [[B8]])
+; SSE-NEXT:    [[R9:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A9]], i32 [[B9]])
+; SSE-NEXT:    [[R10:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A10]], i32 [[B10]])
+; SSE-NEXT:    [[R11:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A11]], i32 [[B11]])
+; SSE-NEXT:    [[R12:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A12]], i32 [[B12]])
+; SSE-NEXT:    [[R13:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A13]], i32 [[B13]])
+; SSE-NEXT:    [[R14:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A14]], i32 [[B14]])
+; SSE-NEXT:    [[R15:%.*]] = call i32 @llvm.usub.sat.i32(i32 [[A15]], i32 [[B15]])
+; SSE-NEXT:    store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4
+; SSE-NEXT:    store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4
+; SSE-NEXT:    store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4
+; SSE-NEXT:    store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4
+; SSE-NEXT:    store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4
+; SSE-NEXT:    store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4
+; SSE-NEXT:    store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4
+; SSE-NEXT:    store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4
+; SSE-NEXT:    store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4
+; SSE-NEXT:    store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4
+; SSE-NEXT:    store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+; SSE-NEXT:    store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+; SSE-NEXT:    store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+; SSE-NEXT:    store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+; SSE-NEXT:    store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+; SSE-NEXT:    store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+; SSE-NEXT:    ret void
+;
+; SLM-LABEL: @sub_v16i32(
+; SLM-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
+; SLM-NEXT:    [[TMP9:%.*]] = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> [[TMP1]], <4 x i32> [[TMP5]])
+; SLM-NEXT:    [[TMP10:%.*]] = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> [[TMP2]], <4 x i32> [[TMP6]])
+; SLM-NEXT:    [[TMP11:%.*]] = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> [[TMP3]], <4 x i32> [[TMP7]])
+; SLM-NEXT:    [[TMP12:%.*]] = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> [[TMP4]], <4 x i32> [[TMP8]])
+; SLM-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
+; SLM-NEXT:    store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
+; SLM-NEXT:    store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
+; SLM-NEXT:    store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
+; SLM-NEXT:    ret void
+;
+; AVX-LABEL: @sub_v16i32(
+; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX-NEXT:    [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT:    [[TMP5:%.*]] = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> [[TMP1]], <8 x i32> [[TMP3]])
+; AVX-NEXT:    [[TMP6:%.*]] = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> [[TMP2]], <8 x i32> [[TMP4]])
+; AVX-NEXT:    store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX-NEXT:    store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @sub_v16i32(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT:    [[TMP3:%.*]] = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP2]])
+; AVX512-NEXT:    store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+  %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+  %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+  %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+  %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+  %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+  %b0  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+  %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+  %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+  %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+  %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+  %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.usub.sat.i32(i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.usub.sat.i32(i32 %a1 , i32 %b1 )
+  %r2  = call i32 @llvm.usub.sat.i32(i32 %a2 , i32 %b2 )
+  %r3  = call i32 @llvm.usub.sat.i32(i32 %a3 , i32 %b3 )
+  %r4  = call i32 @llvm.usub.sat.i32(i32 %a4 , i32 %b4 )
+  %r5  = call i32 @llvm.usub.sat.i32(i32 %a5 , i32 %b5 )
+  %r6  = call i32 @llvm.usub.sat.i32(i32 %a6 , i32 %b6 )
+  %r7  = call i32 @llvm.usub.sat.i32(i32 %a7 , i32 %b7 )
+  %r8  = call i32 @llvm.usub.sat.i32(i32 %a8 , i32 %b8 )
+  %r9  = call i32 @llvm.usub.sat.i32(i32 %a9 , i32 %b9 )
+  %r10 = call i32 @llvm.usub.sat.i32(i32 %a10, i32 %b10)
+  %r11 = call i32 @llvm.usub.sat.i32(i32 %a11, i32 %b11)
+  %r12 = call i32 @llvm.usub.sat.i32(i32 %a12, i32 %b12)
+  %r13 = call i32 @llvm.usub.sat.i32(i32 %a13, i32 %b13)
+  %r14 = call i32 @llvm.usub.sat.i32(i32 %a14, i32 %b14)
+  %r15 = call i32 @llvm.usub.sat.i32(i32 %a15, i32 %b15)
+  store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+  store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+  store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+  store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+  store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+  store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+  store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+  store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+  store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+  store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+  store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+  store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+  store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+  store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+  store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+  store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+  ret void
+}
+
+define void @sub_v32i16() {
+; SSE-LABEL: @sub_v32i16(
+; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SSE-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SSE-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SSE-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SSE-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT:    ret void
+;
+; SLM-LABEL: @sub_v32i16(
+; SLM-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    [[TMP9:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP5]])
+; SLM-NEXT:    [[TMP10:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP2]], <8 x i16> [[TMP6]])
+; SLM-NEXT:    [[TMP11:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP7]])
+; SLM-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP8]])
+; SLM-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SLM-NEXT:    store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SLM-NEXT:    ret void
+;
+; AVX-LABEL: @sub_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @sub_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    [[TMP5:%.*]] = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP3]])
+; AVX512-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> [[TMP2]], <16 x i16> [[TMP4]])
+; AVX512-NEXT:    store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+  %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+  %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+  %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+  %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+  %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+  %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+  %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+  %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+  %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+  %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+  %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+  %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+  %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+  %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+  %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+  %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+  %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+  %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+  %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+  %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+  %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+  %b0  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+  %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+  %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+  %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+  %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+  %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+  %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+  %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+  %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+  %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+  %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+  %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+  %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+  %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+  %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+  %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+  %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+  %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+  %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+  %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+  %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+  %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.usub.sat.i16(i16 %a0 , i16 %b0 )
+  %r1  = call i16 @llvm.usub.sat.i16(i16 %a1 , i16 %b1 )
+  %r2  = call i16 @llvm.usub.sat.i16(i16 %a2 , i16 %b2 )
+  %r3  = call i16 @llvm.usub.sat.i16(i16 %a3 , i16 %b3 )
+  %r4  = call i16 @llvm.usub.sat.i16(i16 %a4 , i16 %b4 )
+  %r5  = call i16 @llvm.usub.sat.i16(i16 %a5 , i16 %b5 )
+  %r6  = call i16 @llvm.usub.sat.i16(i16 %a6 , i16 %b6 )
+  %r7  = call i16 @llvm.usub.sat.i16(i16 %a7 , i16 %b7 )
+  %r8  = call i16 @llvm.usub.sat.i16(i16 %a8 , i16 %b8 )
+  %r9  = call i16 @llvm.usub.sat.i16(i16 %a9 , i16 %b9 )
+  %r10 = call i16 @llvm.usub.sat.i16(i16 %a10, i16 %b10)
+  %r11 = call i16 @llvm.usub.sat.i16(i16 %a11, i16 %b11)
+  %r12 = call i16 @llvm.usub.sat.i16(i16 %a12, i16 %b12)
+  %r13 = call i16 @llvm.usub.sat.i16(i16 %a13, i16 %b13)
+  %r14 = call i16 @llvm.usub.sat.i16(i16 %a14, i16 %b14)
+  %r15 = call i16 @llvm.usub.sat.i16(i16 %a15, i16 %b15)
+  %r16 = call i16 @llvm.usub.sat.i16(i16 %a16, i16 %b16)
+  %r17 = call i16 @llvm.usub.sat.i16(i16 %a17, i16 %b17)
+  %r18 = call i16 @llvm.usub.sat.i16(i16 %a18, i16 %b18)
+  %r19 = call i16 @llvm.usub.sat.i16(i16 %a19, i16 %b19)
+  %r20 = call i16 @llvm.usub.sat.i16(i16 %a20, i16 %b20)
+  %r21 = call i16 @llvm.usub.sat.i16(i16 %a21, i16 %b21)
+  %r22 = call i16 @llvm.usub.sat.i16(i16 %a22, i16 %b22)
+  %r23 = call i16 @llvm.usub.sat.i16(i16 %a23, i16 %b23)
+  %r24 = call i16 @llvm.usub.sat.i16(i16 %a24, i16 %b24)
+  %r25 = call i16 @llvm.usub.sat.i16(i16 %a25, i16 %b25)
+  %r26 = call i16 @llvm.usub.sat.i16(i16 %a26, i16 %b26)
+  %r27 = call i16 @llvm.usub.sat.i16(i16 %a27, i16 %b27)
+  %r28 = call i16 @llvm.usub.sat.i16(i16 %a28, i16 %b28)
+  %r29 = call i16 @llvm.usub.sat.i16(i16 %a29, i16 %b29)
+  %r30 = call i16 @llvm.usub.sat.i16(i16 %a30, i16 %b30)
+  %r31 = call i16 @llvm.usub.sat.i16(i16 %a31, i16 %b31)
+  store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+  store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+  store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+  store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+  store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+  store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+  store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+  store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+  store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+  store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+  store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+  store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+  store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+  store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+  store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+  store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+  store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+  store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+  store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+  store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+  store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+  store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+  store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+  store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+  store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+  store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+  store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+  store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+  store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+  store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+  store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+  store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+  ret void
+}
+
+define void @sub_v64i8() {
+; CHECK-LABEL: @sub_v64i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP5]])
+; CHECK-NEXT:    [[TMP10:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP2]], <16 x i8> [[TMP6]])
+; CHECK-NEXT:    [[TMP11:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP3]], <16 x i8> [[TMP7]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP8]])
+; CHECK-NEXT:    store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT:    store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT:    ret void
+;
+  %a0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+  %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+  %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+  %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+  %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+  %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+  %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+  %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+  %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+  %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+  %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+  %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+  %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+  %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+  %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+  %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+  %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+  %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+  %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+  %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+  %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+  %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+  %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+  %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+  %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+  %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+  %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+  %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+  %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+  %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+  %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+  %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+  %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+  %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+  %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+  %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+  %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+  %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+  %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+  %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+  %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+  %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+  %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+  %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+  %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+  %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+  %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+  %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+  %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+  %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+  %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+  %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+  %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+  %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+  %b0  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+  %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+  %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+  %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+  %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+  %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+  %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+  %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+  %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+  %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+  %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+  %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+  %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+  %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+  %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+  %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+  %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+  %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+  %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+  %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+  %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+  %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+  %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+  %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+  %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+  %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+  %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+  %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+  %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+  %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+  %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+  %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+  %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+  %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+  %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+  %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+  %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+  %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+  %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+  %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+  %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+  %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+  %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+  %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+  %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+  %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+  %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+  %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+  %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+  %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+  %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+  %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+  %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+  %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.usub.sat.i8(i8 %a0 , i8 %b0 )
+  %r1  = call i8 @llvm.usub.sat.i8(i8 %a1 , i8 %b1 )
+  %r2  = call i8 @llvm.usub.sat.i8(i8 %a2 , i8 %b2 )
+  %r3  = call i8 @llvm.usub.sat.i8(i8 %a3 , i8 %b3 )
+  %r4  = call i8 @llvm.usub.sat.i8(i8 %a4 , i8 %b4 )
+  %r5  = call i8 @llvm.usub.sat.i8(i8 %a5 , i8 %b5 )
+  %r6  = call i8 @llvm.usub.sat.i8(i8 %a6 , i8 %b6 )
+  %r7  = call i8 @llvm.usub.sat.i8(i8 %a7 , i8 %b7 )
+  %r8  = call i8 @llvm.usub.sat.i8(i8 %a8 , i8 %b8 )
+  %r9  = call i8 @llvm.usub.sat.i8(i8 %a9 , i8 %b9 )
+  %r10 = call i8 @llvm.usub.sat.i8(i8 %a10, i8 %b10)
+  %r11 = call i8 @llvm.usub.sat.i8(i8 %a11, i8 %b11)
+  %r12 = call i8 @llvm.usub.sat.i8(i8 %a12, i8 %b12)
+  %r13 = call i8 @llvm.usub.sat.i8(i8 %a13, i8 %b13)
+  %r14 = call i8 @llvm.usub.sat.i8(i8 %a14, i8 %b14)
+  %r15 = call i8 @llvm.usub.sat.i8(i8 %a15, i8 %b15)
+  %r16 = call i8 @llvm.usub.sat.i8(i8 %a16, i8 %b16)
+  %r17 = call i8 @llvm.usub.sat.i8(i8 %a17, i8 %b17)
+  %r18 = call i8 @llvm.usub.sat.i8(i8 %a18, i8 %b18)
+  %r19 = call i8 @llvm.usub.sat.i8(i8 %a19, i8 %b19)
+  %r20 = call i8 @llvm.usub.sat.i8(i8 %a20, i8 %b20)
+  %r21 = call i8 @llvm.usub.sat.i8(i8 %a21, i8 %b21)
+  %r22 = call i8 @llvm.usub.sat.i8(i8 %a22, i8 %b22)
+  %r23 = call i8 @llvm.usub.sat.i8(i8 %a23, i8 %b23)
+  %r24 = call i8 @llvm.usub.sat.i8(i8 %a24, i8 %b24)
+  %r25 = call i8 @llvm.usub.sat.i8(i8 %a25, i8 %b25)
+  %r26 = call i8 @llvm.usub.sat.i8(i8 %a26, i8 %b26)
+  %r27 = call i8 @llvm.usub.sat.i8(i8 %a27, i8 %b27)
+  %r28 = call i8 @llvm.usub.sat.i8(i8 %a28, i8 %b28)
+  %r29 = call i8 @llvm.usub.sat.i8(i8 %a29, i8 %b29)
+  %r30 = call i8 @llvm.usub.sat.i8(i8 %a30, i8 %b30)
+  %r31 = call i8 @llvm.usub.sat.i8(i8 %a31, i8 %b31)
+  %r32 = call i8 @llvm.usub.sat.i8(i8 %a32, i8 %b32)
+  %r33 = call i8 @llvm.usub.sat.i8(i8 %a33, i8 %b33)
+  %r34 = call i8 @llvm.usub.sat.i8(i8 %a34, i8 %b34)
+  %r35 = call i8 @llvm.usub.sat.i8(i8 %a35, i8 %b35)
+  %r36 = call i8 @llvm.usub.sat.i8(i8 %a36, i8 %b36)
+  %r37 = call i8 @llvm.usub.sat.i8(i8 %a37, i8 %b37)
+  %r38 = call i8 @llvm.usub.sat.i8(i8 %a38, i8 %b38)
+  %r39 = call i8 @llvm.usub.sat.i8(i8 %a39, i8 %b39)
+  %r40 = call i8 @llvm.usub.sat.i8(i8 %a40, i8 %b40)
+  %r41 = call i8 @llvm.usub.sat.i8(i8 %a41, i8 %b41)
+  %r42 = call i8 @llvm.usub.sat.i8(i8 %a42, i8 %b42)
+  %r43 = call i8 @llvm.usub.sat.i8(i8 %a43, i8 %b43)
+  %r44 = call i8 @llvm.usub.sat.i8(i8 %a44, i8 %b44)
+  %r45 = call i8 @llvm.usub.sat.i8(i8 %a45, i8 %b45)
+  %r46 = call i8 @llvm.usub.sat.i8(i8 %a46, i8 %b46)
+  %r47 = call i8 @llvm.usub.sat.i8(i8 %a47, i8 %b47)
+  %r48 = call i8 @llvm.usub.sat.i8(i8 %a48, i8 %b48)
+  %r49 = call i8 @llvm.usub.sat.i8(i8 %a49, i8 %b49)
+  %r50 = call i8 @llvm.usub.sat.i8(i8 %a50, i8 %b50)
+  %r51 = call i8 @llvm.usub.sat.i8(i8 %a51, i8 %b51)
+  %r52 = call i8 @llvm.usub.sat.i8(i8 %a52, i8 %b52)
+  %r53 = call i8 @llvm.usub.sat.i8(i8 %a53, i8 %b53)
+  %r54 = call i8 @llvm.usub.sat.i8(i8 %a54, i8 %b54)
+  %r55 = call i8 @llvm.usub.sat.i8(i8 %a55, i8 %b55)
+  %r56 = call i8 @llvm.usub.sat.i8(i8 %a56, i8 %b56)
+  %r57 = call i8 @llvm.usub.sat.i8(i8 %a57, i8 %b57)
+  %r58 = call i8 @llvm.usub.sat.i8(i8 %a58, i8 %b58)
+  %r59 = call i8 @llvm.usub.sat.i8(i8 %a59, i8 %b59)
+  %r60 = call i8 @llvm.usub.sat.i8(i8 %a60, i8 %b60)
+  %r61 = call i8 @llvm.usub.sat.i8(i8 %a61, i8 %b61)
+  %r62 = call i8 @llvm.usub.sat.i8(i8 %a62, i8 %b62)
+  %r63 = call i8 @llvm.usub.sat.i8(i8 %a63, i8 %b63)
+  store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+  store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+  store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+  store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+  store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+  store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+  store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+  store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+  store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+  store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+  store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+  store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+  store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+  store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+  store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+  store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+  store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+  store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+  store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+  store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+  store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+  store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+  store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+  store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+  store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+  store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+  store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+  store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+  store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+  store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+  store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+  store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+  store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+  store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+  store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+  store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+  store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+  store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+  store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+  store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+  store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+  store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+  store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+  store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+  store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+  store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+  store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+  store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+  store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+  store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+  store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+  store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+  store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+  store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+  store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+  store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+  store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+  store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+  store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+  store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+  store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+  store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+  store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+  store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+  ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/atomics.ll b/test/Transforms/SLPVectorizer/X86/atomics.ll
index a48b793..c7f0549 100644
--- a/test/Transforms/SLPVectorizer/X86/atomics.ll
+++ b/test/Transforms/SLPVectorizer/X86/atomics.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S |FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
@@ -7,16 +8,19 @@
 ; The SLPVectorizer should not vectorize atomic stores and it should not
 ; schedule regular stores around atomic stores.
 
-; CHECK-LABEL: test
-; CHECK: store i32
-; CHECK: store atomic i32
-; CHECK: store i32
-; CHECK: store atomic i32
-; CHECK: store i32
-; CHECK: store atomic i32
-; CHECK: store i32
-; CHECK: store atomic i32
 define void @test() {
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 0), align 16
+; CHECK-NEXT:    store atomic i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 0) release, align 16
+; CHECK-NEXT:    store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1), align 4
+; CHECK-NEXT:    store atomic i32 1, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 1) release, align 4
+; CHECK-NEXT:    store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 2), align 8
+; CHECK-NEXT:    store atomic i32 2, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 2) release, align 8
+; CHECK-NEXT:    store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 3), align 4
+; CHECK-NEXT:    store atomic i32 3, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 3) release, align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 0), align 16
   store atomic i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 0) release, align 16
diff --git a/test/Transforms/SLPVectorizer/X86/bad_types.ll b/test/Transforms/SLPVectorizer/X86/bad_types.ll
index 98c2906..d229acd 100644
--- a/test/Transforms/SLPVectorizer/X86/bad_types.ll
+++ b/test/Transforms/SLPVectorizer/X86/bad_types.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -7,10 +8,17 @@
 ; Ensure we can handle x86_mmx values, which are primitive and can be bitcast
 ; to integer types but can't be put into a vector.
 ;
-; CHECK-LABEL: @test1
-; CHECK:         store i64
-; CHECK:         store i64
-; CHECK:         ret void
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_CAST:%.*]] = bitcast x86_mmx [[A:%.*]] to i64
+; CHECK-NEXT:    [[B_CAST:%.*]] = bitcast x86_mmx [[B:%.*]] to i64
+; CHECK-NEXT:    [[A_AND:%.*]] = and i64 [[A_CAST]], 42
+; CHECK-NEXT:    [[B_AND:%.*]] = and i64 [[B_CAST]], 42
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[PTR:%.*]], i32 1
+; CHECK-NEXT:    store i64 [[A_AND]], i64* [[PTR]]
+; CHECK-NEXT:    store i64 [[B_AND]], i64* [[GEP]]
+; CHECK-NEXT:    ret void
+;
 entry:
   %a.cast = bitcast x86_mmx %a to i64
   %b.cast = bitcast x86_mmx %b to i64
@@ -26,10 +34,21 @@
 ; Same as @test1 but using phi-input vectorization instead of store
 ; vectorization.
 ;
-; CHECK-LABEL: @test2
-; CHECK:         and i64
-; CHECK:         and i64
-; CHECK:         ret void
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN:%.*]], label [[EXIT:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[A_CAST:%.*]] = bitcast x86_mmx [[A:%.*]] to i64
+; CHECK-NEXT:    [[B_CAST:%.*]] = bitcast x86_mmx [[B:%.*]] to i64
+; CHECK-NEXT:    [[A_AND:%.*]] = and i64 [[A_CAST]], 42
+; CHECK-NEXT:    [[B_AND:%.*]] = and i64 [[B_CAST]], 42
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[A_PHI:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[A_AND]], [[IF_THEN]] ]
+; CHECK-NEXT:    [[B_PHI:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[B_AND]], [[IF_THEN]] ]
+; CHECK-NEXT:    tail call void @f(i64 [[A_PHI]], i64 [[B_PHI]])
+; CHECK-NEXT:    ret void
+;
 entry:
   br i1 undef, label %if.then, label %exit
 
@@ -50,9 +69,26 @@
 define i8 @test3(i8 *%addr) {
 ; Check that we do not vectorize types that are padded to a bigger type.
 ;
-; CHECK-LABEL: @test3
-; CHECK-NOT:   <4 x i2>
-; CHECK:       ret i8
+; CHECK-LABEL: @test3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A:%.*]] = bitcast i8* [[ADDR:%.*]] to i2*
+; CHECK-NEXT:    [[A0:%.*]] = getelementptr inbounds i2, i2* [[A]], i64 0
+; CHECK-NEXT:    [[A1:%.*]] = getelementptr inbounds i2, i2* [[A]], i64 1
+; CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds i2, i2* [[A]], i64 2
+; CHECK-NEXT:    [[A3:%.*]] = getelementptr inbounds i2, i2* [[A]], i64 3
+; CHECK-NEXT:    [[L0:%.*]] = load i2, i2* [[A0]], align 1
+; CHECK-NEXT:    [[L1:%.*]] = load i2, i2* [[A1]], align 1
+; CHECK-NEXT:    [[L2:%.*]] = load i2, i2* [[A2]], align 1
+; CHECK-NEXT:    [[L3:%.*]] = load i2, i2* [[A3]], align 1
+; CHECK-NEXT:    br label [[BB1:%.*]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[P0:%.*]] = phi i2 [ [[L0]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[P1:%.*]] = phi i2 [ [[L1]], [[ENTRY]] ]
+; CHECK-NEXT:    [[P2:%.*]] = phi i2 [ [[L2]], [[ENTRY]] ]
+; CHECK-NEXT:    [[P3:%.*]] = phi i2 [ [[L3]], [[ENTRY]] ]
+; CHECK-NEXT:    [[R:%.*]] = zext i2 [[P2]] to i8
+; CHECK-NEXT:    ret i8 [[R]]
+;
 entry:
   %a = bitcast i8* %addr to i2*
   %a0 = getelementptr inbounds i2, i2* %a, i64 0
diff --git a/test/Transforms/SLPVectorizer/X86/barriercall.ll b/test/Transforms/SLPVectorizer/X86/barriercall.ll
index 382a43f..7378b8b 100644
--- a/test/Transforms/SLPVectorizer/X86/barriercall.ll
+++ b/test/Transforms/SLPVectorizer/X86/barriercall.ll
@@ -1,12 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
-;CHECK-LABEL: @foo(
-;CHECK: store <4 x i32>
-;CHECK: ret
 define i32 @foo(i32* nocapture %A, i32 %n) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @bar()
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[N:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[N]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[N]], i32 2
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[N]], i32 3
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], <i32 5, i32 9, i32 3, i32 10>
+; CHECK-NEXT:    [[TMP5:%.*]] = shl <4 x i32> [[TMP3]], <i32 5, i32 9, i32 3, i32 10>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> <i32 9, i32 9, i32 9, i32 9>, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %call = tail call i32 (...) @bar() #2
   %mul = mul nsw i32 %n, 5
diff --git a/test/Transforms/SLPVectorizer/X86/call.ll b/test/Transforms/SLPVectorizer/X86/call.ll
index 8397d34..c93397c 100644
--- a/test/Transforms/SLPVectorizer/X86/call.ll
+++ b/test/Transforms/SLPVectorizer/X86/call.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-999 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -13,10 +14,10 @@
 
 define void @sin_libm(double* %a, double* %b) {
 ; CHECK-LABEL: @sin_libm(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* %a to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP2]])
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* %b to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
 ; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -33,10 +34,10 @@
 
 define void @cos_libm(double* %a, double* %b) {
 ; CHECK-LABEL: @cos_libm(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* %a to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.cos.v2f64(<2 x double> [[TMP2]])
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* %b to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
 ; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -53,10 +54,10 @@
 
 define void @pow_libm(double* %a, double* %b) {
 ; CHECK-LABEL: @pow_libm(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* %a to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> [[TMP2]], <2 x double> [[TMP2]])
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* %b to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
 ; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -73,10 +74,10 @@
 
 define void @exp_libm(double* %a, double* %b) {
 ; CHECK-LABEL: @exp_libm(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* %a to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.exp2.v2f64(<2 x double> [[TMP2]])
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* %b to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
 ; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -91,15 +92,15 @@
   ret void
 }
 
-; No fast-math-flags are required to convert sqrt library calls to an intrinsic. 
+; No fast-math-flags are required to convert sqrt library calls to an intrinsic.
 ; We just need to know that errno is not set (readnone).
 
 define void @sqrt_libm_no_errno(double* %a, double* %b) {
 ; CHECK-LABEL: @sqrt_libm_no_errno(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* %a to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP2]])
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* %b to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
 ; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -116,18 +117,18 @@
 
 ; The sqrt intrinsic does not set errno, but a non-constant sqrt call might, so this can't vectorize.
 ; The nnan on the call does not matter because there's no guarantee in the C standard that a negative
-; input would result in a nan output ("On a domain error, the function returns an 
+; input would result in a nan output ("On a domain error, the function returns an
 ; implementation-defined value.")
 
 define void @sqrt_libm_errno(double* %a, double* %b) {
 ; CHECK-LABEL: @sqrt_libm_errno(
-; CHECK-NEXT:    [[A0:%.*]] = load double, double* %a, align 8
-; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds double, double* %a, i64 1
+; CHECK-NEXT:    [[A0:%.*]] = load double, double* [[A:%.*]], align 8
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
 ; CHECK-NEXT:    [[A1:%.*]] = load double, double* [[IDX1]], align 8
 ; CHECK-NEXT:    [[SQRT1:%.*]] = tail call nnan double @sqrt(double [[A0]]) #2
 ; CHECK-NEXT:    [[SQRT2:%.*]] = tail call nnan double @sqrt(double [[A1]]) #2
-; CHECK-NEXT:    store double [[SQRT1]], double* %b, align 8
-; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds double, double* %b, i64 1
+; CHECK-NEXT:    store double [[SQRT1]], double* [[B:%.*]], align 8
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
 ; CHECK-NEXT:    store double [[SQRT2]], double* [[IDX2]], align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -145,13 +146,13 @@
 ; Negative test case
 define void @round_custom(i64* %a, i64* %b) {
 ; CHECK-LABEL: @round_custom(
-; CHECK-NEXT:    [[A0:%.*]] = load i64, i64* %a, align 8
-; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i64, i64* %a, i64 1
+; CHECK-NEXT:    [[A0:%.*]] = load i64, i64* [[A:%.*]], align 8
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 1
 ; CHECK-NEXT:    [[A1:%.*]] = load i64, i64* [[IDX1]], align 8
 ; CHECK-NEXT:    [[ROUND1:%.*]] = tail call i64 @round(i64 [[A0]]) #3
 ; CHECK-NEXT:    [[ROUND2:%.*]] = tail call i64 @round(i64 [[A1]]) #3
-; CHECK-NEXT:    store i64 [[ROUND1]], i64* %b, align 8
-; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i64, i64* %b, i64 1
+; CHECK-NEXT:    store i64 [[ROUND1]], i64* [[B:%.*]], align 8
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 1
 ; CHECK-NEXT:    store i64 [[ROUND2]], i64* [[IDX2]], align 8
 ; CHECK-NEXT:    ret void
 ;
diff --git a/test/Transforms/SLPVectorizer/X86/commutativity.ll b/test/Transforms/SLPVectorizer/X86/commutativity.ll
index 2798ccb..9af59ef 100644
--- a/test/Transforms/SLPVectorizer/X86/commutativity.ll
+++ b/test/Transforms/SLPVectorizer/X86/commutativity.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -slp-vectorizer < %s -S | FileCheck %s
 
 ; Verify that the SLP vectorizer is able to figure out that commutativity
@@ -16,9 +17,31 @@
 ; Check that we correctly detect a splat/broadcast by leveraging the
 ; commutativity property of `xor`.
 
-; CHECK-LABEL:  @splat
-; CHECK:  store <16 x i8>
 define void @splat(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: @splat(
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <16 x i8> undef, i8 [[C:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[C]], i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[C]], i32 2
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[C]], i32 3
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[C]], i32 4
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[C]], i32 5
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[C]], i32 6
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[C]], i32 7
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[C]], i32 8
+; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[C]], i32 9
+; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[C]], i32 10
+; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[C]], i32 11
+; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[C]], i32 12
+; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[C]], i32 13
+; CHECK-NEXT:    [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[C]], i32 14
+; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <16 x i8> [[TMP15]], i8 [[C]], i32 15
+; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <2 x i8> undef, i8 [[A:%.*]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <2 x i8> [[TMP17]], i8 [[B:%.*]], i32 1
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x i8> [[TMP18]], <2 x i8> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+; CHECK-NEXT:    [[TMP19:%.*]] = xor <16 x i8> [[TMP16]], [[SHUFFLE]]
+; CHECK-NEXT:    store <16 x i8> [[TMP19]], <16 x i8>* bitcast ([32 x i8]* @cle to <16 x i8>*), align 16
+; CHECK-NEXT:    ret void
+;
   %1 = xor i8 %c, %a
   store i8 %1, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @cle, i64 0, i64 0), align 16
   %2 = xor i8 %a, %c
@@ -59,9 +82,24 @@
 ; Check that we correctly detect that we can have the same opcode on one side by
 ; leveraging the commutativity property of `xor`.
 
-; CHECK-LABEL:  @same_opcode_on_one_side
-; CHECK:  store <4 x i32>
 define void @same_opcode_on_one_side(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @same_opcode_on_one_side(
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i32> undef, i32 [[C:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[C]], i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[C]], i32 2
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[C]], i32 3
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[A]], i32 1
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[A]], i32 2
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[A]], i32 3
+; CHECK-NEXT:    [[TMP9:%.*]] = add <4 x i32> [[TMP4]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[B:%.*]], i32 1
+; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[C]], i32 2
+; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[A]], i32 3
+; CHECK-NEXT:    [[TMP13:%.*]] = xor <4 x i32> [[TMP12]], [[TMP9]]
+; CHECK-NEXT:    store <4 x i32> [[TMP13]], <4 x i32>* bitcast ([32 x i32]* @cle32 to <4 x i32>*), align 16
+; CHECK-NEXT:    ret void
+;
   %add1 = add i32 %c, %a
   %add2 = add i32 %c, %a
   %add3 = add i32 %a, %c
diff --git a/test/Transforms/SLPVectorizer/X86/consecutive-access.ll b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
index aa682fc..f394dc7 100644
--- a/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
+++ b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
@@ -11,10 +12,38 @@
 ; A[3*i], A[3*i+1] and A[3*i+2] are consecutive, but in the future
 ; that will hopefully be fixed. For now, check that this isn't
 ; vectorized.
-; CHECK-LABEL: foo_3double
-; CHECK-NOT: x double>
 ; Function Attrs: nounwind ssp uwtable
 define void @foo_3double(i32 %u) #0 {
+; CHECK-LABEL: @foo_3double(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 3
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[ADD5:%.*]] = fadd double [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    store double [[ADD5]], double* [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[MUL]], 1
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[ARRAYIDX13]], align 8
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load double, double* [[ARRAYIDX17]], align 8
+; CHECK-NEXT:    [[ADD18:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; CHECK-NEXT:    store double [[ADD18]], double* [[ARRAYIDX13]], align 8
+; CHECK-NEXT:    [[ADD24:%.*]] = add nsw i32 [[MUL]], 2
+; CHECK-NEXT:    [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM25]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load double, double* [[ARRAYIDX26]], align 8
+; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM25]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load double, double* [[ARRAYIDX30]], align 8
+; CHECK-NEXT:    [[ADD31:%.*]] = fadd double [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    store double [[ADD31]], double* [[ARRAYIDX26]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %u.addr = alloca i32, align 4
   store i32 %u, i32* %u.addr, align 4
@@ -48,10 +77,29 @@
 ; SCEV should be able to tell that accesses A[C1 + C2*i], A[C1 + 1 + C2*i], ...
 ; are consecutive, if C2 is a power of 2, and C2 > C1 > 0.
 ; Thus, the following code should be vectorized.
-; CHECK-LABEL: foo_2double
-; CHECK: x double>
 ; Function Attrs: nounwind ssp uwtable
 define void @foo_2double(i32 %u) #0 {
+; CHECK-LABEL: @foo_2double(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 2
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[MUL]], 1
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %u.addr = alloca i32, align 4
   store i32 %u, i32* %u.addr, align 4
@@ -75,10 +123,37 @@
 }
 
 ; Similar to the previous test, but with different datatype.
-; CHECK-LABEL: foo_4float
-; CHECK: x float>
 ; Function Attrs: nounwind ssp uwtable
 define void @foo_4float(i32 %u) #0 {
+; CHECK-LABEL: @foo_4float(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 4
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[MUL]], 1
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[ADD24:%.*]] = add nsw i32 [[MUL]], 2
+; CHECK-NEXT:    [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM25]]
+; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM25]]
+; CHECK-NEXT:    [[ADD37:%.*]] = add nsw i32 [[MUL]], 3
+; CHECK-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[ADD37]] to i64
+; CHECK-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM38]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT:    [[ARRAYIDX43:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM38]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[ARRAYIDX4]] to <4 x float>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP4]], <4 x float>* [[TMP5]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %u.addr = alloca i32, align 4
   store i32 %u, i32* %u.addr, align 4
@@ -118,10 +193,51 @@
 }
 
 ; Similar to the previous tests, but now we are dealing with AddRec SCEV.
-; CHECK-LABEL: foo_loop
-; CHECK: x double>
 ; Function Attrs: nounwind ssp uwtable
 define i32 @foo_loop(double* %A, i32 %n) #0 {
+; CHECK-LABEL: @foo_loop(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
+; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[SUM:%.*]] = alloca double, align 8
+; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store double* [[A:%.*]], double** [[A_ADDR]], align 8
+; CHECK-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
+; CHECK-NEXT:    store double 0.000000e+00, double* [[SUM]], align 8
+; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N]]
+; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0]], 2
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[MUL]], 1
+; CHECK-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[ADD]] to i64
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM3]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
+; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
+; CHECK-NEXT:    store double [[ADD7]], double* [[SUM]], align 8
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[TMP0]], 1
+; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
+; CHECK:       for.cond.for.end_crit_edge:
+; CHECK-NEXT:    [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ]
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
 entry:
   %A.addr = alloca double*, align 8
   %n.addr = alloca i32, align 4
@@ -170,11 +286,30 @@
 
 ; Similar to foo_2double but with a non-power-of-2 factor and potential
 ; wrapping (both indices wrap or both don't at the same time)
-; CHECK-LABEL: foo_2double_non_power_of_2
-; CHECK: load <2 x double>
-; CHECK: load <2 x double>
 ; Function Attrs: nounwind ssp uwtable
 define void @foo_2double_non_power_of_2(i32 %u) #0 {
+; CHECK-LABEL: @foo_2double_non_power_of_2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[U]], 6
+; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[MUL]], 6
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD6]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD7:%.*]] = add i32 [[MUL]], 7
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD7]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %u.addr = alloca i32, align 4
   store i32 %u, i32* %u.addr, align 4
@@ -199,11 +334,30 @@
 }
 
 ; Similar to foo_2double_non_power_of_2 but with zext's instead of sext's
-; CHECK-LABEL: foo_2double_non_power_of_2_zext
-; CHECK: load <2 x double>
-; CHECK: load <2 x double>
 ; Function Attrs: nounwind ssp uwtable
 define void @foo_2double_non_power_of_2_zext(i32 %u) #0 {
+; CHECK-LABEL: @foo_2double_non_power_of_2_zext(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[U]], 6
+; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[MUL]], 6
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[ADD6]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD7:%.*]] = add i32 [[MUL]], 7
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = zext i32 [[ADD7]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %u.addr = alloca i32, align 4
   store i32 %u, i32* %u.addr, align 4
@@ -230,10 +384,52 @@
 ; Similar to foo_2double_non_power_of_2, but now we are dealing with AddRec SCEV.
 ; Alternatively, this is like foo_loop, but with a non-power-of-2 factor and
 ; potential wrapping (both indices wrap or both don't at the same time)
-; CHECK-LABEL: foo_loop_non_power_of_2
-; CHECK: <2 x double>
 ; Function Attrs: nounwind ssp uwtable
 define i32 @foo_loop_non_power_of_2(double* %A, i32 %n) #0 {
+; CHECK-LABEL: @foo_loop_non_power_of_2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
+; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[SUM:%.*]] = alloca double, align 8
+; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store double* [[A:%.*]], double** [[A_ADDR]], align 8
+; CHECK-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
+; CHECK-NEXT:    store double 0.000000e+00, double* [[SUM]], align 8
+; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N]]
+; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP0]], 12
+; CHECK-NEXT:    [[ADD_5:%.*]] = add i32 [[MUL]], 5
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD_5]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD_6:%.*]] = add i32 [[MUL]], 6
+; CHECK-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[ADD_6]] to i64
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM3]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
+; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
+; CHECK-NEXT:    store double [[ADD7]], double* [[SUM]], align 8
+; CHECK-NEXT:    [[INC]] = add i32 [[TMP0]], 1
+; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
+; CHECK:       for.cond.for.end_crit_edge:
+; CHECK-NEXT:    [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ]
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
 entry:
   %A.addr = alloca double*, align 8
   %n.addr = alloca i32, align 4
@@ -299,9 +495,32 @@
 ;
 ; Make sure we are able to vectorize this from now on:
 ;
-; CHECK-LABEL: @bar
-; CHECK: load <2 x double>
 define double @bar(double* nocapture readonly %a, i32 %n) local_unnamed_addr #0 {
+; CHECK-LABEL: @bar(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP15:%.*]] = icmp eq i32 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x double> [[TMP0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP0]], i32 1
+; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret double [[MUL]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[I_018:%.*]] = phi i32 [ [[ADD5:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = phi <2 x double> [ [[TMP6]], [[FOR_BODY]] ], [ zeroinitializer, [[ENTRY]] ]
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I_018]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD1:%.*]] = or i32 [[I_018]], 1
+; CHECK-NEXT:    [[IDXPROM2:%.*]] = zext i32 [[ADD1]] to i64
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP6]] = fadd <2 x double> [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[ADD5]] = add i32 [[I_018]], 2
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD5]], [[N]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
+;
 entry:
   %cmp15 = icmp eq i32 %n, 0
   br i1 %cmp15, label %for.cond.cleanup, label %for.body
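The non-power-of-2 tests above (and @bar, which the file says should vectorize from now on) all pin the same transformation: the SLP vectorizer fuses two adjacent scalar double loads, fadds, and stores into single <2 x double> operations. A minimal sketch of that pattern under -slp-vectorizer follows; the function name is hypothetical and not taken from any test in this diff.

; Scalar input: two adjacent lanes of %a and %b are added element-wise.
define void @sketch(double* %a, double* %b) {
entry:
  %a0 = load double, double* %a, align 8
  %agep1 = getelementptr inbounds double, double* %a, i64 1
  %a1 = load double, double* %agep1, align 8
  %b0 = load double, double* %b, align 8
  %bgep1 = getelementptr inbounds double, double* %b, i64 1
  %b1 = load double, double* %bgep1, align 8
  %s0 = fadd double %a0, %b0
  %s1 = fadd double %a1, %b1
  store double %s0, double* %a, align 8
  store double %s1, double* %agep1, align 8
  ret void
}
; Expected shape after SLP vectorization (cf. the CHECK-NEXT lines above):
;   %va = load <2 x double>, <2 x double>* <bitcast of %a>, align 8
;   %vb = load <2 x double>, <2 x double>* <bitcast of %b>, align 8
;   %vs = fadd <2 x double> %va, %vb
;   store <2 x double> %vs, <2 x double>* <bitcast of %a>, align 8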
diff --git a/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
index ecae70e..060cb05 100644
--- a/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
+++ b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
@@ -1,13 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; We will keep trying to vectorize the basic block even if we have already found a vectorized store.
-; CHECK: test1
-; CHECK: store <2 x double>
-; CHECK: ret
 define void @test1(double* %a, double* %b, double* %c, double* %d) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[B]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C:%.*]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[C]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[A]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast double* [[B]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[TMP8]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = mul <4 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast double* [[D:%.*]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP10]], <4 x i32>* [[TMP11]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
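Note the RUN-line pattern repeated in the crash_* files below: the old lines ran opt without piping into FileCheck, so those tests only proved the pass did not crash, while the diff appends "| FileCheck %s" and autogenerates full assertions. As a rough sketch of how such checks are regenerated (script path and flag shown as commonly used at the time, not verified against this exact revision):

; From the llvm source root, assuming a built opt in build/bin:
;   python utils/update_test_checks.py --opt-binary=build/bin/opt \
;     test/Transforms/SLPVectorizer/X86/crash_7zip.ll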
diff --git a/test/Transforms/SLPVectorizer/X86/crash_7zip.ll b/test/Transforms/SLPVectorizer/X86/crash_7zip.ll
index 54e5d5a..e7bff49 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_7zip.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_7zip.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -7,6 +8,32 @@
 %struct._CLzmaProps.0.27.54.81.102.123.144.165.180.195.228.258.333 = type { i32, i32, i32, i32 }
 
 define fastcc void @LzmaDec_DecodeReal2(%struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p) {
+; CHECK-LABEL: @LzmaDec_DecodeReal2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RANGE20_I:%.*]] = getelementptr inbounds [[STRUCT_CLZMADEC_1_28_55_82_103_124_145_166_181_196_229_259_334:%.*]], %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* [[P:%.*]], i64 0, i32 4
+; CHECK-NEXT:    [[CODE21_I:%.*]] = getelementptr inbounds [[STRUCT_CLZMADEC_1_28_55_82_103_124_145_166_181_196_229_259_334]], %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* [[P]], i64 0, i32 5
+; CHECK-NEXT:    br label [[DO_BODY66_I:%.*]]
+; CHECK:       do.body66.i:
+; CHECK-NEXT:    [[RANGE_2_I:%.*]] = phi i32 [ [[RANGE_4_I:%.*]], [[DO_COND_I:%.*]] ], [ undef, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[CODE_2_I:%.*]] = phi i32 [ [[CODE_4_I:%.*]], [[DO_COND_I]] ], [ undef, [[ENTRY]] ]
+; CHECK-NEXT:    [[DOTRANGE_2_I:%.*]] = select i1 undef, i32 undef, i32 [[RANGE_2_I]]
+; CHECK-NEXT:    [[DOTCODE_2_I:%.*]] = select i1 undef, i32 undef, i32 [[CODE_2_I]]
+; CHECK-NEXT:    br i1 undef, label [[DO_COND_I]], label [[IF_ELSE_I:%.*]]
+; CHECK:       if.else.i:
+; CHECK-NEXT:    [[SUB91_I:%.*]] = sub i32 [[DOTRANGE_2_I]], undef
+; CHECK-NEXT:    [[SUB92_I:%.*]] = sub i32 [[DOTCODE_2_I]], undef
+; CHECK-NEXT:    br label [[DO_COND_I]]
+; CHECK:       do.cond.i:
+; CHECK-NEXT:    [[RANGE_4_I]] = phi i32 [ [[SUB91_I]], [[IF_ELSE_I]] ], [ undef, [[DO_BODY66_I]] ]
+; CHECK-NEXT:    [[CODE_4_I]] = phi i32 [ [[SUB92_I]], [[IF_ELSE_I]] ], [ [[DOTCODE_2_I]], [[DO_BODY66_I]] ]
+; CHECK-NEXT:    br i1 undef, label [[DO_BODY66_I]], label [[DO_END1006_I:%.*]]
+; CHECK:       do.end1006.i:
+; CHECK-NEXT:    [[DOTRANGE_4_I:%.*]] = select i1 undef, i32 undef, i32 [[RANGE_4_I]]
+; CHECK-NEXT:    [[DOTCODE_4_I:%.*]] = select i1 undef, i32 undef, i32 [[CODE_4_I]]
+; CHECK-NEXT:    store i32 [[DOTRANGE_4_I]], i32* [[RANGE20_I]], align 4
+; CHECK-NEXT:    store i32 [[DOTCODE_4_I]], i32* [[CODE21_I]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %range20.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 4
   %code21.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 5
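The autogenerated assertions lean on two FileCheck features worth knowing when reading these hunks: CHECK-LABEL splits the output at each function signature so a failure in one function cannot accidentally match text from another (hence the upgrade from bare names to the "@name(" form), and "[[NAME:%.*]]" captures an SSA value name on first use so every later "[[NAME]]" must match the same string. A minimal sketch, not one of the tests in this diff:

; CHECK-LABEL: @capture_sketch(
; CHECK:       [[SUM:%.*]] = add i32
; CHECK-NEXT:  store i32 [[SUM]],
define void @capture_sketch(i32 %x, i32* %p) {
  %s = add i32 %x, 1
  store i32 %s, i32* %p
  ret void
}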
diff --git a/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll b/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
index 9046c35..eec5373 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-darwin13.3.0"
@@ -6,6 +7,28 @@
 @a = common global double 0.000000e+00, align 8
 
 define i32 @fn1() {
+; CHECK-LABEL: @fn1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[INIT:%.*]] = load double, double* @a, align 8
+; CHECK-NEXT:    br label [[LOOP:%.*]]
+; CHECK:       loop:
+; CHECK-NEXT:    [[PHI:%.*]] = phi double [ [[ADD2:%.*]], [[LOOP]] ], [ [[INIT]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[POSTADD1_PHI:%.*]] = phi double [ [[POSTADD1:%.*]], [[LOOP]] ], [ [[INIT]], [[ENTRY]] ]
+; CHECK-NEXT:    [[POSTADD2_PHI:%.*]] = phi double [ [[POSTADD2:%.*]], [[LOOP]] ], [ [[INIT]], [[ENTRY]] ]
+; CHECK-NEXT:    [[ADD1:%.*]] = fadd double [[POSTADD1_PHI]], undef
+; CHECK-NEXT:    [[ADD2]] = fadd double [[POSTADD2_PHI]], [[PHI]]
+; CHECK-NEXT:    [[MUL2:%.*]] = fmul double [[ADD2]], 0.000000e+00
+; CHECK-NEXT:    [[BINARYOP_B:%.*]] = fadd double [[POSTADD1_PHI]], [[MUL2]]
+; CHECK-NEXT:    [[MUL1:%.*]] = fmul double [[ADD1]], 0.000000e+00
+; CHECK-NEXT:    [[TMP:%.*]] = fadd double [[POSTADD2_PHI]], 0.000000e+00
+; CHECK-NEXT:    [[BINARY_V:%.*]] = fadd double [[MUL1]], [[BINARYOP_B]]
+; CHECK-NEXT:    [[POSTADD1]] = fadd double [[BINARY_V]], 0.000000e+00
+; CHECK-NEXT:    [[POSTADD2]] = fadd double [[TMP]], 1.000000e+00
+; CHECK-NEXT:    [[TOBOOL:%.*]] = fcmp une double [[POSTADD1]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[TOBOOL]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK:       exit:
+; CHECK-NEXT:    ret i32 1
+;
 entry:
   %init = load double, double* @a, align 8
   br label %loop
diff --git a/test/Transforms/SLPVectorizer/X86/crash_bullet.ll b/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
index 1bad671..a1a3f50 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -6,6 +7,32 @@
 %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960" = type { i32, i32 }
 
 define void @_ZN23btGeneric6DofConstraint8getInfo1EPN17btTypedConstraint17btConstraintInfo1E(%"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* nocapture %info) {
+; CHECK-LABEL: @_ZN23btGeneric6DofConstraint8getInfo1EPN17btTypedConstraint17btConstraintInfo1E(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    ret void
+; CHECK:       if.else:
+; CHECK-NEXT:    [[M_NUMCONSTRAINTROWS4:%.*]] = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* [[INFO:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[NUB5:%.*]] = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* [[INFO]], i64 0, i32 1
+; CHECK-NEXT:    br i1 undef, label [[LAND_LHS_TRUE_I_1:%.*]], label [[IF_THEN7_1:%.*]]
+; CHECK:       land.lhs.true.i.1:
+; CHECK-NEXT:    br i1 undef, label [[FOR_INC_1:%.*]], label [[IF_THEN7_1]]
+; CHECK:       if.then7.1:
+; CHECK-NEXT:    [[INC_1:%.*]] = add nsw i32 0, 1
+; CHECK-NEXT:    store i32 [[INC_1]], i32* [[M_NUMCONSTRAINTROWS4]], align 4
+; CHECK-NEXT:    [[DEC_1:%.*]] = add nsw i32 6, -1
+; CHECK-NEXT:    store i32 [[DEC_1]], i32* [[NUB5]], align 4
+; CHECK-NEXT:    br label [[FOR_INC_1]]
+; CHECK:       for.inc.1:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[DEC_1]], [[IF_THEN7_1]] ], [ 6, [[LAND_LHS_TRUE_I_1]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi i32 [ [[INC_1]], [[IF_THEN7_1]] ], [ 0, [[LAND_LHS_TRUE_I_1]] ]
+; CHECK-NEXT:    [[INC_2:%.*]] = add nsw i32 [[TMP1]], 1
+; CHECK-NEXT:    store i32 [[INC_2]], i32* [[M_NUMCONSTRAINTROWS4]], align 4
+; CHECK-NEXT:    [[DEC_2:%.*]] = add nsw i32 [[TMP0]], -1
+; CHECK-NEXT:    store i32 [[DEC_2]], i32* [[NUB5]], align 4
+; CHECK-NEXT:    unreachable
+;
 entry:
   br i1 undef, label %if.else, label %if.then
 
@@ -42,6 +69,30 @@
 %class.btVector4.7.32.67.92.117.142.177.187.262.282.331 = type { %class.btVector3.5.30.65.90.115.140.175.185.260.280.330 }
 
 define void @_ZN30GIM_TRIANGLE_CALCULATION_CACHE18triangle_collisionERK9btVector3S2_S2_fS2_S2_S2_fR25GIM_TRIANGLE_CONTACT_DATA(%class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this) {
+; CHECK-LABEL: @_ZN30GIM_TRIANGLE_CALCULATION_CACHE18triangle_collisionERK9btVector3S2_S2_fS2_S2_S2_fR25GIM_TRIANGLE_CONTACT_DATA(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [[CLASS_GIM_TRIANGLE_CALCULATION_CACHE_9_34_69_94_119_144_179_189_264_284_332:%.*]], %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* [[THIS:%.*]], i64 0, i32 2, i64 0, i32 0, i64 1
+; CHECK-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds [[CLASS_GIM_TRIANGLE_CALCULATION_CACHE_9_34_69_94_119_144_179_189_264_284_332]], %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* [[THIS]], i64 0, i32 2, i64 0, i32 0, i64 2
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[ARRAYIDX36]], align 4
+; CHECK-NEXT:    [[ADD587:%.*]] = fadd float undef, undef
+; CHECK-NEXT:    [[SUB600:%.*]] = fsub float [[ADD587]], undef
+; CHECK-NEXT:    store float [[SUB600]], float* undef, align 4
+; CHECK-NEXT:    [[SUB613:%.*]] = fsub float [[ADD587]], [[SUB600]]
+; CHECK-NEXT:    store float [[SUB613]], float* [[ARRAYIDX26]], align 4
+; CHECK-NEXT:    [[ADD626:%.*]] = fadd float [[TMP0]], undef
+; CHECK-NEXT:    [[SUB639:%.*]] = fsub float [[ADD626]], undef
+; CHECK-NEXT:    [[SUB652:%.*]] = fsub float [[ADD626]], [[SUB639]]
+; CHECK-NEXT:    store float [[SUB652]], float* [[ARRAYIDX36]], align 4
+; CHECK-NEXT:    br i1 undef, label [[IF_ELSE1609:%.*]], label [[IF_THEN1595:%.*]]
+; CHECK:       if.then1595:
+; CHECK-NEXT:    br i1 undef, label [[RETURN:%.*]], label [[FOR_BODY_LR_PH_I_I1702:%.*]]
+; CHECK:       for.body.lr.ph.i.i1702:
+; CHECK-NEXT:    unreachable
+; CHECK:       if.else1609:
+; CHECK-NEXT:    unreachable
+; CHECK:       return:
+; CHECK-NEXT:    ret void
+;
 entry:
   %arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
   %arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
@@ -71,6 +122,40 @@
 }
 
 define void @_Z8dBoxBox2RK9btVector3PKfS1_S1_S3_S1_RS_PfPiiP12dContactGeomiRN36btDiscreteCollisionDetectorInterface6ResultE() {
+; CHECK-LABEL: @_Z8dBoxBox2RK9btVector3PKfS1_S1_S3_S1_RS_PfPiiP12dContactGeomiRN36btDiscreteCollisionDetectorInterface6ResultE(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[RETURN:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END111:%.*]]
+; CHECK:       if.end111:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END136:%.*]]
+; CHECK:       if.end136:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END162:%.*]]
+; CHECK:       if.end162:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END189:%.*]]
+; CHECK:       if.end189:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END216:%.*]]
+; CHECK:       if.end216:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN218:%.*]], label [[IF_END225:%.*]]
+; CHECK:       if.then218:
+; CHECK-NEXT:    br label [[IF_END225]]
+; CHECK:       if.end225:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END248:%.*]]
+; CHECK:       if.end248:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END304:%.*]]
+; CHECK:       if.end304:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END361:%.*]]
+; CHECK:       if.end361:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN370:%.*]], label [[IF_END395:%.*]]
+; CHECK:       if.then370:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN374:%.*]], label [[IF_END395]]
+; CHECK:       if.then374:
+; CHECK-NEXT:    br label [[IF_END395]]
+; CHECK:       if.end395:
+; CHECK-NEXT:    unreachable
+; CHECK:       return:
+; CHECK-NEXT:    ret void
+;
 entry:
   %add8.i2343 = fadd float undef, undef
   %add8.i2381 = fadd float undef, undef
diff --git a/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll b/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll
index 8102769..7ec4fc1 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -7,6 +8,62 @@
 
 ; Function Attrs: ssp uwtable
 define void @_ZN11HullLibrary15CleanupVerticesEjPK9btVector3jRjPS0_fRS0_(%class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices) #0 align 2 {
+; CHECK-LABEL: @_ZN11HullLibrary15CleanupVerticesEjPK9btVector3jRjPS0_fRS0_(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[RETURN:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN17_1:%.*]], label [[IF_END22_1:%.*]]
+; CHECK:       for.end36:
+; CHECK-NEXT:    br label [[FOR_BODY144:%.*]]
+; CHECK:       for.body144:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END227:%.*]], label [[FOR_BODY144]]
+; CHECK:       for.end227:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END271:%.*]], label [[FOR_BODY233:%.*]]
+; CHECK:       for.body233:
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY233]], label [[FOR_END271]]
+; CHECK:       for.end271:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi float [ 0x47EFFFFFE0000000, [[FOR_END227]] ], [ undef, [[FOR_BODY233]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi float [ 0x47EFFFFFE0000000, [[FOR_END227]] ], [ undef, [[FOR_BODY233]] ]
+; CHECK-NEXT:    [[SUB275:%.*]] = fsub float undef, [[TMP1]]
+; CHECK-NEXT:    [[SUB279:%.*]] = fsub float undef, [[TMP0]]
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN291:%.*]], label [[RETURN]]
+; CHECK:       if.then291:
+; CHECK-NEXT:    [[MUL292:%.*]] = fmul float [[SUB275]], 5.000000e-01
+; CHECK-NEXT:    [[ADD294:%.*]] = fadd float [[TMP1]], [[MUL292]]
+; CHECK-NEXT:    [[MUL295:%.*]] = fmul float [[SUB279]], 5.000000e-01
+; CHECK-NEXT:    [[ADD297:%.*]] = fadd float [[TMP0]], [[MUL295]]
+; CHECK-NEXT:    br i1 undef, label [[IF_END332:%.*]], label [[IF_ELSE319:%.*]]
+; CHECK:       if.else319:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN325:%.*]], label [[IF_END327:%.*]]
+; CHECK:       if.then325:
+; CHECK-NEXT:    br label [[IF_END327]]
+; CHECK:       if.end327:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN329:%.*]], label [[IF_END332]]
+; CHECK:       if.then329:
+; CHECK-NEXT:    br label [[IF_END332]]
+; CHECK:       if.end332:
+; CHECK-NEXT:    [[DX272_1:%.*]] = phi float [ [[SUB275]], [[IF_THEN329]] ], [ [[SUB275]], [[IF_END327]] ], [ 0x3F847AE140000000, [[IF_THEN291]] ]
+; CHECK-NEXT:    [[DY276_1:%.*]] = phi float [ undef, [[IF_THEN329]] ], [ undef, [[IF_END327]] ], [ 0x3F847AE140000000, [[IF_THEN291]] ]
+; CHECK-NEXT:    [[SUB334:%.*]] = fsub float [[ADD294]], [[DX272_1]]
+; CHECK-NEXT:    [[SUB338:%.*]] = fsub float [[ADD297]], [[DY276_1]]
+; CHECK-NEXT:    [[ARRAYIDX_I_I606:%.*]] = getelementptr inbounds [[CLASS_BTVECTOR3_23_221_463_485_507_573_595_683_727_749_815_837_991_1585_1607_1629_1651_1849_2047_2069_2091_2113:%.*]], %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* [[VERTICES:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    store float [[SUB334]], float* [[ARRAYIDX_I_I606]], align 4
+; CHECK-NEXT:    [[ARRAYIDX3_I607:%.*]] = getelementptr inbounds [[CLASS_BTVECTOR3_23_221_463_485_507_573_595_683_727_749_815_837_991_1585_1607_1629_1651_1849_2047_2069_2091_2113]], %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* [[VERTICES]], i64 0, i32 0, i64 1
+; CHECK-NEXT:    store float [[SUB338]], float* [[ARRAYIDX3_I607]], align 4
+; CHECK-NEXT:    br label [[RETURN]]
+; CHECK:       return:
+; CHECK-NEXT:    ret void
+; CHECK:       if.then17.1:
+; CHECK-NEXT:    br label [[IF_END22_1]]
+; CHECK:       if.end22.1:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN17_2:%.*]], label [[IF_END22_2:%.*]]
+; CHECK:       if.then17.2:
+; CHECK-NEXT:    br label [[IF_END22_2]]
+; CHECK:       if.end22.2:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END36:%.*]], label [[FOR_BODY]]
+;
 entry:
   br i1 undef, label %return, label %if.end
 
diff --git a/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll b/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
index f10c862..5cee363 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
@@ -1,9 +1,50 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -S
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.10.0"
 
 define void @testfunc(float* nocapture %dest, float* nocapture readonly %src) {
+; CHECK-LABEL: @testfunc(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ACC1_056:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD13:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x float> [ zeroinitializer, [[ENTRY]] ], [ [[TMP23:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[DEST:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    store float [[ACC1_056]], float* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x float> [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x float> undef, float [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x float> [[TMP0]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x float> [[TMP3]], float [[TMP4]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x float> undef, float [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x float> [[TMP6]], float [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP8:%.*]] = fadd <2 x float> [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul <2 x float> zeroinitializer, [[TMP0]]
+; CHECK-NEXT:    [[TMP10:%.*]] = fadd <2 x float> [[TMP9]], [[TMP8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fcmp olt <2 x float> [[TMP10]], <float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT:    [[TMP12:%.*]] = select <2 x i1> [[TMP11]], <2 x float> [[TMP10]], <2 x float> <float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT:    [[TMP13:%.*]] = fcmp olt <2 x float> [[TMP12]], <float -1.000000e+00, float -1.000000e+00>
+; CHECK-NEXT:    [[TMP14:%.*]] = fmul <2 x float> zeroinitializer, [[TMP12]]
+; CHECK-NEXT:    [[TMP15:%.*]] = select <2 x i1> [[TMP13]], <2 x float> <float -0.000000e+00, float -0.000000e+00>, <2 x float> [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <2 x float> [[TMP15]], i32 0
+; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <2 x float> [[TMP15]], i32 1
+; CHECK-NEXT:    [[ADD13]] = fadd float [[TMP16]], [[TMP17]]
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <2 x float> undef, float [[TMP17]], i32 0
+; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x float> [[TMP18]], float [[ADD13]], i32 1
+; CHECK-NEXT:    [[TMP20:%.*]] = fcmp olt <2 x float> [[TMP19]], <float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT:    [[TMP21:%.*]] = select <2 x i1> [[TMP20]], <2 x float> [[TMP19]], <2 x float> <float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT:    [[TMP22:%.*]] = fcmp olt <2 x float> [[TMP21]], <float -1.000000e+00, float -1.000000e+00>
+; CHECK-NEXT:    [[TMP23]] = select <2 x i1> [[TMP22]], <2 x float> <float -1.000000e+00, float -1.000000e+00>, <2 x float> [[TMP21]]
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 32
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
diff --git a/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll b/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
index 28b7aa3..b5f736a 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -6,6 +7,33 @@
 
 ; Function Attrs: nounwind ssp uwtable
 define void @_ZSt6uniqueISt15_Deque_iteratorIdRdPdEET_S4_S4_(%"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* nocapture %__last) {
+; CHECK-LABEL: @_ZSt6uniqueISt15_Deque_iteratorIdRdPdEET_S4_S4_(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[_M_CUR2_I_I:%.*]] = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* [[__FIRST:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[TMP0:%.*]] = load double*, double** [[_M_CUR2_I_I]], align 8
+; CHECK-NEXT:    [[_M_FIRST3_I_I:%.*]] = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* [[__FIRST]], i64 0, i32 1
+; CHECK-NEXT:    [[_M_CUR2_I_I81:%.*]] = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* [[__LAST:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[_M_CUR2_I_I81]], align 8
+; CHECK-NEXT:    [[_M_FIRST3_I_I83:%.*]] = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* [[__LAST]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load double*, double** [[_M_FIRST3_I_I83]], align 8
+; CHECK-NEXT:    br i1 undef, label [[_ZST13ADJACENT_FINDIST15_DEQUE_ITERATORIDRDPDEET_S4_S4__EXIT:%.*]], label [[WHILE_COND_I_PREHEADER:%.*]]
+; CHECK:       while.cond.i.preheader:
+; CHECK-NEXT:    br label [[WHILE_COND_I:%.*]]
+; CHECK:       while.cond.i:
+; CHECK-NEXT:    br i1 undef, label [[_ZST13ADJACENT_FINDIST15_DEQUE_ITERATORIDRDPDEET_S4_S4__EXIT]], label [[WHILE_BODY_I:%.*]]
+; CHECK:       while.body.i:
+; CHECK-NEXT:    br i1 undef, label [[_ZST13ADJACENT_FINDIST15_DEQUE_ITERATORIDRDPDEET_S4_S4__EXIT]], label [[WHILE_COND_I]]
+; CHECK:       _ZSt13adjacent_findISt15_Deque_iteratorIdRdPdEET_S4_S4_.exit:
+; CHECK-NEXT:    [[TMP3:%.*]] = phi double* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[TMP2]], [[WHILE_COND_I]] ], [ undef, [[WHILE_BODY_I]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = phi double* [ [[TMP0]], [[ENTRY]] ], [ [[TMP1]], [[WHILE_COND_I]] ], [ undef, [[WHILE_BODY_I]] ]
+; CHECK-NEXT:    store double* [[TMP4]], double** [[_M_CUR2_I_I]], align 8
+; CHECK-NEXT:    store double* [[TMP3]], double** [[_M_FIRST3_I_I]], align 8
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN_I55:%.*]], label [[WHILE_COND:%.*]]
+; CHECK:       if.then.i55:
+; CHECK-NEXT:    br label [[WHILE_COND]]
+; CHECK:       while.cond:
+; CHECK-NEXT:    br label [[WHILE_COND]]
+;
 entry:
   %_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
   %0 = load double*, double** %_M_cur2.i.i, align 8
diff --git a/test/Transforms/SLPVectorizer/X86/crash_flop7.ll b/test/Transforms/SLPVectorizer/X86/crash_flop7.ll
index e11be48..d149c27 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_flop7.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_flop7.ll
@@ -1,10 +1,41 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; Function Attrs: nounwind ssp uwtable
 define void @main() #0 {
+; CHECK-LABEL: @main(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[WHILE_BODY:%.*]], label [[WHILE_END:%.*]]
+; CHECK:       while.body:
+; CHECK-NEXT:    unreachable
+; CHECK:       while.end:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END80:%.*]], label [[FOR_BODY75_LR_PH:%.*]]
+; CHECK:       for.body75.lr.ph:
+; CHECK-NEXT:    br label [[FOR_BODY75:%.*]]
+; CHECK:       for.body75:
+; CHECK-NEXT:    br label [[FOR_BODY75]]
+; CHECK:       for.end80:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END300:%.*]], label [[FOR_BODY267_LR_PH:%.*]]
+; CHECK:       for.body267.lr.ph:
+; CHECK-NEXT:    br label [[FOR_BODY267:%.*]]
+; CHECK:       for.body267:
+; CHECK-NEXT:    [[S_71010:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY267_LR_PH]] ], [ [[ADD297:%.*]], [[FOR_BODY267]] ]
+; CHECK-NEXT:    [[MUL269:%.*]] = fmul double undef, undef
+; CHECK-NEXT:    [[MUL270:%.*]] = fmul double [[MUL269]], [[MUL269]]
+; CHECK-NEXT:    [[ADD282:%.*]] = fadd double undef, undef
+; CHECK-NEXT:    [[MUL283:%.*]] = fmul double [[MUL269]], [[ADD282]]
+; CHECK-NEXT:    [[ADD293:%.*]] = fadd double undef, undef
+; CHECK-NEXT:    [[MUL294:%.*]] = fmul double [[MUL270]], [[ADD293]]
+; CHECK-NEXT:    [[ADD295:%.*]] = fadd double undef, [[MUL294]]
+; CHECK-NEXT:    [[DIV296:%.*]] = fdiv double [[MUL283]], [[ADD295]]
+; CHECK-NEXT:    [[ADD297]] = fadd double [[S_71010]], [[DIV296]]
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY267]], label [[FOR_END300]]
+; CHECK:       for.end300:
+; CHECK-NEXT:    unreachable
+;
 entry:
   br i1 undef, label %while.body, label %while.end
 
diff --git a/test/Transforms/SLPVectorizer/X86/crash_gep.ll b/test/Transforms/SLPVectorizer/X86/crash_gep.ll
index bd1e8f7..eca21e7 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_gep.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_gep.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-unknown-linux-gnu
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -7,6 +8,17 @@
 
 ; Function Attrs: nounwind uwtable
 define i32 @fn1() {
+; CHECK-LABEL: @fn1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64*, i64** @a, align 8
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i64* [[ADD_PTR]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 2
+; CHECK-NEXT:    store i64 [[TMP1]], i64* [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint i64* [[ARRAYIDX]] to i64
+; CHECK-NEXT:    store i64 [[TMP2]], i64* [[ADD_PTR]], align 8
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %0 = load i64*, i64** @a, align 8
   %add.ptr = getelementptr inbounds i64, i64* %0, i64 1
diff --git a/test/Transforms/SLPVectorizer/X86/crash_lencod.ll b/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
index 70b13fd..eb1cb32 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
@@ -1,10 +1,45 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; Function Attrs: nounwind ssp uwtable
 define void @RCModelEstimator() {
+; CHECK-LABEL: @RCModelEstimator(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END_THREAD:%.*]]
+; CHECK:       for.end.thread:
+; CHECK-NEXT:    unreachable
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY3:%.*]], label [[IF_END103:%.*]]
+; CHECK:       for.cond14.preheader:
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY16_LR_PH:%.*]], label [[IF_END103]]
+; CHECK:       for.body16.lr.ph:
+; CHECK-NEXT:    br label [[FOR_BODY16:%.*]]
+; CHECK:       for.body3:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN7:%.*]], label [[FOR_INC11:%.*]]
+; CHECK:       if.then7:
+; CHECK-NEXT:    br label [[FOR_INC11]]
+; CHECK:       for.inc11:
+; CHECK-NEXT:    br i1 false, label [[FOR_COND14_PREHEADER:%.*]], label [[FOR_BODY3]]
+; CHECK:       for.body16:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END39:%.*]], label [[FOR_BODY16]]
+; CHECK:       for.end39:
+; CHECK-NEXT:    br i1 undef, label [[IF_END103]], label [[FOR_COND45_PREHEADER:%.*]]
+; CHECK:       for.cond45.preheader:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN88:%.*]], label [[IF_ELSE:%.*]]
+; CHECK:       if.then88:
+; CHECK-NEXT:    br label [[IF_END103]]
+; CHECK:       if.else:
+; CHECK-NEXT:    br label [[IF_END103]]
+; CHECK:       if.end103:
+; CHECK-NEXT:    ret void
+;
 entry:
   br i1 undef, label %for.body.lr.ph, label %for.end.thread
 
@@ -66,6 +101,17 @@
 
 
 define void @intrapred_luma() {
+; CHECK-LABEL: @intrapred_luma(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CONV153:%.*]] = trunc i32 undef to i16
+; CHECK-NEXT:    [[ARRAYIDX154:%.*]] = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 12
+; CHECK-NEXT:    store i16 [[CONV153]], i16* [[ARRAYIDX154]], align 8
+; CHECK-NEXT:    [[ARRAYIDX155:%.*]] = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 11
+; CHECK-NEXT:    store i16 [[CONV153]], i16* [[ARRAYIDX155]], align 2
+; CHECK-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 10
+; CHECK-NEXT:    store i16 [[CONV153]], i16* [[ARRAYIDX156]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %conv153 = trunc i32 undef to i16
   %arrayidx154 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 12
@@ -78,6 +124,18 @@
 }
 
 define fastcc void @dct36(double* %inbuf) {
+; CHECK-LABEL: @dct36(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds double, double* [[INBUF:%.*]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX44:%.*]] = getelementptr inbounds double, double* [[INBUF]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[ARRAYIDX44]], align 8
+; CHECK-NEXT:    [[ADD46:%.*]] = fadd double [[TMP0]], undef
+; CHECK-NEXT:    store double [[ADD46]], double* [[ARRAYIDX41]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[INBUF]], align 8
+; CHECK-NEXT:    [[ADD49:%.*]] = fadd double [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store double [[ADD49]], double* [[ARRAYIDX44]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %arrayidx41 = getelementptr inbounds double, double* %inbuf, i64 2
   %arrayidx44 = getelementptr inbounds double, double* %inbuf, i64 1
diff --git a/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll b/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
index f82343f..f12de2a 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
@@ -1,9 +1,46 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 define void @main() {
+; CHECK-LABEL: @main(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    br label [[FOR_COND4_PREHEADER:%.*]]
+; CHECK:       for.cond4.preheader:
+; CHECK-NEXT:    br label [[FOR_BODY6:%.*]]
+; CHECK:       for.body6:
+; CHECK-NEXT:    br label [[FOR_BODY12:%.*]]
+; CHECK:       for.body12:
+; CHECK-NEXT:    [[FZIMG_069:%.*]] = phi double [ undef, [[FOR_BODY6]] ], [ [[ADD19:%.*]], [[IF_END:%.*]] ]
+; CHECK-NEXT:    [[FZREAL_068:%.*]] = phi double [ undef, [[FOR_BODY6]] ], [ [[ADD20:%.*]], [[IF_END]] ]
+; CHECK-NEXT:    [[MUL13:%.*]] = fmul double [[FZREAL_068]], [[FZREAL_068]]
+; CHECK-NEXT:    [[MUL14:%.*]] = fmul double [[FZIMG_069]], [[FZIMG_069]]
+; CHECK-NEXT:    [[ADD15:%.*]] = fadd double [[MUL13]], [[MUL14]]
+; CHECK-NEXT:    [[CMP16:%.*]] = fcmp ogt double [[ADD15]], 4.000000e+00
+; CHECK-NEXT:    br i1 [[CMP16]], label [[FOR_INC21:%.*]], label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[MUL18:%.*]] = fmul double undef, [[FZIMG_069]]
+; CHECK-NEXT:    [[ADD19]] = fadd double undef, [[MUL18]]
+; CHECK-NEXT:    [[SUB:%.*]] = fsub double [[MUL13]], [[MUL14]]
+; CHECK-NEXT:    [[ADD20]] = fadd double undef, [[SUB]]
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY12]], label [[FOR_INC21]]
+; CHECK:       for.inc21:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END23:%.*]], label [[FOR_BODY6]]
+; CHECK:       for.end23:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN25:%.*]], label [[IF_THEN26:%.*]]
+; CHECK:       if.then25:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END44:%.*]], label [[FOR_COND4_PREHEADER]]
+; CHECK:       if.then26:
+; CHECK-NEXT:    unreachable
+; CHECK:       for.end44:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END48:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end48:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -54,6 +91,26 @@
 %struct.hoge = type { double, double, double}
 
 define void @zot(%struct.hoge* %arg) {
+; CHECK-LABEL: @zot(
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[TMP:%.*]] = load double, double* undef, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load double, double* undef, align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[TMP]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = fsub <2 x double> [[TMP1]], undef
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_HOGE:%.*]], %struct.hoge* [[ARG:%.*]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> undef, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fsub <2 x double> [[TMP3]], undef
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[TMP7]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT:    br i1 undef, label [[BB11:%.*]], label [[BB12:%.*]]
+; CHECK:       bb11:
+; CHECK-NEXT:    br label [[BB14:%.*]]
+; CHECK:       bb12:
+; CHECK-NEXT:    br label [[BB14]]
+; CHECK:       bb14:
+; CHECK-NEXT:    ret void
+;
 bb:
   %tmp = load double, double* undef, align 8
   %tmp1 = fsub double %tmp, undef
@@ -85,6 +142,22 @@
 %struct.rc4_state.0.24 = type { i32, i32, [256 x i32] }
 
 define void @rc4_crypt(%struct.rc4_state.0.24* nocapture %s) {
+; CHECK-LABEL: @rc4_crypt(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X1:%.*]] = getelementptr inbounds [[STRUCT_RC4_STATE_0_24:%.*]], %struct.rc4_state.0.24* [[S:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[Y2:%.*]] = getelementptr inbounds [[STRUCT_RC4_STATE_0_24]], %struct.rc4_state.0.24* [[S]], i64 0, i32 1
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[CONV4:%.*]] = and i32 undef, 255
+; CHECK-NEXT:    [[CONV7:%.*]] = and i32 undef, 255
+; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[X_0_LCSSA:%.*]] = phi i32 [ undef, [[ENTRY:%.*]] ], [ [[CONV4]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[Y_0_LCSSA:%.*]] = phi i32 [ undef, [[ENTRY]] ], [ [[CONV7]], [[FOR_BODY]] ]
+; CHECK-NEXT:    store i32 [[X_0_LCSSA]], i32* [[X1]], align 4
+; CHECK-NEXT:    store i32 [[Y_0_LCSSA]], i32* [[Y2]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %x1 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 0
   %y2 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 1
diff --git a/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll b/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
index e1df98d..a52ec6b 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -12,6 +13,30 @@
 @e = common global i32 0, align 4
 
 define i32 @fn1() {
+; CHECK-LABEL: @fn1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 1), align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* @d, align 4
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[COND]], label [[SW_BB:%.*]], label [[SAVE_STATE_AND_RETURN:%.*]]
+; CHECK:       sw.bb:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* @c, align 4
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[TMP3]], 7
+; CHECK-NEXT:    store i32 [[AND]], i32* @a, align 4
+; CHECK-NEXT:    switch i32 [[AND]], label [[IF_END:%.*]] [
+; CHECK-NEXT:    i32 7, label [[SAVE_STATE_AND_RETURN]]
+; CHECK-NEXT:    i32 0, label [[SAVE_STATE_AND_RETURN]]
+; CHECK-NEXT:    ]
+; CHECK:       if.end:
+; CHECK-NEXT:    br label [[SAVE_STATE_AND_RETURN]]
+; CHECK:       save_state_and_return:
+; CHECK-NEXT:    [[T_0:%.*]] = phi i32 [ 0, [[IF_END]] ], [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP0]], [[SW_BB]] ], [ [[TMP0]], [[SW_BB]] ]
+; CHECK-NEXT:    [[F_0:%.*]] = phi i32 [ 0, [[IF_END]] ], [ [[TMP1]], [[ENTRY]] ], [ 0, [[SW_BB]] ], [ 0, [[SW_BB]] ]
+; CHECK-NEXT:    store i32 [[T_0]], i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 0), align 4
+; CHECK-NEXT:    store i32 [[F_0]], i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 1), align 4
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %0 = load i32, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 0), align 4
   %1 = load i32, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 1), align 4
@@ -24,8 +49,8 @@
   %and = and i32 %3, 7
   store i32 %and, i32* @a, align 4
   switch i32 %and, label %if.end [
-    i32 7, label %save_state_and_return
-    i32 0, label %save_state_and_return
+  i32 7, label %save_state_and_return
+  i32 0, label %save_state_and_return
   ]
 
 if.end:                                           ; preds = %sw.bb
diff --git a/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll b/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll
index 916772c..9108e84 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll
@@ -1,9 +1,41 @@
-; RUN: opt < %s -basicaa -disable-verify -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -disable-verify -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-darwin13.3.0"
 
 define void @_foo(double %p1, double %p2, double %p3) #0 {
+; CHECK-LABEL: @_foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TAB1:%.*]] = alloca [256 x i32], align 16
+; CHECK-NEXT:    [[TAB2:%.*]] = alloca [256 x i32], align 16
+; CHECK-NEXT:    br label [[BB1:%.*]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[MUL19:%.*]] = fmul double [[P1:%.*]], 1.638400e+04
+; CHECK-NEXT:    [[MUL20:%.*]] = fmul double [[P3:%.*]], 1.638400e+04
+; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[MUL20]], 8.192000e+03
+; CHECK-NEXT:    [[MUL21:%.*]] = fmul double [[P2:%.*]], 1.638400e+04
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV266:%.*]] = phi i64 [ 0, [[BB1]] ], [ [[INDVARS_IV_NEXT267:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[T_0259:%.*]] = phi double [ 0.000000e+00, [[BB1]] ], [ [[ADD27:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[P3_ADDR_0258:%.*]] = phi double [ [[ADD]], [[BB1]] ], [ [[ADD28:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[VECINIT_I_I237:%.*]] = insertelement <2 x double> undef, double [[T_0259]], i32 0
+; CHECK-NEXT:    [[X13:%.*]] = tail call i32 @_xfn(<2 x double> [[VECINIT_I_I237]])
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [256 x i32], [256 x i32]* [[TAB1]], i64 0, i64 [[INDVARS_IV266]]
+; CHECK-NEXT:    store i32 [[X13]], i32* [[ARRAYIDX]], align 4, !tbaa !0
+; CHECK-NEXT:    [[VECINIT_I_I:%.*]] = insertelement <2 x double> undef, double [[P3_ADDR_0258]], i32 0
+; CHECK-NEXT:    [[X14:%.*]] = tail call i32 @_xfn(<2 x double> [[VECINIT_I_I]])
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [256 x i32], [256 x i32]* [[TAB2]], i64 0, i64 [[INDVARS_IV266]]
+; CHECK-NEXT:    store i32 [[X14]], i32* [[ARRAYIDX26]], align 4, !tbaa !0
+; CHECK-NEXT:    [[ADD27]] = fadd double [[MUL19]], [[T_0259]]
+; CHECK-NEXT:    [[ADD28]] = fadd double [[MUL21]], [[P3_ADDR_0258]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT267]] = add nuw nsw i64 [[INDVARS_IV266]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT267]], 256
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[RETURN:%.*]], label [[FOR_BODY]]
+; CHECK:       return:
+; CHECK-NEXT:    ret void
+;
 entry:
   %tab1 = alloca [256 x i32], align 16
   %tab2 = alloca [256 x i32], align 16
diff --git a/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll b/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll
index 5a576c2..9a07cfc 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -6,6 +7,83 @@
 %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171 = type { i32, i32, i32, i32, i32, i32, [8 x i8] }
 
 define void @SIM4() {
+; CHECK-LABEL: @SIM4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[RETURN:%.*]], label [[LOR_LHS_FALSE:%.*]]
+; CHECK:       lor.lhs.false:
+; CHECK-NEXT:    br i1 undef, label [[RETURN]], label [[IF_END:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END605:%.*]], label [[FOR_BODY_LR_PH:%.*]]
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    br i1 undef, label [[FOR_INC603:%.*]], label [[IF_END12:%.*]]
+; CHECK:       if.end12:
+; CHECK-NEXT:    br i1 undef, label [[LAND_LHS_TRUE:%.*]], label [[LAND_LHS_TRUE167:%.*]]
+; CHECK:       land.lhs.true:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN17:%.*]], label [[LAND_LHS_TRUE167]]
+; CHECK:       if.then17:
+; CHECK-NEXT:    br i1 undef, label [[IF_END98:%.*]], label [[LAND_RHS_LR_PH:%.*]]
+; CHECK:       land.rhs.lr.ph:
+; CHECK-NEXT:    unreachable
+; CHECK:       if.end98:
+; CHECK-NEXT:    [[FROM299:%.*]] = getelementptr inbounds [[STRUCT__EXON_T_12_103_220_363_480_649_740_857_1039_1065_1078_1091_1117_1130_1156_1169_1195_1221_1234_1286_1299_1312_1338_1429_1455_1468_1494_1520_1884_1897_1975_2066_2105_2170_2171:%.*]], %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 1
+; CHECK-NEXT:    br i1 undef, label [[LAND_LHS_TRUE167]], label [[IF_THEN103:%.*]]
+; CHECK:       if.then103:
+; CHECK-NEXT:    [[DOTSUB100:%.*]] = select i1 undef, i32 250, i32 undef
+; CHECK-NEXT:    [[MUL114:%.*]] = shl nsw i32 [[DOTSUB100]], 2
+; CHECK-NEXT:    [[FROM1115:%.*]] = getelementptr inbounds [[STRUCT__EXON_T_12_103_220_363_480_649_740_857_1039_1065_1078_1091_1117_1130_1156_1169_1195_1221_1234_1286_1299_1312_1338_1429_1455_1468_1494_1520_1884_1897_1975_2066_2105_2170_2171]], %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 0
+; CHECK-NEXT:    [[COND125:%.*]] = select i1 undef, i32 undef, i32 [[MUL114]]
+; CHECK-NEXT:    br label [[FOR_COND_I:%.*]]
+; CHECK:       for.cond.i:
+; CHECK-NEXT:    [[ROW_0_I:%.*]] = phi i32 [ undef, [[LAND_RHS_I874:%.*]] ], [ [[DOTSUB100]], [[IF_THEN103]] ]
+; CHECK-NEXT:    [[COL_0_I:%.*]] = phi i32 [ undef, [[LAND_RHS_I874]] ], [ [[COND125]], [[IF_THEN103]] ]
+; CHECK-NEXT:    br i1 undef, label [[LAND_RHS_I874]], label [[FOR_END_I:%.*]]
+; CHECK:       land.rhs.i874:
+; CHECK-NEXT:    br i1 undef, label [[FOR_COND_I]], label [[FOR_END_I]]
+; CHECK:       for.end.i:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]]
+; CHECK:       if.then.i:
+; CHECK-NEXT:    [[ADD14_I:%.*]] = add nsw i32 [[ROW_0_I]], undef
+; CHECK-NEXT:    [[ADD15_I:%.*]] = add nsw i32 [[COL_0_I]], undef
+; CHECK-NEXT:    br label [[EXTEND_BW_EXIT:%.*]]
+; CHECK:       if.end.i:
+; CHECK-NEXT:    [[ADD16_I:%.*]] = add i32 [[COND125]], [[DOTSUB100]]
+; CHECK-NEXT:    [[CMP26514_I:%.*]] = icmp slt i32 [[ADD16_I]], 0
+; CHECK-NEXT:    br i1 [[CMP26514_I]], label [[FOR_END33_I:%.*]], label [[FOR_BODY28_LR_PH_I:%.*]]
+; CHECK:       for.body28.lr.ph.i:
+; CHECK-NEXT:    br label [[FOR_END33_I]]
+; CHECK:       for.end33.i:
+; CHECK-NEXT:    br i1 undef, label [[FOR_END58_I:%.*]], label [[FOR_BODY52_LR_PH_I:%.*]]
+; CHECK:       for.body52.lr.ph.i:
+; CHECK-NEXT:    br label [[FOR_END58_I]]
+; CHECK:       for.end58.i:
+; CHECK-NEXT:    br label [[WHILE_COND260_I:%.*]]
+; CHECK:       while.cond260.i:
+; CHECK-NEXT:    br i1 undef, label [[LAND_RHS263_I:%.*]], label [[WHILE_END275_I:%.*]]
+; CHECK:       land.rhs263.i:
+; CHECK-NEXT:    br i1 undef, label [[WHILE_COND260_I]], label [[WHILE_END275_I]]
+; CHECK:       while.end275.i:
+; CHECK-NEXT:    br label [[EXTEND_BW_EXIT]]
+; CHECK:       extend_bw.exit:
+; CHECK-NEXT:    [[ADD14_I1262:%.*]] = phi i32 [ [[ADD14_I]], [[IF_THEN_I]] ], [ undef, [[WHILE_END275_I]] ]
+; CHECK-NEXT:    [[ADD15_I1261:%.*]] = phi i32 [ [[ADD15_I]], [[IF_THEN_I]] ], [ undef, [[WHILE_END275_I]] ]
+; CHECK-NEXT:    br i1 false, label [[IF_THEN157:%.*]], label [[LAND_LHS_TRUE167]]
+; CHECK:       if.then157:
+; CHECK-NEXT:    [[ADD158:%.*]] = add nsw i32 [[ADD14_I1262]], 1
+; CHECK-NEXT:    store i32 [[ADD158]], i32* [[FROM299]], align 4
+; CHECK-NEXT:    [[ADD160:%.*]] = add nsw i32 [[ADD15_I1261]], 1
+; CHECK-NEXT:    store i32 [[ADD160]], i32* [[FROM1115]], align 4
+; CHECK-NEXT:    br label [[LAND_LHS_TRUE167]]
+; CHECK:       land.lhs.true167:
+; CHECK-NEXT:    unreachable
+; CHECK:       for.inc603:
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY]], label [[FOR_END605]]
+; CHECK:       for.end605:
+; CHECK-NEXT:    unreachable
+; CHECK:       return:
+; CHECK-NEXT:    ret void
+;
 entry:
   br i1 undef, label %return, label %lor.lhs.false
 
diff --git a/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll b/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll
index 273584c..e2d3637 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -8,6 +9,45 @@
 
 ; Function Attrs: ssp uwtable
 define void @main() #0 {
+; CHECK-LABEL: @main(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK:       cond.true:
+; CHECK-NEXT:    unreachable
+; CHECK:       cond.end:
+; CHECK-NEXT:    br label [[INVOKE_CONT:%.*]]
+; CHECK:       invoke.cont:
+; CHECK-NEXT:    br i1 undef, label [[ARRAYCTOR_CONT:%.*]], label [[INVOKE_CONT]]
+; CHECK:       arrayctor.cont:
+; CHECK-NEXT:    [[AGG_TMP99208_SROA_0_0_IDX:%.*]] = getelementptr inbounds [[STRUCT_RAY_5_11_53_113_119_137_149_185_329_389_416:%.*]], %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 0
+; CHECK-NEXT:    [[AGG_TMP101211_SROA_0_0_IDX:%.*]] = getelementptr inbounds [[STRUCT_RAY_5_11_53_113_119_137_149_185_329_389_416]], %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 0
+; CHECK-NEXT:    br label [[FOR_COND36_PREHEADER:%.*]]
+; CHECK:       for.cond36.preheader:
+; CHECK-NEXT:    br i1 undef, label [[FOR_BODY42_LR_PH_US:%.*]], label [[_Z5CLAMPD_EXIT_1:%.*]]
+; CHECK:       cond.false51.us:
+; CHECK-NEXT:    unreachable
+; CHECK:       cond.true48.us:
+; CHECK-NEXT:    br i1 undef, label [[COND_TRUE63_US:%.*]], label [[COND_FALSE66_US:%.*]]
+; CHECK:       cond.false66.us:
+; CHECK-NEXT:    [[ADD_I276_US:%.*]] = fadd double 0.000000e+00, undef
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[ADD_I276_US]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double 0xBFA5CC2D1960285F, i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> <double 0.000000e+00, double undef>, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> <double 1.400000e+02, double 1.400000e+02>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> <double 5.000000e+01, double 5.200000e+01>, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> undef, [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[AGG_TMP99208_SROA_0_0_IDX]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast double* [[AGG_TMP101211_SROA_0_0_IDX]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP7]], align 8
+; CHECK-NEXT:    unreachable
+; CHECK:       cond.true63.us:
+; CHECK-NEXT:    unreachable
+; CHECK:       for.body42.lr.ph.us:
+; CHECK-NEXT:    br i1 undef, label [[COND_TRUE48_US:%.*]], label [[COND_FALSE51_US:%.*]]
+; CHECK:       _Z5clampd.exit.1:
+; CHECK-NEXT:    br label [[FOR_COND36_PREHEADER]]
+;
 entry:
   br i1 undef, label %cond.true, label %cond.end
 
@@ -67,6 +107,27 @@
 %struct.Vec.0.6.48.90.132.186.192.198.234.252.258.264.270.276.282.288.378.432.438.450.456.594.600 = type { double, double, double }
 
 define void @_Z8radianceRK3RayiPt() #0 {
+; CHECK-LABEL: @_Z8radianceRK3RayiPt(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[IF_THEN78:%.*]], label [[IF_THEN38:%.*]]
+; CHECK:       if.then38:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double undef, i32 1
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul <2 x double> undef, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = fsub <2 x double> undef, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> undef, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> undef, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd <2 x double> undef, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x double> undef, [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul <2 x double> undef, [[TMP6]]
+; CHECK-NEXT:    [[AGG_TMP74663_SROA_0_0_IDX:%.*]] = getelementptr inbounds [[STRUCT_RAY_5_11_53_95_137_191_197_203_239_257_263_269_275_281_287_293_383_437_443_455_461_599_601:%.*]], %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 0
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast double* [[AGG_TMP74663_SROA_0_0_IDX]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP7]], <2 x double>* [[TMP8]], align 8
+; CHECK-NEXT:    br label [[RETURN:%.*]]
+; CHECK:       if.then78:
+; CHECK-NEXT:    br label [[RETURN]]
+; CHECK:       return:
+; CHECK-NEXT:    ret void
+;
 entry:
   br i1 undef, label %if.then78, label %if.then38
 
diff --git a/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
index 45ca99a..24f5bad 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
@@ -1,14 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -basicaa -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -S < %s | FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
 
 
 ; This test used to crash because we were following phi chains incorrectly.
-; We used indices to get the incoming value of two phi nodes rather than 
+; We used indices to get the incoming values of two phi nodes rather than
 ; an incoming-block lookup.
 ; This can give wrong results when the ordering of the incoming
 ; edges in the two phi nodes doesn't match.
-;CHECK-LABEL: bar
 
 %0 = type { %1, %2 }
 %1 = type { double, double }
@@ -17,6 +17,36 @@
 
 ;define fastcc void @bar() {
 define void @bar() {
+; CHECK-LABEL: @bar(
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[TMP0:%.*]], %0* undef, i64 0, i32 1, i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[TMP0]], %0* undef, i64 0, i32 1, i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[TMP0]], %0* undef, i64 0, i32 1, i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[TMP0]], %0* undef, i64 0, i32 1, i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[TMP0]], %0* undef, i64 0, i32 1, i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[TMP0]], %0* undef, i64 0, i32 1, i32 1
+; CHECK-NEXT:    br label [[TMP7:%.*]]
+; CHECK:         [[TMP8:%.*]] = phi <2 x double> [ <double 1.800000e+01, double 2.800000e+01>, [[TMP0]] ], [ [[TMP11:%.*]], [[TMP21:%.*]] ], [ [[TMP11]], [[TMP18:%.*]] ], [ [[TMP11]], [[TMP18]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast double* [[TMP1]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP8]], <2 x double>* [[TMP9]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast double* [[TMP3]] to <2 x double>*
+; CHECK-NEXT:    [[TMP11]] = load <2 x double>, <2 x double>* [[TMP10]], align 8
+; CHECK-NEXT:    br i1 undef, label [[TMP12:%.*]], label [[TMP13:%.*]]
+; CHECK:         ret void
+; CHECK:         [[TMP14:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP11]], <2 x double>* [[TMP14]], align 8
+; CHECK-NEXT:    br i1 undef, label [[TMP15:%.*]], label [[TMP16:%.*]]
+; CHECK:         br label [[TMP16]]
+; CHECK:         br i1 undef, label [[TMP17:%.*]], label [[TMP18]]
+; CHECK:         unreachable
+; CHECK:         [[TMP19:%.*]] = extractelement <2 x double> [[TMP11]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = extractelement <2 x double> [[TMP11]], i32 1
+; CHECK-NEXT:    switch i32 undef, label [[TMP21]] [
+; CHECK-NEXT:    i32 32, label [[TMP7]]
+; CHECK-NEXT:    i32 103, label [[TMP7]]
+; CHECK-NEXT:    ]
+; CHECK:         br i1 undef, label [[TMP7]], label [[TMP22:%.*]]
+; CHECK:         unreachable
+;
   %1 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
   %2 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
   %3 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
@@ -53,8 +83,8 @@
 
 ; <label>:17                                      ; preds = %15
   switch i32 undef, label %18 [
-    i32 32, label %7
-    i32 103, label %7
+  i32 32, label %7
+  i32 103, label %7
   ]
 
 ; <label>:18                                      ; preds = %17
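
The phi-ordering comment above is worth making concrete. Below is a minimal C++ sketch of the distinction, using LLVM's PHINode API; the helper is hypothetical, not the vectorizer's actual code. Pairing operands by position is only valid if both phis happen to list their predecessor blocks in the same order, whereas looking the second value up by its incoming block is well-defined regardless of edge order.

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Find the value P2 produces on the same control-flow edge as
    // P1's i-th incoming value.
    static Value *matchingIncoming(PHINode *P1, PHINode *P2, unsigned i) {
      // Buggy (the behavior the test guards against): assumes both phis
      // order their incoming edges identically.
      //   return P2->getIncomingValue(i);

      // Correct: resolve by the shared predecessor block instead.
      BasicBlock *Pred = P1->getIncomingBlock(i);
      return P2->getIncomingValueForBlock(Pred);
    }
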
diff --git a/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll b/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
index eac20a0..98db3ed 100644
--- a/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
+++ b/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -16,15 +17,26 @@
 ; }
 
 
-;CHECK-LABEL: @foo(
-;CHECK: load <2 x float>
-;CHECK: fadd <2 x float>
-;CHECK: call i32
-;CHECK: load <2 x double>
-;CHECK: fadd <2 x double>
-;CHECK: store <2 x double>
-;CHECK: ret
 define i32 @foo(double* nocapture %A, float* nocapture %B, i32 %g) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[B:%.*]] to <2 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, <2 x float>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x float> <float 5.000000e+00, float 8.000000e+00>, [[TMP1]]
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[G:%.*]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @bar()
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[TMP3:%.*]] = fpext <2 x float> [[TMP2]] to <2 x double>
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x double> [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast double* [[A]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP6]], <2 x double>* [[TMP7]], align 8
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %0 = load float, float* %B, align 4
   %arrayidx1 = getelementptr inbounds float, float* %B, i64 1
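
For readers scanning the new checks: the point of cross_block_slp.ll is that the two SLP trees live in different basic blocks yet feed one another. A rough C++ reconstruction of the shape, inferred from the checks above (names and the exact arithmetic are approximate):

    extern "C" int bar();

    void foo(double *A, float *B, int g) {
      float f0 = B[0] + 5.0f;   // pair #1, vectorized in the entry block
      float f1 = B[1] + 8.0f;
      if (g)
        bar();                  // control flow between the two pairs
      A[0] += f0;               // pair #2, in the join block, consumes
      A[1] += f1;               // pair #1 across the block boundary
    }
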
diff --git a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
index 0a4e961..ac69333 100644
--- a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
+++ b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -11,17 +12,28 @@
 ;   A[0] = r; A[1] = g; A[2] = b; A[3] = a;
 ; }
 
-;CHECK-LABEL: @foo
-;CHECK: bitcast i32* %A to <4 x i32>*
-;CHECK-NEXT: load <4 x i32>
-;CHECK: phi <4 x i32>
-;CHECK-NEXT: mul nsw <4 x i32>
-;CHECK-NOT: mul
-;CHECK: phi <4 x i32>
-;CHECK: bitcast i32* %A to <4 x i32>*
-;CHECK-NEXT: store <4 x i32>
-;CHECK-NEXT:ret i32 undef
 define i32 @foo(i32* nocapture %A) #0 {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 13
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[CMP24:%.*]] = icmp sgt i32 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[CMP24]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[I_029:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = phi <4 x i32> [ [[TMP4:%.*]], [[FOR_BODY]] ], [ [[TMP1]], [[ENTRY]] ]
+; CHECK-NEXT:    [[TMP4]] = mul nsw <4 x i32> <i32 18, i32 19, i32 12, i32 9>, [[TMP3]]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_029]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[TMP2]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[TMP5:%.*]] = phi <4 x i32> [ [[TMP1]], [[ENTRY]] ], [ [[TMP4]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[A]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %0 = load i32, i32* %A, align 4
   %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 1
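
The cycle_dup checks above show a loop-carried recurrence vectorized whole: the four scalar accumulators become a single <4 x i32> phi, multiplied by the constant vector <18, 19, 12, 9> once per iteration. A C++ sketch of the source shape, reconstructed from the test's own comment and checks:

    void cycle(int *A) {
      int r = A[0], g = A[1], b = A[2], a = A[3];
      int n = A[13];                     // trip count, loaded once
      for (int i = 0; i < n; ++i) {
        r *= 18; g *= 19; b *= 12; a *= 9;   // one <4 x i32> mul per iteration
      }
      A[0] = r; A[1] = g; A[2] = b; A[3] = a;
    }
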
diff --git a/test/Transforms/SLPVectorizer/X86/debug_info.ll b/test/Transforms/SLPVectorizer/X86/debug_info.ll
index cdf2455..0fe399e 100644
--- a/test/Transforms/SLPVectorizer/X86/debug_info.ll
+++ b/test/Transforms/SLPVectorizer/X86/debug_info.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -12,16 +13,28 @@
 ;   A[8] = y0; A[8+1] = y1;
 ; }
 
-;CHECK: @depth
-;CHECK: getelementptr inbounds {{.*}}, !dbg ![[LOC:[0-9]+]]
-;CHECK: bitcast double* {{.*}}, !dbg ![[LOC]]
-;CHECK: load <2 x double>, <2 x double>* {{.*}}, !dbg ![[LOC]]
-;CHECK: store <2 x double> {{.*}}, !dbg ![[LOC2:[0-9]+]]
-;CHECK: ret
-;CHECK: ![[LOC]] = !DILocation(line: 4, scope:
-;CHECK: ![[LOC2]] = !DILocation(line: 7, scope:
-
 define i32 @depth(double* nocapture %A, i32 %m) #0 !dbg !4 {
+; CHECK-LABEL: @depth(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    call void @llvm.dbg.value(metadata double* [[A:%.*]], metadata !12, metadata !DIExpression()), !dbg !18
+; CHECK-NEXT:    call void @llvm.dbg.value(metadata i32 [[M:%.*]], metadata !13, metadata !DIExpression()), !dbg !18
+; CHECK-NEXT:    call void @llvm.dbg.value(metadata double 0.000000e+00, metadata !14, metadata !DIExpression()), !dbg !19
+; CHECK-NEXT:    call void @llvm.dbg.value(metadata double 2.000000e-01, metadata !15, metadata !DIExpression()), !dbg !19
+; CHECK-NEXT:    call void @llvm.dbg.value(metadata i32 0, metadata !16, metadata !DIExpression()), !dbg !20
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[M]], 0, !dbg !20
+; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]], !dbg !20
+; CHECK:       for.body.lr.ph:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 4, !dbg !21
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*, !dbg !21
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8, !dbg !21
+; CHECK-NEXT:    br label [[FOR_END]], !dbg !20
+; CHECK:       for.end:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[FOR_BODY_LR_PH]] ], [ <double 0.000000e+00, double 1.000000e+00>, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[A]], i64 8, !dbg !23
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[ARRAYIDX2]] to <2 x double>*, !dbg !23
+; CHECK-NEXT:    store <2 x double> [[TMP2]], <2 x double>* [[TMP3]], align 8, !dbg !23
+; CHECK-NEXT:    ret i32 undef, !dbg !24
+;
 entry:
   tail call void @llvm.dbg.value(metadata double* %A, i64 0, metadata !12, metadata !DIExpression()), !dbg !19
   tail call void @llvm.dbg.value(metadata i32 %m, i64 0, metadata !13, metadata !DIExpression()), !dbg !19
diff --git a/test/Transforms/SLPVectorizer/X86/diamond.ll b/test/Transforms/SLPVectorizer/X86/diamond.ll
index 4e2c02f..3cba8ea 100644
--- a/test/Transforms/SLPVectorizer/X86/diamond.ll
+++ b/test/Transforms/SLPVectorizer/X86/diamond.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -11,12 +12,27 @@
 ;   return 0;
 ; }
 
-; CHECK-LABEL: @foo(
-; CHECK: load <4 x i32>
-; CHECK: mul <4 x i32>
-; CHECK: store <4 x i32>
-; CHECK: ret
 define i32 @foo(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) #0 {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[MUL238:%.*]] = add i32 [[M:%.*]], [[N:%.*]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[MUL238]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[MUL238]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[MUL238]], i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[MUL238]], i32 3
+; CHECK-NEXT:    [[TMP6:%.*]] = mul <4 x i32> [[TMP1]], [[TMP5]]
+; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    ret i32 0
+;
 entry:
   %0 = load i32, i32* %A, align 4
   %mul238 = add i32 %m, %n
@@ -49,12 +65,28 @@
 ;   return A[0];
 ; }
 
-; CHECK-LABEL: @extr_user(
-; CHECK: load <4 x i32>
-; CHECK: store <4 x i32>
-; CHECK: extractelement <4 x i32>
-; CHECK-NEXT: ret
 define i32 @extr_user(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
+; CHECK-LABEL: @extr_user(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[MUL238:%.*]] = add i32 [[M:%.*]], [[N:%.*]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[MUL238]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[MUL238]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[MUL238]], i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[MUL238]], i32 3
+; CHECK-NEXT:    [[TMP6:%.*]] = mul <4 x i32> [[TMP1]], [[TMP5]]
+; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    ret i32 [[TMP8]]
+;
 entry:
   %0 = load i32, i32* %A, align 4
   %mul238 = add i32 %m, %n
@@ -79,12 +111,28 @@
 }
 
 ; In this example we have an external user that is not the first element in the vector.
-; CHECK-LABEL: @extr_user1(
-; CHECK: load <4 x i32>
-; CHECK: store <4 x i32>
-; CHECK: extractelement <4 x i32>
-; CHECK-NEXT: ret
 define i32 @extr_user1(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
+; CHECK-LABEL: @extr_user1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[MUL238:%.*]] = add i32 [[M:%.*]], [[N:%.*]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[MUL238]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[MUL238]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[MUL238]], i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[MUL238]], i32 3
+; CHECK-NEXT:    [[TMP6:%.*]] = mul <4 x i32> [[TMP1]], [[TMP5]]
+; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    ret i32 [[TMP8]]
+;
 entry:
   %0 = load i32, i32* %A, align 4
   %mul238 = add i32 %m, %n
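
All three functions in diamond.ll share one shape, and the checks differ only in the final extract. A C++ sketch of the pattern, following the file's own C comment; the extr_user variants add a return of A[0] or A[1] where @foo returns 0:

    int diamond(int *B, int *A, int n, int m) {
      int k = m + n;        // one scalar broadcast into all four lanes
      B[0] = A[0] * k;      // the whole group becomes a single
      B[1] = A[1] * k;      // <4 x i32> multiply and store
      B[2] = A[2] * k;
      B[3] = A[3] * k;
      return A[0];          // external user of a vector lane: forces an
    }                       // extractelement (the extr_user variants)
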
diff --git a/test/Transforms/SLPVectorizer/X86/external_user.ll b/test/Transforms/SLPVectorizer/X86/external_user.ll
index bf2febd..8ee644f 100644
--- a/test/Transforms/SLPVectorizer/X86/external_user.ll
+++ b/test/Transforms/SLPVectorizer/X86/external_user.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -22,16 +23,27 @@
 ;   return x; <-- must extract here!
 ; }
 
-;CHECK: ext_user
-;CHECK: phi <2 x double>
-;CHECK: fadd <2 x double>
-;CHECK: fmul <2 x double>
-;CHECK: br
-;CHECK: store <2 x double>
-;CHECK: extractelement <2 x double>
-;CHECK: ret double
-
 define double @ext_user(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) {
+; CHECK-LABEL: @ext_user(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[I_020:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x double> <double 1.000000e+01, double 1.000000e+01>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 4.000000e+00, double 4.000000e+00>, [[TMP3]]
+; CHECK-NEXT:    [[TMP5]] = fadd <2 x double> <double 4.000000e+00, double 4.000000e+00>, [[TMP4]]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_020]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 100
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
+; CHECK-NEXT:    ret double [[TMP7]]
+;
 entry:
   %arrayidx = getelementptr inbounds double, double* %A, i64 1
   %0 = load double, double* %arrayidx, align 8
@@ -65,9 +77,33 @@
 ; This test would assert because we would keep the scalar fpext and fadd alive.
 ; PR18129
 
-; CHECK-LABEL: needtogather
 define i32 @needtogather(double *noalias %a, i32 *noalias %b,  float * noalias %c,
-                i32 * noalias %d) {
+; CHECK-LABEL: @needtogather(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C:%.*]]
+; CHECK-NEXT:    [[SUB:%.*]] = fsub float 0.000000e+00, [[TMP1]]
+; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUB]], 0.000000e+00
+; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[CONV]], [[MUL]]
+; CHECK-NEXT:    [[CONV1:%.*]] = fpext float [[ADD]] to double
+; CHECK-NEXT:    [[SUB3:%.*]] = fsub float 1.000000e+00, [[TMP1]]
+; CHECK-NEXT:    [[MUL4:%.*]] = fmul float [[SUB3]], 0.000000e+00
+; CHECK-NEXT:    [[ADD5:%.*]] = fadd float [[CONV]], [[MUL4]]
+; CHECK-NEXT:    [[CONV6:%.*]] = fpext float [[ADD5]] to double
+; CHECK-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[ADD]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi double [ [[CONV6]], [[IF_THEN]] ], [ [[CONV1]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[E_0:%.*]] = phi double [ [[CONV1]], [[IF_THEN]] ], [ [[CONV6]], [[ENTRY]] ]
+; CHECK-NEXT:    store double [[STOREMERGE]], double* [[A:%.*]], align 8
+; CHECK-NEXT:    [[CONV7:%.*]] = fptosi double [[E_0]] to i32
+; CHECK-NEXT:    store i32 [[CONV7]], i32* [[B:%.*]], align 4
+; CHECK-NEXT:    ret i32 undef
+;
+  i32 * noalias %d) {
 entry:
   %0 = load i32, i32* %d, align 4
   %conv = sitofp i32 %0 to float
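
The ext_user checks above are easier to read against the scalar source they came from. A rough C++ reconstruction, inferred from the checks; the key point is that t0, an intermediate from the last iteration, escapes the vectorized region and must be extracted from lane 0 of the fmul result:

    double ext_user(double *B, double *A) {
      double x0 = A[0], x1 = A[1], t0 = 0.0;
      for (int i = 0; i < 100; ++i) {
        t0        = (x0 + 10.0) * 4.0;   // lane 0
        double t1 = (x1 + 10.0) * 4.0;   // lane 1
        x0 = t0 + 4.0;
        x1 = t1 + 4.0;
      }
      B[0] = x0;                         // vector store of the recurrence
      B[1] = x1;
      return t0;   // external scalar use of lane 0: needs an extractelement
    }
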
diff --git a/test/Transforms/SLPVectorizer/X86/extractcost.ll b/test/Transforms/SLPVectorizer/X86/extractcost.ll
index 164ddf3..c9fae44 100644
--- a/test/Transforms/SLPVectorizer/X86/extractcost.ll
+++ b/test/Transforms/SLPVectorizer/X86/extractcost.ll
@@ -1,12 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
-;CHECK-LABEL: @foo(
-;CHECK: store <4 x i32>
-;CHECK: ret
 define i32 @foo(i32* nocapture %A, i32 %n, i32 %m) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[N:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[N]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[N]], i32 2
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[N]], i32 3
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], <i32 5, i32 9, i32 3, i32 10>
+; CHECK-NEXT:    [[TMP5:%.*]] = shl <4 x i32> [[TMP3]], <i32 5, i32 9, i32 3, i32 10>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> <i32 9, i32 9, i32 9, i32 9>, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x i32> [[TMP7]], i32 0
+; CHECK-NEXT:    [[EXTERNALUSE1:%.*]] = add nsw i32 [[TMP9]], [[M:%.*]]
+; CHECK-NEXT:    [[EXTERNALUSE2:%.*]] = mul nsw i32 [[TMP9]], [[M]]
+; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 [[EXTERNALUSE1]], [[EXTERNALUSE2]]
+; CHECK-NEXT:    ret i32 [[ADD10]]
+;
 entry:
   %mul = mul nsw i32 %n, 5
   %add = add nsw i32 %mul, 9
@@ -23,7 +39,7 @@
   %add8 = add nsw i32 %mul7, 9
   %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
   store i32 %add8, i32* %arrayidx9, align 4
-  %externaluse1 = add nsw i32 %add, %m  
+  %externaluse1 = add nsw i32 %add, %m
   %externaluse2 = mul nsw i32 %add, %m  ; we should add the extract cost only once and the store will be vectorized
   %add10 = add nsw i32 %externaluse1, %externaluse2
   ret i32 %add10
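
The inline comment about extract cost deserves a concrete reading. A C++ sketch of what extractcost.ll exercises, reconstructed from the checks (lane arithmetic per the mul/shl shuffle above): lane 0 has two scalar users outside the vectorized tree, but a single extractelement feeds both, so the cost model should charge for the extraction only once.

    int foo(int *A, int n, int m) {
      int a = n * 5 + 9;
      A[0] = a;                  // these four stores vectorize
      A[1] = n * 9 + 9;
      A[2] = (n << 3) + 9;
      A[3] = n * 10 + 9;
      int use1 = a + m;          // first external user of lane 0
      int use2 = a * m;          // second user: shares the same extract
      return use1 + use2;
    }
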
diff --git a/test/Transforms/SLPVectorizer/X86/fabs-cost-softfp.ll b/test/Transforms/SLPVectorizer/X86/fabs-cost-softfp.ll
index add4858..25394f4 100644
--- a/test/Transforms/SLPVectorizer/X86/fabs-cost-softfp.ll
+++ b/test/Transforms/SLPVectorizer/X86/fabs-cost-softfp.ll
@@ -1,14 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; Regression test from https://bugs.llvm.org/show_bug.cgi?id=39168
 ; Based on code from `compiler-rt/lib/builtins/multc3.c`
 ; On platforms where fp128 lowers to an integer type (soft-fp) we
 ; shouldn't be calling isFAbsFree() on the legalized type.
 
 ; RUN: opt -slp-vectorizer -slp-threshold=-10 -S %s | FileCheck %s
-; CHECK: call <2 x fp128> @llvm.fabs.v2f128(<2 x fp128
 
 target triple = "i686-unknown-linux-gnu"
 
 define void @vectorize_fp128(fp128 %c, fp128 %d) #0 {
+; CHECK-LABEL: @vectorize_fp128(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x fp128> undef, fp128 [[C:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x fp128> [[TMP0]], fp128 [[D:%.*]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x fp128> @llvm.fabs.v2f128(<2 x fp128> [[TMP1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = fcmp oeq <2 x fp128> [[TMP2]], <fp128 0xL00000000000000007FFF000000000000, fp128 0xL00000000000000007FFF000000000000>
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i1> [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1
+; CHECK-NEXT:    [[OR_COND39:%.*]] = or i1 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    br i1 [[OR_COND39]], label [[IF_THEN13:%.*]], label [[IF_END24:%.*]]
+; CHECK:       if.then13:
+; CHECK-NEXT:    unreachable
+; CHECK:       if.end24:
+; CHECK-NEXT:    ret void
+;
 entry:
   %0 = tail call fp128 @llvm.fabs.f128(fp128 %c)
   %cmpinf10 = fcmp oeq fp128 %0, 0xL00000000000000007FFF000000000000
diff --git a/test/Transforms/SLPVectorizer/X86/flag.ll b/test/Transforms/SLPVectorizer/X86/flag.ll
index 7db8d75..e695c02 100644
--- a/test/Transforms/SLPVectorizer/X86/flag.ll
+++ b/test/Transforms/SLPVectorizer/X86/flag.ll
@@ -1,14 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=1000 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; Check that the command-line flag works.
-;CHECK:rollable
-;CHECK-NOT:load <4 x i32>
-;CHECK: ret
-
 define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i64 %n) {
+; CHECK-LABEL: @rollable(
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
+; CHECK:       .lr.ph:
+; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP26:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = or i64 [[TMP2]], 2
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = or i64 [[TMP2]], 3
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP4]], 7
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], 7
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP7]], 7
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP16]], 14
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP10]], 7
+; CHECK-NEXT:    [[TMP19:%.*]] = add i32 [[TMP18]], 21
+; CHECK-NEXT:    [[TMP20:%.*]] = mul i32 [[TMP13]], 7
+; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP20]], 28
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    store i32 [[TMP15]], i32* [[TMP22]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP5]]
+; CHECK-NEXT:    store i32 [[TMP17]], i32* [[TMP23]], align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP8]]
+; CHECK-NEXT:    store i32 [[TMP19]], i32* [[TMP24]], align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP11]]
+; CHECK-NEXT:    store i32 [[TMP21]], i32* [[TMP25]], align 4
+; CHECK-NEXT:    [[TMP26]] = add i64 [[I_019]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP26]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
+; CHECK:       ._crit_edge:
+; CHECK-NEXT:    ret i32 undef
+;
   %1 = icmp eq i64 %n, 0
   br i1 %1, label %._crit_edge, label %.lr.ph
 
diff --git a/test/Transforms/SLPVectorizer/X86/gep.ll b/test/Transforms/SLPVectorizer/X86/gep.ll
index 60b0a11..a3cce8b 100644
--- a/test/Transforms/SLPVectorizer/X86/gep.ll
+++ b/test/Transforms/SLPVectorizer/X86/gep.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S |FileCheck %s
 ; RUN: opt < %s -aa-pipeline=basic-aa -passes=slp-vectorizer -S |FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@@ -8,9 +9,19 @@
 ;   x->first  = y->first  + 16
 ;   x->second = y->second + 16
 
-; CHECK-LABEL: foo1
-; CHECK: <2 x i32*>
 define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
+; CHECK-LABEL: @foo1(
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32** [[TMP1]] to <2 x i32*>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i32*>, <2 x i32*>* [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i32, <2 x i32*> [[TMP5]], <2 x i64> <i64 16, i64 16>
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32** [[TMP2]] to <2 x i32*>*
+; CHECK-NEXT:    store <2 x i32*> [[TMP6]], <2 x i32*>* [[TMP8]], align 8
+; CHECK-NEXT:    ret void
+;
   %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
   %2 = load i32*, i32** %1, align 8
   %3 = getelementptr inbounds i32, i32* %2, i64 16
@@ -26,9 +37,20 @@
 
 ; Test that we don't vectorize GEP expressions if indexes are not constants.
 ; We can't produce efficient code in that case.
-; CHECK-LABEL: foo2
-; CHECK-NOT: <2 x i32*>
 define void @foo2 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y, i32 %i) {
+; CHECK-LABEL: @foo2(
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[I:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X:%.*]], i64 0, i32 0
+; CHECK-NEXT:    store i32* [[TMP3]], i32** [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 [[I]]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X]], i64 0, i32 1
+; CHECK-NEXT:    store i32* [[TMP7]], i32** [[TMP8]], align 8
+; CHECK-NEXT:    ret void
+;
   %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
   %2 = load i32*, i32** %1, align 8
   %3 = getelementptr inbounds i32, i32* %2, i32 %i
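
The two gep.ll functions make a clean contrast. In plain C++, mirroring the file's own comment (Pair is a stand-in for the anonymous { i32*, i32* } struct):

    struct Pair { int *first; int *second; };

    void foo1(Pair *x, Pair *y) {
      x->first  = y->first  + 16;   // constant offsets: one GEP on a
      x->second = y->second + 16;   // <2 x i32*> vector of pointers
    }

    void foo2(Pair *x, Pair *y, int i) {
      x->first  = y->first  + i;    // runtime index: no efficient vector
      x->second = y->second + i;    // GEP, so this stays scalar
    }
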
diff --git a/test/Transforms/SLPVectorizer/X86/gep_mismatch.ll b/test/Transforms/SLPVectorizer/X86/gep_mismatch.ll
index 1cd28a9..f9b9995 100644
--- a/test/Transforms/SLPVectorizer/X86/gep_mismatch.ll
+++ b/test/Transforms/SLPVectorizer/X86/gep_mismatch.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -S -slp-vectorizer
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -slp-vectorizer | FileCheck %s
 
 ; This code has GEPs with different index types, which should not
 ; matter for the SLPVectorizer.
@@ -6,6 +7,19 @@
 target triple = "x86_64--linux"
 
 define void @foo() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[BB1:%.*]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[LS1_PH:%.*]] = phi float* [ [[_TMP1:%.*]], [[BB1]] ], [ undef, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LS2_PH:%.*]] = phi float* [ [[_TMP2:%.*]], [[BB1]] ], [ undef, [[ENTRY]] ]
+; CHECK-NEXT:    store float undef, float* [[LS1_PH]]
+; CHECK-NEXT:    [[_TMP1]] = getelementptr float, float* [[LS1_PH]], i32 1
+; CHECK-NEXT:    [[_TMP2]] = getelementptr float, float* [[LS2_PH]], i64 4
+; CHECK-NEXT:    br i1 false, label [[BB1]], label [[BB2:%.*]]
+; CHECK:       bb2:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %bb1
 
diff --git a/test/Transforms/SLPVectorizer/X86/implicitfloat.ll b/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
index f7283f0d0..2fbb8f0 100644
--- a/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
+++ b/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
@@ -1,13 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; Don't vectorize when noimplicitfloat is used.
-; CHECK: test1
-; CHECK-NOT: store <2 x double>
-; CHECK: ret
 define void @test1(double* %a, double* %b, double* %c) noimplicitfloat { ; <------ noimplicitfloat attribute here!
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I0:%.*]] = load double, double* [[A:%.*]], align 8
+; CHECK-NEXT:    [[I1:%.*]] = load double, double* [[B:%.*]], align 8
+; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[I0]], [[I1]]
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
+; CHECK-NEXT:    [[I3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
+; CHECK-NEXT:    [[I4:%.*]] = load double, double* [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
+; CHECK-NEXT:    store double [[MUL]], double* [[C:%.*]], align 8
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C]], i64 1
+; CHECK-NEXT:    store double [[MUL5]], double* [[ARRAYIDX5]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
diff --git a/test/Transforms/SLPVectorizer/X86/intrinsic.ll b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
index cc5a4af..fbce755 100644
--- a/test/Transforms/SLPVectorizer/X86/intrinsic.ll
+++ b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-999 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -5,13 +6,19 @@
 
 declare double @llvm.fabs.f64(double) nounwind readnone
 
-;CHECK-LABEL: @vec_fabs_f64(
-;CHECK: load <2 x double>
-;CHECK: load <2 x double>
-;CHECK: call <2 x double> @llvm.fabs.v2f64
-;CHECK: store <2 x double>
-;CHECK: ret
 define void @vec_fabs_f64(double* %a, double* %b, double* %c) {
+; CHECK-LABEL: @vec_fabs_f64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[TMP4]])
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
@@ -31,13 +38,18 @@
 
 declare float @llvm.copysign.f32(float, float) nounwind readnone
 
-;CHECK-LABEL: @vec_copysign_f32(
-;CHECK: load <4 x float>
-;CHECK: load <4 x float>
-;CHECK: call <4 x float> @llvm.copysign.v4f32
-;CHECK: store <4 x float>
-;CHECK: ret
 define void @vec_copysign_f32(float* %a, float* %b, float* noalias %c) {
+; CHECK-LABEL: @vec_copysign_f32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = call <4 x float> @llvm.copysign.v4f32(<4 x float> [[TMP1]], <4 x float> [[TMP3]])
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP4]], <4 x float>* [[TMP5]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %0 = load float, float* %a, align 4
   %1 = load float, float* %b, align 4
@@ -74,6 +86,18 @@
 declare i32 @llvm.bswap.i32(i32) nounwind readnone
 
 define void @vec_bswap_i32(i32* %a, i32* %b, i32* %c) {
+; CHECK-LABEL: @vec_bswap_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[B:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = add <4 x i32> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[TMP4]])
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[C:%.*]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load i32, i32* %a, align 4
   %i1 = load i32, i32* %b, align 4
@@ -110,17 +134,23 @@
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
-; CHECK-LABEL: @vec_bswap_i32(
-; CHECK: load <4 x i32>
-; CHECK: load <4 x i32>
-; CHECK: call <4 x i32> @llvm.bswap.v4i32
-; CHECK: store <4 x i32>
-; CHECK: ret
 }
 
 declare i32 @llvm.ctlz.i32(i32,i1) nounwind readnone
 
 define void @vec_ctlz_i32(i32* %a, i32* %b, i32* %c, i1) {
+; CHECK-LABEL: @vec_ctlz_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[B:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> [[TMP5]], i1 true)
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[C:%.*]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load i32, i32* %a, align 4
   %i1 = load i32, i32* %b, align 4
@@ -157,15 +187,42 @@
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
-; CHECK-LABEL: @vec_ctlz_i32(
-; CHECK: load <4 x i32>
-; CHECK: load <4 x i32>
-; CHECK: call <4 x i32> @llvm.ctlz.v4i32
-; CHECK: store <4 x i32>
-; CHECK: ret
 }
 
 define void @vec_ctlz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
+; CHECK-LABEL: @vec_ctlz_i32_neg(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I0:%.*]] = load i32, i32* [[A:%.*]], align 4
+; CHECK-NEXT:    [[I1:%.*]] = load i32, i32* [[B:%.*]], align 4
+; CHECK-NEXT:    [[ADD1:%.*]] = add i32 [[I0]], [[I1]]
+; CHECK-NEXT:    [[CALL1:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[ADD1]], i1 true) #3
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 1
+; CHECK-NEXT:    [[I2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 1
+; CHECK-NEXT:    [[I3:%.*]] = load i32, i32* [[ARRAYIDX3]], align 4
+; CHECK-NEXT:    [[ADD2:%.*]] = add i32 [[I2]], [[I3]]
+; CHECK-NEXT:    [[CALL2:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[ADD2]], i1 false) #3
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 2
+; CHECK-NEXT:    [[I4:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 2
+; CHECK-NEXT:    [[I5:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    [[ADD3:%.*]] = add i32 [[I4]], [[I5]]
+; CHECK-NEXT:    [[CALL3:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[ADD3]], i1 true) #3
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 3
+; CHECK-NEXT:    [[I6:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 3
+; CHECK-NEXT:    [[I7:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4
+; CHECK-NEXT:    [[ADD4:%.*]] = add i32 [[I6]], [[I7]]
+; CHECK-NEXT:    [[CALL4:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[ADD4]], i1 false) #3
+; CHECK-NEXT:    store i32 [[CALL1]], i32* [[C:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 1
+; CHECK-NEXT:    store i32 [[CALL2]], i32* [[ARRAYIDX8]], align 4
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 2
+; CHECK-NEXT:    store i32 [[CALL3]], i32* [[ARRAYIDX9]], align 4
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 3
+; CHECK-NEXT:    store i32 [[CALL4]], i32* [[ARRAYIDX10]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load i32, i32* %a, align 4
   %i1 = load i32, i32* %b, align 4
@@ -202,8 +259,6 @@
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
-; CHECK-LABEL: @vec_ctlz_i32_neg(
-; CHECK-NOT: call <4 x i32> @llvm.ctlz.v4i32
 
 }
 
@@ -211,6 +266,18 @@
 declare i32 @llvm.cttz.i32(i32,i1) nounwind readnone
 
 define void @vec_cttz_i32(i32* %a, i32* %b, i32* %c, i1) {
+; CHECK-LABEL: @vec_cttz_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[B:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> [[TMP5]], i1 true)
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[C:%.*]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load i32, i32* %a, align 4
   %i1 = load i32, i32* %b, align 4
@@ -247,15 +314,42 @@
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
-; CHECK-LABEL: @vec_cttz_i32(
-; CHECK: load <4 x i32>
-; CHECK: load <4 x i32>
-; CHECK: call <4 x i32> @llvm.cttz.v4i32
-; CHECK: store <4 x i32>
-; CHECK: ret
 }
 
 define void @vec_cttz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
+; CHECK-LABEL: @vec_cttz_i32_neg(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I0:%.*]] = load i32, i32* [[A:%.*]], align 4
+; CHECK-NEXT:    [[I1:%.*]] = load i32, i32* [[B:%.*]], align 4
+; CHECK-NEXT:    [[ADD1:%.*]] = add i32 [[I0]], [[I1]]
+; CHECK-NEXT:    [[CALL1:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[ADD1]], i1 true) #3
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 1
+; CHECK-NEXT:    [[I2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 1
+; CHECK-NEXT:    [[I3:%.*]] = load i32, i32* [[ARRAYIDX3]], align 4
+; CHECK-NEXT:    [[ADD2:%.*]] = add i32 [[I2]], [[I3]]
+; CHECK-NEXT:    [[CALL2:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[ADD2]], i1 false) #3
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 2
+; CHECK-NEXT:    [[I4:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 2
+; CHECK-NEXT:    [[I5:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    [[ADD3:%.*]] = add i32 [[I4]], [[I5]]
+; CHECK-NEXT:    [[CALL3:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[ADD3]], i1 true) #3
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 3
+; CHECK-NEXT:    [[I6:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 3
+; CHECK-NEXT:    [[I7:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4
+; CHECK-NEXT:    [[ADD4:%.*]] = add i32 [[I6]], [[I7]]
+; CHECK-NEXT:    [[CALL4:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[ADD4]], i1 false) #3
+; CHECK-NEXT:    store i32 [[CALL1]], i32* [[C:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 1
+; CHECK-NEXT:    store i32 [[CALL2]], i32* [[ARRAYIDX8]], align 4
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 2
+; CHECK-NEXT:    store i32 [[CALL3]], i32* [[ARRAYIDX9]], align 4
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 3
+; CHECK-NEXT:    store i32 [[CALL4]], i32* [[ARRAYIDX10]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load i32, i32* %a, align 4
   %i1 = load i32, i32* %b, align 4
@@ -292,13 +386,23 @@
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
-; CHECK-LABEL: @vec_cttz_i32_neg(
-; CHECK-NOT: call <4 x i32> @llvm.cttz.v4i32
 }
 
 
 declare float @llvm.powi.f32(float, i32)
 define void @vec_powi_f32(float* %a, float* %b, float* %c, i32 %P) {
+; CHECK-LABEL: @vec_powi_f32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x float> @llvm.powi.v4f32(<4 x float> [[TMP4]], i32 [[P:%.*]])
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP5]], <4 x float>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load float, float* %a, align 4
   %i1 = load float, float* %b, align 4
@@ -335,16 +439,43 @@
   store float %call4, float* %arrayidx10, align 4
   ret void
 
-; CHECK-LABEL: @vec_powi_f32(
-; CHECK: load <4 x float>
-; CHECK: load <4 x float>
-; CHECK: call <4 x float> @llvm.powi.v4f32
-; CHECK: store <4 x float>
-; CHECK: ret
 }
 
 
 define void @vec_powi_f32_neg(float* %a, float* %b, float* %c, i32 %P, i32 %Q) {
+; CHECK-LABEL: @vec_powi_f32_neg(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I0:%.*]] = load float, float* [[A:%.*]], align 4
+; CHECK-NEXT:    [[I1:%.*]] = load float, float* [[B:%.*]], align 4
+; CHECK-NEXT:    [[ADD1:%.*]] = fadd float [[I0]], [[I1]]
+; CHECK-NEXT:    [[CALL1:%.*]] = tail call float @llvm.powi.f32(float [[ADD1]], i32 [[P:%.*]]) #3
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[A]], i32 1
+; CHECK-NEXT:    [[I2:%.*]] = load float, float* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[B]], i32 1
+; CHECK-NEXT:    [[I3:%.*]] = load float, float* [[ARRAYIDX3]], align 4
+; CHECK-NEXT:    [[ADD2:%.*]] = fadd float [[I2]], [[I3]]
+; CHECK-NEXT:    [[CALL2:%.*]] = tail call float @llvm.powi.f32(float [[ADD2]], i32 [[Q:%.*]]) #3
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[A]], i32 2
+; CHECK-NEXT:    [[I4:%.*]] = load float, float* [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[B]], i32 2
+; CHECK-NEXT:    [[I5:%.*]] = load float, float* [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    [[ADD3:%.*]] = fadd float [[I4]], [[I5]]
+; CHECK-NEXT:    [[CALL3:%.*]] = tail call float @llvm.powi.f32(float [[ADD3]], i32 [[P]]) #3
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[A]], i32 3
+; CHECK-NEXT:    [[I6:%.*]] = load float, float* [[ARRAYIDX6]], align 4
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[B]], i32 3
+; CHECK-NEXT:    [[I7:%.*]] = load float, float* [[ARRAYIDX7]], align 4
+; CHECK-NEXT:    [[ADD4:%.*]] = fadd float [[I6]], [[I7]]
+; CHECK-NEXT:    [[CALL4:%.*]] = tail call float @llvm.powi.f32(float [[ADD4]], i32 [[Q]]) #3
+; CHECK-NEXT:    store float [[CALL1]], float* [[C:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[C]], i32 1
+; CHECK-NEXT:    store float [[CALL2]], float* [[ARRAYIDX8]], align 4
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[C]], i32 2
+; CHECK-NEXT:    store float [[CALL3]], float* [[ARRAYIDX9]], align 4
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[C]], i32 3
+; CHECK-NEXT:    store float [[CALL4]], float* [[ARRAYIDX10]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load float, float* %a, align 4
   %i1 = load float, float* %b, align 4
@@ -381,6 +512,4 @@
   store float %call4, float* %arrayidx10, align 4
   ret void
 
-; CHECK-LABEL: @vec_powi_f32_neg(
-; CHECK-NOT: call <4 x float> @llvm.powi.v4f32
 }
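
A common thread runs through the intrinsic.ll pairs above: a group of calls vectorizes into one vector intrinsic only when every non-pointer scalar operand matches across lanes. A hedged C++ sketch using the powi case, assuming the GCC/Clang __builtin_powif builtin, which lowers to llvm.powi:

    void powi_ok(float *a, float *b, float *c, int P) {
      c[0] = __builtin_powif(a[0] + b[0], P);   // uniform exponent across
      c[1] = __builtin_powif(a[1] + b[1], P);   // all lanes: fuses into one
      c[2] = __builtin_powif(a[2] + b[2], P);   // @llvm.powi.v4f32 call
      c[3] = __builtin_powif(a[3] + b[3], P);
    }

    void powi_neg(float *a, float *b, float *c, int P, int Q) {
      c[0] = __builtin_powif(a[0] + b[0], P);
      c[1] = __builtin_powif(a[1] + b[1], Q);   // Q breaks the lane match:
      c[2] = __builtin_powif(a[2] + b[2], P);   // all four calls stay scalar
      c[3] = __builtin_powif(a[3] + b[3], Q);   // (the vec_powi_f32_neg case)
    }

The ctlz/cttz _neg variants above fail for the same reason: the i1 is_zero_undef immediate alternates between true and false across the four lanes.
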
diff --git a/test/Transforms/SLPVectorizer/X86/long_chains.ll b/test/Transforms/SLPVectorizer/X86/long_chains.ll
index f87dabf..99b340a 100644
--- a/test/Transforms/SLPVectorizer/X86/long_chains.ll
+++ b/test/Transforms/SLPVectorizer/X86/long_chains.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -5,13 +6,31 @@
 
 ; At this point we can't vectorize only parts of the tree.
 
-; CHECK: test
-; CHECK: insertelement <2 x i8>
-; CHECK: insertelement <2 x i8>
-; CHECK: sitofp <2 x i8>
-; CHECK: fmul <2 x double>
-; CHECK: ret
 define i32 @test(double* nocapture %A, i8* nocapture %B) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[B:%.*]] to <2 x i8>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = add <2 x i8> <i8 3, i8 3>, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i8> undef, i8 [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i8> [[TMP4]], i8 [[TMP5]], i32 1
+; CHECK-NEXT:    [[TMP7:%.*]] = sitofp <2 x i8> [[TMP6]] to <2 x double>
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul <2 x double> [[TMP7]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = fadd <2 x double> <double 1.000000e+00, double 1.000000e+00>, [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = fmul <2 x double> [[TMP9]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fadd <2 x double> <double 1.000000e+00, double 1.000000e+00>, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = fmul <2 x double> [[TMP11]], [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = fadd <2 x double> <double 1.000000e+00, double 1.000000e+00>, [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = fmul <2 x double> [[TMP13]], [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = fadd <2 x double> <double 1.000000e+00, double 1.000000e+00>, [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = fmul <2 x double> [[TMP15]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = fadd <2 x double> <double 1.000000e+00, double 1.000000e+00>, [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP17]], <2 x double>* [[TMP18]], align 8
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %0 = load i8, i8* %B, align 1
   %arrayidx1 = getelementptr inbounds i8, i8* %B, i64 1
@@ -19,7 +38,7 @@
   %add = add i8 %0, 3
   %add4 = add i8 %1, 3
   %conv6 = sitofp i8 %add to double
-  %conv7 = sitofp i8 %add4 to double 
+  %conv7 = sitofp i8 %add4 to double
   %mul = fmul double %conv6, %conv6
   %add8 = fadd double %mul, 1.000000e+00
   %mul9 = fmul double %conv7, %conv7
diff --git a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
index dace4b3..1b19aea 100644
--- a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
+++ b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
@@ -1,14 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
-;CHECK-LABEL: @foo(
-;CHECK: load <8 x i32>
-;CHECK: add nsw <8 x i32>
-;CHECK: store <8 x i32>
-;CHECK: ret
 define i32 @foo(i32* nocapture %A, i32 %n) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP62:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP62]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = or i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[INDVARS_IV]], 2
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP3:%.*]] = or i64 [[INDVARS_IV]], 4
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i64 [[INDVARS_IV]], 5
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[INDVARS_IV]], 6
+; CHECK-NEXT:    [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP6:%.*]] = or i64 [[INDVARS_IV]], 7
+; CHECK-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[ARRAYIDX]] to <8 x i32>*
+; CHECK-NEXT:    [[TMP8:%.*]] = load <8 x i32>, <8 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <8 x i32> undef, i32 [[N]], i32 0
+; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <8 x i32> [[TMP9]], i32 [[N]], i32 1
+; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <8 x i32> [[TMP10]], i32 [[N]], i32 2
+; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <8 x i32> [[TMP11]], i32 [[N]], i32 3
+; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <8 x i32> [[TMP12]], i32 [[N]], i32 4
+; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <8 x i32> [[TMP13]], i32 [[N]], i32 5
+; CHECK-NEXT:    [[TMP15:%.*]] = insertelement <8 x i32> [[TMP14]], i32 [[N]], i32 6
+; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <8 x i32> [[TMP15]], i32 [[N]], i32 7
+; CHECK-NEXT:    [[TMP17:%.*]] = add nsw <8 x i32> [[TMP16]], [[TMP8]]
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i32* [[ARRAYIDX]] to <8 x i32>*
+; CHECK-NEXT:    store <8 x i32> [[TMP17]], <8 x i32>* [[TMP18]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP19]], [[N]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %cmp62 = icmp sgt i32 %n, 0
   br i1 %cmp62, label %for.body, label %for.end
diff --git a/test/Transforms/SLPVectorizer/X86/metadata.ll b/test/Transforms/SLPVectorizer/X86/metadata.ll
index ebef6b5..fdfd032 100644
--- a/test/Transforms/SLPVectorizer/X86/metadata.ll
+++ b/test/Transforms/SLPVectorizer/X86/metadata.ll
@@ -1,16 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
-;CHECK-LABEL: test1
-;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA:[0-9]+]]
-;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA]]
-;CHECK: fmul <2 x double>{{.*}}!fpmath ![[FP1:[0-9]+]]
-;CHECK: store <2 x double>{{.*}}!tbaa ![[TBAA]]
-;CHECK: ret void
-
 define void @test1(double* %a, double* %b, double* %c) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8, !tbaa !0
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8, !tbaa !0
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]], !fpmath !4
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8, !tbaa !0
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load double, double* %a, align 8, !tbaa !4
   %i1 = load double, double* %b, align 8, !tbaa !4
@@ -26,14 +31,19 @@
   ret void
 }
 
-;CHECK-LABEL: test2
-;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA]]
-;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA]]
-;CHECK: fmul <2 x double>{{.*}}!fpmath ![[FP2:[0-9]+]]
-;CHECK: store <2 x double>{{.*}}!tbaa ![[TBAA]]
-;CHECK: ret void
-
 define void @test2(double* %a, double* %b, i8* %e) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8, !tbaa !0
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8, !tbaa !0
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]], !fpmath !5
+; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[E:%.*]] to double*
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[C]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8, !tbaa !0
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load double, double* %a, align 8, !tbaa !4
   %i1 = load double, double* %b, align 8, !tbaa !4
@@ -51,9 +61,9 @@
   ret void
 }
 
-;CHECK-DAG: ![[TBAA]] = !{[[TYPEC:!.*]], [[TYPEC]], i64 0}
-;CHECK-DAG: ![[FP1]] = !{float 5.000000e+00}
-;CHECK-DAG: ![[FP2]] = !{float 2.500000e+00}
+;CHECK-DAG: !0 = !{[[TYPEC:!.*]], [[TYPEC]], i64 0}
+;CHECK-DAG: !4 = !{float 5.000000e+00}
+;CHECK-DAG: !5 = !{float 2.500000e+00}
 !0 = !{ float 5.0 }
 !1 = !{ float 2.5 }
 !2 = !{!"Simple C/C++ TBAA"}
diff --git a/test/Transforms/SLPVectorizer/X86/multi_block.ll b/test/Transforms/SLPVectorizer/X86/multi_block.ll
index b381d06..d021610 100644
--- a/test/Transforms/SLPVectorizer/X86/multi_block.ll
+++ b/test/Transforms/SLPVectorizer/X86/multi_block.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -16,15 +17,23 @@
 ; }
 
 
-;CHECK-LABEL: @bar(
-;CHECK: load <2 x double>
-;CHECK: fptrunc <2 x double>
-;CHECK: call i32
-;CHECK: fadd <2 x float>
-;CHECK: fpext <2 x float>
-;CHECK: store <2 x double>
-;CHECK: ret
 define i32 @bar(double* nocapture %A, i32 %d) {
+; CHECK-LABEL: @bar(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = fptrunc <2 x double> [[TMP2]] to <2 x float>
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[D:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TMP7:%.*]], label [[TMP5:%.*]]
+; CHECK:         [[TMP6:%.*]] = tail call i32 (...) @foo()
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:         [[TMP8:%.*]] = fadd <2 x float> <float 4.000000e+00, float 5.000000e+00>, [[TMP3]]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds double, double* [[A]], i64 8
+; CHECK-NEXT:    [[TMP10:%.*]] = fpext <2 x float> [[TMP8]] to <2 x double>
+; CHECK-NEXT:    [[TMP11:%.*]] = fadd <2 x double> <double 9.000000e+00, double 5.000000e+00>, [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast double* [[TMP9]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP11]], <2 x double>* [[TMP12]], align 8
+; CHECK-NEXT:    ret i32 undef
+;
   %1 = load double, double* %A, align 8
   %2 = getelementptr inbounds double, double* %A, i64 1
   %3 = load double, double* %2, align 8
diff --git a/test/Transforms/SLPVectorizer/X86/multi_user.ll b/test/Transforms/SLPVectorizer/X86/multi_user.ll
index 3197f6d..ce8594e 100644
--- a/test/Transforms/SLPVectorizer/X86/multi_user.ll
+++ b/test/Transforms/SLPVectorizer/X86/multi_user.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -11,13 +12,26 @@
 ;  A[4] += n * 5 + 11;
 ;}
 
-;CHECK-LABEL: @foo(
-;CHECK: insertelement <4 x i32>
-;CHECK: load <4 x i32>
-;CHECK: add nsw <4 x i32>
-;CHECK: store <4 x i32>
-;CHECK: ret
 define i32 @foo(i32* nocapture %A, i32 %n) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nsw i32 [[N:%.*]], 5
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP1]], i32 3
+; CHECK-NEXT:    [[TMP6:%.*]] = add nsw <4 x i32> <i32 7, i32 8, i32 9, i32 10>, [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = add nsw <4 x i32> [[TMP6]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[A]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = add nsw i32 [[TMP1]], 11
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 4
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = add nsw i32 [[TMP11]], [[TMP13]]
+; CHECK-NEXT:    store i32 [[TMP14]], i32* [[TMP12]], align 4
+; CHECK-NEXT:    ret i32 undef
+;
   %1 = mul nsw i32 %n, 5
   %2 = add nsw i32 %1, 7
   %3 = load i32, i32* %A, align 4
diff --git a/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll b/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll
deleted file mode 100644
index b250735..0000000
--- a/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: opt < %s -slp-vectorizer -o - -S -slp-threshold=-1000
-
-target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
-target triple = "nvptx--nvidiacl"
-
-; CTLZ cannot be vectorized currently because the second argument is a scalar
-; for both the scalar and vector forms of the intrinsic. In the future it
-; should be possible to vectorize such functions.
-; Test causes an assert if LLVM tries to vectorize CTLZ.
-
-define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
-entry:
-  %0 = extractelement <2 x i8> %x, i32 0
-  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
-  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
-  %1 = extractelement <2 x i8> %x, i32 1
-  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
-  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
-  ret <2 x i8> %vecinit2
-}
-
-define <2 x i8> @cltz_test2(<2 x i8> %x) #1 {
-entry:
-  %0 = extractelement <2 x i8> %x, i32 0
-  %1 = extractelement <2 x i8> %x, i32 1
-  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
-  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
-  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
-  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
-  ret <2 x i8> %vecinit2
-}
-
-declare i8 @llvm.ctlz.i8(i8, i1) #3
-
-attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
diff --git a/test/Transforms/SLPVectorizer/X86/operandorder.ll b/test/Transforms/SLPVectorizer/X86/operandorder.ll
index dab7f296..2354ebd 100644
--- a/test/Transforms/SLPVectorizer/X86/operandorder.ll
+++ b/test/Transforms/SLPVectorizer/X86/operandorder.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-100 -instcombine -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
@@ -7,12 +8,18 @@
 ; Make sure we order the operands of commutative operations so that we get
 ; bigger vectorizable trees.
 
-; CHECK-LABEL: shuffle_operands1
-; CHECK:         load <2 x double>
-; CHECK:         fadd <2 x double>
-
 define void @shuffle_operands1(double * noalias %from, double * noalias %to,
-                               double %v1, double %v2) {
+; CHECK-LABEL: @shuffle_operands1(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> undef, double [[V1:%.*]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[V2:%.*]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd <2 x double> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
+  double %v1, double %v2) {
   %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double , double * %from
   %v0_2 = load double , double * %from_1
@@ -24,12 +31,28 @@
   ret void
 }
 
-; CHECK-LABEL: shuffle_preserve_broadcast
-; CHECK: %[[BCAST:[a-z0-9]+]] = insertelement <2 x double> undef, double %v0_1
-; CHECK:                      = shufflevector <2 x double> %[[BCAST]], <2 x double> undef, <2 x i32> zeroinitializer
 define void @shuffle_preserve_broadcast(double * noalias %from,
-                                        double * noalias %to,
-                                        double %v1, double %v2) {
+; CHECK-LABEL: @shuffle_preserve_broadcast(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[LP:%.*]]
+; CHECK:       lp:
+; CHECK-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[FROM_1:%.*]] = getelementptr double, double* [[FROM:%.*]], i32 1
+; CHECK-NEXT:    [[V0_1:%.*]] = load double, double* [[FROM]], align 4
+; CHECK-NEXT:    [[V0_2:%.*]] = load double, double* [[FROM_1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[V0_1]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[P]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[V0_2]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; CHECK-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; CHECK:       ext:
+; CHECK-NEXT:    ret void
+;
+  double * noalias %to,
+  double %v1, double %v2) {
 entry:
 br label %lp
 
@@ -49,12 +72,28 @@
   ret void
 }
 
-; CHECK-LABEL: shuffle_preserve_broadcast2
-; CHECK: %[[BCAST:[a-z0-9]+]] = insertelement <2 x double> undef, double %v0_1
-; CHECK:                      = shufflevector <2 x double> %[[BCAST]], <2 x double> undef, <2 x i32> zeroinitializer
 define void @shuffle_preserve_broadcast2(double * noalias %from,
-                                        double * noalias %to,
-                                        double %v1, double %v2) {
+; CHECK-LABEL: @shuffle_preserve_broadcast2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[LP:%.*]]
+; CHECK:       lp:
+; CHECK-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[FROM_1:%.*]] = getelementptr double, double* [[FROM:%.*]], i32 1
+; CHECK-NEXT:    [[V0_1:%.*]] = load double, double* [[FROM]], align 4
+; CHECK-NEXT:    [[V0_2:%.*]] = load double, double* [[FROM_1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[P]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[V0_2]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[V0_1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; CHECK-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; CHECK:       ext:
+; CHECK-NEXT:    ret void
+;
+  double * noalias %to,
+  double %v1, double %v2) {
 entry:
 br label %lp
 
@@ -74,12 +113,28 @@
   ret void
 }
 
-; CHECK-LABEL: shuffle_preserve_broadcast3
-; CHECK: %[[BCAST:[a-z0-9]+]] = insertelement <2 x double> undef, double %v0_1
-; CHECK:                      = shufflevector <2 x double> %[[BCAST]], <2 x double> undef, <2 x i32> zeroinitializer
 define void @shuffle_preserve_broadcast3(double * noalias %from,
-                                        double * noalias %to,
-                                        double %v1, double %v2) {
+; CHECK-LABEL: @shuffle_preserve_broadcast3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[LP:%.*]]
+; CHECK:       lp:
+; CHECK-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[FROM_1:%.*]] = getelementptr double, double* [[FROM:%.*]], i32 1
+; CHECK-NEXT:    [[V0_1:%.*]] = load double, double* [[FROM]], align 4
+; CHECK-NEXT:    [[V0_2:%.*]] = load double, double* [[FROM_1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[P]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[V0_2]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[V0_1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; CHECK-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; CHECK:       ext:
+; CHECK-NEXT:    ret void
+;
+  double * noalias %to,
+  double %v1, double %v2) {
 entry:
 br label %lp
 
@@ -100,12 +155,28 @@
 }
 
 
-; CHECK-LABEL: shuffle_preserve_broadcast4
-; CHECK: %[[BCAST:[a-z0-9]+]] = insertelement <2 x double> undef, double %v0_1
-; CHECK:                      = shufflevector <2 x double> %[[BCAST]], <2 x double> undef, <2 x i32> zeroinitializer
 define void @shuffle_preserve_broadcast4(double * noalias %from,
-                                        double * noalias %to,
-                                        double %v1, double %v2) {
+; CHECK-LABEL: @shuffle_preserve_broadcast4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[LP:%.*]]
+; CHECK:       lp:
+; CHECK-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[FROM_1:%.*]] = getelementptr double, double* [[FROM:%.*]], i32 1
+; CHECK-NEXT:    [[V0_1:%.*]] = load double, double* [[FROM]], align 4
+; CHECK-NEXT:    [[V0_2:%.*]] = load double, double* [[FROM_1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[V0_2]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[P]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[V0_1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; CHECK-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; CHECK:       ext:
+; CHECK-NEXT:    ret void
+;
+  double * noalias %to,
+  double %v1, double %v2) {
 entry:
 br label %lp
 
@@ -125,12 +196,28 @@
   ret void
 }
 
-; CHECK-LABEL: shuffle_preserve_broadcast5
-; CHECK: %[[BCAST:[a-z0-9]+]] = insertelement <2 x double> undef, double %v0_1
-; CHECK:                      = shufflevector <2 x double> %[[BCAST]], <2 x double> undef, <2 x i32> zeroinitializer
 define void @shuffle_preserve_broadcast5(double * noalias %from,
-                                        double * noalias %to,
-                                        double %v1, double %v2) {
+; CHECK-LABEL: @shuffle_preserve_broadcast5(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[LP:%.*]]
+; CHECK:       lp:
+; CHECK-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[FROM_1:%.*]] = getelementptr double, double* [[FROM:%.*]], i32 1
+; CHECK-NEXT:    [[V0_1:%.*]] = load double, double* [[FROM]], align 4
+; CHECK-NEXT:    [[V0_2:%.*]] = load double, double* [[FROM_1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[V0_1]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[V0_2]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[P]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; CHECK-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; CHECK:       ext:
+; CHECK-NEXT:    ret void
+;
+  double * noalias %to,
+  double %v1, double %v2) {
 entry:
 br label %lp
 
@@ -151,12 +238,28 @@
 }
 
 
-; CHECK-LABEL: shuffle_preserve_broadcast6
-; CHECK: %[[BCAST:[a-z0-9]+]] = insertelement <2 x double> undef, double %v0_1
-; CHECK:                      = shufflevector <2 x double> %[[BCAST]], <2 x double> undef, <2 x i32> zeroinitializer
 define void @shuffle_preserve_broadcast6(double * noalias %from,
-                                        double * noalias %to,
-                                        double %v1, double %v2) {
+; CHECK-LABEL: @shuffle_preserve_broadcast6(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[LP:%.*]]
+; CHECK:       lp:
+; CHECK-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[FROM_1:%.*]] = getelementptr double, double* [[FROM:%.*]], i32 1
+; CHECK-NEXT:    [[V0_1:%.*]] = load double, double* [[FROM]], align 4
+; CHECK-NEXT:    [[V0_2:%.*]] = load double, double* [[FROM_1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[V0_1]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[V0_2]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[P]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; CHECK-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; CHECK:       ext:
+; CHECK-NEXT:    ret void
+;
+  double * noalias %to,
+  double %v1, double %v2) {
 entry:
 br label %lp
 
@@ -179,16 +282,46 @@
 ; Make sure we don't scramble operands when we reorder them and destroy
 ; 'good' source order.
 
-; CHECK-LABEL: good_load_order
-
-; CHECK: %[[V1:[0-9]+]] = load <4 x float>, <4 x float>*
-; CHECK: %[[V2:[0-9]+]] = insertelement <4 x float> undef, float %1, i32 0
-; CHECK: %[[V3:[0-9]+]] = shufflevector <4 x float> %[[V2]], <4 x float> %[[V1]], <4 x i32> <i32 0, i32 4, i32 5, i32 6>
-; CHECK:                = fmul <4 x float> %[[V1]], %[[V3]]
-
 @a = common global [32000 x float] zeroinitializer, align 16
 
 define void @good_load_order() {
+; CHECK-LABEL: @good_load_order(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER:%.*]]
+; CHECK:       for.cond1.preheader:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i32 0, i32 0), align 16
+; CHECK-NEXT:    br label [[FOR_BODY3:%.*]]
+; CHECK:       for.body3:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi float [ [[TMP0]], [[FOR_COND1_PREHEADER]] ], [ [[TMP14:%.*]], [[FOR_BODY3]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY3]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[TMP2]], 1
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP5]], 4
+; CHECK-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP6]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
+; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x float> undef, float [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x float> [[TMP9]], <4 x float> [[TMP8]], <4 x i32> <i32 0, i32 4, i32 5, i32 6>
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul <4 x float> [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast float* [[ARRAYIDX5]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP11]], <4 x float>* [[TMP12]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 5
+; CHECK-NEXT:    [[TMP13:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP13]]
+; CHECK-NEXT:    [[TMP14]] = load float, float* [[ARRAYIDX41]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x float> [[TMP8]], i32 3
+; CHECK-NEXT:    [[MUL45:%.*]] = fmul float [[TMP14]], [[TMP15]]
+; CHECK-NEXT:    store float [[MUL45]], float* [[ARRAYIDX31]], align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP16]], 31995
+; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_END:%.*]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.cond1.preheader
 
@@ -237,10 +370,17 @@
 ;  c[0] = a[0]+b[0];
 ;  c[1] = b[1]+a[1]; // swapped b[1] and a[1]
 
-; CHECK-LABEL: load_reorder_double
-; CHECK: load <2 x double>, <2 x double>*
-; CHECK: fadd <2 x double>
 define void @load_reorder_double(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b){
+; CHECK-LABEL: @load_reorder_double(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd <2 x double> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
   %1 = load double, double* %a
   %2 = load double, double* %b
   %3 = fadd double %1, %2
@@ -261,10 +401,17 @@
 ;  c[2] = a[2]+b[2];
 ;  c[3] = a[3]+b[3];
 
-; CHECK-LABEL: load_reorder_float
-; CHECK: load <4 x float>, <4 x float>*
-; CHECK: fadd <4 x float>
 define void @load_reorder_float(float* nocapture %c, float* noalias nocapture readonly %a, float* noalias nocapture readonly %b){
+; CHECK-LABEL: @load_reorder_float(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd <4 x float> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP5]], <4 x float>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
   %1 = load float, float* %a
   %2 = load float, float* %b
   %3 = fadd float %1, %2
@@ -299,11 +446,21 @@
 ; a[2] = (b[2]+c[2])+d[2];
 ; a[3] = (b[3]+c[3])+d[3];
 
-; CHECK-LABEL: opcode_reorder
-; CHECK: load <4 x float>, <4 x float>*
-; CHECK: fadd <4 x float>
-define void @opcode_reorder(float* noalias nocapture %a, float* noalias nocapture readonly %b, 
-                            float* noalias nocapture readonly %c,float* noalias nocapture readonly %d){
+define void @opcode_reorder(float* noalias nocapture %a, float* noalias nocapture readonly %b,
+; CHECK-LABEL: @opcode_reorder(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd <4 x float> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast float* [[D:%.*]] to <4 x float>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = fadd <4 x float> [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP8]], <4 x float>* [[TMP9]], align 4
+; CHECK-NEXT:    ret void
+;
+  float* noalias nocapture readonly %c,float* noalias nocapture readonly %d){
   %1 = load float, float* %b
   %2 = load float, float* %c
   %3 = fadd float %1, %2
diff --git a/test/Transforms/SLPVectorizer/X86/opt.ll b/test/Transforms/SLPVectorizer/X86/opt.ll
index 824e999..97aa601 100644
--- a/test/Transforms/SLPVectorizer/X86/opt.ll
+++ b/test/Transforms/SLPVectorizer/X86/opt.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -O3 -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s --check-prefix=SLP
 ; RUN: opt < %s -O3 -disable-slp-vectorization -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s --check-prefix=NOSLP
 
@@ -6,14 +7,33 @@
 
 ; Make sure we can disable slp vectorization in opt.
 
-; SLP-LABEL: test1
-; SLP: store <2 x double>
-
-; NOSLP-LABEL: test1
-; NOSLP-NOT: store <2 x double>
-
-
 define void @test1(double* %a, double* %b, double* %c) {
+; SLP-LABEL: @test1(
+; SLP-NEXT:  entry:
+; SLP-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; SLP-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; SLP-NEXT:    [[TMP2:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; SLP-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; SLP-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
+; SLP-NEXT:    [[TMP5:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; SLP-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
+; SLP-NEXT:    ret void
+;
+; NOSLP-LABEL: @test1(
+; NOSLP-NEXT:  entry:
+; NOSLP-NEXT:    [[I0:%.*]] = load double, double* [[A:%.*]], align 8
+; NOSLP-NEXT:    [[I1:%.*]] = load double, double* [[B:%.*]], align 8
+; NOSLP-NEXT:    [[MUL:%.*]] = fmul double [[I0]], [[I1]]
+; NOSLP-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
+; NOSLP-NEXT:    [[I3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; NOSLP-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
+; NOSLP-NEXT:    [[I4:%.*]] = load double, double* [[ARRAYIDX4]], align 8
+; NOSLP-NEXT:    [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
+; NOSLP-NEXT:    store double [[MUL]], double* [[C:%.*]], align 8
+; NOSLP-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C]], i64 1
+; NOSLP-NEXT:    store double [[MUL5]], double* [[ARRAYIDX5]], align 8
+; NOSLP-NEXT:    ret void
+;
 entry:
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
diff --git a/test/Transforms/SLPVectorizer/X86/ordering.ll b/test/Transforms/SLPVectorizer/X86/ordering.ll
index 11f5a3d..bc6ef18 100644
--- a/test/Transforms/SLPVectorizer/X86/ordering.ll
+++ b/test/Transforms/SLPVectorizer/X86/ordering.ll
@@ -1,9 +1,14 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 define void @updateModelQPFrame(i32 %m_Bits) {
+; CHECK-LABEL: @updateModelQPFrame(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret void
+;
 entry:
   %0 = load double, double* undef, align 8
   %mul = fmul double undef, %0
@@ -22,35 +27,52 @@
 declare i32 @personality_v0(...)
 
 define void @invoketest() personality i8* bitcast (i32 (...)* @personality_v0 to i8*) {
+; CHECK-LABEL: @invoketest(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+; CHECK:       cond.true:
+; CHECK-NEXT:    [[CALL49:%.*]] = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
+; CHECK-NEXT:    to label [[COND_TRUE54:%.*]] unwind label [[LPAD:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    [[CALL51:%.*]] = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
+; CHECK-NEXT:    to label [[COND_FALSE57:%.*]] unwind label [[LPAD]]
+; CHECK:       cond.true54:
+; CHECK-NEXT:    [[CALL56:%.*]] = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
+; CHECK-NEXT:    to label [[COND_END60:%.*]] unwind label [[LPAD]]
+; CHECK:       cond.false57:
+; CHECK-NEXT:    [[CALL59:%.*]] = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
+; CHECK-NEXT:    to label [[COND_END60]] unwind label [[LPAD]]
+; CHECK:       cond.end60:
+; CHECK-NEXT:    br i1 undef, label [[IF_END98:%.*]], label [[IF_THEN63:%.*]]
+; CHECK:       if.then63:
+; CHECK-NEXT:    br label [[IF_END98]]
+; CHECK:       lpad:
+; CHECK-NEXT:    [[L:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    cleanup
+; CHECK-NEXT:    resume { i8*, i32 } [[L]]
+; CHECK:       if.end98:
+; CHECK-NEXT:    br label [[IF_END99:%.*]]
+; CHECK:       if.end99:
+; CHECK-NEXT:    ret void
+;
 entry:
   br i1 undef, label %cond.true, label %cond.false
 
 cond.true:
-  %call49 = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef) 
-          to label %cond.true54 unwind label %lpad
+  %call49 = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
+  to label %cond.true54 unwind label %lpad
 
 cond.false:
   %call51 = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
-          to label %cond.false57 unwind label %lpad
+  to label %cond.false57 unwind label %lpad
 
 cond.true54:
-  %call56 = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef) 
-          to label %cond.end60 unwind label %lpad
+  %call56 = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
+  to label %cond.end60 unwind label %lpad
 
 cond.false57:
   %call59 = invoke double bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to double (i8*, i8*)*)(i8* undef, i8* undef)
-          to label %cond.end60 unwind label %lpad
-
-; Make sure we don't vectorize these phis - they have invokes as inputs.
-
-; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
-
-; CHECK-LABEL: invoketest
-
-; CHECK-LABEL: cond.end60
-; CHECK-NOT: phi <2 x double>
-; CHECK: insertelement
-; CHECK-LABEL: if.then63
+  to label %cond.end60 unwind label %lpad
 
 cond.end60:
   %cond126 = phi double [ %call49, %cond.true54 ], [ %call51, %cond.false57 ]
@@ -68,7 +90,7 @@
 
 lpad:
   %l = landingpad { i8*, i32 }
-          cleanup
+  cleanup
   resume { i8*, i32 } %l
 
 if.end98:
diff --git a/test/Transforms/SLPVectorizer/X86/phi.ll b/test/Transforms/SLPVectorizer/X86/phi.ll
index ef94467..a0a13b2 100644
--- a/test/Transforms/SLPVectorizer/X86/phi.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-100 -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
@@ -18,12 +19,22 @@
 ;}
 
 
-;CHECK: i32 @foo
-;CHECK: load <2 x double>
-;CHECK: phi <2 x double>
-;CHECK: store <2 x double>
-;CHECK: ret i32 undef
 define i32 @foo(double* nocapture %A, i32 %k) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[K:%.*]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL]], label [[IF_ELSE:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.else:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 10
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[IF_ELSE]] ], [ <double 3.000000e+00, double 5.000000e+00>, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[A]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP2]], <2 x double>* [[TMP3]], align 8
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %tobool = icmp eq i32 %k, 0
   br i1 %tobool, label %if.else, label %if.end
@@ -61,13 +72,26 @@
 ;  return 0;
 ;}
 
-;CHECK: foo2
-;CHECK: load <2 x double>
-;CHECK: phi <2 x double>
-;CHECK: fmul <2 x double>
-;CHECK: store <2 x double>
-;CHECK: ret
 define i32 @foo2(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) #0 {
+; CHECK-LABEL: @foo2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[I_019:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x double> <double 1.000000e+01, double 1.000000e+01>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 4.000000e+00, double 4.000000e+00>, [[TMP3]]
+; CHECK-NEXT:    [[TMP5]] = fadd <2 x double> <double 4.000000e+00, double 4.000000e+00>, [[TMP4]]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_019]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 100
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    ret i32 0
+;
 entry:
   %arrayidx = getelementptr inbounds double, double* %A, i64 1
   %0 = load double, double* %arrayidx, align 8
@@ -113,15 +137,55 @@
 ;   return R+G+B+Y+P;
 ; }
 
-;CHECK: foo3
-;CHECK: phi <4 x float>
-;CHECK: fmul <4 x float>
-;CHECK: fadd <4 x float>
-;CHECK-NOT: phi <5 x float>
-;CHECK-NOT: fmul <5 x float>
-;CHECK-NOT: fadd <5 x float>
-
 define float @foo3(float* nocapture readonly %A) #0 {
+; CHECK-LABEL: @foo3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[A:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[A]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[ARRAYIDX1]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x float> [[REORDER_SHUFFLE]], i32 3
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[R_052:%.*]] = phi float [ [[TMP0]], [[ENTRY]] ], [ [[ADD6:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = phi float [ [[TMP3]], [[ENTRY]] ], [ [[TMP12:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = phi float [ [[TMP0]], [[ENTRY]] ], [ [[TMP14:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = phi <4 x float> [ [[REORDER_SHUFFLE]], [[ENTRY]] ], [ [[TMP19:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[TMP5]], 7.000000e+00
+; CHECK-NEXT:    [[ADD6]] = fadd float [[R_052]], [[MUL]]
+; CHECK-NEXT:    [[TMP7:%.*]] = add nsw i64 [[INDVARS_IV]], 2
+; CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP7]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load float, float* [[ARRAYIDX14]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast float* [[ARRAYIDX19]] to <2 x float>*
+; CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, <2 x float>* [[TMP9]], align 4
+; CHECK-NEXT:    [[REORDER_SHUFFLE1:%.*]] = shufflevector <2 x float> [[TMP10]], <2 x float> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <4 x float> <float 1.100000e+01, float 1.000000e+01, float 9.000000e+00, float undef>, float [[TMP4]], i32 3
+; CHECK-NEXT:    [[TMP12]] = extractelement <2 x float> [[REORDER_SHUFFLE1]], i32 0
+; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <4 x float> undef, float [[TMP12]], i32 0
+; CHECK-NEXT:    [[TMP14]] = extractelement <2 x float> [[REORDER_SHUFFLE1]], i32 1
+; CHECK-NEXT:    [[TMP15:%.*]] = insertelement <4 x float> [[TMP13]], float [[TMP14]], i32 1
+; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <4 x float> [[TMP15]], float [[TMP8]], i32 2
+; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <4 x float> [[TMP16]], float 8.000000e+00, i32 3
+; CHECK-NEXT:    [[TMP18:%.*]] = fmul <4 x float> [[TMP11]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19]] = fadd <4 x float> [[TMP6]], [[TMP18]]
+; CHECK-NEXT:    [[TMP20:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP20]], 121
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <4 x float> [[TMP19]], i32 3
+; CHECK-NEXT:    [[ADD28:%.*]] = fadd float [[ADD6]], [[TMP21]]
+; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <4 x float> [[TMP19]], i32 2
+; CHECK-NEXT:    [[ADD29:%.*]] = fadd float [[ADD28]], [[TMP22]]
+; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <4 x float> [[TMP19]], i32 1
+; CHECK-NEXT:    [[ADD30:%.*]] = fadd float [[ADD29]], [[TMP23]]
+; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <4 x float> [[TMP19]], i32 0
+; CHECK-NEXT:    [[ADD31:%.*]] = fadd float [[ADD30]], [[TMP24]]
+; CHECK-NEXT:    ret float [[ADD31]]
+;
 entry:
   %0 = load float, float* %A, align 4
   %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
@@ -176,11 +240,35 @@
 
 ; Make sure the order of phi nodes of different types does not prevent
 ; vectorization of same typed phi nodes.
-; CHECK-LABEL: sort_phi_type
-; CHECK: phi <4 x float>
-; CHECK: fmul <4 x float>
-
 define float @sort_phi_type(float* nocapture readonly %A) {
+; CHECK-LABEL: @sort_phi_type(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <4 x float> [ <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>, [[ENTRY]] ], [ [[TMP9:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x float> [[TMP0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x float> undef, float [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x float> [[TMP2]], float [[TMP3]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x float> [[TMP4]], float [[TMP5]], i32 2
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP7]], i32 3
+; CHECK-NEXT:    [[TMP9]] = fmul <4 x float> <float 8.000000e+00, float 9.000000e+00, float 1.000000e+02, float 1.110000e+02>, [[TMP8]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], 128
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <4 x float> [[TMP9]], i32 0
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <4 x float> [[TMP9]], i32 1
+; CHECK-NEXT:    [[ADD29:%.*]] = fadd float [[TMP10]], [[TMP11]]
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <4 x float> [[TMP9]], i32 2
+; CHECK-NEXT:    [[ADD30:%.*]] = fadd float [[ADD29]], [[TMP12]]
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x float> [[TMP9]], i32 3
+; CHECK-NEXT:    [[ADD31:%.*]] = fadd float [[ADD30]], [[TMP13]]
+; CHECK-NEXT:    ret float [[ADD31]]
+;
 entry:
   br label %for.body
 
@@ -208,20 +296,33 @@
 
 define void @test(x86_fp80* %i1, x86_fp80* %i2, x86_fp80* %o) {
 ; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I1_0:%.*]] = load x86_fp80, x86_fp80* [[I1:%.*]], align 16
+; CHECK-NEXT:    [[I1_GEP1:%.*]] = getelementptr x86_fp80, x86_fp80* [[I1]], i64 1
+; CHECK-NEXT:    [[I1_1:%.*]] = load x86_fp80, x86_fp80* [[I1_GEP1]], align 16
+; CHECK-NEXT:    br i1 undef, label [[THEN:%.*]], label [[END:%.*]]
+; CHECK:       then:
+; CHECK-NEXT:    [[I2_GEP0:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2:%.*]], i64 0
+; CHECK-NEXT:    [[I2_0:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP0]], align 16
+; CHECK-NEXT:    [[I2_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2]], i64 1
+; CHECK-NEXT:    [[I2_1:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP1]], align 16
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[PHI0:%.*]] = phi x86_fp80 [ [[I1_0]], [[ENTRY:%.*]] ], [ [[I2_0]], [[THEN]] ]
+; CHECK-NEXT:    [[PHI1:%.*]] = phi x86_fp80 [ [[I1_1]], [[ENTRY]] ], [ [[I2_1]], [[THEN]] ]
+; CHECK-NEXT:    store x86_fp80 [[PHI0]], x86_fp80* [[O:%.*]], align 16
+; CHECK-NEXT:    [[O_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[O]], i64 1
+; CHECK-NEXT:    store x86_fp80 [[PHI1]], x86_fp80* [[O_GEP1]], align 16
+; CHECK-NEXT:    ret void
 ;
 ; Test that we correctly recognize the discontiguous memory in arrays where the
 ; size is less than the alignment, and through various different GEP formations.
-;
-; We disable the vectorization of x86_fp80 for now. 
+; We disable the vectorization of x86_fp80 for now.
 
 entry:
   %i1.0 = load x86_fp80, x86_fp80* %i1, align 16
   %i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
   %i1.1 = load x86_fp80, x86_fp80* %i1.gep1, align 16
-; CHECK: load x86_fp80, x86_fp80*
-; CHECK: load x86_fp80, x86_fp80*
-; CHECK-NOT: insertelement <2 x x86_fp80>
-; CHECK-NOT: insertelement <2 x x86_fp80>
   br i1 undef, label %then, label %end
 
 then:
@@ -229,18 +330,11 @@
   %i2.0 = load x86_fp80, x86_fp80* %i2.gep0, align 16
   %i2.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 1
   %i2.1 = load x86_fp80, x86_fp80* %i2.gep1, align 16
-; CHECK: load x86_fp80, x86_fp80*
-; CHECK: load x86_fp80, x86_fp80*
-; CHECK-NOT: insertelement <2 x x86_fp80>
-; CHECK-NOT: insertelement <2 x x86_fp80>
   br label %end
 
 end:
   %phi0 = phi x86_fp80 [ %i1.0, %entry ], [ %i2.0, %then ]
   %phi1 = phi x86_fp80 [ %i1.1, %entry ], [ %i2.1, %then ]
-; CHECK-NOT: phi <2 x x86_fp80>
-; CHECK-NOT: extractelement <2 x x86_fp80>
-; CHECK-NOT: extractelement <2 x x86_fp80>
   store x86_fp80 %phi0, x86_fp80* %o, align 16
   %o.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %o, i64 1
   store x86_fp80 %phi1, x86_fp80* %o.gep1, align 16
diff --git a/test/Transforms/SLPVectorizer/X86/phi3.ll b/test/Transforms/SLPVectorizer/X86/phi3.ll
index 6162830..b08d9ab 100644
--- a/test/Transforms/SLPVectorizer/X86/phi3.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi3.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -10,6 +11,24 @@
 declare %struct.GPar.0.16.26* @Rf_gpptr(...)
 
 define void @Rf_GReset() {
+; CHECK-LABEL: @Rf_GReset(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load double, double* @d, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> undef, double [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[TMP1]]
+; CHECK-NEXT:    br i1 icmp eq (%struct.GPar.0.16.26* (...)* inttoptr (i64 115 to %struct.GPar.0.16.26* (...)*), %struct.GPar.0.16.26* (...)* @Rf_gpptr), label [[IF_THEN:%.*]], label [[IF_END7:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[TMP3:%.*]] = fsub <2 x double> [[TMP2]], undef
+; CHECK-NEXT:    [[TMP4:%.*]] = fdiv <2 x double> [[TMP3]], undef
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt double [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF_THEN6:%.*]], label [[IF_END7]]
+; CHECK:       if.then6:
+; CHECK-NEXT:    br label [[IF_END7]]
+; CHECK:       if.end7:
+; CHECK-NEXT:    ret void
+;
 entry:
   %sub = fsub double -0.000000e+00, undef
   %0 = load double, double* @d, align 8
diff --git a/test/Transforms/SLPVectorizer/X86/phi_landingpad.ll b/test/Transforms/SLPVectorizer/X86/phi_landingpad.ll
index b47a6ce..0a75288 100644
--- a/test/Transforms/SLPVectorizer/X86/phi_landingpad.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi_landingpad.ll
@@ -1,18 +1,35 @@
-; RUN: opt < %s -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -disable-output
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -S -o - | FileCheck %s
 
 target datalayout = "f64:64:64-v64:64:64"
 
 define void @test_phi_in_landingpad() personality i8*
-          bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK-LABEL: @test_phi_in_landingpad(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    invoke void @foo()
+; CHECK-NEXT:    to label [[INNER:%.*]] unwind label [[LPAD:%.*]]
+; CHECK:       inner:
+; CHECK-NEXT:    invoke void @foo()
+; CHECK-NEXT:    to label [[DONE:%.*]] unwind label [[LPAD]]
+; CHECK:       lpad:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ undef, [[ENTRY:%.*]] ], [ undef, [[INNER]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    catch i8* null
+; CHECK-NEXT:    br label [[DONE]]
+; CHECK:       done:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ undef, [[INNER]] ], [ [[TMP0]], [[LPAD]] ]
+; CHECK-NEXT:    ret void
+;
+  bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
 entry:
   invoke void @foo()
-          to label %inner unwind label %lpad
+  to label %inner unwind label %lpad
 
 inner:
   %x0 = fsub double undef, undef
   %y0 = fsub double undef, undef
   invoke void @foo()
-          to label %done unwind label %lpad
+  to label %done unwind label %lpad
 
 lpad:
   %x1 = phi double [ undef, %entry ], [ undef, %inner ]
diff --git a/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll b/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
index fa08eff..f708341 100644
--- a/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
@@ -1,12 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-100 -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
-; We purposely over-align f64 to 128bit here. 
+; We purposely over-align f64 to 128bit here.
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:128:128-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
 target triple = "i386-apple-macosx10.9.0"
 
 
 define void @test(double* %i1, double* %i2, double* %o) {
 ; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I1_0:%.*]] = load double, double* [[I1:%.*]], align 16
+; CHECK-NEXT:    [[I1_GEP1:%.*]] = getelementptr double, double* [[I1]], i64 1
+; CHECK-NEXT:    [[I1_1:%.*]] = load double, double* [[I1_GEP1]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[I1_0]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[I1_1]], i32 1
+; CHECK-NEXT:    br i1 undef, label [[THEN:%.*]], label [[END:%.*]]
+; CHECK:       then:
+; CHECK-NEXT:    [[I2_GEP0:%.*]] = getelementptr inbounds double, double* [[I2:%.*]], i64 0
+; CHECK-NEXT:    [[I2_0:%.*]] = load double, double* [[I2_GEP0]], align 16
+; CHECK-NEXT:    [[I2_GEP1:%.*]] = getelementptr inbounds double, double* [[I2]], i64 1
+; CHECK-NEXT:    [[I2_1:%.*]] = load double, double* [[I2_GEP1]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[I2_0]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[I2_1]], i32 1
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[TMP4:%.*]] = phi <2 x double> [ [[TMP1]], [[ENTRY:%.*]] ], [ [[TMP3]], [[THEN]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
+; CHECK-NEXT:    store double [[TMP5]], double* [[O:%.*]], align 16
+; CHECK-NEXT:    [[O_GEP1:%.*]] = getelementptr inbounds double, double* [[O]], i64 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
+; CHECK-NEXT:    store double [[TMP6]], double* [[O_GEP1]], align 16
+; CHECK-NEXT:    ret void
 ;
 ; Test that we correctly recognize the discontiguous memory in arrays where the
 ; size is less than the alignment, and through various different GEP formations.
@@ -15,10 +39,6 @@
   %i1.0 = load double, double* %i1, align 16
   %i1.gep1 = getelementptr double, double* %i1, i64 1
   %i1.1 = load double, double* %i1.gep1, align 16
-; CHECK: load double, double*
-; CHECK: load double, double*
-; CHECK: insertelement <2 x double>
-; CHECK: insertelement <2 x double>
   br i1 undef, label %then, label %end
 
 then:
@@ -26,18 +46,11 @@
   %i2.0 = load double, double* %i2.gep0, align 16
   %i2.gep1 = getelementptr inbounds double, double* %i2, i64 1
   %i2.1 = load double, double* %i2.gep1, align 16
-; CHECK: load double, double*
-; CHECK: load double, double*
-; CHECK: insertelement <2 x double>
-; CHECK: insertelement <2 x double>
   br label %end
 
 end:
   %phi0 = phi double [ %i1.0, %entry ], [ %i2.0, %then ]
   %phi1 = phi double [ %i1.1, %entry ], [ %i2.1, %then ]
-; CHECK: phi <2 x double>
-; CHECK: extractelement <2 x double>
-; CHECK: extractelement <2 x double>
   store double %phi0, double* %o, align 16
   %o.gep1 = getelementptr inbounds double, double* %o, i64 1
   store double %phi1, double* %o.gep1, align 16
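The regenerated assertions lean on FileCheck pattern variables: [[NAME:%.*]] captures whatever SSA name the regex matches, and a later bare [[NAME]] must match the same captured text, so the checks survive renumbering of temporaries. A tiny hypothetical illustration (not taken from this patch):

  ; CHECK:      [[LD:%.*]] = load i32, i32* %p
  ; CHECK-NEXT: add i32 [[LD]], 1

These two lines match whether the load is named %0 or %val, as long as the add consumes that same value.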
diff --git a/test/Transforms/SLPVectorizer/X86/pr16628.ll b/test/Transforms/SLPVectorizer/X86/pr16628.ll
index 06abe91..9f19564 100644
--- a/test/Transforms/SLPVectorizer/X86/pr16628.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr16628.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
@@ -9,6 +10,21 @@
 
 ; Function Attrs: nounwind ssp uwtable
 define void @f() {
+; CHECK-LABEL: @f(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @g()
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @c, align 4
+; CHECK-NEXT:    [[LNOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CHECK-NEXT:    [[LNOT_EXT:%.*]] = zext i1 [[LNOT]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* @a, align 2
+; CHECK-NEXT:    [[LNOT2:%.*]] = icmp eq i16 [[TMP1]], 0
+; CHECK-NEXT:    [[LNOT_EXT3:%.*]] = zext i1 [[LNOT2]] to i32
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[LNOT_EXT3]], [[LNOT_EXT]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], [[OR]]
+; CHECK-NEXT:    [[CONV4:%.*]] = zext i1 [[CMP]] to i16
+; CHECK-NEXT:    store i16 [[CONV4]], i16* @b, align 2
+; CHECK-NEXT:    ret void
+;
 entry:
   %call = tail call i32 (...) @g()
   %0 = load i32, i32* @c, align 4
diff --git a/test/Transforms/SLPVectorizer/X86/pr16899.ll b/test/Transforms/SLPVectorizer/X86/pr16899.ll
index 0de14ec..5b91c30 100644
--- a/test/Transforms/SLPVectorizer/X86/pr16899.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr16899.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s  -slp-vectorizer -S -mtriple=i386--netbsd -mcpu=i486
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s  -slp-vectorizer -S -mtriple=i386--netbsd -mcpu=i486 | FileCheck %s
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
 target triple = "i386--netbsd"
 
@@ -6,6 +7,20 @@
 
 ; Function Attrs: noreturn nounwind readonly
 define i32 @fn1() #0 {
+; CHECK-LABEL: @fn1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** @a, align 4, !tbaa !0
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4, !tbaa !4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX1]], align 4, !tbaa !4
+; CHECK-NEXT:    br label [[DO_BODY:%.*]]
+; CHECK:       do.body:
+; CHECK-NEXT:    [[C_0:%.*]] = phi i32 [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ADD2:%.*]], [[DO_BODY]] ]
+; CHECK-NEXT:    [[B_0:%.*]] = phi i32 [ [[TMP1]], [[ENTRY]] ], [ [[ADD:%.*]], [[DO_BODY]] ]
+; CHECK-NEXT:    [[ADD]] = add nsw i32 [[B_0]], [[C_0]]
+; CHECK-NEXT:    [[ADD2]] = add nsw i32 [[ADD]], 1
+; CHECK-NEXT:    br label [[DO_BODY]]
+;
 entry:
   %0 = load i32*, i32** @a, align 4, !tbaa !4
   %1 = load i32, i32* %0, align 4, !tbaa !5
diff --git a/test/Transforms/SLPVectorizer/X86/pr18060.ll b/test/Transforms/SLPVectorizer/X86/pr18060.ll
index e6813f3..0af5d0f 100644
--- a/test/Transforms/SLPVectorizer/X86/pr18060.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr18060.ll
@@ -1,19 +1,53 @@
-; RUN: opt < %s -slp-vectorizer -S -mtriple=i386-pc-linux
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -S -mtriple=i386-pc-linux | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
 target triple = "i386-pc-linux"
 
 ; Function Attrs: nounwind
 define i32 @_Z16adjustFixupValueyj(i64 %Value, i32 %Kind) {
+; CHECK-LABEL: @_Z16adjustFixupValueyj(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[EXTRACT_T:%.*]] = trunc i64 [[VALUE:%.*]] to i32
+; CHECK-NEXT:    [[EXTRACT:%.*]] = lshr i64 [[VALUE]], 12
+; CHECK-NEXT:    [[EXTRACT_T6:%.*]] = trunc i64 [[EXTRACT]] to i32
+; CHECK-NEXT:    switch i32 [[KIND:%.*]], label [[SW_DEFAULT:%.*]] [
+; CHECK-NEXT:    i32 0, label [[RETURN:%.*]]
+; CHECK-NEXT:    i32 1, label [[RETURN]]
+; CHECK-NEXT:    i32 129, label [[SW_BB1:%.*]]
+; CHECK-NEXT:    i32 130, label [[SW_BB2:%.*]]
+; CHECK-NEXT:    ]
+; CHECK:       sw.default:
+; CHECK-NEXT:    call void @_Z25llvm_unreachable_internalv()
+; CHECK-NEXT:    unreachable
+; CHECK:       sw.bb1:
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 [[VALUE]], 16
+; CHECK-NEXT:    [[EXTRACT_T5:%.*]] = trunc i64 [[SHR]] to i32
+; CHECK-NEXT:    [[EXTRACT7:%.*]] = lshr i64 [[VALUE]], 28
+; CHECK-NEXT:    [[EXTRACT_T8:%.*]] = trunc i64 [[EXTRACT7]] to i32
+; CHECK-NEXT:    br label [[SW_BB2]]
+; CHECK:       sw.bb2:
+; CHECK-NEXT:    [[VALUE_ADDR_0_OFF0:%.*]] = phi i32 [ [[EXTRACT_T]], [[ENTRY:%.*]] ], [ [[EXTRACT_T5]], [[SW_BB1]] ]
+; CHECK-NEXT:    [[VALUE_ADDR_0_OFF12:%.*]] = phi i32 [ [[EXTRACT_T6]], [[ENTRY]] ], [ [[EXTRACT_T8]], [[SW_BB1]] ]
+; CHECK-NEXT:    [[CONV6:%.*]] = and i32 [[VALUE_ADDR_0_OFF0]], 4095
+; CHECK-NEXT:    [[CONV4:%.*]] = shl i32 [[VALUE_ADDR_0_OFF12]], 16
+; CHECK-NEXT:    [[SHL:%.*]] = and i32 [[CONV4]], 983040
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[CONV6]]
+; CHECK-NEXT:    [[OR11:%.*]] = or i32 [[OR]], 8388608
+; CHECK-NEXT:    br label [[RETURN]]
+; CHECK:       return:
+; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi i32 [ [[OR11]], [[SW_BB2]] ], [ [[EXTRACT_T]], [[ENTRY]] ], [ [[EXTRACT_T]], [[ENTRY]] ]
+; CHECK-NEXT:    ret i32 [[RETVAL_0]]
+;
 entry:
   %extract.t = trunc i64 %Value to i32
   %extract = lshr i64 %Value, 12
   %extract.t6 = trunc i64 %extract to i32
   switch i32 %Kind, label %sw.default [
-    i32 0, label %return
-    i32 1, label %return
-    i32 129, label %sw.bb1
-    i32 130, label %sw.bb2
+  i32 0, label %return
+  i32 1, label %return
+  i32 129, label %sw.bb1
+  i32 130, label %sw.bb2
   ]
 
 sw.default:                                       ; preds = %entry
diff --git a/test/Transforms/SLPVectorizer/X86/pr23510.ll b/test/Transforms/SLPVectorizer/X86/pr23510.ll
index efdb0ec..420fdde 100644
--- a/test/Transforms/SLPVectorizer/X86/pr23510.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr23510.ll
@@ -1,16 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; PR23510
 ; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK-LABEL: @_Z3fooPml(
-; CHECK: lshr <2 x i64>
-; CHECK: lshr <2 x i64>
-
 @total = global i64 0, align 8
 
 define void @_Z3fooPml(i64* nocapture %a, i64 %i) {
+; CHECK-LABEL: @_Z3fooPml(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i64* [[A]] to <2 x i64>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr <2 x i64> [[TMP1]], <i64 4, i64 4>
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64* [[A]] to <2 x i64>*
+; CHECK-NEXT:    store <2 x i64> [[TMP2]], <2 x i64>* [[TMP3]], align 8
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[I:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* @total, align 8
+; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store i64 [[ADD]], i64* @total, align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i64* [[A]] to <2 x i64>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = lshr <2 x i64> [[TMP5]], <i64 4, i64 4>
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i64* [[A]] to <2 x i64>*
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64>* [[TMP7]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, i64* @total, align 8
+; CHECK-NEXT:    [[ADD9:%.*]] = add i64 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    store i64 [[ADD9]], i64* @total, align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %tmp = load i64, i64* %a, align 8
   %shr = lshr i64 %tmp, 4
diff --git a/test/Transforms/SLPVectorizer/X86/pr27163.ll b/test/Transforms/SLPVectorizer/X86/pr27163.ll
index 2b8480e..b1c1d95 100644
--- a/test/Transforms/SLPVectorizer/X86/pr27163.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr27163.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -slp-vectorizer -S < %s | FileCheck %s
 target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-pc-windows-msvc18.0.0"
@@ -5,6 +6,31 @@
 %struct.B = type { i64, i64 }
 
 define void @test1(%struct.B* %p) personality i32 (...)* @__CxxFrameHandler3 {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  invoke.cont:
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], %struct.B* [[P:%.*]], i64 0, i32 0
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds [[STRUCT_B]], %struct.B* [[P]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i64* [[GEP1]] to <2 x i64>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64* [[GEP1]] to <2 x i64>*
+; CHECK-NEXT:    store <2 x i64> [[TMP1]], <2 x i64>* [[TMP3]], align 8
+; CHECK-NEXT:    invoke void @throw()
+; CHECK-NEXT:    to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
+; CHECK:       catch.dispatch:
+; CHECK-NEXT:    [[CS:%.*]] = catchswitch within none [label %invoke.cont1] unwind label [[EHCLEANUP:%.*]]
+; CHECK:       invoke.cont1:
+; CHECK-NEXT:    [[CATCH:%.*]] = catchpad within [[CS]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT:    invoke void @throw() [ "funclet"(token [[CATCH]]) ]
+; CHECK-NEXT:    to label [[UNREACHABLE]] unwind label [[EHCLEANUP]]
+; CHECK:       ehcleanup:
+; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ [[TMP2]], [[CATCH_DISPATCH]] ], [ 9, [[INVOKE_CONT1:%.*]] ]
+; CHECK-NEXT:    [[CLEANUP:%.*]] = cleanuppad within none []
+; CHECK-NEXT:    call void @release(i64 [[PHI]]) [ "funclet"(token [[CLEANUP]]) ]
+; CHECK-NEXT:    cleanupret from [[CLEANUP]] unwind to caller
+; CHECK:       unreachable:
+; CHECK-NEXT:    unreachable
+;
 invoke.cont:
   %gep1 = getelementptr inbounds %struct.B, %struct.B* %p, i64 0, i32 0
   %gep2 = getelementptr inbounds %struct.B, %struct.B* %p, i64 0, i32 1
@@ -13,7 +39,7 @@
   store i64 %load1, i64* %gep1, align 8
   store i64 %load2, i64* %gep2, align 8
   invoke void @throw()
-          to label %unreachable unwind label %catch.dispatch
+  to label %unreachable unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %invoke.cont
   %cs = catchswitch within none [label %invoke.cont1] unwind label %ehcleanup
@@ -21,7 +47,7 @@
 invoke.cont1:                                     ; preds = %catch.dispatch
   %catch = catchpad within %cs [i8* null, i32 64, i8* null]
   invoke void @throw() [ "funclet"(token %catch) ]
-          to label %unreachable unwind label %ehcleanup
+  to label %unreachable unwind label %ehcleanup
 
 ehcleanup:                                        ; preds = %invoke.cont1, %catch.dispatch
   %phi = phi i64 [ %load1, %catch.dispatch ], [ 9, %invoke.cont1 ]
@@ -33,16 +59,6 @@
   unreachable
 }
 
-
-; CHECK-LABEL: define void @test1(
-; CHECK: %[[gep:.*]] = getelementptr inbounds %struct.B, %struct.B* %p, i64 0, i32 0
-; CHECK: %[[bc:.*]]  = bitcast i64* %[[gep]] to <2 x i64>*
-; CHECK: %[[ld:.*]]  = load <2 x i64>, <2 x i64>* %[[bc]], align 8
-; CHECK: %[[ee:.*]]  = extractelement <2 x i64> %[[ld]], i32 0
-
-; CHECK: %[[phi:.*]] = phi i64 [ %[[ee]], {{.*}} ], [ 9, {{.*}} ]
-; CHECK: call void @release(i64 %[[phi]])
-
 declare i32 @__CxxFrameHandler3(...)
 
 declare void @throw()
diff --git a/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll b/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
index 28217fc..7cc0194 100644
--- a/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
+++ b/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
 
 ; Check propagation of optional IR flags (PR20802). For a flag to
@@ -7,9 +8,19 @@
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-unknown"
 
-; CHECK-LABEL: @exact(
-; CHECK: lshr exact <4 x i32>
 define void @exact(i32* %x) {
+; CHECK-LABEL: @exact(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr exact <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -33,9 +44,19 @@
   ret void
 }
 
-; CHECK-LABEL: @not_exact(
-; CHECK: lshr <4 x i32>
 define void @not_exact(i32* %x) {
+; CHECK-LABEL: @not_exact(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -59,9 +80,19 @@
   ret void
 }
 
-; CHECK-LABEL: @nsw(
-; CHECK: add nsw <4 x i32>
 define void @nsw(i32* %x) {
+; CHECK-LABEL: @nsw(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <4 x i32> <i32 1, i32 1, i32 1, i32 1>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -85,9 +116,19 @@
   ret void
 }
 
-; CHECK-LABEL: @not_nsw(
-; CHECK: add <4 x i32>
 define void @not_nsw(i32* %x) {
+; CHECK-LABEL: @not_nsw(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -111,9 +152,19 @@
   ret void
 }
 
-; CHECK-LABEL: @nuw(
-; CHECK: add nuw <4 x i32>
 define void @nuw(i32* %x) {
+; CHECK-LABEL: @nuw(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add nuw <4 x i32> <i32 1, i32 1, i32 1, i32 1>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -136,10 +187,20 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @not_nuw(
-; CHECK: add <4 x i32>
+
 define void @not_nuw(i32* %x) {
+; CHECK-LABEL: @not_nuw(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -162,10 +223,20 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @nnan(
-; CHECK: fadd nnan <4 x float>
+
 define void @nnan(float* %x) {
+; CHECK-LABEL: @nnan(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds float, float* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd nnan <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP3]], <4 x float>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds float, float* %x, i64 0
   %idx2 = getelementptr inbounds float, float* %x, i64 1
   %idx3 = getelementptr inbounds float, float* %x, i64 2
@@ -188,10 +259,20 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @not_nnan(
-; CHECK: fadd <4 x float>
+
 define void @not_nnan(float* %x) {
+; CHECK-LABEL: @not_nnan(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds float, float* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP3]], <4 x float>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds float, float* %x, i64 0
   %idx2 = getelementptr inbounds float, float* %x, i64 1
   %idx3 = getelementptr inbounds float, float* %x, i64 2
@@ -214,10 +295,20 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @only_fast(
-; CHECK: fadd fast <4 x float>
+
 define void @only_fast(float* %x) {
+; CHECK-LABEL: @only_fast(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds float, float* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd fast <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP3]], <4 x float>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds float, float* %x, i64 0
   %idx2 = getelementptr inbounds float, float* %x, i64 1
   %idx3 = getelementptr inbounds float, float* %x, i64 2
@@ -240,10 +331,20 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @only_arcp(
-; CHECK: fadd arcp <4 x float>
+
 define void @only_arcp(float* %x) {
+; CHECK-LABEL: @only_arcp(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds float, float* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd arcp <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float* [[IDX1]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP3]], <4 x float>* [[TMP4]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds float, float* %x, i64 0
   %idx2 = getelementptr inbounds float, float* %x, i64 1
   %idx3 = getelementptr inbounds float, float* %x, i64 2
@@ -267,10 +368,21 @@
   ret void
 }
 
-; CHECK-LABEL: @addsub_all_nsw
-; CHECK: add nsw <4 x i32>
-; CHECK: sub nsw <4 x i32>
 define void @addsub_all_nsw(i32* %x) {
+; CHECK-LABEL: @addsub_all_nsw(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -293,11 +405,22 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @addsub_some_nsw
-; CHECK: add nsw <4 x i32>
-; CHECK: sub <4 x i32>
+
 define void @addsub_some_nsw(i32* %x) {
+; CHECK-LABEL: @addsub_some_nsw(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP4:%.*]] = sub <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -320,11 +443,22 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @addsub_no_nsw
-; CHECK: add <4 x i32>
-; CHECK: sub <4 x i32>
+
 define void @addsub_no_nsw(i32* %x) {
+; CHECK-LABEL: @addsub_no_nsw(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP4:%.*]] = sub <4 x i32> [[TMP2]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[IDX1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds i32, i32* %x, i64 0
   %idx2 = getelementptr inbounds i32, i32* %x, i64 1
   %idx3 = getelementptr inbounds i32, i32* %x, i64 2
@@ -347,11 +481,20 @@
 
   ret void
 }
- 
-; CHECK-LABEL: @fcmp_fast
-; CHECK: fcmp fast oge <2 x double>
-; CHECK: sub fast <2 x double>
+
 define void @fcmp_fast(double* %x) #1 {
+; CHECK-LABEL: @fcmp_fast(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds double, double* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds double, double* [[X]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = fcmp fast oge <2 x double> [[TMP2]], zeroinitializer
+; CHECK-NEXT:    [[TMP4:%.*]] = fsub fast <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = select <2 x i1> [[TMP3]], <2 x double> [[TMP2]], <2 x double> [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds double, double* %x, i64 0
   %idx2 = getelementptr inbounds double, double* %x, i64 1
 
@@ -373,10 +516,19 @@
   ret void
 }
 
-; CHECK-LABEL: @fcmp_no_fast
-; CHECK: fcmp oge <2 x double>
-; CHECK: sub <2 x double>
 define void @fcmp_no_fast(double* %x) #1 {
+; CHECK-LABEL: @fcmp_no_fast(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds double, double* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds double, double* [[X]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = fcmp oge <2 x double> [[TMP2]], zeroinitializer
+; CHECK-NEXT:    [[TMP4:%.*]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = select <2 x i1> [[TMP3]], <2 x double> [[TMP2]], <2 x double> [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds double, double* %x, i64 0
   %idx2 = getelementptr inbounds double, double* %x, i64 1
 
@@ -400,9 +552,17 @@
 
 declare double @llvm.fabs.f64(double) nounwind readnone
 
-;CHECK-LABEL: @call_fast(
-;CHECK: call fast <2 x double> @llvm.fabs.v2f64
 define void @call_fast(double* %x) {
+; CHECK-LABEL: @call_fast(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds double, double* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds double, double* [[X]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = call fast <2 x double> @llvm.fabs.v2f64(<2 x double> [[TMP2]])
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds double, double* %x, i64 0
   %idx2 = getelementptr inbounds double, double* %x, i64 1
 
@@ -418,9 +578,17 @@
   ret void
 }
 
-;CHECK-LABEL: @call_no_fast(
-;CHECK: call <2 x double> @llvm.fabs.v2f64
 define void @call_no_fast(double* %x) {
+; CHECK-LABEL: @call_no_fast(
+; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds double, double* [[X:%.*]], i64 0
+; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds double, double* [[X]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[TMP2]])
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[IDX1]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
+; CHECK-NEXT:    ret void
+;
   %idx1 = getelementptr inbounds double, double* %x, i64 0
   %idx2 = getelementptr inbounds double, double* %x, i64 1
 
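The paired tests above encode the flag-propagation rule from PR20802: when the SLP vectorizer merges scalar instructions into one vector instruction, an optional flag (nsw, nuw, exact, or a fast-math flag) survives only if every scalar instance carried it. An illustrative sketch on two scalar adds (hypothetical IR, not part of the patch):

  %a0 = add nsw i32 %x0, 1   ; carries nsw
  %a1 = add i32 %x1, 1       ; lacks nsw
  ; A single vectorized add covering both must conservatively drop nsw:
  ;   %v = add <2 x i32> %x01, <i32 1, i32 1>

@addsub_some_nsw shows the per-opcode version of the same rule: the add lane keeps nsw while the sub lane, where one scalar lacked the flag, does not.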
diff --git a/test/Transforms/SLPVectorizer/X86/reduction.ll b/test/Transforms/SLPVectorizer/X86/reduction.ll
index 4c5f126..03b7f67 100644
--- a/test/Transforms/SLPVectorizer/X86/reduction.ll
+++ b/test/Transforms/SLPVectorizer/X86/reduction.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
@@ -10,11 +11,33 @@
 ;   return sum;
 ; }
 
-;CHECK: reduce
-;CHECK: load <2 x double>
-;CHECK: fmul <2 x double>
-;CHECK: ret
 define i32 @reduce(double* nocapture %A, i32 %n, i32 %m) {
+; CHECK-LABEL: @reduce(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP13]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[I_015:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[SUM_014:%.*]] = phi double [ [[ADD6:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i32 [[I_015]], 1
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i32 [[MUL]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x double> [[TMP2]], i32 1
+; CHECK-NEXT:    [[ADD5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[ADD6]] = fadd double [[SUM_014]], [[ADD5]]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_015]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY]]
+; CHECK:       for.cond.for.end_crit_edge:
+; CHECK-NEXT:    [[PHITMP:%.*]] = fptosi double [[ADD6]] to i32
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ [[PHITMP]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
+;
 entry:
   %cmp13 = icmp sgt i32 %n, 0
   br i1 %cmp13, label %for.body, label %for.end
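The @reduce checks show the simplest horizontal-reduction shape SLP emits here: both multiplies become one <2 x double> fmul, and the scalar sum is reconstituted by extracting the two lanes and adding them. Schematically (illustrative IR mirroring the CHECK lines above):

  %m  = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, %wide
  %l0 = extractelement <2 x double> %m, i32 0
  %l1 = extractelement <2 x double> %m, i32 1
  %s  = fadd double %l0, %l1

For wider reductions (see remark_horcost.ll below) the extract pair is replaced by a log2 shuffle-and-add ladder.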
diff --git a/test/Transforms/SLPVectorizer/X86/reduction2.ll b/test/Transforms/SLPVectorizer/X86/reduction2.ll
index 507a61a..87a6af7 100644
--- a/test/Transforms/SLPVectorizer/X86/reduction2.ll
+++ b/test/Transforms/SLPVectorizer/X86/reduction2.ll
@@ -1,12 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
 target triple = "i386-apple-macosx10.8.0"
 
-;CHECK-LABEL: @foo(
-;CHECK: load <2 x double>
-;CHECK: ret
 define double @foo(double* nocapture %D) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:    br label [[TMP1:%.*]]
+; CHECK:         [[I_02:%.*]] = phi i32 [ 0, [[TMP0:%.*]] ], [ [[TMP12:%.*]], [[TMP1]] ]
+; CHECK-NEXT:    [[SUM_01:%.*]] = phi double [ 0.000000e+00, [[TMP0]] ], [ [[TMP11:%.*]], [[TMP1]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nsw i32 [[I_02]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds double, double* [[D:%.*]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[TMP3]] to <2 x double>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul <2 x double> [[TMP5]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul <2 x double> [[TMP6]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i32 1
+; CHECK-NEXT:    [[TMP10:%.*]] = fadd double [[TMP8]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11]] = fadd double [[SUM_01]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12]] = add nsw i32 [[I_02]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[TMP12]], 100
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[TMP13:%.*]], label [[TMP1]]
+; CHECK:         ret double [[TMP11]]
+;
   br label %1
 
 ; <label>:1                                       ; preds = %1, %0
diff --git a/test/Transforms/SLPVectorizer/X86/remark_horcost.ll b/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
index 96c8d7f..27997f6 100644
--- a/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
+++ b/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
@@ -1,7 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -mcpu=generic -slp-vectorizer -pass-remarks-output=%t < %s | FileCheck %s
 ; RUN: FileCheck --input-file=%t --check-prefix=YAML %s
 
 define i32 @foo(i32* %diff) #0 {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[M2:%.*]] = alloca [8 x [8 x i32]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [8 x [8 x i32]]* [[M2]] to i8*
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[A_088:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[OP_EXTRA:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[DIFF:%.*]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP2]]
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = or i64 [[TMP1]], 1
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i64 [[TMP1]], 5
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[TMP1]], 2
+; CHECK-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP6:%.*]] = or i64 [[TMP1]], 6
+; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i64 [[TMP1]], 3
+; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP7]]
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = or i64 [[TMP1]], 7
+; CHECK-NEXT:    [[ARRAYIDX44:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[ARRAYIDX2]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, <4 x i32>* [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = add nsw <4 x i32> [[TMP12]], [[TMP9]]
+; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 undef, [[A_088]]
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 1
+; CHECK-NEXT:    [[ADD24:%.*]] = add nsw i32 [[ADD10]], undef
+; CHECK-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 2
+; CHECK-NEXT:    [[ADD38:%.*]] = add nsw i32 [[ADD24]], undef
+; CHECK-NEXT:    [[ARRAYIDX48:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 3
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i32* [[ARRAYIDX6]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP13]], <4 x i32>* [[TMP14]], align 16
+; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP13]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX:%.*]] = add nsw <4 x i32> [[TMP13]], [[RDX_SHUF]]
+; CHECK-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX2:%.*]] = add nsw <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
+; CHECK-NEXT:    [[OP_EXTRA]] = add nsw i32 [[TMP15]], [[A_088]]
+; CHECK-NEXT:    [[ADD52:%.*]] = add nsw i32 [[ADD38]], undef
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret i32 [[OP_EXTRA]]
+;
 entry:
   %m2 = alloca [8 x [8 x i32]], align 16
   %0 = bitcast [8 x [8 x i32]]* %m2 to i8*
@@ -19,7 +71,7 @@
   %add3 = add nsw i32 %4, %2
   %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
   store i32 %add3, i32* %arrayidx6, align 16
- 
+
   %add10 = add nsw i32 %add3, %a.088
   %5 = or i64 %1, 1
   %arrayidx13 = getelementptr inbounds i32, i32* %diff, i64 %5
@@ -30,7 +82,7 @@
   %add17 = add nsw i32 %8, %6
   %arrayidx20 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
   store i32 %add17, i32* %arrayidx20, align 4
-  
+
   %add24 = add nsw i32 %add10, %add17
   %9 = or i64 %1, 2
   %arrayidx27 = getelementptr inbounds i32, i32* %diff, i64 %9
@@ -41,7 +93,7 @@
   %add31 = add nsw i32 %12, %10
   %arrayidx34 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
   store i32 %add31, i32* %arrayidx34, align 8
-  
+
   %add38 = add nsw i32 %add24, %add31
   %13 = or i64 %1, 3
   %arrayidx41 = getelementptr inbounds i32, i32* %diff, i64 %13
@@ -49,35 +101,33 @@
   %15 = or i64 %1, 7
   %arrayidx44 = getelementptr inbounds i32, i32* %diff, i64 %15
   %16 = load i32, i32* %arrayidx44, align 4
-  
+
   %add45 = add nsw i32 %16, %14
   %arrayidx48 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
   store i32 %add45, i32* %arrayidx48, align 4
-  
-  %add52 = add nsw i32 %add38, %add45
- ; CHECK: add nsw <{{[0-9]+}} x i32>
- ; CHECK: add nsw <{{[0-9]+}} x i32>
- 
- ; YAML:      --- !Passed
- ; YAML-NEXT: Pass:            slp-vectorizer
- ; YAML-NEXT: Name:            StoresVectorized
- ; YAML-NEXT: Function:        foo
- ; YAML-NEXT: Args:
- ; YAML-NEXT:   - String:          'Stores SLP vectorized with cost '
- ; YAML-NEXT:   - Cost:            '-8'
- ; YAML-NEXT:   - String:          ' and with tree size '
- ; YAML-NEXT:   - TreeSize:        '4'
 
- ; YAML:      --- !Passed
- ; YAML-NEXT: Pass:            slp-vectorizer
- ; YAML-NEXT: Name:            VectorizedHorizontalReduction
- ; YAML-NEXT: Function:        foo
- ; YAML-NEXT: Args:
- ; YAML-NEXT:   - String:          'Vectorized horizontal reduction with cost '
- ; YAML-NEXT:   - Cost:            '-2'
- ; YAML-NEXT:   - String:          ' and with tree size '
- ; YAML-NEXT:   - TreeSize:        '1'
- 
+  %add52 = add nsw i32 %add38, %add45
+
+  ; YAML:      --- !Passed
+  ; YAML-NEXT: Pass:            slp-vectorizer
+  ; YAML-NEXT: Name:            StoresVectorized
+  ; YAML-NEXT: Function:        foo
+  ; YAML-NEXT: Args:
+  ; YAML-NEXT:   - String:          'Stores SLP vectorized with cost '
+  ; YAML-NEXT:   - Cost:            '-8'
+  ; YAML-NEXT:   - String:          ' and with tree size '
+  ; YAML-NEXT:   - TreeSize:        '4'
+
+  ; YAML:      --- !Passed
+  ; YAML-NEXT: Pass:            slp-vectorizer
+  ; YAML-NEXT: Name:            VectorizedHorizontalReduction
+  ; YAML-NEXT: Function:        foo
+  ; YAML-NEXT: Args:
+  ; YAML-NEXT:   - String:          'Vectorized horizontal reduction with cost '
+  ; YAML-NEXT:   - Cost:            '-2'
+  ; YAML-NEXT:   - String:          ' and with tree size '
+  ; YAML-NEXT:   - TreeSize:        '1'
+
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 8
   br i1 %exitcond, label %for.end, label %for.body
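The RDX_SHUF/BIN_RDX lines in the @foo checks above are that shuffle-and-add ladder for a <4 x i32> sum: shuffle the upper half down, add, shuffle once more, add, then read lane 0. Schematically (illustrative IR following the CHECK lines):

  %s1  = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %r1  = add nsw <4 x i32> %v, %s1
  %s2  = shufflevector <4 x i32> %r1, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %r2  = add nsw <4 x i32> %r1, %s2
  %sum = extractelement <4 x i32> %r2, i32 0

Only lane 0 of %r2 holds the full sum; the undef lanes are never read.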
diff --git a/test/Transforms/SLPVectorizer/X86/remark_listcost.ll b/test/Transforms/SLPVectorizer/X86/remark_listcost.ll
index 6f6e00f..ed2cd87 100644
--- a/test/Transforms/SLPVectorizer/X86/remark_listcost.ll
+++ b/test/Transforms/SLPVectorizer/X86/remark_listcost.ll
@@ -1,7 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -mcpu=generic -slp-vectorizer -pass-remarks-output=%t < %s | FileCheck %s
 ; RUN: FileCheck --input-file=%t --check-prefix=YAML %s
 
 define void @vsub2_test(i32* %pin1, i32* %pin2, i32* %pout) #0 {
+; CHECK-LABEL: @vsub2_test(
+; CHECK-NEXT:    br label [[TMP1:%.*]]
+; CHECK:         [[IDX_04:%.*]] = phi i32 [ 0, [[TMP0:%.*]] ], [ [[TMP8:%.*]], [[TMP1]] ]
+; CHECK-NEXT:    [[PO_03:%.*]] = phi i32* [ [[POUT:%.*]], [[TMP0]] ], [ [[TMP7:%.*]], [[TMP1]] ]
+; CHECK-NEXT:    [[PTMPI2_02:%.*]] = phi i32* [ [[PIN2:%.*]], [[TMP0]] ], [ [[TMP4:%.*]], [[TMP1]] ]
+; CHECK-NEXT:    [[PTMPI1_01:%.*]] = phi i32* [ [[PIN1:%.*]], [[TMP0]] ], [ [[TMP2:%.*]], [[TMP1]] ]
+; CHECK-NEXT:    [[TMP2]] = getelementptr inbounds i32, i32* [[PTMPI1_01]], i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[PTMPI1_01]], align 4, !tbaa !1
+; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i32, i32* [[PTMPI2_02]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[PTMPI2_02]], align 4, !tbaa !1
+; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw i32 [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7]] = getelementptr inbounds i32, i32* [[PO_03]], i64 1
+; CHECK-NEXT:    store i32 [[TMP6]], i32* [[PO_03]], align 4, !tbaa !1
+; CHECK-NEXT:    [[TMP8]] = add nuw nsw i32 [[IDX_04]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[TMP8]], 64
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[TMP9:%.*]], label [[TMP1]], !llvm.loop !5
+; CHECK:         ret void
+;
   br label %1
 
   %idx.04 = phi i32 [ 0, %0 ], [ %8, %1 ]
@@ -14,15 +33,14 @@
   %5 = load i32, i32* %ptmpi2.02, align 4, !tbaa !1
   %6 = sub nsw i32 %3, %5
   %7 = getelementptr inbounds i32, i32* %po.03, i64 1
- ; CHECK-NOT: <{{[0-9]+}} x i32>
- ; YAML:      Pass:            slp-vectorizer
- ; YAML-NEXT: Name:            NotBeneficial
- ; YAML-NEXT: Function:        vsub2_test
- ; YAML-NEXT: Args:
- ; YAML-NEXT:   - String:          'List vectorization was possible but not beneficial with cost '
- ; YAML-NEXT:   - Cost:            '0'
- ; YAML-NEXT:   - String:          ' >= '
- ; YAML-NEXT:   - Treshold:        '0'
+  ; YAML:      Pass:            slp-vectorizer
+  ; YAML-NEXT: Name:            NotBeneficial
+  ; YAML-NEXT: Function:        vsub2_test
+  ; YAML-NEXT: Args:
+  ; YAML-NEXT:   - String:          'List vectorization was possible but not beneficial with cost '
+  ; YAML-NEXT:   - Cost:            '0'
+  ; YAML-NEXT:   - String:          ' >= '
+  ; YAML-NEXT:   - Treshold:        '0'
   store i32 %6, i32* %po.03, align 4, !tbaa !1
   %8 = add nuw nsw i32 %idx.04, 1
   %exitcond = icmp eq i32 %8, 64
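All of the remark tests in this group share one two-RUN structure: the first RUN writes optimization remarks as YAML into lit's per-test temporary file (%t) while FileCheck validates the transformed IR, and the second RUN re-checks that YAML file under a separate prefix, exactly as at the top of this file:

  ; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -mcpu=generic -slp-vectorizer -pass-remarks-output=%t < %s | FileCheck %s
  ; RUN: FileCheck --input-file=%t --check-prefix=YAML %s

Note that the 'Treshold' key checked above is not an editing error in this patch: the check appears to reproduce the literal spelling emitted by the pass at this revision, so it must stay as-is.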
diff --git a/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll b/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll
index 59d4bb0..a0d3c93 100644
--- a/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll
+++ b/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll
@@ -1,7 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -mcpu=generic -slp-vectorizer -pass-remarks-output=%t < %s | FileCheck %s
 ; RUN: FileCheck --input-file=%t --check-prefix=YAML %s
 
 define i32 @foo(i32* nocapture readonly %diff) #0 {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[M2:%.*]] = alloca [8 x [8 x i32]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [8 x [8 x i32]]* [[M2]] to i8*
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[A_088:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD24:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[DIFF:%.*]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = or i64 [[TMP1]], 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 0
+; CHECK-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX6]], align 16
+; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 [[ADD3]], [[A_088]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[TMP1]], 1
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = or i64 [[TMP1]], 5
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP7]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
+; CHECK-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP8]], [[TMP6]]
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 1
+; CHECK-NEXT:    store i32 [[ADD17]], i32* [[ARRAYIDX20]], align 4
+; CHECK-NEXT:    [[ADD24]] = add nsw i32 [[ADD10]], [[ADD17]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 0
+; CHECK-NEXT:    ret i32 [[ADD24]]
+;
 entry:
   %m2 = alloca [8 x [8 x i32]], align 16
   %0 = bitcast [8 x [8 x i32]]* %m2 to i8*
@@ -31,13 +67,12 @@
   store i32 %add17, i32* %arrayidx20, align 4
   %add24 = add nsw i32 %add10, %add17
 
- ; CHECK-NOT: add nsw <{{[0-9]+}} x i32> 
- ; YAML:      Pass:            slp-vectorizer
- ; YAML-NEXT: Name:            NotPossible
- ; YAML-NEXT: Function:        foo
- ; YAML-NEXT: Args:
- ; YAML-NEXT:   - String:          'Cannot SLP vectorize list: vectorization was impossible'
- ; YAML-NEXT:   - String:          ' with available vectorization factors'
+  ; YAML:      Pass:            slp-vectorizer
+  ; YAML-NEXT: Name:            NotPossible
+  ; YAML-NEXT: Function:        foo
+  ; YAML-NEXT: Args:
+  ; YAML-NEXT:   - String:          'Cannot SLP vectorize list: vectorization was impossible'
+  ; YAML-NEXT:   - String:          ' with available vectorization factors'
 
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 8
diff --git a/test/Transforms/SLPVectorizer/X86/remark_unsupported.ll b/test/Transforms/SLPVectorizer/X86/remark_unsupported.ll
index d78d122..a134aec 100644
--- a/test/Transforms/SLPVectorizer/X86/remark_unsupported.ll
+++ b/test/Transforms/SLPVectorizer/X86/remark_unsupported.ll
@@ -1,9 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -mcpu=generic -slp-vectorizer -pass-remarks-output=%t < %s | FileCheck %s
 ; RUN: FileCheck --input-file=%t --check-prefix=YAML %s
 
 ; This type is not supported by SLP
 define void @test(x86_fp80* %i1, x86_fp80* %i2, x86_fp80* %o) {
-
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I1_0:%.*]] = load x86_fp80, x86_fp80* [[I1:%.*]], align 16
+; CHECK-NEXT:    [[I1_GEP1:%.*]] = getelementptr x86_fp80, x86_fp80* [[I1]], i64 1
+; CHECK-NEXT:    [[I1_1:%.*]] = load x86_fp80, x86_fp80* [[I1_GEP1]], align 16
+; CHECK-NEXT:    br i1 undef, label [[THEN:%.*]], label [[END:%.*]]
+; CHECK:       then:
+; CHECK-NEXT:    [[I2_GEP0:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2:%.*]], i64 0
+; CHECK-NEXT:    [[I2_0:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP0]], align 16
+; CHECK-NEXT:    [[I2_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2]], i64 1
+; CHECK-NEXT:    [[I2_1:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP1]], align 16
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[PHI0:%.*]] = phi x86_fp80 [ [[I1_0]], [[ENTRY:%.*]] ], [ [[I2_0]], [[THEN]] ]
+; CHECK-NEXT:    [[PHI1:%.*]] = phi x86_fp80 [ [[I1_1]], [[ENTRY]] ], [ [[I2_1]], [[THEN]] ]
+; CHECK-NEXT:    store x86_fp80 [[PHI0]], x86_fp80* [[O:%.*]], align 16
+; CHECK-NEXT:    [[O_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[O]], i64 1
+; CHECK-NEXT:    store x86_fp80 [[PHI1]], x86_fp80* [[O_GEP1]], align 16
+; CHECK-NEXT:    ret void
+;
 entry:
   %i1.0 = load x86_fp80, x86_fp80* %i1, align 16
   %i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
@@ -22,13 +42,12 @@
   store x86_fp80 %phi0, x86_fp80* %o, align 16
   %o.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %o, i64 1
   store x86_fp80 %phi1, x86_fp80* %o.gep1, align 16
- ; CHECK-NOT: <{{[0-9]+}} x x86_fp80>
- ; YAML:      Pass:            slp-vectorizer
- ; YAML-NEXT: Name:            UnsupportedType
- ; YAML-NEXT: Function:        test
- ; YAML-NEXT: Args:
- ; YAML-NEXT:   - String:          'Cannot SLP vectorize list: type '
- ; YAML-NEXT:   - String:          x86_fp80 is unsupported by vectorizer
+  ; YAML:      Pass:            slp-vectorizer
+  ; YAML-NEXT: Name:            UnsupportedType
+  ; YAML-NEXT: Function:        test
+  ; YAML-NEXT: Args:
+  ; YAML-NEXT:   - String:          'Cannot SLP vectorize list: type '
+  ; YAML-NEXT:   - String:          x86_fp80 is unsupported by vectorizer
 
   ret void
 }
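The SLP remark tests above replace hand-written CHECK-NOT patterns with full assertions autogenerated by utils/update_test_checks.py, as the added NOTE lines record. A typical invocation to regenerate them after a change, assuming a local build of opt under build/bin, is:

    utils/update_test_checks.py --opt-binary=build/bin/opt \
        test/Transforms/SLPVectorizer/X86/remark_unsupported.ll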
diff --git a/test/Transforms/SLPVectorizer/X86/rgb_phi.ll b/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
index 0bdb7da..c7e419b 100644
--- a/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
+++ b/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
@@ -19,19 +20,46 @@
 ;   return R+G+B;
 ; }
 
-;CHECK-LABEL: @foo(
-;CHECK: br
-;CHECK-NOT: phi <3 x float>
-;CHECK-NOT: fmul <3 x float>
-;CHECK-NOT: fadd <3 x float>
-; At the moment we don't sink extractelements.
-;CHECK: br
-;CHECK-NOT: extractelement
-;CHECK-NOT: extractelement
-;CHECK-NOT: extractelement
-;CHECK: ret
-
 define float @foo(float* nocapture readonly %A) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[A:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[A]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[A]], i64 2
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[TMP3:%.*]] = phi float [ [[TMP0]], [[ENTRY:%.*]] ], [ [[DOTPRE:%.*]], [[FOR_BODY_FOR_BODY_CRIT_EDGE:%.*]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY_FOR_BODY_CRIT_EDGE]] ]
+; CHECK-NEXT:    [[B_032:%.*]] = phi float [ [[TMP2]], [[ENTRY]] ], [ [[ADD14:%.*]], [[FOR_BODY_FOR_BODY_CRIT_EDGE]] ]
+; CHECK-NEXT:    [[G_031:%.*]] = phi float [ [[TMP1]], [[ENTRY]] ], [ [[ADD9:%.*]], [[FOR_BODY_FOR_BODY_CRIT_EDGE]] ]
+; CHECK-NEXT:    [[R_030:%.*]] = phi float [ [[TMP0]], [[ENTRY]] ], [ [[ADD4:%.*]], [[FOR_BODY_FOR_BODY_CRIT_EDGE]] ]
+; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[TMP3]], 7.000000e+00
+; CHECK-NEXT:    [[ADD4]] = fadd float [[R_030]], [[MUL]]
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, float* [[ARRAYIDX7]], align 4
+; CHECK-NEXT:    [[MUL8:%.*]] = fmul float [[TMP5]], 8.000000e+00
+; CHECK-NEXT:    [[ADD9]] = fadd float [[G_031]], [[MUL8]]
+; CHECK-NEXT:    [[TMP6:%.*]] = add nsw i64 [[INDVARS_IV]], 2
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load float, float* [[ARRAYIDX12]], align 4
+; CHECK-NEXT:    [[MUL13:%.*]] = fmul float [[TMP7]], 9.000000e+00
+; CHECK-NEXT:    [[ADD14]] = fadd float [[B_032]], [[MUL13]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP8]], 121
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE]], label [[FOR_END:%.*]]
+; CHECK:       for.body.for.body_crit_edge:
+; CHECK-NEXT:    [[ARRAYIDX3_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[DOTPRE]] = load float, float* [[ARRAYIDX3_PHI_TRANS_INSERT]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[ADD16:%.*]] = fadd float [[ADD4]], [[ADD9]]
+; CHECK-NEXT:    [[ADD17:%.*]] = fadd float [[ADD16]], [[ADD14]]
+; CHECK-NEXT:    ret float [[ADD17]]
+;
 entry:
   %0 = load float, float* %A, align 4
   %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
diff --git a/test/Transforms/SLPVectorizer/X86/saxpy.ll b/test/Transforms/SLPVectorizer/X86/saxpy.ll
index a9ca093..f2f858e 100644
--- a/test/Transforms/SLPVectorizer/X86/saxpy.ll
+++ b/test/Transforms/SLPVectorizer/X86/saxpy.ll
@@ -1,14 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; SLP vectorization example from http://cs.stanford.edu/people/eschkufz/research/asplos291-schkufza.pdf
-;CHECK: SAXPY
-;CHECK: mul nsw <4 x i32>
-;CHECK: ret
-
 define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) {
+; CHECK-LABEL: @SAXPY(
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[Y:%.*]], i64 [[I]]
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[A]], i32 1
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[A]], i32 2
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[A]], i32 3
+; CHECK-NEXT:    [[TMP9:%.*]] = mul nsw <4 x i32> [[TMP8]], [[TMP4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP11:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add nsw <4 x i32> [[TMP9]], [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP12]], <4 x i32>* [[TMP13]], align 4
+; CHECK-NEXT:    ret void
+;
   %1 = getelementptr inbounds i32, i32* %x, i64 %i
   %2 = load i32, i32* %1, align 4
   %3 = mul nsw i32 %2, %a
@@ -45,6 +59,21 @@
 
 ; Make sure we don't crash on this one.
 define void @SAXPY_crash(i32* noalias nocapture %x, i32* noalias nocapture %y, i64 %i) {
+; CHECK-LABEL: @SAXPY_crash(
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[I:%.*]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[Y:%.*]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = add nsw i32 undef, [[TMP4]]
+; CHECK-NEXT:    store i32 [[TMP5]], i32* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[I]], 2
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[Y]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = add nsw i32 undef, [[TMP9]]
+; CHECK-NEXT:    store i32 [[TMP10]], i32* [[TMP7]], align 4
+; CHECK-NEXT:    ret void
+;
   %1 = add i64 %i, 1
   %2 = getelementptr inbounds i32, i32* %x, i64 %1
   %3 = getelementptr inbounds i32, i32* %y, i64 %1
diff --git a/test/Transforms/SLPVectorizer/X86/schedule_budget.ll b/test/Transforms/SLPVectorizer/X86/schedule_budget.ll
index 2cb2373..0cd08f0 100644
--- a/test/Transforms/SLPVectorizer/X86/schedule_budget.ll
+++ b/test/Transforms/SLPVectorizer/X86/schedule_budget.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S  -slp-schedule-budget=16 -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@@ -8,19 +9,63 @@
 
 declare void @unknown()
 
-; CHECK-LABEL: @test
-; CHECK: load float
-; CHECK: load float
-; CHECK: load float
-; CHECK: load float
-; CHECK: call void @unknown
-; CHECK: store float
-; CHECK: store float
-; CHECK: store float
-; CHECK: store float
-; CHECK: load <4 x float>
-; CHECK: store <4 x float>
 define void @test(float * %a, float * %b, float * %c, float * %d) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[L0:%.*]] = load float, float* [[A:%.*]]
+; CHECK-NEXT:    [[A1:%.*]] = getelementptr inbounds float, float* [[A]], i64 1
+; CHECK-NEXT:    [[L1:%.*]] = load float, float* [[A1]]
+; CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds float, float* [[A]], i64 2
+; CHECK-NEXT:    [[L2:%.*]] = load float, float* [[A2]]
+; CHECK-NEXT:    [[A3:%.*]] = getelementptr inbounds float, float* [[A]], i64 3
+; CHECK-NEXT:    [[L3:%.*]] = load float, float* [[A3]]
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    store float [[L0]], float* [[B:%.*]]
+; CHECK-NEXT:    [[B1:%.*]] = getelementptr inbounds float, float* [[B]], i64 1
+; CHECK-NEXT:    store float [[L1]], float* [[B1]]
+; CHECK-NEXT:    [[B2:%.*]] = getelementptr inbounds float, float* [[B]], i64 2
+; CHECK-NEXT:    store float [[L2]], float* [[B2]]
+; CHECK-NEXT:    [[B3:%.*]] = getelementptr inbounds float, float* [[B]], i64 3
+; CHECK-NEXT:    store float [[L3]], float* [[B3]]
+; CHECK-NEXT:    [[C1:%.*]] = getelementptr inbounds float, float* [[C:%.*]], i64 1
+; CHECK-NEXT:    [[C2:%.*]] = getelementptr inbounds float, float* [[C]], i64 2
+; CHECK-NEXT:    [[C3:%.*]] = getelementptr inbounds float, float* [[C]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[C]] to <4 x float>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT:    [[D1:%.*]] = getelementptr inbounds float, float* [[D:%.*]], i64 1
+; CHECK-NEXT:    [[D2:%.*]] = getelementptr inbounds float, float* [[D]], i64 2
+; CHECK-NEXT:    [[D3:%.*]] = getelementptr inbounds float, float* [[D]], i64 3
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[D]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP1]], <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   ; Don't vectorize these loads.
   %l0 = load float, float* %a
diff --git a/test/Transforms/SLPVectorizer/X86/simple-loop.ll b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
index c9bb884..975a1af 100644
--- a/test/Transforms/SLPVectorizer/X86/simple-loop.ll
+++ b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
@@ -1,10 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
-;CHECK:rollable
 define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i64 %n) {
+; CHECK-LABEL: @rollable(
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
+; CHECK:       .lr.ph:
+; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP10:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = mul <4 x i32> <i32 7, i32 7, i32 7, i32 7>, [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> <i32 7, i32 14, i32 21, i32 28>, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP10]] = add i64 [[I_019]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP10]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
+; CHECK:       ._crit_edge:
+; CHECK-NEXT:    ret i32 undef
+;
   %1 = icmp eq i64 %n, 0
   br i1 %1, label %._crit_edge, label %.lr.ph
 
@@ -12,7 +32,6 @@
   %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
   %2 = shl i64 %i.019, 2
   %3 = getelementptr inbounds i32, i32* %in, i64 %2
-;CHECK:load <4 x i32>
   %4 = load i32, i32* %3, align 4
   %5 = or i64 %2, 1
   %6 = getelementptr inbounds i32, i32* %in, i64 %5
@@ -23,9 +42,7 @@
   %11 = or i64 %2, 3
   %12 = getelementptr inbounds i32, i32* %in, i64 %11
   %13 = load i32, i32* %12, align 4
-;CHECK:mul <4 x i32>
   %14 = mul i32 %4, 7
-;CHECK:add <4 x i32>
   %15 = add i32 %14, 7
   %16 = mul i32 %7, 7
   %17 = add i32 %16, 14
@@ -34,7 +51,6 @@
   %20 = mul i32 %13, 7
   %21 = add i32 %20, 28
   %22 = getelementptr inbounds i32, i32* %out, i64 %2
-;CHECK:store <4 x i32>
   store i32 %15, i32* %22, align 4
   %23 = getelementptr inbounds i32, i32* %out, i64 %5
   store i32 %17, i32* %23, align 4
@@ -47,14 +63,50 @@
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 
 ._crit_edge:                                      ; preds = %.lr.ph, %0
-;CHECK: ret
   ret i32 undef
 }
 
-;CHECK:unrollable
-;CHECK-NOT: <4 x i32>
-;CHECK: ret
 define i32 @unrollable(i32* %in, i32* %out, i64 %n) nounwind ssp uwtable {
+; CHECK-LABEL: @unrollable(
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
+; CHECK:       .lr.ph:
+; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP26:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = or i64 [[TMP2]], 2
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = or i64 [[TMP2]], 3
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP4]], 7
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], 7
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP7]], 7
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP16]], 14
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP10]], 7
+; CHECK-NEXT:    [[TMP19:%.*]] = add i32 [[TMP18]], 21
+; CHECK-NEXT:    [[TMP20:%.*]] = mul i32 [[TMP13]], 7
+; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP20]], 28
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    store i32 [[TMP15]], i32* [[TMP22]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP5]]
+; CHECK-NEXT:    store i32 [[TMP17]], i32* [[TMP23]], align 4
+; CHECK-NEXT:    [[BARRIER:%.*]] = call i32 @goo(i32 0)
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP8]]
+; CHECK-NEXT:    store i32 [[TMP19]], i32* [[TMP24]], align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 [[TMP11]]
+; CHECK-NEXT:    store i32 [[TMP21]], i32* [[TMP25]], align 4
+; CHECK-NEXT:    [[TMP26]] = add i64 [[I_019]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP26]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
+; CHECK:       ._crit_edge:
+; CHECK-NEXT:    ret i32 undef
+;
   %1 = icmp eq i64 %n, 0
   br i1 %1, label %._crit_edge, label %.lr.ph
 
diff --git a/test/Transforms/SLPVectorizer/X86/unreachable.ll b/test/Transforms/SLPVectorizer/X86/unreachable.ll
index f29f69d..cc1f1fc 100644
--- a/test/Transforms/SLPVectorizer/X86/unreachable.ll
+++ b/test/Transforms/SLPVectorizer/X86/unreachable.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
 
 ; Check that the SLPVectorizer does not crash when handling
 ; unreachable blocks with unschedulable instructions.
@@ -7,6 +8,34 @@
 target triple = "x86_64-apple-macosx10.9.0"
 
 define void @foo(i32* nocapture %x) #0 {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[BB2:%.*]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[T3:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 4
+; CHECK-NEXT:    [[T4:%.*]] = load i32, i32* [[T3]], align 4
+; CHECK-NEXT:    [[T5:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 5
+; CHECK-NEXT:    [[T6:%.*]] = load i32, i32* [[T5]], align 4
+; CHECK-NEXT:    [[BAD:%.*]] = fadd float [[BAD]], 0.000000e+00
+; CHECK-NEXT:    [[T7:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 6
+; CHECK-NEXT:    [[T8:%.*]] = load i32, i32* [[T7]], align 4
+; CHECK-NEXT:    [[T9:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 7
+; CHECK-NEXT:    [[T10:%.*]] = load i32, i32* [[T9]], align 4
+; CHECK-NEXT:    br label [[BB2]]
+; CHECK:       bb2:
+; CHECK-NEXT:    [[T1_0:%.*]] = phi i32 [ [[T4]], [[BB1:%.*]] ], [ 2, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[T2_0:%.*]] = phi i32 [ [[T6]], [[BB1]] ], [ 2, [[ENTRY]] ]
+; CHECK-NEXT:    [[T3_0:%.*]] = phi i32 [ [[T8]], [[BB1]] ], [ 2, [[ENTRY]] ]
+; CHECK-NEXT:    [[T4_0:%.*]] = phi i32 [ [[T10]], [[BB1]] ], [ 2, [[ENTRY]] ]
+; CHECK-NEXT:    store i32 [[T1_0]], i32* [[X]], align 4
+; CHECK-NEXT:    [[T12:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT:    store i32 [[T2_0]], i32* [[T12]], align 4
+; CHECK-NEXT:    [[T13:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT:    store i32 [[T3_0]], i32* [[T13]], align 4
+; CHECK-NEXT:    [[T14:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 3
+; CHECK-NEXT:    store i32 [[T4_0]], i32* [[T14]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %bb2
 
@@ -16,7 +45,7 @@
   %t5 = getelementptr inbounds i32, i32* %x, i64 5
   %t6 = load i32, i32* %t5, align 4
   %bad = fadd float %bad, 0.000000e+00  ; <- an instruction with self dependency,
-                                        ;    but legal in unreachable code
+  ;    but legal in unreachable code
   %t7 = getelementptr inbounds i32, i32* %x, i64 6
   %t8 = load i32, i32* %t7, align 4
   %t9 = getelementptr inbounds i32, i32* %x, i64 7
diff --git a/test/Transforms/SLPVectorizer/X86/vector_gep.ll b/test/Transforms/SLPVectorizer/X86/vector_gep.ll
index 595a77f..436f091 100644
--- a/test/Transforms/SLPVectorizer/X86/vector_gep.ll
+++ b/test/Transforms/SLPVectorizer/X86/vector_gep.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ;RUN: opt < %s -slp-vectorizer -S | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -8,9 +9,14 @@
 
 ; Function Attrs: noreturn readonly uwtable
 define void @_Z3fn1v(i32 %x, <16 x i32*>%y) local_unnamed_addr #0 {
-; CHECK-LABEL: _Z3fn1v
-; CHECK: getelementptr i32, <16 x i32*>
-; CHECK: getelementptr i32, <16 x i32*>
+; CHECK-LABEL: @_Z3fn1v(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CONV42_LE:%.*]] = sext i32 [[X:%.*]] to i64
+; CHECK-NEXT:    [[CONV36109_LE:%.*]] = zext i32 2 to i64
+; CHECK-NEXT:    [[VECTORGEP:%.*]] = getelementptr i32, <16 x i32*> [[Y:%.*]], i64 [[CONV36109_LE]]
+; CHECK-NEXT:    [[VECTORGEP208:%.*]] = getelementptr i32, <16 x i32*> [[Y]], i64 [[CONV42_LE]]
+; CHECK-NEXT:    unreachable
+;
 
 entry:
   %conv42.le = sext i32 %x to i64
diff --git a/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll b/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
index efd5386..d84a499 100644
--- a/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
+++ b/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
@@ -1,12 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=xcore  | FileCheck %s
 
 target datalayout = "e-p:32:32:32-a0:0:32-n32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f16:16:32-f32:32:32-f64:32:32"
 target triple = "xcore"
 
 ; Simple 3-pair chain with loads and stores
-; CHECK: test1
-; CHECK-NOT: <2 x double>
 define void @test1(double* %a, double* %b, double* %c) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[I0:%.*]] = load double, double* [[A:%.*]], align 8
+; CHECK-NEXT:    [[I1:%.*]] = load double, double* [[B:%.*]], align 8
+; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[I0]], [[I1]]
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
+; CHECK-NEXT:    [[I3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
+; CHECK-NEXT:    [[I4:%.*]] = load double, double* [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
+; CHECK-NEXT:    store double [[MUL]], double* [[C:%.*]], align 8
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C]], i64 1
+; CHECK-NEXT:    store double [[MUL5]], double* [[ARRAYIDX5]], align 8
+; CHECK-NEXT:    ret void
+;
 entry:
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
diff --git a/test/Transforms/SLPVectorizer/int_sideeffect.ll b/test/Transforms/SLPVectorizer/int_sideeffect.ll
index a6123c1..aab3f02 100644
--- a/test/Transforms/SLPVectorizer/int_sideeffect.ll
+++ b/test/Transforms/SLPVectorizer/int_sideeffect.ll
@@ -1,25 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S < %s -slp-vectorizer -slp-max-reg-size=128 -slp-min-reg-size=128 | FileCheck %s
 
 declare void @llvm.sideeffect()
 
 ; SLP vectorization across a @llvm.sideeffect.
 
-; CHECK-LABEL: test
-; CHECK: store <4 x float>
 define void @test(float* %p) {
-    %p0 = getelementptr float, float* %p, i64 0
-    %p1 = getelementptr float, float* %p, i64 1
-    %p2 = getelementptr float, float* %p, i64 2
-    %p3 = getelementptr float, float* %p, i64 3
-    %l0 = load float, float* %p0
-    %l1 = load float, float* %p1
-    %l2 = load float, float* %p2
-    call void @llvm.sideeffect()
-    %l3 = load float, float* %p3
-    store float %l0, float* %p0
-    call void @llvm.sideeffect()
-    store float %l1, float* %p1
-    store float %l2, float* %p2
-    store float %l3, float* %p3
-    ret void
+; CHECK-LABEL: @test(
+; CHECK-NEXT:    [[P0:%.*]] = getelementptr float, float* [[P:%.*]], i64 0
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr float, float* [[P]], i64 1
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr float, float* [[P]], i64 2
+; CHECK-NEXT:    [[P3:%.*]] = getelementptr float, float* [[P]], i64 3
+; CHECK-NEXT:    call void @llvm.sideeffect()
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[P0]] to <4 x float>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT:    call void @llvm.sideeffect()
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[P0]] to <4 x float>*
+; CHECK-NEXT:    store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT:    ret void
+;
+  %p0 = getelementptr float, float* %p, i64 0
+  %p1 = getelementptr float, float* %p, i64 1
+  %p2 = getelementptr float, float* %p, i64 2
+  %p3 = getelementptr float, float* %p, i64 3
+  %l0 = load float, float* %p0
+  %l1 = load float, float* %p1
+  %l2 = load float, float* %p2
+  call void @llvm.sideeffect()
+  %l3 = load float, float* %p3
+  store float %l0, float* %p0
+  call void @llvm.sideeffect()
+  store float %l1, float* %p1
+  store float %l2, float* %p2
+  store float %l3, float* %p3
+  ret void
 }
diff --git a/test/Transforms/SROA/alignment.ll b/test/Transforms/SROA/alignment.ll
index 8bc9018..cbae6be 100644
--- a/test/Transforms/SROA/alignment.ll
+++ b/test/Transforms/SROA/alignment.ll
@@ -181,3 +181,50 @@
   ret void
 ; CHECK: ret void
 }
+
+define void @test8() {
+; CHECK-LABEL: @test8(
+; CHECK: load i32, {{.*}}, align 1
+; CHECK: load i32, {{.*}}, align 1
+; CHECK: load i32, {{.*}}, align 1
+; CHECK: load i32, {{.*}}, align 1
+; CHECK: load i32, {{.*}}, align 1
+
+  %ptr = alloca [5 x i32], align 1
+  %ptr.8 = bitcast [5 x i32]* %ptr to i8*
+  call void @populate(i8* %ptr.8)
+  %val = load [5 x i32], [5 x i32]* %ptr, align 1
+  ret void
+}
+
+define void @test9() {
+; CHECK-LABEL: @test9(
+; CHECK: load i32, {{.*}}, align 8
+; CHECK: load i32, {{.*}}, align 4
+; CHECK: load i32, {{.*}}, align 8
+; CHECK: load i32, {{.*}}, align 4
+; CHECK: load i32, {{.*}}, align 8
+
+  %ptr = alloca [5 x i32], align 8
+  %ptr.8 = bitcast [5 x i32]* %ptr to i8*
+  call void @populate(i8* %ptr.8)
+  %val = load [5 x i32], [5 x i32]* %ptr, align 8
+  ret void
+}
+
+define void @test10() {
+; CHECK-LABEL: @test10(
+; CHECK: load i32, {{.*}}, align 2
+; CHECK: load i8, {{.*}}, align 2
+; CHECK: load i8, {{.*}}, align 1
+; CHECK: load i8, {{.*}}, align 2
+; CHECK: load i16, {{.*}}, align 2
+
+  %ptr = alloca {i32, i8, i8, {i8, i16}}, align 2
+  %ptr.8 = bitcast {i32, i8, i8, {i8, i16}}* %ptr to i8*
+  call void @populate(i8* %ptr.8)
+  %val = load {i32, i8, i8, {i8, i16}}, {i32, i8, i8, {i8, i16}}* %ptr, align 2
+  ret void
+}
+
+declare void @populate(i8*)
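The alignments checked in @test8, @test9, and @test10 follow directly from the alloca alignment and each loaded piece's byte offset: the usable alignment of a split load is the largest power of two dividing both. Worked out for @test9, a sketch assuming that rule:

    ; alloca align 8, i32 pieces at byte offsets 0, 4, 8, 12, 16
    ; offset 0  -> 8
    ; offset 4  -> 4
    ; offset 8  -> 8
    ; offset 12 -> 4
    ; offset 16 -> 8
    ; which matches the five CHECK lines of @test9 above.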
diff --git a/test/Transforms/SROA/mem-par-metadata-sroa.ll b/test/Transforms/SROA/mem-par-metadata-sroa.ll
index 32ea8f5..577245c 100644
--- a/test/Transforms/SROA/mem-par-metadata-sroa.ll
+++ b/test/Transforms/SROA/mem-par-metadata-sroa.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -sroa -S | FileCheck %s
 ;
-; Make sure the llvm.mem.parallel_loop_access meta-data is preserved
+; Make sure the llvm.access.group meta-data is preserved
 ; when a load/store is replaced with another load/store by sroa
 ;
 ; class Complex {
@@ -33,9 +33,9 @@
 
 ; CHECK: for.body:
 ; CHECK-NOT:  store i32 %{{.*}}, i32* %{{.*}}, align 4
-; CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 4, !llvm.mem.parallel_loop_access !1
+; CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 4, !llvm.access.group !1
 ; CHECK-NOT:  store i32 %{{.*}}, i32* %{{.*}}, align 4
-; CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 4, !llvm.mem.parallel_loop_access !1
+; CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 4, !llvm.access.group !1
 ; CHECK-NOT:  store i32 %{{.*}}, i32* %{{.*}}, align 4
 ; CHECK: br label
 
@@ -63,30 +63,30 @@
   %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.0
   %real_.i = getelementptr inbounds %class.Complex, %class.Complex* %t0, i64 0, i32 0
   %real_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx, i64 0, i32 0
-  %0 = load float, float* %real_.i.i, align 4, !llvm.mem.parallel_loop_access !1
-  store float %0, float* %real_.i, align 4, !llvm.mem.parallel_loop_access !1
+  %0 = load float, float* %real_.i.i, align 4, !llvm.access.group !11
+  store float %0, float* %real_.i, align 4, !llvm.access.group !11
   %imaginary_.i = getelementptr inbounds %class.Complex, %class.Complex* %t0, i64 0, i32 1
   %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx, i64 0, i32 1
-  %1 = load float, float* %imaginary_.i.i, align 4, !llvm.mem.parallel_loop_access !1
-  store float %1, float* %imaginary_.i, align 4, !llvm.mem.parallel_loop_access !1
+  %1 = load float, float* %imaginary_.i.i, align 4, !llvm.access.group !11
+  store float %1, float* %imaginary_.i, align 4, !llvm.access.group !11
   %arrayidx1 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.0
   %real_.i1 = getelementptr inbounds %class.Complex, %class.Complex* %t0, i64 0, i32 0
-  %2 = load float, float* %real_.i1, align 4, !noalias !3, !llvm.mem.parallel_loop_access !1
+  %2 = load float, float* %real_.i1, align 4, !noalias !3, !llvm.access.group !11
   %real_2.i = getelementptr inbounds %class.Complex, %class.Complex* %t0, i64 0, i32 0
-  %3 = load float, float* %real_2.i, align 4, !noalias !3, !llvm.mem.parallel_loop_access !1
+  %3 = load float, float* %real_2.i, align 4, !noalias !3, !llvm.access.group !11
   %add.i = fadd float %2, %3
   %imaginary_.i2 = getelementptr inbounds %class.Complex, %class.Complex* %t0, i64 0, i32 1
-  %4 = load float, float* %imaginary_.i2, align 4, !noalias !3, !llvm.mem.parallel_loop_access !1
+  %4 = load float, float* %imaginary_.i2, align 4, !noalias !3, !llvm.access.group !11
   %imaginary_3.i = getelementptr inbounds %class.Complex, %class.Complex* %t0, i64 0, i32 1
-  %5 = load float, float* %imaginary_3.i, align 4, !noalias !3, !llvm.mem.parallel_loop_access !1
+  %5 = load float, float* %imaginary_3.i, align 4, !noalias !3, !llvm.access.group !11
   %add4.i = fadd float %4, %5
   %real_.i.i3 = getelementptr inbounds %class.Complex, %class.Complex* %tmpcast, i64 0, i32 0
-  store float %add.i, float* %real_.i.i3, align 4, !alias.scope !3, !llvm.mem.parallel_loop_access !1
+  store float %add.i, float* %real_.i.i3, align 4, !alias.scope !3, !llvm.access.group !11
   %imaginary_.i.i4 = getelementptr inbounds %class.Complex, %class.Complex* %tmpcast, i64 0, i32 1
-  store float %add4.i, float* %imaginary_.i.i4, align 4, !alias.scope !3, !llvm.mem.parallel_loop_access !1
+  store float %add4.i, float* %imaginary_.i.i4, align 4, !alias.scope !3, !llvm.access.group !11
   %6 = bitcast %class.Complex* %arrayidx1 to i64*
-  %7 = load i64, i64* %ref.tmp, align 8, !llvm.mem.parallel_loop_access !1
-  store i64 %7, i64* %6, align 4, !llvm.mem.parallel_loop_access !1
+  %7 = load i64, i64* %ref.tmp, align 8, !llvm.access.group !11
+  store i64 %7, i64* %6, align 4, !llvm.access.group !11
   %inc = add nsw i64 %offset.0, 1
   br label %for.cond, !llvm.loop !1
 
@@ -103,8 +103,9 @@
 !llvm.ident = !{!0}
 
 !0 = !{!"clang version 4.0.0 (cfe/trunk 277751)"}
-!1 = distinct !{!1, !2}
+!1 = distinct !{!1, !2, !{!"llvm.loop.parallel_accesses", !11}}
 !2 = !{!"llvm.loop.vectorize.enable", i1 true}
 !3 = !{!4}
 !4 = distinct !{!4, !5, !"_ZNK7ComplexplERKS_: %agg.result"}
 !5 = distinct !{!5, !"_ZNK7ComplexplERKS_"}
+!11 = distinct !{}
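This is the pattern the migration applies throughout this commit: each parallel memory access now carries !llvm.access.group pointing at a distinct empty node, and the loop's !llvm.loop metadata lists that node under llvm.loop.parallel_accesses. A minimal self-contained sketch (function and value names illustrative):

    define void @parallel(i32* %p) {
    entry:
      br label %loop

    loop:
      %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
      %addr = getelementptr inbounds i32, i32* %p, i64 %i
      %v = load i32, i32* %addr, align 4, !llvm.access.group !2
      store i32 %v, i32* %addr, align 4, !llvm.access.group !2
      %i.next = add nuw nsw i64 %i, 1
      %done = icmp eq i64 %i.next, 16
      br i1 %done, label %exit, label %loop, !llvm.loop !0

    exit:
      ret void
    }

    !0 = distinct !{!0, !1}
    !1 = !{!"llvm.loop.parallel_accesses", !2}
    !2 = distinct !{}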
diff --git a/test/Transforms/Scalarizer/basic.ll b/test/Transforms/Scalarizer/basic.ll
index c3ba1fe..29a82fd 100644
--- a/test/Transforms/Scalarizer/basic.ll
+++ b/test/Transforms/Scalarizer/basic.ll
@@ -206,17 +206,17 @@
   ret void
 }
 
-; Check that llvm.mem.parallel_loop_access information is preserved.
+; Check that llvm.access.group information is preserved.
 define void @f5(i32 %count, <4 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-LABEL: @f5(
-; CHECK: %val.i0 = load i32, i32* %this_src.i0, align 16, !llvm.mem.parallel_loop_access ![[TAG:[0-9]*]]
-; CHECK: %val.i1 = load i32, i32* %this_src.i1, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: %val.i2 = load i32, i32* %this_src.i2, align 8, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: %val.i3 = load i32, i32* %this_src.i3, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: store i32 %add.i0, i32* %this_dst.i0, align 16, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: store i32 %add.i1, i32* %this_dst.i1, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: store i32 %add.i2, i32* %this_dst.i2, align 8, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: store i32 %add.i3, i32* %this_dst.i3, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: %val.i0 = load i32, i32* %this_src.i0, align 16, !llvm.access.group ![[TAG:[0-9]*]]
+; CHECK: %val.i1 = load i32, i32* %this_src.i1, align 4, !llvm.access.group ![[TAG]]
+; CHECK: %val.i2 = load i32, i32* %this_src.i2, align 8, !llvm.access.group ![[TAG]]
+; CHECK: %val.i3 = load i32, i32* %this_src.i3, align 4, !llvm.access.group ![[TAG]]
+; CHECK: store i32 %add.i0, i32* %this_dst.i0, align 16, !llvm.access.group ![[TAG]]
+; CHECK: store i32 %add.i1, i32* %this_dst.i1, align 4, !llvm.access.group ![[TAG]]
+; CHECK: store i32 %add.i2, i32* %this_dst.i2, align 8, !llvm.access.group ![[TAG]]
+; CHECK: store i32 %add.i3, i32* %this_dst.i3, align 4, !llvm.access.group ![[TAG]]
 ; CHECK: ret void
 entry:
   br label %loop
@@ -225,9 +225,9 @@
   %index = phi i32 [ 0, %entry ], [ %next_index, %loop ]
   %this_src = getelementptr <4 x i32>, <4 x i32> *%src, i32 %index
   %this_dst = getelementptr <4 x i32>, <4 x i32> *%dst, i32 %index
-  %val = load <4 x i32> , <4 x i32> *%this_src, !llvm.mem.parallel_loop_access !3
+  %val = load <4 x i32> , <4 x i32> *%this_src, !llvm.access.group !13
   %add = add <4 x i32> %val, %val
-  store <4 x i32> %add, <4 x i32> *%this_dst, !llvm.mem.parallel_loop_access !3
+  store <4 x i32> %add, <4 x i32> *%this_dst, !llvm.access.group !13
   %next_index = add i32 %index, -1
   %continue = icmp ne i32 %next_index, %count
   br i1 %continue, label %loop, label %end, !llvm.loop !3
@@ -447,6 +447,7 @@
 !0 = !{ !"root" }
 !1 = !{ !"set1", !0 }
 !2 = !{ !"set2", !0 }
-!3 = !{ !3 }
+!3 = !{ !3, !{!"llvm.loop.parallel_accesses", !13} }
 !4 = !{ float 4.0 }
 !5 = !{ i64 0, i64 8, null }
+!13 = distinct !{}
diff --git a/test/Transforms/SimplifyCFG/combine-parallel-mem-md.ll b/test/Transforms/SimplifyCFG/combine-parallel-mem-md.ll
index 7afde1f..d3ff927 100644
--- a/test/Transforms/SimplifyCFG/combine-parallel-mem-md.ll
+++ b/test/Transforms/SimplifyCFG/combine-parallel-mem-md.ll
@@ -8,39 +8,39 @@
   br label %for.body
 
 ; CHECK-LABEL: @Test
-; CHECK: load i32, i32* {{.*}}, align 4, !llvm.mem.parallel_loop_access !0
-; CHECK: load i32, i32* {{.*}}, align 4, !llvm.mem.parallel_loop_access !0
-; CHECK: store i32 {{.*}}, align 4, !llvm.mem.parallel_loop_access !0
+; CHECK: load i32, i32* {{.*}}, align 4, !llvm.access.group !0
+; CHECK: load i32, i32* {{.*}}, align 4, !llvm.access.group !0
+; CHECK: store i32 {{.*}}, align 4, !llvm.access.group !0
 ; CHECK-NOT: load
 ; CHECK-NOT: store
 
 for.body:                                         ; preds = %cond.end, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %cond.end ]
   %arrayidx = getelementptr inbounds i32, i32* %p, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !0
+  %0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !0
   %cmp1 = icmp eq i32 %0, 0
   br i1 %cmp1, label %cond.true, label %cond.false
 
 cond.false:                                       ; preds = %for.body
   %arrayidx3 = getelementptr inbounds i32, i32* %res, i64 %indvars.iv
-  %v = load i32, i32* %arrayidx3, align 4, !llvm.mem.parallel_loop_access !0
+  %v = load i32, i32* %arrayidx3, align 4, !llvm.access.group !0
   %arrayidx7 = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx7, align 4, !llvm.mem.parallel_loop_access !0
+  %1 = load i32, i32* %arrayidx7, align 4, !llvm.access.group !0
   %add = add nsw i32 %1, %v
   br label %cond.end
 
 cond.true:                                       ; preds = %for.body
   %arrayidx4 = getelementptr inbounds i32, i32* %res, i64 %indvars.iv
-  %w = load i32, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !0
+  %w = load i32, i32* %arrayidx4, align 4, !llvm.access.group !0
   %arrayidx8 = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
-  %2 = load i32, i32* %arrayidx8, align 4, !llvm.mem.parallel_loop_access !0
+  %2 = load i32, i32* %arrayidx8, align 4, !llvm.access.group !0
   %add2 = add nsw i32 %2, %w
   br label %cond.end
 
 cond.end:                                         ; preds = %for.body, %cond.false
   %cond = phi i32 [ %add, %cond.false ], [ %add2, %cond.true ]
   %arrayidx9 = getelementptr inbounds i32, i32* %res, i64 %indvars.iv
-  store i32 %cond, i32* %arrayidx9, align 4, !llvm.mem.parallel_loop_access !0
+  store i32 %cond, i32* %arrayidx9, align 4, !llvm.access.group !0
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 16
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
@@ -51,5 +51,6 @@
 
 attributes #0 = { norecurse nounwind uwtable }
 
-!0 = distinct !{!0, !1}
+!0 = distinct !{!0, !1, !{!"llvm.loop.parallel_accesses", !10}}
 !1 = !{!"llvm.loop.vectorize.enable", i1 true}
+!10 = distinct !{}
diff --git a/test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll b/test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll
index eeda793..fb239b0 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
 ; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
 
diff --git a/test/Transforms/ThinLTOBitcodeWriter/comdat.ll b/test/Transforms/ThinLTOBitcodeWriter/comdat.ll
index caea48e..a43fa1c 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/comdat.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/comdat.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=THIN %s
 ; RUN: llvm-modextract -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=MERGED %s
 
diff --git a/test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll b/test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll
index eb0cbe7..200d494 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=CHECK0 %s
 ; RUN: llvm-modextract -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=CHECK1 %s
 ; CHECK0-NOT: @{{.*}}anon{{.*}}=
diff --git a/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll b/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
index 119b821..a1dbd96 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=CHECK1 %s
 
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll b/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
index 661d073..c405c36 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/pr33536.ll
@@ -1,7 +1,7 @@
 ; Test for a bug specific to the new pass manager where we may build a domtree
 ; to make more precise AA queries for functions.
 ;
-; RUN: opt -aa-pipeline=default -passes='no-op-module' -debug-pass-manager -thinlto-bc -o %t %s
+; RUN: opt -aa-pipeline=default -passes='no-op-module' -debug-pass-manager -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
 ; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
 
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split-internal-typeid.ll b/test/Transforms/ThinLTOBitcodeWriter/split-internal-typeid.ll
index a43db9a..290df00 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/split-internal-typeid.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/split-internal-typeid.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o %t0 %t
 ; RUN: llvm-modextract -b -n 1 -o %t1 %t
 ; RUN: not llvm-modextract -b -n 2 -o - %t 2>&1 | FileCheck --check-prefix=ERROR %s
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split-internal1.ll b/test/Transforms/ThinLTOBitcodeWriter/split-internal1.ll
index 6d18c4f..42a06bd 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/split-internal1.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/split-internal1.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o %t0 %t
 ; RUN: llvm-modextract -b -n 1 -o %t1 %t
 ; RUN: not llvm-modextract -b -n 2 -o - %t 2>&1 | FileCheck --check-prefix=ERROR %s
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split-internal2.ll b/test/Transforms/ThinLTOBitcodeWriter/split-internal2.ll
index fbe618f..02fc3d1 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/split-internal2.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/split-internal2.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o %t0 %t
 ; RUN: llvm-modextract -b -n 1 -o %t1 %t
 ; RUN: not llvm-modextract -b -n 2 -o - %t 2>&1 | FileCheck --check-prefix=ERROR %s
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll
index 087796b..7ebb30a 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
 ; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
 
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll
index 66d37d5..fcf5751 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
 ; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
 
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split.ll b/test/Transforms/ThinLTOBitcodeWriter/split.ll
index 08ed92e..5502f7a 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/split.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/split.ll
@@ -1,6 +1,6 @@
 ; Generate bitcode files with summary, as well as minimized bitcode without
 ; the debug metadata for the thin link.
-; RUN: opt -thinlto-bc -thin-link-bitcode-file=%t2 -o %t %s
+; RUN: opt -thinlto-bc -thin-link-bitcode-file=%t2 -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o %t0.bc %t
 ; RUN: llvm-modextract -b -n 1 -o %t1.bc %t
 ; RUN: llvm-modextract -b -n 0 -o %t0.thinlink.bc %t2
diff --git a/test/Transforms/ThinLTOBitcodeWriter/symver.ll b/test/Transforms/ThinLTOBitcodeWriter/symver.ll
index 078825c..8acdd0c 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/symver.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/symver.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -n 1 -o - %t | llvm-dis | FileCheck %s
 
 ; The target assembly parser is required to parse the symver directives
diff --git a/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll b/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll
index 5413e0f..46c87bc 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -thin-link-bitcode-file=%t2 -o %t %s
+; RUN: opt -thinlto-bc -thin-link-bitcode-file=%t2 -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-dis -o - %t | FileCheck %s
 ; RUN: llvm-bcanalyzer -dump %t | FileCheck --check-prefix=BCA %s
 ; When not splitting the module, the thin link bitcode file should simply be a
@@ -28,7 +28,8 @@
   ret void
 }
 
-; CHECK: !llvm.module.flags = !{![[FLAG:[0-9]+]]}
-; CHECK: ![[FLAG]] = !{i32 1, !"ThinLTO", i32 0}
+; CHECK: !llvm.module.flags = !{![[FLAG1:[0-9]+]], ![[FLAG2:[0-9]+]]}
+; CHECK: ![[FLAG1]] = !{i32 1, !"EnableSplitLTOUnit", i32 1}
+; CHECK: ![[FLAG2]] = !{i32 1, !"ThinLTO", i32 0}
 
 !0 = !{i32 0, !"typeid"}
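The same one-line change runs through this whole group: splitting a module into regular-LTO and ThinLTO halves became opt-in, so -thinlto-bc alone no longer splits, and tests that exercise splitting must now request it with -thinlto-split-lto-unit. The updated CHECK lines above also show the decision being recorded in the EnableSplitLTOUnit module flag. A minimal sketch of the two modes (module body illustrative):

    ; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t.split %s
    ; RUN: opt -thinlto-bc -o %t.nosplit %s
    define void @f() {
      ret void
    }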
diff --git a/test/Transforms/ThinLTOBitcodeWriter/x86/module-asm.ll b/test/Transforms/ThinLTOBitcodeWriter/x86/module-asm.ll
index 15e4778..587ab3f 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/x86/module-asm.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/x86/module-asm.ll
@@ -1,4 +1,4 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t %s
 ; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
 ; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
 
diff --git a/test/Transforms/Util/dbg-user-of-aext.ll b/test/Transforms/Util/dbg-user-of-aext.ll
new file mode 100644
index 0000000..9a31066
--- /dev/null
+++ b/test/Transforms/Util/dbg-user-of-aext.ll
@@ -0,0 +1,87 @@
+; Checks that the llvm.dbg.declare -> llvm.dbg.value conversion utility
+; (here exposed through the SROA pass) refers to [s|z]exts of values (as
+; opposed to the operand of a [s|z]ext).
+; RUN: opt -S -sroa %s | FileCheck %s
+
+; Built from:
+; struct foo { bool b; long i; };
+; void f(bool b, bool expr, foo g) {
+; }
+; And modifying the frag dbg.declare to use a fragmented DIExpression (with offset: 0, size: 4)
+; to test the dbg.declare+fragment case here.
+
+; Expect two fragments:
+; * first starting at bit 0, 8 bits (for the bool)
+; * second starting at bit 32, 32 bits (for the long)
+; (this happens to create/demonstrate a gap from bits [8, 32))
+
+; But also check that a complex expression is not used for a lone bool
+; parameter: it can reference the register it is in directly, without masking
+; off the high bits.
+
+; CHECK: call void @llvm.dbg.value(metadata i8 %g.coerce0, metadata ![[VAR_STRUCT:[0-9]+]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 8))
+; CHECK: call void @llvm.dbg.value(metadata i64 %g.coerce1, metadata ![[VAR_STRUCT]], metadata !DIExpression(DW_OP_LLVM_fragment, 32, 64))
+; CHECK: call void @llvm.dbg.value(metadata i8 %frombool, metadata ![[VAR_BOOL:[0-9]+]], metadata !DIExpression())
+; CHECK: call void @llvm.dbg.value(metadata i8 %frombool1, metadata ![[VAR_FRAG:[0-9]+]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 4))
+
+%struct.foo = type { i8, i64 }
+
+; Function Attrs: noinline nounwind uwtable
+define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) #0 !dbg !6 {
+entry:
+  %g = alloca %struct.foo, align 8
+  %b.addr = alloca i8, align 1
+  %frag.addr = alloca i8, align 1
+  %0 = bitcast %struct.foo* %g to { i8, i64 }*
+  %1 = getelementptr inbounds { i8, i64 }, { i8, i64 }* %0, i32 0, i32 0
+  store i8 %g.coerce0, i8* %1, align 8
+  %2 = getelementptr inbounds { i8, i64 }, { i8, i64 }* %0, i32 0, i32 1
+  store i64 %g.coerce1, i64* %2, align 8
+  %frombool = zext i1 %b to i8
+  store i8 %frombool, i8* %b.addr, align 1
+  call void @llvm.dbg.declare(metadata i8* %b.addr, metadata !15, metadata !16), !dbg !17
+  %frombool1 = sext i1 %frag to i8
+  store i8 %frombool1, i8* %frag.addr, align 1
+  call void @llvm.dbg.declare(metadata i8* %frag.addr, metadata !18, metadata !23), !dbg !19
+  call void @llvm.dbg.declare(metadata %struct.foo* %g, metadata !20, metadata !16), !dbg !21
+  ret void, !dbg !22
+}
+
+; CHECK: ![[VAR_STRUCT]] = !DILocalVariable(name: "g"
+; CHECK: ![[VAR_BOOL]] = !DILocalVariable(name: "b"
+; CHECK: ![[VAR_FRAG]] = !DILocalVariable(name: "frag"
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 303077) (llvm/trunk 303098)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "foo.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang version 5.0.0 (trunk 303077) (llvm/trunk 303098)"}
+!6 = distinct !DISubprogram(name: "f", linkageName: "_Z1fbb3foo", scope: !1, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{null, !9, !9, !10}
+!9 = !DIBasicType(name: "bool", size: 8, encoding: DW_ATE_boolean)
+!10 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "foo", file: !1, line: 1, size: 128, elements: !11, identifier: "_ZTS3foo")
+!11 = !{!12, !13}
+!12 = !DIDerivedType(tag: DW_TAG_member, name: "b", scope: !10, file: !1, line: 1, baseType: !9, size: 8)
+!13 = !DIDerivedType(tag: DW_TAG_member, name: "i", scope: !10, file: !1, line: 1, baseType: !14, size: 64, offset: 64)
+!14 = !DIBasicType(name: "long int", size: 64, encoding: DW_ATE_signed)
+!15 = !DILocalVariable(name: "b", arg: 1, scope: !6, file: !1, line: 2, type: !9)
+!16 = !DIExpression()
+!17 = !DILocation(line: 2, column: 13, scope: !6)
+!18 = !DILocalVariable(name: "frag", arg: 2, scope: !6, file: !1, line: 2, type: !9)
+!19 = !DILocation(line: 2, column: 21, scope: !6)
+!20 = !DILocalVariable(name: "g", arg: 3, scope: !6, file: !1, line: 2, type: !10)
+!21 = !DILocation(line: 2, column: 31, scope: !6)
+!22 = !DILocation(line: 3, column: 1, scope: !6)
+!23 = !DIExpression(DW_OP_LLVM_fragment, 0, 4)
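For reading the fragment checks above: DW_OP_LLVM_fragment takes a bit offset and a bit size within the whole variable, so the two struct fragments cover bits [0, 8) and [32, 96), and the hand-modified frag expression covers bits [0, 4):

    !DIExpression(DW_OP_LLVM_fragment, 0, 8)   ; bits [0, 8)   - the bool member
    !DIExpression(DW_OP_LLVM_fragment, 32, 64) ; bits [32, 96) - the long member
    !DIExpression(DW_OP_LLVM_fragment, 0, 4)   ; bits [0, 4)   - the frag case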
diff --git a/test/Transforms/Util/split-bit-piece.ll b/test/Transforms/Util/split-bit-piece.ll
deleted file mode 100644
index 86f4642..0000000
--- a/test/Transforms/Util/split-bit-piece.ll
+++ /dev/null
@@ -1,83 +0,0 @@
-; Checks that llvm.dbg.declare -> llvm.dbg.value conversion utility
-; (here exposed through the SROA) pass, properly inserts bit_piece expressions
-; if it only describes part of the variable.
-; RUN: opt -S -sroa %s | FileCheck %s
-
-; Built from:
-; struct foo { bool b; long i; };
-; void f(bool b, bool expr, foo g) {
-; }
-; And modifying the frag dbg.declare to use a fragmented DIExpression (with offset: 0, size: 4)
-; to test the dbg.declare+fragment case here.
-
-; Expect two fragments:
-; * first starting at bit 0, 8 bits (for the bool)
-; * second starting at bit 32, 32 bits (for the long)
-; (this happens to create/demonstrate a gap from bits [7, 32))
-
-; But also check that a complex expression is not used for a lone bool
-; parameter. It can reference the register it's in directly without masking off
-; high bits or anything
-
-; CHECK: call void @llvm.dbg.value(metadata i8 %g.coerce0, metadata ![[VAR_STRUCT:[0-9]+]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 8))
-; CHECK: call void @llvm.dbg.value(metadata i64 %g.coerce1, metadata ![[VAR_STRUCT]], metadata !DIExpression(DW_OP_LLVM_fragment, 32, 64))
-; CHECK: call void @llvm.dbg.value(metadata i1 %b, metadata ![[VAR_BOOL:[0-9]+]], metadata !DIExpression())
-; CHECK: call void @llvm.dbg.value(metadata i1 %frag, metadata ![[VAR_FRAG:[0-9]+]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 1))
-
-%struct.foo = type { i8, i64 }
-
-; Function Attrs: noinline nounwind uwtable
-define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) #0 !dbg !6 {
-entry:
-  %g = alloca %struct.foo, align 8
-  %b.addr = alloca i8, align 1
-  %frag.addr = alloca i8, align 1
-  %0 = bitcast %struct.foo* %g to { i8, i64 }*
-  %1 = getelementptr inbounds { i8, i64 }, { i8, i64 }* %0, i32 0, i32 0
-  store i8 %g.coerce0, i8* %1, align 8
-  %2 = getelementptr inbounds { i8, i64 }, { i8, i64 }* %0, i32 0, i32 1
-  store i64 %g.coerce1, i64* %2, align 8
-  %frombool = zext i1 %b to i8
-  store i8 %frombool, i8* %b.addr, align 1
-  call void @llvm.dbg.declare(metadata i8* %b.addr, metadata !15, metadata !16), !dbg !17
-  %frombool1 = zext i1 %frag to i8
-  store i8 %frombool1, i8* %frag.addr, align 1
-  call void @llvm.dbg.declare(metadata i8* %frag.addr, metadata !18, metadata !23), !dbg !19
-  call void @llvm.dbg.declare(metadata %struct.foo* %g, metadata !20, metadata !16), !dbg !21
-  ret void, !dbg !22
-}
-
-; Function Attrs: nounwind readnone speculatable
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-
-attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone speculatable }
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!3, !4}
-!llvm.ident = !{!5}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 303077) (llvm/trunk 303098)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
-!1 = !DIFile(filename: "foo.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
-!2 = !{}
-!3 = !{i32 2, !"Dwarf Version", i32 4}
-!4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{!"clang version 5.0.0 (trunk 303077) (llvm/trunk 303098)"}
-!6 = distinct !DISubprogram(name: "f", linkageName: "_Z1fbb3foo", scope: !1, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, retainedNodes: !2)
-!7 = !DISubroutineType(types: !8)
-!8 = !{null, !9, !9, !10}
-!9 = !DIBasicType(name: "bool", size: 8, encoding: DW_ATE_boolean)
-!10 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "foo", file: !1, line: 1, size: 128, elements: !11, identifier: "_ZTS3foo")
-!11 = !{!12, !13}
-!12 = !DIDerivedType(tag: DW_TAG_member, name: "b", scope: !10, file: !1, line: 1, baseType: !9, size: 8)
-!13 = !DIDerivedType(tag: DW_TAG_member, name: "i", scope: !10, file: !1, line: 1, baseType: !14, size: 64, offset: 64)
-!14 = !DIBasicType(name: "long int", size: 64, encoding: DW_ATE_signed)
-!15 = !DILocalVariable(name: "b", arg: 1, scope: !6, file: !1, line: 2, type: !9)
-!16 = !DIExpression()
-!17 = !DILocation(line: 2, column: 13, scope: !6)
-!18 = !DILocalVariable(name: "frag", arg: 2, scope: !6, file: !1, line: 2, type: !9)
-!19 = !DILocation(line: 2, column: 21, scope: !6)
-!20 = !DILocalVariable(name: "g", arg: 3, scope: !6, file: !1, line: 2, type: !10)
-!21 = !DILocation(line: 2, column: 31, scope: !6)
-!22 = !DILocation(line: 3, column: 1, scope: !6)
-!23 = !DIExpression(DW_OP_LLVM_fragment, 0, 4)
diff --git a/test/Transforms/Util/store-first-op.ll b/test/Transforms/Util/store-first-op.ll
index 397925d..c4ef2a1 100644
--- a/test/Transforms/Util/store-first-op.ll
+++ b/test/Transforms/Util/store-first-op.ll
@@ -24,7 +24,7 @@
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!2}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 3.8.0 (https://github.com/llvm-mirror/clang 89dda3855cda574f355e6defa1d77bdae5053994) (llvm/trunk 257597)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 3.8.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
 !1 = !DIFile(filename: "none", directory: ".")
 !2 = !{i32 2, !"Debug Info Version", i32 3}
 !3 = !DILocalVariable(name: "getU", scope: !4, file: !1, line: 25, type: !5)
diff --git a/test/Unit/lit.cfg.py b/test/Unit/lit.cfg.py
index 609408d..3198ab2 100644
--- a/test/Unit/lit.cfg.py
+++ b/test/Unit/lit.cfg.py
@@ -31,6 +31,11 @@
 if 'TEMP' in os.environ:
     config.environment['TEMP'] = os.environ['TEMP']
 
+# Propagate HOME, as it can be used to override an incorrect home directory
+# in passwd that would otherwise cause the tests to fail.
+if 'HOME' in os.environ:
+    config.environment['HOME'] = os.environ['HOME']
+
 # Propagate path to symbolizer for ASan/MSan.
 for symbolizer in ['ASAN_SYMBOLIZER_PATH', 'MSAN_SYMBOLIZER_PATH']:
     if symbolizer in os.environ:
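Lit forwards only a curated set of environment variables to tests, so anything not explicitly copied into config.environment is invisible to them; the HOME change above follows the same pattern as TEMP and the symbolizer paths. A minimal Python sketch of that propagation pattern (the Config class here is an illustrative stand-in for the object lit injects into lit.cfg.py):

import os

class Config:
    # Hypothetical stand-in; the real `config` object is provided by lit
    # when it executes lit.cfg.py.
    def __init__(self):
        self.environment = {}

config = Config()

# Forward selected variables from the parent environment, mirroring the
# HOME/TEMP/symbolizer handling above. Unset variables are simply skipped.
for var in ['HOME', 'TEMP', 'ASAN_SYMBOLIZER_PATH', 'MSAN_SYMBOLIZER_PATH']:
    if var in os.environ:
        config.environment[var] = os.environ[var]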
diff --git a/test/Verifier/di-subroutine-localvar.ll b/test/Verifier/di-subroutine-localvar.ll
new file mode 100644
index 0000000..477e633
--- /dev/null
+++ b/test/Verifier/di-subroutine-localvar.ll
@@ -0,0 +1,42 @@
+; RUN: opt %s -verify 2>&1 | FileCheck %s
+; CHECK: invalid type
+; CHECK: !20 = !DILocalVariable(name: "f", scope: !21, file: !13, line: 970, type: !14)
+; CHECK: !14 = !DISubroutineType(types: !15)
+
+
+%timespec.0.1.2.3.0.1.2 = type { i64, i64 }
+define internal i64 @init_vdso_clock_gettime(i32, %timespec.0.1.2.3.0.1.2* nonnull) unnamed_addr !dbg !142 {
+  call void @llvm.dbg.value(metadata i64 (i32, %timespec.0.1.2.3.0.1.2*)* null, metadata !162, metadata !DIExpression()), !dbg !167
+  ret i64 -38, !dbg !168
+}
+declare void @llvm.dbg.value(metadata, metadata, metadata) #0
+!llvm.module.flags = !{!0}
+!llvm.dbg.cu = !{!1}
+!0 = !{i32 2, !"Debug Info Version", i32 3}
+!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2, producer: "zig 0.3.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !3, globals: !76)
+!2 = !DIFile(filename: "test", directory: ".")
+!3 = !{!4}
+!4 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "Arch", scope: !5, file: !5, line: 44, baseType: !6, size: 8, align: 8, elements: !7)
+!5 = !DIFile(filename: "builtin.zig", directory: "/home/andy/.local/share/zig/stage1/builtin/ugMGxVES9OkDAffv3xhJS3KQVy0Wm1xPM3Bc6x4MBuup5aetdi5pVTrGRG2aDAn0")
+!6 = !DIBasicType(name: "u7", size: 8, encoding: DW_ATE_unsigned)
+!7 = !{!8}
+!8 = !DIEnumerator(name: "armv8_5a", value: 0)
+!76 = !{!77}
+!77 = !DIGlobalVariableExpression(var: !78, expr: !DIExpression())
+!78 = distinct !DIGlobalVariable(name: "arch", linkageName: "arch", scope: !5, file: !5, line: 437, type: !4, isLocal: true, isDefinition: true)
+!81 = !DIFile(filename: "index.zig", directory: "/store/dev/zig/build-llvm8-debug/lib/zig/std/os/linux")
+!142 = distinct !DISubprogram(name: "init_vdso_clock_gettime", scope: !81, file: !81, line: 968, type: !143, scopeLine: 968, flags: DIFlagStaticMember, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !1, retainedNodes: !153)
+!143 = !DISubroutineType(types: !144)
+!144 = !{!145}
+!145 = !DIBasicType(name: "usize", size: 64, encoding: DW_ATE_unsigned)
+!146 = !DIBasicType(name: "i32", size: 32, encoding: DW_ATE_signed)
+!153 = !{!154}
+!154 = !DILocalVariable(name: "clk", arg: 1, scope: !142, file: !81, line: 968, type: !146)
+!162 = !DILocalVariable(name: "f", scope: !163, file: !81, line: 970, type: !143)
+!163 = distinct !DILexicalBlock(scope: !164, file: !81, line: 969, column: 5)
+!164 = distinct !DILexicalBlock(scope: !165, file: !81, line: 968, column: 66)
+!165 = distinct !DILexicalBlock(scope: !166, file: !81, line: 968, column: 45)
+!166 = distinct !DILexicalBlock(scope: !142, file: !81, line: 968, column: 35)
+!167 = !DILocation(line: 970, column: 5, scope: !163)
+!168 = !DILocation(line: 972, column: 28, scope: !169)
+!169 = distinct !DILexicalBlock(scope: !163, file: !81, line: 970, column: 5)
diff --git a/test/lit.cfg.py b/test/lit.cfg.py
index 3dff11b..3c154c5 100644
--- a/test/lit.cfg.py
+++ b/test/lit.cfg.py
@@ -89,7 +89,6 @@
 config.substitutions.append(('%llvmshlibdir', config.llvm_shlib_dir))
 config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
 config.substitutions.append(('%exeext', config.llvm_exe_ext))
-config.substitutions.append(('%host_cc', config.host_cc))
 
 
 lli_args = []
diff --git a/test/tools/dsymutil/ARM/obfuscated.test b/test/tools/dsymutil/ARM/obfuscated.test
new file mode 100644
index 0000000..9ce684c
--- /dev/null
+++ b/test/tools/dsymutil/ARM/obfuscated.test
@@ -0,0 +1,166 @@
+REQUIRES: system-darwin
+
+RUN: dsymutil --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
+RUN:     | llvm-dwarfdump -v - \
+RUN:     | FileCheck %s
+
+RUN: dsymutil --symbol-map %p/../Inputs/obfuscated.map %p/../Inputs/obfuscated.arm64 -f -o - \
+RUN:     | llvm-dwarfdump -v - \
+RUN:     | FileCheck --check-prefix=NOHIDDEN %s
+
+RUN: dsymutil --symbol-map %p/../Inputs/obfuscated.2.map %p/../Inputs/obfuscated.2.arm64 -f -o - \
+RUN:     | llvm-dwarfdump -v - \
+RUN:     | FileCheck --check-prefix=NOHIDDEN %s
+
+// Run with the plist present and make sure dsymutil finds it.
+RUN: mkdir -p %t.dSYM/Contents/Resources/DWARF/
+RUN: mkdir -p %t.mapdir
+RUN: cp %p/../Inputs/obfuscated.arm64 %t.dSYM/Contents/Resources/DWARF/
+RUN: cp %p/../Inputs/E828A486-8433-3A5E-B6DB-A6294D28133D.plist %t.dSYM/Contents/Resources/
+RUN: cp %p/../Inputs/obfuscated.map %t.mapdir/506AA50A-6B26-3B37-86D2-DC6EBD57B720.bcsymbolmap
+RUN: dsymutil --symbol-map %t.mapdir %t.dSYM 2>&1 | FileCheck --check-prefix=OBFUSCATING %s
+
+// Run without the plist and make sure dsymutil doesn't crash.
+RUN: rm %t.dSYM/Contents/Resources/E828A486-8433-3A5E-B6DB-A6294D28133D.plist
+RUN: dsymutil --symbol-map %t.mapdir %t.dSYM 2>&1 | FileCheck --check-prefix=NOTOBFUSCATING %s
+
+OBFUSCATING-NOT: not unobfuscating
+
+NOTOBFUSCATING: not unobfuscating
+
+NOHIDDEN-NOT: __hidden#
+
+CHECK: .debug_info contents:
+
+CHECK: DW_TAG_compile_unit [1] *
+CHECK:    DW_AT_producer [DW_FORM_strp]    ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
+CHECK:    DW_AT_name [DW_FORM_strp]        ( {{.*}} "main.c")
+CHECK:    DW_AT_comp_dir [DW_FORM_strp]    ( {{.*}} "/Users/steven/dev/alpena/tests/src")
+CHECK:    DW_TAG_subprogram [2]
+CHECK:      DW_AT_name [DW_FORM_strp]      ( {{.*}} "main")
+
+CHECK:  DW_TAG_compile_unit [1] *
+CHECK:    DW_AT_producer [DW_FORM_strp]    ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
+CHECK:    DW_AT_name [DW_FORM_strp]        ( {{.*}} "one.c")
+CHECK:    DW_AT_comp_dir [DW_FORM_strp]    ( {{.*}} "/Users/steven/dev/alpena/tests/src")
+CHECK:    DW_TAG_subprogram [2]
+CHECK:      DW_AT_name [DW_FORM_strp]      ( {{.*}} "one")
+
+CHECK:  DW_TAG_compile_unit [1] *
+CHECK:    DW_AT_producer [DW_FORM_strp]    ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
+CHECK:    DW_AT_name [DW_FORM_strp]        ( {{.*}} "two.c")
+CHECK:    DW_AT_comp_dir [DW_FORM_strp]    ( {{.*}} "/Users/steven/dev/alpena/tests/src")
+CHECK:    DW_TAG_subprogram [2]
+CHECK:      DW_AT_name [DW_FORM_strp]      ( {{.*}} "two")
+
+CHECK:  DW_TAG_compile_unit [1] *
+CHECK:    DW_AT_producer [DW_FORM_strp]    ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
+CHECK:    DW_AT_name [DW_FORM_strp]        ( {{.*}} "three.c")
+CHECK:    DW_AT_comp_dir [DW_FORM_strp]    ( {{.*}} "/Users/steven/dev/alpena/tests/src")
+CHECK:    DW_TAG_subprogram [2]
+CHECK:      DW_AT_name [DW_FORM_strp]      ( {{.*}} "three")
+
+CHECK:  DW_TAG_compile_unit [1] *
+CHECK:    DW_AT_producer [DW_FORM_strp]    ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
+CHECK:    DW_AT_name [DW_FORM_strp]        ( {{.*}} "four.c")
+CHECK:    DW_AT_stmt_list [DW_FORM_data4]  (0x0000011e)
+CHECK:    DW_AT_comp_dir [DW_FORM_strp]    ( {{.*}} "/Users/steven/dev/alpena/tests/src")
+CHECK:    DW_TAG_subprogram [2]
+CHECK:      DW_AT_name [DW_FORM_strp]      ( {{.*}} "four")
+
+CHECK:  DW_TAG_compile_unit [1] *
+CHECK:    DW_AT_producer [DW_FORM_strp]    ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
+CHECK:    DW_AT_name [DW_FORM_strp]        ( {{.*}} "five.c")
+CHECK:    DW_AT_comp_dir [DW_FORM_strp]    ( {{.*}} "/Users/steven/dev/alpena/tests/src")
+CHECK:    DW_TAG_subprogram [2]
+CHECK:      DW_AT_name [DW_FORM_strp]      ( {{.*}} "five")
+
+CHECK:  DW_TAG_compile_unit [1] *
+CHECK:    DW_AT_producer [DW_FORM_strp]    ( {{.*}} "Apple LLVM version 7.0.0 (clang-700.2.38.2)")
+CHECK:    DW_AT_name [DW_FORM_strp]        ( {{.*}} "six.c")
+CHECK:    DW_AT_comp_dir [DW_FORM_strp]    ( {{.*}} "/Users/steven/dev/alpena/tests/src")
+CHECK:    DW_TAG_subprogram [2]
+CHECK:      DW_AT_name [DW_FORM_strp]      ( {{.*}} "six")
+
+CHECK: .debug_line contents:
+CHECK: file_names[  1]:
+CHECK:            name: "main.c"
+CHECK:       dir_index: 0
+CHECK:        mod_time: 0x00000000
+CHECK: file_names[  1]:
+CHECK:            name: "one.c"
+CHECK:       dir_index: 0
+CHECK:        mod_time: 0x00000000
+CHECK:          length: 0x00000000
+CHECK: file_names[  1]:
+CHECK:            name: "two.c"
+CHECK:       dir_index: 0
+CHECK:        mod_time: 0x00000000
+CHECK:          length: 0x00000000
+CHECK: file_names[  1]:
+CHECK:            name: "three.c"
+CHECK:       dir_index: 0
+CHECK:        mod_time: 0x00000000
+CHECK:          length: 0x00000000
+CHECK: file_names[  1]:
+CHECK:            name: "four.c"
+CHECK:       dir_index: 0
+CHECK:        mod_time: 0x00000000
+CHECK:          length: 0x00000000
+CHECK: file_names[  1]:
+CHECK:            name: "five.c"
+CHECK:       dir_index: 0
+CHECK:        mod_time: 0x00000000
+CHECK:          length: 0x00000000
+CHECK: file_names[  1]:
+CHECK:            name: "six.c"
+CHECK:       dir_index: 0
+CHECK:        mod_time: 0x00000000
+CHECK:          length: 0x00000000
+
+CHECK: .debug_pubnames contents:
+CHECK: length = 0x00000017 version = 0x0002 unit_offset = 0x00000000 unit_size = 0x00000044
+CHECK: 0x0000002e "main"
+CHECK: length = 0x00000016 version = 0x0002 unit_offset = 0x00000044 unit_size = 0x00000044
+CHECK: 0x0000002e "one"
+CHECK: length = 0x00000016 version = 0x0002 unit_offset = 0x00000088 unit_size = 0x00000044
+CHECK: 0x0000002e "two"
+CHECK: length = 0x00000018 version = 0x0002 unit_offset = 0x000000cc unit_size = 0x00000044
+CHECK: 0x0000002e "three"
+CHECK: length = 0x00000017 version = 0x0002 unit_offset = 0x00000110 unit_size = 0x00000044
+CHECK: 0x0000002e "four"
+CHECK: length = 0x00000017 version = 0x0002 unit_offset = 0x00000154 unit_size = 0x00000044
+CHECK: 0x0000002e "five"
+CHECK: length = 0x00000016 version = 0x0002 unit_offset = 0x00000198 unit_size = 0x00000044
+CHECK: 0x0000002e "six"
+
+CHECK: .apple_names contents:
+
+CHECK: String: 0x00000091 "five"
+CHECK-NEXT: Data 0 [
+CHECK-NEXT:   Atom[0]: 0x00000182
+CHECK-NEXT: ]
+CHECK: String: 0x0000009c "six"
+CHECK-NEXT: Data 0 [
+CHECK-NEXT:   Atom[0]: 0x000001c6
+CHECK-NEXT: ]
+CHECK: String: 0x00000078 "three"
+CHECK-NEXT: Data 0 [
+CHECK-NEXT:   Atom[0]: 0x000000fa
+CHECK-NEXT: ]
+CHECK: String: 0x0000006c "two"
+CHECK-NEXT: Data 0 [
+CHECK-NEXT:   Atom[0]: 0x000000b6
+CHECK-NEXT: ]
+CHECK: String: 0x00000057 "main"
+CHECK-NEXT: Data 0 [
+CHECK-NEXT:   Atom[0]: 0x0000002e
+CHECK-NEXT: ]
+CHECK: String: 0x00000085 "four"
+CHECK-NEXT: Data 0 [
+CHECK-NEXT:   Atom[0]: 0x0000013e
+CHECK-NEXT: ]
+CHECK: String: 0x00000062 "one"
+CHECK-NEXT: Data 0 [
+CHECK-NEXT:   Atom[0]: 0x00000072
+CHECK-NEXT: ]
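For context on the plist steps in this test: when pointed at a directory with --symbol-map, dsymutil reads the DBGOriginalUUID key from the plist in the dSYM's Resources directory and uses that UUID to select the matching .bcsymbolmap file; without the plist it falls back to not unobfuscating. A rough sketch of that lookup under those assumptions (the function name and structure are illustrative, not dsymutil's actual code):

import os
import plistlib

def find_symbol_map(resources_dir, map_dir):
    # Scan the dSYM's Resources directory for a plist carrying the
    # DBGOriginalUUID of the pre-obfuscation build.
    for name in os.listdir(resources_dir):
        if not name.endswith('.plist'):
            continue
        with open(os.path.join(resources_dir, name), 'rb') as f:
            uuid = plistlib.load(f).get('DBGOriginalUUID')
        if uuid:
            # The map directory holds one <UUID>.bcsymbolmap per binary.
            candidate = os.path.join(map_dir, uuid + '.bcsymbolmap')
            if os.path.exists(candidate):
                return candidate
    return None  # No plist found: proceed without unobfuscating.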
diff --git a/test/tools/dsymutil/Inputs/E828A486-8433-3A5E-B6DB-A6294D28133D.plist b/test/tools/dsymutil/Inputs/E828A486-8433-3A5E-B6DB-A6294D28133D.plist
new file mode 100644
index 0000000..adf7dbf
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/E828A486-8433-3A5E-B6DB-A6294D28133D.plist
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+   <key>DBGOriginalUUID</key>
+   <string>506AA50A-6B26-3B37-86D2-DC6EBD57B720</string>
+</dict>
+</plist>
\ No newline at end of file
diff --git a/test/tools/dsymutil/Inputs/obfuscated.2.arm64 b/test/tools/dsymutil/Inputs/obfuscated.2.arm64
new file mode 100644
index 0000000..b40e023
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/obfuscated.2.arm64
Binary files differ
diff --git a/test/tools/dsymutil/Inputs/obfuscated.2.map b/test/tools/dsymutil/Inputs/obfuscated.2.map
new file mode 100644
index 0000000..6efca59
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/obfuscated.2.map
@@ -0,0 +1,22 @@
+BCSymbolMap Version: 2.0
+_two
+_three
+_four
+_five
+_six
+LLVM version 3.9.0 (ssh://git@stash.sd.apple.com/devtools/clang.git c74ae34bd917b77f9c848bd599dfde2813fb509f)
+main
+main.c
+/Volumes/Data/dev/BitcodeBuildTests/unit
+one
+one.c
+two
+two.c
+three
+three.c
+four
+four.c
+five
+five.c
+six
+six.c
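The map above is positional: after the "BCSymbolMap Version" header, entry N supplies the original name for an obfuscated symbol of the form __hidden#N_ (the NOHIDDEN checks earlier look for exactly that prefix). A small worked sketch of the decoding, assuming zero-based indexing past the optional header:

import re

def unobfuscate(symbol, map_lines):
    # Entries are indexed from zero, skipping the version header if present.
    entries = [l for l in map_lines if not l.startswith('BCSymbolMap Version')]
    m = re.match(r'__hidden#(\d+)_', symbol)
    return entries[int(m.group(1))] if m else symbol

# With obfuscated.2.map: unobfuscate('__hidden#0_', lines) would yield '_two'.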
diff --git a/test/tools/dsymutil/Inputs/obfuscated.arm64 b/test/tools/dsymutil/Inputs/obfuscated.arm64
new file mode 100644
index 0000000..8395798
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/obfuscated.arm64
Binary files differ
diff --git a/test/tools/dsymutil/Inputs/obfuscated.map b/test/tools/dsymutil/Inputs/obfuscated.map
new file mode 100644
index 0000000..30fed8b
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/obfuscated.map
@@ -0,0 +1,17 @@
+one
+two
+three
+four
+five
+six
+.str
+Apple LLVM version 7.0.0 (clang-700.2.38.2)
+main
+main.c
+/Users/steven/dev/alpena/tests/src
+one.c
+two.c
+three.c
+four.c
+five.c
+six.c
diff --git a/test/tools/dsymutil/X86/lc_build_version.test b/test/tools/dsymutil/X86/lc_build_version.test
index d0a8598..2c91cbb 100644
--- a/test/tools/dsymutil/X86/lc_build_version.test
+++ b/test/tools/dsymutil/X86/lc_build_version.test
@@ -1,5 +1,5 @@
 # RUN: dsymutil -f %p/../Inputs/lc_build_version.x86_64 -o - \
-# RUN:   | obj2yaml | FileCheck %s
+# RUN:   -oso-prepend-path=%p/.. | obj2yaml | FileCheck %s
 
 CHECK: LoadCommands:
 CHECK:   - cmd:             LC_BUILD_VERSION
diff --git a/test/tools/dsymutil/cmdline.test b/test/tools/dsymutil/cmdline.test
index c2ddead..60a1a0a 100644
--- a/test/tools/dsymutil/cmdline.test
+++ b/test/tools/dsymutil/cmdline.test
@@ -17,6 +17,7 @@
 HELP: -o=<filename>
 HELP: -oso-prepend-path=<path>
 HELP: -papertrail
+HELP: -symbol-map
 HELP: -symtab
 HELP: -toolchain
 HELP: -update
diff --git a/test/tools/gold/X86/emit-asm.ll b/test/tools/gold/X86/emit-asm.ll
new file mode 100644
index 0000000..40ff71f
--- /dev/null
+++ b/test/tools/gold/X86/emit-asm.ll
@@ -0,0 +1,25 @@
+; RUN: llvm-as %s -o %t.o
+
+; RUN: %gold -plugin %llvmshlibdir/LLVMgold%shlibext \
+; RUN:    -m elf_x86_64 --plugin-opt=emit-asm \
+; RUN:    -shared %t.o -o %t2.s
+; RUN: FileCheck --input-file %t2.s %s
+
+; RUN: %gold -plugin %llvmshlibdir/LLVMgold%shlibext \
+; RUN:    -m elf_x86_64 --plugin-opt=emit-asm --plugin-opt=lto-partitions=2\
+; RUN:    -shared %t.o -o %t2.s
+; RUN: cat %t2.s %t2.s1 > %t3.s
+; RUN: FileCheck --input-file %t3.s %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-DAG: f1:
+define void @f1() {
+  ret void
+}
+
+; CHECK-DAG: f2:
+define void @f2() {
+  ret void
+}
diff --git a/test/tools/gold/X86/split-dwarf.ll b/test/tools/gold/X86/split-dwarf.ll
index 1743654..6746dfd 100644
--- a/test/tools/gold/X86/split-dwarf.ll
+++ b/test/tools/gold/X86/split-dwarf.ll
@@ -10,8 +10,8 @@
 ; CHECK: DW_AT_GNU_dwo_name{{.*}}dwo_dir/1.dwo
 ; CHECK-NOT: DW_TAG_subprogram
 ; RUN: llvm-dwarfdump -debug-info %t/dwo_dir/1.dwo | FileCheck --check-prefix DWOCHECK %s
-; DWOCHECK: DW_AT_GNU_dwo_name{{.*}}dwo_dir/1.dwo
 ; DWOCHECK: DW_AT_name{{.*}}split-dwarf.c
+; DWOCHECK: DW_AT_GNU_dwo_name{{.*}}dwo_dir/1.dwo
 ; DWOCHECK: DW_TAG_subprogram
 
 ; RUN:rm -rf %t/dwo_dir
@@ -26,8 +26,8 @@
 ; LTOCHECK: DW_AT_GNU_dwo_name{{.*}}dwo_dir/0.dwo
 ; LTOCHECK-NOT: DW_TAG_subprogram
 ; RUN: llvm-dwarfdump -debug-info %t/dwo_dir/0.dwo | FileCheck --check-prefix LTODWOCHECK %s
-; LTODWOCHECK: DW_AT_GNU_dwo_name{{.*}}dwo_dir/0.dwo
 ; LTODWOCHECK: DW_AT_name{{.*}}split-dwarf.c
+; LTODWOCHECK: DW_AT_GNU_dwo_name{{.*}}dwo_dir/0.dwo
 ; LTODWOCHECK: DW_TAG_subprogram
 
 ; ModuleID = 'split-dwarf.c'
@@ -47,13 +47,13 @@
 !llvm.module.flags = !{!3, !4, !5}
 !llvm.ident = !{!6}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0 (https://github.com/llvm-mirror/clang.git b641d31365414ba3ea0305fdaa80369a9efb6bd9) (https://github.com/llvm-mirror/llvm.git 6165a776d1a8bb181be93f2dc97088f7a1abc405)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 7.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
 !1 = !DIFile(filename: "split-dwarf.c", directory: "/usr/local/google/home/yunlian/dwp/build/bin")
 !2 = !{}
 !3 = !{i32 2, !"Dwarf Version", i32 4}
 !4 = !{i32 2, !"Debug Info Version", i32 3}
 !5 = !{i32 1, !"wchar_size", i32 4}
-!6 = !{!"clang version 7.0.0 (https://github.com/llvm-mirror/clang.git b641d31365414ba3ea0305fdaa80369a9efb6bd9) (https://github.com/llvm-mirror/llvm.git 6165a776d1a8bb181be93f2dc97088f7a1abc405)"}
+!6 = !{!"clang version 7.0.0"}
 !7 = distinct !DISubprogram(name: "split_dwarf", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: false, unit: !0, retainedNodes: !2)
 !8 = !DISubroutineType(types: !9)
 !9 = !{!10}
diff --git a/test/tools/llvm-ar/Inputs/a-plus-b.a b/test/tools/llvm-ar/Inputs/a-plus-b.a
new file mode 100644
index 0000000..a58406e
--- /dev/null
+++ b/test/tools/llvm-ar/Inputs/a-plus-b.a
@@ -0,0 +1,6 @@
+!<thin>
+//                                              14        `
+a.txt/
+b.txt/
+/0              0           0     0     644     11        `
+/7              0           0     0     644     11        `
diff --git a/test/tools/llvm-ar/Inputs/a.txt b/test/tools/llvm-ar/Inputs/a.txt
new file mode 100644
index 0000000..12d91b6
--- /dev/null
+++ b/test/tools/llvm-ar/Inputs/a.txt
@@ -0,0 +1 @@
+a-contents
diff --git a/test/tools/llvm-ar/Inputs/b.txt b/test/tools/llvm-ar/Inputs/b.txt
new file mode 100644
index 0000000..aee476f
--- /dev/null
+++ b/test/tools/llvm-ar/Inputs/b.txt
@@ -0,0 +1 @@
+b-contents
diff --git a/test/tools/llvm-ar/Inputs/c.txt b/test/tools/llvm-ar/Inputs/c.txt
new file mode 100644
index 0000000..a80e36bc3
--- /dev/null
+++ b/test/tools/llvm-ar/Inputs/c.txt
@@ -0,0 +1 @@
+c-contents
diff --git a/test/tools/llvm-ar/Inputs/d.txt b/test/tools/llvm-ar/Inputs/d.txt
new file mode 100644
index 0000000..20d7aa0
--- /dev/null
+++ b/test/tools/llvm-ar/Inputs/d.txt
@@ -0,0 +1 @@
+d-contents
diff --git a/test/tools/llvm-ar/Inputs/nested-thin-archive.a b/test/tools/llvm-ar/Inputs/nested-thin-archive.a
new file mode 100644
index 0000000..312e8de
--- /dev/null
+++ b/test/tools/llvm-ar/Inputs/nested-thin-archive.a
@@ -0,0 +1,7 @@
+!<thin>
+//                                              20        `
+a-plus-b.a/
+c.txt/
+
+/0              0           0     0     644     202       `
+/12             0           0     0     644     11        `
diff --git a/test/tools/llvm-ar/flatten-thin-archive-recursive.test b/test/tools/llvm-ar/flatten-thin-archive-recursive.test
new file mode 100644
index 0000000..fdd752d
--- /dev/null
+++ b/test/tools/llvm-ar/flatten-thin-archive-recursive.test
@@ -0,0 +1,13 @@
+# Since llvm-ar cannot create thin archives that contain other thin archives,
+# nested-thin-archive.a is a manually constructed thin archive that contains
+# another (unflattened) thin archive.
+# This test ensures that thin archives are recursively flattened.
+
+RUN: rm -f %t.a
+RUN: llvm-ar rcsT %t.a %S/Inputs/nested-thin-archive.a %S/Inputs/d.txt
+RUN: llvm-ar t %t.a | FileCheck %s
+
+CHECK:      a.txt
+CHECK-NEXT: b.txt
+CHECK-NEXT: c.txt
+CHECK-NEXT: d.txt
diff --git a/test/tools/llvm-ar/flatten-thin-archive.test b/test/tools/llvm-ar/flatten-thin-archive.test
new file mode 100644
index 0000000..430f48f
--- /dev/null
+++ b/test/tools/llvm-ar/flatten-thin-archive.test
@@ -0,0 +1,18 @@
+# This test creates a thin archive that contains a thin archive, a regular
+# archive, and a file.
+#
+# The inner thin archive should be flattened, but the regular archive should
+# not. The order of members in the archive should match the input order, with
+# flattened members appearing together.
+
+RUN: touch %t-a.txt %t-b.txt %t-c.txt %t-d.txt %t-e.txt
+RUN: rm -f %t-a-plus-b.a %t.a
+RUN: llvm-ar rcsT %t-a-plus-b.a %t-a.txt %t-b.txt
+RUN: llvm-ar rcs %t-d-plus-e.a %t-d.txt %t-e.txt
+RUN: llvm-ar rcsT %t.a %t-a-plus-b.a %t-c.txt %t-d-plus-e.a
+RUN: llvm-ar t %t.a | FileCheck %s
+
+CHECK:      a.txt
+CHECK-NEXT: b.txt
+CHECK-NEXT: c.txt
+CHECK-NEXT: -d-plus-e.a
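Taken together, the two tests above pin down the flattening rule: when building a thin archive (the T modifier), members that are themselves thin archives are replaced by their own members, recursively, while regular archives and plain files are added as-is. A sketch of that rule in Python (list_members is a hypothetical helper standing in for reading an archive's member table):

THIN_MAGIC = b'!<thin>\n'

def is_thin_archive(path):
    with open(path, 'rb') as f:
        return f.read(len(THIN_MAGIC)) == THIN_MAGIC

def flatten(inputs, list_members):
    # list_members(path) -> member paths of an archive (assumed helper).
    out = []
    for path in inputs:
        if is_thin_archive(path):
            # Nested thin archives are dissolved into their members.
            out.extend(flatten(list_members(path), list_members))
        else:
            # Regular archives and ordinary files are kept intact.
            out.append(path)
    return out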
diff --git a/test/tools/llvm-config/booleans.test b/test/tools/llvm-config/booleans.test
index b28f293..1db3383 100644
--- a/test/tools/llvm-config/booleans.test
+++ b/test/tools/llvm-config/booleans.test
@@ -18,7 +18,7 @@
 CHECK-BUILD-MODE-NOT: warning
 
 RUN: llvm-config --build-system 2>&1 | FileCheck --check-prefix=CHECK-BUILD-SYSTEM %s
-CHECK-BUILD-SYSTEM: cmake
+CHECK-BUILD-SYSTEM: {{cmake|gn}}
 CHECK-BUILD-SYSTEM-NOT: error:
 CHECK-BUILD-SYSTEM-NOT: warning
 
diff --git a/test/tools/llvm-dwarfdump/X86/debug_info_addrx.s b/test/tools/llvm-dwarfdump/X86/debug_info_addrx.s
new file mode 100644
index 0000000..e0e468c
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/debug_info_addrx.s
@@ -0,0 +1,77 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-pc-linux -o %t.o
+# RUN: llvm-dwarfdump -debug-info %t.o | FileCheck %s
+# RUN: llvm-dwarfdump -debug-info %t.o -v | FileCheck --check-prefix=VERBOSE %s
+
+# CHECK: DW_TAG_compile_unit
+# CHECK:   DW_AT_low_pc                                              (0x0000000000000000)
+# VERBOSE: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x0000000000000000 ".text")
+# FIXME: There is a debug_addr section; it's just that the index is outside
+#        its bounds (both the bounds of the section and the range defined by
+#        the header for the debug_addr contribution for this CU).
+# CHECK:   DW_AT_low_pc                 (indexed (00000001) address = <no .debug_addr section>)
+# VERBOSE: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000001) address = <no .debug_addr section>)
+
+# CHECK: DW_TAG_compile_unit
+# FIXME: Should error with "no debug_addr contribution" rather than parsing
+#        debug_addr from the start, incorrectly interpreting the header bytes
+#        as an address.
+# CHECK:   DW_AT_low_pc                                              (0x000800050000000c)
+# VERBOSE: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x000800050000000c)
+
+	.globl	foo                     # -- Begin function foo
+foo:                                    # @foo
+.Lfunc_begin0:
+	retq
+.Lfunc_end0:
+	.section	.debug_abbrev,"",@progbits
+	.byte	1                       # Abbreviation Code
+	.byte	17                      # DW_TAG_compile_unit
+	.byte	0                       # DW_CHILDREN_no
+	.byte	115                     # DW_AT_addr_base
+	.byte	23                      # DW_FORM_sec_offset
+	.byte	17                      # DW_AT_low_pc
+	.byte	27                      # DW_FORM_addrx
+	.byte	17                      # DW_AT_low_pc
+	.byte	27                      # DW_FORM_addrx
+	.byte	0                       # EOM(1)
+	.byte	0                       # EOM(2)
+	.byte	2                       # Abbreviation Code
+	.byte	17                      # DW_TAG_compile_unit
+	.byte	0                       # DW_CHILDREN_no
+	.byte	17                      # DW_AT_low_pc
+	.byte	27                      # DW_FORM_addrx
+	.byte	0                       # EOM(1)
+	.byte	0                       # EOM(2)
+	.byte	0                       # EOM(3)
+	.section	.debug_info,"",@progbits
+.Lcu_begin0:
+	.long	.Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+	.short	5                       # DWARF version number
+	.byte	1                       # DWARF Unit Type
+	.byte	8                       # Address Size (in bytes)
+	.long	.debug_abbrev           # Offset Into Abbrev. Section
+	.byte	1                       # Abbrev [1] 0xc:0x23 DW_TAG_compile_unit
+	.long	.Laddr_table_base0      # DW_AT_addr_base
+	.byte	0                       # DW_AT_low_pc
+	.byte	1                       # DW_AT_low_pc
+.Ldebug_info_end0:
+.Lcu_begin1:
+	.long	.Ldebug_info_end1-.Ldebug_info_start1 # Length of Unit
+.Ldebug_info_start1:
+	.short	5                       # DWARF version number
+	.byte	1                       # DWARF Unit Type
+	.byte	8                       # Address Size (in bytes)
+	.long	.debug_abbrev           # Offset Into Abbrev. Section
+	.byte	2                       # Abbrev [2] 0xc:0x23 DW_TAG_compile_unit
+	.long	.Laddr_table_base0      # DW_AT_addr_base
+	.byte	0                       # DW_AT_low_pc
+.Ldebug_info_end1:
+	.section	.debug_macinfo,"",@progbits
+	.byte	0                       # End Of Macro List Mark
+	.section	.debug_addr,"",@progbits
+	.long	12                      # Length of Pool
+	.short	5                       # DWARF version number
+	.byte	8                       # Address Size (in bytes)
+	.byte	0                       # Segment selector size
+.Laddr_table_base0:
+	.quad	.Lfunc_begin0
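The FIXMEs above come down to how a DW_FORM_addrx index should be resolved: the entry lives at the CU's DW_AT_addr_base plus index times the address size, and a valid index must fall inside both the .debug_addr section and the contribution delimited by the header preceding addr_base. A sketch of that bounds-checked lookup under the DWARF v5 layout used in this file (8-byte header: 4-byte unit_length, 2-byte version, address size, segment selector size; names are illustrative):

import struct

def resolve_addrx(debug_addr, addr_base, index, addr_size=8):
    # addr_base points just past the contribution header; unit_length counts
    # everything after the length field itself.
    unit_length, = struct.unpack_from('<I', debug_addr, addr_base - 8)
    contribution_end = (addr_base - 4) + unit_length
    offset = addr_base + index * addr_size
    if offset + addr_size > min(len(debug_addr), contribution_end):
        raise IndexError('addrx index outside the debug_addr contribution')
    return struct.unpack_from('<Q', debug_addr, offset)[0]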
diff --git a/test/tools/llvm-dwarfdump/X86/debug_tls_relocs.s b/test/tools/llvm-dwarfdump/X86/debug_tls_relocs.s
new file mode 100644
index 0000000..d432ecf
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/debug_tls_relocs.s
@@ -0,0 +1,68 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-pc-linux -o %t.o
+# RUN: llvm-dwarfdump -v %t.o | FileCheck %s
+
+# CHECK-NOT: error
+# CHECK: DW_AT_location [DW_FORM_exprloc] (DW_OP_const8u 0x0, DW_OP_GNU_push_tls_address)
+# CHECK: DW_AT_location [DW_FORM_exprloc] (DW_OP_const4u 0x0, DW_OP_GNU_push_tls_address)
+
+.section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "X"
+
+.section .debug_abbrev,"",@progbits
+ .byte 1                # Abbreviation Code
+ .byte 17               # DW_TAG_compile_unit
+ .byte 1                # DW_CHILDREN_yes
+ .byte 37               # DW_AT_producer
+ .byte 14               # DW_FORM_strp
+ .byte 19               # DW_AT_language
+ .byte 5                # DW_FORM_data2
+ .byte 3                # DW_AT_name
+ .byte 14               # DW_FORM_strp
+ .byte 0                # EOM(1)
+ .byte 0                # EOM(2)
+
+ .byte 2                # Abbreviation Code
+ .byte 52               # DW_TAG_variable
+ .byte 0                # DW_CHILDREN_no
+ .byte 3                # DW_AT_name
+ .byte 14               # DW_FORM_strp
+ .byte 73               # DW_AT_type
+ .byte 19               # DW_FORM_ref4
+ .byte 63               # DW_AT_external
+ .byte 25               # DW_FORM_flag_present
+ .byte 2                # DW_AT_location
+ .byte 24               # DW_FORM_exprloc
+ .byte 0                # EOM(1)
+ .byte 0                # EOM(2)
+
+ .byte 0                # EOM(3)
+
+.section .debug_info,"",@progbits
+ .long 49               # Length of Unit
+ .short 4               # DWARF version number
+ .long .debug_abbrev    # Offset Into Abbrev. Section
+ .byte 8                # Address Size (in bytes)
+ .byte 1                # Abbrev [1] 0xb:0x6c DW_TAG_compile_unit
+ .long .Linfo_string0   # DW_AT_producer
+ .short 4               # DW_AT_language
+ .long .Linfo_string0   # DW_AT_name
+
+ .byte 2                # Abbrev [2] 0x2a:0x16 DW_TAG_variable
+ .long .Linfo_string0   # DW_AT_name
+ .long 0                # DW_AT_type
+ .byte 10               # DW_AT_location
+ .byte 14
+ .quad tdata1@DTPOFF
+ .byte 224
+
+ .byte 2                # Abbrev [2] 0x47:0x16 DW_TAG_variable
+ .long .Linfo_string0   # DW_AT_name
+ .long 0                # DW_AT_type
+ .byte 6                # DW_AT_location
+ .byte 12
+ .long tdata2@DTPOFF
+ .byte 224
+
+ .byte 0                # End Of Children Mark
+ .byte 0                # End Of Children Mark
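For reference, the exprlocs this test emits decode as follows: opcode 14 (0x0e) is DW_OP_const8u with an 8-byte operand, opcode 12 (0x0c) is DW_OP_const4u with a 4-byte operand, and 224 (0xe0) is DW_OP_GNU_push_tls_address. A minimal decoder sketch covering just these three opcodes:

import struct

OPS = {0x0c: ('DW_OP_const4u', 4),
       0x0e: ('DW_OP_const8u', 8),
       0xe0: ('DW_OP_GNU_push_tls_address', 0)}

def decode_exprloc(data):
    # Handles only the opcodes used above; real expressions have many more.
    i, parts = 0, []
    while i < len(data):
        name, width = OPS[data[i]]
        i += 1
        if width:
            value, = struct.unpack_from('<I' if width == 4 else '<Q', data, i)
            parts.append('%s 0x%x' % (name, value))
            i += width
        else:
            parts.append(name)
    return ', '.join(parts)

# decode_exprloc(bytes([0x0e]) + bytes(8) + bytes([0xe0]))
# -> 'DW_OP_const8u 0x0, DW_OP_GNU_push_tls_address'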
diff --git a/test/tools/llvm-dwarfdump/X86/prettyprint_types.s b/test/tools/llvm-dwarfdump/X86/prettyprint_types.s
index b3e871e..afeee4c 100644
--- a/test/tools/llvm-dwarfdump/X86/prettyprint_types.s
+++ b/test/tools/llvm-dwarfdump/X86/prettyprint_types.s
@@ -19,13 +19,41 @@
 # CHECK:   DW_AT_type{{.*}}"int foo::*"
 
 # array_type
-# Testing lower_bound, upper_bound, lower and upper, lower and count, and count separately.
-# CHECK:   DW_AT_type{{.*}}"int[1-][2][1-2][1-3][2]"
+# CHECK:   DW_AT_type{{.*}}"int
+# Testing with a default lower bound of 0 and the following explicit bounds:
+#   lower_bound(1)
+# CHECK-NOT: {{.}}
+# CHECK-SAME: {{\[}}[1, ?)]
+#   upper_bound(2)
+# CHECK-NOT: {{.}}
+# CHECK-SAME: [3]
+#   lower(1) and upper(2)
+# CHECK-NOT: {{.}}
+# CHECK-SAME: {{\[}}[1, 3)]
+#   lower(1) and count(3)
+# CHECK-NOT: {{.}}
+# CHECK-SAME: {{\[}}[1, 4)]
+#   lower(0) and count(4) - testing that the lower bound matching language
+#   default is not rendered
+# CHECK-NOT: {{.}}
+# CHECK-SAME: [4]
+#   count(2)
+# CHECK-SAME: [2]
+#   no attributes
+# CHECK-NOT: {{.}}
+# CHECK-SAME: []{{"\)$}}
+
 
 # subroutine types
 # CHECK:   DW_AT_type{{.*}}"int()"
 # CHECK:   DW_AT_type{{.*}}"void(int)"
 # CHECK:   DW_AT_type{{.*}}"void(int, int)"
+
+# array_type in a language with a default lower bound of 1 (instead of 0) and
+# an upper bound of 2. This describes an array with 2 elements (whereas with a
+# default lower bound of 0 it would be an array of 3 elements).
+# CHECK: DW_AT_type{{.*}}"int[2]"
+
 	.section	.debug_str,"MS",@progbits,1
 .Lint_name:
 	.asciz	"int"
@@ -155,6 +183,11 @@
 	.byte	19                      # DW_FORM_ref4
 	.byte	0                       # EOM(1)
 	.byte	0                       # EOM(2)
+	.byte	18                      # Abbreviation Code
+	.byte	0x21                    # DW_TAG_subrange_type
+	.byte	0                       # DW_CHILDREN_no
+	.byte	0                       # EOM(1)
+	.byte	0                       # EOM(2)
 	.byte	0                       # EOM(3)
 	.section	.debug_info,"",@progbits
 .Lcu_begin:
@@ -196,9 +229,13 @@
 	.byte   2                       #     DW_AT_upper_bound
 	.byte	12                      #   DW_AT_subrange_type
 	.byte   1                       #     DW_AT_lower_bound
-	.byte   2                       #     DW_AT_count
+	.byte   3                       #     DW_AT_count
+	.byte	12                      #   DW_AT_subrange_type
+	.byte   0                       #     DW_AT_lower_bound
+	.byte   4                       #     DW_AT_count
 	.byte	13                      #   DW_AT_subrange_type
 	.byte   2                       #     DW_AT_count
+	.byte	18                      #   DW_AT_subrange_type
 	.byte	0                       # End Of Children Mark
 .Lsub_int_empty_type:
 	.byte	15                      # DW_TAG_subroutine_type
@@ -236,3 +273,22 @@
 	.long	.Lsub_void_int_int_type - .Lcu_begin #   DW_AT_type
 	.byte	0                       # End Of Children Mark
 .Lunit_end:
+.Lcu2_begin:
+	.long	.Lcu2_unit_end - .Lcu2_unit_start # Length of Unit
+.Lcu2_unit_start:
+	.short	4                       # DWARF version number
+	.long	.debug_abbrev           # Offset Into Abbrev. Section
+	.byte	8                       # Address Size (in bytes)
+	.byte	1                       # DW_TAG_compile_unit
+	.short	13                      #   DW_AT_language
+.Lcu2_int_type:
+	.byte	2                       # DW_TAG_base_type
+	.long	.Lint_name              #   DW_AT_name
+.Lcu2_array_type:
+	.byte	8                       # DW_TAG_array_type
+	.long	.Lcu2_int_type - .Lcu2_begin #   DW_AT_type
+	.byte	10                      #   DW_AT_subrange_type
+	.byte   2                       #     DW_AT_upper_bound
+	.byte	3                       # DW_TAG_variable
+	.long	.Lcu2_array_type - .Lcu2_begin #   DW_AT_type
+.Lcu2_unit_end:
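All the bound-rendering cases above reduce to one rule: a dimension has a known extent only when DW_AT_count is present, or when both bounds are known (extent = upper - lower + 1), with the lower bound defaulting per language: 0 for the C family, 1 for the language of the second CU. A small sketch of that computation, with None standing for a missing attribute:

def dimension_extent(default_lower, lower=None, upper=None, count=None):
    # Mirrors the subrange cases exercised above; None means unknown.
    if count is not None:
        return count
    low = default_lower if lower is None else lower
    if upper is None:
        return None  # e.g. only lower_bound(1), printed as [1, ?)
    return upper - low + 1

# C-family default lower bound of 0: upper_bound(2) gives 3 elements.
assert dimension_extent(0, upper=2) == 3
# Default lower bound of 1 (second CU): the same upper_bound(2) gives 2.
assert dimension_extent(1, upper=2) == 2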
diff --git a/test/tools/llvm-dwarfdump/X86/verify_debug_info2.s b/test/tools/llvm-dwarfdump/X86/verify_debug_info2.s
index 71b9557..8ac1052 100644
--- a/test/tools/llvm-dwarfdump/X86/verify_debug_info2.s
+++ b/test/tools/llvm-dwarfdump/X86/verify_debug_info2.s
@@ -14,7 +14,7 @@
   .byte  0x4   # DW_AT_language [DW_FORM_data1] (DW_LANG_C_plus_plus)
   .long  0     # DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000000] = "test")
   .long  0     # DW_AT_comp_dir [DW_FORM_strp] ( .debug_str[0x00000000] = "test")
-  .long  0     # DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+  .long  0     # DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000 ".text")
   .long  0     # DW_AT_high_pc [DW_FORM_data4] (0x00000000)
 
 .section  .debug_abbrev,"",@progbits
diff --git a/test/tools/llvm-elfabi/binary-read-arch.test b/test/tools/llvm-elfabi/binary-read-arch.test
new file mode 100644
index 0000000..ecb2fb8
--- /dev/null
+++ b/test/tools/llvm-elfabi/binary-read-arch.test
@@ -0,0 +1,15 @@
+# RUN: yaml2obj %s > %t
+# RUN: llvm-elfabi %t --emit-tbe=- | FileCheck %s
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS64
+  Data:            ELFDATA2LSB
+  Type:            ET_DYN
+  Machine:         EM_X86_64
+
+# CHECK:      --- !tapi-tbe
+# CHECK-NEXT: TbeVersion: {{[1-9]\d*\.(0|([1-9]\d*))}}
+# CHECK-NEXT: Arch: x86_64
+# CHECK-NEXT: Symbols: {}
+# CHECK-NEXT: ...
diff --git a/test/tools/llvm-elfabi/fail-file-open.test b/test/tools/llvm-elfabi/fail-file-open.test
new file mode 100644
index 0000000..b4019af
--- /dev/null
+++ b/test/tools/llvm-elfabi/fail-file-open.test
@@ -0,0 +1,5 @@
+# RUN: not llvm-elfabi %s.NotAFileInTestingDir --emit-tbe=%t 2>&1 | FileCheck %s
+
+This file will not be read. An invalid file path is fed to llvm-elfabi.
+
+# CHECK: error: Could not open `{{.*}}.NotAFileInTestingDir`
diff --git a/test/tools/llvm-elfabi/read-tbe-as-elf.test b/test/tools/llvm-elfabi/read-tbe-as-elf.test
new file mode 100644
index 0000000..eaddd38
--- /dev/null
+++ b/test/tools/llvm-elfabi/read-tbe-as-elf.test
@@ -0,0 +1,16 @@
+# RUN: not llvm-elfabi --elf %s --emit-tbe=%t  2>&1 | FileCheck %s
+
+--- !tapi-tbe
+SoName: somelib.so
+TbeVersion: 1.0
+Arch: x86_64
+Symbols:
+  foo: { Type: Func }
+  bar: { Type: Object, Size: 42 }
+  baz: { Type: Object, Size: 8 }
+  not: { Type: Object, Undefined: true, Size: 128 }
+  nor: { Type: Func, Undefined: true }
+...
+
+# CHECK: The file was not recognized as a valid object file
+# CHECK: No file readers succeeded reading `{{.*}}read-tbe-as-elf.test` (unsupported/malformed file?)
diff --git a/test/tools/llvm-elfabi/read-tbe-as-tbe.test b/test/tools/llvm-elfabi/read-tbe-as-tbe.test
new file mode 100644
index 0000000..a5e2d44
--- /dev/null
+++ b/test/tools/llvm-elfabi/read-tbe-as-tbe.test
@@ -0,0 +1,13 @@
+# RUN: llvm-elfabi --tbe %s --emit-tbe=- | FileCheck %s
+
+--- !tapi-tbe
+TbeVersion: 1.0
+Arch: AArch64
+Symbols: {}
+...
+
+# CHECK:      --- !tapi-tbe
+# CHECK-NEXT: TbeVersion: {{[1-9]\d*\.(0|([1-9]\d*))}}
+# CHECK-NEXT: Arch: AArch64
+# CHECK-NEXT: Symbols: {}
+# CHECK-NEXT: ...
diff --git a/test/tools/llvm-elfabi/read-unsupported-file.test b/test/tools/llvm-elfabi/read-unsupported-file.test
new file mode 100644
index 0000000..4ebe1bc
--- /dev/null
+++ b/test/tools/llvm-elfabi/read-unsupported-file.test
@@ -0,0 +1,7 @@
+# RUN: not llvm-elfabi %s --emit-tbe=%t 2>&1 | FileCheck %s
+
+This is just some text that cannot be read by llvm-elfabi.
+
+# CHECK: The file was not recognized as a valid object file
+# CHECK: YAML failed reading as TBE
+# CHECK: No file readers succeeded reading `{{.*}}` (unsupported/malformed file?)
diff --git a/test/tools/llvm-elfabi/replace-soname-tbe.test b/test/tools/llvm-elfabi/replace-soname-tbe.test
new file mode 100644
index 0000000..71d50a8
--- /dev/null
+++ b/test/tools/llvm-elfabi/replace-soname-tbe.test
@@ -0,0 +1,16 @@
+# RUN: yaml2obj %s > %t
+# RUN: llvm-elfabi %t --emit-tbe=- --soname=best.so | FileCheck %s
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS64
+  Data:            ELFDATA2LSB
+  Type:            ET_DYN
+  Machine:         EM_AARCH64
+
+# CHECK:      --- !tapi-tbe
+# CHECK-NEXT: TbeVersion: {{[1-9]\d*\.(0|([1-9]\d*))}}
+# CHECK-NEXT: SoName: best.so
+# CHECK-NEXT: Arch: AArch64
+# CHECK-NEXT: Symbols: {}
+# CHECK-NEXT: ...
diff --git a/test/tools/llvm-elfabi/tbe-emits-current-version.test b/test/tools/llvm-elfabi/tbe-emits-current-version.test
new file mode 100644
index 0000000..12a5476
--- /dev/null
+++ b/test/tools/llvm-elfabi/tbe-emits-current-version.test
@@ -0,0 +1,13 @@
+# RUN: llvm-elfabi %s --emit-tbe=- | FileCheck %s
+
+--- !tapi-tbe
+TbeVersion: 1.0
+Arch: AArch64
+Symbols: {}
+...
+
+# As the tbe reader/writer is updated, update this check to ensure --emit-tbe
+# uses the latest tbe writer by default.
+
+# CHECK:      --- !tapi-tbe
+# CHECK-NEXT: TbeVersion: 1.0
diff --git a/test/tools/llvm-elfabi/tbe-read-basic.test b/test/tools/llvm-elfabi/tbe-read-basic.test
new file mode 100644
index 0000000..1599f5a
--- /dev/null
+++ b/test/tools/llvm-elfabi/tbe-read-basic.test
@@ -0,0 +1,25 @@
+# RUN: llvm-elfabi %s --emit-tbe=- | FileCheck %s
+
+--- !tapi-tbe
+SoName: somelib.so
+TbeVersion: 1.0
+Arch: x86_64
+Symbols:
+  foo: { Type: Func }
+  bar: { Type: Object, Size: 42 }
+  baz: { Type: Object, Size: 8 }
+  not: { Type: Object, Undefined: true, Size: 128 }
+  nor: { Type: Func, Undefined: true }
+...
+
+# CHECK:      --- !tapi-tbe
+# CHECK-NEXT: TbeVersion: {{[1-9]\d*\.(0|([1-9]\d*))}}
+# CHECK-NEXT: SoName: somelib.so
+# CHECK-NEXT: Arch: x86_64
+# CHECK-NEXT: Symbols:
+# CHECK-NEXT:   bar: { Type: Object, Size: 42 }
+# CHECK-NEXT:   baz: { Type: Object, Size: 8 }
+# CHECK-NEXT:   foo: { Type: Func }
+# CHECK-NEXT:   nor: { Type: Func, Undefined: true }
+# CHECK-NEXT:   not: { Type: Object, Size: 128, Undefined: true }
+# CHECK-NEXT: ...
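Note what the CHECK block verifies beyond round-tripping: the input lists foo before bar, but the emitted tbe orders symbols lexicographically (bar, baz, foo, nor, not), so the writer canonicalizes rather than preserving input order. A toy sketch of that normalization:

symbols = {
    'foo': {'Type': 'Func'},
    'bar': {'Type': 'Object', 'Size': 42},
    'nor': {'Type': 'Func', 'Undefined': True},
}

# Emit entries sorted by name so the output is stable no matter what order
# the input file listed them in.
for name in sorted(symbols):
    print('%s: %s' % (name, symbols[name]))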
diff --git a/test/tools/llvm-mca/AArch64/CortexA57/direct-branch.s b/test/tools/llvm-mca/AArch64/Cortex/direct-branch.s
similarity index 100%
rename from test/tools/llvm-mca/AArch64/CortexA57/direct-branch.s
rename to test/tools/llvm-mca/AArch64/Cortex/direct-branch.s
diff --git a/test/tools/llvm-mca/AArch64/CortexA57/shifted-register.s b/test/tools/llvm-mca/AArch64/Cortex/shifted-register.s
similarity index 100%
rename from test/tools/llvm-mca/AArch64/CortexA57/shifted-register.s
rename to test/tools/llvm-mca/AArch64/Cortex/shifted-register.s
diff --git a/test/tools/llvm-mca/AArch64/Exynos/direct-branch.s b/test/tools/llvm-mca/AArch64/Exynos/direct-branch.s
index 7b4682b..cd31d0b 100644
--- a/test/tools/llvm-mca/AArch64/Exynos/direct-branch.s
+++ b/test/tools/llvm-mca/AArch64/Exynos/direct-branch.s
@@ -1,15 +1,16 @@
 # NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
 # RUN: llvm-mca -mtriple=aarch64-linux-gnu -mcpu=exynos-m1 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,M1
 # RUN: llvm-mca -mtriple=aarch64-linux-gnu -mcpu=exynos-m3 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,M3
+# RUN: llvm-mca -mtriple=aarch64-linux-gnu -mcpu=exynos-m4 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,M4
 
-1:
-  b	1b
+  b	main
 
 # ALL:      Iterations:        100
 # ALL-NEXT: Instructions:      100
 
 # M1-NEXT:  Total Cycles:      26
 # M3-NEXT:  Total Cycles:      18
+# M4-NEXT:  Total Cycles:      18
 
 # ALL-NEXT: Total uOps:        100
 
@@ -23,6 +24,11 @@
 # M3-NEXT:  IPC:               5.56
 # M3-NEXT:  Block RThroughput: 0.2
 
+# M4:       Dispatch Width:    6
+# M4-NEXT:  uOps Per Cycle:    5.56
+# M4-NEXT:  IPC:               5.56
+# M4-NEXT:  Block RThroughput: 0.2
+
 # ALL:      Instruction Info:
 # ALL-NEXT: [1]: #uOps
 # ALL-NEXT: [2]: Latency
@@ -33,5 +39,6 @@
 
 # ALL:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
 
-# M1-NEXT:   1      0     0.25                        b	.Ltmp0
-# M3-NEXT:   1      0     0.17                        b	.Ltmp0
+# M1-NEXT:   1      0     0.25                        b	main
+# M3-NEXT:   1      0     0.17                        b	main
+# M4-NEXT:   1      0     0.17                        b	main
diff --git a/test/tools/llvm-mca/AArch64/Exynos/extended-register.s b/test/tools/llvm-mca/AArch64/Exynos/extended-register.s
index c580a8d..7044f64 100644
--- a/test/tools/llvm-mca/AArch64/Exynos/extended-register.s
+++ b/test/tools/llvm-mca/AArch64/Exynos/extended-register.s
@@ -1,21 +1,23 @@
 # NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
-# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m1  -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM1
-# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m3  -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM3
+# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m1 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM1
+# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m3 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM3
+# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m4 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM4
 
   sub	w0, w1, w2, sxtb #0
   add	x3, x4, w5, sxth #1
   subs	x6, x7, w8, uxtw #2
   adds	x9, x10, x11, uxtx #3
-  sub	w12, w13, w14, uxtb #3
-  add	x15, x16, w17, uxth #2
-  subs	x18, x19, w20, sxtw #1
-  adds	x21, x22, x23, sxtx #0
+  sub	w12, w13, w14, uxtb #0
+  add	x15, x16, w17, uxth #1
+  subs	x18, x19, w20, sxtw #2
+  adds	x21, x22, x23, sxtx #3
 
 # ALL:      Iterations:        100
 # ALL-NEXT: Instructions:      800
 
 # EM1-NEXT: Total Cycles:      403
-# EM3-NEXT: Total Cycles:      303
+# EM3-NEXT: Total Cycles:      304
+# EM4-NEXT: Total Cycles:      304
 
 # ALL-NEXT: Total uOps:        800
 
@@ -25,10 +27,15 @@
 # EM1-NEXT: Block RThroughput: 4.0
 
 # EM3:      Dispatch Width:    6
-# EM3-NEXT: uOps Per Cycle:    2.64
-# EM3-NEXT: IPC:               2.64
+# EM3-NEXT: uOps Per Cycle:    2.63
+# EM3-NEXT: IPC:               2.63
 # EM3-NEXT: Block RThroughput: 3.0
 
+# EM4:      Dispatch Width:    6
+# EM4-NEXT: uOps Per Cycle:    2.63
+# EM4-NEXT: IPC:               2.63
+# EM4-NEXT: Block RThroughput: 3.0
+
 # ALL:      Instruction Info:
 # ALL-NEXT: [1]: #uOps
 # ALL-NEXT: [2]: Latency
@@ -43,16 +50,25 @@
 # EM1-NEXT:  1      2     0.67                        add	x3, x4, w5, sxth #1
 # EM1-NEXT:  1      1     0.33                        subs	x6, x7, w8, uxtw #2
 # EM1-NEXT:  1      1     0.33                        adds	x9, x10, x11, uxtx #3
-# EM1-NEXT:  1      2     0.67                        sub	w12, w13, w14, uxtb #3
-# EM1-NEXT:  1      2     0.67                        add	x15, x16, w17, uxth #2
-# EM1-NEXT:  1      2     0.67                        subs	x18, x19, w20, sxtw #1
-# EM1-NEXT:  1      1     0.33                        adds	x21, x22, x23, sxtx
+# EM1-NEXT:  1      1     0.33                        sub	w12, w13, w14, uxtb
+# EM1-NEXT:  1      2     0.67                        add	x15, x16, w17, uxth #1
+# EM1-NEXT:  1      2     0.67                        subs	x18, x19, w20, sxtw #2
+# EM1-NEXT:  1      2     0.67                        adds	x21, x22, x23, sxtx #3
 
 # EM3-NEXT:  1      1     0.25                        sub	w0, w1, w2, sxtb
 # EM3-NEXT:  1      2     0.50                        add	x3, x4, w5, sxth #1
 # EM3-NEXT:  1      1     0.25                        subs	x6, x7, w8, uxtw #2
 # EM3-NEXT:  1      1     0.25                        adds	x9, x10, x11, uxtx #3
-# EM3-NEXT:  1      2     0.50                        sub	w12, w13, w14, uxtb #3
-# EM3-NEXT:  1      2     0.50                        add	x15, x16, w17, uxth #2
-# EM3-NEXT:  1      2     0.50                        subs	x18, x19, w20, sxtw #1
-# EM3-NEXT:  1      1     0.25                        adds	x21, x22, x23, sxtx
+# EM3-NEXT:  1      1     0.25                        sub	w12, w13, w14, uxtb
+# EM3-NEXT:  1      2     0.50                        add	x15, x16, w17, uxth #1
+# EM3-NEXT:  1      2     0.50                        subs	x18, x19, w20, sxtw #2
+# EM3-NEXT:  1      2     0.50                        adds	x21, x22, x23, sxtx #3
+
+# EM4-NEXT:  1      1     0.25                        sub	w0, w1, w2, sxtb
+# EM4-NEXT:  1      2     0.50                        add	x3, x4, w5, sxth #1
+# EM4-NEXT:  1      1     0.25                        subs	x6, x7, w8, uxtw #2
+# EM4-NEXT:  1      1     0.25                        adds	x9, x10, x11, uxtx #3
+# EM4-NEXT:  1      1     0.25                        sub	w12, w13, w14, uxtb
+# EM4-NEXT:  1      2     0.50                        add	x15, x16, w17, uxth #1
+# EM4-NEXT:  1      2     0.50                        subs	x18, x19, w20, sxtw #2
+# EM4-NEXT:  1      2     0.50                        adds	x21, x22, x23, sxtx #3
diff --git a/test/tools/llvm-mca/AArch64/Exynos/register-offset.s b/test/tools/llvm-mca/AArch64/Exynos/register-offset.s
deleted file mode 100644
index b31b396..0000000
--- a/test/tools/llvm-mca/AArch64/Exynos/register-offset.s
+++ /dev/null
@@ -1,43 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
-# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m1  -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM1
-# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m3  -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM3
-
-  ldr	w0, [x1, x2, lsl #0]
-  str	x3, [x4, w5, sxtw #0]
-  ldr	x6, [x7, w8, uxtw #3]
-  str	x9, [x10, x11, lsl #3]
-
-# ALL:      Iterations:        100
-# ALL-NEXT: Instructions:      400
-# ALL-NEXT: Total Cycles:      308
-# ALL-NEXT: Total uOps:        600
-
-# EM1:      Dispatch Width:    4
-# EM1-NEXT: uOps Per Cycle:    1.95
-# EM1-NEXT: IPC:               1.30
-# EM1-NEXT: Block RThroughput: 2.0
-
-# EM3:      Dispatch Width:    6
-# EM3-NEXT: uOps Per Cycle:    1.95
-# EM3-NEXT: IPC:               1.30
-# EM3-NEXT: Block RThroughput: 2.0
-
-# ALL:      Instruction Info:
-# ALL-NEXT: [1]: #uOps
-# ALL-NEXT: [2]: Latency
-# ALL-NEXT: [3]: RThroughput
-# ALL-NEXT: [4]: MayLoad
-# ALL-NEXT: [5]: MayStore
-# ALL-NEXT: [6]: HasSideEffects (U)
-
-# ALL:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-
-# EM1-NEXT:  1      5     1.00    *                   ldr	w0, [x1, x2]
-# EM3-NEXT:  1      5     0.50    *                   ldr	w0, [x1, x2]
-
-# ALL-NEXT:  2      2     1.00           *            str	x3, [x4, w5, sxtw]
-
-# EM1-NEXT:  2      5     1.00    *                   ldr	x6, [x7, w8, uxtw #3]
-# EM3-NEXT:  2      5     0.50    *                   ldr	x6, [x7, w8, uxtw #3]
-
-# ALL-NEXT:  1      1     1.00           *            str	x9, [x10, x11, lsl #3]
diff --git a/test/tools/llvm-mca/AArch64/Exynos/scheduler-queue-usage.s b/test/tools/llvm-mca/AArch64/Exynos/scheduler-queue-usage.s
index 20f9e74..8a2c780 100644
--- a/test/tools/llvm-mca/AArch64/Exynos/scheduler-queue-usage.s
+++ b/test/tools/llvm-mca/AArch64/Exynos/scheduler-queue-usage.s
@@ -1,8 +1,9 @@
 # NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
 # RUN: llvm-mca -march=aarch64 -mcpu=exynos-m1 -iterations=1 -scheduler-stats -resource-pressure=false -instruction-info=false < %s | FileCheck %s -check-prefixes=ALL,M1
 # RUN: llvm-mca -march=aarch64 -mcpu=exynos-m3 -iterations=1 -scheduler-stats -resource-pressure=false -instruction-info=false < %s | FileCheck %s -check-prefixes=ALL,M3
+# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m4 -iterations=1 -scheduler-stats -resource-pressure=false -instruction-info=false < %s | FileCheck %s -check-prefixes=ALL,M4
 
-  b   t
+  b	main
 
 # ALL:      Iterations:        1
 # ALL-NEXT: Instructions:      1
@@ -19,6 +20,11 @@
 # M3-NEXT:  IPC:               0.50
 # M3-NEXT:  Block RThroughput: 0.2
 
+# M4:       Dispatch Width:    6
+# M4-NEXT:  uOps Per Cycle:    0.50
+# M4-NEXT:  IPC:               0.50
+# M4-NEXT:  Block RThroughput: 0.2
+
 # ALL:      Schedulers - number of cycles where we saw N instructions issued:
 # ALL-NEXT: [# issued], [# cycles]
 # ALL-NEXT:  0,          1  (50.0%)
diff --git a/test/tools/llvm-mca/AArch64/Exynos/shifted-register.s b/test/tools/llvm-mca/AArch64/Exynos/shifted-register.s
index e37d2d0..5dfdc1e 100644
--- a/test/tools/llvm-mca/AArch64/Exynos/shifted-register.s
+++ b/test/tools/llvm-mca/AArch64/Exynos/shifted-register.s
@@ -1,29 +1,40 @@
 # NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
-# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m1  -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM1
-# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m3  -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM3
+# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m1 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM1
+# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m3 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM3
+# RUN: llvm-mca -march=aarch64 -mcpu=exynos-m4 -resource-pressure=false < %s | FileCheck %s -check-prefixes=ALL,EM4
 
-  add	w0, w1, w2, lsl #0
+  adds	w0, w1, w2, lsl #0
   sub	x3, x4, x5, lsr #1
-  adds	x6, x7, x8, lsl #2
-  subs	w9, w10, w11, asr #3
+  ands	x6, x7, x8, lsl #2
+  orr	w9, w10, w11, asr #3
+  adds	w12, w13, w14, lsl #4
+  sub	x15, x16, x17, lsr #6
+  ands	x18, x19, x20, lsl #8
+  orr	w21, w22, w23, asr #10
 
 # ALL:      Iterations:        100
-# ALL-NEXT: Instructions:      400
+# ALL-NEXT: Instructions:      800
 
-# EM1-NEXT: Total Cycles:      204
-# EM3-NEXT: Total Cycles:      154
+# EM1-NEXT: Total Cycles:      470
+# EM3-NEXT: Total Cycles:      354
+# EM4-NEXT: Total Cycles:      329
 
-# ALL-NEXT: Total uOps:        400
+# ALL-NEXT: Total uOps:        800
 
 # EM1:      Dispatch Width:    4
-# EM1-NEXT: uOps Per Cycle:    1.96
-# EM1-NEXT: IPC:               1.96
-# EM1-NEXT: Block RThroughput: 2.0
+# EM1-NEXT: uOps Per Cycle:    1.70
+# EM1-NEXT: IPC:               1.70
+# EM1-NEXT: Block RThroughput: 4.7
 
 # EM3:      Dispatch Width:    6
-# EM3-NEXT: uOps Per Cycle:    2.60
-# EM3-NEXT: IPC:               2.60
-# EM3-NEXT: Block RThroughput: 1.5
+# EM3-NEXT: uOps Per Cycle:    2.26
+# EM3-NEXT: IPC:               2.26
+# EM3-NEXT: Block RThroughput: 3.5
+
+# EM4:      Dispatch Width:    6
+# EM4-NEXT: uOps Per Cycle:    2.43
+# EM4-NEXT: IPC:               2.43
+# EM4-NEXT: Block RThroughput: 3.3
 
 # ALL:      Instruction Info:
 # ALL-NEXT: [1]: #uOps
@@ -35,12 +46,29 @@
 
 # ALL:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
 
-# EM1-NEXT:  1      1     0.33                        add	w0, w1, w2
+# EM1-NEXT:  1      1     0.33                        adds	w0, w1, w2
 # EM1-NEXT:  1      2     0.67                        sub	x3, x4, x5, lsr #1
-# EM1-NEXT:  1      1     0.33                        adds	x6, x7, x8, lsl #2
-# EM1-NEXT:  1      2     0.67                        subs	w9, w10, w11, asr #3
+# EM1-NEXT:  1      1     0.33                        ands	x6, x7, x8, lsl #2
+# EM1-NEXT:  1      2     0.67                        orr	w9, w10, w11, asr #3
+# EM1-NEXT:  1      2     0.67                        adds	w12, w13, w14, lsl #4
+# EM1-NEXT:  1      2     0.67                        sub	x15, x16, x17, lsr #6
+# EM1-NEXT:  1      2     0.67                        ands	x18, x19, x20, lsl #8
+# EM1-NEXT:  1      2     0.67                        orr	w21, w22, w23, asr #10
 
-# EM3-NEXT:  1      1     0.25                        add	w0, w1, w2
+# EM3-NEXT:  1      1     0.25                        adds	w0, w1, w2
 # EM3-NEXT:  1      2     0.50                        sub	x3, x4, x5, lsr #1
-# EM3-NEXT:  1      1     0.25                        adds	x6, x7, x8, lsl #2
-# EM3-NEXT:  1      2     0.50                        subs	w9, w10, w11, asr #3
+# EM3-NEXT:  1      1     0.25                        ands	x6, x7, x8, lsl #2
+# EM3-NEXT:  1      2     0.50                        orr	w9, w10, w11, asr #3
+# EM3-NEXT:  1      2     0.50                        adds	w12, w13, w14, lsl #4
+# EM3-NEXT:  1      2     0.50                        sub	x15, x16, x17, lsr #6
+# EM3-NEXT:  1      2     0.50                        ands	x18, x19, x20, lsl #8
+# EM3-NEXT:  1      2     0.50                        orr	w21, w22, w23, asr #10
+
+# EM4-NEXT:  1      1     0.25                        adds	w0, w1, w2
+# EM4-NEXT:  1      2     0.50                        sub	x3, x4, x5, lsr #1
+# EM4-NEXT:  1      1     0.25                        ands	x6, x7, x8, lsl #2
+# EM4-NEXT:  1      2     0.50                        orr	w9, w10, w11, asr #3
+# EM4-NEXT:  1      2     0.50                        adds	w12, w13, w14, lsl #4
+# EM4-NEXT:  1      2     0.50                        sub	x15, x16, x17, lsr #6
+# EM4-NEXT:  1      1     0.25                        ands	x18, x19, x20, lsl #8
+# EM4-NEXT:  1      2     0.50                        orr	w21, w22, w23, asr #10
diff --git a/test/tools/llvm-mca/SystemZ/lit.local.cfg b/test/tools/llvm-mca/SystemZ/lit.local.cfg
new file mode 100644
index 0000000..5c02dd3
--- /dev/null
+++ b/test/tools/llvm-mca/SystemZ/lit.local.cfg
@@ -0,0 +1,3 @@
+if 'SystemZ' not in config.root.targets:
+    config.unsupported = True
+
diff --git a/test/tools/llvm-mca/SystemZ/stm-lm.s b/test/tools/llvm-mca/SystemZ/stm-lm.s
new file mode 100644
index 0000000..db2d796
--- /dev/null
+++ b/test/tools/llvm-mca/SystemZ/stm-lm.s
@@ -0,0 +1,72 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=s390x-linux-gnu -mcpu=z14 -timeline -timeline-max-iterations=3 < %s | FileCheck %s
+
+stmg	%r6, %r15, 48(%r15)
+lmg	%r6, %r15, 48(%r15)
+
+# CHECK:      Iterations:        100
+# CHECK-NEXT: Instructions:      200
+# CHECK-NEXT: Total Cycles:      1004
+# CHECK-NEXT: Total uOps:        600
+
+# CHECK:      Dispatch Width:    6
+# CHECK-NEXT: uOps Per Cycle:    0.60
+# CHECK-NEXT: IPC:               0.20
+# CHECK-NEXT: Block RThroughput: 3.5
+
+# CHECK:      Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
+# CHECK-NEXT:  3      1     1.50           *            stmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT:  3      10    2.50    *                   lmg	%r6, %r15, 48(%r15)
+
+# CHECK:      Resources:
+# CHECK-NEXT: [0.0] - Z14_FXaUnit
+# CHECK-NEXT: [0.1] - Z14_FXaUnit
+# CHECK-NEXT: [1.0] - Z14_FXbUnit
+# CHECK-NEXT: [1.1] - Z14_FXbUnit
+# CHECK-NEXT: [2.0] - Z14_LSUnit
+# CHECK-NEXT: [2.1] - Z14_LSUnit
+# CHECK-NEXT: [3]   - Z14_MCD
+# CHECK-NEXT: [4.0] - Z14_VBUnit
+# CHECK-NEXT: [4.1] - Z14_VBUnit
+# CHECK-NEXT: [5.0] - Z14_VecFPdUnit
+# CHECK-NEXT: [5.1] - Z14_VecFPdUnit
+# CHECK-NEXT: [6.0] - Z14_VecUnit
+# CHECK-NEXT: [6.1] - Z14_VecUnit
+
+# CHECK:      Resource pressure per iteration:
+# CHECK-NEXT: [0.0]  [0.1]  [1.0]  [1.1]  [2.0]  [2.1]  [3]    [4.0]  [4.1]  [5.0]  [5.1]  [6.0]  [6.1]
+# CHECK-NEXT:  -      -     1.50   1.50   2.06   4.94    -      -      -      -      -      -      -
+
+# CHECK:      Resource pressure by instruction:
+# CHECK-NEXT: [0.0]  [0.1]  [1.0]  [1.1]  [2.0]  [2.1]  [3]    [4.0]  [4.1]  [5.0]  [5.1]  [6.0]  [6.1]  Instructions:
+# CHECK-NEXT:  -      -     1.50   1.50   1.96   0.04    -      -      -      -      -      -      -     stmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT:  -      -      -      -     0.10   4.90    -      -      -      -      -      -      -     lmg	%r6, %r15, 48(%r15)
+
+# CHECK:      Timeline view:
+# CHECK-NEXT:                     0123456789          0123
+# CHECK-NEXT: Index     0123456789          0123456789
+
+# CHECK:      [0,0]     DeER .    .    .    .    .    .  .   stmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT: [0,1]     .DeeeeeeeeeeER .    .    .    .  .   lmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT: [1,0]     . D=========eER.    .    .    .  .   stmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT: [1,1]     .  D========eeeeeeeeeeER .    .  .   lmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT: [2,0]     .   D=================eER.    .  .   stmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT: [2,1]     .    D================eeeeeeeeeeER   lmg	%r6, %r15, 48(%r15)
+
+# CHECK:      Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK:            [0]    [1]    [2]    [3]
+# CHECK-NEXT: 0.     3     9.7    0.3    0.0       stmg	%r6, %r15, 48(%r15)
+# CHECK-NEXT: 1.     3     9.0    0.3    0.0       lmg	%r6, %r15, 48(%r15)
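The summary statistics in this test follow directly from the counts: 100 iterations of a two-instruction, six-uop block give 200 instructions and 600 uOps retired over 1004 cycles. A quick arithmetic check, rounded the way llvm-mca prints them:

iterations, insts_per_iter, uops_per_iter, cycles = 100, 2, 6, 1004

instructions = iterations * insts_per_iter   # 200
uops = iterations * uops_per_iter            # 600
print('IPC:            %.2f' % (instructions / cycles))  # 0.20
print('uOps Per Cycle: %.2f' % (uops / cycles))          # 0.60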
diff --git a/test/tools/llvm-nm/X86/posix-aliases.test b/test/tools/llvm-nm/X86/posix-aliases.test
new file mode 100644
index 0000000..2e09993
--- /dev/null
+++ b/test/tools/llvm-nm/X86/posix-aliases.test
@@ -0,0 +1,5 @@
+# RUN: llvm-nm -P %p/Inputs/hello.obj.elf-x86_64 > %t1
+# RUN: llvm-nm -format=posix %p/Inputs/hello.obj.elf-x86_64 > %t2
+# RUN: llvm-nm -portability %p/Inputs/hello.obj.elf-x86_64 > %t3
+# RUN: diff %t1 %t2
+# RUN: diff %t1 %t3
diff --git a/test/tools/llvm-nm/wasm/exports.yaml b/test/tools/llvm-nm/wasm/exports.yaml
index 55d2b76..c1ee6d7 100644
--- a/test/tools/llvm-nm/wasm/exports.yaml
+++ b/test/tools/llvm-nm/wasm/exports.yaml
@@ -37,7 +37,7 @@
         Content:         '616263'
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
        - Index:           0
          Kind:            FUNCTION
diff --git a/test/tools/llvm-nm/wasm/imports.yaml b/test/tools/llvm-nm/wasm/imports.yaml
index 9696972..2ea0d0f 100644
--- a/test/tools/llvm-nm/wasm/imports.yaml
+++ b/test/tools/llvm-nm/wasm/imports.yaml
@@ -25,7 +25,7 @@
         GlobalMutable:   false
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
        - Index:           0
          Kind:            FUNCTION
diff --git a/test/tools/llvm-nm/wasm/weak-symbols.yaml b/test/tools/llvm-nm/wasm/weak-symbols.yaml
index caa981d..36711b1 100644
--- a/test/tools/llvm-nm/wasm/weak-symbols.yaml
+++ b/test/tools/llvm-nm/wasm/weak-symbols.yaml
@@ -43,7 +43,7 @@
         Content:         '616263'
   - Type:            CUSTOM
     Name:            linking
-    Version:         1
+    Version:         2
     SymbolTable:
        - Index:           0
          Kind:            DATA
diff --git a/test/tools/llvm-objcopy/COFF/Inputs/discard-locals.yaml b/test/tools/llvm-objcopy/COFF/Inputs/discard-locals.yaml
new file mode 100644
index 0000000..8b34e8f
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/Inputs/discard-locals.yaml
@@ -0,0 +1,60 @@
+--- !COFF
+header:          
+  Machine:         IMAGE_FILE_MACHINE_AMD64
+  Characteristics: [  ]
+sections:        
+  - Name:            .text
+    Characteristics: [  ]
+    Alignment:       4
+    SectionData:     E800000000E800000000C3C3C3
+    Relocations:     
+      - VirtualAddress:  1
+        SymbolName:      local_referenced
+        Type:            IMAGE_REL_AMD64_REL32
+      - VirtualAddress:  5
+        SymbolName:      external_undefined
+        Type:            IMAGE_REL_AMD64_REL32
+symbols:         
+  - Name:            external
+    Value:           0
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            external_undefined
+    Value:           0
+    SectionNumber:   0
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            external_undefined_unreferenced
+    Value:           0
+    SectionNumber:   0
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            local_unreferenced
+    Value:           6
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            local_referenced
+    Value:           7
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            local_undefined_unreferenced
+    Value:           0
+    SectionNumber:   0
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            '@feat.00'
+    Value:           0
+    SectionNumber:   -1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+...
diff --git a/test/tools/llvm-objcopy/COFF/Inputs/i386-exe.yaml b/test/tools/llvm-objcopy/COFF/Inputs/i386-exe.yaml
new file mode 100644
index 0000000..4f74368
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/Inputs/i386-exe.yaml
@@ -0,0 +1,84 @@
+--- !COFF
+OptionalHeader:  
+  AddressOfEntryPoint: 4144
+  ImageBase:       4194304
+  SectionAlignment: 4096
+  FileAlignment:   512
+  MajorOperatingSystemVersion: 6
+  MinorOperatingSystemVersion: 0
+  MajorImageVersion: 0
+  MinorImageVersion: 0
+  MajorSubsystemVersion: 6
+  MinorSubsystemVersion: 0
+  Subsystem:       IMAGE_SUBSYSTEM_WINDOWS_CUI
+  DLLCharacteristics: [ IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE, IMAGE_DLL_CHARACTERISTICS_NX_COMPAT, IMAGE_DLL_CHARACTERISTICS_NO_SEH, IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE ]
+  SizeOfStackReserve: 1048576
+  SizeOfStackCommit: 4096
+  SizeOfHeapReserve: 1048576
+  SizeOfHeapCommit: 4096
+  ExportTable:     
+    RelativeVirtualAddress: 0
+    Size:            0
+  ImportTable:     
+    RelativeVirtualAddress: 0
+    Size:            0
+  ResourceTable:   
+    RelativeVirtualAddress: 0
+    Size:            0
+  ExceptionTable:  
+    RelativeVirtualAddress: 0
+    Size:            0
+  CertificateTable: 
+    RelativeVirtualAddress: 0
+    Size:            0
+  BaseRelocationTable: 
+    RelativeVirtualAddress: 12288
+    Size:            12
+  Debug:           
+    RelativeVirtualAddress: 0
+    Size:            0
+  Architecture:    
+    RelativeVirtualAddress: 0
+    Size:            0
+  GlobalPtr:       
+    RelativeVirtualAddress: 0
+    Size:            0
+  TlsTable:        
+    RelativeVirtualAddress: 0
+    Size:            0
+  LoadConfigTable: 
+    RelativeVirtualAddress: 0
+    Size:            0
+  BoundImport:     
+    RelativeVirtualAddress: 0
+    Size:            0
+  IAT:             
+    RelativeVirtualAddress: 0
+    Size:            0
+  DelayImportDescriptor: 
+    RelativeVirtualAddress: 0
+    Size:            0
+  ClrRuntimeHeader: 
+    RelativeVirtualAddress: 0
+    Size:            0
+header:          
+  Machine:         IMAGE_FILE_MACHINE_I386
+  Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_32BIT_MACHINE ]
+sections:        
+  - Name:            .text
+    Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+    VirtualAddress:  4096
+    VirtualSize:     83
+    SectionData:     5589E5508B45088B0D00204000034D088945FC89C883C4045DC3660F1F4400005589E55DC3662E0F1F840000000000905589E583EC08E8E5FFFFFFC745FC00000000C7042402000000E8B2FFFFFF83C4085DC3
+  - Name:            .data
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+    VirtualAddress:  8192
+    VirtualSize:     4
+    SectionData:     '01000000'
+  - Name:            .reloc
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    VirtualAddress:  12288
+    VirtualSize:     12
+    SectionData:     001000000C00000009300000
+symbols:         []
+...
diff --git a/test/tools/llvm-objcopy/COFF/Inputs/i386-obj.yaml b/test/tools/llvm-objcopy/COFF/Inputs/i386-obj.yaml
new file mode 100644
index 0000000..4bf09b1
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/Inputs/i386-obj.yaml
@@ -0,0 +1,244 @@
+--- !COFF
+header:          
+  Machine:         IMAGE_FILE_MACHINE_I386
+  Characteristics: [  ]
+sections:        
+  - Name:            .text
+    Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+    Alignment:       16
+    SectionData:     5589E5508B45088B0D00000000034D088945FC89C883C4045DC3660F1F4400005589E55DC3662E0F1F840000000000905589E583EC08E800000000C745FC00000000C7042402000000E80000000083C4085DC3
+    Relocations:     
+      - VirtualAddress:  9
+        SymbolName:      _x
+        Type:            IMAGE_REL_I386_DIR32
+      - VirtualAddress:  55
+        SymbolName:      ___main
+        Type:            IMAGE_REL_I386_REL32
+      - VirtualAddress:  74
+        SymbolName:      _f
+        Type:            IMAGE_REL_I386_REL32
+  - Name:            .data
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+    Alignment:       4
+    SectionData:     '01000000'
+  - Name:            .bss
+    Characteristics: [ IMAGE_SCN_CNT_UNINITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+    Alignment:       4
+    SectionData:     ''
+  - Name:            .debug_str
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     636C616E672076657273696F6E20382E302E3020287472756E6B203334363337382920286C6C766D2F7472756E6B203334363339302900736F757263652E63002F686F6D652F6D617274696E2F636F64652F6C6C766D2F6275696C642F6F626A636F70792D696E707574007800696E740066005F5F6D61696E006D61696E007900
+  - Name:            .debug_abbrev
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     011101250E1305030E10171B0E110112060000023400030E49133A0B3B0B02180000032400030E3E0B0B0B0000042E01110112064018030E3A0B3B0B271949133F1900000505000218030E3A0B3B0B49130000062E00110112064018030E3A0B3B0B27193F190000072E00110112064018030E3A0B3B0B49133F19000000
+  - Name:            .debug_info
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     850000000400000000000401000000000C003700000000000000400000000000000053000000026B000000370000000101050300000000036D000000050404000000001A000000015571000000010337000000050291087F00000001033700000000062000000005000000015573000000010707300000002300000001557A00000001093700000000
+    Relocations:     
+      - VirtualAddress:  6
+        SymbolName:      .debug_abbrev
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  12
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  18
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  22
+        SymbolName:      .debug_line
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  26
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  30
+        SymbolName:      .text
+        Type:            IMAGE_REL_I386_DIR32
+      - VirtualAddress:  39
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  51
+        SymbolName:      _x
+        Type:            IMAGE_REL_I386_DIR32
+      - VirtualAddress:  56
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  63
+        SymbolName:      .text
+        Type:            IMAGE_REL_I386_DIR32
+      - VirtualAddress:  73
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  87
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  99
+        SymbolName:      .text
+        Type:            IMAGE_REL_I386_DIR32
+      - VirtualAddress:  109
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+      - VirtualAddress:  116
+        SymbolName:      .text
+        Type:            IMAGE_REL_I386_DIR32
+      - VirtualAddress:  126
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_I386_SECREL
+  - Name:            .debug_macinfo
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     '00'
+  - Name:            .debug_line
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     52000000040020000000010101FB0E0D00010101010000000100000100736F757263652E6300000000000005020000000014050A0A75050C0666050366050006CB05010A3D0500C9050A0A0821050306BA0205000101
+    Relocations:     
+      - VirtualAddress:  45
+        SymbolName:      .text
+        Type:            IMAGE_REL_I386_DIR32
+  - Name:            .llvm_addrsig
+    Characteristics: [ IMAGE_SCN_LNK_REMOVE ]
+    Alignment:       1
+    SectionData:     '1314'
+symbols:         
+  - Name:            .text
+    Value:           0
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          83
+      NumberOfRelocations: 3
+      NumberOfLinenumbers: 0
+      CheckSum:        4183332250
+      Number:          1
+  - Name:            .data
+    Value:           0
+    SectionNumber:   2
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          4
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        3099354981
+      Number:          2
+  - Name:            .bss
+    Value:           0
+    SectionNumber:   3
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          0
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        0
+      Number:          3
+  - Name:            .debug_str
+    Value:           0
+    SectionNumber:   4
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          129
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        2876129505
+      Number:          4
+  - Name:            .debug_abbrev
+    Value:           0
+    SectionNumber:   5
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          126
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        2218663305
+      Number:          5
+  - Name:            .debug_info
+    Value:           0
+    SectionNumber:   6
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          137
+      NumberOfRelocations: 16
+      NumberOfLinenumbers: 0
+      CheckSum:        2577207131
+      Number:          6
+  - Name:            .debug_macinfo
+    Value:           0
+    SectionNumber:   7
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          1
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        0
+      Number:          7
+  - Name:            .debug_line
+    Value:           0
+    SectionNumber:   8
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          86
+      NumberOfRelocations: 1
+      NumberOfLinenumbers: 0
+      CheckSum:        2357396799
+      Number:          8
+  - Name:            .llvm_addrsig
+    Value:           0
+    SectionNumber:   9
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          2
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        2067109359
+      Number:          9
+  - Name:            '@feat.00'
+    Value:           1
+    SectionNumber:   -1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            _f
+    Value:           0
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_FUNCTION
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            _x
+    Value:           0
+    SectionNumber:   2
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            ___main
+    Value:           32
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_FUNCTION
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            _main
+    Value:           48
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_FUNCTION
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/test/tools/llvm-objcopy/COFF/Inputs/no-symbols.yaml b/test/tools/llvm-objcopy/COFF/Inputs/no-symbols.yaml
new file mode 100644
index 0000000..db8aeb6
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/Inputs/no-symbols.yaml
@@ -0,0 +1,11 @@
+--- !COFF
+header:          
+  Machine:         IMAGE_FILE_MACHINE_AMD64
+  Characteristics: [  ]
+sections:        
+  - Name:            .text
+    Characteristics: [  ]
+    Alignment:       4
+    SectionData:     E800000000C3C3C3
+symbols:         
+...
diff --git a/test/tools/llvm-objcopy/COFF/Inputs/strip-symbols.yaml b/test/tools/llvm-objcopy/COFF/Inputs/strip-symbols.yaml
new file mode 100644
index 0000000..1d652691
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/Inputs/strip-symbols.yaml
@@ -0,0 +1,53 @@
+--- !COFF
+header:          
+  Machine:         IMAGE_FILE_MACHINE_AMD64
+  Characteristics: [  ]
+sections:        
+  - Name:            .text
+    Characteristics: [  ]
+    Alignment:       4
+    SectionData:     488B0500000000488B0500000000488B0500000000
+    Relocations:     
+      - VirtualAddress:  3
+        SymbolTableIndex: 0
+        Type:            IMAGE_REL_AMD64_REL32
+      - VirtualAddress:  10
+        SymbolTableIndex: 1
+        Type:            IMAGE_REL_AMD64_REL32
+      - VirtualAddress:  17
+        SymbolName:      foo
+        Type:            IMAGE_REL_AMD64_REL32
+  - Name:            .rdata
+    Characteristics: [  ]
+    Alignment:       1
+    SectionData:     '00'
+  - Name:            .rdata
+    Characteristics: [  ]
+    Alignment:       1
+    SectionData:     '01'
+symbols:         
+  - Name:            .rdata
+    Value:           0
+    SectionNumber:   2
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            .rdata
+    Value:           0
+    SectionNumber:   3
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            mainfunc
+    Value:           0
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            foo
+    Value:           0
+    SectionNumber:   3
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/test/tools/llvm-objcopy/COFF/Inputs/x86_64-exe.yaml b/test/tools/llvm-objcopy/COFF/Inputs/x86_64-exe.yaml
new file mode 100644
index 0000000..c4c4df4
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/Inputs/x86_64-exe.yaml
@@ -0,0 +1,89 @@
+--- !COFF
+OptionalHeader:  
+  AddressOfEntryPoint: 4144
+  ImageBase:       1073741824
+  SectionAlignment: 4096
+  FileAlignment:   512
+  MajorOperatingSystemVersion: 6
+  MinorOperatingSystemVersion: 0
+  MajorImageVersion: 0
+  MinorImageVersion: 0
+  MajorSubsystemVersion: 6
+  MinorSubsystemVersion: 0
+  Subsystem:       IMAGE_SUBSYSTEM_WINDOWS_CUI
+  DLLCharacteristics: [ IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA, IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE, IMAGE_DLL_CHARACTERISTICS_NX_COMPAT, IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE ]
+  SizeOfStackReserve: 1048576
+  SizeOfStackCommit: 4096
+  SizeOfHeapReserve: 1048576
+  SizeOfHeapCommit: 4096
+  ExportTable:     
+    RelativeVirtualAddress: 0
+    Size:            0
+  ImportTable:     
+    RelativeVirtualAddress: 0
+    Size:            0
+  ResourceTable:   
+    RelativeVirtualAddress: 0
+    Size:            0
+  ExceptionTable:  
+    RelativeVirtualAddress: 16384
+    Size:            24
+  CertificateTable: 
+    RelativeVirtualAddress: 0
+    Size:            0
+  BaseRelocationTable: 
+    RelativeVirtualAddress: 0
+    Size:            0
+  Debug:           
+    RelativeVirtualAddress: 0
+    Size:            0
+  Architecture:    
+    RelativeVirtualAddress: 0
+    Size:            0
+  GlobalPtr:       
+    RelativeVirtualAddress: 0
+    Size:            0
+  TlsTable:        
+    RelativeVirtualAddress: 0
+    Size:            0
+  LoadConfigTable: 
+    RelativeVirtualAddress: 0
+    Size:            0
+  BoundImport:     
+    RelativeVirtualAddress: 0
+    Size:            0
+  IAT:             
+    RelativeVirtualAddress: 0
+    Size:            0
+  DelayImportDescriptor: 
+    RelativeVirtualAddress: 0
+    Size:            0
+  ClrRuntimeHeader: 
+    RelativeVirtualAddress: 0
+    Size:            0
+header:          
+  Machine:         IMAGE_FILE_MACHINE_AMD64
+  Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE ]
+sections:        
+  - Name:            .text
+    Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+    VirtualAddress:  4096
+    VirtualSize:     87
+    SectionData:     50894C24048B0DF51F0000034C240489C859C3662E0F1F8400000000000F1F00C3662E0F1F8400000000000F1F440000554883EC30488D6C2430E8E1FFFFFFC745FC00000000B902000000E8B0FFFFFF904883C4305DC3
+  - Name:            .rdata
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+    VirtualAddress:  8192
+    VirtualSize:     20
+    SectionData:     0101010001020000010A03350A03055201500000
+  - Name:            .data
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+    VirtualAddress:  12288
+    VirtualSize:     4
+    SectionData:     '01000000'
+  - Name:            .pdata
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+    VirtualAddress:  16384
+    VirtualSize:     24
+    SectionData:     '001000001310000000200000301000005710000008200000'
+symbols:         []
+...
diff --git a/test/tools/llvm-objcopy/COFF/Inputs/x86_64-obj.yaml b/test/tools/llvm-objcopy/COFF/Inputs/x86_64-obj.yaml
new file mode 100644
index 0000000..ef1666c
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/Inputs/x86_64-obj.yaml
@@ -0,0 +1,295 @@
+--- !COFF
+header:          
+  Machine:         IMAGE_FILE_MACHINE_AMD64
+  Characteristics: [  ]
+sections:        
+  - Name:            .text
+    Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+    Alignment:       16
+    SectionData:     50894C24048B0D00000000034C240489C859C3662E0F1F8400000000000F1F00C3662E0F1F8400000000000F1F440000554883EC30488D6C2430E800000000C745FC00000000B902000000E800000000904883C4305DC3
+    Relocations:     
+      - VirtualAddress:  7
+        SymbolName:      x
+        Type:            IMAGE_REL_AMD64_REL32
+      - VirtualAddress:  59
+        SymbolName:      __main
+        Type:            IMAGE_REL_AMD64_REL32
+      - VirtualAddress:  76
+        SymbolName:      f
+        Type:            IMAGE_REL_AMD64_REL32
+  - Name:            .data
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+    Alignment:       4
+    SectionData:     '01000000'
+  - Name:            .bss
+    Characteristics: [ IMAGE_SCN_CNT_UNINITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+    Alignment:       4
+    SectionData:     ''
+  - Name:            .xdata
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+    Alignment:       4
+    SectionData:     0101010001020000010A03350A03055201500000
+  - Name:            .debug_str
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     636C616E672076657273696F6E20382E302E3020287472756E6B203334363337382920286C6C766D2F7472756E6B203334363339302900736F757263652E63002F686F6D652F6D617274696E2F636F64652F6C6C766D2F6275696C642F6F626A636F70792D696E707574007800696E740066005F5F6D61696E006D61696E007900
+  - Name:            .debug_abbrev
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     011101250E1305030E10171B0E110112060000023400030E49133A0B3B0B02180000032400030E3E0B0B0B0000042E01110112064018030E3A0B3B0B271949133F1900000505000218030E3A0B3B0B49130000062E00110112064018030E3A0B3B0B27193F190000072E00110112064018030E3A0B3B0B49133F19000000
+  - Name:            .debug_info
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     990000000400000000000801000000000C00370000000000000040000000000000000000000057000000026B0000003F000000010109030000000000000000036D00000005040400000000000000001300000001577100000001033F000000050291047F00000001033F000000000620000000000000000100000001577300000001070730000000000000002700000001567A00000001093F00000000
+    Relocations:     
+      - VirtualAddress:  6
+        SymbolName:      .debug_abbrev
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  12
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  18
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  22
+        SymbolName:      .debug_line
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  26
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  30
+        SymbolName:      .text
+        Type:            IMAGE_REL_AMD64_ADDR64
+      - VirtualAddress:  43
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  55
+        SymbolName:      x
+        Type:            IMAGE_REL_AMD64_ADDR64
+      - VirtualAddress:  64
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  71
+        SymbolName:      .text
+        Type:            IMAGE_REL_AMD64_ADDR64
+      - VirtualAddress:  85
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  99
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  111
+        SymbolName:      .text
+        Type:            IMAGE_REL_AMD64_ADDR64
+      - VirtualAddress:  125
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+      - VirtualAddress:  132
+        SymbolName:      .text
+        Type:            IMAGE_REL_AMD64_ADDR64
+      - VirtualAddress:  146
+        SymbolName:      .debug_str
+        Type:            IMAGE_REL_AMD64_SECREL
+  - Name:            .debug_macinfo
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     '00'
+  - Name:            .pdata
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+    Alignment:       4
+    SectionData:     '000000001300000000000000000000002700000008000000'
+    Relocations:     
+      - VirtualAddress:  0
+        SymbolName:      f
+        Type:            IMAGE_REL_AMD64_ADDR32NB
+      - VirtualAddress:  4
+        SymbolName:      f
+        Type:            IMAGE_REL_AMD64_ADDR32NB
+      - VirtualAddress:  8
+        SymbolName:      .xdata
+        Type:            IMAGE_REL_AMD64_ADDR32NB
+      - VirtualAddress:  12
+        SymbolName:      main
+        Type:            IMAGE_REL_AMD64_ADDR32NB
+      - VirtualAddress:  16
+        SymbolName:      main
+        Type:            IMAGE_REL_AMD64_ADDR32NB
+      - VirtualAddress:  20
+        SymbolName:      .xdata
+        Type:            IMAGE_REL_AMD64_ADDR32NB
+  - Name:            .debug_line
+    Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+    Alignment:       1
+    SectionData:     57000000040020000000010101FB0E0D00010101010000000100000100736F757263652E630000000000000902000000000000000014050A0A59050C066605034A050006081505010A130500F3050A0A08590503069E0207000101
+    Relocations:     
+      - VirtualAddress:  45
+        SymbolName:      .text
+        Type:            IMAGE_REL_AMD64_ADDR64
+  - Name:            .llvm_addrsig
+    Characteristics: [ IMAGE_SCN_LNK_REMOVE ]
+    Alignment:       1
+    SectionData:     '1718'
+symbols:         
+  - Name:            .text
+    Value:           0
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          87
+      NumberOfRelocations: 3
+      NumberOfLinenumbers: 0
+      CheckSum:        4237828689
+      Number:          1
+  - Name:            .data
+    Value:           0
+    SectionNumber:   2
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          4
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        3099354981
+      Number:          2
+  - Name:            .bss
+    Value:           0
+    SectionNumber:   3
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          0
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        0
+      Number:          3
+  - Name:            .xdata
+    Value:           0
+    SectionNumber:   4
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          20
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        3415491858
+      Number:          4
+  - Name:            .debug_str
+    Value:           0
+    SectionNumber:   5
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          129
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        2876129505
+      Number:          5
+  - Name:            .debug_abbrev
+    Value:           0
+    SectionNumber:   6
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          126
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        2218663305
+      Number:          6
+  - Name:            .debug_info
+    Value:           0
+    SectionNumber:   7
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          157
+      NumberOfRelocations: 16
+      NumberOfLinenumbers: 0
+      CheckSum:        603506744
+      Number:          7
+  - Name:            .debug_macinfo
+    Value:           0
+    SectionNumber:   8
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          1
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        0
+      Number:          8
+  - Name:            .pdata
+    Value:           0
+    SectionNumber:   9
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          24
+      NumberOfRelocations: 6
+      NumberOfLinenumbers: 0
+      CheckSum:        2036901199
+      Number:          9
+  - Name:            .debug_line
+    Value:           0
+    SectionNumber:   10
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          91
+      NumberOfRelocations: 1
+      NumberOfLinenumbers: 0
+      CheckSum:        633454091
+      Number:          10
+  - Name:            .llvm_addrsig
+    Value:           0
+    SectionNumber:   11
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+    SectionDefinition: 
+      Length:          2
+      NumberOfRelocations: 0
+      NumberOfLinenumbers: 0
+      CheckSum:        384769216
+      Number:          11
+  - Name:            '@feat.00'
+    Value:           0
+    SectionNumber:   -1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            f
+    Value:           0
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_FUNCTION
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            x
+    Value:           0
+    SectionNumber:   2
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            __main
+    Value:           32
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_FUNCTION
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            main
+    Value:           48
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_FUNCTION
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/test/tools/llvm-objcopy/COFF/basic-copy.test b/test/tools/llvm-objcopy/COFF/basic-copy.test
new file mode 100644
index 0000000..ecdf430
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/basic-copy.test
@@ -0,0 +1,48 @@
+Test plain passthrough copying with llvm-objcopy by checking that obj2yaml
+produces identical output for both the input and output object files/executables.
+(Intentionally not comparing to the original input yaml, in case there are
+superficial differences like line endings.) To check the whole file rather
+than individual fields with FileCheck, the comparison uses obj2yaml+cmp
+instead of llvm-readobj+cmp, as llvm-readobj also prints file details that
+can differ between otherwise equal files (such as file offsets).
+
+Actual copied object files/executables can differ in, among other things,
+the following aspects:
+- The padding of executable sections (lld uses 0xcc, which is int3 on x86)
+- The actual layout of the string table (it can be filled linearly,
+  strings can be deduplicated, or the table can be optimized by sharing tails
+  of longer strings; different parts of llvm implement each of these options)
+- The size indication for an empty/missing string table can either be 4
+  or be left out altogether
+- Alignment of section data
+- Checksums
+
+RUN: yaml2obj %p/Inputs/i386-obj.yaml > %t.i386.o
+RUN: llvm-objcopy %t.i386.o %t.i386-copy.o
+RUN: obj2yaml %t.i386.o > %t.i386.o.yaml
+RUN: obj2yaml %t.i386-copy.o > %t.i386-copy.o.yaml
+RUN: cmp %t.i386.o.yaml %t.i386-copy.o.yaml
+
+RUN: yaml2obj %p/Inputs/x86_64-obj.yaml > %t.x86_64.o
+RUN: llvm-objcopy %t.x86_64.o %t.x86_64-copy.o
+RUN: obj2yaml %t.x86_64.o > %t.x86_64.o.yaml
+RUN: obj2yaml %t.x86_64-copy.o > %t.x86_64-copy.o.yaml
+RUN: cmp %t.x86_64.o.yaml %t.x86_64-copy.o.yaml
+
+RUN: yaml2obj %p/Inputs/i386-exe.yaml > %t.i386.exe
+RUN: llvm-objcopy %t.i386.exe %t.i386-copy.exe
+RUN: obj2yaml %t.i386.exe > %t.i386.exe.yaml
+RUN: obj2yaml %t.i386-copy.exe > %t.i386-copy.exe.yaml
+RUN: cmp %t.i386.exe.yaml %t.i386-copy.exe.yaml
+
+RUN: yaml2obj %p/Inputs/x86_64-exe.yaml > %t.x86_64.exe
+RUN: llvm-objcopy %t.x86_64.exe %t.x86_64-copy.exe
+RUN: obj2yaml %t.x86_64.exe > %t.x86_64.exe.yaml
+RUN: obj2yaml %t.x86_64-copy.exe > %t.x86_64-copy.exe.yaml
+RUN: cmp %t.x86_64.exe.yaml %t.x86_64-copy.exe.yaml
+
+RUN: yaml2obj %p/Inputs/no-symbols.yaml > %t.no-symbols.o
+RUN: llvm-objcopy %t.no-symbols.o %t.no-symbols-copy.o
+RUN: obj2yaml %t.no-symbols.o > %t.no-symbols.o.yaml
+RUN: obj2yaml %t.no-symbols-copy.o > %t.no-symbols-copy.o.yaml
+RUN: cmp %t.no-symbols.o.yaml %t.no-symbols-copy.o.yaml
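The four-step roundtrip above generalizes to any further COFF input added to this directory; only the file names change. A sketch with placeholder names (new-input.yaml is hypothetical, not one of the inputs above):

  yaml2obj Inputs/new-input.yaml > new.o   # materialize the test object
  llvm-objcopy new.o new-copy.o            # plain passthrough copy
  obj2yaml new.o > new.o.yaml              # normalize both to YAML...
  obj2yaml new-copy.o > new-copy.o.yaml
  cmp new.o.yaml new-copy.o.yaml           # ...so cmp sees only semantic differences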
diff --git a/test/tools/llvm-objcopy/COFF/discard-all.test b/test/tools/llvm-objcopy/COFF/discard-all.test
new file mode 100644
index 0000000..5d7d5ce
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/discard-all.test
@@ -0,0 +1,27 @@
+RUN: yaml2obj %p/Inputs/discard-locals.yaml > %t.in.o
+
+RUN: llvm-objdump -t %t.in.o | FileCheck %s --check-prefixes=SYMBOLS,SYMBOLS-PRE
+
+RUN: llvm-objcopy --discard-all %t.in.o %t.out.o
+RUN: llvm-objdump -t %t.out.o | FileCheck %s --check-prefixes=SYMBOLS
+
+RUN: llvm-objcopy -x %t.in.o %t.out-x.o
+RUN: cmp %t.out.o %t.out-x.o
+
+RUN: cp %t.in.o %t.strip-x.o
+RUN: llvm-strip -x %t.strip-x.o
+RUN: cmp %t.out.o %t.strip-x.o
+
+RUN: cp %t.in.o %t.strip-discard-all.o
+RUN: llvm-strip --discard-all %t.strip-discard-all.o
+RUN: cmp %t.out.o %t.strip-discard-all.o
+
+SYMBOLS: SYMBOL TABLE:
+SYMBOLS-NEXT: external
+SYMBOLS-NEXT: external_undefined
+SYMBOLS-NEXT: external_undefined_unreferenced
+SYMBOLS-PRE-NEXT: local_unreferenced
+SYMBOLS-NEXT: local_referenced
+SYMBOLS-NEXT: local_undefined_unreferenced
+SYMBOLS-PRE-NEXT: @feat.00
+SYMBOLS-EMPTY:
diff --git a/test/tools/llvm-objcopy/COFF/strip-all.yaml b/test/tools/llvm-objcopy/COFF/strip-all.yaml
new file mode 100644
index 0000000..8a92ac6
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/strip-all.yaml
@@ -0,0 +1,55 @@
+# RUN: yaml2obj %s > %t.in.o
+
+# RUN: llvm-objdump -t %t.in.o | FileCheck %s --check-prefixes=SYMBOLS,SYMBOLS-PRE
+
+# RUN: llvm-objcopy --strip-all %t.in.o %t.out.o
+# RUN: llvm-objdump -t %t.out.o | FileCheck %s --check-prefix=SYMBOLS
+# RUN: llvm-readobj -relocs %t.out.o | FileCheck %s --check-prefix=RELOCS
+
+# Test that -S, llvm-strip without arguments, and --strip-all-gnu produce
+# output identical to --strip-all above.
+# RUN: llvm-objcopy -S %t.in.o %t.out-short.o
+# RUN: cmp %t.out.o %t.out-short.o
+
+# RUN: cp %t.in.o %t.out-strip.o
+# RUN: llvm-strip %t.out-strip.o
+# RUN: cmp %t.out.o %t.out-strip.o
+
+# RUN: llvm-objcopy --strip-all-gnu %t.in.o %t.out-gnu.o
+# RUN: cmp %t.out.o %t.out-gnu.o
+
+# SYMBOLS: SYMBOL TABLE:
+# SYMBOLS-PRE-NEXT: external
+# SYMBOLS-PRE-NEXT: external_undefined
+# SYMBOLS-EMPTY:
+
+# RELOCS:      Relocations [
+# RELOCS-NEXT: ]
+
+--- !COFF
+header:          
+  Machine:         IMAGE_FILE_MACHINE_AMD64
+  Characteristics: [  ]
+sections:        
+  - Name:            .text
+    Characteristics: [  ]
+    Alignment:       4
+    SectionData:     488B0500000000C3
+    Relocations:     
+      - VirtualAddress:  3
+        SymbolName:      external_undefined
+        Type:            IMAGE_REL_AMD64_REL32
+symbols:         
+  - Name:            external
+    Value:           0
+    SectionNumber:   1
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+  - Name:            external_undefined
+    Value:           0
+    SectionNumber:   0
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/test/tools/llvm-objcopy/COFF/strip-reloc-symbol.test b/test/tools/llvm-objcopy/COFF/strip-reloc-symbol.test
new file mode 100644
index 0000000..ba1dfb0
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/strip-reloc-symbol.test
@@ -0,0 +1,5 @@
+RUN: yaml2obj %p/Inputs/strip-symbols.yaml > %t.o
+RUN: not llvm-objcopy -N foo %t.o 2>&1 | FileCheck %s --check-prefix=ERROR
+RUN: not llvm-objcopy --strip-symbol foo %t.o 2>&1 | FileCheck %s --check-prefix=ERROR
+
+ERROR: error: '{{.*}}{{/|\\}}strip-reloc-symbol.test.tmp.o': not stripping symbol 'foo' because it is named in a relocation.
diff --git a/test/tools/llvm-objcopy/COFF/strip-symbol.test b/test/tools/llvm-objcopy/COFF/strip-symbol.test
new file mode 100644
index 0000000..268b097
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/strip-symbol.test
@@ -0,0 +1,32 @@
+RUN: yaml2obj %p/Inputs/strip-symbols.yaml > %t.in.o
+
+RUN: llvm-readobj -relocations %t.in.o | FileCheck %s --check-prefixes=RELOCS,RELOCS-PRE
+RUN: llvm-objdump -t %t.in.o | FileCheck %s --check-prefixes=SYMBOLS,SYMBOLS-PRE
+
+RUN: llvm-objcopy -N mainfunc %t.in.o %t.out.o
+RUN: llvm-readobj -relocations %t.out.o | FileCheck %s --check-prefixes=RELOCS,RELOCS-POST
+RUN: llvm-objdump -t %t.out.o | FileCheck %s --check-prefix=SYMBOLS
+
+RUN: llvm-objcopy --strip-symbol mainfunc %t.in.o %t.out.o
+RUN: llvm-readobj -relocations %t.out.o | FileCheck %s --check-prefixes=RELOCS,RELOCS-POST
+RUN: llvm-objdump -t %t.out.o | FileCheck %s --check-prefix=SYMBOLS
+
+The relocations for the input are listed explicitly as well, to show
+that the symbol index of the symbol foo is updated in the relocations,
+while relocations to the two distinct .rdata symbols stay separate.
+
+RELOCS:      Relocations [
+RELOCS-NEXT:   Section (1) .text {
+RELOCS-NEXT:     0x3 IMAGE_REL_AMD64_REL32 .rdata (0)
+RELOCS-NEXT:     0xA IMAGE_REL_AMD64_REL32 .rdata (1)
+RELOCS-PRE-NEXT:  0x11 IMAGE_REL_AMD64_REL32 foo (3)
+RELOCS-POST-NEXT: 0x11 IMAGE_REL_AMD64_REL32 foo (2)
+RELOCS-NEXT:   }
+RELOCS-NEXT: ]
+
+SYMBOLS: SYMBOL TABLE:
+SYMBOLS-NEXT: .rdata
+SYMBOLS-NEXT: .rdata
+SYMBOLS-PRE-NEXT: mainfunc
+SYMBOLS-NEXT: foo
+SYMBOLS-EMPTY:
diff --git a/test/tools/llvm-objcopy/COFF/strip-unneeded.test b/test/tools/llvm-objcopy/COFF/strip-unneeded.test
new file mode 100644
index 0000000..569bc8a
--- /dev/null
+++ b/test/tools/llvm-objcopy/COFF/strip-unneeded.test
@@ -0,0 +1,16 @@
+RUN: yaml2obj %p/Inputs/discard-locals.yaml > %t.in.o
+
+RUN: llvm-objdump -t %t.in.o | FileCheck %s --check-prefixes=SYMBOLS,SYMBOLS-PRE
+
+RUN: llvm-objcopy --strip-unneeded %t.in.o %t.out.o
+RUN: llvm-objdump -t %t.out.o | FileCheck %s --check-prefix=SYMBOLS
+
+SYMBOLS: SYMBOL TABLE:
+SYMBOLS-NEXT: external
+SYMBOLS-NEXT: external_undefined
+SYMBOLS-PRE-NEXT: external_undefined_unreferenced
+SYMBOLS-PRE-NEXT: local_unreferenced
+SYMBOLS-NEXT: local_referenced
+SYMBOLS-PRE-NEXT: local_undefined_unreferenced
+SYMBOLS-PRE-NEXT: @feat.00
+SYMBOLS-EMPTY:
diff --git a/test/tools/llvm-objcopy/ELF/abs-symbol.test b/test/tools/llvm-objcopy/ELF/abs-symbol.test
index fb261ce..5f2536d 100644
--- a/test/tools/llvm-objcopy/ELF/abs-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/abs-symbol.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/add-gnu-debuglink.test b/test/tools/llvm-objcopy/ELF/add-gnu-debuglink.test
index 16f4637..330571c 100644
--- a/test/tools/llvm-objcopy/ELF/add-gnu-debuglink.test
+++ b/test/tools/llvm-objcopy/ELF/add-gnu-debuglink.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
 # RUN: printf 0000 > %t.blob
-# RUN: llvm-objcopy -add-gnu-debuglink=%t.blob %t %t2
-# RUN: llvm-readobj -sections -section-data %t2 | FileCheck %s
+# RUN: llvm-objcopy --add-gnu-debuglink=%t.blob %t %t2
+# RUN: llvm-readobj --sections --section-data %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/add-note.test b/test/tools/llvm-objcopy/ELF/add-note.test
new file mode 100644
index 0000000..b456230
--- /dev/null
+++ b/test/tools/llvm-objcopy/ELF/add-note.test
@@ -0,0 +1,36 @@
+# Verify that --add-section can be used to add a note section which is
+# successfully interpreted by tools that read notes.
+
+# Add [namesz, descsz, type, name, desc] for a build id.
+# RUN: echo -e -n "\x04\x00\x00\x00" >  %t-note.bin
+# RUN: echo -e -n "\x10\x00\x00\x00" >> %t-note.bin
+# RUN: echo -e -n "\x03\x00\x00\x00" >> %t-note.bin
+# RUN: echo -e -n "GNU\x00"          >> %t-note.bin
+# RUN: echo -e -n "\x00\x01\x02\x03" >> %t-note.bin
+# RUN: echo -e -n "\x04\x05\x06\x07" >> %t-note.bin
+# RUN: echo -e -n "\x08\x09\x0a\x0b" >> %t-note.bin
+# RUN: echo -e -n "\x0c\x0d\x0e\x0f" >> %t-note.bin
+
+# RUN: yaml2obj %s > %t.o
+# RUN: llvm-objcopy --add-section=.note.gnu.build-id=%t-note.bin %t.o %t-with-note.o
+# RUN: llvm-readobj --notes %t-with-note.o | FileCheck %s
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS64
+  Data:            ELFDATA2LSB
+  Type:            ET_REL
+  Machine:         EM_X86_64
+
+# CHECK:      Notes [
+# CHECK-NEXT:   NoteSection {
+# CHECK-NEXT:     Offset:
+# CHECK-NEXT:     Size:
+# CHECK-NEXT:     Note {
+# CHECK-NEXT:       Owner: GNU
+# CHECK-NEXT:       Data size: 0x10
+# CHECK-NEXT:       Type: NT_GNU_BUILD_ID
+# CHECK-NEXT:       Build ID: 000102030405060708090a0b0c0d0e0f
+# CHECK-NEXT:     }
+# CHECK-NEXT:   }
+# CHECK-NEXT: ]
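For reference, the 32-byte blob built by the echo commands above follows the generic ELF note layout: three 4-byte little-endian words (name size, descriptor size, type), then the name padded to 4-byte alignment, then the descriptor. Decoded byte by byte (field names per the standard Elf64_Nhdr layout):

  offset  bytes         field
  0x00    04 00 00 00   n_namesz = 4  (strlen("GNU") + NUL)
  0x04    10 00 00 00   n_descsz = 16 (length of the build id)
  0x08    03 00 00 00   n_type   = 3  (NT_GNU_BUILD_ID)
  0x0c    47 4e 55 00   name     = "GNU\0"
  0x10    00 01 .. 0f   desc     = the 16 build-id bytes checked above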
diff --git a/test/tools/llvm-objcopy/ELF/add-section-remove.test b/test/tools/llvm-objcopy/ELF/add-section-remove.test
index 9e7abc7..fe462db 100644
--- a/test/tools/llvm-objcopy/ELF/add-section-remove.test
+++ b/test/tools/llvm-objcopy/ELF/add-section-remove.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
 # RUN: echo 0000 > %t.sec
-# RUN: llvm-objcopy -R .test2 -add-section=.test2=%t.sec %t %t2
-# RUN: llvm-readobj -file-headers -sections -section-data %t2 | FileCheck %s
+# RUN: llvm-objcopy -R .test2 --add-section=.test2=%t.sec %t %t2
+# RUN: llvm-readobj --file-headers --sections --section-data %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/add-section-special.test b/test/tools/llvm-objcopy/ELF/add-section-special.test
new file mode 100644
index 0000000..e83cd51
--- /dev/null
+++ b/test/tools/llvm-objcopy/ELF/add-section-special.test
@@ -0,0 +1,22 @@
+# Check the properties of added sections.
+# By default, sections are SHT_PROGBITS, but .note sections (excluding
+# .note.GNU-stack) are SHT_NOTE sections.
+
+# RUN: yaml2obj %s > %t.o
+# RUN: llvm-objcopy --add-section=.foo=/dev/null %t.o %t-foo.o
+# RUN: llvm-objcopy --add-section=.note.foo=/dev/null %t.o %t-regular-note.o
+# RUN: llvm-objcopy --add-section=.note.GNU-stack=/dev/null %t.o %t-gnu-stack.o
+# RUN: llvm-readelf --sections %t-foo.o | FileCheck %s --check-prefix=NORMAL
+# RUN: llvm-readelf --sections %t-regular-note.o | FileCheck %s --check-prefix=NOTE
+# RUN: llvm-readelf --sections %t-gnu-stack.o | FileCheck %s --check-prefix=GNU-STACK
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS64
+  Data:            ELFDATA2LSB
+  Type:            ET_REL
+  Machine:         EM_X86_64
+
+# NORMAL:    .foo            PROGBITS
+# NOTE:      .note.foo       NOTE
+# GNU-STACK: .note.GNU-stack PROGBITS
diff --git a/test/tools/llvm-objcopy/ELF/add-section.test b/test/tools/llvm-objcopy/ELF/add-section.test
index 369d119..bf3ffdb 100644
--- a/test/tools/llvm-objcopy/ELF/add-section.test
+++ b/test/tools/llvm-objcopy/ELF/add-section.test
@@ -1,8 +1,8 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy -O binary -j .test2 %t %t.sec
 # RUN: llvm-objcopy -R .test2 %t %t2
-# RUN: llvm-objcopy -add-section=.test2=%t.sec %t2 %t3
-# RUN: llvm-readobj -file-headers -sections -section-data %t3 | FileCheck %s
+# RUN: llvm-objcopy --add-section=.test2=%t.sec %t2 %t3
+# RUN: llvm-readobj --file-headers --sections --section-data %t3 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/armexidx-link.test b/test/tools/llvm-objcopy/ELF/armexidx-link.test
index ec942a1..596a780 100644
--- a/test/tools/llvm-objcopy/ELF/armexidx-link.test
+++ b/test/tools/llvm-objcopy/ELF/armexidx-link.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -remove-section=.text.bar %t %t2
-# RUN: llvm-readobj -sections %t2 | FileCheck %s
+# RUN: llvm-objcopy --remove-section=.text.bar %t %t2
+# RUN: llvm-readobj --sections %t2 | FileCheck %s
 
 # CHECK:          Index: 2
 # CHECK-NEXT:     Name: .ARM.exidx.text.foo (1)
diff --git a/test/tools/llvm-objcopy/ELF/auto-remove-shndx.test b/test/tools/llvm-objcopy/ELF/auto-remove-shndx.test
index 5452f34..5a23493 100644
--- a/test/tools/llvm-objcopy/ELF/auto-remove-shndx.test
+++ b/test/tools/llvm-objcopy/ELF/auto-remove-shndx.test
@@ -1,5 +1,5 @@
 # RUN: %python %p/Inputs/ungzip.py %p/Inputs/many-sections.o.gz > %t
 # RUN: llvm-objcopy -R .text -R s0 -R s1 -R s2 -R s3 -R s4 -R s5 -R s6 %t %t2
-# RUN: llvm-readobj -sections %t2 | FileCheck --check-prefix=SECS %s
+# RUN: llvm-readobj --sections %t2 | FileCheck --check-prefix=SECS %s
 
 # SECS-NOT: Name: .symtab_shndx
diff --git a/test/tools/llvm-objcopy/ELF/bad-output-format.test b/test/tools/llvm-objcopy/ELF/bad-output-format.test
new file mode 100644
index 0000000..e01d955
--- /dev/null
+++ b/test/tools/llvm-objcopy/ELF/bad-output-format.test
@@ -0,0 +1,13 @@
+# RUN: yaml2obj %s > %t.o
+
+# RUN: not llvm-objcopy -O xyz %t.o %t.2.o 2>&1 \
+# RUN:   | FileCheck %s --check-prefix=BAD-OUTPUT-FORMAT
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS32
+  Data:            ELFDATA2LSB
+  Type:            ET_EXEC
+  Machine:         EM_386
+
+# BAD-OUTPUT-FORMAT: Invalid output format: 'xyz'.
diff --git a/test/tools/llvm-objcopy/ELF/basic-archive-copy.test b/test/tools/llvm-objcopy/ELF/basic-archive-copy.test
index bc050c3..cf973d6c 100644
--- a/test/tools/llvm-objcopy/ELF/basic-archive-copy.test
+++ b/test/tools/llvm-objcopy/ELF/basic-archive-copy.test
@@ -8,9 +8,9 @@
 # RUN: llvm-ar p %t2.a > %t3
 # RUN: cmp %t2 %t3
 
-# RUN: llvm-readobj -sections %t2 | FileCheck %s
-# RUN: llvm-nm -print-armap %t.a | FileCheck --check-prefix=INDEX-TABLE %s
-# RUN: llvm-nm -print-armap %t2.a | FileCheck --check-prefix=INDEX-TABLE %s
+# RUN: llvm-readobj --sections %t2 | FileCheck %s
+# RUN: llvm-nm --print-armap %t.a | FileCheck --check-prefix=INDEX-TABLE %s
+# RUN: llvm-nm --print-armap %t2.a | FileCheck --check-prefix=INDEX-TABLE %s
 # Verify that llvm-objcopy has not modified the input.
 # RUN: cmp %t.copy.a %t.a
 
@@ -22,8 +22,8 @@
 # RUN: llvm-objcopy %t.no.index.a %t2.no.index.a
 # RUN: llvm-ar p %t2.no.index.a > %t4
 
-# RUN: llvm-nm -print-armap %t.no.index.a | FileCheck --check-prefix=NO-INDEX-TABLE %s
-# RUN: llvm-nm -print-armap %t2.no.index.a | FileCheck --check-prefix=NO-INDEX-TABLE %s
+# RUN: llvm-nm --print-armap %t.no.index.a | FileCheck --check-prefix=NO-INDEX-TABLE %s
+# RUN: llvm-nm --print-armap %t2.no.index.a | FileCheck --check-prefix=NO-INDEX-TABLE %s
 # RUN: cmp %t2 %t4
 
 # NO-INDEX-TABLE-NOT: Archive map
diff --git a/test/tools/llvm-objcopy/ELF/basic-copy.test b/test/tools/llvm-objcopy/ELF/basic-copy.test
index 266d48b..b6b0bcc 100644
--- a/test/tools/llvm-objcopy/ELF/basic-copy.test
+++ b/test/tools/llvm-objcopy/ELF/basic-copy.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/basic-keep.test b/test/tools/llvm-objcopy/ELF/basic-keep.test
index 79d7717..8488a26 100644
--- a/test/tools/llvm-objcopy/ELF/basic-keep.test
+++ b/test/tools/llvm-objcopy/ELF/basic-keep.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -strip-non-alloc -keep-section=.test %t %t2
-# RUN: llvm-strip --strip-all -keep-section=.test %t -o %t3
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-objcopy --strip-non-alloc --keep-section=.test %t %t2
+# RUN: llvm-strip --strip-all --keep-section=.test %t -o %t3
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 # RUN: cmp %t2 %t3
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/basic-only-section.test b/test/tools/llvm-objcopy/ELF/basic-only-section.test
index 536a0fc..e47a97a 100644
--- a/test/tools/llvm-objcopy/ELF/basic-only-section.test
+++ b/test/tools/llvm-objcopy/ELF/basic-only-section.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -only-section=.test %t %t2
+# RUN: llvm-objcopy --only-section=.test %t %t2
 # RUN: llvm-objcopy -j .test %t %t3
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 # RUN: diff %t2 %t3
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/basic-relocations.test b/test/tools/llvm-objcopy/ELF/basic-relocations.test
index e9afc05..e043041 100644
--- a/test/tools/llvm-objcopy/ELF/basic-relocations.test
+++ b/test/tools/llvm-objcopy/ELF/basic-relocations.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -relocations %t2 | FileCheck %s
+# RUN: llvm-readobj --relocations %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/binary-input-arch.test b/test/tools/llvm-objcopy/ELF/binary-input-arch.test
index 08f02da..4c04b6e 100644
--- a/test/tools/llvm-objcopy/ELF/binary-input-arch.test
+++ b/test/tools/llvm-objcopy/ELF/binary-input-arch.test
@@ -1,25 +1,25 @@
 # RUN: echo abcd > %t.txt
 
 # RUN: llvm-objcopy -I binary -B aarch64 %t.txt %t.aarch64.o
-# RUN: llvm-readobj -file-headers %t.aarch64.o | FileCheck %s --check-prefixes=CHECK,AARCH64,64
+# RUN: llvm-readobj --file-headers %t.aarch64.o | FileCheck %s --check-prefixes=CHECK,AARCH64,64
 
 # RUN: llvm-objcopy -I binary -B arm %t.txt %t.arm.o
-# RUN: llvm-readobj -file-headers %t.arm.o | FileCheck %s --check-prefixes=CHECK,ARM,32
+# RUN: llvm-readobj --file-headers %t.arm.o | FileCheck %s --check-prefixes=CHECK,ARM,32
 
 # RUN: llvm-objcopy -I binary -B i386 %t.txt %t.i386.o
-# RUN: llvm-readobj -file-headers %t.i386.o | FileCheck %s --check-prefixes=CHECK,I386,32
+# RUN: llvm-readobj --file-headers %t.i386.o | FileCheck %s --check-prefixes=CHECK,I386,32
 
 # RUN: llvm-objcopy -I binary -B i386:x86-64 %t.txt %t.i386_x86-64.o
-# RUN: llvm-readobj -file-headers %t.i386_x86-64.o | FileCheck %s --check-prefixes=CHECK,X86-64,64
+# RUN: llvm-readobj --file-headers %t.i386_x86-64.o | FileCheck %s --check-prefixes=CHECK,X86-64,64
 
 # RUN: llvm-objcopy -I binary -B powerpc:common64 %t.txt %t.powerpc_common64.o
-# RUN: llvm-readobj -file-headers %t.powerpc_common64.o | FileCheck %s --check-prefixes=CHECK,PPC,64
+# RUN: llvm-readobj --file-headers %t.powerpc_common64.o | FileCheck %s --check-prefixes=CHECK,PPC,64
 
 # RUN: llvm-objcopy -I binary -B sparc %t.txt %t.sparc.o
-# RUN: llvm-readobj -file-headers %t.sparc.o | FileCheck %s --check-prefixes=CHECK,SPARC,32
+# RUN: llvm-readobj --file-headers %t.sparc.o | FileCheck %s --check-prefixes=CHECK,SPARC,32
 
 # RUN: llvm-objcopy -I binary -B x86-64 %t.txt %t.x86-64.o
-# RUN: llvm-readobj -file-headers %t.x86-64.o | FileCheck %s --check-prefixes=CHECK,X86-64,64
+# RUN: llvm-readobj --file-headers %t.x86-64.o | FileCheck %s --check-prefixes=CHECK,X86-64,64
 
 # CHECK: Format:
 # AARCH64-SAME: ELF64-aarch64-little
diff --git a/test/tools/llvm-objcopy/ELF/binary-input-with-arch.test b/test/tools/llvm-objcopy/ELF/binary-input-with-arch.test
new file mode 100644
index 0000000..e8ae841
--- /dev/null
+++ b/test/tools/llvm-objcopy/ELF/binary-input-with-arch.test
@@ -0,0 +1,20 @@
+# RUN: echo -n abcd > %t.x-txt
+# Preserve input to verify it is not modified.
+# RUN: cp %t.x-txt %t-copy.txt
+# RUN: llvm-objcopy -I binary -B i386 -O elf64-x86-64 %t.x-txt %t.o
+# RUN: llvm-readobj --file-headers %t.o | FileCheck %s
+# RUN: cmp %t.x-txt %t-copy.txt
+
+# Many uses of objcopy pass flags without spaces; make sure that also works.
+# RUN: llvm-objcopy -Ibinary -Bi386 -Oelf64-x86-64 %t.x-txt %t-no-spaces.o
+# RUN: cmp %t.o %t-no-spaces.o
+
+# CHECK:      Format: ELF64-x86-64
+# CHECK-NEXT: Arch: x86_64
+# CHECK-NEXT: AddressSize: 64bit
+
+# CHECK: Class: 64-bit
+# CHECK: DataEncoding: LittleEndian
+# CHECK: Machine: EM_X86_64
+# CHECK: HeaderSize: 64
+# CHECK: SectionHeaderEntrySize: 64
diff --git a/test/tools/llvm-objcopy/ELF/binary-input.test b/test/tools/llvm-objcopy/ELF/binary-input.test
index ff1d381..d7a708d 100644
--- a/test/tools/llvm-objcopy/ELF/binary-input.test
+++ b/test/tools/llvm-objcopy/ELF/binary-input.test
@@ -2,7 +2,7 @@
 # Preserve input to verify it is not modified
 # RUN: cp %t.x-txt %t-copy.txt
 # RUN: llvm-objcopy -I binary -B i386:x86-64 %t.x-txt %t.o
-# RUN: llvm-readobj -sections -symbols %t.o | FileCheck %s
+# RUN: llvm-readobj --sections --symbols %t.o | FileCheck %s
 # RUN: cmp %t.x-txt %t-copy.txt
 
 # CHECK:      Sections [
@@ -45,7 +45,7 @@
 # CHECK-NEXT:     Size:
 # CHECK-NEXT:     Link: 1
 # CHECK-NEXT:     Info: 1
-# CHECK-NEXT:     AddressAlignment: 1
+# CHECK-NEXT:     AddressAlignment: 8
 # CHECK-NEXT:     EntrySize: 24
 # CHECK-NEXT:   }
 # CHECK-NEXT:   Section {
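
The AddressAlignment change from 1 to 8 above lands on the .symtab section (the 24-byte EntrySize marks its entries as Elf64_Sym records); presumably llvm-objcopy now gives the symbol table the natural 8-byte alignment of its 64-bit fields. A quick spot check, file name hypothetical:

  llvm-readobj --sections blob.o | grep AddressAlignment   # the .symtab entry should read 8
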
diff --git a/test/tools/llvm-objcopy/ELF/binary-remove-all-but-one.test b/test/tools/llvm-objcopy/ELF/binary-remove-all-but-one.test
index e1ef7d6..857b76d 100644
--- a/test/tools/llvm-objcopy/ELF/binary-remove-all-but-one.test
+++ b/test/tools/llvm-objcopy/ELF/binary-remove-all-but-one.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text -R .text3 -O binary %t %t2
 # RUN: od -Ax -t x1 %t2 | FileCheck %s
-# RUN: wc -c %t2 | FileCheck %s -check-prefix=SIZE
+# RUN: wc -c %t2 | FileCheck %s --check-prefix=SIZE
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/binary-remove-end.test b/test/tools/llvm-objcopy/ELF/binary-remove-end.test
index 1c2994b..eddeaf1 100644
--- a/test/tools/llvm-objcopy/ELF/binary-remove-end.test
+++ b/test/tools/llvm-objcopy/ELF/binary-remove-end.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text3 -O binary %t %t2
 # RUN: od -Ax -v -t x1 %t2 | FileCheck %s
-# RUN: wc -c %t2 | FileCheck %s -check-prefix=SIZE
+# RUN: wc -c %t2 | FileCheck %s --check-prefix=SIZE
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/binary-remove-middle.test b/test/tools/llvm-objcopy/ELF/binary-remove-middle.test
index 6d69907..fbc2f6a 100644
--- a/test/tools/llvm-objcopy/ELF/binary-remove-middle.test
+++ b/test/tools/llvm-objcopy/ELF/binary-remove-middle.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text2 -O binary %t %t2
 # RUN: od -Ax -v -t x1 %t2 | FileCheck %s
-# RUN: wc -c %t2 | FileCheck %s -check-prefix=SIZE
+# RUN: wc -c %t2 | FileCheck %s --check-prefix=SIZE
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/check-addr-offset-align.test b/test/tools/llvm-objcopy/ELF/check-addr-offset-align.test
index ca2367b..bc219ff 100644
--- a/test/tools/llvm-objcopy/ELF/check-addr-offset-align.test
+++ b/test/tools/llvm-objcopy/ELF/check-addr-offset-align.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -program-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --program-headers %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/common-symbol.test b/test/tools/llvm-objcopy/ELF/common-symbol.test
index b659356..9ec49c7 100644
--- a/test/tools/llvm-objcopy/ELF/common-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/common-symbol.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib-gnu.test b/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib-gnu.test
index 7199b2e..0a360d0 100644
--- a/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib-gnu.test
+++ b/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib-gnu.test
@@ -4,12 +4,12 @@
 # RUN: llvm-objcopy --compress-debug-sections=zlib-gnu %t.o %t-compressed.o
 # RUN: llvm-objcopy --decompress-debug-sections %t-compressed.o %t-decompressed.o
 
-# RUN: llvm-objdump -s %t.o -section=.debug_foo | FileCheck %s
+# RUN: llvm-objdump -s %t.o --section=.debug_foo | FileCheck %s
 # RUN: llvm-objdump -s %t-compressed.o | FileCheck %s --check-prefix=CHECK-COMPRESSED
-# RUN: llvm-readobj -relocations -s %t-compressed.o | FileCheck %s --check-prefix=CHECK-FLAGS
-# RUN: llvm-readobj -relocations -s %t-decompressed.o | FileCheck %s --check-prefix=CHECK-HEADER
-# RUN: llvm-readobj -relocations -s %t.o | FileCheck %s --check-prefix=CHECK-HEADER
-# RUN: llvm-objdump -s %t-decompressed.o -section=.debug_foo | FileCheck %s
+# RUN: llvm-readobj --relocations -s %t-compressed.o | FileCheck %s --check-prefix=CHECK-FLAGS
+# RUN: llvm-readobj --relocations -s %t-decompressed.o | FileCheck %s --check-prefix=CHECK-HEADER
+# RUN: llvm-readobj --relocations -s %t.o | FileCheck %s --check-prefix=CHECK-HEADER
+# RUN: llvm-objdump -s %t-decompressed.o --section=.debug_foo | FileCheck %s
 
 # CHECK: .debug_foo:
 # CHECK-NEXT: 0000 00000000 00000000
diff --git a/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test b/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test
index d2ca6ef..1e7dfc9 100644
--- a/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test
+++ b/test/tools/llvm-objcopy/ELF/compress-debug-sections-zlib.test
@@ -4,12 +4,12 @@
 # RUN: llvm-objcopy --compress-debug-sections=zlib %t.o %t-compressed.o
 # RUN: llvm-objcopy --decompress-debug-sections %t-compressed.o %t-decompressed.o
 
-# RUN: llvm-objdump -s %t.o -section=.debug_foo | FileCheck %s
+# RUN: llvm-objdump -s %t.o --section=.debug_foo | FileCheck %s
 # RUN: llvm-objdump -s %t-compressed.o | FileCheck %s --check-prefix=CHECK-COMPRESSED
-# RUN: llvm-readobj -relocations -s %t-compressed.o | FileCheck %s --check-prefix=CHECK-FLAGS
-# RUN: llvm-readobj -relocations -s %t-decompressed.o | FileCheck %s --check-prefix=CHECK-HEADER
-# RUN: llvm-readobj -relocations -s %t.o | FileCheck %s --check-prefix=CHECK-HEADER
-# RUN: llvm-objdump -s %t-decompressed.o -section=.debug_foo | FileCheck %s
+# RUN: llvm-readobj --relocations -s %t-compressed.o | FileCheck %s --check-prefix=CHECK-FLAGS
+# RUN: llvm-readobj --relocations -s %t-decompressed.o | FileCheck %s --check-prefix=CHECK-HEADER
+# RUN: llvm-readobj --relocations -s %t.o | FileCheck %s --check-prefix=CHECK-HEADER
+# RUN: llvm-objdump -s %t-decompressed.o --section=.debug_foo | FileCheck %s
 
 # CHECK: .debug_foo:
 # CHECK-NEXT: 0000 00000000 00000000
diff --git a/test/tools/llvm-objcopy/ELF/compress-debug-sections.test b/test/tools/llvm-objcopy/ELF/compress-debug-sections.test
index 6fadad3..014303f 100644
--- a/test/tools/llvm-objcopy/ELF/compress-debug-sections.test
+++ b/test/tools/llvm-objcopy/ELF/compress-debug-sections.test
@@ -12,9 +12,9 @@
 # RUN: llvm-objcopy --decompress-debug-sections %tzg.o %t3.o
 
 # Using redirects to keep llvm-objdump from printing the filename.
-# RUN: llvm-objdump -s -section=.debug_str - < %t.o  > %t.txt
-# RUN: llvm-objdump -s -section=.debug_str - < %t2.o > %t2.txt
-# RUN: llvm-objdump -s -section=.debug_str - < %t3.o > %t3.txt
+# RUN: llvm-objdump -s --section=.debug_str - < %t.o  > %t.txt
+# RUN: llvm-objdump -s --section=.debug_str - < %t2.o > %t2.txt
+# RUN: llvm-objdump -s --section=.debug_str - < %t3.o > %t3.txt
 
 # RUN: diff %t.txt %t2.txt
 # RUN: diff %t.txt %t3.txt
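
The redirect trick above keeps the per-run %t file names out of llvm-objdump's output, so the dumps of the original, compressed-then-decompressed, and zlib-gnu round-trip objects can be diffed byte for byte. The same comparison outside lit, names hypothetical:

  llvm-objdump -s --section=.debug_str - < orig.o  > a.txt
  llvm-objdump -s --section=.debug_str - < round.o > b.txt
  diff a.txt b.txt   # empty diff: decompression restored the section contents
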
diff --git a/test/tools/llvm-objcopy/ELF/copy-osabi.test b/test/tools/llvm-objcopy/ELF/copy-osabi.test
new file mode 100644
index 0000000..75a22bc
--- /dev/null
+++ b/test/tools/llvm-objcopy/ELF/copy-osabi.test
@@ -0,0 +1,16 @@
+# RUN: yaml2obj %s > %t
+# RUN: llvm-objcopy %t %t2
+# RUN: llvm-readobj --file-headers %t2 | FileCheck %s
+
+## Check that llvm-objcopy preserves the OSABI and ABIVersion fields.
+# CHECK: OS/ABI: FreeBSD (0x9)
+# CHECK: ABIVersion: 1
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS64
+  Data:            ELFDATA2LSB
+  OSABI:           ELFOSABI_FREEBSD
+  ABIVersion:      1
+  Type:            ET_REL
+  Machine:         EM_AARCH64
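
copy-osabi.test pins down that the OSABI and ABIVersion bytes of e_ident survive a plain copy. A quick way to eyeball the same fields, object name hypothetical:

  llvm-readobj --file-headers copy.o | grep -E 'OS/ABI|ABIVersion'
  # For the input above: OS/ABI: FreeBSD (0x9), ABIVersion: 1
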
diff --git a/test/tools/llvm-objcopy/ELF/cross-arch-headers.test b/test/tools/llvm-objcopy/ELF/cross-arch-headers.test
new file mode 100644
index 0000000..ec76fa6
--- /dev/null
+++ b/test/tools/llvm-objcopy/ELF/cross-arch-headers.test
@@ -0,0 +1,71 @@
+# RUN: yaml2obj %s > %t.o
+
+# RUN: llvm-objcopy %t.o -O elf32-i386 %t.elf32_i386.o
+# RUN: llvm-readobj --file-headers %t.elf32_i386.o | FileCheck %s --check-prefixes=CHECK,I386,32
+
+# RUN: llvm-objcopy %t.o -O elf32-powerpcle %t.elf32_ppcle.o
+# RUN: llvm-readobj --file-headers %t.elf32_ppcle.o | FileCheck %s --check-prefixes=CHECK,PPC,32
+
+# RUN: llvm-objcopy %t.o -O elf32-x86-64 %t.elf32_x86_64.o
+# RUN: llvm-readobj --file-headers %t.elf32_x86_64.o | FileCheck %s --check-prefixes=CHECK,X86-64,32
+
+# RUN: llvm-objcopy %t.o -O elf64-powerpcle %t.elf64_ppcle.o
+# RUN: llvm-readobj --file-headers %t.elf64_ppcle.o | FileCheck %s --check-prefixes=CHECK,PPC64,64
+
+# RUN: llvm-objcopy %t.o -O elf64-x86-64 %t.elf64_x86_64.o
+# RUN: llvm-readobj --file-headers %t.elf64_x86_64.o | FileCheck %s --check-prefixes=CHECK,X86-64,64
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS32
+  Data:            ELFDATA2LSB
+  Type:            ET_EXEC
+  Machine:         EM_386
+Sections:
+  - Name:            .text
+    Type:            SHT_PROGBITS
+    Flags:           [ SHF_ALLOC, SHF_EXECINSTR ]
+  - Name:            .data
+    Type:            SHT_PROGBITS
+    Flags:           [ SHF_ALLOC ]
+Symbols:
+  Global:
+    - Name:     foo
+      Type:     STT_FUNC
+      Section:  .text
+      Value:    0x1234
+    - Name:     bar
+      Type:     STT_OBJECT
+      Section:  .data
+      Value:    0xabcd
+
+# CHECK: Format:
+# 32-SAME:      ELF32-
+# 64-SAME:      ELF64-
+# I386-SAME:    i386
+# PPC-SAME:     ppc
+# PPC64-SAME:   ppc64
+# X86-64-SAME:  x86-64
+
+# I386-NEXT:    Arch: i386
+# PPC-NEXT:     Arch: powerpc
+# PPC64-NEXT:   Arch: powerpc64le
+# X86-64-NEXT:  Arch: x86_64
+
+# 32-NEXT:      AddressSize: 32bit
+# 64-NEXT:      AddressSize: 64bit
+
+# 32:     Class: 32-bit
+# 64:     Class: 64-bit
+# CHECK:  DataEncoding: LittleEndian
+
+# I386:   Machine: EM_386
+# PPC:    Machine: EM_PPC
+# PPC64:  Machine: EM_PPC64
+# X86-64: Machine: EM_X86_64
+
+# 32: HeaderSize: 52
+# 64: HeaderSize: 64
+
+# 32: SectionHeaderEntrySize: 40
+# 64: SectionHeaderEntrySize: 64
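
The 32/64 expectations above follow directly from the ELF layout: an ELF32 header is 52 bytes with 40-byte section header entries, while ELF64 uses 64 bytes for both, so a single -O switch flips all four values together. Sketch, input object hypothetical:

  llvm-objcopy in32.o -O elf64-x86-64 out64.o
  llvm-readobj --file-headers out64.o | grep -E 'HeaderSize|SectionHeaderEntrySize'
  # HeaderSize: 64, SectionHeaderEntrySize: 64
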
diff --git a/test/tools/llvm-objcopy/ELF/cross-arch-sections-symbols.test b/test/tools/llvm-objcopy/ELF/cross-arch-sections-symbols.test
new file mode 100644
index 0000000..1d959a9
--- /dev/null
+++ b/test/tools/llvm-objcopy/ELF/cross-arch-sections-symbols.test
@@ -0,0 +1,153 @@
+# RUN: yaml2obj %s > %t.o
+# Preserve input to verify it is not modified.
+# RUN: cp %t.o %t-copy.o
+# RUN: llvm-objcopy %t.o -O elf64-x86-64 %t.2.o
+# RUN: llvm-readobj --sections --symbols %t.2.o | FileCheck %s
+# RUN: cmp %t.o %t-copy.o
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS32
+  Data:            ELFDATA2LSB
+  Type:            ET_EXEC
+  Machine:         EM_386
+Sections:
+  - Name:            .text
+    Type:            SHT_PROGBITS
+    Flags:           [ SHF_ALLOC, SHF_EXECINSTR ]
+    Size:            32
+  - Name:            .data
+    Type:            SHT_PROGBITS
+    Flags:           [ SHF_ALLOC ]
+    Content:         DEADBEEF
+    Size:            16
+Symbols:
+  Global:
+    - Name:     foo
+      Type:     STT_FUNC
+      Section:  .text
+      Value:    16
+      Size:     8
+    - Name:     bar
+      Type:     STT_OBJECT
+      Section:  .data
+      Size:     16
+
+# CHECK:      Sections [
+# CHECK-NEXT:   Section {
+# CHECK-NEXT:     Index: 0
+# CHECK-NEXT:     Name:  (0)
+# CHECK-NEXT:     Type: SHT_NULL (0x0)
+# CHECK-NEXT:     Flags [ (0x0)
+# CHECK-NEXT:     ]
+# CHECK-NEXT:     Address:
+# CHECK-NEXT:     Offset:
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Link: 0
+# CHECK-NEXT:     Info: 0
+# CHECK-NEXT:     AddressAlignment: 0
+# CHECK-NEXT:     EntrySize: 0
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Section {
+# CHECK-NEXT:     Index: 1
+# CHECK-NEXT:     Name: .text
+# CHECK-NEXT:     Type: SHT_PROGBITS (0x1)
+# CHECK-NEXT:     Flags [ (0x6)
+# CHECK-NEXT:       SHF_ALLOC (0x2)
+# CHECK-NEXT:       SHF_EXECINSTR (0x4)
+# CHECK-NEXT:     ]
+# CHECK-NEXT:     Address:
+# CHECK-NEXT:     Offset:
+# CHECK-NEXT:     Size: 32
+# CHECK-NEXT:     Link: 0
+# CHECK-NEXT:     Info: 0
+# CHECK-NEXT:     AddressAlignment: 0
+# CHECK-NEXT:     EntrySize: 0
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Section {
+# CHECK-NEXT:     Index: 2
+# CHECK-NEXT:     Name: .data
+# CHECK-NEXT:     Type: SHT_PROGBITS (0x1)
+# CHECK-NEXT:     Flags [ (0x2)
+# CHECK-NEXT:       SHF_ALLOC (0x2)
+# CHECK-NEXT:     ]
+# CHECK-NEXT:     Address:
+# CHECK-NEXT:     Offset:
+# CHECK-NEXT:     Size: 16
+# CHECK-NEXT:     Link: 0
+# CHECK-NEXT:     Info: 0
+# CHECK-NEXT:     AddressAlignment: 0
+# CHECK-NEXT:     EntrySize: 0
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Section {
+# CHECK-NEXT:     Index: 3
+# CHECK-NEXT:     Name: .symtab
+# CHECK-NEXT:     Type: SHT_SYMTAB (0x2)
+# CHECK-NEXT:     Flags [ (0x0)
+# CHECK-NEXT:     ]
+# CHECK-NEXT:     Address:
+# CHECK-NEXT:     Offset:
+# CHECK-NEXT:     Size: 72
+# CHECK-NEXT:     Link: 4
+# CHECK-NEXT:     Info: 1
+# CHECK-NEXT:     AddressAlignment: 8
+# CHECK-NEXT:     EntrySize: 24
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Section {
+# CHECK-NEXT:     Index: 4
+# CHECK-NEXT:     Name: .strtab
+# CHECK-NEXT:     Type: SHT_STRTAB (0x3)
+# CHECK-NEXT:     Flags [ (0x0)
+# CHECK-NEXT:     ]
+# CHECK-NEXT:     Address:
+# CHECK-NEXT:     Offset:
+# CHECK-NEXT:     Size: 10
+# CHECK-NEXT:     Link: 0
+# CHECK-NEXT:     Info: 0
+# CHECK-NEXT:     AddressAlignment: 1
+# CHECK-NEXT:     EntrySize: 0
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Section {
+# CHECK-NEXT:     Index: 5
+# CHECK-NEXT:     Name: .shstrtab
+# CHECK-NEXT:     Type: SHT_STRTAB (0x3)
+# CHECK-NEXT:     Flags [ (0x0)
+# CHECK-NEXT:     ]
+# CHECK-NEXT:     Address:
+# CHECK-NEXT:     Offset:
+# CHECK-NEXT:     Size: 39
+# CHECK-NEXT:     Link: 0
+# CHECK-NEXT:     Info: 0
+# CHECK-NEXT:     AddressAlignment: 1
+# CHECK-NEXT:     EntrySize: 0
+# CHECK-NEXT:   }
+# CHECK-NEXT: ]
+# CHECK-NEXT: Symbols [
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name:
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Local (0x0)
+# CHECK-NEXT:     Type: None (0x0)
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: foo
+# CHECK-NEXT:     Value: 0x10
+# CHECK-NEXT:     Size: 8
+# CHECK-NEXT:     Binding: Global (0x1)
+# CHECK-NEXT:     Type: Function (0x2)
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .text
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: bar
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 16
+# CHECK-NEXT:     Binding: Global (0x1)
+# CHECK-NEXT:     Type: Object (0x1)
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .data
+# CHECK-NEXT:   }
+# CHECK-NEXT: ]
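
One arithmetic check hiding in the expectations above: .symtab reports Size 72 with EntrySize 24, i.e.

  # 3 symbols (null + foo + bar) x 24 bytes (sizeof(Elf64_Sym)) = 72 bytes

so the ELF32 input symbols really were rewritten as 24-byte ELF64 records rather than copied as 16-byte ELF32 ones.
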
diff --git a/test/tools/llvm-objcopy/ELF/discard-all.test b/test/tools/llvm-objcopy/ELF/discard-all.test
index db06869..cc676b5 100644
--- a/test/tools/llvm-objcopy/ELF/discard-all.test
+++ b/test/tools/llvm-objcopy/ELF/discard-all.test
@@ -3,7 +3,7 @@
 # RUN: llvm-objcopy --discard-all %t %t2
 # Verify that llvm-objcopy has not modified the input.
 # RUN: cmp %t %t1
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 # RUN: llvm-objcopy -x %t %t3
 # Verify that llvm-objcopy has not modified the input.
diff --git a/test/tools/llvm-objcopy/ELF/drawf-fission.test b/test/tools/llvm-objcopy/ELF/drawf-fission.test
index 112bffb..76f74ce 100644
--- a/test/tools/llvm-objcopy/ELF/drawf-fission.test
+++ b/test/tools/llvm-objcopy/ELF/drawf-fission.test
@@ -1,8 +1,8 @@
-# RUN: llvm-objcopy -extract-dwo %p/Inputs/dwarf.dwo %t
-# RUN: llvm-objcopy -strip-dwo %p/Inputs/dwarf.dwo %t2
-# RUN: llvm-objcopy -split-dwo=%t3 %p/Inputs/dwarf.dwo %t4
-# RUN: llvm-readobj -file-headers -sections %t | FileCheck %s -check-prefix=DWARF
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s -check-prefix=STRIP
+# RUN: llvm-objcopy --extract-dwo %p/Inputs/dwarf.dwo %t
+# RUN: llvm-objcopy --strip-dwo %p/Inputs/dwarf.dwo %t2
+# RUN: llvm-objcopy --split-dwo=%t3 %p/Inputs/dwarf.dwo %t4
+# RUN: llvm-readobj --file-headers --sections %t | FileCheck %s --check-prefix=DWARF
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s --check-prefix=STRIP
 # RUN: diff %t %t3
 # RUN: diff %t2 %t4
 
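The two diff lines above encode the invariant this test is after: --split-dwo=OUT behaves like running --extract-dwo into OUT and --strip-dwo on the main output in a single pass. Lit-free sketch, input name hypothetical:

  llvm-objcopy --extract-dwo app.o dwo-only.o
  llvm-objcopy --strip-dwo app.o stripped.o
  llvm-objcopy --split-dwo=split.dwo app.o split-stripped.o
  cmp dwo-only.o split.dwo          # same extracted .dwo payload
  cmp stripped.o split-stripped.o   # same stripped main object
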
diff --git a/test/tools/llvm-objcopy/ELF/dump-section.test b/test/tools/llvm-objcopy/ELF/dump-section.test
index 8e66f65..7a25f9c 100644
--- a/test/tools/llvm-objcopy/ELF/dump-section.test
+++ b/test/tools/llvm-objcopy/ELF/dump-section.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy -O binary -j .text %t %t2
-# RUN: llvm-objcopy -O binary -only-section .text %t %t3
+# RUN: llvm-objcopy -O binary --only-section .text %t %t3
 # RUN: llvm-objcopy --dump-section .text=%t4 %t %t5
 # RUN: llvm-objcopy --dump-section .foo=%t6 %t %t7
 # RUN: not llvm-objcopy --dump-section .bar=%t8 %t %t9 2>&1 | FileCheck %s --check-prefix=NOBITS
diff --git a/test/tools/llvm-objcopy/ELF/dynamic-relocations.test b/test/tools/llvm-objcopy/ELF/dynamic-relocations.test
index e544e2d..1c0fabc 100644
--- a/test/tools/llvm-objcopy/ELF/dynamic-relocations.test
+++ b/test/tools/llvm-objcopy/ELF/dynamic-relocations.test
@@ -1,5 +1,5 @@
 # RUN: llvm-objcopy %p/Inputs/dynrel.elf %t
-# RUN: llvm-readobj -sections -section-data %t | FileCheck %s
+# RUN: llvm-readobj --sections --section-data %t | FileCheck %s
 
 #CHECK:         Name: .rela.plt
 #CHECK-NEXT:    Type: SHT_RELA
diff --git a/test/tools/llvm-objcopy/ELF/dynamic.test b/test/tools/llvm-objcopy/ELF/dynamic.test
index 3e9ea20..5eb8809 100644
--- a/test/tools/llvm-objcopy/ELF/dynamic.test
+++ b/test/tools/llvm-objcopy/ELF/dynamic.test
@@ -1,6 +1,6 @@
 # RUN: llvm-objcopy %p/Inputs/dynamic.so %t
-# RUN: llvm-readobj -dynamic-table %t | FileCheck %s
-# RUN: llvm-readobj -sections %t | FileCheck -check-prefix=LINK %s
+# RUN: llvm-readobj --dynamic-table %t | FileCheck %s
+# RUN: llvm-readobj --sections %t | FileCheck --check-prefix=LINK %s
 
 #CHECK: DynamicSection [
 #CHECK-NEXT:  Tag                Type                 Name/Value
diff --git a/test/tools/llvm-objcopy/ELF/dynstr.test b/test/tools/llvm-objcopy/ELF/dynstr.test
index 6e19306..68ec586 100644
--- a/test/tools/llvm-objcopy/ELF/dynstr.test
+++ b/test/tools/llvm-objcopy/ELF/dynstr.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -sections -section-data %t2 | FileCheck %s
+# RUN: llvm-readobj --sections --section-data %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/dynsym.test b/test/tools/llvm-objcopy/ELF/dynsym.test
index b7d0953..6a7eef3 100644
--- a/test/tools/llvm-objcopy/ELF/dynsym.test
+++ b/test/tools/llvm-objcopy/ELF/dynsym.test
@@ -1,6 +1,6 @@
 # RUN: llvm-objcopy %p/Inputs/dynsym.so %t
-# RUN: llvm-readobj -dyn-symbols %t | FileCheck %s
-# RUN: llvm-readobj -sections %t | FileCheck -check-prefix=LINK %s
+# RUN: llvm-readobj --dyn-symbols %t | FileCheck %s
+# RUN: llvm-readobj --sections %t | FileCheck --check-prefix=LINK %s
 
 #LINK:         Name: .dynsym
 #LINK-NEXT:    Type: SHT_DYNSYM
@@ -17,7 +17,7 @@
 
 #CHECK: DynamicSymbols [
 #CHECK-NEXT:  Symbol {
-#CHECK-NEXT:    Name: @ (0)
+#CHECK-NEXT:    Name: (0)
 #CHECK-NEXT:    Value: 0x0
 #CHECK-NEXT:    Size: 0
 #CHECK-NEXT:    Binding: Local
@@ -26,7 +26,7 @@
 #CHECK-NEXT:    Section: Undefined (0x0)
 #CHECK-NEXT:  }
 #CHECK-NEXT:  Symbol {
-#CHECK-NEXT:    Name: bang@ (1)
+#CHECK-NEXT:    Name: bang (1)
 #CHECK-NEXT:    Value: 0x4000
 #CHECK-NEXT:    Size: 8
 #CHECK-NEXT:    Binding: Global (0x1)
@@ -35,7 +35,7 @@
 #CHECK-NEXT:    Section: .bss (0x7)
 #CHECK-NEXT:  }
 #CHECK-NEXT:  Symbol {
-#CHECK-NEXT:    Name: bar@ (6)
+#CHECK-NEXT:    Name: bar (6)
 #CHECK-NEXT:    Value: 0x1001
 #CHECK-NEXT:    Size: 0
 #CHECK-NEXT:    Binding: Global (0x1)
@@ -44,7 +44,7 @@
 #CHECK-NEXT:    Section: .text (0x4)
 #CHECK-NEXT:  }
 #CHECK-NEXT:  Symbol {
-#CHECK-NEXT:    Name: baz@ (10)
+#CHECK-NEXT:    Name: baz (10)
 #CHECK-NEXT:    Value: 0x2000
 #CHECK-NEXT:    Size: 0
 #CHECK-NEXT:    Binding: Global (0x1)
@@ -53,7 +53,7 @@
 #CHECK-NEXT:    Section: .data (0x5)
 #CHECK-NEXT:  }
 #CHECK-NEXT:  Symbol {
-#CHECK-NEXT:    Name: foo@ (14)
+#CHECK-NEXT:    Name: foo (14)
 #CHECK-NEXT:    Value: 0x1000
 #CHECK-NEXT:    Size: 0
 #CHECK-NEXT:    Binding: Global (0x1)
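
The expectation updates in this hunk track a change in llvm-readobj's dynamic-symbol printing: a bare '@' is no longer appended to names that carry no version, so 'bang@' becomes plain 'bang'; versioned names are presumably still printed as name@version. For example:

  llvm-readobj --dyn-symbols dynsym.so | grep 'Name:'
  #   Name: bang (1)    -- previously printed as 'bang@ (1)'
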
diff --git a/test/tools/llvm-objcopy/ELF/elf32be.test b/test/tools/llvm-objcopy/ELF/elf32be.test
index 786c862..374b799 100644
--- a/test/tools/llvm-objcopy/ELF/elf32be.test
+++ b/test/tools/llvm-objcopy/ELF/elf32be.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/elf32le.test b/test/tools/llvm-objcopy/ELF/elf32le.test
index ed7d5ff..b2cb40c 100644
--- a/test/tools/llvm-objcopy/ELF/elf32le.test
+++ b/test/tools/llvm-objcopy/ELF/elf32le.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/elf64be.test b/test/tools/llvm-objcopy/ELF/elf64be.test
index 49e707e..97977b0 100644
--- a/test/tools/llvm-objcopy/ELF/elf64be.test
+++ b/test/tools/llvm-objcopy/ELF/elf64be.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/empty-section.test b/test/tools/llvm-objcopy/ELF/empty-section.test
index 8847327..5c55383 100644
--- a/test/tools/llvm-objcopy/ELF/empty-section.test
+++ b/test/tools/llvm-objcopy/ELF/empty-section.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/explicit-keep-remove.test b/test/tools/llvm-objcopy/ELF/explicit-keep-remove.test
index fea708e..98ad3ae 100644
--- a/test/tools/llvm-objcopy/ELF/explicit-keep-remove.test
+++ b/test/tools/llvm-objcopy/ELF/explicit-keep-remove.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -R=.test -keep-section=.test %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-objcopy -R=.test --keep-section=.test %t %t2
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/explicit-only-section-remove.test b/test/tools/llvm-objcopy/ELF/explicit-only-section-remove.test
index 15a3159..8a152e5 100644
--- a/test/tools/llvm-objcopy/ELF/explicit-only-section-remove.test
+++ b/test/tools/llvm-objcopy/ELF/explicit-only-section-remove.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -R=.test -only-section=.test %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-objcopy -R=.test --only-section=.test %t %t2
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/globalize.test b/test/tools/llvm-objcopy/ELF/globalize.test
index 4941cf1..c05a0a6 100644
--- a/test/tools/llvm-objcopy/ELF/globalize.test
+++ b/test/tools/llvm-objcopy/ELF/globalize.test
@@ -3,7 +3,7 @@
 # RUN:   --globalize-symbol Local \
 # RUN:   --globalize-symbol Weak \
 # RUN:   --globalize-symbol WeakUndef %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/group-big-endian.test b/test/tools/llvm-objcopy/ELF/group-big-endian.test
index d5a0260..dd5ca88 100644
--- a/test/tools/llvm-objcopy/ELF/group-big-endian.test
+++ b/test/tools/llvm-objcopy/ELF/group-big-endian.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -remove-section=.text.bar %t %t2
-# RUN: llvm-readobj -elf-section-groups %t2 | FileCheck %s
+# RUN: llvm-objcopy --remove-section=.text.bar %t %t2
+# RUN: llvm-readobj --elf-section-groups %t2 | FileCheck %s
 
 # In this test the section .text.bar is removed; as a result,
 # the indices of the sections that go after .text.bar will change,
diff --git a/test/tools/llvm-objcopy/ELF/group-unchanged.test b/test/tools/llvm-objcopy/ELF/group-unchanged.test
index 9e86172..31fba11 100644
--- a/test/tools/llvm-objcopy/ELF/group-unchanged.test
+++ b/test/tools/llvm-objcopy/ELF/group-unchanged.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -remove-section=.text.bar %t %t2
-# RUN: llvm-readobj -elf-section-groups %t2 | FileCheck %s
+# RUN: llvm-objcopy --remove-section=.text.bar %t %t2
+# RUN: llvm-readobj --elf-section-groups %t2 | FileCheck %s
 
 # In this test the section .text.bar is removed; since this section
 # goes after all the sections comprising a group, the content of the
diff --git a/test/tools/llvm-objcopy/ELF/group.test b/test/tools/llvm-objcopy/ELF/group.test
index 4023f12..2e058f5 100644
--- a/test/tools/llvm-objcopy/ELF/group.test
+++ b/test/tools/llvm-objcopy/ELF/group.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -remove-section=.text.bar %t %t2
-# RUN: llvm-readobj -elf-section-groups %t2 | FileCheck %s
+# RUN: llvm-objcopy --remove-section=.text.bar %t %t2
+# RUN: llvm-readobj --elf-section-groups %t2 | FileCheck %s
 
 # In this test the section .text.bar is removed; as a result,
 # the indices of the sections that go after .text.bar will change,
diff --git a/test/tools/llvm-objcopy/ELF/help-message.test b/test/tools/llvm-objcopy/ELF/help-message.test
index 504ce21..2dc92f6 100644
--- a/test/tools/llvm-objcopy/ELF/help-message.test
+++ b/test/tools/llvm-objcopy/ELF/help-message.test
@@ -1,15 +1,19 @@
+# RUN: llvm-objcopy -help | FileCheck --check-prefix=OBJCOPY-USAGE %s
 # RUN: llvm-objcopy --help | FileCheck --check-prefix=OBJCOPY-USAGE %s
 # RUN: not llvm-objcopy 2>&1 | FileCheck --check-prefix=OBJCOPY-USAGE %s
 # RUN: not llvm-objcopy -abcabc 2>&1 | FileCheck --check-prefix=UNKNOWN-ARG %s
-# RUN: not llvm-objcopy -strip-debug 2>&1 | FileCheck %s --check-prefix=NO-INPUT-FILES
+# RUN: not llvm-objcopy --abcabc 2>&1 | FileCheck --check-prefix=UNKNOWN-ARG %s
+# RUN: not llvm-objcopy --strip-debug 2>&1 | FileCheck %s --check-prefix=NO-INPUT-FILES
 
+# RUN: llvm-strip -help | FileCheck --check-prefix=STRIP-USAGE %s
 # RUN: llvm-strip --help | FileCheck --check-prefix=STRIP-USAGE %s
 # RUN: not llvm-strip 2>&1 | FileCheck --check-prefix=STRIP-USAGE %s
 # RUN: not llvm-strip -abcabc 2>&1 | FileCheck --check-prefix=UNKNOWN-ARG %s
-# RUN: not llvm-strip -strip-debug 2>&1 | FileCheck %s --check-prefix=NO-INPUT-FILES
+# RUN: not llvm-strip --abcabc 2>&1 | FileCheck --check-prefix=UNKNOWN-ARG %s
+# RUN: not llvm-strip --strip-debug 2>&1 | FileCheck %s --check-prefix=NO-INPUT-FILES
 
 
 # OBJCOPY-USAGE:  USAGE: llvm-objcopy
 # STRIP-USAGE:    USAGE: llvm-strip
-# UNKNOWN-ARG:    unknown argument '-abcabc'
+# UNKNOWN-ARG:    unknown argument '{{-+}}abcabc'
 # NO-INPUT-FILES: No input file specified
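
The {{-+}} in UNKNOWN-ARG above is a FileCheck regex matching one or more dashes, so one pattern now covers the diagnostic for both -abcabc and --abcabc. Both of these lit-style invocations pass against that single check line:

  not llvm-objcopy -abcabc  2>&1 | FileCheck --check-prefix=UNKNOWN-ARG help-message.test
  not llvm-objcopy --abcabc 2>&1 | FileCheck --check-prefix=UNKNOWN-ARG help-message.test
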
diff --git a/test/tools/llvm-objcopy/ELF/keep-file-symbols.test b/test/tools/llvm-objcopy/ELF/keep-file-symbols.test
index 8554688..3d7299d 100644
--- a/test/tools/llvm-objcopy/ELF/keep-file-symbols.test
+++ b/test/tools/llvm-objcopy/ELF/keep-file-symbols.test
@@ -1,8 +1,8 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --strip-all --keep-file-symbols %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s --check-prefix=STRIPALL
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s --check-prefix=STRIPALL
 # RUN: llvm-objcopy --keep-file-symbols --strip-symbol foo %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s --check-prefix=STRIP
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s --check-prefix=STRIP
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/keep-global-symbols-mix-globalize.test b/test/tools/llvm-objcopy/ELF/keep-global-symbols-mix-globalize.test
index 2de5807..bc5cba6 100644
--- a/test/tools/llvm-objcopy/ELF/keep-global-symbols-mix-globalize.test
+++ b/test/tools/llvm-objcopy/ELF/keep-global-symbols-mix-globalize.test
@@ -11,7 +11,7 @@
 # RUN:   --globalize-symbol Global1 \
 # RUN:   --keep-global-symbol Global2 \
 # RUN:   %t.o %t.2.o
-# RUN: llvm-readobj -elf-output-style=GNU -symbols %t.2.o | FileCheck %s
+# RUN: llvm-readobj --elf-output-style=GNU --symbols %t.2.o | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/keep-global-symbols.test b/test/tools/llvm-objcopy/ELF/keep-global-symbols.test
index 8ce1d7f..6714740 100644
--- a/test/tools/llvm-objcopy/ELF/keep-global-symbols.test
+++ b/test/tools/llvm-objcopy/ELF/keep-global-symbols.test
@@ -39,7 +39,7 @@
 # RUN:   -G Weak1 \
 # RUN:   --globalize-symbol Weak2 \
 # RUN:   %t.o %t.2.o
-# RUN: llvm-readobj -elf-output-style=GNU -symbols %t.2.o | FileCheck %s
+# RUN: llvm-readobj --elf-output-style=GNU --symbols %t.2.o | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/keep-many.test b/test/tools/llvm-objcopy/ELF/keep-many.test
index 2abb19d..2f1019a 100644
--- a/test/tools/llvm-objcopy/ELF/keep-many.test
+++ b/test/tools/llvm-objcopy/ELF/keep-many.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -strip-non-alloc -keep-section=.test -keep-section=.test3 %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-objcopy --strip-non-alloc --keep-section=.test --keep-section=.test3 %t %t2
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/keep-only-section.test b/test/tools/llvm-objcopy/ELF/keep-only-section.test
index 38aa52c..4bf3384 100644
--- a/test/tools/llvm-objcopy/ELF/keep-only-section.test
+++ b/test/tools/llvm-objcopy/ELF/keep-only-section.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -keep-section=.test2 -only-section=.test %t %t2
-# RUN: llvm-objcopy -j .test -keep-section=.test2 %t %t3
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-objcopy --keep-section=.test2 --only-section=.test %t %t2
+# RUN: llvm-objcopy -j .test --keep-section=.test2 %t %t3
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 # RUN: diff %t2 %t3
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/keep-symbol-remove-section.test b/test/tools/llvm-objcopy/ELF/keep-symbol-remove-section.test
index 6b02caa..2dbfb2c 100644
--- a/test/tools/llvm-objcopy/ELF/keep-symbol-remove-section.test
+++ b/test/tools/llvm-objcopy/ELF/keep-symbol-remove-section.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --remove-section .text --keep-symbol foo %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/keep-symbol.test b/test/tools/llvm-objcopy/ELF/keep-symbol.test
index 8c6415f..40a7dde 100644
--- a/test/tools/llvm-objcopy/ELF/keep-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/keep-symbol.test
@@ -1,8 +1,8 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --discard-all -K foo --keep-symbol bar %t %t2
 # RUN: llvm-objcopy -K foo -N foo -N bar --keep-symbol bar -N baz %t %t3
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
-# RUN: llvm-readobj -symbols %t3 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t3 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/localize-hidden.test b/test/tools/llvm-objcopy/ELF/localize-hidden.test
index 05d747b..4166200 100644
--- a/test/tools/llvm-objcopy/ELF/localize-hidden.test
+++ b/test/tools/llvm-objcopy/ELF/localize-hidden.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -localize-hidden %t %t2
-# RUN: llvm-readobj -relocations -symbols %t2 | FileCheck %s
+# RUN: llvm-objcopy --localize-hidden %t %t2
+# RUN: llvm-readobj --relocations --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/localize.test b/test/tools/llvm-objcopy/ELF/localize.test
index 2e2d6cc..a04c51d 100644
--- a/test/tools/llvm-objcopy/ELF/localize.test
+++ b/test/tools/llvm-objcopy/ELF/localize.test
@@ -5,7 +5,7 @@
 # RUN:     -L Weak \
 # RUN:     -L GlobalCommon \
 # RUN:     %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/many-sections.test b/test/tools/llvm-objcopy/ELF/many-sections.test
index e9e12cf..57239f3 100644
--- a/test/tools/llvm-objcopy/ELF/many-sections.test
+++ b/test/tools/llvm-objcopy/ELF/many-sections.test
@@ -1,8 +1,8 @@
 RUN: %python %p/Inputs/ungzip.py %p/Inputs/many-sections.o.gz > %t
 RUN: llvm-objcopy %t %t2
-RUN: llvm-readobj -file-headers %t2 | FileCheck --check-prefix=EHDR %s
-RUN: llvm-readobj -sections %t2 | FileCheck --check-prefix=SECS %s
-RUN: llvm-readobj -symbols %t2 | grep "Symbol {" | wc -l | FileCheck --check-prefix=SYMS %s
+RUN: llvm-readobj --file-headers %t2 | FileCheck --check-prefix=EHDR %s
+RUN: llvm-readobj --sections %t2 | FileCheck --check-prefix=SECS %s
+RUN: llvm-readobj --symbols %t2 | grep "Symbol {" | wc -l | FileCheck --check-prefix=SYMS %s
 
 EHDR:      Format: ELF64-x86-64
 EHDR-NEXT: Arch: x86_64
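
many-sections.test counts records instead of matching them: every symbol block in llvm-readobj output opens with a literal 'Symbol {' line, so grep piped into wc -l turns the dump into a count for FileCheck's SYMS prefix. The same trick works for any repeated record, e.g. counting sections (file name hypothetical):

  llvm-readobj --sections big.o | grep -c 'Section {'   # grep -c folds grep | wc -l into one step
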
diff --git a/test/tools/llvm-objcopy/ELF/marker-segment.test b/test/tools/llvm-objcopy/ELF/marker-segment.test
index 01b55eb..226d21c 100644
--- a/test/tools/llvm-objcopy/ELF/marker-segment.test
+++ b/test/tools/llvm-objcopy/ELF/marker-segment.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -program-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --program-headers %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/no-symbol-relocation.test b/test/tools/llvm-objcopy/ELF/no-symbol-relocation.test
index 5e60ec8..9def536 100644
--- a/test/tools/llvm-objcopy/ELF/no-symbol-relocation.test
+++ b/test/tools/llvm-objcopy/ELF/no-symbol-relocation.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -relocations %t2 | FileCheck %s
+# RUN: llvm-readobj --relocations %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/null-symbol.test b/test/tools/llvm-objcopy/ELF/null-symbol.test
index 61ed410..94f5ab5 100644
--- a/test/tools/llvm-objcopy/ELF/null-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/null-symbol.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/only-section-many.test b/test/tools/llvm-objcopy/ELF/only-section-many.test
index 2f95623..43ccff1 100644
--- a/test/tools/llvm-objcopy/ELF/only-section-many.test
+++ b/test/tools/llvm-objcopy/ELF/only-section-many.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy -j .test1 -j .test2 %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/only-section-remove-strtab.test b/test/tools/llvm-objcopy/ELF/only-section-remove-strtab.test
index e336181..c88fef2 100644
--- a/test/tools/llvm-objcopy/ELF/only-section-remove-strtab.test
+++ b/test/tools/llvm-objcopy/ELF/only-section-remove-strtab.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -R .symtab -R .strtab -only-section=.test %t %t2
+# RUN: llvm-objcopy -R .symtab -R .strtab --only-section=.test %t %t2
 # RUN: llvm-objcopy -j .test -R .strtab -R .symtab %t %t3
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 # RUN: diff %t2 %t3
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/only-section-strip-non-alloc.test b/test/tools/llvm-objcopy/ELF/only-section-strip-non-alloc.test
index f61b4ae..ae39b51 100644
--- a/test/tools/llvm-objcopy/ELF/only-section-strip-non-alloc.test
+++ b/test/tools/llvm-objcopy/ELF/only-section-strip-non-alloc.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -strip-non-alloc -only-section=.test %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-objcopy --strip-non-alloc --only-section=.test %t %t2
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/overlap-chain.test b/test/tools/llvm-objcopy/ELF/overlap-chain.test
index a084abb..6b54e73 100644
--- a/test/tools/llvm-objcopy/ELF/overlap-chain.test
+++ b/test/tools/llvm-objcopy/ELF/overlap-chain.test
@@ -5,7 +5,7 @@
 
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -program-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --program-headers %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/prefix-symbols.test b/test/tools/llvm-objcopy/ELF/prefix-symbols.test
index 8761ba9..1e6a54a 100644
--- a/test/tools/llvm-objcopy/ELF/prefix-symbols.test
+++ b/test/tools/llvm-objcopy/ELF/prefix-symbols.test
@@ -1,8 +1,8 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --prefix-symbols prefix %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s --check-prefix=COMMON --check-prefix=BASIC
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s --check-prefix=COMMON --check-prefix=BASIC
 # RUN: llvm-objcopy --redefine-sym bar=baz --prefix-symbols prefix %t %t3
-# RUN: llvm-readobj -symbols %t3 | FileCheck %s --check-prefix=COMMON --check-prefix=REDEF
+# RUN: llvm-readobj --symbols %t3 | FileCheck %s --check-prefix=COMMON --check-prefix=REDEF
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/program-headers.test b/test/tools/llvm-objcopy/ELF/program-headers.test
index dc25510..9c123d2 100644
--- a/test/tools/llvm-objcopy/ELF/program-headers.test
+++ b/test/tools/llvm-objcopy/ELF/program-headers.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -program-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --program-headers %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/pt-phdr.test b/test/tools/llvm-objcopy/ELF/pt-phdr.test
index 336ce76..dba40ef 100644
--- a/test/tools/llvm-objcopy/ELF/pt-phdr.test
+++ b/test/tools/llvm-objcopy/ELF/pt-phdr.test
@@ -2,7 +2,7 @@
 # segments.
 
 # RUN: llvm-objcopy %p/Inputs/pt-phdr.elf %t
-# RUN: llvm-readobj -program-headers %t | FileCheck %s
+# RUN: llvm-readobj --program-headers %t | FileCheck %s
 
 #CHECK: ProgramHeaders [
 #CHECK-NEXT:  ProgramHeader {
diff --git a/test/tools/llvm-objcopy/ELF/redefine-symbol.test b/test/tools/llvm-objcopy/ELF/redefine-symbol.test
index a0600fa..c56621a 100644
--- a/test/tools/llvm-objcopy/ELF/redefine-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/redefine-symbol.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --redefine-sym foo=oof --redefine-sym empty= %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 # RUN: not llvm-objcopy --redefine-sym barbar %t %t2 2>&1 | FileCheck %s --check-prefix=BAD-FORMAT
 # RUN: not llvm-objcopy --redefine-sym foo=f1 --redefine-sym foo=f2 %t %t2 2>&1 | FileCheck %s --check-prefix=MULTIPLE-REDEFINITION
 
diff --git a/test/tools/llvm-objcopy/ELF/reloc-no-symtab.test b/test/tools/llvm-objcopy/ELF/reloc-no-symtab.test
index ea59361..96bf4d3 100644
--- a/test/tools/llvm-objcopy/ELF/reloc-no-symtab.test
+++ b/test/tools/llvm-objcopy/ELF/reloc-no-symtab.test
@@ -7,8 +7,8 @@
 
 # RUN: yaml2obj %s > %t.original
 # RUN: llvm-strip %t.original -o %t.stripped
-# RUN: llvm-readobj -sections %t.original | FileCheck %s
-# RUN: llvm-readobj -sections %t.stripped | FileCheck %s
+# RUN: llvm-readobj --sections %t.original | FileCheck %s
+# RUN: llvm-readobj --sections %t.stripped | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/relocatable-phdr.test b/test/tools/llvm-objcopy/ELF/relocatable-phdr.test
index b621895..4021646 100644
--- a/test/tools/llvm-objcopy/ELF/relocatable-phdr.test
+++ b/test/tools/llvm-objcopy/ELF/relocatable-phdr.test
@@ -2,7 +2,7 @@
 # for offset and header size when copied with llvm-objcopy.
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -file-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/remove-multiple-sections.test b/test/tools/llvm-objcopy/ELF/remove-multiple-sections.test
index 5e5de97..1f1bc4c 100644
--- a/test/tools/llvm-objcopy/ELF/remove-multiple-sections.test
+++ b/test/tools/llvm-objcopy/ELF/remove-multiple-sections.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy -R .test2 -R .test3 -R .test5 %t %t2
-# RUN: llvm-readobj -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/remove-section-with-symbol.test b/test/tools/llvm-objcopy/ELF/remove-section-with-symbol.test
index 067c0b2..949d34c 100644
--- a/test/tools/llvm-objcopy/ELF/remove-section-with-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/remove-section-with-symbol.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy -R .test %t %t2
-# RUN: llvm-readobj -file-headers -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/remove-section.test b/test/tools/llvm-objcopy/ELF/remove-section.test
index f19e7b7..7bde475 100644
--- a/test/tools/llvm-objcopy/ELF/remove-section.test
+++ b/test/tools/llvm-objcopy/ELF/remove-section.test
@@ -2,13 +2,13 @@
 # RUN: cp %t %t1
 
 # RUN: llvm-objcopy -R .test2 %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 # Verify that the first run of llvm-objcopy
 # has not modified the input binary.
 # RUN: cmp %t %t1
 
-# RUN: llvm-objcopy -remove-section=.test2 %t1 %t3
+# RUN: llvm-objcopy --remove-section=.test2 %t1 %t3
 # RUN: cmp %t2 %t3
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/remove-shndx.test b/test/tools/llvm-objcopy/ELF/remove-shndx.test
index b8ea94c..6cc3a1a 100644
--- a/test/tools/llvm-objcopy/ELF/remove-shndx.test
+++ b/test/tools/llvm-objcopy/ELF/remove-shndx.test
@@ -2,6 +2,6 @@
 # that needs it, even if the original was removed.
 RUN: %python %p/Inputs/ungzip.py %p/Inputs/many-sections.o.gz > %t
 RUN: llvm-objcopy -R .symtab_shndx %t %t2
-RUN: llvm-readobj -sections %t2 | FileCheck %s
+RUN: llvm-readobj --sections %t2 | FileCheck %s
 
 CHECK: Name: .symtab_shndx (
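
Background for the shndx tests: ELF stores a symbol's section in the 16-bit st_shndx field, and once section indices reach SHN_LORESERVE (0xff00) the real values overflow into an SHT_SYMTAB_SHNDX section. This test checks that llvm-objcopy regenerates that section for an object with a huge section count even after -R deletes it, while strict-no-add.test below expects it to stay absent, presumably because the removals there bring every index back within the 16-bit field. Lit-style sketch, names hypothetical:

  llvm-objcopy -R .symtab_shndx many.o out.o
  llvm-readobj --sections out.o | grep .symtab_shndx   # regenerated when still needed
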
diff --git a/test/tools/llvm-objcopy/ELF/remove-symtab.test b/test/tools/llvm-objcopy/ELF/remove-symtab.test
index 0e3f82b..5c53962 100644
--- a/test/tools/llvm-objcopy/ELF/remove-symtab.test
+++ b/test/tools/llvm-objcopy/ELF/remove-symtab.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy -R .symtab %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/rename-section-flag-osproc-mask.test b/test/tools/llvm-objcopy/ELF/rename-section-flag-osproc-mask.test
index 58a5ffa..a769290 100644
--- a/test/tools/llvm-objcopy/ELF/rename-section-flag-osproc-mask.test
+++ b/test/tools/llvm-objcopy/ELF/rename-section-flag-osproc-mask.test
@@ -2,9 +2,9 @@
 
 # ===== x86_64 =====
 
-# RUN: yaml2obj -docnum 1 %s > %t-x86_64.o
+# RUN: yaml2obj --docnum 1 %s > %t-x86_64.o
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc %t-x86_64.o
-# RUN: llvm-readobj -sections %t-x86_64.o | FileCheck %s --check-prefix=X86_64
+# RUN: llvm-readobj --sections %t-x86_64.o | FileCheck %s --check-prefix=X86_64
 
 --- !ELF
 FileHeader:
@@ -27,9 +27,9 @@
 
 # ===== hex =====
 
-# RUN: yaml2obj -docnum 2 %s > %t-hex.o
+# RUN: yaml2obj --docnum 2 %s > %t-hex.o
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc %t-hex.o
-# RUN: llvm-readobj -sections %t-hex.o | FileCheck %s --check-prefix=HEX
+# RUN: llvm-readobj --sections %t-hex.o | FileCheck %s --check-prefix=HEX
 
 --- !ELF
 FileHeader:
@@ -52,9 +52,9 @@
 
 # ===== mips =====
 
-# RUN: yaml2obj -docnum 3 %s > %t-mips.o
+# RUN: yaml2obj --docnum 3 %s > %t-mips.o
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc %t-mips.o
-# RUN: llvm-readobj -sections %t-mips.o | FileCheck %s --check-prefix=MIPS
+# RUN: llvm-readobj --sections %t-mips.o | FileCheck %s --check-prefix=MIPS
 
 --- !ELF
 FileHeader:
@@ -91,9 +91,9 @@
 
 # ===== arm =====
 
-# RUN: yaml2obj -docnum 4 %s > %t-arm.o
+# RUN: yaml2obj --docnum 4 %s > %t-arm.o
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc %t-arm.o
-# RUN: llvm-readobj -sections %t-arm.o | FileCheck %s --check-prefix=ARM
+# RUN: llvm-readobj --sections %t-arm.o | FileCheck %s --check-prefix=ARM
 
 --- !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/rename-section-flag-preserved.test b/test/tools/llvm-objcopy/ELF/rename-section-flag-preserved.test
index d944e40..6c22eb8 100644
--- a/test/tools/llvm-objcopy/ELF/rename-section-flag-preserved.test
+++ b/test/tools/llvm-objcopy/ELF/rename-section-flag-preserved.test
@@ -2,29 +2,29 @@
 
 # Single flags on a section with all flags:
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc %t %t.alloc
-# RUN: llvm-readobj -sections %t.alloc | FileCheck %s --check-prefixes=CHECK,ALLOC,WRITE
+# RUN: llvm-readobj --sections %t.alloc | FileCheck %s --check-prefixes=CHECK,ALLOC,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,load %t %t.load
-# RUN: llvm-readobj -sections %t.load | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.load | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,noload %t %t.noload
-# RUN: llvm-readobj -sections %t.noload | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.noload | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,readonly %t %t.readonly
-# RUN: llvm-readobj -sections %t.readonly | FileCheck %s --check-prefixes=CHECK
+# RUN: llvm-readobj --sections %t.readonly | FileCheck %s --check-prefixes=CHECK
 # RUN: llvm-objcopy --rename-section=.foo=.bar,debug %t %t.debug
-# RUN: llvm-readobj -sections %t.debug | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.debug | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,code %t %t.code
-# RUN: llvm-readobj -sections %t.code | FileCheck %s --check-prefixes=CHECK,EXEC,WRITE
+# RUN: llvm-readobj --sections %t.code | FileCheck %s --check-prefixes=CHECK,EXEC,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,data %t %t.data
-# RUN: llvm-readobj -sections %t.data | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.data | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,rom %t %t.rom
-# RUN: llvm-readobj -sections %t.rom | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.rom | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,contents %t %t.contents
-# RUN: llvm-readobj -sections %t.contents | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.contents | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,merge %t %t.merge
-# RUN: llvm-readobj -sections %t.merge | FileCheck %s --check-prefixes=CHECK,MERGE,WRITE
+# RUN: llvm-readobj --sections %t.merge | FileCheck %s --check-prefixes=CHECK,MERGE,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,strings %t %t.strings
-# RUN: llvm-readobj -sections %t.strings | FileCheck %s --check-prefixes=CHECK,STRINGS,WRITE
+# RUN: llvm-readobj --sections %t.strings | FileCheck %s --check-prefixes=CHECK,STRINGS,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,share %t %t.share
-# RUN: llvm-readobj -sections %t.share | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.share | FileCheck %s --check-prefixes=CHECK,WRITE
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/rename-section-flag.test b/test/tools/llvm-objcopy/ELF/rename-section-flag.test
index f31452d..75f35af 100644
--- a/test/tools/llvm-objcopy/ELF/rename-section-flag.test
+++ b/test/tools/llvm-objcopy/ELF/rename-section-flag.test
@@ -2,35 +2,35 @@
 
 # Single flags on a section with no flags:
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc %t %t.alloc
-# RUN: llvm-readobj -sections %t.alloc | FileCheck %s --check-prefixes=CHECK,ALLOC,WRITE
+# RUN: llvm-readobj --sections %t.alloc | FileCheck %s --check-prefixes=CHECK,ALLOC,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,load %t %t.load
-# RUN: llvm-readobj -sections %t.load | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.load | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,noload %t %t.noload
-# RUN: llvm-readobj -sections %t.noload | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.noload | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,readonly %t %t.readonly
-# RUN: llvm-readobj -sections %t.readonly | FileCheck %s --check-prefixes=CHECK
+# RUN: llvm-readobj --sections %t.readonly | FileCheck %s --check-prefixes=CHECK
 # RUN: llvm-objcopy --rename-section=.foo=.bar,debug %t %t.debug
-# RUN: llvm-readobj -sections %t.debug | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.debug | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,code %t %t.code
-# RUN: llvm-readobj -sections %t.code | FileCheck %s --check-prefixes=CHECK,EXEC,WRITE
+# RUN: llvm-readobj --sections %t.code | FileCheck %s --check-prefixes=CHECK,EXEC,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,data %t %t.data
-# RUN: llvm-readobj -sections %t.data | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.data | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,rom %t %t.rom
-# RUN: llvm-readobj -sections %t.rom | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.rom | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,contents %t %t.contents
-# RUN: llvm-readobj -sections %t.contents | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.contents | FileCheck %s --check-prefixes=CHECK,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,merge %t %t.merge
-# RUN: llvm-readobj -sections %t.merge | FileCheck %s --check-prefixes=CHECK,MERGE,WRITE
+# RUN: llvm-readobj --sections %t.merge | FileCheck %s --check-prefixes=CHECK,MERGE,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,strings %t %t.strings
-# RUN: llvm-readobj -sections %t.strings | FileCheck %s --check-prefixes=CHECK,STRINGS,WRITE
+# RUN: llvm-readobj --sections %t.strings | FileCheck %s --check-prefixes=CHECK,STRINGS,WRITE
 # RUN: llvm-objcopy --rename-section=.foo=.bar,share %t %t.share
-# RUN: llvm-readobj -sections %t.share | FileCheck %s --check-prefixes=CHECK,WRITE
+# RUN: llvm-readobj --sections %t.share | FileCheck %s --check-prefixes=CHECK,WRITE
 
 # Multiple flags:
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc,readonly,strings %t %t.alloc_ro_strings
-# RUN: llvm-readobj -sections %t.alloc_ro_strings | FileCheck %s --check-prefixes=CHECK,ALLOC,STRINGS
+# RUN: llvm-readobj --sections %t.alloc_ro_strings | FileCheck %s --check-prefixes=CHECK,ALLOC,STRINGS
 # RUN: llvm-objcopy --rename-section=.foo=.bar,alloc,code %t %t.alloc_code
-# RUN: llvm-readobj -sections %t.alloc_code | FileCheck %s --check-prefixes=CHECK,ALLOC,EXEC,WRITE
+# RUN: llvm-readobj --sections %t.alloc_code | FileCheck %s --check-prefixes=CHECK,ALLOC,EXEC,WRITE
 
 # Invalid flags:
 # RUN: not llvm-objcopy --rename-section=.foo=.bar,xyzzy %t %t.xyzzy 2>&1 | FileCheck %s --check-prefix=BAD-FLAG
diff --git a/test/tools/llvm-objcopy/ELF/rename-section-multiple.test b/test/tools/llvm-objcopy/ELF/rename-section-multiple.test
index 8feff9f..e614f65 100644
--- a/test/tools/llvm-objcopy/ELF/rename-section-multiple.test
+++ b/test/tools/llvm-objcopy/ELF/rename-section-multiple.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --rename-section=.test1=.test2 --rename-section=.test3=.test4 --rename-section=.test5=.test6 %t %t2
-# RUN: llvm-readobj -file-headers -sections -section-data %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections --section-data %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/rename-section.test b/test/tools/llvm-objcopy/ELF/rename-section.test
index 505cf77..feaec44 100644
--- a/test/tools/llvm-objcopy/ELF/rename-section.test
+++ b/test/tools/llvm-objcopy/ELF/rename-section.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --rename-section=.foo=.bar %t %t2
-# RUN: llvm-readobj -file-headers -sections -section-data %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections --section-data %t2 | FileCheck %s
 # RUN: not llvm-objcopy --rename-section=.foo.bar --rename-section=.foo=.other %t %t2 2>&1 | FileCheck %s --check-prefix=BAD-FORMAT
 # RUN: not llvm-objcopy --rename-section=.foo=.bar --rename-section=.foo=.other %t %t2 2>&1 | FileCheck %s --check-prefix=MULTIPLE-RENAMES
 
diff --git a/test/tools/llvm-objcopy/ELF/segment-shift-section-remove.test b/test/tools/llvm-objcopy/ELF/segment-shift-section-remove.test
index caeb559..563a9e3 100644
--- a/test/tools/llvm-objcopy/ELF/segment-shift-section-remove.test
+++ b/test/tools/llvm-objcopy/ELF/segment-shift-section-remove.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text2 %t %t2
-# RUN: llvm-readobj -file-headers -sections -program-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections --program-headers %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/segment-shift.test b/test/tools/llvm-objcopy/ELF/segment-shift.test
index 635fdcc..b4caa19 100644
--- a/test/tools/llvm-objcopy/ELF/segment-shift.test
+++ b/test/tools/llvm-objcopy/ELF/segment-shift.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -program-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --program-headers %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/segment-test-remove-section.test b/test/tools/llvm-objcopy/ELF/segment-test-remove-section.test
index 9b98dc8..49a9003 100644
--- a/test/tools/llvm-objcopy/ELF/segment-test-remove-section.test
+++ b/test/tools/llvm-objcopy/ELF/segment-test-remove-section.test
@@ -6,7 +6,7 @@
 
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text2 %t %t2
-# RUN: llvm-readobj -file-headers -program-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --program-headers --sections %t2 | FileCheck %s
 # RUN: od -t x1 -j 8192 %t2 | FileCheck %s --check-prefix=DATA
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/strict-no-add.test b/test/tools/llvm-objcopy/ELF/strict-no-add.test
index 15f5251..4f24df3 100644
--- a/test/tools/llvm-objcopy/ELF/strict-no-add.test
+++ b/test/tools/llvm-objcopy/ELF/strict-no-add.test
@@ -4,7 +4,7 @@
 RUN: %python %p/Inputs/ungzip.py %p/Inputs/many-sections.o.gz > %t.0
 RUN: cat %p/Inputs/alloc-symtab.o > %t
 RUN: llvm-objcopy -R .text -R s0 -R s1 -R s2 -R s3 -R s4 -R s5 -R s6 %t.0 %t2
-RUN: llvm-objcopy -add-section=.s0=%t -add-section=.s1=%t -add-section=.s2=%t %t2 %t2
-RUN: llvm-readobj -sections %t2 | FileCheck --check-prefix=SECS %s
+RUN: llvm-objcopy --add-section=.s0=%t --add-section=.s1=%t --add-section=.s2=%t %t2 %t2
+RUN: llvm-readobj --sections %t2 | FileCheck --check-prefix=SECS %s
 
 SECS-NOT: Name: .symtab_shndx
diff --git a/test/tools/llvm-objcopy/ELF/strip-all-and-keep-symbol.test b/test/tools/llvm-objcopy/ELF/strip-all-and-keep-symbol.test
index 72e17a7..9892a12 100644
--- a/test/tools/llvm-objcopy/ELF/strip-all-and-keep-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/strip-all-and-keep-symbol.test
@@ -1,13 +1,13 @@
 # RUN: yaml2obj %s > %t
 # RUN: cp %t %t1
 # RUN: llvm-objcopy --strip-all --keep-symbol foo %t %t2
-# RUN: llvm-readobj -sections -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --sections --symbols %t2 | FileCheck %s
 
 # Verify that the first run of llvm-objcopy
 # has not modified the input binary.
 # RUN: cmp %t %t1
 
-# RUN: llvm-strip -keep-symbol=foo %t1
+# RUN: llvm-strip --keep-symbol=foo %t1
 # RUN: cmp %t1 %t2
 
 # RUN: cp %t %t3
diff --git a/test/tools/llvm-objcopy/ELF/strip-all-and-remove.test b/test/tools/llvm-objcopy/ELF/strip-all-and-remove.test
index 9ee738a..5455c6b 100644
--- a/test/tools/llvm-objcopy/ELF/strip-all-and-remove.test
+++ b/test/tools/llvm-objcopy/ELF/strip-all-and-remove.test
@@ -1,10 +1,10 @@
 # RUN: yaml2obj %s > %t
 # RUN: cp %t %t1
 
-# RUN: llvm-strip -remove-section=.text.bar %t
-# RUN: llvm-readobj -file-headers -sections -symbols %t | FileCheck %s
+# RUN: llvm-strip --remove-section=.text.bar %t
+# RUN: llvm-readobj --file-headers --sections --symbols %t | FileCheck %s
 
-# RUN: llvm-objcopy -strip-all -remove-section=.text.bar %t1 %t1
+# RUN: llvm-objcopy --strip-all --remove-section=.text.bar %t1 %t1
 # RUN: cmp %t %t1
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/strip-all-gnu.test b/test/tools/llvm-objcopy/ELF/strip-all-gnu.test
index f6dbcc7..8253526 100644
--- a/test/tools/llvm-objcopy/ELF/strip-all-gnu.test
+++ b/test/tools/llvm-objcopy/ELF/strip-all-gnu.test
@@ -2,7 +2,7 @@
 # RUN: cp %t %t1
 # RUN: llvm-objcopy --strip-all-gnu %t %t2
 # RUN: llvm-strip --strip-all-gnu %t -o %t3
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 # RUN: cmp %t2 %t3
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/strip-all.test b/test/tools/llvm-objcopy/ELF/strip-all.test
index 5c5b6fd..87388e0 100644
--- a/test/tools/llvm-objcopy/ELF/strip-all.test
+++ b/test/tools/llvm-objcopy/ELF/strip-all.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
 # RUN: cp %t %t3
 # RUN: llvm-objcopy --strip-all %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 # Verify that the previous run of llvm-objcopy has not modified the input.
 # RUN: cmp %t %t3
diff --git a/test/tools/llvm-objcopy/ELF/strip-debug-and-remove.test b/test/tools/llvm-objcopy/ELF/strip-debug-and-remove.test
index 5d2b0c7..d2d0b34 100644
--- a/test/tools/llvm-objcopy/ELF/strip-debug-and-remove.test
+++ b/test/tools/llvm-objcopy/ELF/strip-debug-and-remove.test
@@ -1,10 +1,10 @@
 # RUN: yaml2obj %s > %t
 # RUN: cp %t %t1
 
-# RUN: llvm-strip -strip-debug -remove-section=.text.bar %t
-# RUN: llvm-readobj -file-headers -sections -symbols %t | FileCheck %s
+# RUN: llvm-strip --strip-debug --remove-section=.text.bar %t
+# RUN: llvm-readobj --file-headers --sections --symbols %t | FileCheck %s
 
-# RUN: llvm-objcopy -strip-debug -remove-section=.text.bar %t1 %t1
+# RUN: llvm-objcopy --strip-debug --remove-section=.text.bar %t1 %t1
 # RUN: cmp %t %t1
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/strip-debug.test b/test/tools/llvm-objcopy/ELF/strip-debug.test
index 6c833f3..b88744e 100644
--- a/test/tools/llvm-objcopy/ELF/strip-debug.test
+++ b/test/tools/llvm-objcopy/ELF/strip-debug.test
@@ -1,12 +1,12 @@
 # RUN: yaml2obj %s > %t
 # RUN: cp %t %t3
-# RUN: llvm-objcopy -strip-debug %t %t2
-# RUN: llvm-readobj -file-headers -sections -symbols %t2 | FileCheck %s
+# RUN: llvm-objcopy --strip-debug %t %t2
+# RUN: llvm-readobj --file-headers --sections --symbols %t2 | FileCheck %s
 
 # Verify that the previous run of llvm-objcopy has not modified the input.
 # RUN: cmp %t %t3
 
-# RUN: llvm-strip -strip-debug %t3
+# RUN: llvm-strip --strip-debug %t3
 # RUN: cmp %t2 %t3
 
 # RUN: cp %t %t4
diff --git a/test/tools/llvm-objcopy/ELF/strip-dwo-groups.test b/test/tools/llvm-objcopy/ELF/strip-dwo-groups.test
index 957bb55..724b063 100644
--- a/test/tools/llvm-objcopy/ELF/strip-dwo-groups.test
+++ b/test/tools/llvm-objcopy/ELF/strip-dwo-groups.test
@@ -1,6 +1,6 @@
 # RUN: cp %p/Inputs/groups.o %t
-# RUN: llvm-objcopy -strip-dwo %t
-# RUN: llvm-readobj -elf-section-groups %t | FileCheck %s
+# RUN: llvm-objcopy --strip-dwo %t
+# RUN: llvm-readobj --elf-section-groups %t | FileCheck %s
 
 // Source code of groups.o:
 //
@@ -18,11 +18,11 @@
 //
 // clang -g -gsplit-dwarf -std=c++11 -c groups.cpp -o groups.o
 
-// `llvm-objcopy -strip-dwo` strips out dwo sections; as a result, the index of
+// `llvm-objcopy --strip-dwo` strips out dwo sections; as a result, the index of
 // the symbol table, the indices of the symbols, and the indices of the sections
 // which come after the removed ones will change. Consequently, the Link and
 // Info fields and the content of .group need to be updated. In the past,
-// `llvm-objcopy -strip-dwo` used to produce invalid binaries with a
+// `llvm-objcopy --strip-dwo` used to produce invalid binaries with a
 // broken .group section; this test verifies the correctness of
 // Link, Info, and the content of this section.
 
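
The index-shifting problem this test guards against can be sketched briefly. Below is a hypothetical Python model (not llvm-objcopy's actual code) of how a SHT_GROUP member list must be rewritten once earlier sections are removed; the section numbers are made up for illustration:

    # Hypothetical sketch of remapping .group member indices after sections
    # are stripped; it models the behavior described above, not llvm-objcopy.
    def remap_group_members(group_members, removed_indices):
        removed = sorted(removed_indices)
        remapped = []
        for old_index in group_members:
            if old_index in removed_indices:
                continue  # the member itself was stripped (a dwo section)
            # Every removed section with a smaller index shifts this one down.
            shift = sum(1 for r in removed if r < old_index)
            remapped.append(old_index - shift)
        return remapped

    # Say sections 3 and 4 are the stripped dwo sections:
    print(remap_group_members([2, 3, 4, 5, 7], {3, 4}))  # [2, 3, 5]

The same shift applies to the symbol table index in the group section's Link field and to the signature symbol index in its Info field.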
diff --git a/test/tools/llvm-objcopy/ELF/strip-dwo-inplace.test b/test/tools/llvm-objcopy/ELF/strip-dwo-inplace.test
index 31bbd36..fc2d6c3 100644
--- a/test/tools/llvm-objcopy/ELF/strip-dwo-inplace.test
+++ b/test/tools/llvm-objcopy/ELF/strip-dwo-inplace.test
@@ -1,6 +1,6 @@
 # RUN: cp %p/Inputs/dwarf.dwo %t
-# RUN: llvm-objcopy -strip-dwo %t
-# RUN: llvm-readobj -file-headers -sections %t | FileCheck %s
+# RUN: llvm-objcopy --strip-dwo %t
+# RUN: llvm-readobj --file-headers --sections %t | FileCheck %s
 
 CHECK:     SectionHeaderCount: 24
 
diff --git a/test/tools/llvm-objcopy/ELF/strip-multiple-files.test b/test/tools/llvm-objcopy/ELF/strip-multiple-files.test
index d1496eb..9aef872 100644
--- a/test/tools/llvm-objcopy/ELF/strip-multiple-files.test
+++ b/test/tools/llvm-objcopy/ELF/strip-multiple-files.test
@@ -4,7 +4,7 @@
 # tests so we only have to run FileCheck on it once.
 # RUN: cp %t.o %t.1.o
 # RUN: llvm-strip --keep-symbol=foo %t.1.o -o %t.stripped.o
-# RUN: llvm-readobj -symbols %t.stripped.o | FileCheck %s
+# RUN: llvm-readobj --symbols %t.stripped.o | FileCheck %s
 
 # llvm-strip on two files:
 # RUN: cp %t.o %t.1.o
diff --git a/test/tools/llvm-objcopy/ELF/strip-non-alloc.test b/test/tools/llvm-objcopy/ELF/strip-non-alloc.test
index 270670c..8db7321 100644
--- a/test/tools/llvm-objcopy/ELF/strip-non-alloc.test
+++ b/test/tools/llvm-objcopy/ELF/strip-non-alloc.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --strip-non-alloc %t %t2
-# RUN: llvm-readobj -file-headers -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/strip-preserve-atime.test b/test/tools/llvm-objcopy/ELF/strip-preserve-atime.test
index 5cee21f..aad537c 100644
--- a/test/tools/llvm-objcopy/ELF/strip-preserve-atime.test
+++ b/test/tools/llvm-objcopy/ELF/strip-preserve-atime.test
@@ -43,7 +43,7 @@
 # Preserve dates in split DWO files.
 # RUN: cp %p/Inputs/dwarf.dwo %t-input.dwo
 # RUN: touch -a -t 199505050555.55 %t-input.dwo
-# RUN: llvm-objcopy -p -split-dwo=%t-dwo %t-input.dwo %t-nondwo
+# RUN: llvm-objcopy -p --split-dwo=%t-dwo %t-input.dwo %t-nondwo
 # RUN: ls -lu %t-dwo | FileCheck %s --check-prefix=CHECK-PRESERVE-ATIME
 # RUN: llvm-readobj %t-dwo
 # RUN: ls -lu %t-nondwo | FileCheck %s --check-prefix=CHECK-PRESERVE-ATIME
diff --git a/test/tools/llvm-objcopy/ELF/strip-preserve-mtime.test b/test/tools/llvm-objcopy/ELF/strip-preserve-mtime.test
index ac430b4..5f247ae 100644
--- a/test/tools/llvm-objcopy/ELF/strip-preserve-mtime.test
+++ b/test/tools/llvm-objcopy/ELF/strip-preserve-mtime.test
@@ -41,7 +41,7 @@
 # Preserve dates in split DWO files.
 # RUN: cp %p/Inputs/dwarf.dwo %t-input.dwo
 # RUN: touch -m -t 199705050555.55 %t-input.dwo
-# RUN: llvm-objcopy -p -split-dwo=%t-dwo %t-input.dwo %t-nondwo
+# RUN: llvm-objcopy -p --split-dwo=%t-dwo %t-input.dwo %t-nondwo
 # RUN: ls -l %t-dwo | FileCheck %s --check-prefix=CHECK-PRESERVE-MTIME
 # RUN: llvm-readobj %t-dwo
 # RUN: ls -l %t-nondwo | FileCheck %s --check-prefix=CHECK-PRESERVE-MTIME
diff --git a/test/tools/llvm-objcopy/ELF/strip-sections-keep.test b/test/tools/llvm-objcopy/ELF/strip-sections-keep.test
index f0031fa..711687f 100644
--- a/test/tools/llvm-objcopy/ELF/strip-sections-keep.test
+++ b/test/tools/llvm-objcopy/ELF/strip-sections-keep.test
@@ -1,5 +1,5 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -strip-sections -keep-section=.shstrtab %t %t2
+# RUN: llvm-objcopy --strip-sections --keep-section=.shstrtab %t %t2
 # RUN: od -Ax -t c %t2 | FileCheck %s
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/strip-sections-only-section.test b/test/tools/llvm-objcopy/ELF/strip-sections-only-section.test
index 2c14a72..e2236a9 100644
--- a/test/tools/llvm-objcopy/ELF/strip-sections-only-section.test
+++ b/test/tools/llvm-objcopy/ELF/strip-sections-only-section.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s > %t
-# RUN: llvm-objcopy -strip-sections -only-section=.test %t %t2
+# RUN: llvm-objcopy --strip-sections --only-section=.test %t %t2
 # RUN: od -Ax -t x1 %t2 | FileCheck %s
-# RUN: od -Ax -t c  %t2 | FileCheck %s -check-prefix=TEXT
+# RUN: od -Ax -t c  %t2 | FileCheck %s --check-prefix=TEXT
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/strip-sections.test b/test/tools/llvm-objcopy/ELF/strip-sections.test
index fd8e224..2dee6e2 100644
--- a/test/tools/llvm-objcopy/ELF/strip-sections.test
+++ b/test/tools/llvm-objcopy/ELF/strip-sections.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --strip-sections %t %t2
-# RUN: llvm-readobj -file-headers -program-headers %t2 | FileCheck %s
+# RUN: llvm-readobj --file-headers --program-headers %t2 | FileCheck %s
 # RUN: od -t x1 -j 4096 %t2 | FileCheck %s --check-prefix=DATA
 
 !ELF
diff --git a/test/tools/llvm-objcopy/ELF/strip-symbol.test b/test/tools/llvm-objcopy/ELF/strip-symbol.test
index 1b5a681..ab5fee2 100644
--- a/test/tools/llvm-objcopy/ELF/strip-symbol.test
+++ b/test/tools/llvm-objcopy/ELF/strip-symbol.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --strip-symbol baz -N bar %t %t2
-# RUN: llvm-readobj -symbols -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols --sections %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/strip-unneeded.test b/test/tools/llvm-objcopy/ELF/strip-unneeded.test
index 8f96035..6d5682f 100644
--- a/test/tools/llvm-objcopy/ELF/strip-unneeded.test
+++ b/test/tools/llvm-objcopy/ELF/strip-unneeded.test
@@ -3,7 +3,7 @@
 # RUN: llvm-objcopy --strip-unneeded %t %t2
 # Verify that llvm-objcopy has not modified the input.
 # RUN: cmp %t %t1
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 # Verify that llvm-strip modifies the symbol table the same way.
 # RUN: llvm-strip --strip-unneeded %t
diff --git a/test/tools/llvm-objcopy/ELF/symbol-copy.test b/test/tools/llvm-objcopy/ELF/symbol-copy.test
index 3e34670..4332868 100644
--- a/test/tools/llvm-objcopy/ELF/symbol-copy.test
+++ b/test/tools/llvm-objcopy/ELF/symbol-copy.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/symtab-link.test b/test/tools/llvm-objcopy/ELF/symtab-link.test
index 7ed895e..7693bfc 100644
--- a/test/tools/llvm-objcopy/ELF/symtab-link.test
+++ b/test/tools/llvm-objcopy/ELF/symtab-link.test
@@ -1,9 +1,9 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy %t %t2
-# RUN: llvm-readobj -sections %t2 | FileCheck %s
+# RUN: llvm-readobj --sections %t2 | FileCheck %s
 # RUN: cp %t %t3
 # RUN: llvm-strip --strip-debug %t3
-# RUN: llvm-readobj -sections %t3 | FileCheck %s
+# RUN: llvm-readobj --sections %t3 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/two-seg-remove-end.test b/test/tools/llvm-objcopy/ELF/two-seg-remove-end.test
index f78a964..20c2689 100644
--- a/test/tools/llvm-objcopy/ELF/two-seg-remove-end.test
+++ b/test/tools/llvm-objcopy/ELF/two-seg-remove-end.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text4 -O binary %t %t2
 # RUN: od -Ax -v -t x1 %t2 | FileCheck %s
-# RUN: wc -c %t2 | FileCheck %s -check-prefix=SIZE
+# RUN: wc -c %t2 | FileCheck %s --check-prefix=SIZE
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/two-seg-remove-first.test b/test/tools/llvm-objcopy/ELF/two-seg-remove-first.test
index 7d0ffef..a541fc6 100644
--- a/test/tools/llvm-objcopy/ELF/two-seg-remove-first.test
+++ b/test/tools/llvm-objcopy/ELF/two-seg-remove-first.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text -O binary %t %t2
 # RUN: od -Ax -v -t x1 %t2 | FileCheck %s
-# RUN: wc -c %t2 | FileCheck %s -check-prefix=SIZE
+# RUN: wc -c %t2 | FileCheck %s --check-prefix=SIZE
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/two-seg-remove-third-sec.test b/test/tools/llvm-objcopy/ELF/two-seg-remove-third-sec.test
index bedd4aa..254273d 100644
--- a/test/tools/llvm-objcopy/ELF/two-seg-remove-third-sec.test
+++ b/test/tools/llvm-objcopy/ELF/two-seg-remove-third-sec.test
@@ -1,7 +1,7 @@
 # RUN: yaml2obj %s -o %t
 # RUN: llvm-objcopy -R .text3 -O binary %t %t2
 # RUN: od -Ax -v -t x1 %t2 | FileCheck %s
-# RUN: wc -c %t2 | FileCheck %s -check-prefix=SIZE
+# RUN: wc -c %t2 | FileCheck %s --check-prefix=SIZE
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/weaken-all.test b/test/tools/llvm-objcopy/ELF/weaken-all.test
index c9a7e5a..6107d37 100644
--- a/test/tools/llvm-objcopy/ELF/weaken-all.test
+++ b/test/tools/llvm-objcopy/ELF/weaken-all.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --weaken %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objcopy/ELF/weaken.test b/test/tools/llvm-objcopy/ELF/weaken.test
index b833ff6..6f12b83 100644
--- a/test/tools/llvm-objcopy/ELF/weaken.test
+++ b/test/tools/llvm-objcopy/ELF/weaken.test
@@ -1,6 +1,6 @@
 # RUN: yaml2obj %s > %t
 # RUN: llvm-objcopy --weaken-symbol Global -W Local -W Weak %t %t2
-# RUN: llvm-readobj -symbols %t2 | FileCheck %s
+# RUN: llvm-readobj --symbols %t2 | FileCheck %s
 
 !ELF
 FileHeader:
diff --git a/test/tools/llvm-objdump/Hexagon/source-interleave-hexagon.ll b/test/tools/llvm-objdump/Hexagon/source-interleave-hexagon.ll
index bfa18e0..02cef67 100644
--- a/test/tools/llvm-objdump/Hexagon/source-interleave-hexagon.ll
+++ b/test/tools/llvm-objdump/Hexagon/source-interleave-hexagon.ll
@@ -41,7 +41,7 @@
 !llvm.module.flags = !{!6, !7}
 !llvm.ident = !{!8}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang d19a95e94dc57c5a72fd25d64f26134aa7d25fa0) (http://llvm.org/git/llvm.git 313924e6ff8a332063f61d3fda03812c220762f6)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
 !1 = !DIFile(filename: "source-interleave-hexagon.c", directory: "SRC_COMPDIR")
 !2 = !{}
 !3 = !{!4}
@@ -49,7 +49,7 @@
 !5 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
 !6 = !{i32 2, !"Dwarf Version", i32 4}
 !7 = !{i32 2, !"Debug Info Version", i32 3}
-!8 = !{!"clang version 4.0.0 (http://llvm.org/git/clang d19a95e94dc57c5a72fd25d64f26134aa7d25fa0) (http://llvm.org/git/llvm.git 313924e6ff8a332063f61d3fda03812c220762f6)"}
+!8 = !{!"clang version 4.0.0"}
 !9 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 2, type: !10, isLocal: false, isDefinition: true, scopeLine: 2, isOptimized: false, unit: !0, retainedNodes: !2)
 !10 = !DISubroutineType(types: !11)
 !11 = !{!5}
diff --git a/test/tools/llvm-objdump/Inputs/trivial.obj.wasm b/test/tools/llvm-objdump/Inputs/trivial.obj.wasm
index 8652d67..a894522 100644
--- a/test/tools/llvm-objdump/Inputs/trivial.obj.wasm
+++ b/test/tools/llvm-objdump/Inputs/trivial.obj.wasm
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/macho-multiple-text b/test/tools/llvm-objdump/X86/Inputs/macho-multiple-text
new file mode 100755
index 0000000..7b04830
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/macho-multiple-text
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/demangle.s b/test/tools/llvm-objdump/X86/demangle.s
new file mode 100644
index 0000000..e20fa3d
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/demangle.s
@@ -0,0 +1,22 @@
+# RUN: llvm-mc %s -filetype=obj -triple=x86_64-pc-linux -o %t
+# RUN: llvm-objdump -syms -reloc -demangle %t | FileCheck %s
+
+## Check we demangle symbols when printing relocations.
+# CHECK:      0000000000000001 R_X86_64_PLT32 foo()-4
+
+## Check we demangle symbols when printing the symbol table.
+# CHECK:      SYMBOL TABLE:
+# CHECK-NEXT: 0000000000000000 g     F .text           00000000 foo()
+
+## Check the case when relocations are inlined into disassembly.
+# RUN: llvm-objdump -d -r -demangle %t | FileCheck %s --check-prefix=INLINE
+# INLINE:      Disassembly of section .text:
+# INLINE-NEXT: foo():
+# INLINE-NEXT:  0: {{.*}}  callq   0 <_Z3foov+0x5>
+# INLINE-NEXT:  0000000000000001:  R_X86_64_PLT32 foo()-4
+
+.text
+.globl _Z3foov
+.type _Z3foov,@function
+_Z3foov:
+ callq _Z3foov@PLT
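
The checks above rely on _Z3foov demangling to "foo()": in the Itanium scheme, _Z introduces a mangled name, 3foo is a length-prefixed identifier, and the trailing v encodes an empty (void) parameter list. A toy Python decoder for just this one shape, as an illustration rather than a general demangler:

    import re

    # Handles only _Z<len><name>v, the single shape used in the test above.
    def demangle_trivial(sym):
        m = re.fullmatch(r"_Z(\d+)(\w+)v", sym)
        if not m:
            return sym
        return m.group(2)[:int(m.group(1))] + "()"

    print(demangle_trivial("_Z3foov"))  # foo()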
diff --git a/test/tools/llvm-objdump/X86/macho-disassemble-all.test b/test/tools/llvm-objdump/X86/macho-disassemble-all.test
new file mode 100644
index 0000000..ce22d74
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/macho-disassemble-all.test
@@ -0,0 +1,39 @@
+// RUN: llvm-objdump -m -d -full-leading-addr -print-imm-hex -no-show-raw-insn %p/Inputs/macho-multiple-text | FileCheck %s -check-prefix=TEXT
+
+TEXT:      (__TEXT,__text) section
+TEXT-NEXT: _main:
+TEXT-NEXT: 0000000100000f60	pushq	%rbp
+TEXT-NEXT: 0000000100000f61	movq	%rsp, %rbp
+TEXT-NEXT: 0000000100000f64	subq	$0x10, %rsp
+TEXT-NEXT: 0000000100000f68	movl	$0x0, -0x4(%rbp)
+TEXT-NEXT: 0000000100000f6f	callq	_hello
+TEXT-NEXT: 0000000100000f74	xorl	%eax, %eax
+TEXT-NEXT: 0000000100000f76	addq	$0x10, %rsp
+TEXT-NEXT: 0000000100000f7a	popq	%rbp
+TEXT-NEXT: 0000000100000f7b	retq
+
+// RUN: llvm-objdump -m -D -full-leading-addr -print-imm-hex -no-show-raw-insn %p/Inputs/macho-multiple-text | FileCheck %s -check-prefix=ALL
+
+ALL:      (__TEXT,__text) section
+ALL-NEXT: _main:
+ALL-NEXT: 0000000100000f60	pushq	%rbp
+ALL-NEXT: 0000000100000f61	movq	%rsp, %rbp
+ALL-NEXT: 0000000100000f64	subq	$0x10, %rsp
+ALL-NEXT: 0000000100000f68	movl	$0x0, -0x4(%rbp)
+ALL-NEXT: 0000000100000f6f	callq	_hello
+ALL-NEXT: 0000000100000f74	xorl	%eax, %eax
+ALL-NEXT: 0000000100000f76	addq	$0x10, %rsp
+ALL-NEXT: 0000000100000f7a	popq	%rbp
+ALL-NEXT: 0000000100000f7b	retq
+ALL:      (__TEXT_EXEC,__text) section
+ALL-NEXT: _hello:
+ALL-NEXT: 0000000100001000	pushq	%rbp
+ALL-NEXT: 0000000100001001	movq	%rsp, %rbp
+ALL-NEXT: 0000000100001004	subq	$0x10, %rsp
+ALL-NEXT: 0000000100001008	leaq	-0x71(%rip), %rdi ## literal pool for: "hello, world!\n"
+ALL-NEXT: 000000010000100f	movb	$0x0, %al
+ALL-NEXT: 0000000100001011	callq	0x100000f7c ## symbol stub for: _printf
+ALL-NEXT: 0000000100001016	movl	%eax, -0x4(%rbp)
+ALL-NEXT: 0000000100001019	addq	$0x10, %rsp
+ALL-NEXT: 000000010000101d	popq	%rbp
+ALL-NEXT: 000000010000101e	retq
diff --git a/test/tools/llvm-objdump/X86/out-of-section-sym.test b/test/tools/llvm-objdump/X86/out-of-section-sym.test
index f70dce6..55de107 100644
--- a/test/tools/llvm-objdump/X86/out-of-section-sym.test
+++ b/test/tools/llvm-objdump/X86/out-of-section-sym.test
@@ -6,7 +6,6 @@
 CHECK-NEXT: _start:
 CHECK-NEXT:   10:  c3  retl
 CHECK-NEXT: SYMBOL TABLE:
-CHECK-NEXT: 00000000         *UND*  00000000
 CHECK-NEXT: 00000010 l    d  .text  00000000 .text
 CHECK-NEXT: 00000010         .text  00000000 _start
 CHECK-NEXT: 00000020         .text  00000000 _fdata
diff --git a/test/tools/llvm-objdump/X86/print-symbol-addr.s b/test/tools/llvm-objdump/X86/print-symbol-addr.s
new file mode 100644
index 0000000..9c5b23e
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/print-symbol-addr.s
@@ -0,0 +1,29 @@
+// RUN: llvm-mc %s -filetype=obj -triple=x86_64-pc-linux -o %t.o
+
+// Check we print the addresses of `foo` and `bar`.
+// RUN: llvm-objdump -d %t.o | FileCheck %s
+// CHECK:      Disassembly of section .text:
+// CHECK-NEXT: 0000000000000000 foo:
+// CHECK-NEXT:   0: {{.*}}  nop
+// CHECK-NEXT:   1: {{.*}}  nop
+// CHECK:      0000000000000002 bar:
+// CHECK-NEXT:   2: {{.*}}  nop
+
+// Check we do not print the addresses with -no-leading-addr.
+// RUN: llvm-objdump -d -no-leading-addr %t.o | FileCheck %s --check-prefix=NOADDR
+// NOADDR:      Disassembly of section .text:
+// NOADDR-NEXT: {{^}}foo:
+// NOADDR-NEXT:   {{.*}} nop
+// NOADDR-NEXT:   {{.*}} nop
+// NOADDR:      {{^}}bar:
+// NOADDR-NEXT:   {{.*}} nop
+
+.text
+.globl  foo
+.type   foo, @function
+foo:
+ nop
+ nop
+
+bar:
+ nop
diff --git a/test/tools/llvm-objdump/X86/source-interleave-x86_64.ll b/test/tools/llvm-objdump/X86/source-interleave-x86_64.ll
index f9b9054..2a47726 100644
--- a/test/tools/llvm-objdump/X86/source-interleave-x86_64.ll
+++ b/test/tools/llvm-objdump/X86/source-interleave-x86_64.ll
@@ -42,7 +42,7 @@
 !llvm.module.flags = !{!6, !7}
 !llvm.ident = !{!8}
 
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang d19a95e94dc57c5a72fd25d64f26134aa7d25fa0) (http://llvm.org/git/llvm.git 313924e6ff8a332063f61d3fda03812c220762f6)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
 !1 = !DIFile(filename: "source-interleave-x86_64.c", directory: "SRC_COMPDIR")
 !2 = !{}
 !3 = !{!4}
@@ -50,7 +50,7 @@
 !5 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
 !6 = !{i32 2, !"Dwarf Version", i32 4}
 !7 = !{i32 2, !"Debug Info Version", i32 3}
-!8 = !{!"clang version 4.0.0 (http://llvm.org/git/clang d19a95e94dc57c5a72fd25d64f26134aa7d25fa0) (http://llvm.org/git/llvm.git 313924e6ff8a332063f61d3fda03812c220762f6)"}
+!8 = !{!"clang version 4.0.0"}
 !9 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 2, type: !10, isLocal: false, isDefinition: true, scopeLine: 2, isOptimized: false, unit: !0, retainedNodes: !2)
 !10 = !DISubroutineType(types: !11)
 !11 = !{!5}
diff --git a/test/tools/llvm-objdump/all-headers.test b/test/tools/llvm-objdump/all-headers.test
new file mode 100644
index 0000000..6bb9a84
--- /dev/null
+++ b/test/tools/llvm-objdump/all-headers.test
@@ -0,0 +1,20 @@
+# RUN: yaml2obj %s > %t
+# RUN: llvm-objdump --all-headers %t | FileCheck %s
+
+## Check we print the file format, architecture and start address before
+## anything else when using --all-headers.
+
+# CHECK:       file format ELF64-x86-64
+# CHECK-EMPTY:
+# CHECK-NEXT:  architecture: x86_64
+# CHECK-NEXT:  start address: 0x0000000000000000
+# CHECK-EMPTY:
+# CHECK-NEXT:  Sections:
+
+!ELF
+FileHeader:
+  Class:           ELFCLASS64
+  Data:            ELFDATA2LSB
+  Type:            ET_EXEC
+  Machine:         EM_X86_64
+Sections:
diff --git a/test/tools/llvm-objdump/symbol-table-elf.test b/test/tools/llvm-objdump/symbol-table-elf.test
index abe339d..649e54c 100644
--- a/test/tools/llvm-objdump/symbol-table-elf.test
+++ b/test/tools/llvm-objdump/symbol-table-elf.test
@@ -5,7 +5,6 @@
 # RUN: FileCheck %s --input-file=%t1
 
 # CHECK:      SYMBOL TABLE:
-# CHECK-NEXT: 0000000000000000         *UND*     00000000
 # CHECK-NEXT: 0000000000001004 l     F .text     00000000 lfoo
 # CHECK-NEXT: 0000000000001008 l     O .text     00000000 lbar
 # CHECK-NEXT: 0000000000001004 g     F .text     00000000 foo
diff --git a/test/tools/llvm-profdata/Inputs/cutoff.proftext b/test/tools/llvm-profdata/Inputs/cutoff.proftext
new file mode 100644
index 0000000..1ce4843
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/cutoff.proftext
@@ -0,0 +1,21 @@
+# IR level Instrumentation Flag
+:ir
+bar
+10
+2
+0
+0
+
+main
+16650
+4
+1
+1000
+1000000
+499500
+
+foo
+10
+2
+999
+1
diff --git a/test/tools/llvm-profdata/cutoff.test b/test/tools/llvm-profdata/cutoff.test
new file mode 100644
index 0000000..f04ea16
--- /dev/null
+++ b/test/tools/llvm-profdata/cutoff.test
@@ -0,0 +1,23 @@
+Basic tests for the cutoff options of the show command.
+
+RUN: llvm-profdata show -value-cutoff=1 %p/Inputs/cutoff.proftext | FileCheck %s -check-prefix=CUTOFF1 -check-prefix=CHECK
+RUN: llvm-profdata show -value-cutoff=1000 %p/Inputs/cutoff.proftext | FileCheck %s -check-prefix=CUTOFF1000 -check-prefix=CHECK
+RUN: llvm-profdata show -all-functions -value-cutoff=1 %p/Inputs/cutoff.proftext | FileCheck %s -check-prefix=CUTOFF1FUNC -check-prefix=CUTOFF1 -check-prefix=CHECK
+RUN: llvm-profdata show -all-functions -value-cutoff=1000 %p/Inputs/cutoff.proftext | FileCheck %s -check-prefix=CUTOFF1000FUNC -check-prefix=CUTOFF1000 -check-prefix=CHECK
+RUN: llvm-profdata show -value-cutoff=1 -list-below-cutoff %p/Inputs/cutoff.proftext | FileCheck %s -check-prefix=BELOW1 -check-prefix=CUTOFF1 -check-prefix=CHECK
+RUN: llvm-profdata show -value-cutoff=1000 -list-below-cutoff %p/Inputs/cutoff.proftext | FileCheck %s -check-prefix=BELOW1000 -check-prefix=CUTOFF1000 -check-prefix=CHECK
+CUTOFF1FUNC-NOT: bar
+CUTOFF1FUNC: Functions shown: 2
+CUTOFF1000FUNC-NOT: bar
+CUTOFF1000FUNC-NOT: foo
+CUTOFF1000FUNC: Functions shown: 1
+BELOW1: The list of functions with the maximum counter less than 1:
+BELOW1:  bar: (Max = 0 Sum = 0)
+BELOW1000: The list of functions with the maximum counter less than 1000:
+BELOW1000:  bar: (Max = 0 Sum = 0)
+BELOW1000:  foo: (Max = 999 Sum = 1000)
+CHECK: Total functions: 3
+CUTOFF1: Number of functions with maximum count (< 1): 1
+CUTOFF1: Number of functions with maximum count (>= 1): 2
+CUTOFF1000: Number of functions with maximum count (< 1000): 2
+CUTOFF1000: Number of functions with maximum count (>= 1000): 1
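
A short sketch of the -value-cutoff logic these checks pin down, using the counter values from Inputs/cutoff.proftext above (bar: 0, 0; main: 1, 1000, 1000000, 499500; foo: 999, 1). This is an illustrative Python model, not llvm-profdata's implementation:

    # Classify functions by whether their maximum counter reaches the cutoff.
    def classify(functions, cutoff):
        below = [(name, max(c), sum(c)) for name, c in functions
                 if max(c) < cutoff]
        return below, len(functions) - len(below)

    funcs = [("bar", [0, 0]), ("main", [1, 1000, 1000000, 499500]),
             ("foo", [999, 1])]
    below, at_or_above = classify(funcs, 1000)
    print(below)        # [('bar', 0, 0), ('foo', 999, 1000)], the BELOW1000 list
    print(at_or_above)  # 1, matching "maximum count (>= 1000): 1"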
diff --git a/test/tools/llvm-profdata/value-prof.proftext b/test/tools/llvm-profdata/value-prof.proftext
index 31a7698..a854a51 100644
--- a/test/tools/llvm-profdata/value-prof.proftext
+++ b/test/tools/llvm-profdata/value-prof.proftext
@@ -47,9 +47,9 @@
 
 #ICTXT: Indirect Call Site Count: 3
 #ICTXT-NEXT:    Indirect Target Results:
-#ICTXT-NEXT:	[ 1, foo, 100 ]
-#ICTXT-NEXT:	[ 1, foo2, 1000 ]
-#ICTXT-NEXT:	[ 2, foo2, 20000 ]
+#ICTXT-NEXT:	[ 1, foo, 100 ] (9.09%)
+#ICTXT-NEXT:	[ 1, foo2, 1000 ] (90.91%)
+#ICTXT-NEXT:	[ 2, foo2, 20000 ] (100.00%)
 
 #IC: Indirect Call Site Count: 3
 #IC-NEXT:    Indirect Target Results:
diff --git a/test/tools/llvm-rc/absolute.test b/test/tools/llvm-rc/absolute.test
index 5e8a6c8..95aff3e 100644
--- a/test/tools/llvm-rc/absolute.test
+++ b/test/tools/llvm-rc/absolute.test
@@ -1,3 +1,3 @@
 ; RUN: touch %t.manifest
 ; RUN: echo "1 24 \"%t.manifest\"" > %t.rc
-; RUN: llvm-rc %t.rc
+; RUN: llvm-rc -- %t.rc
diff --git a/test/tools/llvm-rc/codepage.test b/test/tools/llvm-rc/codepage.test
index ce17e0a..05e3181 100644
--- a/test/tools/llvm-rc/codepage.test
+++ b/test/tools/llvm-rc/codepage.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /C 65001 /FO %t.utf8.res %p/Inputs/utf8.rc
+; RUN: llvm-rc /C 65001 /FO %t.utf8.res -- %p/Inputs/utf8.rc
 ; RUN: llvm-readobj %t.utf8.res | FileCheck %s --check-prefix=UTF8
 
 ; UTF8:      Resource type (int): 6
diff --git a/test/tools/llvm-rc/cpp-output.test b/test/tools/llvm-rc/cpp-output.test
index 456a59d..984e9fd 100644
--- a/test/tools/llvm-rc/cpp-output.test
+++ b/test/tools/llvm-rc/cpp-output.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/cpp-output.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/cpp-output.rc
 ; RUN: llvm-readobj %t | FileCheck %s
 
 ; CHECK:      Resource type (int): 6
diff --git a/test/tools/llvm-rc/flags.test b/test/tools/llvm-rc/flags.test
index 452e90a..5b71481 100644
--- a/test/tools/llvm-rc/flags.test
+++ b/test/tools/llvm-rc/flags.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /dry-run /FO %t %p/Inputs/empty.rc 2>&1 | FileCheck %s --allow-empty --check-prefix=FO
-; RUN: llvm-rc /dry-run /FO%t %p/Inputs/empty.rc 2>&1 | FileCheck %s --allow-empty --check-prefix=FO
+; RUN: llvm-rc /dry-run /FO %t -- %p/Inputs/empty.rc 2>&1 | FileCheck %s --allow-empty --check-prefix=FO
+; RUN: llvm-rc /dry-run /FO%t -- %p/Inputs/empty.rc 2>&1 | FileCheck %s --allow-empty --check-prefix=FO
 
 ; FO-NOT: Exactly one input file should be provided.
diff --git a/test/tools/llvm-rc/include-paths.test b/test/tools/llvm-rc/include-paths.test
index e6c52b1..10a77e6 100644
--- a/test/tools/llvm-rc/include-paths.test
+++ b/test/tools/llvm-rc/include-paths.test
@@ -1,21 +1,21 @@
 ; Should find the bitmap if it is in the same folder as the rc file.
 ; RUN: rm -f %t.include.res
-; RUN: llvm-rc /FO %t.include.res %p/Inputs/include.rc
+; RUN: llvm-rc /FO %t.include.res -- %p/Inputs/include.rc
 ; RUN: llvm-readobj %t.include.res | FileCheck --check-prefix=FOUND %s
 
 ; Try including files without quotes.
 ; RUN: rm -f %t.noquotes.res
-; RUN: llvm-rc /FO %t.noquotes.res %p/Inputs/include-noquotes.rc
+; RUN: llvm-rc /FO %t.noquotes.res -- %p/Inputs/include-noquotes.rc
 ; RUN: llvm-readobj %t.noquotes.res | FileCheck --check-prefix=FOUND %s
 
 ; Should find the bitmap if the folder is explicitly specified.
 ; RUN: rm -f %t.nested-include.res
-; RUN: llvm-rc /FO %t.nested-include.res /I %p/Inputs/nested %p/Inputs/deep-include.rc
+; RUN: llvm-rc /FO %t.nested-include.res /I %p/Inputs/nested -- %p/Inputs/deep-include.rc
 ; RUN: llvm-readobj %t.nested-include.res | FileCheck --check-prefix=FOUND %s
 
 ; Otherwise, it should not find the bitmap.
 ; RUN: rm -f %t.nested-include.res
-; RUN: not llvm-rc /FO %t.nested-include.res %p/Inputs/deep-include.rc 2>&1 \
+; RUN: not llvm-rc /FO %t.nested-include.res -- %p/Inputs/deep-include.rc 2>&1 \
 ; RUN:   | FileCheck --check-prefix=MISSING %s
 
 ; Should find the bitmap if the process's current working directory
@@ -24,7 +24,7 @@
 ; failure of other tests if run first.
 ; RUN: rm -f %t.nested-include.res
 ; RUN: cd %p/Inputs/nested
-; RUN: llvm-rc /FO %t.nested-include.res %p/Inputs/include.rc
+; RUN: llvm-rc /FO %t.nested-include.res -- %p/Inputs/include.rc
 ; RUN: llvm-readobj %t.nested-include.res | FileCheck --check-prefix=FOUND %s
 
 FOUND:      Resource type (int): 2
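
Taken together, the RUN lines above pin down llvm-rc's include resolution: the directory of the .rc file, any /I directories, and the process's working directory are all searched, and the lookup fails otherwise. A hypothetical Python sketch of that resolution; the precise search order is an assumption, since the test only fixes the set of searched directories:

    import os

    def resolve_include(name, rc_dir, include_dirs):
        # Assumed order: next to the .rc file, then /I dirs, then the cwd.
        for d in [rc_dir, *include_dirs, os.getcwd()]:
            candidate = os.path.join(d, name)
            if os.path.exists(candidate):
                return candidate
        raise FileNotFoundError(name)  # corresponds to the MISSING case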
diff --git a/test/tools/llvm-rc/memoryflags-stringtable.test b/test/tools/llvm-rc/memoryflags-stringtable.test
index f168bed..e8a2d40 100644
--- a/test/tools/llvm-rc/memoryflags-stringtable.test
+++ b/test/tools/llvm-rc/memoryflags-stringtable.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/memoryflags-stringtable.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/memoryflags-stringtable.rc
 ; RUN: llvm-readobj %t | FileCheck %s
 
 ; CHECK:      Resource type (int): 6
diff --git a/test/tools/llvm-rc/memoryflags.test b/test/tools/llvm-rc/memoryflags.test
index c36fb9b..14e1193 100644
--- a/test/tools/llvm-rc/memoryflags.test
+++ b/test/tools/llvm-rc/memoryflags.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/memoryflags.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/memoryflags.rc
 ; RUN: llvm-readobj %t | FileCheck %s
 
 ; CHECK:      Resource type (int): 1
diff --git a/test/tools/llvm-rc/not-expr.test b/test/tools/llvm-rc/not-expr.test
index c602234..43725ed 100644
--- a/test/tools/llvm-rc/not-expr.test
+++ b/test/tools/llvm-rc/not-expr.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/not-expr.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/not-expr.rc
 ; RUN: llvm-readobj %t | FileCheck %s --check-prefix=NOTEXPR
 
 ; NOTEXPR: Resource type (int): 5
diff --git a/test/tools/llvm-rc/parser-expr.test b/test/tools/llvm-rc/parser-expr.test
index 9558f93..1fb6aa6 100644
--- a/test/tools/llvm-rc/parser-expr.test
+++ b/test/tools/llvm-rc/parser-expr.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /dry-run /V %p/Inputs/parser-expr.rc | FileCheck %s
+; RUN: llvm-rc /dry-run /V -- %p/Inputs/parser-expr.rc | FileCheck %s
 
 ; CHECK:  Language: 5, Sublanguage: 1
 ; CHECK-NEXT:  Language: 3, Sublanguage: 2
diff --git a/test/tools/llvm-rc/parser.test b/test/tools/llvm-rc/parser.test
index 66ed738..4f10d78 100644
--- a/test/tools/llvm-rc/parser.test
+++ b/test/tools/llvm-rc/parser.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /dry-run /V %p/Inputs/parser-correct-everything.rc | FileCheck %s --check-prefix PGOOD
+; RUN: llvm-rc /dry-run /V -- %p/Inputs/parser-correct-everything.rc | FileCheck %s --check-prefix PGOOD
 
 ; PGOOD:  Icon (meh): "hello.bmp"
 ; PGOOD-NEXT:  Icon (Icon): "Icon"
diff --git a/test/tools/llvm-rc/tag-accelerators.test b/test/tools/llvm-rc/tag-accelerators.test
index 093a0bd..910bf21 100644
--- a/test/tools/llvm-rc/tag-accelerators.test
+++ b/test/tools/llvm-rc/tag-accelerators.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/tag-accelerators.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/tag-accelerators.rc
 ; RUN: llvm-readobj %t | FileCheck %s --check-prefix=ACCELERATORS
 
 ; ACCELERATORS: Resource type (int): 9
diff --git a/test/tools/llvm-rc/tag-dialog.test b/test/tools/llvm-rc/tag-dialog.test
index 85a8c20..d44326e 100644
--- a/test/tools/llvm-rc/tag-dialog.test
+++ b/test/tools/llvm-rc/tag-dialog.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/tag-dialog.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/tag-dialog.rc
 ; RUN: llvm-readobj %t | FileCheck %s --check-prefix=DIALOG
 
 ; DIALOG: Resource type (int): 5
diff --git a/test/tools/llvm-rc/tag-escape.test b/test/tools/llvm-rc/tag-escape.test
index 7c58e99..51c3d92 100644
--- a/test/tools/llvm-rc/tag-escape.test
+++ b/test/tools/llvm-rc/tag-escape.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/tag-escape.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/tag-escape.rc
 ; RUN: llvm-readobj %t | FileCheck %s
 
 ; CHECK:      Resource type (int): 4
diff --git a/test/tools/llvm-rc/tag-html.test b/test/tools/llvm-rc/tag-html.test
index 571e1bc..efeb04b 100644
--- a/test/tools/llvm-rc/tag-html.test
+++ b/test/tools/llvm-rc/tag-html.test
@@ -1,6 +1,6 @@
 ; RUN: rm -rf %t && mkdir %t && cd %t
 ; RUN: cp %p/Inputs/webpage*.html .
-; RUN: llvm-rc /FO %t/tag-html.res %p/Inputs/tag-html.rc
+; RUN: llvm-rc /FO %t/tag-html.res -- %p/Inputs/tag-html.rc
 ; RUN: llvm-readobj %t/tag-html.res | FileCheck %s --check-prefix HTML
 
 ; HTML: Resource type (int): 23
diff --git a/test/tools/llvm-rc/tag-icon-cursor.test b/test/tools/llvm-rc/tag-icon-cursor.test
index 44ef684..b4301f5 100644
--- a/test/tools/llvm-rc/tag-icon-cursor.test
+++ b/test/tools/llvm-rc/tag-icon-cursor.test
@@ -1,7 +1,7 @@
 ; RUN: rm -rf %t
 ; RUN: mkdir %t
 
-; RUN: llvm-rc /FO %t/tag-icon-cursor.res %p/Inputs/tag-icon-cursor.rc
+; RUN: llvm-rc /FO %t/tag-icon-cursor.res -- %p/Inputs/tag-icon-cursor.rc
 ; RUN: llvm-readobj %t/tag-icon-cursor.res | FileCheck %s
 
 ; CHECK: Resource type (int): 1
diff --git a/test/tools/llvm-rc/tag-menu.test b/test/tools/llvm-rc/tag-menu.test
index 25dc395..91e2445 100644
--- a/test/tools/llvm-rc/tag-menu.test
+++ b/test/tools/llvm-rc/tag-menu.test
@@ -1,9 +1,9 @@
-; RUN: llvm-rc /FO %t %p/Inputs/tag-menu.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/tag-menu.rc
 ; RUN: llvm-readobj %t | FileCheck %s --check-prefix=MENU
 
 ; Test running llvm-rc without an explicit output file.
 ; RUN: cp %p/Inputs/tag-menu.rc %t.implicit.rc
-; RUN: llvm-rc %t.implicit.rc
+; RUN: llvm-rc -- %t.implicit.rc
 ; RUN: llvm-readobj %t.implicit.res | FileCheck --check-prefix=MENU %s
 
 ; MENU: Resource type (int): 4
diff --git a/test/tools/llvm-rc/tag-stringtable.test b/test/tools/llvm-rc/tag-stringtable.test
index 43b5f5c..8dc4e57 100644
--- a/test/tools/llvm-rc/tag-stringtable.test
+++ b/test/tools/llvm-rc/tag-stringtable.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/tag-stringtable-basic.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/tag-stringtable-basic.rc
 ; RUN: llvm-readobj %t | FileCheck %s
 
 ; CHECK:      Resource type (int): 6
diff --git a/test/tools/llvm-rc/tag-user.test b/test/tools/llvm-rc/tag-user.test
index c0c1cfd..e93899c 100644
--- a/test/tools/llvm-rc/tag-user.test
+++ b/test/tools/llvm-rc/tag-user.test
@@ -2,7 +2,7 @@
 ; RUN: mkdir %t
 ; RUN: cd %t
 ; RUN: cp %p/Inputs/bitmap.bmp .
-; RUN: llvm-rc /FO %t/tag-user.res %p/Inputs/tag-user.rc
+; RUN: llvm-rc /FO %t/tag-user.res -- %p/Inputs/tag-user.rc
 ; RUN: llvm-readobj %t/tag-user.res | FileCheck %s
 
 ; CHECK:      Resource type (int): 500
diff --git a/test/tools/llvm-rc/tag-versioninfo.test b/test/tools/llvm-rc/tag-versioninfo.test
index 4c30346..0118f3c 100644
--- a/test/tools/llvm-rc/tag-versioninfo.test
+++ b/test/tools/llvm-rc/tag-versioninfo.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/tag-versioninfo.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/tag-versioninfo.rc
 ; RUN: llvm-readobj %t | FileCheck %s
 
 ; CHECK:      Resource type (int): 16
diff --git a/test/tools/llvm-rc/tokenizer.test b/test/tools/llvm-rc/tokenizer.test
index 5103b19..5916467 100644
--- a/test/tools/llvm-rc/tokenizer.test
+++ b/test/tools/llvm-rc/tokenizer.test
@@ -1,4 +1,4 @@
-; RUN: not llvm-rc /V /FO %t.res %p/Inputs/tokens.rc | FileCheck %s
+; RUN: not llvm-rc /V /FO %t.res -- %p/Inputs/tokens.rc | FileCheck %s
 ; llvm-rc now fails on this sample because it is an invalid resource
 ; file script. We silence the error message and just analyze the output.
 
diff --git a/test/tools/llvm-rc/versioninfo-padding.test b/test/tools/llvm-rc/versioninfo-padding.test
index 7cfb537..fab18a6 100644
--- a/test/tools/llvm-rc/versioninfo-padding.test
+++ b/test/tools/llvm-rc/versioninfo-padding.test
@@ -1,4 +1,4 @@
-; RUN: llvm-rc /FO %t %p/Inputs/versioninfo-padding.rc
+; RUN: llvm-rc /FO %t -- %p/Inputs/versioninfo-padding.rc
 ; RUN: llvm-readobj %t | FileCheck %s
 
 ; CHECK:      Resource type (int): 16
diff --git a/test/tools/llvm-readobj/Inputs/relocs.py b/test/tools/llvm-readobj/Inputs/relocs.py
index 3d0cae5..8e621ce 100644
--- a/test/tools/llvm-readobj/Inputs/relocs.py
+++ b/test/tools/llvm-readobj/Inputs/relocs.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 # Generates ELF, COFF and MachO object files for different architectures
 # containing all relocations:
 #
@@ -14,7 +16,6 @@
 
 import operator
 import shutil
-import StringIO
 import struct
 import subprocess
 import sys
@@ -43,17 +44,17 @@
 
   # Not supported (Enums are immutable).
   def __setattr__(self, name, value):
-    raise NotSupportedException, self.__setattr__
+    raise NotSupportedException(self.__setattr__)
 
   # Not supported (Enums are immutable).
   def __delattr__(self, name):
-    raise NotSupportedException, self.__delattr__
+    raise NotSupportedException(self.__delattr__)
 
   # Gets the enum symbol for the specified value.
   def __getitem__(self, value):
     symbol = self._map.get(value)
     if symbol is None:
-      raise KeyError, value
+      raise KeyError(value)
     return symbol
 
   # Gets the enum symbol for the specified value or none.
@@ -63,18 +64,18 @@
 
   # Not supported (Enums are immutable).
   def __setitem__(self, value, symbol):
-    raise NotSupportedException, self.__setitem__
+    raise NotSupportedException(self.__setitem__)
 
   # Not supported (Enums are immutable).
   def __delitem__(self, value):
-    raise NotSupportedException, self.__delitem__
+    raise NotSupportedException(self.__delitem__)
 
   def entries(self):
     # sort by (value, name)
     def makeKey(item):
       return (item[1], item[0])
     e = []
-    for pair in sorted(self._nameMap.iteritems(), key=makeKey):
+    for pair in sorted(self._nameMap.items(), key=makeKey):
       e.append(pair)
     return e
 
@@ -100,7 +101,7 @@
   def read(self, N):
     data = self.file.read(N)
     if len(data) != N:
-      raise ValueError, "Out of data!"
+      raise ValueError("Out of data!")
     return data
 
   def int8(self):
@@ -266,7 +267,7 @@
   elif fileclass == 2:
     f.is64Bit = True
   else:
-    raise ValueError, "Unknown file class %x" % fileclass
+    raise ValueError("Unknown file class %x" % fileclass)
 
   byteordering = f.uint8()
   if byteordering == 1:
@@ -274,7 +275,7 @@
   elif byteordering == 2:
       f.isLSB = False
   else:
-      raise ValueError, "Unknown byte ordering %x" % byteordering
+      raise ValueError("Unknown byte ordering %x" % byteordering)
 
   f.seek(18)
   e_machine = f.uint16()
@@ -375,7 +376,7 @@
   elif magic == '\xCF\xFA\xED\xFE':
     f.isLSB, f.is64Bit = True, True
   else:
-    raise ValueError,"Not a Mach-O object file: %r (bad magic)" % path
+    raise ValueError("Not a Mach-O object file: %r (bad magic)" % path)
 
   cputype = f.uint32()
   cpusubtype = f.uint32()
@@ -392,8 +393,8 @@
     patchMachoLoadCommand(f, relocs)
 
   if f.tell() - start != loadCommandsSize:
-    raise ValueError,"%s: warning: invalid load commands size: %r" % (
-      sys.argv[0], loadCommandsSize)
+    raise ValueError("%s: warning: invalid load commands size: %r" % (
+      sys.argv[0], loadCommandsSize))
 
 def patchMachoLoadCommand(f, relocs):
   start = f.tell()
@@ -408,8 +409,8 @@
     f.read(cmdSize - 8)
 
   if f.tell() - start != cmdSize:
-    raise ValueError,"%s: warning: invalid load command size: %r" % (
-      sys.argv[0], cmdSize)
+    raise ValueError("%s: warning: invalid load command size: %r" % (
+      sys.argv[0], cmdSize))
 
 def patchMachoSegmentLoadCommand(f, relocs):
   segment_name = f.read(16)
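
The relocs.py hunks above are a mechanical Python 2-to-3 migration. The recurring patterns, condensed into one runnable Python 3 snippet with the old Python 2 spellings noted in comments:

    from __future__ import print_function  # no-op on py3; enables print() on py2

    def read_exact(f, n):
        data = f.read(n)
        if len(data) != n:
            raise ValueError("Out of data!")  # py2: raise ValueError, "..."
        return data

    entries = sorted({"b": 2, "a": 1}.items())  # py2: .iteritems()
    print(entries)  # [('a', 1), ('b', 2)]      # py2: print without parentheses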
diff --git a/test/tools/llvm-readobj/Inputs/trivial.obj.wasm b/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
index 2f99d34..10ebbee 100644
--- a/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
+++ b/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
Binary files differ
diff --git a/test/tools/llvm-readobj/dyn-symbols.test b/test/tools/llvm-readobj/dyn-symbols.test
new file mode 100644
index 0000000..16a2aba
--- /dev/null
+++ b/test/tools/llvm-readobj/dyn-symbols.test
@@ -0,0 +1,163 @@
+RUN: llvm-readobj --dyn-symbols %p/Inputs/dynamic-table-so.x86 | FileCheck %s
+
+# Check that the two-letter alias -dt is equivalent to the full flag name
+# --dyn-symbols.
+RUN: llvm-readobj -dt %p/Inputs/dynamic-table-so.x86 > %t.readobj-dt-alias
+RUN: llvm-readobj --dyn-symbols %p/Inputs/dynamic-table-so.x86 > %t.readobj-dt-no-alias
+RUN: diff %t.readobj-dt-alias %t.readobj-dt-no-alias
+
+# CHECK:      DynamicSymbols [
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name:
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Local
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name:
+# CHECK-NEXT:     Value: 0x618
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Local
+# CHECK-NEXT:     Type: Section
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .init
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name:
+# CHECK-NEXT:     Value: 0x200DC0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Local
+# CHECK-NEXT:     Type: Section
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .tbss
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: _ITM_deregisterTMCloneTable{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Weak
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: puts@GLIBC_2.2.5{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: Function
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: __tls_get_addr@GLIBC_2.3{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: Function
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: __gmon_start__{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Weak
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: _Jv_RegisterClasses{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Weak
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: _ITM_registerTMCloneTable{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Weak
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: __cxa_finalize@GLIBC_2.2.5{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Weak
+# CHECK-NEXT:     Type: Function
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: Undefined
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: _edata{{ }}
+# CHECK-NEXT:     Value: 0x201030
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .data
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: foo{{ }}
+# CHECK-NEXT:     Value: 0x0
+# CHECK-NEXT:     Size: 4
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: TLS
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .tbss
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: _end{{ }}
+# CHECK-NEXT:     Value: 0x201038
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .bss
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: __bss_start{{ }}
+# CHECK-NEXT:     Value: 0x201030
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: None
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .bss
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: main{{ }}
+# CHECK-NEXT:     Value: 0x780
+# CHECK-NEXT:     Size: 59
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: Function
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .text
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: _init{{ }}
+# CHECK-NEXT:     Value: 0x618
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: Function
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .init
+# CHECK-NEXT:   }
+# CHECK-NEXT:   Symbol {
+# CHECK-NEXT:     Name: _fini{{ }}
+# CHECK-NEXT:     Value: 0x7BC
+# CHECK-NEXT:     Size: 0
+# CHECK-NEXT:     Binding: Global
+# CHECK-NEXT:     Type: Function
+# CHECK-NEXT:     Other: 0
+# CHECK-NEXT:     Section: .fini
+# CHECK-NEXT:   }
+# CHECK-NEXT: ]
diff --git a/test/tools/llvm-readobj/elf-versioninfo.test b/test/tools/llvm-readobj/elf-versioninfo.test
index 919120e..82029cf 100644
--- a/test/tools/llvm-readobj/elf-versioninfo.test
+++ b/test/tools/llvm-readobj/elf-versioninfo.test
@@ -14,15 +14,15 @@
 CHECK-NEXT:   Symbols [
 CHECK-NEXT:     Symbol {
 CHECK-NEXT:       Version: 0
-CHECK-NEXT:       Name: @
+CHECK-NEXT:       Name: {{$}}
 CHECK-NEXT:     }
 CHECK-NEXT:     Symbol {
 CHECK-NEXT:       Version: 1
-CHECK-NEXT:       Name: _end@
+CHECK-NEXT:       Name: _end{{$}}
 CHECK-NEXT:     }
 CHECK-NEXT:     Symbol {
 CHECK-NEXT:       Version: 1
-CHECK-NEXT:       Name: _edata@
+CHECK-NEXT:       Name: _edata{{$}}
 CHECK-NEXT:     }
 CHECK-NEXT:     Symbol {
 CHECK-NEXT:       Version: 3
@@ -30,7 +30,7 @@
 CHECK-NEXT:     }
 CHECK-NEXT:     Symbol {
 CHECK-NEXT:       Version: 1
-CHECK-NEXT:       Name: __bss_start@
+CHECK-NEXT:       Name: __bss_start{{$}}
 CHECK-NEXT:     }
 CHECK-NEXT:     Symbol {
 CHECK-NEXT:       Version: 2
diff --git a/test/tools/llvm-readobj/gnu-symbols.test b/test/tools/llvm-readobj/gnu-symbols.test
index 0d8dcc8..7eb1123 100644
--- a/test/tools/llvm-readobj/gnu-symbols.test
+++ b/test/tools/llvm-readobj/gnu-symbols.test
@@ -50,25 +50,25 @@
 HASH:     Symbol table of .hash for image:
 HASH-NEXT:   Num Buc:    Value  Size   Type   Bind Vis      Ndx Name
 HASH-NEXT:     9   0: 00000000     0 FUNC    GLOBAL DEFAULT UND __gxx_personality_v0@CXXABI_1.3
-HASH-NEXT:    13   0: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS _edata@
+HASH-NEXT:    13   0: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS _edata{{$}}
 HASH-NEXT:     7   0: 00000000     0 FUNC    GLOBAL DEFAULT UND _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@GLIBCXX_3.4
-HASH-NEXT:     2   0: 00000000     0 NOTYPE  WEAK   DEFAULT UND _Jv_RegisterClasses@
-HASH-NEXT:     1   0: 00000000     0 NOTYPE  WEAK   DEFAULT UND __gmon_start__@
-HASH-NEXT:    16   1: 00000850    81 FUNC    GLOBAL DEFAULT  14 main@
+HASH-NEXT:     2   0: 00000000     0 NOTYPE  WEAK   DEFAULT UND _Jv_RegisterClasses{{$}}
+HASH-NEXT:     1   0: 00000000     0 NOTYPE  WEAK   DEFAULT UND __gmon_start__{{$}}
+HASH-NEXT:    16   1: 00000850    81 FUNC    GLOBAL DEFAULT  14 main{{$}}
 HASH-NEXT:    10   1: 00000000     0 FUNC    GLOBAL DEFAULT UND _Unwind_Resume@GCC_3.0
 HASH-NEXT:     8   1: 00000000     0 FUNC    GLOBAL DEFAULT UND puts@GLIBC_2.0
-HASH-NEXT:    12   1: 00001b68     0 NOTYPE  GLOBAL DEFAULT ABS _end@
+HASH-NEXT:    12   1: 00001b68     0 NOTYPE  GLOBAL DEFAULT ABS _end{{$}}
 HASH-NEXT:     6   1: 00000000     0 FUNC    GLOBAL DEFAULT UND _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@GLIBCXX_3.4
-HASH-NEXT:     5   1: 00000000     0 NOTYPE  WEAK   DEFAULT UND _ITM_registerTMCloneTable@
-HASH-NEXT:     4   1: 00000000     0 NOTYPE  WEAK   DEFAULT UND _ITM_deregisterTMCloneTable@
+HASH-NEXT:     5   1: 00000000     0 NOTYPE  WEAK   DEFAULT UND _ITM_registerTMCloneTable{{$}}
+HASH-NEXT:     4   1: 00000000     0 NOTYPE  WEAK   DEFAULT UND _ITM_deregisterTMCloneTable{{$}}
 HASH-NEXT:     3   1: 00000000     0 FUNC    GLOBAL DEFAULT UND __libc_start_main@GLIBC_2.0
 HASH-NEXT:    11   2: 00000000     0 FUNC    WEAK   DEFAULT UND __cxa_finalize@GLIBC_2.1.3
-HASH-NEXT:    15   2: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS __bss_start@
-HASH-NEXT:    14   2: 0000093c     4 OBJECT  GLOBAL DEFAULT  16 _IO_stdin_used@
+HASH-NEXT:    15   2: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS __bss_start{{$}}
+HASH-NEXT:    14   2: 0000093c     4 OBJECT  GLOBAL DEFAULT  16 _IO_stdin_used{{$}}
 HASH:     Symbol table of .gnu.hash for image:
 HASH-NEXT:   Num Buc:    Value  Size   Type   Bind Vis      Ndx Name
-HASH-NEXT:    12   0: 00001b68     0 NOTYPE  GLOBAL DEFAULT ABS _end@
-HASH-NEXT:    13   0: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS _edata@
-HASH-NEXT:    14   1: 0000093c     4 OBJECT  GLOBAL DEFAULT  16 _IO_stdin_used@
-HASH-NEXT:    15   1: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS __bss_start@
-HASH-NEXT:    16   1: 00000850    81 FUNC    GLOBAL DEFAULT  14 main@
+HASH-NEXT:    12   0: 00001b68     0 NOTYPE  GLOBAL DEFAULT ABS _end{{$}}
+HASH-NEXT:    13   0: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS _edata{{$}}
+HASH-NEXT:    14   1: 0000093c     4 OBJECT  GLOBAL DEFAULT  16 _IO_stdin_used{{$}}
+HASH-NEXT:    15   1: 00001b64     0 NOTYPE  GLOBAL DEFAULT ABS __bss_start{{$}}
+HASH-NEXT:    16   1: 00000850    81 FUNC    GLOBAL DEFAULT  14 main{{$}}
diff --git a/test/tools/llvm-readobj/merged.test b/test/tools/llvm-readobj/merged.test
new file mode 100644
index 0000000..0fcc14f
--- /dev/null
+++ b/test/tools/llvm-readobj/merged.test
@@ -0,0 +1,13 @@
+# Check that merged args produce the same output as their unmerged forms.
+RUN: llvm-readelf -aeWhSrnudlVgIs %p/Inputs/trivial.obj.elf-i386 > %t.merged
+RUN: llvm-readelf -a -e -W -h -S -r -n -u -d -l -V -g -I -s %p/Inputs/trivial.obj.elf-i386 > %t.not-merged
+RUN: cmp %t.merged %t.not-merged
+RUN: FileCheck %s --input-file %t.merged
+
+# llvm-readobj does not support merged args, because it also supports some old
+# flags (-st, -sd, etc.), and it would be confusing if only some merged args
+# were supported.
+RUN: not llvm-readobj -aeWhSrnudlVgIs %p/Inputs/trivial.obj.elf-i386 2>&1 | FileCheck %s --check-prefix=UNKNOWN
+
+CHECK-NOT: Unknown command line argument
+UNKNOWN:   Unknown command line argument
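
A sketch of the grouped-short-option behavior this test contrasts: llvm-readelf can split -aeWhSrnudlVgIs into single-letter flags, while llvm-readobj cannot, because two-letter flags such as -st and -sd would make the split ambiguous. The Python below is illustrative; the flag tables are stand-ins, not the tools' real option tables:

    def expand_grouped(arg, single_letter_flags, multi_letter_flags):
        body = arg.lstrip("-")
        if body in multi_letter_flags:
            return ["-" + body]             # llvm-readobj: -st stays one flag
        if body and all(c in single_letter_flags for c in body):
            return ["-" + c for c in body]  # llvm-readelf: -aeW -> -a -e -W
        raise ValueError("Unknown command line argument: " + arg)

    print(expand_grouped("-aeWhSrnudlVgIs", set("aeWhSrnudlVgIs"), {"st", "sd"}))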
diff --git a/test/tools/llvm-readobj/mips-got.test b/test/tools/llvm-readobj/mips-got.test
index a5c15fd..7d8d266 100644
--- a/test/tools/llvm-readobj/mips-got.test
+++ b/test/tools/llvm-readobj/mips-got.test
@@ -73,7 +73,7 @@
 GOT-EXE-NEXT:       Value: 0x0
 GOT-EXE-NEXT:       Type: Function (0x2)
 GOT-EXE-NEXT:       Section: Undefined (0x0)
-GOT-EXE-NEXT:       Name: __gmon_start__@ (1)
+GOT-EXE-NEXT:       Name: __gmon_start__ (1)
 GOT-EXE-NEXT:     }
 GOT-EXE-NEXT:   ]
 GOT-EXE-NEXT:   Number of TLS and multi-GOT entries: 0
@@ -145,7 +145,7 @@
 GOT-SO-NEXT:       Value: 0x0
 GOT-SO-NEXT:       Type: None (0x0)
 GOT-SO-NEXT:       Section: Undefined (0x0)
-GOT-SO-NEXT:       Name: _ITM_registerTMCloneTable@ (87)
+GOT-SO-NEXT:       Name: _ITM_registerTMCloneTable (87)
 GOT-SO-NEXT:     }
 GOT-SO-NEXT:     Entry {
 GOT-SO-NEXT:       Address: 0x1090C
@@ -154,7 +154,7 @@
 GOT-SO-NEXT:       Value: 0x0
 GOT-SO-NEXT:       Type: None (0x0)
 GOT-SO-NEXT:       Section: Undefined (0x0)
-GOT-SO-NEXT:       Name: _Jv_RegisterClasses@ (128)
+GOT-SO-NEXT:       Name: _Jv_RegisterClasses (128)
 GOT-SO-NEXT:     }
 GOT-SO-NEXT:     Entry {
 GOT-SO-NEXT:       Address: 0x10910
@@ -163,7 +163,7 @@
 GOT-SO-NEXT:       Value: 0x0
 GOT-SO-NEXT:       Type: Function (0x2)
 GOT-SO-NEXT:       Section: Undefined (0x0)
-GOT-SO-NEXT:       Name: __gmon_start__@ (23)
+GOT-SO-NEXT:       Name: __gmon_start__ (23)
 GOT-SO-NEXT:     }
 GOT-SO-NEXT:     Entry {
 GOT-SO-NEXT:       Address: 0x10914
@@ -181,7 +181,7 @@
 GOT-SO-NEXT:       Value: 0x0
 GOT-SO-NEXT:       Type: None (0x0)
 GOT-SO-NEXT:       Section: Undefined (0x0)
-GOT-SO-NEXT:       Name: _ITM_deregisterTMCloneTable@ (59)
+GOT-SO-NEXT:       Name: _ITM_deregisterTMCloneTable (59)
 GOT-SO-NEXT:     }
 GOT-SO-NEXT:     Entry {
 GOT-SO-NEXT:       Address: 0x1091C
@@ -277,7 +277,7 @@
 GOT-TLS-NEXT:       Value: 0x0
 GOT-TLS-NEXT:       Type: None (0x0)
 GOT-TLS-NEXT:       Section: Undefined (0x0)
-GOT-TLS-NEXT:       Name: _ITM_registerTMCloneTable@ (78)
+GOT-TLS-NEXT:       Name: _ITM_registerTMCloneTable (78)
 GOT-TLS-NEXT:     }
 GOT-TLS-NEXT:     Entry {
 GOT-TLS-NEXT:       Address: 0x10C70
@@ -286,7 +286,7 @@
 GOT-TLS-NEXT:       Value: 0x0
 GOT-TLS-NEXT:       Type: None (0x0)
 GOT-TLS-NEXT:       Section: Undefined (0x0)
-GOT-TLS-NEXT:       Name: _Jv_RegisterClasses@ (119)
+GOT-TLS-NEXT:       Name: _Jv_RegisterClasses (119)
 GOT-TLS-NEXT:     }
 GOT-TLS-NEXT:     Entry {
 GOT-TLS-NEXT:       Address: 0x10C78
@@ -295,7 +295,7 @@
 GOT-TLS-NEXT:       Value: 0x0
 GOT-TLS-NEXT:       Type: Function (0x2)
 GOT-TLS-NEXT:       Section: Undefined (0x0)
-GOT-TLS-NEXT:       Name: __gmon_start__@ (23)
+GOT-TLS-NEXT:       Name: __gmon_start__ (23)
 GOT-TLS-NEXT:     }
 GOT-TLS-NEXT:     Entry {
 GOT-TLS-NEXT:       Address: 0x10C80
@@ -313,7 +313,7 @@
 GOT-TLS-NEXT:       Value: 0x0
 GOT-TLS-NEXT:       Type: None (0x0)
 GOT-TLS-NEXT:       Section: Undefined (0x0)
-GOT-TLS-NEXT:       Name: _ITM_deregisterTMCloneTable@ (50)
+GOT-TLS-NEXT:       Name: _ITM_deregisterTMCloneTable (50)
 GOT-TLS-NEXT:     }
 GOT-TLS-NEXT:     Entry {
 GOT-TLS-NEXT:       Address: 0x10C90
diff --git a/test/tools/llvm-readobj/relocations.test b/test/tools/llvm-readobj/relocations.test
index 4a7dfa5..7f1d3fa 100644
--- a/test/tools/llvm-readobj/relocations.test
+++ b/test/tools/llvm-readobj/relocations.test
@@ -21,9 +21,9 @@
 
 COFF:      Relocations [
 COFF-NEXT:   Section (1) .text {
-COFF-NEXT:     0x4 IMAGE_REL_I386_DIR32 .data
-COFF-NEXT:     0x9 IMAGE_REL_I386_REL32 _puts
-COFF-NEXT:     0xE IMAGE_REL_I386_REL32 _SomeOtherFunction
+COFF-NEXT:     0x4 IMAGE_REL_I386_DIR32 .data (4)
+COFF-NEXT:     0x9 IMAGE_REL_I386_REL32 _puts (7)
+COFF-NEXT:     0xE IMAGE_REL_I386_REL32 _SomeOtherFunction (8)
 COFF-NEXT:   }
 COFF-NEXT: ]
 
diff --git a/test/tools/llvm-readobj/sections-ext.test b/test/tools/llvm-readobj/sections-ext.test
index 70ae0f2..0856938 100644
--- a/test/tools/llvm-readobj/sections-ext.test
+++ b/test/tools/llvm-readobj/sections-ext.test
@@ -13,6 +13,20 @@
 RUN: llvm-readobj -expand-relocs -s -st -sr -sd %p/Inputs/trivial.obj.macho-arm \
 RUN:   | FileCheck %s -check-prefix MACHO-ARM
 
+# Check that the two-letter aliases above (-st, -sr, -sd) are equivalent to
+# their full flag names.
+RUN: llvm-readobj -s -st %p/Inputs/trivial.obj.elf-i386 > %t.readobj-st-alias
+RUN: llvm-readobj -s --section-symbols %p/Inputs/trivial.obj.elf-i386 > %t.readobj-st-no-alias
+RUN: diff %t.readobj-st-alias %t.readobj-st-no-alias
+
+RUN: llvm-readobj -s -sr %p/Inputs/trivial.obj.elf-i386 > %t.readobj-sr-alias
+RUN: llvm-readobj -s --section-relocations %p/Inputs/trivial.obj.elf-i386 > %t.readobj-sr-no-alias
+RUN: diff %t.readobj-sr-alias %t.readobj-sr-no-alias
+
+RUN: llvm-readobj -s -sd %p/Inputs/trivial.obj.elf-i386 > %t.readobj-sd-alias
+RUN: llvm-readobj -s --section-data %p/Inputs/trivial.obj.elf-i386 > %t.readobj-sd-no-alias
+RUN: diff %t.readobj-sd-alias %t.readobj-sd-no-alias
+
 COFF:      Sections [
 COFF-NEXT:   Section {
 COFF-NEXT:     Number: 1
diff --git a/test/tools/llvm-symbolizer/basic.s b/test/tools/llvm-symbolizer/basic.s
new file mode 100644
index 0000000..c966f98
--- /dev/null
+++ b/test/tools/llvm-symbolizer/basic.s
@@ -0,0 +1,23 @@
+# REQUIRES: x86-registered-target
+
+foo:
+    .space 10
+    nop
+    nop
+
+# RUN: llvm-mc -filetype=obj -triple=x86_64-pc-linux %s -o %t.o -g
+
+# Check input addresses specified on stdin.
+# RUN: echo -e "0xa\n0xb" | llvm-symbolizer --obj=%t.o | FileCheck %s
+# RUN: echo -e "10\n11" | llvm-symbolizer --obj=%t.o | FileCheck %s
+
+# Check input addresses specified on the command-line.
+# RUN: llvm-symbolizer 0xa 0xb --obj=%t.o | FileCheck %s
+# RUN: llvm-symbolizer 10 11 --obj=%t.o | FileCheck %s
+
+# Check that --obj is an alias for --exe and -e.
+# RUN: llvm-symbolizer 0xa 0xb --exe=%t.o | FileCheck %s
+# RUN: llvm-symbolizer 0xa 0xb -e=%t.o | FileCheck %s
+
+# CHECK: basic.s:5:0
+# CHECK: basic.s:6:0
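(For orientation: `foo` is placed at offset 0 and `.space 10` fills 0x0-0x9, so
the two `nop`s land at addresses 0xa and 0xb, i.e. assembly lines 5 and 6 of
this file; that is what the CHECK lines expect for both hex and decimal inputs.)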
diff --git a/test/tools/llvm-symbolizer/sym.test b/test/tools/llvm-symbolizer/sym.test
index 871c16a..623eb4c 100644
--- a/test/tools/llvm-symbolizer/sym.test
+++ b/test/tools/llvm-symbolizer/sym.test
@@ -18,9 +18,12 @@
 #Build as : clang -g -O2 addr.c
 
 RUN: llvm-symbolizer -print-address -obj=%p/Inputs/addr.exe < %p/Inputs/addr.inp | FileCheck %s
-RUN: llvm-symbolizer -inlining -print-address -pretty-print -obj=%p/Inputs/addr.exe < %p/Inputs/addr.inp | FileCheck --check-prefix="PRETTY" %s 
+RUN: llvm-symbolizer -addresses -obj=%p/Inputs/addr.exe < %p/Inputs/addr.inp | FileCheck %s
+RUN: llvm-symbolizer -a -obj=%p/Inputs/addr.exe < %p/Inputs/addr.inp | FileCheck %s
+RUN: llvm-symbolizer -inlining -print-address -pretty-print -obj=%p/Inputs/addr.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s 
+RUN: llvm-symbolizer -inlining -print-address -p -obj=%p/Inputs/addr.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
 RUN: echo "0x1" > %t.input
-RUN: llvm-symbolizer -obj=%p/Inputs/zero < %t.input | FileCheck --check-prefix="ZERO" %s
+RUN: llvm-symbolizer -obj=%p/Inputs/zero < %t.input | FileCheck -check-prefix="ZERO" %s
 
 #CHECK: some text
 #CHECK: 0x40054d
diff --git a/test/tools/yaml2obj/coff-symbol-index.yaml b/test/tools/yaml2obj/coff-symbol-index.yaml
new file mode 100644
index 0000000..592ecb3
--- /dev/null
+++ b/test/tools/yaml2obj/coff-symbol-index.yaml
@@ -0,0 +1,74 @@
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-readobj -relocations %t | FileCheck %s --check-prefix=RELOCS
+# RUN: obj2yaml %t | FileCheck %s --check-prefix=YAML
+
+# RELOCS:      Relocations [
+# RELOCS-NEXT:   Section (1) .text {
+# RELOCS-NEXT:     0x3 IMAGE_REL_AMD64_REL32 .rdata (0)
+# RELOCS-NEXT:     0xA IMAGE_REL_AMD64_REL32 .rdata (1)
+# RELOCS-NEXT:     0x11 IMAGE_REL_AMD64_REL32 foo (2)
+# RELOCS-NEXT:   }
+# RELOCS-NEXT: ]
+
+# Check that we normally output relocations with SymbolName.
+# For relocations with a non-unique symbol name, we output
+# SymbolTableIndex instead.
+
+# YAML:          Relocations:     
+# YAML-NEXT:       - VirtualAddress:  3
+# YAML-NEXT:         SymbolTableIndex: 0
+# YAML-NEXT:         Type:            IMAGE_REL_AMD64_REL32
+# YAML-NEXT:       - VirtualAddress:  10
+# YAML-NEXT:         SymbolTableIndex: 1
+# YAML-NEXT:         Type:            IMAGE_REL_AMD64_REL32
+# YAML-NEXT:       - VirtualAddress:  17
+# YAML-NEXT:         SymbolName:      foo
+# YAML-NEXT:         Type:            IMAGE_REL_AMD64_REL32
+
+--- !COFF
+header:          
+  Machine:         IMAGE_FILE_MACHINE_AMD64
+  Characteristics: [  ]
+sections:        
+  - Name:            .text
+    Characteristics: [  ]
+    Alignment:       4
+    SectionData:     488B0500000000488B0500000000488B0500000000
+    Relocations:     
+      - VirtualAddress:  3
+        SymbolTableIndex: 0
+        Type:            IMAGE_REL_AMD64_REL32
+      - VirtualAddress:  10
+        SymbolTableIndex: 1
+        Type:            IMAGE_REL_AMD64_REL32
+      - VirtualAddress:  17
+        SymbolName:      foo
+        Type:            IMAGE_REL_AMD64_REL32
+  - Name:            .rdata
+    Characteristics: [  ]
+    Alignment:       1
+    SectionData:     '00'
+  - Name:            .rdata
+    Characteristics: [  ]
+    Alignment:       1
+    SectionData:     '01'
+symbols:         
+  - Name:            .rdata
+    Value:           0
+    SectionNumber:   2
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            .rdata
+    Value:           0
+    SectionNumber:   3
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_STATIC
+  - Name:            foo
+    Value:           0
+    SectionNumber:   3
+    SimpleType:      IMAGE_SYM_TYPE_NULL
+    ComplexType:     IMAGE_SYM_DTYPE_NULL
+    StorageClass:    IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/test/tools/yaml2obj/elf-abiversion.yaml b/test/tools/yaml2obj/elf-abiversion.yaml
new file mode 100644
index 0000000..0f43e69
--- /dev/null
+++ b/test/tools/yaml2obj/elf-abiversion.yaml
@@ -0,0 +1,16 @@
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-readobj -file-headers %t | FileCheck %s --check-prefix=FROMYAML
+# RUN: obj2yaml %t | FileCheck %s --check-prefix=TOYAML
+
+## Check that we are able to parse/emit the ABI version with yaml2obj/obj2yaml.
+
+# FROMYAML: ABIVersion: 5
+# TOYAML:   ABIVersion: 0x05
+
+--- !ELF
+FileHeader:
+  Class:           ELFCLASS64
+  Data:            ELFDATA2LSB
+  Type:            ET_REL
+  Machine:         EM_X86_64
+  ABIVersion:      0x05
diff --git a/tools/LLVMBuild.txt b/tools/LLVMBuild.txt
index 1732ea0..61e053c 100644
--- a/tools/LLVMBuild.txt
+++ b/tools/LLVMBuild.txt
@@ -32,6 +32,7 @@
  llvm-dis
  llvm-dwarfdump
  llvm-dwp
+ llvm-elfabi
  llvm-exegesis
  llvm-extract
  llvm-jitlistener
diff --git a/tools/bugpoint-passes/CMakeLists.txt b/tools/bugpoint-passes/CMakeLists.txt
index e32b0a3..eea3e23 100644
--- a/tools/bugpoint-passes/CMakeLists.txt
+++ b/tools/bugpoint-passes/CMakeLists.txt
@@ -14,7 +14,7 @@
   set(LLVM_LINK_COMPONENTS Core)
 endif()
 
-add_llvm_loadable_module( BugpointPasses
+add_llvm_library( BugpointPasses MODULE BUILDTREE_ONLY
   TestPasses.cpp
 
   DEPENDS
diff --git a/tools/bugpoint-passes/TestPasses.cpp b/tools/bugpoint-passes/TestPasses.cpp
index 22ded62..6b14636 100644
--- a/tools/bugpoint-passes/TestPasses.cpp
+++ b/tools/bugpoint-passes/TestPasses.cpp
@@ -123,3 +123,28 @@
 static RegisterPass<CrashOnTooManyCUs>
     A("bugpoint-crash-too-many-cus",
       "BugPoint Test Pass - Intentionally crash on too many CUs");
+
+namespace {
+class CrashOnFunctionAttribute : public FunctionPass {
+public:
+  static char ID; // Pass ID, replacement for typeid
+  CrashOnFunctionAttribute() : FunctionPass(ID) {}
+
+private:
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  bool runOnFunction(Function &F) override {
+    AttributeSet A = F.getAttributes().getFnAttributes();
+    if (A.hasAttribute("bugpoint-crash"))
+      abort();
+    return false;
+  }
+};
+} // namespace
+
+char CrashOnFunctionAttribute::ID = 0;
+static RegisterPass<CrashOnFunctionAttribute>
+    B("bugpoint-crashfuncattr", "BugPoint Test Pass - Intentionally crash on "
+                                "function attribute 'bugpoint-crash'");
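A pass like this is driven from a bugpoint test; a hypothetical RUN line and
input, modeled on the existing crash-pass tests (the paths, prefix, and IR are
illustrative, not part of this change):

; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext %s \
; RUN:   -output-prefix %t -bugpoint-crashfuncattr -silence-passes
define void @f() #0 {
  ret void
}
attributes #0 = { "bugpoint-crash" }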
diff --git a/tools/bugpoint/CrashDebugger.cpp b/tools/bugpoint/CrashDebugger.cpp
index a50ff4c..ef6a214 100644
--- a/tools/bugpoint/CrashDebugger.cpp
+++ b/tools/bugpoint/CrashDebugger.cpp
@@ -315,6 +315,66 @@
 }
 
 namespace {
+/// ReduceCrashingFunctionAttributes reducer - This works by removing
+/// attributes on a particular function and seeing if the program still crashes.
+/// If it does, then keep the newer, smaller program.
+///
+class ReduceCrashingFunctionAttributes : public ListReducer<Attribute> {
+  BugDriver &BD;
+  std::string FnName;
+  BugTester TestFn;
+
+public:
+  ReduceCrashingFunctionAttributes(BugDriver &bd, const std::string &FnName,
+                                   BugTester testFn)
+      : BD(bd), FnName(FnName), TestFn(testFn) {}
+
+  Expected<TestResult> doTest(std::vector<Attribute> &Prefix,
+                              std::vector<Attribute> &Kept) override {
+    if (!Kept.empty() && TestFuncAttrs(Kept))
+      return KeepSuffix;
+    if (!Prefix.empty() && TestFuncAttrs(Prefix))
+      return KeepPrefix;
+    return NoFailure;
+  }
+
+  bool TestFuncAttrs(std::vector<Attribute> &Attrs);
+};
+}
+
+bool ReduceCrashingFunctionAttributes::TestFuncAttrs(
+    std::vector<Attribute> &Attrs) {
+  // Clone the program to try hacking it apart...
+  std::unique_ptr<Module> M = CloneModule(BD.getProgram());
+  Function *F = M->getFunction(FnName);
+
+  // Build up an AttributeList from the attributes we've been given by the
+  // reducer.
+  AttrBuilder AB;
+  for (auto A : Attrs)
+    AB.addAttribute(A);
+  AttributeList NewAttrs;
+  NewAttrs =
+      NewAttrs.addAttributes(BD.getContext(), AttributeList::FunctionIndex, AB);
+
+  // Set this new list of attributes on the function.
+  F->setAttributes(NewAttrs);
+
+  // Try running on the hacked up program...
+  if (TestFn(BD, M.get())) {
+    BD.setNewProgram(std::move(M)); // It crashed, keep the trimmed version...
+
+    // Pass along the set of attributes that caused the crash.
+    Attrs.clear();
+    for (Attribute A : NewAttrs.getFnAttributes()) {
+      Attrs.push_back(A);
+    }
+    return true;
+  }
+  return false;
+}
+
+namespace {
 /// Simplify the CFG without completely destroying it.
 /// This is not well defined, but basically comes down to "try to eliminate
 /// unreachable blocks and constant fold terminators without deciding that
@@ -1056,6 +1116,38 @@
       BD.EmitProgressBitcode(BD.getProgram(), "reduced-function");
   }
 
+  // For each remaining function, try to reduce that function's attributes.
+  std::vector<std::string> FunctionNames;
+  for (Function &F : BD.getProgram())
+    FunctionNames.push_back(F.getName());
+
+  if (!FunctionNames.empty() && !BugpointIsInterrupted) {
+    outs() << "\n*** Attempting to reduce the number of function attributes in "
+              "the testcase\n";
+
+    unsigned OldSize = 0;
+    unsigned NewSize = 0;
+    for (std::string &Name : FunctionNames) {
+      Function *Fn = BD.getProgram().getFunction(Name);
+      assert(Fn && "Could not find function?");
+
+      std::vector<Attribute> Attrs;
+      for (Attribute A : Fn->getAttributes().getFnAttributes())
+        Attrs.push_back(A);
+
+      OldSize += Attrs.size();
+      Expected<bool> Result =
+          ReduceCrashingFunctionAttributes(BD, Name, TestFn).reduceList(Attrs);
+      if (Error E = Result.takeError())
+        return E;
+
+      NewSize += Attrs.size();
+    }
+
+    if (NewSize < OldSize)
+      BD.EmitProgressBitcode(BD.getProgram(), "reduced-function-attributes");
+  }
+
   // Attempt to change conditional branches into unconditional branches to
   // eliminate blocks.
   if (!DisableSimplifyCFG && !BugpointIsInterrupted) {
diff --git a/tools/dsymutil/CMakeLists.txt b/tools/dsymutil/CMakeLists.txt
index f41a6fd..480f78f 100644
--- a/tools/dsymutil/CMakeLists.txt
+++ b/tools/dsymutil/CMakeLists.txt
@@ -20,6 +20,7 @@
   MachODebugMapParser.cpp
   MachOUtils.cpp
   NonRelocatableStringpool.cpp
+  SymbolMap.cpp
 
   DEPENDS
   intrinsics_gen
diff --git a/tools/dsymutil/CompileUnit.cpp b/tools/dsymutil/CompileUnit.cpp
index 67e1739..4654e41 100644
--- a/tools/dsymutil/CompileUnit.cpp
+++ b/tools/dsymutil/CompileUnit.cpp
@@ -92,7 +92,11 @@
 
 void CompileUnit::addFunctionRange(uint64_t FuncLowPc, uint64_t FuncHighPc,
                                    int64_t PcOffset) {
-  Ranges.insert(FuncLowPc, FuncHighPc, PcOffset);
+  // Don't add empty ranges to the interval map. They are a problem because
+  // the interval map expects half-open intervals. This is safe because they
+  // are empty anyway.
+  if (FuncHighPc != FuncLowPc)
+    Ranges.insert(FuncLowPc, FuncHighPc, PcOffset);
   this->LowPc = std::min(LowPc, FuncLowPc + PcOffset);
   this->HighPc = std::max(HighPc, FuncHighPc + PcOffset);
 }
diff --git a/tools/dsymutil/DebugMap.h b/tools/dsymutil/DebugMap.h
index c988377..d8de37e 100644
--- a/tools/dsymutil/DebugMap.h
+++ b/tools/dsymutil/DebugMap.h
@@ -75,7 +75,7 @@
 class DebugMap {
   Triple BinaryTriple;
   std::string BinaryPath;
-
+  std::vector<uint8_t> BinaryUUID;
   using ObjectContainer = std::vector<std::unique_ptr<DebugMapObject>>;
 
   ObjectContainer Objects;
@@ -89,8 +89,10 @@
   ///@}
 
 public:
-  DebugMap(const Triple &BinaryTriple, StringRef BinaryPath)
-      : BinaryTriple(BinaryTriple), BinaryPath(BinaryPath) {}
+  DebugMap(const Triple &BinaryTriple, StringRef BinaryPath,
+           ArrayRef<uint8_t> BinaryUUID = ArrayRef<uint8_t>())
+      : BinaryTriple(BinaryTriple), BinaryPath(BinaryPath),
+        BinaryUUID(BinaryUUID.begin(), BinaryUUID.end()) {}
 
   using const_iterator = ObjectContainer::const_iterator;
 
@@ -113,6 +115,10 @@
 
   const Triple &getTriple() const { return BinaryTriple; }
 
+  const ArrayRef<uint8_t> getUUID() const {
+    return ArrayRef<uint8_t>(BinaryUUID);
+  }
+
   StringRef getBinaryPath() const { return BinaryPath; }
 
   void print(raw_ostream &OS) const;
diff --git a/tools/dsymutil/DwarfLinker.cpp b/tools/dsymutil/DwarfLinker.cpp
index 2862739..0743cfc 100644
--- a/tools/dsymutil/DwarfLinker.cpp
+++ b/tools/dsymutil/DwarfLinker.cpp
@@ -1701,6 +1701,8 @@
   DWARFDataExtractor LineExtractor(
       OrigDwarf.getDWARFObj(), OrigDwarf.getDWARFObj().getLineSection(),
       OrigDwarf.isLittleEndian(), Unit.getOrigUnit().getAddressByteSize());
+  if (Options.Translator)
+    return Streamer->translateLineTable(LineExtractor, StmtOffset, Options);
 
   Error Err = LineTable.parse(LineExtractor, &StmtOffset, OrigDwarf,
                               &Unit.getOrigUnit(), DWARFContext::dumpWarning);
@@ -2245,17 +2247,16 @@
     if (Linker.Options.NoOutput)
       continue;
 
-    if (LLVM_LIKELY(!Linker.Options.Update)) {
-      // FIXME: for compatibility with the classic dsymutil, we emit an empty
-      // line table for the unit, even if the unit doesn't actually exist in
-      // the DIE tree.
+    // FIXME: for compatibility with the classic dsymutil, we emit
+    // an empty line table for the unit, even if the unit doesn't
+    // actually exist in the DIE tree.
+    if (LLVM_LIKELY(!Linker.Options.Update) || Linker.Options.Translator)
       Linker.patchLineTableForUnit(*CurrentUnit, DwarfContext, Ranges, DMO);
-      Linker.emitAcceleratorEntriesForUnit(*CurrentUnit);
-      Linker.patchRangesForUnit(*CurrentUnit, DwarfContext, DMO);
-      Linker.Streamer->emitLocationsForUnit(*CurrentUnit, DwarfContext);
-    } else {
-      Linker.emitAcceleratorEntriesForUnit(*CurrentUnit);
-    }
+    Linker.emitAcceleratorEntriesForUnit(*CurrentUnit);
+    if (Linker.Options.Update)
+      continue;
+    Linker.patchRangesForUnit(*CurrentUnit, DwarfContext, DMO);
+    Linker.Streamer->emitLocationsForUnit(*CurrentUnit, DwarfContext);
   }
 
   if (Linker.Options.NoOutput)
@@ -2380,7 +2381,7 @@
   // This Dwarf string pool which is used for emission. It must be used
   // serially as the order of calling getStringOffset matters for
   // reproducibility.
-  OffsetsStringPool OffsetsStringPool;
+  OffsetsStringPool OffsetsStringPool(Options.Translator);
 
   // ODR Contexts for the link.
   DeclContextTree ODRContexts;
@@ -2649,7 +2650,7 @@
     pool.wait();
   }
 
-  return Options.NoOutput ? true : Streamer->finish(Map);
+  return Options.NoOutput ? true : Streamer->finish(Map, Options.Translator);
 } // namespace dsymutil
 
 bool linkDwarf(raw_fd_ostream &OutFile, BinaryHolder &BinHolder,
diff --git a/tools/dsymutil/DwarfStreamer.cpp b/tools/dsymutil/DwarfStreamer.cpp
index ef798be..28088ff 100644
--- a/tools/dsymutil/DwarfStreamer.cpp
+++ b/tools/dsymutil/DwarfStreamer.cpp
@@ -124,11 +124,11 @@
   return true;
 }
 
-bool DwarfStreamer::finish(const DebugMap &DM) {
+bool DwarfStreamer::finish(const DebugMap &DM, SymbolMapTranslator &T) {
   bool Result = true;
   if (DM.getTriple().isOSDarwin() && !DM.getBinaryPath().empty() &&
       Options.FileType == OutputFileType::Object)
-    Result = MachOUtils::generateDsymCompanion(DM, *MS, OutFile);
+    Result = MachOUtils::generateDsymCompanion(DM, T, *MS, OutFile);
   else
     MS->Finish();
   return Result;
@@ -577,6 +577,89 @@
   MS->EmitLabel(LineEndSym);
 }
 
+/// Copy the debug_line over to the updated binary while unobfuscating the file
+/// names and directories.
+void DwarfStreamer::translateLineTable(DataExtractor Data, uint32_t Offset,
+                                       LinkOptions &Options) {
+  MS->SwitchSection(MC->getObjectFileInfo()->getDwarfLineSection());
+  StringRef Contents = Data.getData();
+
+  // We have to deconstruct the line table header, because it contains two
+  // length fields that need to be updated when we change the length of the
+  // file and directory names stored in it.
+  unsigned UnitLength = Data.getU32(&Offset);
+  unsigned UnitEnd = Offset + UnitLength;
+  MCSymbol *BeginLabel = MC->createTempSymbol();
+  MCSymbol *EndLabel = MC->createTempSymbol();
+  unsigned Version = Data.getU16(&Offset);
+
+  if (Version > 5) {
+    warn("Unsupported line table version: dropping contents and not "
+         "unobfsucating line table.");
+    return;
+  }
+
+  Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);
+  Asm->OutStreamer->EmitLabel(BeginLabel);
+  Asm->emitInt16(Version);
+  LineSectionSize += 6;
+
+  MCSymbol *HeaderBeginLabel = MC->createTempSymbol();
+  MCSymbol *HeaderEndLabel = MC->createTempSymbol();
+  Asm->EmitLabelDifference(HeaderEndLabel, HeaderBeginLabel, 4);
+  Asm->OutStreamer->EmitLabel(HeaderBeginLabel);
+  Offset += 4;
+  LineSectionSize += 4;
+
+  uint32_t AfterHeaderLengthOffset = Offset;
+  // Skip to the directories.
+  Offset += (Version >= 4) ? 5 : 4;
+  unsigned OpcodeBase = Data.getU8(&Offset);
+  Offset += OpcodeBase - 1;
+  Asm->OutStreamer->EmitBytes(Contents.slice(AfterHeaderLengthOffset, Offset));
+  LineSectionSize += Offset - AfterHeaderLengthOffset;
+
+  // Offset points to the first directory.
+  while (const char *Dir = Data.getCStr(&Offset)) {
+    if (Dir[0] == 0)
+      break;
+
+    StringRef Translated = Options.Translator(Dir);
+    Asm->OutStreamer->EmitBytes(Translated);
+    Asm->emitInt8(0);
+    LineSectionSize += Translated.size() + 1;
+  }
+  Asm->emitInt8(0);
+  LineSectionSize += 1;
+
+  while (const char *File = Data.getCStr(&Offset)) {
+    if (File[0] == 0)
+      break;
+
+    StringRef Translated = Options.Translator(File);
+    Asm->OutStreamer->EmitBytes(Translated);
+    Asm->emitInt8(0);
+    LineSectionSize += Translated.size() + 1;
+
+    uint32_t OffsetBeforeLEBs = Offset;
+    Asm->EmitULEB128(Data.getULEB128(&Offset));
+    Asm->EmitULEB128(Data.getULEB128(&Offset));
+    Asm->EmitULEB128(Data.getULEB128(&Offset));
+    LineSectionSize += Offset - OffsetBeforeLEBs;
+  }
+  Asm->emitInt8(0);
+  LineSectionSize += 1;
+
+  Asm->OutStreamer->EmitLabel(HeaderEndLabel);
+
+  // Copy the actual line table program over.
+  Asm->OutStreamer->EmitBytes(Contents.slice(Offset, UnitEnd));
+  LineSectionSize += UnitEnd - Offset;
+
+  Asm->OutStreamer->EmitLabel(EndLabel);
+  Offset = UnitEnd;
+}
+
 static void emitSectionContents(const object::ObjectFile &Obj,
                                 StringRef SecName, MCStreamer *MS) {
   StringRef Contents;
@@ -586,8 +669,10 @@
 }
 
 void DwarfStreamer::copyInvariantDebugSection(const object::ObjectFile &Obj) {
-  MS->SwitchSection(MC->getObjectFileInfo()->getDwarfLineSection());
-  emitSectionContents(Obj, "debug_line", MS);
+  if (!Options.Translator) {
+    MS->SwitchSection(MC->getObjectFileInfo()->getDwarfLineSection());
+    emitSectionContents(Obj, "debug_line", MS);
+  }
 
   MS->SwitchSection(MC->getObjectFileInfo()->getDwarfLocSection());
   emitSectionContents(Obj, "debug_loc", MS);
diff --git a/tools/dsymutil/DwarfStreamer.h b/tools/dsymutil/DwarfStreamer.h
index 679d124..abc8654 100644
--- a/tools/dsymutil/DwarfStreamer.h
+++ b/tools/dsymutil/DwarfStreamer.h
@@ -50,7 +50,7 @@
   bool init(Triple TheTriple);
 
   /// Dump the file to the disk.
-  bool finish(const DebugMap &);
+  bool finish(const DebugMap &, SymbolMapTranslator &T);
 
   AsmPrinter &getAsmPrinter() const { return *Asm; }
 
@@ -104,6 +104,11 @@
                             std::vector<DWARFDebugLine::Row> &Rows,
                             unsigned AdddressSize);
 
+  /// Copy the debug_line over to the updated binary while unobfuscating the
+  /// file names and directories.
+  void translateLineTable(DataExtractor LineData, uint32_t Offset,
+                          LinkOptions &Options);
+
   /// Copy over the debug sections that are not modified when updating.
   void copyInvariantDebugSection(const object::ObjectFile &Obj);
 
diff --git a/tools/dsymutil/LinkUtils.h b/tools/dsymutil/LinkUtils.h
index f0abd88..0769741 100644
--- a/tools/dsymutil/LinkUtils.h
+++ b/tools/dsymutil/LinkUtils.h
@@ -10,8 +10,11 @@
 #ifndef LLVM_TOOLS_DSYMUTIL_LINKOPTIONS_H
 #define LLVM_TOOLS_DSYMUTIL_LINKOPTIONS_H
 
+#include "SymbolMap.h"
+
 #include "llvm/ADT/Twine.h"
 #include "llvm/Support/WithColor.h"
+
 #include <string>
 
 namespace llvm {
@@ -60,6 +63,9 @@
   /// -oso-prepend-path
   std::string PrependPath;
 
+  /// Symbol map translator.
+  SymbolMapTranslator Translator;
+
   LinkOptions() = default;
 };
 
diff --git a/tools/dsymutil/MachODebugMapParser.cpp b/tools/dsymutil/MachODebugMapParser.cpp
index d696e1d..8ff7e22 100644
--- a/tools/dsymutil/MachODebugMapParser.cpp
+++ b/tools/dsymutil/MachODebugMapParser.cpp
@@ -163,7 +163,8 @@
 MachODebugMapParser::parseOneBinary(const MachOObjectFile &MainBinary,
                                     StringRef BinaryPath) {
   loadMainBinarySymbols(MainBinary);
-  Result = make_unique<DebugMap>(MainBinary.getArchTriple(), BinaryPath);
+  ArrayRef<uint8_t> UUID = MainBinary.getUuid();
+  Result = make_unique<DebugMap>(MainBinary.getArchTriple(), BinaryPath, UUID);
   MainBinaryStrings = MainBinary.getStringTableData();
   for (const SymbolRef &Symbol : MainBinary.symbols()) {
     const DataRefImpl &DRI = Symbol.getRawDataRefImpl();
diff --git a/tools/dsymutil/MachOUtils.cpp b/tools/dsymutil/MachOUtils.cpp
index cac4ad8..8c54563 100644
--- a/tools/dsymutil/MachOUtils.cpp
+++ b/tools/dsymutil/MachOUtils.cpp
@@ -333,8 +333,8 @@
 // Stream a dSYM companion binary file corresponding to the binary referenced
 // by \a DM to \a OutFile. The passed \a MS MCStreamer is setup to write to
 // \a OutFile and it must be using a MachObjectWriter object to do so.
-bool generateDsymCompanion(const DebugMap &DM, MCStreamer &MS,
-                           raw_fd_ostream &OutFile) {
+bool generateDsymCompanion(const DebugMap &DM, SymbolMapTranslator &Translator,
+                           MCStreamer &MS, raw_fd_ostream &OutFile) {
   auto &ObjectStreamer = static_cast<MCObjectStreamer &>(MS);
   MCAssembler &MCAsm = ObjectStreamer.getAssembler();
   auto &Writer = static_cast<MachObjectWriter &>(MCAsm.getWriter());
@@ -443,7 +443,7 @@
   }
 
   SmallString<0> NewSymtab;
-  NonRelocatableStringpool NewStrings;
+  NonRelocatableStringpool NewStrings(Translator);
   unsigned NListSize = Is64Bit ? sizeof(MachO::nlist_64) : sizeof(MachO::nlist);
   unsigned NumSyms = 0;
   uint64_t NewStringsSize = 0;
diff --git a/tools/dsymutil/MachOUtils.h b/tools/dsymutil/MachOUtils.h
index a8be89e..c24f963 100644
--- a/tools/dsymutil/MachOUtils.h
+++ b/tools/dsymutil/MachOUtils.h
@@ -9,8 +9,11 @@
 #ifndef LLVM_TOOLS_DSYMUTIL_MACHOUTILS_H
 #define LLVM_TOOLS_DSYMUTIL_MACHOUTILS_H
 
+#include "SymbolMap.h"
+
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Support/FileSystem.h"
+
 #include <string>
 
 namespace llvm {
@@ -38,8 +41,8 @@
                              StringRef OutputFileName, const LinkOptions &,
                              StringRef SDKPath);
 
-bool generateDsymCompanion(const DebugMap &DM, MCStreamer &MS,
-                           raw_fd_ostream &OutFile);
+bool generateDsymCompanion(const DebugMap &DM, SymbolMapTranslator &Translator,
+                           MCStreamer &MS, raw_fd_ostream &OutFile);
 
 std::string getArchName(StringRef Arch);
 } // namespace MachOUtils
diff --git a/tools/dsymutil/NonRelocatableStringpool.cpp b/tools/dsymutil/NonRelocatableStringpool.cpp
index d82ff84..b8392a1 100644
--- a/tools/dsymutil/NonRelocatableStringpool.cpp
+++ b/tools/dsymutil/NonRelocatableStringpool.cpp
@@ -16,6 +16,8 @@
   if (S.empty() && !Strings.empty())
     return EmptyString;
 
+  if (Translator)
+    S = Translator(S);
   auto I = Strings.insert({S, DwarfStringPoolEntry()});
   auto &Entry = I.first->second;
   if (I.second || !Entry.isIndexed()) {
@@ -29,6 +31,10 @@
 
 StringRef NonRelocatableStringpool::internString(StringRef S) {
   DwarfStringPoolEntry Entry{nullptr, 0, DwarfStringPoolEntry::NotIndexed};
+
+  if (Translator)
+    S = Translator(S);
+
   auto InsertResult = Strings.insert({S, Entry});
   return InsertResult.first->getKey();
 }
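With a translator installed, both getEntry and internString store the
unobfuscated form, so everything downstream of the pool emits translated
strings. A minimal sketch, assuming a SymbolMapTranslator T whose map's line 2
reads "main" (names invented for illustration):

NonRelocatableStringpool Pool(T);
DwarfStringPoolEntryRef Ref = Pool.getEntry("__hidden#2_"); // interned as "main"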
diff --git a/tools/dsymutil/NonRelocatableStringpool.h b/tools/dsymutil/NonRelocatableStringpool.h
index e339e51..c398ff0 100644
--- a/tools/dsymutil/NonRelocatableStringpool.h
+++ b/tools/dsymutil/NonRelocatableStringpool.h
@@ -10,6 +10,8 @@
 #ifndef LLVM_TOOLS_DSYMUTIL_NONRELOCATABLESTRINGPOOL_H
 #define LLVM_TOOLS_DSYMUTIL_NONRELOCATABLESTRINGPOOL_H
 
+#include "SymbolMap.h"
+
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/CodeGen/DwarfStringPoolEntry.h"
@@ -32,7 +34,9 @@
   /// order.
   using MapTy = StringMap<DwarfStringPoolEntry, BumpPtrAllocator>;
 
-  NonRelocatableStringpool() {
+  NonRelocatableStringpool(
+      SymbolMapTranslator Translator = SymbolMapTranslator())
+      : Translator(Translator) {
     // Legacy dsymutil puts an empty string at the start of the line table.
     EmptyString = getEntry("");
   }
@@ -62,6 +66,7 @@
   uint32_t CurrentEndOffset = 0;
   unsigned NumEntries = 0;
   DwarfStringPoolEntryRef EmptyString;
+  SymbolMapTranslator Translator;
 };
 
 /// Helper for making strong types.
diff --git a/tools/dsymutil/SymbolMap.cpp b/tools/dsymutil/SymbolMap.cpp
new file mode 100644
index 0000000..cab9374
--- /dev/null
+++ b/tools/dsymutil/SymbolMap.cpp
@@ -0,0 +1,162 @@
+//===- tools/dsymutil/SymbolMap.cpp ---------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SymbolMap.h"
+#include "DebugMap.h"
+#include "MachOUtils.h"
+
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/WithColor.h"
+
+#ifdef __APPLE__
+#include <CoreFoundation/CoreFoundation.h>
+#include <uuid/uuid.h>
+#endif
+
+namespace llvm {
+namespace dsymutil {
+
+StringRef SymbolMapTranslator::operator()(StringRef Input) {
+  if (!Input.startswith("__hidden#") && !Input.startswith("___hidden#"))
+    return Input;
+
+  bool MightNeedUnderscore = false;
+  StringRef Line = Input.drop_front(sizeof("__hidden#") - 1);
+  if (Line[0] == '#') {
+    Line = Line.drop_front();
+    MightNeedUnderscore = true;
+  }
+
+  std::size_t LineNumber = std::numeric_limits<std::size_t>::max();
+  Line.split('_').first.getAsInteger(10, LineNumber);
+  if (LineNumber >= UnobfuscatedStrings.size()) {
+    WithColor::warning() << "reference to a unexisting unobfuscated string "
+                         << Input << ": symbol map mismatch?\n"
+                         << Line << '\n';
+    return Input;
+  }
+
+  const std::string &Translation = UnobfuscatedStrings[LineNumber];
+  if (!MightNeedUnderscore || !MangleNames)
+    return Translation;
+
+  // Objective-C symbols for the MachO symbol table start with a \1. Please see
+  // `CGObjCCommonMac::GetNameForMethod` in clang.
+  if (Translation[0] == 1)
+    return StringRef(Translation).drop_front();
+
+  // We need permanent storage for the string we are about to create. Just
+  // append it to the vector containing translations. This should only happen
+  // during MachO symbol table translation, thus there should be no risk of
+  // exponential growth.
+  UnobfuscatedStrings.emplace_back("_" + Translation);
+  return UnobfuscatedStrings.back();
+}
+
+SymbolMapTranslator SymbolMapLoader::Load(StringRef InputFile,
+                                          const DebugMap &Map) const {
+  if (SymbolMap.empty())
+    return {};
+
+  std::string SymbolMapPath = SymbolMap;
+
+#if __APPLE__
+  // Look through the UUID Map.
+  if (sys::fs::is_directory(SymbolMapPath) && !Map.getUUID().empty()) {
+    uuid_string_t UUIDString;
+    uuid_unparse_upper((const uint8_t *)Map.getUUID().data(), UUIDString);
+
+    SmallString<256> PlistPath(
+        sys::path::parent_path(sys::path::parent_path(InputFile)));
+    sys::path::append(PlistPath, StringRef(UUIDString).str() + ".plist");
+
+    CFStringRef plistFile = CFStringCreateWithCString(
+        kCFAllocatorDefault, PlistPath.c_str(), kCFStringEncodingUTF8);
+    CFURLRef fileURL = CFURLCreateWithFileSystemPath(
+        kCFAllocatorDefault, plistFile, kCFURLPOSIXPathStyle, false);
+    CFReadStreamRef resourceData =
+        CFReadStreamCreateWithFile(kCFAllocatorDefault, fileURL);
+    if (resourceData) {
+      CFReadStreamOpen(resourceData);
+      CFDictionaryRef plist = (CFDictionaryRef)CFPropertyListCreateWithStream(
+          kCFAllocatorDefault, resourceData, 0, kCFPropertyListImmutable,
+          nullptr, nullptr);
+
+      if (plist) {
+        if (CFDictionaryContainsKey(plist, CFSTR("DBGOriginalUUID"))) {
+          CFStringRef OldUUID = (CFStringRef)CFDictionaryGetValue(
+              plist, CFSTR("DBGOriginalUUID"));
+
+          StringRef UUID(CFStringGetCStringPtr(OldUUID, kCFStringEncodingUTF8));
+          SmallString<256> BCSymbolMapPath(SymbolMapPath);
+          sys::path::append(BCSymbolMapPath, UUID.str() + ".bcsymbolmap");
+          SymbolMapPath = BCSymbolMapPath.str();
+        }
+        CFRelease(plist);
+      }
+      CFReadStreamClose(resourceData);
+      CFRelease(resourceData);
+    }
+    CFRelease(fileURL);
+    CFRelease(plistFile);
+  }
+#endif
+
+  if (sys::fs::is_directory(SymbolMapPath)) {
+    SymbolMapPath += (Twine("/") + sys::path::filename(InputFile) + "-" +
+                      MachOUtils::getArchName(Map.getTriple().getArchName()) +
+                      ".bcsymbolmap")
+                         .str();
+  }
+
+  auto ErrOrMemBuffer = MemoryBuffer::getFile(SymbolMapPath);
+  if (auto EC = ErrOrMemBuffer.getError()) {
+    WithColor::warning() << SymbolMapPath << ": " << EC.message()
+                         << ": not unobfuscating.\n";
+    return {};
+  }
+
+  std::vector<std::string> UnobfuscatedStrings;
+  auto &MemBuf = **ErrOrMemBuffer;
+  StringRef Data(MemBuf.getBufferStart(),
+                 MemBuf.getBufferEnd() - MemBuf.getBufferStart());
+  StringRef LHS;
+  std::tie(LHS, Data) = Data.split('\n');
+  bool MangleNames = false;
+
+  // Check version string first.
+  if (!LHS.startswith("BCSymbolMap Version:")) {
+    // Version string not present; warn, but try to parse it anyway.
+    WithColor::warning() << SymbolMapPath
+                         << " is missing version string: assuming 1.0.\n";
+    UnobfuscatedStrings.emplace_back(LHS);
+  } else if (LHS.equals("BCSymbolMap Version: 1.0")) {
+    MangleNames = true;
+  } else if (LHS.equals("BCSymbolMap Version: 2.0")) {
+    MangleNames = false;
+  } else {
+    StringRef VersionNum;
+    std::tie(LHS, VersionNum) = LHS.split(':');
+    WithColor::warning() << SymbolMapPath
+                         << " has unsupported symbol map version" << VersionNum
+                         << ": not unobfuscating.\n";
+    return {};
+  }
+
+  while (!Data.empty()) {
+    std::tie(LHS, Data) = Data.split('\n');
+    UnobfuscatedStrings.emplace_back(LHS);
+  }
+
+  return SymbolMapTranslator(std::move(UnobfuscatedStrings), MangleNames);
+}
+
+} // namespace dsymutil
+} // namespace llvm
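For orientation, a hypothetical version 1.0 BCSymbolMap (contents invented for
illustration):

BCSymbolMap Version: 1.0
_obfuscated_placeholder
-[MyClass doWork]
main

With this map loaded, operator() maps "__hidden#2_" to "main" (line 2, counting
from 0 after the version line). The "___hidden#2_" form seen in MachO symbol
tables yields "_main", since version 1.0 sets MangleNames and the translator
prepends the underscore.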
diff --git a/tools/dsymutil/SymbolMap.h b/tools/dsymutil/SymbolMap.h
new file mode 100644
index 0000000..e3fbdbb
--- /dev/null
+++ b/tools/dsymutil/SymbolMap.h
@@ -0,0 +1,54 @@
+//=- tools/dsymutil/SymbolMap.h -----------------------------------*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_DSYMUTIL_SYMBOLMAP_H
+#define LLVM_TOOLS_DSYMUTIL_SYMBOLMAP_H
+
+#include "llvm/ADT/StringRef.h"
+
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace dsymutil {
+class DebugMap;
+
+/// Callable class to unobfuscate strings based on a BCSymbolMap.
+class SymbolMapTranslator {
+public:
+  SymbolMapTranslator() : MangleNames(false) {}
+
+  SymbolMapTranslator(std::vector<std::string> UnobfuscatedStrings,
+                      bool MangleNames)
+      : UnobfuscatedStrings(std::move(UnobfuscatedStrings)),
+        MangleNames(MangleNames) {}
+
+  StringRef operator()(StringRef Input);
+
+  operator bool() const { return !UnobfuscatedStrings.empty(); }
+
+private:
+  std::vector<std::string> UnobfuscatedStrings;
+  bool MangleNames;
+};
+
+/// Class to initialize SymbolMapTranslators from a BCSymbolMap.
+class SymbolMapLoader {
+public:
+  SymbolMapLoader(std::string SymbolMap) : SymbolMap(std::move(SymbolMap)) {}
+
+  SymbolMapTranslator Load(StringRef InputFile, const DebugMap &Map) const;
+
+private:
+  const std::string SymbolMap;
+};
+} // namespace dsymutil
+} // namespace llvm
+
+#endif // LLVM_TOOLS_DSYMUTIL_SYMBOLMAP_H
diff --git a/tools/dsymutil/dsymutil.cpp b/tools/dsymutil/dsymutil.cpp
index 5fe4067..ec8d050 100644
--- a/tools/dsymutil/dsymutil.cpp
+++ b/tools/dsymutil/dsymutil.cpp
@@ -59,6 +59,8 @@
     OutputFileOpt("o",
                   desc("Specify the output file. default: <input file>.dwarf"),
                   value_desc("filename"), cat(DsymCategory));
+static alias OutputFileOptA("out", desc("Alias for -o"),
+                            aliasopt(OutputFileOpt));
 
 static opt<std::string> OsoPrependPath(
     "oso-prepend-path",
@@ -100,6 +102,11 @@
     init(false), cat(DsymCategory));
 static alias UpdateA("u", desc("Alias for --update"), aliasopt(Update));
 
+static opt<std::string> SymbolMap(
+    "symbol-map",
+    desc("Updates the existing dSYMs inplace using symbol map specified."),
+    value_desc("bcsymbolmap"), cat(DsymCategory));
+
 static cl::opt<AccelTableKind> AcceleratorTable(
     "accelerator", cl::desc("Output accelerator tables."),
     cl::values(clEnumValN(AccelTableKind::Default, "Default",
@@ -273,8 +280,11 @@
 }
 
 static Expected<std::string> getOutputFileName(llvm::StringRef InputFile) {
+  if (OutputFileOpt == "-")
+    return OutputFileOpt;
+
   // When updating, do in place replacement.
-  if (OutputFileOpt.empty() && Update)
+  if (OutputFileOpt.empty() && (Update || !SymbolMap.empty()))
     return InputFile;
 
   // If a flat dSYM has been requested, things are pretty simple.
@@ -325,6 +335,9 @@
   Options.PrependPath = OsoPrependPath;
   Options.TheAccelTableKind = AcceleratorTable;
 
+  if (!SymbolMap.empty())
+    Options.Update = true;
+
   if (Assembly)
     Options.FileType = OutputFileType::Assembly;
 
@@ -443,6 +456,13 @@
     return 1;
   }
 
+  if (InputFiles.size() > 1 && !SymbolMap.empty() &&
+      !llvm::sys::fs::is_directory(SymbolMap)) {
+    WithColor::error() << "when unobfuscating multiple files, --symbol-map "
+                       << "needs to point to a directory.\n";
+    return 1;
+  }
+
   if (getenv("RC_DEBUG_OPTIONS"))
     PaperTrailWarnings = true;
 
@@ -457,6 +477,8 @@
       return 1;
     }
 
+  SymbolMapLoader SymMapLoader(SymbolMap);
+
   for (auto &InputFile : *InputsOrErr) {
     // Dump the symbol table for each input file and requested arch
     if (DumpStab) {
@@ -511,6 +533,9 @@
       if (DumpDebugMap)
         continue;
 
+      if (!SymbolMap.empty())
+        OptionsOrErr->Translator = SymMapLoader.Load(InputFile, *Map);
+
       if (Map->begin() == Map->end())
         WithColor::warning()
             << "no debug symbols in executable (-arch "
diff --git a/tools/gold/CMakeLists.txt b/tools/gold/CMakeLists.txt
index d258032..72f7655 100644
--- a/tools/gold/CMakeLists.txt
+++ b/tools/gold/CMakeLists.txt
@@ -11,7 +11,7 @@
      IPO
      )
 
-  add_llvm_loadable_module(LLVMgold
+  add_llvm_library(LLVMgold MODULE
     gold-plugin.cpp
     )
 
diff --git a/tools/gold/gold-plugin.cpp b/tools/gold/gold-plugin.cpp
index 8ffa587..738cafa 100644
--- a/tools/gold/gold-plugin.cpp
+++ b/tools/gold/gold-plugin.cpp
@@ -128,6 +128,7 @@
     OT_NORMAL,
     OT_DISABLE,
     OT_BC_ONLY,
+    OT_ASM_ONLY,
     OT_SAVE_TEMPS
   };
   static OutputType TheOutputType = OT_NORMAL;
@@ -229,6 +230,8 @@
       TheOutputType = OT_SAVE_TEMPS;
     } else if (opt == "disable-output") {
       TheOutputType = OT_DISABLE;
+    } else if (opt == "emit-asm") {
+      TheOutputType = OT_ASM_ONLY;
     } else if (opt == "thinlto") {
       thinlto = true;
     } else if (opt == "thinlto-index-only") {
@@ -882,6 +885,9 @@
     check(Conf.addSaveTemps(output_name + ".",
                             /* UseInputModulePath */ true));
     break;
+  case options::OT_ASM_ONLY:
+    Conf.CGFileType = TargetMachine::CGFT_AssemblyFile;
+    break;
   }
 
   if (!options::sample_profile.empty())
@@ -1009,6 +1015,8 @@
     Filename = options::obj_path;
   else if (options::TheOutputType == options::OT_SAVE_TEMPS)
     Filename = output_name + ".o";
+  else if (options::TheOutputType == options::OT_ASM_ONLY)
+    Filename = output_name;
   bool SaveTemps = !Filename.empty();
 
   size_t MaxTasks = Lto->getMaxTasks();
@@ -1057,7 +1065,8 @@
   std::vector<std::pair<SmallString<128>, bool>> Files = runLTO();
 
   if (options::TheOutputType == options::OT_DISABLE ||
-      options::TheOutputType == options::OT_BC_ONLY)
+      options::TheOutputType == options::OT_BC_ONLY ||
+      options::TheOutputType == options::OT_ASM_ONLY)
     return LDPS_OK;
 
   if (options::thinlto_index_only) {
@@ -1082,6 +1091,7 @@
   llvm_shutdown();
 
   if (options::TheOutputType == options::OT_BC_ONLY ||
+      options::TheOutputType == options::OT_ASM_ONLY ||
       options::TheOutputType == options::OT_DISABLE) {
     if (options::TheOutputType == options::OT_DISABLE) {
       // Remove the output file here since ld.bfd creates the output file
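A hypothetical link line exercising the new option (paths invented for
illustration):

ld.gold -plugin LLVMgold.so -plugin-opt=emit-asm foo.o -o foo

With emit-asm selected, LTO codegen is configured for CGFT_AssemblyFile, the
assembly lands at the output name, and the plugin returns without handing an
object back to the linker, just as the bitcode-only mode does.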
diff --git a/tools/lli/lli.cpp b/tools/lli/lli.cpp
index e4a7462..7e93d31 100644
--- a/tools/lli/lli.cpp
+++ b/tools/lli/lli.cpp
@@ -35,7 +35,6 @@
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Type.h"
-#include "llvm/IR/TypeBuilder.h"
 #include "llvm/IR/Verifier.h"
 #include "llvm/IRReader/IRReader.h"
 #include "llvm/Object/Archive.h"
@@ -317,23 +316,18 @@
   M->setTargetTriple(TargetTripleStr);
 
   // Create an empty function named "__main".
-  Function *Result;
-  if (TargetTriple.isArch64Bit()) {
-    Result = Function::Create(
-      TypeBuilder<int64_t(void), false>::get(Context),
-      GlobalValue::ExternalLinkage, "__main", M.get());
-  } else {
-    Result = Function::Create(
-      TypeBuilder<int32_t(void), false>::get(Context),
-      GlobalValue::ExternalLinkage, "__main", M.get());
-  }
+  Type *ReturnTy;
+  if (TargetTriple.isArch64Bit())
+    ReturnTy = Type::getInt64Ty(Context);
+  else
+    ReturnTy = Type::getInt32Ty(Context);
+  Function *Result =
+      Function::Create(FunctionType::get(ReturnTy, {}, false),
+                       GlobalValue::ExternalLinkage, "__main", M.get());
+
   BasicBlock *BB = BasicBlock::Create(Context, "__main", Result);
   Builder.SetInsertPoint(BB);
-  Value *ReturnVal;
-  if (TargetTriple.isArch64Bit())
-    ReturnVal = ConstantInt::get(Context, APInt(64, 0));
-  else
-    ReturnVal = ConstantInt::get(Context, APInt(32, 0));
+  Value *ReturnVal = ConstantInt::get(ReturnTy, 0);
   Builder.CreateRet(ReturnVal);
 
   // Add this new module to the ExecutionEngine.
diff --git a/tools/llvm-ar/llvm-ar.cpp b/tools/llvm-ar/llvm-ar.cpp
index 5ab8ae1..1c453ee 100644
--- a/tools/llvm-ar/llvm-ar.cpp
+++ b/tools/llvm-ar/llvm-ar.cpp
@@ -33,6 +33,7 @@
 #include "llvm/Support/StringSaver.h"
 #include "llvm/Support/TargetSelect.h"
 #include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/WithColor.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h"
 #include "llvm/ToolDrivers/llvm-lib/LibDriver.h"
@@ -115,7 +116,7 @@
 
 // Show the error message and exit.
 LLVM_ATTRIBUTE_NORETURN static void fail(Twine Error) {
-  errs() << ToolName << ": " << Error << ".\n";
+  WithColor::error(errs(), ToolName) << Error << ".\n";
   printHelpMessage();
   exit(1);
 }
@@ -125,7 +126,7 @@
     return;
 
   std::string ContextStr = Context.str();
-  if (ContextStr == "")
+  if (ContextStr.empty())
     fail(EC.message());
   fail(Context + ": " + EC.message());
 }
@@ -136,7 +137,7 @@
 
   handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EIB) {
     std::string ContextStr = Context.str();
-    if (ContextStr == "")
+    if (ContextStr.empty())
       fail(EIB.message());
     fail(Context + ": " + EIB.message());
   });
@@ -196,7 +197,7 @@
 // Extract the member filename from the command line for the [relpos] argument
 // associated with a, b, and i modifiers
 static void getRelPos() {
-  if (PositionalArgs.size() == 0)
+  if (PositionalArgs.empty())
     fail("Expected [relpos] for a, b, or i modifier");
   RelPos = PositionalArgs[0];
   PositionalArgs.erase(PositionalArgs.begin());
@@ -204,7 +205,7 @@
 
 // Get the archive file name from the command line
 static void getArchive() {
-  if (PositionalArgs.size() == 0)
+  if (PositionalArgs.empty())
     fail("An archive name must be specified");
   ArchiveName = PositionalArgs[0];
   PositionalArgs.erase(PositionalArgs.begin());
@@ -221,7 +222,7 @@
 
 static object::Archive &readLibrary(const Twine &Library) {
   auto BufOrErr = MemoryBuffer::getFile(Library, -1, false);
-  failIfError(BufOrErr.getError(), "Could not open library");
+  failIfError(BufOrErr.getError(), "Could not open library " + Library);
   ArchiveBuffers.push_back(std::move(*BufOrErr));
   auto LibOrErr =
       object::Archive::create(ArchiveBuffers.back()->getMemBufferRef());
@@ -532,56 +533,57 @@
   if (Members.empty())
     return;
   for (StringRef Name : Members)
-    errs() << Name << " was not found\n";
+    WithColor::error(errs(), ToolName) << "'" << Name << "' was not found\n";
   exit(1);
 }
 
-static void addMember(std::vector<NewArchiveMember> &Members,
-                      StringRef FileName, int Pos = -1) {
-  Expected<NewArchiveMember> NMOrErr =
-      NewArchiveMember::getFile(FileName, Deterministic);
-  failIfError(NMOrErr.takeError(), FileName);
-
-  // Use the basename of the object path for the member name.
-  NMOrErr->MemberName = sys::path::filename(NMOrErr->MemberName);
-
-  if (Pos == -1)
-    Members.push_back(std::move(*NMOrErr));
-  else
-    Members[Pos] = std::move(*NMOrErr);
-}
-
-static void addMember(std::vector<NewArchiveMember> &Members,
-                      const object::Archive::Child &M, int Pos = -1) {
+static void addChildMember(std::vector<NewArchiveMember> &Members,
+                           const object::Archive::Child &M,
+                           bool FlattenArchive = false) {
   if (Thin && !M.getParent()->isThin())
     fail("Cannot convert a regular archive to a thin one");
   Expected<NewArchiveMember> NMOrErr =
       NewArchiveMember::getOldMember(M, Deterministic);
   failIfError(NMOrErr.takeError());
-  if (Pos == -1)
-    Members.push_back(std::move(*NMOrErr));
-  else
-    Members[Pos] = std::move(*NMOrErr);
+  if (FlattenArchive &&
+      identify_magic(NMOrErr->Buf->getBuffer()) == file_magic::archive) {
+    Expected<std::string> FileNameOrErr = M.getFullName();
+    failIfError(FileNameOrErr.takeError());
+    object::Archive &Lib = readLibrary(*FileNameOrErr);
+    // When creating thin archives, only flatten if the member is also thin.
+    if (!Thin || Lib.isThin()) {
+      Error Err = Error::success();
+      // Only Thin archives are recursively flattened.
+      for (auto &Child : Lib.children(Err))
+        addChildMember(Members, Child, /*FlattenArchive=*/Thin);
+      failIfError(std::move(Err));
+      return;
+    }
+  }
+  Members.push_back(std::move(*NMOrErr));
 }
 
-static void addLibMember(std::vector<NewArchiveMember> &Members,
-                         StringRef FileName) {
+static void addMember(std::vector<NewArchiveMember> &Members,
+                      StringRef FileName, bool FlattenArchive = false) {
   Expected<NewArchiveMember> NMOrErr =
       NewArchiveMember::getFile(FileName, Deterministic);
   failIfError(NMOrErr.takeError(), FileName);
-  if (identify_magic(NMOrErr->Buf->getBuffer()) == file_magic::archive) {
+  if (FlattenArchive &&
+      identify_magic(NMOrErr->Buf->getBuffer()) == file_magic::archive) {
     object::Archive &Lib = readLibrary(FileName);
-    Error Err = Error::success();
-
-    for (auto &Child : Lib.children(Err))
-      addMember(Members, Child);
-
-    failIfError(std::move(Err));
-  } else {
-    // Use the basename of the object path for the member name.
-    NMOrErr->MemberName = sys::path::filename(NMOrErr->MemberName);
-    Members.push_back(std::move(*NMOrErr));
+    // When creating thin archives, only flatten if the member is also thin.
+    if (!Thin || Lib.isThin()) {
+      Error Err = Error::success();
+      // Only Thin archives are recursively flattened.
+      for (auto &Child : Lib.children(Err))
+        addChildMember(Members, Child, /*FlattenArchive=*/Thin);
+      failIfError(std::move(Err));
+      return;
+    }
   }
+  // Use the basename of the object path for the member name.
+  NMOrErr->MemberName = sys::path::filename(NMOrErr->MemberName);
+  Members.push_back(std::move(*NMOrErr));
 }
 
 enum InsertAction {
@@ -670,7 +672,7 @@
           computeInsertAction(Operation, Child, Name, MemberI);
       switch (Action) {
       case IA_AddOldMember:
-        addMember(Ret, Child);
+        addChildMember(Ret, Child);
         break;
       case IA_AddNewMember:
         addMember(Ret, *MemberI);
@@ -678,7 +680,7 @@
       case IA_Delete:
         break;
       case IA_MoveOldMember:
-        addMember(Moved, Child);
+        addChildMember(Moved, Child);
         break;
       case IA_MoveNewMember:
         addMember(Moved, *MemberI);
@@ -709,17 +711,16 @@
   if (AddLibrary) {
     assert(Operation == QuickAppend);
     for (auto &Member : Members)
-      addLibMember(Ret, Member);
+      addMember(Ret, Member, /*FlattenArchive=*/true);
     return Ret;
   }
 
-  for (unsigned I = 0; I != Members.size(); ++I)
-    Ret.insert(Ret.begin() + InsertPos, NewArchiveMember());
-  Pos = InsertPos;
-  for (auto &Member : Members) {
-    addMember(Ret, Member, Pos);
-    ++Pos;
-  }
+  std::vector<NewArchiveMember> NewMembers;
+  for (auto &Member : Members)
+    addMember(NewMembers, Member, /*FlattenArchive=*/Thin);
+  Ret.reserve(Ret.size() + NewMembers.size());
+  std::move(NewMembers.begin(), NewMembers.end(),
+            std::inserter(Ret, std::next(Ret.begin(), InsertPos)));
 
   return Ret;
 }
@@ -760,11 +761,11 @@
     else if (OldArchive)
       Kind = OldArchive->kind();
     else if (NewMembersP)
-      Kind = NewMembersP->size() ? getKindFromMember(NewMembersP->front())
-                                 : getDefaultForHost();
+      Kind = !NewMembersP->empty() ? getKindFromMember(NewMembersP->front())
+                                   : getDefaultForHost();
     else
-      Kind = NewMembers.size() ? getKindFromMember(NewMembers.front())
-                               : getDefaultForHost();
+      Kind = !NewMembers.empty() ? getKindFromMember(NewMembers.front())
+                                 : getDefaultForHost();
     break;
   case GNU:
     Kind = object::Archive::K_GNU;
@@ -853,7 +854,8 @@
   } else {
     if (!Create) {
       // Produce a warning if we should and we're creating the archive
-      errs() << ToolName << ": creating " << ArchiveName << "\n";
+      WithColor::warning(errs(), ToolName)
+          << "creating " << ArchiveName << "\n";
     }
   }
 
@@ -897,7 +899,7 @@
       {
         Error Err = Error::success();
         for (auto &Member : Lib.children(Err))
-          addMember(NewMembers, Member);
+          addChildMember(NewMembers, Member);
         failIfError(std::move(Err));
       }
       break;
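The FlattenArchive path is what the quick-append L modifier relies on; a
hypothetical session (archive names invented for illustration):

llvm-ar qcL combined.a libfoo.a libbar.a   # appends the members, not the libraries
llvm-ar qcLT thin.a thin1.a thin2.a        # thin mode: only thin members are flattened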
diff --git a/tools/llvm-cov/CodeCoverage.cpp b/tools/llvm-cov/CodeCoverage.cpp
index 1dc6eed..728e00e 100644
--- a/tools/llvm-cov/CodeCoverage.cpp
+++ b/tools/llvm-cov/CodeCoverage.cpp
@@ -691,7 +691,7 @@
       PathRemapping = EquivPair;
 
     // If a demangler is supplied, check if it exists and register it.
-    if (DemanglerOpts.size()) {
+    if (!DemanglerOpts.empty()) {
       auto DemanglerPathOrErr = sys::findProgramByName(DemanglerOpts[0]);
       if (!DemanglerPathOrErr) {
         error("Could not find the demangler!",
diff --git a/tools/llvm-cov/SourceCoverageView.cpp b/tools/llvm-cov/SourceCoverageView.cpp
index 775322b..cebaf63 100644
--- a/tools/llvm-cov/SourceCoverageView.cpp
+++ b/tools/llvm-cov/SourceCoverageView.cpp
@@ -31,7 +31,7 @@
 std::string CoveragePrinter::getOutputPath(StringRef Path, StringRef Extension,
                                            bool InToplevel,
                                            bool Relative) const {
-  assert(Extension.size() && "The file extension may not be empty");
+  assert(!Extension.empty() && "The file extension may not be empty");
 
   SmallString<256> FullPath;
 
diff --git a/tools/llvm-cov/SourceCoverageViewHTML.cpp b/tools/llvm-cov/SourceCoverageViewHTML.cpp
index acb67aa..3f730bb 100644
--- a/tools/llvm-cov/SourceCoverageViewHTML.cpp
+++ b/tools/llvm-cov/SourceCoverageViewHTML.cpp
@@ -54,7 +54,7 @@
 std::string tag(const std::string &Name, const std::string &Str,
                 const std::string &ClassName = "") {
   std::string Tag = "<" + Name;
-  if (ClassName != "")
+  if (!ClassName.empty())
     Tag += " class='" + ClassName + "'";
   return Tag + ">" + Str + "</" + Name + ">";
 }
diff --git a/tools/llvm-demangle-fuzzer/CMakeLists.txt b/tools/llvm-demangle-fuzzer/CMakeLists.txt
deleted file mode 100644
index 0fe711c..0000000
--- a/tools/llvm-demangle-fuzzer/CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-set(LLVM_LINK_COMPONENTS
-  Demangle
-  FuzzMutate
-  Support
-)
-
-add_llvm_fuzzer(llvm-demangle-fuzzer
-  llvm-demangle-fuzzer.cpp
-  DUMMY_MAIN DummyDemanglerFuzzer.cpp
-  )
diff --git a/tools/llvm-elfabi/CMakeLists.txt b/tools/llvm-elfabi/CMakeLists.txt
new file mode 100644
index 0000000..bd3ec85
--- /dev/null
+++ b/tools/llvm-elfabi/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(LLVM_LINK_COMPONENTS
+  Object
+  Support
+  TextAPI
+  )
+
+add_llvm_tool(llvm-elfabi
+  ELFObjHandler.cpp
+  ErrorCollector.cpp
+  llvm-elfabi.cpp
+  )
diff --git a/tools/llvm-elfabi/ELFObjHandler.cpp b/tools/llvm-elfabi/ELFObjHandler.cpp
new file mode 100644
index 0000000..412c299
--- /dev/null
+++ b/tools/llvm-elfabi/ELFObjHandler.cpp
@@ -0,0 +1,68 @@
+//===- ELFObjHandler.cpp --------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ELFObjHandler.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ELFTypes.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/TextAPI/ELF/ELFStub.h"
+
+using llvm::MemoryBufferRef;
+using llvm::object::ELFObjectFile;
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::elfabi;
+using namespace llvm::ELF;
+
+namespace llvm {
+namespace elfabi {
+
+/// Returns a new ELFStub with all members populated from an ELFObjectFile.
+/// @param ElfObj Source ELFObjectFile.
+template <class ELFT>
+Expected<std::unique_ptr<ELFStub>>
+buildStub(const ELFObjectFile<ELFT> &ElfObj) {
+  std::unique_ptr<ELFStub> DestStub = make_unique<ELFStub>();
+  const ELFFile<ELFT> *ElfFile = ElfObj.getELFFile();
+
+  DestStub->Arch = ElfFile->getHeader()->e_machine;
+
+  // TODO: Populate SoName from .dynamic entries and linked string table.
+  // TODO: Populate NeededLibs from .dynamic entries and linked string table.
+  // TODO: Populate Symbols from .dynsym table and linked string table.
+
+  return std::move(DestStub);
+}
+
+Expected<std::unique_ptr<ELFStub>> readELFFile(MemoryBufferRef Buf) {
+  Expected<std::unique_ptr<Binary>> BinOrErr = createBinary(Buf);
+  if (!BinOrErr) {
+    return BinOrErr.takeError();
+  }
+
+  Binary *Bin = BinOrErr->get();
+  if (auto Obj = dyn_cast<ELFObjectFile<ELF32LE>>(Bin)) {
+    return buildStub(*Obj);
+  } else if (auto Obj = dyn_cast<ELFObjectFile<ELF64LE>>(Bin)) {
+    return buildStub(*Obj);
+  } else if (auto Obj = dyn_cast<ELFObjectFile<ELF32BE>>(Bin)) {
+    return buildStub(*Obj);
+  } else if (auto Obj = dyn_cast<ELFObjectFile<ELF64BE>>(Bin)) {
+    return buildStub(*Obj);
+  }
+
+  return createStringError(errc::not_supported, "Unsupported binary format");
+}
+
+} // end namespace elfabi
+} // end namespace llvm
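A minimal caller sketch, assuming the usual MemoryBuffer plumbing (the file
name is invented for illustration):

ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
    MemoryBuffer::getFile("libfoo.so");
if (std::error_code EC = BufOrErr.getError())
  report_fatal_error(EC.message());
Expected<std::unique_ptr<ELFStub>> StubOrErr =
    readELFFile((*BufOrErr)->getMemBufferRef());
if (!StubOrErr)
  logAllUnhandledErrors(StubOrErr.takeError(), errs(), "readELFFile: ");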
diff --git a/tools/llvm-elfabi/ELFObjHandler.h b/tools/llvm-elfabi/ELFObjHandler.h
new file mode 100644
index 0000000..496bad0
--- /dev/null
+++ b/tools/llvm-elfabi/ELFObjHandler.h
@@ -0,0 +1,33 @@
+//===- ELFObjHandler.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This supports reading and writing of ELF dynamic shared objects.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_ELFABI_ELFOBJHANDLER_H
+#define LLVM_TOOLS_ELFABI_ELFOBJHANDLER_H
+
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ELFTypes.h"
+#include "llvm/TextAPI/ELF/ELFStub.h"
+
+namespace llvm {
+
+class MemoryBuffer;
+
+namespace elfabi {
+
+/// Attempt to read a binary ELF file from a MemoryBuffer.
+Expected<std::unique_ptr<ELFStub>> readELFFile(MemoryBufferRef Buf);
+
+} // end namespace elfabi
+} // end namespace llvm
+
+#endif // LLVM_TOOLS_ELFABI_ELFOBJHANDLER_H
diff --git a/tools/llvm-elfabi/ErrorCollector.cpp b/tools/llvm-elfabi/ErrorCollector.cpp
new file mode 100644
index 0000000..0d74979
--- /dev/null
+++ b/tools/llvm-elfabi/ErrorCollector.cpp
@@ -0,0 +1,70 @@
+//===- ErrorCollector.cpp -------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ErrorCollector.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/WithColor.h"
+#include <vector>
+
+using namespace llvm;
+using namespace llvm::elfabi;
+
+void ErrorCollector::escalateToFatal() {
+  ErrorsAreFatal = true;
+}
+
+void ErrorCollector::addError(Error &&Err, StringRef Tag) {
+  if (Err) {
+    Errors.push_back(std::move(Err));
+    Tags.push_back(Tag.str());
+  }
+}
+
+Error ErrorCollector::makeError() {
+  // TODO: Make this return something (an AggregateError?) that gives more
+  // individual control over each error and which might be of interest.
+  Error JoinedErrors = Error::success();
+  for (Error &E : Errors) {
+    JoinedErrors = joinErrors(std::move(JoinedErrors), std::move(E));
+  }
+  Errors.clear();
+  Tags.clear();
+  return JoinedErrors;
+}
+
+void ErrorCollector::log(raw_ostream &OS) {
+  OS << "Encountered multiple errors:\n";
+  for (size_t i = 0; i < Errors.size(); ++i) {
+    WithColor::error(OS) << "(" << Tags[i] << ") " << Errors[i];
+    if (i != Errors.size() - 1)
+      OS << "\n";
+  }
+}
+
+bool ErrorCollector::allErrorsHandled() const {
+  return Errors.empty();
+}
+
+ErrorCollector::~ErrorCollector() {
+  if (ErrorsAreFatal && !allErrorsHandled())
+    fatalUnhandledError();
+
+  for (Error &E : Errors) {
+    consumeError(std::move(E));
+  }
+}
+
+LLVM_ATTRIBUTE_NORETURN void ErrorCollector::fatalUnhandledError() {
+  errs() << "Program aborted due to unhandled Error(s):\n";
+  log(errs());
+  errs() << "\n";
+  abort();
+}
diff --git a/tools/llvm-elfabi/ErrorCollector.h b/tools/llvm-elfabi/ErrorCollector.h
new file mode 100644
index 0000000..d54b3fb
--- /dev/null
+++ b/tools/llvm-elfabi/ErrorCollector.h
@@ -0,0 +1,75 @@
+//===- ErrorCollector.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This class collects errors that should be reported or ignored in aggregate.
+///
+/// Like llvm::Error, an ErrorCollector cannot be copied. Unlike llvm::Error,
+/// an ErrorCollector may be destroyed if it was originally constructed to treat
+/// errors as non-fatal. In this case, all Errors are consumed upon destruction.
+/// An ErrorCollector may be initially constructed (or escalated) such that
+/// errors are treated as fatal. This causes a crash if an attempt is made to
+/// delete the ErrorCollector when some Errors have not been retrieved via
+/// makeError().
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_ELFABI_ERRORCOLLECTOR_H
+#define LLVM_TOOLS_ELFABI_ERRORCOLLECTOR_H
+
+#include "llvm/Support/Error.h"
+#include <vector>
+
+namespace llvm {
+namespace elfabi {
+
+class ErrorCollector {
+public:
+  /// Upon destruction, an ErrorCollector will crash if UseFatalErrors=true and
+  /// there are remaining errors that haven't been fetched by makeError().
+  ErrorCollector(bool UseFatalErrors = true) : ErrorsAreFatal(UseFatalErrors) {}
+  // Don't allow copying.
+  ErrorCollector(const ErrorCollector &Other) = delete;
+  ErrorCollector &operator=(const ErrorCollector &Other) = delete;
+  ~ErrorCollector();
+
+  // TODO: Add move constructor and operator= when a testable situation arises.
+
+  /// Returns a single error that contains messages for all stored Errors.
+  Error makeError();
+
+  /// Adds an error with a descriptive tag that helps with identification.
+  /// If the error is an Error::success(), it is checked and discarded.
+  void addError(Error &&E, StringRef Tag);
+
+  /// This ensures an ErrorCollector will treat unhandled errors as fatal.
+  /// This function should be called when errors that can usually be ignored
+  /// suddenly become of concern (e.g. when attempting multiple things that
+  /// return Error, where the Errors only matter if no attempt succeeds).
+  void escalateToFatal();
+
+private:
+  /// Logs all errors to a raw_ostream.
+  void log(raw_ostream &OS);
+
+  /// Returns true if all errors have been retrieved through makeError(), or
+  /// false if errors have been added since the last makeError() call.
+  bool allErrorsHandled() const;
+
+  /// Dump output and crash.
+  LLVM_ATTRIBUTE_NORETURN void fatalUnhandledError();
+
+  bool ErrorsAreFatal;
+  std::vector<Error> Errors;
+  std::vector<std::string> Tags;
+};
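+
+// A minimal usage sketch (hypothetical; for illustration only). Assume the
+// enclosing function returns Expected<int>, and that tryFirstThing() and
+// trySecondThing() are placeholder fallible operations:
+//
+//   ErrorCollector EC(/*UseFatalErrors=*/false);
+//   if (Expected<int> A = tryFirstThing())
+//     return *A;
+//   else
+//     EC.addError(A.takeError(), "FirstThing");
+//   if (Expected<int> B = trySecondThing())
+//     return *B;
+//   else
+//     EC.addError(B.takeError(), "SecondThing");
+//   // Neither attempt succeeded, so the collected Errors now matter.
+//   EC.escalateToFatal();
+//   return EC.makeError();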
+
+} // end namespace elfabi
+} // end namespace llvm
+
+#endif // LLVM_TOOLS_ELFABI_ERRORCOLLECTOR_H
diff --git a/tools/llvm-elfabi/LLVMBuild.txt b/tools/llvm-elfabi/LLVMBuild.txt
new file mode 100644
index 0000000..e4fdc9a
--- /dev/null
+++ b/tools/llvm-elfabi/LLVMBuild.txt
@@ -0,0 +1,22 @@
+;===- ./tools/llvm-elfabi/LLVMBuild.txt ------------------------*- Conf -*--===;
+;
+;                     The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+;   http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Tool
+name = llvm-elfabi
+parent = Tools
+required_libraries = Object Support TextAPI
diff --git a/tools/llvm-elfabi/llvm-elfabi.cpp b/tools/llvm-elfabi/llvm-elfabi.cpp
new file mode 100644
index 0000000..4c15bc2
--- /dev/null
+++ b/tools/llvm-elfabi/llvm-elfabi.cpp
@@ -0,0 +1,143 @@
+//===- llvm-elfabi.cpp ----------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ELFObjHandler.h"
+#include "ErrorCollector.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/FileOutputBuffer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/TextAPI/ELF/TBEHandler.h"
+#include <string>
+
+namespace llvm {
+namespace elfabi {
+
+enum class FileFormat {
+  TBE,
+  ELF
+};
+
+} // end namespace elfabi
+} // end namespace llvm
+
+using namespace llvm;
+using namespace llvm::elfabi;
+
+// Command line flags:
+cl::opt<FileFormat> InputFileFormat(
+    cl::desc("Force input file format:"),
+    cl::values(clEnumValN(FileFormat::TBE,
+                          "tbe", "Read `input` as text-based ELF stub"),
+               clEnumValN(FileFormat::ELF,
+                          "elf", "Read `input` as ELF binary")));
+cl::opt<std::string> InputFilePath(cl::Positional, cl::desc("input"),
+                                   cl::Required);
+cl::opt<std::string>
+    EmitTBE("emit-tbe",
+            cl::desc("Emit a text-based ELF stub (.tbe) from the input file"),
+            cl::value_desc("path"));
+cl::opt<std::string> SOName(
+    "soname",
+    cl::desc("Manually set the DT_SONAME entry of any emitted files"),
+    cl::value_desc("name"));
+
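+// Example invocations (illustrative; flag spellings follow the cl::opt
+// declarations above, and file names are placeholders):
+//
+//   llvm-elfabi -emit-tbe=libfoo.tbe libfoo.so
+//   llvm-elfabi -elf -soname=libfoo.so.1 -emit-tbe=libfoo.tbe libfoo.so
+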
+/// writeTBE() writes a text-based ELF stub (.tbe) to a file using the latest
+/// version of the TBE YAML format.
+static Error writeTBE(StringRef FilePath, ELFStub &Stub) {
+  std::error_code SysErr;
+
+  // Open file for writing.
+  raw_fd_ostream Out(FilePath, SysErr);
+  if (SysErr)
+    return createStringError(SysErr, "Couldn't open `%s` for writing",
+                             FilePath.data());
+  // Write file.
+  Error YAMLErr = writeTBEToOutputStream(Out, Stub);
+  if (YAMLErr)
+    return YAMLErr;
+
+  return Error::success();
+}
+
+/// readInputFile populates an ELFStub by attempting to read the
+/// input file using both the TBE and binary ELF parsers.
+static Expected<std::unique_ptr<ELFStub>> readInputFile(StringRef FilePath) {
+  // Read in file.
+  ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrError =
+      MemoryBuffer::getFile(FilePath);
+  if (!BufOrError) {
+    return createStringError(BufOrError.getError(), "Could not open `%s`",
+                             FilePath.data());
+  }
+
+  std::unique_ptr<MemoryBuffer> FileReadBuffer = std::move(*BufOrError);
+  ErrorCollector EC(/*UseFatalErrors=*/false);
+
+  // First try to read as a binary (fails fast if not binary).
+  if (InputFileFormat.getNumOccurrences() == 0 ||
+      InputFileFormat == FileFormat::ELF) {
+    Expected<std::unique_ptr<ELFStub>> StubFromELF =
+        readELFFile(FileReadBuffer->getMemBufferRef());
+    if (StubFromELF) {
+      return std::move(*StubFromELF);
+    }
+    EC.addError(StubFromELF.takeError(), "BinaryRead");
+  }
+
+  // Fall back to reading as a TBE (text-based ELF stub).
+  if (InputFileFormat.getNumOccurrences() == 0 ||
+      InputFileFormat == FileFormat::TBE) {
+    Expected<std::unique_ptr<ELFStub>> StubFromTBE =
+        readTBEFromBuffer(FileReadBuffer->getBuffer());
+    if (StubFromTBE) {
+      return std::move(*StubFromTBE);
+    }
+    EC.addError(StubFromTBE.takeError(), "YamlParse");
+  }
+
+  // If both readers fail, build a new error that includes all information.
+  EC.addError(createStringError(errc::not_supported,
+                                "No file readers succeeded reading `%s` "
+                                "(unsupported/malformed file?)",
+                                FilePath.data()),
+              "ReadInputFile");
+  EC.escalateToFatal();
+  return EC.makeError();
+}
+
+int main(int argc, char *argv[]) {
+  // Parse arguments.
+  cl::ParseCommandLineOptions(argc, argv);
+
+  Expected<std::unique_ptr<ELFStub>> StubOrErr = readInputFile(InputFilePath);
+  if (!StubOrErr) {
+    Error ReadError = StubOrErr.takeError();
+    WithColor::error() << ReadError << "\n";
+    exit(1);
+  }
+
+  std::unique_ptr<ELFStub> TargetStub = std::move(StubOrErr.get());
+
+  // Write out .tbe file.
+  if (EmitTBE.getNumOccurrences() == 1) {
+    TargetStub->TbeVersion = TBEVersionCurrent;
+    if (SOName.getNumOccurrences() == 1) {
+      TargetStub->SoName = SOName;
+    }
+    Error TBEWriteError = writeTBE(EmitTBE, *TargetStub);
+    if (TBEWriteError) {
+      WithColor::error() << TBEWriteError << "\n";
+      exit(1);
+    }
+  }
+}
diff --git a/tools/llvm-itanium-demangle-fuzzer/CMakeLists.txt b/tools/llvm-itanium-demangle-fuzzer/CMakeLists.txt
new file mode 100644
index 0000000..07f02a3
--- /dev/null
+++ b/tools/llvm-itanium-demangle-fuzzer/CMakeLists.txt
@@ -0,0 +1,10 @@
+set(LLVM_LINK_COMPONENTS
+  Demangle
+  FuzzMutate
+  Support
+)
+
+add_llvm_fuzzer(llvm-itanium-demangle-fuzzer
+  llvm-itanium-demangle-fuzzer.cpp
+  DUMMY_MAIN DummyDemanglerFuzzer.cpp
+  )
diff --git a/tools/llvm-demangle-fuzzer/DummyDemanglerFuzzer.cpp b/tools/llvm-itanium-demangle-fuzzer/DummyDemanglerFuzzer.cpp
similarity index 100%
rename from tools/llvm-demangle-fuzzer/DummyDemanglerFuzzer.cpp
rename to tools/llvm-itanium-demangle-fuzzer/DummyDemanglerFuzzer.cpp
diff --git a/tools/llvm-demangle-fuzzer/llvm-demangle-fuzzer.cpp b/tools/llvm-itanium-demangle-fuzzer/llvm-itanium-demangle-fuzzer.cpp
similarity index 100%
rename from tools/llvm-demangle-fuzzer/llvm-demangle-fuzzer.cpp
rename to tools/llvm-itanium-demangle-fuzzer/llvm-itanium-demangle-fuzzer.cpp
diff --git a/tools/llvm-mca/CMakeLists.txt b/tools/llvm-mca/CMakeLists.txt
index 4339d48..1fceb08 100644
--- a/tools/llvm-mca/CMakeLists.txt
+++ b/tools/llvm-mca/CMakeLists.txt
@@ -6,6 +6,7 @@
   AllTargetsDescs
   AllTargetsDisassemblers
   AllTargetsInfos
+  MCA
   MC
   MCParser
   Support
@@ -28,5 +29,3 @@
   )
 
 set(LLVM_MCA_SOURCE_DIR ${CURRENT_SOURCE_DIR})
-add_subdirectory(lib)
-target_link_libraries(llvm-mca PRIVATE LLVMMCA)
diff --git a/tools/llvm-mca/LLVMBuild.txt b/tools/llvm-mca/LLVMBuild.txt
index 0afcd31..a704612 100644
--- a/tools/llvm-mca/LLVMBuild.txt
+++ b/tools/llvm-mca/LLVMBuild.txt
@@ -19,4 +19,4 @@
 type = Tool
 name = llvm-mca
 parent = Tools
-required_libraries = MC MCParser Support all-targets
+required_libraries = MC MCA MCParser Support all-targets
diff --git a/tools/llvm-mca/PipelinePrinter.h b/tools/llvm-mca/PipelinePrinter.h
index 7e42638..456026e 100644
--- a/tools/llvm-mca/PipelinePrinter.h
+++ b/tools/llvm-mca/PipelinePrinter.h
@@ -17,9 +17,9 @@
 #ifndef LLVM_TOOLS_LLVM_MCA_PIPELINEPRINTER_H
 #define LLVM_TOOLS_LLVM_MCA_PIPELINEPRINTER_H
 
-#include "Pipeline.h"
 #include "Views/View.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/MCA/Pipeline.h"
 #include "llvm/Support/raw_ostream.h"
 
 #define DEBUG_TYPE "llvm-mca"
diff --git a/tools/llvm-mca/Views/SummaryView.cpp b/tools/llvm-mca/Views/SummaryView.cpp
index fdf2760..d8ac709 100644
--- a/tools/llvm-mca/Views/SummaryView.cpp
+++ b/tools/llvm-mca/Views/SummaryView.cpp
@@ -14,8 +14,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "Views/SummaryView.h"
-#include "Support.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/MCA/Support.h"
 #include "llvm/Support/Format.h"
 
 namespace llvm {
@@ -27,7 +27,8 @@
                          unsigned Width)
     : SM(Model), Source(S), DispatchWidth(Width), LastInstructionIdx(0),
       TotalCycles(0), NumMicroOps(0),
-      ProcResourceUsage(Model.getNumProcResourceKinds(), 0) {
+      ProcResourceUsage(Model.getNumProcResourceKinds(), 0),
+      ProcResourceMasks(Model.getNumProcResourceKinds()) {
   computeProcResourceMasks(SM, ProcResourceMasks);
 }
 
diff --git a/tools/llvm-mca/Views/View.h b/tools/llvm-mca/Views/View.h
index c332bb5..4b82b0d 100644
--- a/tools/llvm-mca/Views/View.h
+++ b/tools/llvm-mca/Views/View.h
@@ -16,7 +16,7 @@
 #ifndef LLVM_TOOLS_LLVM_MCA_VIEW_H
 #define LLVM_TOOLS_LLVM_MCA_VIEW_H
 
-#include "HWEventListener.h"
+#include "llvm/MCA/HWEventListener.h"
 #include "llvm/Support/raw_ostream.h"
 
 namespace llvm {
diff --git a/tools/llvm-mca/include/Context.h b/tools/llvm-mca/include/Context.h
deleted file mode 100644
index ebd1528..0000000
--- a/tools/llvm-mca/include/Context.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//===---------------------------- Context.h ---------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines a class for holding ownership of various simulated
-/// hardware units.  A Context also provides a utility routine for constructing
-/// a default out-of-order pipeline with fetch, dispatch, execute, and retire
-/// stages.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_CONTEXT_H
-#define LLVM_TOOLS_LLVM_MCA_CONTEXT_H
-#include "HardwareUnits/HardwareUnit.h"
-#include "InstrBuilder.h"
-#include "Pipeline.h"
-#include "SourceMgr.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include <memory>
-
-namespace llvm {
-namespace mca {
-
-/// This is a convenience struct to hold the parameters necessary for creating
-/// the pre-built "default" out-of-order pipeline.
-struct PipelineOptions {
-  PipelineOptions(unsigned DW, unsigned RFS, unsigned LQS, unsigned SQS,
-                  bool NoAlias)
-      : DispatchWidth(DW), RegisterFileSize(RFS), LoadQueueSize(LQS),
-        StoreQueueSize(SQS), AssumeNoAlias(NoAlias) {}
-  unsigned DispatchWidth;
-  unsigned RegisterFileSize;
-  unsigned LoadQueueSize;
-  unsigned StoreQueueSize;
-  bool AssumeNoAlias;
-};
-
-class Context {
-  SmallVector<std::unique_ptr<HardwareUnit>, 4> Hardware;
-  const MCRegisterInfo &MRI;
-  const MCSubtargetInfo &STI;
-
-public:
-  Context(const MCRegisterInfo &R, const MCSubtargetInfo &S) : MRI(R), STI(S) {}
-  Context(const Context &C) = delete;
-  Context &operator=(const Context &C) = delete;
-
-  void addHardwareUnit(std::unique_ptr<HardwareUnit> H) {
-    Hardware.push_back(std::move(H));
-  }
-
-  /// Construct a basic pipeline for simulating an out-of-order pipeline.
-  /// This pipeline consists of Fetch, Dispatch, Execute, and Retire stages.
-  std::unique_ptr<Pipeline> createDefaultPipeline(const PipelineOptions &Opts,
-                                                  InstrBuilder &IB,
-                                                  SourceMgr &SrcMgr);
-};
-
-} // namespace mca
-} // namespace llvm
-#endif // LLVM_TOOLS_LLVM_MCA_CONTEXT_H
diff --git a/tools/llvm-mca/include/HWEventListener.h b/tools/llvm-mca/include/HWEventListener.h
deleted file mode 100644
index 0216fae..0000000
--- a/tools/llvm-mca/include/HWEventListener.h
+++ /dev/null
@@ -1,156 +0,0 @@
-//===----------------------- HWEventListener.h ------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the main interface for hardware event listeners.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_HWEVENTLISTENER_H
-#define LLVM_TOOLS_LLVM_MCA_HWEVENTLISTENER_H
-
-#include "Instruction.h"
-#include "Support.h"
-#include "llvm/ADT/ArrayRef.h"
-
-namespace llvm {
-namespace mca {
-
-// An HWInstructionEvent represents state changes of instructions that
-// listeners might be interested in. Listeners can choose to ignore any event
-// they are not interested in.
-class HWInstructionEvent {
-public:
-  // This is the list of event types that are shared by all targets, that
-  // generic subtarget-agnostic classes (e.g., Pipeline, HWInstructionEvent,
-  // ...) and generic Views can manipulate.
-  // Subtargets are free to define additional event types that are going to be
-  // handled by generic components as opaque values, but can still be
-  // emitted by subtarget-specific pipeline stages (e.g., ExecuteStage,
-  // DispatchStage, ...) and interpreted by subtarget-specific EventListener
-  // implementations.
-  enum GenericEventType {
-    Invalid = 0,
-    // Events generated by the Retire Control Unit.
-    Retired,
-    // Events generated by the Scheduler.
-    Ready,
-    Issued,
-    Executed,
-    // Events generated by the Dispatch logic.
-    Dispatched,
-
-    LastGenericEventType,
-  };
-
-  HWInstructionEvent(unsigned type, const InstRef &Inst)
-      : Type(type), IR(Inst) {}
-
-  // The event type. The exact meaning depends on the subtarget.
-  const unsigned Type;
-
-  // The instruction this event was generated for.
-  const InstRef &IR;
-};
-
-class HWInstructionIssuedEvent : public HWInstructionEvent {
-public:
-  using ResourceRef = std::pair<uint64_t, uint64_t>;
-  HWInstructionIssuedEvent(const InstRef &IR,
-                           ArrayRef<std::pair<ResourceRef, ResourceCycles>> UR)
-      : HWInstructionEvent(HWInstructionEvent::Issued, IR), UsedResources(UR) {}
-
-  ArrayRef<std::pair<ResourceRef, ResourceCycles>> UsedResources;
-};
-
-class HWInstructionDispatchedEvent : public HWInstructionEvent {
-public:
-  HWInstructionDispatchedEvent(const InstRef &IR, ArrayRef<unsigned> Regs,
-                               unsigned UOps)
-      : HWInstructionEvent(HWInstructionEvent::Dispatched, IR),
-        UsedPhysRegs(Regs), MicroOpcodes(UOps) {}
-  // Number of physical register allocated for this instruction. There is one
-  // entry per register file.
-  ArrayRef<unsigned> UsedPhysRegs;
-  // Number of micro opcodes dispatched.
-  // This field is often set to the total number of micro-opcodes specified by
-  // the instruction descriptor of IR.
-  // The only exception is when IR declares a number of micro opcodes
-  // which exceeds the processor DispatchWidth, and - by construction - it
-  // requires multiple cycles to be fully dispatched. In that particular case,
-  // the dispatch logic would generate more than one dispatch event (one per
-  // cycle), and each event would declare how many micro opcodes have effectively
-  // been dispatched to the schedulers.
-  unsigned MicroOpcodes;
-};
-
-class HWInstructionRetiredEvent : public HWInstructionEvent {
-public:
-  HWInstructionRetiredEvent(const InstRef &IR, ArrayRef<unsigned> Regs)
-      : HWInstructionEvent(HWInstructionEvent::Retired, IR),
-        FreedPhysRegs(Regs) {}
-  // Number of register writes that have been architecturally committed. There
-  // is one entry per register file.
-  ArrayRef<unsigned> FreedPhysRegs;
-};
-
-// A HWStallEvent represents a pipeline stall caused by the lack of hardware
-// resources.
-class HWStallEvent {
-public:
-  enum GenericEventType {
-    Invalid = 0,
-    // Generic stall events generated by the DispatchStage.
-    RegisterFileStall,
-    RetireControlUnitStall,
-    // Generic stall events generated by the Scheduler.
-    DispatchGroupStall,
-    SchedulerQueueFull,
-    LoadQueueFull,
-    StoreQueueFull,
-    LastGenericEvent
-  };
-
-  HWStallEvent(unsigned type, const InstRef &Inst) : Type(type), IR(Inst) {}
-
-  // The exact meaning of the stall event type depends on the subtarget.
-  const unsigned Type;
-
-  // The instruction this event was generated for.
-  const InstRef &IR;
-};
-
-class HWEventListener {
-public:
-  // Generic events generated by the pipeline.
-  virtual void onCycleBegin() {}
-  virtual void onCycleEnd() {}
-
-  virtual void onEvent(const HWInstructionEvent &Event) {}
-  virtual void onEvent(const HWStallEvent &Event) {}
-
-  using ResourceRef = std::pair<uint64_t, uint64_t>;
-  virtual void onResourceAvailable(const ResourceRef &RRef) {}
-
-  // Events generated by the Scheduler when buffered resources are
-  // consumed/freed for an instruction.
-  virtual void onReservedBuffers(const InstRef &Inst,
-                                 ArrayRef<unsigned> Buffers) {}
-  virtual void onReleasedBuffers(const InstRef &Inst,
-                                 ArrayRef<unsigned> Buffers) {}
-
-  virtual ~HWEventListener() {}
-
-private:
-  virtual void anchor();
-};
-} // namespace mca
-} // namespace llvm
-
-#endif
diff --git a/tools/llvm-mca/include/HardwareUnits/HardwareUnit.h b/tools/llvm-mca/include/HardwareUnits/HardwareUnit.h
deleted file mode 100644
index 5070418..0000000
--- a/tools/llvm-mca/include/HardwareUnits/HardwareUnit.h
+++ /dev/null
@@ -1,33 +0,0 @@
-//===-------------------------- HardwareUnit.h ------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines a base class for describing a simulated hardware
-/// unit.  These units are used to construct a simulated backend.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_HARDWAREUNIT_H
-#define LLVM_TOOLS_LLVM_MCA_HARDWAREUNIT_H
-
-namespace llvm {
-namespace mca {
-
-class HardwareUnit {
-  HardwareUnit(const HardwareUnit &H) = delete;
-  HardwareUnit &operator=(const HardwareUnit &H) = delete;
-
-public:
-  HardwareUnit() = default;
-  virtual ~HardwareUnit();
-};
-
-} // namespace mca
-} // namespace llvm
-#endif // LLVM_TOOLS_LLVM_MCA_HARDWAREUNIT_H
diff --git a/tools/llvm-mca/include/HardwareUnits/LSUnit.h b/tools/llvm-mca/include/HardwareUnits/LSUnit.h
deleted file mode 100644
index 3f37651..0000000
--- a/tools/llvm-mca/include/HardwareUnits/LSUnit.h
+++ /dev/null
@@ -1,207 +0,0 @@
-//===------------------------- LSUnit.h --------------------------*- C++-*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// A Load/Store unit class that models load/store queues and that implements
-/// a simple weak memory consistency model.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_LSUNIT_H
-#define LLVM_TOOLS_LLVM_MCA_LSUNIT_H
-
-#include "HardwareUnits/HardwareUnit.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/MC/MCSchedule.h"
-
-namespace llvm {
-namespace mca {
-
-class InstRef;
-class Scheduler;
-
-/// A Load/Store Unit implementing a load and store queues.
-///
-/// This class implements a load queue and a store queue to emulate the
-/// out-of-order execution of memory operations.
-/// Each load (or store) consumes an entry in the load (or store) queue.
-///
-/// Rules are:
-/// 1) A younger load is allowed to pass an older load only if there are no
-///    stores nor barriers in between the two loads.
-/// 2) A younger store is not allowed to pass an older store.
-/// 3) A younger store is not allowed to pass an older load.
-/// 4) A younger load is allowed to pass an older store only if the load does
-///    not alias with the store.
-///
-/// This class optimistically assumes that loads don't alias store operations.
-/// Under this assumption, younger loads are always allowed to pass older
-/// stores (this would only affect rule 4).
-/// Essentially, this class doesn't perform any sort of alias analysis to
-/// identify aliasing loads and stores.
-///
-/// To enforce aliasing between loads and stores, flag `AssumeNoAlias` must be
-/// set to `false` by the constructor of LSUnit.
-///
-/// Note that this class doesn't know about the existence of different memory
-/// types for memory operations (example: write-through, write-combining, etc.).
-/// Derived classes are responsible for implementing that extra knowledge, and
-/// provide different sets of rules for loads and stores by overriding method
-/// `isReady()`.
-/// To emulate a write-combining memory type, rule 2. must be relaxed in a
-/// derived class to enable the reordering of non-aliasing store operations.
-///
-/// No assumptions are made by this class on the size of the store buffer.  This
-/// class doesn't know how to identify cases where store-to-load forwarding may
-/// occur.
-///
-/// LSUnit doesn't attempt to predict whether a load or store hits or misses
-/// the L1 cache. To be more specific, LSUnit doesn't know anything about
-/// cache hierarchy and memory types.
-/// It only knows if an instruction "mayLoad" and/or "mayStore". For loads, the
-/// scheduling model provides an "optimistic" load-to-use latency (which usually
-/// matches the load-to-use latency for when there is a hit in the L1D).
-/// Derived classes may expand this knowledge.
-///
-/// Class MCInstrDesc in LLVM doesn't know about serializing operations, nor
-/// memory-barrier like instructions.
-/// LSUnit conservatively assumes that an instruction which `mayLoad` and has
-/// `unmodeled side effects` behaves like a "soft" load-barrier. That means it
-/// serializes loads without forcing a flush of the load queue.
-/// Similarly, instructions that both `mayStore` and have `unmodeled side
-/// effects` are treated like store barriers. A full memory
-/// barrier is a 'mayLoad' and 'mayStore' instruction with unmodeled side
-/// effects. This is obviously inaccurate, but this is the best that we can do
-/// at the moment.
-///
-/// Each load/store barrier consumes one entry in the load/store queue. A
-/// load/store barrier enforces ordering of loads/stores:
-///  - A younger load cannot pass a load barrier.
-///  - A younger store cannot pass a store barrier.
-///
-/// A younger load has to wait for the memory load barrier to execute.
-/// A load/store barrier is "executed" when it becomes the oldest entry in
-/// the load/store queue(s). That also means, all the older loads/stores have
-/// already been executed.
-class LSUnit : public HardwareUnit {
-  // Load queue size.
-  // LQ_Size == 0 means that there are infinite slots in the load queue.
-  unsigned LQ_Size;
-
-  // Store queue size.
-  // SQ_Size == 0 means that there are infinite slots in the store queue.
-  unsigned SQ_Size;
-
-  // If true, loads will never alias with stores. This is the default.
-  bool NoAlias;
-
-  // When a `MayLoad` instruction is dispatched to the schedulers for execution,
-  // the LSUnit reserves an entry in the `LoadQueue` for it.
-  //
-  // LoadQueue keeps track of all the loads that are in-flight. A load
-  // instruction is eventually removed from the LoadQueue when it reaches
-  // completion stage. That means a load leaves the queue when it is 'executed',
-  // and its value can be forwarded on the data path to outside units.
-  //
-  // This class doesn't know about the latency of a load instruction. So, it
-  // conservatively/pessimistically assumes that the latency of a load opcode
-  // matches the instruction latency.
-  //
-  // FIXME: In the absence of cache misses (i.e. L1I/L1D/iTLB/dTLB hits/misses),
-  // and load/store conflicts, the latency of a load is determined by the depth
-  // of the load pipeline. So, we could use field `LoadLatency` in the
-  // MCSchedModel to model that latency.
-  // Field `LoadLatency` often matches the so-called 'load-to-use' latency from
-  // L1D, and it usually already accounts for any extra latency due to data
-  // forwarding.
-  // When doing throughput analysis, `LoadLatency` is likely to
-  // be a better predictor of load latency than instruction latency. This is
-  // particularly true when simulating code with temporal/spatial locality of
-  // memory accesses.
-  // Using `LoadLatency` (instead of the instruction latency) is also expected
-  // to improve the load queue allocation for long latency instructions with
-  // folded memory operands (See PR39829).
-  //
-  // FIXME: On some processors, load/store operations are split into multiple
-  // uOps. For example, X86 AMD Jaguar natively supports 128-bit data types, but
-  // not 256-bit data types. So, a 256-bit load is effectively split into two
-  // 128-bit loads, and each split load consumes one 'LoadQueue' entry. For
-  // simplicity, this class optimistically assumes that a load instruction only
-  // consumes one entry in the LoadQueue.  Similarly, store instructions only
-  // consume a single entry in the StoreQueue.
-  // In future, we should reassess the quality of this design, and consider
-  // alternative approaches that let instructions specify the number of
-  // load/store queue entries which they consume at dispatch stage (See
-  // PR39830).
-  SmallSet<unsigned, 16> LoadQueue;
-  SmallSet<unsigned, 16> StoreQueue;
-
-  void assignLQSlot(unsigned Index);
-  void assignSQSlot(unsigned Index);
-  bool isReadyNoAlias(unsigned Index) const;
-
-  // An instruction that both 'mayStore' and 'HasUnmodeledSideEffects' is
-  // conservatively treated as a store barrier. It forces older stores to be
-  // executed before newer stores are issued.
-  SmallSet<unsigned, 8> StoreBarriers;
-
-  // An instruction that both 'MayLoad' and 'HasUnmodeledSideEffects' is
-  // conservatively treated as a load barrier. It forces older loads to execute
-  // before newer loads are issued.
-  SmallSet<unsigned, 8> LoadBarriers;
-
-  bool isSQEmpty() const { return StoreQueue.empty(); }
-  bool isLQEmpty() const { return LoadQueue.empty(); }
-  bool isSQFull() const { return SQ_Size != 0 && StoreQueue.size() == SQ_Size; }
-  bool isLQFull() const { return LQ_Size != 0 && LoadQueue.size() == LQ_Size; }
-
-public:
-  LSUnit(const MCSchedModel &SM, unsigned LQ = 0, unsigned SQ = 0,
-         bool AssumeNoAlias = false);
-
-#ifndef NDEBUG
-  void dump() const;
-#endif
-
-  enum Status { LSU_AVAILABLE = 0, LSU_LQUEUE_FULL, LSU_SQUEUE_FULL };
-
-  // Returns LSU_AVAILABLE if there are enough load/store queue entries to serve
-  // IR. It also returns LSU_AVAILABLE if IR is not a memory operation.
-  Status isAvailable(const InstRef &IR) const;
-
-  // Allocates load/store queue resources for IR.
-  //
-  // This method assumes that a previous call to `isAvailable(IR)` returned
-  // LSU_AVAILABLE, and that IR is a memory operation.
-  void dispatch(const InstRef &IR);
-
-  // By default, rules are:
-  // 1. A store may not pass a previous store.
-  // 2. A load may not pass a previous store unless flag 'NoAlias' is set.
-  // 3. A load may pass a previous load.
-  // 4. A store may not pass a previous load (regardless of flag 'NoAlias').
-  // 5. A load has to wait until an older load barrier is fully executed.
-  // 6. A store has to wait until an older store barrier is fully executed.
-  virtual bool isReady(const InstRef &IR) const;
-
-  // Load and store instructions are tracked by their corresponding queues from
-  // dispatch until the "instruction executed" event.
-  // Only when a load instruction reaches the 'Executed' stage, its value
-  // becomes available to the users. At that point, the load no longer needs to
-  // be tracked by the load queue.
-  // FIXME: For simplicity, we optimistically assume a similar behavior for
-  // store instructions. In practice, store operations don't tend to leave the
-  // store queue until they reach the 'Retired' stage (See PR39830).
-  void onInstructionExecuted(const InstRef &IR);
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif
diff --git a/tools/llvm-mca/include/HardwareUnits/RegisterFile.h b/tools/llvm-mca/include/HardwareUnits/RegisterFile.h
deleted file mode 100644
index d9949bf..0000000
--- a/tools/llvm-mca/include/HardwareUnits/RegisterFile.h
+++ /dev/null
@@ -1,239 +0,0 @@
-//===--------------------- RegisterFile.h -----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines a register mapping file class.  This class is responsible
-/// for managing hardware register files and the tracking of data dependencies
-/// between registers.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_REGISTER_FILE_H
-#define LLVM_TOOLS_LLVM_MCA_REGISTER_FILE_H
-
-#include "HardwareUnits/HardwareUnit.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSchedule.h"
-#include "llvm/Support/Error.h"
-
-namespace llvm {
-namespace mca {
-
-class ReadState;
-class WriteState;
-class WriteRef;
-
-/// Manages hardware register files, and tracks register definitions for
-/// register renaming purposes.
-class RegisterFile : public HardwareUnit {
-  const MCRegisterInfo &MRI;
-
-  // class RegisterMappingTracker is a  physical register file (PRF) descriptor.
-  // There is one RegisterMappingTracker for every PRF definition in the
-  // scheduling model.
-  //
-  // An instance of RegisterMappingTracker tracks the number of physical
-  // registers available for renaming. It also tracks  the number of register
-  // moves eliminated per cycle.
-  struct RegisterMappingTracker {
-    // The total number of physical registers that are available in this
-    // register file for register renaming purposes.  A value of zero for this
-    // field means: this register file has an unbounded number of physical
-    // registers.
-    const unsigned NumPhysRegs;
-    // Number of physical registers that are currently in use.
-    unsigned NumUsedPhysRegs;
-
-    // Maximum number of register moves that can be eliminated by this PRF every
-    // cycle. A value of zero means that there is no limit in the number of
-    // moves which can be eliminated every cycle.
-    const unsigned MaxMoveEliminatedPerCycle;
-
-    // Number of register moves eliminated during this cycle.
-    //
-    // This value is increased by one every time a register move is eliminated.
-    // Every new cycle, this value is reset to zero.
-    // A move can be eliminated only if MaxMoveEliminatedPerCycle is zero, or if
-    // NumMoveEliminated is less than MaxMoveEliminatedPerCycle.
-    unsigned NumMoveEliminated;
-
-    // If set, move elimination is restricted to zero-register moves only.
-    bool AllowZeroMoveEliminationOnly;
-
-    RegisterMappingTracker(unsigned NumPhysRegisters,
-                           unsigned MaxMoveEliminated = 0U,
-                           bool AllowZeroMoveElimOnly = false)
-        : NumPhysRegs(NumPhysRegisters), NumUsedPhysRegs(0),
-          MaxMoveEliminatedPerCycle(MaxMoveEliminated), NumMoveEliminated(0U),
-          AllowZeroMoveEliminationOnly(AllowZeroMoveElimOnly) {}
-  };
-
-  // A vector of register file descriptors.  This set always contains at least
-  // one entry. Entry at index #0 is reserved.  That entry describes a register
-  // file with an unbounded number of physical registers that "sees" all the
-  // hardware registers declared by the target (i.e. all the register
-  // definitions in the target specific `XYZRegisterInfo.td` - where `XYZ` is
-  // the target name).
-  //
-  // Users can limit the number of physical registers that are available in
-  // register file #0 by specifying command line flag `-register-file-size=<uint>`.
-  SmallVector<RegisterMappingTracker, 4> RegisterFiles;
-
-  // This type is used to propagate information about the owner of a register,
-  // and the cost of allocating it in the PRF. Register cost is defined as the
-  // number of physical registers consumed by the PRF to allocate a user
-  // register.
-  //
-  // For example: on X86 BtVer2, a YMM register consumes 2 128-bit physical
-  // registers. So, the cost of allocating a YMM register in BtVer2 is 2.
-  using IndexPlusCostPairTy = std::pair<unsigned, unsigned>;
-
-  // Struct RegisterRenamingInfo is used to map logical registers to register
-  // files.
-  //
-  // There is a RegisterRenamingInfo object for every logical register defined
-  // by the target. RegisterRenamingInfo objects are stored into vector
-  // `RegisterMappings`, and MCPhysReg IDs can be used to reference
-  // elements in that vector.
-  //
-  // Each RegisterRenamingInfo is owned by a PRF, and field `IndexPlusCost`
-  // specifies both the owning PRF, as well as the number of physical registers
-  // consumed at register renaming stage.
-  //
-  // Field `AllowMoveElimination` is set for registers that are used as
-  // destination by optimizable register moves.
-  //
-  // Field `AliasRegID` is set by writes from register moves that have been
-  // eliminated at register renaming stage. A move eliminated at register
-  // renaming stage is effectively bypassed, and its write aliases the source
-  // register definition.
-  struct RegisterRenamingInfo {
-    IndexPlusCostPairTy IndexPlusCost;
-    MCPhysReg RenameAs;
-    MCPhysReg AliasRegID;
-    bool AllowMoveElimination;
-    RegisterRenamingInfo()
-        : IndexPlusCost(std::make_pair(0U, 1U)), RenameAs(0U), AliasRegID(0U),
-          AllowMoveElimination(false) {}
-  };
-
-  // RegisterMapping objects are mainly used to track physical register
-  // definitions and resolve data dependencies.
-  //
-  // Every register declared by the Target is associated with an instance of
-  // RegisterMapping. RegisterMapping objects keep track of writes to a logical
-  // register.  That information is used by class RegisterFile to resolve data
-  // dependencies, and correctly set latencies for register uses.
-  //
-  // This implementation does not allow overlapping register files. The only
-  // register file that is allowed to overlap with other register files is
-  // register file #0. If we exclude register #0, every register is "owned" by
-  // at most one register file.
-  using RegisterMapping = std::pair<WriteRef, RegisterRenamingInfo>;
-
-  // There is one entry per each register defined by the target.
-  std::vector<RegisterMapping> RegisterMappings;
-
-  // Used to track zero registers. There is one bit for each register defined by
-  // the target. Bits are set for registers that are known to be zero.
-  APInt ZeroRegisters;
-
-  // This method creates a new register file descriptor.
-  // The new register file owns all of the registers declared by register
-  // classes in the 'RegisterClasses' set.
-  //
-  // Processor models allow the definition of RegisterFile(s) via tablegen. For
-  // example, this is a tablegen definition for a x86 register file for
-  // XMM[0-15] and YMM[0-15], that allows up to 60 renames (each rename costs 1
-  // physical register).
-  //
-  //    def FPRegisterFile : RegisterFile<60, [VR128RegClass, VR256RegClass]>
-  //
-  // Here FPRegisterFile contains all the registers defined by register class
-  // VR128RegClass and VR256RegClass. FPRegisterFile implements 60
-  // registers which can be used for register renaming purpose.
-  void addRegisterFile(const MCRegisterFileDesc &RF,
-                       ArrayRef<MCRegisterCostEntry> Entries);
-
-  // Consumes physical registers in each register file specified by the
-  // `IndexPlusCostPairTy`. This method is called from `addRegisterMapping()`.
-  void allocatePhysRegs(const RegisterRenamingInfo &Entry,
-                        MutableArrayRef<unsigned> UsedPhysRegs);
-
-  // Releases previously allocated physical registers from the register file(s).
-  // This method is called from `invalidateRegisterMapping()`.
-  void freePhysRegs(const RegisterRenamingInfo &Entry,
-                    MutableArrayRef<unsigned> FreedPhysRegs);
-
-  // Collects writes that are in a RAW dependency with RS.
-  // This method is called from `addRegisterRead()`.
-  void collectWrites(const ReadState &RS,
-                     SmallVectorImpl<WriteRef> &Writes) const;
-
-  // Create an instance of RegisterMappingTracker for every register file
-  // specified by the processor model.
-  // If no register file is specified, then this method creates a default
-  // register file with an unbounded number of physical registers.
-  void initialize(const MCSchedModel &SM, unsigned NumRegs);
-
-public:
-  RegisterFile(const MCSchedModel &SM, const MCRegisterInfo &mri,
-               unsigned NumRegs = 0);
-
-  // This method updates the register mappings inserting a new register
-  // definition. This method is also responsible for updating the number of
-  // allocated physical registers in each register file modified by the write.
-  // No physical register is allocated if this write is from a zero-idiom.
-  void addRegisterWrite(WriteRef Write, MutableArrayRef<unsigned> UsedPhysRegs);
-
-  // Collect writes that are in a data dependency with RS, and update RS
-  // internal state.
-  void addRegisterRead(ReadState &RS, SmallVectorImpl<WriteRef> &Writes) const;
-
-  // Removes write \param WS from the register mappings.
-  // Physical registers may be released to reflect this update.
-  // No registers are released if this write is from a zero-idiom.
-  void removeRegisterWrite(const WriteState &WS,
-                           MutableArrayRef<unsigned> FreedPhysRegs);
-
-  // Returns true if a move from RS to WS can be eliminated.
-  // On success, it updates WriteState by setting flag `WS.isEliminated`.
-  // If RS is a read from a zero register, and WS is eliminated, then
-  // `WS.WritesZero` is also set, so that method addRegisterWrite() would not
-  // reserve a physical register for it.
-  bool tryEliminateMove(WriteState &WS, ReadState &RS);
-
-  // Checks if there are enough physical registers in the register files.
-  // Returns a "response mask" where each bit represents the response from a
-  // different register file.  A mask of all zeroes means that all register
-  // files are available.  Otherwise, the mask can be used to identify which
-  // register file was busy.  This semantic allows us to classify dispatch
-  // stalls caused by the lack of register file resources.
-  //
-  // Current implementation can simulate up to 32 register files (including the
-  // special register file at index #0).
-  unsigned isAvailable(ArrayRef<unsigned> Regs) const;
-
-  // Returns the number of PRFs implemented by this processor.
-  unsigned getNumRegisterFiles() const { return RegisterFiles.size(); }
-
-  // Notify each PRF that a new cycle just started.
-  void cycleStart();
-
-#ifndef NDEBUG
-  void dump() const;
-#endif
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_REGISTER_FILE_H
diff --git a/tools/llvm-mca/include/HardwareUnits/ResourceManager.h b/tools/llvm-mca/include/HardwareUnits/ResourceManager.h
deleted file mode 100644
index aa1bdb0..0000000
--- a/tools/llvm-mca/include/HardwareUnits/ResourceManager.h
+++ /dev/null
@@ -1,360 +0,0 @@
-//===--------------------- ResourceManager.h --------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// The classes here represent processor resource units and their management
-/// strategy.  These classes are managed by the Scheduler.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_RESOURCE_MANAGER_H
-#define LLVM_TOOLS_LLVM_MCA_RESOURCE_MANAGER_H
-
-#include "Instruction.h"
-#include "Support.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/MC/MCSchedule.h"
-
-namespace llvm {
-namespace mca {
-
-/// Used to notify the internal state of a processor resource.
-///
-/// A processor resource is available if it is not reserved, and there are
-/// available slots in the buffer.  A processor resource is unavailable if it
-/// is either reserved, or the associated buffer is full. A processor resource
-/// with a buffer size of -1 is always available if it is not reserved.
-///
-/// Values of type ResourceStateEvent are returned by method
-/// ResourceState::isBufferAvailable(), which is used to query the internal
-/// state of a resource.
-///
-/// The naming convention for resource state events is:
-///  * Event names start with prefix RS_
-///  * Prefix RS_ is followed by a string describing the actual resource state.
-enum ResourceStateEvent {
-  RS_BUFFER_AVAILABLE,
-  RS_BUFFER_UNAVAILABLE,
-  RS_RESERVED
-};
-
-/// Resource allocation strategy used by hardware scheduler resources.
-class ResourceStrategy {
-  ResourceStrategy(const ResourceStrategy &) = delete;
-  ResourceStrategy &operator=(const ResourceStrategy &) = delete;
-
-public:
-  ResourceStrategy() {}
-  virtual ~ResourceStrategy();
-
-  /// Selects a processor resource unit from a ReadyMask.
-  virtual uint64_t select(uint64_t ReadyMask) = 0;
-
-  /// Called by the ResourceManager when a processor resource group, or a
-  /// processor resource with multiple units has become unavailable.
-  ///
-  /// The default strategy uses this information to bias its selection logic.
-  virtual void used(uint64_t ResourceMask) {}
-};
-
-/// Default resource allocation strategy used by processor resource groups and
-/// processor resources with multiple units.
-class DefaultResourceStrategy final : public ResourceStrategy {
-  /// A Mask of resource unit identifiers.
-  ///
-  /// There is one bit set for every available resource unit.
-  /// It defaults to the value of field ResourceSizeMask in ResourceState.
-  const unsigned ResourceUnitMask;
-
-  /// A simple round-robin selector for processor resource units.
-  /// Each bit of this mask identifies a sub resource within a group.
-  ///
-  /// As an example, lets assume that this is a default policy for a
-  /// processor resource group composed by the following three units:
-  ///   ResourceA -- 0b001
-  ///   ResourceB -- 0b010
-  ///   ResourceC -- 0b100
-  ///
-  /// Field NextInSequenceMask is used to select the next unit from the set of
-  /// resource units. It defaults to the value of field `ResourceUnitMasks` (in
-  /// this example, it defaults to mask '0b111').
-  ///
-  /// The round-robin selector would firstly select 'ResourceC', then
-  /// 'ResourceB', and eventually 'ResourceA'.  When a resource R is used, the
-  /// corresponding bit in NextInSequenceMask is cleared.  For example, if
-  /// 'ResourceC' is selected, then the new value of NextInSequenceMask becomes
-  /// 0xb011.
-  ///
-  /// When NextInSequenceMask becomes zero, it is automatically reset to the
-  /// default value (i.e. ResourceUnitMask).
-  uint64_t NextInSequenceMask;
-
-  /// This field is used to track resource units that are used (i.e. selected)
-  /// by other groups other than the one associated with this strategy object.
-  ///
-  /// In LLVM processor resource groups are allowed to partially (or fully)
-  /// overlap. That means, a same unit may be visible to multiple groups.
-  /// This field keeps track of uses that have originated from outside of
-  /// this group. The idea is to bias the selection strategy, so that resources
-  /// that haven't been used by other groups get prioritized.
-  ///
-  /// The end goal is to (try to) keep the resource distribution as much uniform
-  /// as possible. By construction, this mask only tracks one-level of resource
-  /// usage. Therefore, this strategy is expected to be less accurate when same
-  /// units are used multiple times by other groups within a single round of
-  /// select.
-  ///
-  /// Note: an LRU selector would have a better accuracy at the cost of being
-  /// slightly more expensive (mostly in terms of runtime cost). Methods
-  /// 'select' and 'used', are always in the hot execution path of llvm-mca.
-  /// Therefore, a slow implementation of 'select' would have a negative impact
-  /// on the overall performance of the tool.
-  uint64_t RemovedFromNextInSequence;
-
-public:
-  DefaultResourceStrategy(uint64_t UnitMask)
-      : ResourceStrategy(), ResourceUnitMask(UnitMask),
-        NextInSequenceMask(UnitMask), RemovedFromNextInSequence(0) {}
-  virtual ~DefaultResourceStrategy() = default;
-
-  uint64_t select(uint64_t ReadyMask) override;
-  void used(uint64_t Mask) override;
-};
-
-/// A processor resource descriptor.
-///
-/// There is an instance of this class for every processor resource defined by
-/// the machine scheduling model.
-/// Objects of class ResourceState dynamically track the usage of processor
-/// resource units.
-class ResourceState {
-  /// An index to the MCProcResourceDesc entry in the processor model.
-  const unsigned ProcResourceDescIndex;
-  /// A resource mask. This is generated by the tool with the help of
-  /// function `mca::createProcResourceMasks' (see Support.h).
-  const uint64_t ResourceMask;
-
-  /// A ProcResource can have multiple units.
-  ///
-  /// For processor resource groups,
-  /// this field default to the value of field `ResourceMask`; the number of
-  /// bits set is equal to the cardinality of the group.  For normal (i.e.
-  /// non-group) resources, the number of bits set in this mask is equivalent
-  /// to the number of units declared by the processor model (see field
-  /// 'NumUnits' in 'ProcResourceUnits').
-  uint64_t ResourceSizeMask;
-
-  /// A mask of ready units.
-  uint64_t ReadyMask;
-
-  /// Buffered resources will have this field set to a positive number different
-  /// than zero. A buffered resource behaves like a reservation station
-  /// implementing its own buffer for out-of-order execution.
-  ///
-  /// A BufferSize of 1 is used by scheduler resources that force in-order
-  /// execution.
-  ///
-  /// A BufferSize of 0 is used to model in-order issue/dispatch resources.
-  /// Since in-order issue/dispatch resources don't implement buffers, dispatch
-  /// events coincide with issue events.
-  /// Also, no other instruction can be dispatched/issued while this resource is
-  /// in use. Only when all the "resource cycles" are consumed (after the issue
-  /// event) can a new instruction be dispatched.
-  const int BufferSize;
-
-  /// Available slots in the buffer (zero, if this is not a buffered resource).
-  unsigned AvailableSlots;
-
-  /// This field is set if this resource is currently reserved.
-  ///
-  /// Resources can be reserved for a number of cycles.
-  /// Instructions can still be dispatched to reserved resources. However,
-  /// instructions dispatched to a reserved resource cannot be issued to the
-  /// underlying units (i.e. pipelines) until the resource is released.
-  bool Unavailable;
-
-  const bool IsAGroup;
-
-  /// Checks for the availability of unit 'SubResMask' in the group.
-  bool isSubResourceReady(uint64_t SubResMask) const {
-    return ReadyMask & SubResMask;
-  }
-
-public:
-  ResourceState(const MCProcResourceDesc &Desc, unsigned Index, uint64_t Mask);
-
-  unsigned getProcResourceID() const { return ProcResourceDescIndex; }
-  uint64_t getResourceMask() const { return ResourceMask; }
-  uint64_t getReadyMask() const { return ReadyMask; }
-  int getBufferSize() const { return BufferSize; }
-
-  bool isBuffered() const { return BufferSize > 0; }
-  bool isInOrder() const { return BufferSize == 1; }
-
-  /// Returns true if this is an in-order dispatch/issue resource.
-  bool isADispatchHazard() const { return BufferSize == 0; }
-  bool isReserved() const { return Unavailable; }
-
-  void setReserved() { Unavailable = true; }
-  void clearReserved() { Unavailable = false; }
-
-  /// Returns true if this resource is not reserved, and if there are at least
-  /// `NumUnits` available units.
-  bool isReady(unsigned NumUnits = 1) const;
-
-  bool isAResourceGroup() const { return IsAGroup; }
-
-  bool containsResource(uint64_t ID) const { return ResourceMask & ID; }
-
-  void markSubResourceAsUsed(uint64_t ID) {
-    assert(isSubResourceReady(ID));
-    ReadyMask ^= ID;
-  }
-
-  void releaseSubResource(uint64_t ID) {
-    assert(!isSubResourceReady(ID));
-    ReadyMask ^= ID;
-  }
-
-  unsigned getNumUnits() const {
-    return isAResourceGroup() ? 1U : countPopulation(ResourceSizeMask);
-  }
-
-  /// Checks if there is an available slot in the resource buffer.
-  ///
-  /// Returns RS_BUFFER_AVAILABLE if this is not a buffered resource, or if
-  /// there is a slot available.
-  ///
-  /// Returns RS_RESERVED if this buffered resource is a dispatch hazard, and it
-  /// is reserved.
-  ///
-  /// Returns RS_BUFFER_UNAVAILABLE if there are no available slots.
-  ResourceStateEvent isBufferAvailable() const;
-
-  /// Reserve a slot in the buffer.
-  void reserveBuffer() {
-    if (AvailableSlots)
-      AvailableSlots--;
-  }
-
-  /// Release a slot in the buffer.
-  void releaseBuffer() {
-    if (BufferSize > 0)
-      AvailableSlots++;
-    assert(AvailableSlots <= static_cast<unsigned>(BufferSize));
-  }
-
-#ifndef NDEBUG
-  void dump() const;
-#endif
-};
-
-/// A resource unit identifier.
-///
-/// This is used to identify a specific processor resource unit using a pair
-/// of indices where the 'first' index is a processor resource mask, and the
-/// 'second' index is an index for a "sub-resource" (i.e. unit).
-typedef std::pair<uint64_t, uint64_t> ResourceRef;
-
-// First: a MCProcResourceDesc index identifying a buffered resource.
-// Second: max number of buffer entries used in this resource.
-typedef std::pair<unsigned, unsigned> BufferUsageEntry;
-
-/// A resource manager for processor resource units and groups.
-///
-/// This class owns all the ResourceState objects, and it is responsible for
-/// acting on requests from a Scheduler by updating the internal state of
-/// ResourceState objects.
-/// This class doesn't know about instruction itineraries and functional units.
-/// In future, it can be extended to support itineraries too through the same
-/// public interface.
-class ResourceManager {
-  // The resource manager owns all the ResourceState.
-  std::vector<std::unique_ptr<ResourceState>> Resources;
-  std::vector<std::unique_ptr<ResourceStrategy>> Strategies;
-
-  // Keeps track of which resources are busy, and how many cycles are left
-  // before those become usable again.
-  SmallDenseMap<ResourceRef, unsigned> BusyResources;
-
-  // A table to map processor resource IDs to processor resource masks.
-  SmallVector<uint64_t, 8> ProcResID2Mask;
-
-  // Returns the actual resource unit that will be used.
-  ResourceRef selectPipe(uint64_t ResourceID);
-
-  void use(const ResourceRef &RR);
-  void release(const ResourceRef &RR);
-
-  unsigned getNumUnits(uint64_t ResourceID) const;
-
-  // Overrides the selection strategy for the processor resource with the given
-  // mask.
-  void setCustomStrategyImpl(std::unique_ptr<ResourceStrategy> S,
-                             uint64_t ResourceMask);
-
-public:
-  ResourceManager(const MCSchedModel &SM);
-  virtual ~ResourceManager() = default;
-
-  // Overrides the selection strategy for the resource at index ResourceID in
-  // the MCProcResourceDesc table.
-  void setCustomStrategy(std::unique_ptr<ResourceStrategy> S,
-                         unsigned ResourceID) {
-    assert(ResourceID < ProcResID2Mask.size() &&
-           "Invalid resource index in input!");
-    return setCustomStrategyImpl(std::move(S), ProcResID2Mask[ResourceID]);
-  }
-
-  // Returns RS_BUFFER_AVAILABLE if buffered resources are not reserved, and if
-  // there are enough available slots in the buffers.
-  ResourceStateEvent canBeDispatched(ArrayRef<uint64_t> Buffers) const;
-
-  // Return the processor resource identifier associated to this Mask.
-  unsigned resolveResourceMask(uint64_t Mask) const;
-
-  // Consume a slot in every buffered resource from array 'Buffers'. Resource
-  // units that are dispatch hazards (i.e. BufferSize=0) are marked as reserved.
-  void reserveBuffers(ArrayRef<uint64_t> Buffers);
-
-  // Release buffer entries previously allocated by method reserveBuffers.
-  void releaseBuffers(ArrayRef<uint64_t> Buffers);
-
-  // Reserve a processor resource. A reserved resource is not available for
-  // instruction issue until it is released.
-  void reserveResource(uint64_t ResourceID);
-
-  // Release a previously reserved processor resource.
-  void releaseResource(uint64_t ResourceID);
-
-  // Returns true if all resources are in-order, and there is at least one
-  // resource which is a dispatch hazard (BufferSize = 0).
-  bool mustIssueImmediately(const InstrDesc &Desc) const;
-
-  bool canBeIssued(const InstrDesc &Desc) const;
-
-  void issueInstruction(
-      const InstrDesc &Desc,
-      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);
-
-  void cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed);
-
-#ifndef NDEBUG
-  void dump() const {
-    for (const std::unique_ptr<ResourceState> &Resource : Resources)
-      Resource->dump();
-  }
-#endif
-};
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_RESOURCE_MANAGER_H
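
The ready-mask updates above (markSubResourceAsUsed / releaseSubResource) are plain XOR bookkeeping over a bitmask of sub-resource units. A minimal standalone sketch of that idea in plain C++ (not the llvm-mca types; in the real ResourceManager the unit is picked by a ResourceStrategy):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t ReadyMask = 0b111; // three units, all available
  uint64_t Unit = 0b001;      // unit chosen by some selection strategy
  ReadyMask ^= Unit;          // markSubResourceAsUsed: clear the ready bit
  assert(!(ReadyMask & Unit) && "unit must now be busy");
  ReadyMask ^= Unit;          // releaseSubResource: set the ready bit again
  assert(ReadyMask == 0b111 && "all units available again");
}
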
diff --git a/tools/llvm-mca/include/HardwareUnits/RetireControlUnit.h b/tools/llvm-mca/include/HardwareUnits/RetireControlUnit.h
deleted file mode 100644
index 12e0a1f..0000000
--- a/tools/llvm-mca/include/HardwareUnits/RetireControlUnit.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//===---------------------- RetireControlUnit.h -----------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file simulates the hardware responsible for retiring instructions.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_RETIRE_CONTROL_UNIT_H
-#define LLVM_TOOLS_LLVM_MCA_RETIRE_CONTROL_UNIT_H
-
-#include "HardwareUnits/HardwareUnit.h"
-#include "Instruction.h"
-#include "llvm/MC/MCSchedule.h"
-#include <vector>
-
-namespace llvm {
-namespace mca {
-
-/// This class tracks which instructions are in-flight (i.e., dispatched but not
-/// retired) in the OoO backend.
-///
-/// This class checks on every cycle if/which instructions can be retired.
-/// Instructions are retired in program order.
-/// In the event of an instruction being retired, the pipeline that owns
-/// this RetireControlUnit (RCU) gets notified.
-///
-/// When an instruction is retired, its register updates are architecturally
-/// committed, and any physical registers previously allocated for the
-/// retired instruction are freed.
-struct RetireControlUnit : public HardwareUnit {
-  // A RUToken is created by the RCU for every instruction dispatched to the
-  // schedulers.  These "tokens" are managed by the RCU in its token Queue.
-  //
-  // On every cycle ('cycleEvent'), the RCU iterates through the token queue
-  // looking for any token with its 'Executed' flag set.  If a token has that
-  // flag set, then the instruction has reached the write-back stage and will
-  // be retired by the RCU.
-  //
-  // 'NumSlots' represents the number of entries consumed by the instruction in
-  // the reorder buffer. Those entries will become available again once the
-  // instruction is retired.
-  //
-  // Note that the size of the reorder buffer is defined by the scheduling
-  // model via field 'NumMicroOpBufferSize'.
-  struct RUToken {
-    InstRef IR;
-    unsigned NumSlots; // Slots reserved to this instruction.
-    bool Executed;     // True if the instruction is past the WB stage.
-  };
-
-private:
-  unsigned NextAvailableSlotIdx;
-  unsigned CurrentInstructionSlotIdx;
-  unsigned AvailableSlots;
-  unsigned MaxRetirePerCycle; // 0 means no limit.
-  std::vector<RUToken> Queue;
-
-public:
-  RetireControlUnit(const MCSchedModel &SM);
-
-  bool isEmpty() const { return AvailableSlots == Queue.size(); }
-  bool isAvailable(unsigned Quantity = 1) const {
-    // Some instructions may declare a number of uOps which exceeds the size
-    // of the reorder buffer. To avoid problems, cap the amount of slots to
-    // the size of the reorder buffer.
-    Quantity = std::min(Quantity, static_cast<unsigned>(Queue.size()));
-
-    // Further normalize the number of micro opcodes for instructions that
-    // declare zero opcodes. This should match the behavior of method
-    // reserveSlot().
-    Quantity = std::max(Quantity, 1U);
-    return AvailableSlots >= Quantity;
-  }
-
-  unsigned getMaxRetirePerCycle() const { return MaxRetirePerCycle; }
-
-  // Reserves a number of slots, and returns a new token.
-  unsigned reserveSlot(const InstRef &IS, unsigned NumMicroOps);
-
-  // Return the current token from the RCU's circular token queue.
-  const RUToken &peekCurrentToken() const;
-
-  // Advance the pointer to the next token in the circular token queue.
-  void consumeCurrentToken();
-
-  // Update the RCU token to represent the executed state.
-  void onInstructionExecuted(unsigned TokenID);
-
-#ifndef NDEBUG
-  void dump() const;
-#endif
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_RETIRE_CONTROL_UNIT_H
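
A self-contained toy model of the circular token queue described above (illustrative names only; the real RCU also honors MaxRetirePerCycle and notifies the pipeline on every retirement):

#include <cassert>
#include <vector>

struct Token { unsigned Slots; bool Executed; };

// Toy reorder buffer: a token occupies 'Slots' consecutive entries and
// tokens retire strictly in program order, starting from 'Current'.
struct MiniRCU {
  std::vector<Token> Queue;
  unsigned Next = 0, Current = 0, Available;

  explicit MiniRCU(unsigned Size) : Queue(Size), Available(Size) {}

  unsigned reserveSlot(unsigned Slots) {
    assert(Available >= Slots && "reorder buffer is full");
    unsigned TokenID = Next;
    Queue[TokenID] = {Slots, false};
    Next = (Next + Slots) % Queue.size();
    Available -= Slots;
    return TokenID;
  }

  void retireIfReady() {
    Token &T = Queue[Current];
    if (!T.Executed)
      return; // the oldest token has not executed yet: nothing retires
    Available += T.Slots;
    Current = (Current + T.Slots) % Queue.size();
  }
};

int main() {
  MiniRCU RCU(8);
  unsigned A = RCU.reserveSlot(2);
  unsigned B = RCU.reserveSlot(1);
  RCU.Queue[B].Executed = true;
  RCU.retireIfReady(); // B finished first, but A is older: no retirement
  assert(RCU.Available == 5);
  RCU.Queue[A].Executed = true;
  RCU.retireIfReady(); // A retires...
  RCU.retireIfReady(); // ...then B
  assert(RCU.Available == 8);
}
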
diff --git a/tools/llvm-mca/include/HardwareUnits/Scheduler.h b/tools/llvm-mca/include/HardwareUnits/Scheduler.h
deleted file mode 100644
index a8d00d2..0000000
--- a/tools/llvm-mca/include/HardwareUnits/Scheduler.h
+++ /dev/null
@@ -1,214 +0,0 @@
-//===--------------------- Scheduler.h ------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// A scheduler for Processor Resource Units and Processor Resource Groups.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_SCHEDULER_H
-#define LLVM_TOOLS_LLVM_MCA_SCHEDULER_H
-
-#include "HardwareUnits/HardwareUnit.h"
-#include "HardwareUnits/LSUnit.h"
-#include "ResourceManager.h"
-#include "Support.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/MC/MCSchedule.h"
-
-namespace llvm {
-namespace mca {
-
-class SchedulerStrategy {
-public:
-  SchedulerStrategy() = default;
-  virtual ~SchedulerStrategy();
-
-  /// Returns true if Lhs should take priority over Rhs.
-  ///
-  /// This method is used by class Scheduler to select the "best" ready
-  /// instruction to issue to the underlying pipelines.
-  virtual bool compare(const InstRef &Lhs, const InstRef &Rhs) const = 0;
-};
-
-/// Default instruction selection strategy used by class Scheduler.
-class DefaultSchedulerStrategy : public SchedulerStrategy {
-  /// This method ranks instructions based on their age, and the number of known
-  /// users. The lower the rank value, the better.
-  int computeRank(const InstRef &Lhs) const {
-    return Lhs.getSourceIndex() - Lhs.getInstruction()->getNumUsers();
-  }
-
-public:
-  DefaultSchedulerStrategy() = default;
-  virtual ~DefaultSchedulerStrategy();
-
-  bool compare(const InstRef &Lhs, const InstRef &Rhs) const override {
-    int LhsRank = computeRank(Lhs);
-    int RhsRank = computeRank(Rhs);
-
-    /// Prioritize older instructions over younger instructions to minimize the
-    /// pressure on the reorder buffer.
-    if (LhsRank == RhsRank)
-      return Lhs.getSourceIndex() < Rhs.getSourceIndex();
-    return LhsRank < RhsRank;
-  }
-};
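
A worked example of that ranking, with made-up source indices and user counts:

#include <iostream>

// rank = source index - number of known users; lower is better.
int rank(int SourceIndex, int NumUsers) { return SourceIndex - NumUsers; }

int main() {
  // Hypothetical ready instructions: A is older and feeds three users;
  // B is younger and nothing waits on it.
  std::cout << rank(5, 3) << '\n'; // A: 2
  std::cout << rank(6, 0) << '\n'; // B: 6 -> compare() selects A first
}
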
-
-/// Class Scheduler is responsible for issuing instructions to pipeline
-/// resources.
-///
-/// Internally, it delegates to a ResourceManager the management of processor
-/// resources. This class is also responsible for tracking the progress of
-/// instructions from the dispatch stage, until the write-back stage.
-///
-/// An instruction dispatched to the Scheduler is initially placed into either
-/// the 'WaitSet' or the 'ReadySet' depending on the availability of the input
-/// operands.
-///
-/// An instruction is moved from the WaitSet to the ReadySet when register
-/// operands become available, and all memory dependencies are met.
-/// Instructions that are moved from the WaitSet to the ReadySet transition
-/// in state from 'IS_AVAILABLE' to 'IS_READY'.
-///
-/// On every cycle, the Scheduler checks if it can promote instructions from the
-/// WaitSet to the ReadySet.
-///
-/// An Instruction is moved from the ReadySet to the `IssuedSet` when it is issued
-/// to a (one or more) pipeline(s). This event also causes an instruction state
-/// transition (i.e. from state IS_READY, to state IS_EXECUTING). An Instruction
-/// leaves the IssuedSet when it reaches the write-back stage.
-class Scheduler : public HardwareUnit {
-  LSUnit &LSU;
-
-  // Instruction selection strategy for this Scheduler.
-  std::unique_ptr<SchedulerStrategy> Strategy;
-
-  // Hardware resources that are managed by this scheduler.
-  std::unique_ptr<ResourceManager> Resources;
-
-  std::vector<InstRef> WaitSet;
-  std::vector<InstRef> ReadySet;
-  std::vector<InstRef> IssuedSet;
-
-  /// Verify the given selection strategy and set the Strategy member
-  /// accordingly.  If no strategy is provided, the DefaultSchedulerStrategy is
-  /// used.
-  void initializeStrategy(std::unique_ptr<SchedulerStrategy> S);
-
-  /// Issue an instruction without updating the ready queue.
-  void issueInstructionImpl(
-      InstRef &IR,
-      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);
-
-  // Identify instructions that have finished executing, and remove them from
-  // the IssuedSet. References to executed instructions are added to input
-  // vector 'Executed'.
-  void updateIssuedSet(SmallVectorImpl<InstRef> &Executed);
-
-  // Try to promote instructions from WaitSet to ReadySet.
-  // Add promoted instructions to the 'Ready' vector in input.
-  void promoteToReadySet(SmallVectorImpl<InstRef> &Ready);
-
-public:
-  Scheduler(const MCSchedModel &Model, LSUnit &Lsu)
-      : Scheduler(Model, Lsu, nullptr) {}
-
-  Scheduler(const MCSchedModel &Model, LSUnit &Lsu,
-            std::unique_ptr<SchedulerStrategy> SelectStrategy)
-      : Scheduler(make_unique<ResourceManager>(Model), Lsu,
-                  std::move(SelectStrategy)) {}
-
-  Scheduler(std::unique_ptr<ResourceManager> RM, LSUnit &Lsu,
-            std::unique_ptr<SchedulerStrategy> SelectStrategy)
-      : LSU(Lsu), Resources(std::move(RM)) {
-    initializeStrategy(std::move(SelectStrategy));
-  }
-
-  // Stalls generated by the scheduler.
-  enum Status {
-    SC_AVAILABLE,
-    SC_LOAD_QUEUE_FULL,
-    SC_STORE_QUEUE_FULL,
-    SC_BUFFERS_FULL,
-    SC_DISPATCH_GROUP_STALL,
-  };
-
-  /// Checks if the instruction in 'IR' can be dispatched and returns an answer
-  /// in the form of a Status value.
-  ///
-  /// The DispatchStage is responsible for querying the Scheduler before
-  /// dispatching new instructions. This routine performs such a query. If the
-  /// instruction 'IR' can be dispatched, SC_AVAILABLE is returned; otherwise,
-  /// the returned Status value identifies the nature of the stall.
-  /// Internally, it also checks if the load/store unit is available.
-  Status isAvailable(const InstRef &IR) const;
-
-  /// Reserves buffer and LSUnit queue resources that are necessary to issue
-  /// this instruction.
-  ///
-  /// Once dispatched, instruction IR is ready to be issued to the underlying
-  /// pipelines. Note that this operation cannot fail; it assumes that a
-  /// previous call to method `isAvailable(IR)` returned `SC_AVAILABLE`.
-  void dispatch(const InstRef &IR);
-
-  /// Returns true if IR is ready to be executed by the underlying pipelines.
-  /// This method assumes that IR has been previously dispatched.
-  bool isReady(const InstRef &IR) const;
-
-  /// Issues an instruction and populates a vector of used pipeline resources,
-  /// and a vector of instructions that transitioned to the ready state as a
-  /// result of this event.
-  void issueInstruction(
-      InstRef &IR,
-      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Used,
-      SmallVectorImpl<InstRef> &Ready);
-
-  /// Returns true if IR has to be issued immediately, or if IR is a zero
-  /// latency instruction.
-  bool mustIssueImmediately(const InstRef &IR) const;
-
-  /// This routine notifies the Scheduler that a new cycle just started.
-  ///
-  /// It notifies the underlying ResourceManager that a new cycle just started.
-  /// Vector `Freed` is populated with ResourceRefs for resources that have
-  /// changed state, and that are now available to new instructions.
-  /// Instructions executed are added to vector Executed, while vector Ready is
-  /// populated with instructions that have become ready in this new cycle.
-  void cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
-                  SmallVectorImpl<InstRef> &Ready,
-                  SmallVectorImpl<InstRef> &Executed);
-
-  /// Convert a resource mask into a valid llvm processor resource identifier.
-  unsigned getResourceID(uint64_t Mask) const {
-    return Resources->resolveResourceMask(Mask);
-  }
-
-  /// Select the next instruction to issue from the ReadySet. Returns an invalid
-  /// instruction reference if there are no ready instructions, or if processor
-  /// resources are not available.
-  InstRef select();
-
-#ifndef NDEBUG
-  // Dump the state of the instruction queues.
-  void dump() const;
-
-  // This routine performs a sanity check. It should only be called when we
-  // know that 'IR' is not in any of the scheduler's instruction queues.
-  void sanityCheck(const InstRef &IR) const {
-    assert(find(WaitSet, IR) == WaitSet.end() && "Already in the wait set!");
-    assert(find(ReadySet, IR) == ReadySet.end() && "Already in the ready set!");
-    assert(find(IssuedSet, IR) == IssuedSet.end() && "Already executing!");
-  }
-#endif // !NDEBUG
-};
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_SCHEDULER_H
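
A self-contained toy model of the WaitSet -> ReadySet -> IssuedSet hand-off described above. Simple counters stand in for real register and memory dependencies, so this only illustrates the flow, not the llvm-mca implementation:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Inst { int Id; int WaitCycles; int ExecCycles; };

int main() {
  std::vector<Inst> Wait{{0, 1, 2}, {1, 0, 1}};
  std::vector<Inst> Ready, Issued;
  for (int Cycle = 0; !Wait.empty() || !Ready.empty() || !Issued.empty();
       ++Cycle) {
    // cycleEvent(): age in-flight work and drop what finished executing.
    for (Inst &I : Issued)
      --I.ExecCycles;
    Issued.erase(std::remove_if(Issued.begin(), Issued.end(),
                                [](const Inst &I) { return I.ExecCycles <= 0; }),
                 Issued.end());
    // promoteToReadySet(): operands became available.
    for (auto It = Wait.begin(); It != Wait.end();) {
      if (--It->WaitCycles <= 0) {
        Ready.push_back(*It);
        It = Wait.erase(It);
      } else {
        ++It;
      }
    }
    // select() + issueInstruction(): pick the oldest ready instruction.
    if (!Ready.empty()) {
      std::printf("cycle %d: issue inst %d\n", Cycle, Ready.front().Id);
      Issued.push_back(Ready.front());
      Ready.erase(Ready.begin());
    }
  }
}
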
diff --git a/tools/llvm-mca/include/InstrBuilder.h b/tools/llvm-mca/include/InstrBuilder.h
deleted file mode 100644
index 1c958c5..0000000
--- a/tools/llvm-mca/include/InstrBuilder.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//===--------------------- InstrBuilder.h -----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// A builder class for instructions that are statically analyzed by llvm-mca.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_INSTRBUILDER_H
-#define LLVM_TOOLS_LLVM_MCA_INSTRBUILDER_H
-
-#include "Instruction.h"
-#include "Support.h"
-#include "llvm/MC/MCInstrAnalysis.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/Error.h"
-
-namespace llvm {
-namespace mca {
-
-/// A builder class that knows how to construct Instruction objects.
-///
-/// Every llvm-mca Instruction is described by an object of class InstrDesc.
-/// An InstrDesc describes which registers are read/written by the instruction,
-/// as well as the instruction latency and hardware resources consumed.
-///
-/// This class is used by the tool to construct Instructions and instruction
-/// descriptors (i.e. InstrDesc objects).
-/// Information from the machine scheduling model is used to identify processor
-/// resources that are consumed by an instruction.
-class InstrBuilder {
-  const MCSubtargetInfo &STI;
-  const MCInstrInfo &MCII;
-  const MCRegisterInfo &MRI;
-  const MCInstrAnalysis &MCIA;
-  SmallVector<uint64_t, 8> ProcResourceMasks;
-
-  DenseMap<unsigned short, std::unique_ptr<const InstrDesc>> Descriptors;
-  DenseMap<const MCInst *, std::unique_ptr<const InstrDesc>> VariantDescriptors;
-
-  bool FirstCallInst;
-  bool FirstReturnInst;
-
-  Expected<const InstrDesc &> createInstrDescImpl(const MCInst &MCI);
-  Expected<const InstrDesc &> getOrCreateInstrDesc(const MCInst &MCI);
-
-  InstrBuilder(const InstrBuilder &) = delete;
-  InstrBuilder &operator=(const InstrBuilder &) = delete;
-
-  void populateWrites(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
-  void populateReads(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
-  Error verifyInstrDesc(const InstrDesc &ID, const MCInst &MCI) const;
-
-public:
-  InstrBuilder(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
-               const MCRegisterInfo &RI, const MCInstrAnalysis &IA);
-
-  void clear() {
-    VariantDescriptors.shrink_and_clear();
-    FirstCallInst = true;
-    FirstReturnInst = true;
-  }
-
-  Expected<std::unique_ptr<Instruction>> createInstruction(const MCInst &MCI);
-};
-} // namespace mca
-} // namespace llvm
-
-#endif
diff --git a/tools/llvm-mca/include/Instruction.h b/tools/llvm-mca/include/Instruction.h
deleted file mode 100644
index dff9513..0000000
--- a/tools/llvm-mca/include/Instruction.h
+++ /dev/null
@@ -1,542 +0,0 @@
-//===--------------------- Instruction.h ------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines abstractions used by the Pipeline to model register reads,
-/// register writes and instructions.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_INSTRUCTION_H
-#define LLVM_TOOLS_LLVM_MCA_INSTRUCTION_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/MathExtras.h"
-
-#ifndef NDEBUG
-#include "llvm/Support/raw_ostream.h"
-#endif
-
-#include <memory>
-
-namespace llvm {
-namespace mca {
-
-constexpr int UNKNOWN_CYCLES = -512;
-
-/// A register write descriptor.
-struct WriteDescriptor {
-  // Operand index. The index is negative for implicit writes only.
-  // For implicit writes, the actual operand index is computed by performing
-  // a bitwise not of the OpIndex.
-  int OpIndex;
-  // Write latency. Number of cycles before write-back stage.
-  unsigned Latency;
-  // This field is set to a value different than zero only if this
-  // is an implicit definition.
-  unsigned RegisterID;
-  // Instruction itineraries would set this field to the SchedClass ID.
-  // Otherwise, it defaults to the WriteResourceID from the MCWriteLatencyEntry
-  // element associated with this write.
-  // When computing read latencies, this value is matched against the
-  // "ReadAdvance" information. The hardware backend may implement
-  // dedicated forwarding paths to quickly propagate write results to dependent
-  // instructions waiting in the reservation station (effectively bypassing the
-  // write-back stage).
-  unsigned SClassOrWriteResourceID;
-  // True only if this is a write obtained from an optional definition.
-  // Optional definitions are allowed to reference regID zero (i.e. "no
-  // register").
-  bool IsOptionalDef;
-
-  bool isImplicitWrite() const { return OpIndex < 0; }
-};
-
-/// A register read descriptor.
-struct ReadDescriptor {
-  // A MCOperand index. This is used by the Dispatch logic to identify register
-  // reads. Implicit reads have negative indices. The actual operand index of an
-  // implicit read is the bitwise not of field OpIndex.
-  int OpIndex;
-  // The actual "UseIdx". This is used to query the ReadAdvance table. Explicit
-  // uses always come first in the sequence of uses.
-  unsigned UseIndex;
-  // This field is only set if this is an implicit read.
-  unsigned RegisterID;
-  // Scheduling Class Index. It is used to query the scheduling model for the
-  // MCSchedClassDesc object.
-  unsigned SchedClassID;
-
-  bool isImplicitRead() const { return OpIndex < 0; }
-};
-
-class ReadState;
-
-/// Tracks uses of a register definition (e.g. register write).
-///
-/// Each implicit/explicit register write is associated with an instance of
-/// this class. A WriteState object tracks the dependent users of a
-/// register write. It also tracks how many cycles are left before the write
-/// back stage.
-class WriteState {
-  const WriteDescriptor *WD;
-  // On instruction issue, this field is set equal to the write latency.
-  // Before instruction issue, this field defaults to -512, a special
-  // value that represents an "unknown" number of cycles.
-  int CyclesLeft;
-
-  // Actual register defined by this write. This field is only used
-  // to speedup queries on the register file.
-  // For implicit writes, this field always matches the value of
-  // field RegisterID from WD.
-  unsigned RegisterID;
-
-  // Physical register file that serves register RegisterID.
-  unsigned PRFID;
-
-  // True if this write implicitly clears the upper portion of RegisterID's
-  // super-registers.
-  bool ClearsSuperRegs;
-
-  // True if this write is from a dependency breaking zero-idiom instruction.
-  bool WritesZero;
-
-  // True if this write has been eliminated at register renaming stage.
-  // Example: a register move doesn't consume scheduler/pipeline resources if
-  // it is eliminated at register renaming stage. It still consumes
-  // decode bandwidth, and ROB entries.
-  bool IsEliminated;
-
-  // This field is set if this is a partial register write, and it has a false
-  // dependency on any previous write of the same register (or a portion of it).
-  // DependentWrite must be able to complete before this write completes, so
-  // that we don't break the WAW, and the two writes can be merged together.
-  const WriteState *DependentWrite;
-
-  // A partial write that is in a false dependency with this write.
-  WriteState *PartialWrite;
-
-  unsigned DependentWriteCyclesLeft;
-
-  // Users is a set of dependent reads. A dependent read is added to the set
-  // only if CyclesLeft is "unknown". As soon as CyclesLeft becomes known,
-  // each user in the set gets notified with the actual CyclesLeft.
-
-  // The 'second' element of a pair is a "ReadAdvance" number of cycles.
-  SmallVector<std::pair<ReadState *, int>, 4> Users;
-
-public:
-  WriteState(const WriteDescriptor &Desc, unsigned RegID,
-             bool clearsSuperRegs = false, bool writesZero = false)
-      : WD(&Desc), CyclesLeft(UNKNOWN_CYCLES), RegisterID(RegID),
-        PRFID(0), ClearsSuperRegs(clearsSuperRegs), WritesZero(writesZero),
-        IsEliminated(false), DependentWrite(nullptr), PartialWrite(nullptr),
-        DependentWriteCyclesLeft(0) {}
-
-  WriteState(const WriteState &Other) = default;
-  WriteState &operator=(const WriteState &Other) = default;
-
-  int getCyclesLeft() const { return CyclesLeft; }
-  unsigned getWriteResourceID() const { return WD->SClassOrWriteResourceID; }
-  unsigned getRegisterID() const { return RegisterID; }
-  unsigned getRegisterFileID() const { return PRFID; }
-  unsigned getLatency() const { return WD->Latency; }
-
-  void addUser(ReadState *Use, int ReadAdvance);
-  void addUser(WriteState *Use);
-
-  unsigned getDependentWriteCyclesLeft() const {
-    return DependentWriteCyclesLeft;
-  }
-
-  unsigned getNumUsers() const {
-    unsigned NumUsers = Users.size();
-    if (PartialWrite)
-      ++NumUsers;
-    return NumUsers;
-  }
-
-  bool clearsSuperRegisters() const { return ClearsSuperRegs; }
-  bool isWriteZero() const { return WritesZero; }
-  bool isEliminated() const { return IsEliminated; }
-  bool isExecuted() const {
-    return CyclesLeft != UNKNOWN_CYCLES && CyclesLeft <= 0;
-  }
-
-  const WriteState *getDependentWrite() const { return DependentWrite; }
-  void setDependentWrite(WriteState *Other) { DependentWrite = Other; }
-  void writeStartEvent(unsigned Cycles) {
-    DependentWriteCyclesLeft = Cycles;
-    DependentWrite = nullptr;
-  }
-
-  void setWriteZero() { WritesZero = true; }
-  void setEliminated() {
-    assert(Users.empty() && "Write is in an inconsistent state.");
-    CyclesLeft = 0;
-    IsEliminated = true;
-  }
-
-  void setPRF(unsigned PRF) { PRFID = PRF; }
-
-  // On every cycle, update CyclesLeft and notify dependent users.
-  void cycleEvent();
-  void onInstructionIssued();
-
-#ifndef NDEBUG
-  void dump() const;
-#endif
-};
-
-/// Tracks register operand latency in cycles.
-///
-/// A read may be dependent on more than one write. This occurs when some
-/// writes only partially update the register associated with this read.
-class ReadState {
-  const ReadDescriptor *RD;
-  // Physical register identifier associated with this read.
-  unsigned RegisterID;
-  // Physical register file that serves register RegisterID.
-  unsigned PRFID;
-  // Number of writes that contribute to the definition of RegisterID.
-  // In the absence of partial register updates, the number of DependentWrites
-  // cannot be more than one.
-  unsigned DependentWrites;
-  // Number of cycles left before RegisterID can be read. This value depends on
-  // the latency of all the dependent writes. It defaults to UNKNOWN_CYCLES.
-  // It gets set to the value of field TotalCycles only when the 'CyclesLeft' of
-  // every dependent write is known.
-  int CyclesLeft;
-  // This field is updated on every writeStartEvent(). When the number of
-  // dependent writes (i.e. field DependentWrites) is zero, this value is
-  // propagated to field CyclesLeft.
-  unsigned TotalCycles;
-  // This field is set to true only if there are no dependent writes, and
-  // there are no cycles left to wait (field CyclesLeft is zero).
-  bool IsReady;
-  // True if this is a read from a known zero register.
-  bool IsZero;
-  // True if this register read is from a dependency-breaking instruction.
-  bool IndependentFromDef;
-
-public:
-  ReadState(const ReadDescriptor &Desc, unsigned RegID)
-      : RD(&Desc), RegisterID(RegID), PRFID(0), DependentWrites(0),
-        CyclesLeft(UNKNOWN_CYCLES), TotalCycles(0), IsReady(true),
-        IsZero(false), IndependentFromDef(false) {}
-
-  const ReadDescriptor &getDescriptor() const { return *RD; }
-  unsigned getSchedClass() const { return RD->SchedClassID; }
-  unsigned getRegisterID() const { return RegisterID; }
-  unsigned getRegisterFileID() const { return PRFID; }
-
-  bool isReady() const { return IsReady; }
-  bool isImplicitRead() const { return RD->isImplicitRead(); }
-
-  bool isIndependentFromDef() const { return IndependentFromDef; }
-  void setIndependentFromDef() { IndependentFromDef = true; }
-
-  void cycleEvent();
-  void writeStartEvent(unsigned Cycles);
-  void setDependentWrites(unsigned Writes) {
-    DependentWrites = Writes;
-    IsReady = !Writes;
-  }
-
-  bool isReadZero() const { return IsZero; }
-  void setReadZero() { IsZero = true; }
-  void setPRF(unsigned ID) { PRFID = ID; }
-};
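
A standalone sketch of the write-to-read hand-shake implied by the DependentWrites/TotalCycles/CyclesLeft fields; MiniRead is a made-up name, and the logic is a simplification of what writeStartEvent() does:

#include <algorithm>
#include <cassert>

constexpr int UNKNOWN_CYCLES = -512;

// A read stays blocked until every dependent write has announced its
// latency; it then waits out the slowest of those writes.
struct MiniRead {
  unsigned DependentWrites = 0;
  unsigned TotalCycles = 0;
  int CyclesLeft = UNKNOWN_CYCLES;

  void writeStartEvent(unsigned Cycles) {
    TotalCycles = std::max(TotalCycles, Cycles);
    if (--DependentWrites == 0)
      CyclesLeft = TotalCycles; // latency is finally known
  }
};

int main() {
  MiniRead R;
  R.DependentWrites = 2; // e.g. two partial writes feed this read
  R.writeStartEvent(3);
  assert(R.CyclesLeft == UNKNOWN_CYCLES); // still waiting on one write
  R.writeStartEvent(5);
  assert(R.CyclesLeft == 5); // bounded by the slowest dependent write
}
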
-
-/// A sequence of cycles.
-///
-/// This class can be used as a building block to construct ranges of cycles.
-class CycleSegment {
-  unsigned Begin; // Inclusive.
-  unsigned End;   // Exclusive.
-  bool Reserved;  // Resources associated to this segment must be reserved.
-
-public:
-  CycleSegment(unsigned StartCycle, unsigned EndCycle, bool IsReserved = false)
-      : Begin(StartCycle), End(EndCycle), Reserved(IsReserved) {}
-
-  bool contains(unsigned Cycle) const { return Cycle >= Begin && Cycle < End; }
-  bool startsAfter(const CycleSegment &CS) const { return End <= CS.Begin; }
-  bool endsBefore(const CycleSegment &CS) const { return Begin >= CS.End; }
-  bool overlaps(const CycleSegment &CS) const {
-    return !startsAfter(CS) && !endsBefore(CS);
-  }
-  bool isExecuting() const { return Begin == 0 && End != 0; }
-  bool isExecuted() const { return End == 0; }
-  bool operator<(const CycleSegment &Other) const {
-    return Begin < Other.Begin;
-  }
-  CycleSegment &operator--(void) {
-    if (Begin)
-      Begin--;
-    if (End)
-      End--;
-    return *this;
-  }
-
-  bool isValid() const { return Begin <= End; }
-  unsigned size() const { return End - Begin; }
-  void subtract(unsigned Cycles) {
-    assert(End >= Cycles);
-    End -= Cycles;
-  }
-
-  unsigned begin() const { return Begin; }
-  unsigned end() const { return End; }
-  void setEnd(unsigned NewEnd) { End = NewEnd; }
-  bool isReserved() const { return Reserved; }
-  void setReserved() { Reserved = true; }
-};
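
A worked example of how a segment ages cycle by cycle, restating operator-- and the isExecuting()/isExecuted() predicates outside the class:

#include <cassert>

int main() {
  // A resource needed for the half-open cycle range [2, 5).
  unsigned Begin = 2, End = 5;
  auto Tick = [&] { if (Begin) --Begin; if (End) --End; }; // operator--()

  Tick(); Tick();                 // two cycles elapse
  assert(Begin == 0 && End == 3); // isExecuting(): Begin == 0, End != 0
  Tick(); Tick(); Tick();
  assert(Begin == 0 && End == 0); // isExecuted(): End == 0
}
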
-
-/// Helper used by class InstrDesc to describe how hardware resources
-/// are used.
-///
-/// This class describes how many resource units of a specific resource kind
-/// (and how many cycles) are "used" by an instruction.
-struct ResourceUsage {
-  CycleSegment CS;
-  unsigned NumUnits;
-  ResourceUsage(CycleSegment Cycles, unsigned Units = 1)
-      : CS(Cycles), NumUnits(Units) {}
-  unsigned size() const { return CS.size(); }
-  bool isReserved() const { return CS.isReserved(); }
-  void setReserved() { CS.setReserved(); }
-};
-
-/// An instruction descriptor
-struct InstrDesc {
-  SmallVector<WriteDescriptor, 4> Writes; // Implicit writes are at the end.
-  SmallVector<ReadDescriptor, 4> Reads;   // Implicit reads are at the end.
-
-  // For every resource used by an instruction of this kind, this vector
-  // reports the number of "consumed cycles".
-  SmallVector<std::pair<uint64_t, ResourceUsage>, 4> Resources;
-
-  // A list of buffered resources consumed by this instruction.
-  SmallVector<uint64_t, 4> Buffers;
-
-  unsigned MaxLatency;
-  // Number of MicroOps for this instruction.
-  unsigned NumMicroOps;
-
-  bool MayLoad;
-  bool MayStore;
-  bool HasSideEffects;
-
-  // A zero latency instruction doesn't consume any scheduler resources.
-  bool isZeroLatency() const { return !MaxLatency && Resources.empty(); }
-
-  InstrDesc() = default;
-  InstrDesc(const InstrDesc &Other) = delete;
-  InstrDesc &operator=(const InstrDesc &Other) = delete;
-};
-
-/// Base class for instructions consumed by the simulation pipeline.
-///
-/// This class tracks data dependencies as well as generic properties
-/// of the instruction.
-class InstructionBase {
-  const InstrDesc &Desc;
-
-  // This field is set for instructions that are candidates for move
-  // elimination. For more information about move elimination, see the
-  // definition of RegisterMappingTracker in RegisterFile.h
-  bool IsOptimizableMove;
-
-  // Output dependencies.
-  // One entry per each implicit and explicit register definition.
-  SmallVector<WriteState, 4> Defs;
-
-  // Input dependencies.
-  // One entry per each implicit and explicit register use.
-  SmallVector<ReadState, 4> Uses;
-
-public:
-  InstructionBase(const InstrDesc &D) : Desc(D), IsOptimizableMove(false) {}
-
-  SmallVectorImpl<WriteState> &getDefs() { return Defs; }
-  const ArrayRef<WriteState> getDefs() const { return Defs; }
-  SmallVectorImpl<ReadState> &getUses() { return Uses; }
-  const ArrayRef<ReadState> getUses() const { return Uses; }
-  const InstrDesc &getDesc() const { return Desc; }
-
-  unsigned getLatency() const { return Desc.MaxLatency; }
-
-  bool hasDependentUsers() const {
-    return any_of(Defs,
-                  [](const WriteState &Def) { return Def.getNumUsers() > 0; });
-  }
-
-  unsigned getNumUsers() const {
-    unsigned NumUsers = 0;
-    for (const WriteState &Def : Defs)
-      NumUsers += Def.getNumUsers();
-    return NumUsers;
-  }
-
-  // Returns true if this instruction is a candidate for move elimination.
-  bool isOptimizableMove() const { return IsOptimizableMove; }
-  void setOptimizableMove() { IsOptimizableMove = true; }
-};
-
-/// An instruction propagated through the simulated instruction pipeline.
-///
-/// This class is used to monitor changes to the internal state of instructions
-/// that are sent to the various components of the simulated hardware pipeline.
-class Instruction : public InstructionBase {
-  enum InstrStage {
-    IS_INVALID,   // Instruction in an invalid state.
-    IS_AVAILABLE, // Instruction dispatched but operands are not ready.
-    IS_READY,     // Instruction dispatched and operands ready.
-    IS_EXECUTING, // Instruction issued.
-    IS_EXECUTED,  // Instruction executed. Values are written back.
-    IS_RETIRED    // Instruction retired.
-  };
-
-  // The current instruction stage.
-  enum InstrStage Stage;
-
-  // This value defaults to the instruction latency. This instruction is
-  // considered executed when field CyclesLeft goes to zero.
-  int CyclesLeft;
-
-  // Retire Unit token ID for this instruction.
-  unsigned RCUTokenID;
-
-public:
-  Instruction(const InstrDesc &D)
-      : InstructionBase(D), Stage(IS_INVALID), CyclesLeft(UNKNOWN_CYCLES),
-        RCUTokenID(0) {}
-
-  unsigned getRCUTokenID() const { return RCUTokenID; }
-  int getCyclesLeft() const { return CyclesLeft; }
-
-  // Transition to the dispatch stage, and assign a RCUToken to this
-  // instruction. The RCUToken is used to track the completion of every
-  // register write performed by this instruction.
-  void dispatch(unsigned RCUTokenID);
-
-  // Instruction issued. Transition to the IS_EXECUTING state, and update
-  // all the definitions.
-  void execute();
-
-  // Force a transition from the IS_AVAILABLE state to the IS_READY state if
-  // input operands are all ready. State transitions normally occur at the
-  // beginning of a new cycle (see method cycleEvent()). However, the scheduler
-  // may decide to promote instructions from the wait queue to the ready queue
-  // as the result of another issue event. This method is called every time
-  // the state of the instruction might have changed.
-  void update();
-
-  bool isDispatched() const { return Stage == IS_AVAILABLE; }
-  bool isReady() const { return Stage == IS_READY; }
-  bool isExecuting() const { return Stage == IS_EXECUTING; }
-  bool isExecuted() const { return Stage == IS_EXECUTED; }
-  bool isRetired() const { return Stage == IS_RETIRED; }
-
-  bool isEliminated() const {
-    return isReady() && getDefs().size() &&
-           all_of(getDefs(),
-                  [](const WriteState &W) { return W.isEliminated(); });
-  }
-
-  // Forces a transition from state IS_AVAILABLE to state IS_EXECUTED.
-  void forceExecuted();
-
-  void retire() {
-    assert(isExecuted() && "Instruction is in an invalid state!");
-    Stage = IS_RETIRED;
-  }
-
-  void cycleEvent();
-};
-
-/// An InstRef pairs a SourceMgr index with an Instruction pointer. The index
-/// serves as a unique identifier for the instruction, and MCA uses it as a
-/// key throughout the tool.
-class InstRef {
-  std::pair<unsigned, Instruction *> Data;
-
-public:
-  InstRef() : Data(std::make_pair(0, nullptr)) {}
-  InstRef(unsigned Index, Instruction *I) : Data(std::make_pair(Index, I)) {}
-
-  bool operator==(const InstRef &Other) const { return Data == Other.Data; }
-
-  unsigned getSourceIndex() const { return Data.first; }
-  Instruction *getInstruction() { return Data.second; }
-  const Instruction *getInstruction() const { return Data.second; }
-
-  /// Returns true if this references a valid instruction.
-  operator bool() const { return Data.second != nullptr; }
-
-  /// Invalidate this reference.
-  void invalidate() { Data.second = nullptr; }
-
-#ifndef NDEBUG
-  void print(raw_ostream &OS) const { OS << getSourceIndex(); }
-#endif
-};
-
-#ifndef NDEBUG
-inline raw_ostream &operator<<(raw_ostream &OS, const InstRef &IR) {
-  IR.print(OS);
-  return OS;
-}
-#endif
-
-/// A reference to a register write.
-///
-/// This class is mainly used by the register file to describe register
-/// mappings. It correlates a register write to the source index of the
-/// defining instruction.
-class WriteRef {
-  std::pair<unsigned, WriteState *> Data;
-  static const unsigned INVALID_IID;
-
-public:
-  WriteRef() : Data(INVALID_IID, nullptr) {}
-  WriteRef(unsigned SourceIndex, WriteState *WS) : Data(SourceIndex, WS) {}
-
-  unsigned getSourceIndex() const { return Data.first; }
-  const WriteState *getWriteState() const { return Data.second; }
-  WriteState *getWriteState() { return Data.second; }
-  void invalidate() { Data.second = nullptr; }
-  bool isWriteZero() const {
-    assert(isValid() && "Invalid null WriteState found!");
-    return getWriteState()->isWriteZero();
-  }
-
-  /// Returns true if this register write has been executed, and the new
-  /// register value is therefore available to users.
-  bool isAvailable() const {
-    if (getSourceIndex() == INVALID_IID)
-      return false;
-    const WriteState *WS = getWriteState();
-    return !WS || WS->isExecuted();
-  }
-
-  bool isValid() const { return Data.first != INVALID_IID && Data.second; }
-  bool operator==(const WriteRef &Other) const { return Data == Other.Data; }
-
-#ifndef NDEBUG
-  void dump() const;
-#endif
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif
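
For reference, the lifecycle above forms a simple linear state machine; a minimal restatement of the assumed ordering (the real class guards each transition with asserts):

#include <cassert>

enum Stage { INVALID, AVAILABLE, READY, EXECUTING, EXECUTED, RETIRED };

int main() {
  Stage S = INVALID;
  S = AVAILABLE;  // dispatch(): token reserved in the RCU
  S = READY;      // update(): all input operands became ready
  S = EXECUTING;  // execute(): issued to the pipelines
  S = EXECUTED;   // cycleEvent(): CyclesLeft reached zero
  S = RETIRED;    // retire(): only legal from EXECUTED
  assert(S == RETIRED);
}
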
diff --git a/tools/llvm-mca/include/Pipeline.h b/tools/llvm-mca/include/Pipeline.h
deleted file mode 100644
index 0fd4026..0000000
--- a/tools/llvm-mca/include/Pipeline.h
+++ /dev/null
@@ -1,79 +0,0 @@
-//===--------------------- Pipeline.h ---------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file implements an ordered container of stages that simulate the
-/// pipeline of a hardware backend.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_PIPELINE_H
-#define LLVM_TOOLS_LLVM_MCA_PIPELINE_H
-
-#include "HardwareUnits/Scheduler.h"
-#include "Stages/Stage.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Error.h"
-
-namespace llvm {
-namespace mca {
-
-class HWEventListener;
-
-/// A pipeline for a specific subtarget.
-///
-/// It emulates an out-of-order execution of instructions. Instructions are
-/// fetched from an MCInst sequence managed by an initial 'Fetch' stage.
-/// Instructions are first fetched, then dispatched to the schedulers, and
-/// then executed.
-///
-/// This class tracks the lifetime of an instruction from the moment where
-/// it gets dispatched to the schedulers, to the moment where it finishes
-/// executing and register writes are architecturally committed.
-/// In particular, it monitors changes in the state of every instruction
-/// in flight.
-///
-/// Instructions are executed in a loop of iterations. The number of iterations
-/// is defined by the SourceMgr object, which is managed by the initial stage
-/// of the instruction pipeline.
-///
-/// The Pipeline entry point is method 'run()', which executes cycles in a
-/// loop while there are new instructions to dispatch, or instructions that
-/// have not yet been retired.
-///
-/// Internally, the Pipeline collects statistical information in the form of
-/// histograms. For example, it tracks how the dispatch group size changes
-/// over time.
-class Pipeline {
-  Pipeline(const Pipeline &P) = delete;
-  Pipeline &operator=(const Pipeline &P) = delete;
-
-  /// An ordered list of stages that define this instruction pipeline.
-  SmallVector<std::unique_ptr<Stage>, 8> Stages;
-  std::set<HWEventListener *> Listeners;
-  unsigned Cycles;
-
-  Error runCycle();
-  bool hasWorkToProcess();
-  void notifyCycleBegin();
-  void notifyCycleEnd();
-
-public:
-  Pipeline() : Cycles(0) {}
-  void appendStage(std::unique_ptr<Stage> S);
-
-  /// Returns the total number of simulated cycles.
-  Expected<unsigned> run();
-
-  void addEventListener(HWEventListener *Listener);
-};
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_PIPELINE_H
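
A minimal standalone skeleton of the run() loop implied by the comments above (the real method threads llvm::Error through every cycle and notifies listeners at cycle boundaries):

#include <cstdio>

struct ToyPipeline {
  unsigned Cycles = 0;
  bool hasWorkToProcess() const { return Cycles < 4; } // stand-in condition
  void runCycle() { ++Cycles; } // cycleStart -> execute -> cycleEnd per stage
  unsigned run() {
    while (hasWorkToProcess())
      runCycle();
    return Cycles; // total number of simulated cycles
  }
};

int main() { std::printf("%u cycles\n", ToyPipeline().run()); }
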
diff --git a/tools/llvm-mca/include/SourceMgr.h b/tools/llvm-mca/include/SourceMgr.h
deleted file mode 100644
index e518010..0000000
--- a/tools/llvm-mca/include/SourceMgr.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===--------------------- SourceMgr.h --------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-/// This file implements class SourceMgr. Class SourceMgr abstracts the input
-/// code sequence (a sequence of MCInst), and assigns unique identifiers to
-/// every instruction in the sequence.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_SOURCEMGR_H
-#define LLVM_TOOLS_LLVM_MCA_SOURCEMGR_H
-
-#include "llvm/ADT/ArrayRef.h"
-
-namespace llvm {
-namespace mca {
-
-class Instruction;
-
-typedef std::pair<unsigned, const Instruction &> SourceRef;
-
-class SourceMgr {
-  using UniqueInst = std::unique_ptr<Instruction>;
-  ArrayRef<UniqueInst> Sequence;
-  unsigned Current;
-  const unsigned Iterations;
-  static const unsigned DefaultIterations = 100;
-
-public:
-  SourceMgr(ArrayRef<UniqueInst> S, unsigned Iter)
-      : Sequence(S), Current(0), Iterations(Iter ? Iter : DefaultIterations) {}
-
-  unsigned getNumIterations() const { return Iterations; }
-  unsigned size() const { return Sequence.size(); }
-  bool hasNext() const { return Current < (Iterations * Sequence.size()); }
-  void updateNext() { ++Current; }
-
-  SourceRef peekNext() const {
-    assert(hasNext() && "Already at end of sequence!");
-    return SourceRef(Current, *Sequence[Current % Sequence.size()]);
-  }
-
-  using const_iterator = ArrayRef<UniqueInst>::const_iterator;
-  const_iterator begin() const { return Sequence.begin(); }
-  const_iterator end() const { return Sequence.end(); }
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif
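
A standalone restatement of the index arithmetic: Current counts fetches across all iterations, while Current % size() recovers the position inside the original instruction sequence:

#include <cassert>

int main() {
  const unsigned Size = 3, Iterations = 2;
  unsigned Current = 0, Fetched = 0;
  while (Current < Iterations * Size) {    // hasNext()
    unsigned SourceIndex = Current % Size; // position used by peekNext()
    assert(SourceIndex < Size);
    ++Current;                             // updateNext()
    ++Fetched;
  }
  assert(Fetched == Iterations * Size); // every instruction seen twice
}
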
diff --git a/tools/llvm-mca/include/Stages/DispatchStage.h b/tools/llvm-mca/include/Stages/DispatchStage.h
deleted file mode 100644
index 29cace1..0000000
--- a/tools/llvm-mca/include/Stages/DispatchStage.h
+++ /dev/null
@@ -1,93 +0,0 @@
-//===----------------------- DispatchStage.h --------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file models the dispatch component of an instruction pipeline.
-///
-/// The DispatchStage is responsible for updating instruction dependencies
-/// and communicating to the simulated instruction scheduler that an instruction
-/// is ready to be scheduled for execution.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_DISPATCH_STAGE_H
-#define LLVM_TOOLS_LLVM_MCA_DISPATCH_STAGE_H
-
-#include "HWEventListener.h"
-#include "HardwareUnits/RegisterFile.h"
-#include "HardwareUnits/RetireControlUnit.h"
-#include "Instruction.h"
-#include "Stages/Stage.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-
-namespace llvm {
-namespace mca {
-
-// Implements the hardware dispatch logic.
-//
-// This class is responsible for the dispatch stage, in which instructions are
-// dispatched in groups to the Scheduler.  An instruction can be dispatched if
-// the following conditions are met:
-//  1) There are enough entries in the reorder buffer (see class
-//     RetireControlUnit) to write the opcodes associated with the instruction.
-//  2) There are enough physical registers to rename output register operands.
-//  3) There are enough entries available in the used buffered resource(s).
-//
-// The number of micro opcodes that can be dispatched in one cycle is limited by
-// the value of field 'DispatchWidth'. A "dynamic dispatch stall" occurs when
-// processor resources are not available. Dispatch stall events are counted
-// during the entire execution of the code, and displayed by the performance
-// report when flag '-dispatch-stats' is specified.
-//
-// If the number of micro opcodes exceeds DispatchWidth, then the instruction
-// is dispatched in multiple cycles.
-class DispatchStage final : public Stage {
-  unsigned DispatchWidth;
-  unsigned AvailableEntries;
-  unsigned CarryOver;
-  InstRef CarriedOver;
-  const MCSubtargetInfo &STI;
-  RetireControlUnit &RCU;
-  RegisterFile &PRF;
-
-  bool checkRCU(const InstRef &IR) const;
-  bool checkPRF(const InstRef &IR) const;
-  bool canDispatch(const InstRef &IR) const;
-  Error dispatch(InstRef IR);
-
-  void updateRAWDependencies(ReadState &RS, const MCSubtargetInfo &STI);
-
-  void notifyInstructionDispatched(const InstRef &IR,
-                                   ArrayRef<unsigned> UsedPhysRegs,
-                                   unsigned uOps) const;
-
-public:
-  DispatchStage(const MCSubtargetInfo &Subtarget, const MCRegisterInfo &MRI,
-                unsigned MaxDispatchWidth, RetireControlUnit &R,
-                RegisterFile &F)
-      : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
-        CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {}
-
-  bool isAvailable(const InstRef &IR) const override;
-
-  // The dispatch logic internally doesn't buffer instructions. So there is
-  // never work to do at the beginning of every cycle.
-  bool hasWorkToComplete() const override { return false; }
-  Error cycleStart() override;
-  Error execute(InstRef &IR) override;
-
-#ifndef NDEBUG
-  void dump() const;
-#endif
-};
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_DISPATCH_STAGE_H
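
An illustrative micro-op accounting loop for the carry-over behaviour described above (toy numbers; the real stage also re-checks the RCU, the register files, and buffered resources on every cycle):

#include <algorithm>
#include <cassert>

int main() {
  const unsigned DispatchWidth = 4;
  unsigned CarryOver = 6; // uOps still pending from a 6-uOp instruction
  unsigned Cycles = 0;
  while (CarryOver) {
    unsigned Dispatched = std::min(CarryOver, DispatchWidth);
    CarryOver -= Dispatched;
    ++Cycles;
  }
  assert(Cycles == 2); // 4 uOps in the first cycle, the remaining 2 next
}
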
diff --git a/tools/llvm-mca/include/Stages/EntryStage.h b/tools/llvm-mca/include/Stages/EntryStage.h
deleted file mode 100644
index 21b8331..0000000
--- a/tools/llvm-mca/include/Stages/EntryStage.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===---------------------- EntryStage.h ------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the Entry stage of an instruction pipeline.  Its sole
-/// purpose in life is to pick instructions in sequence and move them to the
-/// next pipeline stage.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_ENTRY_STAGE_H
-#define LLVM_TOOLS_LLVM_MCA_ENTRY_STAGE_H
-
-#include "SourceMgr.h"
-#include "Stages/Stage.h"
-#include "llvm/ADT/SmallVector.h"
-
-namespace llvm {
-namespace mca {
-
-class EntryStage final : public Stage {
-  InstRef CurrentInstruction;
-  SmallVector<std::unique_ptr<Instruction>, 16> Instructions;
-  SourceMgr &SM;
-  unsigned NumRetired;
-
-  // Updates the program counter, and sets 'CurrentInstruction'.
-  void getNextInstruction();
-
-  EntryStage(const EntryStage &Other) = delete;
-  EntryStage &operator=(const EntryStage &Other) = delete;
-
-public:
-  EntryStage(SourceMgr &SM) : CurrentInstruction(), SM(SM), NumRetired(0) { }
-
-  bool isAvailable(const InstRef &IR) const override;
-  bool hasWorkToComplete() const override;
-  Error execute(InstRef &IR) override;
-  Error cycleStart() override;
-  Error cycleEnd() override;
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_ENTRY_STAGE_H
diff --git a/tools/llvm-mca/include/Stages/ExecuteStage.h b/tools/llvm-mca/include/Stages/ExecuteStage.h
deleted file mode 100644
index 91b2405..0000000
--- a/tools/llvm-mca/include/Stages/ExecuteStage.h
+++ /dev/null
@@ -1,80 +0,0 @@
-//===---------------------- ExecuteStage.h ----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the execution stage of a default instruction pipeline.
-///
-/// The ExecuteStage is responsible for managing the hardware scheduler
-/// and issuing notifications that an instruction has been executed.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H
-#define LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H
-
-#include "HardwareUnits/Scheduler.h"
-#include "Instruction.h"
-#include "Stages/Stage.h"
-#include "llvm/ADT/ArrayRef.h"
-
-namespace llvm {
-namespace mca {
-
-class ExecuteStage final : public Stage {
-  Scheduler &HWS;
-
-  Error issueInstruction(InstRef &IR);
-
-  // Called at the beginning of each cycle to issue already dispatched
-  // instructions to the underlying pipelines.
-  Error issueReadyInstructions();
-
-  // Used to notify instructions eliminated at register renaming stage.
-  Error handleInstructionEliminated(InstRef &IR);
-
-  ExecuteStage(const ExecuteStage &Other) = delete;
-  ExecuteStage &operator=(const ExecuteStage &Other) = delete;
-
-public:
-  ExecuteStage(Scheduler &S) : Stage(), HWS(S) {}
-
-  // This stage works under the assumption that the Pipeline will eventually
-  // execute a retire stage. We don't need to check if pipelines and/or
-  // schedulers have instructions to process, because those instructions are
-  // also tracked by the retire control unit. That means
-  // RetireStage::hasWorkToComplete() is responsible for checking if there
-  // are still instructions in-flight in the out-of-order backend.
-  bool hasWorkToComplete() const override { return false; }
-  bool isAvailable(const InstRef &IR) const override;
-
-  // Notifies the scheduler that a new cycle just started.
-  //
-  // This method is also responsible for notifying listeners about instruction
-  // state changes, and processor resources freed by the scheduler.
-  // Instructions that transitioned to the 'Executed' state are automatically
-  // moved to the next stage (i.e. RetireStage).
-  Error cycleStart() override;
-  Error execute(InstRef &IR) override;
-
-  void notifyInstructionIssued(
-      const InstRef &IR,
-      ArrayRef<std::pair<ResourceRef, ResourceCycles>> Used) const;
-  void notifyInstructionExecuted(const InstRef &IR) const;
-  void notifyInstructionReady(const InstRef &IR) const;
-  void notifyResourceAvailable(const ResourceRef &RR) const;
-
-  // Notify listeners that buffered resources have been consumed or freed.
-  void notifyReservedOrReleasedBuffers(const InstRef &IR, bool Reserved) const;
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H
diff --git a/tools/llvm-mca/include/Stages/InstructionTables.h b/tools/llvm-mca/include/Stages/InstructionTables.h
deleted file mode 100644
index e618d06..0000000
--- a/tools/llvm-mca/include/Stages/InstructionTables.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//===--------------------- InstructionTables.h ------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file implements a custom stage to generate instruction tables.
-/// See the description of command-line flag -instruction-tables in
-/// docs/CommandGuide/llvm-mca.rst
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_INSTRUCTIONTABLES_H
-#define LLVM_TOOLS_LLVM_MCA_INSTRUCTIONTABLES_H
-
-#include "HardwareUnits/Scheduler.h"
-#include "Stages/Stage.h"
-#include "Support.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/MC/MCSchedule.h"
-
-namespace llvm {
-namespace mca {
-
-class InstructionTables final : public Stage {
-  const MCSchedModel &SM;
-  SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> UsedResources;
-  SmallVector<uint64_t, 8> Masks;
-
-public:
-  InstructionTables(const MCSchedModel &Model) : Stage(), SM(Model) {
-    computeProcResourceMasks(Model, Masks);
-  }
-
-  bool hasWorkToComplete() const override { return false; }
-  Error execute(InstRef &IR) override;
-};
-} // namespace mca
-} // namespace llvm
-
-#endif
diff --git a/tools/llvm-mca/include/Stages/RetireStage.h b/tools/llvm-mca/include/Stages/RetireStage.h
deleted file mode 100644
index 28eda40..0000000
--- a/tools/llvm-mca/include/Stages/RetireStage.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//===---------------------- RetireStage.h -----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the retire stage of a default instruction pipeline.
-/// The RetireStage represents the process logic that interacts with the
-/// simulated RetireControlUnit hardware.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_RETIRE_STAGE_H
-#define LLVM_TOOLS_LLVM_MCA_RETIRE_STAGE_H
-
-#include "HardwareUnits/RegisterFile.h"
-#include "HardwareUnits/RetireControlUnit.h"
-#include "Stages/Stage.h"
-
-namespace llvm {
-namespace mca {
-
-class RetireStage final : public Stage {
-  // Owner will go away when we move listeners/eventing to the stages.
-  RetireControlUnit &RCU;
-  RegisterFile &PRF;
-
-  RetireStage(const RetireStage &Other) = delete;
-  RetireStage &operator=(const RetireStage &Other) = delete;
-
-public:
-  RetireStage(RetireControlUnit &R, RegisterFile &F)
-      : Stage(), RCU(R), PRF(F) {}
-
-  bool hasWorkToComplete() const override { return !RCU.isEmpty(); }
-  Error cycleStart() override;
-  Error execute(InstRef &IR) override;
-  void notifyInstructionRetired(const InstRef &IR) const;
-};
-
-} // namespace mca
-} // namespace llvm
-
-#endif // LLVM_TOOLS_LLVM_MCA_RETIRE_STAGE_H
diff --git a/tools/llvm-mca/include/Stages/Stage.h b/tools/llvm-mca/include/Stages/Stage.h
deleted file mode 100644
index 5665fc4..0000000
--- a/tools/llvm-mca/include/Stages/Stage.h
+++ /dev/null
@@ -1,88 +0,0 @@
-//===---------------------- Stage.h -----------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines a stage.
-/// A chain of stages compose an instruction pipeline.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_STAGE_H
-#define LLVM_TOOLS_LLVM_MCA_STAGE_H
-
-#include "HWEventListener.h"
-#include "llvm/Support/Error.h"
-#include <set>
-
-namespace llvm {
-namespace mca {
-
-class InstRef;
-
-class Stage {
-  Stage *NextInSequence;
-  std::set<HWEventListener *> Listeners;
-
-  Stage(const Stage &Other) = delete;
-  Stage &operator=(const Stage &Other) = delete;
-
-protected:
-  const std::set<HWEventListener *> &getListeners() const { return Listeners; }
-
-public:
-  Stage() : NextInSequence(nullptr) {}
-  virtual ~Stage();
-
-  /// Returns true if it can execute IR during this cycle.
-  virtual bool isAvailable(const InstRef &IR) const { return true; }
-
-  /// Returns true if some instructions are still executing in this stage.
-  virtual bool hasWorkToComplete() const = 0;
-
-  /// Called once at the start of each cycle.  This can be used as a setup
-  /// phase to prepare for the instructions executed during the cycle.
-  virtual Error cycleStart() { return ErrorSuccess(); }
-
-  /// Called once at the end of each cycle.
-  virtual Error cycleEnd() { return ErrorSuccess(); }
-
-  /// The primary action that this stage performs on instruction IR.
-  virtual Error execute(InstRef &IR) = 0;
-
-  void setNextInSequence(Stage *NextStage) {
-    assert(!NextInSequence && "This stage already has a NextInSequence!");
-    NextInSequence = NextStage;
-  }
-
-  bool checkNextStage(const InstRef &IR) const {
-    return NextInSequence && NextInSequence->isAvailable(IR);
-  }
-
-  /// Called when an instruction is ready to move to the next pipeline stage.
-  ///
-  /// Stages are responsible for moving instructions to their immediate
-  /// successor stages.
-  Error moveToTheNextStage(InstRef &IR) {
-    assert(checkNextStage(IR) && "Next stage is not ready!");
-    return NextInSequence->execute(IR);
-  }
-
-  /// Add a listener to receive callbacks during the execution of this stage.
-  void addListener(HWEventListener *Listener);
-
-  /// Notify listeners of a particular hardware event.
-  template <typename EventT> void notifyEvent(const EventT &Event) const {
-    for (HWEventListener *Listener : Listeners)
-      Listener->onEvent(Event);
-  }
-};
-
-} // namespace mca
-} // namespace llvm
-#endif // LLVM_TOOLS_LLVM_MCA_STAGE_H
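
As a quick illustration of the interface above, here is a hypothetical pass-through stage (not part of this patch): it reports no pending work of its own and forwards each instruction to its successor once the successor can accept it:

class ForwardingStage : public Stage {
public:
  bool hasWorkToComplete() const override { return false; }
  Error execute(InstRef &IR) override {
    // Forward IR when the next stage is available; otherwise succeed and
    // let the pipeline retry on a later cycle.
    if (checkNextStage(IR))
      return moveToTheNextStage(IR);
    return ErrorSuccess();
  }
};
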
diff --git a/tools/llvm-mca/include/Support.h b/tools/llvm-mca/include/Support.h
deleted file mode 100644
index e7a4e33..0000000
--- a/tools/llvm-mca/include/Support.h
+++ /dev/null
@@ -1,119 +0,0 @@
-//===--------------------- Support.h ----------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// Helper functions used by various pipeline components.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_LLVM_MCA_SUPPORT_H
-#define LLVM_TOOLS_LLVM_MCA_SUPPORT_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/MC/MCSchedule.h"
-#include "llvm/Support/Error.h"
-
-namespace llvm {
-namespace mca {
-
-template <typename T>
-class InstructionError : public ErrorInfo<InstructionError<T>> {
-public:
-  static char ID;
-  std::string Message;
-  const T &Inst;
-
-  InstructionError(std::string M, const T &MCI)
-      : Message(std::move(M)), Inst(MCI) {}
-
-  void log(raw_ostream &OS) const override { OS << Message; }
-
-  std::error_code convertToErrorCode() const override {
-    return inconvertibleErrorCode();
-  }
-};
-
-template <typename T> char InstructionError<T>::ID;
-
-/// This class represents the number of cycles per resource (fractions of
-/// cycles).  That quantity is managed here as a ratio, and accessed via the
-/// double cast-operator below.  The two quantities, number of cycles and
-/// number of resources, are kept separate.  This is used by the
-/// ResourcePressureView to calculate the average resource cycles
-/// per instruction/iteration.
-class ResourceCycles {
-  unsigned Numerator, Denominator;
-
-public:
-  ResourceCycles() : Numerator(0), Denominator(1) {}
-  ResourceCycles(unsigned Cycles, unsigned ResourceUnits = 1)
-      : Numerator(Cycles), Denominator(ResourceUnits) {}
-
-  operator double() const {
-    assert(Denominator && "Invalid denominator (must be non-zero).");
-    return (Denominator == 1) ? Numerator : (double)Numerator / Denominator;
-  }
-
-  // Add the components of RHS to this instance.  Instead of calculating
-  // the final value here, we keep track of the numerator and denominator
-  // separately, to reduce floating point error.
-  ResourceCycles &operator+=(const ResourceCycles &RHS) {
-    if (Denominator == RHS.Denominator)
-      Numerator += RHS.Numerator;
-    else {
-      // Create a common denominator for LHS and RHS by calculating the least
-      // common multiple from the GCD.
-      unsigned GCD = GreatestCommonDivisor64(Denominator, RHS.Denominator);
-      unsigned LCM = (Denominator * RHS.Denominator) / GCD;
-      unsigned LHSNumerator = Numerator * (LCM / Denominator);
-      unsigned RHSNumerator = RHS.Numerator * (LCM / RHS.Denominator);
-      Numerator = LHSNumerator + RHSNumerator;
-      Denominator = LCM;
-    }
-    return *this;
-  }
-};
-
-/// Populates vector Masks with processor resource masks.
-///
-/// The number of bits set in a mask depends on the processor resource type.
-/// Each processor resource mask has at least one bit set. For groups, the
-/// number of bits set in the mask is equal to the cardinality of the group plus
-/// one. Excluding the most significant bit, the remaining bits in the mask
-/// identify processor resources that are part of the group.
-///
-/// Example:
-///
-///  ResourceA  -- Mask: 0b001
-///  ResourceB  -- Mask: 0b010
-///  ResourceAB -- Mask: 0b100 U (ResourceA::Mask | ResourceB::Mask) == 0b111
-///
-/// ResourceAB is a processor resource group containing ResourceA and ResourceB.
-/// Each resource mask uniquely identifies a resource; both ResourceA and
-/// ResourceB only have one bit set.
-/// ResourceAB is a group; excluding the most significant bit in the mask, the
-/// remaining bits identify the composition of the group.
-///
-/// Resource masks are used by the ResourceManager to solve set membership
-/// problems with simple bit manipulation operations.
-void computeProcResourceMasks(const MCSchedModel &SM,
-                              SmallVectorImpl<uint64_t> &Masks);
-
-/// Compute the reciprocal block throughput from a set of processor resource
-/// cycles. The reciprocal block throughput is computed as the MAX between:
-///  - NumMicroOps / DispatchWidth
-///  - ProcResourceCycles / #ProcResourceUnits  (for every consumed resource).
-double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
-                               unsigned NumMicroOps,
-                               ArrayRef<unsigned> ProcResourceUsage);
-} // namespace mca
-} // namespace llvm
-
-#endif
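
The exact-fraction bookkeeping in ResourceCycles::operator+= above can be reproduced standalone. This hypothetical snippet (using std::gcd from C++17's <numeric>) shows how 1/2 + 1/3 stays 5/6 instead of accumulating floating-point error:

#include <numeric>
#include <utility>

// Each pair is {Cycles, ResourceUnits}, mirroring Numerator/Denominator.
std::pair<unsigned, unsigned> addCycles(std::pair<unsigned, unsigned> L,
                                        std::pair<unsigned, unsigned> R) {
  unsigned GCD = std::gcd(L.second, R.second);
  unsigned LCM = (L.second * R.second) / GCD;  // common denominator
  return {L.first * (LCM / L.second) + R.first * (LCM / R.second), LCM};
}
// addCycles({1, 2}, {1, 3}) == {5, 6}
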
diff --git a/tools/llvm-mca/lib/CMakeLists.txt b/tools/llvm-mca/lib/CMakeLists.txt
deleted file mode 100644
index 21b6e34..0000000
--- a/tools/llvm-mca/lib/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-include_directories(${LLVM_MCA_SOURCE_DIR}/include)
-
-add_library(LLVMMCA
-  STATIC
-  Context.cpp
-  HWEventListener.cpp
-  HardwareUnits/HardwareUnit.cpp
-  HardwareUnits/LSUnit.cpp
-  HardwareUnits/RegisterFile.cpp
-  HardwareUnits/ResourceManager.cpp
-  HardwareUnits/RetireControlUnit.cpp
-  HardwareUnits/Scheduler.cpp
-  InstrBuilder.cpp
-  Instruction.cpp
-  Pipeline.cpp
-  Stages/DispatchStage.cpp
-  Stages/EntryStage.cpp
-  Stages/ExecuteStage.cpp
-  Stages/InstructionTables.cpp
-  Stages/RetireStage.cpp
-  Stages/Stage.cpp
-  Support.cpp
-  )
-
-llvm_update_compile_flags(LLVMMCA)
-llvm_map_components_to_libnames(libs
-  MC
-  Support
-  )
-
-target_link_libraries(LLVMMCA ${libs})
-set_target_properties(LLVMMCA PROPERTIES FOLDER "Libraries")
diff --git a/tools/llvm-mca/lib/Context.cpp b/tools/llvm-mca/lib/Context.cpp
deleted file mode 100644
index 17b992a..0000000
--- a/tools/llvm-mca/lib/Context.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-//===---------------------------- Context.cpp -------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines a class for holding ownership of various simulated
-/// hardware units.  A Context also provides a utility routine for constructing
-/// a default out-of-order pipeline with fetch, dispatch, execute, and retire
-/// stages.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Context.h"
-#include "HardwareUnits/RegisterFile.h"
-#include "HardwareUnits/RetireControlUnit.h"
-#include "HardwareUnits/Scheduler.h"
-#include "Stages/DispatchStage.h"
-#include "Stages/EntryStage.h"
-#include "Stages/ExecuteStage.h"
-#include "Stages/RetireStage.h"
-
-namespace llvm {
-namespace mca {
-
-std::unique_ptr<Pipeline>
-Context::createDefaultPipeline(const PipelineOptions &Opts, InstrBuilder &IB,
-                               SourceMgr &SrcMgr) {
-  const MCSchedModel &SM = STI.getSchedModel();
-
-  // Create the hardware units defining the backend.
-  auto RCU = llvm::make_unique<RetireControlUnit>(SM);
-  auto PRF = llvm::make_unique<RegisterFile>(SM, MRI, Opts.RegisterFileSize);
-  auto LSU = llvm::make_unique<LSUnit>(SM, Opts.LoadQueueSize,
-                                       Opts.StoreQueueSize, Opts.AssumeNoAlias);
-  auto HWS = llvm::make_unique<Scheduler>(SM, *LSU);
-
-  // Create the pipeline stages.
-  auto Fetch = llvm::make_unique<EntryStage>(SrcMgr);
-  auto Dispatch = llvm::make_unique<DispatchStage>(STI, MRI, Opts.DispatchWidth,
-                                                   *RCU, *PRF);
-  auto Execute = llvm::make_unique<ExecuteStage>(*HWS);
-  auto Retire = llvm::make_unique<RetireStage>(*RCU, *PRF);
-
-  // Pass the ownership of all the hardware units to this Context.
-  addHardwareUnit(std::move(RCU));
-  addHardwareUnit(std::move(PRF));
-  addHardwareUnit(std::move(LSU));
-  addHardwareUnit(std::move(HWS));
-
-  // Build the pipeline.
-  auto StagePipeline = llvm::make_unique<Pipeline>();
-  StagePipeline->appendStage(std::move(Fetch));
-  StagePipeline->appendStage(std::move(Dispatch));
-  StagePipeline->appendStage(std::move(Execute));
-  StagePipeline->appendStage(std::move(Retire));
-  return StagePipeline;
-}
-
-} // namespace mca
-} // namespace llvm
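
A rough, hypothetical call site for the factory above; the PipelineOptions argument order is an assumption inferred from the Opts fields read in this file, not verified against Context.h:

// STI, MRI, IB and SrcMgr are the MCSubtargetInfo, MCRegisterInfo,
// InstrBuilder and SourceMgr owned by the caller.
mca::Context Ctx(MRI, STI);
mca::PipelineOptions Opts(/*DispatchWidth=*/2, /*RegisterFileSize=*/0,
                          /*LoadQueueSize=*/0, /*StoreQueueSize=*/0,
                          /*AssumeNoAlias=*/false);
std::unique_ptr<mca::Pipeline> P = Ctx.createDefaultPipeline(Opts, IB, SrcMgr);
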
diff --git a/tools/llvm-mca/lib/HWEventListener.cpp b/tools/llvm-mca/lib/HWEventListener.cpp
deleted file mode 100644
index 3930e25..0000000
--- a/tools/llvm-mca/lib/HWEventListener.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===----------------------- HWEventListener.cpp ----------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines a vtable anchor for class HWEventListener.
-///
-//===----------------------------------------------------------------------===//
-
-#include "HWEventListener.h"
-
-namespace llvm {
-namespace mca {
-
-// Anchor the vtable here.
-void HWEventListener::anchor() {}
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/HardwareUnits/HardwareUnit.cpp b/tools/llvm-mca/lib/HardwareUnits/HardwareUnit.cpp
deleted file mode 100644
index 4e46ffa..0000000
--- a/tools/llvm-mca/lib/HardwareUnits/HardwareUnit.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//===------------------------- HardwareUnit.cpp -----------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the anchor for the base class that describes
-/// simulated hardware units.
-///
-//===----------------------------------------------------------------------===//
-
-#include "HardwareUnits/HardwareUnit.h"
-
-namespace llvm {
-namespace mca {
-
-// Pin the vtable with this method.
-HardwareUnit::~HardwareUnit() = default;
-
-} // namespace mca
-} // namespace llvm
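
Both this file and HWEventListener.cpp above exist only for the "vtable anchor" idiom: defining one virtual member out of line makes the compiler emit the class's vtable in a single object file instead of in every translation unit that uses the class. A generic sketch:

struct Base {
  virtual ~Base();        // declared with the class ...
  virtual void run() {}
};
Base::~Base() = default;  // ... defined once in a .cpp, anchoring the vtable
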
diff --git a/tools/llvm-mca/lib/HardwareUnits/LSUnit.cpp b/tools/llvm-mca/lib/HardwareUnits/LSUnit.cpp
deleted file mode 100644
index ed82691..0000000
--- a/tools/llvm-mca/lib/HardwareUnits/LSUnit.cpp
+++ /dev/null
@@ -1,190 +0,0 @@
-//===----------------------- LSUnit.cpp --------------------------*- C++-*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// A Load-Store Unit for the llvm-mca tool.
-///
-//===----------------------------------------------------------------------===//
-
-#include "HardwareUnits/LSUnit.h"
-#include "Instruction.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-#define DEBUG_TYPE "llvm-mca"
-
-namespace llvm {
-namespace mca {
-
-LSUnit::LSUnit(const MCSchedModel &SM, unsigned LQ, unsigned SQ,
-               bool AssumeNoAlias)
-    : LQ_Size(LQ), SQ_Size(SQ), NoAlias(AssumeNoAlias) {
-  if (SM.hasExtraProcessorInfo()) {
-    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
-    if (!LQ_Size && EPI.LoadQueueID) {
-      const MCProcResourceDesc &LdQDesc = *SM.getProcResource(EPI.LoadQueueID);
-      LQ_Size = LdQDesc.BufferSize;
-    }
-
-    if (!SQ_Size && EPI.StoreQueueID) {
-      const MCProcResourceDesc &StQDesc = *SM.getProcResource(EPI.StoreQueueID);
-      SQ_Size = StQDesc.BufferSize;
-    }
-  }
-}
-
-#ifndef NDEBUG
-void LSUnit::dump() const {
-  dbgs() << "[LSUnit] LQ_Size = " << LQ_Size << '\n';
-  dbgs() << "[LSUnit] SQ_Size = " << SQ_Size << '\n';
-  dbgs() << "[LSUnit] NextLQSlotIdx = " << LoadQueue.size() << '\n';
-  dbgs() << "[LSUnit] NextSQSlotIdx = " << StoreQueue.size() << '\n';
-}
-#endif
-
-void LSUnit::assignLQSlot(unsigned Index) {
-  assert(!isLQFull());
-  assert(LoadQueue.count(Index) == 0);
-
-  LLVM_DEBUG(dbgs() << "[LSUnit] - AssignLQSlot <Idx=" << Index
-                    << ",slot=" << LoadQueue.size() << ">\n");
-  LoadQueue.insert(Index);
-}
-
-void LSUnit::assignSQSlot(unsigned Index) {
-  assert(!isSQFull());
-  assert(StoreQueue.count(Index) == 0);
-
-  LLVM_DEBUG(dbgs() << "[LSUnit] - AssignSQSlot <Idx=" << Index
-                    << ",slot=" << StoreQueue.size() << ">\n");
-  StoreQueue.insert(Index);
-}
-
-void LSUnit::dispatch(const InstRef &IR) {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  bool IsMemBarrier = Desc.HasSideEffects;
-  assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
-
-  const unsigned Index = IR.getSourceIndex();
-  if (Desc.MayLoad) {
-    if (IsMemBarrier)
-      LoadBarriers.insert(Index);
-    assignLQSlot(Index);
-  }
-
-  if (Desc.MayStore) {
-    if (IsMemBarrier)
-      StoreBarriers.insert(Index);
-    assignSQSlot(Index);
-  }
-}
-
-LSUnit::Status LSUnit::isAvailable(const InstRef &IR) const {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  if (Desc.MayLoad && isLQFull())
-    return LSUnit::LSU_LQUEUE_FULL;
-  if (Desc.MayStore && isSQFull())
-    return LSUnit::LSU_SQUEUE_FULL;
-  return LSUnit::LSU_AVAILABLE;
-}
-
-bool LSUnit::isReady(const InstRef &IR) const {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  const unsigned Index = IR.getSourceIndex();
-  bool IsALoad = Desc.MayLoad;
-  bool IsAStore = Desc.MayStore;
-  assert((IsALoad || IsAStore) && "Not a memory operation!");
-  assert((!IsALoad || LoadQueue.count(Index) == 1) && "Load not in queue!");
-  assert((!IsAStore || StoreQueue.count(Index) == 1) && "Store not in queue!");
-
-  if (IsALoad && !LoadBarriers.empty()) {
-    unsigned LoadBarrierIndex = *LoadBarriers.begin();
-    // A younger load cannot pass an older load barrier.
-    if (Index > LoadBarrierIndex)
-      return false;
-    // A load barrier cannot pass an older load.
-    if (Index == LoadBarrierIndex && Index != *LoadQueue.begin())
-      return false;
-  }
-
-  if (IsAStore && !StoreBarriers.empty()) {
-    unsigned StoreBarrierIndex = *StoreBarriers.begin();
-    // A younger store cannot pass an older store barrier.
-    if (Index > StoreBarrierIndex)
-      return false;
-    // A store barrier cannot pass an older store.
-    if (Index == StoreBarrierIndex && Index != *StoreQueue.begin())
-      return false;
-  }
-
-  // A load may not pass a previous store unless flag 'NoAlias' is set.
-  // A load may pass a previous load.
-  if (NoAlias && IsALoad)
-    return true;
-
-  if (StoreQueue.size()) {
-    // A load may not pass a previous store.
-    // A store may not pass a previous store.
-    if (Index > *StoreQueue.begin())
-      return false;
-  }
-
-  // Okay, we are older than the oldest store in the queue.
-  // If there are no pending loads, then we can say for sure that this
-  // instruction is ready.
-  if (isLQEmpty())
-    return true;
-
-  // Check if there are no older loads.
-  if (Index <= *LoadQueue.begin())
-    return true;
-
-  // There is at least one younger load.
-  //
-  // A store may not pass a previous load.
-  // A load may pass a previous load.
-  return !IsAStore;
-}
-
-void LSUnit::onInstructionExecuted(const InstRef &IR) {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  const unsigned Index = IR.getSourceIndex();
-  bool IsALoad = Desc.MayLoad;
-  bool IsAStore = Desc.MayStore;
-
-  if (IsALoad) {
-    if (LoadQueue.erase(Index)) {
-      LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
-                        << " has been removed from the load queue.\n");
-    }
-    if (!LoadBarriers.empty() && Index == *LoadBarriers.begin()) {
-      LLVM_DEBUG(
-          dbgs() << "[LSUnit]: Instruction idx=" << Index
-                 << " has been removed from the set of load barriers.\n");
-      LoadBarriers.erase(Index);
-    }
-  }
-
-  if (IsAStore) {
-    if (StoreQueue.erase(Index)) {
-      LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
-                        << " has been removed from the store queue.\n");
-    }
-
-    if (!StoreBarriers.empty() && Index == *StoreBarriers.begin()) {
-      LLVM_DEBUG(
-          dbgs() << "[LSUnit]: Instruction idx=" << Index
-                 << " has been removed from the set of store barriers.\n");
-      StoreBarriers.erase(Index);
-    }
-  }
-}
-
-} // namespace mca
-} // namespace llvm
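
To make the ordering rules in isReady() above concrete, here is a standalone restatement of the load case (a hypothetical helper, with barrier handling omitted): when aliasing is assumed, a load is ready only once every older store has drained from the store queue.

#include <set>

// Indices are program-order positions; a smaller index means older.
bool loadIsReady(unsigned LoadIdx, const std::set<unsigned> &StoreQueue,
                 bool NoAlias) {
  if (NoAlias)
    return true;                         // loads may always pass stores
  return StoreQueue.empty() ||           // no pending stores, or the load
         LoadIdx <= *StoreQueue.begin(); // is older than the oldest store
}
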
diff --git a/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp b/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp
deleted file mode 100644
index f96e4ca..0000000
--- a/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp
+++ /dev/null
@@ -1,491 +0,0 @@
-//===--------------------- RegisterFile.cpp ---------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the RegisterFile class.  This class is responsible
-/// for managing hardware register files and for tracking data dependencies
-/// between registers.
-///
-//===----------------------------------------------------------------------===//
-
-#include "HardwareUnits/RegisterFile.h"
-#include "Instruction.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "llvm-mca"
-
-namespace llvm {
-namespace mca {
-
-RegisterFile::RegisterFile(const MCSchedModel &SM, const MCRegisterInfo &mri,
-                           unsigned NumRegs)
-    : MRI(mri),
-      RegisterMappings(mri.getNumRegs(), {WriteRef(), RegisterRenamingInfo()}),
-      ZeroRegisters(mri.getNumRegs(), false) {
-  initialize(SM, NumRegs);
-}
-
-void RegisterFile::initialize(const MCSchedModel &SM, unsigned NumRegs) {
-  // Create a default register file that "sees" all the machine registers
-  // declared by the target. The number of physical registers in the default
-  // register file is set equal to `NumRegs`. A value of zero for `NumRegs`
-  // means: this register file has an unbounded number of physical registers.
-  RegisterFiles.emplace_back(NumRegs);
-  if (!SM.hasExtraProcessorInfo())
-    return;
-
-  // For each user defined register file, allocate a RegisterMappingTracker
-  // object. The size of every register file, as well as the mapping between
-  // register files and register classes is specified via tablegen.
-  const MCExtraProcessorInfo &Info = SM.getExtraProcessorInfo();
-
-  // Skip invalid register file at index 0.
-  for (unsigned I = 1, E = Info.NumRegisterFiles; I < E; ++I) {
-    const MCRegisterFileDesc &RF = Info.RegisterFiles[I];
-    assert(RF.NumPhysRegs && "Invalid PRF with zero physical registers!");
-
-    // The cost of a register definition is equivalent to the number of
-    // physical registers that are allocated at register renaming stage.
-    unsigned Length = RF.NumRegisterCostEntries;
-    const MCRegisterCostEntry *FirstElt =
-        &Info.RegisterCostTable[RF.RegisterCostEntryIdx];
-    addRegisterFile(RF, ArrayRef<MCRegisterCostEntry>(FirstElt, Length));
-  }
-}
-
-void RegisterFile::cycleStart() {
-  for (RegisterMappingTracker &RMT : RegisterFiles)
-    RMT.NumMoveEliminated = 0;
-}
-
-void RegisterFile::addRegisterFile(const MCRegisterFileDesc &RF,
-                                   ArrayRef<MCRegisterCostEntry> Entries) {
-  // A default register file is always allocated at index #0. That register file
-  // is mainly used to count the total number of mappings created by all
-  // register files at runtime. Users can limit the number of available physical
-  // registers in register file #0 through the command line flag
-  // `-register-file-size`.
-  unsigned RegisterFileIndex = RegisterFiles.size();
-  RegisterFiles.emplace_back(RF.NumPhysRegs, RF.MaxMovesEliminatedPerCycle,
-                             RF.AllowZeroMoveEliminationOnly);
-
-  // Special case where there is no register class identifier in the set.
-  // An empty set of register classes means: this register file contains all
-  // the physical registers specified by the target.
-  // We optimistically assume that a register can be renamed at the cost of a
-  // single physical register. The constructor of RegisterFile ensures that
-  // a RegisterMapping exists for each logical register defined by the Target.
-  if (Entries.empty())
-    return;
-
-  // Now update the cost of individual registers.
-  for (const MCRegisterCostEntry &RCE : Entries) {
-    const MCRegisterClass &RC = MRI.getRegClass(RCE.RegisterClassID);
-    for (const MCPhysReg Reg : RC) {
-      RegisterRenamingInfo &Entry = RegisterMappings[Reg].second;
-      IndexPlusCostPairTy &IPC = Entry.IndexPlusCost;
-      if (IPC.first && IPC.first != RegisterFileIndex) {
-        // The only register file that is allowed to overlap is the default
-        // register file at index #0. The analysis is inaccurate if register
-        // files overlap.
-        errs() << "warning: register " << MRI.getName(Reg)
-               << " defined in multiple register files.";
-      }
-      IPC = std::make_pair(RegisterFileIndex, RCE.Cost);
-      Entry.RenameAs = Reg;
-      Entry.AllowMoveElimination = RCE.AllowMoveElimination;
-
-      // Assume the same cost for each sub-register.
-      for (MCSubRegIterator I(Reg, &MRI); I.isValid(); ++I) {
-        RegisterRenamingInfo &OtherEntry = RegisterMappings[*I].second;
-        if (!OtherEntry.IndexPlusCost.first &&
-            (!OtherEntry.RenameAs ||
-             MRI.isSuperRegister(*I, OtherEntry.RenameAs))) {
-          OtherEntry.IndexPlusCost = IPC;
-          OtherEntry.RenameAs = Reg;
-        }
-      }
-    }
-  }
-}
-
-void RegisterFile::allocatePhysRegs(const RegisterRenamingInfo &Entry,
-                                    MutableArrayRef<unsigned> UsedPhysRegs) {
-  unsigned RegisterFileIndex = Entry.IndexPlusCost.first;
-  unsigned Cost = Entry.IndexPlusCost.second;
-  if (RegisterFileIndex) {
-    RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
-    RMT.NumUsedPhysRegs += Cost;
-    UsedPhysRegs[RegisterFileIndex] += Cost;
-  }
-
-  // Now update the default register mapping tracker.
-  RegisterFiles[0].NumUsedPhysRegs += Cost;
-  UsedPhysRegs[0] += Cost;
-}
-
-void RegisterFile::freePhysRegs(const RegisterRenamingInfo &Entry,
-                                MutableArrayRef<unsigned> FreedPhysRegs) {
-  unsigned RegisterFileIndex = Entry.IndexPlusCost.first;
-  unsigned Cost = Entry.IndexPlusCost.second;
-  if (RegisterFileIndex) {
-    RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
-    RMT.NumUsedPhysRegs -= Cost;
-    FreedPhysRegs[RegisterFileIndex] += Cost;
-  }
-
-  // Now update the default register mapping tracker.
-  RegisterFiles[0].NumUsedPhysRegs -= Cost;
-  FreedPhysRegs[0] += Cost;
-}
-
-void RegisterFile::addRegisterWrite(WriteRef Write,
-                                    MutableArrayRef<unsigned> UsedPhysRegs) {
-  WriteState &WS = *Write.getWriteState();
-  unsigned RegID = WS.getRegisterID();
-  assert(RegID && "Adding an invalid register definition?");
-
-  LLVM_DEBUG({
-    dbgs() << "RegisterFile: addRegisterWrite [ " << Write.getSourceIndex()
-           << ", " << MRI.getName(RegID) << "]\n";
-  });
-
-  // If RenameAs is equal to RegID, then RegID is subject to register renaming
-  // and false dependencies on RegID are all eliminated.
-
-  // If RenameAs references the invalid register, then we optimistically assume
-  // that it can be renamed. In the absence of tablegen descriptors for register
-  // files, RenameAs is always set to the invalid register ID.  In all other
-  // cases, RenameAs must be either equal to RegID, or it must reference a
-  // super-register of RegID.
-
-  // If RenameAs is a super-register of RegID, then a write to RegID always has
-  // a false dependency on RenameAs. The only exception is when the write
-  // implicitly clears the upper portion of the underlying register.
-  // If a write clears its super-registers, then it is renamed as `RenameAs`.
-  bool IsWriteZero = WS.isWriteZero();
-  bool IsEliminated = WS.isEliminated();
-  bool ShouldAllocatePhysRegs = !IsWriteZero && !IsEliminated;
-  const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
-  WS.setPRF(RRI.IndexPlusCost.first);
-
-  if (RRI.RenameAs && RRI.RenameAs != RegID) {
-    RegID = RRI.RenameAs;
-    WriteRef &OtherWrite = RegisterMappings[RegID].first;
-
-    if (!WS.clearsSuperRegisters()) {
-      // The processor keeps the definition of `RegID` together with register
-      // `RenameAs`. Since this partial write is not renamed, no physical
-      // register is allocated.
-      ShouldAllocatePhysRegs = false;
-
-      WriteState *OtherWS = OtherWrite.getWriteState();
-      if (OtherWS && (OtherWrite.getSourceIndex() != Write.getSourceIndex())) {
-        // This partial write has a false dependency on RenameAs.
-        assert(!IsEliminated && "Unexpected partial update!");
-        OtherWS->addUser(&WS);
-      }
-    }
-  }
-
-  // Update zero registers.
-  unsigned ZeroRegisterID =
-      WS.clearsSuperRegisters() ? RegID : WS.getRegisterID();
-  if (IsWriteZero) {
-    ZeroRegisters.setBit(ZeroRegisterID);
-    for (MCSubRegIterator I(ZeroRegisterID, &MRI); I.isValid(); ++I)
-      ZeroRegisters.setBit(*I);
-  } else {
-    ZeroRegisters.clearBit(ZeroRegisterID);
-    for (MCSubRegIterator I(ZeroRegisterID, &MRI); I.isValid(); ++I)
-      ZeroRegisters.clearBit(*I);
-  }
-
-  // If this move has been eliminated, then the call to tryEliminateMove
-  // should have already updated all the register mappings.
-  if (!IsEliminated) {
-    // Update the mapping for register RegID including its sub-registers.
-    RegisterMappings[RegID].first = Write;
-    RegisterMappings[RegID].second.AliasRegID = 0U;
-    for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
-      RegisterMappings[*I].first = Write;
-      RegisterMappings[*I].second.AliasRegID = 0U;
-    }
-
-    // No physical registers are allocated for instructions that are optimized
-    // in hardware. For example, zero-latency data-dependency breaking
-    // instructions don't consume physical registers.
-    if (ShouldAllocatePhysRegs)
-      allocatePhysRegs(RegisterMappings[RegID].second, UsedPhysRegs);
-  }
-
-  if (!WS.clearsSuperRegisters())
-    return;
-
-  for (MCSuperRegIterator I(RegID, &MRI); I.isValid(); ++I) {
-    if (!IsEliminated) {
-      RegisterMappings[*I].first = Write;
-      RegisterMappings[*I].second.AliasRegID = 0U;
-    }
-
-    if (IsWriteZero)
-      ZeroRegisters.setBit(*I);
-    else
-      ZeroRegisters.clearBit(*I);
-  }
-}
-
-void RegisterFile::removeRegisterWrite(
-    const WriteState &WS, MutableArrayRef<unsigned> FreedPhysRegs) {
-  // Early exit if this write was eliminated. A write eliminated at register
-  // renaming stage generates an alias, and it is not added to the PRF.
-  if (WS.isEliminated())
-    return;
-
-  unsigned RegID = WS.getRegisterID();
-
-  assert(RegID != 0 && "Invalidating an already invalid register?");
-  assert(WS.getCyclesLeft() != UNKNOWN_CYCLES &&
-         "Invalidating a write of unknown cycles!");
-  assert(WS.getCyclesLeft() <= 0 && "Invalid cycles left for this write!");
-
-  bool ShouldFreePhysRegs = !WS.isWriteZero();
-  unsigned RenameAs = RegisterMappings[RegID].second.RenameAs;
-  if (RenameAs && RenameAs != RegID) {
-    RegID = RenameAs;
-
-    if (!WS.clearsSuperRegisters()) {
-      // Keep the definition of `RegID` together with register `RenameAs`.
-      ShouldFreePhysRegs = false;
-    }
-  }
-
-  if (ShouldFreePhysRegs)
-    freePhysRegs(RegisterMappings[RegID].second, FreedPhysRegs);
-
-  WriteRef &WR = RegisterMappings[RegID].first;
-  if (WR.getWriteState() == &WS)
-    WR.invalidate();
-
-  for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
-    WriteRef &OtherWR = RegisterMappings[*I].first;
-    if (OtherWR.getWriteState() == &WS)
-      OtherWR.invalidate();
-  }
-
-  if (!WS.clearsSuperRegisters())
-    return;
-
-  for (MCSuperRegIterator I(RegID, &MRI); I.isValid(); ++I) {
-    WriteRef &OtherWR = RegisterMappings[*I].first;
-    if (OtherWR.getWriteState() == &WS)
-      OtherWR.invalidate();
-  }
-}
-
-bool RegisterFile::tryEliminateMove(WriteState &WS, ReadState &RS) {
-  const RegisterMapping &RMFrom = RegisterMappings[RS.getRegisterID()];
-  const RegisterMapping &RMTo = RegisterMappings[WS.getRegisterID()];
-
-  // From and To must be owned by the same PRF.
-  const RegisterRenamingInfo &RRIFrom = RMFrom.second;
-  const RegisterRenamingInfo &RRITo = RMTo.second;
-  unsigned RegisterFileIndex = RRIFrom.IndexPlusCost.first;
-  if (RegisterFileIndex != RRITo.IndexPlusCost.first)
-    return false;
-
-  // We only allow move elimination for writes that update a full physical
-  // register. On X86, move elimination is possible with 32-bit general purpose
-  // registers because writes to those registers are not partial writes.  If a
-  // register move is a partial write, then we conservatively assume that move
-  // elimination fails, since it would either trigger a partial update, or
-  // require the issue of a merge opcode.
-  //
-  // Note that this constraint may be lifted in the future.  For example, we
-  // make this model more flexible, and let users customize the set of registers
-  // (i.e. register classes) that allow move elimination.
-  //
-  // For now, we assume that there is a strong correlation between registers
-  // that allow move elimination, and how those same registers are renamed in
-  // hardware.
-  if (RRITo.RenameAs && RRITo.RenameAs != WS.getRegisterID()) {
-    // Early exit if the PRF doesn't support move elimination for this register.
-    if (!RegisterMappings[RRITo.RenameAs].second.AllowMoveElimination)
-      return false;
-    if (!WS.clearsSuperRegisters())
-      return false;
-  }
-
-  RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
-  if (RMT.MaxMoveEliminatedPerCycle &&
-      RMT.NumMoveEliminated == RMT.MaxMoveEliminatedPerCycle)
-    return false;
-
-  bool IsZeroMove = ZeroRegisters[RS.getRegisterID()];
-  if (RMT.AllowZeroMoveEliminationOnly && !IsZeroMove)
-    return false;
-
-  MCPhysReg FromReg = RS.getRegisterID();
-  MCPhysReg ToReg = WS.getRegisterID();
-
-  // Construct an alias.
-  MCPhysReg AliasReg = FromReg;
-  if (RRIFrom.RenameAs)
-    AliasReg = RRIFrom.RenameAs;
-
-  const RegisterRenamingInfo &RMAlias = RegisterMappings[AliasReg].second;
-  if (RMAlias.AliasRegID)
-    AliasReg = RMAlias.AliasRegID;
-
-  if (AliasReg != ToReg) {
-    RegisterMappings[ToReg].second.AliasRegID = AliasReg;
-    for (MCSubRegIterator I(ToReg, &MRI); I.isValid(); ++I)
-      RegisterMappings[*I].second.AliasRegID = AliasReg;
-  }
-
-  RMT.NumMoveEliminated++;
-  if (IsZeroMove) {
-    WS.setWriteZero();
-    RS.setReadZero();
-  }
-  WS.setEliminated();
-
-  return true;
-}
-
-void RegisterFile::collectWrites(const ReadState &RS,
-                                 SmallVectorImpl<WriteRef> &Writes) const {
-  unsigned RegID = RS.getRegisterID();
-  assert(RegID && RegID < RegisterMappings.size());
-  LLVM_DEBUG(dbgs() << "RegisterFile: collecting writes for register "
-                    << MRI.getName(RegID) << '\n');
-
-  // Check if this is an alias.
-  const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
-  if (RRI.AliasRegID)
-    RegID = RRI.AliasRegID;
-
-  const WriteRef &WR = RegisterMappings[RegID].first;
-  if (WR.isValid())
-    Writes.push_back(WR);
-
-  // Handle potential partial register updates.
-  for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
-    const WriteRef &WR = RegisterMappings[*I].first;
-    if (WR.isValid())
-      Writes.push_back(WR);
-  }
-
-  // Remove duplicate entries and resize the input vector.
-  if (Writes.size() > 1) {
-    sort(Writes, [](const WriteRef &Lhs, const WriteRef &Rhs) {
-      return Lhs.getWriteState() < Rhs.getWriteState();
-    });
-    auto It = std::unique(Writes.begin(), Writes.end());
-    Writes.resize(std::distance(Writes.begin(), It));
-  }
-
-  LLVM_DEBUG({
-    for (const WriteRef &WR : Writes) {
-      const WriteState &WS = *WR.getWriteState();
-      dbgs() << "[PRF] Found a dependent use of Register "
-             << MRI.getName(WS.getRegisterID()) << " (defined by instruction #"
-             << WR.getSourceIndex() << ")\n";
-    }
-  });
-}
-
-void RegisterFile::addRegisterRead(ReadState &RS,
-                                   SmallVectorImpl<WriteRef> &Defs) const {
-  unsigned RegID = RS.getRegisterID();
-  const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
-  RS.setPRF(RRI.IndexPlusCost.first);
-  if (RS.isIndependentFromDef())
-    return;
-
-  if (ZeroRegisters[RS.getRegisterID()])
-    RS.setReadZero();
-  collectWrites(RS, Defs);
-  RS.setDependentWrites(Defs.size());
-}
-
-unsigned RegisterFile::isAvailable(ArrayRef<unsigned> Regs) const {
-  SmallVector<unsigned, 4> NumPhysRegs(getNumRegisterFiles());
-
-  // Find how many new mappings must be created for each register file.
-  for (const unsigned RegID : Regs) {
-    const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
-    const IndexPlusCostPairTy &Entry = RRI.IndexPlusCost;
-    if (Entry.first)
-      NumPhysRegs[Entry.first] += Entry.second;
-    NumPhysRegs[0] += Entry.second;
-  }
-
-  unsigned Response = 0;
-  for (unsigned I = 0, E = getNumRegisterFiles(); I < E; ++I) {
-    unsigned NumRegs = NumPhysRegs[I];
-    if (!NumRegs)
-      continue;
-
-    const RegisterMappingTracker &RMT = RegisterFiles[I];
-    if (!RMT.NumPhysRegs) {
-      // The register file has an unbounded number of microarchitectural
-      // registers.
-      continue;
-    }
-
-    if (RMT.NumPhysRegs < NumRegs) {
-      // The current register file is too small. This may occur if the number of
-      // microarchitectural registers in register file #0 was changed by the
-      // user via flag -register-file-size. Alternatively, the scheduling model
-      // specified too small a number of registers for this register file.
-      LLVM_DEBUG(dbgs() << "Not enough registers in the register file.\n");
-
-      // FIXME: Normalize the instruction register count to match the
-      // NumPhysRegs value.  This is a highly unusual case, and is not expected
-      // to occur.  This normalization is hiding an inconsistency in either the
-      // scheduling model or in the value that the user might have specified
-      // for NumPhysRegs.
-      NumRegs = RMT.NumPhysRegs;
-    }
-
-    if (RMT.NumPhysRegs < (RMT.NumUsedPhysRegs + NumRegs))
-      Response |= (1U << I);
-  }
-
-  return Response;
-}
-
-#ifndef NDEBUG
-void RegisterFile::dump() const {
-  for (unsigned I = 0, E = MRI.getNumRegs(); I < E; ++I) {
-    const RegisterMapping &RM = RegisterMappings[I];
-    const RegisterRenamingInfo &RRI = RM.second;
-    if (ZeroRegisters[I]) {
-      dbgs() << MRI.getName(I) << ", " << I
-             << ", PRF=" << RRI.IndexPlusCost.first
-             << ", Cost=" << RRI.IndexPlusCost.second
-             << ", RenameAs=" << RRI.RenameAs << ", IsZero=" << ZeroRegisters[I]
-             << ",";
-      RM.first.dump();
-      dbgs() << '\n';
-    }
-  }
-
-  for (unsigned I = 0, E = getNumRegisterFiles(); I < E; ++I) {
-    dbgs() << "Register File #" << I;
-    const RegisterMappingTracker &RMT = RegisterFiles[I];
-    dbgs() << "\n  TotalMappings:        " << RMT.NumPhysRegs
-           << "\n  NumUsedMappings:      " << RMT.NumUsedPhysRegs << '\n';
-  }
-}
-#endif
-
-} // namespace mca
-} // namespace llvm
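
The bitmask returned by isAvailable() above encodes one bit per register file. A hypothetical caller-side decode (printFullRegisterFiles is not a real API) might look like:

#include <cstdio>

// Bit I of Response set means register file #I cannot satisfy the pending
// register writes this cycle.
void printFullRegisterFiles(unsigned Response, unsigned NumRegisterFiles) {
  for (unsigned I = 0; I < NumRegisterFiles; ++I)
    if (Response & (1U << I))
      std::printf("register file #%u is out of physical registers\n", I);
}
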
diff --git a/tools/llvm-mca/lib/HardwareUnits/ResourceManager.cpp b/tools/llvm-mca/lib/HardwareUnits/ResourceManager.cpp
deleted file mode 100644
index f12238a..0000000
--- a/tools/llvm-mca/lib/HardwareUnits/ResourceManager.cpp
+++ /dev/null
@@ -1,326 +0,0 @@
-//===--------------------- ResourceManager.cpp ------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// The classes here represent processor resource units and their management
-/// strategy.  These classes are managed by the Scheduler.
-///
-//===----------------------------------------------------------------------===//
-
-#include "HardwareUnits/ResourceManager.h"
-#include "Support.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-namespace mca {
-
-#define DEBUG_TYPE "llvm-mca"
-ResourceStrategy::~ResourceStrategy() = default;
-
-uint64_t DefaultResourceStrategy::select(uint64_t ReadyMask) {
-  // This method assumes that ReadyMask cannot be zero.
-  uint64_t CandidateMask = ReadyMask & NextInSequenceMask;
-  if (CandidateMask) {
-    CandidateMask = PowerOf2Floor(CandidateMask);
-    NextInSequenceMask &= (CandidateMask | (CandidateMask - 1));
-    return CandidateMask;
-  }
-
-  NextInSequenceMask = ResourceUnitMask ^ RemovedFromNextInSequence;
-  RemovedFromNextInSequence = 0;
-  CandidateMask = ReadyMask & NextInSequenceMask;
-
-  if (CandidateMask) {
-    CandidateMask = PowerOf2Floor(CandidateMask);
-    NextInSequenceMask &= (CandidateMask | (CandidateMask - 1));
-    return CandidateMask;
-  }
-
-  NextInSequenceMask = ResourceUnitMask;
-  CandidateMask = PowerOf2Floor(ReadyMask & NextInSequenceMask);
-  NextInSequenceMask &= (CandidateMask | (CandidateMask - 1));
-  return CandidateMask;
-}
-
-void DefaultResourceStrategy::used(uint64_t Mask) {
-  if (Mask > NextInSequenceMask) {
-    RemovedFromNextInSequence |= Mask;
-    return;
-  }
- 
-  NextInSequenceMask &= (~Mask);
-  if (NextInSequenceMask)
-    return;
-
-  NextInSequenceMask = ResourceUnitMask ^ RemovedFromNextInSequence;
-  RemovedFromNextInSequence = 0;
-}
-
-ResourceState::ResourceState(const MCProcResourceDesc &Desc, unsigned Index,
-                             uint64_t Mask)
-    : ProcResourceDescIndex(Index), ResourceMask(Mask),
-      BufferSize(Desc.BufferSize),
-      IsAGroup(countPopulation(ResourceMask) > 1) {
-  if (IsAGroup)
-    ResourceSizeMask = ResourceMask ^ PowerOf2Floor(ResourceMask);
-  else
-    ResourceSizeMask = (1ULL << Desc.NumUnits) - 1;
-  ReadyMask = ResourceSizeMask;
-  AvailableSlots = BufferSize == -1 ? 0U : static_cast<unsigned>(BufferSize);
-  Unavailable = false;
-}
-
-bool ResourceState::isReady(unsigned NumUnits) const {
-  return (!isReserved() || isADispatchHazard()) &&
-         countPopulation(ReadyMask) >= NumUnits;
-}
-
-ResourceStateEvent ResourceState::isBufferAvailable() const {
-  if (isADispatchHazard() && isReserved())
-    return RS_RESERVED;
-  if (!isBuffered() || AvailableSlots)
-    return RS_BUFFER_AVAILABLE;
-  return RS_BUFFER_UNAVAILABLE;
-}
-
-#ifndef NDEBUG
-void ResourceState::dump() const {
-  dbgs() << "MASK: " << ResourceMask << ", SIZE_MASK: " << ResourceSizeMask
-         << ", RDYMASK: " << ReadyMask << ", BufferSize=" << BufferSize
-         << ", AvailableSlots=" << AvailableSlots
-         << ", Reserved=" << Unavailable << '\n';
-}
-#endif
-
-static unsigned getResourceStateIndex(uint64_t Mask) {
-  return std::numeric_limits<uint64_t>::digits - countLeadingZeros(Mask);
-}
-
-static std::unique_ptr<ResourceStrategy>
-getStrategyFor(const ResourceState &RS) {
-  if (RS.isAResourceGroup() || RS.getNumUnits() > 1)
-    return llvm::make_unique<DefaultResourceStrategy>(RS.getReadyMask());
-  return std::unique_ptr<ResourceStrategy>(nullptr);
-}
-
-ResourceManager::ResourceManager(const MCSchedModel &SM) {
-  computeProcResourceMasks(SM, ProcResID2Mask);
-  Resources.resize(SM.getNumProcResourceKinds());
-  Strategies.resize(SM.getNumProcResourceKinds());
-
-  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
-    uint64_t Mask = ProcResID2Mask[I];
-    unsigned Index = getResourceStateIndex(Mask);
-    Resources[Index] =
-        llvm::make_unique<ResourceState>(*SM.getProcResource(I), I, Mask);
-    Strategies[Index] = getStrategyFor(*Resources[Index]);
-  }
-}
-
-void ResourceManager::setCustomStrategyImpl(std::unique_ptr<ResourceStrategy> S,
-                                            uint64_t ResourceMask) {
-  unsigned Index = getResourceStateIndex(ResourceMask);
-  assert(Index < Resources.size() && "Invalid processor resource index!");
-  assert(S && "Unexpected null strategy in input!");
-  Strategies[Index] = std::move(S);
-}
-
-unsigned ResourceManager::resolveResourceMask(uint64_t Mask) const {
-  return Resources[getResourceStateIndex(Mask)]->getProcResourceID();
-}
-
-unsigned ResourceManager::getNumUnits(uint64_t ResourceID) const {
-  return Resources[getResourceStateIndex(ResourceID)]->getNumUnits();
-}
-
-// Returns the actual resource consumed by this Use: the first element is the
-// primary resource ID, the second is the specific sub-resource ID.
-ResourceRef ResourceManager::selectPipe(uint64_t ResourceID) {
-  unsigned Index = getResourceStateIndex(ResourceID);
-  ResourceState &RS = *Resources[Index];
-  assert(RS.isReady() && "No available units to select!");
-
-  // Special case where RS is not a group, and it only declares a single
-  // resource unit.
-  if (!RS.isAResourceGroup() && RS.getNumUnits() == 1)
-    return std::make_pair(ResourceID, RS.getReadyMask());
-
-  uint64_t SubResourceID = Strategies[Index]->select(RS.getReadyMask());
-  if (RS.isAResourceGroup())
-    return selectPipe(SubResourceID);
-  return std::make_pair(ResourceID, SubResourceID);
-}
-
-void ResourceManager::use(const ResourceRef &RR) {
-  // Mark the sub-resource referenced by RR as used.
-  unsigned RSID = getResourceStateIndex(RR.first);
-  ResourceState &RS = *Resources[RSID];
-  RS.markSubResourceAsUsed(RR.second);
-  // Remember to update the resource strategy for non-group resources with
-  // multiple units.
-  if (RS.getNumUnits() > 1)
-    Strategies[RSID]->used(RR.second);
-
-  // If there are still available units in RR.first,
-  // then we are done.
-  if (RS.isReady())
-    return;
-
-  // Notify other resources that RR.first is no longer available.
-  for (std::unique_ptr<ResourceState> &Res : Resources) {
-    ResourceState &Current = *Res;
-    if (!Current.isAResourceGroup() || Current.getResourceMask() == RR.first)
-      continue;
-
-    if (Current.containsResource(RR.first)) {
-      unsigned Index = getResourceStateIndex(Current.getResourceMask());
-      Current.markSubResourceAsUsed(RR.first);
-      Strategies[Index]->used(RR.first);
-    }
-  }
-}
-
-void ResourceManager::release(const ResourceRef &RR) {
-  ResourceState &RS = *Resources[getResourceStateIndex(RR.first)];
-  bool WasFullyUsed = !RS.isReady();
-  RS.releaseSubResource(RR.second);
-  if (!WasFullyUsed)
-    return;
-
-  for (std::unique_ptr<ResourceState> &Res : Resources) {
-    ResourceState &Current = *Res;
-    if (!Current.isAResourceGroup() || Current.getResourceMask() == RR.first)
-      continue;
-
-    if (Current.containsResource(RR.first))
-      Current.releaseSubResource(RR.first);
-  }
-}
-
-ResourceStateEvent
-ResourceManager::canBeDispatched(ArrayRef<uint64_t> Buffers) const {
-  ResourceStateEvent Result = ResourceStateEvent::RS_BUFFER_AVAILABLE;
-  for (uint64_t Buffer : Buffers) {
-    ResourceState &RS = *Resources[getResourceStateIndex(Buffer)];
-    Result = RS.isBufferAvailable();
-    if (Result != ResourceStateEvent::RS_BUFFER_AVAILABLE)
-      break;
-  }
-  return Result;
-}
-
-void ResourceManager::reserveBuffers(ArrayRef<uint64_t> Buffers) {
-  for (const uint64_t Buffer : Buffers) {
-    ResourceState &RS = *Resources[getResourceStateIndex(Buffer)];
-    assert(RS.isBufferAvailable() == ResourceStateEvent::RS_BUFFER_AVAILABLE);
-    RS.reserveBuffer();
-
-    if (RS.isADispatchHazard()) {
-      assert(!RS.isReserved());
-      RS.setReserved();
-    }
-  }
-}
-
-void ResourceManager::releaseBuffers(ArrayRef<uint64_t> Buffers) {
-  for (const uint64_t R : Buffers)
-    Resources[getResourceStateIndex(R)]->releaseBuffer();
-}
-
-bool ResourceManager::canBeIssued(const InstrDesc &Desc) const {
-  return all_of(
-      Desc.Resources, [&](const std::pair<uint64_t, const ResourceUsage> &E) {
-        unsigned NumUnits = E.second.isReserved() ? 0U : E.second.NumUnits;
-        unsigned Index = getResourceStateIndex(E.first);
-        return Resources[Index]->isReady(NumUnits);
-      });
-}
-
-// Returns true if all resources are in-order, and there is at least one
-// resource which is a dispatch hazard (BufferSize = 0).
-bool ResourceManager::mustIssueImmediately(const InstrDesc &Desc) const {
-  if (!canBeIssued(Desc))
-    return false;
-  bool AllInOrderResources = all_of(Desc.Buffers, [&](uint64_t BufferMask) {
-    unsigned Index = getResourceStateIndex(BufferMask);
-    const ResourceState &Resource = *Resources[Index];
-    return Resource.isInOrder() || Resource.isADispatchHazard();
-  });
-  if (!AllInOrderResources)
-    return false;
-
-  return any_of(Desc.Buffers, [&](uint64_t BufferMask) {
-    return Resources[getResourceStateIndex(BufferMask)]->isADispatchHazard();
-  });
-}
-
-void ResourceManager::issueInstruction(
-    const InstrDesc &Desc,
-    SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes) {
-  for (const std::pair<uint64_t, ResourceUsage> &R : Desc.Resources) {
-    const CycleSegment &CS = R.second.CS;
-    if (!CS.size()) {
-      releaseResource(R.first);
-      continue;
-    }
-
-    assert(CS.begin() == 0 && "Invalid {Start, End} cycles!");
-    if (!R.second.isReserved()) {
-      ResourceRef Pipe = selectPipe(R.first);
-      use(Pipe);
-      BusyResources[Pipe] += CS.size();
-      // Replace the resource mask with a valid processor resource index.
-      const ResourceState &RS = *Resources[getResourceStateIndex(Pipe.first)];
-      Pipe.first = RS.getProcResourceID();
-      Pipes.emplace_back(std::pair<ResourceRef, ResourceCycles>(
-          Pipe, ResourceCycles(CS.size())));
-    } else {
-      assert((countPopulation(R.first) > 1) && "Expected a group!");
-      // Mark this group as reserved.
-      assert(R.second.isReserved());
-      reserveResource(R.first);
-      BusyResources[ResourceRef(R.first, R.first)] += CS.size();
-    }
-  }
-}
-
-void ResourceManager::cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed) {
-  for (std::pair<ResourceRef, unsigned> &BR : BusyResources) {
-    if (BR.second)
-      BR.second--;
-    if (!BR.second) {
-      // Release this resource.
-      const ResourceRef &RR = BR.first;
-
-      if (countPopulation(RR.first) == 1)
-        release(RR);
-
-      releaseResource(RR.first);
-      ResourcesFreed.push_back(RR);
-    }
-  }
-
-  for (const ResourceRef &RF : ResourcesFreed)
-    BusyResources.erase(RF);
-}
-
-void ResourceManager::reserveResource(uint64_t ResourceID) {
-  ResourceState &Resource = *Resources[getResourceStateIndex(ResourceID)];
-  assert(!Resource.isReserved());
-  Resource.setReserved();
-}
-
-void ResourceManager::releaseResource(uint64_t ResourceID) {
-  ResourceState &Resource = *Resources[getResourceStateIndex(ResourceID)];
-  Resource.clearReserved();
-}
-
-} // namespace mca
-} // namespace llvm
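
getResourceStateIndex() above maps a resource mask to a vector slot using the position of the mask's most significant bit. A portable, LLVM-free equivalent (illustration only) is:

#include <cstdint>

// Returns 1 + position of the highest set bit, matching
// digits - countLeadingZeros(Mask) for a nonzero 64-bit Mask.
unsigned resourceStateIndex(uint64_t Mask) {
  unsigned Index = 0;
  while (Mask) {
    ++Index;
    Mask >>= 1;
  }
  return Index;
}
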
diff --git a/tools/llvm-mca/lib/HardwareUnits/RetireControlUnit.cpp b/tools/llvm-mca/lib/HardwareUnits/RetireControlUnit.cpp
deleted file mode 100644
index bd7b411..0000000
--- a/tools/llvm-mca/lib/HardwareUnits/RetireControlUnit.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-//===---------------------- RetireControlUnit.cpp ---------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file simulates the hardware responsible for retiring instructions.
-///
-//===----------------------------------------------------------------------===//
-
-#include "HardwareUnits/RetireControlUnit.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "llvm-mca"
-
-namespace llvm {
-namespace mca {
-
-RetireControlUnit::RetireControlUnit(const MCSchedModel &SM)
-    : NextAvailableSlotIdx(0), CurrentInstructionSlotIdx(0),
-      AvailableSlots(SM.MicroOpBufferSize), MaxRetirePerCycle(0) {
-  // Check if the scheduling model provides extra information about the machine
-  // processor. If so, then use that information to set the reorder buffer size
-  // and the maximum number of instructions retired per cycle.
-  if (SM.hasExtraProcessorInfo()) {
-    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
-    if (EPI.ReorderBufferSize)
-      AvailableSlots = EPI.ReorderBufferSize;
-    MaxRetirePerCycle = EPI.MaxRetirePerCycle;
-  }
-
-  assert(AvailableSlots && "Invalid reorder buffer size!");
-  Queue.resize(AvailableSlots);
-}
-
-// Reserves a number of slots, and returns a new token.
-unsigned RetireControlUnit::reserveSlot(const InstRef &IR,
-                                        unsigned NumMicroOps) {
-  assert(isAvailable(NumMicroOps) && "Reorder Buffer unavailable!");
-  unsigned NormalizedQuantity =
-      std::min(NumMicroOps, static_cast<unsigned>(Queue.size()));
-  // Zero latency instructions may have zero uOps. Artificially bump this
-  // value to 1. Although zero latency instructions don't consume scheduler
-  // resources, they still consume one slot in the retire queue.
-  NormalizedQuantity = std::max(NormalizedQuantity, 1U);
-  unsigned TokenID = NextAvailableSlotIdx;
-  Queue[NextAvailableSlotIdx] = {IR, NormalizedQuantity, false};
-  NextAvailableSlotIdx += NormalizedQuantity;
-  NextAvailableSlotIdx %= Queue.size();
-  AvailableSlots -= NormalizedQuantity;
-  return TokenID;
-}
-
-const RetireControlUnit::RUToken &RetireControlUnit::peekCurrentToken() const {
-  return Queue[CurrentInstructionSlotIdx];
-}
-
-void RetireControlUnit::consumeCurrentToken() {
-  RetireControlUnit::RUToken &Current = Queue[CurrentInstructionSlotIdx];
-  assert(Current.NumSlots && "Reserved zero slots?");
-  assert(Current.IR && "Invalid RUToken in the RCU queue.");
-  Current.IR.getInstruction()->retire();
-
-  // Update the slot index to be the next item in the circular queue.
-  CurrentInstructionSlotIdx += Current.NumSlots;
-  CurrentInstructionSlotIdx %= Queue.size();
-  AvailableSlots += Current.NumSlots;
-}
-
-void RetireControlUnit::onInstructionExecuted(unsigned TokenID) {
-  assert(Queue.size() > TokenID);
-  assert(Queue[TokenID].Executed == false && Queue[TokenID].IR);
-  Queue[TokenID].Executed = true;
-}
-
-#ifndef NDEBUG
-void RetireControlUnit::dump() const {
-  dbgs() << "Retire Unit: { Total Slots=" << Queue.size()
-         << ", Available Slots=" << AvailableSlots << " }\n";
-}
-#endif
-
-} // namespace mca
-} // namespace llvm
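
The retire queue above is a circular buffer indexed with modular arithmetic. This toy snippet (sizes hypothetical) traces how reserveSlot() advances NextAvailableSlotIdx past the end of the queue and wraps around:

#include <cstdio>

int main() {
  const unsigned QueueSize = 8;
  unsigned NextAvailableSlotIdx = 6;
  unsigned NumSlots = 4;                 // e.g. a 4-uop instruction
  unsigned TokenID = NextAvailableSlotIdx;
  NextAvailableSlotIdx = (NextAvailableSlotIdx + NumSlots) % QueueSize;
  std::printf("token=%u, next free slot=%u\n", TokenID,
              NextAvailableSlotIdx);     // token=6, next free slot=2
  return 0;
}
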
diff --git a/tools/llvm-mca/lib/HardwareUnits/Scheduler.cpp b/tools/llvm-mca/lib/HardwareUnits/Scheduler.cpp
deleted file mode 100644
index f0ac59e..0000000
--- a/tools/llvm-mca/lib/HardwareUnits/Scheduler.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-//===--------------------- Scheduler.cpp ------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// A scheduler for processor resource units and processor resource groups.
-//
-//===----------------------------------------------------------------------===//
-
-#include "HardwareUnits/Scheduler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-namespace mca {
-
-#define DEBUG_TYPE "llvm-mca"
-
-void Scheduler::initializeStrategy(std::unique_ptr<SchedulerStrategy> S) {
-  // Ensure we have a valid (non-null) strategy object.
-  Strategy = S ? std::move(S) : llvm::make_unique<DefaultSchedulerStrategy>();
-}
-
-// Anchor the vtable of SchedulerStrategy and DefaultSchedulerStrategy.
-SchedulerStrategy::~SchedulerStrategy() = default;
-DefaultSchedulerStrategy::~DefaultSchedulerStrategy() = default;
-
-#ifndef NDEBUG
-void Scheduler::dump() const {
-  dbgs() << "[SCHEDULER]: WaitSet size is: " << WaitSet.size() << '\n';
-  dbgs() << "[SCHEDULER]: ReadySet size is: " << ReadySet.size() << '\n';
-  dbgs() << "[SCHEDULER]: IssuedSet size is: " << IssuedSet.size() << '\n';
-  Resources->dump();
-}
-#endif
-
-Scheduler::Status Scheduler::isAvailable(const InstRef &IR) const {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-
-  switch (Resources->canBeDispatched(Desc.Buffers)) {
-  case ResourceStateEvent::RS_BUFFER_UNAVAILABLE:
-    return Scheduler::SC_BUFFERS_FULL;
-  case ResourceStateEvent::RS_RESERVED:
-    return Scheduler::SC_DISPATCH_GROUP_STALL;
-  case ResourceStateEvent::RS_BUFFER_AVAILABLE:
-    break;
-  }
-
-  // Give lower priority to LSUnit stall events.
-  switch (LSU.isAvailable(IR)) {
-  case LSUnit::LSU_LQUEUE_FULL:
-    return Scheduler::SC_LOAD_QUEUE_FULL;
-  case LSUnit::LSU_SQUEUE_FULL:
-    return Scheduler::SC_STORE_QUEUE_FULL;
-  case LSUnit::LSU_AVAILABLE:
-    return Scheduler::SC_AVAILABLE;
-  }
-
-  llvm_unreachable("Don't know how to process this LSU state result!");
-}
-
-void Scheduler::issueInstructionImpl(
-    InstRef &IR,
-    SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &UsedResources) {
-  Instruction *IS = IR.getInstruction();
-  const InstrDesc &D = IS->getDesc();
-
-  // Issue the instruction and collect all the consumed resources
-  // into a vector. That vector is then used to notify the listener.
-  Resources->issueInstruction(D, UsedResources);
-
-  // Notify the instruction that it started executing.
-  // This updates the internal state of each write.
-  IS->execute();
-
-  if (IS->isExecuting())
-    IssuedSet.emplace_back(IR);
-  else if (IS->isExecuted())
-    LSU.onInstructionExecuted(IR);
-}
-
-// Release the buffered resources and issue the instruction.
-void Scheduler::issueInstruction(
-    InstRef &IR,
-    SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &UsedResources,
-    SmallVectorImpl<InstRef> &ReadyInstructions) {
-  const Instruction &Inst = *IR.getInstruction();
-  bool HasDependentUsers = Inst.hasDependentUsers();
-
-  Resources->releaseBuffers(Inst.getDesc().Buffers);
-  issueInstructionImpl(IR, UsedResources);
-  // Instructions that have been issued during this cycle might have unblocked
-  // other dependent instructions. Dependent instructions may be issued during
-  // this same cycle if operands have ReadAdvance entries.  Promote those
-  // instructions to the ReadySet and notify the caller that those are ready.
-  if (HasDependentUsers)
-    promoteToReadySet(ReadyInstructions);
-}
-
-void Scheduler::promoteToReadySet(SmallVectorImpl<InstRef> &Ready) {
-  // Scan the set of waiting instructions and promote them to the
-  // ready queue if operands are all ready.
-  unsigned RemovedElements = 0;
-  for (auto I = WaitSet.begin(), E = WaitSet.end(); I != E;) {
-    InstRef &IR = *I;
-    if (!IR)
-      break;
-
-    // Check if this instruction has become ready. If it is not yet marked
-    // as ready, force a state transition by calling method 'update()'.
-    Instruction &IS = *IR.getInstruction();
-    if (!IS.isReady())
-      IS.update();
-
-    // Check if there are still unsolved data dependencies.
-    if (!isReady(IR)) {
-      ++I;
-      continue;
-    }
-
-    Ready.emplace_back(IR);
-    ReadySet.emplace_back(IR);
-
-    IR.invalidate();
-    ++RemovedElements;
-    std::iter_swap(I, E - RemovedElements);
-  }
-
-  WaitSet.resize(WaitSet.size() - RemovedElements);
-}
-
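-// The swap-to-back compaction used by promoteToReadySet() above and
-// updateIssuedSet() below generalizes to any vector. A minimal sketch of
-// the idiom (hypothetical helper, not used in this file; assumes <vector>
-// and <algorithm> are available):
-template <typename T, typename Pred>
-static void unstableRemoveIf(std::vector<T> &V, Pred P) {
-  unsigned RemovedElements = 0;
-  for (auto I = V.begin(), E = V.end(); I != E - RemovedElements;) {
-    if (!P(*I)) {
-      ++I;
-      continue;
-    }
-    // Swap the matching element past the shrinking logical end.
-    ++RemovedElements;
-    std::iter_swap(I, E - RemovedElements);
-  }
-  V.resize(V.size() - RemovedElements);
-}
-// Unlike std::remove_if, this variant does not preserve the relative order
-// of the surviving elements. The member functions in this file instead
-// invalidate removed entries and break at the first invalid entry; the
-// effect is the same because invalidated entries accumulate at the tail.
-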
-InstRef Scheduler::select() {
-  unsigned QueueIndex = ReadySet.size();
-  for (unsigned I = 0, E = ReadySet.size(); I != E; ++I) {
-    const InstRef &IR = ReadySet[I];
-    if (QueueIndex == ReadySet.size() ||
-        Strategy->compare(IR, ReadySet[QueueIndex])) {
-      const InstrDesc &D = IR.getInstruction()->getDesc();
-      if (Resources->canBeIssued(D))
-        QueueIndex = I;
-    }
-  }
-
-  if (QueueIndex == ReadySet.size())
-    return InstRef();
-
-  // We found an instruction to issue.
-  InstRef IR = ReadySet[QueueIndex];
-  std::swap(ReadySet[QueueIndex], ReadySet[ReadySet.size() - 1]);
-  ReadySet.pop_back();
-  return IR;
-}
-
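-// A different selection policy can be plugged into the Scheduler through
-// initializeStrategy(). A minimal sketch (hypothetical strategy, shown for
-// illustration only; the compare() signature is inferred from its use in
-// select() above) that always prefers the oldest ready instruction:
-struct OldestFirstStrategy : public SchedulerStrategy {
-  bool compare(const InstRef &Lhs, const InstRef &Rhs) const override {
-    return Lhs.getSourceIndex() < Rhs.getSourceIndex();
-  }
-};
-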
-void Scheduler::updateIssuedSet(SmallVectorImpl<InstRef> &Executed) {
-  unsigned RemovedElements = 0;
-  for (auto I = IssuedSet.begin(), E = IssuedSet.end(); I != E;) {
-    InstRef &IR = *I;
-    if (!IR)
-      break;
-    Instruction &IS = *IR.getInstruction();
-    if (!IS.isExecuted()) {
-      LLVM_DEBUG(dbgs() << "[SCHEDULER]: Instruction #" << IR
-                        << " is still executing.\n");
-      ++I;
-      continue;
-    }
-
-    // Instruction IR has completed execution.
-    LSU.onInstructionExecuted(IR);
-    Executed.emplace_back(IR);
-    ++RemovedElements;
-    IR.invalidate();
-    std::iter_swap(I, E - RemovedElements);
-  }
-
-  IssuedSet.resize(IssuedSet.size() - RemovedElements);
-}
-
-void Scheduler::cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
-                           SmallVectorImpl<InstRef> &Executed,
-                           SmallVectorImpl<InstRef> &Ready) {
-  // Release consumed resources.
-  Resources->cycleEvent(Freed);
-
-  // Propagate the cycle event to the 'Issued' and 'Wait' sets.
-  for (InstRef &IR : IssuedSet)
-    IR.getInstruction()->cycleEvent();
-
-  updateIssuedSet(Executed);
-
-  for (InstRef &IR : WaitSet)
-    IR.getInstruction()->cycleEvent();
-
-  promoteToReadySet(Ready);
-}
-
-bool Scheduler::mustIssueImmediately(const InstRef &IR) const {
-  // Instructions that use an in-order dispatch/issue processor resource must
-  // be issued immediately to the pipeline(s). Any other in-order buffered
-  // resource (i.e. BufferSize=1) is consumed.
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  return Desc.isZeroLatency() || Resources->mustIssueImmediately(Desc);
-}
-
-void Scheduler::dispatch(const InstRef &IR) {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  Resources->reserveBuffers(Desc.Buffers);
-
-  // If necessary, reserve queue entries in the load-store unit (LSU).
-  bool IsMemOp = Desc.MayLoad || Desc.MayStore;
-  if (IsMemOp)
-    LSU.dispatch(IR);
-
-  if (!isReady(IR)) {
-    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR << " to the WaitSet\n");
-    WaitSet.push_back(IR);
-    return;
-  }
-
-  // Don't add a zero-latency instruction to the Ready queue.
-  // A zero-latency instruction doesn't consume any scheduler resources. That is
-  // because it doesn't need to be executed, and it is often removed at register
-  // renaming stage. For example, register-register moves are often optimized at
-  // register renaming stage by simply updating register aliases. On some
-  // targets, zero-idiom instructions (for example: a xor that clears the value
-  // of a register) are treated specially, and are often eliminated at register
-  // renaming stage.
-  if (!mustIssueImmediately(IR)) {
-    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR << " to the ReadySet\n");
-    ReadySet.push_back(IR);
-  }
-}
-
-bool Scheduler::isReady(const InstRef &IR) const {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  bool IsMemOp = Desc.MayLoad || Desc.MayStore;
-  return IR.getInstruction()->isReady() && (!IsMemOp || LSU.isReady(IR));
-}
-
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/InstrBuilder.cpp b/tools/llvm-mca/lib/InstrBuilder.cpp
deleted file mode 100644
index f396082..0000000
--- a/tools/llvm-mca/lib/InstrBuilder.cpp
+++ /dev/null
@@ -1,675 +0,0 @@
-//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file implements the InstrBuilder interface.
-///
-//===----------------------------------------------------------------------===//
-
-#include "InstrBuilder.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/WithColor.h"
-#include "llvm/Support/raw_ostream.h"
-
-#define DEBUG_TYPE "llvm-mca"
-
-namespace llvm {
-namespace mca {
-
-InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
-                           const llvm::MCInstrInfo &mcii,
-                           const llvm::MCRegisterInfo &mri,
-                           const llvm::MCInstrAnalysis &mcia)
-    : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
-      FirstReturnInst(true) {
-  computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
-}
-
-static void initializeUsedResources(InstrDesc &ID,
-                                    const MCSchedClassDesc &SCDesc,
-                                    const MCSubtargetInfo &STI,
-                                    ArrayRef<uint64_t> ProcResourceMasks) {
-  const MCSchedModel &SM = STI.getSchedModel();
-
-  // Populate resources consumed.
-  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
-  std::vector<ResourcePlusCycles> Worklist;
-
-  // Track cycles contributed by resources that are in a "Super" relationship.
-  // This is required if we want to correctly match the behavior of method
-  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
-  // of "consumed" processor resources and resource cycles, the logic in
-  // ExpandProcResource() doesn't update the number of resource cycles
-  // contributed by a "Super" resource to a group.
-  // We need to take this into account when we find that a processor resource is
-  // part of a group, and it is also used as the "Super" of other resources.
-  // This map stores the number of cycles contributed by sub-resources that are
-  // part of a "Super" resource. The key value is the "Super" resource mask ID.
-  DenseMap<uint64_t, unsigned> SuperResources;
-
-  unsigned NumProcResources = SM.getNumProcResourceKinds();
-  APInt Buffers(NumProcResources, 0);
-
-  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
-    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
-    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
-    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
-    if (PR.BufferSize != -1)
-      Buffers.setBit(PRE->ProcResourceIdx);
-    CycleSegment RCy(0, PRE->Cycles, false);
-    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
-    if (PR.SuperIdx) {
-      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
-      SuperResources[Super] += PRE->Cycles;
-    }
-  }
-
-  // Sort elements by mask popcount, so that we prioritize resource units over
-  // resource groups, and smaller groups over larger groups.
-  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
-    unsigned popcntA = countPopulation(A.first);
-    unsigned popcntB = countPopulation(B.first);
-    if (popcntA < popcntB)
-      return true;
-    if (popcntA > popcntB)
-      return false;
-    return A.first < B.first;
-  });
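-
-  // For example, given the masks 0b001 (a unit), 0b011 (a group of two
-  // units) and 0b111 (a group of three units), the sorted order is 0b001,
-  // 0b011, 0b111: cycles consumed by the unit are subtracted from both
-  // groups before the larger group is processed.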
-
-  uint64_t UsedResourceUnits = 0;
-
-  // Remove cycles contributed by smaller resources.
-  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
-    ResourcePlusCycles &A = Worklist[I];
-    if (!A.second.size()) {
-      A.second.NumUnits = 0;
-      A.second.setReserved();
-      ID.Resources.emplace_back(A);
-      continue;
-    }
-
-    ID.Resources.emplace_back(A);
-    uint64_t NormalizedMask = A.first;
-    if (countPopulation(A.first) == 1) {
-      UsedResourceUnits |= A.first;
-    } else {
-      // Remove the leading 1 from the resource group mask.
-      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
-    }
-
-    for (unsigned J = I + 1; J < E; ++J) {
-      ResourcePlusCycles &B = Worklist[J];
-      if ((NormalizedMask & B.first) == NormalizedMask) {
-        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
-        if (countPopulation(B.first) > 1)
-          B.second.NumUnits++;
-      }
-    }
-  }
-
-  // A SchedWrite may specify a number of cycles in which a resource group
-  // is reserved. For example (on target x86; cpu Haswell):
-  //
-  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
-  //    let ResourceCycles = [2, 2, 3];
-  //  }
-  //
-  // This means:
-  // Resource units HWPort0 and HWPort1 are both used for 2cy.
-  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
-  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
-  // will not be usable for 2 entire cycles from instruction issue.
-  //
-  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
-  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
-  // extra delay on top of the 2-cycle latency.
-  // During those extra cycles, HWPort01 is not usable by other instructions.
-  for (ResourcePlusCycles &RPC : ID.Resources) {
-    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
-      // Remove the leading 1 from the resource group mask.
-      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
-      if ((Mask & UsedResourceUnits) == Mask)
-        RPC.second.setReserved();
-    }
-  }
-
-  // Identify extra buffers that are consumed through super resources.
-  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
-    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
-      const MCProcResourceDesc &PR = *SM.getProcResource(I);
-      if (PR.BufferSize == -1)
-        continue;
-
-      uint64_t Mask = ProcResourceMasks[I];
-      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
-        Buffers.setBit(I);
-    }
-  }
-
-  // Now set the buffers.
-  if (unsigned NumBuffers = Buffers.countPopulation()) {
-    ID.Buffers.resize(NumBuffers);
-    for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
-      if (Buffers[I]) {
-        --NumBuffers;
-        ID.Buffers[NumBuffers] = ProcResourceMasks[I];
-      }
-    }
-  }
-
-  LLVM_DEBUG({
-    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
-      dbgs() << "\t\tMask=" << R.first << ", cy=" << R.second.size() << '\n';
-    for (const uint64_t R : ID.Buffers)
-      dbgs() << "\t\tBuffer Mask=" << R << '\n';
-  });
-}
-
-static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
-                              const MCSchedClassDesc &SCDesc,
-                              const MCSubtargetInfo &STI) {
-  if (MCDesc.isCall()) {
-    // We cannot estimate how long this call will take.
-    // Artificially set an arbitrarily high latency (100cy).
-    ID.MaxLatency = 100U;
-    return;
-  }
-
-  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
-  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
-  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
-}
-
-static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
-  // Count register definitions, and skip non-register operands in the process.
-  unsigned I, E;
-  unsigned NumExplicitDefs = MCDesc.getNumDefs();
-  for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
-    const MCOperand &Op = MCI.getOperand(I);
-    if (Op.isReg())
-      --NumExplicitDefs;
-  }
-
-  if (NumExplicitDefs) {
-    return make_error<InstructionError<MCInst>>(
-        "Expected more register operand definitions.", MCI);
-  }
-
-  if (MCDesc.hasOptionalDef()) {
-    // Always assume that the optional definition is the last operand.
-    const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
-    if (I == MCI.getNumOperands() || !Op.isReg()) {
-      std::string Message =
-          "expected a register operand for an optional definition. Instruction "
-          "has not been correctly analyzed.";
-      return make_error<InstructionError<MCInst>>(Message, MCI);
-    }
-  }
-
-  return ErrorSuccess();
-}
-
-void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
-                                  unsigned SchedClassID) {
-  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
-  const MCSchedModel &SM = STI.getSchedModel();
-  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
-
-  // Assumptions made by this algorithm:
-  //  1. The number of explicit and implicit register definitions in a MCInst
-  //     matches the number of explicit and implicit definitions according to
-  //     the opcode descriptor (MCInstrDesc).
-  //  2. Uses start at index #(MCDesc.getNumDefs()).
-  //  3. There can only be a single optional register definition, and it is
-  //     always the last operand of the sequence (excluding extra operands
-  //     contributed by variadic opcodes).
-  //
-  // These assumptions work quite well for most out-of-order in-tree targets
-  // like x86. This is mainly because the vast majority of instructions are
-  // expanded to MCInst using a straightforward lowering logic that preserves
-  // the ordering of the operands.
-  //
-  // About assumption 1.
-  // The algorithm allows non-register operands between register operand
-  // definitions. This helps to handle some special ARM instructions with
-  // implicit operand increment (-mtriple=armv7):
-  //
-  // vld1.32  {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
-  //                             @  <MCOperand Reg:59>
-  //                             @  <MCOperand Imm:0>     (!!)
-  //                             @  <MCOperand Reg:67>
-  //                             @  <MCOperand Imm:0>
-  //                             @  <MCOperand Imm:14>
-  //                             @  <MCOperand Reg:0>>
-  //
-  // MCDesc reports:
-  //  6 explicit operands.
-  //  1 optional definition
-  //  2 explicit definitions (!!)
-  //
-  // The presence of an 'Imm' operand between the two register definitions
-  // breaks the assumption that "register definitions are always at the
-  // beginning of the operand sequence".
-  //
-  // To work around this issue, this algorithm ignores (i.e. skips) any
-  // non-register operands between register definitions.  The optional
-  // definition is still at index #(NumOperands-1).
-  //
-  // According to assumption 2, register reads start at #(NumExplicitDefs-1).
-  // That means register R1 from the example is both read and written.
-  unsigned NumExplicitDefs = MCDesc.getNumDefs();
-  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
-  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
-  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
-  if (MCDesc.hasOptionalDef())
-    TotalDefs++;
-
-  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
-  ID.Writes.resize(TotalDefs + NumVariadicOps);
-  // Iterate over the operands list, and skip non-register operands.
-  // The first NumExplicitDefs register operands are expected to be register
-  // definitions.
-  unsigned CurrentDef = 0;
-  unsigned i = 0;
-  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
-    const MCOperand &Op = MCI.getOperand(i);
-    if (!Op.isReg())
-      continue;
-
-    WriteDescriptor &Write = ID.Writes[CurrentDef];
-    Write.OpIndex = i;
-    if (CurrentDef < NumWriteLatencyEntries) {
-      const MCWriteLatencyEntry &WLE =
-          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
-      // Conservatively default to MaxLatency.
-      Write.Latency =
-          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
-      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
-    } else {
-      // Assign a default latency for this write.
-      Write.Latency = ID.MaxLatency;
-      Write.SClassOrWriteResourceID = 0;
-    }
-    Write.IsOptionalDef = false;
-    LLVM_DEBUG({
-      dbgs() << "\t\t[Def]    OpIdx=" << Write.OpIndex
-             << ", Latency=" << Write.Latency
-             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
-    });
-    CurrentDef++;
-  }
-
-  assert(CurrentDef == NumExplicitDefs &&
-         "Expected more register operand definitions.");
-  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
-    unsigned Index = NumExplicitDefs + CurrentDef;
-    WriteDescriptor &Write = ID.Writes[Index];
-    Write.OpIndex = ~CurrentDef;
-    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
-    if (Index < NumWriteLatencyEntries) {
-      const MCWriteLatencyEntry &WLE =
-          *STI.getWriteLatencyEntry(&SCDesc, Index);
-      // Conservatively default to MaxLatency.
-      Write.Latency =
-          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
-      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
-    } else {
-      // Assign a default latency for this write.
-      Write.Latency = ID.MaxLatency;
-      Write.SClassOrWriteResourceID = 0;
-    }
-
-    Write.IsOptionalDef = false;
-    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
-    LLVM_DEBUG({
-      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
-             << ", PhysReg=" << MRI.getName(Write.RegisterID)
-             << ", Latency=" << Write.Latency
-             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
-    });
-  }
-
-  if (MCDesc.hasOptionalDef()) {
-    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
-    Write.OpIndex = MCDesc.getNumOperands() - 1;
-    // Assign a default latency for this write.
-    Write.Latency = ID.MaxLatency;
-    Write.SClassOrWriteResourceID = 0;
-    Write.IsOptionalDef = true;
-    LLVM_DEBUG({
-      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
-             << ", Latency=" << Write.Latency
-             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
-    });
-  }
-
-  if (!NumVariadicOps)
-    return;
-
-  // FIXME: if an instruction opcode is flagged 'mayStore' and it has no
-  // 'unmodeledSideEffects', then this logic optimistically assumes that any
-  // extra register operand in the variadic sequence is not a register
-  // definition.
-  //
-  // Otherwise, we conservatively assume that any register operand from the
-  // variadic sequence is both a register read and a register write.
-  bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
-                        !MCDesc.hasUnmodeledSideEffects();
-  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
-  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
-       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
-    const MCOperand &Op = MCI.getOperand(OpIndex);
-    if (!Op.isReg())
-      continue;
-
-    WriteDescriptor &Write = ID.Writes[CurrentDef];
-    Write.OpIndex = OpIndex;
-    // Assign a default latency for this write.
-    Write.Latency = ID.MaxLatency;
-    Write.SClassOrWriteResourceID = 0;
-    Write.IsOptionalDef = false;
-    ++CurrentDef;
-    LLVM_DEBUG({
-      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
-             << ", Latency=" << Write.Latency
-             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
-    });
-  }
-
-  ID.Writes.resize(CurrentDef);
-}
-
-void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
-                                 unsigned SchedClassID) {
-  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
-  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
-  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
-  // Remove the optional definition.
-  if (MCDesc.hasOptionalDef())
-    --NumExplicitUses;
-  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
-  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
-  ID.Reads.resize(TotalUses);
-  unsigned CurrentUse = 0;
-  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
-       ++I, ++OpIndex) {
-    const MCOperand &Op = MCI.getOperand(OpIndex);
-    if (!Op.isReg())
-      continue;
-
-    ReadDescriptor &Read = ID.Reads[CurrentUse];
-    Read.OpIndex = OpIndex;
-    Read.UseIndex = I;
-    Read.SchedClassID = SchedClassID;
-    ++CurrentUse;
-    LLVM_DEBUG(dbgs() << "\t\t[Use]    OpIdx=" << Read.OpIndex
-                      << ", UseIndex=" << Read.UseIndex << '\n');
-  }
-
-  // For the purpose of ReadAdvance, implicit uses come directly after explicit
-  // uses. The "UseIndex" must be updated according to that implicit layout.
-  for (unsigned I = 0; I < NumImplicitUses; ++I) {
-    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
-    Read.OpIndex = ~I;
-    Read.UseIndex = NumExplicitUses + I;
-    Read.RegisterID = MCDesc.getImplicitUses()[I];
-    Read.SchedClassID = SchedClassID;
-    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
-                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
-                      << MRI.getName(Read.RegisterID) << '\n');
-  }
-
-  CurrentUse += NumImplicitUses;
-
-  // FIXME: If an instruction opcode is marked as 'mayLoad' and it has no
-  // 'unmodeledSideEffects', then this logic optimistically assumes that any
-  // extra register operand in the variadic sequence is not a register
-  // definition.
-
-  bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
-                        !MCDesc.hasUnmodeledSideEffects();
-  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
-       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
-    const MCOperand &Op = MCI.getOperand(OpIndex);
-    if (!Op.isReg())
-      continue;
-
-    ReadDescriptor &Read = ID.Reads[CurrentUse];
-    Read.OpIndex = OpIndex;
-    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
-    Read.SchedClassID = SchedClassID;
-    ++CurrentUse;
-    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
-                      << ", UseIndex=" << Read.UseIndex << '\n');
-  }
-
-  ID.Reads.resize(CurrentUse);
-}
-
-Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
-                                    const MCInst &MCI) const {
-  if (ID.NumMicroOps != 0)
-    return ErrorSuccess();
-
-  bool UsesMemory = ID.MayLoad || ID.MayStore;
-  bool UsesBuffers = !ID.Buffers.empty();
-  bool UsesResources = !ID.Resources.empty();
-  if (!UsesMemory && !UsesBuffers && !UsesResources)
-    return ErrorSuccess();
-
-  StringRef Message;
-  if (UsesMemory) {
-    Message = "found an inconsistent instruction that decodes "
-              "into zero opcodes and that consumes load/store "
-              "unit resources.";
-  } else {
-    Message = "found an inconsistent instruction that decodes "
-              "to zero opcodes and that consumes scheduler "
-              "resources.";
-  }
-
-  return make_error<InstructionError<MCInst>>(Message, MCI);
-}
-
-Expected<const InstrDesc &>
-InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
-  assert(STI.getSchedModel().hasInstrSchedModel() &&
-         "Itineraries are not yet supported!");
-
-  // Obtain the instruction descriptor from the opcode.
-  unsigned short Opcode = MCI.getOpcode();
-  const MCInstrDesc &MCDesc = MCII.get(Opcode);
-  const MCSchedModel &SM = STI.getSchedModel();
-
-  // Then obtain the scheduling class information from the instruction.
-  unsigned SchedClassID = MCDesc.getSchedClass();
-  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();
-
-  // Try to solve variant scheduling classes.
-  if (IsVariant) {
-    unsigned CPUID = SM.getProcessorID();
-    while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
-      SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);
-
-    if (!SchedClassID) {
-      return make_error<InstructionError<MCInst>>(
-          "unable to resolve scheduling class for write variant.", MCI);
-    }
-  }
-
-  // Check if this instruction is supported. Otherwise, report an error.
-  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
-  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
-    return make_error<InstructionError<MCInst>>(
-        "found an unsupported instruction in the input assembly sequence.",
-        MCI);
-  }
-
-  // Create a new empty descriptor.
-  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
-  ID->NumMicroOps = SCDesc.NumMicroOps;
-
-  if (MCDesc.isCall() && FirstCallInst) {
-    // We don't correctly model calls.
-    WithColor::warning() << "found a call in the input assembly sequence.\n";
-    WithColor::note() << "call instructions are not correctly modeled. "
-                      << "Assume a latency of 100cy.\n";
-    FirstCallInst = false;
-  }
-
-  if (MCDesc.isReturn() && FirstReturnInst) {
-    WithColor::warning() << "found a return instruction in the input"
-                         << " assembly sequence.\n";
-    WithColor::note() << "program counter updates are ignored.\n";
-    FirstReturnInst = false;
-  }
-
-  ID->MayLoad = MCDesc.mayLoad();
-  ID->MayStore = MCDesc.mayStore();
-  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
-
-  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
-  computeMaxLatency(*ID, MCDesc, SCDesc, STI);
-
-  if (Error Err = verifyOperands(MCDesc, MCI))
-    return std::move(Err);
-
-  populateWrites(*ID, MCI, SchedClassID);
-  populateReads(*ID, MCI, SchedClassID);
-
-  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
-  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
-
-  // Sanity check on the instruction descriptor.
-  if (Error Err = verifyInstrDesc(*ID, MCI))
-    return std::move(Err);
-
-  // Now add the new descriptor.
-  SchedClassID = MCDesc.getSchedClass();
-  bool IsVariadic = MCDesc.isVariadic();
-  if (!IsVariadic && !IsVariant) {
-    Descriptors[MCI.getOpcode()] = std::move(ID);
-    return *Descriptors[MCI.getOpcode()];
-  }
-
-  VariantDescriptors[&MCI] = std::move(ID);
-  return *VariantDescriptors[&MCI];
-}
-
-Expected<const InstrDesc &>
-InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
-  if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
-    return *Descriptors[MCI.getOpcode()];
-
-  if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
-    return *VariantDescriptors[&MCI];
-
-  return createInstrDescImpl(MCI);
-}
-
-Expected<std::unique_ptr<Instruction>>
-InstrBuilder::createInstruction(const MCInst &MCI) {
-  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
-  if (!DescOrErr)
-    return DescOrErr.takeError();
-  const InstrDesc &D = *DescOrErr;
-  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);
-
-  // Check if this is a dependency breaking instruction.
-  APInt Mask;
-
-  unsigned ProcID = STI.getSchedModel().getProcessorID();
-  bool IsZeroIdiom = MCIA.isZeroIdiom(MCI, Mask, ProcID);
-  bool IsDepBreaking =
-      IsZeroIdiom || MCIA.isDependencyBreaking(MCI, Mask, ProcID);
-  if (MCIA.isOptimizableRegisterMove(MCI, ProcID))
-    NewIS->setOptimizableMove();
-
-  // Initialize Reads first.
-  for (const ReadDescriptor &RD : D.Reads) {
-    int RegID = -1;
-    if (!RD.isImplicitRead()) {
-      // explicit read.
-      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
-      // Skip non-register operands.
-      if (!Op.isReg())
-        continue;
-      RegID = Op.getReg();
-    } else {
-      // Implicit read.
-      RegID = RD.RegisterID;
-    }
-
-    // Skip invalid register operands.
-    if (!RegID)
-      continue;
-
-    // Okay, this is a register operand. Create a ReadState for it.
-    assert(RegID > 0 && "Invalid register ID found!");
-    NewIS->getUses().emplace_back(RD, RegID);
-    ReadState &RS = NewIS->getUses().back();
-
-    if (IsDepBreaking) {
-      // A mask of all zeroes means that all the explicit input operands are
-      // independent from their definitions. Implicit reads are conservatively
-      // treated as dependent.
-      if (Mask.isNullValue()) {
-        if (!RD.isImplicitRead())
-          RS.setIndependentFromDef();
-      } else {
-        // Check if this register operand is independent according to `Mask`.
-        // Note that Mask may not have enough bits to describe all explicit and
-        // implicit input operands. If this register operand doesn't have a
-        // corresponding bit in Mask, then conservatively assume that it is
-        // dependent.
-        if (Mask.getBitWidth() > RD.UseIndex) {
-          // Okay: this mask describes register use `RD.UseIndex`.
-          if (Mask[RD.UseIndex])
-            RS.setIndependentFromDef();
-        }
-      }
-    }
-  }
-
-  // Early exit if there are no writes.
-  if (D.Writes.empty())
-    return std::move(NewIS);
-
-  // Track register writes that implicitly clear the upper portion of the
-  // underlying super-registers using an APInt.
-  APInt WriteMask(D.Writes.size(), 0);
-
-  // Now query the MCInstrAnalysis object to obtain information about which
-  // register writes implicitly clear the upper portion of a super-register.
-  MCIA.clearsSuperRegisters(MRI, MCI, WriteMask);
-
-  // Initialize writes.
-  unsigned WriteIndex = 0;
-  for (const WriteDescriptor &WD : D.Writes) {
-    unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
-                                          : MCI.getOperand(WD.OpIndex).getReg();
-    // Check if this is an optional definition that references NoReg.
-    if (WD.IsOptionalDef && !RegID) {
-      ++WriteIndex;
-      continue;
-    }
-
-    assert(RegID && "Expected a valid register ID!");
-    NewIS->getDefs().emplace_back(WD, RegID,
-                                  /* ClearsSuperRegs */ WriteMask[WriteIndex],
-                                  /* WritesZero */ IsZeroIdiom);
-    ++WriteIndex;
-  }
-
-  return std::move(NewIS);
-}
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Instruction.cpp b/tools/llvm-mca/lib/Instruction.cpp
deleted file mode 100644
index 47ba2f8..0000000
--- a/tools/llvm-mca/lib/Instruction.cpp
+++ /dev/null
@@ -1,205 +0,0 @@
-//===--------------------- Instruction.cpp ----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines abstractions used by the Pipeline to model register reads,
-// register writes and instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Instruction.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-namespace mca {
-
-void ReadState::writeStartEvent(unsigned Cycles) {
-  assert(DependentWrites);
-  assert(CyclesLeft == UNKNOWN_CYCLES);
-
-  // This read may be dependent on more than one write. This typically occurs
-  // when a definition is the result of multiple writes where at least one
-  // write does a partial register update.
-  // The HW is forced to do some extra bookkeeping to keep track of all the
-  // dependent writes, and to implement a merging scheme for the partial
-  // writes.
-  --DependentWrites;
-  TotalCycles = std::max(TotalCycles, Cycles);
-
-  if (!DependentWrites) {
-    CyclesLeft = TotalCycles;
-    IsReady = !CyclesLeft;
-  }
-}
-
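-// Example: a read with two dependent writes that report writeStartEvent(3)
-// and writeStartEvent(5) stays blocked until both have reported; CyclesLeft
-// then becomes max(3, 5) = 5, and the read is ready once it reaches zero.
-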
-void WriteState::onInstructionIssued() {
-  assert(CyclesLeft == UNKNOWN_CYCLES);
-  // Update the number of cycles left based on the WriteDescriptor info.
-  CyclesLeft = getLatency();
-
-  // Now that the time left before write-back is known, notify
-  // all the users.
-  for (const std::pair<ReadState *, int> &User : Users) {
-    ReadState *RS = User.first;
-    unsigned ReadCycles = std::max(0, CyclesLeft - User.second);
-    RS->writeStartEvent(ReadCycles);
-  }
-
-  // Notify any writes that are in a false dependency with this write.
-  if (PartialWrite)
-    PartialWrite->writeStartEvent(CyclesLeft);
-}
-
-void WriteState::addUser(ReadState *User, int ReadAdvance) {
-  // If CyclesLeft is different from UNKNOWN_CYCLES, then we don't need to
-  // update the list of users. We can just notify the user with
-  // the actual number of cycles left (which may be zero).
-  if (CyclesLeft != UNKNOWN_CYCLES) {
-    unsigned ReadCycles = std::max(0, CyclesLeft - ReadAdvance);
-    User->writeStartEvent(ReadCycles);
-    return;
-  }
-
-  if (llvm::find_if(Users, [&User](const std::pair<ReadState *, int> &Use) {
-        return Use.first == User;
-      }) == Users.end()) {
-    Users.emplace_back(User, ReadAdvance);
-  }
-}
-
-void WriteState::addUser(WriteState *User) {
-  if (CyclesLeft != UNKNOWN_CYCLES) {
-    User->writeStartEvent(std::max(0, CyclesLeft));
-    return;
-  }
-
-  assert(!PartialWrite && "PartialWrite already set!");
-  PartialWrite = User;
-  User->setDependentWrite(this);
-}
-
-void WriteState::cycleEvent() {
-  // Note: CyclesLeft can be a negative number. It is an error to
-  // make it an unsigned quantity because users of this write may
-  // specify a negative ReadAdvance.
-  if (CyclesLeft != UNKNOWN_CYCLES)
-    CyclesLeft--;
-
-  if (DependentWriteCyclesLeft)
-    DependentWriteCyclesLeft--;
-}
-
-void ReadState::cycleEvent() {
-  // Update the total number of cycles.
-  if (DependentWrites && TotalCycles) {
-    --TotalCycles;
-    return;
-  }
-
-  // Bail out immediately if we don't know how many cycles are left.
-  if (CyclesLeft == UNKNOWN_CYCLES)
-    return;
-
-  if (CyclesLeft) {
-    --CyclesLeft;
-    IsReady = !CyclesLeft;
-  }
-}
-
-#ifndef NDEBUG
-void WriteState::dump() const {
-  dbgs() << "{ OpIdx=" << WD->OpIndex << ", Lat=" << getLatency() << ", RegID "
-         << getRegisterID() << ", Cycles Left=" << getCyclesLeft() << " }";
-}
-
-void WriteRef::dump() const {
-  dbgs() << "IID=" << getSourceIndex() << ' ';
-  if (isValid())
-    getWriteState()->dump();
-  else
-    dbgs() << "(null)";
-}
-#endif
-
-void Instruction::dispatch(unsigned RCUToken) {
-  assert(Stage == IS_INVALID);
-  Stage = IS_AVAILABLE;
-  RCUTokenID = RCUToken;
-
-  // Check if input operands are already available.
-  update();
-}
-
-void Instruction::execute() {
-  assert(Stage == IS_READY);
-  Stage = IS_EXECUTING;
-
-  // Set the cycles left before the write-back stage.
-  CyclesLeft = getLatency();
-
-  for (WriteState &WS : getDefs())
-    WS.onInstructionIssued();
-
-  // Transition to the "executed" stage if this is a zero-latency instruction.
-  if (!CyclesLeft)
-    Stage = IS_EXECUTED;
-}
-
-void Instruction::forceExecuted() {
-  assert(Stage == IS_READY && "Invalid internal state!");
-  CyclesLeft = 0;
-  Stage = IS_EXECUTED;
-}
-
-void Instruction::update() {
-  assert(isDispatched() && "Unexpected instruction stage found!");
-
-  if (!all_of(getUses(), [](const ReadState &Use) { return Use.isReady(); }))
-    return;
-
-  // A partial register write cannot complete before a dependent write.
-  auto IsDefReady = [&](const WriteState &Def) {
-    if (!Def.getDependentWrite()) {
-      unsigned CyclesLeft = Def.getDependentWriteCyclesLeft();
-      return !CyclesLeft || CyclesLeft < getLatency();
-    }
-    return false;
-  };
-
-  if (all_of(getDefs(), IsDefReady))
-    Stage = IS_READY;
-}
-
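-// Example: a partial register write with latency 3 whose dependent write
-// still needs 5 cycles is not marked ready, because it would otherwise
-// complete before the write it depends on. Once the dependent write needs
-// fewer than 3 cycles, the definition becomes ready.
-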
-void Instruction::cycleEvent() {
-  if (isReady())
-    return;
-
-  if (isDispatched()) {
-    for (ReadState &Use : getUses())
-      Use.cycleEvent();
-
-    for (WriteState &Def : getDefs())
-      Def.cycleEvent();
-
-    update();
-    return;
-  }
-
-  assert(isExecuting() && "Instruction not in-flight?");
-  assert(CyclesLeft && "Instruction already executed?");
-  for (WriteState &Def : getDefs())
-    Def.cycleEvent();
-  CyclesLeft--;
-  if (!CyclesLeft)
-    Stage = IS_EXECUTED;
-}
-
-const unsigned WriteRef::INVALID_IID = std::numeric_limits<unsigned>::max();
-
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Pipeline.cpp b/tools/llvm-mca/lib/Pipeline.cpp
deleted file mode 100644
index 0357124..0000000
--- a/tools/llvm-mca/lib/Pipeline.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-//===--------------------- Pipeline.cpp -------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file implements an ordered container of stages that simulate the
-/// pipeline of a hardware backend.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Pipeline.h"
-#include "HWEventListener.h"
-#include "llvm/Support/Debug.h"
-
-namespace llvm {
-namespace mca {
-
-#define DEBUG_TYPE "llvm-mca"
-
-void Pipeline::addEventListener(HWEventListener *Listener) {
-  if (Listener)
-    Listeners.insert(Listener);
-  for (auto &S : Stages)
-    S->addListener(Listener);
-}
-
-bool Pipeline::hasWorkToProcess() {
-  return any_of(Stages, [](const std::unique_ptr<Stage> &S) {
-    return S->hasWorkToComplete();
-  });
-}
-
-Expected<unsigned> Pipeline::run() {
-  assert(!Stages.empty() && "Unexpected empty pipeline found!");
-
-  do {
-    notifyCycleBegin();
-    if (Error Err = runCycle())
-      return std::move(Err);
-    notifyCycleEnd();
-    ++Cycles;
-  } while (hasWorkToProcess());
-
-  return Cycles;
-}
-
-Error Pipeline::runCycle() {
-  Error Err = ErrorSuccess();
-  // Update stages before we start processing new instructions. Stages are
-  // visited in reverse order: downstream stages are updated first, so that
-  // they can drain before upstream stages try to push new work into them.
-  for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I) {
-    const std::unique_ptr<Stage> &S = *I;
-    Err = S->cycleStart();
-  }
-
-  // Now fetch and execute new instructions.
-  InstRef IR;
-  Stage &FirstStage = *Stages[0];
-  while (!Err && FirstStage.isAvailable(IR))
-    Err = FirstStage.execute(IR);
-
-  // Update stages in preparation for a new cycle.
-  for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I) {
-    const std::unique_ptr<Stage> &S = *I;
-    Err = S->cycleEnd();
-  }
-
-  return Err;
-}
-
-void Pipeline::appendStage(std::unique_ptr<Stage> S) {
-  assert(S && "Invalid null stage in input!");
-  if (!Stages.empty()) {
-    Stage *Last = Stages.back().get();
-    Last->setNextInSequence(S.get());
-  }
-
-  Stages.push_back(std::move(S));
-}
-
-void Pipeline::notifyCycleBegin() {
-  LLVM_DEBUG(dbgs() << "[E] Cycle begin: " << Cycles << '\n');
-  for (HWEventListener *Listener : Listeners)
-    Listener->onCycleBegin();
-}
-
-void Pipeline::notifyCycleEnd() {
-  LLVM_DEBUG(dbgs() << "[E] Cycle end: " << Cycles << "\n\n");
-  for (HWEventListener *Listener : Listeners)
-    Listener->onCycleEnd();
-}
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Stages/DispatchStage.cpp b/tools/llvm-mca/lib/Stages/DispatchStage.cpp
deleted file mode 100644
index 838dbad..0000000
--- a/tools/llvm-mca/lib/Stages/DispatchStage.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-//===--------------------- DispatchStage.cpp --------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file models the dispatch component of an instruction pipeline.
-///
-/// The DispatchStage is responsible for updating instruction dependencies
-/// and communicating to the simulated instruction scheduler that an instruction
-/// is ready to be scheduled for execution.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Stages/DispatchStage.h"
-#include "HWEventListener.h"
-#include "HardwareUnits/Scheduler.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "llvm-mca"
-
-namespace llvm {
-namespace mca {
-
-void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
-                                                ArrayRef<unsigned> UsedRegs,
-                                                unsigned UOps) const {
-  LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
-  notifyEvent<HWInstructionEvent>(
-      HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
-}
-
-bool DispatchStage::checkPRF(const InstRef &IR) const {
-  SmallVector<unsigned, 4> RegDefs;
-  for (const WriteState &RegDef : IR.getInstruction()->getDefs())
-    RegDefs.emplace_back(RegDef.getRegisterID());
-
-  const unsigned RegisterMask = PRF.isAvailable(RegDefs);
-  // A mask with all zeroes means: register files are available.
-  if (RegisterMask) {
-    notifyEvent<HWStallEvent>(
-        HWStallEvent(HWStallEvent::RegisterFileStall, IR));
-    return false;
-  }
-
-  return true;
-}
-
-bool DispatchStage::checkRCU(const InstRef &IR) const {
-  const unsigned NumMicroOps = IR.getInstruction()->getDesc().NumMicroOps;
-  if (RCU.isAvailable(NumMicroOps))
-    return true;
-  notifyEvent<HWStallEvent>(
-      HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
-  return false;
-}
-
-bool DispatchStage::canDispatch(const InstRef &IR) const {
-  return checkRCU(IR) && checkPRF(IR) && checkNextStage(IR);
-}
-
-void DispatchStage::updateRAWDependencies(ReadState &RS,
-                                          const MCSubtargetInfo &STI) {
-  SmallVector<WriteRef, 4> DependentWrites;
-
-  // Collect all the dependent writes, and update RS internal state.
-  PRF.addRegisterRead(RS, DependentWrites);
-
-  // We know that this read depends on all the writes in DependentWrites.
-  // For each write, check if we have ReadAdvance information, and use it
-  // to figure out in how many cycles this read becomes available.
-  const ReadDescriptor &RD = RS.getDescriptor();
-  const MCSchedModel &SM = STI.getSchedModel();
-  const MCSchedClassDesc *SC = SM.getSchedClassDesc(RD.SchedClassID);
-  for (WriteRef &WR : DependentWrites) {
-    WriteState &WS = *WR.getWriteState();
-    unsigned WriteResID = WS.getWriteResourceID();
-    int ReadAdvance = STI.getReadAdvanceCycles(SC, RD.UseIndex, WriteResID);
-    WS.addUser(&RS, ReadAdvance);
-  }
-}
-
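-// Example: for a dependent write with latency 5 and a ReadAdvance of 2,
-// WriteState::addUser() computes std::max(0, 5 - 2), so the read sees its
-// input 3 cycles after the write is issued. A negative ReadAdvance delays
-// the read instead of accelerating it.
-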
-Error DispatchStage::dispatch(InstRef IR) {
-  assert(!CarryOver && "Cannot dispatch another instruction!");
-  Instruction &IS = *IR.getInstruction();
-  const InstrDesc &Desc = IS.getDesc();
-  const unsigned NumMicroOps = Desc.NumMicroOps;
-  if (NumMicroOps > DispatchWidth) {
-    assert(AvailableEntries == DispatchWidth);
-    AvailableEntries = 0;
-    CarryOver = NumMicroOps - DispatchWidth;
-    CarriedOver = IR;
-  } else {
-    assert(AvailableEntries >= NumMicroOps);
-    AvailableEntries -= NumMicroOps;
-  }
-
-  // Check if this is an optimizable reg-reg move.
-  bool IsEliminated = false;
-  if (IS.isOptimizableMove()) {
-    assert(IS.getDefs().size() == 1 && "Expected a single output!");
-    assert(IS.getUses().size() == 1 && "Expected a single input!");
-    IsEliminated = PRF.tryEliminateMove(IS.getDefs()[0], IS.getUses()[0]);
-  }
-
-  // A dependency-breaking instruction doesn't have to wait on the register
-  // input operands, and it is often optimized at register renaming stage.
-  // Update RAW dependencies if this instruction is not a dependency-breaking
-  // instruction. A dependency-breaking instruction is a zero-latency
-  // instruction that doesn't consume hardware resources.
-  // An example of dependency-breaking instruction on X86 is a zero-idiom XOR.
-  //
-  // We also don't update data dependencies for instructions that have been
-  // eliminated at register renaming stage.
-  if (!IsEliminated) {
-    for (ReadState &RS : IS.getUses())
-      updateRAWDependencies(RS, STI);
-  }
-
-  // By default, a dependency-breaking zero-idiom is expected to be optimized
-  // at register renaming stage. That means, no physical register is allocated
-  // to the instruction.
-  SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
-  for (WriteState &WS : IS.getDefs())
-    PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);
-
-  // Reserve slots in the RCU, and notify the instruction that it has been
-  // dispatched to the schedulers for execution.
-  IS.dispatch(RCU.reserveSlot(IR, NumMicroOps));
-
-  // Notify listeners of the "instruction dispatched" event,
-  // and move IR to the next stage.
-  notifyInstructionDispatched(IR, RegisterFiles,
-                              std::min(DispatchWidth, NumMicroOps));
-  return moveToTheNextStage(IR);
-}
-
-Error DispatchStage::cycleStart() {
-  PRF.cycleStart();
-
-  if (!CarryOver) {
-    AvailableEntries = DispatchWidth;
-    return ErrorSuccess();
-  }
-
-  AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
-  unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
-  CarryOver -= DispatchedOpcodes;
-  assert(CarriedOver && "Invalid dispatched instruction");
-
-  SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
-  notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
-  if (!CarryOver)
-    CarriedOver = InstRef();
-  return ErrorSuccess();
-}
-
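-// Example (hypothetical numbers): with DispatchWidth = 4, an instruction
-// with NumMicroOps = 6 consumes the whole dispatch group on its first cycle
-// and leaves CarryOver = 2. On the next cycle, cycleStart() computes
-// AvailableEntries = 4 - 2 = 2, notifies the two carried-over opcodes as
-// dispatched, and clears CarriedOver.
-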
-bool DispatchStage::isAvailable(const InstRef &IR) const {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  unsigned Required = std::min(Desc.NumMicroOps, DispatchWidth);
-  if (Required > AvailableEntries)
-    return false;
-  // The dispatch logic doesn't internally buffer instructions.  It only accepts
-  // instructions that can be successfully moved to the next stage during this
-  // same cycle.
-  return canDispatch(IR);
-}
-
-Error DispatchStage::execute(InstRef &IR) {
-  assert(canDispatch(IR) && "Cannot dispatch another instruction!");
-  return dispatch(IR);
-}
-
-#ifndef NDEBUG
-void DispatchStage::dump() const {
-  PRF.dump();
-  RCU.dump();
-}
-#endif
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Stages/EntryStage.cpp b/tools/llvm-mca/lib/Stages/EntryStage.cpp
deleted file mode 100644
index f552132..0000000
--- a/tools/llvm-mca/lib/Stages/EntryStage.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-//===---------------------- EntryStage.cpp ----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the Fetch stage of an instruction pipeline.  Its sole
-/// purpose in life is to produce instructions for the rest of the pipeline.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Stages/EntryStage.h"
-#include "Instruction.h"
-
-namespace llvm {
-namespace mca {
-
-bool EntryStage::hasWorkToComplete() const { return CurrentInstruction; }
-
-bool EntryStage::isAvailable(const InstRef & /* unused */) const {
-  if (CurrentInstruction)
-    return checkNextStage(CurrentInstruction);
-  return false;
-}
-
-void EntryStage::getNextInstruction() {
-  assert(!CurrentInstruction && "There is already an instruction to process!");
-  if (!SM.hasNext())
-    return;
-  SourceRef SR = SM.peekNext();
-  std::unique_ptr<Instruction> Inst = llvm::make_unique<Instruction>(SR.second);
-  CurrentInstruction = InstRef(SR.first, Inst.get());
-  Instructions.emplace_back(std::move(Inst));
-  SM.updateNext();
-}
-
-llvm::Error EntryStage::execute(InstRef & /*unused */) {
-  assert(CurrentInstruction && "There is no instruction to process!");
-  if (llvm::Error Val = moveToTheNextStage(CurrentInstruction))
-    return Val;
-
-  // Move the program counter.
-  CurrentInstruction.invalidate();
-  getNextInstruction();
-  return llvm::ErrorSuccess();
-}
-
-llvm::Error EntryStage::cycleStart() {
-  if (!CurrentInstruction)
-    getNextInstruction();
-  return llvm::ErrorSuccess();
-}
-
-llvm::Error EntryStage::cycleEnd() {
-  // Find the first instruction which hasn't been retired.
-  auto Range = make_range(&Instructions[NumRetired], Instructions.end());
-  auto It = find_if(Range, [](const std::unique_ptr<Instruction> &I) {
-    return !I->isRetired();
-  });
-
-  NumRetired = std::distance(Instructions.begin(), It);
-  // Erase instructions up to the first that hasn't been retired.
-  if ((NumRetired * 2) >= Instructions.size()) {
-    Instructions.erase(Instructions.begin(), It);
-    NumRetired = 0;
-  }
-
-  return llvm::ErrorSuccess();
-}
-
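-// Example: with Instructions.size() == 10, a retired prefix of 4 is kept
-// (4 * 2 < 10); once 5 instructions have retired (5 * 2 >= 10), the whole
-// prefix is erased at once. This amortizes the cost of erasing from the
-// front of the vector.
-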
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Stages/ExecuteStage.cpp b/tools/llvm-mca/lib/Stages/ExecuteStage.cpp
deleted file mode 100644
index 298f08a..0000000
--- a/tools/llvm-mca/lib/Stages/ExecuteStage.cpp
+++ /dev/null
@@ -1,219 +0,0 @@
-//===---------------------- ExecuteStage.cpp --------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the execution stage of an instruction pipeline.
-///
-/// The ExecuteStage is responsible for managing the hardware scheduler
-/// and issuing notifications that an instruction has been executed.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Stages/ExecuteStage.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "llvm-mca"
-
-namespace llvm {
-namespace mca {
-
-HWStallEvent::GenericEventType toHWStallEventType(Scheduler::Status Status) {
-  switch (Status) {
-  case Scheduler::SC_LOAD_QUEUE_FULL:
-    return HWStallEvent::LoadQueueFull;
-  case Scheduler::SC_STORE_QUEUE_FULL:
-    return HWStallEvent::StoreQueueFull;
-  case Scheduler::SC_BUFFERS_FULL:
-    return HWStallEvent::SchedulerQueueFull;
-  case Scheduler::SC_DISPATCH_GROUP_STALL:
-    return HWStallEvent::DispatchGroupStall;
-  case Scheduler::SC_AVAILABLE:
-    return HWStallEvent::Invalid;
-  }
-
-  llvm_unreachable("Don't know how to process this StallKind!");
-}
-
-bool ExecuteStage::isAvailable(const InstRef &IR) const {
-  if (Scheduler::Status S = HWS.isAvailable(IR)) {
-    HWStallEvent::GenericEventType ET = toHWStallEventType(S);
-    notifyEvent<HWStallEvent>(HWStallEvent(ET, IR));
-    return false;
-  }
-
-  return true;
-}
-
-Error ExecuteStage::issueInstruction(InstRef &IR) {
-  SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> Used;
-  SmallVector<InstRef, 4> Ready;
-  HWS.issueInstruction(IR, Used, Ready);
-
-  notifyReservedOrReleasedBuffers(IR, /* Reserved */ false);
-  notifyInstructionIssued(IR, Used);
-  if (IR.getInstruction()->isExecuted()) {
-    notifyInstructionExecuted(IR);
-    // FIXME: add a buffer of executed instructions.
-    if (Error S = moveToTheNextStage(IR))
-      return S;
-  }
-
-  for (const InstRef &I : Ready)
-    notifyInstructionReady(I);
-  return ErrorSuccess();
-}
-
-Error ExecuteStage::issueReadyInstructions() {
-  InstRef IR = HWS.select();
-  while (IR) {
-    if (Error Err = issueInstruction(IR))
-      return Err;
-
-    // Select the next instruction to issue.
-    IR = HWS.select();
-  }
-
-  return ErrorSuccess();
-}
-
-Error ExecuteStage::cycleStart() {
-  SmallVector<ResourceRef, 8> Freed;
-  SmallVector<InstRef, 4> Executed;
-  SmallVector<InstRef, 4> Ready;
-
-  HWS.cycleEvent(Freed, Executed, Ready);
-
-  for (const ResourceRef &RR : Freed)
-    notifyResourceAvailable(RR);
-
-  for (InstRef &IR : Executed) {
-    notifyInstructionExecuted(IR);
-    // FIXME: add a buffer of executed instructions.
-    if (Error S = moveToTheNextStage(IR))
-      return S;
-  }
-
-  for (const InstRef &IR : Ready)
-    notifyInstructionReady(IR);
-
-  return issueReadyInstructions();
-}
-
-#ifndef NDEBUG
-static void verifyInstructionEliminated(const InstRef &IR) {
-  const Instruction &Inst = *IR.getInstruction();
-  assert(Inst.isEliminated() && "Instruction was not eliminated!");
-  assert(Inst.isReady() && "Instruction in an inconsistent state!");
-
-  // Ensure that instructions eliminated at register renaming stage are in a
-  // consistent state.
-  const InstrDesc &Desc = Inst.getDesc();
-  assert(!Desc.MayLoad && !Desc.MayStore && "Cannot eliminate a memory op!");
-}
-#endif
-
-Error ExecuteStage::handleInstructionEliminated(InstRef &IR) {
-#ifndef NDEBUG
-  verifyInstructionEliminated(IR);
-#endif
-  notifyInstructionReady(IR);
-  notifyInstructionIssued(IR, {});
-  IR.getInstruction()->forceExecuted();
-  notifyInstructionExecuted(IR);
-  return moveToTheNextStage(IR);
-}
-
-// Schedule the instruction for execution on the hardware.
-Error ExecuteStage::execute(InstRef &IR) {
-  assert(isAvailable(IR) && "Scheduler is not available!");
-
-#ifndef NDEBUG
-  // Ensure that the HWS has not stored this instruction in its queues.
-  HWS.sanityCheck(IR);
-#endif
-
-  if (IR.getInstruction()->isEliminated())
-    return handleInstructionEliminated(IR);
-
-  // Reserve a slot in each buffered resource. Also, mark units with
-  // BufferSize=0 as reserved. Resources with a buffer size of zero will only
-  // be released after the instruction is issued, and all the ResourceCycles
-  // for those units have been consumed.
-  HWS.dispatch(IR);
-  notifyReservedOrReleasedBuffers(IR, /* Reserved */ true);
-  if (!HWS.isReady(IR))
-    return ErrorSuccess();
-
-  // If we did not return early, then the scheduler is ready for execution.
-  notifyInstructionReady(IR);
-
-  // If we cannot issue immediately, the HWS will add IR to its ready queue for
-  // execution later, so we must return early here.
-  if (!HWS.mustIssueImmediately(IR))
-    return ErrorSuccess();
-
-  // Issue IR to the underlying pipelines.
-  return issueInstruction(IR);
-}
-
-void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) const {
-  LLVM_DEBUG(dbgs() << "[E] Instruction Executed: #" << IR << '\n');
-  notifyEvent<HWInstructionEvent>(
-      HWInstructionEvent(HWInstructionEvent::Executed, IR));
-}
-
-void ExecuteStage::notifyInstructionReady(const InstRef &IR) const {
-  LLVM_DEBUG(dbgs() << "[E] Instruction Ready: #" << IR << '\n');
-  notifyEvent<HWInstructionEvent>(
-      HWInstructionEvent(HWInstructionEvent::Ready, IR));
-}
-
-void ExecuteStage::notifyResourceAvailable(const ResourceRef &RR) const {
-  LLVM_DEBUG(dbgs() << "[E] Resource Available: [" << RR.first << '.'
-                    << RR.second << "]\n");
-  for (HWEventListener *Listener : getListeners())
-    Listener->onResourceAvailable(RR);
-}
-
-void ExecuteStage::notifyInstructionIssued(
-    const InstRef &IR,
-    ArrayRef<std::pair<ResourceRef, ResourceCycles>> Used) const {
-  LLVM_DEBUG({
-    dbgs() << "[E] Instruction Issued: #" << IR << '\n';
-    for (const std::pair<ResourceRef, ResourceCycles> &Resource : Used) {
-      dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
-             << Resource.first.second << "], ";
-      dbgs() << "cycles: " << Resource.second << '\n';
-    }
-  });
-  notifyEvent<HWInstructionEvent>(HWInstructionIssuedEvent(IR, Used));
-}
-
-void ExecuteStage::notifyReservedOrReleasedBuffers(const InstRef &IR,
-                                                   bool Reserved) const {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  if (Desc.Buffers.empty())
-    return;
-
-  SmallVector<unsigned, 4> BufferIDs(Desc.Buffers.begin(), Desc.Buffers.end());
-  std::transform(Desc.Buffers.begin(), Desc.Buffers.end(), BufferIDs.begin(),
-                 [&](uint64_t Op) { return HWS.getResourceID(Op); });
-  if (Reserved) {
-    for (HWEventListener *Listener : getListeners())
-      Listener->onReservedBuffers(IR, BufferIDs);
-    return;
-  }
-
-  for (HWEventListener *Listener : getListeners())
-    Listener->onReleasedBuffers(IR, BufferIDs);
-}
-
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Stages/InstructionTables.cpp b/tools/llvm-mca/lib/Stages/InstructionTables.cpp
deleted file mode 100644
index 33c30e7..0000000
--- a/tools/llvm-mca/lib/Stages/InstructionTables.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-//===--------------------- InstructionTables.cpp ----------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file implements the method InstructionTables::execute().
-/// Method execute() prints a theoretical resource pressure distribution
-/// based on the information available in the scheduling model, without
-/// running the pipeline.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Stages/InstructionTables.h"
-
-namespace llvm {
-namespace mca {
-
-Error InstructionTables::execute(InstRef &IR) {
-  const InstrDesc &Desc = IR.getInstruction()->getDesc();
-  UsedResources.clear();
-
-  // Identify the resources consumed by this instruction.
-  for (const std::pair<uint64_t, ResourceUsage> Resource : Desc.Resources) {
-    // Skip zero-cycle resources (i.e., unused resources).
-    if (!Resource.second.size())
-      continue;
-    unsigned Cycles = Resource.second.size();
-    unsigned Index = std::distance(
-        Masks.begin(), std::find(Masks.begin(), Masks.end(), Resource.first));
-    const MCProcResourceDesc &ProcResource = *SM.getProcResource(Index);
-    unsigned NumUnits = ProcResource.NumUnits;
-    if (!ProcResource.SubUnitsIdxBegin) {
-      // The number of cycles consumed by each unit.
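-      // ResourceCycles(Cycles, NumUnits) is the ratio Cycles/NumUnits, so
-      // the usage is spread evenly across the units.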
-      for (unsigned I = 0, E = NumUnits; I < E; ++I) {
-        ResourceRef ResourceUnit = std::make_pair(Index, 1U << I);
-        UsedResources.emplace_back(
-            std::make_pair(ResourceUnit, ResourceCycles(Cycles, NumUnits)));
-      }
-      continue;
-    }
-
-    // This is a group. Obtain the set of resources contained in this
-    // group. Some of these resources may implement multiple units.
-    // Uniformly distribute Cycles across all of the units.
-    for (unsigned I1 = 0; I1 < NumUnits; ++I1) {
-      unsigned SubUnitIdx = ProcResource.SubUnitsIdxBegin[I1];
-      const MCProcResourceDesc &SubUnit = *SM.getProcResource(SubUnitIdx);
-      // Compute the number of cycles consumed by each resource unit.
-      for (unsigned I2 = 0, E2 = SubUnit.NumUnits; I2 < E2; ++I2) {
-        ResourceRef ResourceUnit = std::make_pair(SubUnitIdx, 1U << I2);
-        UsedResources.emplace_back(std::make_pair(
-            ResourceUnit, ResourceCycles(Cycles, NumUnits * SubUnit.NumUnits)));
-      }
-    }
-  }
-
-  // Send a fake instruction issued event to all the views.
-  HWInstructionIssuedEvent Event(IR, UsedResources);
-  notifyEvent<HWInstructionIssuedEvent>(Event);
-  return ErrorSuccess();
-}
-
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Stages/RetireStage.cpp b/tools/llvm-mca/lib/Stages/RetireStage.cpp
deleted file mode 100644
index 47eed5f..0000000
--- a/tools/llvm-mca/lib/Stages/RetireStage.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//===---------------------- RetireStage.cpp ---------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines the retire stage of an instruction pipeline.
-/// The RetireStage represents the processing logic that interacts with the
-/// simulated RetireControlUnit hardware.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Stages/RetireStage.h"
-#include "HWEventListener.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "llvm-mca"
-
-namespace llvm {
-namespace mca {
-
-llvm::Error RetireStage::cycleStart() {
-  if (RCU.isEmpty())
-    return llvm::ErrorSuccess();
-
-  const unsigned MaxRetirePerCycle = RCU.getMaxRetirePerCycle();
-  unsigned NumRetired = 0;
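-  // Tokens retire strictly in program order; stop at the first token whose
-  // instruction has not finished executing.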
-  while (!RCU.isEmpty()) {
-    if (MaxRetirePerCycle != 0 && NumRetired == MaxRetirePerCycle)
-      break;
-    const RetireControlUnit::RUToken &Current = RCU.peekCurrentToken();
-    if (!Current.Executed)
-      break;
-    RCU.consumeCurrentToken();
-    notifyInstructionRetired(Current.IR);
-    NumRetired++;
-  }
-
-  return llvm::ErrorSuccess();
-}
-
-llvm::Error RetireStage::execute(InstRef &IR) {
-  RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
-  return llvm::ErrorSuccess();
-}
-
-void RetireStage::notifyInstructionRetired(const InstRef &IR) const {
-  LLVM_DEBUG(llvm::dbgs() << "[E] Instruction Retired: #" << IR << '\n');
-  llvm::SmallVector<unsigned, 4> FreedRegs(PRF.getNumRegisterFiles());
-  const Instruction &Inst = *IR.getInstruction();
-
-  for (const WriteState &WS : Inst.getDefs())
-    PRF.removeRegisterWrite(WS, FreedRegs);
-  notifyEvent<HWInstructionEvent>(HWInstructionRetiredEvent(IR, FreedRegs));
-}
-
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Stages/Stage.cpp b/tools/llvm-mca/lib/Stages/Stage.cpp
deleted file mode 100644
index c3cfe47..0000000
--- a/tools/llvm-mca/lib/Stages/Stage.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//===---------------------- Stage.cpp ---------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file defines a stage.
-/// A chain of stages composes an instruction pipeline.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Stages/Stage.h"
-
-namespace llvm {
-namespace mca {
-
-// Pin the vtable here in the implementation file.
-Stage::~Stage() = default;
-
-void Stage::addListener(HWEventListener *Listener) {
-  Listeners.insert(Listener);
-}
-
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/lib/Support.cpp b/tools/llvm-mca/lib/Support.cpp
deleted file mode 100644
index a6ff26d..0000000
--- a/tools/llvm-mca/lib/Support.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//===--------------------- Support.cpp --------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This file implements a few helper functions used by various pipeline
-/// components.
-///
-//===----------------------------------------------------------------------===//
-
-#include "Support.h"
-#include "llvm/MC/MCSchedule.h"
-
-namespace llvm {
-namespace mca {
-
-void computeProcResourceMasks(const MCSchedModel &SM,
-                              SmallVectorImpl<uint64_t> &Masks) {
-  unsigned ProcResourceID = 0;
-
-  // Create a unique bitmask for every processor resource unit.
-  // Skip resource at index 0, since it always references 'InvalidUnit'.
-  Masks.resize(SM.getNumProcResourceKinds());
-  for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
-    const MCProcResourceDesc &Desc = *SM.getProcResource(I);
-    if (Desc.SubUnitsIdxBegin)
-      continue;
-    Masks[I] = 1ULL << ProcResourceID;
-    ProcResourceID++;
-  }
-
-  // Create a unique bitmask for every processor resource group.
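-  // A group mask is its own bit ORed with the bits of all its sub-units.
-  // For example (hypothetical model): units P0=0b001 and P1=0b010 with a
-  // group P01 would get Masks[P01] = 0b100 | 0b001 | 0b010 = 0b111.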
-  for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
-    const MCProcResourceDesc &Desc = *SM.getProcResource(I);
-    if (!Desc.SubUnitsIdxBegin)
-      continue;
-    Masks[I] = 1ULL << ProcResourceID;
-    for (unsigned U = 0; U < Desc.NumUnits; ++U) {
-      uint64_t OtherMask = Masks[Desc.SubUnitsIdxBegin[U]];
-      Masks[I] |= OtherMask;
-    }
-    ProcResourceID++;
-  }
-}
-
-double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
-                               unsigned NumMicroOps,
-                               ArrayRef<unsigned> ProcResourceUsage) {
-  // The block throughput is bounded from above by the hardware dispatch
-  // throughput. That is because the DispatchWidth is an upper bound on the
-  // number of opcodes that can be part of a single dispatch group.
-  double Max = static_cast<double>(NumMicroOps) / DispatchWidth;
-
-  // The block throughput is also limited by the amount of hardware parallelism.
-  // The number of available resource units affects the resource pressure
-  // distribution, as well as how many blocks can be executed every cycle.
-  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
-    unsigned ResourceCycles = ProcResourceUsage[I];
-    if (!ResourceCycles)
-      continue;
-
-    const MCProcResourceDesc &MCDesc = *SM.getProcResource(I);
-    double Throughput = static_cast<double>(ResourceCycles) / MCDesc.NumUnits;
-    Max = std::max(Max, Throughput);
-  }
-
-  // The block reciprocal throughput is computed as the MAX of:
-  //  - (NumMicroOps / DispatchWidth)
-  //  - (ResourceCycles / NumUnits)  for every consumed processor resource.
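-  // For example (hypothetical values): NumMicroOps=4 with DispatchWidth=2,
-  // plus 3 cycles on a 2-unit resource, yields Max = max(4/2, 3/2) = 2.0.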
-  return Max;
-}
-
-} // namespace mca
-} // namespace llvm
diff --git a/tools/llvm-mca/llvm-mca.cpp b/tools/llvm-mca/llvm-mca.cpp
index a5edbce..68d63db 100644
--- a/tools/llvm-mca/llvm-mca.cpp
+++ b/tools/llvm-mca/llvm-mca.cpp
@@ -24,8 +24,6 @@
 #include "CodeRegion.h"
 #include "CodeRegionGenerator.h"
 #include "PipelinePrinter.h"
-#include "Stages/EntryStage.h"
-#include "Stages/InstructionTables.h"
 #include "Views/DispatchStatistics.h"
 #include "Views/InstructionInfoView.h"
 #include "Views/RegisterFileStatistics.h"
@@ -34,13 +32,15 @@
 #include "Views/SchedulerStatistics.h"
 #include "Views/SummaryView.h"
 #include "Views/TimelineView.h"
-#include "include/Context.h"
-#include "include/Pipeline.h"
-#include "include/Support.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCObjectFileInfo.h"
 #include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MCA/Context.h"
+#include "llvm/MCA/Pipeline.h"
+#include "llvm/MCA/Stages/EntryStage.h"
+#include "llvm/MCA/Stages/InstructionTables.h"
+#include "llvm/MCA/Support.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/ErrorOr.h"
@@ -149,15 +149,13 @@
                   cl::desc("If set, assume that loads and stores do not alias"),
                   cl::cat(ToolOptions), cl::init(true));
 
-static cl::opt<unsigned>
-    LoadQueueSize("lqueue",
-                  cl::desc("Size of the load queue"),
-                  cl::cat(ToolOptions), cl::init(0));
+static cl::opt<unsigned> LoadQueueSize("lqueue",
+                                       cl::desc("Size of the load queue"),
+                                       cl::cat(ToolOptions), cl::init(0));
 
-static cl::opt<unsigned>
-    StoreQueueSize("squeue",
-                   cl::desc("Size of the store queue"),
-                   cl::cat(ToolOptions), cl::init(0));
+static cl::opt<unsigned> StoreQueueSize("squeue",
+                                        cl::desc("Size of the store queue"),
+                                        cl::cat(ToolOptions), cl::init(0));
 
 static cl::opt<bool>
     PrintInstructionTables("instruction-tables",
@@ -339,8 +337,14 @@
   // Parse the input and create CodeRegions that llvm-mca can analyze.
   mca::AsmCodeRegionGenerator CRG(*TheTarget, SrcMgr, Ctx, *MAI, *STI, *MCII);
   Expected<const mca::CodeRegions &> RegionsOrErr = CRG.parseCodeRegions();
-  if (auto Err = RegionsOrErr.takeError()) {
-    WithColor::error() << Err << "\n";
+  if (!RegionsOrErr) {
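+    // handleErrors consumes StringErrors via the handler; any other error
+    // kind is returned and reported through the generic path below.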
+    if (auto Err =
+            handleErrors(RegionsOrErr.takeError(), [](const StringError &E) {
+              WithColor::error() << E.getMessage() << '\n';
+            })) {
+      // Default case.
+      WithColor::error() << toString(std::move(Err)) << '\n';
+    }
     return 1;
   }
   const mca::CodeRegions &Regions = *RegionsOrErr;
@@ -378,7 +382,7 @@
     Width = DispatchWidth;
 
   // Create an instruction builder.
-  mca::InstrBuilder IB(*STI, *MCII, *MRI, *MCIA);
+  mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get());
 
   // Create a context to control ownership of the pipeline hardware.
   mca::Context MCA(*MRI, *STI);
diff --git a/tools/llvm-microsoft-demangle-fuzzer/CMakeLists.txt b/tools/llvm-microsoft-demangle-fuzzer/CMakeLists.txt
new file mode 100644
index 0000000..d3db43a
--- /dev/null
+++ b/tools/llvm-microsoft-demangle-fuzzer/CMakeLists.txt
@@ -0,0 +1,10 @@
+set(LLVM_LINK_COMPONENTS
+  Demangle
+  FuzzMutate
+  Support
+)
+
+add_llvm_fuzzer(llvm-microsoft-demangle-fuzzer
+  llvm-microsoft-demangle-fuzzer.cpp
+  DUMMY_MAIN DummyDemanglerFuzzer.cpp
+  )
diff --git a/tools/llvm-demangle-fuzzer/DummyDemanglerFuzzer.cpp b/tools/llvm-microsoft-demangle-fuzzer/DummyDemanglerFuzzer.cpp
similarity index 100%
copy from tools/llvm-demangle-fuzzer/DummyDemanglerFuzzer.cpp
copy to tools/llvm-microsoft-demangle-fuzzer/DummyDemanglerFuzzer.cpp
diff --git a/tools/llvm-microsoft-demangle-fuzzer/llvm-microsoft-demangle-fuzzer.cpp b/tools/llvm-microsoft-demangle-fuzzer/llvm-microsoft-demangle-fuzzer.cpp
new file mode 100644
index 0000000..4c1a413
--- /dev/null
+++ b/tools/llvm-microsoft-demangle-fuzzer/llvm-microsoft-demangle-fuzzer.cpp
@@ -0,0 +1,21 @@
+//===--- llvm-microsoft-demangle-fuzzer.cpp - Fuzzer for the MS Demangler -===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Demangle/Demangle.h"
+
+#include <cstdint>
+#include <cstdlib>
+#include <string>
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+  std::string NullTerminatedString((const char *)Data, Size);
+  free(llvm::microsoftDemangle(NullTerminatedString.c_str(), nullptr, nullptr,
+                               nullptr));
+  return 0;
+}
diff --git a/tools/llvm-nm/llvm-nm.cpp b/tools/llvm-nm/llvm-nm.cpp
index 88217a5..042e284 100644
--- a/tools/llvm-nm/llvm-nm.cpp
+++ b/tools/llvm-nm/llvm-nm.cpp
@@ -91,6 +91,8 @@
                         cl::Grouping);
 cl::opt<bool> POSIXFormat("P", cl::desc("Alias for --format=posix"),
                           cl::Grouping);
+cl::alias Portability("portability", cl::desc("Alias for --format=posix"),
+                      cl::aliasopt(POSIXFormat), cl::NotHidden);
 cl::opt<bool> DarwinFormat("m", cl::desc("Alias for --format=darwin"),
                            cl::Grouping);
 
@@ -677,7 +679,7 @@
 }
 
 static Optional<std::string> demangle(StringRef Name, bool StripUnderscore) {
-  if (StripUnderscore && Name.size() > 0 && Name[0] == '_')
+  if (StripUnderscore && !Name.empty() && Name[0] == '_')
     Name = Name.substr(1);
 
   if (!Name.startswith("_Z"))
@@ -1163,7 +1165,7 @@
   // file get the section number for that section in this object file.
   unsigned int Nsect = 0;
   MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(&Obj);
-  if (SegSect.size() != 0 && MachO) {
+  if (!SegSect.empty() && MachO) {
     Nsect = getNsectForSegSect(MachO);
     // If this section is not in the object file no symbols are printed.
     if (Nsect == 0)
@@ -1800,7 +1802,7 @@
   }
   if (MachOUniversalBinary *UB = dyn_cast<MachOUniversalBinary>(&Bin)) {
     // If we have a list of architecture flags specified dump only those.
-    if (!ArchAll && ArchFlags.size() != 0) {
+    if (!ArchAll && !ArchFlags.empty()) {
       // Look for a slice in the universal binary that matches each ArchFlag.
       bool ArchFound;
       for (unsigned i = 0; i < ArchFlags.size(); ++i) {
@@ -2071,7 +2073,7 @@
     }
   }
 
-  if (SegSect.size() != 0 && SegSect.size() != 2)
+  if (!SegSect.empty() && SegSect.size() != 2)
     error("bad number of arguments (must be two arguments)",
           "for the -s option");
 
diff --git a/tools/llvm-objcopy/CMakeLists.txt b/tools/llvm-objcopy/CMakeLists.txt
index afbf787..1beb737 100644
--- a/tools/llvm-objcopy/CMakeLists.txt
+++ b/tools/llvm-objcopy/CMakeLists.txt
@@ -17,6 +17,10 @@
   Buffer.cpp
   CopyConfig.cpp
   llvm-objcopy.cpp
+  COFF/COFFObjcopy.cpp
+  COFF/Object.cpp
+  COFF/Reader.cpp
+  COFF/Writer.cpp
   ELF/ELFObjcopy.cpp
   ELF/Object.cpp
   DEPENDS
diff --git a/tools/llvm-objcopy/COFF/COFFObjcopy.cpp b/tools/llvm-objcopy/COFF/COFFObjcopy.cpp
new file mode 100644
index 0000000..6b386d2
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/COFFObjcopy.cpp
@@ -0,0 +1,98 @@
+//===- COFFObjcopy.cpp ----------------------------------------------------===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "COFFObjcopy.h"
+#include "Buffer.h"
+#include "CopyConfig.h"
+#include "Object.h"
+#include "Reader.h"
+#include "Writer.h"
+#include "llvm-objcopy.h"
+
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/Errc.h"
+#include <cassert>
+
+namespace llvm {
+namespace objcopy {
+namespace coff {
+
+using namespace object;
+using namespace COFF;
+
+static Error handleArgs(const CopyConfig &Config, Object &Obj) {
+  // StripAll removes all symbols and thus also removes all relocations.
+  if (Config.StripAll || Config.StripAllGNU)
+    for (Section &Sec : Obj.Sections)
+      Sec.Relocs.clear();
+
+  // If we need to do per-symbol removals, initialize the Referenced field.
+  if (Config.StripUnneeded || Config.DiscardAll ||
+      !Config.SymbolsToRemove.empty())
+    if (Error E = Obj.markSymbols())
+      return E;
+
+  // Actually do removals of symbols.
+  Obj.removeSymbols([&](const Symbol &Sym) {
+    // For StripAll, all relocations have been stripped and we remove all
+    // symbols.
+    if (Config.StripAll || Config.StripAllGNU)
+      return true;
+
+    if (is_contained(Config.SymbolsToRemove, Sym.Name)) {
+      // Explicitly removing a referenced symbol is an error.
+      if (Sym.Referenced)
+        reportError(Config.OutputFilename,
+                    make_error<StringError>(
+                        "not stripping symbol '" + Sym.Name +
+                            "' because it is named in a relocation.",
+                        llvm::errc::invalid_argument));
+      return true;
+    }
+
+    if (!Sym.Referenced) {
+      // With --strip-unneeded, GNU objcopy removes all unreferenced local
+      // symbols, and any unreferenced undefined external.
+      if (Config.StripUnneeded &&
+          (Sym.Sym.StorageClass == IMAGE_SYM_CLASS_STATIC ||
+           Sym.Sym.SectionNumber == 0))
+        return true;
+
+      // GNU objcopy keeps referenced local symbols and external symbols
+      // if --discard-all is set, similar to what --strip-unneeded does,
+      // but undefined local symbols are kept when --discard-all is set.
+      if (Config.DiscardAll && Sym.Sym.StorageClass == IMAGE_SYM_CLASS_STATIC &&
+          Sym.Sym.SectionNumber != 0)
+        return true;
+    }
+
+    return false;
+  });
+  return Error::success();
+}
+
+void executeObjcopyOnBinary(const CopyConfig &Config,
+                            object::COFFObjectFile &In, Buffer &Out) {
+  COFFReader Reader(In);
+  Expected<std::unique_ptr<Object>> ObjOrErr = Reader.create();
+  if (!ObjOrErr)
+    reportError(Config.InputFilename, ObjOrErr.takeError());
+  Object *Obj = ObjOrErr->get();
+  assert(Obj && "Unable to deserialize COFF object");
+  if (Error E = handleArgs(Config, *Obj))
+    reportError(Config.InputFilename, std::move(E));
+  COFFWriter Writer(*Obj, Out);
+  if (Error E = Writer.write())
+    reportError(Config.OutputFilename, std::move(E));
+}
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
diff --git a/tools/llvm-objcopy/COFF/COFFObjcopy.h b/tools/llvm-objcopy/COFF/COFFObjcopy.h
new file mode 100644
index 0000000..bf70bd9
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/COFFObjcopy.h
@@ -0,0 +1,31 @@
+//===- COFFObjcopy.h --------------------------------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_OBJCOPY_COFFOBJCOPY_H
+#define LLVM_TOOLS_OBJCOPY_COFFOBJCOPY_H
+
+namespace llvm {
+
+namespace object {
+class COFFObjectFile;
+} // end namespace object
+
+namespace objcopy {
+struct CopyConfig;
+class Buffer;
+
+namespace coff {
+void executeObjcopyOnBinary(const CopyConfig &Config,
+                            object::COFFObjectFile &In, Buffer &Out);
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
+
+#endif // LLVM_TOOLS_OBJCOPY_COFFOBJCOPY_H
diff --git a/tools/llvm-objcopy/COFF/Object.cpp b/tools/llvm-objcopy/COFF/Object.cpp
new file mode 100644
index 0000000..315d3a7
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/Object.cpp
@@ -0,0 +1,70 @@
+//===- Object.cpp ---------------------------------------------------------===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Object.h"
+#include <algorithm>
+
+namespace llvm {
+namespace objcopy {
+namespace coff {
+
+using namespace object;
+
+void Object::addSymbols(ArrayRef<Symbol> NewSymbols) {
+  for (Symbol S : NewSymbols) {
+    S.UniqueId = NextSymbolUniqueId++;
+    Symbols.emplace_back(S);
+  }
+  updateSymbols();
+}
+
+void Object::updateSymbols() {
+  SymbolMap = DenseMap<size_t, Symbol *>(Symbols.size());
+  size_t RawSymIndex = 0;
+  for (Symbol &Sym : Symbols) {
+    SymbolMap[Sym.UniqueId] = &Sym;
+    Sym.RawIndex = RawSymIndex;
+    RawSymIndex += 1 + Sym.Sym.NumberOfAuxSymbols;
+  }
+}
+
+const Symbol *Object::findSymbol(size_t UniqueId) const {
+  auto It = SymbolMap.find(UniqueId);
+  if (It == SymbolMap.end())
+    return nullptr;
+  return It->second;
+}
+
+void Object::removeSymbols(function_ref<bool(const Symbol &)> ToRemove) {
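+  // Standard erase-remove idiom; updateSymbols() below rebuilds SymbolMap
+  // and the raw indices, since erasing can move the remaining Symbols.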
+  Symbols.erase(
+      std::remove_if(std::begin(Symbols), std::end(Symbols),
+                     [ToRemove](const Symbol &Sym) { return ToRemove(Sym); }),
+      std::end(Symbols));
+  updateSymbols();
+}
+
+Error Object::markSymbols() {
+  for (Symbol &Sym : Symbols)
+    Sym.Referenced = false;
+  for (const Section &Sec : Sections) {
+    for (const Relocation &R : Sec.Relocs) {
+      auto It = SymbolMap.find(R.Target);
+      if (It == SymbolMap.end())
+        return make_error<StringError>("Relocation target " + Twine(R.Target) +
+                                           " not found",
+                                       object_error::invalid_symbol_index);
+      It->second->Referenced = true;
+    }
+  }
+  return Error::success();
+}
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
diff --git a/tools/llvm-objcopy/COFF/Object.h b/tools/llvm-objcopy/COFF/Object.h
new file mode 100644
index 0000000..7531fb4
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/Object.h
@@ -0,0 +1,148 @@
+//===- Object.h -------------------------------------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_OBJCOPY_COFF_OBJECT_H
+#define LLVM_TOOLS_OBJCOPY_COFF_OBJECT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace objcopy {
+namespace coff {
+
+struct Relocation {
+  Relocation() {}
+  Relocation(const object::coff_relocation &R) : Reloc(R) {}
+
+  object::coff_relocation Reloc;
+  size_t Target;
+  StringRef TargetName; // Used for diagnostics only
+};
+
+struct Section {
+  object::coff_section Header;
+  ArrayRef<uint8_t> Contents;
+  std::vector<Relocation> Relocs;
+  StringRef Name;
+};
+
+struct Symbol {
+  object::coff_symbol32 Sym;
+  StringRef Name;
+  ArrayRef<uint8_t> AuxData;
+  size_t UniqueId;
+  size_t RawIndex;
+  bool Referenced;
+};
+
+struct Object {
+  bool IsPE = false;
+
+  object::dos_header DosHeader;
+  ArrayRef<uint8_t> DosStub;
+
+  object::coff_file_header CoffFileHeader;
+
+  bool Is64 = false;
+  object::pe32plus_header PeHeader;
+  uint32_t BaseOfData = 0; // pe32plus_header lacks this field.
+
+  std::vector<object::data_directory> DataDirectories;
+  std::vector<Section> Sections;
+
+  ArrayRef<Symbol> getSymbols() const { return Symbols; }
+  // This allows mutating individual Symbols, but not mutating the list
+  // of symbols itself.
+  iterator_range<std::vector<Symbol>::iterator> getMutableSymbols() {
+    return make_range(Symbols.begin(), Symbols.end());
+  }
+
+  const Symbol *findSymbol(size_t UniqueId) const;
+
+  void addSymbols(ArrayRef<Symbol> NewSymbols);
+  void removeSymbols(function_ref<bool(const Symbol &)> ToRemove);
+
+  // Set the Referenced field on all Symbols, based on relocations in
+  // all sections.
+  Error markSymbols();
+
+private:
+  std::vector<Symbol> Symbols;
+  DenseMap<size_t, Symbol *> SymbolMap;
+
+  size_t NextSymbolUniqueId = 0;
+
+  // Update SymbolMap and RawIndex in each Symbol.
+  void updateSymbols();
+};
+
+// Copy between coff_symbol16 and coff_symbol32.
+// The source and destination files can use either coff_symbol16 or
+// coff_symbol32, while we always store them as coff_symbol32 in the
+// intermediate data structure.
+template <class Symbol1Ty, class Symbol2Ty>
+void copySymbol(Symbol1Ty &Dest, const Symbol2Ty &Src) {
+  static_assert(sizeof(Dest.Name.ShortName) == sizeof(Src.Name.ShortName),
+                "Mismatched name sizes");
+  memcpy(Dest.Name.ShortName, Src.Name.ShortName, sizeof(Dest.Name.ShortName));
+  Dest.Value = Src.Value;
+  Dest.SectionNumber = Src.SectionNumber;
+  Dest.Type = Src.Type;
+  Dest.StorageClass = Src.StorageClass;
+  Dest.NumberOfAuxSymbols = Src.NumberOfAuxSymbols;
+}
+
+// Copy between pe32_header and pe32plus_header.
+// We store the intermediate state in a pe32plus_header.
+template <class PeHeader1Ty, class PeHeader2Ty>
+void copyPeHeader(PeHeader1Ty &Dest, const PeHeader2Ty &Src) {
+  Dest.Magic = Src.Magic;
+  Dest.MajorLinkerVersion = Src.MajorLinkerVersion;
+  Dest.MinorLinkerVersion = Src.MinorLinkerVersion;
+  Dest.SizeOfCode = Src.SizeOfCode;
+  Dest.SizeOfInitializedData = Src.SizeOfInitializedData;
+  Dest.SizeOfUninitializedData = Src.SizeOfUninitializedData;
+  Dest.AddressOfEntryPoint = Src.AddressOfEntryPoint;
+  Dest.BaseOfCode = Src.BaseOfCode;
+  Dest.ImageBase = Src.ImageBase;
+  Dest.SectionAlignment = Src.SectionAlignment;
+  Dest.FileAlignment = Src.FileAlignment;
+  Dest.MajorOperatingSystemVersion = Src.MajorOperatingSystemVersion;
+  Dest.MinorOperatingSystemVersion = Src.MinorOperatingSystemVersion;
+  Dest.MajorImageVersion = Src.MajorImageVersion;
+  Dest.MinorImageVersion = Src.MinorImageVersion;
+  Dest.MajorSubsystemVersion = Src.MajorSubsystemVersion;
+  Dest.MinorSubsystemVersion = Src.MinorSubsystemVersion;
+  Dest.Win32VersionValue = Src.Win32VersionValue;
+  Dest.SizeOfImage = Src.SizeOfImage;
+  Dest.SizeOfHeaders = Src.SizeOfHeaders;
+  Dest.CheckSum = Src.CheckSum;
+  Dest.Subsystem = Src.Subsystem;
+  Dest.DLLCharacteristics = Src.DLLCharacteristics;
+  Dest.SizeOfStackReserve = Src.SizeOfStackReserve;
+  Dest.SizeOfStackCommit = Src.SizeOfStackCommit;
+  Dest.SizeOfHeapReserve = Src.SizeOfHeapReserve;
+  Dest.SizeOfHeapCommit = Src.SizeOfHeapCommit;
+  Dest.LoaderFlags = Src.LoaderFlags;
+  Dest.NumberOfRvaAndSize = Src.NumberOfRvaAndSize;
+}
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
+
+#endif // LLVM_TOOLS_OBJCOPY_COFF_OBJECT_H
diff --git a/tools/llvm-objcopy/COFF/Reader.cpp b/tools/llvm-objcopy/COFF/Reader.cpp
new file mode 100644
index 0000000..a017683
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/Reader.cpp
@@ -0,0 +1,171 @@
+//===- Reader.cpp ---------------------------------------------------------===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Reader.h"
+#include "Object.h"
+#include "llvm-objcopy.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+namespace objcopy {
+namespace coff {
+
+using namespace object;
+
+Error COFFReader::readExecutableHeaders(Object &Obj) const {
+  const dos_header *DH = COFFObj.getDOSHeader();
+  Obj.Is64 = COFFObj.is64();
+  if (!DH)
+    return Error::success();
+
+  Obj.IsPE = true;
+  Obj.DosHeader = *DH;
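+  // Anything between the end of the DOS header and the PE magic is the DOS
+  // stub; preserve it verbatim.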
+  if (DH->AddressOfNewExeHeader > sizeof(*DH))
+    Obj.DosStub = ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(&DH[1]),
+                                    DH->AddressOfNewExeHeader - sizeof(*DH));
+
+  if (COFFObj.is64()) {
+    const pe32plus_header *PE32Plus = nullptr;
+    if (auto EC = COFFObj.getPE32PlusHeader(PE32Plus))
+      return errorCodeToError(EC);
+    Obj.PeHeader = *PE32Plus;
+  } else {
+    const pe32_header *PE32 = nullptr;
+    if (auto EC = COFFObj.getPE32Header(PE32))
+      return errorCodeToError(EC);
+    copyPeHeader(Obj.PeHeader, *PE32);
+    // The pe32plus_header (stored in Object) lacks the BaseOfData field.
+    Obj.BaseOfData = PE32->BaseOfData;
+  }
+
+  for (size_t I = 0; I < Obj.PeHeader.NumberOfRvaAndSize; I++) {
+    const data_directory *Dir;
+    if (auto EC = COFFObj.getDataDirectory(I, Dir))
+      return errorCodeToError(EC);
+    Obj.DataDirectories.emplace_back(*Dir);
+  }
+  return Error::success();
+}
+
+Error COFFReader::readSections(Object &Obj) const {
+  // Section indexing starts from 1.
+  for (size_t I = 1, E = COFFObj.getNumberOfSections(); I <= E; I++) {
+    const coff_section *Sec;
+    if (auto EC = COFFObj.getSection(I, Sec))
+      return errorCodeToError(EC);
+    Obj.Sections.push_back(Section());
+    Section &S = Obj.Sections.back();
+    S.Header = *Sec;
+    if (auto EC = COFFObj.getSectionContents(Sec, S.Contents))
+      return errorCodeToError(EC);
+    ArrayRef<coff_relocation> Relocs = COFFObj.getRelocations(Sec);
+    for (const coff_relocation &R : Relocs)
+      S.Relocs.push_back(R);
+    if (auto EC = COFFObj.getSectionName(Sec, S.Name))
+      return errorCodeToError(EC);
+    if (Sec->hasExtendedRelocations())
+      return make_error<StringError>("Extended relocations not supported yet",
+                                     object_error::parse_failed);
+  }
+  return Error::success();
+}
+
+Error COFFReader::readSymbols(Object &Obj, bool IsBigObj) const {
+  std::vector<Symbol> Symbols;
+  Symbols.reserve(COFFObj.getRawNumberOfSymbols());
+  for (uint32_t I = 0, E = COFFObj.getRawNumberOfSymbols(); I < E;) {
+    Expected<COFFSymbolRef> SymOrErr = COFFObj.getSymbol(I);
+    if (!SymOrErr)
+      return SymOrErr.takeError();
+    COFFSymbolRef SymRef = *SymOrErr;
+
+    Symbols.push_back(Symbol());
+    Symbol &Sym = Symbols.back();
+    // Copy symbols from the original form into an intermediate coff_symbol32.
+    if (IsBigObj)
+      copySymbol(Sym.Sym,
+                 *reinterpret_cast<const coff_symbol32 *>(SymRef.getRawPtr()));
+    else
+      copySymbol(Sym.Sym,
+                 *reinterpret_cast<const coff_symbol16 *>(SymRef.getRawPtr()));
+    if (auto EC = COFFObj.getSymbolName(SymRef, Sym.Name))
+      return errorCodeToError(EC);
+    Sym.AuxData = COFFObj.getSymbolAuxData(SymRef);
+    assert((Sym.AuxData.size() %
+            (IsBigObj ? sizeof(coff_symbol32) : sizeof(coff_symbol16))) == 0);
+    I += 1 + SymRef.getNumberOfAuxSymbols();
+  }
+  Obj.addSymbols(Symbols);
+  return Error::success();
+}
+
+Error COFFReader::setRelocTargets(Object &Obj) const {
+  std::vector<const Symbol *> RawSymbolTable;
+  for (const Symbol &Sym : Obj.getSymbols()) {
+    RawSymbolTable.push_back(&Sym);
+    for (size_t I = 0; I < Sym.Sym.NumberOfAuxSymbols; I++)
+      RawSymbolTable.push_back(nullptr);
+  }
+  for (Section &Sec : Obj.Sections) {
+    for (Relocation &R : Sec.Relocs) {
+      if (R.Reloc.SymbolTableIndex >= RawSymbolTable.size())
+        return make_error<StringError>("SymbolTableIndex out of range",
+                                       object_error::parse_failed);
+      const Symbol *Sym = RawSymbolTable[R.Reloc.SymbolTableIndex];
+      if (Sym == nullptr)
+        return make_error<StringError>("Invalid SymbolTableIndex",
+                                       object_error::parse_failed);
+      R.Target = Sym->UniqueId;
+      R.TargetName = Sym->Name;
+    }
+  }
+  return Error::success();
+}
+
+Expected<std::unique_ptr<Object>> COFFReader::create() const {
+  auto Obj = llvm::make_unique<Object>();
+
+  const coff_file_header *CFH = nullptr;
+  const coff_bigobj_file_header *CBFH = nullptr;
+  COFFObj.getCOFFHeader(CFH);
+  COFFObj.getCOFFBigObjHeader(CBFH);
+  bool IsBigObj = false;
+  if (CFH) {
+    Obj->CoffFileHeader = *CFH;
+  } else {
+    if (!CBFH)
+      return make_error<StringError>("No COFF file header returned",
+                                     object_error::parse_failed);
+    // Only copying the few fields from the bigobj header that we need
+    // and won't recreate in the end.
+    Obj->CoffFileHeader.Machine = CBFH->Machine;
+    Obj->CoffFileHeader.TimeDateStamp = CBFH->TimeDateStamp;
+    IsBigObj = true;
+  }
+
+  if (Error E = readExecutableHeaders(*Obj))
+    return std::move(E);
+  if (Error E = readSections(*Obj))
+    return std::move(E);
+  if (Error E = readSymbols(*Obj, IsBigObj))
+    return std::move(E);
+  if (Error E = setRelocTargets(*Obj))
+    return std::move(E);
+
+  return std::move(Obj);
+}
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
diff --git a/tools/llvm-objcopy/COFF/Reader.h b/tools/llvm-objcopy/COFF/Reader.h
new file mode 100644
index 0000000..ca7057d
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/Reader.h
@@ -0,0 +1,43 @@
+//===- Reader.h -------------------------------------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_OBJCOPY_COFF_READER_H
+#define LLVM_TOOLS_OBJCOPY_COFF_READER_H
+
+#include "Buffer.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace objcopy {
+namespace coff {
+
+struct Object;
+
+using object::COFFObjectFile;
+
+class COFFReader {
+  const COFFObjectFile &COFFObj;
+
+  Error readExecutableHeaders(Object &Obj) const;
+  Error readSections(Object &Obj) const;
+  Error readSymbols(Object &Obj, bool IsBigObj) const;
+  Error setRelocTargets(Object &Obj) const;
+
+public:
+  explicit COFFReader(const COFFObjectFile &O) : COFFObj(O) {}
+  Expected<std::unique_ptr<Object>> create() const;
+};
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
+
+#endif // LLVM_TOOLS_OBJCOPY_COFF_READER_H
diff --git a/tools/llvm-objcopy/COFF/Writer.cpp b/tools/llvm-objcopy/COFF/Writer.cpp
new file mode 100644
index 0000000..385d43b
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/Writer.cpp
@@ -0,0 +1,337 @@
+//===- Writer.cpp ---------------------------------------------------------===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Writer.h"
+#include "Object.h"
+#include "llvm-objcopy.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+namespace objcopy {
+namespace coff {
+
+using namespace object;
+using namespace COFF;
+
+Error COFFWriter::finalizeRelocTargets() {
+  for (Section &Sec : Obj.Sections) {
+    for (Relocation &R : Sec.Relocs) {
+      const Symbol *Sym = Obj.findSymbol(R.Target);
+      if (Sym == nullptr)
+        return make_error<StringError>("Relocation target " + R.TargetName +
+                                           " (" + Twine(R.Target) +
+                                           ") not found",
+                                       object_error::invalid_symbol_index);
+      R.Reloc.SymbolTableIndex = Sym->RawIndex;
+    }
+  }
+  return Error::success();
+}
+
+void COFFWriter::layoutSections() {
+  for (auto &S : Obj.Sections) {
+    if (S.Header.SizeOfRawData > 0)
+      S.Header.PointerToRawData = FileSize;
+    FileSize += S.Header.SizeOfRawData; // For executables, this is already
+                                        // aligned to FileAlignment.
+    S.Header.NumberOfRelocations = S.Relocs.size();
+    S.Header.PointerToRelocations =
+        S.Header.NumberOfRelocations > 0 ? FileSize : 0;
+    FileSize += S.Relocs.size() * sizeof(coff_relocation);
+    FileSize = alignTo(FileSize, FileAlignment);
+
+    if (S.Header.Characteristics & IMAGE_SCN_CNT_INITIALIZED_DATA)
+      SizeOfInitializedData += S.Header.SizeOfRawData;
+  }
+}
+
+size_t COFFWriter::finalizeStringTable() {
+  for (auto &S : Obj.Sections)
+    if (S.Name.size() > COFF::NameSize)
+      StrTabBuilder.add(S.Name);
+
+  for (const auto &S : Obj.getSymbols())
+    if (S.Name.size() > COFF::NameSize)
+      StrTabBuilder.add(S.Name);
+
+  StrTabBuilder.finalize();
+
+  for (auto &S : Obj.Sections) {
+    if (S.Name.size() > COFF::NameSize) {
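+      // COFF encodes a long section name as "/<decimal offset into the
+      // string table>".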
+      snprintf(S.Header.Name, sizeof(S.Header.Name), "/%d",
+               (int)StrTabBuilder.getOffset(S.Name));
+    } else {
+      strncpy(S.Header.Name, S.Name.data(), COFF::NameSize);
+    }
+  }
+  for (auto &S : Obj.getMutableSymbols()) {
+    if (S.Name.size() > COFF::NameSize) {
+      S.Sym.Name.Offset.Zeroes = 0;
+      S.Sym.Name.Offset.Offset = StrTabBuilder.getOffset(S.Name);
+    } else {
+      strncpy(S.Sym.Name.ShortName, S.Name.data(), COFF::NameSize);
+    }
+  }
+  return StrTabBuilder.getSize();
+}
+
+template <class SymbolTy>
+std::pair<size_t, size_t> COFFWriter::finalizeSymbolTable() {
+  size_t SymTabSize = Obj.getSymbols().size() * sizeof(SymbolTy);
+  for (const auto &S : Obj.getSymbols())
+    SymTabSize += S.AuxData.size();
+  return std::make_pair(SymTabSize, sizeof(SymbolTy));
+}
+
+Error COFFWriter::finalize(bool IsBigObj) {
+  if (Error E = finalizeRelocTargets())
+    return E;
+
+  size_t SizeOfHeaders = 0;
+  FileAlignment = 1;
+  size_t PeHeaderSize = 0;
+  if (Obj.IsPE) {
+    Obj.DosHeader.AddressOfNewExeHeader =
+        sizeof(Obj.DosHeader) + Obj.DosStub.size();
+    SizeOfHeaders += Obj.DosHeader.AddressOfNewExeHeader + sizeof(PEMagic);
+
+    FileAlignment = Obj.PeHeader.FileAlignment;
+    Obj.PeHeader.NumberOfRvaAndSize = Obj.DataDirectories.size();
+
+    PeHeaderSize = Obj.Is64 ? sizeof(pe32plus_header) : sizeof(pe32_header);
+    SizeOfHeaders +=
+        PeHeaderSize + sizeof(data_directory) * Obj.DataDirectories.size();
+  }
+  Obj.CoffFileHeader.NumberOfSections = Obj.Sections.size();
+  SizeOfHeaders +=
+      IsBigObj ? sizeof(coff_bigobj_file_header) : sizeof(coff_file_header);
+  SizeOfHeaders += sizeof(coff_section) * Obj.Sections.size();
+  SizeOfHeaders = alignTo(SizeOfHeaders, FileAlignment);
+
+  Obj.CoffFileHeader.SizeOfOptionalHeader =
+      PeHeaderSize + sizeof(data_directory) * Obj.DataDirectories.size();
+
+  FileSize = SizeOfHeaders;
+  SizeOfInitializedData = 0;
+
+  layoutSections();
+
+  if (Obj.IsPE) {
+    Obj.PeHeader.SizeOfHeaders = SizeOfHeaders;
+    Obj.PeHeader.SizeOfInitializedData = SizeOfInitializedData;
+
+    if (!Obj.Sections.empty()) {
+      const Section &S = Obj.Sections.back();
+      Obj.PeHeader.SizeOfImage =
+          alignTo(S.Header.VirtualAddress + S.Header.VirtualSize,
+                  Obj.PeHeader.SectionAlignment);
+    }
+
+    // If the PE header had a checksum, clear it, since it isn't valid
+    // any longer. (We don't calculate a new one.)
+    Obj.PeHeader.CheckSum = 0;
+  }
+
+  size_t StrTabSize = finalizeStringTable();
+  size_t SymTabSize, SymbolSize;
+  std::tie(SymTabSize, SymbolSize) = IsBigObj
+                                         ? finalizeSymbolTable<coff_symbol32>()
+                                         : finalizeSymbolTable<coff_symbol16>();
+
+  size_t PointerToSymbolTable = FileSize;
+  // StrTabSize <= 4 is the size of an empty string table, only consisting
+  // of the length field.
+  if (SymTabSize == 0 && StrTabSize <= 4 && Obj.IsPE) {
+    // For executables, don't point to the symbol table and skip writing
+    // the length field, if both the symbol and string tables are empty.
+    PointerToSymbolTable = 0;
+    StrTabSize = 0;
+  }
+
+  size_t NumRawSymbols = SymTabSize / SymbolSize;
+  Obj.CoffFileHeader.PointerToSymbolTable = PointerToSymbolTable;
+  Obj.CoffFileHeader.NumberOfSymbols = NumRawSymbols;
+  FileSize += SymTabSize + StrTabSize;
+  FileSize = alignTo(FileSize, FileAlignment);
+
+  return Error::success();
+}
+
+void COFFWriter::writeHeaders(bool IsBigObj) {
+  uint8_t *Ptr = Buf.getBufferStart();
+  if (Obj.IsPE) {
+    memcpy(Ptr, &Obj.DosHeader, sizeof(Obj.DosHeader));
+    Ptr += sizeof(Obj.DosHeader);
+    memcpy(Ptr, Obj.DosStub.data(), Obj.DosStub.size());
+    Ptr += Obj.DosStub.size();
+    memcpy(Ptr, PEMagic, sizeof(PEMagic));
+    Ptr += sizeof(PEMagic);
+  }
+  if (!IsBigObj) {
+    memcpy(Ptr, &Obj.CoffFileHeader, sizeof(Obj.CoffFileHeader));
+    Ptr += sizeof(Obj.CoffFileHeader);
+  } else {
+    // Generate a coff_bigobj_file_header, filling it in with the values
+    // from Obj.CoffFileHeader. All extra fields that don't exist in
+    // coff_file_header can be set to hardcoded values.
+    coff_bigobj_file_header BigObjHeader;
+    BigObjHeader.Sig1 = IMAGE_FILE_MACHINE_UNKNOWN;
+    BigObjHeader.Sig2 = 0xffff;
+    BigObjHeader.Version = BigObjHeader::MinBigObjectVersion;
+    BigObjHeader.Machine = Obj.CoffFileHeader.Machine;
+    BigObjHeader.TimeDateStamp = Obj.CoffFileHeader.TimeDateStamp;
+    memcpy(BigObjHeader.UUID, BigObjMagic, sizeof(BigObjMagic));
+    BigObjHeader.unused1 = 0;
+    BigObjHeader.unused2 = 0;
+    BigObjHeader.unused3 = 0;
+    BigObjHeader.unused4 = 0;
+    // The value in Obj.CoffFileHeader.NumberOfSections is truncated, so take
+    // the original number of sections instead.
+    BigObjHeader.NumberOfSections = Obj.Sections.size();
+    BigObjHeader.PointerToSymbolTable = Obj.CoffFileHeader.PointerToSymbolTable;
+    BigObjHeader.NumberOfSymbols = Obj.CoffFileHeader.NumberOfSymbols;
+
+    memcpy(Ptr, &BigObjHeader, sizeof(BigObjHeader));
+    Ptr += sizeof(BigObjHeader);
+  }
+  if (Obj.IsPE) {
+    if (Obj.Is64) {
+      memcpy(Ptr, &Obj.PeHeader, sizeof(Obj.PeHeader));
+      Ptr += sizeof(Obj.PeHeader);
+    } else {
+      pe32_header PeHeader;
+      copyPeHeader(PeHeader, Obj.PeHeader);
+      // The pe32plus_header (stored in Object) lacks the BaseOfData field.
+      PeHeader.BaseOfData = Obj.BaseOfData;
+
+      memcpy(Ptr, &PeHeader, sizeof(PeHeader));
+      Ptr += sizeof(PeHeader);
+    }
+    for (const auto &DD : Obj.DataDirectories) {
+      memcpy(Ptr, &DD, sizeof(DD));
+      Ptr += sizeof(DD);
+    }
+  }
+  for (const auto &S : Obj.Sections) {
+    memcpy(Ptr, &S.Header, sizeof(S.Header));
+    Ptr += sizeof(S.Header);
+  }
+}
+
+void COFFWriter::writeSections() {
+  for (const auto &S : Obj.Sections) {
+    uint8_t *Ptr = Buf.getBufferStart() + S.Header.PointerToRawData;
+    std::copy(S.Contents.begin(), S.Contents.end(), Ptr);
+
+    // For executable sections, pad the remainder of the raw data size with
+    // 0xcc, which is int3 on x86.
+    if ((S.Header.Characteristics & IMAGE_SCN_CNT_CODE) &&
+        S.Header.SizeOfRawData > S.Contents.size())
+      memset(Ptr + S.Contents.size(), 0xcc,
+             S.Header.SizeOfRawData - S.Contents.size());
+
+    Ptr += S.Header.SizeOfRawData;
+    for (const auto &R : S.Relocs) {
+      memcpy(Ptr, &R.Reloc, sizeof(R.Reloc));
+      Ptr += sizeof(R.Reloc);
+    }
+  }
+}
+
+template <class SymbolTy> void COFFWriter::writeSymbolStringTables() {
+  uint8_t *Ptr = Buf.getBufferStart() + Obj.CoffFileHeader.PointerToSymbolTable;
+  for (const auto &S : Obj.getSymbols()) {
+    // Convert symbols back to the right size, from coff_symbol32.
+    copySymbol<SymbolTy, coff_symbol32>(*reinterpret_cast<SymbolTy *>(Ptr),
+                                        S.Sym);
+    Ptr += sizeof(SymbolTy);
+    std::copy(S.AuxData.begin(), S.AuxData.end(), Ptr);
+    Ptr += S.AuxData.size();
+  }
+  if (StrTabBuilder.getSize() > 4 || !Obj.IsPE) {
+    // Always write a string table in object files, even an empty one.
+    StrTabBuilder.write(Ptr);
+    Ptr += StrTabBuilder.getSize();
+  }
+}
+
+Error COFFWriter::write(bool IsBigObj) {
+  if (Error E = finalize(IsBigObj))
+    return E;
+
+  Buf.allocate(FileSize);
+
+  writeHeaders(IsBigObj);
+  writeSections();
+  if (IsBigObj)
+    writeSymbolStringTables<coff_symbol32>();
+  else
+    writeSymbolStringTables<coff_symbol16>();
+
+  if (Obj.IsPE)
+    if (Error E = patchDebugDirectory())
+      return E;
+
+  return Buf.commit();
+}
+
+// Locate which sections contain the debug directories, iterate over all
+// the debug_directory structs in there, and set the PointerToRawData field
+// in all of them, according to their new physical location in the file.
+Error COFFWriter::patchDebugDirectory() {
+  if (Obj.DataDirectories.size() <= DEBUG_DIRECTORY)
+    return Error::success();
+  const data_directory *Dir = &Obj.DataDirectories[DEBUG_DIRECTORY];
+  if (Dir->Size <= 0)
+    return Error::success();
+  for (const auto &S : Obj.Sections) {
+    if (Dir->RelativeVirtualAddress >= S.Header.VirtualAddress &&
+        Dir->RelativeVirtualAddress <
+            S.Header.VirtualAddress + S.Header.SizeOfRawData) {
+      if (Dir->RelativeVirtualAddress + Dir->Size >
+          S.Header.VirtualAddress + S.Header.SizeOfRawData)
+        return make_error<StringError>(
+            "Debug directory extends past end of section",
+            object_error::parse_failed);
+
+      size_t Offset = Dir->RelativeVirtualAddress - S.Header.VirtualAddress;
+      uint8_t *Ptr = Buf.getBufferStart() + S.Header.PointerToRawData + Offset;
+      uint8_t *End = Ptr + Dir->Size;
+      while (Ptr < End) {
+        debug_directory *Debug = reinterpret_cast<debug_directory *>(Ptr);
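+        // Each entry's payload is assumed to immediately follow the entry
+        // itself; point PointerToRawData at its new location in the file.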
+        Debug->PointerToRawData =
+            S.Header.PointerToRawData + Offset + sizeof(debug_directory);
+        Ptr += sizeof(debug_directory) + Debug->SizeOfData;
+        Offset += sizeof(debug_directory) + Debug->SizeOfData;
+      }
+      // Debug directory found and patched, all done.
+      return Error::success();
+    }
+  }
+  return make_error<StringError>("Debug directory not found",
+                                 object_error::parse_failed);
+}
+
+Error COFFWriter::write() {
+  bool IsBigObj = Obj.Sections.size() > MaxNumberOfSections16;
+  if (IsBigObj && Obj.IsPE)
+    return make_error<StringError>("Too many sections for executable",
+                                   object_error::parse_failed);
+  return write(IsBigObj);
+}
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
diff --git a/tools/llvm-objcopy/COFF/Writer.h b/tools/llvm-objcopy/COFF/Writer.h
new file mode 100644
index 0000000..ab66e0c
--- /dev/null
+++ b/tools/llvm-objcopy/COFF/Writer.h
@@ -0,0 +1,61 @@
+//===- Writer.h -------------------------------------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_OBJCOPY_COFF_WRITER_H
+#define LLVM_TOOLS_OBJCOPY_COFF_WRITER_H
+
+#include "Buffer.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Support/Error.h"
+#include <cstddef>
+#include <utility>
+
+namespace llvm {
+namespace objcopy {
+namespace coff {
+
+struct Object;
+
+class COFFWriter {
+  Object &Obj;
+  Buffer &Buf;
+
+  size_t FileSize;
+  size_t FileAlignment;
+  size_t SizeOfInitializedData;
+  StringTableBuilder StrTabBuilder;
+
+  Error finalizeRelocTargets();
+  void layoutSections();
+  size_t finalizeStringTable();
+  template <class SymbolTy> std::pair<size_t, size_t> finalizeSymbolTable();
+
+  Error finalize(bool IsBigObj);
+
+  void writeHeaders(bool IsBigObj);
+  void writeSections();
+  template <class SymbolTy> void writeSymbolStringTables();
+
+  Error write(bool IsBigObj);
+
+  Error patchDebugDirectory();
+
+public:
+  virtual ~COFFWriter() {}
+  Error write();
+
+  COFFWriter(Object &Obj, Buffer &Buf)
+      : Obj(Obj), Buf(Buf), StrTabBuilder(StringTableBuilder::WinCOFF) {}
+};
+
+} // end namespace coff
+} // end namespace objcopy
+} // end namespace llvm
+
+#endif // LLVM_TOOLS_OBJCOPY_COFF_WRITER_H
diff --git a/tools/llvm-objcopy/CopyConfig.cpp b/tools/llvm-objcopy/CopyConfig.cpp
index 24f0e29..3737f57 100644
--- a/tools/llvm-objcopy/CopyConfig.cpp
+++ b/tools/llvm-objcopy/CopyConfig.cpp
@@ -189,6 +189,22 @@
   return Iter->getValue();
 }
 
+static const StringMap<MachineInfo> OutputFormatMap{
+    // Name, {EMachine, 64bit, LittleEndian}
+    {"elf32-i386", {ELF::EM_386, false, true}},
+    {"elf32-powerpcle", {ELF::EM_PPC, false, true}},
+    {"elf32-x86-64", {ELF::EM_X86_64, false, true}},
+    {"elf64-powerpcle", {ELF::EM_PPC64, true, true}},
+    {"elf64-x86-64", {ELF::EM_X86_64, true, true}},
+};
+
+static const MachineInfo &getOutputFormatMachineInfo(StringRef Format) {
+  auto Iter = OutputFormatMap.find(Format);
+  if (Iter == std::end(OutputFormatMap))
+    error("Invalid output format: '" + Format + "'");
+  return Iter->getValue();
+}
+
 static void addGlobalSymbolsFromFile(std::vector<std::string> &Symbols,
                                      StringRef Filename) {
   SmallVector<StringRef, 16> Lines;
@@ -266,6 +282,8 @@
       error("Specified binary input without specifiying an architecture");
     Config.BinaryArch = getMachineInfo(BinaryArch);
   }
+  if (!Config.OutputFormat.empty() && Config.OutputFormat != "binary")
+    Config.OutputArch = getOutputFormatMachineInfo(Config.OutputFormat);
 
   if (auto Arg = InputArgs.getLastArg(OBJCOPY_compress_debug_sections,
                                       OBJCOPY_compress_debug_sections_eq)) {
diff --git a/tools/llvm-objcopy/CopyConfig.h b/tools/llvm-objcopy/CopyConfig.h
index ce6ead8..71a2423 100644
--- a/tools/llvm-objcopy/CopyConfig.h
+++ b/tools/llvm-objcopy/CopyConfig.h
@@ -46,8 +46,10 @@
   StringRef OutputFilename;
   StringRef OutputFormat;
 
-  // Only applicable for --input-format=Binary
+  // Only applicable for --input-format=binary
   MachineInfo BinaryArch;
+  // Only applicable when --output-format!=binary (e.g. elf64-x86-64).
+  Optional<MachineInfo> OutputArch;
 
   // Advanced options
   StringRef AddGnuDebugLink;
diff --git a/tools/llvm-objcopy/ELF/ELFObjcopy.cpp b/tools/llvm-objcopy/ELF/ELFObjcopy.cpp
index 8a136de..f5ab8e7 100644
--- a/tools/llvm-objcopy/ELF/ELFObjcopy.cpp
+++ b/tools/llvm-objcopy/ELF/ELFObjcopy.cpp
@@ -173,6 +173,8 @@
   auto DWOFile = Reader.create();
   DWOFile->removeSections(
       [&](const SectionBase &Sec) { return onlyKeepDWOPred(*DWOFile, Sec); });
+  if (Config.OutputArch)
+    DWOFile->Machine = Config.OutputArch.getValue().EMachine;
   FileBuffer FB(File);
   auto Writer = createWriter(Config, *DWOFile, FB, OutputElfType);
   Writer->finalize();
@@ -183,7 +185,7 @@
                                Object &Obj) {
   for (auto &Sec : Obj.sections()) {
     if (Sec.Name == SecName) {
-      if (Sec.OriginalData.size() == 0)
+      if (Sec.OriginalData.empty())
         return make_error<StringError>("Can't dump section \"" + SecName +
                                            "\": it has no contents",
                                        object_error::parse_failed);
@@ -261,6 +263,8 @@
   if (!Config.SplitDWO.empty()) {
     splitDWOToFile(Config, Reader, Config.SplitDWO, OutputElfType);
   }
+  if (Config.OutputArch)
+    Obj.Machine = Config.OutputArch.getValue().EMachine;
 
   // TODO: update or remove symbols only if there is an option that affects
   // them.
@@ -495,17 +499,21 @@
 
   if (!Config.AddSection.empty()) {
     for (const auto &Flag : Config.AddSection) {
-      auto SecPair = Flag.split("=");
-      auto SecName = SecPair.first;
-      auto File = SecPair.second;
-      auto BufOrErr = MemoryBuffer::getFile(File);
+      std::pair<StringRef, StringRef> SecPair = Flag.split("=");
+      StringRef SecName = SecPair.first;
+      StringRef File = SecPair.second;
+      ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
+          MemoryBuffer::getFile(File);
       if (!BufOrErr)
         reportError(File, BufOrErr.getError());
-      auto Buf = std::move(*BufOrErr);
-      auto BufPtr = reinterpret_cast<const uint8_t *>(Buf->getBufferStart());
-      auto BufSize = Buf->getBufferSize();
-      Obj.addSection<OwnedDataSection>(SecName,
-                                       ArrayRef<uint8_t>(BufPtr, BufSize));
+      std::unique_ptr<MemoryBuffer> Buf = std::move(*BufOrErr);
+      ArrayRef<uint8_t> Data(
+          reinterpret_cast<const uint8_t *>(Buf->getBufferStart()),
+          Buf->getBufferSize());
+      OwnedDataSection &NewSection =
+          Obj.addSection<OwnedDataSection>(SecName, Data);
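+      // Match GNU objcopy: added ".note*" sections (other than
+      // .note.GNU-stack) get the SHT_NOTE section type.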
+      if (SecName.startswith(".note") && SecName != ".note.GNU-stack")
+        NewSection.Type = SHT_NOTE;
     }
   }
 
@@ -528,7 +536,10 @@
   BinaryReader Reader(Config.BinaryArch, &In);
   std::unique_ptr<Object> Obj = Reader.create();
 
-  const ElfType OutputElfType = getOutputElfType(Config.BinaryArch);
+  // Prefer OutputArch (-O<format>) if set, otherwise fallback to BinaryArch
+  // (-B<arch>).
+  const ElfType OutputElfType = getOutputElfType(
+      Config.OutputArch ? Config.OutputArch.getValue() : Config.BinaryArch);
   handleArgs(Config, *Obj, Reader, OutputElfType);
   std::unique_ptr<Writer> Writer =
       createWriter(Config, *Obj, Out, OutputElfType);
@@ -540,7 +551,10 @@
                             object::ELFObjectFileBase &In, Buffer &Out) {
   ELFReader Reader(&In);
   std::unique_ptr<Object> Obj = Reader.create();
-  const ElfType OutputElfType = getOutputElfType(In);
+  // Prefer OutputArch (-O<format>) if set, otherwise infer it from the input.
+  const ElfType OutputElfType =
+      Config.OutputArch ? getOutputElfType(Config.OutputArch.getValue())
+                        : getOutputElfType(In);
   ArrayRef<uint8_t> BuildIdBytes;
 
   if (!Config.BuildIdLinkDir.empty()) {
diff --git a/tools/llvm-objcopy/ELF/Object.cpp b/tools/llvm-objcopy/ELF/Object.cpp
index ae02966..3d3e029 100644
--- a/tools/llvm-objcopy/ELF/Object.cpp
+++ b/tools/llvm-objcopy/ELF/Object.cpp
@@ -71,7 +71,46 @@
   Shdr.sh_entsize = Sec.EntrySize;
 }
 
-SectionVisitor::~SectionVisitor() {}
+template <class ELFT> void ELFSectionSizer<ELFT>::visit(Section &Sec) {}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(OwnedDataSection &Sec) {}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(StringTableSection &Sec) {}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(DynamicRelocationSection &Sec) {}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(SymbolTableSection &Sec) {
+  Sec.EntrySize = sizeof(Elf_Sym);
+  Sec.Size = Sec.Symbols.size() * Sec.EntrySize;
+  // Align to the largest field in Elf_Sym.
+  Sec.Align = ELFT::Is64Bits ? sizeof(Elf_Xword) : sizeof(Elf_Word);
+}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(RelocationSection &Sec) {
+  Sec.EntrySize = Sec.Type == SHT_REL ? sizeof(Elf_Rel) : sizeof(Elf_Rela);
+  Sec.Size = Sec.Relocations.size() * Sec.EntrySize;
+  // Align to the largest field in Elf_Rel(a).
+  Sec.Align = ELFT::Is64Bits ? sizeof(Elf_Xword) : sizeof(Elf_Word);
+}
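
For a 64-bit target this arithmetic works out as in the following sketch
(entry sizes per the ELF64 spec):

    // sizeof(Elf64_Sym) == 24, sizeof(Elf64_Rela) == 24, sizeof(Elf64_Rel) == 16,
    // sizeof(Elf64_Xword) == 8. So a .symtab holding 5 symbols is sized as
    //   EntrySize = 24, Size = 5 * 24 = 120, Align = 8;
    // and a .rela.text holding 3 entries as
    //   EntrySize = 24, Size = 3 * 24 = 72, Align = 8.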
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(GnuDebugLinkSection &Sec) {}
+
+template <class ELFT> void ELFSectionSizer<ELFT>::visit(GroupSection &Sec) {}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(SectionIndexSection &Sec) {}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(CompressedSection &Sec) {}
+
+template <class ELFT>
+void ELFSectionSizer<ELFT>::visit(DecompressedSection &Sec) {}
 
 void BinarySectionWriter::visit(const SectionIndexSection &Sec) {
   error("Cannot write symbol section index table '" + Sec.Name + "' ");
@@ -102,6 +141,8 @@
 
 void Section::accept(SectionVisitor &Visitor) const { Visitor.visit(*this); }
 
+void Section::accept(MutableSectionVisitor &Visitor) { Visitor.visit(*this); }
+
 void SectionWriter::visit(const OwnedDataSection &Sec) {
   uint8_t *Buf = Out.getBufferStart() + Sec.Offset;
   llvm::copy(Sec.Data, Buf);
@@ -164,10 +205,18 @@
   Visitor.visit(*this);
 }
 
+void DecompressedSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 void OwnedDataSection::accept(SectionVisitor &Visitor) const {
   Visitor.visit(*this);
 }
 
+void OwnedDataSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 void BinarySectionWriter::visit(const CompressedSection &Sec) {
   error("Cannot write compressed section '" + Sec.Name + "' ");
 }
@@ -246,6 +295,10 @@
   Visitor.visit(*this);
 }
 
+void CompressedSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 void StringTableSection::addString(StringRef Name) {
   StrTabBuilder.add(Name);
   Size = StrTabBuilder.getSize();
@@ -265,6 +318,10 @@
   Visitor.visit(*this);
 }
 
+void StringTableSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 template <class ELFT>
 void ELFSectionWriter<ELFT>::visit(const SectionIndexSection &Sec) {
   uint8_t *Buf = Out.getBufferStart() + Sec.Offset;
@@ -288,6 +345,10 @@
   Visitor.visit(*this);
 }
 
+void SectionIndexSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 static bool isValidReservedSectionIndex(uint16_t Index, uint16_t Machine) {
   switch (Index) {
   case SHN_ABS:
@@ -470,6 +531,10 @@
   Visitor.visit(*this);
 }
 
+void SymbolTableSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 template <class SymTabType>
 void RelocSectionWithSymtabBase<SymTabType>::removeSectionReferences(
     const SectionBase *Sec) {
@@ -540,6 +605,10 @@
   Visitor.visit(*this);
 }
 
+void RelocationSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 void RelocationSection::removeSymbols(
     function_ref<bool(const Symbol &)> ToRemove) {
   for (const Relocation &Reloc : Relocations)
@@ -562,6 +631,10 @@
   Visitor.visit(*this);
 }
 
+void DynamicRelocationSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 void Section::removeSectionReferences(const SectionBase *Sec) {
   if (LinkSection == Sec) {
     error("Section " + LinkSection->Name +
@@ -648,6 +721,10 @@
   Visitor.visit(*this);
 }
 
+void GnuDebugLinkSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 template <class ELFT>
 void ELFSectionWriter<ELFT>::visit(const GroupSection &Sec) {
   ELF::Elf32_Word *Buf =
@@ -661,6 +738,10 @@
   Visitor.visit(*this);
 }
 
+void GroupSection::accept(MutableSectionVisitor &Visitor) {
+  Visitor.visit(*this);
+}
+
 // Returns true IFF a section is wholly inside the range of a segment
 static bool sectionWithinSegment(const SectionBase &Section,
                                  const Segment &Segment) {
@@ -700,19 +781,19 @@
   return A->Index < B->Index;
 }
 
-template <class ELFT> void BinaryELFBuilder<ELFT>::initFileHeader() {
+void BinaryELFBuilder::initFileHeader() {
   Obj->Flags = 0x0;
   Obj->Type = ET_REL;
+  Obj->OSABI = ELFOSABI_NONE;
+  Obj->ABIVersion = 0;
   Obj->Entry = 0x0;
   Obj->Machine = EMachine;
   Obj->Version = 1;
 }
 
-template <class ELFT> void BinaryELFBuilder<ELFT>::initHeaderSegment() {
-  Obj->ElfHdrSegment.Index = 0;
-}
+void BinaryELFBuilder::initHeaderSegment() { Obj->ElfHdrSegment.Index = 0; }
 
-template <class ELFT> StringTableSection *BinaryELFBuilder<ELFT>::addStrTab() {
+StringTableSection *BinaryELFBuilder::addStrTab() {
   auto &StrTab = Obj->addSection<StringTableSection>();
   StrTab.Name = ".strtab";
 
@@ -720,15 +801,11 @@
   return &StrTab;
 }
 
-template <class ELFT>
-SymbolTableSection *
-BinaryELFBuilder<ELFT>::addSymTab(StringTableSection *StrTab) {
+SymbolTableSection *BinaryELFBuilder::addSymTab(StringTableSection *StrTab) {
   auto &SymTab = Obj->addSection<SymbolTableSection>();
 
   SymTab.Name = ".symtab";
   SymTab.Link = StrTab->Index;
-  // TODO: Factor out dependence on ElfType here.
-  SymTab.EntrySize = sizeof(Elf_Sym);
 
   // The symbol table always needs a null symbol
   SymTab.addSymbol("", 0, 0, nullptr, 0, 0, 0, 0);
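
The all-zero argument list is the reserved null symbol the ELF spec requires
at index 0; annotated (parameter names are an assumption inferred from the
other call site in this file), the call reads roughly as:

    SymTab.addSymbol(/*Name=*/"", /*Bind=*/0, /*Type=*/0, /*DefinedIn=*/nullptr,
                     /*Value=*/0, /*Visibility=*/0, /*Shndx=*/0, /*Size=*/0);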
@@ -737,8 +814,7 @@
   return &SymTab;
 }
 
-template <class ELFT>
-void BinaryELFBuilder<ELFT>::addData(SymbolTableSection *SymTab) {
+void BinaryELFBuilder::addData(SymbolTableSection *SymTab) {
   auto Data = ArrayRef<uint8_t>(
       reinterpret_cast<const uint8_t *>(MemBuf->getBufferStart()),
       MemBuf->getBufferSize());
@@ -761,13 +837,13 @@
                     /*Value=*/DataSection.Size, STV_DEFAULT, SHN_ABS, 0);
 }
 
-template <class ELFT> void BinaryELFBuilder<ELFT>::initSections() {
+void BinaryELFBuilder::initSections() {
   for (auto &Section : Obj->sections()) {
     Section.initialize(Obj->sections());
   }
 }
 
-template <class ELFT> std::unique_ptr<Object> BinaryELFBuilder<ELFT>::build() {
+std::unique_ptr<Object> BinaryELFBuilder::build() {
   initFileHeader();
   initHeaderSegment();
   StringTableSection *StrTab = addStrTab();
@@ -1086,6 +1162,8 @@
 template <class ELFT> void ELFBuilder<ELFT>::build() {
   const auto &Ehdr = *ElfFile.getHeader();
 
+  Obj.OSABI = Ehdr.e_ident[EI_OSABI];
+  Obj.ABIVersion = Ehdr.e_ident[EI_ABIVERSION];
   Obj.Type = Ehdr.e_type;
   Obj.Machine = Ehdr.e_machine;
   Obj.Version = Ehdr.e_version;
@@ -1118,14 +1196,7 @@
 Reader::~Reader() {}
 
 std::unique_ptr<Object> BinaryReader::create() const {
-  if (MInfo.Is64Bit)
-    return MInfo.IsLittleEndian
-               ? BinaryELFBuilder<ELF64LE>(MInfo.EMachine, MemBuf).build()
-               : BinaryELFBuilder<ELF64BE>(MInfo.EMachine, MemBuf).build();
-  else
-    return MInfo.IsLittleEndian
-               ? BinaryELFBuilder<ELF32LE>(MInfo.EMachine, MemBuf).build()
-               : BinaryELFBuilder<ELF32BE>(MInfo.EMachine, MemBuf).build();
+  return BinaryELFBuilder(MInfo.EMachine, MemBuf).build();
 }
 
 std::unique_ptr<Object> ELFReader::create() const {
@@ -1162,8 +1233,8 @@
   Ehdr.e_ident[EI_DATA] =
       ELFT::TargetEndianness == support::big ? ELFDATA2MSB : ELFDATA2LSB;
   Ehdr.e_ident[EI_VERSION] = EV_CURRENT;
-  Ehdr.e_ident[EI_OSABI] = ELFOSABI_NONE;
-  Ehdr.e_ident[EI_ABIVERSION] = 0;
+  Ehdr.e_ident[EI_OSABI] = Obj.OSABI;
+  Ehdr.e_ident[EI_ABIVERSION] = Obj.ABIVersion;
 
   Ehdr.e_type = Obj.Type;
   Ehdr.e_machine = Obj.Machine;
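
Together with the new Object::OSABI/ABIVersion fields, this round-trips the
input's identification bytes instead of hard-coding ELFOSABI_NONE:

    // e_ident[EI_OSABI]      (byte 7): e.g. ELFOSABI_NONE (0), ELFOSABI_GNU (3)
    // e_ident[EI_ABIVERSION] (byte 8): ABI version, usually 0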
@@ -1479,10 +1550,16 @@
     }
 
   initEhdrSegment();
+
   // Before we can prepare for layout the indexes need to be finalized.
+  // Also, the output arch may not be the same as the input arch, so fix up
+  // size-related fields before doing layout calculations.
   uint64_t Index = 0;
-  for (auto &Sec : Obj.sections())
+  auto SecSizer = llvm::make_unique<ELFSectionSizer<ELFT>>();
+  for (auto &Sec : Obj.sections()) {
     Sec.Index = Index++;
+    Sec.accept(*SecSizer);
+  }
 
   // The symbol table does not update all other sections on update. For
   // instance, symbol names are not added as new symbols are added. This means
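
The sizing pass matters when -O changes the file class: converting a 64-bit
input to a 32-bit output, for example, shrinks each symbol entry from 24 to 16
bytes, so sizes taken from the input would corrupt the layout that follows.
The intended ordering, as a sketch:

    // 1. assign final section indexes
    // 2. ELFSectionSizer<ELFT> recomputes EntrySize/Size/Align per section
    // 3. only then run layout, which turns those sizes into file offsets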
@@ -1603,11 +1680,6 @@
   SecWriter = llvm::make_unique<BinarySectionWriter>(Buf);
 }
 
-template class BinaryELFBuilder<ELF64LE>;
-template class BinaryELFBuilder<ELF64BE>;
-template class BinaryELFBuilder<ELF32LE>;
-template class BinaryELFBuilder<ELF32BE>;
-
 template class ELFBuilder<ELF64LE>;
 template class ELFBuilder<ELF64BE>;
 template class ELFBuilder<ELF32LE>;
diff --git a/tools/llvm-objcopy/ELF/Object.h b/tools/llvm-objcopy/ELF/Object.h
index 4b84065..e5730cd 100644
--- a/tools/llvm-objcopy/ELF/Object.h
+++ b/tools/llvm-objcopy/ELF/Object.h
@@ -71,7 +71,7 @@
 
 class SectionVisitor {
 public:
-  virtual ~SectionVisitor();
+  virtual ~SectionVisitor() = default;
 
   virtual void visit(const Section &Sec) = 0;
   virtual void visit(const OwnedDataSection &Sec) = 0;
@@ -86,6 +86,23 @@
   virtual void visit(const DecompressedSection &Sec) = 0;
 };
 
+class MutableSectionVisitor {
+public:
+  virtual ~MutableSectionVisitor() = default;
+
+  virtual void visit(Section &Sec) = 0;
+  virtual void visit(OwnedDataSection &Sec) = 0;
+  virtual void visit(StringTableSection &Sec) = 0;
+  virtual void visit(SymbolTableSection &Sec) = 0;
+  virtual void visit(RelocationSection &Sec) = 0;
+  virtual void visit(DynamicRelocationSection &Sec) = 0;
+  virtual void visit(GnuDebugLinkSection &Sec) = 0;
+  virtual void visit(GroupSection &Sec) = 0;
+  virtual void visit(SectionIndexSection &Sec) = 0;
+  virtual void visit(CompressedSection &Sec) = 0;
+  virtual void visit(DecompressedSection &Sec) = 0;
+};
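
MutableSectionVisitor mirrors SectionVisitor but takes non-const references so
a pass such as ELFSectionSizer can write size fields. A minimal stand-alone
sketch of the double dispatch involved (not the LLVM classes themselves):

    struct Leaf;
    struct Visitor {
      virtual ~Visitor() = default;
      virtual void visit(Leaf &L) = 0;       // one overload per concrete type
    };
    struct Node {
      virtual ~Node() = default;
      virtual void accept(Visitor &V) = 0;   // first dispatch: node's dynamic type
    };
    struct Leaf : Node {
      void accept(Visitor &V) override { V.visit(*this); }  // second dispatch
    };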
+
 class SectionWriter : public SectionVisitor {
 protected:
   Buffer &Out;
@@ -128,9 +145,32 @@
   explicit ELFSectionWriter(Buffer &Buf) : SectionWriter(Buf) {}
 };
 
+template <class ELFT> class ELFSectionSizer : public MutableSectionVisitor {
+private:
+  using Elf_Rel = typename ELFT::Rel;
+  using Elf_Rela = typename ELFT::Rela;
+  using Elf_Sym = typename ELFT::Sym;
+  using Elf_Word = typename ELFT::Word;
+  using Elf_Xword = typename ELFT::Xword;
+
+public:
+  void visit(Section &Sec) override;
+  void visit(OwnedDataSection &Sec) override;
+  void visit(StringTableSection &Sec) override;
+  void visit(DynamicRelocationSection &Sec) override;
+  void visit(SymbolTableSection &Sec) override;
+  void visit(RelocationSection &Sec) override;
+  void visit(GnuDebugLinkSection &Sec) override;
+  void visit(GroupSection &Sec) override;
+  void visit(SectionIndexSection &Sec) override;
+  void visit(CompressedSection &Sec) override;
+  void visit(DecompressedSection &Sec) override;
+};
+
 #define MAKE_SEC_WRITER_FRIEND                                                 \
   friend class SectionWriter;                                                  \
-  template <class ELFT> friend class ELFSectionWriter;
+  template <class ELFT> friend class ELFSectionWriter;                         \
+  template <class ELFT> friend class ELFSectionSizer;
 
 class BinarySectionWriter : public SectionWriter {
 public:
@@ -237,6 +277,7 @@
   virtual void removeSectionReferences(const SectionBase *Sec);
   virtual void removeSymbols(function_ref<bool(const Symbol &)> ToRemove);
   virtual void accept(SectionVisitor &Visitor) const = 0;
+  virtual void accept(MutableSectionVisitor &Visitor) = 0;
   virtual void markSymbols();
 };
 
@@ -293,6 +334,7 @@
   explicit Section(ArrayRef<uint8_t> Data) : Contents(Data) {}
 
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
   void removeSectionReferences(const SectionBase *Sec) override;
   void initialize(SectionTableRef SecTable) override;
   void finalize() override;
@@ -313,6 +355,7 @@
   }
 
   void accept(SectionVisitor &Sec) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
 };
 
 class CompressedSection : public SectionBase {
@@ -333,6 +376,7 @@
   uint64_t getDecompressedAlign() const { return DecompressedAlign; }
 
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
 
   static bool classof(const SectionBase *S) {
     return (S->Flags & ELF::SHF_COMPRESSED) ||
@@ -354,6 +398,7 @@
   }
 
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
 };
 
 // There are two types of string tables that can exist, dynamic and not dynamic.
@@ -378,6 +423,7 @@
   uint32_t findIndex(StringRef Name) const;
   void finalize() override;
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
 
   static bool classof(const SectionBase *S) {
     if (S->Flags & ELF::SHF_ALLOC)
@@ -435,6 +481,7 @@
   void initialize(SectionTableRef SecTable) override;
   void finalize() override;
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
 
   SectionIndexSection() {
     Name = ".symtab_shndx";
@@ -479,6 +526,7 @@
   void initialize(SectionTableRef SecTable) override;
   void finalize() override;
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
   void removeSymbols(function_ref<bool(const Symbol &)> ToRemove) override;
 
   static bool classof(const SectionBase *S) {
@@ -540,6 +588,7 @@
 public:
   void addRelocation(Relocation Rel) { Relocations.push_back(Rel); }
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
   void removeSymbols(function_ref<bool(const Symbol &)> ToRemove) override;
   void markSymbols() override;
 
@@ -573,6 +622,7 @@
   void addMember(SectionBase *Sec) { GroupMembers.push_back(Sec); }
 
   void accept(SectionVisitor &) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
   void finalize() override;
   void removeSymbols(function_ref<bool(const Symbol &)> ToRemove) override;
   void markSymbols() override;
@@ -611,6 +661,7 @@
   explicit DynamicRelocationSection(ArrayRef<uint8_t> Data) : Contents(Data) {}
 
   void accept(SectionVisitor &) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
 
   static bool classof(const SectionBase *S) {
     if (!(S->Flags & ELF::SHF_ALLOC))
@@ -632,6 +683,7 @@
   // If we add this section from an external source we can use this ctor.
   explicit GnuDebugLinkSection(StringRef File);
   void accept(SectionVisitor &Visitor) const override;
+  void accept(MutableSectionVisitor &Visitor) override;
 };
 
 class Reader {
@@ -645,9 +697,7 @@
 using object::ELFObjectFile;
 using object::OwningBinary;
 
-template <class ELFT> class BinaryELFBuilder {
-  using Elf_Sym = typename ELFT::Sym;
-
+class BinaryELFBuilder {
   uint16_t EMachine;
   MemoryBuffer *MemBuf;
   std::unique_ptr<Object> Obj;
@@ -733,6 +783,8 @@
   Segment ElfHdrSegment;
   Segment ProgramHdrSegment;
 
+  uint8_t OSABI;
+  uint8_t ABIVersion;
   uint64_t Entry;
   uint64_t SHOffset;
   uint32_t Type;
diff --git a/tools/llvm-objcopy/llvm-objcopy.cpp b/tools/llvm-objcopy/llvm-objcopy.cpp
index bc50880..fb1ff18 100644
--- a/tools/llvm-objcopy/llvm-objcopy.cpp
+++ b/tools/llvm-objcopy/llvm-objcopy.cpp
@@ -9,6 +9,7 @@
 
 #include "llvm-objcopy.h"
 #include "Buffer.h"
+#include "COFF/COFFObjcopy.h"
 #include "CopyConfig.h"
 #include "ELF/ELFObjcopy.h"
 
@@ -19,6 +20,7 @@
 #include "llvm/Object/Archive.h"
 #include "llvm/Object/ArchiveWriter.h"
 #include "llvm/Object/Binary.h"
+#include "llvm/Object/COFF.h"
 #include "llvm/Object/ELFObjectFile.h"
 #include "llvm/Object/ELFTypes.h"
 #include "llvm/Object/Error.h"
@@ -125,6 +127,8 @@
                                    Buffer &Out) {
   if (auto *ELFBinary = dyn_cast<object::ELFObjectFileBase>(&In))
     return elf::executeObjcopyOnBinary(Config, *ELFBinary, Out);
+  else if (auto *COFFBinary = dyn_cast<object::COFFObjectFile>(&In))
+    return coff::executeObjcopyOnBinary(Config, *COFFBinary, Out);
   else
     error("Unsupported object file format");
 }
diff --git a/tools/llvm-objdump/COFFDump.cpp b/tools/llvm-objdump/COFFDump.cpp
index a7ee495..55607ec 100644
--- a/tools/llvm-objdump/COFFDump.cpp
+++ b/tools/llvm-objdump/COFFDump.cpp
@@ -455,7 +455,7 @@
       Rels.push_back(Reloc);
 
     // Sort relocations by address.
-    llvm::sort(Rels, RelocAddressLess);
+    llvm::sort(Rels, isRelocAddressLess);
 
     ArrayRef<uint8_t> Contents;
     error(Obj->getSectionContents(Pdata, Contents));
diff --git a/tools/llvm-objdump/MachODump.cpp b/tools/llvm-objdump/MachODump.cpp
index b5c0439..5ef7058 100644
--- a/tools/llvm-objdump/MachODump.cpp
+++ b/tools/llvm-objdump/MachODump.cpp
@@ -1412,7 +1412,7 @@
     std::pair<StringRef, StringRef> DumpSegSectName;
     DumpSegSectName = DumpSection.split(',');
     StringRef DumpSegName, DumpSectName;
-    if (DumpSegSectName.second.size()) {
+    if (!DumpSegSectName.second.empty()) {
       DumpSegName = DumpSegSectName.first;
       DumpSectName = DumpSegSectName.second;
     } else {
@@ -1582,7 +1582,7 @@
   if (Disassemble || Relocations || PrivateHeaders || ExportsTrie || Rebase ||
       Bind || SymbolTable || LazyBind || WeakBind || IndirectSymbols ||
       DataInCode || LinkOptHints || DylibsUsed || DylibId || ObjcMetaData ||
-      (FilterSections.size() != 0)) {
+      (!FilterSections.empty())) {
     if (!NoLeadingHeaders) {
       outs() << Name;
       if (!ArchiveMemberName.empty())
@@ -1607,12 +1607,21 @@
   // If we need the symbol table to do the operation then check it here to
   // produce a good error message as to where the Mach-O file comes from in
   // the error message.
-  if (Disassemble || IndirectSymbols || FilterSections.size() != 0 ||
-      UnwindInfo)
+  if (Disassemble || IndirectSymbols || !FilterSections.empty() || UnwindInfo)
     if (Error Err = MachOOF->checkSymbolTable())
       report_error(ArchiveName, FileName, std::move(Err), ArchitectureName);
-
-  if (Disassemble) {
+
+  if (DisassembleAll) {
+    for (const SectionRef &Section : MachOOF->sections()) {
+      StringRef SectName;
+      Section.getName(SectName);
+      if (SectName.equals("__text")) {
+        DataRefImpl Ref = Section.getRawDataRefImpl();
+        StringRef SegName = MachOOF->getSectionFinalSegmentName(Ref);
+        DisassembleMachO(FileName, MachOOF, SegName, SectName);
+      }
+    }
+  } else if (Disassemble) {
     if (MachOOF->getHeader().filetype == MachO::MH_KEXT_BUNDLE &&
         MachOOF->getHeader().cputype == MachO::CPU_TYPE_ARM64)
       DisassembleMachO(FileName, MachOOF, "__TEXT_EXEC", "__text");
@@ -1628,10 +1638,10 @@
   if (Relocations)
     PrintRelocations(MachOOF, !NonVerbose);
   if (SectionHeaders)
-    PrintSectionHeaders(MachOOF);
+    printSectionHeaders(MachOOF);
   if (SectionContents)
-    PrintSectionContents(MachOOF);
-  if (FilterSections.size() != 0)
+    printSectionContents(MachOOF);
+  if (!FilterSections.empty())
     DumpSectionContents(FileName, MachOOF, !NonVerbose);
   if (InfoPlist)
     DumpInfoPlistSectionContents(FileName, MachOOF);
@@ -1640,7 +1650,7 @@
   if (DylibId)
     PrintDylibs(MachOOF, true);
   if (SymbolTable)
-    PrintSymbolTable(MachOOF, ArchiveName, ArchitectureName);
+    printSymbolTable(MachOOF, ArchiveName, ArchitectureName);
   if (UnwindInfo)
     printMachOUnwindInfo(MachOOF);
   if (PrivateHeaders) {
@@ -1960,7 +1970,7 @@
 // -arch flags selecting just those slices as specified by them and also parses
 // archive files.  Then for each individual Mach-O file ProcessMachO() is
 // called to process the file based on the command line options.
-void llvm::ParseInputMachO(StringRef Filename) {
+void llvm::parseInputMachO(StringRef Filename) {
   if (!ValidateArchFlags())
     return;
 
@@ -1999,7 +2009,7 @@
     return;
   }
   if (MachOUniversalBinary *UB = dyn_cast<MachOUniversalBinary>(&Bin)) {
-    ParseInputMachO(UB);
+    parseInputMachO(UB);
     return;
   }
   if (ObjectFile *O = dyn_cast<ObjectFile>(&Bin)) {
@@ -2016,7 +2026,7 @@
   llvm_unreachable("Input object can't be invalid at this point");
 }
 
-void llvm::ParseInputMachO(MachOUniversalBinary *UB) {
+void llvm::parseInputMachO(MachOUniversalBinary *UB) {
   if (!ValidateArchFlags())
     return;
 
@@ -2026,7 +2036,7 @@
     printMachOUniversalHeaders(UB, !NonVerbose);
 
   // If we have a list of architecture flags specified dump only those.
-  if (!ArchAll && ArchFlags.size() != 0) {
+  if (!ArchAll && !ArchFlags.empty()) {
     // Look for a slice in the universal binary that matches each ArchFlag.
     bool ArchFound;
     for (unsigned i = 0; i < ArchFlags.size(); ++i) {
@@ -6831,7 +6841,7 @@
 
   // Package up features to be passed to target/subtarget
   std::string FeaturesStr;
-  if (MAttrs.size()) {
+  if (!MAttrs.empty()) {
     SubtargetFeatures Features;
     for (unsigned i = 0; i != MAttrs.size(); ++i)
       Features.AddFeature(MAttrs[i]);
@@ -6971,15 +6981,18 @@
       ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
           MemoryBuffer::getFileOrSTDIN(DSYMFile);
       if (std::error_code EC = BufOrErr.getError()) {
-        WithColor::error(errs(), "llvm-objdump")
-            << Filename << ": " << EC.message() << '\n';
+        report_error(DSYMFile, errorCodeToError(EC));
         return;
       }
+
       Expected<std::unique_ptr<MachOObjectFile>> DbgObjCheck =
           ObjectFile::createMachOObjectFile(BufOrErr.get()->getMemBufferRef());
 
-      if (DbgObjCheck.takeError())
-        report_error(MachOOF->getFileName(), DbgObjCheck.takeError());
+      if (Error E = DbgObjCheck.takeError()) {
+        report_error(DSYMFile, std::move(E));
+        return;
+      }
+
       DbgObj = DbgObjCheck.get().release();
       // We need to keep the file alive, because we're replacing DbgObj with it.
       DSYMBuf = std::move(BufOrErr.get());
@@ -6989,7 +7002,7 @@
     diContext = DWARFContext::create(*DbgObj);
   }
 
-  if (FilterSections.size() == 0)
+  if (FilterSections.empty())
     outs() << "(" << DisSegName << "," << DisSectName << ") section\n";
 
   for (unsigned SectIdx = 0; SectIdx != Sections.size(); SectIdx++) {
@@ -7052,7 +7065,7 @@
     unsigned int Arch = MachOOF->getArch();
 
     // Skip all symbols if this is a stubs file.
-    if (Bytes.size() == 0)
+    if (Bytes.empty())
       return;
 
     // If the section has symbols but no symbol at the start of the section
diff --git a/tools/llvm-objdump/llvm-objdump.cpp b/tools/llvm-objdump/llvm-objdump.cpp
index 9a405c6..ba8d3c5 100644
--- a/tools/llvm-objdump/llvm-objdump.cpp
+++ b/tools/llvm-objdump/llvm-objdump.cpp
@@ -265,8 +265,17 @@
     StartAddress("start-address", cl::desc("Disassemble beginning at address"),
                  cl::value_desc("address"), cl::init(0));
 cl::opt<unsigned long long>
-    StopAddress("stop-address", cl::desc("Stop disassembly at address"),
+    StopAddress("stop-address",
+                cl::desc("Stop disassembly at address"),
                 cl::value_desc("address"), cl::init(UINT64_MAX));
+
+cl::opt<bool> DisassembleZeroes(
+    "disassemble-zeroes",
+    cl::desc("Do not skip blocks of zeroes when disassembling"));
+cl::alias DisassembleZeroesShort("z",
+                                 cl::desc("Alias for --disassemble-zeroes"),
+                                 cl::aliasopt(DisassembleZeroes));
+
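
A hypothetical before/after for the new flag (the file name is made up):

    $ llvm-objdump -d padded.o      # runs of eight or more zero bytes print as "..."
    $ llvm-objdump -d -z padded.o   # zero bytes are disassembled like any others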
 static StringRef ToolName;
 
 typedef std::vector<std::tuple<uint64_t, StringRef, uint8_t>> SectionSymbolsTy;
@@ -421,18 +430,16 @@
   // Figure out the target triple.
   llvm::Triple TheTriple("unknown-unknown-unknown");
   if (TripleName.empty()) {
-    if (Obj) {
+    if (Obj)
       TheTriple = Obj->makeTriple();
-    }
   } else {
     TheTriple.setTriple(Triple::normalize(TripleName));
 
     // Use the triple, but also try to combine with ARM build attributes.
     if (Obj) {
       auto Arch = Obj->getArch();
-      if (Arch == Triple::arm || Arch == Triple::armeb) {
+      if (Arch == Triple::arm || Arch == Triple::armeb)
         Obj->setARMSubArch(TheTriple);
-      }
     }
   }
 
@@ -452,22 +459,35 @@
   return TheTarget;
 }
 
-bool llvm::RelocAddressLess(RelocationRef a, RelocationRef b) {
-  return a.getOffset() < b.getOffset();
+bool llvm::isRelocAddressLess(RelocationRef A, RelocationRef B) {
+  return A.getOffset() < B.getOffset();
+}
+
+static std::string demangle(StringRef Name) {
+  char *Demangled = nullptr;
+  if (Name.startswith("_Z"))
+    Demangled = itaniumDemangle(Name.data(), Demangled, nullptr, nullptr);
+  else if (Name.startswith("?"))
+    Demangled = microsoftDemangle(Name.data(), Demangled, nullptr, nullptr);
+
+  if (!Demangled)
+    return Name;
+
+  std::string Ret = Demangled;
+  free(Demangled);
+  return Ret;
 }
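
Assuming the usual demangler outputs, the helper behaves roughly like:

    // demangle("_Z3foov")    -> "foo()"                 (Itanium, prefix "_Z")
    // demangle("?f@@YAXXZ")  -> "void __cdecl f(void)"  (Microsoft, prefix "?")
    // demangle("main")       -> "main"                  (no prefix: returned as-is)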
 
 template <class ELFT>
 static std::error_code getRelocationValueString(const ELFObjectFile<ELFT> *Obj,
                                                 const RelocationRef &RelRef,
                                                 SmallVectorImpl<char> &Result) {
-  DataRefImpl Rel = RelRef.getRawDataRefImpl();
-
   typedef typename ELFObjectFile<ELFT>::Elf_Sym Elf_Sym;
   typedef typename ELFObjectFile<ELFT>::Elf_Shdr Elf_Shdr;
   typedef typename ELFObjectFile<ELFT>::Elf_Rela Elf_Rela;
 
   const ELFFile<ELFT> &EF = *Obj->getELFFile();
-
+  DataRefImpl Rel = RelRef.getRawDataRefImpl();
   auto SecOrErr = EF.getSection(Rel.d.a);
   if (!SecOrErr)
     return errorToErrorCode(SecOrErr.takeError());
@@ -485,11 +505,11 @@
   if (!StrTabOrErr)
     return errorToErrorCode(StrTabOrErr.takeError());
   StringRef StrTab = *StrTabOrErr;
-  int64_t addend = 0;
+  int64_t Addend = 0;
   // If there is no Symbol associated with the relocation, we set the undef
   // boolean value to 'true'. This will prevent us from calling functions that
   // requires the relocation to be associated with a symbol.
-  bool undef = false;
+  bool Undef = false;
   switch (Sec->sh_type) {
   default:
     return object_error::parse_failed;
@@ -499,13 +519,13 @@
   }
   case ELF::SHT_RELA: {
     const Elf_Rela *ERela = Obj->getRela(Rel);
-    addend = ERela->r_addend;
-    undef = ERela->getSymbol(false) == 0;
+    Addend = ERela->r_addend;
+    Undef = ERela->getSymbol(false) == 0;
     break;
   }
   }
-  StringRef Target;
-  if (!undef) {
+  std::string Target;
+  if (!Undef) {
     symbol_iterator SI = RelRef.getSymbol();
     const Elf_Sym *symb = Obj->getSymbol(SI->getRawDataRefImpl());
     if (symb->getType() == ELF::STT_SECTION) {
@@ -521,20 +541,23 @@
       Expected<StringRef> SymName = symb->getName(StrTab);
       if (!SymName)
         return errorToErrorCode(SymName.takeError());
-      Target = *SymName;
+      if (Demangle)
+        Target = demangle(*SymName);
+      else
+        Target = *SymName;
     }
   } else
     Target = "*ABS*";
 
   // Default scheme is to print Target, as well as "+ <addend>" for nonzero
   // addend. Should be acceptable for all normal purposes.
-  std::string fmtbuf;
-  raw_string_ostream fmt(fmtbuf);
-  fmt << Target;
-  if (addend != 0)
-    fmt << (addend < 0 ? "" : "+") << addend;
-  fmt.flush();
-  Result.append(fmtbuf.begin(), fmtbuf.end());
+  std::string FmtBuf;
+  raw_string_ostream Fmt(FmtBuf);
+  Fmt << Target;
+  if (Addend != 0)
+    Fmt << (Addend < 0 ? "" : "+") << Addend;
+  Fmt.flush();
+  Result.append(FmtBuf.begin(), FmtBuf.end());
   return std::error_code();
 }
 
@@ -565,18 +588,15 @@
 
 static void printRelocationTargetName(const MachOObjectFile *O,
                                       const MachO::any_relocation_info &RE,
-                                      raw_string_ostream &fmt) {
-  bool IsScattered = O->isRelocationScattered(RE);
-
+                                      raw_string_ostream &Fmt) {
   // Target of a scattered relocation is an address.  In the interest of
   // generating pretty output, scan through the symbol table looking for a
   // symbol that aligns with that address.  If we find one, print it.
   // Otherwise, we just print the hex address of the target.
-  if (IsScattered) {
+  if (O->isRelocationScattered(RE)) {
     uint32_t Val = O->getPlainRelocationSymbolNum(RE);
 
     for (const SymbolRef &Symbol : O->symbols()) {
-      std::error_code ec;
       Expected<uint64_t> Addr = Symbol.getAddress();
       if (!Addr)
         report_error(O->getFileName(), Addr.takeError());
@@ -585,7 +605,7 @@
       Expected<StringRef> Name = Symbol.getName();
       if (!Name)
         report_error(O->getFileName(), Name.takeError());
-      fmt << *Name;
+      Fmt << *Name;
       return;
     }
 
@@ -600,11 +620,11 @@
         continue;
       if ((ec = Section.getName(Name)))
         report_error(O->getFileName(), ec);
-      fmt << Name;
+      Fmt << Name;
       return;
     }
 
-    fmt << format("0x%x", Val);
+    Fmt << format("0x%x", Val);
     return;
   }
 
@@ -613,9 +633,11 @@
   uint64_t Val = O->getPlainRelocationSymbolNum(RE);
 
   if (O->getAnyRelocationType(RE) == MachO::ARM64_RELOC_ADDEND) {
-    fmt << format("0x%0" PRIx64, Val);
+    Fmt << format("0x%0" PRIx64, Val);
     return;
-  } else if (isExtern) {
+  }
+
+  if (isExtern) {
     symbol_iterator SI = O->symbol_begin();
     advance(SI, Val);
     Expected<StringRef> SOrErr = SI->getName();
@@ -626,21 +648,21 @@
     section_iterator SI = O->section_begin();
     // Adjust for the fact that sections are 1-indexed.
     if (Val == 0) {
-      fmt << "0 (?,?)";
+      Fmt << "0 (?,?)";
       return;
     }
-    uint32_t i = Val - 1;
-    while (i != 0 && SI != O->section_end()) {
-      i--;
+    uint32_t I = Val - 1;
+    while (I != 0 && SI != O->section_end()) {
+      --I;
       advance(SI, 1);
     }
     if (SI == O->section_end())
-      fmt << Val << " (?,?)";
+      Fmt << Val << " (?,?)";
     else
       SI->getName(S);
   }
 
-  fmt << S;
+  Fmt << S;
 }
 
 static std::error_code getRelocationValueString(const WasmObjectFile *Obj,
@@ -648,12 +670,12 @@
                                                 SmallVectorImpl<char> &Result) {
   const wasm::WasmRelocation& Rel = Obj->getWasmRelocation(RelRef);
   symbol_iterator SI = RelRef.getSymbol();
-  std::string fmtbuf;
-  raw_string_ostream fmt(fmtbuf);
+  std::string FmtBuf;
+  raw_string_ostream Fmt(FmtBuf);
   if (SI == Obj->symbol_end()) {
     // Not all wasm relocations have symbols associated with them.
     // In particular R_WEBASSEMBLY_TYPE_INDEX_LEB.
-    fmt << Rel.Index;
+    Fmt << Rel.Index;
   } else {
     Expected<StringRef> SymNameOrErr = SI->getName();
     if (!SymNameOrErr)
@@ -661,9 +683,9 @@
     StringRef SymName = *SymNameOrErr;
     Result.append(SymName.begin(), SymName.end());
   }
-  fmt << (Rel.Addend < 0 ? "" : "+") << Rel.Addend;
-  fmt.flush();
-  Result.append(fmtbuf.begin(), fmtbuf.end());
+  Fmt << (Rel.Addend < 0 ? "" : "+") << Rel.Addend;
+  Fmt.flush();
+  Result.append(FmtBuf.begin(), FmtBuf.end());
   return std::error_code();
 }
 
@@ -675,8 +697,8 @@
 
   unsigned Arch = Obj->getArch();
 
-  std::string fmtbuf;
-  raw_string_ostream fmt(fmtbuf);
+  std::string FmtBuf;
+  raw_string_ostream Fmt(FmtBuf);
   unsigned Type = Obj->getAnyRelocationType(RE);
   bool IsPCRel = Obj->getAnyRelocationPCRel(RE);
 
@@ -685,15 +707,13 @@
 
   // X86_64 has entirely custom relocation types.
   if (Arch == Triple::x86_64) {
-    bool isPCRel = Obj->getAnyRelocationPCRel(RE);
-
     switch (Type) {
     case MachO::X86_64_RELOC_GOT_LOAD:
     case MachO::X86_64_RELOC_GOT: {
-      printRelocationTargetName(Obj, RE, fmt);
-      fmt << "@GOT";
-      if (isPCRel)
-        fmt << "PCREL";
+      printRelocationTargetName(Obj, RE, Fmt);
+      Fmt << "@GOT";
+      if (IsPCRel)
+        Fmt << "PCREL";
       break;
     }
     case MachO::X86_64_RELOC_SUBTRACTOR: {
@@ -711,31 +731,31 @@
 
       // The X86_64_RELOC_UNSIGNED contains the minuend symbol;
       // X86_64_RELOC_SUBTRACTOR contains the subtrahend.
-      printRelocationTargetName(Obj, RENext, fmt);
-      fmt << "-";
-      printRelocationTargetName(Obj, RE, fmt);
+      printRelocationTargetName(Obj, RENext, Fmt);
+      Fmt << "-";
+      printRelocationTargetName(Obj, RE, Fmt);
       break;
     }
     case MachO::X86_64_RELOC_TLV:
-      printRelocationTargetName(Obj, RE, fmt);
-      fmt << "@TLV";
-      if (isPCRel)
-        fmt << "P";
+      printRelocationTargetName(Obj, RE, Fmt);
+      Fmt << "@TLV";
+      if (IsPCRel)
+        Fmt << "P";
       break;
     case MachO::X86_64_RELOC_SIGNED_1:
-      printRelocationTargetName(Obj, RE, fmt);
-      fmt << "-1";
+      printRelocationTargetName(Obj, RE, Fmt);
+      Fmt << "-1";
       break;
     case MachO::X86_64_RELOC_SIGNED_2:
-      printRelocationTargetName(Obj, RE, fmt);
-      fmt << "-2";
+      printRelocationTargetName(Obj, RE, Fmt);
+      Fmt << "-2";
       break;
     case MachO::X86_64_RELOC_SIGNED_4:
-      printRelocationTargetName(Obj, RE, fmt);
-      fmt << "-4";
+      printRelocationTargetName(Obj, RE, Fmt);
+      Fmt << "-4";
       break;
     default:
-      printRelocationTargetName(Obj, RE, fmt);
+      printRelocationTargetName(Obj, RE, Fmt);
       break;
     }
     // X86 and ARM share some relocation types in common.
@@ -758,9 +778,9 @@
         report_error(Obj->getFileName(), "Expected GENERIC_RELOC_PAIR after "
                      "GENERIC_RELOC_SECTDIFF.");
 
-      printRelocationTargetName(Obj, RE, fmt);
-      fmt << "-";
-      printRelocationTargetName(Obj, RENext, fmt);
+      printRelocationTargetName(Obj, RE, Fmt);
+      Fmt << "-";
+      printRelocationTargetName(Obj, RENext, Fmt);
       break;
     }
     }
@@ -779,20 +799,20 @@
           report_error(Obj->getFileName(), "Expected GENERIC_RELOC_PAIR after "
                        "GENERIC_RELOC_LOCAL_SECTDIFF.");
 
-        printRelocationTargetName(Obj, RE, fmt);
-        fmt << "-";
-        printRelocationTargetName(Obj, RENext, fmt);
+        printRelocationTargetName(Obj, RE, Fmt);
+        Fmt << "-";
+        printRelocationTargetName(Obj, RENext, Fmt);
         break;
       }
       case MachO::GENERIC_RELOC_TLV: {
-        printRelocationTargetName(Obj, RE, fmt);
-        fmt << "@TLV";
+        printRelocationTargetName(Obj, RE, Fmt);
+        Fmt << "@TLV";
         if (IsPCRel)
-          fmt << "P";
+          Fmt << "P";
         break;
       }
       default:
-        printRelocationTargetName(Obj, RE, fmt);
+        printRelocationTargetName(Obj, RE, Fmt);
       }
     } else { // ARM-specific relocations
       switch (Type) {
@@ -803,10 +823,10 @@
         bool isUpper = (Obj->getAnyRelocationLength(RE) & 0x1) == 1;
 
         if (isUpper)
-          fmt << ":upper16:(";
+          Fmt << ":upper16:(";
         else
-          fmt << ":lower16:(";
-        printRelocationTargetName(Obj, RE, fmt);
+          Fmt << ":lower16:(";
+        printRelocationTargetName(Obj, RE, Fmt);
 
         DataRefImpl RelNext = Rel;
         Obj->moveRelocationNext(RelNext);
@@ -827,21 +847,21 @@
         // ARM_RELOC_HALF_SECTDIFF encodes the second section in the
         // symbol/section pointer of the follow-on relocation.
         if (Type == MachO::ARM_RELOC_HALF_SECTDIFF) {
-          fmt << "-";
-          printRelocationTargetName(Obj, RENext, fmt);
+          Fmt << "-";
+          printRelocationTargetName(Obj, RENext, Fmt);
         }
 
-        fmt << ")";
+        Fmt << ")";
         break;
       }
-      default: { printRelocationTargetName(Obj, RE, fmt); }
+      default: { printRelocationTargetName(Obj, RE, Fmt); }
       }
     }
   } else
-    printRelocationTargetName(Obj, RE, fmt);
+    printRelocationTargetName(Obj, RE, Fmt);
 
-  fmt.flush();
-  Result.append(fmtbuf.begin(), fmtbuf.end());
+  Fmt.flush();
+  Result.append(FmtBuf.begin(), FmtBuf.end());
   return std::error_code();
 }
 
@@ -863,8 +883,7 @@
 /// relocations, usually because it is the trailing part of a multipart
 /// relocation that will be printed as part of the leading relocation.
 static bool getHidden(RelocationRef RelRef) {
-  const ObjectFile *Obj = RelRef.getObject();
-  auto *MachO = dyn_cast<MachOObjectFile>(Obj);
+  auto *MachO = dyn_cast<MachOObjectFile>(RelRef.getObject());
   if (!MachO)
     return false;
 
@@ -874,10 +893,10 @@
 
   // On arches that use the generic relocations, GENERIC_RELOC_PAIR
   // is always hidden.
-  if (Arch == Triple::x86 || Arch == Triple::arm || Arch == Triple::ppc) {
-    if (Type == MachO::GENERIC_RELOC_PAIR)
-      return true;
-  } else if (Arch == Triple::x86_64) {
+  if (Arch == Triple::x86 || Arch == Triple::arm || Arch == Triple::ppc)
+    return Type == MachO::GENERIC_RELOC_PAIR;
+
+  if (Arch == Triple::x86_64) {
     // On x86_64, X86_64_RELOC_UNSIGNED is hidden only when it follows
     // an X86_64_RELOC_SUBTRACTOR.
     if (Type == MachO::X86_64_RELOC_UNSIGNED && Rel.d.a > 0) {
@@ -1052,27 +1071,27 @@
     auto Preamble = " { ";
     auto Separator = "";
     StringRef Fmt = "\t\t\t%08" PRIx64 ":  ";
-    std::vector<RelocationRef>::const_iterator rel_cur = Rels->begin();
-    std::vector<RelocationRef>::const_iterator rel_end = Rels->end();
+    std::vector<RelocationRef>::const_iterator RelCur = Rels->begin();
+    std::vector<RelocationRef>::const_iterator RelEnd = Rels->end();
 
     // Hexagon's packets require relocations to be inline rather than
     // clustered at the end of the packet.
     auto PrintReloc = [&]() -> void {
-      while ((rel_cur != rel_end) && (rel_cur->getOffset() <= Address)) {
-        if (rel_cur->getOffset() == Address) {
-          SmallString<16> name;
-          SmallString<32> val;
-          rel_cur->getTypeName(name);
-          error(getRelocationValueString(*rel_cur, val));
-          OS << Separator << format(Fmt.data(), Address) << name << "\t" << val
+      while ((RelCur != RelEnd) && (RelCur->getOffset() <= Address)) {
+        if (RelCur->getOffset() == Address) {
+          SmallString<16> Name;
+          SmallString<32> Val;
+          RelCur->getTypeName(Name);
+          error(getRelocationValueString(*RelCur, Val));
+          OS << Separator << format(Fmt.data(), Address) << Name << "\t" << Val
                 << "\n";
           return;
         }
-        rel_cur++;
+        ++RelCur;
       }
     };
 
-    while(!HeadTail.first.empty()) {
+    while (!HeadTail.first.empty()) {
       OS << Separator;
       Separator = "\n";
       if (SP && (PrintSource || PrintLines))
@@ -1082,7 +1101,7 @@
       Preamble = "   ";
       StringRef Inst;
       auto Duplex = HeadTail.first.split('\v');
-      if(!Duplex.second.empty()){
+      if (!Duplex.second.empty()) {
         OS << Duplex.first;
         OS << "; ";
         Inst = Duplex.second;
@@ -1214,7 +1233,6 @@
     Expected<uint64_t> AddressOrErr = Symbol.getAddress();
     if (!AddressOrErr)
       report_error(Obj->getFileName(), AddressOrErr.takeError());
-    uint64_t Address = *AddressOrErr;
 
     Expected<StringRef> Name = Symbol.getName();
     if (!Name)
@@ -1229,7 +1247,7 @@
     if (SecI == Obj->section_end())
       continue;
 
-    AllSymbols[*SecI].emplace_back(Address, *Name, SymbolType);
+    AllSymbols[*SecI].emplace_back(*AddressOrErr, *Name, SymbolType);
   }
 }
 
@@ -1265,7 +1283,6 @@
   if (auto *ElfObj = dyn_cast<ELFObjectFileBase>(Obj)) {
     for (auto PltEntry : ElfObj->getPltAddresses()) {
       SymbolRef Symbol(PltEntry.first, ElfObj);
-
       uint8_t SymbolType = getElfSymbolType(Obj, Symbol);
 
       Expected<StringRef> NameOrErr = Symbol.getName();
@@ -1280,7 +1297,30 @@
   }
 }
 
-static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
+// Normally the disassembly output will skip blocks of zeroes. This function
+// returns the number of zero bytes that can be skipped when dumping the
+// disassembly of the instructions in Buf.
+static size_t countSkippableZeroBytes(ArrayRef<uint8_t> Buf) {
+  // When -z or --disassemble-zeroes is given, we never skip zero bytes.
+  if (DisassembleZeroes)
+    return 0;
+
+  // Find the number of leading zeroes.
+  size_t N = 0;
+  while (N < Buf.size() && !Buf[N])
+    ++N;
+
+  // We want to skip blocks of zero bytes, but only if we see at least
+  // 8 of them in a row.
+  if (N < 8)
+    return 0;
+
+  // We skip zeroes in multiples of 4 because we do not want to truncate an
+  // instruction if it starts with a zero byte.
+  return N & ~0x3;
+}
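
Worked through, the thresholding and rounding behave like this:

    // 7 zero bytes  -> N = 7 < 8: nothing skipped, all 7 are disassembled
    // 11 zero bytes -> N = 11 >= 8: 11 & ~0x3 == 8 bytes skipped ("..."),
    //                  and the remaining 3 zero bytes are disassembled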
+
+static void disassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
   if (StartAddress > StopAddress)
     error("Start address should be less than stop address");
 
@@ -1288,10 +1328,9 @@
 
   // Package up features to be passed to target/subtarget
   SubtargetFeatures Features = Obj->getFeatures();
-  if (MAttrs.size()) {
-    for (unsigned i = 0; i != MAttrs.size(); ++i)
-      Features.AddFeature(MAttrs[i]);
-  }
+  if (!MAttrs.empty())
+    for (unsigned I = 0; I != MAttrs.size(); ++I)
+      Features.AddFeature(MAttrs[I]);
 
   std::unique_ptr<const MCRegisterInfo> MRI(
       TheTarget->createMCRegInfo(TripleName));
@@ -1486,7 +1525,7 @@
     }
 
     // Sort relocations by address.
-    llvm::sort(Rels, RelocAddressLess);
+    llvm::sort(Rels, isRelocAddressLess);
 
     StringRef SegmentName = "";
     if (const MachOObjectFile *MachO = dyn_cast<const MachOObjectFile>(Obj)) {
@@ -1516,15 +1555,16 @@
     uint64_t Index;
     bool PrintedSection = false;
 
-    std::vector<RelocationRef>::const_iterator rel_cur = Rels.begin();
-    std::vector<RelocationRef>::const_iterator rel_end = Rels.end();
+    std::vector<RelocationRef>::const_iterator RelCur = Rels.begin();
+    std::vector<RelocationRef>::const_iterator RelEnd = Rels.end();
     // Disassemble symbol by symbol.
-    for (unsigned si = 0, se = Symbols.size(); si != se; ++si) {
-      uint64_t Start = std::get<0>(Symbols[si]) - SectionAddr;
+    for (unsigned SI = 0, SE = Symbols.size(); SI != SE; ++SI) {
+      uint64_t Start = std::get<0>(Symbols[SI]) - SectionAddr;
       // The end is either the section end or the beginning of the next
       // symbol.
-      uint64_t End =
-          (si == se - 1) ? SectSize : std::get<0>(Symbols[si + 1]) - SectionAddr;
+      uint64_t End = (SI == SE - 1)
+                         ? SectSize
+                         : std::get<0>(Symbols[SI + 1]) - SectionAddr;
       // Don't try to disassemble beyond the end of section contents.
       if (End > SectSize)
         End = SectSize;
@@ -1541,7 +1581,7 @@
 
       /// Skip if user requested specific symbols and this is not in the list
       if (!DisasmFuncsSet.empty() &&
-          !DisasmFuncsSet.count(std::get<1>(Symbols[si])))
+          !DisasmFuncsSet.count(std::get<1>(Symbols[SI])))
         continue;
 
       if (!PrintedSection) {
@@ -1557,12 +1597,12 @@
         End = StopAddress - SectionAddr;
 
       if (Obj->isELF() && Obj->getArch() == Triple::amdgcn) {
-        if (std::get<2>(Symbols[si]) == ELF::STT_AMDGPU_HSA_KERNEL) {
+        if (std::get<2>(Symbols[SI]) == ELF::STT_AMDGPU_HSA_KERNEL) {
           // skip amd_kernel_code_t at the begining of kernel symbol (256 bytes)
           Start += 256;
         }
-        if (si == se - 1 ||
-            std::get<2>(Symbols[si + 1]) == ELF::STT_AMDGPU_HSA_KERNEL) {
+        if (SI == SE - 1 ||
+            std::get<2>(Symbols[SI + 1]) == ELF::STT_AMDGPU_HSA_KERNEL) {
           // cut trailing zeroes at the end of kernel
           // cut up to 256 bytes
           const uint64_t EndAlign = 256;
@@ -1573,30 +1613,15 @@
         }
       }
 
-      auto PrintSymbol = [](StringRef Name) {
-        outs() << '\n' << Name << ":\n";
-      };
-      StringRef SymbolName = std::get<1>(Symbols[si]);
-      if (Demangle) {
-        char *DemangledSymbol = nullptr;
-        size_t Size = 0;
-        int Status = -1;
-        if (SymbolName.startswith("_Z"))
-          DemangledSymbol = itaniumDemangle(SymbolName.data(), DemangledSymbol,
-                                            &Size, &Status);
-        else if (SymbolName.startswith("?"))
-          DemangledSymbol = microsoftDemangle(SymbolName.data(),
-                                              DemangledSymbol, &Size, &Status);
+      outs() << '\n';
+      if (!NoLeadingAddr)
+        outs() << format("%016" PRIx64 " ", SectionAddr + Start);
 
-        if (Status == 0 && DemangledSymbol)
-          PrintSymbol(StringRef(DemangledSymbol));
-        else
-          PrintSymbol(SymbolName);
-
-        if (DemangledSymbol)
-          free(DemangledSymbol);
-      } else
-        PrintSymbol(SymbolName);
+      StringRef SymbolName = std::get<1>(Symbols[SI]);
+      if (Demangle)
+        outs() << demangle(SymbolName) << ":\n";
+      else
+        outs() << SymbolName << ":\n";
 
       // Don't print raw contents of a virtual section. A virtual section
       // doesn't have any contents in the file.
@@ -1624,7 +1649,7 @@
         // same section. We rely on the markers introduced to
         // understand what we need to dump. If the data marker is within a
         // function, it is denoted as a word/short etc
-        if (isArmElf(Obj) && std::get<2>(Symbols[si]) != ELF::STT_OBJECT &&
+        if (isArmElf(Obj) && std::get<2>(Symbols[SI]) != ELF::STT_OBJECT &&
             !DisassembleAll) {
           uint64_t Stride = 0;
 
@@ -1688,7 +1713,7 @@
         // disassembling text (applicable all architectures),
         // we are in a situation where we must print the data and not
         // disassemble it.
-        if (Obj->isELF() && std::get<2>(Symbols[si]) == ELF::STT_OBJECT &&
+        if (Obj->isELF() && std::get<2>(Symbols[SI]) == ELF::STT_OBJECT &&
             !DisassembleAll && Section.isText()) {
           // print out data up to 8 bytes at a time in hex and ascii
           uint8_t AsciiData[9] = {'\0'};
@@ -1729,6 +1754,14 @@
         if (Index >= End)
           break;
 
+        if (size_t N =
+                countSkippableZeroBytes(Bytes.slice(Index, End - Index))) {
+          outs() << "\t\t..." << '\n';
+          Index += N;
+          if (Index >= End)
+            break;
+        }
+
         // Disassemble a real instruction or a data when disassemble all is
         // provided
         bool Disassembled = DisAsm->getInstruction(Inst, Size, Bytes.slice(Index),
@@ -1807,32 +1840,32 @@
         // Hexagon does this in pretty printer
         if (Obj->getArch() != Triple::hexagon)
           // Print relocation for instruction.
-          while (rel_cur != rel_end) {
-            bool hidden = getHidden(*rel_cur);
-            uint64_t addr = rel_cur->getOffset();
-            SmallString<16> name;
-            SmallString<32> val;
+          while (RelCur != RelEnd) {
+            uint64_t Addr = RelCur->getOffset();
+            SmallString<16> Name;
+            SmallString<32> Val;
 
             // If this relocation is hidden, skip it.
-            if (hidden || ((SectionAddr + addr) < StartAddress)) {
-              ++rel_cur;
+            if (getHidden(*RelCur) || ((SectionAddr + Addr) < StartAddress)) {
+              ++RelCur;
               continue;
             }
 
-            // Stop when rel_cur's address is past the current instruction.
+            // Stop when RelCur's address is past the current instruction.
-            if (addr >= Index + Size) break;
-            rel_cur->getTypeName(name);
-            error(getRelocationValueString(*rel_cur, val));
-            outs() << format(Fmt.data(), SectionAddr + addr) << name
-                   << "\t" << val << "\n";
-            ++rel_cur;
+            if (Addr >= Index + Size)
+              break;
+            RelCur->getTypeName(Name);
+            error(getRelocationValueString(*RelCur, Val));
+            outs() << format(Fmt.data(), SectionAddr + Addr) << Name << "\t"
+                   << Val << "\n";
+            ++RelCur;
           }
       }
     }
   }
 }
 
-void llvm::PrintRelocations(const ObjectFile *Obj) {
+void llvm::printRelocations(const ObjectFile *Obj) {
   StringRef Fmt = Obj->getBytesInAddress() > 4 ? "%016" PRIx64 :
                                                  "%08" PRIx64;
   // Regular objdump doesn't print relocations in non-relocatable object
@@ -1843,61 +1876,57 @@
   for (const SectionRef &Section : ToolSectionFilter(*Obj)) {
     if (Section.relocation_begin() == Section.relocation_end())
       continue;
-    StringRef secname;
-    error(Section.getName(secname));
-    outs() << "RELOCATION RECORDS FOR [" << secname << "]:\n";
+    StringRef SecName;
+    error(Section.getName(SecName));
+    outs() << "RELOCATION RECORDS FOR [" << SecName << "]:\n";
     for (const RelocationRef &Reloc : Section.relocations()) {
-      bool hidden = getHidden(Reloc);
-      uint64_t address = Reloc.getOffset();
-      SmallString<32> relocname;
-      SmallString<32> valuestr;
-      if (address < StartAddress || address > StopAddress || hidden)
+      uint64_t Address = Reloc.getOffset();
+      SmallString<32> RelocName;
+      SmallString<32> ValueStr;
+      if (Address < StartAddress || Address > StopAddress || getHidden(Reloc))
         continue;
-      Reloc.getTypeName(relocname);
-      error(getRelocationValueString(Reloc, valuestr));
-      outs() << format(Fmt.data(), address) << " " << relocname << " "
-             << valuestr << "\n";
+      Reloc.getTypeName(RelocName);
+      error(getRelocationValueString(Reloc, ValueStr));
+      outs() << format(Fmt.data(), Address) << " " << RelocName << " "
+             << ValueStr << "\n";
     }
     outs() << "\n";
   }
 }
 
-void llvm::PrintDynamicRelocations(const ObjectFile *Obj) {
-
+void llvm::printDynamicRelocations(const ObjectFile *Obj) {
   // For the moment, this option is for ELF only
   if (!Obj->isELF())
     return;
 
   const auto *Elf = dyn_cast<ELFObjectFileBase>(Obj);
-
   if (!Elf || Elf->getEType() != ELF::ET_DYN) {
     error("not a dynamic object");
     return;
   }
 
-  StringRef Fmt = Obj->getBytesInAddress() > 4 ? "%016" PRIx64 : "%08" PRIx64;
-
   std::vector<SectionRef> DynRelSec = Obj->dynamic_relocation_sections();
   if (DynRelSec.empty())
     return;
 
   outs() << "DYNAMIC RELOCATION RECORDS\n";
+  StringRef Fmt = Obj->getBytesInAddress() > 4 ? "%016" PRIx64 : "%08" PRIx64;
   for (const SectionRef &Section : DynRelSec) {
     if (Section.relocation_begin() == Section.relocation_end())
       continue;
     for (const RelocationRef &Reloc : Section.relocations()) {
-      uint64_t address = Reloc.getOffset();
-      SmallString<32> relocname;
-      SmallString<32> valuestr;
-      Reloc.getTypeName(relocname);
-      error(getRelocationValueString(Reloc, valuestr));
-      outs() << format(Fmt.data(), address) << " " << relocname << " "
-             << valuestr << "\n";
+      uint64_t Address = Reloc.getOffset();
+      SmallString<32> RelocName;
+      SmallString<32> ValueStr;
+      Reloc.getTypeName(RelocName);
+      error(getRelocationValueString(Reloc, ValueStr));
+      outs() << format(Fmt.data(), Address) << " " << RelocName << " "
+             << ValueStr << "\n";
     }
   }
 }
 
-void llvm::PrintSectionHeaders(const ObjectFile *Obj) {
+void llvm::printSectionHeaders(const ObjectFile *Obj) {
   outs() << "Sections:\n"
             "Idx Name          Size      Address          Type\n";
   for (const SectionRef &Section : ToolSectionFilter(*Obj)) {
@@ -1917,7 +1946,7 @@
   outs() << "\n";
 }
 
-void llvm::PrintSectionContents(const ObjectFile *Obj) {
+void llvm::printSectionContents(const ObjectFile *Obj) {
   std::error_code EC;
   for (const SectionRef &Section : ToolSectionFilter(*Obj)) {
     StringRef Name;
@@ -1939,23 +1968,23 @@
     error(Section.getContents(Contents));
 
     // Dump out the content as hex and printable ascii characters.
-    for (std::size_t addr = 0, end = Contents.size(); addr < end; addr += 16) {
-      outs() << format(" %04" PRIx64 " ", BaseAddr + addr);
+    for (std::size_t Addr = 0, End = Contents.size(); Addr < End; Addr += 16) {
+      outs() << format(" %04" PRIx64 " ", BaseAddr + Addr);
       // Dump line of hex.
-      for (std::size_t i = 0; i < 16; ++i) {
-        if (i != 0 && i % 4 == 0)
+      for (std::size_t I = 0; I < 16; ++I) {
+        if (I != 0 && I % 4 == 0)
           outs() << ' ';
-        if (addr + i < end)
-          outs() << hexdigit((Contents[addr + i] >> 4) & 0xF, true)
-                 << hexdigit(Contents[addr + i] & 0xF, true);
+        if (Addr + I < End)
+          outs() << hexdigit((Contents[Addr + I] >> 4) & 0xF, true)
+                 << hexdigit(Contents[Addr + I] & 0xF, true);
         else
           outs() << "  ";
       }
       // Print ascii.
       outs() << "  ";
-      for (std::size_t i = 0; i < 16 && addr + i < end; ++i) {
-        if (isPrint(static_cast<unsigned char>(Contents[addr + i]) & 0xFF))
-          outs() << Contents[addr + i];
+      for (std::size_t I = 0; I < 16 && Addr + I < End; ++I) {
+        if (isPrint(static_cast<unsigned char>(Contents[Addr + I]) & 0xFF))
+          outs() << Contents[Addr + I];
         else
           outs() << ".";
       }
@@ -1964,40 +1993,47 @@
   }
 }
 
-void llvm::PrintSymbolTable(const ObjectFile *o, StringRef ArchiveName,
+void llvm::printSymbolTable(const ObjectFile *O, StringRef ArchiveName,
                             StringRef ArchitectureName) {
   outs() << "SYMBOL TABLE:\n";
 
-  if (const COFFObjectFile *coff = dyn_cast<const COFFObjectFile>(o)) {
-    printCOFFSymbolTable(coff);
+  if (const COFFObjectFile *Coff = dyn_cast<const COFFObjectFile>(O)) {
+    printCOFFSymbolTable(Coff);
     return;
   }
-  for (const SymbolRef &Symbol : o->symbols()) {
+
+  for (auto I = O->symbol_begin(), E = O->symbol_end(); I != E; ++I) {
+    // Skip printing the special zero symbol when dumping an ELF file.
+    // This makes the output consistent with GNU objdump.
+    if (I == O->symbol_begin() && isa<ELFObjectFileBase>(O))
+      continue;
+
+    const SymbolRef &Symbol = *I;
     Expected<uint64_t> AddressOrError = Symbol.getAddress();
     if (!AddressOrError)
-      report_error(ArchiveName, o->getFileName(), AddressOrError.takeError(),
+      report_error(ArchiveName, O->getFileName(), AddressOrError.takeError(),
                    ArchitectureName);
     uint64_t Address = *AddressOrError;
     if ((Address < StartAddress) || (Address > StopAddress))
       continue;
     Expected<SymbolRef::Type> TypeOrError = Symbol.getType();
     if (!TypeOrError)
-      report_error(ArchiveName, o->getFileName(), TypeOrError.takeError(),
+      report_error(ArchiveName, O->getFileName(), TypeOrError.takeError(),
                    ArchitectureName);
     SymbolRef::Type Type = *TypeOrError;
     uint32_t Flags = Symbol.getFlags();
     Expected<section_iterator> SectionOrErr = Symbol.getSection();
     if (!SectionOrErr)
-      report_error(ArchiveName, o->getFileName(), SectionOrErr.takeError(),
+      report_error(ArchiveName, O->getFileName(), SectionOrErr.takeError(),
                    ArchitectureName);
     section_iterator Section = *SectionOrErr;
     StringRef Name;
-    if (Type == SymbolRef::ST_Debug && Section != o->section_end()) {
+    if (Type == SymbolRef::ST_Debug && Section != O->section_end()) {
       Section->getName(Name);
     } else {
       Expected<StringRef> NameOrErr = Symbol.getName();
       if (!NameOrErr)
-        report_error(ArchiveName, o->getFileName(), NameOrErr.takeError(),
+        report_error(ArchiveName, O->getFileName(), NameOrErr.takeError(),
                      ArchitectureName);
       Name = *NameOrErr;
     }
@@ -2021,7 +2057,7 @@
     else if (Type == SymbolRef::ST_Data)
       FileFunc = 'O';
 
-    const char *Fmt = o->getBytesInAddress() > 4 ? "%016" PRIx64 :
+    const char *Fmt = O->getBytesInAddress() > 4 ? "%016" PRIx64 :
                                                    "%08" PRIx64;
 
     outs() << format(Fmt, Address) << " "
@@ -2037,11 +2073,11 @@
       outs() << "*ABS*";
     } else if (Common) {
       outs() << "*COM*";
-    } else if (Section == o->section_end()) {
+    } else if (Section == O->section_end()) {
       outs() << "*UND*";
     } else {
       if (const MachOObjectFile *MachO =
-          dyn_cast<const MachOObjectFile>(o)) {
+          dyn_cast<const MachOObjectFile>(O)) {
         DataRefImpl DR = Section->getRawDataRefImpl();
         StringRef SegmentName = MachO->getSectionFinalSegmentName(DR);
         outs() << SegmentName << ",";
@@ -2052,94 +2088,84 @@
     }
 
     outs() << '\t';
-    if (Common || isa<ELFObjectFileBase>(o)) {
+    if (Common || isa<ELFObjectFileBase>(O)) {
       uint64_t Val =
           Common ? Symbol.getAlignment() : ELFSymbolRef(Symbol).getSize();
       outs() << format("\t %08" PRIx64 " ", Val);
     }
 
-    if (Hidden) {
+    if (Hidden)
       outs() << ".hidden ";
-    }
-    outs() << Name
-           << '\n';
+
+    if (Demangle)
+      outs() << demangle(Name) << '\n';
+    else
+      outs() << Name << '\n';
   }
 }
 
-static void PrintUnwindInfo(const ObjectFile *o) {
+static void printUnwindInfo(const ObjectFile *O) {
   outs() << "Unwind info:\n\n";
 
-  if (const COFFObjectFile *coff = dyn_cast<COFFObjectFile>(o)) {
-    printCOFFUnwindInfo(coff);
-  } else if (const MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(o))
+  if (const COFFObjectFile *Coff = dyn_cast<COFFObjectFile>(O))
+    printCOFFUnwindInfo(Coff);
+  else if (const MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(O))
     printMachOUnwindInfo(MachO);
-  else {
+  else
     // TODO: Extract DWARF dump tool to objdump.
     WithColor::error(errs(), ToolName)
         << "This operation is only currently supported "
            "for COFF and MachO object files.\n";
-    return;
-  }
 }
 
 void llvm::printExportsTrie(const ObjectFile *o) {
   outs() << "Exports trie:\n";
   if (const MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(o))
     printMachOExportsTrie(MachO);
-  else {
+  else
     WithColor::error(errs(), ToolName)
         << "This operation is only currently supported "
            "for Mach-O executable files.\n";
-    return;
-  }
 }
 
 void llvm::printRebaseTable(ObjectFile *o) {
   outs() << "Rebase table:\n";
   if (MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(o))
     printMachORebaseTable(MachO);
-  else {
+  else
     WithColor::error(errs(), ToolName)
         << "This operation is only currently supported "
            "for Mach-O executable files.\n";
-    return;
-  }
 }
 
 void llvm::printBindTable(ObjectFile *o) {
   outs() << "Bind table:\n";
   if (MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(o))
     printMachOBindTable(MachO);
-  else {
+  else
     WithColor::error(errs(), ToolName)
         << "This operation is only currently supported "
            "for Mach-O executable files.\n";
-    return;
-  }
 }
 
 void llvm::printLazyBindTable(ObjectFile *o) {
   outs() << "Lazy bind table:\n";
   if (MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(o))
     printMachOLazyBindTable(MachO);
-  else {
+  else
     WithColor::error(errs(), ToolName)
         << "This operation is only currently supported "
            "for Mach-O executable files.\n";
-    return;
-  }
 }
 
 void llvm::printWeakBindTable(ObjectFile *o) {
   outs() << "Weak bind table:\n";
   if (MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(o))
     printMachOWeakBindTable(MachO);
-  else {
+  else
     WithColor::error(errs(), ToolName)
         << "This operation is only currently supported "
            "for Mach-O executable files.\n";
-    return;
-  }
 }
 
 /// Dump the raw contents of the __clangast section so the output can be piped
@@ -2177,7 +2203,7 @@
 }
 
 static void printFaultMaps(const ObjectFile *Obj) {
-  const char *FaultMapSectionName = nullptr;
+  StringRef FaultMapSectionName;
 
   if (isa<ELFObjectFileBase>(Obj)) {
     FaultMapSectionName = ".llvm_faultmaps";
@@ -2217,39 +2243,38 @@
   outs() << FMP;
 }
 
-static void printPrivateFileHeaders(const ObjectFile *o, bool onlyFirst) {
-  if (o->isELF()) {
-    printELFFileHeader(o);
-    return printELFDynamicSection(o);
+static void printPrivateFileHeaders(const ObjectFile *O, bool OnlyFirst) {
+  if (O->isELF()) {
+    printELFFileHeader(O);
+    return printELFDynamicSection(O);
   }
-  if (o->isCOFF())
-    return printCOFFFileHeader(o);
-  if (o->isWasm())
-    return printWasmFileHeader(o);
-  if (o->isMachO()) {
-    printMachOFileHeader(o);
-    if (!onlyFirst)
-      printMachOLoadCommands(o);
+  if (O->isCOFF())
+    return printCOFFFileHeader(O);
+  if (O->isWasm())
+    return printWasmFileHeader(O);
+  if (O->isMachO()) {
+    printMachOFileHeader(O);
+    if (!OnlyFirst)
+      printMachOLoadCommands(O);
     return;
   }
-  report_error(o->getFileName(), "Invalid/Unsupported object file format");
+  report_error(O->getFileName(), "Invalid/Unsupported object file format");
 }
 
-static void printFileHeaders(const ObjectFile *o) {
-  if (!o->isELF() && !o->isCOFF())
-    report_error(o->getFileName(), "Invalid/Unsupported object file format");
+static void printFileHeaders(const ObjectFile *O) {
+  if (!O->isELF() && !O->isCOFF())
+    report_error(O->getFileName(), "Invalid/Unsupported object file format");
 
-  Triple::ArchType AT = o->getArch();
+  Triple::ArchType AT = O->getArch();
   outs() << "architecture: " << Triple::getArchTypeName(AT) << "\n";
-  Expected<uint64_t> StartAddrOrErr = o->getStartAddress();
+  Expected<uint64_t> StartAddrOrErr = O->getStartAddress();
   if (!StartAddrOrErr)
-    report_error(o->getFileName(), StartAddrOrErr.takeError());
+    report_error(O->getFileName(), StartAddrOrErr.takeError());
 
-  StringRef Fmt = o->getBytesInAddress() > 4 ? "%016" PRIx64 : "%08" PRIx64;
+  StringRef Fmt = O->getBytesInAddress() > 4 ? "%016" PRIx64 : "%08" PRIx64;
   uint64_t Address = StartAddrOrErr.get();
   outs() << "start address: "
-         << "0x" << format(Fmt.data(), Address)
-         << "\n";
+         << "0x" << format(Fmt.data(), Address) << "\n\n";
 }
 
 static void printArchiveChild(StringRef Filename, const Archive::Child &C) {
@@ -2316,55 +2341,55 @@
   outs() << Name << "\n";
 }
 
-static void DumpObject(ObjectFile *o, const Archive *a = nullptr,
-                       const Archive::Child *c = nullptr) {
-  StringRef ArchiveName = a != nullptr ? a->getFileName() : "";
+static void dumpObject(ObjectFile *O, const Archive *A = nullptr,
+                       const Archive::Child *C = nullptr) {
   // Avoid other output when using a raw option.
   if (!RawClangAST) {
     outs() << '\n';
-    if (a)
-      outs() << a->getFileName() << "(" << o->getFileName() << ")";
+    if (A)
+      outs() << A->getFileName() << "(" << O->getFileName() << ")";
     else
-      outs() << o->getFileName();
-    outs() << ":\tfile format " << o->getFileFormatName() << "\n\n";
+      outs() << O->getFileName();
+    outs() << ":\tfile format " << O->getFileFormatName() << "\n\n";
   }
 
-  if (ArchiveHeaders && !MachOOpt && c)
-    printArchiveChild(ArchiveName, *c);
-  if (Disassemble)
-    DisassembleObject(o, Relocations);
-  if (Relocations && !Disassemble)
-    PrintRelocations(o);
-  if (DynamicRelocations)
-    PrintDynamicRelocations(o);
-  if (SectionHeaders)
-    PrintSectionHeaders(o);
-  if (SectionContents)
-    PrintSectionContents(o);
-  if (SymbolTable)
-    PrintSymbolTable(o, ArchiveName);
-  if (UnwindInfo)
-    PrintUnwindInfo(o);
-  if (PrivateHeaders || FirstPrivateHeader)
-    printPrivateFileHeaders(o, FirstPrivateHeader);
+  StringRef ArchiveName = A ? A->getFileName() : "";
   if (FileHeaders)
-    printFileHeaders(o);
+    printFileHeaders(O);
+  if (ArchiveHeaders && !MachOOpt && C)
+    printArchiveChild(ArchiveName, *C);
+  if (Disassemble)
+    disassembleObject(O, Relocations);
+  if (Relocations && !Disassemble)
+    printRelocations(O);
+  if (DynamicRelocations)
+    printDynamicRelocations(O);
+  if (SectionHeaders)
+    printSectionHeaders(O);
+  if (SectionContents)
+    printSectionContents(O);
+  if (SymbolTable)
+    printSymbolTable(O, ArchiveName);
+  if (UnwindInfo)
+    printUnwindInfo(O);
+  if (PrivateHeaders || FirstPrivateHeader)
+    printPrivateFileHeaders(O, FirstPrivateHeader);
   if (ExportsTrie)
-    printExportsTrie(o);
+    printExportsTrie(O);
   if (Rebase)
-    printRebaseTable(o);
+    printRebaseTable(O);
   if (Bind)
-    printBindTable(o);
+    printBindTable(O);
   if (LazyBind)
-    printLazyBindTable(o);
+    printLazyBindTable(O);
   if (WeakBind)
-    printWeakBindTable(o);
+    printWeakBindTable(O);
   if (RawClangAST)
-    printRawClangAST(o);
+    printRawClangAST(O);
   if (PrintFaultMaps)
-    printFaultMaps(o);
+    printFaultMaps(O);
   if (DwarfDumpType != DIDT_Null) {
-    std::unique_ptr<DIContext> DICtx = DWARFContext::create(*o);
+    std::unique_ptr<DIContext> DICtx = DWARFContext::create(*O);
     // Dump the complete DWARF structure.
     DIDumpOptions DumpOpts;
     DumpOpts.DumpType = DwarfDumpType;
@@ -2372,7 +2397,7 @@
   }
 }
 
-static void DumpObject(const COFFImportFile *I, const Archive *A,
+static void dumpObject(const COFFImportFile *I, const Archive *A,
                        const Archive::Child *C = nullptr) {
   StringRef ArchiveName = A ? A->getFileName() : "";
 
@@ -2390,34 +2415,33 @@
 }
 
-/// Dump each object file in \a a;
+/// Dump each object file in \a A.
-static void DumpArchive(const Archive *a) {
+static void dumpArchive(const Archive *A) {
   Error Err = Error::success();
-  for (auto &C : a->children(Err)) {
+  for (auto &C : A->children(Err)) {
     Expected<std::unique_ptr<Binary>> ChildOrErr = C.getAsBinary();
     if (!ChildOrErr) {
       if (auto E = isNotObjectErrorInvalidFileType(ChildOrErr.takeError()))
-        report_error(a->getFileName(), C, std::move(E));
+        report_error(A->getFileName(), C, std::move(E));
       continue;
     }
-    if (ObjectFile *o = dyn_cast<ObjectFile>(&*ChildOrErr.get()))
-      DumpObject(o, a, &C);
+    if (ObjectFile *O = dyn_cast<ObjectFile>(&*ChildOrErr.get()))
+      dumpObject(O, A, &C);
     else if (COFFImportFile *I = dyn_cast<COFFImportFile>(&*ChildOrErr.get()))
-      DumpObject(I, a, &C);
+      dumpObject(I, A, &C);
     else
-      report_error(a->getFileName(), object_error::invalid_file_type);
+      report_error(A->getFileName(), object_error::invalid_file_type);
   }
   if (Err)
-    report_error(a->getFileName(), std::move(Err));
+    report_error(A->getFileName(), std::move(Err));
 }
 
 /// Open file and figure out how to dump it.
-static void DumpInput(StringRef file) {
-
+static void dumpInput(StringRef file) {
   // If we are using the Mach-O specific object file parser, then let it parse
   // the file and process the command line options, so the -arch flags can
   // be used to select specific slices, etc.
   if (MachOOpt) {
-    ParseInputMachO(file);
+    parseInputMachO(file);
     return;
   }
 
@@ -2427,12 +2451,12 @@
     report_error(file, BinaryOrErr.takeError());
   Binary &Binary = *BinaryOrErr.get().getBinary();
 
-  if (Archive *a = dyn_cast<Archive>(&Binary))
-    DumpArchive(a);
-  else if (ObjectFile *o = dyn_cast<ObjectFile>(&Binary))
-    DumpObject(o);
+  if (Archive *A = dyn_cast<Archive>(&Binary))
+    dumpArchive(A);
+  else if (ObjectFile *O = dyn_cast<ObjectFile>(&Binary))
+    dumpObject(O);
   else if (MachOUniversalBinary *UB = dyn_cast<MachOUniversalBinary>(&Binary))
-    ParseInputMachO(UB);
+    parseInputMachO(UB);
   else
     report_error(file, object_error::invalid_file_type);
 }
@@ -2453,11 +2477,12 @@
   ToolName = argv[0];
 
   // Defaults to a.out if no filenames specified.
-  if (InputFilenames.size() == 0)
+  if (InputFilenames.empty())
     InputFilenames.push_back("a.out");
 
   if (AllHeaders)
-    PrivateHeaders = Relocations = SectionHeaders = SymbolTable = true;
+    FileHeaders = PrivateHeaders = Relocations = SectionHeaders = SymbolTable =
+        true;
 
   if (DisassembleAll || PrintSource || PrintLines)
     Disassemble = true;
@@ -2487,7 +2512,7 @@
       && !(DylibsUsed && MachOOpt)
       && !(DylibId && MachOOpt)
       && !(ObjcMetaData && MachOOpt)
-      && !(FilterSections.size() != 0 && MachOOpt)
+      && !(!FilterSections.empty() && MachOOpt)
       && !PrintFaultMaps
       && DwarfDumpType == DIDT_Null) {
     cl::PrintHelpMessage();
@@ -2497,7 +2522,7 @@
   DisasmFuncsSet.insert(DisassembleFunctions.begin(),
                         DisassembleFunctions.end());
 
-  llvm::for_each(InputFilenames, DumpInput);
+  llvm::for_each(InputFilenames, dumpInput);
 
   return EXIT_SUCCESS;
 }
diff --git a/tools/llvm-objdump/llvm-objdump.h b/tools/llvm-objdump/llvm-objdump.h
index 44ef14d..fe2cb05 100644
--- a/tools/llvm-objdump/llvm-objdump.h
+++ b/tools/llvm-objdump/llvm-objdump.h
@@ -70,35 +70,35 @@
 
 // Various helper functions.
 void error(std::error_code ec);
-bool RelocAddressLess(object::RelocationRef a, object::RelocationRef b);
-void ParseInputMachO(StringRef Filename);
-void ParseInputMachO(object::MachOUniversalBinary *UB);
-void printCOFFUnwindInfo(const object::COFFObjectFile* o);
-void printMachOUnwindInfo(const object::MachOObjectFile* o);
-void printMachOExportsTrie(const object::MachOObjectFile* o);
-void printMachORebaseTable(object::MachOObjectFile* o);
-void printMachOBindTable(object::MachOObjectFile* o);
-void printMachOLazyBindTable(object::MachOObjectFile* o);
-void printMachOWeakBindTable(object::MachOObjectFile* o);
-void printELFFileHeader(const object::ObjectFile *o);
+bool isRelocAddressLess(object::RelocationRef A, object::RelocationRef B);
+void parseInputMachO(StringRef Filename);
+void parseInputMachO(object::MachOUniversalBinary *UB);
+void printCOFFUnwindInfo(const object::COFFObjectFile *O);
+void printMachOUnwindInfo(const object::MachOObjectFile *O);
+void printMachOExportsTrie(const object::MachOObjectFile *O);
+void printMachORebaseTable(object::MachOObjectFile *O);
+void printMachOBindTable(object::MachOObjectFile *O);
+void printMachOLazyBindTable(object::MachOObjectFile *O);
+void printMachOWeakBindTable(object::MachOObjectFile *O);
+void printELFFileHeader(const object::ObjectFile *O);
 void printELFDynamicSection(const object::ObjectFile *Obj);
-void printCOFFFileHeader(const object::ObjectFile *o);
-void printCOFFSymbolTable(const object::COFFImportFile *i);
-void printCOFFSymbolTable(const object::COFFObjectFile *o);
-void printMachOFileHeader(const object::ObjectFile *o);
-void printMachOLoadCommands(const object::ObjectFile *o);
-void printWasmFileHeader(const object::ObjectFile *o);
-void printExportsTrie(const object::ObjectFile *o);
-void printRebaseTable(object::ObjectFile *o);
-void printBindTable(object::ObjectFile *o);
-void printLazyBindTable(object::ObjectFile *o);
-void printWeakBindTable(object::ObjectFile *o);
-void printRawClangAST(const object::ObjectFile *o);
-void PrintRelocations(const object::ObjectFile *o);
-void PrintDynamicRelocations(const object::ObjectFile *o);
-void PrintSectionHeaders(const object::ObjectFile *o);
-void PrintSectionContents(const object::ObjectFile *o);
-void PrintSymbolTable(const object::ObjectFile *o, StringRef ArchiveName,
+void printCOFFFileHeader(const object::ObjectFile *O);
+void printCOFFSymbolTable(const object::COFFImportFile *I);
+void printCOFFSymbolTable(const object::COFFObjectFile *O);
+void printMachOFileHeader(const object::ObjectFile *O);
+void printMachOLoadCommands(const object::ObjectFile *O);
+void printWasmFileHeader(const object::ObjectFile *O);
+void printExportsTrie(const object::ObjectFile *O);
+void printRebaseTable(object::ObjectFile *O);
+void printBindTable(object::ObjectFile *O);
+void printLazyBindTable(object::ObjectFile *O);
+void printWeakBindTable(object::ObjectFile *O);
+void printRawClangAST(const object::ObjectFile *O);
+void printRelocations(const object::ObjectFile *O);
+void printDynamicRelocations(const object::ObjectFile *O);
+void printSectionHeaders(const object::ObjectFile *O);
+void printSectionContents(const object::ObjectFile *O);
+void printSymbolTable(const object::ObjectFile *O, StringRef ArchiveName,
                       StringRef ArchitectureName = StringRef());
 void warn(StringRef Message);
 LLVM_ATTRIBUTE_NORETURN void error(Twine Message);
diff --git a/tools/llvm-opt-report/OptReport.cpp b/tools/llvm-opt-report/OptReport.cpp
index 071f779..0c4bc94 100644
--- a/tools/llvm-opt-report/OptReport.cpp
+++ b/tools/llvm-opt-report/OptReport.cpp
@@ -231,13 +231,8 @@
   bool FirstFile = true;
   for (auto &FI : LocationInfo) {
     SmallString<128> FileName(FI.first);
-    if (!InputRelDir.empty()) {
-      if (std::error_code EC = sys::fs::make_absolute(InputRelDir, FileName)) {
-        WithColor::error() << "Can't resolve file path to " << FileName << ": "
-                           << EC.message() << "\n";
-        return false;
-      }
-    }
+    if (!InputRelDir.empty())
+      sys::fs::make_absolute(InputRelDir, FileName);
 
     const auto &FileInfo = FI.second;
 
diff --git a/tools/llvm-profdata/llvm-profdata.cpp b/tools/llvm-profdata/llvm-profdata.cpp
index db3c304..c25cbc2 100644
--- a/tools/llvm-profdata/llvm-profdata.cpp
+++ b/tools/llvm-profdata/llvm-profdata.cpp
@@ -633,13 +633,21 @@
         Stats.ValueSitesHistogram.resize(NV, 0);
       Stats.ValueSitesHistogram[NV - 1]++;
     }
+
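+    // Sum this site's value-profile counts so each target's share can be
+    // reported as a percentage; use 1 when the sum is zero to avoid dividing
+    // by zero below.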
+    uint64_t SiteSum = 0;
+    for (uint32_t V = 0; V < NV; V++)
+      SiteSum += VD[V].Count;
+    if (SiteSum == 0)
+      SiteSum = 1;
+
     for (uint32_t V = 0; V < NV; V++) {
-      OS << "\t[ " << I << ", ";
+      OS << "\t[ " << format("%2u", I) << ", ";
       if (Symtab == nullptr)
-        OS << VD[V].Value;
+        OS << format("%4u", VD[V].Value);
       else
         OS << Symtab->getFuncName(VD[V].Value);
-      OS << ", " << VD[V].Count << " ]\n";
+      OS << ", " << format("%10" PRId64, VD[V].Count) << " ] ("
+         << format("%.2f%%", (VD[V].Count * 100.0 / SiteSum)) << ")\n";
     }
   }
 }
@@ -662,9 +670,9 @@
                             uint32_t TopN, bool ShowIndirectCallTargets,
                             bool ShowMemOPSizes, bool ShowDetailedSummary,
                             std::vector<uint32_t> DetailedSummaryCutoffs,
-                            bool ShowAllFunctions,
-                            const std::string &ShowFunction, bool TextFormat,
-                            raw_fd_ostream &OS) {
+                            bool ShowAllFunctions, uint64_t ValueCutoff,
+                            bool OnlyListBelow, const std::string &ShowFunction,
+                            bool TextFormat, raw_fd_ostream &OS) {
   auto ReaderOrErr = InstrProfReader::create(Filename);
   std::vector<uint32_t> Cutoffs = std::move(DetailedSummaryCutoffs);
   if (ShowDetailedSummary && Cutoffs.empty()) {
@@ -677,6 +685,7 @@
   auto Reader = std::move(ReaderOrErr.get());
   bool IsIRInstr = Reader->isIRLevelProfile();
   size_t ShownFunctions = 0;
+  size_t BelowCutoffFunctions = 0;
   int NumVPKind = IPVK_Last - IPVK_First + 1;
   std::vector<ValueSitesStats> VPStats(NumVPKind);
 
@@ -690,6 +699,11 @@
                       decltype(MinCmp)>
       HottestFuncs(MinCmp);
 
+  if (!TextFormat && OnlyListBelow) {
+    OS << "The list of functions with the maximum counter less than "
+       << ValueCutoff << ":\n";
+  }
+
   // Add marker so that IR-level instrumentation round-trips properly.
   if (TextFormat && IsIRInstr)
     OS << ":ir\n";
@@ -711,11 +725,24 @@
     assert(Func.Counts.size() > 0 && "function missing entry counter");
     Builder.addRecord(Func);
 
-    if (TopN) {
-      uint64_t FuncMax = 0;
-      for (size_t I = 0, E = Func.Counts.size(); I < E; ++I)
-        FuncMax = std::max(FuncMax, Func.Counts[I]);
+    uint64_t FuncMax = 0;
+    uint64_t FuncSum = 0;
+    for (size_t I = 0, E = Func.Counts.size(); I < E; ++I) {
+      FuncMax = std::max(FuncMax, Func.Counts[I]);
+      FuncSum += Func.Counts[I];
+    }
 
+    if (FuncMax < ValueCutoff) {
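+      // The function's hottest counter is below the cutoff: tally it, list it
+      // under -list-below-cutoff, and skip it from the normal output.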
+      ++BelowCutoffFunctions;
+      if (OnlyListBelow) {
+        OS << "  " << Func.Name << ": (Max = " << FuncMax
+           << " Sum = " << FuncSum << ")\n";
+      }
+      continue;
+    } else if (OnlyListBelow)
+      continue;
+
+    if (TopN) {
       if (HottestFuncs.size() == TopN) {
         if (HottestFuncs.top().second < FuncMax) {
           HottestFuncs.pop();
@@ -726,7 +753,6 @@
     }
 
     if (Show) {
-
       if (!ShownFunctions)
         OS << "Counters:\n";
 
@@ -781,6 +807,12 @@
   if (ShowAllFunctions || !ShowFunction.empty())
     OS << "Functions shown: " << ShownFunctions << "\n";
   OS << "Total functions: " << PS->getNumFunctions() << "\n";
+  if (ValueCutoff > 0) {
+    OS << "Number of functions with maximum count (< " << ValueCutoff
+       << "): " << BelowCutoffFunctions << "\n";
+    OS << "Number of functions with maximum count (>= " << ValueCutoff
+       << "): " << PS->getNumFunctions() - BelowCutoffFunctions << "\n";
+  }
   OS << "Maximum function count: " << PS->getMaxFunctionCount() << "\n";
   OS << "Maximum internal block count: " << PS->getMaxInternalCount() << "\n";
 
@@ -882,7 +914,14 @@
   cl::opt<uint32_t> TopNFunctions(
       "topn", cl::init(0),
       cl::desc("Show the list of functions with the largest internal counts"));
-
+  cl::opt<uint32_t> ValueCutoff(
+      "value-cutoff", cl::init(0),
+      cl::desc("Set the count value cutoff. Functions with the maximum count "
+               "less than this value will not be printed out. (Default is 0)"));
+  cl::opt<bool> OnlyListBelow(
+      "list-below-cutoff", cl::init(false),
+      cl::desc("Only output names of functions whose max count values are "
+               "below the cutoff value"));
   cl::ParseCommandLineOptions(argc, argv, "LLVM profile data summary\n");
 
   if (OutputFilename.empty())
@@ -902,7 +941,8 @@
     return showInstrProfile(Filename, ShowCounts, TopNFunctions,
                             ShowIndirectCallTargets, ShowMemOPSizes,
                             ShowDetailedSummary, DetailedSummaryCutoffs,
-                            ShowAllFunctions, ShowFunction, TextFormat, OS);
+                            ShowAllFunctions, ValueCutoff, OnlyListBelow,
+                            ShowFunction, TextFormat, OS);
   else
     return showSampleProfile(Filename, ShowCounts, ShowAllFunctions,
                              ShowFunction, OS);
diff --git a/tools/llvm-rc/llvm-rc.cpp b/tools/llvm-rc/llvm-rc.cpp
index 4511c5c..54997e9 100644
--- a/tools/llvm-rc/llvm-rc.cpp
+++ b/tools/llvm-rc/llvm-rc.cpp
@@ -31,6 +31,7 @@
 #include "llvm/Support/Signals.h"
 #include "llvm/Support/raw_ostream.h"
 
+#include <algorithm>
 #include <system_error>
 
 using namespace llvm;
@@ -85,7 +86,10 @@
 
   RcOptTable T;
   unsigned MAI, MAC;
-  ArrayRef<const char *> ArgsArr = makeArrayRef(Argv + 1, Argc - 1);
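+  // Treat a bare "--" as the end of the options; anything after it is taken
+  // as an input file name rather than parsed as a flag (see below).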
+  const char **DashDash = std::find_if(
+      Argv + 1, Argv + Argc, [](StringRef Str) { return Str == "--"; });
+  ArrayRef<const char *> ArgsArr = makeArrayRef(Argv + 1, DashDash);
+
   opt::InputArgList InputArgs = T.ParseArgs(ArgsArr, MAI, MAC);
 
   // The tool prints nothing when invoked with no command-line arguments.
@@ -97,6 +101,8 @@
   const bool BeVerbose = InputArgs.hasArg(OPT_VERBOSE);
 
   std::vector<std::string> InArgsInfo = InputArgs.getAllArgValues(OPT_INPUT);
+  if (DashDash != Argv + Argc)
+    InArgsInfo.insert(InArgsInfo.end(), DashDash + 1, Argv + Argc);
   if (InArgsInfo.size() != 1) {
     fatalError("Exactly one input file should be provided.");
   }
diff --git a/tools/llvm-readobj/COFFDumper.cpp b/tools/llvm-readobj/COFFDumper.cpp
index 3665491..3e2626d 100644
--- a/tools/llvm-readobj/COFFDumper.cpp
+++ b/tools/llvm-readobj/COFFDumper.cpp
@@ -1248,9 +1248,9 @@
         error(object_error::parse_failed);
       }
       SmallVector<TypeIndex, 128> SourceToDest;
-      Optional<EndPrecompRecord> EndPrecomp;
+      Optional<uint32_t> PCHSignature;
       if (auto EC = mergeTypeAndIdRecords(CVIDs, CVTypes, SourceToDest, Types,
-                                          EndPrecomp))
+                                          PCHSignature))
         return error(std::move(EC));
     }
   }
@@ -1365,10 +1365,12 @@
   StringRef SymbolName;
   Reloc.getTypeName(RelocName);
   symbol_iterator Symbol = Reloc.getSymbol();
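+  // Also report the relocation's symbol table index; -1 marks relocations
+  // with no associated symbol.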
+  int64_t SymbolIndex = -1;
   if (Symbol != Obj->symbol_end()) {
     Expected<StringRef> SymbolNameOrErr = Symbol->getName();
     error(errorToErrorCode(SymbolNameOrErr.takeError()));
     SymbolName = *SymbolNameOrErr;
+    SymbolIndex = Obj->getSymbolIndex(Obj->getCOFFSymbol(*Symbol));
   }
 
   if (opts::ExpandRelocs) {
@@ -1376,11 +1378,13 @@
     W.printHex("Offset", Offset);
     W.printNumber("Type", RelocName, RelocType);
     W.printString("Symbol", SymbolName.empty() ? "-" : SymbolName);
+    W.printNumber("SymbolIndex", SymbolIndex);
   } else {
     raw_ostream& OS = W.startLine();
     OS << W.hex(Offset)
        << " " << RelocName
        << " " << (SymbolName.empty() ? "-" : SymbolName)
+       << " (" << SymbolIndex << ")"
        << "\n";
   }
 }
diff --git a/tools/llvm-readobj/DwarfCFIEHPrinter.h b/tools/llvm-readobj/DwarfCFIEHPrinter.h
index 5a1eef1..d91d764 100644
--- a/tools/llvm-readobj/DwarfCFIEHPrinter.h
+++ b/tools/llvm-readobj/DwarfCFIEHPrinter.h
@@ -16,6 +16,7 @@
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/Object/ELF.h"
 #include "llvm/Object/ELFTypes.h"
+#include "llvm/Object/ELFObjectFile.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ScopedPrinter.h"
 #include "llvm/Support/Debug.h"
@@ -31,15 +32,15 @@
 template <typename ELFT>
 class PrinterContext {
   ScopedPrinter &W;
-  const object::ELFFile<ELFT> *Obj;
+  const object::ELFObjectFile<ELFT> *ObjF;
 
   void printEHFrameHdr(uint64_t Offset, uint64_t Address, uint64_t Size) const;
 
   void printEHFrame(const typename ELFT::Shdr *EHFrameShdr) const;
 
 public:
-  PrinterContext(ScopedPrinter &W, const object::ELFFile<ELFT> *Obj)
-      : W(W), Obj(Obj) {}
+  PrinterContext(ScopedPrinter &W, const object::ELFObjectFile<ELFT> *ObjF)
+      : W(W), ObjF(ObjF) {}
 
   void printUnwindInformation() const;
 };
@@ -59,6 +60,7 @@
 
 template <typename ELFT>
 void PrinterContext<ELFT>::printUnwindInformation() const {
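+  // The context now holds an ELFObjectFile; fetch the underlying ELFFile
+  // locally wherever the raw ELF headers are needed.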
+  const object::ELFFile<ELFT> *Obj = ObjF->getELFFile();
   const typename ELFT::Phdr *EHFramePhdr = nullptr;
 
   auto PHs = Obj->program_headers();
@@ -101,6 +103,7 @@
   W.startLine() << format("Offset: 0x%" PRIx64 "\n", EHFrameHdrOffset);
   W.startLine() << format("Size: 0x%" PRIx64 "\n", EHFrameHdrSize);
 
+  const object::ELFFile<ELFT> *Obj = ObjF->getELFFile();
   const auto *EHFrameHdrShdr = findSectionByAddress(Obj, EHFrameHdrAddress);
   if (EHFrameHdrShdr) {
     auto SectionName = Obj->getSectionName(EHFrameHdrShdr);
@@ -173,6 +176,7 @@
                           ShOffset, Address);
   W.indent();
 
+  const object::ELFFile<ELFT> *Obj = ObjF->getELFFile();
   auto Result = Obj->getSectionContents(EHFrameShdr);
   if (Error E = Result.takeError())
     reportError(toString(std::move(E)));
@@ -183,7 +187,8 @@
                 Contents.size()),
       ELFT::TargetEndianness == support::endianness::little,
       ELFT::Is64Bits ? 8 : 4);
-  DWARFDebugFrame EHFrame(/*IsEH=*/true, /*EHFrameAddress=*/Address);
+  DWARFDebugFrame EHFrame(Triple::ArchType(ObjF->getArch()), /*IsEH=*/true,
+                          /*EHFrameAddress=*/Address);
   EHFrame.parse(DE);
 
   for (const auto &Entry : EHFrame) {
diff --git a/tools/llvm-readobj/ELFDumper.cpp b/tools/llvm-readobj/ELFDumper.cpp
index 8e2b43c..9325471 100644
--- a/tools/llvm-readobj/ELFDumper.cpp
+++ b/tools/llvm-readobj/ELFDumper.cpp
@@ -141,7 +141,7 @@
 template<typename ELFT>
 class ELFDumper : public ObjDumper {
 public:
-  ELFDumper(const ELFFile<ELFT> *Obj, ScopedPrinter &Writer);
+  ELFDumper(const object::ELFObjectFile<ELFT> *ObjF, ScopedPrinter &Writer);
 
   void printFileHeaders() override;
   void printSectionHeaders() override;
@@ -183,6 +183,7 @@
   TYPEDEF_ELF_TYPES(ELFT)
 
   DynRegionInfo checkDRI(DynRegionInfo DRI) {
+    const ELFFile<ELFT> *Obj = ObjF->getELFFile();
     if (DRI.Addr < Obj->base() ||
         (const uint8_t *)DRI.Addr + DRI.Size > Obj->base() + Obj->getBufSize())
       error(llvm::object::object_error::parse_failed);
@@ -190,11 +191,11 @@
   }
 
   DynRegionInfo createDRIFrom(const Elf_Phdr *P, uintX_t EntSize) {
-    return checkDRI({Obj->base() + P->p_offset, P->p_filesz, EntSize});
+    return checkDRI({ObjF->getELFFile()->base() + P->p_offset, P->p_filesz, EntSize});
   }
 
   DynRegionInfo createDRIFrom(const Elf_Shdr *S) {
-    return checkDRI({Obj->base() + S->sh_offset, S->sh_size, S->sh_entsize});
+    return checkDRI({ObjF->getELFFile()->base() + S->sh_offset, S->sh_size, S->sh_entsize});
   }
 
   void parseDynamicTable(ArrayRef<const Elf_Phdr *> LoadSegments);
@@ -208,7 +209,7 @@
   void LoadVersionNeeds(const Elf_Shdr *ec) const;
   void LoadVersionDefs(const Elf_Shdr *sec) const;
 
-  const ELFO *Obj;
+  const object::ELFObjectFile<ELFT> *ObjF;
   DynRegionInfo DynRelRegion;
   DynRegionInfo DynRelaRegion;
   DynRegionInfo DynRelrRegion;
@@ -291,6 +292,7 @@
   StringRef StrTable, SymtabName;
   size_t Entries = 0;
   Elf_Sym_Range Syms(nullptr, nullptr);
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   if (IsDynamic) {
     StrTable = DynamicStringTable;
     Syms = dynamic_symbols();
@@ -480,7 +482,7 @@
 namespace llvm {
 
 template <class ELFT>
-static std::error_code createELFDumper(const ELFFile<ELFT> *Obj,
+static std::error_code createELFDumper(const ELFObjectFile<ELFT> *Obj,
                                        ScopedPrinter &Writer,
                                        std::unique_ptr<ObjDumper> &Result) {
   Result.reset(new ELFDumper<ELFT>(Obj, Writer));
@@ -492,19 +494,19 @@
                                 std::unique_ptr<ObjDumper> &Result) {
   // Little-endian 32-bit
   if (const ELF32LEObjectFile *ELFObj = dyn_cast<ELF32LEObjectFile>(Obj))
-    return createELFDumper(ELFObj->getELFFile(), Writer, Result);
+    return createELFDumper(ELFObj, Writer, Result);
 
   // Big-endian 32-bit
   if (const ELF32BEObjectFile *ELFObj = dyn_cast<ELF32BEObjectFile>(Obj))
-    return createELFDumper(ELFObj->getELFFile(), Writer, Result);
+    return createELFDumper(ELFObj, Writer, Result);
 
   // Little-endian 64-bit
   if (const ELF64LEObjectFile *ELFObj = dyn_cast<ELF64LEObjectFile>(Obj))
-    return createELFDumper(ELFObj->getELFFile(), Writer, Result);
+    return createELFDumper(ELFObj, Writer, Result);
 
   // Big-endian 64-bit
   if (const ELF64BEObjectFile *ELFObj = dyn_cast<ELF64BEObjectFile>(Obj))
-    return createELFDumper(ELFObj->getELFFile(), Writer, Result);
+    return createELFDumper(ELFObj, Writer, Result);
 
   return readobj_error::unsupported_obj_file_format;
 }
@@ -517,7 +519,7 @@
 void ELFDumper<ELFT>::LoadVersionNeeds(const Elf_Shdr *sec) const {
   unsigned vn_size = sec->sh_size;  // Size of section in bytes
   unsigned vn_count = sec->sh_info; // Number of Verneed entries
-  const char *sec_start = (const char *)Obj->base() + sec->sh_offset;
+  const char *sec_start = (const char *)ObjF->getELFFile()->base() + sec->sh_offset;
   const char *sec_end = sec_start + vn_size;
   // The first Verneed entry is at the start of the section.
   const char *p = sec_start;
@@ -551,7 +553,7 @@
 void ELFDumper<ELFT>::LoadVersionDefs(const Elf_Shdr *sec) const {
   unsigned vd_size = sec->sh_size;  // Size of section in bytes
   unsigned vd_count = sec->sh_info; // Number of Verdef entries
-  const char *sec_start = (const char *)Obj->base() + sec->sh_offset;
+  const char *sec_start = (const char *)ObjF->getELFFile()->base() + sec->sh_offset;
   const char *sec_end = sec_start + vd_size;
   // The first Verdef entry is at the start of the section.
   const char *p = sec_start;
@@ -576,7 +578,7 @@
     return;
 
   // Has the VersionMap already been loaded?
-  if (VersionMap.size() > 0)
+  if (!VersionMap.empty())
     return;
 
   // The first two version indexes are reserved.
@@ -735,13 +737,13 @@
 
 template <typename ELFT> void ELFDumper<ELFT>::printVersionInfo() {
   // Dump version symbol section.
-  printVersionSymbolSection(this, Obj, dot_gnu_version_sec, W);
+  printVersionSymbolSection(this, ObjF->getELFFile(), dot_gnu_version_sec, W);
 
   // Dump version definition section.
-  printVersionDefinitionSection(this, Obj, dot_gnu_version_d_sec, W);
+  printVersionDefinitionSection(this, ObjF->getELFFile(), dot_gnu_version_d_sec, W);
 
   // Dump version dependency section.
-  printVersionDependencySection(this, Obj, dot_gnu_version_r_sec, W);
+  printVersionDependencySection(this, ObjF->getELFFile(), dot_gnu_version_r_sec, W);
 }
 
 template <typename ELFT>
@@ -762,7 +764,7 @@
 
   // Get the corresponding version index entry
   const Elf_Versym *vs = unwrapOrError(
-      Obj->template getEntry<Elf_Versym>(dot_gnu_version_sec, entry_index));
+      ObjF->getELFFile()->template getEntry<Elf_Versym>(dot_gnu_version_sec, entry_index));
   size_t version_index = vs->vs_index & ELF::VERSYM_VERSION;
 
   // Special markers for unversioned symbols.
@@ -795,6 +797,7 @@
 
 template <typename ELFT>
 StringRef ELFDumper<ELFT>::getStaticSymbolName(uint32_t Index) const {
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   StringRef StrTable = unwrapOrError(Obj->getStringTableForSymtab(*DotSymtabSec));
   Elf_Sym_Range Syms = unwrapOrError(Obj->symbols(DotSymtabSec));
   if (Index >= Syms.size())
@@ -815,8 +818,10 @@
 
   bool IsDefault;
   StringRef Version = getSymbolVersion(StrTable, &*Symbol, IsDefault);
-  FullSymbolName += (IsDefault ? "@@" : "@");
-  FullSymbolName += Version;
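+  // Only append the "@"/"@@" decoration when the symbol actually carries
+  // version information.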
+  if (!Version.empty()) {
+    FullSymbolName += (IsDefault ? "@@" : "@");
+    FullSymbolName += Version;
+  }
   return FullSymbolName;
 }
 
@@ -842,6 +847,7 @@
     if (SectionIndex == SHN_XINDEX)
       SectionIndex = unwrapOrError(object::getExtendedSymbolTableIndex<ELFT>(
           Symbol, FirstSym, ShndxTable));
+    const ELFFile<ELFT> *Obj = ObjF->getELFFile();
     const typename ELFT::Shdr *Sec =
         unwrapOrError(Obj->getSection(SectionIndex));
     SectionName = unwrapOrError(Obj->getSectionName(Sec));
@@ -1414,9 +1420,11 @@
 }
 
 template <typename ELFT>
-ELFDumper<ELFT>::ELFDumper(const ELFFile<ELFT> *Obj, ScopedPrinter &Writer)
-    : ObjDumper(Writer), Obj(Obj) {
+ELFDumper<ELFT>::ELFDumper(const object::ELFObjectFile<ELFT> *ObjF,
+    ScopedPrinter &Writer)
+    : ObjDumper(Writer), ObjF(ObjF) {
   SmallVector<const Elf_Phdr *, 4> LoadSegments;
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   for (const Elf_Phdr &Phdr : unwrapOrError(Obj->program_headers())) {
     if (Phdr.p_type == ELF::PT_DYNAMIC) {
       DynamicTable = createDRIFrom(&Phdr, sizeof(Elf_Dyn));
@@ -1485,7 +1493,7 @@
 void ELFDumper<ELFT>::parseDynamicTable(
     ArrayRef<const Elf_Phdr *> LoadSegments) {
   auto toMappedAddr = [&](uint64_t VAddr) -> const uint8_t * {
-    auto MappedAddrOrError = Obj->toMappedAddr(VAddr);
+    auto MappedAddrOrError = ObjF->getELFFile()->toMappedAddr(VAddr);
     if (!MappedAddrOrError)
       report_fatal_error(MappedAddrOrError.takeError());
     return MappedAddrOrError.get();
@@ -1587,50 +1595,51 @@
 
 template<class ELFT>
 void ELFDumper<ELFT>::printFileHeaders() {
-  ELFDumperStyle->printFileHeaders(Obj);
+  ELFDumperStyle->printFileHeaders(ObjF->getELFFile());
 }
 
-template <class ELFT> void ELFDumper<ELFT>::printSectionHeaders() {
-  ELFDumperStyle->printSectionHeaders(Obj);
+template<class ELFT>
+void ELFDumper<ELFT>::printSectionHeaders() {
+  ELFDumperStyle->printSectionHeaders(ObjF->getELFFile());
 }
 
 template<class ELFT>
 void ELFDumper<ELFT>::printRelocations() {
-  ELFDumperStyle->printRelocations(Obj);
+  ELFDumperStyle->printRelocations(ObjF->getELFFile());
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printProgramHeaders() {
-  ELFDumperStyle->printProgramHeaders(Obj);
+  ELFDumperStyle->printProgramHeaders(ObjF->getELFFile());
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printDynamicRelocations() {
-  ELFDumperStyle->printDynamicRelocations(Obj);
+  ELFDumperStyle->printDynamicRelocations(ObjF->getELFFile());
 }
 
 template<class ELFT>
 void ELFDumper<ELFT>::printSymbols() {
-  ELFDumperStyle->printSymbols(Obj);
+  ELFDumperStyle->printSymbols(ObjF->getELFFile());
 }
 
 template<class ELFT>
 void ELFDumper<ELFT>::printDynamicSymbols() {
-  ELFDumperStyle->printDynamicSymbols(Obj);
+  ELFDumperStyle->printDynamicSymbols(ObjF->getELFFile());
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printHashHistogram() {
-  ELFDumperStyle->printHashHistogram(Obj);
+  ELFDumperStyle->printHashHistogram(ObjF->getELFFile());
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printCGProfile() {
-  ELFDumperStyle->printCGProfile(Obj);
+  ELFDumperStyle->printCGProfile(ObjF->getELFFile());
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printNotes() {
-  ELFDumperStyle->printNotes(Obj);
+  ELFDumperStyle->printNotes(ObjF->getELFFile());
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printELFLinkerOptions() {
-  ELFDumperStyle->printELFLinkerOptions(Obj);
+  ELFDumperStyle->printELFLinkerOptions(ObjF->getELFFile());
 }
 
 static const char *getTypeString(unsigned Arch, uint64_t Type) {
@@ -1874,9 +1883,9 @@
 
 template<class ELFT>
 void ELFDumper<ELFT>::printUnwindInfo() {
-  const unsigned Machine = Obj->getHeader()->e_machine;
+  const unsigned Machine = ObjF->getELFFile()->getHeader()->e_machine;
   if (Machine == EM_386 || Machine == EM_X86_64) {
-    DwarfCFIEH::PrinterContext<ELFT> Ctx(W, Obj);
+    DwarfCFIEH::PrinterContext<ELFT> Ctx(W, ObjF);
     return Ctx.printUnwindInformation();
   }
   W.startLine() << "UnwindInfo not implemented.\n";
@@ -1885,6 +1894,7 @@
 namespace {
 
 template <> void ELFDumper<ELF32LE>::printUnwindInfo() {
+  const ELFFile<ELF32LE> *Obj = ObjF->getELFFile();
   const unsigned Machine = Obj->getHeader()->e_machine;
   if (Machine == EM_ARM) {
     ARM::EHABI::PrinterContext<ELF32LE> Ctx(W, Obj, DotSymtabSec);
@@ -1927,7 +1937,7 @@
     uintX_t Tag = Entry.getTag();
     ++I;
     W.startLine() << "  " << format_hex(Tag, Is64 ? 18 : 10, opts::Output != opts::GNU) << " "
-                  << format("%-21s", getTypeString(Obj->getHeader()->e_machine, Tag));
+                  << format("%-21s", getTypeString(ObjF->getELFFile()->getHeader()->e_machine, Tag));
     printValue(Tag, Entry.getVal());
     OS << "\n";
   }
@@ -1994,6 +2004,7 @@
 namespace {
 
 template <> void ELFDumper<ELF32LE>::printAttributes() {
+  const ELFFile<ELF32LE> *Obj = ObjF->getELFFile();
   if (Obj->getHeader()->e_machine != EM_ARM) {
     W.startLine() << "Attributes not implemented.\n";
     return;
@@ -2279,6 +2290,7 @@
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printMipsPLTGOT() {
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   if (Obj->getHeader()->e_machine != EM_MIPS)
     reportError("MIPS PLT GOT is available for MIPS targets only");
 
@@ -2363,6 +2375,7 @@
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printMipsABIFlags() {
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   const Elf_Shdr *Shdr = findSectionByName(*Obj, ".MIPS.abiflags");
   if (!Shdr) {
     W.startLine() << "There is no .MIPS.abiflags section in the file.\n";
@@ -2408,6 +2421,7 @@
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printMipsReginfo() {
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   const Elf_Shdr *Shdr = findSectionByName(*Obj, ".reginfo");
   if (!Shdr) {
     W.startLine() << "There is no .reginfo section in the file.\n";
@@ -2425,6 +2439,7 @@
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printMipsOptions() {
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   const Elf_Shdr *Shdr = findSectionByName(*Obj, ".MIPS.options");
   if (!Shdr) {
     W.startLine() << "There is no .MIPS.options section in the file.\n";
@@ -2454,6 +2469,7 @@
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printStackMap() const {
+  const ELFFile<ELFT> *Obj = ObjF->getELFFile();
   const Elf_Shdr *StackMapSection = nullptr;
   for (const auto &Sec : unwrapOrError(Obj->sections())) {
     StringRef Name = unwrapOrError(Obj->getSectionName(&Sec));
@@ -2474,11 +2490,11 @@
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printGroupSections() {
-  ELFDumperStyle->printGroupSections(Obj);
+  ELFDumperStyle->printGroupSections(ObjF->getELFFile());
 }
 
 template <class ELFT> void ELFDumper<ELFT>::printAddrsig() {
-  ELFDumperStyle->printAddrsig(Obj);
+  ELFDumperStyle->printAddrsig(ObjF->getELFFile());
 }
 
 static inline void printFields(formatted_raw_ostream &OS, StringRef Str1,
diff --git a/tools/llvm-readobj/llvm-readobj.cpp b/tools/llvm-readobj/llvm-readobj.cpp
index ffc5845..81ce7a5 100644
--- a/tools/llvm-readobj/llvm-readobj.cpp
+++ b/tools/llvm-readobj/llvm-readobj.cpp
@@ -92,26 +92,20 @@
                                 cl::desc("Alias for --section-headers"),
                                 cl::aliasopt(SectionHeaders), cl::NotHidden);
 
-  // -section-relocations, -sr
+  // -section-relocations
+  // Also -sr in llvm-readobj mode.
   cl::opt<bool> SectionRelocations("section-relocations",
     cl::desc("Display relocations for each section shown."));
-  cl::alias SectionRelocationsShort("sr",
-    cl::desc("Alias for --section-relocations"),
-    cl::aliasopt(SectionRelocations));
 
-  // -section-symbols, -st
+  // -section-symbols
+  // Also -st in llvm-readobj mode.
   cl::opt<bool> SectionSymbols("section-symbols",
     cl::desc("Display symbols for each section shown."));
-  cl::alias SectionSymbolsShort("st",
-    cl::desc("Alias for --section-symbols"),
-    cl::aliasopt(SectionSymbols));
 
-  // -section-data, -sd
+  // -section-data
+  // Also -sd in llvm-readobj mode.
   cl::opt<bool> SectionData("section-data",
     cl::desc("Display section data for each section shown."));
-  cl::alias SectionDataShort("sd",
-    cl::desc("Alias for --section-data"),
-    cl::aliasopt(SectionData));
 
   // -relocations, -relocs, -r
   cl::opt<bool> Relocations("relocations",
@@ -136,12 +130,10 @@
   cl::alias SymbolsGNU("syms", cl::desc("Alias for --symbols"),
                        cl::aliasopt(Symbols));
 
-  // -dyn-symbols, -dyn-syms, -dt
+  // -dyn-symbols, -dyn-syms
+  // Also -dt in llvm-readobj mode.
   cl::opt<bool> DynamicSymbols("dyn-symbols",
     cl::desc("Display the dynamic symbol table"));
-  cl::alias DynamicSymbolsShort("dt",
-    cl::desc("Alias for --dyn-symbols"),
-    cl::aliasopt(DynamicSymbols));
   cl::alias DynSymsGNU("dyn-syms", cl::desc("Alias for --dyn-symbols"),
                        cl::aliasopt(DynamicSymbols));
 
@@ -636,13 +628,36 @@
   // --section-details (not implemented yet).
   static cl::alias SymbolsShort("t", cl::desc("Alias for --symbols"),
                                 cl::aliasopt(opts::Symbols), cl::NotHidden);
+
+  // The following two-letter aliases are only provided for readobj, as readelf
+  // allows single-letter args to be grouped together.
+  static cl::alias SectionRelocationsShort(
+      "sr", cl::desc("Alias for --section-relocations"),
+      cl::aliasopt(opts::SectionRelocations));
+  static cl::alias SectionDataShort("sd", cl::desc("Alias for --section-data"),
+                                    cl::aliasopt(opts::SectionData));
+  static cl::alias SectionSymbolsShort("st",
+                                       cl::desc("Alias for --section-symbols"),
+                                       cl::aliasopt(opts::SectionSymbols));
+  static cl::alias DynamicSymbolsShort("dt",
+                                       cl::desc("Alias for --dyn-symbols"),
+                                       cl::aliasopt(opts::DynamicSymbols));
 }
 
 /// Registers aliases that should only be allowed by readelf.
 static void registerReadelfAliases() {
   // -s is here because for readobj it means --sections.
   static cl::alias SymbolsShort("s", cl::desc("Alias for --symbols"),
-                                cl::aliasopt(opts::Symbols), cl::NotHidden);
+                                cl::aliasopt(opts::Symbols), cl::NotHidden,
+                                cl::Grouping);
+
+  // Allow all single letter flags to be grouped together.
+  for (auto &OptEntry : cl::getRegisteredOptions()) {
+    StringRef ArgName = OptEntry.getKey();
+    cl::Option *Option = OptEntry.getValue();
+    if (ArgName.size() == 1)
+      Option->setFormattingFlag(cl::Grouping);
+  }
 }
 
 int main(int argc, const char *argv[]) {
@@ -681,7 +696,7 @@
   }
 
   // Default to stdin if no filename is specified.
-  if (opts::InputFilenames.size() == 0)
+  if (opts::InputFilenames.empty())
     opts::InputFilenames.push_back("-");
 
   llvm::for_each(opts::InputFilenames, dumpInput);
diff --git a/tools/llvm-size/llvm-size.cpp b/tools/llvm-size/llvm-size.cpp
index be1e5bb..5d63844 100644
--- a/tools/llvm-size/llvm-size.cpp
+++ b/tools/llvm-size/llvm-size.cpp
@@ -578,7 +578,7 @@
   } else if (MachOUniversalBinary *UB =
                  dyn_cast<MachOUniversalBinary>(&Bin)) {
     // If we have a list of architecture flags specified dump only those.
-    if (!ArchAll && ArchFlags.size() != 0) {
+    if (!ArchAll && !ArchFlags.empty()) {
       // Look for a slice in the universal binary that matches each ArchFlag.
       bool ArchFound;
       for (unsigned i = 0; i < ArchFlags.size(); ++i) {
diff --git a/tools/llvm-stress/llvm-stress.cpp b/tools/llvm-stress/llvm-stress.cpp
index d8ec112..c29b7a7 100644
--- a/tools/llvm-stress/llvm-stress.cpp
+++ b/tools/llvm-stress/llvm-stress.cpp
@@ -356,8 +356,8 @@
   void Act() override {
-    // Try to use predefined pointers. If non-exist, use undef pointer value;
+    // Try to use predefined pointers. If none exist, use an undef pointer value.
     Value *Ptr = getRandomPointerValue();
-    Type  *Tp = Ptr->getType();
-    Value *Val = getRandomValue(Tp->getContainedType(0));
+    PointerType *Tp = cast<PointerType>(Ptr->getType());
+    Value *Val = getRandomValue(Tp->getElementType());
     Type  *ValTy = Val->getType();
 
     // Do not store vectors of i1s because they are unsupported
diff --git a/tools/llvm-symbolizer/llvm-symbolizer.cpp b/tools/llvm-symbolizer/llvm-symbolizer.cpp
index 6d40a54..9d19f99 100644
--- a/tools/llvm-symbolizer/llvm-symbolizer.cpp
+++ b/tools/llvm-symbolizer/llvm-symbolizer.cpp
@@ -55,17 +55,29 @@
     ClPrintInlining("inlining", cl::init(true),
                     cl::desc("Print all inlined frames for a given address"));
 
+// -demangle, -C
 static cl::opt<bool>
 ClDemangle("demangle", cl::init(true), cl::desc("Demangle function names"));
+static cl::alias
+ClDemangleShort("C", cl::desc("Alias for -demangle"),
+                cl::NotHidden, cl::aliasopt(ClDemangle));
 
 static cl::opt<std::string> ClDefaultArch("default-arch", cl::init(""),
                                           cl::desc("Default architecture "
                                                    "(for multi-arch objects)"));
 
+// -obj, -exe, -e
 static cl::opt<std::string>
 ClBinaryName("obj", cl::init(""),
              cl::desc("Path to object file to be symbolized (if not provided, "
                       "object file should be specified for each input line)"));
+static cl::alias
+ClBinaryNameAliasExe("exe", cl::desc("Alias for -obj"),
+                     cl::NotHidden, cl::aliasopt(ClBinaryName));
+static cl::alias
+ClBinaryNameAliasE("e", cl::desc("Alias for -obj"),
+                   cl::NotHidden, cl::aliasopt(ClBinaryName));
 
 static cl::opt<std::string>
     ClDwpName("dwp", cl::init(""),
@@ -75,13 +87,25 @@
 ClDsymHint("dsym-hint", cl::ZeroOrMore,
            cl::desc("Path to .dSYM bundles to search for debug info for the "
                     "object files"));
-static cl::opt<bool>
-    ClPrintAddress("print-address", cl::init(false),
-                   cl::desc("Show address before line information"));
 
+// -print-address, -addresses, -a
+static cl::opt<bool>
+ClPrintAddress("print-address", cl::init(false),
+               cl::desc("Show address before line information"));
+static cl::alias
+ClPrintAddressAliasAddresses("addresses", cl::desc("Alias for -print-address"),
+                             cl::NotHidden, cl::aliasopt(ClPrintAddress));
+static cl::alias
+ClPrintAddressAliasA("a", cl::desc("Alias for -print-address"),
+                     cl::NotHidden, cl::aliasopt(ClPrintAddress));
+
+// -pretty-print, -p
 static cl::opt<bool>
     ClPrettyPrint("pretty-print", cl::init(false),
                   cl::desc("Make the output more human friendly"));
+static cl::alias ClPrettyPrintShort("p", cl::desc("Alias for -pretty-print"),
+                                    cl::NotHidden,
+                                    cl::aliasopt(ClPrettyPrint));
 
 static cl::opt<int> ClPrintSourceContextLines(
     "print-source-context-lines", cl::init(0),
@@ -90,6 +114,10 @@
 static cl::opt<bool> ClVerbose("verbose", cl::init(false),
                                cl::desc("Print verbose line info"));
 
+static cl::list<std::string> ClInputAddresses(cl::Positional,
+                                              cl::desc("<input addresses>..."),
+                                              cl::ZeroOrMore);
+
 template<typename T>
 static bool error(Expected<T> &ResOrErr) {
   if (ResOrErr)
@@ -137,6 +165,38 @@
   return !StringRef(pos, offset_length).getAsInteger(0, ModuleOffset);
 }
 
+static void symbolizeInput(StringRef InputString, LLVMSymbolizer &Symbolizer,
+                           DIPrinter &Printer) {
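+  // One unit of work shared by the stdin loop and the positional-argument
+  // mode: parse the command, then print address, data, inlined frames, or
+  // plain line info according to the flags.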
+  bool IsData = false;
+  std::string ModuleName;
+  uint64_t ModuleOffset = 0;
+  if (!parseCommand(StringRef(InputString), IsData, ModuleName, ModuleOffset)) {
+    outs() << InputString;
+    return;
+  }
+
+  if (ClPrintAddress) {
+    outs() << "0x";
+    outs().write_hex(ModuleOffset);
+    StringRef Delimiter = ClPrettyPrint ? ": " : "\n";
+    outs() << Delimiter;
+  }
+  if (IsData) {
+    auto ResOrErr = Symbolizer.symbolizeData(ModuleName, ModuleOffset);
+    Printer << (error(ResOrErr) ? DIGlobal() : ResOrErr.get());
+  } else if (ClPrintInlining) {
+    auto ResOrErr =
+        Symbolizer.symbolizeInlinedCode(ModuleName, ModuleOffset, ClDwpName);
+    Printer << (error(ResOrErr) ? DIInliningInfo() : ResOrErr.get());
+  } else {
+    auto ResOrErr =
+        Symbolizer.symbolizeCode(ModuleName, ModuleOffset, ClDwpName);
+    Printer << (error(ResOrErr) ? DILineInfo() : ResOrErr.get());
+  }
+  outs() << "\n";
+  outs().flush();
+}
+
 int main(int argc, char **argv) {
   InitLLVM X(argc, argv);
 
@@ -159,43 +219,15 @@
   DIPrinter Printer(outs(), ClPrintFunctions != FunctionNameKind::None,
                     ClPrettyPrint, ClPrintSourceContextLines, ClVerbose);
 
-  const int kMaxInputStringLength = 1024;
-  char InputString[kMaxInputStringLength];
+  if (ClInputAddresses.empty()) {
+    const int kMaxInputStringLength = 1024;
+    char InputString[kMaxInputStringLength];
 
-  while (true) {
-    if (!fgets(InputString, sizeof(InputString), stdin))
-      break;
-
-    bool IsData = false;
-    std::string ModuleName;
-    uint64_t ModuleOffset = 0;
-    if (!parseCommand(StringRef(InputString), IsData, ModuleName,
-                      ModuleOffset)) {
-      outs() << InputString;
-      continue;
-    }
-
-    if (ClPrintAddress) {
-      outs() << "0x";
-      outs().write_hex(ModuleOffset);
-      StringRef Delimiter = ClPrettyPrint ? ": " : "\n";
-      outs() << Delimiter;
-    }
-    if (IsData) {
-      auto ResOrErr = Symbolizer.symbolizeData(ModuleName, ModuleOffset);
-      Printer << (error(ResOrErr) ? DIGlobal() : ResOrErr.get());
-    } else if (ClPrintInlining) {
-      auto ResOrErr =
-          Symbolizer.symbolizeInlinedCode(ModuleName, ModuleOffset, ClDwpName);
-      Printer << (error(ResOrErr) ? DIInliningInfo()
-                                             : ResOrErr.get());
-    } else {
-      auto ResOrErr =
-          Symbolizer.symbolizeCode(ModuleName, ModuleOffset, ClDwpName);
-      Printer << (error(ResOrErr) ? DILineInfo() : ResOrErr.get());
-    }
-    outs() << "\n";
-    outs().flush();
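+    // No positional addresses were given: keep the historical behavior of
+    // reading "<module> <offset>" commands line by line from stdin.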
+    while (fgets(InputString, sizeof(InputString), stdin))
+      symbolizeInput(InputString, Symbolizer, Printer);
+  } else {
+    for (StringRef Address : ClInputAddresses)
+      symbolizeInput(Address, Symbolizer, Printer);
   }
 
   return 0;
diff --git a/tools/obj2yaml/coff2yaml.cpp b/tools/obj2yaml/coff2yaml.cpp
index 6835dcf..3b44780 100644
--- a/tools/obj2yaml/coff2yaml.cpp
+++ b/tools/obj2yaml/coff2yaml.cpp
@@ -8,6 +8,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "obj2yaml.h"
+#include "llvm/ADT/StringMap.h"
 #include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
 #include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
 #include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
@@ -142,6 +143,18 @@
   codeview::StringsAndChecksumsRef SC;
   initializeFileAndStringTable(Obj, SC);
 
+  StringMap<bool> SymbolUnique;
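+  // Map each symbol name to "still unique"; duplicates are flipped to false
+  // so relocations against them can fall back to symbol table indices.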
+  for (const auto &S : Obj.symbols()) {
+    object::COFFSymbolRef Symbol = Obj.getCOFFSymbol(S);
+    StringRef Name;
+    Obj.getSymbolName(Symbol, Name);
+    StringMap<bool>::iterator It;
+    bool Inserted;
+    std::tie(It, Inserted) = SymbolUnique.insert(std::make_pair(Name, true));
+    if (!Inserted)
+      It->second = false;
+  }
+
   for (const auto &ObjSection : Obj.sections()) {
     const object::coff_section *COFFSection = Obj.getCOFFSection(ObjSection);
     COFFYAML::Section NewYAMLSection;
@@ -192,7 +205,10 @@
        OS.flush();
        report_fatal_error(Buf);
       }
-      Rel.SymbolName = *SymbolNameOrErr;
+      if (SymbolUnique.lookup(*SymbolNameOrErr))
+        Rel.SymbolName = *SymbolNameOrErr;
+      else
+        Rel.SymbolTableIndex = reloc->SymbolTableIndex;
       Rel.VirtualAddress = reloc->VirtualAddress;
       Rel.Type = reloc->Type;
       Relocations.push_back(Rel);
diff --git a/tools/obj2yaml/elf2yaml.cpp b/tools/obj2yaml/elf2yaml.cpp
index dea4d1b..48ecee0 100644
--- a/tools/obj2yaml/elf2yaml.cpp
+++ b/tools/obj2yaml/elf2yaml.cpp
@@ -114,6 +114,7 @@
   Y->Header.Class = ELFYAML::ELF_ELFCLASS(Obj.getHeader()->getFileClass());
   Y->Header.Data = ELFYAML::ELF_ELFDATA(Obj.getHeader()->getDataEncoding());
   Y->Header.OSABI = Obj.getHeader()->e_ident[ELF::EI_OSABI];
+  Y->Header.ABIVersion = Obj.getHeader()->e_ident[ELF::EI_ABIVERSION];
   Y->Header.Type = Obj.getHeader()->e_type;
   Y->Header.Machine = Obj.getHeader()->e_machine;
   Y->Header.Flags = Obj.getHeader()->e_flags;
diff --git a/tools/opt-viewer/opt-diff.py b/tools/opt-viewer/opt-diff.py
index f3bfd18..36e81a5 100755
--- a/tools/opt-viewer/opt-diff.py
+++ b/tools/opt-viewer/opt-diff.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 from __future__ import print_function
 
diff --git a/tools/opt-viewer/opt-stats.py b/tools/opt-viewer/opt-stats.py
index 03de23b..f4ee3a7 100755
--- a/tools/opt-viewer/opt-stats.py
+++ b/tools/opt-viewer/opt-stats.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 from __future__ import print_function
 
diff --git a/tools/opt-viewer/opt-viewer.py b/tools/opt-viewer/opt-viewer.py
index 4887043..f658250 100755
--- a/tools/opt-viewer/opt-viewer.py
+++ b/tools/opt-viewer/opt-viewer.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 from __future__ import print_function
 
@@ -72,7 +72,10 @@
         file_text = stream.read()
 
         if self.no_highlight:
-            html_highlighted = file_text.decode('utf-8')
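+            # On Python 3 the file contents arrive as text already; only
+            # Python 2 needs the explicit UTF-8 decode.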
+            if sys.version_info.major >= 3:
+                html_highlighted = file_text
+            else:
+                html_highlighted = file_text.decode('utf-8')
         else:
             html_highlighted = highlight(
             file_text,
diff --git a/tools/opt-viewer/optpmap.py b/tools/opt-viewer/optpmap.py
index db6b079..ff3e683f 100644
--- a/tools/opt-viewer/optpmap.py
+++ b/tools/opt-viewer/optpmap.py
@@ -42,7 +42,7 @@
 
     func_and_args = [(func, arg, should_print_progress,) for arg in iterable]
     if processes == 1:
-        result = map(_wrapped_func, func_and_args, *args, **kwargs)
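+        # map() is lazy on Python 3; materialize the results so the work (and
+        # progress printing) actually happens here, as it did on Python 2.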
+        result = list(map(_wrapped_func, func_and_args, *args, **kwargs))
     else:
         pool = multiprocessing.Pool(initializer=_init,
                                     initargs=(_current, _total,),
diff --git a/tools/opt-viewer/optrecord.py b/tools/opt-viewer/optrecord.py
index 8cf22ee..0193d25 100644
--- a/tools/opt-viewer/optrecord.py
+++ b/tools/opt-viewer/optrecord.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 from __future__ import print_function
 
diff --git a/tools/opt/opt.cpp b/tools/opt/opt.cpp
index 2666039..a4967a2 100644
--- a/tools/opt/opt.cpp
+++ b/tools/opt/opt.cpp
@@ -103,6 +103,10 @@
     OutputThinLTOBC("thinlto-bc",
                     cl::desc("Write output as ThinLTO-ready bitcode"));
 
+static cl::opt<bool>
+    SplitLTOUnit("thinlto-split-lto-unit",
+                 cl::desc("Enable splitting of a ThinLTO LTOUnit"));
+
 static cl::opt<std::string> ThinLinkBitcodeFile(
     "thin-link-bitcode-file", cl::value_desc("filename"),
     cl::desc(
@@ -596,6 +600,9 @@
     if (CheckBitcodeOutputToConsole(Out->os(), !Quiet))
       NoOutput = true;
 
+  if (OutputThinLTOBC)
+    M->addModuleFlag(Module::Error, "EnableSplitLTOUnit", SplitLTOUnit);
+
   if (PassPipeline.getNumOccurrences() > 0) {
     OutputKind OK = OK_NoOutput;
     if (!NoOutput)
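The hunk above adds a -thinlto-split-lto-unit option to opt and, whenever -thinlto-bc output is requested, records its value as the "EnableSplitLTOUnit" module flag so later LTO stages can see the decision. A minimal sketch of that option-to-module-flag pattern (the standalone helper name is illustrative):

    #include "llvm/IR/Module.h"
    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    static cl::opt<bool>
        SplitLTOUnit("thinlto-split-lto-unit",
                     cl::desc("Enable splitting of a ThinLTO LTOUnit"));

    // Record the option on the module; Module::Error makes it a link error to
    // merge modules that carry conflicting values for this flag.
    static void recordSplitFlag(Module &M) {
      M.addModuleFlag(Module::Error, "EnableSplitLTOUnit", SplitLTOUnit);
    }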
diff --git a/tools/sancov/coverage-report-server.py b/tools/sancov/coverage-report-server.py
index a2e161d..2fb70ee 100755
--- a/tools/sancov/coverage-report-server.py
+++ b/tools/sancov/coverage-report-server.py
@@ -22,6 +22,8 @@
     --host host_name - host name to bind server to (127.0.0.1)
 '''
 
+from __future__ import print_function
+
 import argparse
 import http.server
 import json
diff --git a/tools/yaml2obj/yaml2coff.cpp b/tools/yaml2obj/yaml2coff.cpp
index f9d255e..4fe9ab0 100644
--- a/tools/yaml2obj/yaml2coff.cpp
+++ b/tools/yaml2obj/yaml2coff.cpp
@@ -24,6 +24,7 @@
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/WithColor.h"
 #include "llvm/Support/raw_ostream.h"
 #include <vector>
 
@@ -520,7 +521,15 @@
     assert(S.Header.SizeOfRawData >= S.SectionData.binary_size());
     OS << num_zeros(S.Header.SizeOfRawData - S.SectionData.binary_size());
     for (const COFFYAML::Relocation &R : S.Relocations) {
-      uint32_t SymbolTableIndex = SymbolTableIndexMap[R.SymbolName];
+      uint32_t SymbolTableIndex;
+      if (R.SymbolTableIndex) {
+        if (!R.SymbolName.empty())
+          WithColor::error()
+              << "Both SymbolName and SymbolTableIndex specified\n";
+        SymbolTableIndex = *R.SymbolTableIndex;
+      } else {
+        SymbolTableIndex = SymbolTableIndexMap[R.SymbolName];
+      }
       OS << binary_le(R.VirtualAddress)
          << binary_le(SymbolTableIndex)
          << binary_le(R.Type);
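With this change a COFF relocation in the YAML document may name its symbol either by SymbolName or directly by SymbolTableIndex, and supplying both is diagnosed. The resolution order, as a sketch (assuming the new SymbolTableIndex field is an Optional<uint32_t>, which is how the hunk reads):

    // Hypothetical helper mirroring the logic above.
    static uint32_t resolveSymbolIndex(const COFFYAML::Relocation &R,
                                       StringMap<uint32_t> &SymbolTableIndexMap) {
      if (R.SymbolTableIndex) {
        if (!R.SymbolName.empty())
          WithColor::error()
              << "Both SymbolName and SymbolTableIndex specified\n";
        return *R.SymbolTableIndex;             // an explicit index wins
      }
      return SymbolTableIndexMap[R.SymbolName]; // otherwise resolve by name
    }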
diff --git a/tools/yaml2obj/yaml2elf.cpp b/tools/yaml2obj/yaml2elf.cpp
index 672d154..5e23bc6 100644
--- a/tools/yaml2obj/yaml2elf.cpp
+++ b/tools/yaml2obj/yaml2elf.cpp
@@ -196,7 +196,7 @@
   Header.e_ident[EI_DATA] = IsLittleEndian ? ELFDATA2LSB : ELFDATA2MSB;
   Header.e_ident[EI_VERSION] = EV_CURRENT;
   Header.e_ident[EI_OSABI] = Doc.Header.OSABI;
-  Header.e_ident[EI_ABIVERSION] = 0;
+  Header.e_ident[EI_ABIVERSION] = Doc.Header.ABIVersion;
   Header.e_type = Doc.Header.Type;
   Header.e_machine = Doc.Header.Machine;
   Header.e_version = EV_CURRENT;
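Paired with the elf2yaml.cpp hunk near the top of this patch, this makes EI_ABIVERSION round-trip through YAML: obj2yaml now copies the byte out of e_ident, and yaml2obj writes the documented value back instead of hardcoding 0. For reference, the byte sits at offset 8 of the identification array:

    #include "llvm/BinaryFormat/ELF.h"
    #include <cassert>
    using namespace llvm;

    int main() {
      // ELF identification bytes: magic, class, data, version, OSABI, then
      // the ABI version at index ELF::EI_ABIVERSION (== 8).
      unsigned char Ident[ELF::EI_NIDENT] = {
          0x7f, 'E', 'L', 'F', ELF::ELFCLASS64, ELF::ELFDATA2LSB,
          ELF::EV_CURRENT, ELF::ELFOSABI_NONE, /*EI_ABIVERSION=*/1};
      assert(Ident[ELF::EI_ABIVERSION] == 1);
    }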
diff --git a/tools/yaml2obj/yaml2wasm.cpp b/tools/yaml2obj/yaml2wasm.cpp
index 7fd0bb0..2d3e3b7 100644
--- a/tools/yaml2obj/yaml2wasm.cpp
+++ b/tools/yaml2obj/yaml2wasm.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 //
 
+#include "llvm/Object/Wasm.h"
 #include "llvm/ObjectYAML/ObjectYAML.h"
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/LEB128.h"
@@ -104,7 +105,7 @@
   case wasm::WASM_OPCODE_F64_CONST:
     writeUint64(OS, InitExpr.Value.Float64);
     break;
-  case wasm::WASM_OPCODE_GET_GLOBAL:
+  case wasm::WASM_OPCODE_GLOBAL_GET:
     encodeULEB128(InitExpr.Value.Global, OS);
     break;
   default:
@@ -516,7 +517,15 @@
   writeUint32(OS, Obj.Header.Version);
 
   // Write each section
+  llvm::object::WasmSectionOrderChecker Checker;
   for (const std::unique_ptr<WasmYAML::Section> &Sec : Obj.Sections) {
+    StringRef SecName = "";
+    if (auto S = dyn_cast<WasmYAML::CustomSection>(Sec.get()))
+      SecName = S->Name;
+    if (!Checker.isValidSectionOrder(Sec->Type, SecName)) {
+      errs() << "Out of order section type: " << Sec->Type << "\n";
+      return 1;
+    }
     encodeULEB128(Sec->Type, OS);
     std::string OutString;
     raw_string_ostream StringStream(OutString);
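yaml2obj now rejects wasm documents whose sections violate the ordering the binary format requires, reusing WasmSectionOrderChecker from the object reader; custom sections are identified by name, since ordering can depend on which custom section it is. The checking pattern, as a sketch:

    // Feed sections to the checker in document order; a false return means
    // this section type (or named custom section) may not appear here.
    llvm::object::WasmSectionOrderChecker Checker;
    for (const std::unique_ptr<WasmYAML::Section> &Sec : Obj.Sections) {
      StringRef SecName;
      if (auto *S = dyn_cast<WasmYAML::CustomSection>(Sec.get()))
        SecName = S->Name;
      if (!Checker.isValidSectionOrder(Sec->Type, SecName))
        return 1; // out-of-order section: refuse to emit the object
    }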
diff --git a/unittests/ADT/IntervalMapTest.cpp b/unittests/ADT/IntervalMapTest.cpp
index 11f1375..513c063 100644
--- a/unittests/ADT/IntervalMapTest.cpp
+++ b/unittests/ADT/IntervalMapTest.cpp
@@ -611,6 +611,50 @@
 
 }
 
+TEST(IntervalMapTest, Overlaps) {
+  UUMap::Allocator allocator;
+  UUMap map(allocator);
+  map.insert(10, 20, 0);
+  map.insert(30, 40, 0);
+  map.insert(50, 60, 0);
+
+  EXPECT_FALSE(map.overlaps(0, 9));
+  EXPECT_TRUE(map.overlaps(0, 10));
+  EXPECT_TRUE(map.overlaps(0, 15));
+  EXPECT_TRUE(map.overlaps(0, 25));
+  EXPECT_TRUE(map.overlaps(0, 45));
+  EXPECT_TRUE(map.overlaps(10, 45));
+  EXPECT_TRUE(map.overlaps(30, 45));
+  EXPECT_TRUE(map.overlaps(35, 36));
+  EXPECT_TRUE(map.overlaps(40, 45));
+  EXPECT_FALSE(map.overlaps(45, 45));
+  EXPECT_TRUE(map.overlaps(60, 60));
+  EXPECT_TRUE(map.overlaps(60, 66));
+  EXPECT_FALSE(map.overlaps(66, 66));
+}
+
+TEST(IntervalMapTest, OverlapsHalfOpen) {
+  UUHalfOpenMap::Allocator allocator;
+  UUHalfOpenMap map(allocator);
+  map.insert(10, 20, 0);
+  map.insert(30, 40, 0);
+  map.insert(50, 60, 0);
+
+  EXPECT_FALSE(map.overlaps(0, 9));
+  EXPECT_FALSE(map.overlaps(0, 10));
+  EXPECT_TRUE(map.overlaps(0, 15));
+  EXPECT_TRUE(map.overlaps(0, 25));
+  EXPECT_TRUE(map.overlaps(0, 45));
+  EXPECT_TRUE(map.overlaps(10, 45));
+  EXPECT_TRUE(map.overlaps(30, 45));
+  EXPECT_TRUE(map.overlaps(35, 36));
+  EXPECT_FALSE(map.overlaps(40, 45));
+  EXPECT_FALSE(map.overlaps(45, 46));
+  EXPECT_FALSE(map.overlaps(60, 61));
+  EXPECT_FALSE(map.overlaps(60, 66));
+  EXPECT_FALSE(map.overlaps(66, 67));
+}
+
 TEST(IntervalMapOverlapsTest, SmallMaps) {
   typedef IntervalMapOverlaps<UUMap,UUMap> UUOverlaps;
   UUMap::Allocator allocator;
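The two new tests pin down IntervalMap::overlaps under both coordinate conventions: with the default traits intervals are closed, so a query that merely touches an endpoint of [10, 20] already overlaps, while with half-open traits a shared endpoint is not an overlap, which is exactly where the EXPECT lines diverge. The fixtures are presumably typedefs along these lines (a sketch; the real definitions sit earlier in this test file):

    #include "llvm/ADT/IntervalMap.h"
    using namespace llvm;

    // Closed intervals: insert(10, 20, ...) covers 10..20 inclusive.
    typedef IntervalMap<unsigned, unsigned, 4> UUMap;
    // Half-open intervals: insert(10, 20, ...) covers 10..19 only.
    typedef IntervalMap<unsigned, unsigned, 4,
                        IntervalMapHalfOpenInfo<unsigned>> UUHalfOpenMap;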
diff --git a/unittests/ADT/TripleTest.cpp b/unittests/ADT/TripleTest.cpp
index efe859f..bc7f932 100644
--- a/unittests/ADT/TripleTest.cpp
+++ b/unittests/ADT/TripleTest.cpp
@@ -253,12 +253,24 @@
   EXPECT_EQ(Triple::UnknownOS, T.getOS());
   EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
 
+  T = Triple("wasm32-unknown-wasi-musl");
+  EXPECT_EQ(Triple::wasm32, T.getArch());
+  EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
+  EXPECT_EQ(Triple::WASI, T.getOS());
+  EXPECT_EQ(Triple::Musl, T.getEnvironment());
+
   T = Triple("wasm64-unknown-unknown");
   EXPECT_EQ(Triple::wasm64, T.getArch());
   EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
   EXPECT_EQ(Triple::UnknownOS, T.getOS());
   EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
 
+  T = Triple("wasm64-unknown-wasi-musl");
+  EXPECT_EQ(Triple::wasm64, T.getArch());
+  EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
+  EXPECT_EQ(Triple::WASI, T.getOS());
+  EXPECT_EQ(Triple::Musl, T.getEnvironment());
+
   T = Triple("avr-unknown-unknown");
   EXPECT_EQ(Triple::avr, T.getArch());
   EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
@@ -1235,11 +1247,17 @@
 
   EXPECT_EQ(Triple::Wasm, Triple("wasm32-unknown-unknown").getObjectFormat());
   EXPECT_EQ(Triple::Wasm, Triple("wasm64-unknown-unknown").getObjectFormat());
+  EXPECT_EQ(Triple::Wasm, Triple("wasm32-unknown-wasi-musl").getObjectFormat());
+  EXPECT_EQ(Triple::Wasm, Triple("wasm64-unknown-wasi-musl").getObjectFormat());
 
   EXPECT_EQ(Triple::Wasm,
             Triple("wasm32-unknown-unknown-wasm").getObjectFormat());
   EXPECT_EQ(Triple::Wasm,
             Triple("wasm64-unknown-unknown-wasm").getObjectFormat());
+  EXPECT_EQ(Triple::Wasm,
+            Triple("wasm32-unknown-wasi-musl-wasm").getObjectFormat());
+  EXPECT_EQ(Triple::Wasm,
+            Triple("wasm64-unknown-wasi-musl-wasm").getObjectFormat());
 
   Triple MSVCNormalized(Triple::normalize("i686-pc-windows-msvc-elf"));
   EXPECT_EQ(Triple::ELF, MSVCNormalized.getObjectFormat());
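These additions pin down parsing of WASI triples: the wasi OS component and musl environment are recognized for both wasm32 and wasm64, and the object format stays Wasm with or without an explicit -wasm suffix. In sketch form:

    #include "llvm/ADT/Triple.h"
    using namespace llvm;

    // What the tests above assert for the new OS/environment components.
    static bool isWasiMusl(const Triple &T) {
      return T.getOS() == Triple::WASI && T.getEnvironment() == Triple::Musl &&
             T.getObjectFormat() == Triple::Wasm;
    }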
diff --git a/unittests/Analysis/CMakeLists.txt b/unittests/Analysis/CMakeLists.txt
index 7d4fd33..563b48d 100644
--- a/unittests/Analysis/CMakeLists.txt
+++ b/unittests/Analysis/CMakeLists.txt
@@ -12,6 +12,7 @@
   BlockFrequencyInfoTest.cpp
   BranchProbabilityInfoTest.cpp
   CallGraphTest.cpp
+  CaptureTrackingTest.cpp
   CFGTest.cpp
   CGSCCPassManagerTest.cpp
   DivergenceAnalysisTest.cpp
diff --git a/unittests/Analysis/CaptureTrackingTest.cpp b/unittests/Analysis/CaptureTrackingTest.cpp
new file mode 100644
index 0000000..ee6e010
--- /dev/null
+++ b/unittests/Analysis/CaptureTrackingTest.cpp
@@ -0,0 +1,78 @@
+//===- CaptureTrackingTest.cpp - Unit tests for Capture Tracking ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/OrderedBasicBlock.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+TEST(CaptureTracking, MaxUsesToExplore) {
+  StringRef Assembly = R"(
+    ; Function Attrs: nounwind ssp uwtable
+    declare void @doesnt_capture(i8* nocapture, i8* nocapture, i8* nocapture, 
+                                 i8* nocapture, i8* nocapture)
+
+    ; %arg has 5 uses
+    define void @test_few_uses(i8* %arg) {
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      ret void
+    }
+
+    ; %arg has 50 uses
+    define void @test_many_uses(i8* %arg) {
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      ret void
+    }
+  )";
+
+  LLVMContext Context;
+  SMDiagnostic Error;
+  auto M = parseAssemblyString(Assembly, Error, Context);
+  ASSERT_TRUE(M) << "Bad assembly?";
+
+  auto Test = [&M](const char *FName, unsigned FalseMaxUsesLimit,
+                   unsigned TrueMaxUsesLimit) {
+    Function *F = M->getFunction(FName);
+    ASSERT_NE(F, nullptr);
+    Value *Arg = &*F->arg_begin();
+    ASSERT_NE(Arg, nullptr);
+    ASSERT_FALSE(PointerMayBeCaptured(Arg, true, true, FalseMaxUsesLimit));
+    ASSERT_TRUE(PointerMayBeCaptured(Arg, true, true, TrueMaxUsesLimit));
+
+    BasicBlock *EntryBB = &F->getEntryBlock();
+    DominatorTree DT(*F);
+    OrderedBasicBlock OBB(EntryBB);
+
+    Instruction *Ret = EntryBB->getTerminator();
+    ASSERT_TRUE(isa<ReturnInst>(Ret));
+    ASSERT_FALSE(PointerMayBeCapturedBefore(Arg, true, true, Ret, &DT, false, 
+                                            &OBB, FalseMaxUsesLimit));
+    ASSERT_TRUE(PointerMayBeCapturedBefore(Arg, true, true, Ret, &DT, false,
+                                           &OBB, TrueMaxUsesLimit));
+  };
+
+  Test("test_few_uses", 6, 4);
+  Test("test_many_uses", 50, 30);
+}
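The test exercises the MaxUsesToExplore parameter of PointerMayBeCaptured and PointerMayBeCapturedBefore: once the walker has more uses to visit than the limit allows, it gives up and conservatively reports a capture. That is why a limit of 6 proves the 5-use %arg uncaptured while a limit of 4 does not. A sketch of the call under that reading:

    #include "llvm/Analysis/CaptureTracking.h"
    using namespace llvm;

    // Returns true only when every use could be examined within MaxUses and
    // none of them captures the pointer.
    static bool provablyNotCaptured(const Value *Arg, unsigned MaxUses) {
      return !PointerMayBeCaptured(Arg, /*ReturnCaptures=*/true,
                                   /*StoreCaptures=*/true, MaxUses);
    }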
diff --git a/unittests/CodeGen/AArch64SelectionDAGTest.cpp b/unittests/CodeGen/AArch64SelectionDAGTest.cpp
index 03bfdc2..e25249e 100644
--- a/unittests/CodeGen/AArch64SelectionDAGTest.cpp
+++ b/unittests/CodeGen/AArch64SelectionDAGTest.cpp
@@ -89,8 +89,7 @@
   auto InVec = DAG->getConstant(0, Loc, InVecVT);
   auto Op = DAG->getNode(ISD::ZERO_EXTEND_VECTOR_INREG, Loc, OutVecVT, InVec);
   auto DemandedElts = APInt(2, 3);
-  KnownBits Known;
-  DAG->computeKnownBits(Op, Known, DemandedElts);
+  KnownBits Known = DAG->computeKnownBits(Op, DemandedElts);
   EXPECT_TRUE(Known.isZero());
 }
 
@@ -105,8 +104,7 @@
   auto ZeroIdx = DAG->getConstant(0, Loc, IdxVT);
   auto Op = DAG->getNode(ISD::EXTRACT_SUBVECTOR, Loc, VecVT, Vec, ZeroIdx);
   auto DemandedElts = APInt(3, 7);
-  KnownBits Known;
-  DAG->computeKnownBits(Op, Known, DemandedElts);
+  KnownBits Known = DAG->computeKnownBits(Op, DemandedElts);
   EXPECT_TRUE(Known.isZero());
 }
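Both hunks here are the same mechanical API update: SelectionDAG::computeKnownBits now returns the KnownBits by value rather than filling an out-parameter. Before/after, using the test's DAG, Op, and DemandedElts:

    // Before: out-parameter style.
    //   KnownBits Known;
    //   DAG->computeKnownBits(Op, Known, DemandedElts);
    // After: the analysis result is returned directly.
    KnownBits Known = DAG->computeKnownBits(Op, DemandedElts);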
 
diff --git a/unittests/CodeGen/GlobalISel/CMakeLists.txt b/unittests/CodeGen/GlobalISel/CMakeLists.txt
index 60566cb..32bbd56 100644
--- a/unittests/CodeGen/GlobalISel/CMakeLists.txt
+++ b/unittests/CodeGen/GlobalISel/CMakeLists.txt
@@ -13,4 +13,5 @@
         LegalizerInfoTest.cpp
         PatternMatchTest.cpp
         LegalizerHelperTest.cpp
+        CSETest.cpp
         )
diff --git a/unittests/CodeGen/GlobalISel/CSETest.cpp b/unittests/CodeGen/GlobalISel/CSETest.cpp
new file mode 100644
index 0000000..c6bbd8b
--- /dev/null
+++ b/unittests/CodeGen/GlobalISel/CSETest.cpp
@@ -0,0 +1,87 @@
+//===- CSETest.cpp -----------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GISelMITest.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
+
+namespace {
+
+TEST_F(GISelMITest, TestCSE) {
+  if (!TM)
+    return;
+
+  LLT s16{LLT::scalar(16)};
+  LLT s32{LLT::scalar(32)};
+  auto MIBInput = B.buildInstr(TargetOpcode::G_TRUNC, {s16}, {Copies[0]});
+  auto MIBInput1 = B.buildInstr(TargetOpcode::G_TRUNC, {s16}, {Copies[1]});
+  auto MIBAdd = B.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
+  GISelCSEInfo CSEInfo;
+  CSEInfo.setCSEConfig(make_unique<CSEConfig>());
+  CSEInfo.analyze(*MF);
+  B.setCSEInfo(&CSEInfo);
+  CSEMIRBuilder CSEB(B.getState());
+  CSEB.setInsertPt(*EntryMBB, EntryMBB->begin());
+  unsigned AddReg = MRI->createGenericVirtualRegister(s16);
+  auto MIBAddCopy =
+      CSEB.buildInstr(TargetOpcode::G_ADD, {AddReg}, {MIBInput, MIBInput});
+  ASSERT_EQ(MIBAddCopy->getOpcode(), TargetOpcode::COPY);
+  auto MIBAdd2 =
+      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
+  ASSERT_TRUE(&*MIBAdd == &*MIBAdd2);
+  auto MIBAdd4 =
+      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
+  ASSERT_TRUE(&*MIBAdd == &*MIBAdd4);
+  auto MIBAdd5 =
+      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput1});
+  ASSERT_TRUE(&*MIBAdd != &*MIBAdd5);
+
+  // Try building G_CONSTANTS.
+  auto MIBCst = CSEB.buildConstant(s32, 0);
+  auto MIBCst1 = CSEB.buildConstant(s32, 0);
+  ASSERT_TRUE(&*MIBCst == &*MIBCst1);
+  // Try the constant folding of BinaryOps.
+  auto MIBCF1 = CSEB.buildInstr(TargetOpcode::G_ADD, {s32}, {MIBCst, MIBCst});
+  ASSERT_TRUE(&*MIBCF1 == &*MIBCst);
+
+  // Try out building FCONSTANTs.
+  auto MIBFP0 = CSEB.buildFConstant(s32, 1.0);
+  auto MIBFP0_1 = CSEB.buildFConstant(s32, 1.0);
+  ASSERT_TRUE(&*MIBFP0 == &*MIBFP0_1);
+  CSEInfo.print();
+
+  // Check G_UNMERGE_VALUES
+  auto MIBUnmerge = CSEB.buildUnmerge({s32, s32}, Copies[0]);
+  auto MIBUnmerge2 = CSEB.buildUnmerge({s32, s32}, Copies[0]);
+  ASSERT_TRUE(&*MIBUnmerge == &*MIBUnmerge2);
+}
+
+TEST_F(GISelMITest, TestCSEConstantConfig) {
+  if (!TM)
+    return;
+
+  LLT s16{LLT::scalar(16)};
+  auto MIBInput = B.buildInstr(TargetOpcode::G_TRUNC, {s16}, {Copies[0]});
+  auto MIBAdd = B.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
+  auto MIBZero = B.buildConstant(s16, 0);
+  GISelCSEInfo CSEInfo;
+  CSEInfo.setCSEConfig(make_unique<CSEConfigConstantOnly>());
+  CSEInfo.analyze(*MF);
+  B.setCSEInfo(&CSEInfo);
+  CSEMIRBuilder CSEB(B.getState());
+  CSEB.setInsertPt(*EntryMBB, EntryMBB->begin());
+  auto MIBAdd1 =
+      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
+  // We should CSE constants only. Adds should not be CSEd.
+  ASSERT_TRUE(MIBAdd1->getOpcode() != TargetOpcode::COPY);
+  ASSERT_TRUE(&*MIBAdd1 != &*MIBAdd);
+  // We should CSE constants.
+  auto MIBZeroTmp = CSEB.buildConstant(s16, 0);
+  ASSERT_TRUE(&*MIBZero == &*MIBZeroTmp);
+}
+} // namespace
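The two tests differ only in the CSE policy handed to GISelCSEInfo: CSEConfig lets generic instructions such as G_ADD be deduplicated (a rebuild either returns the original MachineInstr or a COPY of it when a fresh destination register was requested), while CSEConfigConstantOnly restricts CSE to constants. The shared wiring, assuming MF and B come from a fixture like GISelMITest:

    GISelCSEInfo CSEInfo;
    CSEInfo.setCSEConfig(make_unique<CSEConfig>()); // or CSEConfigConstantOnly
    CSEInfo.analyze(*MF);             // seed the info from the existing function
    B.setCSEInfo(&CSEInfo);
    CSEMIRBuilder CSEB(B.getState()); // a builder that consults CSEInfo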
diff --git a/unittests/CodeGen/GlobalISel/GISelMITest.h b/unittests/CodeGen/GlobalISel/GISelMITest.h
new file mode 100644
index 0000000..91b8e81
--- /dev/null
+++ b/unittests/CodeGen/GlobalISel/GISelMITest.h
@@ -0,0 +1,195 @@
+//===- GISelMITest.h ------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_UNITTEST_CODEGEN_GLOBALISEL_GISELMI_H
+#define LLVM_UNITTEST_CODEGEN_GLOBALISEL_GISELMI_H
+
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MIRParser/MIRParser.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Support/FileCheck.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+using namespace MIPatternMatch;
+
+static inline void initLLVM() {
+  InitializeAllTargets();
+  InitializeAllTargetMCs();
+  InitializeAllAsmPrinters();
+  InitializeAllAsmParsers();
+
+  PassRegistry *Registry = PassRegistry::getPassRegistry();
+  initializeCore(*Registry);
+  initializeCodeGen(*Registry);
+}
+
+/// Create a TargetMachine. As we lack a dedicated, always-available target
+/// for unittests, we use AArch64.
+static std::unique_ptr<LLVMTargetMachine> createTargetMachine() {
+  Triple TargetTriple("aarch64--");
+  std::string Error;
+  const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
+  if (!T)
+    return nullptr;
+
+  TargetOptions Options;
+  return std::unique_ptr<LLVMTargetMachine>(
+      static_cast<LLVMTargetMachine *>(T->createTargetMachine(
+          "AArch64", "", "", Options, None, None, CodeGenOpt::Aggressive)));
+}
+
+static std::unique_ptr<Module> parseMIR(LLVMContext &Context,
+                                        std::unique_ptr<MIRParser> &MIR,
+                                        const TargetMachine &TM,
+                                        StringRef MIRCode, const char *FuncName,
+                                        MachineModuleInfo &MMI) {
+  SMDiagnostic Diagnostic;
+  std::unique_ptr<MemoryBuffer> MBuffer = MemoryBuffer::getMemBuffer(MIRCode);
+  MIR = createMIRParser(std::move(MBuffer), Context);
+  if (!MIR)
+    return nullptr;
+
+  std::unique_ptr<Module> M = MIR->parseIRModule();
+  if (!M)
+    return nullptr;
+
+  M->setDataLayout(TM.createDataLayout());
+
+  if (MIR->parseMachineFunctions(*M, MMI))
+    return nullptr;
+
+  return M;
+}
+
+static std::pair<std::unique_ptr<Module>, std::unique_ptr<MachineModuleInfo>>
+createDummyModule(LLVMContext &Context, const LLVMTargetMachine &TM,
+                  StringRef MIRFunc) {
+  SmallString<512> S;
+  StringRef MIRString = (Twine(R"MIR(
+---
+...
+name: func
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+  - { id: 2, class: _ }
+  - { id: 3, class: _ }
+body: |
+  bb.1:
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
+    %2(s64) = COPY $x2
+)MIR") + Twine(MIRFunc) + Twine("...\n"))
+                            .toNullTerminatedStringRef(S);
+  std::unique_ptr<MIRParser> MIR;
+  auto MMI = make_unique<MachineModuleInfo>(&TM);
+  std::unique_ptr<Module> M =
+      parseMIR(Context, MIR, TM, MIRString, "func", *MMI);
+  return make_pair(std::move(M), std::move(MMI));
+}
+
+static MachineFunction *getMFFromMMI(const Module *M,
+                                     const MachineModuleInfo *MMI) {
+  Function *F = M->getFunction("func");
+  auto *MF = MMI->getMachineFunction(*F);
+  return MF;
+}
+
+static void collectCopies(SmallVectorImpl<unsigned> &Copies,
+                          MachineFunction *MF) {
+  for (auto &MBB : *MF)
+    for (MachineInstr &MI : MBB) {
+      if (MI.getOpcode() == TargetOpcode::COPY)
+        Copies.push_back(MI.getOperand(0).getReg());
+    }
+}
+
+class GISelMITest : public ::testing::Test {
+protected:
+  GISelMITest() : ::testing::Test() {
+    TM = createTargetMachine();
+    if (!TM)
+      return;
+    ModuleMMIPair = createDummyModule(Context, *TM, "");
+    MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
+    collectCopies(Copies, MF);
+    EntryMBB = &*MF->begin();
+    B.setMF(*MF);
+    MRI = &MF->getRegInfo();
+    B.setInsertPt(*EntryMBB, EntryMBB->end());
+  }
+  LLVMContext Context;
+  std::unique_ptr<LLVMTargetMachine> TM;
+  MachineFunction *MF;
+  std::pair<std::unique_ptr<Module>, std::unique_ptr<MachineModuleInfo>>
+      ModuleMMIPair;
+  SmallVector<unsigned, 4> Copies;
+  MachineBasicBlock *EntryMBB;
+  MachineIRBuilder B;
+  MachineRegisterInfo *MRI;
+};
+
+#define DefineLegalizerInfo(Name, SettingUpActionsBlock)                       \
+  class Name##Info : public LegalizerInfo {                                    \
+  public:                                                                      \
+    Name##Info(const TargetSubtargetInfo &ST) {                                \
+      using namespace TargetOpcode;                                            \
+      const LLT s8 = LLT::scalar(8);                                           \
+      (void)s8;                                                                \
+      const LLT s16 = LLT::scalar(16);                                         \
+      (void)s16;                                                               \
+      const LLT s32 = LLT::scalar(32);                                         \
+      (void)s32;                                                               \
+      const LLT s64 = LLT::scalar(64);                                         \
+      (void)s64;                                                               \
+      do                                                                       \
+        SettingUpActionsBlock while (0);                                       \
+      computeTables();                                                         \
+      verify(*ST.getInstrInfo());                                              \
+    }                                                                          \
+  };
+
+static inline bool CheckMachineFunction(const MachineFunction &MF,
+                                        StringRef CheckStr) {
+  SmallString<512> Msg;
+  raw_svector_ostream OS(Msg);
+  MF.print(OS);
+  auto OutputBuf = MemoryBuffer::getMemBuffer(Msg, "Output", false);
+  auto CheckBuf = MemoryBuffer::getMemBuffer(CheckStr, "");
+  SmallString<4096> CheckFileBuffer;
+  FileCheckRequest Req;
+  FileCheck FC(Req);
+  StringRef CheckFileText =
+      FC.CanonicalizeFile(*CheckBuf.get(), CheckFileBuffer);
+  SourceMgr SM;
+  SM.AddNewSourceBuffer(MemoryBuffer::getMemBuffer(CheckFileText, "CheckFile"),
+                        SMLoc());
+  Regex PrefixRE = FC.buildCheckPrefixRegex();
+  std::vector<FileCheckString> CheckStrings;
+  FC.ReadCheckFile(SM, CheckFileText, PrefixRE, CheckStrings);
+  auto OutBuffer = OutputBuf->getBuffer();
+  SM.AddNewSourceBuffer(std::move(OutputBuf), SMLoc());
+  return FC.CheckInput(SM, OutBuffer, CheckStrings);
+}
+#endif
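GISelMITest.h hoists the fixture that used to live in LegalizerHelperTest.h (deleted later in this patch) into a shared header so CSETest.cpp and LegalizerHelperTest.cpp can both use it. A typical consumer defines a LegalizerInfo through the macro, builds MIR through B, and FileChecks the printed function; a hypothetical sketch:

    DefineLegalizerInfo(A, { getActionDefinitionsBuilder(G_AND).legalFor({s64}); })

    TEST_F(GISelMITest, Example) {
      if (!TM) // the AArch64 target may not be compiled in
        return;
      auto MIB = B.buildInstr(TargetOpcode::G_AND, {LLT::scalar(64)},
                              {Copies[0], Copies[1]});
      (void)MIB;
      ASSERT_TRUE(CheckMachineFunction(*MF, "CHECK: G_AND"));
    }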
diff --git a/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp b/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
index ee84aef..9764a0b 100644
--- a/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
+++ b/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
@@ -1,4 +1,4 @@
-//===- PatternMatchTest.cpp -----------------------------------------------===//
+//===- LegalizerHelperTest.cpp --------------------------------------------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -7,21 +8,21 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "LegalizerHelperTest.h"
+#include "GISelMITest.h"
 
 namespace {
 
 class DummyGISelObserver : public GISelChangeObserver {
 public:
-  void changingInstr(const MachineInstr &MI) override {}
-  void changedInstr(const MachineInstr &MI) override {}
-  void createdInstr(const MachineInstr &MI) override {}
-  void erasingInstr(const MachineInstr &MI) override {}
+  void changingInstr(MachineInstr &MI) override {}
+  void changedInstr(MachineInstr &MI) override {}
+  void createdInstr(MachineInstr &MI) override {}
+  void erasingInstr(MachineInstr &MI) override {}
 };
 
 // Test CTTZ expansion when CTTZ_ZERO_UNDEF is legal or custom,
 // in which case it becomes CTTZ_ZERO_UNDEF with select.
-TEST_F(LegalizerHelperTest, LowerBitCountingCTTZ0) {
+TEST_F(GISelMITest, LowerBitCountingCTTZ0) {
   if (!TM)
     return;
 
@@ -33,7 +34,7 @@
       B.buildInstr(TargetOpcode::G_CTTZ, {LLT::scalar(64)}, {Copies[0]});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   // Perform Legalization
   ASSERT_TRUE(Helper.lower(*MIBCTTZ, 0, LLT::scalar(64)) ==
               LegalizerHelper::LegalizeResult::Legalized);
@@ -51,7 +52,7 @@
 }
 
 // CTTZ expansion in terms of CTLZ
-TEST_F(LegalizerHelperTest, LowerBitCountingCTTZ1) {
+TEST_F(GISelMITest, LowerBitCountingCTTZ1) {
   if (!TM)
     return;
 
@@ -63,7 +64,7 @@
       B.buildInstr(TargetOpcode::G_CTTZ, {LLT::scalar(64)}, {Copies[0]});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   // Perform Legalization
   ASSERT_TRUE(Helper.lower(*MIBCTTZ, 0, LLT::scalar(64)) ==
               LegalizerHelper::LegalizeResult::Legalized);
@@ -83,7 +84,7 @@
 }
 
 // CTTZ expansion in terms of CTPOP
-TEST_F(LegalizerHelperTest, LowerBitCountingCTTZ2) {
+TEST_F(GISelMITest, LowerBitCountingCTTZ2) {
   if (!TM)
     return;
 
@@ -95,7 +96,7 @@
       B.buildInstr(TargetOpcode::G_CTTZ, {LLT::scalar(64)}, {Copies[0]});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.lower(*MIBCTTZ, 0, LLT::scalar(64)) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -112,7 +113,7 @@
 }
 
 // CTTZ_ZERO_UNDEF expansion in terms of CTTZ
-TEST_F(LegalizerHelperTest, LowerBitCountingCTTZ3) {
+TEST_F(GISelMITest, LowerBitCountingCTTZ3) {
   if (!TM)
     return;
 
@@ -124,7 +125,7 @@
                               {LLT::scalar(64)}, {Copies[0]});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.lower(*MIBCTTZ, 0, LLT::scalar(64)) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -137,7 +138,7 @@
 }
 
 // CTLZ expansion in terms of CTLZ_ZERO_UNDEF
-TEST_F(LegalizerHelperTest, LowerBitCountingCTLZ0) {
+TEST_F(GISelMITest, LowerBitCountingCTLZ0) {
   if (!TM)
     return;
 
@@ -149,7 +150,7 @@
       B.buildInstr(TargetOpcode::G_CTLZ, {LLT::scalar(64)}, {Copies[0]});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.lower(*MIBCTLZ, 0, LLT::scalar(64)) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -166,7 +167,7 @@
 }
 
 // CTLZ expansion in terms of CTLZ_ZERO_UNDEF if the latter is a libcall
-TEST_F(LegalizerHelperTest, LowerBitCountingCTLZLibcall) {
+TEST_F(GISelMITest, LowerBitCountingCTLZLibcall) {
   if (!TM)
     return;
 
@@ -178,7 +179,7 @@
       B.buildInstr(TargetOpcode::G_CTLZ, {LLT::scalar(64)}, {Copies[0]});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.lower(*MIBCTLZ, 0, LLT::scalar(64)) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -195,7 +196,7 @@
 }
 
 // CTLZ expansion
-TEST_F(LegalizerHelperTest, LowerBitCountingCTLZ1) {
+TEST_F(GISelMITest, LowerBitCountingCTLZ1) {
   if (!TM)
     return;
 
@@ -209,7 +210,7 @@
   auto MIBCTLZ = B.buildInstr(TargetOpcode::G_CTLZ, {s8}, {MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.lower(*MIBCTLZ, 0, s8) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -234,7 +235,7 @@
 }
 
 // CTLZ widening.
-TEST_F(LegalizerHelperTest, WidenBitCountingCTLZ) {
+TEST_F(GISelMITest, WidenBitCountingCTLZ) {
   if (!TM)
     return;
 
@@ -249,7 +250,7 @@
   auto MIBCTLZ = B.buildInstr(TargetOpcode::G_CTLZ, {s8}, {MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.widenScalar(*MIBCTLZ, 0, s16) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -267,7 +268,7 @@
 }
 
 // CTLZ_ZERO_UNDEF widening.
-TEST_F(LegalizerHelperTest, WidenBitCountingCTLZZeroUndef) {
+TEST_F(GISelMITest, WidenBitCountingCTLZZeroUndef) {
   if (!TM)
     return;
 
@@ -283,7 +284,7 @@
       B.buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF, {s8}, {MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.widenScalar(*MIBCTLZ_ZU, 0, s16) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -301,7 +302,7 @@
 }
 
 // CTPOP widening.
-TEST_F(LegalizerHelperTest, WidenBitCountingCTPOP) {
+TEST_F(GISelMITest, WidenBitCountingCTPOP) {
   if (!TM)
     return;
 
@@ -316,7 +317,7 @@
   auto MIBCTPOP = B.buildInstr(TargetOpcode::G_CTPOP, {s8}, {MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.widenScalar(*MIBCTPOP, 0, s16) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -332,7 +333,7 @@
 }
 
 // CTTZ_ZERO_UNDEF widening.
-TEST_F(LegalizerHelperTest, WidenBitCountingCTTZ_ZERO_UNDEF) {
+TEST_F(GISelMITest, WidenBitCountingCTTZ_ZERO_UNDEF) {
   if (!TM)
     return;
 
@@ -348,7 +349,7 @@
       B.buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF, {s8}, {MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.widenScalar(*MIBCTTZ_ZERO_UNDEF, 0, s16) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -364,7 +365,7 @@
 }
 
 // CTTZ widening.
-TEST_F(LegalizerHelperTest, WidenBitCountingCTTZ) {
+TEST_F(GISelMITest, WidenBitCountingCTTZ) {
   if (!TM)
     return;
 
@@ -379,7 +380,7 @@
   auto MIBCTTZ = B.buildInstr(TargetOpcode::G_CTTZ, {s8}, {MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.widenScalar(*MIBCTTZ, 0, s16) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -396,7 +397,7 @@
   ASSERT_TRUE(CheckMachineFunction(*MF, CheckStr));
 }
 // UADDO widening.
-TEST_F(LegalizerHelperTest, WidenUADDO) {
+TEST_F(GISelMITest, WidenUADDO) {
   if (!TM)
     return;
 
@@ -413,7 +414,7 @@
       B.buildInstr(TargetOpcode::G_UADDO, {s8, CarryReg}, {MIBTrunc, MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.widenScalar(*MIBUAddO, 0, s16) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
@@ -433,7 +434,7 @@
 }
 
 // USUBO widening.
-TEST_F(LegalizerHelperTest, WidenUSUBO) {
+TEST_F(GISelMITest, WidenUSUBO) {
   if (!TM)
     return;
 
@@ -450,7 +451,7 @@
       B.buildInstr(TargetOpcode::G_USUBO, {s8, CarryReg}, {MIBTrunc, MIBTrunc});
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
-  LegalizerHelper Helper(*MF, Info, Observer);
+  LegalizerHelper Helper(*MF, Info, Observer, B);
   ASSERT_TRUE(Helper.widenScalar(*MIBUSUBO, 0, s16) ==
               LegalizerHelper::LegalizeResult::Legalized);
 
diff --git a/unittests/CodeGen/GlobalISel/LegalizerHelperTest.h b/unittests/CodeGen/GlobalISel/LegalizerHelperTest.h
deleted file mode 100644
index 0a171a7..0000000
--- a/unittests/CodeGen/GlobalISel/LegalizerHelperTest.h
+++ /dev/null
@@ -1,192 +0,0 @@
-//===- LegalizerHelperTest.h
-//-----------------------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
-#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
-#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
-#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
-#include "llvm/CodeGen/GlobalISel/Utils.h"
-#include "llvm/CodeGen/MIRParser/MIRParser.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/TargetFrameLowering.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
-#include "llvm/CodeGen/TargetLowering.h"
-#include "llvm/CodeGen/TargetSubtargetInfo.h"
-#include "llvm/Support/FileCheck.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/TargetSelect.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "gtest/gtest.h"
-
-using namespace llvm;
-using namespace MIPatternMatch;
-
-void initLLVM() {
-  InitializeAllTargets();
-  InitializeAllTargetMCs();
-  InitializeAllAsmPrinters();
-  InitializeAllAsmParsers();
-
-  PassRegistry *Registry = PassRegistry::getPassRegistry();
-  initializeCore(*Registry);
-  initializeCodeGen(*Registry);
-}
-
-/// Create a TargetMachine. As we lack a dedicated always available target for
-/// unittests, we go for "AArch64".
-std::unique_ptr<LLVMTargetMachine> createTargetMachine() {
-  Triple TargetTriple("aarch64--");
-  std::string Error;
-  const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
-  if (!T)
-    return nullptr;
-
-  TargetOptions Options;
-  return std::unique_ptr<LLVMTargetMachine>(static_cast<LLVMTargetMachine*>(
-      T->createTargetMachine("AArch64", "", "", Options, None, None,
-                             CodeGenOpt::Aggressive)));
-}
-
-std::unique_ptr<Module> parseMIR(LLVMContext &Context,
-                                 std::unique_ptr<MIRParser> &MIR,
-                                 const TargetMachine &TM, StringRef MIRCode,
-                                 const char *FuncName, MachineModuleInfo &MMI) {
-  SMDiagnostic Diagnostic;
-  std::unique_ptr<MemoryBuffer> MBuffer = MemoryBuffer::getMemBuffer(MIRCode);
-  MIR = createMIRParser(std::move(MBuffer), Context);
-  if (!MIR)
-    return nullptr;
-
-  std::unique_ptr<Module> M = MIR->parseIRModule();
-  if (!M)
-    return nullptr;
-
-  M->setDataLayout(TM.createDataLayout());
-
-  if (MIR->parseMachineFunctions(*M, MMI))
-    return nullptr;
-
-  return M;
-}
-
-std::pair<std::unique_ptr<Module>, std::unique_ptr<MachineModuleInfo>>
-createDummyModule(LLVMContext &Context, const LLVMTargetMachine &TM,
-                  StringRef MIRFunc) {
-  SmallString<512> S;
-  StringRef MIRString = (Twine(R"MIR(
----
-...
-name: func
-registers:
-  - { id: 0, class: _ }
-  - { id: 1, class: _ }
-  - { id: 2, class: _ }
-  - { id: 3, class: _ }
-body: |
-  bb.1:
-    %0(s64) = COPY $x0
-    %1(s64) = COPY $x1
-    %2(s64) = COPY $x2
-)MIR") + Twine(MIRFunc) + Twine("...\n"))
-                            .toNullTerminatedStringRef(S);
-  std::unique_ptr<MIRParser> MIR;
-  auto MMI = make_unique<MachineModuleInfo>(&TM);
-  std::unique_ptr<Module> M =
-      parseMIR(Context, MIR, TM, MIRString, "func", *MMI);
-  return make_pair(std::move(M), std::move(MMI));
-}
-
-static MachineFunction *getMFFromMMI(const Module *M,
-                                     const MachineModuleInfo *MMI) {
-  Function *F = M->getFunction("func");
-  auto *MF = MMI->getMachineFunction(*F);
-  return MF;
-}
-
-static void collectCopies(SmallVectorImpl<unsigned> &Copies,
-                          MachineFunction *MF) {
-  for (auto &MBB : *MF)
-    for (MachineInstr &MI : MBB) {
-      if (MI.getOpcode() == TargetOpcode::COPY)
-        Copies.push_back(MI.getOperand(0).getReg());
-    }
-}
-
-class LegalizerHelperTest : public ::testing::Test {
-protected:
-  LegalizerHelperTest() : ::testing::Test() {
-    TM = createTargetMachine();
-    if (!TM)
-      return;
-    ModuleMMIPair = createDummyModule(Context, *TM, "");
-    MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
-    collectCopies(Copies, MF);
-    EntryMBB = &*MF->begin();
-    B.setMF(*MF);
-    MRI = &MF->getRegInfo();
-    B.setInsertPt(*EntryMBB, EntryMBB->end());
-  }
-  LLVMContext Context;
-  std::unique_ptr<LLVMTargetMachine> TM;
-  MachineFunction *MF;
-  std::pair<std::unique_ptr<Module>, std::unique_ptr<MachineModuleInfo>>
-      ModuleMMIPair;
-  SmallVector<unsigned, 4> Copies;
-  MachineBasicBlock *EntryMBB;
-  MachineIRBuilder B;
-  MachineRegisterInfo *MRI;
-};
-
-#define DefineLegalizerInfo(Name, SettingUpActionsBlock)                       \
-  class Name##Info : public LegalizerInfo {                                    \
-  public:                                                                      \
-    Name##Info(const TargetSubtargetInfo &ST) {                                \
-      using namespace TargetOpcode;                                            \
-      const LLT s8 = LLT::scalar(8);                                           \
-      (void)s8;                                                                \
-      const LLT s16 = LLT::scalar(16);                                         \
-      (void)s16;                                                               \
-      const LLT s32 = LLT::scalar(32);                                         \
-      (void)s32;                                                               \
-      const LLT s64 = LLT::scalar(64);                                         \
-      (void)s64;                                                               \
-      do                                                                       \
-        SettingUpActionsBlock while (0);                                       \
-      computeTables();                                                         \
-      verify(*ST.getInstrInfo());                                              \
-    }                                                                          \
-  };
-
-static bool CheckMachineFunction(const MachineFunction &MF,
-                                 StringRef CheckStr) {
-  SmallString<512> Msg;
-  raw_svector_ostream OS(Msg);
-  MF.print(OS);
-  auto OutputBuf = MemoryBuffer::getMemBuffer(Msg, "Output", false);
-  auto CheckBuf = MemoryBuffer::getMemBuffer(CheckStr, "");
-  SmallString<4096> CheckFileBuffer;
-  FileCheckRequest Req;
-  FileCheck FC(Req);
-  StringRef CheckFileText =
-      FC.CanonicalizeFile(*CheckBuf.get(), CheckFileBuffer);
-  SourceMgr SM;
-  SM.AddNewSourceBuffer(MemoryBuffer::getMemBuffer(CheckFileText, "CheckFile"),
-                        SMLoc());
-  Regex PrefixRE = FC.buildCheckPrefixRegex();
-  std::vector<FileCheckString> CheckStrings;
-  FC.ReadCheckFile(SM, CheckFileText, PrefixRE, CheckStrings);
-  auto OutBuffer = OutputBuf->getBuffer();
-  SM.AddNewSourceBuffer(std::move(OutputBuf), SMLoc());
-  return FC.CheckInput(SM, OutBuffer, CheckStrings);
-}
diff --git a/unittests/ExecutionEngine/MCJIT/MCJITMultipleModuleTest.cpp b/unittests/ExecutionEngine/MCJIT/MCJITMultipleModuleTest.cpp
index 1226bba..856ae45 100644
--- a/unittests/ExecutionEngine/MCJIT/MCJITMultipleModuleTest.cpp
+++ b/unittests/ExecutionEngine/MCJIT/MCJITMultipleModuleTest.cpp
@@ -175,7 +175,7 @@
   std::unique_ptr<Module> A, B;
   Function *FA1, *FA2, *FB;
   createTwoModuleExternCase(A, FA1, B, FB);
-  FA2 = insertSimpleCallFunction<int32_t(int32_t, int32_t)>(A.get(), FA1);
+  FA2 = insertSimpleCallFunction(A.get(), FA1);
 
   createJIT(std::move(A));
   TheJIT->addModule(std::move(B));
@@ -203,15 +203,18 @@
   std::unique_ptr<Module> A, B;
   Function *FA, *FB;
   GlobalVariable *GVA, *GVB, *GVC;
+
   A.reset(createEmptyModule("A"));
   B.reset(createEmptyModule("B"));
 
   int32_t initialNum = 7;
   GVA = insertGlobalInt32(A.get(), "GVA", initialNum);
   GVB = insertGlobalInt32(B.get(), "GVB", initialNum);
-  FA = startFunction<int32_t(void)>(A.get(), "FA");
+  FA = startFunction(A.get(),
+                     FunctionType::get(Builder.getInt32Ty(), {}, false), "FA");
   endFunctionWithRet(FA, Builder.CreateLoad(GVA));
-  FB = startFunction<int32_t(void)>(B.get(), "FB");
+  FB = startFunction(B.get(),
+                     FunctionType::get(Builder.getInt32Ty(), {}, false), "FB");
   endFunctionWithRet(FB, Builder.CreateLoad(GVB));
 
   GVC = insertGlobalInt32(B.get(), "GVC", initialNum);
diff --git a/unittests/ExecutionEngine/MCJIT/MCJITTest.cpp b/unittests/ExecutionEngine/MCJIT/MCJITTest.cpp
index e7da75a..8972fb6 100644
--- a/unittests/ExecutionEngine/MCJIT/MCJITTest.cpp
+++ b/unittests/ExecutionEngine/MCJIT/MCJITTest.cpp
@@ -99,8 +99,9 @@
   int32_t initialNum = 7;
   GlobalVariable *GV = insertGlobalInt32(M.get(), "myglob", initialNum);
 
-  Function *ReturnGlobal = startFunction<int32_t(void)>(M.get(),
-                                                        "ReturnGlobal");
+  Function *ReturnGlobal =
+      startFunction(M.get(), FunctionType::get(Builder.getInt32Ty(), {}, false),
+                    "ReturnGlobal");
   Value *ReadGlobal = Builder.CreateLoad(GV);
   endFunctionWithRet(ReturnGlobal, ReadGlobal);
 
@@ -126,7 +127,10 @@
   SKIP_UNSUPPORTED_PLATFORM;
 
   int32_t initialNum = 5;
-  Function *IncrementGlobal = startFunction<int32_t(void)>(M.get(), "IncrementGlobal");
+  Function *IncrementGlobal = startFunction(
+      M.get(),
+      FunctionType::get(Builder.getInt32Ty(), {}, false),
+      "IncrementGlobal");
   GlobalVariable *GV = insertGlobalInt32(M.get(), "my_global", initialNum);
   Value *DerefGV = Builder.CreateLoad(GV);
   Value *AddResult = Builder.CreateAdd(DerefGV,
@@ -161,14 +165,17 @@
   unsigned int numLevels = 23;
   int32_t innerRetVal= 5;
 
-  Function *Inner = startFunction<int32_t(void)>(M.get(), "Inner");
+  Function *Inner = startFunction(
+      M.get(), FunctionType::get(Builder.getInt32Ty(), {}, false), "Inner");
   endFunctionWithRet(Inner, ConstantInt::get(Context, APInt(32, innerRetVal)));
 
   Function *Outer;
   for (unsigned int i = 0; i < numLevels; ++i) {
     std::stringstream funcName;
     funcName << "level_" << i;
-    Outer = startFunction<int32_t(void)>(M.get(), funcName.str());
+    Outer = startFunction(M.get(),
+                          FunctionType::get(Builder.getInt32Ty(), {}, false),
+                          funcName.str());
     Value *innerResult = Builder.CreateCall(Inner, {});
     endFunctionWithRet(Outer, innerResult);
 
@@ -190,7 +197,8 @@
 TEST_F(MCJITTest, multiple_decl_lookups) {
   SKIP_UNSUPPORTED_PLATFORM;
 
-  Function *Foo = insertExternalReferenceToFunction<void(void)>(M.get(), "_exit");
+  Function *Foo = insertExternalReferenceToFunction(
+      M.get(), FunctionType::get(Builder.getVoidTy(), {}, false), "_exit");
   createJIT(std::move(M));
   void *A = TheJIT->getPointerToFunction(Foo);
   void *B = TheJIT->getPointerToFunction(Foo);
@@ -203,10 +211,12 @@
 
 TEST_F(MCJITTest, lazy_function_creator_pointer) {
   SKIP_UNSUPPORTED_PLATFORM;
-  
-  Function *Foo = insertExternalReferenceToFunction<int32_t(void)>(M.get(),
-                                                                   "\1Foo");
-  startFunction<int32_t(void)>(M.get(), "Parent");
+
+  Function *Foo = insertExternalReferenceToFunction(
+      M.get(), FunctionType::get(Builder.getInt32Ty(), {}, false),
+      "\1Foo");
+  startFunction(M.get(), FunctionType::get(Builder.getInt32Ty(), {}, false),
+                "Parent");
   CallInst *Call = Builder.CreateCall(Foo, {});
   Builder.CreateRet(Call);
   
@@ -240,12 +250,14 @@
 
 TEST_F(MCJITTest, lazy_function_creator_lambda) {
   SKIP_UNSUPPORTED_PLATFORM;
-  
-  Function *Foo1 = insertExternalReferenceToFunction<int32_t(void)>(M.get(),
-                                                                   "\1Foo1");
-  Function *Foo2 = insertExternalReferenceToFunction<int32_t(void)>(M.get(),
-                                                                   "\1Foo2");
-  startFunction<int32_t(void)>(M.get(), "Parent");
+
+  FunctionType *Int32VoidFnTy =
+      FunctionType::get(Builder.getInt32Ty(), {}, false);
+  Function *Foo1 =
+      insertExternalReferenceToFunction(M.get(), Int32VoidFnTy, "\1Foo1");
+  Function *Foo2 =
+      insertExternalReferenceToFunction(M.get(), Int32VoidFnTy, "\1Foo2");
+  startFunction(M.get(), Int32VoidFnTy, "Parent");
   CallInst *Call1 = Builder.CreateCall(Foo1, {});
   CallInst *Call2 = Builder.CreateCall(Foo2, {});
   Value *Result = Builder.CreateAdd(Call1, Call2);
diff --git a/unittests/ExecutionEngine/MCJIT/MCJITTestBase.h b/unittests/ExecutionEngine/MCJIT/MCJITTestBase.h
index a768920..50a57f1 100644
--- a/unittests/ExecutionEngine/MCJIT/MCJITTestBase.h
+++ b/unittests/ExecutionEngine/MCJIT/MCJITTestBase.h
@@ -24,7 +24,7 @@
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
-#include "llvm/IR/TypeBuilder.h"
+#include "llvm/IR/Type.h"
 #include "llvm/Support/CodeGen.h"
 
 namespace llvm {
@@ -45,11 +45,9 @@
     return M;
   }
 
-  template<typename FuncType>
-  Function *startFunction(Module *M, StringRef Name) {
-    Function *Result = Function::Create(
-      TypeBuilder<FuncType, false>::get(Context),
-      GlobalValue::ExternalLinkage, Name, M);
+  Function *startFunction(Module *M, FunctionType *FT, StringRef Name) {
+    Function *Result =
+        Function::Create(FT, GlobalValue::ExternalLinkage, Name, M);
 
     BasicBlock *BB = BasicBlock::Create(Context, Name, Result);
     Builder.SetInsertPoint(BB);
@@ -63,9 +61,8 @@
 
   // Inserts a simple function that invokes Callee and takes the same arguments:
   //    int Caller(...) { return Callee(...); }
-  template<typename Signature>
   Function *insertSimpleCallFunction(Module *M, Function *Callee) {
-    Function *Result = startFunction<Signature>(M, "caller");
+    Function *Result = startFunction(M, Callee->getFunctionType(), "caller");
 
     SmallVector<Value*, 1> CallArgs;
 
@@ -81,7 +78,8 @@
   //    int32_t main() { return X; }
   // where X is given by returnCode
   Function *insertMainFunction(Module *M, uint32_t returnCode) {
-    Function *Result = startFunction<int32_t(void)>(M, "main");
+    Function *Result = startFunction(
+        M, FunctionType::get(Type::getInt32Ty(Context), {}, false), "main");
 
     Value *ReturnVal = ConstantInt::get(Context, APInt(32, returnCode));
     endFunctionWithRet(Result, ReturnVal);
@@ -93,7 +91,12 @@
   //    int32_t add(int32_t a, int32_t b) { return a + b; }
   // in the current module and returns a pointer to it.
   Function *insertAddFunction(Module *M, StringRef Name = "add") {
-    Function *Result = startFunction<int32_t(int32_t, int32_t)>(M, Name);
+    Function *Result = startFunction(
+        M,
+        FunctionType::get(
+            Type::getInt32Ty(Context),
+            {Type::getInt32Ty(Context), Type::getInt32Ty(Context)}, false),
+        Name);
 
     Function::arg_iterator args = Result->arg_begin();
     Value *Arg1 = &*args;
@@ -106,20 +109,10 @@
   }
 
   // Inserts a declaration to a function defined elsewhere
-  template <typename FuncType>
-  Function *insertExternalReferenceToFunction(Module *M, StringRef Name) {
-    Function *Result = Function::Create(
-                         TypeBuilder<FuncType, false>::get(Context),
-                         GlobalValue::ExternalLinkage, Name, M);
-    return Result;
-  }
-
-  // Inserts an declaration to a function defined elsewhere
-  Function *insertExternalReferenceToFunction(Module *M, StringRef Name,
-                                              FunctionType *FuncTy) {
-    Function *Result = Function::Create(FuncTy,
-                                        GlobalValue::ExternalLinkage,
-                                        Name, M);
+  Function *insertExternalReferenceToFunction(Module *M, FunctionType *FTy,
+                                              StringRef Name) {
+    Function *Result =
+        Function::Create(FTy, GlobalValue::ExternalLinkage, Name, M);
     return Result;
   }
 
@@ -136,7 +129,7 @@
   GlobalVariable *insertGlobalInt32(Module *M,
                                     StringRef name,
                                     int32_t InitialValue) {
-    Type *GlobalTy = TypeBuilder<types::i<32>, true>::get(Context);
+    Type *GlobalTy = Type::getInt32Ty(Context);
     Constant *IV = ConstantInt::get(Context, APInt(32, InitialValue));
     GlobalVariable *Global = new GlobalVariable(*M,
                                                 GlobalTy,
@@ -160,7 +153,11 @@
   Function *insertAccumulateFunction(Module *M,
                                      Function *Helper = nullptr,
                                      StringRef Name = "accumulate") {
-    Function *Result = startFunction<int32_t(int32_t)>(M, Name);
+    Function *Result =
+        startFunction(M,
+                      FunctionType::get(Type::getInt32Ty(Context),
+                                        {Type::getInt32Ty(Context)}, false),
+                      Name);
     if (!Helper)
       Helper = Result;
 
@@ -225,11 +222,11 @@
 
     B.reset(createEmptyModule("B"));
     Function *FAExtern_in_B = insertExternalReferenceToFunction(B.get(), FA);
-    FB = insertSimpleCallFunction<int32_t(int32_t, int32_t)>(B.get(), FAExtern_in_B);
+    FB = insertSimpleCallFunction(B.get(), FAExtern_in_B);
 
     C.reset(createEmptyModule("C"));
     Function *FBExtern_in_C = insertExternalReferenceToFunction(C.get(), FB);
-    FC = insertSimpleCallFunction<int32_t(int32_t, int32_t)>(C.get(), FBExtern_in_C);
+    FC = insertSimpleCallFunction(C.get(), FBExtern_in_C);
   }
 
   // Module A { Function FA },
@@ -253,8 +250,7 @@
 
     B.reset(createEmptyModule("B"));
     Function *FAExtern_in_B = insertExternalReferenceToFunction(B.get(), FA);
-    FB = insertSimpleCallFunction<int32_t(int32_t, int32_t)>(B.get(),
-                                                             FAExtern_in_B);
+    FB = insertSimpleCallFunction(B.get(), FAExtern_in_B);
   }
 
   // Module A { Function FA },
@@ -268,11 +264,11 @@
 
     B.reset(createEmptyModule("B"));
     Function *FAExtern_in_B = insertExternalReferenceToFunction(B.get(), FA);
-    FB = insertSimpleCallFunction<int32_t(int32_t, int32_t)>(B.get(), FAExtern_in_B);
+    FB = insertSimpleCallFunction(B.get(), FAExtern_in_B);
 
     C.reset(createEmptyModule("C"));
     Function *FAExtern_in_C = insertExternalReferenceToFunction(C.get(), FA);
-    FC = insertSimpleCallFunction<int32_t(int32_t, int32_t)>(C.get(), FAExtern_in_C);
+    FC = insertSimpleCallFunction(C.get(), FAExtern_in_C);
   }
 };
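The MCJIT and Orc hunks around this point are all one migration: llvm/IR/TypeBuilder.h is being removed, so helpers that took a C++ function type as a template parameter now take an explicit llvm::FunctionType. The recipe for a signature like int32_t(int32_t, int32_t):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    static FunctionType *int32BinOpTy(LLVMContext &Context) {
      // Was: TypeBuilder<int32_t(int32_t, int32_t), false>::get(Context)
      Type *I32 = Type::getInt32Ty(Context);
      return FunctionType::get(I32, {I32, I32}, /*isVarArg=*/false);
    }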
 
diff --git a/unittests/ExecutionEngine/Orc/IndirectionUtilsTest.cpp b/unittests/ExecutionEngine/Orc/IndirectionUtilsTest.cpp
index ed42544..1dfa0a1 100644
--- a/unittests/ExecutionEngine/Orc/IndirectionUtilsTest.cpp
+++ b/unittests/ExecutionEngine/Orc/IndirectionUtilsTest.cpp
@@ -19,7 +19,10 @@
 TEST(IndirectionUtilsTest, MakeStub) {
   LLVMContext Context;
   ModuleBuilder MB(Context, "x86_64-apple-macosx10.10", "");
-  Function *F = MB.createFunctionDecl<void(DummyStruct, DummyStruct)>("");
+  FunctionType *FTy = FunctionType::get(
+      Type::getVoidTy(Context),
+      {getDummyStructTy(Context), getDummyStructTy(Context)}, false);
+  Function *F = MB.createFunctionDecl(FTy, "");
   AttributeSet FnAttrs = AttributeSet::get(
       Context, AttrBuilder().addAttribute(Attribute::NoUnwind));
   AttributeSet RetAttrs; // None
diff --git a/unittests/ExecutionEngine/Orc/LegacyRTDyldObjectLinkingLayerTest.cpp b/unittests/ExecutionEngine/Orc/LegacyRTDyldObjectLinkingLayerTest.cpp
index 8c9c958..b3696c6 100644
--- a/unittests/ExecutionEngine/Orc/LegacyRTDyldObjectLinkingLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/LegacyRTDyldObjectLinkingLayerTest.cpp
@@ -123,6 +123,8 @@
   if (!SupportsJIT)
     return;
 
+  Type *Int32Ty = IntegerType::get(Context, 32);
+
   ExecutionSession ES;
 
   auto MM = std::make_shared<SectionMemoryManagerWrapper>();
@@ -153,7 +155,8 @@
   ModuleBuilder MB1(Context, "", "dummy");
   {
     MB1.getModule()->setDataLayout(TM->createDataLayout());
-    Function *BarImpl = MB1.createFunctionDecl<int32_t(void)>("bar");
+    Function *BarImpl =
+        MB1.createFunctionDecl(FunctionType::get(Int32Ty, {}, false), "bar");
     BasicBlock *BarEntry = BasicBlock::Create(Context, "entry", BarImpl);
     IRBuilder<> Builder(BarEntry);
     IntegerType *Int32Ty = IntegerType::get(Context, 32);
@@ -166,8 +169,10 @@
   ModuleBuilder MB2(Context, "", "dummy");
   {
     MB2.getModule()->setDataLayout(TM->createDataLayout());
-    Function *BarDecl = MB2.createFunctionDecl<int32_t(void)>("bar");
-    Function *FooImpl = MB2.createFunctionDecl<int32_t(void)>("foo");
+    Function *BarDecl =
+        MB2.createFunctionDecl(FunctionType::get(Int32Ty, {}, false), "bar");
+    Function *FooImpl =
+        MB2.createFunctionDecl(FunctionType::get(Int32Ty, {}, false), "foo");
     BasicBlock *FooEntry = BasicBlock::Create(Context, "entry", FooImpl);
     IRBuilder<> Builder(FooEntry);
     Builder.CreateRet(Builder.CreateCall(BarDecl));
@@ -207,6 +212,8 @@
   if (!SupportsJIT)
     return;
 
+  Type *Int32Ty = IntegerType::get(Context, 32);
+
   ExecutionSession ES;
 
   auto MM = std::make_shared<SectionMemoryManagerWrapper>();
@@ -233,7 +240,8 @@
   ModuleBuilder MB1(Context, "", "dummy");
   {
     MB1.getModule()->setDataLayout(TM->createDataLayout());
-    Function *BarImpl = MB1.createFunctionDecl<int32_t(void)>("foo");
+    Function *BarImpl =
+        MB1.createFunctionDecl(FunctionType::get(Int32Ty, {}, false), "foo");
     BasicBlock *BarEntry = BasicBlock::Create(Context, "entry", BarImpl);
     IRBuilder<> Builder(BarEntry);
     IntegerType *Int32Ty = IntegerType::get(Context, 32);
@@ -246,7 +254,8 @@
   ModuleBuilder MB2(Context, "", "dummy");
   {
     MB2.getModule()->setDataLayout(TM->createDataLayout());
-    Function *BarImpl = MB2.createFunctionDecl<int32_t(void)>("bar");
+    Function *BarImpl =
+        MB2.createFunctionDecl(FunctionType::get(Int32Ty, {}, false), "bar");
     BasicBlock *BarEntry = BasicBlock::Create(Context, "entry", BarImpl);
     IRBuilder<> Builder(BarEntry);
     IntegerType *Int32Ty = IntegerType::get(Context, 32);
diff --git a/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp b/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp
index b288b6b..54d8156 100644
--- a/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp
+++ b/unittests/ExecutionEngine/Orc/OrcCAPITest.cpp
@@ -27,8 +27,15 @@
 protected:
   std::unique_ptr<Module> createTestModule(const Triple &TT) {
     ModuleBuilder MB(Context, TT.str(), "");
-    Function *TestFunc = MB.createFunctionDecl<int()>("testFunc");
-    Function *Main = MB.createFunctionDecl<int(int, char*[])>("main");
+    Type *IntTy = Type::getScalarTy<int>(Context);
+    Function *TestFunc =
+        MB.createFunctionDecl(FunctionType::get(IntTy, {}, false), "testFunc");
+    Function *Main = MB.createFunctionDecl(
+        FunctionType::get(
+            IntTy,
+            {IntTy, Type::getInt8PtrTy(Context)->getPointerTo()},
+            false),
+        "main");
 
     Main->getBasicBlockList().push_back(BasicBlock::Create(Context));
     IRBuilder<> B(&Main->back());
diff --git a/unittests/ExecutionEngine/Orc/OrcTestCommon.h b/unittests/ExecutionEngine/Orc/OrcTestCommon.h
index e76d2fa..e25c513 100644
--- a/unittests/ExecutionEngine/Orc/OrcTestCommon.h
+++ b/unittests/ExecutionEngine/Orc/OrcTestCommon.h
@@ -22,7 +22,6 @@
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
-#include "llvm/IR/TypeBuilder.h"
 #include "llvm/Object/ObjectFile.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/TargetSelect.h"
@@ -169,11 +168,8 @@
   ModuleBuilder(LLVMContext &Context, StringRef Triple,
                 StringRef Name);
 
-  template <typename FuncType>
-  Function* createFunctionDecl(StringRef Name) {
-    return Function::Create(
-             TypeBuilder<FuncType, false>::get(M->getContext()),
-             GlobalValue::ExternalLinkage, Name, M.get());
+  Function *createFunctionDecl(FunctionType *FTy, StringRef Name) {
+    return Function::Create(FTy, GlobalValue::ExternalLinkage, Name, M.get());
   }
 
   Module* getModule() { return M.get(); }
@@ -189,15 +185,9 @@
   int X[256];
 };
 
-// TypeBuilder specialization for DummyStruct.
-template <bool XCompile>
-class TypeBuilder<DummyStruct, XCompile> {
-public:
-  static StructType *get(LLVMContext &Context) {
-    return StructType::get(
-        TypeBuilder<types::i<32>[256], XCompile>::get(Context));
-  }
-};
+inline StructType *getDummyStructTy(LLVMContext &Context) {
+  return StructType::get(ArrayType::get(Type::getInt32Ty(Context), 256));
+}
 
 template <typename HandleT, typename ModuleT>
 class MockBaseLayer {
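
With the template helper gone, ModuleBuilder clients now pass a FunctionType directly, and the TypeBuilder<DummyStruct> specialization collapses into the plain getDummyStructTy helper above. A short usage sketch of the two replacements:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  // What TypeBuilder<DummyStruct>::get(Ctx) used to produce:
  StructType *STy =
      StructType::get(ArrayType::get(Type::getInt32Ty(Ctx), 256));
  // A declaration returning DummyStruct is now requested as:
  FunctionType *FTy = FunctionType::get(STy, {}, false);
  (void)FTy;
}
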
diff --git a/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp b/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp
index 6b1dbe9..2db237f 100644
--- a/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/RTDyldObjectLinkingLayerTest.cpp
@@ -131,13 +131,17 @@
     ModuleBuilder MB(*TSCtx.getContext(), TM->getTargetTriple().str(), "dummy");
     MB.getModule()->setDataLayout(TM->createDataLayout());
 
-    Function *FooImpl = MB.createFunctionDecl<void()>("foo");
+    Function *FooImpl = MB.createFunctionDecl(
+        FunctionType::get(Type::getVoidTy(*TSCtx.getContext()), {}, false),
+        "foo");
     BasicBlock *FooEntry =
         BasicBlock::Create(*TSCtx.getContext(), "entry", FooImpl);
     IRBuilder<> B1(FooEntry);
     B1.CreateRetVoid();
 
-    Function *BarImpl = MB.createFunctionDecl<void()>("bar");
+    Function *BarImpl = MB.createFunctionDecl(
+        FunctionType::get(Type::getVoidTy(*TSCtx.getContext()), {}, false),
+        "bar");
     BasicBlock *BarEntry =
         BasicBlock::Create(*TSCtx.getContext(), "entry", BarImpl);
     IRBuilder<> B2(BarEntry);
@@ -181,9 +185,9 @@
     FunkySimpleCompiler(TargetMachine &TM) : SimpleCompiler(TM) {}
 
     CompileResult operator()(Module &M) {
-      Function *BarImpl =
-          Function::Create(TypeBuilder<void(), false>::get(M.getContext()),
-                           GlobalValue::ExternalLinkage, "bar", &M);
+      Function *BarImpl = Function::Create(
+          FunctionType::get(Type::getVoidTy(M.getContext()), {}, false),
+          GlobalValue::ExternalLinkage, "bar", &M);
       BasicBlock *BarEntry =
           BasicBlock::Create(M.getContext(), "entry", BarImpl);
       IRBuilder<> B(BarEntry);
@@ -200,7 +204,9 @@
     ModuleBuilder MB(*TSCtx.getContext(), TM->getTargetTriple().str(), "dummy");
     MB.getModule()->setDataLayout(TM->createDataLayout());
 
-    Function *FooImpl = MB.createFunctionDecl<void()>("foo");
+    Function *FooImpl = MB.createFunctionDecl(
+        FunctionType::get(Type::getVoidTy(*TSCtx.getContext()), {}, false),
+        "foo");
     BasicBlock *FooEntry =
         BasicBlock::Create(*TSCtx.getContext(), "entry", FooImpl);
     IRBuilder<> B(FooEntry);
diff --git a/unittests/ExecutionEngine/Orc/RemoteObjectLayerTest.cpp b/unittests/ExecutionEngine/Orc/RemoteObjectLayerTest.cpp
index 09224c2..4ffd741 100644
--- a/unittests/ExecutionEngine/Orc/RemoteObjectLayerTest.cpp
+++ b/unittests/ExecutionEngine/Orc/RemoteObjectLayerTest.cpp
@@ -95,7 +95,12 @@
   LLVMContext Ctx;
   ModuleBuilder MB(Ctx, TM->getTargetTriple().str(), "TestModule");
   MB.getModule()->setDataLayout(TM->createDataLayout());
-  auto *Main = MB.createFunctionDecl<void(int, char**)>("main");
+  auto *Main = MB.createFunctionDecl(
+      FunctionType::get(Type::getInt32Ty(Ctx),
+                        {Type::getInt32Ty(Ctx),
+                         Type::getInt8PtrTy(Ctx)->getPointerTo()},
+                        false),
+      "main");
   Main->getBasicBlockList().push_back(BasicBlock::Create(Ctx));
   IRBuilder<> B(&Main->back());
   B.CreateRet(ConstantInt::getSigned(Type::getInt32Ty(Ctx), 42));
diff --git a/unittests/IR/CFGBuilder.cpp b/unittests/IR/CFGBuilder.cpp
index 0f9fb8b..886ab8a 100644
--- a/unittests/IR/CFGBuilder.cpp
+++ b/unittests/IR/CFGBuilder.cpp
@@ -11,7 +11,6 @@
 
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/TypeBuilder.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "gtest/gtest.h"
@@ -23,7 +22,7 @@
 CFGHolder::CFGHolder(StringRef ModuleName, StringRef FunctionName)
     : Context(llvm::make_unique<LLVMContext>()),
       M(llvm::make_unique<Module>(ModuleName, *Context)) {
-  FunctionType *FTy = TypeBuilder<void(), false>::get(*Context);
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Context), {}, false);
   F = cast<Function>(M->getOrInsertFunction(FunctionName, FTy));
 }
 CFGHolder::~CFGHolder() = default;
diff --git a/unittests/IR/CMakeLists.txt b/unittests/IR/CMakeLists.txt
index 7498983..f33835f 100644
--- a/unittests/IR/CMakeLists.txt
+++ b/unittests/IR/CMakeLists.txt
@@ -30,7 +30,6 @@
   ModuleTest.cpp
   PassManagerTest.cpp
   PatternMatch.cpp
-  TypeBuilderTest.cpp
   TypesTest.cpp
   UseTest.cpp
   UserTest.cpp
diff --git a/unittests/IR/MetadataTest.cpp b/unittests/IR/MetadataTest.cpp
index 3f744cd..883a582 100644
--- a/unittests/IR/MetadataTest.cpp
+++ b/unittests/IR/MetadataTest.cpp
@@ -981,6 +981,107 @@
   EXPECT_TRUE(L2->isTemporary());
 }
 
+TEST_F(DILocationTest, discriminatorEncoding) {
+  EXPECT_EQ(0U, DILocation::encodeDiscriminator(0, 0, 0).getValue());
+
+  // Encode base discriminator as a component: lsb is 0, then the value.
+  // The other components are all absent, so we leave all the other bits 0.
+  EXPECT_EQ(2U, DILocation::encodeDiscriminator(1, 0, 0).getValue());
+
+  // Base discriminator component is empty, so lsb is 1. Next component is not
+  // empty, so its lsb is 0, then its value (1). Next component is empty.
+  // So the bit pattern is 101.
+  EXPECT_EQ(5U, DILocation::encodeDiscriminator(0, 1, 0).getValue());
+
+  // First 2 components are empty, so the bit pattern is 11. Then the
+  // next component, ending up with 1011.
+  EXPECT_EQ(0xbU, DILocation::encodeDiscriminator(0, 0, 1).getValue());
+
+  // The bit pattern for the first 2 components is 11. The next bit is 0,
+  // because the last component is not empty. We have 29 bits usable for
+  // encoding, but we cap it at 12 bits uniformly for all components. We
+  // encode the last component over 14 bits.
+  EXPECT_EQ(0xfffbU, DILocation::encodeDiscriminator(0, 0, 0xfff).getValue());
+
+  EXPECT_EQ(0x102U, DILocation::encodeDiscriminator(1, 1, 0).getValue());
+
+  EXPECT_EQ(0x13eU, DILocation::encodeDiscriminator(0x1f, 1, 0).getValue());
+
+  EXPECT_EQ(0x87feU, DILocation::encodeDiscriminator(0x1ff, 1, 0).getValue());
+
+  EXPECT_EQ(0x1f3eU, DILocation::encodeDiscriminator(0x1f, 0x1f, 0).getValue());
+
+  EXPECT_EQ(0x3ff3eU,
+            DILocation::encodeDiscriminator(0x1f, 0x1ff, 0).getValue());
+
+  EXPECT_EQ(0x1ff87feU,
+            DILocation::encodeDiscriminator(0x1ff, 0x1ff, 0).getValue());
+
+  EXPECT_EQ(0xfff9f3eU,
+            DILocation::encodeDiscriminator(0x1f, 0x1f, 0xfff).getValue());
+
+  EXPECT_EQ(0xffc3ff3eU,
+            DILocation::encodeDiscriminator(0x1f, 0x1ff, 0x1ff).getValue());
+
+  EXPECT_EQ(0xffcf87feU,
+            DILocation::encodeDiscriminator(0x1ff, 0x1f, 0x1ff).getValue());
+
+  EXPECT_EQ(0xe1ff87feU,
+            DILocation::encodeDiscriminator(0x1ff, 0x1ff, 7).getValue());
+}
+
+TEST_F(DILocationTest, discriminatorEncodingNegativeTests) {
+  EXPECT_EQ(None, DILocation::encodeDiscriminator(0, 0, 0x1000));
+  EXPECT_EQ(None, DILocation::encodeDiscriminator(0x1000, 0, 0));
+  EXPECT_EQ(None, DILocation::encodeDiscriminator(0, 0x1000, 0));
+  EXPECT_EQ(None, DILocation::encodeDiscriminator(0, 0, 0x1000));
+  EXPECT_EQ(None, DILocation::encodeDiscriminator(0x1ff, 0x1ff, 8));
+  EXPECT_EQ(None,
+            DILocation::encodeDiscriminator(std::numeric_limits<uint32_t>::max(),
+                                            std::numeric_limits<uint32_t>::max(),
+                                            0));
+}
+
+TEST_F(DILocationTest, discriminatorSpecialCases) {
+  // We don't test getCopyIdentifier here because the only way
+  // to set it is by constructing an encoded discriminator using
+  // encodeDiscriminator, which is already tested.
+  auto L1 = DILocation::get(Context, 1, 2, getSubprogram());
+  EXPECT_EQ(0U, L1->getBaseDiscriminator());
+  EXPECT_EQ(1U, L1->getDuplicationFactor());
+
+  auto L2 = L1->setBaseDiscriminator(1).getValue();
+  EXPECT_EQ(0U, L1->getBaseDiscriminator());
+  EXPECT_EQ(1U, L1->getDuplicationFactor());
+
+  EXPECT_EQ(1U, L2->getBaseDiscriminator());
+  EXPECT_EQ(1U, L2->getDuplicationFactor());
+
+  auto L3 = L2->cloneWithDuplicationFactor(2).getValue();
+  EXPECT_EQ(1U, L3->getBaseDiscriminator());
+  EXPECT_EQ(2U, L3->getDuplicationFactor());
+
+  auto L4 = L3->cloneWithDuplicationFactor(4).getValue();
+  EXPECT_EQ(1U, L4->getBaseDiscriminator());
+  EXPECT_EQ(8U, L4->getDuplicationFactor());
+
+  auto L5 = L4->setBaseDiscriminator(2).getValue();
+  EXPECT_EQ(2U, L5->getBaseDiscriminator());
+  EXPECT_EQ(1U, L5->getDuplicationFactor());
+
+  // Check extreme cases
+  auto L6 = L1->setBaseDiscriminator(0xfff).getValue();
+  EXPECT_EQ(0xfffU, L6->getBaseDiscriminator());
+  EXPECT_EQ(
+      0xfffU,
+      L6->cloneWithDuplicationFactor(0xfff).getValue()->getDuplicationFactor());
+
+  // Check we return None for unencodable cases.
+  EXPECT_EQ(None, L4->setBaseDiscriminator(0x1000));
+  EXPECT_EQ(None, L4->cloneWithDuplicationFactor(0x1000));
+}
+
+
 typedef MetadataTest GenericDINodeTest;
 
 TEST_F(GenericDINodeTest, get) {
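
The encoding these expectations pin down packs three variable-width components into one word, low bits first: an empty component is a single 1 bit, while a present component is a 0 bit followed by a 5-bit chunk, a continuation bit, and, for values above 0x1f, a second 7-bit chunk, which is where the 12-bit cap and the 7/14-bit widths in the comments come from. The sketch below is reconstructed purely from the expected constants in these tests; the helper names are illustrative, not the upstream ones:

#include <cassert>
#include <cstdint>

// Bits 1-5 carry value bits 0-4; bit 6 says a high chunk follows; bits 7-13
// carry value bits 5-11 (so each component value is capped at 12 bits).
static unsigned prefixEncode(unsigned U) {
  return U > 0x1f ? (((U & 0xfe0) << 1) | (U & 0x1f) | 0x40) : U;
}

// An empty component is the single bit 1; a present one gets lsb 0.
static unsigned encodeComponent(unsigned C) {
  return C == 0 ? 1U : (prefixEncode(C) << 1);
}

// Width consumed in the word: 1 bit if empty, else 7 or 14 bits.
static unsigned componentWidth(unsigned C) {
  return C == 0 ? 1 : (C > 0x1f ? 14 : 7);
}

static unsigned encode(unsigned BD, unsigned DF, unsigned CI) {
  unsigned Components[3] = {BD, DF, CI};
  uint64_t Remaining = (uint64_t)BD + DF + CI; // drop trailing empties
  unsigned Ret = 0, Shift = 0;
  for (unsigned C : Components) {
    if (!Remaining)
      break;
    Remaining -= C;
    Ret |= encodeComponent(C) << Shift;
    Shift += componentWidth(C);
  }
  return Ret;
}

int main() {
  assert(encode(0, 1, 0) == 0x5);        // bit pattern 101 from the test
  assert(encode(0x1f, 1, 0) == 0x13e);   // one 7-bit chunk for the base
  assert(encode(0, 0, 0xfff) == 0xfffb); // last component over 14 bits
}
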
diff --git a/unittests/IR/TypeBuilderTest.cpp b/unittests/IR/TypeBuilderTest.cpp
deleted file mode 100644
index 9ba7765..0000000
--- a/unittests/IR/TypeBuilderTest.cpp
+++ /dev/null
@@ -1,284 +0,0 @@
-//===- llvm/unittest/TypeBuilderTest.cpp - TypeBuilder tests --------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/IR/TypeBuilder.h"
-#include "llvm/IR/LLVMContext.h"
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-TEST(TypeBuilderTest, Void) {
-  LLVMContext Context;
-  EXPECT_EQ(Type::getVoidTy(Context), (TypeBuilder<void, true>::get(Context)));
-  EXPECT_EQ(Type::getVoidTy(Context), (TypeBuilder<void, false>::get(Context)));
-  // Special cases for C compatibility:
-  EXPECT_EQ(Type::getInt8PtrTy(Context),
-            (TypeBuilder<void *, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8PtrTy(Context),
-            (TypeBuilder<const void *, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8PtrTy(Context),
-            (TypeBuilder<volatile void *, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8PtrTy(Context),
-            (TypeBuilder<const volatile void *, false>::get(Context)));
-}
-
-TEST(TypeBuilderTest, HostIntegers) {
-  LLVMContext Context;
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<int8_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<uint8_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt16Ty(Context),
-            (TypeBuilder<int16_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt16Ty(Context),
-            (TypeBuilder<uint16_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt32Ty(Context),
-            (TypeBuilder<int32_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt32Ty(Context),
-            (TypeBuilder<uint32_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt64Ty(Context),
-            (TypeBuilder<int64_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt64Ty(Context),
-            (TypeBuilder<uint64_t, false>::get(Context)));
-
-  EXPECT_EQ(IntegerType::get(Context, sizeof(size_t) * CHAR_BIT),
-            (TypeBuilder<size_t, false>::get(Context)));
-  EXPECT_EQ(IntegerType::get(Context, sizeof(ptrdiff_t) * CHAR_BIT),
-            (TypeBuilder<ptrdiff_t, false>::get(Context)));
-}
-
-TEST(TypeBuilderTest, CrossCompilableIntegers) {
-  LLVMContext Context;
-  EXPECT_EQ(IntegerType::get(Context, 1),
-            (TypeBuilder<types::i<1>, true>::get(Context)));
-  EXPECT_EQ(IntegerType::get(Context, 1),
-            (TypeBuilder<types::i<1>, false>::get(Context)));
-  EXPECT_EQ(IntegerType::get(Context, 72),
-            (TypeBuilder<types::i<72>, true>::get(Context)));
-  EXPECT_EQ(IntegerType::get(Context, 72),
-            (TypeBuilder<types::i<72>, false>::get(Context)));
-}
-
-TEST(TypeBuilderTest, Float) {
-  LLVMContext Context;
-  EXPECT_EQ(Type::getFloatTy(Context),
-            (TypeBuilder<float, false>::get(Context)));
-  EXPECT_EQ(Type::getDoubleTy(Context),
-            (TypeBuilder<double, false>::get(Context)));
-  // long double isn't supported yet.
-  EXPECT_EQ(Type::getFloatTy(Context),
-            (TypeBuilder<types::ieee_float, true>::get(Context)));
-  EXPECT_EQ(Type::getFloatTy(Context),
-            (TypeBuilder<types::ieee_float, false>::get(Context)));
-  EXPECT_EQ(Type::getDoubleTy(Context),
-            (TypeBuilder<types::ieee_double, true>::get(Context)));
-  EXPECT_EQ(Type::getDoubleTy(Context),
-            (TypeBuilder<types::ieee_double, false>::get(Context)));
-  EXPECT_EQ(Type::getX86_FP80Ty(Context),
-            (TypeBuilder<types::x86_fp80, true>::get(Context)));
-  EXPECT_EQ(Type::getX86_FP80Ty(Context),
-            (TypeBuilder<types::x86_fp80, false>::get(Context)));
-  EXPECT_EQ(Type::getFP128Ty(Context),
-            (TypeBuilder<types::fp128, true>::get(Context)));
-  EXPECT_EQ(Type::getFP128Ty(Context),
-            (TypeBuilder<types::fp128, false>::get(Context)));
-  EXPECT_EQ(Type::getPPC_FP128Ty(Context),
-            (TypeBuilder<types::ppc_fp128, true>::get(Context)));
-  EXPECT_EQ(Type::getPPC_FP128Ty(Context),
-            (TypeBuilder<types::ppc_fp128, false>::get(Context)));
-}
-
-TEST(TypeBuilderTest, Derived) {
-  LLVMContext Context;
-  EXPECT_EQ(PointerType::getUnqual(Type::getInt8PtrTy(Context)),
-            (TypeBuilder<int8_t **, false>::get(Context)));
-  EXPECT_EQ(ArrayType::get(Type::getInt8Ty(Context), 7),
-            (TypeBuilder<int8_t[7], false>::get(Context)));
-  EXPECT_EQ(ArrayType::get(Type::getInt8Ty(Context), 0),
-            (TypeBuilder<int8_t[], false>::get(Context)));
-
-  EXPECT_EQ(PointerType::getUnqual(Type::getInt8PtrTy(Context)),
-            (TypeBuilder<types::i<8> **, false>::get(Context)));
-  EXPECT_EQ(ArrayType::get(Type::getInt8Ty(Context), 7),
-            (TypeBuilder<types::i<8>[7], false>::get(Context)));
-  EXPECT_EQ(ArrayType::get(Type::getInt8Ty(Context), 0),
-            (TypeBuilder<types::i<8>[], false>::get(Context)));
-
-  EXPECT_EQ(PointerType::getUnqual(Type::getInt8PtrTy(Context)),
-            (TypeBuilder<types::i<8> **, true>::get(Context)));
-  EXPECT_EQ(ArrayType::get(Type::getInt8Ty(Context), 7),
-            (TypeBuilder<types::i<8>[7], true>::get(Context)));
-  EXPECT_EQ(ArrayType::get(Type::getInt8Ty(Context), 0),
-            (TypeBuilder<types::i<8>[], true>::get(Context)));
-
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<const int8_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<volatile int8_t, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<const volatile int8_t, false>::get(Context)));
-
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<const types::i<8>, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<volatile types::i<8>, false>::get(Context)));
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<const volatile types::i<8>, false>::get(Context)));
-
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<const types::i<8>, true>::get(Context)));
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<volatile types::i<8>, true>::get(Context)));
-  EXPECT_EQ(Type::getInt8Ty(Context),
-            (TypeBuilder<const volatile types::i<8>, true>::get(Context)));
-
-  EXPECT_EQ(Type::getInt8PtrTy(Context),
-            (TypeBuilder<const volatile int8_t *const volatile, false>::get(
-                Context)));
-}
-
-TEST(TypeBuilderTest, Functions) {
-  LLVMContext Context;
-  std::vector<Type*> params;
-  EXPECT_EQ(FunctionType::get(Type::getVoidTy(Context), params, false),
-            (TypeBuilder<void(), true>::get(Context)));
-  EXPECT_EQ(FunctionType::get(Type::getInt8Ty(Context), params, true),
-            (TypeBuilder<int8_t(...), false>::get(Context)));
-  params.push_back(TypeBuilder<int32_t *, false>::get(Context));
-  EXPECT_EQ(FunctionType::get(Type::getInt8Ty(Context), params, false),
-            (TypeBuilder<int8_t(const int32_t *), false>::get(Context)));
-  EXPECT_EQ(FunctionType::get(Type::getInt8Ty(Context), params, true),
-            (TypeBuilder<int8_t(const int32_t *, ...), false>::get(Context)));
-  params.push_back(TypeBuilder<char *, false>::get(Context));
-  EXPECT_EQ(FunctionType::get(Type::getInt8Ty(Context), params, false),
-            (TypeBuilder<int8_t(int32_t *, void *), false>::get(Context)));
-  EXPECT_EQ(FunctionType::get(Type::getInt8Ty(Context), params, true),
-            (TypeBuilder<int8_t(int32_t *, char *, ...), false>::get(Context)));
-  params.push_back(TypeBuilder<char, false>::get(Context));
-  EXPECT_EQ(
-      FunctionType::get(Type::getInt8Ty(Context), params, false),
-      (TypeBuilder<int8_t(int32_t *, void *, char), false>::get(Context)));
-  EXPECT_EQ(
-      FunctionType::get(Type::getInt8Ty(Context), params, true),
-      (TypeBuilder<int8_t(int32_t *, char *, char, ...), false>::get(Context)));
-  params.push_back(TypeBuilder<char, false>::get(Context));
-  EXPECT_EQ(FunctionType::get(Type::getInt8Ty(Context), params, false),
-            (TypeBuilder<int8_t(int32_t *, void *, char, char), false>::get(
-                Context)));
-  EXPECT_EQ(
-      FunctionType::get(Type::getInt8Ty(Context), params, true),
-      (TypeBuilder<int8_t(int32_t *, char *, char, char, ...), false>::get(
-          Context)));
-  params.push_back(TypeBuilder<char, false>::get(Context));
-  EXPECT_EQ(
-      FunctionType::get(Type::getInt8Ty(Context), params, false),
-      (TypeBuilder<int8_t(int32_t *, void *, char, char, char), false>::get(
-          Context)));
-  EXPECT_EQ(FunctionType::get(Type::getInt8Ty(Context), params, true),
-            (TypeBuilder<int8_t(int32_t *, char *, char, char, char, ...),
-                         false>::get(Context)));
-}
-
-TEST(TypeBuilderTest, Context) {
-  // We used to cache TypeBuilder results in static local variables.  This
-  // produced the same type for different contexts, which of course broke
-  // things.
-  LLVMContext context1;
-  EXPECT_EQ(&context1,
-            &(TypeBuilder<types::i<1>, true>::get(context1))->getContext());
-  LLVMContext context2;
-  EXPECT_EQ(&context2,
-            &(TypeBuilder<types::i<1>, true>::get(context2))->getContext());
-}
-
-struct MyType {
-  int a;
-  int *b;
-  void *array[1];
-};
-
-struct MyPortableType {
-  int32_t a;
-  int32_t *b;
-  void *array[1];
-};
-
-}  // anonymous namespace
-
-namespace llvm {
-template<bool cross> class TypeBuilder<MyType, cross> {
-public:
-  static StructType *get(LLVMContext &Context) {
-    // Using the static result variable ensures that the type is
-    // only looked up once.
-    std::vector<Type*> st;
-    st.push_back(TypeBuilder<int, cross>::get(Context));
-    st.push_back(TypeBuilder<int*, cross>::get(Context));
-    st.push_back(TypeBuilder<void*[], cross>::get(Context));
-    static StructType *const result = StructType::get(Context, st);
-    return result;
-  }
-
-  // You may find this a convenient place to put some constants
-  // to help with getelementptr.  They don't have any effect on
-  // the operation of TypeBuilder.
-  enum Fields {
-    FIELD_A,
-    FIELD_B,
-    FIELD_ARRAY
-  };
-};
-
-template<bool cross> class TypeBuilder<MyPortableType, cross> {
-public:
-  static StructType *get(LLVMContext &Context) {
-    // Using the static result variable ensures that the type is
-    // only looked up once.
-    std::vector<Type*> st;
-    st.push_back(TypeBuilder<types::i<32>, cross>::get(Context));
-    st.push_back(TypeBuilder<types::i<32>*, cross>::get(Context));
-    st.push_back(TypeBuilder<types::i<8>*[], cross>::get(Context));
-    static StructType *const result = StructType::get(Context, st);
-    return result;
-  }
-
-  // You may find this a convenient place to put some constants
-  // to help with getelementptr.  They don't have any effect on
-  // the operation of TypeBuilder.
-  enum Fields {
-    FIELD_A,
-    FIELD_B,
-    FIELD_ARRAY
-  };
-};
-}  // namespace llvm
-namespace {
-
-TEST(TypeBuilderTest, Extensions) {
-  LLVMContext Context;
-  EXPECT_EQ(PointerType::getUnqual(
-                StructType::get(TypeBuilder<int, false>::get(Context),
-                                TypeBuilder<int *, false>::get(Context),
-                                TypeBuilder<void *[], false>::get(Context))),
-            (TypeBuilder<MyType *, false>::get(Context)));
-  EXPECT_EQ(PointerType::getUnqual(StructType::get(
-                TypeBuilder<types::i<32>, false>::get(Context),
-                TypeBuilder<types::i<32> *, false>::get(Context),
-                TypeBuilder<types::i<8> *[], false>::get(Context))),
-            (TypeBuilder<MyPortableType *, false>::get(Context)));
-  EXPECT_EQ(PointerType::getUnqual(StructType::get(
-                TypeBuilder<types::i<32>, false>::get(Context),
-                TypeBuilder<types::i<32> *, false>::get(Context),
-                TypeBuilder<types::i<8> *[], false>::get(Context))),
-            (TypeBuilder<MyPortableType *, true>::get(Context)));
-}
-
-}  // anonymous namespace
diff --git a/unittests/Passes/CMakeLists.txt b/unittests/Passes/CMakeLists.txt
index 415f3a7..3e83b52 100644
--- a/unittests/Passes/CMakeLists.txt
+++ b/unittests/Passes/CMakeLists.txt
@@ -15,7 +15,7 @@
 target_link_libraries(PluginsTests PRIVATE LLVMTestingSupport)
 
 set(LLVM_LINK_COMPONENTS)
-add_llvm_loadable_module(TestPlugin
+add_llvm_library(TestPlugin MODULE BUILDTREE_ONLY
   TestPlugin.cpp
   )
 
diff --git a/unittests/ProfileData/SampleProfTest.cpp b/unittests/ProfileData/SampleProfTest.cpp
index 67e6e9f..f75f10b 100644
--- a/unittests/ProfileData/SampleProfTest.cpp
+++ b/unittests/ProfileData/SampleProfTest.cpp
@@ -128,11 +128,17 @@
 
     FunctionSamples *ReadFooSamples = Reader->getSamplesFor(FooName);
     ASSERT_TRUE(ReadFooSamples != nullptr);
+    if (Format != SampleProfileFormat::SPF_Compact_Binary) {
+      ASSERT_EQ("_Z3fooi", ReadFooSamples->getName());
+    }
     ASSERT_EQ(7711u, ReadFooSamples->getTotalSamples());
     ASSERT_EQ(610u, ReadFooSamples->getHeadSamples());
 
     FunctionSamples *ReadBarSamples = Reader->getSamplesFor(BarName);
     ASSERT_TRUE(ReadBarSamples != nullptr);
+    if (Format != SampleProfileFormat::SPF_Compact_Binary) {
+      ASSERT_EQ("_Z3bari", ReadBarSamples->getName());
+    }
     ASSERT_EQ(20301u, ReadBarSamples->getTotalSamples());
     ASSERT_EQ(1437u, ReadBarSamples->getHeadSamples());
     ErrorOr<SampleRecord::CallTargetMap> CTMap =
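
These guards skip the name round-trip for the compact binary format which, as far as I can tell, keys functions by an MD5 of the name rather than by the string itself, so the reader cannot recover "_Z3fooi". A hypothetical illustration of that keying (compactKey is not an upstream function):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MD5.h"
#include <string>

// Hypothetical: roughly how a hash-keyed profile format would store a name;
// the exact upstream scheme is not shown in this diff.
std::string compactKey(llvm::StringRef Name) {
  return std::to_string(llvm::MD5Hash(Name));
}
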
diff --git a/unittests/Support/CommandLineTest.cpp b/unittests/Support/CommandLineTest.cpp
index a296912..9d06f72 100644
--- a/unittests/Support/CommandLineTest.cpp
+++ b/unittests/Support/CommandLineTest.cpp
@@ -840,4 +840,78 @@
 }
 #endif
 
-}  // anonymous namespace
+TEST(CommandLineTest, PrefixOptions) {
+  cl::ResetCommandLineParser();
+
+  StackOption<std::string, cl::list<std::string>> IncludeDirs(
+      "I", cl::Prefix, cl::desc("Declare an include directory"));
+
+  // Test non-prefixed variant works with cl::Prefix options.
+  EXPECT_TRUE(IncludeDirs.empty());
+  const char *args[] = {"prog", "-I=/usr/include"};
+  EXPECT_TRUE(
+      cl::ParseCommandLineOptions(2, args, StringRef(), &llvm::nulls()));
+  EXPECT_TRUE(IncludeDirs.size() == 1);
+  EXPECT_TRUE(IncludeDirs.front().compare("/usr/include") == 0);
+
+  IncludeDirs.erase(IncludeDirs.begin());
+  cl::ResetAllOptionOccurrences();
+
+  // Test non-prefixed variant works with cl::Prefix options when value is
+  // passed in following argument.
+  EXPECT_TRUE(IncludeDirs.empty());
+  const char *args2[] = {"prog", "-I", "/usr/include"};
+  EXPECT_TRUE(
+      cl::ParseCommandLineOptions(3, args2, StringRef(), &llvm::nulls()));
+  EXPECT_TRUE(IncludeDirs.size() == 1);
+  EXPECT_TRUE(IncludeDirs.front().compare("/usr/include") == 0);
+
+  IncludeDirs.erase(IncludeDirs.begin());
+  cl::ResetAllOptionOccurrences();
+
+  // Test prefixed variant works with cl::Prefix options.
+  EXPECT_TRUE(IncludeDirs.empty());
+  const char *args3[] = {"prog", "-I/usr/include"};
+  EXPECT_TRUE(
+      cl::ParseCommandLineOptions(2, args3, StringRef(), &llvm::nulls()));
+  EXPECT_TRUE(IncludeDirs.size() == 1);
+  EXPECT_TRUE(IncludeDirs.front().compare("/usr/include") == 0);
+
+  StackOption<std::string, cl::list<std::string>> MacroDefs(
+      "D", cl::AlwaysPrefix, cl::desc("Define a macro"),
+      cl::value_desc("MACRO[=VALUE]"));
+
+  cl::ResetAllOptionOccurrences();
+
+  // Test non-prefixed variant does not work with cl::AlwaysPrefix options:
+  // equal sign is part of the value.
+  EXPECT_TRUE(MacroDefs.empty());
+  const char *args4[] = {"prog", "-D=HAVE_FOO"};
+  EXPECT_TRUE(
+      cl::ParseCommandLineOptions(2, args4, StringRef(), &llvm::nulls()));
+  EXPECT_TRUE(MacroDefs.size() == 1);
+  EXPECT_TRUE(MacroDefs.front().compare("=HAVE_FOO") == 0);
+
+  MacroDefs.erase(MacroDefs.begin());
+  cl::ResetAllOptionOccurrences();
+
+  // Test non-prefixed variant does not allow value to be passed in following
+  // argument with cl::AlwaysPrefix options.
+  EXPECT_TRUE(MacroDefs.empty());
+  const char *args5[] = {"prog", "-D", "HAVE_FOO"};
+  EXPECT_FALSE(
+      cl::ParseCommandLineOptions(3, args5, StringRef(), &llvm::nulls()));
+  EXPECT_TRUE(MacroDefs.empty());
+
+  cl::ResetAllOptionOccurrences();
+
+  // Test prefixed variant works with cl::AlwaysPrefix options.
+  EXPECT_TRUE(MacroDefs.empty());
+  const char *args6[] = {"prog", "-DHAVE_FOO"};
+  EXPECT_TRUE(
+      cl::ParseCommandLineOptions(2, args6, StringRef(), &llvm::nulls()));
+  EXPECT_TRUE(MacroDefs.size() == 1);
+  EXPECT_TRUE(MacroDefs.front().compare("HAVE_FOO") == 0);
+}
+
+} // anonymous namespace
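
The behavioral split this test pins down: a cl::Prefix option still accepts a detached value ("-I x", "-I=x") in addition to the joined form, while cl::AlwaysPrefix consumes everything after the option name as the value, equal sign included, and rejects a detached value. Declaring such options is one line each; a minimal sketch:

#include "llvm/Support/CommandLine.h"
using namespace llvm;

// -I accepts "-Ifoo", "-I=foo", and "-I foo".
static cl::list<std::string> IncludeDirs("I", cl::Prefix,
                                         cl::desc("Include directory"));
// -D accepts only the joined form: "-D=FOO" keeps the '=' in the value,
// and "-D FOO" is a parse error.
static cl::list<std::string> MacroDefs("D", cl::AlwaysPrefix,
                                       cl::desc("Macro definition"));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  return 0;
}
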
diff --git a/unittests/Support/Path.cpp b/unittests/Support/Path.cpp
index 40faa66..97b77e2 100644
--- a/unittests/Support/Path.cpp
+++ b/unittests/Support/Path.cpp
@@ -187,7 +187,7 @@
   }
 
   SmallString<32> Relative("foo.cpp");
-  ASSERT_NO_ERROR(sys::fs::make_absolute("/root", Relative));
+  sys::fs::make_absolute("/root", Relative);
   Relative[5] = '/'; // Fix up windows paths.
   ASSERT_EQ("/root/foo.cpp", Relative);
 }
@@ -1665,7 +1665,9 @@
   EXPECT_TRUE(CheckPermissions(fs::set_gid_on_exe));
 
   // Modern BSDs require root to set the sticky bit on files.
-#if !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
+  // AIX without root will mask off (i.e., lose) the sticky bit on files.
+#if !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) &&  \
+    !defined(_AIX)
   EXPECT_EQ(fs::setPermissions(TempPath, fs::sticky_bit), NoError);
   EXPECT_TRUE(CheckPermissions(fs::sticky_bit));
 
@@ -1685,7 +1687,7 @@
 
   EXPECT_EQ(fs::setPermissions(TempPath, fs::all_perms), NoError);
   EXPECT_TRUE(CheckPermissions(fs::all_perms));
-#endif // !FreeBSD && !NetBSD && !OpenBSD
+#endif // !FreeBSD && !NetBSD && !OpenBSD && !AIX
 
   EXPECT_EQ(fs::setPermissions(TempPath, fs::all_perms & ~fs::sticky_bit),
                                NoError);
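
The ASSERT_NO_ERROR dropped in the first Path.cpp hunk reflects that this two-argument make_absolute overload no longer reports an error: with the current directory supplied explicitly, there is nothing left to fail. A sketch of the call as it now reads (assuming the post-change void return):

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
using namespace llvm;

int main() {
  SmallString<32> P("foo.cpp");
  sys::fs::make_absolute("/root", P); // void: no error code to check
  // P is now "/root/foo.cpp" (modulo platform separators).
}
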
diff --git a/unittests/Support/TargetParserTest.cpp b/unittests/Support/TargetParserTest.cpp
index a6a6150..18bbb6a 100644
--- a/unittests/Support/TargetParserTest.cpp
+++ b/unittests/Support/TargetParserTest.cpp
@@ -265,11 +265,12 @@
                          ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
                          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP,
                          "8-A"));
-  EXPECT_TRUE(testARMCPU("exynos-m4", "armv8-a", "crypto-neon-fp-armv8",
+  EXPECT_TRUE(testARMCPU("exynos-m4", "armv8.2-a", "crypto-neon-fp-armv8",
                          ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP |
                          ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
-                         ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP,
-                         "8-A"));
+                         ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_DOTPROD |
+                         ARM::AEK_FP16 | ARM::AEK_RAS,
+                         "8.2-A"));
   EXPECT_TRUE(testARMCPU("cortex-m23", "armv8-m.base", "none",
                          ARM::AEK_HWDIVTHUMB, "8-M.Baseline"));
   EXPECT_TRUE(testARMCPU("cortex-m33", "armv8-m.main", "fpv5-sp-d16",
@@ -584,7 +585,8 @@
                               {"iwmmxt", "noiwmmxt", nullptr, nullptr},
                               {"iwmmxt2", "noiwmmxt2", nullptr, nullptr},
                               {"maverick", "maverick", nullptr, nullptr},
-                              {"xscale", "noxscale", nullptr, nullptr}};
+                              {"xscale", "noxscale", nullptr, nullptr},
+                              {"sb", "nosb", "+sb", "-sb"}};
 
   for (unsigned i = 0; i < array_lengthof(ArchExt); i++) {
     EXPECT_EQ(StringRef(ArchExt[i][2]), ARM::getArchExtFeature(ArchExt[i][0]));
@@ -758,9 +760,11 @@
       AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
       AArch64::AEK_SIMD, "8-A"));
   EXPECT_TRUE(testAArch64CPU(
-      "exynos-m4", "armv8-a", "crypto-neon-fp-armv8",
-      AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
-      AArch64::AEK_SIMD, "8-A"));
+      "exynos-m4", "armv8.2-a", "crypto-neon-fp-armv8",
+      AArch64::AEK_CRC | AArch64::AEK_CRYPTO |
+      AArch64::AEK_DOTPROD | AArch64::AEK_FP | AArch64::AEK_FP16 |
+      AArch64::AEK_LSE | AArch64::AEK_RAS | AArch64::AEK_RDM |
+      AArch64::AEK_SIMD, "8.2-A"));
   EXPECT_TRUE(testAArch64CPU(
       "falkor", "armv8-a", "crypto-neon-fp-armv8",
       AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
@@ -869,8 +873,16 @@
                                     AArch64::ArchKind::INVALID, "ras"));
   EXPECT_FALSE(testAArch64Extension("exynos-m3",
                                     AArch64::ArchKind::INVALID, "ras"));
+  EXPECT_TRUE(testAArch64Extension("exynos-m4",
+                                   AArch64::ArchKind::INVALID, "lse"));
+  EXPECT_TRUE(testAArch64Extension("exynos-m4",
+                                   AArch64::ArchKind::INVALID, "rdm"));
+  EXPECT_TRUE(testAArch64Extension("exynos-m4",
+                                   AArch64::ArchKind::INVALID, "ras"));
   EXPECT_FALSE(testAArch64Extension("exynos-m4",
-                                    AArch64::ArchKind::INVALID, "ras"));
+                                    AArch64::ArchKind::INVALID, "fullfp16"));
+  EXPECT_TRUE(testAArch64Extension("exynos-m4",
+                                   AArch64::ArchKind::INVALID, "dotprod"));
   EXPECT_TRUE(testAArch64Extension("falkor",
                                    AArch64::ArchKind::INVALID, "rdm"));
   EXPECT_FALSE(testAArch64Extension("kryo",
@@ -988,7 +1000,10 @@
                               {"rcpc", "norcpc", "+rcpc", "-rcpc" },
                               {"rng", "norng", "+rand", "-rand"},
                               {"memtag", "nomemtag", "+mte", "-mte"},
-                              {"ssbs", "nossbs", "+ssbs", "-ssbs"}};
+                              {"ssbs", "nossbs", "+ssbs", "-ssbs"},
+                              {"sb", "nosb", "+sb", "-sb"},
+                              {"predres", "nopredres", "+predres", "-predres"}
+};
 
   for (unsigned i = 0; i < array_lengthof(ArchExt); i++) {
     EXPECT_EQ(StringRef(ArchExt[i][2]),
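
The rows added to these ArchExt tables pin down the mapping getArchExtFeature performs from an extension name to a +/- subtarget feature. A small sketch of the queries the new rows imply, assuming this era's llvm/Support/TargetParser.h:

#include "llvm/Support/TargetParser.h"
#include <cassert>
using namespace llvm;

int main() {
  // A "no"-prefixed extension name maps to the negated feature.
  assert(ARM::getArchExtFeature("sb") == "+sb");
  assert(ARM::getArchExtFeature("nosb") == "-sb");
  assert(AArch64::getArchExtFeature("predres") == "+predres");
}
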
diff --git a/unittests/Support/VirtualFileSystemTest.cpp b/unittests/Support/VirtualFileSystemTest.cpp
index 458b07e..7b42943 100644
--- a/unittests/Support/VirtualFileSystemTest.cpp
+++ b/unittests/Support/VirtualFileSystemTest.cpp
@@ -743,6 +743,43 @@
   }
 }
 
+TEST(ProxyFileSystemTest, Basic) {
+  IntrusiveRefCntPtr<vfs::InMemoryFileSystem> Base(
+      new vfs::InMemoryFileSystem());
+  vfs::ProxyFileSystem PFS(Base);
+
+  Base->addFile("/a", 0, MemoryBuffer::getMemBuffer("test"));
+
+  auto Stat = PFS.status("/a");
+  ASSERT_FALSE(Stat.getError());
+
+  auto File = PFS.openFileForRead("/a");
+  ASSERT_FALSE(File.getError());
+  EXPECT_EQ("test", (*(*File)->getBuffer("ignored"))->getBuffer());
+
+  std::error_code EC;
+  vfs::directory_iterator I = PFS.dir_begin("/", EC);
+  ASSERT_FALSE(EC);
+  ASSERT_EQ("/a", I->path());
+  I.increment(EC);
+  ASSERT_FALSE(EC);
+  ASSERT_EQ(vfs::directory_iterator(), I);
+
+  ASSERT_FALSE(PFS.setCurrentWorkingDirectory("/"));
+
+  auto PWD = PFS.getCurrentWorkingDirectory();
+  ASSERT_FALSE(PWD.getError());
+  ASSERT_EQ("/", *PWD);
+
+  SmallString<16> Path;
+  ASSERT_FALSE(PFS.getRealPath("a", Path));
+  ASSERT_EQ("/a", Path);
+
+  bool Local = true;
+  ASSERT_FALSE(PFS.isLocal("/a", Local));
+  ASSERT_EQ(false, Local);
+}
+
 class InMemoryFileSystemTest : public ::testing::Test {
 protected:
   llvm::vfs::InMemoryFileSystem FS;
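
ProxyFileSystem, which this new test exercises one call at a time, forwards every operation to an underlying filesystem; the point of the class is to be subclassed so that a single operation can be intercepted. A sketch of that intended use (CountingFS is hypothetical):

#include "llvm/Support/VirtualFileSystem.h"
using namespace llvm;

// Hypothetical subclass: count status() queries, delegate everything else.
class CountingFS : public vfs::ProxyFileSystem {
  unsigned StatusCalls = 0;

public:
  explicit CountingFS(IntrusiveRefCntPtr<vfs::FileSystem> FS)
      : ProxyFileSystem(std::move(FS)) {}
  ErrorOr<vfs::Status> status(const Twine &Path) override {
    ++StatusCalls;
    return ProxyFileSystem::status(Path);
  }
  unsigned queries() const { return StatusCalls; }
};
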
diff --git a/unittests/TextAPI/CMakeLists.txt b/unittests/TextAPI/CMakeLists.txt
index 5f64c8a..d7208b2 100644
--- a/unittests/TextAPI/CMakeLists.txt
+++ b/unittests/TextAPI/CMakeLists.txt
@@ -2,8 +2,8 @@
   TextAPI
 )
 
-add_llvm_unittest(TapiTests
+add_llvm_unittest(TextAPITests
   ELFYAMLTest.cpp
 )
 
-target_link_libraries(TapiTests PRIVATE LLVMTestingSupport)
+target_link_libraries(TextAPITests PRIVATE LLVMTestingSupport)
diff --git a/unittests/TextAPI/ELFYAMLTest.cpp b/unittests/TextAPI/ELFYAMLTest.cpp
index 802a2d7..1ace819 100644
--- a/unittests/TextAPI/ELFYAMLTest.cpp
+++ b/unittests/TextAPI/ELFYAMLTest.cpp
@@ -65,7 +65,7 @@
                       "  foo: { Type: Func, Warning: \"Deprecated!\" }\n"
                       "  nor: { Type: NoType, Undefined: true }\n"
                       "  not: { Type: File, Undefined: true, Size: 111, "
-                      "Warning: \'All fields populated!\' }\n"
+                      "Weak: true, Warning: \'All fields populated!\' }\n"
                       "...\n";
   Expected<std::unique_ptr<ELFStub>> StubOrErr = readTBEFromBuffer(Data);
   ASSERT_THAT_ERROR(StubOrErr.takeError(), Succeeded());
@@ -81,6 +81,7 @@
   EXPECT_EQ(SymBar.Size, 42u);
   EXPECT_EQ(SymBar.Type, ELFSymbolType::Object);
   EXPECT_FALSE(SymBar.Undefined);
+  EXPECT_FALSE(SymBar.Weak);
   EXPECT_FALSE(SymBar.Warning.hasValue());
 
   ELFSymbol const &SymBaz = *Iterator++;
@@ -88,6 +89,7 @@
   EXPECT_EQ(SymBaz.Size, 3u);
   EXPECT_EQ(SymBaz.Type, ELFSymbolType::TLS);
   EXPECT_FALSE(SymBaz.Undefined);
+  EXPECT_FALSE(SymBaz.Weak);
   EXPECT_FALSE(SymBaz.Warning.hasValue());
 
   ELFSymbol const &SymFoo = *Iterator++;
@@ -95,6 +97,7 @@
   EXPECT_EQ(SymFoo.Size, 0u);
   EXPECT_EQ(SymFoo.Type, ELFSymbolType::Func);
   EXPECT_FALSE(SymFoo.Undefined);
+  EXPECT_FALSE(SymFoo.Weak);
   EXPECT_TRUE(SymFoo.Warning.hasValue());
   EXPECT_STREQ(SymFoo.Warning->c_str(), "Deprecated!");
 
@@ -103,6 +106,7 @@
   EXPECT_EQ(SymNor.Size, 0u);
   EXPECT_EQ(SymNor.Type, ELFSymbolType::NoType);
   EXPECT_TRUE(SymNor.Undefined);
+  EXPECT_FALSE(SymNor.Weak);
   EXPECT_FALSE(SymNor.Warning.hasValue());
 
   ELFSymbol const &SymNot = *Iterator++;
@@ -110,6 +114,7 @@
   EXPECT_EQ(SymNot.Size, 111u);
   EXPECT_EQ(SymNot.Type, ELFSymbolType::Unknown);
   EXPECT_TRUE(SymNot.Undefined);
+  EXPECT_TRUE(SymNot.Weak);
   EXPECT_TRUE(SymNot.Warning.hasValue());
   EXPECT_STREQ(SymNot.Warning->c_str(), "All fields populated!");
 }
@@ -146,6 +151,7 @@
       "TbeVersion:      1.0\n"
       "Arch:            AArch64\n"
       "Symbols:         \n"
+      "  bar:             { Type: Func, Weak: true }\n"
       "  foo:             { Type: NoType, Size: 99, Warning: Does nothing }\n"
       "  nor:             { Type: Func, Undefined: true }\n"
       "  not:             { Type: Unknown, Size: 12345678901234 }\n"
@@ -158,19 +164,30 @@
   SymFoo.Size = 99u;
   SymFoo.Type = ELFSymbolType::NoType;
   SymFoo.Undefined = false;
+  SymFoo.Weak = false;
   SymFoo.Warning = "Does nothing";
 
+  ELFSymbol SymBar("bar");
+  SymBar.Size = 128u;
+  SymBar.Type = ELFSymbolType::Func;
+  SymBar.Undefined = false;
+  SymBar.Weak = true;
+
   ELFSymbol SymNor("nor");
+  SymNor.Size = 1234u;
   SymNor.Type = ELFSymbolType::Func;
   SymNor.Undefined = true;
+  SymNor.Weak = false;
 
   ELFSymbol SymNot("not");
   SymNot.Size = 12345678901234u;
   SymNot.Type = ELFSymbolType::Unknown;
   SymNot.Undefined = false;
+  SymNot.Weak = false;
 
   // Deliberately not in order to check that result is sorted.
   Stub.Symbols.insert(SymNot);
+  Stub.Symbols.insert(SymBar);
   Stub.Symbols.insert(SymFoo);
   Stub.Symbols.insert(SymNor);
 
diff --git a/utils/DSAclean.py b/utils/DSAclean.py
index 6c43357..789a825 100755
--- a/utils/DSAclean.py
+++ b/utils/DSAclean.py
@@ -8,10 +8,13 @@
 #the comments
 #10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
 #than removing all lines for which the label CONTAINS %tmp.#
+
+from __future__ import print_function
+
 import re
 import sys
 if( len(sys.argv) < 3 ):
-	print 'usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>'
+	print('usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>')
 	sys.exit(1)
 #get a file object
 input = open(sys.argv[1], 'r')
diff --git a/utils/DSAextract.py b/utils/DSAextract.py
index 89dece1..258aac4 100644
--- a/utils/DSAextract.py
+++ b/utils/DSAextract.py
@@ -25,14 +25,16 @@
 #currently the script prints the names it is searching for
 #to STDOUT, so you can check to see if they are what you intend
 
+from __future__ import print_function
+
 import re
 import string
 import sys
 
 
 if len(sys.argv) < 3:
-	print 'usage is ./DSAextract <dot_file_to_modify> \
-			<output_file> [list of nodes to extract]'
+	print('usage is ./DSAextract <dot_file_to_modify> \
+			<output_file> [list of nodes to extract]')
 
 #open the input file
 input = open(sys.argv[1], 'r')
@@ -73,7 +75,7 @@
 #test code
 #print '\n'
 
-print node_name_set
+print(node_name_set)
 
 #print node_set
 	
diff --git a/utils/FileCheck/FileCheck.cpp b/utils/FileCheck/FileCheck.cpp
index 967d22f..39245d2 100644
--- a/utils/FileCheck/FileCheck.cpp
+++ b/utils/FileCheck/FileCheck.cpp
@@ -19,12 +19,13 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/InitLLVM.h"
 #include "llvm/Support/Process.h"
+#include "llvm/Support/WithColor.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/FileCheck.h"
 using namespace llvm;
 
 static cl::opt<std::string>
-    CheckFilename(cl::Positional, cl::desc("<check-file>"), cl::Required);
+    CheckFilename(cl::Positional, cl::desc("<check-file>"), cl::Optional);
 
 static cl::opt<std::string>
     InputFilename("input-file", cl::desc("File to check (defaults to stdin)"),
@@ -50,9 +51,10 @@
              "this pattern occur which are not matched by a positive pattern"),
     cl::value_desc("pattern"));
 
-static cl::list<std::string> GlobalDefines("D", cl::Prefix,
-    cl::desc("Define a variable to be used in capture patterns."),
-    cl::value_desc("VAR=VALUE"));
+static cl::list<std::string>
+    GlobalDefines("D", cl::AlwaysPrefix,
+                  cl::desc("Define a variable to be used in capture patterns."),
+                  cl::value_desc("VAR=VALUE"));
 
 static cl::opt<bool> AllowEmptyInput(
     "allow-empty", cl::init(false),
@@ -91,7 +93,27 @@
     "dump-input-on-failure", cl::init(std::getenv(DumpInputEnv)),
     cl::desc("Dump original input to stderr before failing.\n"
              "The value can be also controlled using\n"
-             "FILECHECK_DUMP_INPUT_ON_FAILURE environment variable.\n"));
+             "FILECHECK_DUMP_INPUT_ON_FAILURE environment variable.\n"
+             "This option is deprecated in favor of -dump-input=fail.\n"));
+
+enum DumpInputValue {
+  DumpInputDefault,
+  DumpInputHelp,
+  DumpInputNever,
+  DumpInputFail,
+  DumpInputAlways
+};
+
+static cl::opt<DumpInputValue> DumpInput(
+    "dump-input", cl::init(DumpInputDefault),
+    cl::desc("Dump input to stderr, adding annotations representing\n"
+             " currently enabled diagnostics\n"),
+    cl::value_desc("mode"),
+    cl::values(clEnumValN(DumpInputHelp, "help",
+                          "Explain dump format and quit"),
+               clEnumValN(DumpInputNever, "never", "Never dump input"),
+               clEnumValN(DumpInputFail, "fail", "Dump input on failure"),
+               clEnumValN(DumpInputAlways, "always", "Always dump input")));
 
 typedef cl::list<std::string>::const_iterator prefix_iterator;
 
@@ -108,6 +130,376 @@
   errs() << "\n";
 }
 
+struct MarkerStyle {
+  /// The starting char (before tildes) for marking the line.
+  char Lead;
+  /// What color to use for this annotation.
+  raw_ostream::Colors Color;
+  /// A note to follow the marker, or empty string if none.
+  std::string Note;
+  MarkerStyle() {}
+  MarkerStyle(char Lead, raw_ostream::Colors Color,
+              const std::string &Note = "")
+      : Lead(Lead), Color(Color), Note(Note) {}
+};
+
+static MarkerStyle GetMarker(FileCheckDiag::MatchType MatchTy) {
+  switch (MatchTy) {
+  case FileCheckDiag::MatchFoundAndExpected:
+    return MarkerStyle('^', raw_ostream::GREEN);
+  case FileCheckDiag::MatchFoundButExcluded:
+    return MarkerStyle('!', raw_ostream::RED, "error: no match expected");
+  case FileCheckDiag::MatchFoundButWrongLine:
+    return MarkerStyle('!', raw_ostream::RED, "error: match on wrong line");
+  case FileCheckDiag::MatchFoundButDiscarded:
+    return MarkerStyle('!', raw_ostream::CYAN,
+                       "discard: overlaps earlier match");
+  case FileCheckDiag::MatchNoneAndExcluded:
+    return MarkerStyle('X', raw_ostream::GREEN);
+  case FileCheckDiag::MatchNoneButExpected:
+    return MarkerStyle('X', raw_ostream::RED, "error: no match found");
+  case FileCheckDiag::MatchFuzzy:
+    return MarkerStyle('?', raw_ostream::MAGENTA, "possible intended match");
+  }
+  llvm_unreachable_internal("unexpected match type");
+}
+
+static void DumpInputAnnotationHelp(raw_ostream &OS) {
+  OS << "The following description was requested by -dump-input=help to\n"
+     << "explain the input annotations printed by -dump-input=always and\n"
+     << "-dump-input=fail:\n\n";
+
+  // Labels for input lines.
+  OS << "  - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "L:";
+  OS << "     labels line number L of the input file\n";
+
+  // Labels for annotation lines.
+  OS << "  - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "T:L";
+  OS << "    labels the only match result for a pattern of type T from "
+     << "line L of\n"
+     << "           the check file\n";
+  OS << "  - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "T:L'N";
+  OS << "  labels the Nth match result for a pattern of type T from line "
+     << "L of\n"
+     << "           the check file\n";
+
+  // Markers on annotation lines.
+  OS << "  - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "^~~";
+  OS << "    marks good match (reported if -v)\n"
+     << "  - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "!~~";
+  OS << "    marks bad match, such as:\n"
+     << "           - CHECK-NEXT on same line as previous match (error)\n"
+     << "           - CHECK-NOT found (error)\n"
+     << "           - CHECK-DAG overlapping match (discarded, reported if "
+     << "-vv)\n"
+     << "  - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "X~~";
+  OS << "    marks search range when no match is found, such as:\n"
+     << "           - CHECK-NEXT not found (error)\n"
+     << "           - CHECK-NOT not found (success, reported if -vv)\n"
+     << "           - CHECK-DAG not found after discarded matches (error)\n"
+     << "  - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "?";
+  OS << "      marks fuzzy match when no match is found\n";
+
+  // Colors.
+  OS << "  - colors ";
+  WithColor(OS, raw_ostream::GREEN, true) << "success";
+  OS << ", ";
+  WithColor(OS, raw_ostream::RED, true) << "error";
+  OS << ", ";
+  WithColor(OS, raw_ostream::MAGENTA, true) << "fuzzy match";
+  OS << ", ";
+  WithColor(OS, raw_ostream::CYAN, true, false) << "discarded match";
+  OS << ", ";
+  WithColor(OS, raw_ostream::CYAN, true, true) << "unmatched input";
+  OS << "\n\n"
+     << "If you are not seeing color above or in input dumps, try: -color\n";
+}
+
+/// An annotation for a single input line.
+struct InputAnnotation {
+  /// The check file line (one-origin indexing) where the directive that
+  /// produced this annotation is located.
+  unsigned CheckLine;
+  /// The index of the match result for this check.
+  unsigned CheckDiagIndex;
+  /// The label for this annotation.
+  std::string Label;
+  /// What input line (one-origin indexing) this annotation marks.  This might
+  /// be different from the starting line of the original diagnostic if this is
+  /// a non-initial fragment of a diagnostic that has been broken across
+  /// multiple lines.
+  unsigned InputLine;
+  /// The column range (one-origin indexing, open end) in which to mark the
+  /// input line.  If InputEndCol is UINT_MAX, treat it as the last column
+  /// before the newline.
+  unsigned InputStartCol, InputEndCol;
+  /// The marker to use.
+  MarkerStyle Marker;
+  /// Whether this annotation represents a good match for an expected pattern.
+  bool FoundAndExpectedMatch;
+};
+
+/// Get an abbreviation for the check type.
+std::string GetCheckTypeAbbreviation(Check::FileCheckType Ty) {
+  switch (Ty) {
+  case Check::CheckPlain:
+    if (Ty.getCount() > 1)
+      return "count";
+    return "check";
+  case Check::CheckNext:
+    return "next";
+  case Check::CheckSame:
+    return "same";
+  case Check::CheckNot:
+    return "not";
+  case Check::CheckDAG:
+    return "dag";
+  case Check::CheckLabel:
+    return "label";
+  case Check::CheckEmpty:
+    return "empty";
+  case Check::CheckEOF:
+    return "eof";
+  case Check::CheckBadNot:
+    return "bad-not";
+  case Check::CheckBadCount:
+    return "bad-count";
+  case Check::CheckNone:
+    llvm_unreachable("invalid FileCheckType");
+  }
+  llvm_unreachable("unknown FileCheckType");
+}
+
+static void BuildInputAnnotations(const std::vector<FileCheckDiag> &Diags,
+                                  std::vector<InputAnnotation> &Annotations,
+                                  unsigned &LabelWidth) {
+  // How many diagnostics has the current check seen so far?
+  unsigned CheckDiagCount = 0;
+  // What's the widest label?
+  LabelWidth = 0;
+  for (auto DiagItr = Diags.begin(), DiagEnd = Diags.end(); DiagItr != DiagEnd;
+       ++DiagItr) {
+    InputAnnotation A;
+
+    // Build label, which uniquely identifies this check result.
+    A.CheckLine = DiagItr->CheckLine;
+    llvm::raw_string_ostream Label(A.Label);
+    Label << GetCheckTypeAbbreviation(DiagItr->CheckTy) << ":"
+          << DiagItr->CheckLine;
+    A.CheckDiagIndex = UINT_MAX;
+    auto DiagNext = std::next(DiagItr);
+    if (DiagNext != DiagEnd && DiagItr->CheckTy == DiagNext->CheckTy &&
+        DiagItr->CheckLine == DiagNext->CheckLine)
+      A.CheckDiagIndex = CheckDiagCount++;
+    else if (CheckDiagCount) {
+      A.CheckDiagIndex = CheckDiagCount;
+      CheckDiagCount = 0;
+    }
+    if (A.CheckDiagIndex != UINT_MAX)
+      Label << "'" << A.CheckDiagIndex;
+    else
+      A.CheckDiagIndex = 0;
+    Label.flush();
+    LabelWidth = std::max((std::string::size_type)LabelWidth, A.Label.size());
+
+    MarkerStyle Marker = GetMarker(DiagItr->MatchTy);
+    A.Marker = Marker;
+    A.FoundAndExpectedMatch =
+        DiagItr->MatchTy == FileCheckDiag::MatchFoundAndExpected;
+
+    // Compute the mark location, and break annotation into multiple
+    // annotations if it spans multiple lines.
+    A.InputLine = DiagItr->InputStartLine;
+    A.InputStartCol = DiagItr->InputStartCol;
+    if (DiagItr->InputStartLine == DiagItr->InputEndLine) {
+      // Sometimes ranges are empty in order to indicate a specific point, but
+      // that would mean nothing would be marked, so adjust the range to
+      // include the following character.
+      A.InputEndCol =
+          std::max(DiagItr->InputStartCol + 1, DiagItr->InputEndCol);
+      Annotations.push_back(A);
+    } else {
+      assert(DiagItr->InputStartLine < DiagItr->InputEndLine &&
+             "expected input range not to be inverted");
+      A.InputEndCol = UINT_MAX;
+      A.Marker.Note = "";
+      Annotations.push_back(A);
+      for (unsigned L = DiagItr->InputStartLine + 1, E = DiagItr->InputEndLine;
+           L <= E; ++L) {
+        // If a range ends before the first column on a line, then it has no
+        // characters on that line, so there's nothing to render.
+        if (DiagItr->InputEndCol == 1 && L == E) {
+          Annotations.back().Marker.Note = Marker.Note;
+          break;
+        }
+        InputAnnotation B;
+        B.CheckLine = A.CheckLine;
+        B.CheckDiagIndex = A.CheckDiagIndex;
+        B.Label = A.Label;
+        B.InputLine = L;
+        B.Marker = Marker;
+        B.Marker.Lead = '~';
+        B.InputStartCol = 1;
+        if (L != E) {
+          B.InputEndCol = UINT_MAX;
+          B.Marker.Note = "";
+        } else
+          B.InputEndCol = DiagItr->InputEndCol;
+        B.FoundAndExpectedMatch = A.FoundAndExpectedMatch;
+        Annotations.push_back(B);
+      }
+    }
+  }
+}
+
+static void DumpAnnotatedInput(raw_ostream &OS, const FileCheckRequest &Req,
+                               StringRef InputFileText,
+                               std::vector<InputAnnotation> &Annotations,
+                               unsigned LabelWidth) {
+  OS << "Full input was:\n<<<<<<\n";
+
+  // Sort annotations.
+  //
+  // First, sort in the order of input lines to make it easier to find relevant
+  // annotations while iterating input lines in the implementation below.
+  // FileCheck diagnostics are not always reported and recorded in the order of
+  // input lines due to, for example, CHECK-DAG and CHECK-NOT.
+  //
+  // Second, for annotations for the same input line, sort in the order of the
+  // FileCheck directive's line in the check file (where there's at most one
+  // directive per line) and then by the index of the match result for that
+  // directive.  The rationale of this choice is that, for any input line, this
+  // sort establishes a total order of annotations that, with respect to match
+  // results, is consistent across multiple lines, thus making match results
+  // easier to track from one line to the next when they span multiple lines.
+  std::sort(Annotations.begin(), Annotations.end(),
+            [](const InputAnnotation &A, const InputAnnotation &B) {
+              if (A.InputLine != B.InputLine)
+                return A.InputLine < B.InputLine;
+              if (A.CheckLine != B.CheckLine)
+                return A.CheckLine < B.CheckLine;
+              // FIXME: Sometimes CHECK-LABEL reports its match twice with
+              // other diagnostics in between, and then diag index incrementing
+              // fails to work properly, and then this assert fails.  We should
+              // suppress one of those diagnostics or do a better job of
+              // computing this index.  For now, we just produce a redundant
+              // CHECK-LABEL annotation.
+              // assert(A.CheckDiagIndex != B.CheckDiagIndex &&
+              //        "expected diagnostic indices to be unique within a "
+              //        " check line");
+              return A.CheckDiagIndex < B.CheckDiagIndex;
+            });
+
+  // Compute the width of the label column.
+  const unsigned char *InputFilePtr = InputFileText.bytes_begin(),
+                      *InputFileEnd = InputFileText.bytes_end();
+  unsigned LineCount = InputFileText.count('\n');
+  if (InputFileEnd[-1] != '\n')
+    ++LineCount;
+  unsigned LineNoWidth = log10(LineCount) + 1;
+  // +3 below adds spaces (1) to the left of the (right-aligned) line numbers
+  // on input lines and (2) to the right of the (left-aligned) labels on
+  // annotation lines so that input lines and annotation lines are more
+  // visually distinct.  For example, the spaces on the annotation lines ensure
+  // that input line numbers and check directive line numbers never align
+  // horizontally.  Those line numbers might not even be for the same file.
+  // One space would be enough to achieve that, but more makes it even easier
+  // to see.
+  LabelWidth = std::max(LabelWidth, LineNoWidth) + 3;
+
+  // Print annotated input lines.
+  auto AnnotationItr = Annotations.begin(), AnnotationEnd = Annotations.end();
+  for (unsigned Line = 1;
+       InputFilePtr != InputFileEnd || AnnotationItr != AnnotationEnd;
+       ++Line) {
+    const unsigned char *InputFileLine = InputFilePtr;
+
+    // Print right-aligned line number.
+    WithColor(OS, raw_ostream::BLACK, true)
+        << format_decimal(Line, LabelWidth) << ": ";
+
+    // For the case where -v and colors are enabled, find the annotations for
+    // good matches for expected patterns in order to highlight everything
+    // else in the line.  There are no such annotations if -v is disabled.
+    std::vector<InputAnnotation> FoundAndExpectedMatches;
+    if (Req.Verbose && WithColor(OS).colorsEnabled()) {
+      for (auto I = AnnotationItr; I != AnnotationEnd && I->InputLine == Line;
+           ++I) {
+        if (I->FoundAndExpectedMatch)
+          FoundAndExpectedMatches.push_back(*I);
+      }
+    }
+
+    // Print numbered line with highlighting where there are no matches for
+    // expected patterns.
+    bool Newline = false;
+    {
+      WithColor COS(OS);
+      bool InMatch = false;
+      if (Req.Verbose)
+        COS.changeColor(raw_ostream::CYAN, true, true);
+      for (unsigned Col = 1; InputFilePtr != InputFileEnd && !Newline; ++Col) {
+        bool WasInMatch = InMatch;
+        InMatch = false;
+        for (auto M : FoundAndExpectedMatches) {
+          if (M.InputStartCol <= Col && Col < M.InputEndCol) {
+            InMatch = true;
+            break;
+          }
+        }
+        if (!WasInMatch && InMatch)
+          COS.resetColor();
+        else if (WasInMatch && !InMatch)
+          COS.changeColor(raw_ostream::CYAN, true, true);
+        if (*InputFilePtr == '\n')
+          Newline = true;
+        else
+          COS << *InputFilePtr;
+        ++InputFilePtr;
+      }
+    }
+    OS << '\n';
+    unsigned InputLineWidth = InputFilePtr - InputFileLine - Newline;
+
+    // Print any annotations.
+    while (AnnotationItr != AnnotationEnd &&
+           AnnotationItr->InputLine == Line) {
+      WithColor COS(OS, AnnotationItr->Marker.Color, true);
+      // The two spaces below are where the ": " appears on input lines.
+      COS << left_justify(AnnotationItr->Label, LabelWidth) << "  ";
+      unsigned Col;
+      for (Col = 1; Col < AnnotationItr->InputStartCol; ++Col)
+        COS << ' ';
+      COS << AnnotationItr->Marker.Lead;
+      // If InputEndCol=UINT_MAX, stop at InputLineWidth.
+      for (++Col; Col < AnnotationItr->InputEndCol && Col <= InputLineWidth;
+           ++Col)
+        COS << '~';
+      const std::string &Note = AnnotationItr->Marker.Note;
+      if (!Note.empty()) {
+        // Put the note at the end of the input line.  If we were to instead
+        // put the note right after the marker, subsequent annotations for the
+        // same input line might appear to mark this note instead of the input
+        // line.
+        for (; Col <= InputLineWidth; ++Col)
+          COS << ' ';
+        COS << ' ' << Note;
+      }
+      COS << '\n';
+      ++AnnotationItr;
+    }
+  }
+
+  OS << ">>>>>>\n";
+}
+
 int main(int argc, char **argv) {
   // Enable use of ANSI color codes because FileCheck is using them to
   // highlight text.
@@ -116,6 +508,14 @@
   InitLLVM X(argc, argv);
   cl::ParseCommandLineOptions(argc, argv, /*Overview*/ "", /*Errs*/ nullptr,
                               "FILECHECK_OPTS");
+  if (DumpInput == DumpInputHelp) {
+    DumpInputAnnotationHelp(outs());
+    return 0;
+  }
+  if (CheckFilename.empty()) {
+    errs() << "<check-file> not specified\n";
+    return 2;
+  }
 
   FileCheckRequest Req;
   for (auto Prefix : CheckPrefixes)
@@ -124,8 +524,25 @@
   for (auto CheckNot : ImplicitCheckNot)
     Req.ImplicitCheckNot.push_back(CheckNot);
 
-  for (auto G : GlobalDefines)
+  bool GlobalDefineError = false;
+  for (auto G : GlobalDefines) {
+    size_t EqIdx = G.find('=');
+    if (EqIdx == std::string::npos) {
+      errs() << "Missing equal sign in command-line definition '-D" << G
+             << "'\n";
+      GlobalDefineError = true;
+      continue;
+    }
+    if (EqIdx == 0) {
+      errs() << "Missing pattern variable name in command-line definition '-D"
+             << G << "'\n";
+      GlobalDefineError = true;
+      continue;
+    }
     Req.GlobalDefines.push_back(G);
+  }
+  if (GlobalDefineError)
+    return 2;
 
   Req.AllowEmptyInput = AllowEmptyInput;
   Req.EnableVarScope = EnableVarScope;
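
The -D handling above enforces a simple syntactic contract before any
checking runs. A minimal Python sketch of the same rule (the function name
and return convention are illustrative, not part of FileCheck):

    def validate_define(d):
        # Mirrors the checks in main(): a -D definition needs an '=' and a
        # non-empty pattern variable name in front of it.
        eq = d.find('=')
        if eq == -1:
            return "Missing equal sign in command-line definition '-D%s'" % d
        if eq == 0:
            return ("Missing pattern variable name in command-line "
                    "definition '-D%s'" % d)
        return None  # accepted, as for -DVAR=value

So `-DVALUE` and `-D=value` are both rejected (FileCheck reports every bad
definition and then exits with code 2), while `-DVAR=value` is forwarded
into Req.GlobalDefines.
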
@@ -157,7 +574,6 @@
     return 2;
   }
 
-
   SourceMgr SM;
 
   // Read the expected strings from the check file.
@@ -204,10 +620,29 @@
                             InputFileText, InputFile.getBufferIdentifier()),
                         SMLoc());
 
-  int ExitCode =
-      FC.CheckInput(SM, InputFileText, CheckStrings) ? EXIT_SUCCESS : 1;
-  if (ExitCode == 1 && DumpInputOnFailure)
-    errs() << "Full input was:\n<<<<<<\n" << InputFileText << "\n>>>>>>\n";
+  if (DumpInput == DumpInputDefault)
+    DumpInput = DumpInputOnFailure ? DumpInputFail : DumpInputNever;
+
+  std::vector<FileCheckDiag> Diags;
+  int ExitCode = FC.CheckInput(SM, InputFileText, CheckStrings,
+                               DumpInput == DumpInputNever ? nullptr : &Diags)
+                     ? EXIT_SUCCESS
+                     : 1;
+  if (DumpInput == DumpInputAlways ||
+      (ExitCode == 1 && DumpInput == DumpInputFail)) {
+    errs() << "\n"
+           << "Input file: "
+           << (InputFilename == "-" ? "<stdin>" : InputFilename.getValue())
+           << "\n"
+           << "Check file: " << CheckFilename << "\n"
+           << "\n"
+           << "-dump-input=help describes the format of the following dump.\n"
+           << "\n";
+    std::vector<InputAnnotation> Annotations;
+    unsigned LabelWidth;
+    BuildInputAnnotations(Diags, Annotations, LabelWidth);
+    DumpAnnotatedInput(errs(), Req, InputFileText, Annotations, LabelWidth);
+  }
 
   return ExitCode;
 }
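
As a reading aid, here is a minimal Python sketch of how DumpAnnotatedInput
above lays out a single annotation line (names are illustrative; the real
implementation streams through WithColor instead of building a string):

    def render_annotation(label, label_width, start_col, end_col,
                          input_line_width, lead, note=""):
        # Left-justified label, then two spaces where ": " sits on input lines.
        out = label.ljust(label_width) + "  "
        # Spaces up to the start column, then the marker's lead character.
        out += " " * (start_col - 1) + lead
        # '~' up to the end column, capped at the input line's width
        # (the InputEndCol=UINT_MAX case noted in the code).
        col = start_col + 1
        while col < end_col and col <= input_line_width:
            out += "~"
            col += 1
        if note:
            # Push the note past the end of the input line so later
            # annotations cannot appear to mark the note itself.
            out += " " * (input_line_width - col + 1) + " " + note
        return out
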
diff --git a/utils/LLVMVisualizers/llvm.natvis b/utils/LLVMVisualizers/llvm.natvis
index bbded5d..42d3a12 100644
--- a/utils/LLVMVisualizers/llvm.natvis
+++ b/utils/LLVMVisualizers/llvm.natvis
@@ -35,6 +35,7 @@
     <DisplayString IncludeView ="elt4">, /* {Size - 4} more*/ </DisplayString>
     <DisplayString Condition="Size == 0">empty</DisplayString>
     <DisplayString Condition="Size != 0">{{{*this,view(elt0)}}}</DisplayString>
+    <DisplayString>Uninitialized</DisplayString>
     <Expand>
       <Item Name="[size]">Size</Item>
       <Item Name="[capacity]">Capacity</Item>
@@ -92,36 +93,87 @@
     </Expand>
   </Type>
 
+  <!-- PointerUnion types - In addition to the regular view, which displays the pointer, there is a "deref" view that
+       displays the pointed to object, which is often needed by other visualizers -->
   <Type Name="llvm::PointerUnion&lt;*,*&gt;">
-    <DisplayString Condition="((Val.Value &gt;&gt; Val.IntShift) &amp; Val.IntMask) == 0">{"$T1", s8b}: {($T1)(Val.Value &amp; Val.PointerBitMask)}</DisplayString>
-    <DisplayString Condition="((Val.Value &gt;&gt; Val.IntShift) &amp; Val.IntMask) != 0">{"$T2", s8b}: {($T2)(Val.Value &amp; Val.PointerBitMask)}</DisplayString>
+    <DisplayString  Optional="true" IncludeView="deref" Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) == 0">{*($T1)(Val.Value &amp; ValTy::InfoTy::PointerBitMask)}</DisplayString>
+    <DisplayString  Optional="true" IncludeView="deref" Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) != 0">{*($T2)(Val.Value &amp; ValTy::InfoTy::PointerBitMask)}</DisplayString>
+    <DisplayString  Optional="true" Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) == 0">{"$T1", s8b}: {($T1)(Val.Value &amp; ValTy::InfoTy::PointerBitMask)}</DisplayString>
+    <DisplayString  Optional="true" Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) != 0">{"$T2", s8b}: {($T2)(Val.Value &amp; ValTy::InfoTy::PointerBitMask)}</DisplayString>
     <Expand>
-      <ExpandedItem Condition="((Val.Value &gt;&gt; Val.IntShift) &amp; Val.IntMask) == 0">($T1)(Val.Value &amp; Val.PointerBitMask)</ExpandedItem>
-      <ExpandedItem Condition="((Val.Value &gt;&gt; Val.IntShift) &amp; Val.IntMask) != 0">($T2)(Val.Value &amp; Val.PointerBitMask)</ExpandedItem>
+      <Item Name="[Holds]" Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) == 0">"$T1", s8b</Item>
+      <Item Name="[Ptr]" Optional="true"  Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) == 0">($T1)(Val.Value &amp; ValTy::InfoTy::PointerBitMask)</Item>
+      <Item Name="[Holds]" Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) != 0">"$T2", s8b</Item>
+      <Item Name="[Ptr]" Optional="true" Condition="((Val.Value &gt;&gt; ValTy::InfoTy::IntShift) &amp; ValTy::InfoTy::IntMask) != 0">($T2)(Val.Value &amp; ValTy::InfoTy::PointerBitMask)</Item>
     </Expand>
   </Type>
 
   <Type Name="llvm::PointerUnion3&lt;*,*,*&gt;">
-    <DisplayString Condition="(Val.Val.Value &amp; 2) != 2 &amp;&amp; (Val.Val.Value &amp; 1) != 1">{"$T1", s8b}: {($T1)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)}</DisplayString>
-    <DisplayString Condition="(Val.Val.Value &amp; 2) == 2">{"$T2", s8b}: {($T2)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)}</DisplayString>
-    <DisplayString Condition="(Val.Val.Value &amp; 1) == 1">{"$T3", s8b}: {($T3)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)}</DisplayString>
+    <DisplayString Optional="true" IncludeView="deref" Condition="(Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1">{Val,view(deref)}</DisplayString>
+    <DisplayString Optional="true" IncludeView="deref">{*(InnerUnion*)&amp;Val.Val.Value,view(deref)}</DisplayString>
+    <DisplayString Condition="(Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1">{Val}</DisplayString>
+    <DisplayString>{*(InnerUnion*)&amp;Val.Val.Value}</DisplayString>
     <Expand>
-      <ExpandedItem Condition="(Val.Val.Value &amp; 2) != 2 &amp;&amp; (Val.Val.Value &amp; 1) != 1">($T1)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)</ExpandedItem>
-      <ExpandedItem Condition="(Val.Val.Value &amp; 2) == 2">($T2)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)</ExpandedItem>
-      <ExpandedItem Condition="(Val.Val.Value &amp; 1) == 1">($T3)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)</ExpandedItem>
+      <Item Name="[Holds]" Condition="(Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1">"$T3", s8b</Item>
+      <Item Name="[Ptr]" Condition="(Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1">($T3)(Val.Val.Value &amp; ValTy::ValTy::InfoTy::PointerBitMask)</Item>
+      <ExpandedItem Condition="!((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1)">*(InnerUnion*)&amp;Val.Val.Value</ExpandedItem>
     </Expand>
   </Type>
 
   <Type Name="llvm::PointerUnion4&lt;*,*,*,*&gt;">
-    <DisplayString Condition="(Val.Val.Value &amp; 3) != 3 &amp;&amp; (Val.Val.Value &amp; 2) != 2 &amp;&amp; (Val.Val.Value &amp; 1) != 1">{"$T1", s8b}: {($T1)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)}</DisplayString>
-    <DisplayString Condition="(Val.Val.Value &amp; 3) != 3 &amp;&amp; (Val.Val.Value &amp; 2) == 2">{"$T2", s8b}: {($T2)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)}</DisplayString>
-    <DisplayString Condition="(Val.Val.Value &amp; 3) != 3 &amp;&amp; (Val.Val.Value &amp; 1) == 1">{"$T3", s8b}: {($T3)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)}</DisplayString>
-    <DisplayString Condition="(Val.Val.Value &amp; 3) == 3">{"$T4", s8b}: {($T4)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)}</DisplayString>
+    <DisplayString Optional="true" IncludeView="deref"
+                   Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+      {*($T1)(Val.Val.Value &amp; InnerUnion1::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
+    <DisplayString Optional="true" IncludeView="deref"
+                   Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+      {*($T2)(Val.Val.Value &amp; InnerUnion1::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
+    <DisplayString  Optional="true" IncludeView="deref"
+                    Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+      {*($T3)(Val.Val.Value &amp; InnerUnion2::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
+    <DisplayString  Optional="true" IncludeView="deref"
+                    Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+      {*($T4)(Val.Val.Value &amp; InnerUnion2::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
+    <DisplayString Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+      {"$T1", s8b}: {($T1)(Val.Val.Value &amp; InnerUnion1::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
+    <DisplayString Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+      {"$T2", s8b}: {($T2)(Val.Val.Value &amp; InnerUnion1::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
+    <DisplayString Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+      {"$T3", s8b}: {($T3)(Val.Val.Value &amp; InnerUnion2::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
+    <DisplayString Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+      {"$T4", s8b}: {($T4)(Val.Val.Value &amp; InnerUnion2::ValTy::InfoTy::PointerBitMask)}
+    </DisplayString>
     <Expand>
-      <ExpandedItem Condition="(Val.Val.Value &amp; 3) != 3 &amp;&amp; (Val.Val.Value &amp; 2) != 2 &amp;&amp; (Val.Val.Value &amp; 1) != 1">($T1)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)</ExpandedItem>
-      <ExpandedItem Condition="(Val.Val.Value &amp; 3) != 3 &amp;&amp; (Val.Val.Value &amp; 2) == 2">($T2)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)</ExpandedItem>
-      <ExpandedItem Condition="(Val.Val.Value &amp; 3) != 3 &amp;&amp; (Val.Val.Value &amp; 1) == 1">($T3)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)</ExpandedItem>
-      <ExpandedItem Condition="(Val.Val.Value &amp; 3) == 3">($T4)((Val.Val.Value &gt;&gt; 2) &lt;&lt; 2)</ExpandedItem>
+      <Item Name="[Holds]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+        "$T1", s8b
+      </Item>
+      <Item Name="[Ptr]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+        ($T1)(Val.Val.Value &amp; InnerUnion1::ValTy::InfoTy::PointerBitMask)
+      </Item>
+      <Item Name="[Holds]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+        "$T2", s8b
+      </Item>
+      <Item Name="[Ptr]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 0 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion1::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+        ($T2)(Val.Val.Value &amp; InnerUnion1::ValTy::InfoTy::PointerBitMask)
+      </Item>
+      <Item Name="[Holds]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+        "$T3", s8b
+      </Item>
+      <Item Name="[Ptr]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 0">
+        ($T3)(Val.Val.Value &amp; InnerUnion2::ValTy::InfoTy::PointerBitMask)
+      </Item>
+      <Item Name="[Holds]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+        "$T4", s8b
+      </Item>
+      <Item Name="[Ptr]" Condition="((Val.Val.Value &gt;&gt; ValTy::ValTy::InfoTy::IntShift) &amp; 1) == 1 &amp;&amp; ((Val.Val.Value &gt;&gt; InnerUnion2::ValTy::InfoTy::IntShift) &amp; 1) == 1">
+        ($T4)(Val.Val.Value &amp; InnerUnion2::ValTy::InfoTy::PointerBitMask)
+      </Item>
     </Expand>
   </Type>
 
@@ -139,10 +191,10 @@
 
   <Type Name="llvm::IntrusiveRefCntPtr&lt;*&gt;">
     <DisplayString Condition="Obj == 0">empty</DisplayString>
-    <DisplayString Condition="(Obj != 0) &amp;&amp; (Obj-&gt;ref_cnt == 1)">RefPtr [1 ref] {*Obj}</DisplayString>
-    <DisplayString Condition="(Obj != 0) &amp;&amp; (Obj-&gt;ref_cnt != 1)">RefPtr [{Obj-&gt;ref_cnt} refs] {*Obj}</DisplayString>
+    <DisplayString Condition="(Obj != 0) &amp;&amp; (Obj-&gt;RefCount == 1)">RefPtr [1 ref] {*Obj}</DisplayString>
+    <DisplayString Condition="(Obj != 0) &amp;&amp; (Obj-&gt;RefCount != 1)">RefPtr [{Obj-&gt;RefCount} refs] {*Obj}</DisplayString>
     <Expand>
-      <Item Condition="Obj != 0" Name="[refs]">Obj-&gt;ref_cnt</Item>
+      <Item Condition="Obj != 0" Name="[refs]">Obj-&gt;RefCount</Item>
       <ExpandedItem Condition="Obj != 0">Obj</ExpandedItem>
     </Expand>
   </Type>
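
All of the PointerUnion visualizers above decode the same tagged-pointer
scheme: the discriminator bits live in low bits of the stored word that
pointer alignment guarantees to be zero, and masking recovers the pointer
itself. A minimal Python sketch of the two-member case (the constant values
assume a one-bit tag; the real IntShift/IntMask/PointerBitMask are computed
from the pointee types):

    INT_SHIFT = 0          # bit position of the discriminator
    INT_MASK = 1           # one tag bit distinguishes $T1 from $T2
    POINTER_BIT_MASK = ~1  # everything except the tag bit

    def decode_pointer_union(value):
        holds_t2 = bool((value >> INT_SHIFT) & INT_MASK)  # the natvis Condition
        pointer = value & POINTER_BIT_MASK                # the [Ptr] expression
        return holds_t2, pointer
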
diff --git a/utils/Reviewing/find_interesting_reviews.py b/utils/Reviewing/find_interesting_reviews.py
index 5af462b..7bfbec8 100644
--- a/utils/Reviewing/find_interesting_reviews.py
+++ b/utils/Reviewing/find_interesting_reviews.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import argparse
 import email.mime.multipart
 import email.mime.text
@@ -50,10 +52,10 @@
         return self.id2PhabObjects[id]
 
     def get_ids_in_cache(self):
-        return self.id2PhabObjects.keys()
+        return list(self.id2PhabObjects.keys())
 
     def get_objects(self):
-        return self.id2PhabObjects.values()
+        return list(self.id2PhabObjects.values())
 
     DEFAULT_DIRECTORY = "PhabObjectCache"
 
diff --git a/utils/TableGen/FixedLenDecoderEmitter.cpp b/utils/TableGen/FixedLenDecoderEmitter.cpp
index 1206f77..5e621fc 100644
--- a/utils/TableGen/FixedLenDecoderEmitter.cpp
+++ b/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -1717,7 +1717,7 @@
   dumpStack(errs(), "\t\t");
 
   for (unsigned i = 0; i < Opcodes.size(); ++i) {
-    errs() << '\t' << Opcodes[i] << " ";
+    errs() << '\t' << AllInstructions[Opcodes[i]] << " ";
     dumpBits(errs(),
              getBitsField(*AllInstructions[Opcodes[i]].EncodingDef, "Inst"));
     errs() << '\n';
diff --git a/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp b/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
index ab78571..788f142 100644
--- a/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
+++ b/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
@@ -92,6 +92,7 @@
         // Collect operand types for storage in a shared list.
         CurOperandList.clear();
         for (auto &Op : CGI.Operands.OperandList) {
+          assert(Op.OperandType != "MCOI::OPERAND_UNKNOWN");
           CurOperandList.push_back(Op.OperandType);
         }
         // See if we already have stored this sequence before. This is not
diff --git a/utils/Target/ARM/analyze-match-table.py b/utils/Target/ARM/analyze-match-table.py
index aa952d4..d4e158d 100644
--- a/utils/Target/ARM/analyze-match-table.py
+++ b/utils/Target/ARM/analyze-match-table.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 def analyze_match_table(path):
     # Extract the instruction table.
     data = open(path).read()
@@ -37,10 +39,10 @@
     condcode_mnemonics = set(m for m in mnemonics
                              if 'MCK_CondCode' in mnemonic_flags[m])
     noncondcode_mnemonics = mnemonics - condcode_mnemonics
-    print ' || '.join('Mnemonic == "%s"' % m
-                      for m in ccout_mnemonics)
-    print ' || '.join('Mnemonic == "%s"' % m
-                      for m in noncondcode_mnemonics)
+    print(' || '.join('Mnemonic == "%s"' % m
+                      for m in ccout_mnemonics))
+    print(' || '.join('Mnemonic == "%s"' % m
+                      for m in noncondcode_mnemonics))
 
 def main():
     import sys
diff --git a/utils/create_ladder_graph.py b/utils/create_ladder_graph.py
index d29e3ad..a5946ff 100644
--- a/utils/create_ladder_graph.py
+++ b/utils/create_ladder_graph.py
@@ -10,6 +10,8 @@
 really behaving linearly.
 """
 
+from __future__ import print_function
+
 import argparse
 def main():
   parser = argparse.ArgumentParser(description=__doc__)
@@ -17,27 +19,27 @@
                       help="Number of ladder rungs. Must be a multiple of 2")
   args = parser.parse_args()
   if (args.rungs % 2) != 0:
-    print "Rungs must be a multiple of 2"
+    print("Rungs must be a multiple of 2")
     return
-  print "int ladder(int *foo, int *bar, int x) {"
-  rung1 = xrange(0, args.rungs, 2)
-  rung2 = xrange(1, args.rungs, 2)
+  print("int ladder(int *foo, int *bar, int x) {")
+  rung1 = range(0, args.rungs, 2)
+  rung2 = range(1, args.rungs, 2)
   for i in rung1:
-    print "rung1%d:" % i
-    print "*foo = x++;"
+    print("rung1%d:" % i)
+    print("*foo = x++;")
     if i != rung1[-1]:
-      print "if (*bar) goto rung1%d;" % (i+2)
-      print "else goto rung2%d;" % (i+1)
+      print("if (*bar) goto rung1%d;" % (i+2))
+      print("else goto rung2%d;" % (i+1))
     else:
-      print "goto rung2%d;" % (i+1)
+      print("goto rung2%d;" % (i+1))
   for i in rung2:
-    print "rung2%d:" % i
-    print "*foo = x++;"
+    print("rung2%d:" % i)
+    print("*foo = x++;")
     if i != rung2[-1]:
-      print "goto rung2%d;" % (i+2)
+      print("goto rung2%d;" % (i+2))
     else:
-      print "return *foo;"
-  print "}"
+      print("return *foo;")
+  print("}")
 
 if __name__ == '__main__':
   main()
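
The conversion above only swaps print statements for print() calls and
xrange for range; the generated C is unchanged. For example, with rungs = 4
the script emits:

    int ladder(int *foo, int *bar, int x) {
    rung10:
    *foo = x++;
    if (*bar) goto rung12;
    else goto rung21;
    rung12:
    *foo = x++;
    goto rung23;
    rung21:
    *foo = x++;
    goto rung23;
    rung23:
    *foo = x++;
    return *foo;
    }
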
diff --git a/utils/demangle_tree.py b/utils/demangle_tree.py
index 1185a23..00de72b 100644
--- a/utils/demangle_tree.py
+++ b/utils/demangle_tree.py
@@ -4,6 +4,8 @@
 # demanglings.  Useful for stress testing the demangler against a large corpus
 # of inputs.
 
+from __future__ import print_function
+
 import argparse
 import functools
 import os
diff --git a/utils/docker/scripts/llvm_checksum/llvm_checksum.py b/utils/docker/scripts/llvm_checksum/llvm_checksum.py
index 584efa2..f554d92 100755
--- a/utils/docker/scripts/llvm_checksum/llvm_checksum.py
+++ b/utils/docker/scripts/llvm_checksum/llvm_checksum.py
@@ -183,7 +183,7 @@
     if len(new_checksums) != len(reference_checksums):
       return False
 
-  for proj, checksum in new_checksums.iteritems():
+  for proj, checksum in new_checksums.items():
     # We never computed a checksum for this project.
     if proj not in reference_checksums:
       return False
diff --git a/utils/extract_vplan.py b/utils/extract_vplan.py
index ac0055d..b4e34fc 100755
--- a/utils/extract_vplan.py
+++ b/utils/extract_vplan.py
@@ -4,6 +4,8 @@
 # and saves them in individual dot files (one for each plan). Optionally, and
 # providing 'dot' is installed, it can also render the dot into a PNG file.
 
+from __future__ import print_function
+
 import sys
 import re
 import argparse
diff --git a/utils/gdb-scripts/prettyprinters.py b/utils/gdb-scripts/prettyprinters.py
index 918411d..7ddc33a 100644
--- a/utils/gdb-scripts/prettyprinters.py
+++ b/utils/gdb-scripts/prettyprinters.py
@@ -1,12 +1,15 @@
+from __future__ import print_function
+import sys
+
 import gdb.printing
 
 class Iterator:
   def __iter__(self):
     return self
 
-  # Python 2 compatibility
-  def next(self):
-    return self.__next__()
+  if sys.version_info.major == 2:
+      def next(self):
+        return self.__next__()
 
   def children(self):
     return self
@@ -68,7 +71,7 @@
     def __iter__(self):
       return self
 
-    def next(self):
+    def __next__(self):
       if self.cur == self.end:
         raise StopIteration
       count = self.count
@@ -77,13 +80,12 @@
       self.cur = self.cur + 1
       return '[%d]' % count, cur.dereference()
 
-    __next__ = next
+    if sys.version_info.major == 2:
+        next = __next__
 
   def __init__(self, val):
     self.val = val
 
-    __next__ = next
-
   def children(self):
     data = self.val['Data']
     return self._iterator(data, data + self.val['Length'])
@@ -167,7 +169,7 @@
       while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
         self.cur = self.cur + 1
 
-    def next(self):
+    def __next__(self):
       if self.cur == self.end:
         raise StopIteration
       cur = self.cur
@@ -180,7 +182,8 @@
         self.first = False
       return 'x', v
 
-    __next__ = next
+    if sys.version_info.major == 2:
+        next = __next__
 
   def __init__(self, val):
     self.val = val
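
The compatibility pattern used throughout this file is worth seeing in
isolation: implement the Python 3 iterator protocol via __next__, and only
alias it back to next() when actually running under Python 2, so no stray
next() method is visible on Python 3. A self-contained sketch:

    import sys

    class CountToThree:
        """Yields 1, 2, 3 under both Python 2 and Python 3."""

        def __init__(self):
            self.n = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self.n == 3:
                raise StopIteration
            self.n += 1
            return self.n

        # Python 2's iterator protocol looks for next() instead.
        if sys.version_info.major == 2:
            next = __next__

    print(list(CountToThree()))  # prints [1, 2, 3]
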
diff --git a/utils/gn/.gn b/utils/gn/.gn
index 86d2da9..6c77157 100644
--- a/utils/gn/.gn
+++ b/utils/gn/.gn
@@ -1,6 +1,6 @@
-# FIXME: Once it's possible to add files to the root directory of the
-# monorepo, move this file to there.  Until then, you need to pass
-# `--dotfile=llvm/utils/gn/.gn --root=.` to the `gn gen` command.
+# Since this can't be at the toplevel, you either need to pass
+# `--dotfile=llvm/utils/gn/.gn --root=.` to the `gn gen` command
+# or use llvm/utils/gn/gn.py which calls gn with these two flags added.
 
 buildconfig = "//llvm/utils/gn/build/BUILDCONFIG.gn"
 
diff --git a/utils/gn/README.rst b/utils/gn/README.rst
index fd811e9..4d8051f 100644
--- a/utils/gn/README.rst
+++ b/utils/gn/README.rst
@@ -18,11 +18,7 @@
 files. Keeping the GN build files up-to-date is on the people who use the GN
 build.
 
-*Another Warning* Right now, we're in the process of getting the GN build
-checked in. As of this writing, it's not yet functional at all. Check back
-in a few weeks!
-
-`GN <https://gn.googlesource.com/gn/>`_ is another metabuild system. It always
+`GN <https://gn.googlesource.com/gn/>`_ is a metabuild system. It always
 creates ninja files, but it can create some IDE projects (MSVC, Xcode, ...)
 which then shell out to ninja for the actual build.
 
@@ -32,11 +28,6 @@
 making it possible to switch e.g. between release and debug builds in one build
 directory.
 
-It is arguable easier to configure than the CMake build, and has native support
-for building with multiple toolchains in one build directory. The build
-description is declarative-ish, allowing GN to print it in a json format that
-can fairly easily be converted to other metabuild system inputs.
-
 The main motivation behind the GN build is that some people find it more
 convenient for day-to-day hacking on LLVM than CMake. Distribution, building
 just parts of LLVM, and embedding the LLVM GN build from other builds are a
@@ -49,27 +40,26 @@
 Quick start
 ===========
 
-*Warning* Right now, we're in the process of getting the GN build checked in.
-As of this writing, it's not yet functional at all.
-
 GN only works in the monorepo layout.
 
 #. Obtain a `gn binary <https://gn.googlesource.com/gn/#getting-started>`_.
 
-#. In the root of the monorepo, run
-   `gn gen --dotfile=$PWD/llvm/utils/gn/.gn --root=. out/gn` (`out/gn` is the
-   build directory, it can have any name, and you can have as many as you want,
-   each with different build settings).
+#. In the root of the monorepo, run `llvm/utils/gn/gn.py gen out/gn`.
+   `out/gn` is the build directory, it can have any name, and you can have as
+   many as you want, each with different build settings.  (The `gn.py` script
+   adds `--dotfile=llvm/utils/gn/.gn --root=.` and just runs regular `gn`;
+   you can manually pass these parameters and not use the wrapper if you
+   prefer.)
 
-#. Run e.g. `ninja -C out/gn llvm-undname` to build all prerequisites for and
-   including the Microsoft symbol name pretty printing tool llvm-undname.
+#. Run e.g. `ninja -C out/gn check-lld` to build all prerequisites for and
+   run the LLD tests.
 
 By default, you get a release build with assertions enabled that targets
 the host arch. You can set various build options by editing `out/gn/args.gn`,
 for example putting `is_debug = true` in there gives you a debug build. Run
-`gn args --list out/gn` to see a list of all possible options. After touching
-`out/gn/args.gn`, just run ninja, it will re-invoke gn before starting the
-build.
+`llvm/utils/gn/gn.py args --list out/gn` to see a list of all possible
+options. After touching `out/gn/args.gn`, just run ninja, it will re-invoke gn
+before starting the build.
 
 GN has extensive built-in help; try e.g. `gn help gen` to see the help
 for the `gen` command. The full GN reference is also `available online
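
For example, a hypothetical `out/gn/args.gn` for a debug build that uses a
locally built clang and links with LLD (all three arguments are declared in
the build files below) might read:

    is_debug = true
    clang_base_path = getenv("HOME") + "/src/llvm-build/Release+Asserts"
    use_lld = true
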
diff --git a/utils/gn/TODO.txt b/utils/gn/TODO.txt
new file mode 100644
index 0000000..1588470
--- /dev/null
+++ b/utils/gn/TODO.txt
@@ -0,0 +1,28 @@
+Ideas for things to do:
+
+- more projects (compiler-rt libcxx libcxxabi libunwind clang-tools-extra lldb)
+  - phosek expressed interest in compiler-rt libcxx libcxxabi libunwind
+  - once there are more projects, have an llvm_enable_projects arg, modeled
+    after llvm_targets_to_build in the GN build
+  - a check-all build target that runs test of all projects
+- more targets (AVR MIPS RISCV SystemZ etc)
+  - example: https://reviews.llvm.org/D56416
+- investigate feasibility of working `gn check`
+
+- "optimized tblgen" mode
+  - either just always build tablegen and support with opt config
+  - or use opt toolchain and build tablegen twice in debug builds, like cmake
+
+- cross builds using GN's toolchain feature
+- one-build-dir bootstrap builds using GN's toolchain feature
+
+- move clang_tablegen into lib/ for private files
+- add dead code stripping
+- move run_tablegen.py from build to tablegen folder
+- figure out why -Iclang/Support gets added so often
+- make LLVM_LIBXML2_ENABLED use llvm_canonicalize_cmake_boolean (cf D28294),
+  clean up both GN and CMake builds
+- plugin() template with working rpath, exports thingers
+  - then port clang_build_examples and enable by default so that clang
+    plugin tests run by default
+- plugin_host() template
diff --git a/utils/gn/build/BUILD.gn b/utils/gn/build/BUILD.gn
index 37aa431..e5c8d2a 100644
--- a/utils/gn/build/BUILD.gn
+++ b/utils/gn/build/BUILD.gn
@@ -1,20 +1,17 @@
 import("//llvm/utils/gn/build/buildflags.gni")
 import("//llvm/utils/gn/build/mac_sdk.gni")
 import("//llvm/utils/gn/build/toolchain/compiler.gni")
+import("//llvm/utils/gn/build/toolchain/target_flags.gni")
 
 config("compiler_defaults") {
   defines = []
 
-  # FIXME: Don't define this globally here.
-  if (host_os != "win") {
-    defines += [ "LLVM_ON_UNIX" ]
-  }
-
   if (!llvm_enable_assertions) {
     defines += [ "NDEBUG" ]
   }
 
-  cflags = []
+  cflags = target_flags + target_cflags
+  ldflags = target_flags + target_ldflags
 
   if (host_os == "mac" && clang_base_path != "") {
     cflags += [
@@ -33,7 +30,6 @@
     cflags_cc = [
       "-std=c++11",
       "-fno-exceptions",
-      "-fno-rtti",
       "-fvisibility-inlines-hidden",
     ]
   } else {
@@ -57,10 +53,7 @@
       "_UNICODE",
       "UNICODE",
     ]
-    cflags += [
-      "/EHs-c-",
-      "/GR-",
-    ]
+    cflags += [ "/EHs-c-" ]
 
     # The MSVC default value (1 MB) is not enough for parsing recursive C++
     # templates in Clang.
@@ -113,6 +106,18 @@
       cflags += [ "-Wno-nonportable-include-path" ]
     }
   }
+
+  if (use_lld) {
+    ldflags += [ "-fuse-ld=lld" ]
+  }
+}
+
+config("no_rtti") {
+  if (current_os == "win") {
+    cflags_cc = [ "/GR-" ]
+  } else {
+    cflags_cc = [ "-fno-rtti" ]
+  }
 }
 
 config("llvm_code") {
@@ -136,6 +141,15 @@
   ]
 }
 
+config("crt_code") {
+  include_dirs = [ "//compiler-rt/lib" ]
+  cflags = [
+    "-fPIC",
+    "-funwind-tables",
+    "-gline-tables-only",
+  ]
+}
+
 config("warn_covered_switch_default") {
   if (is_clang) {
     cflags = [ "-Wcovered-switch-default" ]
diff --git a/utils/gn/build/BUILDCONFIG.gn b/utils/gn/build/BUILDCONFIG.gn
index c440365..cec736f 100644
--- a/utils/gn/build/BUILDCONFIG.gn
+++ b/utils/gn/build/BUILDCONFIG.gn
@@ -2,31 +2,48 @@
 # Targets can opt out of a config by removing it from their local configs list.
 # If you're adding global flags and don't need targets to be able to opt out,
 # add the flags to compiler_defaults, not to a new config.
-_shared_binary_target_configs = [
+shared_binary_target_configs = [
   "//llvm/utils/gn/build:compiler_defaults",
   "//llvm/utils/gn/build:llvm_code",
+  "//llvm/utils/gn/build:no_rtti",
   "//llvm/utils/gn/build:warn_covered_switch_default",
 ]
 
 # Apply that default list to the binary target types.
 set_defaults("executable") {
-  configs = _shared_binary_target_configs
+  configs = shared_binary_target_configs
 }
 set_defaults("loadable_module") {
-  configs = _shared_binary_target_configs
+  configs = shared_binary_target_configs
 }
 set_defaults("static_library") {
-  configs = _shared_binary_target_configs
+  configs = shared_binary_target_configs
 }
 set_defaults("shared_library") {
-  configs = _shared_binary_target_configs
+  configs = shared_binary_target_configs
 }
 set_defaults("source_set") {
-  configs = _shared_binary_target_configs
+  configs = shared_binary_target_configs
+}
+
+if (target_os == "") {
+  target_os = host_os
+}
+if (current_os == "") {
+  current_os = target_os
+}
+
+if (target_cpu == "") {
+  target_cpu = host_cpu
+}
+if (current_cpu == "") {
+  current_cpu = target_cpu
 }
 
 if (host_os == "win") {
-  set_default_toolchain("//llvm/utils/gn/build/toolchain:win")
+  host_toolchain = "//llvm/utils/gn/build/toolchain:win"
 } else {
-  set_default_toolchain("//llvm/utils/gn/build/toolchain:unix")
+  host_toolchain = "//llvm/utils/gn/build/toolchain:unix"
 }
+
+set_default_toolchain(host_toolchain)
diff --git a/utils/gn/build/fuzzer.gni b/utils/gn/build/fuzzer.gni
new file mode 100644
index 0000000..b5aaf94
--- /dev/null
+++ b/utils/gn/build/fuzzer.gni
@@ -0,0 +1,49 @@
+# This file introduces a template for defining fuzzers.
+#
+# All parameters valid for executable() targets are valid (cflags, defines,
+# deps, include_dirs, sources, ...). In addition to that:
+#
+#   dummy_main (required)
+#       Path to a cpp file containing main(), used when neither
+#       llvm_lib_fuzzing_engine nor llvm_use_sanitize_coverage is set.
+#
+# Example of usage:
+#
+#   fuzzer("llvm-opt-fuzzer") {
+#     deps = [ ... ]
+#     dummy_main = "DummyOptFuzzer.cpp"
+#     sources = [ "llvm-opt-fuzzer.cpp" ]
+#   }
+
+declare_args() {
+  # Set to the path of a static library containing a fuzzing engine, e.g.
+  # oss-fuzz's $LIB_FUZZING_ENGINE.
+  llvm_lib_fuzzing_engine = ""
+
+  # If true, pass -fsanitize=fuzzer to the compiler for fuzzer() targets.
+  # Likely only makes sense to set if you know that the host compiler is clang.
+  llvm_use_sanitize_coverage = false
+}
+
+template("fuzzer") {
+  assert(defined(invoker.dummy_main), "must set 'dummy_main' in $target_name")
+  assert(defined(invoker.sources), "must set 'sources' for $target_name")
+  executable(target_name) {
+    forward_variables_from(invoker, "*", [ "dummy_main" ])
+    if (llvm_lib_fuzzing_engine != "") {
+      if (!defined(libs)) {
+        libs = []
+      }
+      libs += [ llvm_lib_fuzzing_engine ]
+      not_needed(invoker, [ "dummy_main" ])
+    } else if (llvm_use_sanitize_coverage) {
+      if (!defined(cflags)) {
+        cflags = []
+      }
+      cflags += [ "-fsanitize=fuzzer" ]
+      not_needed(invoker, [ "dummy_main" ])
+    } else {
+      sources += [ invoker.dummy_main ]
+    }
+  }
+}
diff --git a/utils/gn/build/libs/edit/BUILD.gn b/utils/gn/build/libs/edit/BUILD.gn
new file mode 100644
index 0000000..c22a7e6
--- /dev/null
+++ b/utils/gn/build/libs/edit/BUILD.gn
@@ -0,0 +1,12 @@
+import("//llvm/utils/gn/build/libs/edit/enable.gni")
+
+config("edit_config") {
+  visibility = [ ":edit" ]
+  libs = [ "edit" ]
+}
+
+group("edit") {
+  if (llvm_enable_libedit) {
+    public_configs = [ ":edit_config" ]
+  }
+}
diff --git a/utils/gn/build/libs/edit/enable.gni b/utils/gn/build/libs/edit/enable.gni
new file mode 100644
index 0000000..b973b0e
--- /dev/null
+++ b/utils/gn/build/libs/edit/enable.gni
@@ -0,0 +1,3 @@
+declare_args() {
+  llvm_enable_libedit = host_os == "mac"
+}
diff --git a/utils/gn/build/libs/pthread/BUILD.gn b/utils/gn/build/libs/pthread/BUILD.gn
index 51e7f1f..7708d31 100644
--- a/utils/gn/build/libs/pthread/BUILD.gn
+++ b/utils/gn/build/libs/pthread/BUILD.gn
@@ -6,7 +6,8 @@
 }
 
 group("pthread") {
-  if (llvm_enable_threads && host_os != "win") {
+  # On Android, bionic has built-in support for pthreads.
+  if (llvm_enable_threads && current_os != "win" && current_os != "android") {
     public_configs = [ ":pthread_config" ]
   }
 }
diff --git a/utils/gn/build/libs/xar/BUILD.gn b/utils/gn/build/libs/xar/BUILD.gn
new file mode 100644
index 0000000..0c30abf
--- /dev/null
+++ b/utils/gn/build/libs/xar/BUILD.gn
@@ -0,0 +1,12 @@
+import("//llvm/utils/gn/build/libs/xar/enable.gni")
+
+config("xar_config") {
+  visibility = [ ":xar" ]
+  libs = [ "xar" ]
+}
+
+group("xar") {
+  if (llvm_enable_libxar) {
+    public_configs = [ ":xar_config" ]
+  }
+}
diff --git a/utils/gn/build/libs/xar/enable.gni b/utils/gn/build/libs/xar/enable.gni
new file mode 100644
index 0000000..c394a7e
--- /dev/null
+++ b/utils/gn/build/libs/xar/enable.gni
@@ -0,0 +1,3 @@
+declare_args() {
+  llvm_enable_libxar = host_os == "mac"
+}
diff --git a/utils/gn/build/sync_source_lists_from_cmake.py b/utils/gn/build/sync_source_lists_from_cmake.py
index 7753967..21abe7e 100755
--- a/utils/gn/build/sync_source_lists_from_cmake.py
+++ b/utils/gn/build/sync_source_lists_from_cmake.py
@@ -4,7 +4,11 @@
 
 For each BUILD.gn file in the tree, checks if the list of cpp files in
 it is identical to the list of cpp files in the corresponding CMakeLists.txt
-file, and prints the difference if not."""
+file, and prints the difference if not.
+
+Also checks that each CMakeLists.txt file below unittests/ that defines
+binaries has a corresponding BUILD.gn file.
+"""
 
 from __future__ import print_function
 
@@ -12,13 +16,14 @@
 import re
 import subprocess
 
-def main():
+
+def sync_source_lists():
     gn_files = subprocess.check_output(
             ['git', 'ls-files', '*BUILD.gn']).splitlines()
 
-    # Matches e.g. |   "foo.cpp",|.
+    # Matches e.g. |   "foo.cpp",|, captures |foo.cpp| in group 1.
     gn_cpp_re = re.compile(r'^\s*"([^"]+\.(?:cpp|h))",$', re.MULTILINE)
-    # Matches e.g. |   "foo.cpp"|.
+    # Matches e.g. |   foo.cpp|, captures |foo.cpp| in group 1.
     cmake_cpp_re = re.compile(r'^\s*([A-Za-z_0-9/-]+\.(?:cpp|h))$',
                               re.MULTILINE)
 
@@ -50,5 +55,29 @@
             print('remove:\n' + '\n'.join(remove))
         print()
 
+
+def sync_unittests():
+    # Matches e.g. |add_llvm_unittest_with_input_files|.
+    unittest_re = re.compile(r'^add_\S+_unittest', re.MULTILINE)
+
+    checked = [ 'clang', 'lld', 'llvm' ]
+    for c in checked:
+        for root, _, _ in os.walk(os.path.join(c, 'unittests')):
+            cmake_file = os.path.join(root, 'CMakeLists.txt')
+            if not os.path.exists(cmake_file):
+                continue
+            if not unittest_re.search(open(cmake_file).read()):
+                continue  # Skip CMake files that just add subdirectories.
+            gn_file = os.path.join('llvm/utils/gn/secondary', root, 'BUILD.gn')
+            if not os.path.exists(gn_file):
+                print('missing GN file %s for unittest CMake file %s' %
+                      (gn_file, cmake_file))
+
+
+def main():
+    sync_source_lists()
+    sync_unittests()
+
+
 if __name__ == '__main__':
     main()
diff --git a/utils/gn/build/toolchain/BUILD.gn b/utils/gn/build/toolchain/BUILD.gn
index 927a53d..97c64ff 100644
--- a/utils/gn/build/toolchain/BUILD.gn
+++ b/utils/gn/build/toolchain/BUILD.gn
@@ -9,7 +9,123 @@
   }
 }
 
-toolchain("unix") {
+template("unix_toolchain") {
+  toolchain(target_name) {
+    forward_variables_from(invoker, "*")
+
+    tool("cc") {
+      depfile = "{{output}}.d"
+      command = "$cc -MMD -MF $depfile -o {{output}} -c {{source}} {{defines}} {{include_dirs}} {{cflags}} {{cflags_c}}"
+      depsformat = "gcc"
+      description = "CC {{output}}"
+      outputs = [
+        "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+      ]
+    }
+
+    tool("cxx") {
+      depfile = "{{output}}.d"
+      command = "$cxx -MMD -MF $depfile -o {{output}} -c {{source}} {{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}}"
+      depsformat = "gcc"
+      description = "CXX {{output}}"
+      outputs = [
+        "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+      ]
+    }
+
+    tool("asm") {
+      depfile = "{{output}}.d"
+      command = "$cc -MMD -MF $depfile -o {{output}} -c {{source}} {{defines}} {{include_dirs}} {{asmflags}}"
+      depsformat = "gcc"
+      description = "ASM {{output}}"
+      outputs = [
+        "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+      ]
+    }
+
+    tool("alink") {
+      if (current_os == "mac") {
+        command = "libtool -static -no_warning_for_no_symbols {{arflags}} -o {{output}} {{inputs}}"
+      } else {
+        # Remove the output file first so that ar doesn't try to modify the
+        # existing file.
+        command =
+            "rm -f {{output}} && $ar rcsDT {{arflags}} {{output}} {{inputs}}"
+      }
+      description = "AR {{output}}"
+      outputs = [
+        "{{output_dir}}/{{target_output_name}}.a",
+      ]
+      output_prefix = "lib"
+      default_output_dir = "{{root_out_dir}}/lib"
+    }
+
+    tool("solink") {
+      outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
+      if (current_os == "mac") {
+        command = "$ld -shared {{ldflags}} -o $outfile {{libs}} {{inputs}}"
+        default_output_extension = ".dylib"
+      } else {
+        command = "$ld -shared {{ldflags}} -Wl,-z,defs -Wl,-soname,{{target_output_name}}{{output_extension}} -o $outfile {{libs}} {{inputs}}"
+        default_output_extension = ".so"
+      }
+      description = "SOLINK $outfile"
+      outputs = [
+        outfile,
+      ]
+      lib_switch = "-l"
+      output_prefix = "lib"
+      default_output_dir = "{{root_out_dir}}/lib"
+    }
+
+    tool("solink_module") {
+      outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
+      if (current_os == "mac") {
+        command = "$ld -shared {{ldflags}} -Wl,-flat_namespace -Wl,-undefined,suppress -o $outfile {{libs}} {{inputs}}"
+        default_output_extension = ".dylib"
+      } else {
+        command = "$ld -shared {{ldflags}} -Wl,-soname,{{target_output_name}}{{output_extension}} -o $outfile {{libs}} {{inputs}}"
+        default_output_extension = ".so"
+      }
+      description = "SOLINK $outfile"
+      outputs = [
+        outfile,
+      ]
+      lib_switch = "-l"
+      default_output_dir = "{{root_out_dir}}/lib"
+    }
+
+    tool("link") {
+      outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
+      if (current_os == "mac") {
+        command = "$ld {{ldflags}} -o $outfile {{libs}} {{inputs}}"
+      } else {
+        command = "$ld {{ldflags}} -o $outfile {{libs}} -Wl,--start-group {{inputs}} -Wl,--end-group"
+      }
+      description = "LINK $outfile"
+      outputs = [
+        outfile,
+      ]
+      lib_switch = "-l"
+
+      # Setting this allows targets to override the default executable output by
+      # setting output_dir.
+      default_output_dir = "{{root_out_dir}}/bin"
+    }
+
+    tool("copy") {
+      command = "ln -f {{source}} {{output}} 2>/dev/null || (rm -rf {{output}} && cp -af {{source}} {{output}})"
+      description = "COPY {{source}} {{output}}"
+    }
+
+    tool("stamp") {
+      command = "touch {{output}}"
+      description = "STAMP {{output}}"
+    }
+  }
+}
+
+unix_toolchain("unix") {
   cc = "cc"
   cxx = "c++"
 
@@ -24,105 +140,48 @@
     cxx = "$goma_dir/gomacc $cxx"
   }
 
-  tool("cc") {
-    depfile = "{{output}}.d"
-    command = "$cc -MMD -MF $depfile -o {{output}} -c {{source}} {{defines}} {{include_dirs}} {{cflags}} {{cflags_c}}"
-    depsformat = "gcc"
-    description = "CC {{output}}"
-    outputs = [
-      "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
-    ]
+  if (current_os != "mac") {
+    ar = "ar"
   }
 
-  tool("cxx") {
-    depfile = "{{output}}.d"
-    command = "$cxx -MMD -MF $depfile -o {{output}} -c {{source}} {{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}}"
-    depsformat = "gcc"
-    description = "CXX {{output}}"
-    outputs = [
-      "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+  toolchain_args = {
+    current_os = host_os
+    current_cpu = host_cpu
+  }
+}
+
+template("stage2_unix_toolchain") {
+  unix_toolchain(target_name) {
+    forward_variables_from(invoker, "*")
+
+    cc = "bin/clang"
+    cxx = "bin/clang++"
+    ld = cxx
+    ar = "bin/llvm-ar"
+
+    deps = [
+      "//:clang($host_toolchain)",
+      "//:lld($host_toolchain)",
+      "//:llvm-ar($host_toolchain)",
     ]
   }
+}
 
-  tool("alink") {
-    if (host_os == "mac") {
-      command = "libtool -static -no_warning_for_no_symbols {{arflags}} -o {{output}} {{inputs}}"
-    } else {
-      # Remove the output file first so that ar doesn't try to modify the
-      # existing file.
-      command =
-          "rm -f {{output}} && ar rcsDT {{arflags}} -o {{output}} {{inputs}}"
+stage2_unix_toolchain("stage2_unix") {
+  toolchain_args = {
+    current_os = host_os
+    current_cpu = host_cpu
+    use_lld = host_os != "mac"
+  }
+}
+
+if (android_ndk_path != "") {
+  stage2_unix_toolchain("stage2_android_aarch64") {
+    toolchain_args = {
+      current_os = "android"
+      current_cpu = "arm64"
+      use_lld = true
     }
-    description = "AR {{output}}"
-    outputs = [
-      "{{output_dir}}/{{target_output_name}}.a",
-    ]
-    output_prefix = "lib"
-    default_output_dir = "{{root_out_dir}}/lib"
-  }
-
-  tool("solink") {
-    outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
-    if (host_os == "mac") {
-      command = "$ld -shared {{ldflags}} -o $outfile {{libs}} {{inputs}}"
-      default_output_extension = ".dylib"
-    } else {
-      command =
-          "$ld -shared {{ldflags}} -Wl,-z,defs -o $outfile {{libs}} {{inputs}}"
-      default_output_extension = ".so"
-    }
-    description = "SOLINK $outfile"
-    outputs = [
-      outfile,
-    ]
-    lib_switch = "-l"
-    output_prefix = "lib"
-    default_output_dir = "{{root_out_dir}}/lib"
-  }
-
-  tool("solink_module") {
-    outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
-    if (host_os == "mac") {
-      command = "$ld -shared {{ldflags}} -Wl,-flat_namespace -Wl,-undefined,suppress -o $outfile {{libs}} {{inputs}}"
-      default_output_extension = ".dylib"
-    } else {
-      command = "$ld -shared {{ldflags}} -o $outfile {{libs}} {{inputs}}"
-      default_output_extension = ".so"
-    }
-    description = "SOLINK $outfile"
-    outputs = [
-      outfile,
-    ]
-    lib_switch = "-l"
-    default_output_dir = "{{root_out_dir}}/lib"
-  }
-
-  tool("link") {
-    outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
-    if (host_os == "mac") {
-      command = "$ld {{ldflags}} -o $outfile {{libs}} {{inputs}}"
-    } else {
-      command = "$ld {{ldflags}} -o $outfile {{libs}} -Wl,--start-group {{inputs}} -Wl,--end-group"
-    }
-    description = "LINK $outfile"
-    outputs = [
-      outfile,
-    ]
-    lib_switch = "-l"
-
-    # Setting this allows targets to override the default executable output by
-    # setting output_dir.
-    default_output_dir = "{{root_out_dir}}/bin"
-  }
-
-  tool("copy") {
-    command = "ln -f {{source}} {{output}} 2>/dev/null || (rm -rf {{output}} && cp -af {{source}} {{output}})"
-    description = "COPY {{source}} {{output}}"
-  }
-
-  tool("stamp") {
-    command = "touch {{output}}"
-    description = "STAMP {{output}}"
   }
 }
 
@@ -229,4 +288,9 @@
     command = "cmd /c type nul > {{output}}"
     description = "STAMP {{output}}"
   }
+
+  toolchain_args = {
+    current_os = "win"
+    current_cpu = host_cpu
+  }
 }
diff --git a/utils/gn/build/toolchain/compiler.gni b/utils/gn/build/toolchain/compiler.gni
index 92d965e..3c419fb 100644
--- a/utils/gn/build/toolchain/compiler.gni
+++ b/utils/gn/build/toolchain/compiler.gni
@@ -9,10 +9,17 @@
   # On Windows, setting this also causes lld-link to be used as linker.
   # Example value: getenv("HOME") + "/src/llvm-build/Release+Asserts"
   clang_base_path = ""
+
+  # Set this to the path to Android NDK r18b. If set, cross compilation targeting
+  # Android will be enabled.
+  android_ndk_path = ""
 }
 
 declare_args() {
   # Set if the host compiler is clang.  On by default on Mac or if
   # clang_base_path is set.
   is_clang = host_os == "mac" || clang_base_path != ""
+
+  # Set this to true to link with LLD instead of the default linker.
+  use_lld = clang_base_path != "" && host_os != "mac"
 }
diff --git a/utils/gn/build/toolchain/target_flags.gni b/utils/gn/build/toolchain/target_flags.gni
new file mode 100644
index 0000000..6b6373a
--- /dev/null
+++ b/utils/gn/build/toolchain/target_flags.gni
@@ -0,0 +1,34 @@
+import("//llvm/triples.gni")
+import("//llvm/utils/gn/build/toolchain/compiler.gni")
+
+target_flags = []
+target_cflags = []
+target_ldflags = []
+
+if (current_os == "android") {
+  assert(current_cpu == "arm64", "current_cpu not supported")
+
+  libcxx_path = "$android_ndk_path/sources/cxx-stl/llvm-libc++"
+  platform_lib_path =
+      "$android_ndk_path/platforms/android-21/arch-arm64/usr/lib"
+  libgcc_path = "$android_ndk_path/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/lib/gcc/aarch64-linux-android/4.9.x"
+
+  target_flags += [
+    "--target=$llvm_current_triple",
+    "--sysroot=$android_ndk_path/sysroot",
+  ]
+  target_cflags += [
+    "-isystem",
+    "$libcxx_path/include",
+  ]
+  target_ldflags += [
+    "-B$platform_lib_path",
+    "-L$platform_lib_path",
+    "-L$libgcc_path",
+  ]
+  target_ldflags += [
+    "-nostdlib++",
+    "-L$libcxx_path/libs/arm64-v8a",
+    "-l:libc++.a.21",
+  ]
+}
diff --git a/utils/gn/build/write_cmake_config.gni b/utils/gn/build/write_cmake_config.gni
new file mode 100644
index 0000000..6dca026
--- /dev/null
+++ b/utils/gn/build/write_cmake_config.gni
@@ -0,0 +1,54 @@
+# This file introduces a template for calling write_cmake_config.py.
+#
+# write_cmake_config behaves like CMake's configure_file(), but runs at build
+# time, not at generator time.  See write_cmake_config.py for details.
+#
+# Parameters:
+#
+#   input (required) [string]
+#
+#   output (required) [string]
+#
+#   values (required) [list of strings]
+#       Each entry is a '='-separated key-value pair used for substitution.
+#
+# Example use:
+#
+#   write_cmake_config("version") {
+#     input = "Version.inc.in"
+#     output = "$root_gen_dir/clang/include/clang/Basic/Version.inc"
+#     values = [
+#       "CLANG_VERSION=$llvm_version",
+#     ]
+#   }
+
+template("write_cmake_config") {
+  assert(defined(invoker.input), "must set 'input' in $target_name")
+  assert(defined(invoker.output), "must set 'output' in $target_name")
+  assert(defined(invoker.values), "must set 'values' in $target_name")
+
+  action(target_name) {
+    script = "//llvm/utils/gn/build/write_cmake_config.py"
+
+    sources = [
+      invoker.input,
+    ]
+    outputs = [
+      invoker.output,
+    ]
+    args = [
+             "-o",
+             rebase_path(outputs[0], root_build_dir),
+             rebase_path(sources[0], root_build_dir),
+           ] + invoker.values
+
+    forward_variables_from(invoker,
+                           [
+                             "configs",
+                             "deps",
+                             "public_configs",
+                             "public_deps",
+                             "visibility",
+                           ])
+  }
+}
diff --git a/utils/gn/build/write_cmake_config.py b/utils/gn/build/write_cmake_config.py
index 6a54073..eba6b4e 100755
--- a/utils/gn/build/write_cmake_config.py
+++ b/utils/gn/build/write_cmake_config.py
@@ -53,6 +53,9 @@
     values = {}
     for value in args.values:
         key, val = value.split('=', 1)
+        if key in values:
+            print('duplicate key "%s" in args' % key, file=sys.stderr)
+            return 1
         values[key] = val.replace('\\n', '\n')
     unused_values = set(values.keys())
 
@@ -86,7 +89,7 @@
 
     if unused_values:
         print('unused values args:', file=sys.stderr)
-        print('    ', '\n    '.join(unused_values), file=sys.stderr)
+        print('    ' + '\n    '.join(unused_values), file=sys.stderr)
         return 1
 
     output = ''.join(out_lines)
@@ -98,6 +101,7 @@
 
     if not os.path.exists(args.output) or open(args.output).read() != output:
         open(args.output, 'w').write(output)
+        os.chmod(args.output, os.stat(args.input).st_mode & 0o777)
 
 
 if __name__ == '__main__':
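
A standalone Python sketch of the value-list contract the script now
enforces (the CLANG_VERSION values are illustrative):

    def parse_values(raw_values):
        # Each entry is KEY=VALUE, split on the first '='.  Duplicate keys
        # are rejected, and literal "\n" sequences become real newlines.
        values = {}
        for value in raw_values:
            key, val = value.split('=', 1)
            if key in values:
                raise ValueError('duplicate key "%s" in args' % key)
            values[key] = val.replace('\\n', '\n')
        return values

    parse_values(['CLANG_VERSION=8.0.0'])                 # ok
    parse_values(['CLANG_VERSION=8', 'CLANG_VERSION=9'])  # raises
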
diff --git a/utils/gn/build/write_vcsrevision.py b/utils/gn/build/write_vcsrevision.py
index ed7c05f..974004d 100755
--- a/utils/gn/build/write_vcsrevision.py
+++ b/utils/gn/build/write_vcsrevision.py
@@ -12,7 +12,6 @@
 
 THIS_DIR = os.path.abspath(os.path.dirname(__file__))
 LLVM_DIR = os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR)))
-MONO_DIR = os.path.dirname(LLVM_DIR)
 
 
 def which(program):
@@ -36,25 +35,27 @@
     if os.path.isdir(os.path.join(LLVM_DIR, '.svn')):
         print('SVN support not implemented', file=sys.stderr)
         return 1
-    if os.path.isdir(os.path.join(LLVM_DIR, '.git')):
+    if os.path.exists(os.path.join(LLVM_DIR, '.git')):
         print('non-mono-repo git support not implemented', file=sys.stderr)
         return 1
 
-    git_dir = os.path.join(MONO_DIR, '.git')
-    if not os.path.isdir(git_dir):
-        print('.git dir not found at "%s"' % git_dir, file=sys.stderr)
-        return 1
-
     git, use_shell = which('git'), False
     if not git:
         git = which('git.exe')
     if not git:
         git = which('git.bat')
         use_shell = True
+
+    git_dir = subprocess.check_output([git, 'rev-parse', '--git-dir'],
+                                      cwd=LLVM_DIR, shell=use_shell).strip()
+    if not os.path.isdir(git_dir):
+        print('.git dir not found at "%s"' % git_dir, file=sys.stderr)
+        return 1
+
     rev = subprocess.check_output([git, 'rev-parse', '--short', 'HEAD'],
-                                  cwd=git_dir, shell=use_shell)
+                                  cwd=git_dir, shell=use_shell).decode().strip()
     # FIXME: add pizzas such as the svn revision read off a git note?
-    vcsrevision_contents = '#define LLVM_REVISION "git-%s"\n' % rev.strip()
+    vcsrevision_contents = '#define LLVM_REVISION "git-%s"\n' % rev
 
     # If the output already exists and is identical to what we'd write,
     # return to not perturb the existing file's timestamp.
diff --git a/utils/gn/gn.py b/utils/gn/gn.py
new file mode 100755
index 0000000..f80873b
--- /dev/null
+++ b/utils/gn/gn.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""Calls `gn` with the right --dotfile= and --root= arguments for LLVM."""
+
+# GN normally expects a file called '.gn' at the root of the repository.
+# Since LLVM's GN build isn't supported, putting that file at the root
+# is deemed inappropriate, which requires passing --dotfile= and --root= to GN.
+# Since that gets old fast, this script automatically passes these arguments.
+
+import os
+import subprocess
+import sys
+
+
+THIS_DIR = os.path.dirname(__file__)
+ROOT_DIR = os.path.join(THIS_DIR, '..', '..', '..')
+
+
+def main():
+    # Find real gn executable. For now, just assume it's on PATH.
+    # FIXME: Probably need to append '.exe' on Windows.
+    gn = 'gn'
+
+    # Compute --dotfile= and --root= args to add.
+    extra_args = []
+    gn_main_arg = next((x for x in sys.argv[1:] if not x.startswith('-')), None)
+    if gn_main_arg != 'help':  # `gn help` gets confused by the switches.
+        cwd = os.getcwd()
+        dotfile = os.path.relpath(os.path.join(THIS_DIR, '.gn'), cwd)
+        root = os.path.relpath(ROOT_DIR, cwd)
+        extra_args = [ '--dotfile=' + dotfile, '--root=' + root ]
+
+    # Run GN command with --dotfile= and --root= added.
+    cmd = [gn] + extra_args + sys.argv[1:]
+    sys.exit(subprocess.call(cmd))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/gn/secondary/BUILD.gn b/utils/gn/secondary/BUILD.gn
index 892a0b2..b8b748b 100644
--- a/utils/gn/secondary/BUILD.gn
+++ b/utils/gn/secondary/BUILD.gn
@@ -1,9 +1,16 @@
+import("//clang/lib/ARCMigrate/enable.gni")
+import("//clang/lib/StaticAnalyzer/Frontend/enable.gni")
+import("//llvm/utils/gn/build/toolchain/compiler.gni")
+
 group("default") {
   deps = [
-    ":lld",
-    "//llvm/tools/llc",
-    "//llvm/tools/llvm-undname",
+    "//clang/test",
+    "//compiler-rt/test/hwasan",
+    "//lld/test",
+    "//llvm/test",
   ]
+
+  testonly = true
 }
 
 # Symlink handling.
@@ -26,14 +33,68 @@
 # executable.  This has the effect that `ninja lld` builds lld and then creates
 # symlinks (via this target), while `ninja bin/lld` only builds lld and doesn't
 # update symlinks (in particular, on Windows it doesn't copy the new lld to its
-# new locations); also `ninja lld-link` will build lld and copy it (on Windows)
-# to lld-link, but it won't copy it to ld.lld.
+# new locations).
 # That seems simpler, more explicit, and good enough.
+group("clang") {
+  deps = [
+    "//clang/tools/driver:symlinks",
+  ]
+}
 group("lld") {
   deps = [
     "//lld/tools/lld:symlinks",
   ]
 }
+group("llvm-ar") {
+  deps = [
+    "//llvm/tools/llvm-ar:symlinks",
+  ]
+}
+group("llvm-dwp") {
+  deps = [
+    "//llvm/tools/llvm-dwp:symlinks",
+  ]
+}
+group("llvm-nm") {
+  deps = [
+    "//llvm/tools/llvm-nm:symlinks",
+  ]
+}
+group("llvm-cxxfilt") {
+  deps = [
+    "//llvm/tools/llvm-cxxfilt:symlinks",
+  ]
+}
+group("llvm-objcopy") {
+  deps = [
+    "//llvm/tools/llvm-objcopy:symlinks",
+  ]
+}
+group("llvm-objdump") {
+  deps = [
+    "//llvm/tools/llvm-objdump:symlinks",
+  ]
+}
+group("llvm-readobj") {
+  deps = [
+    "//llvm/tools/llvm-readobj:symlinks",
+  ]
+}
+group("llvm-size") {
+  deps = [
+    "//llvm/tools/llvm-size:symlinks",
+  ]
+}
+group("llvm-strings") {
+  deps = [
+    "//llvm/tools/llvm-strings:symlinks",
+  ]
+}
+group("llvm-symbolizer") {
+  deps = [
+    "//llvm/tools/llvm-symbolizer:symlinks",
+  ]
+}
 
 # A pool called "console" in the root BUILD.gn is magic and represents ninja's
 # built-in console pool. (Requires a GN with `gn --version` >= 552353.)
diff --git a/utils/gn/secondary/clang/include/clang/AST/BUILD.gn b/utils/gn/secondary/clang/include/clang/AST/BUILD.gn
new file mode 100644
index 0000000..6ac4a72
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/AST/BUILD.gn
@@ -0,0 +1,88 @@
+import("//clang/utils/TableGen/clang_tablegen.gni")
+
+clang_tablegen("Attrs") {
+  args = [
+    "-gen-clang-attr-classes",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrImpl") {
+  args = [
+    "-gen-clang-attr-impl",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrTextNodeDump") {
+  args = [
+    "-gen-clang-attr-text-node-dump",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrNodeTraverse") {
+  args = [
+    "-gen-clang-attr-node-traverse",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrVisitor") {
+  args = [
+    "-gen-clang-attr-ast-visitor",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("StmtNodes") {
+  args = [ "-gen-clang-stmt-nodes" ]
+  td_file = "../Basic/StmtNodes.td"
+}
+
+clang_tablegen("DeclNodes") {
+  args = [ "-gen-clang-decl-nodes" ]
+  td_file = "../Basic/DeclNodes.td"
+}
+
+clang_tablegen("CommentNodes") {
+  args = [ "-gen-clang-comment-nodes" ]
+  td_file = "../Basic/CommentNodes.td"
+}
+
+clang_tablegen("CommentHTMLTags") {
+  args = [ "-gen-clang-comment-html-tags" ]
+}
+
+clang_tablegen("CommentHTMLTagsProperties") {
+  args = [ "-gen-clang-comment-html-tags-properties" ]
+  td_file = "CommentHTMLTags.td"
+}
+
+clang_tablegen("CommentHTMLNamedCharacterReferences") {
+  args = [ "-gen-clang-comment-html-named-character-references" ]
+}
+
+clang_tablegen("CommentCommandInfo") {
+  args = [ "-gen-clang-comment-command-info" ]
+  td_file = "CommentCommands.td"
+}
+
+clang_tablegen("CommentCommandList") {
+  args = [ "-gen-clang-comment-command-list" ]
+  td_file = "CommentCommands.td"
+}
+
+clang_tablegen("StmtDataCollectors") {
+  args = [ "-gen-clang-data-collectors" ]
+}
diff --git a/utils/gn/secondary/clang/include/clang/Basic/BUILD.gn b/utils/gn/secondary/clang/include/clang/Basic/BUILD.gn
new file mode 100644
index 0000000..7d67a2f
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/Basic/BUILD.gn
@@ -0,0 +1,100 @@
+import("//clang/utils/TableGen/clang_tablegen.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("//llvm/version.gni")
+
+# Version header.
+
+write_cmake_config("version") {
+  input = "Version.inc.in"
+  output = "$target_gen_dir/Version.inc"
+  values = [
+    "CLANG_VERSION=$llvm_version",
+    "CLANG_VERSION_MAJOR=$llvm_version_major",
+    "CLANG_VERSION_MINOR=$llvm_version_minor",
+    "CLANG_VERSION_PATCHLEVEL=$llvm_version_patch",
+  ]
+}
+
+# Diagnostics.
+
+diag_groups = [
+  "Analysis",
+  "AST",
+  "Comment",
+  "Common",
+  "CrossTU",
+  "Driver",
+  "Frontend",
+  "Lex",
+  "Parse",
+  "Refactoring",
+  "Sema",
+  "Serialization",
+]
+foreach(diag_group, diag_groups) {
+  clang_tablegen("Diagnostic${diag_group}Kinds") {
+    args = [
+      "-gen-clang-diags-defs",
+      "-clang-component=${diag_group}",
+    ]
+    td_file = "Diagnostic.td"
+  }
+}
+group("diags_tablegen") {
+  # DiagnosticGroups and DiagnosticIndexName are intentionally not part of this
+  # group.  Much of clang depends on the DiagKinds.inc files transitively,
+  # but almost nothing needs DiagnosticGroups.inc or DiagnosticIndexName.inc.
+  public_deps = []
+  foreach(diag_group, diag_groups) {
+    public_deps += [ ":Diagnostic${diag_group}Kinds" ]
+  }
+}
+
+clang_tablegen("DiagnosticGroups") {
+  args = [ "-gen-clang-diag-groups" ]
+  td_file = "Diagnostic.td"
+}
+
+clang_tablegen("DiagnosticIndexName") {
+  args = [ "-gen-clang-diags-index-name" ]
+  td_file = "Diagnostic.td"
+}
+
+# Attributes
+
+clang_tablegen("AttrList") {
+  args = [
+    "-gen-clang-attr-list",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "Attr.td"
+}
+
+clang_tablegen("AttrSubMatchRulesList") {
+  args = [
+    "-gen-clang-attr-subject-match-rule-list",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "Attr.td"
+}
+
+clang_tablegen("AttrHasAttributeImpl") {
+  args = [
+    "-gen-clang-attr-has-attribute-impl",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "Attr.td"
+}
+
+# Misc
+
+clang_tablegen("arm_neon") {
+  args = [ "-gen-arm-neon-sema" ]
+}
+
+clang_tablegen("arm_fp16") {
+  args = [ "-gen-arm-neon-sema" ]
+}
diff --git a/utils/gn/secondary/clang/include/clang/Config/BUILD.gn b/utils/gn/secondary/clang/include/clang/Config/BUILD.gn
new file mode 100644
index 0000000..a3c0d5a
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/Config/BUILD.gn
@@ -0,0 +1,75 @@
+import("//clang/lib/ARCMigrate/enable.gni")
+import("//clang/lib/StaticAnalyzer/Frontend/enable.gni")
+import("//llvm/utils/gn/build/libs/xml/enable.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("//llvm/version.gni")
+
+config("Config_config") {
+  visibility = [ ":Config" ]
+  include_dirs = [ "$target_gen_dir/clang/include" ]
+}
+
+write_cmake_config("Config") {
+  input = "config.h.cmake"
+  output = "$target_gen_dir/config.h"
+  values = [
+    "BUG_REPORT_URL=https://bugs.llvm.org/",
+    "CLANG_DEFAULT_LINKER=",
+    "CLANG_DEFAULT_STD_C=",
+    "CLANG_DEFAULT_STD_CXX=",
+    "CLANG_DEFAULT_CXX_STDLIB=",
+    "CLANG_DEFAULT_RTLIB=",
+    "CLANG_DEFAULT_OBJCOPY=objcopy",
+    "CLANG_DEFAULT_OPENMP_RUNTIME=libomp",
+    "CLANG_OPENMP_NVPTX_DEFAULT_ARCH=sm_35",
+    "CLANG_LIBDIR_SUFFIX=",
+    "CLANG_RESOURCE_DIR=",
+    "C_INCLUDE_DIRS=",
+    "CLANG_CONFIG_FILE_SYSTEM_DIR=",
+    "CLANG_CONFIG_FILE_USER_DIR=",
+    "DEFAULT_SYSROOT=",
+    "GCC_INSTALL_PREFIX=",
+    "CLANG_ANALYZER_WITH_Z3=",
+    "BACKEND_PACKAGE_STRING=LLVM ${llvm_version}svn",
+    "ENABLE_LINKER_BUILD_ID=",
+    "ENABLE_X86_RELAX_RELOCATIONS=",
+    "ENABLE_EXPERIMENTAL_NEW_PASS_MANAGER=",
+    "CLANG_ENABLE_OBJC_REWRITER=1",  # FIXME: flag?
+  ]
+
+  if (clang_enable_arcmt) {
+    values += [ "CLANG_ENABLE_ARCMT=1" ]
+  } else {
+    values += [ "CLANG_ENABLE_ARCMT=" ]
+  }
+
+  if (clang_enable_static_analyzer) {
+    values += [ "CLANG_ENABLE_STATIC_ANALYZER=1" ]
+  } else {
+    values += [ "CLANG_ENABLE_STATIC_ANALYZER=" ]
+  }
+
+  if (host_os != "win") {
+    values += [ "CLANG_HAVE_RLIMITS=1" ]
+  } else {
+    values += [ "CLANG_HAVE_RLIMITS=" ]
+  }
+
+  if (llvm_enable_libxml2) {
+    values += [ "CLANG_HAVE_LIBXML=1" ]
+  } else {
+    values += [ "CLANG_HAVE_LIBXML=" ]
+  }
+
+  if (host_os == "mac") {
+    # FIXME: Hardcoding this isn't great, but assuming that the host ld
+    # version says anything about the ld version where the built clang will
+    # run isn't great either. This should probably be a declare_args() arg.
+    values += [ "HOST_LINK_VERSION=305" ]
+  } else {
+    values += [ "HOST_LINK_VERSION=" ]
+  }
+
+  # Let targets depending on this find the generated file.
+  public_configs = [ ":Config_config" ]
+}
diff --git a/utils/gn/secondary/clang/include/clang/Driver/BUILD.gn b/utils/gn/secondary/clang/include/clang/Driver/BUILD.gn
new file mode 100644
index 0000000..955e786
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/Driver/BUILD.gn
@@ -0,0 +1,5 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("Options") {
+  args = [ "-gen-opt-parser-defs" ]
+}
diff --git a/utils/gn/secondary/clang/include/clang/Parse/BUILD.gn b/utils/gn/secondary/clang/include/clang/Parse/BUILD.gn
new file mode 100644
index 0000000..24aaaea
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/Parse/BUILD.gn
@@ -0,0 +1,19 @@
+import("//clang/utils/TableGen/clang_tablegen.gni")
+
+clang_tablegen("AttrParserStringSwitches") {
+  args = [
+    "-gen-clang-attr-parser-string-switches",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrSubMatchRulesParserStringSwitches") {
+  args = [
+    "-gen-clang-attr-subject-match-rules-parser-string-switches",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
diff --git a/utils/gn/secondary/clang/include/clang/Sema/BUILD.gn b/utils/gn/secondary/clang/include/clang/Sema/BUILD.gn
new file mode 100644
index 0000000..1f41189
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/Sema/BUILD.gn
@@ -0,0 +1,46 @@
+import("//clang/utils/TableGen/clang_tablegen.gni")
+
+clang_tablegen("AttrTemplateInstantiate") {
+  args = [
+    "-gen-clang-attr-template-instantiate",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrParsedAttrList") {
+  args = [
+    "-gen-clang-attr-parsed-attr-list",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrParsedAttrKinds") {
+  args = [
+    "-gen-clang-attr-parsed-attr-kinds",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrSpellingListIndex") {
+  args = [
+    "-gen-clang-attr-spelling-index",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrParsedAttrImpl") {
+  args = [
+    "-gen-clang-attr-parsed-attr-impl",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
diff --git a/utils/gn/secondary/clang/include/clang/Serialization/BUILD.gn b/utils/gn/secondary/clang/include/clang/Serialization/BUILD.gn
new file mode 100644
index 0000000..bf90e06
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/Serialization/BUILD.gn
@@ -0,0 +1,19 @@
+import("//clang/utils/TableGen/clang_tablegen.gni")
+
+clang_tablegen("AttrPCHRead") {
+  args = [
+    "-gen-clang-attr-pch-read",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
+
+clang_tablegen("AttrPCHWrite") {
+  args = [
+    "-gen-clang-attr-pch-write",
+    "-I",
+    rebase_path("../..", root_out_dir),
+  ]
+  td_file = "../Basic/Attr.td"
+}
diff --git a/utils/gn/secondary/clang/include/clang/StaticAnalyzer/Checkers/BUILD.gn b/utils/gn/secondary/clang/include/clang/StaticAnalyzer/Checkers/BUILD.gn
new file mode 100644
index 0000000..1921a57
--- /dev/null
+++ b/utils/gn/secondary/clang/include/clang/StaticAnalyzer/Checkers/BUILD.gn
@@ -0,0 +1,5 @@
+import("//clang/utils/TableGen/clang_tablegen.gni")
+
+clang_tablegen("Checkers") {
+  args = [ "-gen-clang-sa-checkers" ]
+}
diff --git a/utils/gn/secondary/clang/lib/ARCMigrate/BUILD.gn b/utils/gn/secondary/clang/lib/ARCMigrate/BUILD.gn
new file mode 100644
index 0000000..d78a559
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/ARCMigrate/BUILD.gn
@@ -0,0 +1,39 @@
+static_library("ARCMigrate") {
+  output_name = "clangARCMigrate"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/Edit",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Sema",
+    "//clang/lib/Serialization",
+    "//clang/lib/StaticAnalyzer/Checkers",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ARCMT.cpp",
+    "ARCMTActions.cpp",
+    "FileRemapper.cpp",
+    "ObjCMT.cpp",
+    "PlistReporter.cpp",
+    "TransAPIUses.cpp",
+    "TransARCAssign.cpp",
+    "TransAutoreleasePool.cpp",
+    "TransBlockObjCVariable.cpp",
+    "TransEmptyStatementsAndDealloc.cpp",
+    "TransGCAttrs.cpp",
+    "TransGCCalls.cpp",
+    "TransProperties.cpp",
+    "TransProtectedScope.cpp",
+    "TransRetainReleaseDealloc.cpp",
+    "TransUnbridgedCasts.cpp",
+    "TransUnusedInitDelegate.cpp",
+    "TransZeroOutPropsInDealloc.cpp",
+    "TransformActions.cpp",
+    "Transforms.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/ARCMigrate/enable.gni b/utils/gn/secondary/clang/lib/ARCMigrate/enable.gni
new file mode 100644
index 0000000..7a7a42e
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/ARCMigrate/enable.gni
@@ -0,0 +1,4 @@
+declare_args() {
+  # Whether to include the ARC migration tool in the clang binary.
+  clang_enable_arcmt = true
+}
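
Arguments declared with declare_args() can be overridden when the build directory is generated. A hedged example (the script path and flag usage assume the gn.py wrapper added earlier in this patch, sitting at llvm/utils/gn/gn.py in a monorepo-style checkout):

```python
import subprocess

# Illustrative only: regenerate out/gn with ARC migration support disabled.
# `--args=` is gn's standard switch for setting declared build arguments.
subprocess.check_call([
    'python', 'llvm/utils/gn/gn.py', 'gen', 'out/gn',
    '--args=clang_enable_arcmt=false',
])
```

Note the assert in FrontendTool's BUILD.gn below: ARCMT cannot stay enabled while the static analyzer is disabled.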
diff --git a/utils/gn/secondary/clang/lib/AST/BUILD.gn b/utils/gn/secondary/clang/lib/AST/BUILD.gn
new file mode 100644
index 0000000..fc3b0b9
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/AST/BUILD.gn
@@ -0,0 +1,102 @@
+static_library("AST") {
+  output_name = "clangAST"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/AST:AttrImpl",
+    "//clang/include/clang/AST:AttrNodeTraverse",
+    "//clang/include/clang/AST:AttrTextNodeDump",
+    "//clang/include/clang/AST:CommentCommandInfo",
+    "//clang/include/clang/AST:CommentHTMLNamedCharacterReferences",
+    "//clang/include/clang/AST:CommentHTMLTags",
+    "//clang/include/clang/AST:CommentHTMLTagsProperties",
+    "//clang/include/clang/AST:DeclNodes",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//llvm/lib/BinaryFormat",
+    "//llvm/lib/Support",
+  ]
+
+  # Generated files used in public headers should be in public_deps, the rest
+  # in regular deps.
+  public_deps = [
+    "//clang/include/clang/AST:AttrVisitor",
+    "//clang/include/clang/AST:Attrs",
+    "//clang/include/clang/AST:CommentCommandList",
+    "//clang/include/clang/AST:CommentNodes",
+    "//clang/include/clang/AST:StmtNodes",
+  ]
+  sources = [
+    "APValue.cpp",
+    "ASTConsumer.cpp",
+    "ASTContext.cpp",
+    "ASTDiagnostic.cpp",
+    "ASTDumper.cpp",
+    "ASTImporter.cpp",
+    "ASTImporterLookupTable.cpp",
+    "ASTStructuralEquivalence.cpp",
+    "ASTTypeTraits.cpp",
+    "AttrImpl.cpp",
+    "CXXInheritance.cpp",
+    "Comment.cpp",
+    "CommentBriefParser.cpp",
+    "CommentCommandTraits.cpp",
+    "CommentLexer.cpp",
+    "CommentParser.cpp",
+    "CommentSema.cpp",
+    "ComparisonCategories.cpp",
+    "DataCollection.cpp",
+    "Decl.cpp",
+    "DeclBase.cpp",
+    "DeclCXX.cpp",
+    "DeclFriend.cpp",
+    "DeclGroup.cpp",
+    "DeclObjC.cpp",
+    "DeclOpenMP.cpp",
+    "DeclPrinter.cpp",
+    "DeclTemplate.cpp",
+    "DeclarationName.cpp",
+    "Expr.cpp",
+    "ExprCXX.cpp",
+    "ExprClassification.cpp",
+    "ExprConstant.cpp",
+    "ExprObjC.cpp",
+    "ExternalASTMerger.cpp",
+    "ExternalASTSource.cpp",
+    "FormatString.cpp",
+    "InheritViz.cpp",
+    "ItaniumCXXABI.cpp",
+    "ItaniumMangle.cpp",
+    "Mangle.cpp",
+    "MicrosoftCXXABI.cpp",
+    "MicrosoftMangle.cpp",
+    "NSAPI.cpp",
+    "NestedNameSpecifier.cpp",
+    "ODRHash.cpp",
+    "OSLog.cpp",
+    "OpenMPClause.cpp",
+    "ParentMap.cpp",
+    "PrintfFormatString.cpp",
+    "QualTypeNames.cpp",
+    "RawCommentList.cpp",
+    "RecordLayout.cpp",
+    "RecordLayoutBuilder.cpp",
+    "ScanfFormatString.cpp",
+    "SelectorLocationsKind.cpp",
+    "Stmt.cpp",
+    "StmtCXX.cpp",
+    "StmtIterator.cpp",
+    "StmtObjC.cpp",
+    "StmtOpenMP.cpp",
+    "StmtPrinter.cpp",
+    "StmtProfile.cpp",
+    "StmtViz.cpp",
+    "TemplateBase.cpp",
+    "TemplateName.cpp",
+    "TextNodeDumper.cpp",
+    "Type.cpp",
+    "TypeLoc.cpp",
+    "TypePrinter.cpp",
+    "VTTBuilder.cpp",
+    "VTableBuilder.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/ASTMatchers/BUILD.gn b/utils/gn/secondary/clang/lib/ASTMatchers/BUILD.gn
new file mode 100644
index 0000000..8dbb132
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/ASTMatchers/BUILD.gn
@@ -0,0 +1,13 @@
+static_library("ASTMatchers") {
+  output_name = "clangASTMatchers"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ASTMatchFinder.cpp",
+    "ASTMatchersInternal.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/ASTMatchers/Dynamic/BUILD.gn b/utils/gn/secondary/clang/lib/ASTMatchers/Dynamic/BUILD.gn
new file mode 100644
index 0000000..31d27de
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/ASTMatchers/Dynamic/BUILD.gn
@@ -0,0 +1,16 @@
+static_library("Dynamic") {
+  output_name = "clangDynamicASTMatchers"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Basic",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "Diagnostics.cpp",
+    "Parser.cpp",
+    "Registry.cpp",
+    "VariantValue.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Analysis/BUILD.gn b/utils/gn/secondary/clang/lib/Analysis/BUILD.gn
new file mode 100644
index 0000000..b084d75
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Analysis/BUILD.gn
@@ -0,0 +1,36 @@
+static_library("Analysis") {
+  output_name = "clangAnalysis"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/AST:StmtDataCollectors",
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AnalysisDeclContext.cpp",
+    "BodyFarm.cpp",
+    "CFG.cpp",
+    "CFGReachabilityAnalysis.cpp",
+    "CFGStmtMap.cpp",
+    "CallGraph.cpp",
+    "CloneDetection.cpp",
+    "CocoaConventions.cpp",
+    "CodeInjector.cpp",
+    "ConstructionContext.cpp",
+    "Consumed.cpp",
+    "Dominators.cpp",
+    "ExprMutationAnalyzer.cpp",
+    "LiveVariables.cpp",
+    "ObjCNoReturn.cpp",
+    "PostOrderCFGView.cpp",
+    "ProgramPoint.cpp",
+    "ReachableCode.cpp",
+    "ThreadSafety.cpp",
+    "ThreadSafetyCommon.cpp",
+    "ThreadSafetyLogical.cpp",
+    "ThreadSafetyTIL.cpp",
+    "UninitializedValues.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Basic/BUILD.gn b/utils/gn/secondary/clang/lib/Basic/BUILD.gn
new file mode 100644
index 0000000..4d0578b
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Basic/BUILD.gn
@@ -0,0 +1,81 @@
+static_library("Basic") {
+  output_name = "clangBasic"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  public_deps = [
+    # public_dep because public header Version.h includes generated Version.inc.
+    "//clang/include/clang/Basic:AttrList",
+    "//clang/include/clang/Basic:AttrSubMatchRulesList",
+    "//clang/include/clang/Basic:DiagnosticGroups",
+    "//clang/include/clang/Basic:diags_tablegen",
+    "//clang/include/clang/Basic:version",
+  ]
+  deps = [
+    "//clang/include/clang/Basic:AttrHasAttributeImpl",
+    "//clang/include/clang/Basic:arm_fp16",
+    "//clang/include/clang/Basic:arm_neon",
+    "//clang/include/clang/Config",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "Attributes.cpp",
+    "Builtins.cpp",
+    "CharInfo.cpp",
+    "CodeGenOptions.cpp",
+    "Cuda.cpp",
+    "Diagnostic.cpp",
+    "DiagnosticIDs.cpp",
+    "DiagnosticOptions.cpp",
+    "FileManager.cpp",
+    "FileSystemStatCache.cpp",
+    "FixedPoint.cpp",
+    "IdentifierTable.cpp",
+    "LangOptions.cpp",
+    "MemoryBufferCache.cpp",
+    "Module.cpp",
+    "ObjCRuntime.cpp",
+    "OpenMPKinds.cpp",
+    "OperatorPrecedence.cpp",
+    "SanitizerBlacklist.cpp",
+    "SanitizerSpecialCaseList.cpp",
+    "Sanitizers.cpp",
+    "SourceLocation.cpp",
+    "SourceManager.cpp",
+    "TargetInfo.cpp",
+    "Targets.cpp",
+    "Targets/AArch64.cpp",
+    "Targets/AMDGPU.cpp",
+    "Targets/ARC.cpp",
+    "Targets/ARM.cpp",
+    "Targets/AVR.cpp",
+    "Targets/BPF.cpp",
+    "Targets/Hexagon.cpp",
+    "Targets/Lanai.cpp",
+    "Targets/Le64.cpp",
+    "Targets/MSP430.cpp",
+    "Targets/Mips.cpp",
+    "Targets/NVPTX.cpp",
+    "Targets/OSTargets.cpp",
+    "Targets/PNaCl.cpp",
+    "Targets/PPC.cpp",
+    "Targets/RISCV.cpp",
+    "Targets/SPIR.cpp",
+    "Targets/Sparc.cpp",
+    "Targets/SystemZ.cpp",
+    "Targets/TCE.cpp",
+    "Targets/WebAssembly.cpp",
+    "Targets/X86.cpp",
+    "Targets/XCore.cpp",
+    "TokenKinds.cpp",
+
+    # FIXME: This should be in its own target that passes -DHAVE_SVN_VERSION_INC
+    # and that also depends on a target generating SVNVersion.inc.
+    "Version.cpp",
+    "Warnings.cpp",
+    "XRayInstr.cpp",
+    "XRayLists.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/CodeGen/BUILD.gn b/utils/gn/secondary/clang/lib/CodeGen/BUILD.gn
new file mode 100644
index 0000000..c245489
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/CodeGen/BUILD.gn
@@ -0,0 +1,88 @@
+static_library("CodeGen") {
+  output_name = "clangCodeGen"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/LTO",
+    "//llvm/lib/Linker",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Passes",
+    "//llvm/lib/ProfileData",
+    "//llvm/lib/ProfileData/Coverage",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Transforms/Coroutines",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/InstCombine",
+    "//llvm/lib/Transforms/Instrumentation",
+    "//llvm/lib/Transforms/ObjCARC",
+    "//llvm/lib/Transforms/Scalar",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "BackendUtil.cpp",
+    "CGAtomic.cpp",
+    "CGBlocks.cpp",
+    "CGBuiltin.cpp",
+    "CGCUDANV.cpp",
+    "CGCUDARuntime.cpp",
+    "CGCXX.cpp",
+    "CGCXXABI.cpp",
+    "CGCall.cpp",
+    "CGClass.cpp",
+    "CGCleanup.cpp",
+    "CGCoroutine.cpp",
+    "CGDebugInfo.cpp",
+    "CGDecl.cpp",
+    "CGDeclCXX.cpp",
+    "CGException.cpp",
+    "CGExpr.cpp",
+    "CGExprAgg.cpp",
+    "CGExprCXX.cpp",
+    "CGExprComplex.cpp",
+    "CGExprConstant.cpp",
+    "CGExprScalar.cpp",
+    "CGGPUBuiltin.cpp",
+    "CGLoopInfo.cpp",
+    "CGNonTrivialStruct.cpp",
+    "CGObjC.cpp",
+    "CGObjCGNU.cpp",
+    "CGObjCMac.cpp",
+    "CGObjCRuntime.cpp",
+    "CGOpenCLRuntime.cpp",
+    "CGOpenMPRuntime.cpp",
+    "CGOpenMPRuntimeNVPTX.cpp",
+    "CGRecordLayoutBuilder.cpp",
+    "CGStmt.cpp",
+    "CGStmtOpenMP.cpp",
+    "CGVTT.cpp",
+    "CGVTables.cpp",
+    "CodeGenABITypes.cpp",
+    "CodeGenAction.cpp",
+    "CodeGenFunction.cpp",
+    "CodeGenModule.cpp",
+    "CodeGenPGO.cpp",
+    "CodeGenTBAA.cpp",
+    "CodeGenTypes.cpp",
+    "ConstantInitBuilder.cpp",
+    "CoverageMappingGen.cpp",
+    "ItaniumCXXABI.cpp",
+    "MacroPPCallbacks.cpp",
+    "MicrosoftCXXABI.cpp",
+    "ModuleBuilder.cpp",
+    "ObjectFilePCHContainerOperations.cpp",
+    "SanitizerMetadata.cpp",
+    "SwiftCallingConv.cpp",
+    "TargetInfo.cpp",
+    "VarBypassDetector.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/CrossTU/BUILD.gn b/utils/gn/secondary/clang/lib/CrossTU/BUILD.gn
new file mode 100644
index 0000000..81fb0b6
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/CrossTU/BUILD.gn
@@ -0,0 +1,14 @@
+static_library("CrossTU") {
+  output_name = "clangCrossTU"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Index",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CrossTranslationUnit.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Driver/BUILD.gn b/utils/gn/secondary/clang/lib/Driver/BUILD.gn
new file mode 100644
index 0000000..1f07a62
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Driver/BUILD.gn
@@ -0,0 +1,89 @@
+static_library("Driver") {
+  output_name = "clangDriver"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  include_dirs = [ "." ]
+  deps = [
+    "//clang/include/clang/Config",
+
+    # Driver and StaticAnalyzer don't depend on each other as libraries, but
+    # as of clang r311958 Driver does depend on StaticAnalyzer/Checkers's
+    # tablegen'd Checkers.inc.  The CMake build runs all clang tablegen steps
+    # before all lib compilations via the clang-tablegen-targets target; the
+    # GN build has this dependency instead.
+    # FIXME: Move Checkers.td somewhere else to clean up this layering mess.
+    # See the review thread of r311958 for details.
+    "//clang/include/clang/StaticAnalyzer/Checkers",
+    "//clang/lib/Basic",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/BinaryFormat",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  public_deps = [
+    # public_dep because public header Options.h includes generated Options.inc.
+    "//clang/include/clang/Driver:Options",
+  ]
+  if (host_os == "win") {
+    # MSVCToolChain.cpp uses version.dll.
+    libs = [ "version.lib" ]
+  }
+  sources = [
+    "Action.cpp",
+    "Compilation.cpp",
+    "DarwinSDKInfo.cpp",
+    "Distro.cpp",
+    "Driver.cpp",
+    "DriverOptions.cpp",
+    "Job.cpp",
+    "Multilib.cpp",
+    "Phases.cpp",
+    "SanitizerArgs.cpp",
+    "Tool.cpp",
+    "ToolChain.cpp",
+    "ToolChains/AMDGPU.cpp",
+    "ToolChains/AVR.cpp",
+    "ToolChains/Ananas.cpp",
+    "ToolChains/Arch/AArch64.cpp",
+    "ToolChains/Arch/ARM.cpp",
+    "ToolChains/Arch/Mips.cpp",
+    "ToolChains/Arch/PPC.cpp",
+    "ToolChains/Arch/RISCV.cpp",
+    "ToolChains/Arch/Sparc.cpp",
+    "ToolChains/Arch/SystemZ.cpp",
+    "ToolChains/Arch/X86.cpp",
+    "ToolChains/BareMetal.cpp",
+    "ToolChains/Clang.cpp",
+    "ToolChains/CloudABI.cpp",
+    "ToolChains/CommonArgs.cpp",
+    "ToolChains/Contiki.cpp",
+    "ToolChains/CrossWindows.cpp",
+    "ToolChains/Cuda.cpp",
+    "ToolChains/Darwin.cpp",
+    "ToolChains/DragonFly.cpp",
+    "ToolChains/FreeBSD.cpp",
+    "ToolChains/Fuchsia.cpp",
+    "ToolChains/Gnu.cpp",
+    "ToolChains/HIP.cpp",
+    "ToolChains/Haiku.cpp",
+    "ToolChains/Hexagon.cpp",
+    "ToolChains/Hurd.cpp",
+    "ToolChains/Linux.cpp",
+    "ToolChains/MSP430.cpp",
+    "ToolChains/MSVC.cpp",
+    "ToolChains/MinGW.cpp",
+    "ToolChains/Minix.cpp",
+    "ToolChains/MipsLinux.cpp",
+    "ToolChains/Myriad.cpp",
+    "ToolChains/NaCl.cpp",
+    "ToolChains/NetBSD.cpp",
+    "ToolChains/OpenBSD.cpp",
+    "ToolChains/PS4CPU.cpp",
+    "ToolChains/RISCVToolchain.cpp",
+    "ToolChains/Solaris.cpp",
+    "ToolChains/TCE.cpp",
+    "ToolChains/WebAssembly.cpp",
+    "ToolChains/XCore.cpp",
+    "Types.cpp",
+    "XRayArgs.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Edit/BUILD.gn b/utils/gn/secondary/clang/lib/Edit/BUILD.gn
new file mode 100644
index 0000000..e32e207
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Edit/BUILD.gn
@@ -0,0 +1,15 @@
+static_library("Edit") {
+  output_name = "clangEdit"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "Commit.cpp",
+    "EditedSource.cpp",
+    "RewriteObjCFoundationAPI.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Format/BUILD.gn b/utils/gn/secondary/clang/lib/Format/BUILD.gn
new file mode 100644
index 0000000..e08c923
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Format/BUILD.gn
@@ -0,0 +1,27 @@
+static_library("Format") {
+  output_name = "clangFormat"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//clang/lib/Tooling/Core",
+    "//clang/lib/Tooling/Inclusions",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AffectedRangeManager.cpp",
+    "BreakableToken.cpp",
+    "ContinuationIndenter.cpp",
+    "Format.cpp",
+    "FormatToken.cpp",
+    "FormatTokenLexer.cpp",
+    "NamespaceEndCommentsFixer.cpp",
+    "SortJavaScriptImports.cpp",
+    "TokenAnalyzer.cpp",
+    "TokenAnnotator.cpp",
+    "UnwrappedLineFormatter.cpp",
+    "UnwrappedLineParser.cpp",
+    "UsingDeclarationsSorter.cpp",
+    "WhitespaceManager.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Frontend/BUILD.gn b/utils/gn/secondary/clang/lib/Frontend/BUILD.gn
new file mode 100644
index 0000000..0f80aa6
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Frontend/BUILD.gn
@@ -0,0 +1,54 @@
+static_library("Frontend") {
+  output_name = "clangFrontend"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Config",
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Driver",
+    "//clang/lib/Edit",
+    "//clang/lib/Lex",
+    "//clang/lib/Parse",
+    "//clang/lib/Sema",
+    "//clang/lib/Serialization",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Option",
+    "//llvm/lib/ProfileData",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ASTConsumers.cpp",
+    "ASTMerge.cpp",
+    "ASTUnit.cpp",
+    "ChainedDiagnosticConsumer.cpp",
+    "ChainedIncludesSource.cpp",
+    "CompilerInstance.cpp",
+    "CompilerInvocation.cpp",
+    "CreateInvocationFromCommandLine.cpp",
+    "DependencyFile.cpp",
+    "DependencyGraph.cpp",
+    "DiagnosticRenderer.cpp",
+    "FrontendAction.cpp",
+    "FrontendActions.cpp",
+    "FrontendOptions.cpp",
+    "FrontendTiming.cpp",
+    "HeaderIncludeGen.cpp",
+    "InitHeaderSearch.cpp",
+    "InitPreprocessor.cpp",
+    "LangStandards.cpp",
+    "LayoutOverrideSource.cpp",
+    "LogDiagnosticPrinter.cpp",
+    "ModuleDependencyCollector.cpp",
+    "MultiplexConsumer.cpp",
+    "PrecompiledPreamble.cpp",
+    "PrintPreprocessedOutput.cpp",
+    "SerializedDiagnosticPrinter.cpp",
+    "SerializedDiagnosticReader.cpp",
+    "TestModuleFileExtension.cpp",
+    "TextDiagnostic.cpp",
+    "TextDiagnosticBuffer.cpp",
+    "TextDiagnosticPrinter.cpp",
+    "VerifyDiagnosticConsumer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Frontend/Rewrite/BUILD.gn b/utils/gn/secondary/clang/lib/Frontend/Rewrite/BUILD.gn
new file mode 100644
index 0000000..1f185cf
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Frontend/Rewrite/BUILD.gn
@@ -0,0 +1,24 @@
+static_library("Rewrite") {
+  output_name = "clangRewriteFrontend"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Edit",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Serialization",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "FixItRewriter.cpp",
+    "FrontendActions.cpp",
+    "HTMLPrint.cpp",
+    "InclusionRewriter.cpp",
+    "RewriteMacros.cpp",
+    "RewriteModernObjC.cpp",
+    "RewriteObjC.cpp",
+    "RewriteTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/FrontendTool/BUILD.gn b/utils/gn/secondary/clang/lib/FrontendTool/BUILD.gn
new file mode 100644
index 0000000..6bcb4c3
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/FrontendTool/BUILD.gn
@@ -0,0 +1,29 @@
+import("//clang/lib/ARCMigrate/enable.gni")
+import("//clang/lib/StaticAnalyzer/Frontend/enable.gni")
+
+assert(clang_enable_static_analyzer || !clang_enable_arcmt,
+       "Cannot disable static analyzer while enabling ARCMT")
+
+static_library("FrontendTool") {
+  output_name = "clangFrontendTool"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Config",
+    "//clang/lib/Basic",
+    "//clang/lib/CodeGen",
+    "//clang/lib/Driver",
+    "//clang/lib/Frontend",
+    "//clang/lib/Frontend/Rewrite",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  if (clang_enable_arcmt) {
+    deps += [ "//clang/lib/ARCMigrate" ]
+  }
+  if (clang_enable_static_analyzer) {
+    deps += [ "//clang/lib/StaticAnalyzer/Frontend" ]
+  }
+  sources = [
+    "ExecuteCompilerInvocation.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Headers/BUILD.gn b/utils/gn/secondary/clang/lib/Headers/BUILD.gn
new file mode 100644
index 0000000..2c472ce
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Headers/BUILD.gn
@@ -0,0 +1,161 @@
+import("//clang/resource_dir.gni")
+import("//clang/utils/TableGen/clang_tablegen.gni")
+
+# Generate arm_neon.h
+clang_tablegen("arm_neon") {
+  args = [ "-gen-arm-neon" ]
+  td_file = "//clang/include/clang/Basic/arm_neon.td"
+  output_name = "arm_neon.h"
+}
+
+# Generate arm_fp16.h
+clang_tablegen("arm_fp16") {
+  args = [ "-gen-arm-fp16" ]
+  td_file = "//clang/include/clang/Basic/arm_fp16.td"
+  output_name = "arm_fp16.h"
+}
+
+copy("arm_headers") {
+  visibility = [ ":Headers" ]
+  deps = [
+    ":arm_fp16",
+    ":arm_neon",
+  ]
+  sources = get_target_outputs(":arm_neon") + get_target_outputs(":arm_fp16")
+  outputs = [
+    "$clang_resource_dir/include/{{source_file_part}}",
+  ]
+}
+
+copy("Headers") {
+  deps = [
+    ":arm_headers",
+  ]
+
+  # NOSORT
+  # Tell `gn format` not to reorder the sources list: its order matches the
+  # CMake build, which sorts alphabetically but ignores leading underscores.
+  sources = [
+    "adxintrin.h",
+    "altivec.h",
+    "ammintrin.h",
+    "arm_acle.h",
+    "armintr.h",
+    "arm64intr.h",
+    "avx2intrin.h",
+    "avx512bwintrin.h",
+    "avx512bitalgintrin.h",
+    "avx512vlbitalgintrin.h",
+    "avx512cdintrin.h",
+    "avx512vpopcntdqintrin.h",
+    "avx512dqintrin.h",
+    "avx512erintrin.h",
+    "avx512fintrin.h",
+    "avx512ifmaintrin.h",
+    "avx512ifmavlintrin.h",
+    "avx512pfintrin.h",
+    "avx512vbmiintrin.h",
+    "avx512vbmivlintrin.h",
+    "avx512vbmi2intrin.h",
+    "avx512vlvbmi2intrin.h",
+    "avx512vlbwintrin.h",
+    "avx512vlcdintrin.h",
+    "avx512vldqintrin.h",
+    "avx512vlintrin.h",
+    "avx512vpopcntdqvlintrin.h",
+    "avx512vnniintrin.h",
+    "avx512vlvnniintrin.h",
+    "avxintrin.h",
+    "bmi2intrin.h",
+    "bmiintrin.h",
+    "__clang_cuda_builtin_vars.h",
+    "__clang_cuda_cmath.h",
+    "__clang_cuda_complex_builtins.h",
+    "__clang_cuda_device_functions.h",
+    "__clang_cuda_intrinsics.h",
+    "__clang_cuda_libdevice_declares.h",
+    "__clang_cuda_math_forward_declares.h",
+    "__clang_cuda_runtime_wrapper.h",
+    "cetintrin.h",
+    "cldemoteintrin.h",
+    "clzerointrin.h",
+    "cpuid.h",
+    "clflushoptintrin.h",
+    "clwbintrin.h",
+    "emmintrin.h",
+    "f16cintrin.h",
+    "float.h",
+    "fma4intrin.h",
+    "fmaintrin.h",
+    "fxsrintrin.h",
+    "gfniintrin.h",
+    "htmintrin.h",
+    "htmxlintrin.h",
+    "ia32intrin.h",
+    "immintrin.h",
+    "intrin.h",
+    "inttypes.h",
+    "invpcidintrin.h",
+    "iso646.h",
+    "limits.h",
+    "lwpintrin.h",
+    "lzcntintrin.h",
+    "mm3dnow.h",
+    "mmintrin.h",
+    "mm_malloc.h",
+    "module.modulemap",
+    "movdirintrin.h",
+    "msa.h",
+    "mwaitxintrin.h",
+    "nmmintrin.h",
+    "opencl-c.h",
+    "pconfigintrin.h",
+    "pkuintrin.h",
+    "pmmintrin.h",
+    "popcntintrin.h",
+    "prfchwintrin.h",
+    "ptwriteintrin.h",
+    "rdseedintrin.h",
+    "rtmintrin.h",
+    "s390intrin.h",
+    "sgxintrin.h",
+    "shaintrin.h",
+    "smmintrin.h",
+    "stdalign.h",
+    "stdarg.h",
+    "stdatomic.h",
+    "stdbool.h",
+    "stddef.h",
+    "__stddef_max_align_t.h",
+    "stdint.h",
+    "stdnoreturn.h",
+    "tbmintrin.h",
+    "tgmath.h",
+    "tmmintrin.h",
+    "unwind.h",
+    "vadefs.h",
+    "vaesintrin.h",
+    "varargs.h",
+    "vecintrin.h",
+    "vpclmulqdqintrin.h",
+    "waitpkgintrin.h",
+    "wbnoinvdintrin.h",
+    "wmmintrin.h",
+    "__wmmintrin_aes.h",
+    "__wmmintrin_pclmul.h",
+    "x86intrin.h",
+    "xmmintrin.h",
+    "xopintrin.h",
+    "xsavecintrin.h",
+    "xsaveintrin.h",
+    "xsaveoptintrin.h",
+    "xsavesintrin.h",
+    "xtestintrin.h",
+    "cuda_wrappers/algorithm",
+    "cuda_wrappers/complex",
+    "cuda_wrappers/new",
+  ]
+  outputs = [
+    "$clang_resource_dir/include/{{source_target_relative}}",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Index/BUILD.gn b/utils/gn/secondary/clang/lib/Index/BUILD.gn
new file mode 100644
index 0000000..8c7190f
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Index/BUILD.gn
@@ -0,0 +1,28 @@
+static_library("Index") {
+  output_name = "clangIndex"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Format",
+    "//clang/lib/Frontend",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Serialization",
+    "//clang/lib/Tooling/Core",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CodegenNameGenerator.cpp",
+    "CommentToXML.cpp",
+    "IndexBody.cpp",
+    "IndexDecl.cpp",
+    "IndexSymbol.cpp",
+    "IndexTypeSourceInfo.cpp",
+    "IndexingAction.cpp",
+    "IndexingContext.cpp",
+    "IndexingContext.h",
+    "SimpleFormatContext.h",
+    "USRGeneration.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Lex/BUILD.gn b/utils/gn/secondary/clang/lib/Lex/BUILD.gn
new file mode 100644
index 0000000..2934e9cf
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Lex/BUILD.gn
@@ -0,0 +1,31 @@
+static_library("Lex") {
+  output_name = "clangLex"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "HeaderMap.cpp",
+    "HeaderSearch.cpp",
+    "Lexer.cpp",
+    "LiteralSupport.cpp",
+    "MacroArgs.cpp",
+    "MacroInfo.cpp",
+    "ModuleMap.cpp",
+    "PPCaching.cpp",
+    "PPCallbacks.cpp",
+    "PPConditionalDirectiveRecord.cpp",
+    "PPDirectives.cpp",
+    "PPExpressions.cpp",
+    "PPLexerChange.cpp",
+    "PPMacroExpansion.cpp",
+    "Pragma.cpp",
+    "PreprocessingRecord.cpp",
+    "Preprocessor.cpp",
+    "PreprocessorLexer.cpp",
+    "ScratchBuffer.cpp",
+    "TokenConcatenation.cpp",
+    "TokenLexer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Parse/BUILD.gn b/utils/gn/secondary/clang/lib/Parse/BUILD.gn
new file mode 100644
index 0000000..341881e
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Parse/BUILD.gn
@@ -0,0 +1,32 @@
+static_library("Parse") {
+  output_name = "clangParse"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Parse:AttrParserStringSwitches",
+    "//clang/include/clang/Parse:AttrSubMatchRulesParserStringSwitches",
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//clang/lib/Sema",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ParseAST.cpp",
+    "ParseCXXInlineMethods.cpp",
+    "ParseDecl.cpp",
+    "ParseDeclCXX.cpp",
+    "ParseExpr.cpp",
+    "ParseExprCXX.cpp",
+    "ParseInit.cpp",
+    "ParseObjc.cpp",
+    "ParseOpenMP.cpp",
+    "ParsePragma.cpp",
+    "ParseStmt.cpp",
+    "ParseStmtAsm.cpp",
+    "ParseTemplate.cpp",
+    "ParseTentative.cpp",
+    "Parser.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Rewrite/BUILD.gn b/utils/gn/secondary/clang/lib/Rewrite/BUILD.gn
new file mode 100644
index 0000000..2ea3cdd
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Rewrite/BUILD.gn
@@ -0,0 +1,16 @@
+static_library("Rewrite") {
+  output_name = "clangRewrite"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "DeltaTree.cpp",
+    "HTMLRewrite.cpp",
+    "RewriteRope.cpp",
+    "Rewriter.cpp",
+    "TokenRewriter.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Sema/BUILD.gn b/utils/gn/secondary/clang/lib/Sema/BUILD.gn
new file mode 100644
index 0000000..716f4d5
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Sema/BUILD.gn
@@ -0,0 +1,66 @@
+static_library("Sema") {
+  output_name = "clangSema"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Sema:AttrParsedAttrImpl",
+    "//clang/include/clang/Sema:AttrParsedAttrKinds",
+    "//clang/include/clang/Sema:AttrParsedAttrList",
+    "//clang/include/clang/Sema:AttrSpellingListIndex",
+    "//clang/include/clang/Sema:AttrTemplateInstantiate",
+    "//clang/lib/AST",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/Edit",
+    "//clang/lib/Lex",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AnalysisBasedWarnings.cpp",
+    "CodeCompleteConsumer.cpp",
+    "DeclSpec.cpp",
+    "DelayedDiagnostic.cpp",
+    "IdentifierResolver.cpp",
+    "JumpDiagnostics.cpp",
+    "MultiplexExternalSemaSource.cpp",
+    "ParsedAttr.cpp",
+    "Scope.cpp",
+    "ScopeInfo.cpp",
+    "Sema.cpp",
+    "SemaAccess.cpp",
+    "SemaAttr.cpp",
+    "SemaCUDA.cpp",
+    "SemaCXXScopeSpec.cpp",
+    "SemaCast.cpp",
+    "SemaChecking.cpp",
+    "SemaCodeComplete.cpp",
+    "SemaConsumer.cpp",
+    "SemaCoroutine.cpp",
+    "SemaDecl.cpp",
+    "SemaDeclAttr.cpp",
+    "SemaDeclCXX.cpp",
+    "SemaDeclObjC.cpp",
+    "SemaExceptionSpec.cpp",
+    "SemaExpr.cpp",
+    "SemaExprCXX.cpp",
+    "SemaExprMember.cpp",
+    "SemaExprObjC.cpp",
+    "SemaFixItUtils.cpp",
+    "SemaInit.cpp",
+    "SemaLambda.cpp",
+    "SemaLookup.cpp",
+    "SemaObjCProperty.cpp",
+    "SemaOpenMP.cpp",
+    "SemaOverload.cpp",
+    "SemaPseudoObject.cpp",
+    "SemaStmt.cpp",
+    "SemaStmtAsm.cpp",
+    "SemaStmtAttr.cpp",
+    "SemaTemplate.cpp",
+    "SemaTemplateDeduction.cpp",
+    "SemaTemplateInstantiate.cpp",
+    "SemaTemplateInstantiateDecl.cpp",
+    "SemaTemplateVariadic.cpp",
+    "SemaType.cpp",
+    "TypeLocBuilder.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Serialization/BUILD.gn b/utils/gn/secondary/clang/lib/Serialization/BUILD.gn
new file mode 100644
index 0000000..394b1ed
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Serialization/BUILD.gn
@@ -0,0 +1,31 @@
+static_library("Serialization") {
+  output_name = "clangSerialization"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Serialization:AttrPCHRead",
+    "//clang/include/clang/Serialization:AttrPCHWrite",
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//clang/lib/Sema",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ASTCommon.cpp",
+    "ASTCommon.h",
+    "ASTReader.cpp",
+    "ASTReaderDecl.cpp",
+    "ASTReaderInternals.h",
+    "ASTReaderStmt.cpp",
+    "ASTWriter.cpp",
+    "ASTWriterDecl.cpp",
+    "ASTWriterStmt.cpp",
+    "GeneratePCH.cpp",
+    "GlobalModuleIndex.cpp",
+    "Module.cpp",
+    "ModuleFileExtension.cpp",
+    "ModuleManager.cpp",
+    "PCHContainerOperations.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn b/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn
new file mode 100644
index 0000000..e8e0c71
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn
@@ -0,0 +1,114 @@
+static_library("Checkers") {
+  output_name = "clangStaticAnalyzerCheckers"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/StaticAnalyzer/Checkers",
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//clang/lib/StaticAnalyzer/Core",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AnalysisOrderChecker.cpp",
+    "AnalyzerStatsChecker.cpp",
+    "ArrayBoundChecker.cpp",
+    "ArrayBoundCheckerV2.cpp",
+    "BasicObjCFoundationChecks.cpp",
+    "BlockInCriticalSectionChecker.cpp",
+    "BoolAssignmentChecker.cpp",
+    "BuiltinFunctionChecker.cpp",
+    "CStringChecker.cpp",
+    "CStringSyntaxChecker.cpp",
+    "CXXSelfAssignmentChecker.cpp",
+    "CallAndMessageChecker.cpp",
+    "CastSizeChecker.cpp",
+    "CastToStructChecker.cpp",
+    "CheckObjCDealloc.cpp",
+    "CheckObjCInstMethSignature.cpp",
+    "CheckSecuritySyntaxOnly.cpp",
+    "CheckSizeofPointer.cpp",
+    "CheckerDocumentation.cpp",
+    "ChrootChecker.cpp",
+    "CloneChecker.cpp",
+    "ConversionChecker.cpp",
+    "DeadStoresChecker.cpp",
+    "DebugCheckers.cpp",
+    "DeleteWithNonVirtualDtorChecker.cpp",
+    "DereferenceChecker.cpp",
+    "DirectIvarAssignment.cpp",
+    "DivZeroChecker.cpp",
+    "DynamicTypeChecker.cpp",
+    "DynamicTypePropagation.cpp",
+    "EnumCastOutOfRangeChecker.cpp",
+    "ExprInspectionChecker.cpp",
+    "FixedAddressChecker.cpp",
+    "GCDAntipatternChecker.cpp",
+    "GTestChecker.cpp",
+    "GenericTaintChecker.cpp",
+    "IdenticalExprChecker.cpp",
+    "InnerPointerChecker.cpp",
+    "IteratorChecker.cpp",
+    "IvarInvalidationChecker.cpp",
+    "LLVMConventionsChecker.cpp",
+    "LocalizationChecker.cpp",
+    "MPI-Checker/MPIBugReporter.cpp",
+    "MPI-Checker/MPIChecker.cpp",
+    "MPI-Checker/MPIFunctionClassifier.cpp",
+    "MacOSKeychainAPIChecker.cpp",
+    "MacOSXAPIChecker.cpp",
+    "MallocChecker.cpp",
+    "MallocOverflowSecurityChecker.cpp",
+    "MallocSizeofChecker.cpp",
+    "MmapWriteExecChecker.cpp",
+    "MoveChecker.cpp",
+    "NSAutoreleasePoolChecker.cpp",
+    "NSErrorChecker.cpp",
+    "NoReturnFunctionChecker.cpp",
+    "NonNullParamChecker.cpp",
+    "NonnullGlobalConstantsChecker.cpp",
+    "NullabilityChecker.cpp",
+    "NumberObjectConversionChecker.cpp",
+    "ObjCAtSyncChecker.cpp",
+    "ObjCAutoreleaseWriteChecker.cpp",
+    "ObjCContainersASTChecker.cpp",
+    "ObjCContainersChecker.cpp",
+    "ObjCMissingSuperCallChecker.cpp",
+    "ObjCPropertyChecker.cpp",
+    "ObjCSelfInitChecker.cpp",
+    "ObjCSuperDeallocChecker.cpp",
+    "ObjCUnusedIVarsChecker.cpp",
+    "PaddingChecker.cpp",
+    "PointerArithChecker.cpp",
+    "PointerSubChecker.cpp",
+    "PthreadLockChecker.cpp",
+    "RetainCountChecker/RetainCountChecker.cpp",
+    "RetainCountChecker/RetainCountDiagnostics.cpp",
+    "ReturnPointerRangeChecker.cpp",
+    "ReturnUndefChecker.cpp",
+    "RunLoopAutoreleaseLeakChecker.cpp",
+    "SimpleStreamChecker.cpp",
+    "StackAddrEscapeChecker.cpp",
+    "StdLibraryFunctionsChecker.cpp",
+    "StreamChecker.cpp",
+    "TaintTesterChecker.cpp",
+    "TestAfterDivZeroChecker.cpp",
+    "TraversalChecker.cpp",
+    "TrustNonnullChecker.cpp",
+    "UndefBranchChecker.cpp",
+    "UndefCapturedBlockVarChecker.cpp",
+    "UndefResultChecker.cpp",
+    "UndefinedArraySubscriptChecker.cpp",
+    "UndefinedAssignmentChecker.cpp",
+    "UninitializedObject/UninitializedObjectChecker.cpp",
+    "UninitializedObject/UninitializedPointee.cpp",
+    "UnixAPIChecker.cpp",
+    "UnreachableCodeChecker.cpp",
+    "VLASizeChecker.cpp",
+    "ValistChecker.cpp",
+    "VforkChecker.cpp",
+    "VirtualCallChecker.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/StaticAnalyzer/Core/BUILD.gn b/utils/gn/secondary/clang/lib/StaticAnalyzer/Core/BUILD.gn
new file mode 100644
index 0000000..e08a476
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/StaticAnalyzer/Core/BUILD.gn
@@ -0,0 +1,67 @@
+static_library("Core") {
+  output_name = "clangStaticAnalyzerCore"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Config",
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/CrossTU",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "APSIntType.cpp",
+    "AnalysisManager.cpp",
+    "AnalyzerOptions.cpp",
+    "BasicValueFactory.cpp",
+    "BlockCounter.cpp",
+    "BugReporter.cpp",
+    "BugReporterVisitors.cpp",
+    "CallEvent.cpp",
+    "Checker.cpp",
+    "CheckerContext.cpp",
+    "CheckerHelpers.cpp",
+    "CheckerManager.cpp",
+    "CommonBugCategories.cpp",
+    "ConstraintManager.cpp",
+    "CoreEngine.cpp",
+    "DynamicTypeMap.cpp",
+    "Environment.cpp",
+    "ExplodedGraph.cpp",
+    "ExprEngine.cpp",
+    "ExprEngineC.cpp",
+    "ExprEngineCXX.cpp",
+    "ExprEngineCallAndReturn.cpp",
+    "ExprEngineObjC.cpp",
+    "FunctionSummary.cpp",
+    "HTMLDiagnostics.cpp",
+    "IssueHash.cpp",
+    "LoopUnrolling.cpp",
+    "LoopWidening.cpp",
+    "MemRegion.cpp",
+    "PathDiagnostic.cpp",
+    "PlistDiagnostics.cpp",
+    "ProgramState.cpp",
+    "RangeConstraintManager.cpp",
+    "RangedConstraintManager.cpp",
+    "RegionStore.cpp",
+    "RetainSummaryManager.cpp",
+    "SValBuilder.cpp",
+    "SVals.cpp",
+    "SarifDiagnostics.cpp",
+    "SimpleConstraintManager.cpp",
+    "SimpleSValBuilder.cpp",
+    "Store.cpp",
+    "SubEngine.cpp",
+    "SymbolManager.cpp",
+    "TaintManager.cpp",
+    "WorkList.cpp",
+    "Z3ConstraintManager.cpp",
+  ]
+
+  # FIXME: clang/Config/BUILD.gn currently always sets CLANG_ANALYZER_WITH_Z3
+  # to false. If that changes we need to link to Z3 libs here.
+}
diff --git a/utils/gn/secondary/clang/lib/StaticAnalyzer/Frontend/BUILD.gn b/utils/gn/secondary/clang/lib/StaticAnalyzer/Frontend/BUILD.gn
new file mode 100644
index 0000000..e4654bf
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/StaticAnalyzer/Frontend/BUILD.gn
@@ -0,0 +1,23 @@
+static_library("Frontend") {
+  output_name = "clangStaticAnalyzerFrontend"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/CrossTU",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/StaticAnalyzer/Checkers",
+    "//clang/lib/StaticAnalyzer/Core",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AnalysisConsumer.cpp",
+    "CheckerRegistration.cpp",
+    "CheckerRegistry.cpp",
+    "FrontendActions.cpp",
+    "ModelConsumer.cpp",
+    "ModelInjector.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/StaticAnalyzer/Frontend/enable.gni b/utils/gn/secondary/clang/lib/StaticAnalyzer/Frontend/enable.gni
new file mode 100644
index 0000000..75653a9
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/StaticAnalyzer/Frontend/enable.gni
@@ -0,0 +1,4 @@
+declare_args() {
+  # Whether to include the static analyzer in the clang binary.
+  clang_enable_static_analyzer = true
+}
diff --git a/utils/gn/secondary/clang/lib/Tooling/ASTDiff/BUILD.gn b/utils/gn/secondary/clang/lib/Tooling/ASTDiff/BUILD.gn
new file mode 100644
index 0000000..0058a26
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Tooling/ASTDiff/BUILD.gn
@@ -0,0 +1,13 @@
+static_library("ASTDiff") {
+  output_name = "clangToolingASTDiff"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ASTDiff.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Tooling/BUILD.gn b/utils/gn/secondary/clang/lib/Tooling/BUILD.gn
new file mode 100644
index 0000000..a298d52
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Tooling/BUILD.gn
@@ -0,0 +1,31 @@
+static_library("Tooling") {
+  output_name = "clangTooling"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Driver:Options",
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Basic",
+    "//clang/lib/Driver",
+    "//clang/lib/Format",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling/Core",
+  ]
+  sources = [
+    "AllTUsExecution.cpp",
+    "ArgumentsAdjusters.cpp",
+    "CommonOptionsParser.cpp",
+    "CompilationDatabase.cpp",
+    "Execution.cpp",
+    "FileMatchTrie.cpp",
+    "FixIt.cpp",
+    "InterpolatingCompilationDatabase.cpp",
+    "JSONCompilationDatabase.cpp",
+    "Refactoring.cpp",
+    "RefactoringCallbacks.cpp",
+    "StandaloneExecution.cpp",
+    "Tooling.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Tooling/Core/BUILD.gn b/utils/gn/secondary/clang/lib/Tooling/Core/BUILD.gn
new file mode 100644
index 0000000..1cc513e
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Tooling/Core/BUILD.gn
@@ -0,0 +1,16 @@
+static_library("Core") {
+  output_name = "clangToolingCore"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "Diagnostic.cpp",
+    "Lookup.cpp",
+    "Replacement.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Tooling/Inclusions/BUILD.gn b/utils/gn/secondary/clang/lib/Tooling/Inclusions/BUILD.gn
new file mode 100644
index 0000000..419c615d
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Tooling/Inclusions/BUILD.gn
@@ -0,0 +1,15 @@
+static_library("Inclusions") {
+  output_name = "clangToolingInclusions"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling/Core",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "HeaderIncludes.cpp",
+    "IncludeStyle.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/lib/Tooling/Refactoring/BUILD.gn b/utils/gn/secondary/clang/lib/Tooling/Refactoring/BUILD.gn
new file mode 100644
index 0000000..bf57e3e
--- /dev/null
+++ b/utils/gn/secondary/clang/lib/Tooling/Refactoring/BUILD.gn
@@ -0,0 +1,29 @@
+static_library("Refactoring") {
+  output_name = "clangToolingRefactor"
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Basic",
+    "//clang/lib/Format",
+    "//clang/lib/Index",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling/Core",
+    "//llvm/lib/Support",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "ASTSelection.cpp",
+    "ASTSelectionRequirements.cpp",
+    "AtomicChange.cpp",
+    "Extract/Extract.cpp",
+    "Extract/SourceExtraction.cpp",
+    "RefactoringActions.cpp",
+    "Rename/RenamingAction.cpp",
+    "Rename/SymbolOccurrences.cpp",
+    "Rename/USRFinder.cpp",
+    "Rename/USRFindingAction.cpp",
+    "Rename/USRLocFinder.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/resource_dir.gni b/utils/gn/secondary/clang/resource_dir.gni
new file mode 100644
index 0000000..b5a877a
--- /dev/null
+++ b/utils/gn/secondary/clang/resource_dir.gni
@@ -0,0 +1,3 @@
+import("//llvm/version.gni")
+
+clang_resource_dir = "$root_build_dir/lib/clang/$llvm_version"
diff --git a/utils/gn/secondary/clang/test/BUILD.gn b/utils/gn/secondary/clang/test/BUILD.gn
new file mode 100644
index 0000000..681b07c
--- /dev/null
+++ b/utils/gn/secondary/clang/test/BUILD.gn
@@ -0,0 +1,186 @@
+import("//clang/lib/ARCMigrate/enable.gni")
+import("//clang/lib/StaticAnalyzer/Frontend/enable.gni")
+import("//llvm/lib/Target/targets.gni")
+import("//llvm/triples.gni")
+import("//llvm/utils/gn/build/libs/zlib/enable.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("clang_lit_site_cfg_files.gni")
+
+template("write_lit_config") {
+  write_cmake_config(target_name) {
+    input = invoker.input
+    output = invoker.output
+    values = [
+      "LIT_SITE_CFG_IN_HEADER=## Autogenerated from $input, do not edit",
+      "CLANG_BINARY_DIR=" +
+          rebase_path(get_label_info("//clang", "target_out_dir")),
+      "CLANG_SOURCE_DIR=" + rebase_path("//clang"),
+      "ENABLE_SHARED=0",
+      "LLVM_BINARY_DIR=" +
+          rebase_path(get_label_info("//llvm", "target_out_dir")),
+      "LLVM_LIBS_DIR=",  # needed only for shared builds
+      "LLVM_SOURCE_DIR=" + rebase_path("//llvm"),
+      "LLVM_TOOLS_DIR=" + rebase_path("$root_out_dir/bin"),
+      "TARGET_TRIPLE=$llvm_target_triple",
+    ]
+    if (host_os == "win") {
+      # See comment for Windows solink in llvm/utils/gn/build/toolchain/BUILD.gn
+      values += [ "SHLIBDIR=" + rebase_path("$root_out_dir/bin") ]
+    } else {
+      values += [ "SHLIBDIR=" + rebase_path("$root_out_dir/lib") ]
+    }
+    values += invoker.extra_values
+  }
+}
+
+write_lit_config("lit_site_cfg") {
+  # Fully-qualified instead of relative for LIT_SITE_CFG_IN_HEADER.
+  input = "//clang/test/lit.site.cfg.py.in"
+  output = clang_lit_site_cfg_file
+
+  extra_values = [
+    "CLANG_ANALYZER_WITH_Z3=",  # Must be empty, not 0.
+    "CLANG_BUILD_EXAMPLES=0",
+    "CLANG_DEFAULT_CXX_STDLIB=",  # Empty string means "default value" here.
+    "CLANG_TOOLS_DIR=" + rebase_path("$root_out_dir/bin"),
+
+    # This is only used if LLVM_USE_SANITIZER includes lsan and the host
+    # OS is macOS. Since the GN build currently never uses LLVM_USE_SANITIZER,
+    # this is never read.  If it's ever needed,
+    # utils/gn/build/toolchain/BUILD.gn should get the compiler from a variable
+    # that's also read here -- but that should happen after multi-toolchain
+    # builds exist, to make sure it's a toolchain var.
+    "CMAKE_CXX_COMPILER=c++",
+    "ENABLE_BACKTRACES=1",
+    "LLVM_HOST_TRIPLE=$llvm_current_triple",
+    "LLVM_LIT_TOOLS_DIR=",  # Intentionally empty, matches cmake build.
+    "LLVM_USE_SANITIZER=",
+    "PYTHON_EXECUTABLE=$python_path",
+    "USE_Z3_SOLVER=",
+  ]
+
+  if (clang_enable_arcmt) {
+    extra_values += [ "CLANG_ENABLE_ARCMT=1" ]
+  } else {
+    extra_values += [ "CLANG_ENABLE_ARCMT=0" ]
+  }
+
+  if (clang_enable_static_analyzer) {
+    extra_values += [ "CLANG_ENABLE_STATIC_ANALYZER=1" ]
+  } else {
+    extra_values += [ "CLANG_ENABLE_STATIC_ANALYZER=0" ]
+  }
+
+  if (llvm_enable_zlib) {
+    extra_values += [ "HAVE_LIBZ=1" ]
+  } else {
+    extra_values += [ "HAVE_LIBZ=0" ]  # Must be 0.
+  }
+
+  if (host_cpu == "x64") {
+    extra_values += [ "HOST_ARCH=x86_64" ]
+  } else {
+    assert(false, "unimplemented host_cpu " + host_cpu)
+  }
+
+  if (host_os == "mac") {
+    extra_values += [ "LLVM_PLUGIN_EXT=.dylib" ]
+  } else if (host_os == "win") {
+    extra_values += [ "LLVM_PLUGIN_EXT=.dll" ]
+  } else {
+    extra_values += [ "LLVM_PLUGIN_EXT=.so" ]
+  }
+}
+
+write_lit_config("lit_unit_site_cfg") {
+  # Fully-qualified instead of relative for LIT_SITE_CFG_IN_HEADER.
+  input = "//clang/test/Unit/lit.site.cfg.py.in"
+  output = clang_lit_unit_site_cfg_file
+  extra_values = [ "LLVM_BUILD_MODE=." ]
+}
+
+# This target should contain all dependencies of check-clang.
+# //:default depends on it, so that ninja's default target builds all
+# prerequisites for check-clang but doesn't run check-clang itself.
+group("test") {
+  deps = [
+    ":lit_site_cfg",
+    ":lit_unit_site_cfg",
+    "//clang/lib/Headers",
+    "//clang/tools/c-index-test",
+    "//clang/tools/clang-diff",
+    "//clang/tools/clang-format",
+    "//clang/tools/clang-import-test",
+    "//clang/tools/clang-offload-bundler",
+    "//clang/tools/clang-refactor",
+    "//clang/tools/clang-rename",
+    "//clang/tools/diagtool",
+    "//clang/tools/driver:symlinks",
+    "//clang/unittests",
+    "//clang/utils/TableGen:clang-tblgen",
+    "//clang/utils/hmaptool",
+    "//llvm/tools/llc",
+    "//llvm/tools/llvm-bcanalyzer",
+    "//llvm/tools/llvm-cat",
+    "//llvm/tools/llvm-config",
+    "//llvm/tools/llvm-dis",
+    "//llvm/tools/llvm-lto",
+    "//llvm/tools/llvm-lto2",
+    "//llvm/tools/llvm-modextract",
+    "//llvm/tools/llvm-nm:symlinks",
+    "//llvm/tools/llvm-objdump:symlinks",
+    "//llvm/tools/llvm-profdata",
+    "//llvm/tools/llvm-readobj:symlinks",
+    "//llvm/tools/llvm-symbolizer:symlinks",
+    "//llvm/tools/opt",
+    "//llvm/utils/FileCheck",
+    "//llvm/utils/count",
+    "//llvm/utils/llvm-lit",
+    "//llvm/utils/not",
+  ]
+  if (clang_enable_arcmt) {
+    deps += [
+      "//clang/tools/arcmt-test",
+      "//clang/tools/c-arcmt-test",
+    ]
+  }
+  if (clang_enable_static_analyzer) {
+    deps += [
+      "//clang/tools/clang-check",
+      "//clang/tools/clang-extdef-mapping",
+    ]
+  }
+
+  # FIXME: clang_build_examples
+  testonly = true
+}
+
+action("check-clang") {
+  script = "$root_out_dir/bin/llvm-lit"
+  if (host_os == "win") {
+    script += ".py"
+  }
+  args = [
+    "-sv",
+    "--param",
+    "clang_site_config=" + rebase_path(clang_lit_site_cfg_file, root_out_dir),
+    "--param",
+    "clang_unit_site_config=" +
+        rebase_path(clang_lit_unit_site_cfg_file, root_out_dir),
+    rebase_path(".", root_out_dir),
+  ]
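+
+  # For illustration (paths abbreviated): the invocation assembled above is
+  # roughly
+  #   bin/llvm-lit -sv \
+  #     --param clang_site_config=<gen>/clang/test/lit.site.cfg.py \
+  #     --param clang_unit_site_config=<gen>/clang/test/Unit/lit.site.cfg.py \
+  #     <path to clang/test>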
+  outputs = [
+    "$target_gen_dir/run-lit",  # Non-existing, so that ninja runs it each time.
+  ]
+
+  # Since check-clang is always dirty, //:default doesn't depend on it so that
+  # it's not part of the default ninja target.  Hence, check-clang shouldn't
+  # have any deps except :test, so that the default target is sure to build
+  # all the deps.
+  deps = [
+    ":test",
+  ]
+  testonly = true
+
+  pool = "//:console"
+}
diff --git a/utils/gn/secondary/clang/test/clang_lit_site_cfg_files.gni b/utils/gn/secondary/clang/test/clang_lit_site_cfg_files.gni
new file mode 100644
index 0000000..f4079a6
--- /dev/null
+++ b/utils/gn/secondary/clang/test/clang_lit_site_cfg_files.gni
@@ -0,0 +1,2 @@
+clang_lit_site_cfg_file = "$root_gen_dir/clang/test/lit.site.cfg.py"
+clang_lit_unit_site_cfg_file = "$root_gen_dir/clang/test/Unit/lit.site.cfg.py"
diff --git a/utils/gn/secondary/clang/tools/arcmt-test/BUILD.gn b/utils/gn/secondary/clang/tools/arcmt-test/BUILD.gn
new file mode 100644
index 0000000..c16ce2a
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/arcmt-test/BUILD.gn
@@ -0,0 +1,13 @@
+executable("arcmt-test") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/ARCMigrate",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "arcmt-test.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/c-arcmt-test/BUILD.gn b/utils/gn/secondary/clang/tools/c-arcmt-test/BUILD.gn
new file mode 100644
index 0000000..2c77f7a
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/c-arcmt-test/BUILD.gn
@@ -0,0 +1,15 @@
+executable("c-arcmt-test") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/tools/libclang",
+  ]
+  sources = [
+    "c-arcmt-test.c",
+  ]
+
+  # See comment at top of clang/tools/libclang/BUILD.gn for why this isn't
+  # needed on Linux.
+  if (host_os == "mac") {
+    ldflags = [ "-Wl,-rpath,@loader_path/../lib" ]
+  }
+}
diff --git a/utils/gn/secondary/clang/tools/c-index-test/BUILD.gn b/utils/gn/secondary/clang/tools/c-index-test/BUILD.gn
new file mode 100644
index 0000000..c168b6d
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/c-index-test/BUILD.gn
@@ -0,0 +1,28 @@
+executable("c-index-test") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Config",
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/CodeGen",
+    "//clang/lib/Frontend",
+    "//clang/lib/Index",
+    "//clang/lib/Serialization",
+    "//clang/tools/libclang",
+    "//llvm/lib/Support",
+    "//llvm/utils/gn/build/libs/xml",
+  ]
+  if (host_os != "win") {
+    cflags_c = [ "-std=gnu89" ]
+  }
+  sources = [
+    "c-index-test.c",
+    "core_main.cpp",
+  ]
+
+  # See comment at top of clang/tools/libclang/BUILD.gn for why this isn't
+  # needed on Linux.
+  if (host_os == "mac") {
+    ldflags = [ "-Wl,-rpath,@loader_path/../lib" ]
+  }
+}
diff --git a/utils/gn/secondary/clang/tools/clang-check/BUILD.gn b/utils/gn/secondary/clang/tools/clang-check/BUILD.gn
new file mode 100644
index 0000000..3d18b14
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-check/BUILD.gn
@@ -0,0 +1,18 @@
+executable("clang-check") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Driver",
+    "//clang/lib/Frontend",
+    "//clang/lib/Frontend/Rewrite",
+    "//clang/lib/StaticAnalyzer/Frontend",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "ClangCheck.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/clang-diff/BUILD.gn b/utils/gn/secondary/clang/tools/clang-diff/BUILD.gn
new file mode 100644
index 0000000..c68cd29
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-diff/BUILD.gn
@@ -0,0 +1,13 @@
+executable("clang-diff") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Tooling",
+    "//clang/lib/Tooling/ASTDiff",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ClangDiff.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/clang-extdef-mapping/BUILD.gn b/utils/gn/secondary/clang/tools/clang-extdef-mapping/BUILD.gn
new file mode 100644
index 0000000..68b3af4
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-extdef-mapping/BUILD.gn
@@ -0,0 +1,17 @@
+executable("clang-extdef-mapping") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/CrossTU",
+    "//clang/lib/Frontend",
+    "//clang/lib/Index",
+    "//clang/lib/Tooling",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ClangExtDefMapGen.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/clang-format/BUILD.gn b/utils/gn/secondary/clang/tools/clang-format/BUILD.gn
new file mode 100644
index 0000000..7352ae1
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-format/BUILD.gn
@@ -0,0 +1,13 @@
+executable("clang-format") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Format",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling/Core",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ClangFormat.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/clang-import-test/BUILD.gn b/utils/gn/secondary/clang/tools/clang-import-test/BUILD.gn
new file mode 100644
index 0000000..60a8a66
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-import-test/BUILD.gn
@@ -0,0 +1,17 @@
+executable("clang-import-test") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/CodeGen",
+    "//clang/lib/Driver",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/Parse",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "clang-import-test.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/clang-offload-bundler/BUILD.gn b/utils/gn/secondary/clang/tools/clang-offload-bundler/BUILD.gn
new file mode 100644
index 0000000..4335087
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-offload-bundler/BUILD.gn
@@ -0,0 +1,13 @@
+executable("clang-offload-bundler") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ClangOffloadBundler.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/clang-refactor/BUILD.gn b/utils/gn/secondary/clang/tools/clang-refactor/BUILD.gn
new file mode 100644
index 0000000..4942aeb
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-refactor/BUILD.gn
@@ -0,0 +1,19 @@
+executable("clang-refactor") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Format",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling",
+    "//clang/lib/Tooling/Refactoring",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ClangRefactor.cpp",
+    "TestSupport.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/clang-rename/BUILD.gn b/utils/gn/secondary/clang/tools/clang-rename/BUILD.gn
new file mode 100644
index 0000000..ad08ec1
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/clang-rename/BUILD.gn
@@ -0,0 +1,16 @@
+executable("clang-rename") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling",
+    "//clang/lib/Tooling/Core",
+    "//clang/lib/Tooling/Refactoring",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ClangRename.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/diagtool/BUILD.gn b/utils/gn/secondary/clang/tools/diagtool/BUILD.gn
new file mode 100644
index 0000000..52924d7
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/diagtool/BUILD.gn
@@ -0,0 +1,18 @@
+executable("diagtool") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Basic:DiagnosticIndexName",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "DiagTool.cpp",
+    "DiagnosticNames.cpp",
+    "FindDiagnosticID.cpp",
+    "ListWarnings.cpp",
+    "ShowEnabledWarnings.cpp",
+    "TreeView.cpp",
+    "diagtool_main.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/driver/BUILD.gn b/utils/gn/secondary/clang/tools/driver/BUILD.gn
new file mode 100644
index 0000000..7324e04
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/driver/BUILD.gn
@@ -0,0 +1,91 @@
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("//llvm/version.gni")
+
+symlinks = [
+  # target_name, symlink_target pairs: GN doesn't support '+' in rule names.
+  [
+    "clangxx",
+    "clang++",
+  ],
+  [
+    "clang-cl",
+    "clang-cl",
+  ],
+  [
+    "clang-cpp",
+    "clang-cpp",
+  ],
+]
+foreach(target, symlinks) {
+  symlink_or_copy(target[0]) {
+    deps = [
+      ":clang",
+    ]
+    source = "clang"
+    output = "$root_out_dir/bin/${target[1]}"
+  }
+}
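+
+# For illustration: the "clangxx" rule above creates $root_out_dir/bin/clang++
+# pointing at the clang binary (or a copy, on hosts without symlink support).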
+
+# //:clang depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = []
+  foreach(target, symlinks) {
+    deps += [ ":${target[0]}" ]
+  }
+}
+
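+# On macOS, an Info.plist section is embedded into the clang binary (see the
+# ldflags in the "clang" target below).  For illustration, write_cmake_config
+# substitutes @VAR@-style placeholders, so e.g. @TOOL_INFO_NAME@ in
+# Info.plist.in presumably becomes "clang".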
+if (host_os == "mac") {
+  write_cmake_config("write_info_plist") {
+    input = "Info.plist.in"
+    output = "$target_gen_dir/Info.plist"
+    values = [
+      "TOOL_INFO_BUILD_VERSION=$llvm_version_major.$llvm_version_minor",
+      "TOOL_INFO_NAME=clang",
+      "TOOL_INFO_UTI=org.llvm.clang",
+      "TOOL_INFO_VERSION=$llvm_version",
+    ]
+  }
+}
+
+executable("clang") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Config",
+    "//clang/lib/Basic",
+    "//clang/lib/CodeGen",
+    "//clang/lib/Driver",
+    "//clang/lib/Frontend",
+    "//clang/lib/FrontendTool",
+    "//clang/lib/Headers",
+    "//clang/tools/clang-offload-bundler",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/InstCombine",
+    "//llvm/lib/Transforms/Instrumentation",
+    "//llvm/lib/Transforms/ObjCARC",
+    "//llvm/lib/Transforms/Scalar",
+    "//llvm/lib/Transforms/Utils",
+    "//llvm/lib/Transforms/Vectorize",
+  ]
+  if (host_os == "mac") {
+    deps += [ ":write_info_plist" ]
+    plist = get_target_outputs(":write_info_plist")
+    ldflags = [ "-Wl,-sectcreate,__TEXT,__info_plist," +
+                rebase_path(plist[0], root_out_dir) ]
+  }
+  sources = [
+    "cc1_main.cpp",
+    "cc1as_main.cpp",
+    "cc1gen_reproducer_main.cpp",
+    "driver.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/tools/libclang/BUILD.gn b/utils/gn/secondary/clang/tools/libclang/BUILD.gn
new file mode 100644
index 0000000..c2fad99
--- /dev/null
+++ b/utils/gn/secondary/clang/tools/libclang/BUILD.gn
@@ -0,0 +1,89 @@
+import("//clang/lib/ARCMigrate/enable.gni")
+import("//llvm/version.gni")
+
+# This build file is just enough to get check-clang to pass; it's missing
+# several things from the CMake build:
+# - linking in clangTidyPlugin and clangIncludeFixerPlugin from
+#   clang-tools-extra (which doesn't have any GN build files yet)
+# - using libclang.exports
+# - a build target copying the Python bindings
+# - the GN linux build always builds without -fPIC (as if LLVM_ENABLE_PIC=OFF
+#   in the CMake build), so libclang is always a static library on linux
+# - the GN build doesn't have LIBCLANG_BUILD_STATIC
+
+libclang_target_type = "shared_library"
+if (host_os == "linux") {
+  # Linux needs -fPIC to build shared libs, but it isn't enabled by default.
+  # For now, make libclang a static lib there.
+  libclang_target_type = "static_library"
+}
+
+target(libclang_target_type, "libclang") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/include/clang/Config",
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Headers",
+    "//clang/lib/Index",
+    "//clang/lib/Lex",
+    "//clang/lib/Sema",
+    "//clang/lib/Tooling",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  if (clang_enable_arcmt) {
+    deps += [ "//clang/lib/ARCMigrate" ]
+  }
+
+  if (host_os == "win") {
+    defines = [ "_CINDEX_LIB_" ]
+  }
+
+  sources = [
+    "ARCMigrate.cpp",
+    "BuildSystem.cpp",
+    "CIndex.cpp",
+    "CIndexCXX.cpp",
+    "CIndexCodeCompletion.cpp",
+    "CIndexDiagnostic.cpp",
+    "CIndexDiagnostic.h",
+    "CIndexHigh.cpp",
+    "CIndexInclusionStack.cpp",
+    "CIndexUSRs.cpp",
+    "CIndexer.cpp",
+    "CIndexer.h",
+    "CXComment.cpp",
+    "CXCompilationDatabase.cpp",
+    "CXCursor.cpp",
+    "CXCursor.h",
+    "CXIndexDataConsumer.cpp",
+    "CXLoadedDiagnostic.cpp",
+    "CXLoadedDiagnostic.h",
+    "CXSourceLocation.cpp",
+    "CXSourceLocation.h",
+    "CXStoredDiagnostic.cpp",
+    "CXString.cpp",
+    "CXString.h",
+    "CXTranslationUnit.h",
+    "CXType.cpp",
+    "CXType.h",
+    "Index_Internal.h",
+    "Indexing.cpp",
+  ]
+  if (host_os == "mac") {
+    ldflags = [
+      "-Wl,-compatibility_version,1",
+      "-Wl,-current_version,$llvm_version",
+
+      # See llvm_setup_rpath() in CMake.
+      "-Wl,-install_name,@rpath/libclang.dylib",
+      "-Wl,-rpath,@loader_path/../lib",
+    ]
+  }
+
+  # FIXME: Use libclang.exports
+}
diff --git a/utils/gn/secondary/clang/unittests/AST/BUILD.gn b/utils/gn/secondary/clang/unittests/AST/BUILD.gn
new file mode 100644
index 0000000..d5b00af
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/AST/BUILD.gn
@@ -0,0 +1,33 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ASTTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ASTContextParentMapTest.cpp",
+    "ASTImporterTest.cpp",
+    "ASTTypeTraitsTest.cpp",
+    "ASTVectorTest.cpp",
+    "CommentLexer.cpp",
+    "CommentParser.cpp",
+    "CommentTextTest.cpp",
+    "DataCollectionTest.cpp",
+    "DeclPrinterTest.cpp",
+    "DeclTest.cpp",
+    "EvaluateAsRValueTest.cpp",
+    "ExternalASTSourceTest.cpp",
+    "Language.cpp",
+    "NamedDeclPrinterTest.cpp",
+    "SourceLocationTest.cpp",
+    "StmtPrinterTest.cpp",
+    "StructuralEquivalenceTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/ASTMatchers/BUILD.gn b/utils/gn/secondary/clang/unittests/ASTMatchers/BUILD.gn
new file mode 100644
index 0000000..241d03d
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/ASTMatchers/BUILD.gn
@@ -0,0 +1,19 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ASTMatchersTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ASTMatchersInternalTest.cpp",
+    "ASTMatchersNarrowingTest.cpp",
+    "ASTMatchersNodeTest.cpp",
+    "ASTMatchersTraversalTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/ASTMatchers/Dynamic/BUILD.gn b/utils/gn/secondary/clang/unittests/ASTMatchers/Dynamic/BUILD.gn
new file mode 100644
index 0000000..de89f0f
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/ASTMatchers/Dynamic/BUILD.gn
@@ -0,0 +1,19 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("DynamicASTMatchersTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/ASTMatchers/Dynamic",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ParserTest.cpp",
+    "RegistryTest.cpp",
+    "VariantValueTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Analysis/BUILD.gn b/utils/gn/secondary/clang/unittests/Analysis/BUILD.gn
new file mode 100644
index 0000000..c6c6fbe
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Analysis/BUILD.gn
@@ -0,0 +1,19 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ClangAnalysisTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CFGTest.cpp",
+    "CloneDetectionTest.cpp",
+    "ExprMutationAnalyzerTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/BUILD.gn b/utils/gn/secondary/clang/unittests/BUILD.gn
new file mode 100644
index 0000000..a8293ba
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/BUILD.gn
@@ -0,0 +1,35 @@
+import("//clang/lib/StaticAnalyzer/Frontend/enable.gni")
+
+group("unittests") {
+  deps = [
+    "AST:ASTTests",
+    "ASTMatchers:ASTMatchersTests",
+    "ASTMatchers/Dynamic:DynamicASTMatchersTests",
+    "Basic:BasicTests",
+    "CodeGen:ClangCodeGenTests",
+    "CrossTU:CrossTUTests",
+    "Driver:ClangDriverTests",
+    "Format:FormatTests",
+    "Index:IndexTests",
+    "Lex:LexTests",
+    "Rename:ClangRenameTests",
+    "Rewrite:RewriteTests",
+    "Sema:SemaTests",
+    "Tooling:ToolingTests",
+  ]
+  if (clang_enable_static_analyzer) {
+    deps += [
+      "Analysis:ClangAnalysisTests",
+      "Frontend:FrontendTests",
+      "StaticAnalyzer:StaticAnalysisTests",
+    ]
+  }
+  if (host_os != "win") {
+    # FIXME: libclang unit tests are disabled on Windows due
+    # to failures, mostly in libclang.VirtualFileOverlay_*.
+    # FIXME: Also, the executable can't find libclang.dll since that's
+    # in a different directory.
+    deps += [ "libclang:libclangTests" ]
+  }
+  testonly = true
+}
diff --git a/utils/gn/secondary/clang/unittests/Basic/BUILD.gn b/utils/gn/secondary/clang/unittests/Basic/BUILD.gn
new file mode 100644
index 0000000..31277ae
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Basic/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("BasicTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CharInfoTest.cpp",
+    "DiagnosticTest.cpp",
+    "FileManagerTest.cpp",
+    "FixedPointTest.cpp",
+    "MemoryBufferCacheTest.cpp",
+    "SourceManagerTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/CodeGen/BUILD.gn b/utils/gn/secondary/clang/unittests/CodeGen/BUILD.gn
new file mode 100644
index 0000000..37b514e
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/CodeGen/BUILD.gn
@@ -0,0 +1,21 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ClangCodeGenTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/CodeGen",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/Parse",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "BufferSourceTest.cpp",
+    "CodeGenExternalTest.cpp",
+    "IncrementalProcessingTest.cpp",
+    "TBAAMetadataTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/CrossTU/BUILD.gn b/utils/gn/secondary/clang/unittests/CrossTU/BUILD.gn
new file mode 100644
index 0000000..6330f30
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/CrossTU/BUILD.gn
@@ -0,0 +1,17 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("CrossTUTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/CrossTU",
+    "//clang/lib/Frontend",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "CrossTranslationUnitTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Driver/BUILD.gn b/utils/gn/secondary/clang/unittests/Driver/BUILD.gn
new file mode 100644
index 0000000..6a91b03
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Driver/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ClangDriverTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Driver",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "DistroTest.cpp",
+    "ModuleCacheTest.cpp",
+    "MultilibTest.cpp",
+    "ToolChainTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Format/BUILD.gn b/utils/gn/secondary/clang/unittests/Format/BUILD.gn
new file mode 100644
index 0000000..aaf990b
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Format/BUILD.gn
@@ -0,0 +1,31 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("FormatTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Basic",
+    "//clang/lib/Format",
+    "//clang/lib/Frontend",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling/Core",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CleanupTest.cpp",
+    "FormatTest.cpp",
+    "FormatTestComments.cpp",
+    "FormatTestJS.cpp",
+    "FormatTestJava.cpp",
+    "FormatTestObjC.cpp",
+    "FormatTestProto.cpp",
+    "FormatTestRawStrings.cpp",
+    "FormatTestSelective.cpp",
+    "FormatTestTableGen.cpp",
+    "FormatTestTextProto.cpp",
+    "NamespaceEndCommentsFixerTest.cpp",
+    "SortImportsTestJS.cpp",
+    "SortImportsTestJava.cpp",
+    "SortIncludesTest.cpp",
+    "UsingDeclarationsSorterTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Frontend/BUILD.gn b/utils/gn/secondary/clang/unittests/Frontend/BUILD.gn
new file mode 100644
index 0000000..f3b535c
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Frontend/BUILD.gn
@@ -0,0 +1,25 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("FrontendTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/CodeGen",
+    "//clang/lib/Frontend",
+    "//clang/lib/FrontendTool",
+    "//clang/lib/Lex",
+    "//clang/lib/Sema",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ASTUnitTest.cpp",
+    "CodeGenActionTest.cpp",
+    "CompilerInstanceTest.cpp",
+    "FixedPointString.cpp",
+    "FrontendActionTest.cpp",
+    "OutputStreamTest.cpp",
+    "PCHPreambleTest.cpp",
+    "ParsedSourceLocationTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Index/BUILD.gn b/utils/gn/secondary/clang/unittests/Index/BUILD.gn
new file mode 100644
index 0000000..c2e6395
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Index/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("IndexTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Index",
+    "//clang/lib/Lex",
+    "//clang/lib/Serialization",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "IndexTests.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Lex/BUILD.gn b/utils/gn/secondary/clang/unittests/Lex/BUILD.gn
new file mode 100644
index 0000000..63180cb
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Lex/BUILD.gn
@@ -0,0 +1,20 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LexTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Lex",
+    "//clang/lib/Parse",
+    "//clang/lib/Sema",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "HeaderMapTest.cpp",
+    "HeaderSearchTest.cpp",
+    "LexerTest.cpp",
+    "PPCallbacksTest.cpp",
+    "PPConditionalDirectiveRecordTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Rename/BUILD.gn b/utils/gn/secondary/clang/unittests/Rename/BUILD.gn
new file mode 100644
index 0000000..54c0dba
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Rename/BUILD.gn
@@ -0,0 +1,28 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ClangRenameTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+
+  # The tests include clang/unittests/Tooling/RewriterTestContext.h.
+  include_dirs = [ "../.." ]
+
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Basic",
+    "//clang/lib/Format",
+    "//clang/lib/Frontend",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling",
+    "//clang/lib/Tooling/Core",
+    "//clang/lib/Tooling/Refactoring",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "RenameAliasTest.cpp",
+    "RenameClassTest.cpp",
+    "RenameEnumTest.cpp",
+    "RenameFunctionTest.cpp",
+    "RenameMemberTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Rewrite/BUILD.gn b/utils/gn/secondary/clang/unittests/Rewrite/BUILD.gn
new file mode 100644
index 0000000..350f67d
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Rewrite/BUILD.gn
@@ -0,0 +1,12 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("RewriteTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Rewrite",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "RewriteBufferTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Sema/BUILD.gn b/utils/gn/secondary/clang/unittests/Sema/BUILD.gn
new file mode 100644
index 0000000..b65865d
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Sema/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("SemaTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/Basic",
+    "//clang/lib/Frontend",
+    "//clang/lib/Parse",
+    "//clang/lib/Sema",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CodeCompleteTest.cpp",
+    "ExternalSemaSourceTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn b/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn
new file mode 100644
index 0000000..4263853
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn
@@ -0,0 +1,17 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("StaticAnalysisTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/lib/Analysis",
+    "//clang/lib/Basic",
+    "//clang/lib/StaticAnalyzer/Core",
+    "//clang/lib/StaticAnalyzer/Frontend",
+    "//clang/lib/Tooling",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AnalyzerOptionsTest.cpp",
+    "RegisterCustomCheckersTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/Tooling/BUILD.gn b/utils/gn/secondary/clang/unittests/Tooling/BUILD.gn
new file mode 100644
index 0000000..5b027dc
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/Tooling/BUILD.gn
@@ -0,0 +1,61 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ToolingTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  include_dirs = [ "." ]
+  deps = [
+    "//clang/lib/AST",
+    "//clang/lib/ASTMatchers",
+    "//clang/lib/Basic",
+    "//clang/lib/Format",
+    "//clang/lib/Frontend",
+    "//clang/lib/Lex",
+    "//clang/lib/Rewrite",
+    "//clang/lib/Tooling",
+    "//clang/lib/Tooling/Core",
+    "//clang/lib/Tooling/Refactoring",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "ASTSelectionTest.cpp",
+    "CastExprTest.cpp",
+    "CommentHandlerTest.cpp",
+    "CompilationDatabaseTest.cpp",
+    "DiagnosticsYamlTest.cpp",
+    "ExecutionTest.cpp",
+    "FixItTest.cpp",
+    "HeaderIncludesTest.cpp",
+    "LexicallyOrderedRecursiveASTVisitorTest.cpp",
+    "LookupTest.cpp",
+    "QualTypeNamesTest.cpp",
+    "RecursiveASTVisitorTestDeclVisitor.cpp",
+    "RecursiveASTVisitorTestPostOrderVisitor.cpp",
+    "RecursiveASTVisitorTestTypeLocVisitor.cpp",
+    "RecursiveASTVisitorTests/Attr.cpp",
+    "RecursiveASTVisitorTests/CXXBoolLiteralExpr.cpp",
+    "RecursiveASTVisitorTests/CXXMemberCall.cpp",
+    "RecursiveASTVisitorTests/CXXOperatorCallExprTraverser.cpp",
+    "RecursiveASTVisitorTests/Class.cpp",
+    "RecursiveASTVisitorTests/ConstructExpr.cpp",
+    "RecursiveASTVisitorTests/DeclRefExpr.cpp",
+    "RecursiveASTVisitorTests/ImplicitCtor.cpp",
+    "RecursiveASTVisitorTests/InitListExprPostOrder.cpp",
+    "RecursiveASTVisitorTests/InitListExprPostOrderNoQueue.cpp",
+    "RecursiveASTVisitorTests/InitListExprPreOrder.cpp",
+    "RecursiveASTVisitorTests/InitListExprPreOrderNoQueue.cpp",
+    "RecursiveASTVisitorTests/IntegerLiteral.cpp",
+    "RecursiveASTVisitorTests/LambdaDefaultCapture.cpp",
+    "RecursiveASTVisitorTests/LambdaExpr.cpp",
+    "RecursiveASTVisitorTests/NestedNameSpecifiers.cpp",
+    "RecursiveASTVisitorTests/ParenExpr.cpp",
+    "RecursiveASTVisitorTests/TemplateArgumentLocTraverser.cpp",
+    "RecursiveASTVisitorTests/TraversalScope.cpp",
+    "RefactoringActionRulesTest.cpp",
+    "RefactoringCallbacksTest.cpp",
+    "RefactoringTest.cpp",
+    "ReplacementsYamlTest.cpp",
+    "RewriterTest.cpp",
+    "ToolingTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/unittests/libclang/BUILD.gn b/utils/gn/secondary/clang/unittests/libclang/BUILD.gn
new file mode 100644
index 0000000..cd99640
--- /dev/null
+++ b/utils/gn/secondary/clang/unittests/libclang/BUILD.gn
@@ -0,0 +1,14 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("libclangTests") {
+  configs += [ "//llvm/utils/gn/build:clang_code" ]
+  deps = [
+    "//clang/tools/libclang",
+  ]
+  sources = [
+    "LibclangTest.cpp",
+  ]
+  if (host_os == "mac") {
+    ldflags = [ "-Wl,-rpath," + rebase_path("$root_out_dir/lib") ]
+  }
+}
diff --git a/utils/gn/secondary/clang/utils/TableGen/BUILD.gn b/utils/gn/secondary/clang/utils/TableGen/BUILD.gn
new file mode 100644
index 0000000..acfccca
--- /dev/null
+++ b/utils/gn/secondary/clang/utils/TableGen/BUILD.gn
@@ -0,0 +1,19 @@
+executable("clang-tblgen") {
+  deps = [
+    "//llvm/lib/Support",
+    "//llvm/lib/TableGen",
+  ]
+  sources = [
+    "ClangASTNodesEmitter.cpp",
+    "ClangAttrEmitter.cpp",
+    "ClangCommentCommandInfoEmitter.cpp",
+    "ClangCommentHTMLNamedCharacterReferenceEmitter.cpp",
+    "ClangCommentHTMLTagsEmitter.cpp",
+    "ClangDataCollectorsEmitter.cpp",
+    "ClangDiagnosticsEmitter.cpp",
+    "ClangOptionDocEmitter.cpp",
+    "ClangSACheckersEmitter.cpp",
+    "NeonEmitter.cpp",
+    "TableGen.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/clang/utils/TableGen/clang_tablegen.gni b/utils/gn/secondary/clang/utils/TableGen/clang_tablegen.gni
new file mode 100644
index 0000000..efc1e3c
--- /dev/null
+++ b/utils/gn/secondary/clang/utils/TableGen/clang_tablegen.gni
@@ -0,0 +1,40 @@
+# This file introduces a template for running clang-tblgen.
+#
+# Parameters:
+#
+#   args (required)
+#       [list of strings] Flags to pass to clang-tblgen.
+#
+#   output_name (optional)
+#       Basename of the generated output file.
+#       Defaults to target name with ".inc" appended.
+#
+#   td_file (optional)
+#       The .td file to pass to clang-tblgen.
+#       Defaults to target name with ".td" appended.
+#
+#   visibility (optional)
+#       GN's regular visibility attribute, see `gn help visibility`.
+#
+# Example of usage:
+#
+#   clang_tablegen("DiagnosticGroups") {
+#     args = [ "-gen-clang-diag-groups" ]
+#     td_file = "Diagnostic.td"
+#   }
+
+import("//llvm/utils/TableGen/tablegen.gni")
+
+template("clang_tablegen") {
+  tablegen(target_name) {
+    forward_variables_from(invoker,
+                           [
+                             "args",
+                             "output_name",
+                             "td_file",
+                             "visibility",
+                           ])
+
+    tblgen_target = "//clang/utils/TableGen:clang-tblgen"
+  }
+}
diff --git a/utils/gn/secondary/clang/utils/hmaptool/BUILD.gn b/utils/gn/secondary/clang/utils/hmaptool/BUILD.gn
new file mode 100644
index 0000000..f76db22
--- /dev/null
+++ b/utils/gn/secondary/clang/utils/hmaptool/BUILD.gn
@@ -0,0 +1,8 @@
+copy("hmaptool") {
+  sources = [
+    "hmaptool",
+  ]
+  outputs = [
+    "$root_out_dir/bin/{{source_file_part}}",
+  ]
+}
diff --git a/utils/gn/secondary/compiler-rt/include/BUILD.gn b/utils/gn/secondary/compiler-rt/include/BUILD.gn
new file mode 100644
index 0000000..39aac9f
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/include/BUILD.gn
@@ -0,0 +1,26 @@
+import("//clang/resource_dir.gni")
+
+copy("include") {
+  sources = [
+    "sanitizer/allocator_interface.h",
+    "sanitizer/asan_interface.h",
+    "sanitizer/common_interface_defs.h",
+    "sanitizer/coverage_interface.h",
+    "sanitizer/dfsan_interface.h",
+    "sanitizer/esan_interface.h",
+    "sanitizer/hwasan_interface.h",
+    "sanitizer/linux_syscall_hooks.h",
+    "sanitizer/lsan_interface.h",
+    "sanitizer/msan_interface.h",
+    "sanitizer/netbsd_syscall_hooks.h",
+    "sanitizer/scudo_interface.h",
+    "sanitizer/tsan_interface.h",
+    "sanitizer/tsan_interface_atomic.h",
+    "xray/xray_interface.h",
+    "xray/xray_log_interface.h",
+    "xray/xray_records.h",
+  ]
+  outputs = [
+    "$clang_resource_dir/include/{{source_target_relative}}",
+  ]
+}
diff --git a/utils/gn/secondary/compiler-rt/lib/cfi/BUILD.gn b/utils/gn/secondary/compiler-rt/lib/cfi/BUILD.gn
new file mode 100644
index 0000000..dd24b5b
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/lib/cfi/BUILD.gn
@@ -0,0 +1,10 @@
+import("//clang/resource_dir.gni")
+
+copy("blacklist") {
+  sources = [
+    "cfi_blacklist.txt",
+  ]
+  outputs = [
+    "$clang_resource_dir/share/{{source_target_relative}}",
+  ]
+}
diff --git a/utils/gn/secondary/compiler-rt/lib/hwasan/BUILD.gn b/utils/gn/secondary/compiler-rt/lib/hwasan/BUILD.gn
new file mode 100644
index 0000000..81b6f60
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/lib/hwasan/BUILD.gn
@@ -0,0 +1,102 @@
+import("//compiler-rt/target.gni")
+
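+# Generates the linker version script for the shared hwasan runtime.  For
+# illustration, the command assembled below is roughly:
+#   gen_dynamic_list.py --version-list --extra hwasan.syms.extra \
+#       libclang_rt.hwasan-<target>.a libclang_rt.hwasan_cxx-<target>.a \
+#       -o hwasan.vers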
+action("version_script") {
+  script = "//compiler-rt/lib/sanitizer_common/scripts/gen_dynamic_list.py"
+  sources = [
+    "hwasan.syms.extra",
+  ]
+  deps = [
+    ":hwasan",
+    ":hwasan_cxx",
+  ]
+  outputs = [
+    "$target_gen_dir/hwasan.vers",
+  ]
+  args = [
+    "--version-list",
+    "--extra",
+    rebase_path(sources[0], root_build_dir),
+    rebase_path("$crt_current_out_dir/libclang_rt.hwasan-$crt_current_target.a",
+                root_build_dir),
+    rebase_path(
+        "$crt_current_out_dir/libclang_rt.hwasan_cxx-$crt_current_target.a",
+        root_build_dir),
+    "-o",
+    rebase_path(outputs[0], root_build_dir),
+  ]
+}
+
+source_set("sources") {
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  defines = [ "HWASAN_WITH_INTERCEPTORS=1" ]
+  deps = [
+    "//compiler-rt/lib/interception:sources",
+    "//compiler-rt/lib/sanitizer_common:sources",
+    "//compiler-rt/lib/ubsan:sources",
+  ]
+  sources = [
+    "hwasan.cc",
+    "hwasan_allocator.cc",
+    "hwasan_dynamic_shadow.cc",
+    "hwasan_interceptors.cc",
+    "hwasan_linux.cc",
+    "hwasan_memintrinsics.cc",
+    "hwasan_poisoning.cc",
+    "hwasan_report.cc",
+    "hwasan_thread.cc",
+    "hwasan_thread_list.cc",
+  ]
+}
+
+source_set("cxx_sources") {
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  deps = [
+    "//compiler-rt/lib/ubsan:cxx_sources",
+  ]
+  sources = [
+    "hwasan_new_delete.cc",
+  ]
+}
+
+static_library("hwasan") {
+  output_dir = crt_current_out_dir
+  output_name = "clang_rt.hwasan-$crt_current_target"
+  complete_static_lib = true
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  deps = [
+    ":sources",
+  ]
+}
+
+static_library("hwasan_cxx") {
+  output_dir = crt_current_out_dir
+  output_name = "clang_rt.hwasan_cxx-$crt_current_target"
+  complete_static_lib = true
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  deps = [
+    ":cxx_sources",
+  ]
+}
+
+shared_library("hwasan_shared") {
+  output_dir = crt_current_out_dir
+  output_name = "clang_rt.hwasan-$crt_current_target"
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  deps = [
+    ":cxx_sources",
+    ":sources",
+    ":version_script",
+  ]
+  inputs = [
+    "$target_gen_dir/hwasan.vers",
+  ]
+  ldflags = [
+    "-Wl,--version-script," + rebase_path(inputs[0], root_build_dir),
+    "-Wl,-z,global",
+  ]
+}
diff --git a/utils/gn/secondary/compiler-rt/lib/interception/BUILD.gn b/utils/gn/secondary/compiler-rt/lib/interception/BUILD.gn
new file mode 100644
index 0000000..4276e2e
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/lib/interception/BUILD.gn
@@ -0,0 +1,13 @@
+source_set("sources") {
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  deps = [
+    "//compiler-rt/lib/sanitizer_common:sources",
+  ]
+  sources = [
+    "interception_linux.cc",
+    "interception_mac.cc",
+    "interception_type_test.cc",
+    "interception_win.cc",
+  ]
+}
diff --git a/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn b/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
new file mode 100644
index 0000000..7d280bd
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
@@ -0,0 +1,83 @@
+source_set("sources") {
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  deps = [
+    "//llvm/utils/gn/build/libs/pthread",
+  ]
+  libs = []
+  if (current_os == "linux" || current_os == "android") {
+    libs += [ "dl" ]
+  }
+  if (current_os == "linux") {
+    libs += [ "rt" ]
+  }
+  sources = [
+    "sancov_flags.cc",
+    "sanitizer_allocator.cc",
+    "sanitizer_allocator_checks.cc",
+    "sanitizer_allocator_report.cc",
+    "sanitizer_common.cc",
+    "sanitizer_common_libcdep.cc",
+    "sanitizer_coverage_fuchsia.cc",
+    "sanitizer_coverage_libcdep_new.cc",
+    "sanitizer_coverage_win_sections.cc",
+    "sanitizer_deadlock_detector1.cc",
+    "sanitizer_deadlock_detector2.cc",
+    "sanitizer_errno.cc",
+    "sanitizer_file.cc",
+    "sanitizer_flag_parser.cc",
+    "sanitizer_flags.cc",
+    "sanitizer_fuchsia.cc",
+    "sanitizer_libc.cc",
+    "sanitizer_libignore.cc",
+    "sanitizer_linux.cc",
+    "sanitizer_linux_libcdep.cc",
+    "sanitizer_linux_s390.cc",
+    "sanitizer_mac.cc",
+    "sanitizer_mac_libcdep.cc",
+    "sanitizer_netbsd.cc",
+    "sanitizer_openbsd.cc",
+    "sanitizer_persistent_allocator.cc",
+    "sanitizer_platform_limits_freebsd.cc",
+    "sanitizer_platform_limits_linux.cc",
+    "sanitizer_platform_limits_netbsd.cc",
+    "sanitizer_platform_limits_openbsd.cc",
+    "sanitizer_platform_limits_posix.cc",
+    "sanitizer_platform_limits_solaris.cc",
+    "sanitizer_posix.cc",
+    "sanitizer_posix_libcdep.cc",
+    "sanitizer_printf.cc",
+    "sanitizer_procmaps_bsd.cc",
+    "sanitizer_procmaps_common.cc",
+    "sanitizer_procmaps_linux.cc",
+    "sanitizer_procmaps_mac.cc",
+    "sanitizer_procmaps_solaris.cc",
+    "sanitizer_rtems.cc",
+    "sanitizer_solaris.cc",
+    "sanitizer_stackdepot.cc",
+    "sanitizer_stacktrace.cc",
+    "sanitizer_stacktrace_libcdep.cc",
+    "sanitizer_stacktrace_printer.cc",
+    "sanitizer_stacktrace_sparc.cc",
+    "sanitizer_stoptheworld_linux_libcdep.cc",
+    "sanitizer_stoptheworld_mac.cc",
+    "sanitizer_suppressions.cc",
+    "sanitizer_symbolizer.cc",
+    "sanitizer_symbolizer_libbacktrace.cc",
+    "sanitizer_symbolizer_libcdep.cc",
+    "sanitizer_symbolizer_mac.cc",
+    "sanitizer_symbolizer_markup.cc",
+    "sanitizer_symbolizer_posix_libcdep.cc",
+    "sanitizer_symbolizer_report.cc",
+    "sanitizer_symbolizer_win.cc",
+    "sanitizer_termination.cc",
+    "sanitizer_thread_registry.cc",
+    "sanitizer_tls_get_addr.cc",
+    "sanitizer_unwind_linux_libcdep.cc",
+    "sanitizer_unwind_win.cc",
+    "sanitizer_win.cc",
+  ]
+  if (current_cpu == "x64") {
+    sources += [ "sanitizer_linux_x86_64.S" ]
+  }
+}
diff --git a/utils/gn/secondary/compiler-rt/lib/ubsan/BUILD.gn b/utils/gn/secondary/compiler-rt/lib/ubsan/BUILD.gn
new file mode 100644
index 0000000..7169816
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/lib/ubsan/BUILD.gn
@@ -0,0 +1,30 @@
+source_set("sources") {
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  defines = [ "UBSAN_CAN_USE_CXXABI" ]
+  deps = [
+    "//compiler-rt/lib/interception:sources",
+    "//compiler-rt/lib/sanitizer_common:sources",
+  ]
+  sources = [
+    "ubsan_diag.cc",
+    "ubsan_flags.cc",
+    "ubsan_handlers.cc",
+    "ubsan_init.cc",
+    "ubsan_monitor.cc",
+    "ubsan_value.cc",
+  ]
+}
+
+source_set("cxx_sources") {
+  configs -= [ "//llvm/utils/gn/build:llvm_code" ]
+  configs -= [ "//llvm/utils/gn/build:no_rtti" ]
+  configs += [ "//llvm/utils/gn/build:crt_code" ]
+  defines = [ "UBSAN_CAN_USE_CXXABI" ]
+  sources = [
+    "ubsan_handlers_cxx.cc",
+    "ubsan_type_hash.cc",
+    "ubsan_type_hash_itanium.cc",
+    "ubsan_type_hash_win.cc",
+  ]
+}
diff --git a/utils/gn/secondary/compiler-rt/target.gni b/utils/gn/secondary/compiler-rt/target.gni
new file mode 100644
index 0000000..7ea73b1
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/target.gni
@@ -0,0 +1,20 @@
+import("//clang/resource_dir.gni")
+
+if (current_os == "linux" || current_os == "android") {
+  crt_current_out_dir = "$clang_resource_dir/lib/linux"
+} else {
+  assert(false, "unimplemented current_os " + current_os)
+}
+
+if (current_cpu == "x64") {
+  crt_current_target_arch = "x86_64"
+} else if (current_cpu == "arm64") {
+  crt_current_target_arch = "aarch64"
+} else {
+  assert(false, "unimplemented current_cpu " + current_cpu)
+}
+
+crt_current_target = crt_current_target_arch
+if (current_os == "android") {
+  crt_current_target += "-android"
+}
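+
+# For illustration: a Linux x64 build gives crt_current_target "x86_64" (so
+# e.g. libclang_rt.hwasan-x86_64.a), and an Android arm64 build gives
+# "aarch64-android".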
diff --git a/utils/gn/secondary/compiler-rt/test/BUILD.gn b/utils/gn/secondary/compiler-rt/test/BUILD.gn
new file mode 100644
index 0000000..59c0f78
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/test/BUILD.gn
@@ -0,0 +1,79 @@
+import("//compiler-rt/target.gni")
+import("//compiler-rt/test/test.gni")
+import("//llvm/triples.gni")
+import("//llvm/utils/gn/build/toolchain/compiler.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("//llvm/version.gni")
+
+write_cmake_config("lit_common_configured") {
+  input = "lit.common.configured.in"
+  output = "$target_gen_dir/lit.common.configured"
+
+  values = [
+    "LIT_SITE_CFG_IN_HEADER=## Autogenerated from $input, do not edit",
+
+    "COMPILER_RT_DEFAULT_TARGET_TRIPLE=$llvm_current_triple",
+    "COMPILER_RT_DEFAULT_TARGET_ARCH=$crt_current_target_arch",
+
+    "COMPILER_RT_TEST_COMPILER_CFLAGS=$target_flags_string",
+    "LLVM_BUILD_MODE=.",
+    "LLVM_MAIN_SRC_DIR=" + rebase_path("//llvm"),
+    "LLVM_BINARY_DIR=" + rebase_path(root_build_dir),
+    "COMPILER_RT_SOURCE_DIR=" + rebase_path("//compiler-rt"),
+
+    # This is only used by tsan to find the path to an instrumented libc++.
+    # Since we don't currently support running the tsan tests, leave it empty
+    # for now. Eventually it should probably be replaced with some mechanism
+    # where the libraries are found in a toolchain dir.
+    "COMPILER_RT_BINARY_DIR=",
+
+    # We don't currently support the multiarch runtime layout.
+    "LLVM_ENABLE_PER_TARGET_RUNTIME_DIR_PYBOOL=False",
+
+    "LLVM_TOOLS_DIR=" + rebase_path("$root_build_dir/bin"),
+    "LLVM_LIBRARY_OUTPUT_INTDIR=" + rebase_path("$root_build_dir/lib"),
+
+    "GOLD_EXECUTABLE=ld",
+    "COMPILER_RT_RESOLVED_TEST_COMPILER=" +
+        rebase_path("$root_build_dir/bin/clang"),
+    "COMPILER_RT_TEST_COMPILER_ID=Clang",
+    "PYTHON_EXECUTABLE=$python_path",
+    "COMPILER_RT_DEBUG_PYBOOL=False",
+    "COMPILER_RT_RESOLVED_LIBRARY_OUTPUT_DIR=" +
+        rebase_path(crt_current_out_dir),
+    "COMPILER_RT_EMULATOR=",
+    "COMPILER_RT_ASAN_SHADOW_SCALE=",
+    "SANITIZER_CAN_USE_CXXABI_PYBOOL=True",
+    "COMPILER_RT_HAS_LLD_PYBOOL=True",
+    "HAVE_RPC_XDR_H=0",
+    "ANDROID_SERIAL_FOR_TESTING=$android_serial_for_testing",
+  ]
+
+  if (host_cpu == "x64") {
+    values += [ "HOST_ARCH=x86_64" ]
+  } else {
+    assert(false, "unimplemented host_cpu " + host_cpu)
+  }
+
+  if (host_os == "mac") {
+    values += [ "HOST_OS=Darwin" ]
+  } else if (host_os == "linux") {
+    values += [ "HOST_OS=Linux" ]
+  } else if (host_os == "win") {
+    values += [ "HOST_OS=Windows" ]
+  } else {
+    assert(false, "unsupported host_os " + host_os)
+  }
+
+  if (current_os != "win" || llvm_enable_dia_sdk) {
+    values += [ "CAN_SYMBOLIZE=1" ]
+  } else {
+    values += [ "CAN_SYMBOLIZE=0" ]
+  }
+
+  if (current_os == "android") {
+    values += [ "ANDROID_PYBOOL=True" ]
+  } else {
+    values += [ "ANDROID_PYBOOL=False" ]
+  }
+}
diff --git a/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn b/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn
new file mode 100644
index 0000000..ee65838
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn
@@ -0,0 +1,99 @@
+import("//compiler-rt/target.gni")
+import("//compiler-rt/test/test.gni")
+import("//llvm/utils/gn/build/toolchain/compiler.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("//llvm/version.gni")
+
+write_cmake_config("lit_site_cfg") {
+  input = "lit.site.cfg.in"
+  output = "$target_gen_dir/lit.site.cfg"
+
+  values = [
+    "LIT_SITE_CFG_IN_HEADER=## Autogenerated from $input, do not edit",
+
+    "HWASAN_TEST_CONFIG_SUFFIX=-$crt_current_target",
+    "HWASAN_TEST_TARGET_CFLAGS=$target_flags_string",
+    "HWASAN_TEST_TARGET_ARCH=$crt_current_target_arch",
+
+    "COMPILER_RT_BINARY_DIR=" + rebase_path("$root_gen_dir/compiler-rt"),
+    "HWASAN_LIT_SOURCE_DIR=" + rebase_path("."),
+  ]
+
+  if (current_os == "android") {
+    values += [ "HWASAN_ANDROID_FILES_TO_PUSH=[\"" + rebase_path(
+                    "$crt_current_out_dir/libclang_rt.hwasan-$crt_current_target.so") + "\", \"" + rebase_path(
+                    "$root_out_dir/bin/llvm-symbolizer") + "\"]" ]
+  } else {
+    values += [ "HWASAN_ANDROID_FILES_TO_PUSH=[]" ]
+  }
+}
+
+if (current_toolchain != host_toolchain) {
+  group("hwasan_toolchain") {
+    deps = [
+      ":lit_site_cfg",
+      "//compiler-rt/include($host_toolchain)",
+      "//compiler-rt/lib/cfi:blacklist($host_toolchain)",
+      "//compiler-rt/lib/hwasan:hwasan_shared",
+      "//compiler-rt/test:lit_common_configured",
+      "//llvm/utils/FileCheck($host_toolchain)",
+      "//llvm/utils/llvm-lit($host_toolchain)",
+      "//llvm/utils/not($host_toolchain)",
+    ]
+
+    # FIXME: Make the host use the stage2 llvm-symbolizer as well, for
+    # consistency. Currently lit.common.cfg sets up the sanitizer runtime to
+    # look for llvm-symbolizer in llvm_tools_dir, and also looks there for
+    # other tools which are built with the host toolchain.
+    if (current_os == host_os && current_cpu == host_cpu) {
+      deps += [ "//llvm/tools/llvm-symbolizer($host_toolchain)" ]
+    } else {
+      deps += [ "//llvm/tools/llvm-symbolizer" ]
+    }
+  }
+}
+
+supported_toolchains = []
+if (host_os == "linux" && host_cpu == "x64") {
+  supported_toolchains += [ "//llvm/utils/gn/build/toolchain:stage2_unix" ]
+}
+if (android_ndk_path != "") {
+  supported_toolchains +=
+      [ "//llvm/utils/gn/build/toolchain:stage2_android_aarch64" ]
+}
+
+group("hwasan") {
+  deps = []
+  foreach(toolchain, supported_toolchains) {
+    deps += [ ":hwasan_toolchain($toolchain)" ]
+  }
+}
+
+if (supported_toolchains != []) {
+  action("check-hwasan") {
+    script = "$root_build_dir/bin/llvm-lit"
+    if (host_os == "win") {
+      script += ".py"
+    }
+    args = [ "-sv" ]
+    foreach(toolchain, supported_toolchains) {
+      args += [ rebase_path(
+              get_label_info(":lit_site_cfg($toolchain)", "target_gen_dir"),
+              root_build_dir) ]
+    }
+    outputs = [
+      "$target_gen_dir/run-lit",  # Non-existing, so that ninja runs it each time.
+    ]
+
+    # Since check-hwasan is always dirty, //:default doesn't depend on it so that
+    # it's not part of the default ninja target.  Hence, check-hwasan shouldn't
+    # have any deps except :hwasan, so that the default target is sure to
+    # build all the deps.
+    deps = [
+      ":hwasan",
+    ]
+    testonly = true
+
+    pool = "//:console"
+  }
+}
diff --git a/utils/gn/secondary/compiler-rt/test/test.gni b/utils/gn/secondary/compiler-rt/test/test.gni
new file mode 100644
index 0000000..4144482
--- /dev/null
+++ b/utils/gn/secondary/compiler-rt/test/test.gni
@@ -0,0 +1,16 @@
+import("//llvm/utils/gn/build/toolchain/target_flags.gni")
+
+declare_args() {
+  # Specifies the serial number of the Android device to be used for testing.
+  android_serial_for_testing = ""
+}
+
+target_flags_string = ""
+
+foreach(flag,
+        target_flags + target_cflags + target_ldflags + [ "-fuse-ld=lld" ]) {
+  if (target_flags_string != "") {
+    target_flags_string += " "
+  }
+  target_flags_string += flag
+}
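+
+# For illustration: with target_flags = [ "--target=aarch64-linux-android" ]
+# (a hypothetical value) and empty target_cflags/target_ldflags, this yields
+# "--target=aarch64-linux-android -fuse-ld=lld".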
diff --git a/utils/gn/secondary/lld/ELF/BUILD.gn b/utils/gn/secondary/lld/ELF/BUILD.gn
index d5f8a89..8006ef1 100644
--- a/utils/gn/secondary/lld/ELF/BUILD.gn
+++ b/utils/gn/secondary/lld/ELF/BUILD.gn
@@ -29,6 +29,7 @@
     "Arch/ARM.cpp",
     "Arch/AVR.cpp",
     "Arch/Hexagon.cpp",
+    "Arch/MSP430.cpp",
     "Arch/Mips.cpp",
     "Arch/MipsArchTree.cpp",
     "Arch/PPC.cpp",
diff --git a/utils/gn/secondary/lld/include/lld/Common/BUILD.gn b/utils/gn/secondary/lld/include/lld/Common/BUILD.gn
index b45c04e..82a0923 100644
--- a/utils/gn/secondary/lld/include/lld/Common/BUILD.gn
+++ b/utils/gn/secondary/lld/include/lld/Common/BUILD.gn
@@ -1,20 +1,10 @@
+import("//llvm/utils/gn/build/write_cmake_config.gni")
 import("//llvm/version.gni")
 
-action("version") {
-  script = "//llvm/utils/gn/build/write_cmake_config.py"
-
-  sources = [
-    "Version.inc.in",
-  ]
-  outputs = [
-    "$target_gen_dir/Version.inc",
-  ]
-  args = [
-    "-o",
-    rebase_path(outputs[0], root_out_dir),
-
-    rebase_path(sources[0], root_out_dir),
-
+write_cmake_config("version") {
+  input = "Version.inc.in"
+  output = "$target_gen_dir/Version.inc"
+  values = [
     "LLD_VERSION=$llvm_version",
     "LLD_VERSION_MAJOR=$llvm_version_major",
     "LLD_VERSION_MINOR=$llvm_version_minor",
diff --git a/utils/gn/secondary/lld/test/BUILD.gn b/utils/gn/secondary/lld/test/BUILD.gn
new file mode 100644
index 0000000..6eb80c9
--- /dev/null
+++ b/utils/gn/secondary/lld/test/BUILD.gn
@@ -0,0 +1,126 @@
+import("//llvm/lib/DebugInfo/PDB/enable_dia.gni")
+import("//llvm/triples.gni")
+import("//llvm/utils/gn/build/libs/xml/enable.gni")
+import("//llvm/utils/gn/build/libs/zlib/enable.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("lld_lit_site_cfg_files.gni")
+
+# The bits common to writing lit.site.cfg.py and Unit/lit.site.cfg.py.
+template("write_lit_cfg") {
+  write_cmake_config(target_name) {
+    input = invoker.input
+    output = invoker.output
+    values = [
+      "LIT_SITE_CFG_IN_HEADER=## Autogenerated from $input, do not edit",
+      "LLD_BINARY_DIR=" +
+          rebase_path(get_label_info("//lld", "target_out_dir")),
+      "LLD_SOURCE_DIR=" + rebase_path("//lld"),
+      "LLVM_BINARY_DIR=" +
+          rebase_path(get_label_info("//llvm", "target_out_dir")),
+      "LLVM_LIBRARY_OUTPUT_INTDIR=",  # FIXME: for shared builds only (?)
+      "LLVM_LIBS_DIR=",  # needed only for shared builds
+      "LLVM_LIT_TOOLS_DIR=",  # Intentionally empty, matches cmake build.
+      "LLVM_RUNTIME_OUTPUT_INTDIR=" + rebase_path("$root_out_dir/bin"),
+      "LLVM_SOURCE_DIR=" + rebase_path("//llvm"),
+      "LLVM_TOOLS_DIR=" + rebase_path("$root_out_dir/bin"),
+      "PYTHON_EXECUTABLE=$python_path",
+      "TARGET_TRIPLE=$llvm_target_triple",
+    ]
+    values += invoker.extra_values
+  }
+}
+
+write_lit_cfg("lit_site_cfg") {
+  # Fully-qualified instead of relative for LIT_SITE_CFG_IN_HEADER.
+  input = "//lld/test/lit.site.cfg.py.in"
+  output = lld_lit_site_cfg_file
+
+  extra_values = []
+  if (llvm_enable_dia_sdk) {
+    extra_values += [ "LLVM_ENABLE_DIA_SDK=1" ]
+  } else {
+    extra_values += [ "LLVM_ENABLE_DIA_SDK=0" ]  # Must be 0.
+  }
+
+  if (llvm_enable_libxml2) {
+    extra_values += [ "LLVM_LIBXML2_ENABLED=1" ]
+  } else {
+    extra_values += [ "LLVM_LIBXML2_ENABLED=" ]  # Must be empty.
+  }
+
+  if (llvm_enable_zlib) {
+    extra_values += [ "HAVE_LIBZ=1" ]
+  } else {
+    extra_values += [ "HAVE_LIBZ=0" ]  # Must be 0.
+  }
+}
+
+write_lit_cfg("lit_unit_site_cfg") {
+  # Fully-qualified instead of relative for LIT_SITE_CFG_IN_HEADER.
+  input = "//lld/test/Unit/lit.site.cfg.py.in"
+  output = lld_lit_unit_site_cfg_file
+  extra_values = [ "LLVM_BUILD_MODE=." ]
+}
+
+# This target should contain all dependencies of check-lld.
+# //:default depends on it, so that ninja's default target builds all
+# prerequisites for check-lld but doesn't run check-lld itself.
+group("test") {
+  deps = [
+    ":lit_site_cfg",
+    ":lit_unit_site_cfg",
+    "//lld/tools/lld:symlinks",
+    "//lld/unittests",
+    "//llvm/tools/llc",
+    "//llvm/tools/llvm-ar:symlinks",
+    "//llvm/tools/llvm-as",
+    "//llvm/tools/llvm-bcanalyzer",
+    "//llvm/tools/llvm-dis",
+    "//llvm/tools/llvm-dwarfdump",
+    "//llvm/tools/llvm-mc",
+    "//llvm/tools/llvm-nm:symlinks",
+    "//llvm/tools/llvm-objcopy:symlinks",
+    "//llvm/tools/llvm-objdump:symlinks",
+    "//llvm/tools/llvm-pdbutil",
+    "//llvm/tools/llvm-readobj:symlinks",
+    "//llvm/tools/obj2yaml",
+    "//llvm/tools/opt",
+    "//llvm/tools/yaml2obj",
+    "//llvm/utils/FileCheck",
+    "//llvm/utils/count",
+    "//llvm/utils/llvm-lit",
+    "//llvm/utils/not",
+  ]
+  testonly = true
+}
+
+# This is the action that runs all of lld's tests, check-lld.
+action("check-lld") {
+  script = "$root_out_dir/bin/llvm-lit"
+  if (host_os == "win") {
+    script += ".py"
+  }
+  args = [
+    "-sv",
+    "--param",
+    "lld_site_config=" + rebase_path(lld_lit_site_cfg_file, root_out_dir),
+    "--param",
+    "lld_unit_site_config=" +
+        rebase_path(lld_lit_unit_site_cfg_file, root_out_dir),
+    rebase_path(".", root_out_dir),
+  ]
+  outputs = [
+    "$target_gen_dir/run-lit",  # Non-existing, so that ninja runs it each time.
+  ]
+
+  # Since check-lld is always dirty, //:default doesn't depend on it so that
+  # it's not part of the default ninja target.  Hence, check-lld shouldn't
+  # have any deps except :test, so that the default target is sure to build
+  # all the deps.
+  deps = [
+    ":test",
+  ]
+  testonly = true
+
+  pool = "//:console"
+}
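
Because the declared output is never actually written, ninja considers check-lld dirty on every build and re-runs the lit invocation each time it is requested. Assuming the conventional out/gn build directory from the GN workflow, the tests would be run with:

  ninja -C out/gn check-lld
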
diff --git a/utils/gn/secondary/lld/test/lld_lit_site_cfg_files.gni b/utils/gn/secondary/lld/test/lld_lit_site_cfg_files.gni
new file mode 100644
index 0000000..87c00b6
--- /dev/null
+++ b/utils/gn/secondary/lld/test/lld_lit_site_cfg_files.gni
@@ -0,0 +1,2 @@
+lld_lit_site_cfg_file = "$root_gen_dir/lld/test/lit.site.cfg.py"
+lld_lit_unit_site_cfg_file = "$root_gen_dir/lld/test/Unit/lit.site.cfg.py"
diff --git a/utils/gn/secondary/lld/unittests/BUILD.gn b/utils/gn/secondary/lld/unittests/BUILD.gn
new file mode 100644
index 0000000..275637c
--- /dev/null
+++ b/utils/gn/secondary/lld/unittests/BUILD.gn
@@ -0,0 +1,7 @@
+group("unittests") {
+  deps = [
+    "DriverTests",
+    "MachOTests",
+  ]
+  testonly = true
+}
diff --git a/utils/gn/secondary/lld/unittests/DriverTests/BUILD.gn b/utils/gn/secondary/lld/unittests/DriverTests/BUILD.gn
new file mode 100644
index 0000000..b683c35
--- /dev/null
+++ b/utils/gn/secondary/lld/unittests/DriverTests/BUILD.gn
@@ -0,0 +1,12 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("DriverTests") {
+  configs += [ "//llvm/utils/gn/build:lld_code" ]
+  deps = [
+    "//lld/lib/Driver",
+    "//lld/lib/ReaderWriter/MachO",
+  ]
+  sources = [
+    "DarwinLdDriverTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/lld/unittests/MachOTests/BUILD.gn b/utils/gn/secondary/lld/unittests/MachOTests/BUILD.gn
new file mode 100644
index 0000000..90d0a61
--- /dev/null
+++ b/utils/gn/secondary/lld/unittests/MachOTests/BUILD.gn
@@ -0,0 +1,16 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("MachOTests") {
+  configs += [ "//llvm/utils/gn/build:lld_code" ]
+  deps = [
+    "//lld/lib/Driver",
+    "//lld/lib/ReaderWriter/MachO",
+    "//lld/lib/ReaderWriter/YAML",
+  ]
+  sources = [
+    "MachONormalizedFileBinaryReaderTests.cpp",
+    "MachONormalizedFileBinaryWriterTests.cpp",
+    "MachONormalizedFileToAtomsTests.cpp",
+    "MachONormalizedFileYAMLTests.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn b/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn
index fe86961..68ee10c 100644
--- a/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn
+++ b/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn
@@ -1,10 +1,13 @@
 import("//llvm/lib/Target/targets.gni")
 import("//llvm/triples.gni")
 import("//llvm/utils/gn/build/buildflags.gni")
+import("//llvm/utils/gn/build/libs/edit/enable.gni")
 import("//llvm/utils/gn/build/libs/pthread/enable.gni")
 import("//llvm/utils/gn/build/libs/terminfo/enable.gni")
+import("//llvm/utils/gn/build/libs/xar/enable.gni")
 import("//llvm/utils/gn/build/libs/xml/enable.gni")
 import("//llvm/utils/gn/build/libs/zlib/enable.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
 import("//llvm/version.gni")
 
 # Contains actions to create config.h, llvm-config.h, abi-breaking.h,
@@ -41,53 +44,32 @@
   llvm_enable_reverse_iteration = false
 }
 
-action("abi-breaking") {
-  script = "//llvm/utils/gn/build/write_cmake_config.py"
-
-  sources = [
-    "abi-breaking.h.cmake",
-  ]
-  outputs = [
-    "$target_gen_dir/abi-breaking.h",
-  ]
-  args = [
-    "-o",
-    rebase_path(outputs[0], root_out_dir),
-
-    rebase_path(sources[0], root_out_dir),
-  ]
+write_cmake_config("abi-breaking") {
+  input = "abi-breaking.h.cmake"
+  output = "$target_gen_dir/abi-breaking.h"
+  values = []
 
   if (llvm_enable_abi_breaking_checks) {
-    args += [ "LLVM_ENABLE_ABI_BREAKING_CHECKS=1" ]
+    values += [ "LLVM_ENABLE_ABI_BREAKING_CHECKS=1" ]
   } else {
-    args += [ "LLVM_ENABLE_ABI_BREAKING_CHECKS=" ]
+    values += [ "LLVM_ENABLE_ABI_BREAKING_CHECKS=" ]
   }
 
   if (llvm_enable_reverse_iteration) {
-    args += [ "LLVM_ENABLE_REVERSE_ITERATION=1" ]
+    values += [ "LLVM_ENABLE_REVERSE_ITERATION=1" ]
   } else {
-    args += [ "LLVM_ENABLE_REVERSE_ITERATION=" ]
+    values += [ "LLVM_ENABLE_REVERSE_ITERATION=" ]
   }
 }
 
-action("config") {
-  script = "//llvm/utils/gn/build/write_cmake_config.py"
-
+write_cmake_config("config") {
   public_deps = [
     ":llvm-config",
   ]
 
-  sources = [
-    "config.h.cmake",
-  ]
-  outputs = [
-    "$target_gen_dir/config.h",
-  ]
-  args = [
-    "-o",
-    rebase_path(outputs[0], root_out_dir),
-    rebase_path(sources[0], root_out_dir),
-
+  input = "config.h.cmake"
+  output = "$target_gen_dir/config.h"
+  values = [
     "BUG_REPORT_URL=https://bugs.llvm.org/",
     "ENABLE_BACKTRACES=1",
     "ENABLE_CRASH_OVERRIDES=1",
@@ -110,6 +92,7 @@
     "HAVE_STRERROR=1",
     "HAVE_SYS_STAT_H=1",
     "HAVE_SYS_TYPES_H=1",
+    "HAVE_VALGRIND_VALGRIND_H=",
     "HAVE__ALLOCA=",
     "HAVE___ALLOCA=",
     "HAVE___ASHLDI3=",
@@ -146,8 +129,8 @@
     "LLVM_DEFAULT_TARGET_TRIPLE=$llvm_target_triple",
   ]
 
-  if (host_os == "linux") {
-    args += [
+  if (current_os == "linux" || current_os == "android") {
+    values += [
       "HAVE_FUTIMENS=1",
       "HAVE_LINK_H=1",
       "HAVE_LSEEK64=1",
@@ -156,10 +139,9 @@
       "HAVE_SCHED_GETAFFINITY=1",
       "HAVE_CPU_COUNT=1",
       "HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC=1",
-      "HAVE_VALGRIND_VALGRIND_H=1",
     ]
   } else {
-    args += [
+    values += [
       "HAVE_FUTIMENS=",
       "HAVE_LINK_H=",
       "HAVE_LSEEK64=",
@@ -168,41 +150,49 @@
       "HAVE_SCHED_GETAFFINITY=",
       "HAVE_CPU_COUNT=",
       "HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC=",
-      "HAVE_VALGRIND_VALGRIND_H=",
     ]
   }
 
-  if (host_os == "mac") {
-    args += [
+  if (current_os == "mac") {
+    values += [
       "HAVE_CRASHREPORTER_INFO=1",
       "HAVE_DECL_ARC4RANDOM=1",
       "HAVE_DLADDR=1",
-      "HAVE_LIBEDIT=1",
       "HAVE_MALLOC_H=",
       "HAVE_MACH_MACH_H=1",
       "HAVE_MALLOC_MALLOC_H=1",
       "HAVE_MALLOC_ZONE_STATISTICS=1",
-      "HAVE_LIBXAR=1",
       "HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC=1",
     ]
   } else {
-    args += [
+    values += [
       "HAVE_CRASHREPORTER_INFO=",
       "HAVE_DECL_ARC4RANDOM=",
       "HAVE_DLADDR=",
-      "HAVE_LIBEDIT=",
       "HAVE_MACH_MACH_H=",
       "HAVE_MALLOC_H=1",
       "HAVE_MALLOC_MALLOC_H=",
       "HAVE_MALLOC_ZONE_STATISTICS=",
-      "HAVE_LIBXAR=",
       "HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC=",
     ]
   }
 
-  if (host_os == "win") {
-    args += [
+  if (current_os == "linux" || current_os == "mac") {
+    values += [
+      "HAVE_BACKTRACE=1",
+      "HAVE_POSIX_SPAWN=1",
+      "HAVE_PTHREAD_GETNAME_NP=1",
+    ]
+  } else {
+    values += [
       "HAVE_BACKTRACE=",
+      "HAVE_POSIX_SPAWN=",
+      "HAVE_PTHREAD_GETNAME_NP=",
+    ]
+  }
+
+  if (current_os == "win") {
+    values += [
       "HAVE_DECL_STRERROR_S=1",
       "HAVE_DLFCN_H=",
       "HAVE_DLOPEN=",
@@ -212,10 +202,8 @@
       "HAVE_GETRUSAGE=",
       "HAVE_ISATTY=",
       "HAVE_LIBPTHREAD=",
-      "HAVE_PTHREAD_GETNAME_NP=",
       "HAVE_PTHREAD_SETNAME_NP=",
       "HAVE_LIBZ=",
-      "HAVE_POSIX_SPAWN=",
       "HAVE_PREAD=",
       "HAVE_PTHREAD_GETSPECIFIC=",
       "HAVE_PTHREAD_H=",
@@ -243,8 +231,7 @@
     ]
   } else {
     # POSIX-y system defaults.
-    args += [
-      "HAVE_BACKTRACE=1",
+    values += [
       "HAVE_DECL_STRERROR_S=",
       "HAVE_DLFCN_H=1",
       "HAVE_DLOPEN=1",
@@ -254,10 +241,8 @@
       "HAVE_GETRUSAGE=1",
       "HAVE_ISATTY=1",
       "HAVE_LIBPTHREAD=1",
-      "HAVE_PTHREAD_GETNAME_NP=1",
       "HAVE_PTHREAD_SETNAME_NP=1",
       "HAVE_LIBZ=1",
-      "HAVE_POSIX_SPAWN=1",
       "HAVE_PREAD=1",
       "HAVE_PTHREAD_GETSPECIFIC=1",
       "HAVE_PTHREAD_H=1",
@@ -285,52 +270,54 @@
     ]
   }
 
-  if (host_os == "linux") {
-    args += [ "LTDL_SHLIB_EXT=.so" ]
-  } else if (host_os == "mac") {
-    args += [ "LTDL_SHLIB_EXT=.dylib" ]
-  } else if (host_os == "win") {
-    args += [ "LTDL_SHLIB_EXT=.dll" ]
+  if (current_os == "linux" || current_os == "android") {
+    values += [ "LTDL_SHLIB_EXT=.so" ]
+  } else if (current_os == "mac") {
+    values += [ "LTDL_SHLIB_EXT=.dylib" ]
+  } else if (current_os == "win") {
+    values += [ "LTDL_SHLIB_EXT=.dll" ]
+  }
+
+  if (llvm_enable_libedit) {
+    values += [ "HAVE_LIBEDIT=1" ]
+  } else {
+    values += [ "HAVE_LIBEDIT=" ]
+  }
+
+  if (llvm_enable_libxar) {
+    values += [ "HAVE_LIBXAR=1" ]
+  } else {
+    values += [ "HAVE_LIBXAR=" ]
   }
 
   if (llvm_enable_terminfo) {
-    args += [ "HAVE_TERMINFO=1" ]
+    values += [ "HAVE_TERMINFO=1" ]
   } else {
-    args += [ "HAVE_TERMINFO=" ]
+    values += [ "HAVE_TERMINFO=" ]
   }
 
   if (llvm_enable_zlib) {
-    args += [ "LLVM_ENABLE_ZLIB=1" ]
+    values += [ "LLVM_ENABLE_ZLIB=1" ]
   } else {
-    args += [ "LLVM_ENABLE_ZLIB=" ]
+    values += [ "LLVM_ENABLE_ZLIB=" ]
   }
 
   if (llvm_enable_libxml2) {
-    args += [ "LLVM_LIBXML2_ENABLED=1" ]
+    values += [ "LLVM_LIBXML2_ENABLED=1" ]
   } else {
-    args += [ "LLVM_LIBXML2_ENABLED=" ]
+    values += [ "LLVM_LIBXML2_ENABLED=" ]
   }
 }
 
-action("llvm-config") {
-  script = "//llvm/utils/gn/build/write_cmake_config.py"
-
-  sources = [
-    "llvm-config.h.cmake",
-  ]
-  outputs = [
-    "$target_gen_dir/llvm-config.h",
-  ]
-  args = [
-    "-o",
-    rebase_path(outputs[0], root_out_dir),
-    rebase_path(sources[0], root_out_dir),
-
+write_cmake_config("llvm-config") {
+  input = "llvm-config.h.cmake"
+  output = "$target_gen_dir/llvm-config.h"
+  values = [
     "LLVM_ENABLE_DUMP=",
     "LINK_POLLY_INTO_TOOLS=",
     "LLVM_DEFAULT_TARGET_TRIPLE=$llvm_target_triple",
     "LLVM_HAS_ATOMICS=1",
-    "LLVM_HOST_TRIPLE=$llvm_host_triple",
+    "LLVM_HOST_TRIPLE=$llvm_current_triple",
     "LLVM_NATIVE_ARCH=$native_target",
     "LLVM_NATIVE_ASMPARSER=1",
     "LLVM_NATIVE_ASMPRINTER=1",
@@ -348,16 +335,16 @@
     "LLVM_FORCE_ENABLE_STATS=",
   ]
 
-  if (host_os == "win") {
-    args += [ "LLVM_ON_UNIX=" ]
+  if (current_os == "win") {
+    values += [ "LLVM_ON_UNIX=" ]
   } else {
-    args += [ "LLVM_ON_UNIX=1" ]
+    values += [ "LLVM_ON_UNIX=1" ]
   }
 
   if (llvm_enable_threads) {
-    args += [ "LLVM_ENABLE_THREADS=1" ]
+    values += [ "LLVM_ENABLE_THREADS=1" ]
   } else {
-    args += [ "LLVM_ENABLE_THREADS=" ]
+    values += [ "LLVM_ENABLE_THREADS=" ]
   }
 }
 
@@ -365,19 +352,13 @@
 # .def files used by llvm/lib/Target
 
 template("write_target_def_file") {
-  assert(defined(invoker.key), "callers must set key")
-  assert(defined(invoker.value), "callers must set value")
+  assert(defined(invoker.key), "must set 'key' in $target_name")
+  assert(defined(invoker.value), "must set 'value' in $target_name")
 
-  action(target_name) {
+  write_cmake_config(target_name) {
     visibility = [ ":write_target_def_files" ]
-    script = "//llvm/utils/gn/build/write_cmake_config.py"
-
-    sources = [
-      "$target_name.in",
-    ]
-    outputs = [
-      "$target_gen_dir/$target_name",
-    ]
+    input = "$target_name.in"
+    output = "$target_gen_dir/$target_name"
 
     # Build something like
     # `LLVM_ENUM_ASM_PARSERS=LLVM_ASM_PARSER(ARM)\nLLVM_ASM_PARSER(X86)\n`. Note
@@ -387,12 +368,7 @@
     foreach(target, llvm_targets_to_build) {
       value = "$value${invoker.value}($target)\n"
     }
-    args = [
-      "-o",
-      rebase_path(outputs[0], root_out_dir),
-      rebase_path(sources[0], root_out_dir),
-      "${invoker.key}=$value",
-    ]
+    values = [ "${invoker.key}=$value" ]
   }
 }
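
Call sites of this template (outside the hunk shown) then reduce to something like the following sketch; with llvm_targets_to_build = [ "AArch64", "X86" ], the loop above expands the value to LLVM_ASM_PARSER(AArch64)\nLLVM_ASM_PARSER(X86)\n:

# Sketch of an invocation; the target name follows the pattern the
# template expects ($target_name.in as input).
write_target_def_file("AsmParsers.def") {
  key = "LLVM_ENUM_ASM_PARSERS"
  value = "LLVM_ASM_PARSER"
}
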
 
diff --git a/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn b/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn
index cc73446..c9165c3 100644
--- a/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn
+++ b/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn
@@ -1,7 +1,7 @@
 import("//llvm/utils/TableGen/tablegen.gni")
 
 tablegen("IntrinsicEnums") {
-  visibility = [ "//llvm/lib/IR" ]
+  visibility = [ ":public_tablegen" ]
   args = [ "-gen-intrinsic-enums" ]
   td_file = "Intrinsics.td"
 }
@@ -13,6 +13,20 @@
 }
 
 tablegen("Attributes") {
-  visibility = [ "//llvm/lib/IR" ]
+  visibility = [ ":public_tablegen" ]
   args = [ "-gen-attrs" ]
 }
+
+# Groups all tablegen() calls that create .inc files that are included in
+# IR's public headers.  //llvm/lib/IR has this as a public_dep, so targets
+# depending on //llvm/lib/IR don't need to depend on this.  This exists
+# solely for targets that use IR's public headers but don't link against IR.
+group("public_tablegen") {
+  public_deps = [
+    # IR's public headers include Attributes.inc.
+    ":Attributes",
+
+    # IR's public headers include IntrinsicEnums.inc.
+    ":IntrinsicEnums",
+  ]
+}
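
As a usage note, a target that includes IR's public headers without linking against //llvm/lib/IR would depend on the group directly. A hypothetical sketch (target and source names made up):

static_library("IRHeaderUser") {
  # Gets Attributes.inc and IntrinsicEnums.inc generated before compiling,
  # without pulling in the LLVMCore library.
  deps = [ "//llvm/include/llvm/IR:public_tablegen" ]
  sources = [ "IRHeaderUser.cpp" ]
}
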
diff --git a/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn b/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn
index 7147c82..83628a8 100644
--- a/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn
+++ b/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn
@@ -12,9 +12,12 @@
     "//llvm/lib/Transforms/Utils",
   ]
   sources = [
+    "CSEMIRBuilder.cpp",
+    "CSEInfo.cpp",
     "CallLowering.cpp",
     "Combiner.cpp",
     "CombinerHelper.cpp",
+    "GISelChangeObserver.cpp",
     "GlobalISel.cpp",
     "IRTranslator.cpp",
     "InstructionSelect.cpp",
diff --git a/utils/gn/secondary/llvm/lib/DebugInfo/Symbolize/BUILD.gn b/utils/gn/secondary/llvm/lib/DebugInfo/Symbolize/BUILD.gn
new file mode 100644
index 0000000..ecd59c3
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/DebugInfo/Symbolize/BUILD.gn
@@ -0,0 +1,16 @@
+static_library("Symbolize") {
+  output_name = "LLVMSymbolize"
+  deps = [
+    "//llvm/include/llvm/Config:config",
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/DebugInfo/PDB",
+    "//llvm/lib/Demangle",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "DIPrinter.cpp",
+    "SymbolizableObjectFile.cpp",
+    "Symbolize.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ExecutionEngine/BUILD.gn b/utils/gn/secondary/llvm/lib/ExecutionEngine/BUILD.gn
new file mode 100644
index 0000000..ac73dd4
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ExecutionEngine/BUILD.gn
@@ -0,0 +1,21 @@
+static_library("ExecutionEngine") {
+  output_name = "LLVMExecutionEngine"
+  public_deps = [
+    # Must be a public_dep because ExecutionEngine's headers include
+    # llvm-config.h.
+    "//llvm/include/llvm/Config:llvm-config",
+  ]
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Target",
+  ]
+  sources = [
+    "ExecutionEngine.cpp",
+    "ExecutionEngineBindings.cpp",
+    "GDBRegistrationListener.cpp",
+    "SectionMemoryManager.cpp",
+    "TargetSelect.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ExecutionEngine/Interpreter/BUILD.gn b/utils/gn/secondary/llvm/lib/ExecutionEngine/Interpreter/BUILD.gn
new file mode 100644
index 0000000..81192ca
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ExecutionEngine/Interpreter/BUILD.gn
@@ -0,0 +1,15 @@
+static_library("Interpreter") {
+  output_name = "LLVMInterpreter"
+  deps = [
+    "//llvm/include/llvm/Config:config",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "Execution.cpp",
+    "ExternalFunctions.cpp",
+    "Interpreter.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ExecutionEngine/MCJIT/BUILD.gn b/utils/gn/secondary/llvm/lib/ExecutionEngine/MCJIT/BUILD.gn
new file mode 100644
index 0000000..9d686ce
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ExecutionEngine/MCJIT/BUILD.gn
@@ -0,0 +1,14 @@
+static_library("MCJIT") {
+  output_name = "LLVMMCJIT"
+  deps = [
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+  ]
+  sources = [
+    "MCJIT.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/BUILD.gn b/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/BUILD.gn
new file mode 100644
index 0000000..19c68f0
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/BUILD.gn
@@ -0,0 +1,33 @@
+static_library("Orc") {
+  output_name = "LLVMOrcJIT"
+  deps = [
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "CompileOnDemandLayer.cpp",
+    "Core.cpp",
+    "ExecutionUtils.cpp",
+    "IRCompileLayer.cpp",
+    "IRTransformLayer.cpp",
+    "IndirectionUtils.cpp",
+    "JITTargetMachineBuilder.cpp",
+    "LLJIT.cpp",
+    "Layer.cpp",
+    "LazyReexports.cpp",
+    "Legacy.cpp",
+    "NullResolver.cpp",
+    "ObjectTransformLayer.cpp",
+    "OrcABISupport.cpp",
+    "OrcCBindings.cpp",
+    "OrcError.cpp",
+    "OrcMCJITReplacement.cpp",
+    "RPCUtils.cpp",
+    "RTDyldObjectLinkingLayer.cpp",
+    "ThreadSafeModule.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ExecutionEngine/RuntimeDyld/BUILD.gn b/utils/gn/secondary/llvm/lib/ExecutionEngine/RuntimeDyld/BUILD.gn
new file mode 100644
index 0000000..75deb06
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ExecutionEngine/RuntimeDyld/BUILD.gn
@@ -0,0 +1,19 @@
+static_library("RuntimeDyld") {
+  output_name = "LLVMRuntimeDyld"
+  deps = [
+    "//llvm/include/llvm/Config:config",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "JITSymbol.cpp",
+    "RTDyldMemoryManager.cpp",
+    "RuntimeDyld.cpp",
+    "RuntimeDyldCOFF.cpp",
+    "RuntimeDyldChecker.cpp",
+    "RuntimeDyldELF.cpp",
+    "RuntimeDyldMachO.cpp",
+    "Targets/RuntimeDyldELFMips.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/FuzzMutate/BUILD.gn b/utils/gn/secondary/llvm/lib/FuzzMutate/BUILD.gn
new file mode 100644
index 0000000..57a6b57
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/FuzzMutate/BUILD.gn
@@ -0,0 +1,19 @@
+static_library("FuzzMutate") {
+  output_name = "LLVMFuzzMutate"
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Transforms/Scalar",
+  ]
+  sources = [
+    "FuzzerCLI.cpp",
+    "IRMutator.cpp",
+    "OpDescriptor.cpp",
+    "Operations.cpp",
+    "RandomIRBuilder.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/IR/BUILD.gn b/utils/gn/secondary/llvm/lib/IR/BUILD.gn
index 35e3db9..6d4945f 100644
--- a/utils/gn/secondary/llvm/lib/IR/BUILD.gn
+++ b/utils/gn/secondary/llvm/lib/IR/BUILD.gn
@@ -10,12 +10,7 @@
   public_deps = [
     # Must be public_dep because IR's public headers include llvm-config.h.
     "//llvm/include/llvm/Config:llvm-config",
-
-    # Must be public_dep because IR's public headers include Attributes.inc.
-    "//llvm/include/llvm/IR:Attributes",
-
-    # Must be public_dep because IR's public headers include IntrinsicEnums.inc.
-    "//llvm/include/llvm/IR:IntrinsicEnums",
+    "//llvm/include/llvm/IR:public_tablegen",
   ]
   deps = [
     ":AttributesCompatFunc",
diff --git a/utils/gn/secondary/llvm/lib/LineEditor/BUILD.gn b/utils/gn/secondary/llvm/lib/LineEditor/BUILD.gn
new file mode 100644
index 0000000..b8f6185
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/LineEditor/BUILD.gn
@@ -0,0 +1,12 @@
+static_library("LineEditor") {
+  output_name = "LLVMLineEditor"
+  deps = [
+    "//llvm/include/llvm/Config:config",
+    "//llvm/lib/Support",
+    "//llvm/utils/gn/build/libs/edit",
+  ]
+
+  sources = [
+    "LineEditor.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/MCA/BUILD.gn b/utils/gn/secondary/llvm/lib/MCA/BUILD.gn
new file mode 100644
index 0000000..9752ca8
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/MCA/BUILD.gn
@@ -0,0 +1,29 @@
+static_library("MCA") {
+  output_name = "LLVMMCA"
+  deps = [
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+  ]
+  include_dirs = [ "../include" ]
+  sources = [
+    "Context.cpp",
+    "HWEventListener.cpp",
+    "HardwareUnits/HardwareUnit.cpp",
+    "HardwareUnits/LSUnit.cpp",
+    "HardwareUnits/RegisterFile.cpp",
+    "HardwareUnits/ResourceManager.cpp",
+    "HardwareUnits/RetireControlUnit.cpp",
+    "HardwareUnits/Scheduler.cpp",
+    "InstrBuilder.cpp",
+    "Instruction.cpp",
+    "Pipeline.cpp",
+    "Stages/DispatchStage.cpp",
+    "Stages/EntryStage.cpp",
+    "Stages/ExecuteStage.cpp",
+    "Stages/InstructionTables.cpp",
+    "Stages/RetireStage.cpp",
+    "Stages/Stage.cpp",
+    "Support.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ObjectYAML/BUILD.gn b/utils/gn/secondary/llvm/lib/ObjectYAML/BUILD.gn
new file mode 100644
index 0000000..27367ff
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ObjectYAML/BUILD.gn
@@ -0,0 +1,22 @@
+static_library("ObjectYAML") {
+  output_name = "LLVMObjectYAML"
+  deps = [
+    "//llvm/lib/DebugInfo/CodeView",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "COFFYAML.cpp",
+    "CodeViewYAMLDebugSections.cpp",
+    "CodeViewYAMLSymbols.cpp",
+    "CodeViewYAMLTypeHashing.cpp",
+    "CodeViewYAMLTypes.cpp",
+    "DWARFEmitter.cpp",
+    "DWARFVisitor.cpp",
+    "DWARFYAML.cpp",
+    "ELFYAML.cpp",
+    "MachOYAML.cpp",
+    "ObjectYAML.cpp",
+    "WasmYAML.cpp",
+    "YAML.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/OptRemarks/BUILD.gn b/utils/gn/secondary/llvm/lib/OptRemarks/BUILD.gn
new file mode 100644
index 0000000..cd80b85
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/OptRemarks/BUILD.gn
@@ -0,0 +1,10 @@
+static_library("OptRemarks") {
+  output_name = "LLVMOptRemarks"
+  deps = [
+    "//llvm/lib/Support",
+  ]
+
+  sources = [
+    "OptRemarksParser.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ProfileData/Coverage/BUILD.gn b/utils/gn/secondary/llvm/lib/ProfileData/Coverage/BUILD.gn
new file mode 100644
index 0000000..bd72861
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ProfileData/Coverage/BUILD.gn
@@ -0,0 +1,14 @@
+static_library("Coverage") {
+  output_name = "LLVMCoverage"
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/ProfileData",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CoverageMapping.cpp",
+    "CoverageMappingReader.cpp",
+    "CoverageMappingWriter.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Support/BUILD.gn b/utils/gn/secondary/llvm/lib/Support/BUILD.gn
index 84ae80f..e17f2a3 100644
--- a/utils/gn/secondary/llvm/lib/Support/BUILD.gn
+++ b/utils/gn/secondary/llvm/lib/Support/BUILD.gn
@@ -155,7 +155,7 @@
 
   libs = []
 
-  if (host_os == "linux") {
+  if (current_os == "linux" || current_os == "android") {
     libs += [ "dl" ]
   }
 }
diff --git a/utils/gn/secondary/llvm/lib/Target/AArch64/AsmParser/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/AArch64/AsmParser/BUILD.gn
new file mode 100644
index 0000000..54a12d2
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/AArch64/AsmParser/BUILD.gn
@@ -0,0 +1,24 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("AArch64GenAsmMatcher") {
+  visibility = [ ":AsmParser" ]
+  args = [ "-gen-asm-matcher" ]
+  td_file = "../AArch64.td"
+}
+
+static_library("AsmParser") {
+  output_name = "LLVMAArch64AsmParser"
+  deps = [
+    ":AArch64GenAsmMatcher",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/AArch64/MCTargetDesc",
+    "//llvm/lib/Target/AArch64/TargetInfo",
+    "//llvm/lib/Target/AArch64/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "AArch64AsmParser.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn
new file mode 100644
index 0000000..4f80358
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn
@@ -0,0 +1,122 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("AArch64GenCallingConv") {
+  visibility = [ ":LLVMAArch64CodeGen" ]
+  args = [ "-gen-callingconv" ]
+  td_file = "AArch64.td"
+}
+
+tablegen("AArch64GenDAGISel") {
+  visibility = [ ":LLVMAArch64CodeGen" ]
+  args = [ "-gen-dag-isel" ]
+  td_file = "AArch64.td"
+}
+
+tablegen("AArch64GenFastISel") {
+  visibility = [ ":LLVMAArch64CodeGen" ]
+  args = [ "-gen-fast-isel" ]
+  td_file = "AArch64.td"
+}
+
+tablegen("AArch64GenGlobalISel") {
+  visibility = [ ":LLVMAArch64CodeGen" ]
+  args = [ "-gen-global-isel" ]
+  td_file = "AArch64.td"
+}
+
+tablegen("AArch64GenMCPseudoLowering") {
+  visibility = [ ":LLVMAArch64CodeGen" ]
+  args = [ "-gen-pseudo-lowering" ]
+  td_file = "AArch64.td"
+}
+
+tablegen("AArch64GenRegisterBank") {
+  visibility = [ ":LLVMAArch64CodeGen" ]
+  args = [ "-gen-register-bank" ]
+  td_file = "AArch64.td"
+}
+
+static_library("LLVMAArch64CodeGen") {
+  deps = [
+    ":AArch64GenCallingConv",
+    ":AArch64GenDAGISel",
+    ":AArch64GenFastISel",
+    ":AArch64GenGlobalISel",
+    ":AArch64GenMCPseudoLowering",
+    ":AArch64GenRegisterBank",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+    "Utils",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/CodeGen/GlobalISel",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Transforms/Scalar",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "AArch64A53Fix835769.cpp",
+    "AArch64A57FPLoadBalancing.cpp",
+    "AArch64AdvSIMDScalarPass.cpp",
+    "AArch64AsmPrinter.cpp",
+    "AArch64BranchTargets.cpp",
+    "AArch64CallLowering.cpp",
+    "AArch64CleanupLocalDynamicTLSPass.cpp",
+    "AArch64CollectLOH.cpp",
+    "AArch64CompressJumpTables.cpp",
+    "AArch64CondBrTuning.cpp",
+    "AArch64ConditionOptimizer.cpp",
+    "AArch64ConditionalCompares.cpp",
+    "AArch64DeadRegisterDefinitionsPass.cpp",
+    "AArch64ExpandPseudoInsts.cpp",
+    "AArch64FalkorHWPFFix.cpp",
+    "AArch64FastISel.cpp",
+    "AArch64FrameLowering.cpp",
+    "AArch64ISelDAGToDAG.cpp",
+    "AArch64ISelLowering.cpp",
+    "AArch64InstrInfo.cpp",
+    "AArch64InstructionSelector.cpp",
+    "AArch64LegalizerInfo.cpp",
+    "AArch64LoadStoreOptimizer.cpp",
+    "AArch64MCInstLower.cpp",
+    "AArch64MacroFusion.cpp",
+    "AArch64PBQPRegAlloc.cpp",
+    "AArch64PreLegalizerCombiner.cpp",
+    "AArch64PromoteConstant.cpp",
+    "AArch64RedundantCopyElimination.cpp",
+    "AArch64RegisterBankInfo.cpp",
+    "AArch64RegisterInfo.cpp",
+    "AArch64SIMDInstrOpt.cpp",
+    "AArch64SelectionDAGInfo.cpp",
+    "AArch64SpeculationHardening.cpp",
+    "AArch64StorePairSuppress.cpp",
+    "AArch64Subtarget.cpp",
+    "AArch64TargetMachine.cpp",
+    "AArch64TargetObjectFile.cpp",
+    "AArch64TargetTransformInfo.cpp",
+  ]
+}
+
+# This is a bit different from most build files: Due to this group
+# having the directory's name, "//llvm/lib/Target/AArch64" will refer to this
+# target, which pulls in the code in this directory *and all subdirectories*.
+# For most other directories, "//llvm/lib/Foo" only pulls in the code directly
+# in "llvm/lib/Foo". The forwarding targets in //llvm/lib/Target expect this
+# different behavior.
+group("AArch64") {
+  deps = [
+    ":LLVMAArch64CodeGen",
+    "AsmParser",
+    "Disassembler",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+    "Utils",
+  ]
+}
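
To illustrate the distinction the comment draws, a hypothetical consumer (target and source names made up):

executable("aarch64_tool") {
  # The directory-named group pulls in LLVMAArch64CodeGen plus AsmParser,
  # Disassembler, InstPrinter, MCTargetDesc, TargetInfo, and Utils.
  deps = [ "//llvm/lib/Target/AArch64" ]
  sources = [ "aarch64_tool.cpp" ]
}
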
diff --git a/utils/gn/secondary/llvm/lib/Target/AArch64/Disassembler/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/AArch64/Disassembler/BUILD.gn
new file mode 100644
index 0000000..eda8422
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/AArch64/Disassembler/BUILD.gn
@@ -0,0 +1,25 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("AArch64GenDisassemblerTables") {
+  visibility = [ ":Disassembler" ]
+  args = [ "-gen-disassembler" ]
+  td_file = "../AArch64.td"
+}
+
+static_library("Disassembler") {
+  output_name = "LLVMAArch64Disassembler"
+  deps = [
+    ":AArch64GenDisassemblerTables",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/AArch64/MCTargetDesc",
+    "//llvm/lib/Target/AArch64/TargetInfo",
+    "//llvm/lib/Target/AArch64/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "AArch64Disassembler.cpp",
+    "AArch64ExternalSymbolizer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/AArch64/InstPrinter/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/AArch64/InstPrinter/BUILD.gn
new file mode 100644
index 0000000..6c17495
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/AArch64/InstPrinter/BUILD.gn
@@ -0,0 +1,35 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("AArch64GenAsmWriter") {
+  visibility = [ ":InstPrinter" ]
+  args = [ "-gen-asm-writer" ]
+  td_file = "../AArch64.td"
+}
+
+tablegen("AArch64GenAsmWriter1") {
+  visibility = [ ":InstPrinter" ]
+  args = [
+    "-gen-asm-writer",
+    "-asmwriternum=1",
+  ]
+  td_file = "../AArch64.td"
+}
+
+static_library("InstPrinter") {
+  output_name = "LLVMAArch64AsmPrinter"
+  deps = [
+    ":AArch64GenAsmWriter",
+    ":AArch64GenAsmWriter1",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on InstPrinter, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/AArch64/MCTargetDesc:tablegen",
+    "//llvm/lib/Target/AArch64/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "AArch64InstPrinter.cpp",
+  ]
+}
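
The comment above describes the layering used throughout these target directories; schematically, the dependency edges are:

# MCTargetDesc (library)  ->  InstPrinter  ->  MCTargetDesc:tablegen
#
# InstPrinter consumes only the generated .inc targets, never the
# MCTargetDesc library itself, so no library-level cycle forms.
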
diff --git a/utils/gn/secondary/llvm/lib/Target/AArch64/MCTargetDesc/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/AArch64/MCTargetDesc/BUILD.gn
new file mode 100644
index 0000000..6303fbe
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/AArch64/MCTargetDesc/BUILD.gn
@@ -0,0 +1,68 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("AArch64GenInstrInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-instr-info" ]
+  td_file = "../AArch64.td"
+}
+
+tablegen("AArch64GenMCCodeEmitter") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-emitter" ]
+  td_file = "../AArch64.td"
+}
+
+tablegen("AArch64GenRegisterInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-register-info" ]
+  td_file = "../AArch64.td"
+}
+
+tablegen("AArch64GenSubtargetInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-subtarget" ]
+  td_file = "../AArch64.td"
+}
+
+group("tablegen") {
+  visibility = [
+    ":MCTargetDesc",
+    "../InstPrinter",
+    "../TargetInfo",
+    "../Utils",
+  ]
+  public_deps = [
+    ":AArch64GenInstrInfo",
+    ":AArch64GenMCCodeEmitter",
+    ":AArch64GenRegisterInfo",
+    ":AArch64GenSubtargetInfo",
+  ]
+}
+
+static_library("MCTargetDesc") {
+  output_name = "LLVMAArch64Desc"
+  public_deps = [
+    ":tablegen",
+  ]
+  deps = [
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/AArch64/InstPrinter",
+    "//llvm/lib/Target/AArch64/TargetInfo",
+    "//llvm/lib/Target/AArch64/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "AArch64AsmBackend.cpp",
+    "AArch64ELFObjectWriter.cpp",
+    "AArch64ELFStreamer.cpp",
+    "AArch64MCAsmInfo.cpp",
+    "AArch64MCCodeEmitter.cpp",
+    "AArch64MCExpr.cpp",
+    "AArch64MCTargetDesc.cpp",
+    "AArch64MachObjectWriter.cpp",
+    "AArch64TargetStreamer.cpp",
+    "AArch64WinCOFFObjectWriter.cpp",
+    "AArch64WinCOFFStreamer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/AArch64/TargetInfo/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/AArch64/TargetInfo/BUILD.gn
new file mode 100644
index 0000000..114d93f
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/AArch64/TargetInfo/BUILD.gn
@@ -0,0 +1,14 @@
+static_library("TargetInfo") {
+  output_name = "LLVMAArch64Info"
+  deps = [
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on TargetInfo, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/AArch64/MCTargetDesc:tablegen",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "AArch64TargetInfo.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/AArch64/Utils/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/AArch64/Utils/BUILD.gn
new file mode 100644
index 0000000..144bd5c
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/AArch64/Utils/BUILD.gn
@@ -0,0 +1,24 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("AArch64GenSystemOperands") {
+  visibility = [ ":Utils" ]
+  args = [ "-gen-searchable-tables" ]
+  td_file = "../AArch64.td"
+}
+
+static_library("Utils") {
+  output_name = "LLVMAArch64Utils"
+  public_deps = [
+    ":AArch64GenSystemOperands",
+  ]
+  deps = [
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/AArch64/MCTargetDesc:tablegen",
+  ]
+
+  # AArch64BaseInfo.h includes a header from MCTargetDesc :-/
+  include_dirs = [ ".." ]
+  sources = [
+    "AArch64BaseInfo.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/ARM/AsmParser/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/ARM/AsmParser/BUILD.gn
new file mode 100644
index 0000000..9b6dd64
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/ARM/AsmParser/BUILD.gn
@@ -0,0 +1,24 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("ARMGenAsmMatcher") {
+  visibility = [ ":AsmParser" ]
+  args = [ "-gen-asm-matcher" ]
+  td_file = "../ARM.td"
+}
+
+static_library("AsmParser") {
+  output_name = "LLVMARMAsmParser"
+  deps = [
+    ":ARMGenAsmMatcher",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/ARM/MCTargetDesc",
+    "//llvm/lib/Target/ARM/TargetInfo",
+    "//llvm/lib/Target/ARM/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "ARMAsmParser.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/ARM/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/ARM/BUILD.gn
new file mode 100644
index 0000000..b299be5
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/ARM/BUILD.gn
@@ -0,0 +1,121 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("ARMGenCallingConv") {
+  visibility = [ ":LLVMARMCodeGen" ]
+  args = [ "-gen-callingconv" ]
+  td_file = "ARM.td"
+}
+
+tablegen("ARMGenDAGISel") {
+  visibility = [ ":LLVMARMCodeGen" ]
+  args = [ "-gen-dag-isel" ]
+  td_file = "ARM.td"
+}
+
+tablegen("ARMGenFastISel") {
+  visibility = [ ":LLVMARMCodeGen" ]
+  args = [ "-gen-fast-isel" ]
+  td_file = "ARM.td"
+}
+
+tablegen("ARMGenGlobalISel") {
+  visibility = [ ":LLVMARMCodeGen" ]
+  args = [ "-gen-global-isel" ]
+  td_file = "ARM.td"
+}
+
+tablegen("ARMGenMCPseudoLowering") {
+  visibility = [ ":LLVMARMCodeGen" ]
+  args = [ "-gen-pseudo-lowering" ]
+  td_file = "ARM.td"
+}
+
+tablegen("ARMGenRegisterBank") {
+  visibility = [ ":LLVMARMCodeGen" ]
+  args = [ "-gen-register-bank" ]
+  td_file = "ARM.td"
+}
+
+static_library("LLVMARMCodeGen") {
+  deps = [
+    ":ARMGenCallingConv",
+    ":ARMGenDAGISel",
+    ":ARMGenFastISel",
+    ":ARMGenGlobalISel",
+    ":ARMGenMCPseudoLowering",
+    ":ARMGenRegisterBank",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+    "Utils",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/CodeGen/GlobalISel",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "A15SDOptimizer.cpp",
+    "ARMAsmPrinter.cpp",
+    "ARMBaseInstrInfo.cpp",
+    "ARMBaseRegisterInfo.cpp",
+    "ARMCallLowering.cpp",
+    "ARMCodeGenPrepare.cpp",
+    "ARMComputeBlockSize.cpp",
+    "ARMConstantIslandPass.cpp",
+    "ARMConstantPoolValue.cpp",
+    "ARMExpandPseudoInsts.cpp",
+    "ARMFastISel.cpp",
+    "ARMFrameLowering.cpp",
+    "ARMHazardRecognizer.cpp",
+    "ARMISelDAGToDAG.cpp",
+    "ARMISelLowering.cpp",
+    "ARMInstrInfo.cpp",
+    "ARMInstructionSelector.cpp",
+    "ARMLegalizerInfo.cpp",
+    "ARMLoadStoreOptimizer.cpp",
+    "ARMMCInstLower.cpp",
+    "ARMMachineFunctionInfo.cpp",
+    "ARMMacroFusion.cpp",
+    "ARMOptimizeBarriersPass.cpp",
+    "ARMParallelDSP.cpp",
+    "ARMRegisterBankInfo.cpp",
+    "ARMRegisterInfo.cpp",
+    "ARMSelectionDAGInfo.cpp",
+    "ARMSubtarget.cpp",
+    "ARMTargetMachine.cpp",
+    "ARMTargetObjectFile.cpp",
+    "ARMTargetTransformInfo.cpp",
+    "MLxExpansionPass.cpp",
+    "Thumb1FrameLowering.cpp",
+    "Thumb1InstrInfo.cpp",
+    "Thumb2ITBlockPass.cpp",
+    "Thumb2InstrInfo.cpp",
+    "Thumb2SizeReduction.cpp",
+    "ThumbRegisterInfo.cpp",
+  ]
+}
+
+# This is a bit different from most build files: Due to this group
+# having the directory's name, "//llvm/lib/Target/ARM" will refer to this
+# target, which pulls in the code in this directory *and all subdirectories*.
+# For most other directories, "//llvm/lib/Foo" only pulls in the code directly
+# in "llvm/lib/Foo". The forwarding targets in //llvm/lib/Target expect this
+# different behavior.
+group("ARM") {
+  deps = [
+    ":LLVMARMCodeGen",
+    "AsmParser",
+    "Disassembler",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+    "Utils",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/ARM/Disassembler/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/ARM/Disassembler/BUILD.gn
new file mode 100644
index 0000000..bfe5e50
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/ARM/Disassembler/BUILD.gn
@@ -0,0 +1,23 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("ARMGenDisassemblerTables") {
+  visibility = [ ":Disassembler" ]
+  args = [ "-gen-disassembler" ]
+  td_file = "../ARM.td"
+}
+
+static_library("Disassembler") {
+  output_name = "LLVMARMDisassembler"
+  deps = [
+    ":ARMGenDisassemblerTables",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/ARM/MCTargetDesc",
+    "//llvm/lib/Target/ARM/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "ARMDisassembler.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/ARM/InstPrinter/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/ARM/InstPrinter/BUILD.gn
new file mode 100644
index 0000000..6cac442
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/ARM/InstPrinter/BUILD.gn
@@ -0,0 +1,25 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("ARMGenAsmWriter") {
+  visibility = [ ":InstPrinter" ]
+  args = [ "-gen-asm-writer" ]
+  td_file = "../ARM.td"
+}
+
+static_library("InstPrinter") {
+  output_name = "LLVMARMAsmPrinter"
+  deps = [
+    ":ARMGenAsmWriter",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on InstPrinter, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/ARM/MCTargetDesc:tablegen",
+    "//llvm/lib/Target/ARM/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "ARMInstPrinter.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/ARM/MCTargetDesc/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/ARM/MCTargetDesc/BUILD.gn
new file mode 100644
index 0000000..3ef2f43
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/ARM/MCTargetDesc/BUILD.gn
@@ -0,0 +1,71 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("ARMGenInstrInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-instr-info" ]
+  td_file = "../ARM.td"
+}
+
+tablegen("ARMGenMCCodeEmitter") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-emitter" ]
+  td_file = "../ARM.td"
+}
+
+tablegen("ARMGenRegisterInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-register-info" ]
+  td_file = "../ARM.td"
+}
+
+tablegen("ARMGenSubtargetInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-subtarget" ]
+  td_file = "../ARM.td"
+}
+
+group("tablegen") {
+  visibility = [
+    ":MCTargetDesc",
+    "../InstPrinter",
+    "../TargetInfo",
+    "../Utils",
+  ]
+  public_deps = [
+    ":ARMGenInstrInfo",
+    ":ARMGenMCCodeEmitter",
+    ":ARMGenRegisterInfo",
+    ":ARMGenSubtargetInfo",
+  ]
+}
+
+static_library("MCTargetDesc") {
+  output_name = "LLVMARMDesc"
+  public_deps = [
+    ":tablegen",
+  ]
+  deps = [
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/ARM/InstPrinter",
+    "//llvm/lib/Target/ARM/TargetInfo",
+    "//llvm/lib/Target/ARM/Utils",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "ARMAsmBackend.cpp",
+    "ARMELFObjectWriter.cpp",
+    "ARMELFStreamer.cpp",
+    "ARMMCAsmInfo.cpp",
+    "ARMMCCodeEmitter.cpp",
+    "ARMMCExpr.cpp",
+    "ARMMCTargetDesc.cpp",
+    "ARMMachORelocationInfo.cpp",
+    "ARMMachObjectWriter.cpp",
+    "ARMTargetStreamer.cpp",
+    "ARMUnwindOpAsm.cpp",
+    "ARMWinCOFFObjectWriter.cpp",
+    "ARMWinCOFFStreamer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/ARM/TargetInfo/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/ARM/TargetInfo/BUILD.gn
new file mode 100644
index 0000000..521367c
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/ARM/TargetInfo/BUILD.gn
@@ -0,0 +1,14 @@
+static_library("TargetInfo") {
+  output_name = "LLVMARMInfo"
+  deps = [
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on TargetInfo, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/ARM/MCTargetDesc:tablegen",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "ARMTargetInfo.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/ARM/Utils/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/ARM/Utils/BUILD.gn
new file mode 100644
index 0000000..e0ddc22
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/ARM/Utils/BUILD.gn
@@ -0,0 +1,25 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("ARMGenSystemRegister") {
+  visibility = [ ":Utils" ]
+  args = [ "-gen-searchable-tables" ]
+  td_file = "../ARM.td"
+}
+
+static_library("Utils") {
+  output_name = "LLVMARMUtils"
+  public_deps = [
+    ":ARMGenSystemRegister",
+  ]
+  deps = [
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/ARM/MCTargetDesc:tablegen",
+  ]
+
+  # ARMBaseInfo.h includes a header from MCTargetDesc,
+  # https://reviews.llvm.org/D35209#1075113 :-/
+  include_dirs = [ ".." ]
+  sources = [
+    "ARMBaseInfo.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/PowerPC/AsmParser/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/PowerPC/AsmParser/BUILD.gn
new file mode 100644
index 0000000..28c8945
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/PowerPC/AsmParser/BUILD.gn
@@ -0,0 +1,23 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("PPCGenAsmMatcher") {
+  visibility = [ ":AsmParser" ]
+  args = [ "-gen-asm-matcher" ]
+  td_file = "../PPC.td"
+}
+
+static_library("AsmParser") {
+  output_name = "LLVMPowerPCAsmParser"
+  deps = [
+    ":PPCGenAsmMatcher",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/PowerPC/MCTargetDesc",
+    "//llvm/lib/Target/PowerPC/TargetInfo",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "PPCAsmParser.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/PowerPC/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/PowerPC/BUILD.gn
new file mode 100644
index 0000000..268ea48
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/PowerPC/BUILD.gn
@@ -0,0 +1,91 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("PPCGenCallingConv") {
+  visibility = [ ":LLVMPowerPCCodeGen" ]
+  args = [ "-gen-callingconv" ]
+  td_file = "PPC.td"
+}
+
+tablegen("PPCGenDAGISel") {
+  visibility = [ ":LLVMPowerPCCodeGen" ]
+  args = [ "-gen-dag-isel" ]
+  td_file = "PPC.td"
+}
+
+tablegen("PPCGenFastISel") {
+  visibility = [ ":LLVMPowerPCCodeGen" ]
+  args = [ "-gen-fast-isel" ]
+  td_file = "PPC.td"
+}
+
+static_library("LLVMPowerPCCodeGen") {
+  deps = [
+    ":PPCGenCallingConv",
+    ":PPCGenDAGISel",
+    ":PPCGenFastISel",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "PPCAsmPrinter.cpp",
+    "PPCBoolRetToInt.cpp",
+    "PPCBranchCoalescing.cpp",
+    "PPCBranchSelector.cpp",
+    "PPCCCState.cpp",
+    "PPCCTRLoops.cpp",
+    "PPCEarlyReturn.cpp",
+    "PPCExpandISEL.cpp",
+    "PPCFastISel.cpp",
+    "PPCFrameLowering.cpp",
+    "PPCHazardRecognizers.cpp",
+    "PPCISelDAGToDAG.cpp",
+    "PPCISelLowering.cpp",
+    "PPCInstrInfo.cpp",
+    "PPCLoopPreIncPrep.cpp",
+    "PPCMCInstLower.cpp",
+    "PPCMIPeephole.cpp",
+    "PPCMachineFunctionInfo.cpp",
+    "PPCPreEmitPeephole.cpp",
+    "PPCQPXLoadSplat.cpp",
+    "PPCReduceCRLogicals.cpp",
+    "PPCRegisterInfo.cpp",
+    "PPCSubtarget.cpp",
+    "PPCTLSDynamicCall.cpp",
+    "PPCTOCRegDeps.cpp",
+    "PPCTargetMachine.cpp",
+    "PPCTargetObjectFile.cpp",
+    "PPCTargetTransformInfo.cpp",
+    "PPCVSXCopy.cpp",
+    "PPCVSXFMAMutate.cpp",
+    "PPCVSXSwapRemoval.cpp",
+  ]
+}
+
+# This is a bit different from most build files: Due to this group
+# having the directory's name, "//llvm/lib/Target/PowerPC" will refer to this
+# target, which pulls in the code in this directory *and all subdirectories*.
+# For most other directories, "//llvm/lib/Foo" only pulls in the code directly
+# in "llvm/lib/Foo". The forwarding targets in //llvm/lib/Target expect this
+# different behavior.
+group("PowerPC") {
+  deps = [
+    ":LLVMPowerPCCodeGen",
+    "AsmParser",
+    "Disassembler",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/PowerPC/Disassembler/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/PowerPC/Disassembler/BUILD.gn
new file mode 100644
index 0000000..3b5febf
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/PowerPC/Disassembler/BUILD.gn
@@ -0,0 +1,22 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("PPCGenDisassemblerTables") {
+  visibility = [ ":Disassembler" ]
+  args = [ "-gen-disassembler" ]
+  td_file = "../PPC.td"
+}
+
+static_library("Disassembler") {
+  output_name = "LLVMPowerPCDisassembler"
+  deps = [
+    ":PPCGenDisassemblerTables",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/PowerPC/MCTargetDesc",
+    "//llvm/lib/Target/PowerPC/TargetInfo",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "PPCDisassembler.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/PowerPC/InstPrinter/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/PowerPC/InstPrinter/BUILD.gn
new file mode 100644
index 0000000..8d885d6
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/PowerPC/InstPrinter/BUILD.gn
@@ -0,0 +1,24 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("PPCGenAsmWriter") {
+  visibility = [ ":InstPrinter" ]
+  args = [ "-gen-asm-writer" ]
+  td_file = "../PPC.td"
+}
+
+static_library("InstPrinter") {
+  output_name = "LLVMPowerPCAsmPrinter"
+  deps = [
+    ":PPCGenAsmWriter",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on InstPrinter, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/PowerPC/MCTargetDesc:tablegen",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "PPCInstPrinter.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/PowerPC/MCTargetDesc/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/PowerPC/MCTargetDesc/BUILD.gn
new file mode 100644
index 0000000..4425612
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/PowerPC/MCTargetDesc/BUILD.gn
@@ -0,0 +1,63 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("PPCGenInstrInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-instr-info" ]
+  td_file = "../PPC.td"
+}
+
+tablegen("PPCGenMCCodeEmitter") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-emitter" ]
+  td_file = "../PPC.td"
+}
+
+tablegen("PPCGenRegisterInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-register-info" ]
+  td_file = "../PPC.td"
+}
+
+tablegen("PPCGenSubtargetInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-subtarget" ]
+  td_file = "../PPC.td"
+}
+
+group("tablegen") {
+  visibility = [
+    ":MCTargetDesc",
+    "../InstPrinter",
+    "../TargetInfo",
+  ]
+  public_deps = [
+    ":PPCGenInstrInfo",
+    ":PPCGenMCCodeEmitter",
+    ":PPCGenRegisterInfo",
+    ":PPCGenSubtargetInfo",
+  ]
+}
+
+static_library("MCTargetDesc") {
+  output_name = "LLVMPowerPCDesc"
+  public_deps = [
+    ":tablegen",
+  ]
+  deps = [
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/PowerPC/InstPrinter",
+    "//llvm/lib/Target/PowerPC/TargetInfo",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "PPCAsmBackend.cpp",
+    "PPCELFObjectWriter.cpp",
+    "PPCMCAsmInfo.cpp",
+    "PPCMCCodeEmitter.cpp",
+    "PPCMCExpr.cpp",
+    "PPCMCTargetDesc.cpp",
+    "PPCMachObjectWriter.cpp",
+    "PPCPredicates.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/PowerPC/TargetInfo/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/PowerPC/TargetInfo/BUILD.gn
new file mode 100644
index 0000000..c706924
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/PowerPC/TargetInfo/BUILD.gn
@@ -0,0 +1,14 @@
+static_library("TargetInfo") {
+  output_name = "LLVMPowerPCInfo"
+  deps = [
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on TargetInfo, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/PowerPC/MCTargetDesc:tablegen",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "PowerPCTargetInfo.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/WebAssembly/AsmParser/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/WebAssembly/AsmParser/BUILD.gn
new file mode 100644
index 0000000..158f9e9
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/WebAssembly/AsmParser/BUILD.gn
@@ -0,0 +1,23 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("WebAssemblyGenAsmMatcher") {
+  visibility = [ ":AsmParser" ]
+  args = [ "-gen-asm-matcher" ]
+  td_file = "../WebAssembly.td"
+}
+
+static_library("AsmParser") {
+  output_name = "LLVMWebAssemblyAsmParser"
+  deps = [
+    ":WebAssemblyGenAsmMatcher",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/WebAssembly/MCTargetDesc",
+    "//llvm/lib/Target/WebAssembly/TargetInfo",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "WebAssemblyAsmParser.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn
new file mode 100644
index 0000000..e3bd8f2
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn
@@ -0,0 +1,122 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("WebAssemblyGenCallingConv") {
+  visibility = [ ":LLVMWebAssemblyCodeGen" ]
+  args = [ "-gen-callingconv" ]
+  td_file = "WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenDAGISel") {
+  visibility = [ ":LLVMWebAssemblyCodeGen" ]
+  args = [ "-gen-dag-isel" ]
+  td_file = "WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenFastISel") {
+  visibility = [ ":LLVMWebAssemblyCodeGen" ]
+  args = [ "-gen-fast-isel" ]
+  td_file = "WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenGlobalISel") {
+  visibility = [ ":LLVMWebAssemblyCodeGen" ]
+  args = [ "-gen-global-isel" ]
+  td_file = "WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenMCPseudoLowering") {
+  visibility = [ ":LLVMWebAssemblyCodeGen" ]
+  args = [ "-gen-pseudo-lowering" ]
+  td_file = "WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenRegisterBank") {
+  visibility = [ ":LLVMWebAssemblyCodeGen" ]
+  args = [ "-gen-register-bank" ]
+  td_file = "WebAssembly.td"
+}
+
+static_library("LLVMWebAssemblyCodeGen") {
+  deps = [
+    ":WebAssemblyGenCallingConv",
+    ":WebAssemblyGenDAGISel",
+    ":WebAssemblyGenFastISel",
+    ":WebAssemblyGenGlobalISel",
+    ":WebAssemblyGenMCPseudoLowering",
+    ":WebAssemblyGenRegisterBank",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/CodeGen/GlobalISel",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "WebAssemblyAddMissingPrototypes.cpp",
+    "WebAssemblyArgumentMove.cpp",
+    "WebAssemblyAsmPrinter.cpp",
+    "WebAssemblyCFGSort.cpp",
+    "WebAssemblyCFGStackify.cpp",
+    "WebAssemblyCallIndirectFixup.cpp",
+    "WebAssemblyDebugValueManager.cpp",
+    "WebAssemblyEHRestoreStackPointer.cpp",
+    "WebAssemblyExceptionInfo.cpp",
+    "WebAssemblyExplicitLocals.cpp",
+    "WebAssemblyFastISel.cpp",
+    "WebAssemblyFixFunctionBitcasts.cpp",
+    "WebAssemblyFixIrreducibleControlFlow.cpp",
+    "WebAssemblyFrameLowering.cpp",
+    "WebAssemblyISelDAGToDAG.cpp",
+    "WebAssemblyISelLowering.cpp",
+    "WebAssemblyInstrInfo.cpp",
+    "WebAssemblyLateEHPrepare.cpp",
+    "WebAssemblyLowerBrUnless.cpp",
+    "WebAssemblyLowerEmscriptenEHSjLj.cpp",
+    "WebAssemblyLowerGlobalDtors.cpp",
+    "WebAssemblyMCInstLower.cpp",
+    "WebAssemblyMachineFunctionInfo.cpp",
+    "WebAssemblyMemIntrinsicResults.cpp",
+    "WebAssemblyOptimizeLiveIntervals.cpp",
+    "WebAssemblyOptimizeReturned.cpp",
+    "WebAssemblyPeephole.cpp",
+    "WebAssemblyPrepareForLiveIntervals.cpp",
+    "WebAssemblyRegColoring.cpp",
+    "WebAssemblyRegNumbering.cpp",
+    "WebAssemblyRegStackify.cpp",
+    "WebAssemblyRegisterInfo.cpp",
+    "WebAssemblyReplacePhysRegs.cpp",
+    "WebAssemblyRuntimeLibcallSignatures.cpp",
+    "WebAssemblySelectionDAGInfo.cpp",
+    "WebAssemblySetP2AlignOperands.cpp",
+    "WebAssemblySubtarget.cpp",
+    "WebAssemblyTargetMachine.cpp",
+    "WebAssemblyTargetObjectFile.cpp",
+    "WebAssemblyTargetTransformInfo.cpp",
+    "WebAssemblyUtilities.cpp",
+  ]
+}
+
+# This is a bit different from most build files: Due to this group
+# having the directory's name, "//llvm/lib/Target/WebAssembly" will refer to
+# this target, which pulls in the code here *and in all subdirectories*.
+# For most other directories, "//llvm/lib/Foo" only pulls in the code directly
+# in "llvm/lib/Foo". The forwarding targets in //llvm/lib/Target expect this
+# different behavior.
+group("WebAssembly") {
+  deps = [
+    ":LLVMWebAssemblyCodeGen",
+    "AsmParser",
+    "Disassembler",
+    "InstPrinter",
+    "MCTargetDesc",
+    "TargetInfo",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/WebAssembly/Disassembler/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/WebAssembly/Disassembler/BUILD.gn
new file mode 100644
index 0000000..44e586d
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/WebAssembly/Disassembler/BUILD.gn
@@ -0,0 +1,23 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("WebAssemblyGenDisassemblerTables") {
+  visibility = [ ":Disassembler" ]
+  args = [ "-gen-disassembler" ]
+  td_file = "../WebAssembly.td"
+}
+
+static_library("Disassembler") {
+  output_name = "LLVMWebAssemblyDisassembler"
+  deps = [
+    ":WebAssemblyGenDisassemblerTables",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/WebAssembly/MCTargetDesc",
+    "//llvm/lib/Target/WebAssembly/TargetInfo",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "WebAssemblyDisassembler.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/WebAssembly/InstPrinter/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/WebAssembly/InstPrinter/BUILD.gn
new file mode 100644
index 0000000..e63ea79
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/WebAssembly/InstPrinter/BUILD.gn
@@ -0,0 +1,24 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("WebAssemblyGenAsmWriter") {
+  visibility = [ ":InstPrinter" ]
+  args = [ "-gen-asm-writer" ]
+  td_file = "../WebAssembly.td"
+}
+
+static_library("InstPrinter") {
+  output_name = "LLVMWebAssemblyAsmPrinter"
+  deps = [
+    ":WebAssemblyGenAsmWriter",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on InstPrinter, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/WebAssembly/MCTargetDesc:tablegen",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "WebAssemblyInstPrinter.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/WebAssembly/MCTargetDesc/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/WebAssembly/MCTargetDesc/BUILD.gn
new file mode 100644
index 0000000..8d8e6c0
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/WebAssembly/MCTargetDesc/BUILD.gn
@@ -0,0 +1,61 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("WebAssemblyGenInstrInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-instr-info" ]
+  td_file = "../WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenMCCodeEmitter") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-emitter" ]
+  td_file = "../WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenRegisterInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-register-info" ]
+  td_file = "../WebAssembly.td"
+}
+
+tablegen("WebAssemblyGenSubtargetInfo") {
+  visibility = [ ":tablegen" ]
+  args = [ "-gen-subtarget" ]
+  td_file = "../WebAssembly.td"
+}
+
+group("tablegen") {
+  visibility = [
+    ":MCTargetDesc",
+    "../InstPrinter",
+    "../TargetInfo",
+    "../Utils",
+  ]
+  public_deps = [
+    ":WebAssemblyGenInstrInfo",
+    ":WebAssemblyGenMCCodeEmitter",
+    ":WebAssemblyGenRegisterInfo",
+    ":WebAssemblyGenSubtargetInfo",
+  ]
+}
+static_library("MCTargetDesc") {
+  output_name = "LLVMWebAssemblyDesc"
+  public_deps = [
+    ":tablegen",
+  ]
+  deps = [
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/WebAssembly/InstPrinter",
+    "//llvm/lib/Target/WebAssembly/TargetInfo",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "WebAssemblyAsmBackend.cpp",
+    "WebAssemblyMCAsmInfo.cpp",
+    "WebAssemblyMCCodeEmitter.cpp",
+    "WebAssemblyMCTargetDesc.cpp",
+    "WebAssemblyTargetStreamer.cpp",
+    "WebAssemblyWasmObjectWriter.cpp",
+  ]
+}
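The group("tablegen") above is what lets InstPrinter and TargetInfo compile
against the generated .inc headers without linking LLVMWebAssemblyDesc, which
would form the cycle noted in their BUILD.gn files. Reduced to its essentials
(target names hypothetical):

  # public_deps forwards the generated-header dependency to whoever depends
  # on the group, without introducing any link-time dependency of its own.
  group("tablegen_sketch") {
    public_deps = [ ":SomeGenRegisterInfo" ]  # tablegen() outputs only
  }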
diff --git a/utils/gn/secondary/llvm/lib/Target/WebAssembly/TargetInfo/BUILD.gn b/utils/gn/secondary/llvm/lib/Target/WebAssembly/TargetInfo/BUILD.gn
new file mode 100644
index 0000000..334183b
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/WebAssembly/TargetInfo/BUILD.gn
@@ -0,0 +1,14 @@
+static_library("TargetInfo") {
+  output_name = "LLVMWebAssemblyInfo"
+  deps = [
+    "//llvm/lib/Support",
+
+    # MCTargetDesc depends on TargetInfo, so we can't depend on the full
+    # MCTargetDesc target here: it would form a cycle.
+    "//llvm/lib/Target/WebAssembly/MCTargetDesc:tablegen",
+  ]
+  include_dirs = [ ".." ]
+  sources = [
+    "WebAssemblyTargetInfo.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Target/targets.gni b/utils/gn/secondary/llvm/lib/Target/targets.gni
index 76a3d57..85055cf 100644
--- a/utils/gn/secondary/llvm/lib/Target/targets.gni
+++ b/utils/gn/secondary/llvm/lib/Target/targets.gni
@@ -6,30 +6,60 @@
 }
 
 if (llvm_targets_to_build == "host") {
-  if (host_cpu == "x86" || host_cpu == "x64") {
+  if (host_cpu == "arm64") {
+    llvm_targets_to_build = [ "AArch64" ]
+  } else if (host_cpu == "arm") {
+    llvm_targets_to_build = [ "ARM" ]
+  } else if (host_cpu == "pcc" || host_cpu == "pcc64") {
+    llvm_targets_to_build = [ "PowerPC" ]
+  } else if (host_cpu == "x86" || host_cpu == "x64") {
     llvm_targets_to_build = [ "X86" ]
   } else {
     assert(false, "add your host_cpu above")
   }
 } else if (llvm_targets_to_build == "all") {
   # FIXME: Port the remaining targets.
-  llvm_targets_to_build = [ "X86" ]
+  llvm_targets_to_build = [
+    "AArch64",
+    "ARM",
+    "PowerPC",
+    "WebAssembly",
+    "X86",
+  ]
 }
 
 # Validate that llvm_targets_to_build is set to a list of valid targets,
 # and remember which targets are built.
+llvm_build_AArch64 = false
+llvm_build_ARM = false
+llvm_build_PowerPC = false
+llvm_build_WebAssembly = false
 llvm_build_X86 = false
 foreach(target, llvm_targets_to_build) {
-  if (target == "X86") {
+  if (target == "AArch64") {
+    llvm_build_AArch64 = true
+  } else if (target == "ARM") {
+    llvm_build_ARM = true
+  } else if (target == "PowerPC") {
+    llvm_build_PowerPC = true
+  } else if (target == "WebAssembly") {
+    llvm_build_WebAssembly = true
+  } else if (target == "X86") {
     llvm_build_X86 = true
   } else {
-    #FIXME : Port the remaining targets.
+    # FIXME: Port the remaining targets.
     assert(false, "Unknown target '$target'.")
   }
 }
 
 # FIXME: This should be based off target_cpu once cross compiles work.
-if (host_cpu == "x86" || host_cpu == "x64") {
+if (host_cpu == "arm64") {
+  native_target = "AArch64"
+} else if (host_cpu == "arm") {
+  native_target = "ARM"
+} else if (host_cpu == "pcc" || host_cpu == "pcc64") {
+  native_target = [ "PowerPC" ]
+} else if (host_cpu == "x86" || host_cpu == "x64") {
   native_target = "X86"
 } else {
   assert(false, "Unsuppored host_cpu '$host_cpu'.")
diff --git a/utils/gn/secondary/llvm/lib/Target/targets_string.gni b/utils/gn/secondary/llvm/lib/Target/targets_string.gni
new file mode 100644
index 0000000..dcf8c68
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Target/targets_string.gni
@@ -0,0 +1,10 @@
+import("//llvm/lib/Target/targets.gni")
+
+# A single string with all built targets, separated by spaces.
+llvm_targets_to_build_string = ""
+foreach(target, llvm_targets_to_build) {
+  if (llvm_targets_to_build_string != "") {
+    llvm_targets_to_build_string += " "
+  }
+  llvm_targets_to_build_string += target
+}
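As a worked example: with the default "all" list from targets.gni, the loop
yields llvm_targets_to_build_string == "AArch64 ARM PowerPC WebAssembly X86";
the guard merely suppresses the separator before the first element. To inspect
the value at gen time, GN's built-in print() can be added temporarily:

  print(llvm_targets_to_build_string)  # "AArch64 ARM PowerPC WebAssembly X86"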
diff --git a/utils/gn/secondary/llvm/lib/Testing/Support/BUILD.gn b/utils/gn/secondary/llvm/lib/Testing/Support/BUILD.gn
new file mode 100644
index 0000000..064d5ec
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Testing/Support/BUILD.gn
@@ -0,0 +1,12 @@
+static_library("Support") {
+  output_name = "LLVMTestingSupport"
+  deps = [
+    "//llvm/lib/Support",
+    "//llvm/utils/unittest:gtest",
+  ]
+  sources = [
+    "Error.cpp",
+    "SupportHelpers.cpp",
+  ]
+  testonly = true
+}
diff --git a/utils/gn/secondary/llvm/lib/TextAPI/BUILD.gn b/utils/gn/secondary/llvm/lib/TextAPI/BUILD.gn
new file mode 100644
index 0000000..0daf58b
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/TextAPI/BUILD.gn
@@ -0,0 +1,12 @@
+static_library("TextAPI") {
+  output_name = "LLVMTextAPI"
+  deps = [
+    "//llvm/lib/BinaryFormat",
+    "//llvm/lib/Support",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "ELF/ELFStub.cpp",
+    "ELF/TBEHandler.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/ToolDrivers/llvm-dlltool/BUILD.gn b/utils/gn/secondary/llvm/lib/ToolDrivers/llvm-dlltool/BUILD.gn
new file mode 100644
index 0000000..b5d5a61
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/ToolDrivers/llvm-dlltool/BUILD.gn
@@ -0,0 +1,19 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("Options") {
+  visibility = [ ":DlltoolDriver" ]
+  args = [ "-gen-opt-parser-defs" ]
+}
+
+static_library("DlltoolDriver") {
+  output_name = "LLVMDlltoolDriver"
+  deps = [
+    ":Options",
+    "//llvm/lib/Object",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "DlltoolDriver.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Transforms/Coroutines/BUILD.gn b/utils/gn/secondary/llvm/lib/Transforms/Coroutines/BUILD.gn
new file mode 100644
index 0000000..bbc921c
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Transforms/Coroutines/BUILD.gn
@@ -0,0 +1,20 @@
+static_library("Coroutines") {
+  output_name = "LLVMCoroutines"
+  deps = [
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/Scalar",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "CoroCleanup.cpp",
+    "CoroEarly.cpp",
+    "CoroElide.cpp",
+    "CoroFrame.cpp",
+    "CoroSplit.cpp",
+    "Coroutines.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/lib/Transforms/Hello/BUILD.gn b/utils/gn/secondary/llvm/lib/Transforms/Hello/BUILD.gn
new file mode 100644
index 0000000..9240193
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/Transforms/Hello/BUILD.gn
@@ -0,0 +1,22 @@
+assert(host_os != "win", "loadable modules not supported on win")
+
+loadable_module("Hello") {
+  output_name = "LLVMHello"
+  deps = [
+    # LLVMHello doesn't want to link in any LLVM code; it just
+    # needs its headers.
+    "//llvm/include/llvm/IR:public_tablegen",
+  ]
+  sources = [
+    "Hello.cpp",
+  ]
+
+  if (host_os == "linux") {
+    # The GN build currently doesn't globally pass -fPIC, but that's
+    # needed for building .so files on Linux.  Just pass it manually
+    # for loadable_modules for now.
+    cflags = [ "-fPIC" ]
+  }
+
+  # FIXME: Use Hello.exports to remove all exports.
+}
diff --git a/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn b/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn
index 843dd78..eee9b80 100644
--- a/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn
+++ b/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn
@@ -13,6 +13,7 @@
     "BuildLibCalls.cpp",
     "BypassSlowDivision.cpp",
     "CallPromotionUtils.cpp",
+    "CanonicalizeAliases.cpp",
     "CloneFunction.cpp",
     "CloneModule.cpp",
     "CodeExtractor.cpp",
diff --git a/utils/gn/secondary/llvm/lib/XRay/BUILD.gn b/utils/gn/secondary/llvm/lib/XRay/BUILD.gn
new file mode 100644
index 0000000..c7907e3
--- /dev/null
+++ b/utils/gn/secondary/llvm/lib/XRay/BUILD.gn
@@ -0,0 +1,23 @@
+static_library("XRay") {
+  output_name = "LLVMXRay"
+  deps = [
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "BlockIndexer.cpp",
+    "BlockPrinter.cpp",
+    "BlockVerifier.cpp",
+    "FDRRecordProducer.cpp",
+    "FDRRecords.cpp",
+    "FDRTraceExpander.cpp",
+    "FDRTraceWriter.cpp",
+    "FileHeaderReader.cpp",
+    "InstrumentationMap.cpp",
+    "LogBuilderConsumer.cpp",
+    "Profile.cpp",
+    "RecordInitializer.cpp",
+    "RecordPrinter.cpp",
+    "Trace.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/test/BUILD.gn b/utils/gn/secondary/llvm/test/BUILD.gn
new file mode 100644
index 0000000..b6f7574
--- /dev/null
+++ b/utils/gn/secondary/llvm/test/BUILD.gn
@@ -0,0 +1,288 @@
+import("//llvm/lib/DebugInfo/PDB/enable_dia.gni")
+import("//llvm/lib/Target/targets_string.gni")
+import("//llvm/triples.gni")
+import("//llvm/utils/gn/build/buildflags.gni")
+import("//llvm/utils/gn/build/libs/pthread/enable.gni")
+import("//llvm/utils/gn/build/libs/xar/enable.gni")
+import("//llvm/utils/gn/build/libs/xml/enable.gni")
+import("//llvm/utils/gn/build/libs/zlib/enable.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("llvm_lit_site_cfg_files.gni")
+
+template("write_lit_config") {
+  write_cmake_config(target_name) {
+    input = invoker.input
+    output = invoker.output
+    values = [
+      "LIT_SITE_CFG_IN_HEADER=## Autogenerated from $input, do not edit",
+      "ENABLE_SHARED=0",
+      "LLVM_BINARY_DIR=" +
+          rebase_path(get_label_info("//llvm", "target_out_dir")),
+      "LLVM_SOURCE_DIR=" + rebase_path("//llvm"),
+      "LLVM_TOOLS_DIR=" + rebase_path("$root_out_dir/bin"),
+    ]
+    if (host_os == "win") {
+      # See comment for Windows solink in llvm/utils/gn/build/toolchain/BUILD.gn
+      values += [ "SHLIBDIR=" + rebase_path("$root_out_dir/bin") ]
+    } else {
+      values += [ "SHLIBDIR=" + rebase_path("$root_out_dir/lib") ]
+    }
+    values += invoker.extra_values
+  }
+}
+
+write_lit_config("lit_site_cfg") {
+  input = "//llvm/test/lit.site.cfg.py.in"
+  output = llvm_lit_site_cfg_file
+  extra_values = [
+    "BUILD_SHARED_LIBS=0",
+
+    # Only used by the Go bindings tests, or if LLVM_USE_SANITIZER includes
+    # asan and the host OS is macOS. The GN build currently never uses
+    # LLVM_USE_SANITIZER.  (See also CMAKE_CXX_COMPILER in clang/test/BUILD.gn.)
+    "HOST_CC=cc",
+
+    # Only used by the Go bindings tests, or if LLVM_USE_SANITIZER includes
+    # lsan and the host OS is macOS. The GN build currently never uses
+    # LLVM_USE_SANITIZER.  (See also CMAKE_CXX_COMPILER in clang/test/BUILD.gn.)
+    "HOST_CXX=c++",
+
+    # Only used by the Go bindings tests, and in a belt-and-suspenders
+    # check for detecting 32-bit host builds.
+    # (That check also checks LLVM_HOST_TRIPLE.)
+    "HOST_LDFLAGS=",
+
+    "LLVM_ENABLE_FFI=0",
+    "LLVM_HAVE_OPT_VIEWER_MODULES=0",
+    "LLVM_HOST_TRIPLE=$llvm_current_triple",
+    "LLVM_LIBRARY_DIR=" + rebase_path("$root_out_dir/lib"),
+    "LLVM_LINK_LLVM_DYLIB=0",
+    "LLVM_LIT_TOOLS_DIR=",  # Intentionally empty, matches cmake build.
+    "LLVM_NATIVE_ARCH=$native_target",
+    "LLVM_TOOL_LTO_BUILD=1",  # The GN build always builds //llvm/tools/lto.
+    "LLVM_USE_INTEL_JITEVENTS=0",
+    "LLVM_USE_SANITIZER=",
+    "PYTHON_EXECUTABLE=$python_path",
+    "TARGETS_TO_BUILD=$llvm_targets_to_build_string",
+    "TARGET_TRIPLE=$llvm_target_triple",
+
+    # No bindings are implemented in the GN build.
+    "LLVM_BINDINGS=",
+
+    "GO_EXECUTABLE=",
+    "LLVM_INCLUDE_GO_TESTS=0",
+
+    "HAVE_OCAMLOPT=0",
+    "HAVE_OCAML_OUNIT=0",
+    "OCAMLFIND=OCAMLFIND-NOTFOUND",
+    "OCAMLFLAGS=",
+  ]
+
+  if (host_cpu == "x64") {
+    extra_values += [ "HOST_ARCH=x86_64" ]
+  } else {
+    assert(false, "unimplemented host_cpu " + host_cpu)
+  }
+
+  if (host_os == "mac") {
+    extra_values += [
+      "EXEEXT=",
+      "HOST_OS=Darwin",
+      "SHLIBEXT=.dylib",
+    ]
+  } else if (host_os == "linux") {
+    extra_values += [
+      "EXEEXT=",
+      "HOST_OS=Linux",
+      "SHLIBEXT=.so",
+    ]
+  } else if (host_os == "win") {
+    extra_values += [
+      "EXEEXT=.exe",
+      "HOST_OS=Windows",
+      "SHLIBEXT=.dll",
+    ]
+  } else {
+    assert(false, "unsupported host_os " + host_os)
+  }
+  if (host_os == "linux") {
+    # lit.cfg.py's have_ld_plugin_support() checks for "gold" in --version,
+    # so just claim that ld is gold on Linux.  The function also checks if
+    # LLVMgold.so exists, but since that target isn't hooked up yet in the
+    # GN build, the LLVMgold.so tests currently don't run anywhere.
+    extra_values += [ "GOLD_EXECUTABLE=ld" ]
+  } else {
+    extra_values += [ "GOLD_EXECUTABLE=" ]
+  }
+  if (host_os == "mac") {
+    extra_values += [ "LD64_EXECUTABLE=ld" ]
+  } else {
+    extra_values += [ "LD64_EXECUTABLE=" ]
+  }
+
+  if (llvm_enable_assertions) {
+    extra_values += [ "ENABLE_ASSERTIONS=1" ]
+  } else {
+    extra_values += [ "ENABLE_ASSERTIONS=0" ]  # Must be 0.
+  }
+
+  if (llvm_enable_libxar) {
+    extra_values += [ "HAVE_LIBXAR=1" ]
+  } else {
+    extra_values += [ "HAVE_LIBXAR=0" ]  # Must be 0.
+  }
+
+  if (llvm_enable_dia_sdk) {
+    extra_values += [ "LLVM_ENABLE_DIA_SDK=1" ]
+  } else {
+    extra_values += [ "LLVM_ENABLE_DIA_SDK=0" ]  # Must be 0.
+  }
+
+  if (llvm_enable_libxml2) {
+    extra_values += [ "LLVM_LIBXML2_ENABLED=1" ]
+  } else {
+    extra_values += [ "LLVM_LIBXML2_ENABLED=" ]  # Must be empty.
+  }
+
+  if (llvm_enable_threads) {
+    extra_values += [ "LLVM_ENABLE_THREADS=1" ]
+  } else {
+    extra_values += [ "LLVM_ENABLE_THREADS=0" ]  # Must be 0.
+  }
+
+  if (llvm_enable_zlib) {
+    extra_values += [ "HAVE_LIBZ=1" ]
+  } else {
+    extra_values += [ "HAVE_LIBZ=0" ]  # Must be 0.
+  }
+}
+
+write_lit_config("lit_unit_site_cfg") {
+  input = "//llvm/test/Unit/lit.site.cfg.py.in"
+  output = llvm_lit_unit_site_cfg_file
+  extra_values = [ "LLVM_BUILD_MODE=." ]
+}
+
+# This target should contain all dependencies of check-llvm.
+# //:default depends on it, so that ninja's default target builds all
+# prerequisites for check-llvm but doesn't run check-llvm itself.
+group("test") {
+  deps = [
+    ":lit_site_cfg",
+    ":lit_unit_site_cfg",
+
+    # Because llvm/tools/llvm-config/BUILD.gn calls llvm-build to generate
+    # LibraryDependencies.inc, llvm-config expects these libraries to exist
+    # even though nothing but unittests depends on them.  Add explicit
+    # dependencies to make sure the libraries exist on disk when llvm-config's
+    # lit tests run.
+    "//llvm/lib/LineEditor",
+    "//llvm/lib/Testing/Support",
+    "//llvm/tools/bugpoint",
+    "//llvm/tools/dsymutil",
+    "//llvm/tools/llc",
+    "//llvm/tools/lli",
+    "//llvm/tools/lli/ChildTarget:lli-child-target",
+    "//llvm/tools/llvm-ar:symlinks",
+    "//llvm/tools/llvm-as",
+    "//llvm/tools/llvm-bcanalyzer",
+    "//llvm/tools/llvm-c-test",
+    "//llvm/tools/llvm-cat",
+    "//llvm/tools/llvm-cfi-verify",
+    "//llvm/tools/llvm-cov",
+    "//llvm/tools/llvm-cvtres",
+    "//llvm/tools/llvm-cxxdump",
+    "//llvm/tools/llvm-cxxfilt",
+    "//llvm/tools/llvm-cxxmap",
+    "//llvm/tools/llvm-diff",
+    "//llvm/tools/llvm-dis",
+    "//llvm/tools/llvm-dwarfdump",
+    "//llvm/tools/llvm-dwp",
+    "//llvm/tools/llvm-elfabi",
+    "//llvm/tools/llvm-exegesis",
+    "//llvm/tools/llvm-extract",
+    "//llvm/tools/llvm-isel-fuzzer",
+    "//llvm/tools/llvm-link",
+    "//llvm/tools/llvm-lto",
+    "//llvm/tools/llvm-lto2",
+    "//llvm/tools/llvm-mc",
+    "//llvm/tools/llvm-mca",
+    "//llvm/tools/llvm-modextract",
+    "//llvm/tools/llvm-mt",
+    "//llvm/tools/llvm-nm",
+    "//llvm/tools/llvm-objcopy:symlinks",
+    "//llvm/tools/llvm-objdump",
+    "//llvm/tools/llvm-opt-fuzzer",
+    "//llvm/tools/llvm-opt-report",
+    "//llvm/tools/llvm-pdbutil",
+    "//llvm/tools/llvm-profdata",
+    "//llvm/tools/llvm-rc",
+    "//llvm/tools/llvm-readobj:symlinks",
+    "//llvm/tools/llvm-rtdyld",
+    "//llvm/tools/llvm-size",
+    "//llvm/tools/llvm-split",
+    "//llvm/tools/llvm-strings",
+    "//llvm/tools/llvm-symbolizer",
+    "//llvm/tools/llvm-undname",
+    "//llvm/tools/llvm-xray",
+    "//llvm/tools/lto",
+    "//llvm/tools/obj2yaml",
+    "//llvm/tools/opt",
+    "//llvm/tools/sancov",
+    "//llvm/tools/sanstats",
+    "//llvm/tools/verify-uselistorder",
+    "//llvm/tools/yaml2obj",
+    "//llvm/unittests",
+    "//llvm/utils/FileCheck",
+    "//llvm/utils/TableGen:llvm-tblgen",
+    "//llvm/utils/count",
+    "//llvm/utils/not",
+
+    # llvm-config wants libgtest_main.a to exist at runtime when run in
+    # its tests, but nothing else in the tree depends on it.
+    "//llvm/utils/unittest/UnitTestMain:gtest_main",
+    "//llvm/utils/yaml-bench",
+  ]
+  if (host_os != "win") {
+    # loadable_modules don't work on Windows.
+    # FIXME: In the CMake build, ENABLE_SHARED makes them work somehow
+    # (but they're off by default there too).
+    deps += [
+      "//llvm/lib/Transforms/Hello",
+      "//llvm/tools/bugpoint-passes",
+    ]
+  }
+
+  # FIXME: llvm_build_examples
+  testonly = true
+}
+
+action("check-llvm") {
+  script = "$root_out_dir/bin/llvm-lit"
+  if (host_os == "win") {
+    script += ".py"
+  }
+  args = [
+    "-sv",
+    "--param",
+    "llvm_site_config=" + rebase_path(llvm_lit_site_cfg_file, root_out_dir),
+    "--param",
+    "llvm_unit_site_config=" +
+        rebase_path(llvm_lit_unit_site_cfg_file, root_out_dir),
+    rebase_path(".", root_out_dir),
+  ]
+  outputs = [
+    "$target_gen_dir/run-lit",  # Non-existing, so that ninja runs it each time.
+  ]
+
+  # Since check-llvm is always dirty, //:default doesn't depend on it so that
+  # it's not part of the default ninja target.  Hence, check-llvm shouldn't
+  # have any deps except :test, so that the default target is sure to build
+  # all the deps.
+  deps = [
+    ":test",
+  ]
+  testonly = true
+
+  pool = "//:console"
+}
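The "run-lit" output above is the usual GN idiom for an always-dirty action:
the script never writes the declared output, so ninja re-runs the action every
time it is requested. The same pattern in isolation (script name hypothetical):

  # An action whose declared output is never produced re-runs on every build.
  action("always_run_sketch") {
    script = "run_checks.py"  # hypothetical; never writes the output below
    args = []
    outputs = [ "$target_gen_dir/never-written" ]
    testonly = true
  }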
diff --git a/utils/gn/secondary/llvm/test/llvm_lit_site_cfg_files.gni b/utils/gn/secondary/llvm/test/llvm_lit_site_cfg_files.gni
new file mode 100644
index 0000000..b0f5aca
--- /dev/null
+++ b/utils/gn/secondary/llvm/test/llvm_lit_site_cfg_files.gni
@@ -0,0 +1,2 @@
+llvm_lit_site_cfg_file = "$root_gen_dir/llvm/test/lit.site.cfg.py"
+llvm_lit_unit_site_cfg_file = "$root_gen_dir/llvm/test/Unit/lit.site.cfg.py"
diff --git a/utils/gn/secondary/llvm/tools/binutils_symlinks.gni b/utils/gn/secondary/llvm/tools/binutils_symlinks.gni
new file mode 100644
index 0000000..3149775
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/binutils_symlinks.gni
@@ -0,0 +1,5 @@
+declare_args() {
+  # If set, creates binutils-style symlinks (ar, c++filt, dlltool, dwp, nm,
+  # objcopy, objdump, ranlib, readelf, strip) in the build directory.
+  llvm_install_binutils_symlinks = false
+}
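A developer opts in by setting the arg in the build directory's args.gn (which
uses GN syntax); for example:

  # args.gn: enable the binutils-style symlinks declared in the llvm-ar,
  # llvm-cxxfilt, llvm-dwp, llvm-nm, llvm-objcopy, llvm-objdump, and
  # llvm-readobj build files below.
  llvm_install_binutils_symlinks = true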
diff --git a/utils/gn/secondary/llvm/tools/bugpoint-passes/BUILD.gn b/utils/gn/secondary/llvm/tools/bugpoint-passes/BUILD.gn
new file mode 100644
index 0000000..385c469
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/bugpoint-passes/BUILD.gn
@@ -0,0 +1,22 @@
+assert(host_os != "win", "loadable modules not supported on win")
+
+loadable_module("bugpoint-passes") {
+  output_name = "BugpointPasses"
+  deps = [
+    # BugpointPasses doesn't want to link in any LLVM code; it just
+    # needs its headers.
+    "//llvm/include/llvm/IR:public_tablegen",
+  ]
+  sources = [
+    "TestPasses.cpp",
+  ]
+
+  if (host_os == "linux") {
+    # The GN build currently doesn't globally pass -fPIC, but that's
+    # needed for building .so files on Linux.  Just pass it manually
+    # for loadable_modules for now.
+    cflags = [ "-fPIC" ]
+  }
+
+  # FIXME: Use bugpoint.exports to remove all exports.
+}
diff --git a/utils/gn/secondary/llvm/tools/bugpoint/BUILD.gn b/utils/gn/secondary/llvm/tools/bugpoint/BUILD.gn
new file mode 100644
index 0000000..84a4908
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/bugpoint/BUILD.gn
@@ -0,0 +1,41 @@
+executable("bugpoint") {
+  deps = [
+    "//llvm/include/llvm/Config:config",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Linker",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/Transforms/AggressiveInstCombine",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/Instrumentation",
+    "//llvm/lib/Transforms/ObjCARC",
+    "//llvm/lib/Transforms/Scalar",
+    "//llvm/lib/Transforms/Utils",
+    "//llvm/lib/Transforms/Vectorize",
+  ]
+  sources = [
+    "BugDriver.cpp",
+    "CrashDebugger.cpp",
+    "ExecutionDriver.cpp",
+    "ExtractFunction.cpp",
+    "FindBugs.cpp",
+    "Miscompilation.cpp",
+    "OptimizerDriver.cpp",
+    "ToolRunner.cpp",
+    "bugpoint.cpp",
+  ]
+
+  # Support plugins.
+  # FIXME: Disable dead stripping once other binaries are dead-stripped.
+  if (host_os == "linux") {
+    # Make sure bugpoint plugins can access bugpoint's symbols.
+    # Corresponds to export_executable_symbols() in cmake.
+    ldflags = [ "-rdynamic" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/tools/dsymutil/BUILD.gn b/utils/gn/secondary/llvm/tools/dsymutil/BUILD.gn
new file mode 100644
index 0000000..721b4c4
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/dsymutil/BUILD.gn
@@ -0,0 +1,28 @@
+executable("dsymutil") {
+  deps = [
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "BinaryHolder.cpp",
+    "CFBundle.cpp",
+    "CompileUnit.cpp",
+    "DebugMap.cpp",
+    "DeclContext.cpp",
+    "DwarfLinker.cpp",
+    "DwarfStreamer.cpp",
+    "MachODebugMapParser.cpp",
+    "MachOUtils.cpp",
+    "NonRelocatableStringpool.cpp",
+    "SymbolMap.cpp",
+    "dsymutil.cpp",
+  ]
+  if (host_os == "mac") {
+    libs = [ "CoreFoundation.framework" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/tools/llc/BUILD.gn b/utils/gn/secondary/llvm/tools/llc/BUILD.gn
index c9dddd5..45cde4a 100644
--- a/utils/gn/secondary/llvm/tools/llc/BUILD.gn
+++ b/utils/gn/secondary/llvm/tools/llc/BUILD.gn
@@ -20,7 +20,7 @@
   ]
 
   # Support plugins.
-  # FIXME: Disable dead stripping once other binaries are dead-stripped
+  # FIXME: Disable dead stripping once other binaries are dead-stripped.
   if (host_os == "linux") {
     # Corresponds to export_executable_symbols() in cmake.
     ldflags = [ "-rdynamic" ]
diff --git a/utils/gn/secondary/llvm/tools/lli/BUILD.gn b/utils/gn/secondary/llvm/tools/lli/BUILD.gn
new file mode 100644
index 0000000..0e17358
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/lli/BUILD.gn
@@ -0,0 +1,29 @@
+executable("lli") {
+  deps = [
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/Interpreter",
+    "//llvm/lib/ExecutionEngine/MCJIT",
+    "//llvm/lib/ExecutionEngine/Orc",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:NativeTarget",
+    "//llvm/lib/Transforms/Instrumentation",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "lli.cpp",
+  ]
+  if (host_os == "linux") {
+    # Corresponds to export_executable_symbols() in cmake.
+    ldflags = [ "-rdynamic" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/tools/lli/ChildTarget/BUILD.gn b/utils/gn/secondary/llvm/tools/lli/ChildTarget/BUILD.gn
new file mode 100644
index 0000000..6203b2a
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/lli/ChildTarget/BUILD.gn
@@ -0,0 +1,10 @@
+executable("lli-child-target") {
+  deps = [
+    "//llvm/lib/ExecutionEngine/Orc",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ChildTarget.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-ar/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-ar/BUILD.gn
new file mode 100644
index 0000000..969fbed
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-ar/BUILD.gn
@@ -0,0 +1,46 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+symlinks = [
+  "llvm-dlltool",
+  "llvm-lib",
+  "llvm-ranlib",
+]
+if (llvm_install_binutils_symlinks) {
+  symlinks += [
+    "ar",
+    "dlltool",
+    "ranlib",
+  ]
+}
+foreach(target, symlinks) {
+  symlink_or_copy(target) {
+    deps = [
+      ":llvm-ar",
+    ]
+    source = "llvm-ar"
+    output = "$root_out_dir/bin/$target"
+  }
+}
+
+# //:llvm-ar depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = []
+  foreach(target, symlinks) {
+    deps += [ ":$target" ]
+  }
+}
+
+executable("llvm-ar") {
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/ToolDrivers/llvm-dlltool:DlltoolDriver",
+    "//llvm/lib/ToolDrivers/llvm-lib:LibDriver",
+  ]
+  sources = [
+    "llvm-ar.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-as/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-as/BUILD.gn
new file mode 100644
index 0000000..914cb9a
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-as/BUILD.gn
@@ -0,0 +1,11 @@
+executable("llvm-as") {
+  deps = [
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-as.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-bcanalyzer/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-bcanalyzer/BUILD.gn
new file mode 100644
index 0000000..a5d7581
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-bcanalyzer/BUILD.gn
@@ -0,0 +1,9 @@
+executable("llvm-bcanalyzer") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-bcanalyzer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-c-test/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-c-test/BUILD.gn
new file mode 100644
index 0000000..1bbe64d
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-c-test/BUILD.gn
@@ -0,0 +1,32 @@
+executable("llvm-c-test") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  if (host_os != "win") {
+    cflags_c = [
+      "-std=gnu99",
+      "-Wstrict-prototypes",
+    ]
+  }
+  sources = [
+    "attributes.c",
+    "calc.c",
+    "debuginfo.c",
+    "diagnostic.c",
+    "disassemble.c",
+    "echo.cpp",
+    "helpers.c",
+    "include-all.c",
+    "main.c",
+    "metadata.c",
+    "module.c",
+    "object.c",
+    "targets.c",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cat/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cat/BUILD.gn
new file mode 100644
index 0000000..29c7176
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cat/BUILD.gn
@@ -0,0 +1,12 @@
+executable("llvm-cat") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-cat.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cfi-verify/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cfi-verify/BUILD.gn
new file mode 100644
index 0000000..e5779ae
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cfi-verify/BUILD.gn
@@ -0,0 +1,18 @@
+executable("llvm-cfi-verify") {
+  deps = [
+    "lib",
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsAsmParsers",
+    "//llvm/lib/Target:AllTargetsAsmPrinters",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsDisassemblers",
+    "//llvm/lib/Target:AllTargetsInfos",
+  ]
+  sources = [
+    "llvm-cfi-verify.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cfi-verify/lib/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cfi-verify/lib/BUILD.gn
new file mode 100644
index 0000000..4a1d61c
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cfi-verify/lib/BUILD.gn
@@ -0,0 +1,18 @@
+static_library("lib") {
+  output_name = "LLVMCFIVerify"
+  deps = [
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "FileAnalysis.cpp",
+    "FileAnalysis.h",
+    "GraphBuilder.cpp",
+    "GraphBuilder.h",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn
new file mode 100644
index 0000000..2f662e0
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn
@@ -0,0 +1,123 @@
+import("//llvm/lib/Target/targets_string.gni")
+import("//llvm/utils/gn/build/buildflags.gni")
+import("//llvm/utils/gn/build/libs/pthread/enable.gni")
+import("//llvm/utils/gn/build/libs/terminfo/enable.gni")
+import("//llvm/utils/gn/build/libs/xml/enable.gni")
+import("//llvm/utils/gn/build/libs/zlib/enable.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("//llvm/version.gni")
+
+write_cmake_config("BuildVariables.inc") {
+  input = "BuildVariables.inc.in"
+  output = "$target_gen_dir/BuildVariables.inc"
+
+  if (is_debug) {
+    build_mode = "debug"
+  } else {
+    build_mode = "release"
+  }
+
+  # FIXME: Why doesn't llvm-config do this? Why is it done in
+  # llvm-config/CMakeLists.txt?
+  if (host_os == "win") {
+    l = ""
+    lib = ".lib"
+  } else {
+    l = "-l"
+    lib = ""
+  }
+
+  # Windows doesn't use any of libxml2, terminfo, zlib by default.
+  # Make GN not warn about these variables being unused.
+  not_needed([
+               "l",
+               "lib",
+             ])
+
+  system_libs = ""
+  if (host_os == "win") {
+    # libuuid required for FOLDERID_Profile usage in
+    # lib/Support/Windows/Path.inc.
+    # advapi32 required for CryptAcquireContextW in
+    # lib/Support/Windows/Path.inc.
+    system_libs = "psapi.lib shell32.lib ole32.lib uuid.lib advapi32.lib"
+  } else {
+    system_libs += "-lm"
+    if (host_os == "linux") {
+      system_libs += " -lrt -ldl"
+    }
+    if (llvm_enable_threads) {
+      system_libs += " -llibpthreads"
+      if (host_os == "linux") {
+        system_libs += " -latomic"
+      }
+    }
+  }
+  if (llvm_enable_libxml2) {
+    system_libs += " ${l}xml2${lib}"
+  }
+  if (llvm_enable_terminfo) {
+    system_libs += " ${l}ncurses${lib}"
+  }
+  if (llvm_enable_zlib) {
+    system_libs += " ${l}z${lib}"
+  }
+
+  values = [
+    "LLVM_SRC_ROOT=" + rebase_path("//llvm"),
+    "LLVM_OBJ_ROOT=" + rebase_path(root_out_dir),
+
+    # FIXME: Only the bits needed to run LLVM's test are implemented.
+    "LLVM_CPPFLAGS=.",  # FIXME
+    "LLVM_CFLAGS=.",  # FIXME
+    "LLVM_LDFLAGS=.",  # FIXME
+    "LLVM_CXXFLAGS=.",  # FIXME
+    "LLVM_BUILDMODE=$build_mode",
+    "LLVM_LIBDIR_SUFFIX=",
+    "LLVM_TARGETS_BUILT=$llvm_targets_to_build_string",
+    "LLVM_SYSTEM_LIBS=$system_libs",
+    "LLVM_BUILD_SYSTEM=gn",
+    "LLVM_HAS_RTTI=0",
+    "LLVM_BUILD_LLVM_DYLIB=0",
+    "LLVM_LINK_LLVM_DYLIB=0",
+    "BUILD_SHARED_LIBS=0",
+    "LLVM_DYLIB_COMPONENTS=all",
+    "LLVM_DYLIB_VERSION=${llvm_version_major}svn",
+    "LLVM_HAS_GLOBAL_ISEL=1",
+    "LLVM_TOOLS_INSTALL_DIR=",
+  ]
+}
+
+# FIXME: It'd be nice to not depend on llvm-build for this. Depending on all the
+# LLVMBuild.txt files just for this seems a bit overkill.  `gn desc` should
+# have all this information too and could be called at build time.
+# When this is removed, update llvm/test/BUILD.gn to no longer have unnecessary
+# deps on a couple llvm/lib/ targets.
+action("LibraryDependencies.inc") {
+  script = "//llvm/utils/llvm-build/llvm-build"
+  output = "$target_gen_dir/LibraryDependencies.inc"
+  args = [
+    "--native-target=$native_target",
+    "--enable-targets=$llvm_targets_to_build_string",
+    "--write-library-table=" + rebase_path(output, root_out_dir),
+  ]
+  outputs = [
+    output,
+  ]
+}
+
+executable("llvm-config") {
+  deps = [
+    ":BuildVariables.inc",
+    ":LibraryDependencies.inc",
+    "//llvm/include/llvm/Config:config",
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Support",
+  ]
+
+  # To pick up the generated inc files.
+  include_dirs = [ "$target_gen_dir" ]
+  sources = [
+    "llvm-config.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cov/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cov/BUILD.gn
new file mode 100644
index 0000000..0d4e766
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cov/BUILD.gn
@@ -0,0 +1,24 @@
+executable("llvm-cov") {
+  deps = [
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/ProfileData",
+    "//llvm/lib/ProfileData/Coverage",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "CodeCoverage.cpp",
+    "CoverageExporterJson.cpp",
+    "CoverageExporterLcov.cpp",
+    "CoverageFilters.cpp",
+    "CoverageReport.cpp",
+    "CoverageSummaryInfo.cpp",
+    "SourceCoverageView.cpp",
+    "SourceCoverageViewHTML.cpp",
+    "SourceCoverageViewText.cpp",
+    "TestingSupport.cpp",
+    "gcov.cpp",
+    "llvm-cov.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cvtres/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cvtres/BUILD.gn
new file mode 100644
index 0000000..77d410d
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cvtres/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("Opts") {
+  visibility = [ ":llvm-cvtres" ]
+  args = [ "-gen-opt-parser-defs" ]
+}
+
+executable("llvm-cvtres") {
+  deps = [
+    ":Opts",
+    "//llvm/lib/Object",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-cvtres.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cxxdump/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cxxdump/BUILD.gn
new file mode 100644
index 0000000..c146958
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cxxdump/BUILD.gn
@@ -0,0 +1,11 @@
+executable("llvm-cxxdump") {
+  deps = [
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "Error.cpp",
+    "llvm-cxxdump.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cxxfilt/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cxxfilt/BUILD.gn
new file mode 100644
index 0000000..37e7ebd
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cxxfilt/BUILD.gn
@@ -0,0 +1,32 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+if (llvm_install_binutils_symlinks) {
+  symlink_or_copy("cxxfilt") {  # Can't have '+' in target name.
+    deps = [
+      ":llvm-cxxfilt",
+    ]
+    source = "llvm-cxxfilt"
+    output = "$root_out_dir/bin/c++filt"  # Note: c++filt, not cxxfilt
+  }
+}
+
+# //:llvm-cxxfilt depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = [
+    ":llvm-cxxfilt",
+  ]
+  if (llvm_install_binutils_symlinks) {
+    deps += [ ":cxxfilt" ]
+  }
+}
+
+executable("llvm-cxxfilt") {
+  deps = [
+    "//llvm/lib/Demangle",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-cxxfilt.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-cxxmap/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-cxxmap/BUILD.gn
new file mode 100644
index 0000000..7cee418
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-cxxmap/BUILD.gn
@@ -0,0 +1,10 @@
+executable("llvm-cxxmap") {
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "llvm-cxxmap.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-diff/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-diff/BUILD.gn
new file mode 100644
index 0000000..5f46d8c
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-diff/BUILD.gn
@@ -0,0 +1,13 @@
+executable("llvm-diff") {
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "DiffConsumer.cpp",
+    "DiffLog.cpp",
+    "DifferenceEngine.cpp",
+    "llvm-diff.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-dis/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-dis/BUILD.gn
new file mode 100644
index 0000000..c0c0c49
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-dis/BUILD.gn
@@ -0,0 +1,10 @@
+executable("llvm-dis") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-dis.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-dwarfdump/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-dwarfdump/BUILD.gn
new file mode 100644
index 0000000..b99086f
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-dwarfdump/BUILD.gn
@@ -0,0 +1,14 @@
+executable("llvm-dwarfdump") {
+  deps = [
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsInfos",
+  ]
+  sources = [
+    "Statistics.cpp",
+    "llvm-dwarfdump.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn
new file mode 100644
index 0000000..efb5b82
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn
@@ -0,0 +1,38 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+if (llvm_install_binutils_symlinks) {
+  symlink_or_copy("dwp") {
+    deps = [
+      ":llvm-dwp",
+    ]
+    source = "llvm-dwp"
+    output = "$root_out_dir/bin/dwp"
+  }
+}
+
+# //:llvm-dwp depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = [
+    ":llvm-dwp",
+  ]
+  if (llvm_install_binutils_symlinks) {
+    deps += [ ":dwp" ]
+  }
+}
+
+executable("llvm-dwp") {
+  deps = [
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "DWPError.cpp",
+    "llvm-dwp.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-elfabi/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-elfabi/BUILD.gn
new file mode 100644
index 0000000..dd12e20
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-elfabi/BUILD.gn
@@ -0,0 +1,12 @@
+executable("llvm-elfabi") {
+  deps = [
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/TextAPI",
+  ]
+  sources = [
+    "ELFObjHandler.cpp",
+    "ErrorCollector.cpp",
+    "llvm-elfabi.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-exegesis/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-exegesis/BUILD.gn
new file mode 100644
index 0000000..7b81b7e
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-exegesis/BUILD.gn
@@ -0,0 +1,15 @@
+executable("llvm-exegesis") {
+  deps = [
+    "lib",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/MCJIT",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:NativeTarget",
+  ]
+  sources = [
+    "llvm-exegesis.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/AArch64/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/AArch64/BUILD.gn
new file mode 100644
index 0000000..ef280c6
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/AArch64/BUILD.gn
@@ -0,0 +1,22 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("AArch64GenExegesis") {
+  args = [ "-gen-exegesis" ]
+  td_file = "//llvm/lib/Target/AArch64/AArch64.td"
+}
+
+static_library("AArch64") {
+  output_name = "LLVMExegesisAArch64"
+  deps = [
+    ":AArch64GenExegesis",
+
+    # Exegesis reaches inside the Target/AArch64 tablegen internals and must
+    # depend on these Target/AArch64-internal build targets.
+    "//llvm/lib/Target/AArch64/MCTargetDesc",
+    "//llvm/lib/Target/AArch64/Utils",
+  ]
+  sources = [
+    "Target.cpp",
+  ]
+  include_dirs = [ "//llvm/lib/Target/AArch64" ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/BUILD.gn
new file mode 100644
index 0000000..ef4eea6
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/BUILD.gn
@@ -0,0 +1,44 @@
+import("//llvm/lib/Target/targets.gni")
+
+static_library("lib") {
+  output_name = "LLVMExegesis"
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/GlobalISel",
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/MCJIT",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/ObjectYAML",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "Analysis.cpp",
+    "Assembler.cpp",
+    "BenchmarkResult.cpp",
+    "BenchmarkRunner.cpp",
+    "Clustering.cpp",
+    "CodeTemplate.cpp",
+    "Latency.cpp",
+    "LlvmState.cpp",
+    "MCInstrDescView.cpp",
+    "PerfHelper.cpp",
+    "RegisterAliasing.cpp",
+    "RegisterValue.cpp",
+    "SnippetGenerator.cpp",
+    "Target.cpp",
+    "Uops.cpp",
+  ]
+
+  if (llvm_build_AArch64) {
+    deps += [ "AArch64" ]
+  }
+  if (llvm_build_PowerPC) {
+    deps += [ "PowerPC" ]
+  }
+  if (llvm_build_X86) {
+    deps += [ "X86" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/PowerPC/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/PowerPC/BUILD.gn
new file mode 100644
index 0000000..4c4af42
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/PowerPC/BUILD.gn
@@ -0,0 +1,21 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("PPCGenExegesis") {
+  args = [ "-gen-exegesis" ]
+  td_file = "//llvm/lib/Target/PowerPC/PPC.td"
+}
+
+static_library("PowerPC") {
+  output_name = "LLVMExegesisPowerPC"
+  deps = [
+    ":PPCGenExegesis",
+
+    # Exegesis reaches inside the Target/PowerPC tablegen internals and must
+    # depend on these Target/PowerPC-internal build targets.
+    "//llvm/lib/Target/PowerPC/MCTargetDesc",
+  ]
+  sources = [
+    "Target.cpp",
+  ]
+  include_dirs = [ "//llvm/lib/Target/PowerPC" ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/X86/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/X86/BUILD.gn
new file mode 100644
index 0000000..a1cb577
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-exegesis/lib/X86/BUILD.gn
@@ -0,0 +1,21 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("X86GenExegesis") {
+  args = [ "-gen-exegesis" ]
+  td_file = "//llvm/lib/Target/X86/X86.td"
+}
+
+static_library("X86") {
+  output_name = "LLVMExegesisX86"
+  deps = [
+    ":X86GenExegesis",
+
+    # Exegesis reaches inside the Target/X86 tablegen internals and must
+    # depend on this Target/X86-internal build target.
+    "//llvm/lib/Target/X86/MCTargetDesc",
+  ]
+  sources = [
+    "Target.cpp",
+  ]
+  include_dirs = [ "//llvm/lib/Target/X86" ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-extract/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-extract/BUILD.gn
new file mode 100644
index 0000000..c8699a7
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-extract/BUILD.gn
@@ -0,0 +1,12 @@
+executable("llvm-extract") {
+  deps = [
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/IPO",
+  ]
+  sources = [
+    "llvm-extract.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-isel-fuzzer/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-isel-fuzzer/BUILD.gn
new file mode 100644
index 0000000..d9ef7fc
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-isel-fuzzer/BUILD.gn
@@ -0,0 +1,24 @@
+import("//llvm/utils/gn/build/fuzzer.gni")
+
+fuzzer("llvm-isel-fuzzer") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/FuzzMutate",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/Transforms/Scalar",
+  ]
+  dummy_main = "DummyISelFuzzer.cpp"
+  sources = [
+    "llvm-isel-fuzzer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-link/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-link/BUILD.gn
new file mode 100644
index 0000000..10d6783
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-link/BUILD.gn
@@ -0,0 +1,16 @@
+executable("llvm-link") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Linker",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "llvm-link.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-lto/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-lto/BUILD.gn
new file mode 100644
index 0000000..a569fd9
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-lto/BUILD.gn
@@ -0,0 +1,17 @@
+executable("llvm-lto") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/LTO",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "llvm-lto.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-lto2/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-lto2/BUILD.gn
new file mode 100644
index 0000000..9dfb231
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-lto2/BUILD.gn
@@ -0,0 +1,16 @@
+executable("llvm-lto2") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/IR",
+    "//llvm/lib/LTO",
+    "//llvm/lib/Linker",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "llvm-lto2.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-mc/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-mc/BUILD.gn
new file mode 100644
index 0000000..ddaae80
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-mc/BUILD.gn
@@ -0,0 +1,16 @@
+executable("llvm-mc") {
+  deps = [
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsAsmParsers",
+    "//llvm/lib/Target:AllTargetsAsmPrinters",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsDisassemblers",
+    "//llvm/lib/Target:AllTargetsInfos",
+  ]
+  sources = [
+    "Disassembler.cpp",
+    "llvm-mc.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-mca/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-mca/BUILD.gn
new file mode 100644
index 0000000..f4b66eb
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-mca/BUILD.gn
@@ -0,0 +1,32 @@
+executable("llvm-mca") {
+  deps = [
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/MCA",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsAsmParsers",
+    "//llvm/lib/Target:AllTargetsAsmPrinters",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsDisassemblers",
+    "//llvm/lib/Target:AllTargetsInfos",
+  ]
+  include_dirs = [
+    ".",
+    "include",
+  ]
+  sources = [
+    "CodeRegion.cpp",
+    "CodeRegionGenerator.cpp",
+    "PipelinePrinter.cpp",
+    "Views/DispatchStatistics.cpp",
+    "Views/InstructionInfoView.cpp",
+    "Views/RegisterFileStatistics.cpp",
+    "Views/ResourcePressureView.cpp",
+    "Views/RetireControlUnitStatistics.cpp",
+    "Views/SchedulerStatistics.cpp",
+    "Views/SummaryView.cpp",
+    "Views/TimelineView.cpp",
+    "Views/View.cpp",
+    "llvm-mca.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-modextract/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-modextract/BUILD.gn
new file mode 100644
index 0000000..a8bf2d3
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-modextract/BUILD.gn
@@ -0,0 +1,12 @@
+executable("llvm-modextract") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-modextract.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-mt/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-mt/BUILD.gn
new file mode 100644
index 0000000..7f3ea48
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-mt/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("Opts") {
+  visibility = [ ":llvm-mt" ]
+  args = [ "-gen-opt-parser-defs" ]
+}
+
+executable("llvm-mt") {
+  deps = [
+    ":Opts",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+    "//llvm/lib/WindowsManifest",
+  ]
+  sources = [
+    "llvm-mt.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-nm/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-nm/BUILD.gn
new file mode 100644
index 0000000..51279d9
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-nm/BUILD.gn
@@ -0,0 +1,38 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+if (llvm_install_binutils_symlinks) {
+  symlink_or_copy("nm") {
+    deps = [
+      ":llvm-nm",
+    ]
+    source = "llvm-nm"
+    output = "$root_out_dir/bin/nm"
+  }
+}
+
+# //:llvm-nm depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = [
+    ":llvm-nm",
+  ]
+  if (llvm_install_binutils_symlinks) {
+    deps += [ ":nm" ]
+  }
+}
+
+executable("llvm-nm") {
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Demangle",
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsAsmParsers",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsInfos",
+  ]
+  sources = [
+    "llvm-nm.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-objcopy/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-objcopy/BUILD.gn
new file mode 100644
index 0000000..a23d1a3
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-objcopy/BUILD.gn
@@ -0,0 +1,61 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/TableGen/tablegen.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+tablegen("ObjcopyOpts") {
+  visibility = [ ":llvm-objcopy" ]
+  args = [ "-gen-opt-parser-defs" ]
+}
+
+tablegen("StripOpts") {
+  visibility = [ ":llvm-objcopy" ]
+  args = [ "-gen-opt-parser-defs" ]
+}
+
+symlinks = [ "llvm-strip" ]
+if (llvm_install_binutils_symlinks) {
+  symlinks += [
+    "objcopy",
+    "strip",
+  ]
+}
+foreach(target, symlinks) {
+  symlink_or_copy(target) {
+    deps = [
+      ":llvm-objcopy",
+    ]
+    source = "llvm-objcopy"
+    output = "$root_out_dir/bin/$target"
+  }
+}
+
+# //:llvm-objcopy depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = []
+  foreach(target, symlinks) {
+    deps += [ ":$target" ]
+  }
+}
+
+executable("llvm-objcopy") {
+  deps = [
+    ":ObjcopyOpts",
+    ":StripOpts",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  include_dirs = [ "." ]
+  sources = [
+    "Buffer.cpp",
+    "COFF/COFFObjcopy.cpp",
+    "COFF/Object.cpp",
+    "COFF/Reader.cpp",
+    "COFF/Writer.cpp",
+    "CopyConfig.cpp",
+    "ELF/ELFObjcopy.cpp",
+    "ELF/Object.cpp",
+    "llvm-objcopy.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-objdump/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-objdump/BUILD.gn
new file mode 100644
index 0000000..e567a70
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-objdump/BUILD.gn
@@ -0,0 +1,49 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+if (llvm_install_binutils_symlinks) {
+  symlink_or_copy("objdump") {
+    deps = [
+      ":llvm-objdump",
+    ]
+    source = "llvm-objdump"
+    output = "$root_out_dir/bin/objdump"
+  }
+}
+
+# //:llvm-objdump depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = [
+    ":llvm-objdump",
+  ]
+  if (llvm_install_binutils_symlinks) {
+    deps += [ ":objdump" ]
+  }
+}
+
+executable("llvm-objdump") {
+  deps = [
+    "//llvm/include/llvm/Config:config",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/DebugInfo/PDB",
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/Demangle",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsAsmPrinters",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsDisassemblers",
+    "//llvm/lib/Target:AllTargetsInfos",
+    "//llvm/utils/gn/build/libs/xar",
+  ]
+  sources = [
+    "COFFDump.cpp",
+    "ELFDump.cpp",
+    "MachODump.cpp",
+    "WasmDump.cpp",
+    "llvm-objdump.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-opt-fuzzer/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-opt-fuzzer/BUILD.gn
new file mode 100644
index 0000000..cb95dda
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-opt-fuzzer/BUILD.gn
@@ -0,0 +1,31 @@
+import("//llvm/utils/gn/build/fuzzer.gni")
+
+fuzzer("llvm-opt-fuzzer") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/FuzzMutate",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/MC",
+    "//llvm/lib/Passes",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/Transforms/AggressiveInstCombine",
+    "//llvm/lib/Transforms/Coroutines",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/InstCombine",
+    "//llvm/lib/Transforms/Instrumentation",
+    "//llvm/lib/Transforms/ObjCARC",
+    "//llvm/lib/Transforms/Scalar",
+    "//llvm/lib/Transforms/Utils",
+    "//llvm/lib/Transforms/Vectorize",
+  ]
+  dummy_main = "DummyOptFuzzer.cpp"
+  sources = [
+    "llvm-opt-fuzzer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-opt-report/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-opt-report/BUILD.gn
new file mode 100644
index 0000000..70b2e1b
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-opt-report/BUILD.gn
@@ -0,0 +1,12 @@
+executable("llvm-opt-report") {
+  deps = [
+    "//llvm/lib/Demangle",
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/OptRemarks",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "OptReport.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-pdbutil/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-pdbutil/BUILD.gn
new file mode 100644
index 0000000..b72bd00
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-pdbutil/BUILD.gn
@@ -0,0 +1,35 @@
+executable("llvm-pdbutil") {
+  deps = [
+    "//llvm/lib/BinaryFormat",
+    "//llvm/lib/DebugInfo/CodeView",
+    "//llvm/lib/DebugInfo/MSF",
+    "//llvm/lib/DebugInfo/PDB",
+    "//llvm/lib/Object",
+    "//llvm/lib/ObjectYAML",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "BytesOutputStyle.cpp",
+    "DumpOutputStyle.cpp",
+    "ExplainOutputStyle.cpp",
+    "FormatUtil.cpp",
+    "InputFile.cpp",
+    "LinePrinter.cpp",
+    "MinimalSymbolDumper.cpp",
+    "MinimalTypeDumper.cpp",
+    "PdbYaml.cpp",
+    "PrettyBuiltinDumper.cpp",
+    "PrettyClassDefinitionDumper.cpp",
+    "PrettyClassLayoutGraphicalDumper.cpp",
+    "PrettyCompilandDumper.cpp",
+    "PrettyEnumDumper.cpp",
+    "PrettyExternalSymbolDumper.cpp",
+    "PrettyFunctionDumper.cpp",
+    "PrettyTypeDumper.cpp",
+    "PrettyTypedefDumper.cpp",
+    "PrettyVariableDumper.cpp",
+    "StreamUtil.cpp",
+    "YAMLOutputStyle.cpp",
+    "llvm-pdbutil.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-profdata/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-profdata/BUILD.gn
new file mode 100644
index 0000000..748acfc
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-profdata/BUILD.gn
@@ -0,0 +1,10 @@
+executable("llvm-profdata") {
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/ProfileData",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-profdata.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-rc/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-rc/BUILD.gn
new file mode 100644
index 0000000..d47148a
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-rc/BUILD.gn
@@ -0,0 +1,22 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+
+tablegen("Opts") {
+  visibility = [ ":llvm-rc" ]
+  args = [ "-gen-opt-parser-defs" ]
+}
+
+executable("llvm-rc") {
+  deps = [
+    ":Opts",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ResourceFileWriter.cpp",
+    "ResourceScriptCppFilter.cpp",
+    "ResourceScriptParser.cpp",
+    "ResourceScriptStmt.cpp",
+    "ResourceScriptToken.cpp",
+    "llvm-rc.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-readobj/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-readobj/BUILD.gn
new file mode 100644
index 0000000..501d809
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-readobj/BUILD.gn
@@ -0,0 +1,49 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+symlinks = [ "llvm-readelf" ]
+if (llvm_install_binutils_symlinks) {
+  symlinks += [ "readelf" ]
+}
+foreach(target, symlinks) {
+  symlink_or_copy(target) {
+    deps = [
+      ":llvm-readobj",
+    ]
+    source = "llvm-readobj"
+    output = "$root_out_dir/bin/$target"
+  }
+}
+
+# //:llvm-readobj depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = []
+  foreach(target, symlinks) {
+    deps += [ ":$target" ]
+  }
+}
+
+executable("llvm-readobj") {
+  deps = [
+    "//llvm/lib/BinaryFormat",
+    "//llvm/lib/DebugInfo/CodeView",
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/DebugInfo/MSF",
+    "//llvm/lib/DebugInfo/PDB",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ARMWinEHPrinter.cpp",
+    "COFFDumper.cpp",
+    "COFFImportDumper.cpp",
+    "ELFDumper.cpp",
+    "Error.cpp",
+    "MachODumper.cpp",
+    "ObjDumper.cpp",
+    "WasmDumper.cpp",
+    "Win64EHDumper.cpp",
+    "WindowsResourceDumper.cpp",
+    "llvm-readobj.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-rtdyld/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-rtdyld/BUILD.gn
new file mode 100644
index 0000000..2c4d8f2
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-rtdyld/BUILD.gn
@@ -0,0 +1,14 @@
+executable("llvm-rtdyld") {
+  deps = [
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "llvm-rtdyld.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-size/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-size/BUILD.gn
new file mode 100644
index 0000000..03bc544
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-size/BUILD.gn
@@ -0,0 +1,32 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+if (llvm_install_binutils_symlinks) {
+  symlink_or_copy("size") {
+    deps = [
+      ":llvm-size",
+    ]
+    source = "llvm-size"
+    output = "$root_out_dir/bin/size"
+  }
+}
+
+# //:llvm-size depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = [
+    ":llvm-size",
+  ]
+  if (llvm_install_binutils_symlinks) {
+    deps += [ ":size" ]
+  }
+}
+
+executable("llvm-size") {
+  deps = [
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-size.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-split/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-split/BUILD.gn
new file mode 100644
index 0000000..375dce3
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-split/BUILD.gn
@@ -0,0 +1,12 @@
+executable("llvm-split") {
+  deps = [
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "llvm-split.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-strings/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-strings/BUILD.gn
new file mode 100644
index 0000000..dddf3a2
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-strings/BUILD.gn
@@ -0,0 +1,33 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+if (llvm_install_binutils_symlinks) {
+  symlink_or_copy("strings") {
+    deps = [
+      ":llvm-strings",
+    ]
+    source = "llvm-strings"
+    output = "$root_out_dir/bin/strings"
+  }
+}
+
+# //:llvm-strings depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = [
+    ":llvm-strings",
+  ]
+  if (llvm_install_binutils_symlinks) {
+    deps += [ ":strings" ]
+  }
+}
+
+executable("llvm-strings") {
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-strings.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-symbolizer/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-symbolizer/BUILD.gn
new file mode 100644
index 0000000..497d2c8
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-symbolizer/BUILD.gn
@@ -0,0 +1,36 @@
+import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/gn/build/symlink_or_copy.gni")
+
+if (llvm_install_binutils_symlinks) {
+  symlink_or_copy("addr2line") {
+    deps = [
+      ":llvm-symbolizer",
+    ]
+    source = "llvm-symbolizer"
+    output = "$root_out_dir/bin/addr2line"
+  }
+}
+
+# //:llvm-symbolizer depends on this symlink target, see comment in //BUILD.gn.
+group("symlinks") {
+  deps = [
+    ":llvm-symbolizer",
+  ]
+  if (llvm_install_binutils_symlinks) {
+    deps += [ ":addr2line" ]
+  }
+}
+
+executable("llvm-symbolizer") {
+  deps = [
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/DebugInfo/PDB",
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/Demangle",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "llvm-symbolizer.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/llvm-xray/BUILD.gn b/utils/gn/secondary/llvm/tools/llvm-xray/BUILD.gn
new file mode 100644
index 0000000..89128bd
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/llvm-xray/BUILD.gn
@@ -0,0 +1,23 @@
+executable("llvm-xray") {
+  deps = [
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/XRay",
+  ]
+  sources = [
+    "func-id-helper.cpp",
+    "llvm-xray.cpp",
+    "xray-account.cpp",
+    "xray-color-helper.cpp",
+    "xray-converter.cpp",
+    "xray-extract.cpp",
+    "xray-fdr-dump.cpp",
+    "xray-graph-diff.cpp",
+    "xray-graph.cpp",
+    "xray-registry.cpp",
+    "xray-stacks.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/lto/BUILD.gn b/utils/gn/secondary/llvm/tools/lto/BUILD.gn
new file mode 100644
index 0000000..3cc62d3
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/lto/BUILD.gn
@@ -0,0 +1,39 @@
+import("//llvm/version.gni")
+
+lto_target_type = "shared_library"
+if (host_os == "linux") {
+  # Linux needs -fPIC to build shared libs, but -fPIC isn't on by default.
+  # For now, make libLTO a static lib there.
+  lto_target_type = "static_library"
+}
+
+target(lto_target_type, "lto") {
+  output_name = "LTO"
+  deps = [
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/IR",
+    "//llvm/lib/LTO",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "LTODisassembler.cpp",
+    "lto.cpp",
+  ]
+
+  if (host_os == "mac") {
+    ldflags = [
+      "-Wl,-compatibility_version,1",
+      "-Wl,-current_version,$llvm_version",
+
+      # See llvm_setup_rpath() in CMake.
+      "-Wl,-install_name,@rpath/libLTO.dylib",
+      "-Wl,-rpath,@loader_path/../lib",
+    ]
+  }
+
+  # FIXME: Use lto.exports
+}
diff --git a/utils/gn/secondary/llvm/tools/obj2yaml/BUILD.gn b/utils/gn/secondary/llvm/tools/obj2yaml/BUILD.gn
new file mode 100644
index 0000000..6ba2022
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/obj2yaml/BUILD.gn
@@ -0,0 +1,18 @@
+executable("obj2yaml") {
+  deps = [
+    "//llvm/lib/DebugInfo/CodeView",
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/Object",
+    "//llvm/lib/ObjectYAML",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "Error.cpp",
+    "coff2yaml.cpp",
+    "dwarf2yaml.cpp",
+    "elf2yaml.cpp",
+    "macho2yaml.cpp",
+    "obj2yaml.cpp",
+    "wasm2yaml.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/opt/BUILD.gn b/utils/gn/secondary/llvm/tools/opt/BUILD.gn
new file mode 100644
index 0000000..7deae1c
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/opt/BUILD.gn
@@ -0,0 +1,39 @@
+executable("opt") {
+  deps = [
+    "//llvm/include/llvm/Config:llvm-config",
+    "//llvm/lib/Analysis",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Passes",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/Transforms/AggressiveInstCombine",
+    "//llvm/lib/Transforms/Coroutines",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/Instrumentation",
+    "//llvm/lib/Transforms/ObjCARC",
+    "//llvm/lib/Transforms/Scalar",
+    "//llvm/lib/Transforms/Utils",
+    "//llvm/lib/Transforms/Vectorize",
+  ]
+  sources = [
+    "AnalysisWrappers.cpp",
+    "BreakpointPrinter.cpp",
+    "Debugify.cpp",
+    "GraphPrinters.cpp",
+    "NewPMDriver.cpp",
+    "PassPrinters.cpp",
+    "PrintSCC.cpp",
+    "opt.cpp",
+  ]
+
+  # Support plugins.
+  # FIXME: Disable dead stripping once other binaries are dead-stripped.
+  if (host_os == "linux") {
+    # Corresponds to export_executable_symbols() in cmake.
+    ldflags = [ "-rdynamic" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/tools/sancov/BUILD.gn b/utils/gn/secondary/llvm/tools/sancov/BUILD.gn
new file mode 100644
index 0000000..c07af7b
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/sancov/BUILD.gn
@@ -0,0 +1,18 @@
+executable("sancov") {
+  deps = [
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/DebugInfo/PDB",
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsAsmPrinters",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsDisassemblers",
+    "//llvm/lib/Target:AllTargetsInfos",
+  ]
+  sources = [
+    "sancov.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/sanstats/BUILD.gn b/utils/gn/secondary/llvm/tools/sanstats/BUILD.gn
new file mode 100644
index 0000000..306775d
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/sanstats/BUILD.gn
@@ -0,0 +1,9 @@
+executable("sanstats") {
+  deps = [
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "sanstats.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/verify-uselistorder/BUILD.gn b/utils/gn/secondary/llvm/tools/verify-uselistorder/BUILD.gn
new file mode 100644
index 0000000..09fd489
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/verify-uselistorder/BUILD.gn
@@ -0,0 +1,13 @@
+executable("verify-uselistorder") {
+  deps = [
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/IRReader",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "verify-uselistorder.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/tools/yaml2obj/BUILD.gn b/utils/gn/secondary/llvm/tools/yaml2obj/BUILD.gn
new file mode 100644
index 0000000..56bebc4
--- /dev/null
+++ b/utils/gn/secondary/llvm/tools/yaml2obj/BUILD.gn
@@ -0,0 +1,16 @@
+executable("yaml2obj") {
+  deps = [
+    "//llvm/lib/DebugInfo/CodeView",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/ObjectYAML",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "yaml2coff.cpp",
+    "yaml2elf.cpp",
+    "yaml2macho.cpp",
+    "yaml2obj.cpp",
+    "yaml2wasm.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/triples.gni b/utils/gn/secondary/llvm/triples.gni
index b5ff762..1987afa 100644
--- a/utils/gn/secondary/llvm/triples.gni
+++ b/utils/gn/secondary/llvm/triples.gni
@@ -1,12 +1,22 @@
-if (host_os == "linux") {
-  llvm_host_triple = "x86_64-unknown-linux-gnu"
-} else if (host_os == "mac") {
-  llvm_host_triple = "x86_64-apple-darwin"
-} else if (host_os == "win") {
-  llvm_host_triple = "x86_64-pc-windows"
+if (current_cpu == "x64") {
+  if (current_os == "linux") {
+    llvm_current_triple = "x86_64-unknown-linux-gnu"
+  } else if (current_os == "mac") {
+    llvm_current_triple = "x86_64-apple-darwin"
+  } else if (current_os == "win") {
+    llvm_current_triple = "x86_64-pc-windows"
+  }
+} else if (current_cpu == "arm64") {
+  if (current_os == "android") {
+    llvm_current_triple = "aarch64-linux-android21"
+  }
+}
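+
+# Example (values illustrative): configuring with
+#   gn gen out/android --args='target_os="android" target_cpu="arm64"'
+# makes current_os == "android" and current_cpu == "arm64" in the default
+# toolchain, so llvm_current_triple becomes "aarch64-linux-android21".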
+
+if (!defined(llvm_current_triple)) {
+  assert(false, "unimplemented cpu/os " + current_cpu + "/" + current_os)
 }
 
 declare_args() {
   # The default target triple.
-  llvm_target_triple = llvm_host_triple
+  llvm_target_triple = llvm_current_triple
 }
diff --git a/utils/gn/secondary/llvm/unittests/ADT/BUILD.gn b/utils/gn/secondary/llvm/unittests/ADT/BUILD.gn
new file mode 100644
index 0000000..611df36
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/ADT/BUILD.gn
@@ -0,0 +1,79 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ADTTests") {
+  # ADT is a headers-only library so there's no //llvm/lib/ADT to depend on.
+  # Also see note in //llvm/lib/Support/BUILD.gn.
+  deps = [
+    # Some tests include files from IR, but there's no library dependency.
+    "//llvm/include/llvm/IR:public_tablegen",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "APFloatTest.cpp",
+    "APIntTest.cpp",
+    "APSIntTest.cpp",
+    "AnyTest.cpp",
+    "ArrayRefTest.cpp",
+    "BitVectorTest.cpp",
+    "BitmaskEnumTest.cpp",
+    "BreadthFirstIteratorTest.cpp",
+    "BumpPtrListTest.cpp",
+    "DAGDeltaAlgorithmTest.cpp",
+    "DeltaAlgorithmTest.cpp",
+    "DenseMapTest.cpp",
+    "DenseSetTest.cpp",
+    "DepthFirstIteratorTest.cpp",
+    "EquivalenceClassesTest.cpp",
+    "FoldingSet.cpp",
+    "FunctionExtrasTest.cpp",
+    "FunctionRefTest.cpp",
+    "HashingTest.cpp",
+    "IListBaseTest.cpp",
+    "IListIteratorTest.cpp",
+    "IListNodeBaseTest.cpp",
+    "IListNodeTest.cpp",
+    "IListSentinelTest.cpp",
+    "IListTest.cpp",
+    "ImmutableListTest.cpp",
+    "ImmutableMapTest.cpp",
+    "ImmutableSetTest.cpp",
+    "IntEqClassesTest.cpp",
+    "IntervalMapTest.cpp",
+    "IntrusiveRefCntPtrTest.cpp",
+    "IteratorTest.cpp",
+    "MakeUniqueTest.cpp",
+    "MapVectorTest.cpp",
+    "MappedIteratorTest.cpp",
+    "OptionalTest.cpp",
+    "PackedVectorTest.cpp",
+    "PointerEmbeddedIntTest.cpp",
+    "PointerIntPairTest.cpp",
+    "PointerSumTypeTest.cpp",
+    "PointerUnionTest.cpp",
+    "PostOrderIteratorTest.cpp",
+    "PriorityWorklistTest.cpp",
+    "RangeAdapterTest.cpp",
+    "SCCIteratorTest.cpp",
+    "STLExtrasTest.cpp",
+    "ScopeExitTest.cpp",
+    "SequenceTest.cpp",
+    "SetVectorTest.cpp",
+    "SimpleIListTest.cpp",
+    "SmallPtrSetTest.cpp",
+    "SmallSetTest.cpp",
+    "SmallStringTest.cpp",
+    "SmallVectorTest.cpp",
+    "SparseBitVectorTest.cpp",
+    "SparseMultiSetTest.cpp",
+    "SparseSetTest.cpp",
+    "StatisticTest.cpp",
+    "StringExtrasTest.cpp",
+    "StringMapTest.cpp",
+    "StringRefTest.cpp",
+    "StringSwitchTest.cpp",
+    "TinyPtrVectorTest.cpp",
+    "TripleTest.cpp",
+    "TwineTest.cpp",
+    "VariadicFunctionTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Analysis/BUILD.gn b/utils/gn/secondary/llvm/unittests/Analysis/BUILD.gn
new file mode 100644
index 0000000..e68ef94
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Analysis/BUILD.gn
@@ -0,0 +1,38 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("AnalysisTests") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AliasAnalysisTest.cpp",
+    "AliasSetTrackerTest.cpp",
+    "BasicAliasAnalysisTest.cpp",
+    "BlockFrequencyInfoTest.cpp",
+    "BranchProbabilityInfoTest.cpp",
+    "CFGTest.cpp",
+    "CGSCCPassManagerTest.cpp",
+    "CallGraphTest.cpp",
+    "CaptureTrackingTest.cpp",
+    "DivergenceAnalysisTest.cpp",
+    "GlobalsModRefTest.cpp",
+    "LazyCallGraphTest.cpp",
+    "LoopInfoTest.cpp",
+    "MemoryBuiltinsTest.cpp",
+    "MemorySSATest.cpp",
+    "OrderedBasicBlockTest.cpp",
+    "OrderedInstructionsTest.cpp",
+    "PhiValuesTest.cpp",
+    "ProfileSummaryInfoTest.cpp",
+    "ScalarEvolutionTest.cpp",
+    "SparsePropagation.cpp",
+    "TBAATest.cpp",
+    "TargetLibraryInfoTest.cpp",
+    "UnrollAnalyzerTest.cpp",
+    "ValueLatticeTest.cpp",
+    "ValueTrackingTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/AsmParser/BUILD.gn b/utils/gn/secondary/llvm/unittests/AsmParser/BUILD.gn
new file mode 100644
index 0000000..7cc4a54
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/AsmParser/BUILD.gn
@@ -0,0 +1,12 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("AsmParserTests") {
+  deps = [
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "AsmParserTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/BUILD.gn b/utils/gn/secondary/llvm/unittests/BUILD.gn
new file mode 100644
index 0000000..b46d402
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/BUILD.gn
@@ -0,0 +1,68 @@
+import("//llvm/lib/Target/targets.gni")
+
+group("unittests") {
+  deps = [
+    "ADT:ADTTests",
+    "Analysis:AnalysisTests",
+    "AsmParser:AsmParserTests",
+    "BinaryFormat:BinaryFormatTests",
+    "Bitcode:BitcodeTests",
+    "CodeGen:CodeGenTests",
+    "CodeGen/GlobalISel:GlobalISelTests",
+    "DebugInfo/CodeView:DebugInfoCodeViewTests",
+    "DebugInfo/DWARF:DebugInfoDWARFTests",
+    "DebugInfo/MSF:DebugInfoMSFTests",
+    "DebugInfo/PDB:DebugInfoPDBTests",
+    "Demangle:DemangleTests",
+    "ExecutionEngine:ExecutionEngineTests",
+    "ExecutionEngine/MCJIT:MCJITTests",
+    "ExecutionEngine/Orc:OrcJITTests",
+    "FuzzMutate:FuzzMutateTests",
+    "IR:IRTests",
+    "LineEditor:LineEditorTests",
+    "Linker:LinkerTests",
+    "MC:MCTests",
+    "MI:MITests",
+    "Object:ObjectTests",
+    "ObjectYAML:ObjectYAMLTests",
+    "OptRemarks:OptRemarksTests",
+    "Option:OptionTests",
+    "Passes:PluginsTests",
+    "ProfileData:ProfileDataTests",
+    "Support:SupportTests",
+    "Support/DynamicLibrary:DynamicLibraryTests",
+    "TextAPI:TextAPITests",
+    "Transforms/IPO:IPOTests",
+    "Transforms/Scalar:ScalarTests",
+    "Transforms/Utils:UtilsTests",
+    "Transforms/Vectorize:VectorizeTests",
+    "XRay:XRayTests",
+    "tools/llvm-cfi-verify:CFIVerifyTests",
+    "tools/llvm-exegesis:LLVMExegesisTests",
+  ]
+
+  # Target-dependent unit tests.
+  # FIXME: This matches how they are set up in the cmake build,
+  # but if an arch is disabled after a build that had it enabled,
+  # this setup leaves behind stale test executables.
+  if (llvm_build_AArch64) {
+    deps += [
+      "Target/AArch64:AArch64Tests",
+      "tools/llvm-exegesis/AArch64:LLVMExegesisAArch64Tests",
+    ]
+  }
+  if (llvm_build_ARM) {
+    deps += [ "tools/llvm-exegesis/ARM:LLVMExegesisARMTests" ]
+  }
+  if (llvm_build_WebAssembly) {
+    deps += [ "Target/WebAssembly:WebAssemblyTests" ]
+  }
+  if (llvm_build_PowerPC) {
+    deps += [ "tools/llvm-exegesis/PowerPC:LLVMExegesisPowerPCTests" ]
+  }
+  if (llvm_build_X86) {
+    deps += [ "tools/llvm-exegesis/X86:LLVMExegesisX86Tests" ]
+  }
+
+  testonly = true
+}
diff --git a/utils/gn/secondary/llvm/unittests/BinaryFormat/BUILD.gn b/utils/gn/secondary/llvm/unittests/BinaryFormat/BUILD.gn
new file mode 100644
index 0000000..7b2b78e
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/BinaryFormat/BUILD.gn
@@ -0,0 +1,15 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("BinaryFormatTests") {
+  deps = [
+    "//llvm/lib/BinaryFormat",
+  ]
+  sources = [
+    "DwarfTest.cpp",
+    "MachOTest.cpp",
+    "MsgPackReaderTest.cpp",
+    "MsgPackTypesTest.cpp",
+    "MsgPackWriterTest.cpp",
+    "TestFileMagic.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Bitcode/BUILD.gn b/utils/gn/secondary/llvm/unittests/Bitcode/BUILD.gn
new file mode 100644
index 0000000..9cb7894
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Bitcode/BUILD.gn
@@ -0,0 +1,16 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("BitcodeTests") {
+  deps = [
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/Bitcode/Reader",
+    "//llvm/lib/Bitcode/Writer",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "BitReaderTest.cpp",
+    "BitstreamReaderTest.cpp",
+    "BitstreamWriterTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn b/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
new file mode 100644
index 0000000..b67631b
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
@@ -0,0 +1,25 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("CodeGenTests") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "AArch64SelectionDAGTest.cpp",
+    "DIEHashTest.cpp",
+    "LowLevelTypeTest.cpp",
+    "MachineInstrBundleIteratorTest.cpp",
+    "MachineInstrTest.cpp",
+    "MachineOperandTest.cpp",
+    "ScalableVectorMVTsTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/CodeGen/GlobalISel/BUILD.gn b/utils/gn/secondary/llvm/unittests/CodeGen/GlobalISel/BUILD.gn
new file mode 100644
index 0000000..43cf22c
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/CodeGen/GlobalISel/BUILD.gn
@@ -0,0 +1,21 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("GlobalISelTests") {
+  deps = [
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/GlobalISel",
+    "//llvm/lib/CodeGen/MIRParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "CSETest.cpp",
+    "LegalizerHelperTest.cpp",
+    "LegalizerInfoTest.cpp",
+    "PatternMatchTest.cpp",
+  ]
+  has_custom_main = true
+}
diff --git a/utils/gn/secondary/llvm/unittests/DebugInfo/CodeView/BUILD.gn b/utils/gn/secondary/llvm/unittests/DebugInfo/CodeView/BUILD.gn
new file mode 100644
index 0000000..913de50
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/DebugInfo/CodeView/BUILD.gn
@@ -0,0 +1,13 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("DebugInfoCodeViewTests") {
+  deps = [
+    "//llvm/lib/DebugInfo/CodeView",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "RandomAccessVisitorTest.cpp",
+    "TypeHashingTest.cpp",
+    "TypeIndexDiscoveryTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/DebugInfo/DWARF/BUILD.gn b/utils/gn/secondary/llvm/unittests/DebugInfo/DWARF/BUILD.gn
new file mode 100644
index 0000000..1c22a4b
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/DebugInfo/DWARF/BUILD.gn
@@ -0,0 +1,21 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("DebugInfoDWARFTests") {
+  deps = [
+    "//llvm/lib/CodeGen/AsmPrinter",
+    "//llvm/lib/DebugInfo/DWARF",
+    "//llvm/lib/MC",
+    "//llvm/lib/Object",
+    "//llvm/lib/ObjectYAML",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "DWARFDebugInfoTest.cpp",
+    "DWARFDebugLineTest.cpp",
+    "DWARFFormValueTest.cpp",
+    "DwarfGenerator.cpp",
+    "DwarfUtils.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/DebugInfo/MSF/BUILD.gn b/utils/gn/secondary/llvm/unittests/DebugInfo/MSF/BUILD.gn
new file mode 100644
index 0000000..6c9bc06
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/DebugInfo/MSF/BUILD.gn
@@ -0,0 +1,13 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("DebugInfoMSFTests") {
+  deps = [
+    "//llvm/lib/DebugInfo/MSF",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "MSFBuilderTest.cpp",
+    "MSFCommonTest.cpp",
+    "MappedBlockStreamTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/DebugInfo/PDB/BUILD.gn b/utils/gn/secondary/llvm/unittests/DebugInfo/PDB/BUILD.gn
new file mode 100644
index 0000000..cfb92c6
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/DebugInfo/PDB/BUILD.gn
@@ -0,0 +1,32 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("DebugInfoPDBTests") {
+  deps = [
+    "//llvm/lib/DebugInfo/CodeView",
+    "//llvm/lib/DebugInfo/MSF",
+    "//llvm/lib/DebugInfo/PDB",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "HashTableTest.cpp",
+    "NativeSymbolReuseTest.cpp",
+    "PDBApiTest.cpp",
+    "StringTableBuilderTest.cpp",
+  ]
+
+  # DebugInfoPDBTests uses llvm::getInputFileDirectory(), which expects
+  # a file called llvm.srcdir.txt next to the test executable that contains
+  # the path of the source directory (which contains this file).
+  # lit doesn't change the cwd while running googletests, so the cwd isn't
+  # well-defined. This means this has to be an absolute path.
+  # FIXME: This doesn't work with swarming. This should really be a data
+  # dependency, and the cwd while tests requiring input files run should
+  # be required to be some fixed directory.
+  # FIXME: Also, the GN way is to write this file at build time. But since
+  # there's only one use of this, and since this is a pattern that hopefully
+  # will disappear again, and since it doesn't have any measurable performance
+  # hit, write the file at GN time.
+  # Note: This line here implicitly depends on unittest() setting output_dir to
+  # target_out_dir.
+  write_file("$target_out_dir/llvm.srcdir.txt", rebase_path("."))
+}
diff --git a/utils/gn/secondary/llvm/unittests/Demangle/BUILD.gn b/utils/gn/secondary/llvm/unittests/Demangle/BUILD.gn
new file mode 100644
index 0000000..c7eedfb
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Demangle/BUILD.gn
@@ -0,0 +1,11 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("DemangleTests") {
+  deps = [
+    "//llvm/lib/Demangle",
+  ]
+  sources = [
+    "ItaniumDemangleTest.cpp",
+    "PartialDemangleTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/ExecutionEngine/BUILD.gn b/utils/gn/secondary/llvm/unittests/ExecutionEngine/BUILD.gn
new file mode 100644
index 0000000..1076964
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/ExecutionEngine/BUILD.gn
@@ -0,0 +1,16 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ExecutionEngineTests") {
+  deps = [
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/Interpreter",
+    "//llvm/lib/ExecutionEngine/Orc",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "ExecutionEngineTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/ExecutionEngine/MCJIT/BUILD.gn b/utils/gn/secondary/llvm/unittests/ExecutionEngine/MCJIT/BUILD.gn
new file mode 100644
index 0000000..eed7d73
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/ExecutionEngine/MCJIT/BUILD.gn
@@ -0,0 +1,29 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("MCJITTests") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/MCJIT",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:NativeTarget",
+    "//llvm/lib/Transforms/IPO",
+    "//llvm/lib/Transforms/InstCombine",
+    "//llvm/lib/Transforms/Scalar",
+  ]
+  sources = [
+    "MCJITCAPITest.cpp",
+    "MCJITMemoryManagerTest.cpp",
+    "MCJITMultipleModuleTest.cpp",
+    "MCJITObjectCacheTest.cpp",
+    "MCJITTest.cpp",
+  ]
+
+  if (host_os == "win") {
+    sources += [ "MCJITTests.def" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/unittests/ExecutionEngine/Orc/BUILD.gn b/utils/gn/secondary/llvm/unittests/ExecutionEngine/Orc/BUILD.gn
new file mode 100644
index 0000000..81b66a5
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/ExecutionEngine/Orc/BUILD.gn
@@ -0,0 +1,33 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("OrcJITTests") {
+  deps = [
+    "//llvm/lib/ExecutionEngine",
+    "//llvm/lib/ExecutionEngine/Orc",
+    "//llvm/lib/ExecutionEngine/RuntimeDyld",
+    "//llvm/lib/IR",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:NativeTarget",
+  ]
+  sources = [
+    "CoreAPIsTest.cpp",
+    "GlobalMappingLayerTest.cpp",
+    "IndirectionUtilsTest.cpp",
+    "JITTargetMachineBuilderTest.cpp",
+    "LazyCallThroughAndReexportsTest.cpp",
+    "LazyEmittingLayerTest.cpp",
+    "LegacyAPIInteropTest.cpp",
+    "LegacyCompileOnDemandLayerTest.cpp",
+    "LegacyRTDyldObjectLinkingLayerTest.cpp",
+    "ObjectTransformLayerTest.cpp",
+    "OrcCAPITest.cpp",
+    "OrcTestCommon.cpp",
+    "QueueChannel.cpp",
+    "RPCUtilsTest.cpp",
+    "RTDyldObjectLinkingLayerTest.cpp",
+    "RemoteObjectLayerTest.cpp",
+    "SymbolStringPoolTest.cpp",
+    "ThreadSafeModuleTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/FuzzMutate/BUILD.gn b/utils/gn/secondary/llvm/unittests/FuzzMutate/BUILD.gn
new file mode 100644
index 0000000..2967ddc
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/FuzzMutate/BUILD.gn
@@ -0,0 +1,16 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("FuzzMutateTests") {
+  deps = [
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/FuzzMutate",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "OperationsTest.cpp",
+    "RandomIRBuilderTest.cpp",
+    "ReservoirSamplerTest.cpp",
+    "StrategiesTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/IR/BUILD.gn b/utils/gn/secondary/llvm/unittests/IR/BUILD.gn
new file mode 100644
index 0000000..b02a66c
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/IR/BUILD.gn
@@ -0,0 +1,45 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("IRTests") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/Passes",
+    "//llvm/lib/Support",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "AsmWriterTest.cpp",
+    "AttributesTest.cpp",
+    "BasicBlockTest.cpp",
+    "CFGBuilder.cpp",
+    "ConstantRangeTest.cpp",
+    "ConstantsTest.cpp",
+    "DebugInfoTest.cpp",
+    "DebugTypeODRUniquingTest.cpp",
+    "DomTreeUpdaterTest.cpp",
+    "DominatorTreeBatchUpdatesTest.cpp",
+    "DominatorTreeTest.cpp",
+    "FunctionTest.cpp",
+    "IRBuilderTest.cpp",
+    "InstructionsTest.cpp",
+    "IntrinsicsTest.cpp",
+    "LegacyPassManagerTest.cpp",
+    "MDBuilderTest.cpp",
+    "ManglerTest.cpp",
+    "MetadataTest.cpp",
+    "ModuleTest.cpp",
+    "PassBuilderCallbacksTest.cpp",
+    "PassManagerTest.cpp",
+    "PatternMatch.cpp",
+    "TypesTest.cpp",
+    "UseTest.cpp",
+    "UserTest.cpp",
+    "ValueHandleTest.cpp",
+    "ValueMapTest.cpp",
+    "ValueTest.cpp",
+    "VerifierTest.cpp",
+    "WaymarkTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/LineEditor/BUILD.gn b/utils/gn/secondary/llvm/unittests/LineEditor/BUILD.gn
new file mode 100644
index 0000000..0916c91
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/LineEditor/BUILD.gn
@@ -0,0 +1,11 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LineEditorTests") {
+  deps = [
+    "//llvm/lib/LineEditor",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "LineEditor.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Linker/BUILD.gn b/utils/gn/secondary/llvm/unittests/Linker/BUILD.gn
new file mode 100644
index 0000000..a245ffd
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Linker/BUILD.gn
@@ -0,0 +1,12 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LinkerTests") {
+  deps = [
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/Linker",
+  ]
+  sources = [
+    "LinkModulesTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/MC/BUILD.gn b/utils/gn/secondary/llvm/unittests/MC/BUILD.gn
new file mode 100644
index 0000000..d42db2c
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/MC/BUILD.gn
@@ -0,0 +1,16 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("MCTests") {
+  deps = [
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCDisassembler",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "Disassembler.cpp",
+    "DwarfLineTables.cpp",
+    "StringTableBuilderTest.cpp",
+    "TargetRegistry.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/MI/BUILD.gn b/utils/gn/secondary/llvm/unittests/MI/BUILD.gn
new file mode 100644
index 0000000..4ef5ebf
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/MI/BUILD.gn
@@ -0,0 +1,17 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("MITests") {
+  deps = [
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/MIRParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target:TargetsToBuild",
+  ]
+  sources = [
+    "LiveIntervalTest.cpp",
+  ]
+  has_custom_main = true
+}
diff --git a/utils/gn/secondary/llvm/unittests/Object/BUILD.gn b/utils/gn/secondary/llvm/unittests/Object/BUILD.gn
new file mode 100644
index 0000000..74f3525
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Object/BUILD.gn
@@ -0,0 +1,11 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ObjectTests") {
+  deps = [
+    "//llvm/lib/Object",
+  ]
+  sources = [
+    "SymbolSizeTest.cpp",
+    "SymbolicFileTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/ObjectYAML/BUILD.gn b/utils/gn/secondary/llvm/unittests/ObjectYAML/BUILD.gn
new file mode 100644
index 0000000..8d8575d
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/ObjectYAML/BUILD.gn
@@ -0,0 +1,10 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ObjectYAMLTests") {
+  deps = [
+    "//llvm/lib/ObjectYAML",
+  ]
+  sources = [
+    "YAMLTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/OptRemarks/BUILD.gn b/utils/gn/secondary/llvm/unittests/OptRemarks/BUILD.gn
new file mode 100644
index 0000000..785e78b
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/OptRemarks/BUILD.gn
@@ -0,0 +1,11 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("OptRemarksTests") {
+  deps = [
+    "//llvm/lib/OptRemarks",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "OptRemarksParsingTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Option/BUILD.gn b/utils/gn/secondary/llvm/unittests/Option/BUILD.gn
new file mode 100644
index 0000000..02a67af
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Option/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+import("//llvm/utils/unittest/unittest.gni")
+
+tablegen("Opts") {
+  visibility = [ ":OptionTests" ]
+  args = [ "-gen-opt-parser-defs" ]
+}
+
+unittest("OptionTests") {
+  deps = [
+    ":Opts",
+    "//llvm/lib/Option",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "OptionParsingTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Passes/BUILD.gn b/utils/gn/secondary/llvm/unittests/Passes/BUILD.gn
new file mode 100644
index 0000000..6bf6e70
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Passes/BUILD.gn
@@ -0,0 +1,53 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+# Keyed off LLVM_ENABLE_PLUGINS in the CMake build, which is usually false
+# on Windows and true elsewhere.
+if (host_os != "win") {
+  loadable_module("TestPlugin") {
+    # Put plugin next to the unit test executable.
+    output_dir = target_out_dir
+
+    sources = [
+      "TestPlugin.cpp",
+    ]
+
+    deps = [
+      # TestPlugin doesn't want to link in any LLVM code, it just needs its
+      # headers.
+      "//llvm/include/llvm/IR:public_tablegen",
+    ]
+
+    if (host_os == "linux") {
+      # The GN build currently doesn't globally pass -fPIC, but that's
+      # needed for building .so files on Linux.  Just pass it manually
+      # for loadable_modules for now.
+      cflags = [ "-fPIC" ]
+    }
+  }
+}
+
+unittest("PluginsTests") {
+  deps = [
+    "//llvm/include/llvm/Config:config",
+    "//llvm/lib/IR",
+    "//llvm/lib/Passes",
+    "//llvm/lib/Support",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "PluginsTest.cpp",
+  ]
+
+  # If plugins are disabled, this test will disable itself at runtime.
+  # Otherwise, reconfiguring with plugins disabled will leave behind a stale
+  # executable.
+  if (host_os != "win") {
+    deps += [ ":TestPlugin" ]
+    defines = [ "LLVM_ENABLE_PLUGINS" ]
+  }
+
+  if (host_os == "linux") {
+    # Corresponds to export_executable_symbols() in cmake.
+    ldflags = [ "-rdynamic" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/unittests/ProfileData/BUILD.gn b/utils/gn/secondary/llvm/unittests/ProfileData/BUILD.gn
new file mode 100644
index 0000000..e933b51
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/ProfileData/BUILD.gn
@@ -0,0 +1,15 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ProfileDataTests") {
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/ProfileData",
+    "//llvm/lib/ProfileData/Coverage",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "CoverageMappingTest.cpp",
+    "InstrProfTest.cpp",
+    "SampleProfTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Support/BUILD.gn b/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
new file mode 100644
index 0000000..f8d8a02
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
@@ -0,0 +1,83 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("SupportTests") {
+  deps = [
+    "//llvm/lib/Support",
+    "//llvm/lib/Testing/Support",
+  ]
+  sources = [
+    "ARMAttributeParser.cpp",
+    "AlignOfTest.cpp",
+    "AllocatorTest.cpp",
+    "ArrayRecyclerTest.cpp",
+    "BinaryStreamTest.cpp",
+    "BlockFrequencyTest.cpp",
+    "BranchProbabilityTest.cpp",
+    "CachePruningTest.cpp",
+    "Casting.cpp",
+    "CheckedArithmeticTest.cpp",
+    "Chrono.cpp",
+    "CommandLineTest.cpp",
+    "CompressionTest.cpp",
+    "ConvertUTFTest.cpp",
+    "CrashRecoveryTest.cpp",
+    "DJBTest.cpp",
+    "DataExtractorTest.cpp",
+    "DebugCounterTest.cpp",
+    "DebugTest.cpp",
+    "EndianStreamTest.cpp",
+    "EndianTest.cpp",
+    "ErrnoTest.cpp",
+    "ErrorOrTest.cpp",
+    "ErrorTest.cpp",
+    "FileOutputBufferTest.cpp",
+    "FormatVariadicTest.cpp",
+    "GlobPatternTest.cpp",
+    "Host.cpp",
+    "ItaniumManglingCanonicalizerTest.cpp",
+    "JSONTest.cpp",
+    "LEB128Test.cpp",
+    "LineIteratorTest.cpp",
+    "LockFileManagerTest.cpp",
+    "MD5Test.cpp",
+    "ManagedStatic.cpp",
+    "MathExtrasTest.cpp",
+    "MemoryBufferTest.cpp",
+    "MemoryTest.cpp",
+    "NativeFormatTests.cpp",
+    "ParallelTest.cpp",
+    "Path.cpp",
+    "ProcessTest.cpp",
+    "ProgramTest.cpp",
+    "RegexTest.cpp",
+    "ReplaceFileTest.cpp",
+    "ReverseIterationTest.cpp",
+    "ScaledNumberTest.cpp",
+    "SourceMgrTest.cpp",
+    "SpecialCaseListTest.cpp",
+    "StringPool.cpp",
+    "SwapByteOrderTest.cpp",
+    "SymbolRemappingReaderTest.cpp",
+    "TarWriterTest.cpp",
+    "TargetParserTest.cpp",
+    "TaskQueueTest.cpp",
+    "ThreadLocalTest.cpp",
+    "ThreadPool.cpp",
+    "Threading.cpp",
+    "TimerTest.cpp",
+    "TrailingObjectsTest.cpp",
+    "TrigramIndexTest.cpp",
+    "TypeNameTest.cpp",
+    "TypeTraitsTest.cpp",
+    "UnicodeTest.cpp",
+    "VersionTupleTest.cpp",
+    "VirtualFileSystemTest.cpp",
+    "YAMLIOTest.cpp",
+    "YAMLParserTest.cpp",
+    "formatted_raw_ostream_test.cpp",
+    "raw_ostream_test.cpp",
+    "raw_pwrite_stream_test.cpp",
+    "raw_sha1_ostream_test.cpp",
+    "xxhashTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Support/DynamicLibrary/BUILD.gn b/utils/gn/secondary/llvm/unittests/Support/DynamicLibrary/BUILD.gn
new file mode 100644
index 0000000..a04559e
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Support/DynamicLibrary/BUILD.gn
@@ -0,0 +1,49 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+# FIXME: If we add -Wl,-z,nodelete to the global ldflags, we need to remove
+# it again for these tests (cf CMake).
+
+template("dynlib_add_module") {
+  not_needed(invoker, "*")
+
+  loadable_module(target_name) {
+    # Put plugin next to the unit test executable.
+    # This assumes that unittest() puts tests in target_out_dir.
+    output_dir = target_out_dir
+
+    sources = [
+      "PipSqueak.cpp",
+    ]
+
+    if (host_os == "linux") {
+      # The GN build currently doesn't globally pass -fPIC, but that's
+      # needed for building .so files on Linux.  Just pass it manually
+      # for loadable_modules for now.
+      cflags = [ "-fPIC" ]
+    }
+  }
+}
+
+dynlib_add_module("PipSqueak") {
+}
+
+dynlib_add_module("SecondLib") {
+}
+
+unittest("DynamicLibraryTests") {
+  deps = [
+    ":PipSqueak",
+    ":SecondLib",
+    "//llvm/include/llvm/Config:config",
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "DynamicLibraryTest.cpp",
+    "ExportedFuncs.cpp",
+  ]
+
+  if (host_os == "linux") {
+    # Corresponds to export_executable_symbols() in cmake.
+    ldflags = [ "-rdynamic" ]
+  }
+}
diff --git a/utils/gn/secondary/llvm/unittests/Target/AArch64/BUILD.gn b/utils/gn/secondary/llvm/unittests/Target/AArch64/BUILD.gn
new file mode 100644
index 0000000..9a3313d
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Target/AArch64/BUILD.gn
@@ -0,0 +1,20 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("AArch64Tests") {
+  deps = [
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/GlobalISel",
+    "//llvm/lib/CodeGen/MIRParser",
+    "//llvm/lib/CodeGen/SelectionDAG",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target",
+    "//llvm/lib/Target/AArch64:LLVMAArch64CodeGen",
+    "//llvm/lib/Target/AArch64/MCTargetDesc",
+    "//llvm/lib/Target/AArch64/TargetInfo",
+    "//llvm/lib/Target/AArch64/Utils",
+  ]
+  include_dirs = [ "//llvm/lib/Target/AArch64" ]
+  sources = [
+    "InstSizes.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Target/WebAssembly/BUILD.gn b/utils/gn/secondary/llvm/unittests/Target/WebAssembly/BUILD.gn
new file mode 100644
index 0000000..e02149f
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Target/WebAssembly/BUILD.gn
@@ -0,0 +1,18 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("WebAssemblyTests") {
+  deps = [
+    "//llvm/lib/CodeGen",
+    "//llvm/lib/CodeGen/MIRParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/MC",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/WebAssembly:LLVMWebAssemblyCodeGen",
+    "//llvm/lib/Target/WebAssembly/MCTargetDesc",
+    "//llvm/lib/Target/WebAssembly/TargetInfo",
+  ]
+  include_dirs = [ "//llvm/lib/Target/WebAssembly" ]
+  sources = [
+    "WebAssemblyExceptionInfoTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/TextAPI/BUILD.gn b/utils/gn/secondary/llvm/unittests/TextAPI/BUILD.gn
new file mode 100644
index 0000000..0bfa109
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/TextAPI/BUILD.gn
@@ -0,0 +1,11 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("TextAPITests") {
+  deps = [
+    "//llvm/lib/Testing/Support",
+    "//llvm/lib/TextAPI",
+  ]
+  sources = [
+    "ELFYAMLTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Transforms/IPO/BUILD.gn b/utils/gn/secondary/llvm/unittests/Transforms/IPO/BUILD.gn
new file mode 100644
index 0000000..1b0a86d
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Transforms/IPO/BUILD.gn
@@ -0,0 +1,13 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("IPOTests") {
+  deps = [
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/IPO",
+  ]
+  sources = [
+    "LowerTypeTests.cpp",
+    "WholeProgramDevirt.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Transforms/Scalar/BUILD.gn b/utils/gn/secondary/llvm/unittests/Transforms/Scalar/BUILD.gn
new file mode 100644
index 0000000..eee435f
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Transforms/Scalar/BUILD.gn
@@ -0,0 +1,15 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("ScalarTests") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/Scalar",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "LoopPassManagerTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Transforms/Utils/BUILD.gn b/utils/gn/secondary/llvm/unittests/Transforms/Utils/BUILD.gn
new file mode 100644
index 0000000..e4585e2
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Transforms/Utils/BUILD.gn
@@ -0,0 +1,23 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("UtilsTests") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/Support",
+    "//llvm/lib/Transforms/Utils",
+  ]
+  sources = [
+    "ASanStackFrameLayoutTest.cpp",
+    "BasicBlockUtilsTest.cpp",
+    "CloningTest.cpp",
+    "CodeExtractorTest.cpp",
+    "FunctionComparatorTest.cpp",
+    "IntegerDivisionTest.cpp",
+    "LocalTest.cpp",
+    "SSAUpdaterBulkTest.cpp",
+    "UnrollLoopTest.cpp",
+    "ValueMapperTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/Transforms/Vectorize/BUILD.gn b/utils/gn/secondary/llvm/unittests/Transforms/Vectorize/BUILD.gn
new file mode 100644
index 0000000..aec5d0f
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/Transforms/Vectorize/BUILD.gn
@@ -0,0 +1,17 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("VectorizeTests") {
+  deps = [
+    "//llvm/lib/Analysis",
+    "//llvm/lib/AsmParser",
+    "//llvm/lib/IR",
+    "//llvm/lib/Transforms/Vectorize",
+  ]
+  sources = [
+    "VPlanDominatorTreeTest.cpp",
+    "VPlanHCFGTest.cpp",
+    "VPlanLoopInfoTest.cpp",
+    "VPlanSlpTest.cpp",
+    "VPlanTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/XRay/BUILD.gn b/utils/gn/secondary/llvm/unittests/XRay/BUILD.gn
new file mode 100644
index 0000000..003bfc0
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/XRay/BUILD.gn
@@ -0,0 +1,19 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("XRayTests") {
+  deps = [
+    "//llvm/lib/Support",
+    "//llvm/lib/Testing/Support",
+    "//llvm/lib/XRay",
+  ]
+  sources = [
+    "FDRBlockIndexerTest.cpp",
+    "FDRBlockVerifierTest.cpp",
+    "FDRProducerConsumerTest.cpp",
+    "FDRRecordPrinterTest.cpp",
+    "FDRRecordsTest.cpp",
+    "FDRTraceWriterTest.cpp",
+    "GraphTest.cpp",
+    "ProfileTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/tools/llvm-cfi-verify/BUILD.gn b/utils/gn/secondary/llvm/unittests/tools/llvm-cfi-verify/BUILD.gn
new file mode 100644
index 0000000..e973126
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/tools/llvm-cfi-verify/BUILD.gn
@@ -0,0 +1,22 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("CFIVerifyTests") {
+  deps = [
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target:AllTargetsAsmParsers",
+    "//llvm/lib/Target:AllTargetsAsmPrinters",
+    "//llvm/lib/Target:AllTargetsDescs",
+    "//llvm/lib/Target:AllTargetsDisassemblers",
+    "//llvm/lib/Target:AllTargetsInfos",
+    "//llvm/tools/llvm-cfi-verify/lib",
+  ]
+  sources = [
+    "FileAnalysis.cpp",
+    "GraphBuilder.cpp",
+  ]
+  has_custom_main = true
+}
diff --git a/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/AArch64/BUILD.gn b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/AArch64/BUILD.gn
new file mode 100644
index 0000000..e72301b
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/AArch64/BUILD.gn
@@ -0,0 +1,25 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LLVMExegesisAArch64Tests") {
+  deps = [
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/AArch64",
+
+    # Exegesis reaches inside the Target/AArch64 tablegen internals and must
+    # depend on this Target/AArch64-internal build target.
+    "//llvm/lib/Target/AArch64/MCTargetDesc",
+    "//llvm/tools/llvm-exegesis/lib",
+    "//llvm/tools/llvm-exegesis/lib/AArch64",
+  ]
+  include_dirs = [
+    "//llvm/lib/Target/AArch64",
+    "//llvm/tools/llvm-exegesis/lib",
+  ]
+  sources = [
+    "TargetTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/ARM/BUILD.gn b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/ARM/BUILD.gn
new file mode 100644
index 0000000..6c4f768
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/ARM/BUILD.gn
@@ -0,0 +1,25 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LLVMExegesisARMTests") {
+  deps = [
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/ARM",
+
+    # Exegesis reaches inside the Target/ARM tablegen internals and must
+    # depend on these Target/ARM-internal build targets.
+    "//llvm/lib/Target/ARM/MCTargetDesc",
+    "//llvm/lib/Target/ARM/Utils",
+    "//llvm/tools/llvm-exegesis/lib",
+  ]
+  include_dirs = [
+    "//llvm/lib/Target/ARM",
+    "//llvm/tools/llvm-exegesis/lib",
+  ]
+  sources = [
+    "AssemblerTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/BUILD.gn b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/BUILD.gn
new file mode 100644
index 0000000..9eb3c9d
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/BUILD.gn
@@ -0,0 +1,19 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LLVMExegesisTests") {
+  deps = [
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/tools/llvm-exegesis/lib",
+  ]
+  include_dirs = [ "../../../tools/llvm-exegesis/lib" ]
+  sources = [
+    "BenchmarkRunnerTest.cpp",
+    "ClusteringTest.cpp",
+    "PerfHelperTest.cpp",
+    "RegisterValueTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/PowerPC/BUILD.gn b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/PowerPC/BUILD.gn
new file mode 100644
index 0000000..bbb37bd
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/PowerPC/BUILD.gn
@@ -0,0 +1,26 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LLVMExegesisPowerPCTests") {
+  deps = [
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/PowerPC",
+
+    # Exegesis reaches inside the Target/PowerPC tablegen internals and must
+    # depend on these Target/PowerPC-internal build targets.
+    "//llvm/lib/Target/PowerPC/MCTargetDesc",
+    "//llvm/tools/llvm-exegesis/lib",
+    "//llvm/tools/llvm-exegesis/lib/PowerPC",
+  ]
+  include_dirs = [
+    "//llvm/lib/Target/PowerPC",
+    "//llvm/tools/llvm-exegesis/lib",
+  ]
+  sources = [
+    "AnalysisTest.cpp",
+    "TargetTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/X86/BUILD.gn b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/X86/BUILD.gn
new file mode 100644
index 0000000..2b59d8b
--- /dev/null
+++ b/utils/gn/secondary/llvm/unittests/tools/llvm-exegesis/X86/BUILD.gn
@@ -0,0 +1,31 @@
+import("//llvm/utils/unittest/unittest.gni")
+
+unittest("LLVMExegesisX86Tests") {
+  deps = [
+    "//llvm/lib/DebugInfo/Symbolize",
+    "//llvm/lib/MC",
+    "//llvm/lib/MC/MCParser",
+    "//llvm/lib/Object",
+    "//llvm/lib/Support",
+    "//llvm/lib/Target/X86",
+
+    # Exegesis reaches inside the Target/X86 tablegen internals and must
+    # depend on this Target/X86-internal build target -- and so must its
+    # unittests.
+    "//llvm/lib/Target/X86/MCTargetDesc",
+    "//llvm/tools/llvm-exegesis/lib",
+    "//llvm/tools/llvm-exegesis/lib/X86",
+  ]
+  include_dirs = [
+    "//llvm/lib/Target/X86",
+    "//llvm/tools/llvm-exegesis/lib",
+  ]
+  sources = [
+    "AnalysisTest.cpp",
+    "AssemblerTest.cpp",
+    "BenchmarkResultTest.cpp",
+    "RegisterAliasingTest.cpp",
+    "SnippetGeneratorTest.cpp",
+    "TargetTest.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/utils/FileCheck/BUILD.gn b/utils/gn/secondary/llvm/utils/FileCheck/BUILD.gn
new file mode 100644
index 0000000..65a2b73
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/FileCheck/BUILD.gn
@@ -0,0 +1,8 @@
+executable("FileCheck") {
+  deps = [
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "FileCheck.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/utils/TableGen/tablegen.gni b/utils/gn/secondary/llvm/utils/TableGen/tablegen.gni
index 2a5520e..5037832 100644
--- a/utils/gn/secondary/llvm/utils/TableGen/tablegen.gni
+++ b/utils/gn/secondary/llvm/utils/TableGen/tablegen.gni
@@ -25,7 +25,7 @@
 #   }
 
 template("tablegen") {
-  assert(defined(invoker.args), "args must be defined for $target_name")
+  assert(defined(invoker.args), "must set 'args' in $target_name")
 
   config_name = "${target_name}_config"
   config(config_name) {
@@ -36,8 +36,12 @@
   action(target_name) {
     forward_variables_from(invoker, [ "visibility" ])
 
-    # FIXME: In cross builds, this should depend on the host binary.
-    tblgen_target = "//llvm/utils/TableGen:llvm-tblgen"
+    if (defined(invoker.tblgen_target)) {
+      tblgen_target = invoker.tblgen_target
+    } else {
+      tblgen_target = "//llvm/utils/TableGen:llvm-tblgen"
+    }
+    tblgen_target += "($host_toolchain)"
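+
+    # E.g. with a hypothetical host_toolchain of
+    # "//llvm/utils/gn/build/toolchain:unix", this expands to
+    # "//llvm/utils/TableGen:llvm-tblgen(//llvm/utils/gn/build/toolchain:unix)",
+    # so in cross builds tablegen is built for and run on the host.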
     tblgen_executable = get_label_info(tblgen_target, "root_out_dir") +
                         "/bin/" + get_label_info(tblgen_target, "name")
     deps = [
@@ -60,11 +64,6 @@
     depfile = "$gen_output.d"
     td_file = rebase_path(td_file, root_build_dir)
 
-    # FIXME: The cmake build lets tablegen write to a temp file and then copies
-    # it over the final output only if it has changed, for ninja's restat
-    # optimization. Instead of doing that in cmake, llvm-tblgen should do this
-    # itself. r330742 tried this, but it caused problems. Fix those and reland,
-    # so that the gn build has the optimization too.
     args = [
              rebase_path(tblgen_executable, root_build_dir),
              "-I",
diff --git a/utils/gn/secondary/llvm/utils/count/BUILD.gn b/utils/gn/secondary/llvm/utils/count/BUILD.gn
new file mode 100644
index 0000000..efbc2c9
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/count/BUILD.gn
@@ -0,0 +1,5 @@
+executable("count") {
+  sources = [
+    "count.c",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/utils/llvm-lit/BUILD.gn b/utils/gn/secondary/llvm/utils/llvm-lit/BUILD.gn
new file mode 100644
index 0000000..fca5679
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/llvm-lit/BUILD.gn
@@ -0,0 +1,56 @@
+import("//clang/test/clang_lit_site_cfg_files.gni")
+import("//lld/test/lld_lit_site_cfg_files.gni")
+import("//llvm/test/llvm_lit_site_cfg_files.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+
+write_cmake_config("llvm-lit") {
+  input = "llvm-lit.in"
+  output = "$root_out_dir/bin/llvm-lit"
+
+  if (host_os == "win") {
+    # llvm-lit needs a .py suffix on Windows so multiprocessing can find its main module.
+    output = "${output}.py"
+  }
+
+  # lit's lit/llvm/config.py shells out to llvm-config.
+  deps = [
+    "//llvm/tools/llvm-config",
+  ]
+
+  # Generate LLVM_LIT_CONFIG_MAP parameter.
+  # llvm-lit contains a mapping from each lit.cfg.py file to the corresponding
+  # generated lit.site.cfg.py file, so llvm-lit depends on all the targets
+  # that generate the site.cfg.py files.
+  config_map = ""
+
+  deps += [
+    "//clang/test:lit_site_cfg",
+    "//clang/test:lit_unit_site_cfg",
+    "//lld/test:lit_site_cfg",
+    "//lld/test:lit_unit_site_cfg",
+    "//llvm/test:lit_site_cfg",
+    "//llvm/test:lit_unit_site_cfg",
+  ]
+
+  # Note: \n is converted into a newline by write_cmake_config.py, not by gn.
+  config_map += "map_config('" + rebase_path("//clang/test/lit.cfg.py") +
+                "', '" + rebase_path(clang_lit_site_cfg_file) + "')\n"
+  config_map += "map_config('" + rebase_path("//clang/test/Unit/lit.cfg.py") +
+                "', '" + rebase_path(clang_lit_unit_site_cfg_file) + "')\n"
+  config_map += "map_config('" + rebase_path("//lld/test/lit.cfg.py") + "', '" +
+                rebase_path(lld_lit_site_cfg_file) + "')\n"
+  config_map += "map_config('" + rebase_path("//lld/test/Unit/lit.cfg.py") +
+                "', '" + rebase_path(lld_lit_unit_site_cfg_file) + "')\n"
+  config_map += "map_config('" + rebase_path("//llvm/test/lit.cfg.py") +
+                "', '" + rebase_path(llvm_lit_site_cfg_file) + "')\n"
+  config_map += "map_config('" + rebase_path("//llvm/test/Unit/lit.cfg.py") +
+                "', '" + rebase_path(llvm_lit_unit_site_cfg_file) + "')\n"
+
+  values = [
+    "LLVM_SOURCE_DIR=" + rebase_path("//llvm"),
+    "LLVM_BINARY_DIR=" +
+        rebase_path(get_label_info("//llvm", "target_out_dir")),
+    "BUILD_MODE=.",
+    "LLVM_LIT_CONFIG_MAP=" + config_map,
+  ]
+}
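
For context, the LLVM_LIT_CONFIG_MAP value assembled above expands into a series of map_config(...) calls inside the generated bin/llvm-lit script. A minimal, hypothetical Python sketch of the consuming side (paths and the final hand-off to lit are illustrative; the real llvm-lit.in template differs in detail):

    # Hypothetical sketch of the generated llvm-lit wrapper.
    import os

    config_map = {}

    def map_config(source_cfg, site_config):
        # Normalize the key so the generated site config can be found
        # no matter how the source lit.cfg.py path was spelled.
        source_cfg = os.path.realpath(source_cfg)
        config_map[source_cfg] = os.path.normpath(site_config)

    # write_cmake_config substitutes LLVM_LIT_CONFIG_MAP here, producing e.g.:
    map_config('/src/llvm/test/lit.cfg.py', '/out/gn/obj/llvm/test/lit.site.cfg.py')

    # lit would then be invoked with the map available to its discovery logic,
    # roughly (not the exact API):
    # import lit.main
    # lit.main.main({'config_map': config_map})
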
diff --git a/utils/gn/secondary/llvm/utils/not/BUILD.gn b/utils/gn/secondary/llvm/utils/not/BUILD.gn
new file mode 100644
index 0000000..ea450e1
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/not/BUILD.gn
@@ -0,0 +1,8 @@
+executable("not") {
+  deps = [
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "not.cpp",
+  ]
+}
diff --git a/utils/gn/secondary/llvm/utils/unittest/BUILD.gn b/utils/gn/secondary/llvm/utils/unittest/BUILD.gn
new file mode 100644
index 0000000..e462029
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/unittest/BUILD.gn
@@ -0,0 +1,40 @@
+import("//llvm/utils/gn/build/libs/pthread/enable.gni")
+
+# Used to push the gtest include directories to things depending on :googletest.
+config("googletest_config") {
+  include_dirs = [
+    "googlemock/include",
+    "googletest/include",
+  ]
+
+  # LLVM requires C++11 but gtest doesn't correctly detect the availability
+  # of C++11 on MSVC, so we force it on.
+  defines = [
+    "GTEST_LANG_CXX11",
+    "GTEST_HAS_TR1_TUPLE=0",
+  ]
+  if (host_os == "win") {
+    defines += [ "GTEST_OS_WINDOWS" ]
+  }
+  if (!llvm_enable_threads) {
+    defines += [ "GTEST_HAS_PTHREAD=0" ]
+  }
+  defines += [ "GTEST_HAS_RTTI=0" ]
+}
+
+static_library("gtest") {
+  deps = [
+    "//llvm/lib/Support",
+  ]
+  include_dirs = [
+    "googletest",  # For including src/gtest.cc
+    "googlemock",  # For including src/gmock.cc
+  ]
+  public_configs = [ ":googletest_config" ]
+  configs -= [ "//llvm/utils/gn/build:warn_covered_switch_default" ]
+  sources = [
+    "googlemock/src/gmock-all.cc",
+    "googletest/src/gtest-all.cc",
+  ]
+  testonly = true
+}
diff --git a/utils/gn/secondary/llvm/utils/unittest/UnitTestMain/BUILD.gn b/utils/gn/secondary/llvm/utils/unittest/UnitTestMain/BUILD.gn
new file mode 100644
index 0000000..51bc7b8
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/unittest/UnitTestMain/BUILD.gn
@@ -0,0 +1,25 @@
+source_set("UnitTestMain") {
+  deps = [
+    "//llvm/lib/Support",
+  ]
+
+  # Make targets depending on this also depend on gtest, to get the gtest
+  # include_dir.
+  public_deps = [
+    "..:gtest",
+  ]
+  sources = [
+    "TestMain.cpp",
+  ]
+  testonly = true
+}
+
+# Nothing depends on this target, but llvm-config expects it to exist when
+# it runs with `--link-static --system-libs`, so humor it.
+static_library("gtest_main") {
+  deps = [
+    ":UnitTestMain",
+  ]
+  complete_static_lib = true
+  testonly = true
+}
diff --git a/utils/gn/secondary/llvm/utils/unittest/unittest.gni b/utils/gn/secondary/llvm/utils/unittest/unittest.gni
new file mode 100644
index 0000000..f0c8309
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/unittest/unittest.gni
@@ -0,0 +1,58 @@
+# This file defines a template for adding a unittest binary.
+#
+# It's a thin wrapper around GN's built-in executable() target type and
+# accepts the same parameters, plus one additional parameter:
+#
+#   has_custom_main (optional)
+#       [bool] If set, link against gtest instead of UnitTestMain; for tests
+#              that define their own main() function.
+#
+# Example use:
+#
+#   unittest("FormatTest") {
+#     sources = [ ... ]
+#     ...
+#   }
+
+template("unittest") {
+  executable(target_name) {
+    has_custom_main = false  # Default value.
+
+    # Forward everything (has_custom_main if set; configs, sources, deps, ...).
+    forward_variables_from(invoker, "*")
+    assert(!defined(invoker.output_dir), "cannot set unittest output_dir")
+    assert(!defined(invoker.testonly), "cannot set unittest testonly")
+
+    # Common settings for all unit tests.
+    # Unit test binaries shouldn't go right in out/gn/bin, for two reasons:
+    # 1. That's where production binaries go.
+    # 2. The CMake build doesn't put the unit tests of all projects (clang,
+    #    lld,...) in one directory, so it's not guaranteed that there won't
+    #    be name collisions between test binaries from separate projects.
+    # Each lit suite takes a foo_obj_root parameter and puts temporary files
+    # for lit tests at foo_obj_root/test and looks for unit test binaries
+    # below foo_obj_root/unittests. As long as the BUILD.gn files processing
+    # the lit.site.cfg.py.in files match the output dir here, it doesn't
+    # matter all that much where the unit test binaries go, with the weak
+    # constraints that test binaries of different projects should go in
+    # different folders, and that it's not too difficult to manually
+    # run the unit test binary if necessary. Using target_out_dir here
+    # means that //clang/unittests/Format gets its binary in
+    # out/gn/obj/clang/unittests/Format/FormatTests, which seems fine.
+    #
+    # If you change output_dir here, look through
+    # `git grep target_out_dir '*/unittests/*'` and update those too.
+    output_dir = target_out_dir
+
+    if (has_custom_main) {
+      deps += [ "//llvm/utils/unittest:gtest" ]
+    } else {
+      deps += [ "//llvm/utils/unittest/UnitTestMain" ]
+    }
+    testonly = true
+  }
+}
+
+set_defaults("unittest") {
+  configs = shared_binary_target_configs
+}
diff --git a/utils/gn/secondary/llvm/utils/yaml-bench/BUILD.gn b/utils/gn/secondary/llvm/utils/yaml-bench/BUILD.gn
new file mode 100644
index 0000000..e388bf4
--- /dev/null
+++ b/utils/gn/secondary/llvm/utils/yaml-bench/BUILD.gn
@@ -0,0 +1,8 @@
+executable("yaml-bench") {
+  deps = [
+    "//llvm/lib/Support",
+  ]
+  sources = [
+    "YAMLBench.cpp",
+  ]
+}
diff --git a/utils/indirect_calls.py b/utils/indirect_calls.py
index b7349a6..e460ff7 100755
--- a/utils/indirect_calls.py
+++ b/utils/indirect_calls.py
@@ -10,6 +10,8 @@
    dump format.
 """
 
+from __future__ import print_function
+
 import os
 import sys
 import re
@@ -32,8 +34,8 @@
         result = re.search('(call|jmp).*\*', line)
         if result != None:
             # TODO: Perhaps use cxxfilt to demangle functions?
-            print function
-            print line
+            print(function)
+            print(line)
     return
 
 def main(args):
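
The `from __future__ import print_function` line added here (and in the scripts below) makes Python 2 treat print as a function, so the same source runs unchanged under Python 2 and 3. A small self-contained illustration of the pattern this commit applies throughout:

    from __future__ import print_function
    import sys

    # With the future import, these forms behave identically on Python 2 and 3.
    print("hello")                      # replaces: print "hello"
    print("a", "b", sep=", ")           # keyword arguments become available
    print("warning", file=sys.stderr)   # replaces: print >>sys.stderr, "warning"
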
diff --git a/utils/lint/common_lint.py b/utils/lint/common_lint.py
index e982680..aec9079 100644
--- a/utils/lint/common_lint.py
+++ b/utils/lint/common_lint.py
@@ -2,6 +2,7 @@
 #
 # Common lint functions applicable to multiple types of files.
 
+from __future__ import print_function
 import re
 
 def VerifyLineLength(filename, lines, max_length):
@@ -89,7 +90,7 @@
   for filename in filenames:
     file = open(filename, 'r')
     if not file:
-      print 'Cound not open %s' % filename
+      print('Could not open %s' % filename)
       continue
     lines = file.readlines()
     lint.extend(linter.RunOnFile(filename, lines))
diff --git a/utils/lint/cpp_lint.py b/utils/lint/cpp_lint.py
index 07fad58..2fb8cc9 100755
--- a/utils/lint/cpp_lint.py
+++ b/utils/lint/cpp_lint.py
@@ -6,6 +6,7 @@
 # TODO: add unittests for the verifier functions:
 # http://docs.python.org/library/unittest.html .
 
+from __future__ import print_function
 import common_lint
 import re
 import sys
@@ -86,7 +87,7 @@
 def CppLintMain(filenames):
   all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
   for lint in all_lint:
-    print '%s:%d:%s' % (lint[0], lint[1], lint[2])
+    print('%s:%d:%s' % (lint[0], lint[1], lint[2]))
   return 0
 
 
diff --git a/utils/lit/lit/util.py b/utils/lit/lit/util.py
index e20c4ab..4c116b3 100644
--- a/utils/lit/lit/util.py
+++ b/utils/lit/lit/util.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import errno
 import itertools
 import math
diff --git a/utils/lit/tests/Inputs/shtest-env/print_environment.py b/utils/lit/tests/Inputs/shtest-env/print_environment.py
index 1add407..ac9a80e 100644
--- a/utils/lit/tests/Inputs/shtest-env/print_environment.py
+++ b/utils/lit/tests/Inputs/shtest-env/print_environment.py
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
 import os
 
 sorted_environment = sorted(os.environ.items())
 
 for name,value in sorted_environment:
-    print name,'=',value
+    print(name,'=',value)
diff --git a/utils/lit/tests/Inputs/shtest-shell/check_path.py b/utils/lit/tests/Inputs/shtest-shell/check_path.py
index c1d2797..467505b7 100644
--- a/utils/lit/tests/Inputs/shtest-shell/check_path.py
+++ b/utils/lit/tests/Inputs/shtest-shell/check_path.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os
 import sys
 
diff --git a/utils/lit/tests/Inputs/shtest-timeout/lit.cfg b/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
index 96bf181..6256f5a 100644
--- a/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
+++ b/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
@@ -28,5 +28,13 @@
 config.test_exec_root = config.test_source_root
 config.target_triple = '(unused)'
 src_root = os.path.join(config.test_source_root, '..')
-config.environment['PYTHONPATH'] = src_root
+
+pythonpath_list = [src_root]
+# Ensure the user's PYTHONPATH is included.
+if 'PYTHONPATH' in os.environ:
+    pythonpath_list.append(os.environ['PYTHONPATH'])
+if 'PYTHONPATH' in config.environment:
+    pythonpath_list.append(config.environment['PYTHONPATH'])
+config.environment['PYTHONPATH'] = os.pathsep.join(pythonpath_list)
+
 config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
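
The change above merges the caller's PYTHONPATH with the test-local path using the platform separator instead of clobbering it. A simplified sketch of the resulting behavior (the made-up paths stand in for src_root and a user environment; the real config also consults config.environment):

    import os

    pythonpath_list = ['/src/llvm/utils/lit']
    # Simulate a user-provided PYTHONPATH.
    os.environ['PYTHONPATH'] = '/home/user/pylibs'
    if 'PYTHONPATH' in os.environ:
        pythonpath_list.append(os.environ['PYTHONPATH'])

    # os.pathsep is ':' on POSIX and ';' on Windows.
    merged = os.pathsep.join(pythonpath_list)
    print(merged)  # e.g. /src/llvm/utils/lit:/home/user/pylibs
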
diff --git a/utils/lit/tests/lit.cfg b/utils/lit/tests/lit.cfg
index 01a3431..2af93d6 100644
--- a/utils/lit/tests/lit.cfg
+++ b/utils/lit/tests/lit.cfg
@@ -34,7 +34,15 @@
 else:
   lit_path = src_root
 
-config.environment['PYTHONPATH'] = lit_path # Required because some tests import the lit module
+pythonpath_list = [lit_path] # Required because some tests import the lit module
+
+# Ensure the user's PYTHONPATH is included.
+if 'PYTHONPATH' in os.environ:
+    pythonpath_list.append(os.environ['PYTHONPATH'])
+if 'PYTHONPATH' in config.environment:
+    pythonpath_list.append(config.environment['PYTHONPATH'])
+config.environment['PYTHONPATH'] = os.pathsep.join(pythonpath_list)
+
 config.substitutions.append(('%{src_root}', src_root))
 config.substitutions.append(('%{inputs}', os.path.join(
             src_root, 'tests', 'Inputs')))
diff --git a/utils/lit/tests/shtest-run-at-line.py b/utils/lit/tests/shtest-run-at-line.py
index cd0e081..7e5d53b 100644
--- a/utils/lit/tests/shtest-run-at-line.py
+++ b/utils/lit/tests/shtest-run-at-line.py
@@ -1,7 +1,7 @@
 # Check that -vv makes the line number of the failing RUN command clear.
 # (-v is actually sufficient in the case of the internal shell.)
 #
-# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
+# RUN: env -u FILECHECK_OPTS not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
 # RUN: FileCheck --input-file %t.out %s
 #
 # END.
diff --git a/utils/llvm-build/llvmbuild/componentinfo.py b/utils/llvm-build/llvmbuild/componentinfo.py
index b384acd..0e9d089 100644
--- a/utils/llvm-build/llvmbuild/componentinfo.py
+++ b/utils/llvm-build/llvmbuild/componentinfo.py
@@ -2,7 +2,7 @@
 Descriptor objects for entities that are part of the LLVM project.
 """
 
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 try:
     import configparser
 except:
@@ -461,8 +461,8 @@
             info = type_class.parse(subpath,
                                     IniFormatParser(parser.items(section)))
         except TypeError:
-            print >>sys.stderr, "error: invalid component %r in %r: %s" % (
-                section, path, "unable to instantiate: %r" % type_name)
+            print("error: invalid component %r in %r: %s" % (
+                section, path, "unable to instantiate: %r" % type_name), file=sys.stderr)
             import traceback
             traceback.print_exc()
             raise SystemExit(1)
diff --git a/utils/llvm-gisel-cov.py b/utils/llvm-gisel-cov.py
index a74ed10..820fcea 100644
--- a/utils/llvm-gisel-cov.py
+++ b/utils/llvm-gisel-cov.py
@@ -5,6 +5,7 @@
 Emits the number of rules covered or the percentage of rules covered depending
 on whether --num-rules has been used to specify the total number of rules.
 """
+from __future__ import print_function
 
 import argparse
 import struct
@@ -59,9 +60,9 @@
   num_rules = dict(args.num_rules)
   for backend, rules_for_backend in covered_rules.items():
     if backend in num_rules:
-      print "%s: %3.2f%% of rules covered" % (backend, (float(len(rules_for_backend.keys())) / num_rules[backend]) * 100)
+      print("%s: %3.2f%% of rules covered" % (backend, float(len(rules_for_backend)) / num_rules[backend]) * 100))
     else:
-      print "%s: %d rules covered" % (backend, len(rules_for_backend.keys()))
+      print("%s: %d rules covered" % (backend, len(rules_for_backend)))
 
 if __name__ == '__main__':
   main()
diff --git a/utils/llvm-lit/llvm-lit.in b/utils/llvm-lit/llvm-lit.in
index de67b18..8700d15 100755
--- a/utils/llvm-lit/llvm-lit.in
+++ b/utils/llvm-lit/llvm-lit.in
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: utf-8 -*-
 
 import os
 import sys
diff --git a/utils/release/build_llvm_package.bat b/utils/release/build_llvm_package.bat
index 30767f6..51f4256 100755
--- a/utils/release/build_llvm_package.bat
+++ b/utils/release/build_llvm_package.bat
@@ -8,15 +8,22 @@
 
 REM Prerequisites:
 REM
-REM   Visual Studio 2017, CMake, Ninja, SVN, GNUWin32,
+REM   Visual Studio 2017, CMake, Ninja, SVN, GNUWin32, SWIG, Python 3,
 REM   NSIS with the strlen_8192 patch,
 REM   Visual Studio 2017 SDK and Nuget (for the clang-format plugin),
 REM   Perl (for the OpenMP run-time).
+REM
+REM
+REM   For LLDB, SWIG version <= 3.0.8 needs to be used to work around
+REM   https://github.com/swig/swig/issues/769
 
 
 REM You need to modify the paths below:
 set vsdevcmd=C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\Common7\Tools\VsDevCmd.bat
 
+set python32_dir=C:\Users\%USERNAME%\AppData\Local\Programs\Python\Python36-32
+set python64_dir=C:\Users\%USERNAME%\AppData\Local\Programs\Python\Python36
+
 set revision=%1
 set branch=trunk
 set package_version=8.0.0-r%revision%
@@ -41,10 +48,11 @@
 svn.exe export -r %revision% http://llvm.org/svn/llvm-project/lld/%branch% llvm/tools/lld || exit /b
 svn.exe export -r %revision% http://llvm.org/svn/llvm-project/compiler-rt/%branch% llvm/projects/compiler-rt || exit /b
 svn.exe export -r %revision% http://llvm.org/svn/llvm-project/openmp/%branch% llvm/projects/openmp || exit /b
+svn.exe export -r %revision% http://llvm.org/svn/llvm-project/lldb/%branch% llvm/tools/lldb || exit /b
 
 
 REM Setting CMAKE_CL_SHOWINCLUDES_PREFIX to work around PR27226.
-set cmake_flags=-DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_USE_CRT_RELEASE=MT -DLLVM_INSTALL_TOOLCHAIN_ONLY=ON -DCLANG_FORMAT_VS_VERSION=%clang_format_vs_version% -DPACKAGE_VERSION=%package_version% -DCMAKE_CL_SHOWINCLUDES_PREFIX="Note: including file: "
+set cmake_flags=-DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_INSTALL_TOOLCHAIN_ONLY=ON -DCMAKE_INSTALL_UCRT_LIBRARIES=ON -DCLANG_FORMAT_VS_VERSION=%clang_format_vs_version% -DPACKAGE_VERSION=%package_version% -DLLDB_RELOCATABLE_PYTHON=1 -DLLDB_TEST_COMPILER=%cd%\build32_stage0\bin\clang.exe -DCMAKE_CL_SHOWINCLUDES_PREFIX="Note: including file: "
 
 REM TODO: Run all tests, including lld and compiler-rt.
 
@@ -55,7 +63,7 @@
 mkdir build32_stage0
 cd build32_stage0
 REM Work around VS2017 bug by using MinSizeRel.
-cmake -GNinja %cmake_flags% -DCMAKE_BUILD_TYPE=MinSizeRel ..\llvm || exit /b
+cmake -GNinja %cmake_flags% -DPYTHON_HOME=%python32_dir% -DCMAKE_BUILD_TYPE=MinSizeRel ..\llvm || exit /b
 ninja all || ninja all || ninja all || exit /b
 ninja check || ninja check || ninja check || exit /b
 ninja check-clang || ninja check-clang || ninja check-clang ||  exit /b
@@ -65,7 +73,7 @@
 cd build32
 set CC=..\build32_stage0\bin\clang-cl
 set CXX=..\build32_stage0\bin\clang-cl
-cmake -GNinja %cmake_flags% ..\llvm || exit /b
+cmake -GNinja %cmake_flags% -DPYTHON_HOME=%python32_dir% ..\llvm || exit /b
 ninja all || ninja all || ninja all || exit /b
 ninja check || ninja check || ninja check || exit /b
 ninja check-clang || ninja check-clang || ninja check-clang ||  exit /b
@@ -77,7 +85,7 @@
 cd build_vsix
 set CC=..\build32_stage0\bin\clang-cl
 set CXX=..\build32_stage0\bin\clang-cl
-cmake -GNinja %cmake_flags% -DBUILD_CLANG_FORMAT_VS_PLUGIN=ON ..\llvm || exit /b
+cmake -GNinja %cmake_flags% -DLLVM_USE_CRT_RELEASE=MT -DBUILD_CLANG_FORMAT_VS_PLUGIN=ON -DPYTHON_HOME=%python32_dir% ..\llvm || exit /b
 ninja clang_format_vsix || exit /b
 copy ..\llvm\tools\clang\tools\clang-format-vs\ClangFormat\bin\Release\ClangFormat.vsix ClangFormat-r%revision%.vsix
 cd ..
@@ -90,7 +98,7 @@
 mkdir build64_stage0
 cd build64_stage0
 REM Work around VS2017 bug by using MinSizeRel.
-cmake -GNinja %cmake_flags% -DCMAKE_BUILD_TYPE=MinSizeRel ..\llvm || exit /b
+cmake -GNinja %cmake_flags% -DPYTHON_HOME=%python64_dir% -DCMAKE_BUILD_TYPE=MinSizeRel ..\llvm || exit /b
 ninja all || ninja all || ninja all || exit /b
 ninja check || ninja check || ninja check || exit /b
 ninja check-clang || ninja check-clang || ninja check-clang ||  exit /b
@@ -100,7 +108,7 @@
 cd build64
 set CC=..\build64_stage0\bin\clang-cl
 set CXX=..\build64_stage0\bin\clang-cl
-cmake -GNinja %cmake_flags% ..\llvm || exit /b
+cmake -GNinja %cmake_flags% -DPYTHON_HOME=%python64_dir% ..\llvm || exit /b
 ninja all || ninja all || ninja all || exit /b
 ninja check || ninja check || ninja check || exit /b
 ninja check-clang || ninja check-clang || ninja check-clang ||  exit /b
diff --git a/utils/release/findRegressions-nightly.py b/utils/release/findRegressions-nightly.py
index ddf8983..e7e13b0 100755
--- a/utils/release/findRegressions-nightly.py
+++ b/utils/release/findRegressions-nightly.py
@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+from __future__ import print_function
+
 import re, string, sys, os, time
 
 DEBUG = 0
@@ -22,45 +24,45 @@
   fname = ''
   for t in r:
     if DEBUG:
-      print t
+      print(t)
     if t[0] == 'PASS' or t[0] == 'FAIL' :
       tmp = t[2].split(testDirName)
       
       if DEBUG:
-        print tmp
+        print(tmp)
       
       if len(tmp) == 2:
         fname = tmp[1].strip('\r\n')
       else:
         fname = tmp[0].strip('\r\n')
       
-      if not test.has_key(fname) :
+      if fname not in test :
         test[fname] = {}
       
       for k in test:
         test[fname][k] = 'NA'
         test[fname][t[1]] = t[0]
         if DEBUG:
-          print test[fname][t[1]]
+          print(test[fname][t[1]])
     else :
       try:
         n = t[0].split('RESULT-')[1]
         
         if DEBUG:
-          print n;
+          print(n);
         
         if n == 'llc' or n == 'jit-comptime' or n == 'compile':
           test[fname][tp + n] = float(t[2].split(' ')[2])
           if DEBUG:
-            print test[fname][tp + n]
+            print(test[fname][tp + n])
         
         elif n.endswith('-time') :
             test[fname][exp + n] = float(t[2].strip('\r\n'))
             if DEBUG:
-              print test[fname][exp + n]
+              print(test[fname][exp + n])
         
         else :
-          print "ERROR!"
+          print("ERROR!")
           sys.exit(1)
       
       except:
@@ -73,52 +75,52 @@
 
   for t in sorted(d_old.keys()) :
     if DEBUG:
-      print t
+      print(t)
         
-    if d_new.has_key(t) :
+    if t in d_new :
     
       # Check if the test passed or failed.
       for x in test:
-        if d_old[t].has_key(x):
-          if d_new[t].has_key(x):
+        if x in d_old[t]:
+          if x in d_new[t]:
             if d_old[t][x] == 'PASS':
               if d_new[t][x] != 'PASS':
-                print t + " *** REGRESSION (" + x + ")\n"
+                print(t + " *** REGRESSION (" + x + ")\n")
             else:
               if d_new[t][x] == 'PASS':
-                print t + " * NEW PASS (" + x + ")\n"
+                print(t + " * NEW PASS (" + x + ")\n")
                 
           else :
-            print t + "*** REGRESSION (" + x + ")\n"
+            print(t + "*** REGRESSION (" + x + ")\n")
         
         # For execution time, if there is no result, it's a fail.
         for x in exectime:
-          if d_old[t].has_key(tp + x):
-            if not d_new[t].has_key(tp + x):
-              print t + " *** REGRESSION (" + tp + x + ")\n"
+          if tp + x in d_old[t]:
+            if tp + x not in d_new[t]:
+              print(t + " *** REGRESSION (" + tp + x + ")\n")
                 
           else :
-            if d_new[t].has_key(tp + x):
-              print t + " * NEW PASS (" + tp + x + ")\n"
+            if tp + x in d_new[t]:
+              print(t + " * NEW PASS (" + tp + x + ")\n")
 
        
         for x in comptime:
-          if d_old[t].has_key(exp + x):
-            if not d_new[t].has_key(exp + x):
-              print t + " *** REGRESSION (" + exp + x + ")\n"
+          if exp + x in d_old[t]:
+            if exp + x not in d_new[t]:
+              print(t + " *** REGRESSION (" + exp + x + ")\n")
                 
           else :
-            if d_new[t].has_key(exp + x):
-              print t + " * NEW PASS (" + exp + x + ")\n"
+            if exp + x in d_new[t]:
+              print(t + " * NEW PASS (" + exp + x + ")\n")
               
     else :
-      print t + ": Removed from test-suite.\n"
+      print(t + ": Removed from test-suite.\n")
     
 
 #Main
 if len(sys.argv) < 3 :
-    print 'Usage:', sys.argv[0], \
-          '<old log> <new log>'
+    print('Usage:', sys.argv[0],
+          '<old log> <new log>')
     sys.exit(-1)
 
 d_old = parse(sys.argv[1])
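
Besides print(), this script drops dict.has_key(), which was removed in Python 3; the `in` operator is the portable replacement and also works on Python 2. A minimal before/after of the conversion applied above:

    test = {}
    fname = 'foo.c'

    # Python 2 only:
    #   if not test.has_key(fname):
    # Portable (Python 2 and 3):
    if fname not in test:
        test[fname] = {}
    print(test)  # {'foo.c': {}}
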
diff --git a/utils/release/findRegressions-simple.py b/utils/release/findRegressions-simple.py
index 8d3b4cf..7bd1523 100755
--- a/utils/release/findRegressions-simple.py
+++ b/utils/release/findRegressions-simple.py
@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+
+from __future__ import print_function
 import re, string, sys, os, time, math
 
 DEBUG = 0
@@ -18,20 +20,20 @@
   fname = ''
   for t in r:
     if DEBUG:
-      print t
+      print(t)
 
     if t[0] == 'PASS' or t[0] == 'FAIL' :
       tmp = t[2].split('llvm-test/')
       
       if DEBUG:
-        print tmp
+        print(tmp)
 
       if len(tmp) == 2:
         fname = tmp[1].strip('\r\n')
       else:
         fname = tmp[0].strip('\r\n')
 
-      if not test.has_key(fname):
+      if fname not in test:
         test[fname] = {}
 
       test[fname][t[1] + ' state'] = t[0]
@@ -41,7 +43,7 @@
         n = t[0].split('RESULT-')[1]
 
         if DEBUG:
-          print "n == ", n;
+          print("n == ", n);
         
         if n == 'compile-success':
           test[fname]['compile time'] = float(t[2].split('program')[1].strip('\r\n'))
@@ -49,7 +51,7 @@
         elif n == 'exec-success':
           test[fname]['exec time'] = float(t[2].split('program')[1].strip('\r\n'))
           if DEBUG:
-            print test[fname][string.replace(n, '-success', '')]
+            print(test[fname][string.replace(n, '-success', '')])
 
         else :
           # print "ERROR!"
@@ -71,16 +73,16 @@
     passes[x] = ''
 
   for t in sorted(d_old.keys()) :
-    if d_new.has_key(t):
+    if t in d_new:
 
       # Check if the test passed or failed.
       for x in ['compile state', 'compile time', 'exec state', 'exec time']:
 
-        if not d_old[t].has_key(x) and not d_new[t].has_key(x):
+        if x not in d_old[t] and x not in d_new[t]:
           continue
 
-        if d_old[t].has_key(x):
-          if d_new[t].has_key(x):
+        if x in d_old[t]:
+          if x in d_new[t]:
 
             if d_old[t][x] == 'PASS':
               if d_new[t][x] != 'PASS':
@@ -96,11 +98,11 @@
           continue
 
         # For execution time, if there is no result it's a fail.
-        if not d_old[t].has_key(x) and not d_new[t].has_key(x):
+        if x not in d_old[t] and x not in d_new[t]:
           continue
-        elif not d_new[t].has_key(x):
+        elif x not in d_new[t]:
           regressions[x] += t + "\n"
-        elif not d_old[t].has_key(x):
+        elif x not in d_old[t]:
           passes[x] += t + "\n"
 
         if math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
@@ -120,36 +122,36 @@
       removed += t + "\n"
 
   if len(regressions['compile state']) != 0:
-    print 'REGRESSION: Compilation Failed'
-    print regressions['compile state']
+    print('REGRESSION: Compilation Failed')
+    print(regressions['compile state'])
 
   if len(regressions['exec state']) != 0:
-    print 'REGRESSION: Execution Failed'
-    print regressions['exec state']
+    print('REGRESSION: Execution Failed')
+    print(regressions['exec state'])
 
   if len(regressions['compile time']) != 0:
-    print 'REGRESSION: Compilation Time'
-    print regressions['compile time']
+    print('REGRESSION: Compilation Time')
+    print(regressions['compile time'])
 
   if len(regressions['exec time']) != 0:
-    print 'REGRESSION: Execution Time'
-    print regressions['exec time']
+    print('REGRESSION: Execution Time')
+    print(regressions['exec time'])
 
   if len(passes['compile state']) != 0:
-    print 'NEW PASSES: Compilation'
-    print passes['compile state']
+    print('NEW PASSES: Compilation')
+    print(passes['compile state'])
 
   if len(passes['exec state']) != 0:
-    print 'NEW PASSES: Execution'
-    print passes['exec state']
+    print('NEW PASSES: Execution')
+    print(passes['exec state'])
 
   if len(removed) != 0:
-    print 'REMOVED TESTS'
-    print removed
+    print('REMOVED TESTS')
+    print(removed)
 
 # Main
 if len(sys.argv) < 3 :
-  print 'Usage:', sys.argv[0], '<old log> <new log>'
+  print('Usage:', sys.argv[0], '<old log> <new log>')
   sys.exit(-1)
 
 d_old = parse(sys.argv[1])
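
In the llvm-gisel-cov.py change above, the percentage is computed inside the format tuple, and float() guards against Python 2 integer division truncating the ratio to 0. A worked example with made-up rule counts:

    # Hypothetical inputs standing in for the parsed coverage data.
    rules_for_backend = {'rule%d' % i: True for i in range(42)}
    num_rules = {'AArch64': 200}
    backend = 'AArch64'

    pct = (float(len(rules_for_backend)) / num_rules[backend]) * 100
    print("%s: %3.2f%% of rules covered" % (backend, pct))
    # AArch64: 21.00% of rules covered
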
diff --git a/utils/schedcover.py b/utils/schedcover.py
index 8c0aeeb..9532f1b 100644
--- a/utils/schedcover.py
+++ b/utils/schedcover.py
@@ -39,7 +39,7 @@
     ordered_table  = sorted(table.items(), key=operator.itemgetter(0))
     ordered_models = ["itinerary", "default"]
     ordered_models.extend(sorted(models))
-    ordered_models = filter(filter_model, ordered_models)
+    ordered_models = [m for m in ordered_models if filter_model(m)]
 
     # print header
     sys.stdout.write("instruction")
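
The schedcover.py change is needed because in Python 3 filter() returns a lazy one-shot iterator rather than a list, so code that indexes or re-iterates the result breaks; the list comprehension is the portable fix. In isolation:

    ordered_models = ['itinerary', 'default', 'SandyBridgeModel']

    def filter_model(m):
        # Stand-in predicate for illustration.
        return m != 'default'

    # Python 3: filter(...) would be an iterator, not a list.
    models = [m for m in ordered_models if filter_model(m)]
    print(models)  # ['itinerary', 'SandyBridgeModel']
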
diff --git a/utils/shuffle_fuzz.py b/utils/shuffle_fuzz.py
index eac3442..2d86cc0 100755
--- a/utils/shuffle_fuzz.py
+++ b/utils/shuffle_fuzz.py
@@ -13,6 +13,8 @@
 a bug.
 """
 
+from __future__ import print_function
+
 import argparse
 import itertools
 import random
@@ -105,22 +107,22 @@
                        else random.choice(range(shuffle_range))
                     for _ in itertools.repeat(None, width)]
                    for _ in itertools.repeat(None, args.max_shuffle_height - i)]
-                  for i in xrange(args.max_shuffle_height)]
+                  for i in range(args.max_shuffle_height)]
 
   if args.verbose:
     # Print out the shuffle sequence in a compact form.
-    print >>sys.stderr, ('Testing shuffle sequence "%s" (v%d%s):' %
-                         (args.seed, width, element_type))
+    print(('Testing shuffle sequence "%s" (v%d%s):' %
+                         (args.seed, width, element_type)), file=sys.stderr)
     for i, shuffles in enumerate(shuffle_tree):
-      print >>sys.stderr, '  tree level %d:' % (i,)
+      print('  tree level %d:' % (i,), file=sys.stderr)
       for j, s in enumerate(shuffles):
-        print >>sys.stderr, '    shuffle %d: %s' % (j, s)
-    print >>sys.stderr, ''
+        print('    shuffle %d: %s' % (j, s), file=sys.stderr)
+    print('', file=sys.stderr)
 
   # Symbolically evaluate the shuffle tree.
   inputs = [[int(j % element_modulus)
-             for j in xrange(i * width + 1, (i + 1) * width + 1)]
-            for i in xrange(args.max_shuffle_height + 1)]
+             for j in range(i * width + 1, (i + 1) * width + 1)]
+            for i in range(args.max_shuffle_height + 1)]
   results = inputs
   for shuffles in shuffle_tree:
     results = [[((results[i] if j < width else results[i + 1])[j % width]
@@ -128,15 +130,15 @@
                 for j in s]
                for i, s in enumerate(shuffles)]
   if len(results) != 1:
-    print >>sys.stderr, 'ERROR: Bad results: %s' % (results,)
+    print('ERROR: Bad results: %s' % (results,), file=sys.stderr)
     sys.exit(1)
   result = results[0]
 
   if args.verbose:
-    print >>sys.stderr, 'Which transforms:'
-    print >>sys.stderr, '  from: %s' % (inputs,)
-    print >>sys.stderr, '  into: %s' % (result,)
-    print >>sys.stderr, ''
+    print('Which transforms:', file=sys.stderr)
+    print('  from: %s' % (inputs,), file=sys.stderr)
+    print('  into: %s' % (result,), file=sys.stderr)
+    print('', file=sys.stderr)
 
   # The IR uses silly names for floating point types. We also need a same-size
   # integer type.
@@ -150,25 +152,25 @@
 
   # Now we need to generate IR for the shuffle function.
   subst = {'N': width, 'T': element_type, 'IT': integral_element_type}
-  print """
+  print("""
 define internal fastcc <%(N)d x %(T)s> @test(%(arguments)s) noinline nounwind {
 entry:""" % dict(subst,
                  arguments=', '.join(
                      ['<%(N)d x %(T)s> %%s.0.%(i)d' % dict(subst, i=i)
-                      for i in xrange(args.max_shuffle_height + 1)]))
+                      for i in range(args.max_shuffle_height + 1)])))
 
   for i, shuffles in enumerate(shuffle_tree):
    for j, s in enumerate(shuffles):
-    print """
+    print("""
   %%s.%(next_i)d.%(j)d = shufflevector <%(N)d x %(T)s> %%s.%(i)d.%(j)d, <%(N)d x %(T)s> %%s.%(i)d.%(next_j)d, <%(N)d x i32> <%(S)s>
 """.strip('\n') % dict(subst, i=i, next_i=i + 1, j=j, next_j=j + 1,
                        S=', '.join(['i32 ' + (str(si) if si != -1 else 'undef')
-                                    for si in s]))
+                                    for si in s])))
 
-  print """
+  print("""
   ret <%(N)d x %(T)s> %%s.%(i)d.0
 }
-""" % dict(subst, i=len(shuffle_tree))
+""" % dict(subst, i=len(shuffle_tree)))
 
   # Generate some string constants that we can use to report errors.
   for i, r in enumerate(result):
@@ -176,24 +178,24 @@
       s = ('FAIL(%(seed)s): lane %(lane)d, expected %(result)d, found %%d\n\\0A' %
            {'seed': args.seed, 'lane': i, 'result': r})
       s += ''.join(['\\00' for _ in itertools.repeat(None, 128 - len(s) + 2)])
-      print """
+      print("""
 @error.%(i)d = private unnamed_addr global [128 x i8] c"%(s)s"
-""".strip() % {'i': i, 's': s}
+""".strip() % {'i': i, 's': s})
 
   # Define a wrapper function which is marked 'optnone' to prevent
   # interprocedural optimizations from deleting the test.
-  print """
+  print("""
 define internal fastcc <%(N)d x %(T)s> @test_wrapper(%(arguments)s) optnone noinline {
   %%result = call fastcc <%(N)d x %(T)s> @test(%(arguments)s)
   ret <%(N)d x %(T)s> %%result
 }
 """ % dict(subst,
            arguments=', '.join(['<%(N)d x %(T)s> %%s.%(i)d' % dict(subst, i=i)
-                                for i in xrange(args.max_shuffle_height + 1)]))
+                                for i in range(args.max_shuffle_height + 1)])))
 
   # Finally, generate a main function which will trap if any lanes are mapped
   # incorrectly (in an observable way).
-  print """
+  print("""
 define i32 @main() {
 entry:
   ; Create a scratch space to print error messages.
@@ -212,18 +214,18 @@
                  '(<%(N)d x %(IT)s> <%(input)s> to <%(N)d x %(T)s>)' %
                  dict(subst, input=', '.join(['%(IT)s %(i)d' % dict(subst, i=i)
                                               for i in input])))
-                for input in inputs]))
+                for input in inputs])))
 
   # Test that each non-undef result lane contains the expected value.
   for i, r in enumerate(result):
     if r == -1:
-      print """
+      print("""
 test.%(i)d:
   ; Skip this lane, its value is undef.
   br label %%test.%(next_i)d
-""" % dict(subst, i=i, next_i=i + 1)
+""" % dict(subst, i=i, next_i=i + 1))
     else:
-      print """
+      print("""
 test.%(i)d:
   %%v.%(i)d = extractelement <%(N)d x %(IT)s> %%v.cast, i32 %(i)d
   %%cmp.%(i)d = icmp ne %(IT)s %%v.%(i)d, %(r)d
@@ -238,9 +240,9 @@
   call i32 @write(i32 2, i8* %%str.ptr, i32 %%length.%(i)d)
   call void @llvm.trap()
   unreachable
-""" % dict(subst, i=i, next_i=i + 1, r=r)
+""" % dict(subst, i=i, next_i=i + 1, r=r))
 
-  print """
+  print("""
 test.%d:
   ret i32 0
 }
@@ -249,7 +251,7 @@
 declare i32 @write(i32, i8*, i32)
 declare i32 @sprintf(i8*, i8*, ...)
 declare void @llvm.trap() noreturn nounwind
-""" % (len(result),)
+""" % (len(result),))
 
 if __name__ == '__main__':
   main()
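
shuffle_fuzz.py also swaps xrange for range: xrange no longer exists in Python 3, where range is itself lazy, and on Python 2 range eagerly builds a list, which is acceptable for the small loop bounds used here. A tiny illustration:

    max_shuffle_height = 3
    # Python 2 only:
    #   levels = [i for i in xrange(max_shuffle_height + 1)]
    # Portable:
    levels = [i for i in range(max_shuffle_height + 1)]
    print(levels)  # [0, 1, 2, 3]
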
diff --git a/utils/shuffle_select_fuzz_tester.py b/utils/shuffle_select_fuzz_tester.py
index 88d8d75..6b2f94a 100644
--- a/utils/shuffle_select_fuzz_tester.py
+++ b/utils/shuffle_select_fuzz_tester.py
@@ -13,6 +13,7 @@
 set of transforms you want to test, and run the program. If it crashes, it found
 a bug (an error message with the expected and actual result is printed).
 """
+from __future__ import print_function
 
 import random
 import uuid
@@ -145,7 +146,7 @@
 
   def calc_value(self):
     if self.value != None:
-      print 'Trying to calculate the value of a shuffle instruction twice'
+      print('Trying to calculate the value of a shuffle instruction twice')
       exit(1)
 
     result = []
@@ -179,7 +180,7 @@
 
   def calc_value(self):
     if self.value != None:
-      print 'Trying to calculate the value of a select instruction twice'
+      print('Trying to calculate the value of a select instruction twice')
       exit(1)
 
     result = []
@@ -343,7 +344,7 @@
                       help='Choose specific number of vector elements to be tested. (default: random)')
   args = parser.parse_args()
 
-  print '; The seed used for this test is ' + args.seed
+  print('; The seed used for this test is ' + args.seed)
 
   assert args.min_num_inputs < args.max_num_inputs , "Minimum value greater than maximum."
   assert args.type in [None, 'i8', 'i16', 'i32', 'i64', 'f32', 'f64'], "Illegal type."
@@ -362,14 +363,14 @@
 
   # print the actual test function by dumping the generated instructions.
   insts_str = ''.join([inst.dump() for inst in insts])
-  print test_template.format(ty = ty.dump(), inputs = inputs_str,
-                             instructions = insts_str, last_name = res.name)
+  print(test_template.format(ty = ty.dump(), inputs = inputs_str,
+                             instructions = insts_str, last_name = res.name))
 
   # Print the error message templates as global strings
   for i in range(len(res.value)):
     pad = ''.join(['\\00']*(31 - len(str(i)) - len(str(res.value[i]))))
-    print error_template.format(lane = str(i), exp = str(res.value[i]),
-                                padding = pad)
+    print(error_template.format(lane = str(i), exp = str(res.value[i]),
+                                padding = pad))
 
   # Prepare the runtime checks and failure handlers.
   scalar_ty = ty.get_scalar_type()
@@ -395,7 +396,7 @@
   inputs_values = [', '.join([scalar_ty.dump() + ' ' + str(i) for i in inp]) for inp in inputs_values]
   inputs = ', '.join([ty.dump() + ' <' + inp + '>' for inp in inputs_values])
 
-  print main_template.format(ty = ty.dump(), inputs = inputs, check_die = check_die)
+  print(main_template.format(ty = ty.dump(), inputs = inputs, check_die = check_die))
 
 
 if __name__ == '__main__':
diff --git a/utils/unicode-case-fold.py b/utils/unicode-case-fold.py
index 98c5683..ad8265b 100755
--- a/utils/unicode-case-fold.py
+++ b/utils/unicode-case-fold.py
@@ -17,9 +17,15 @@
 entries).
 """
 
+from __future__ import print_function
+
 import sys
 import re
-import urllib2
+try:
+    from urllib.request import urlopen
+except ImportError:
+    from urllib2 import urlopen
+
 
 # This variable will hold the body of the mappings function
 body = ""
@@ -93,7 +99,7 @@
     body += pattern.format(last, stride(b), modulo, shift(b[0]))
 
 current_block = []
-f = urllib2.urlopen(sys.argv[1])
+f = urlopen(sys.argv[1])
 for m in mappings(f):
     if len(current_block) == 0:
         current_block.append(m)
@@ -116,22 +122,22 @@
 
 dump_block(current_block)
 
-print '//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//'
-print '//'
-print '// This file was generated by utils/unicode-case-fold.py from the Unicode'
-print '// case folding database at'
-print '//   ', sys.argv[1]
-print '//'
-print '// To regenerate this file, run:'
-print '//   utils/unicode-case-fold.py \\'
-print '//     "{}" \\'.format(sys.argv[1])
-print '//     > lib/Support/UnicodeCaseFold.cpp'
-print '//'
-print '//===----------------------------------------------------------------------===//'
-print ''
-print '#include "llvm/Support/Unicode.h"'
-print ''
-print "int llvm::sys::unicode::foldCharSimple(int C) {"
-print body
-print "  return C;"
-print "}"
+print('//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//')
+print('//')
+print('// This file was generated by utils/unicode-case-fold.py from the Unicode')
+print('// case folding database at')
+print('//   ', sys.argv[1])
+print('//')
+print('// To regenerate this file, run:')
+print('//   utils/unicode-case-fold.py \\')
+print('//     "{}" \\'.format(sys.argv[1]))
+print('//     > lib/Support/UnicodeCaseFold.cpp')
+print('//')
+print('//===----------------------------------------------------------------------===//')
+print('')
+print('#include "llvm/Support/Unicode.h"')
+print('')
+print("int llvm::sys::unicode::foldCharSimple(int C) {")
+print(body)
+print("  return C;")
+print("}")
diff --git a/utils/update_analyze_test_checks.py b/utils/update_analyze_test_checks.py
index b9175ae..64e64e0 100755
--- a/utils/update_analyze_test_checks.py
+++ b/utils/update_analyze_test_checks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 """A script to generate FileCheck statements for 'opt' analysis tests.
 
@@ -29,6 +29,8 @@
 designed to be authoritative about what constitutes a good test!
 """
 
+from __future__ import print_function
+
 import argparse
 import itertools
 import os         # Used to advertise this file's name ("autogenerated_note").
@@ -66,12 +68,12 @@
 
   opt_basename = os.path.basename(args.opt_binary)
   if (opt_basename != "opt"):
-    print >>sys.stderr, 'ERROR: Unexpected opt name: ' + opt_basename
+    print('ERROR: Unexpected opt name: ' + opt_basename, file=sys.stderr)
     sys.exit(1)
 
   for test in args.tests:
     if args.verbose:
-      print >>sys.stderr, 'Scanning for RUN lines in test file: %s' % (test,)
+      print('Scanning for RUN lines in test file: %s' % (test,), file=sys.stderr)
     with open(test) as f:
       input_lines = [l.rstrip() for l in f]
 
@@ -85,20 +87,20 @@
         run_lines.append(l)
 
     if args.verbose:
-      print >>sys.stderr, 'Found %d RUN lines:' % (len(run_lines),)
+      print('Found %d RUN lines:' % (len(run_lines),), file=sys.stderr)
       for l in run_lines:
-        print >>sys.stderr, '  RUN: ' + l
+        print('  RUN: ' + l, file=sys.stderr)
 
     prefix_list = []
     for l in run_lines:
       (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
 
       if not tool_cmd.startswith(opt_basename + ' '):
-        print >>sys.stderr, 'WARNING: Skipping non-%s RUN line: %s' % (opt_basename, l)
+        print('WARNING: Skipping non-%s RUN line: %s' % (opt_basename, l), file=sys.stderr)
         continue
 
       if not filecheck_cmd.startswith('FileCheck '):
-        print >>sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l
+        print('WARNING: Skipping non-FileChecked RUN line: ' + l, file=sys.stderr)
         continue
 
       tool_cmd_args = tool_cmd[len(opt_basename):].strip()
@@ -119,8 +121,8 @@
         func_dict.update({prefix: dict()})
     for prefixes, opt_args in prefix_list:
       if args.verbose:
-        print >>sys.stderr, 'Extracted opt cmd: ' + opt_basename + ' ' + opt_args
-        print >>sys.stderr, 'Extracted FileCheck prefixes: ' + str(prefixes)
+        print('Extracted opt cmd: ' + opt_basename + ' ' + opt_args, file=sys.stderr)
+        print('Extracted FileCheck prefixes: ' + str(prefixes), file=sys.stderr)
 
       raw_tool_outputs = common.invoke_tool(args.opt_binary, opt_args, test)
 
@@ -134,7 +136,7 @@
     is_in_function_start = False
     prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
     if args.verbose:
-      print >>sys.stderr, 'Rewriting FileCheck prefixes: %s' % (prefix_set,)
+      print('Rewriting FileCheck prefixes: %s' % (prefix_set,), file=sys.stderr)
     output_lines = []
     output_lines.append(autogenerated_note)
 
@@ -181,7 +183,7 @@
       is_in_function = is_in_function_start = True
 
     if args.verbose:
-      print>>sys.stderr, 'Writing %d lines to %s...' % (len(output_lines), test)
+      print('Writing %d lines to %s...' % (len(output_lines), test), file=sys.stderr)
 
     with open(test, 'wb') as f:
       f.writelines([l + '\n' for l in output_lines])
diff --git a/utils/update_llc_test_checks.py b/utils/update_llc_test_checks.py
index 09b49a7..960fee2 100755
--- a/utils/update_llc_test_checks.py
+++ b/utils/update_llc_test_checks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 """A test case update script.
 
@@ -7,6 +7,8 @@
 a single test function.
 """
 
+from __future__ import print_function
+
 import argparse
 import os         # Used to advertise this file's name ("autogenerated_note").
 import string
@@ -42,7 +44,7 @@
 
   for test in args.tests:
     if args.verbose:
-      print >>sys.stderr, 'Scanning for RUN lines in test file: %s' % (test,)
+      print('Scanning for RUN lines in test file: %s' % (test,), file=sys.stderr)
     with open(test) as f:
       input_lines = [l.rstrip() for l in f]
 
@@ -63,9 +65,9 @@
         run_lines.append(l)
 
     if args.verbose:
-      print >>sys.stderr, 'Found %d RUN lines:' % (len(run_lines),)
+      print('Found %d RUN lines:' % (len(run_lines),), file=sys.stderr)
       for l in run_lines:
-        print >>sys.stderr, '  RUN: ' + l
+        print('  RUN: ' + l, file=sys.stderr)
 
     run_list = []
     for l in run_lines:
@@ -81,11 +83,11 @@
       if len(commands) > 1:
         filecheck_cmd = commands[1]
       if not llc_cmd.startswith('llc '):
-        print >>sys.stderr, 'WARNING: Skipping non-llc RUN line: ' + l
+        print('WARNING: Skipping non-llc RUN line: ' + l, file=sys.stderr)
         continue
 
       if not filecheck_cmd.startswith('FileCheck '):
-        print >>sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l
+        print('WARNING: Skipping non-FileChecked RUN line: ' + l, file=sys.stderr)
         continue
 
       llc_cmd_args = llc_cmd[len('llc'):].strip()
@@ -107,12 +109,12 @@
         func_dict.update({prefix: dict()})
     for prefixes, llc_args, triple_in_cmd in run_list:
       if args.verbose:
-        print >>sys.stderr, 'Extracted LLC cmd: llc ' + llc_args
-        print >>sys.stderr, 'Extracted FileCheck prefixes: ' + str(prefixes)
+        print('Extracted LLC cmd: llc ' + llc_args, file=sys.stderr)
+        print('Extracted FileCheck prefixes: ' + str(prefixes), file=sys.stderr)
 
       raw_tool_output = common.invoke_tool(args.llc_binary, llc_args, test)
       if not (triple_in_cmd or triple_in_ir):
-        print >>sys.stderr, "Cannot find a triple. Assume 'x86'"
+        print("Cannot find a triple. Assume 'x86'", file=sys.stderr)
 
       asm.build_function_body_dictionary_for_triple(args, raw_tool_output,
           triple_in_cmd or triple_in_ir or 'x86', prefixes, func_dict)
@@ -122,7 +124,7 @@
     func_name = None
     prefix_set = set([prefix for p in run_list for prefix in p[0]])
     if args.verbose:
-      print >>sys.stderr, 'Rewriting FileCheck prefixes: %s' % (prefix_set,)
+      print('Rewriting FileCheck prefixes: %s' % (prefix_set,), file=sys.stderr)
     output_lines = []
     output_lines.append(autogenerated_note)
 
@@ -167,7 +169,7 @@
       is_in_function = is_in_function_start = True
 
     if args.verbose:
-      print>>sys.stderr, 'Writing %d lines to %s...' % (len(output_lines), test)
+      print('Writing %d lines to %s...' % (len(output_lines), test), file=sys.stderr)
 
     with open(test, 'wb') as f:
       f.writelines([l + '\n' for l in output_lines])
diff --git a/utils/update_mca_test_checks.py b/utils/update_mca_test_checks.py
index 54d1cb4..06fd655 100755
--- a/utils/update_mca_test_checks.py
+++ b/utils/update_mca_test_checks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 """A test case update script.
 
diff --git a/utils/update_test_checks.py b/utils/update_test_checks.py
index 739fe04..6d0bf04 100755
--- a/utils/update_test_checks.py
+++ b/utils/update_test_checks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 
 """A script to generate FileCheck statements for 'opt' regression tests.
 
@@ -29,6 +29,8 @@
 designed to be authoritative about what constitutes a good test!
 """
 
+from __future__ import print_function
+
 import argparse
 import itertools
 import os         # Used to advertise this file's name ("autogenerated_note").
@@ -66,12 +68,12 @@
 
   opt_basename = os.path.basename(args.opt_binary)
   if (opt_basename != "opt"):
-    print >>sys.stderr, 'ERROR: Unexpected opt name: ' + opt_basename
+    print('ERROR: Unexpected opt name: ' + opt_basename, file=sys.stderr)
     sys.exit(1)
 
   for test in args.tests:
     if args.verbose:
-      print >>sys.stderr, 'Scanning for RUN lines in test file: %s' % (test,)
+      print('Scanning for RUN lines in test file: %s' % (test,), file=sys.stderr)
     with open(test) as f:
       input_lines = [l.rstrip() for l in f]
 
@@ -85,20 +87,20 @@
         run_lines.append(l)
 
     if args.verbose:
-      print >>sys.stderr, 'Found %d RUN lines:' % (len(run_lines),)
+      print('Found %d RUN lines:' % (len(run_lines),), file=sys.stderr)
       for l in run_lines:
-        print >>sys.stderr, '  RUN: ' + l
+        print('  RUN: ' + l, file=sys.stderr)
 
     prefix_list = []
     for l in run_lines:
       (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
 
       if not tool_cmd.startswith(opt_basename + ' '):
-        print >>sys.stderr, 'WARNING: Skipping non-%s RUN line: %s' % (opt_basename, l)
+        print('WARNING: Skipping non-%s RUN line: %s' % (opt_basename, l), file=sys.stderr)
         continue
 
       if not filecheck_cmd.startswith('FileCheck '):
-        print >>sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l
+        print('WARNING: Skipping non-FileChecked RUN line: ' + l, file=sys.stderr)
         continue
 
       tool_cmd_args = tool_cmd[len(opt_basename):].strip()
@@ -119,8 +121,8 @@
         func_dict.update({prefix: dict()})
     for prefixes, opt_args in prefix_list:
       if args.verbose:
-        print >>sys.stderr, 'Extracted opt cmd: ' + opt_basename + ' ' + opt_args
-        print >>sys.stderr, 'Extracted FileCheck prefixes: ' + str(prefixes)
+        print('Extracted opt cmd: ' + opt_basename + ' ' + opt_args, file=sys.stderr)
+        print('Extracted FileCheck prefixes: ' + str(prefixes), file=sys.stderr)
 
       raw_tool_output = common.invoke_tool(args.opt_binary, opt_args, test)
       common.build_function_body_dictionary(
@@ -131,7 +133,7 @@
     is_in_function_start = False
     prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
     if args.verbose:
-      print >>sys.stderr, 'Rewriting FileCheck prefixes: %s' % (prefix_set,)
+      print('Rewriting FileCheck prefixes: %s' % (prefix_set,), file=sys.stderr)
     output_lines = []
     output_lines.append(autogenerated_note)
 
@@ -178,7 +180,7 @@
       is_in_function = is_in_function_start = True
 
     if args.verbose:
-      print>>sys.stderr, 'Writing %d lines to %s...' % (len(output_lines), test)
+      print('Writing %d lines to %s...' % (len(output_lines), test), file=sys.stderr)
 
     with open(test, 'wb') as f:
       f.writelines([l + '\n' for l in output_lines])
diff --git a/utils/wciia.py b/utils/wciia.py
index eaa232f..4269db2 100755
--- a/utils/wciia.py
+++ b/utils/wciia.py
@@ -20,6 +20,7 @@
 
 """
 
+from __future__ import print_function
 import os
 
 code_owners = {}
@@ -97,7 +98,7 @@
 import sys
 
 if len(sys.argv) < 2:
-	print "usage " + sys.argv[0] + " file_or_folder"  
+	print("usage " + sys.argv[0] + " file_or_folder")
 	exit(-1)
 	
 # the path we are checking
@@ -105,13 +106,13 @@
 
 # check if this is real path
 if not os.path.exists(path):
-	print "path (" + path + ") does not exist"
+	print("path (" + path + ") does not exist")
 	exit(-1)
 	
 owners_name = find_owners(path)
 
 # be grammatically correct
-print "The owner(s) of the (" + path + ") is(are) : " + str(owners_name)
+print("The owner(s) of the (" + path + ") is(are) : " + str(owners_name))
 
 exit(0)
 
@@ -119,7 +120,7 @@
 # not yet used 
 root = "."
 for dir,subdirList,fileList in os.walk( root , topdown=False ) :
-   print "dir :" , dir
+   print("dir :" , dir)
    for fname in fileList :
-      print "-" , fname
-   print
+      print("-" , fname)
+   print()